Module phiml.math
Vectorized operations, tensors with named dimensions.
This package provides a common interface for tensor operations. It internally uses NumPy, TensorFlow or PyTorch.
Main classes: Tensor, Shape, DType, Extrapolation.
The provided operations are not implemented directly. Instead, they delegate the actual computation to either NumPy, TensorFlow or PyTorch, depending on the configuration. This allows the user to write simulation code once and have it run with various computation backends.
See the documentation at https://tum-pbs.github.io/PhiML/
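For example, the same code can run on any installed backend. A minimal sketch with illustrative dimension names (switching to 'torch' assumes PyTorch is installed):
>>> from phiml import math
>>> from phiml.math import spatial
>>> x = math.random_uniform(spatial(x=4, y=3))  # created with the default NumPy backend
>>> math.use('torch')                           # switch the default backend
>>> y = math.random_uniform(spatial(x=4, y=3))  # now backed by PyTorch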
Sub-modules
phiml.math.extrapolation
-
Extrapolations are used for padding tensors and sampling coordinates lying outside the tensor bounds. Standard extrapolations are listed as global …
phiml.math.magic
-
Magic methods allow custom classes to be compatible with various functions defined in phiml.math, analogous to how implementing __hash__ allows …
Global variables
var INF
-
Floating-point representation of positive infinity.
var NAN
-
Floating-point representation of NaN (not a number).
var NUMPY
-
Default backend for NumPy arrays and SciPy objects.
var PI
-
Value of π to double precision.
var f
-
Automatic mapper for broadcast string formatting of tensors, resulting in tensors of strings. Used with the special -f- syntax.
Examples
>>> from phiml.math import f
>>> -f-f'String containing {tensor1} and {tensor2:.1f}'
# Result is a str tensor containing all dims of tensor1 and tensor2
var math
-
Convenience alias for the module phiml.math. This way, you can import the module and contained items in one line.
>>> from phiml.math import math, Tensor, wrap, extrapolation, l2_loss
Functions
def abs(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Computes ||x||₁. Complex x results in float values of matching precision.
Note: The gradient of this operation is undefined at x=0. TensorFlow and PyTorch return 0 while Jax returns 1.
Args
x
- Tensor or PhiTreeNode
Returns
Absolute value of x of same type as x.
def abs_square(complex_values: Union[phiml.math._tensors.Tensor, complex]) ‑> phiml.math._tensors.Tensor
def all(boolean_value, dim: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function non_batch>) ‑> phiml.math._tensors.Tensor
-
Tests whether all entries of boolean_value are True along the specified dimensions.
Args
boolean_value
- Tensor or list / tuple of Tensors.
dim
-
Dimension or dimensions to be reduced. One of
- None to reduce all non-batch dimensions
- str containing a single dimension or a comma-separated list of dimensions
- Tuple[str] or List[str]
- Shape
- batch(), instance(), spatial(), channel() to select dimensions by type
- '0' when isinstance(value, (tuple, list)) to reduce the sequence of Tensors
Returns
Tensor without the reduced dimensions.
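Example (a minimal sketch; values and dimension names are illustrative):
>>> from phiml import math
>>> from phiml.math import wrap, spatial
>>> data = wrap([[True, True], [True, False]], spatial('x,y'))
>>> math.all(data)           # reduces all non-batch dims, yielding False
>>> math.all(data, dim='x')  # reduces only x, yielding True, False along y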
def all_available(*values) ‑> bool
-
Tests if all tensors contained in the given values are currently known and can be read. Placeholder tensors used to trace functions for just-in-time compilation or matrix construction are considered not available, even when they hold example values like with PyTorch's JIT.
Tensors are not available during jit_compile(), jit_compile_linear() or while using TensorFlow's legacy graph mode.
Tensors are typically available when the backend operates in eager mode and is not currently tracing a function.
This can be used instead of the native checks
- PyTorch: torch._C._get_tracing_state()
- TensorFlow: tf.executing_eagerly()
- Jax: isinstance(x, jax.core.Tracer)
Args
values
- Tensors to check.
Returns
True if no value is a placeholder or being traced, False otherwise.
def always_close(t1: Union[numbers.Number, phiml.math._tensors.Tensor, bool], t2: Union[numbers.Number, phiml.math._tensors.Tensor, bool], rel_tolerance=1e-05, abs_tolerance=0, equal_nan=False) ‑> bool
-
Checks whether two tensors are guaranteed to be close() in all values. Unlike close(), this function can be used with JIT compilation and with tensors of incompatible shapes. Incompatible tensors are never close.
If one of the given tensors is being traced, the tensors are only equal if they reference the same native tensor. Otherwise, an element-wise equality check is performed.
See Also: close().
Args
t1
- First tensor or number to compare.
t2
- Second tensor or number to compare.
rel_tolerance
- Relative tolerance, only used if neither tensor is traced.
abs_tolerance
- Absolute tolerance, only used if neither tensor is traced.
equal_nan
- If True, tensors are considered close if they are NaN in the same places.
Returns
bool
def angle(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Compute the angle of a complex number. This is equal to atan(Im/Re) for most values.
Args
x
Tensor
or PhiTreeNode
Returns
Angle of complex number in radians.
def any(boolean_value, dim: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function non_batch>) ‑> phiml.math._tensors.Tensor
-
Tests whether any entry of boolean_value is True along the specified dimensions.
Args
boolean_value
- Tensor or list / tuple of Tensors.
dim
-
Dimension or dimensions to be reduced. One of
- None to reduce all non-batch dimensions
- str containing a single dimension or a comma-separated list of dimensions
- Tuple[str] or List[str]
- Shape
- batch(), instance(), spatial(), channel() to select dimensions by type
- '0' when isinstance(value, (tuple, list)) to reduce the sequence of Tensors
Returns
Tensor without the reduced dimensions.
def arange(dim: phiml.math._shape.Shape, start_or_stop: Optional[int] = None, stop: Optional[int] = None, step=1, backend=None)
-
Returns evenly spaced values between start and stop. If only one limit is given, 0 is used for the start.
See Also: range_tensor(), linspace(), meshgrid().
Args
dim
- Dimension name and type as Shape object. The size of dim is interpreted as stop unless start_or_stop is specified.
start_or_stop
- (Optional) int. Interpreted as start if stop is specified as well. Otherwise this is stop.
stop
- (Optional) int. stop value.
step
- Distance between values.
backend
- Backend to use for creating the tensor. If unspecified, uses the current default.
Returns
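Example (a minimal sketch):
>>> from phiml.math import arange, spatial
>>> arange(spatial(x=5))           # 0, 1, 2, 3, 4 along x
>>> arange(spatial('x'), 2, 8, 2)  # 2, 4, 6 along x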
def arccos(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Computes the inverse of cos(x) of the Tensor or PhiTreeNode x. For real arguments, the result lies in the range [0, π].
def arccosh(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Computes the inverse of cosh(x) of the Tensor or PhiTreeNode x.
def arcsin(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Computes the inverse of sin(x) of the Tensor or PhiTreeNode x. For real arguments, the result lies in the range [-π/2, π/2].
def arcsinh(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Computes the inverse of sinh(x) of the Tensor or PhiTreeNode x.
def arctan(x: ~TensorOrTree, divide_by=None) ‑> ~TensorOrTree
-
Computes the inverse of tan(x) of the Tensor or PhiTreeNode x.
Args
x
- Input. The single-argument arctan() function cannot output π/2 or -π/2 since tan(π/2) is infinite.
divide_by
- If specified, computes arctan(x/divide_by) so that it can return π/2 and -π/2. This is equivalent to the common arctan2 function.
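Example (a minimal sketch):
>>> from phiml.math import arctan, wrap
>>> arctan(wrap(1.))                # π/4
>>> arctan(wrap(1.), divide_by=0.)  # π/2, like the common arctan2(1, 0)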
def arctanh(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Computes the inverse of tanh(x) of the Tensor or PhiTreeNode x.
def argmax(x: phiml.math._tensors.Tensor, dim: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None], index_dim=(indexᶜ=None))
-
Finds the maximum value along one or multiple dimensions and returns the corresponding index.
Args
x
- Tensor
dim
- Dimensions along which the maximum should be determined. These are reduced in the operation.
index_dim
- Dimension listing the index components for multidimensional argmax.
Returns
Index tensor idx, such that x[idx] = max(x).
def argmin(x: phiml.math._tensors.Tensor, dim: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None], index_dim=(indexᶜ=None))
-
Finds the minimum value along one or multiple dimensions and returns the corresponding index.
Args
x
- Tensor
dim
- Dimensions along which the minimum should be determined. These are reduced in the operation.
index_dim
- Dimension listing the index components for multidimensional argmin.
Returns
Index tensor idx, such that x[idx] = min(x).
def as_extrapolation(obj) ‑> Extrapolation
-
Creates an Extrapolation from a descriptor object.
Args
obj
-
Extrapolation specification, one of the following:
- Extrapolation
- Primitive name as str: periodic, zero, one, zero-gradient, symmetric, symmetric-gradient, antisymmetric, reflect, antireflect
- dict containing exactly the keys 'normal' and 'tangential'
- dict mapping spatial dimension names to extrapolations
Returns
Extrapolation
def assert_close(*values, rel_tolerance: float = 1e-05, abs_tolerance: float = 0, msg: str = '', verbose: bool = True, equal_nan=True)
-
Checks that all given tensors have equal values within the specified tolerance. Raises an AssertionError if the values of this tensor are not within tolerance of any of the other tensors.
Does not check that the shapes match as long as they can be broadcast to a common shape.
Args
values
- Tensors or native tensors or numbers or sequences of numbers.
rel_tolerance
- Relative tolerance.
abs_tolerance
- Absolute tolerance.
msg
- Optional error message.
verbose
- Whether to print conflicting values.
equal_nan
- If False, NaN values will always trigger an assertion error.
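Example (a minimal sketch; values are illustrative):
>>> from phiml.math import assert_close, wrap, spatial
>>> a = wrap([1., 2.], spatial('x'))
>>> assert_close(a, a * (1 + 1e-6))          # passes, within the default relative tolerance
>>> assert_close(a, 1.5, abs_tolerance=0.5)  # the scalar is broadcast; passes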
def at_max(value, key: phiml.math._tensors.Tensor, dim: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function non_batch>)
-
Looks up the values of value at the positions where the maximum values in key are located along dim.
Args
value
- Tensors or trees from which to look up and return values. These tensors are indexed at the maximum index in key. You can pass arange() (the type) to retrieve the picked indices.
key
- Tensor containing at least one dimension of dim. The maximum index of key is determined.
dim
- Dimensions along which to compute the maximum of key.
Returns
The values of value at the positions where the maximum values in key are located along dim.
def at_max_neighbor(values, key_grid: phiml.math._tensors.Tensor, dims: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function spatial>, padding: Union[Extrapolation, float, phiml.math._tensors.Tensor, str, None] = None, offsets=(0, 1), diagonal=True) ‑> phiml.math._tensors.Tensor
-
Computes the max of neighboring values in key_grid along each dimension in dims and retrieves the corresponding values from values.
Args
values
- Values to look up and return. Tensor or tree structure.
key_grid
- Values to compare.
dims
- Dimensions along which neighbors should be reduced.
padding
- Padding at the upper edges of key_grid along dims. If not None, the result tensor will have the same shape as key_grid.
offsets
- Relative neighbor indices as int. 0 refers to self, negative values to earlier (left) neighbors and positive values to later (right) neighbors.
diagonal
- If True, performs sequential reductions along each axis, determining the maximum value along each axis independently. If the values of key_grid depend on values or their position in the grid, this can lead to undesired behavior.
Returns
Tree or Tensor like values.
def at_min(value, key: phiml.math._tensors.Tensor, dim: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function non_batch>)
-
Looks up the values of value at the positions where the minimum values in key are located along dim.
Args
value
- Tensors or trees from which to look up and return values. These tensors are indexed at the minimum index in key. You can pass arange() (the type) to retrieve the picked indices.
key
- Tensor containing at least one dimension of dim. The minimum index of key is determined.
dim
- Dimensions along which to compute the minimum of key.
Returns
The values of value at the positions where the minimum values in key are located along dim.
def at_min_neighbor(values, key_grid: phiml.math._tensors.Tensor, dims: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function spatial>, padding: Union[Extrapolation, float, phiml.math._tensors.Tensor, str, None] = None, offsets=(0, 1), diagonal=True) ‑> phiml.math._tensors.Tensor
-
Computes the min of neighboring values in key_grid along each dimension in dims and retrieves the corresponding values from values.
Args
values
- Values to look up and return.
key_grid
- Values to compare.
dims
- Dimensions along which neighbors should be reduced.
padding
- Padding at the upper edges of key_grid along dims. If not None, the result tensor will have the same shape as key_grid.
offsets
- Relative neighbor indices as int. 0 refers to self, negative values to earlier (left) neighbors and positive values to later (right) neighbors.
diagonal
- If True, performs sequential reductions along each axis, determining the minimum value along each axis independently. If the values of key_grid depend on values or their position in the grid, this can lead to undesired behavior.
Returns
Tree or Tensor like values.
def b2i(value: ~PhiTreeNodeType) ‑> ~PhiTreeNodeType
-
Change the type of all batch dimensions of value to instance dimensions. See rename_dims().
def batch(*args, **dims: Union[int, str, tuple, list, phiml.math._shape.Shape, ForwardRef('Tensor')])
-
Returns the batch dimensions of an existing Shape or creates a new Shape with only batch dimensions.
Usage for filtering batch dimensions:
>>> batch_dims = batch(shape)
>>> batch_dims = batch(tensor)
Usage for creating a Shape with only batch dimensions:
>>> batch_shape = batch('undef', batch=2)
(batch=2, undef=None)
Here, the dimension undef is created with an undefined size of None. Undefined sizes are automatically filled in by tensor(), wrap(), stack() and concat().
To create a shape with multiple types, use merge_shapes(), concat_shapes() or the syntax shape1 & shape2.
See Also: channel(), spatial(), instance()
Args
*args
- Either a Shape or Tensor to filter, or names of dimensions with undefined sizes.
**dims
- Dimension sizes and names. Must be empty when used as a filter operation.
Returns
Shape containing only dimensions of type batch.
def boolean_mask(x, dim: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None], mask: phiml.math._tensors.Tensor, preserve_names=False)
-
Discards values x.dim[i] where mask.dim[i]=False. All dimensions of mask that are not dim are treated as batch dimensions.
Alternative syntax: x.dim[mask].
Implementations:
- NumPy: Slicing
- PyTorch: masked_select
- TensorFlow: tf.boolean_mask
- Jax: Slicing
Args
x
- Tensor or Sliceable.
dim
- Dimension of x along which to discard slices.
mask
- Boolean Tensor marking which values to keep. Must have the dimension dim matching x.
preserve_names
- This only supports uniform 1D slicing. Batched slicing will remove item names if incompatible.
Returns
Selected values of x as Tensor with dimensions from x and mask.
def broadcast(function=None, dims=<function shape>, range=builtins.range, unwrap_scalars=True, simplify=False)
-
Function decorator for non-vectorized functions. When passing Tensor arguments to a broadcast function, the function is called once for each slice of the tensor. How tensors are sliced is determined by dims. Decorating a function with broadcast() is equal to passing the function to phi.math.map().
See Also: map_()
Args
function
- Function to broadcast.
dims
- Dimensions which should be sliced. function is called once for each element in dims, i.e. dims.volume times. If dims is not specified, all dimensions from the Sliceable values in args and kwargs will be mapped.
range
- Optional range function. Can be used to generate tqdm output by passing trange.
unwrap_scalars
- If True, passes the contents of scalar Tensors instead of the tensor objects.
simplify
- If True, reduces constant dims of output tensors that don't vary across broadcast slices.
Returns
Broadcast function
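Example (a minimal sketch; add_one is a hypothetical helper):
>>> from phiml.math import broadcast, wrap, spatial
>>> @broadcast
... def add_one(x):
...     return x + 1
>>> add_one(wrap([1, 2, 3], spatial('x')))  # calls add_one once per slice, yielding 2, 3, 4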
def c2b(value: ~PhiTreeNodeType) ‑> ~PhiTreeNodeType
-
Change the type of all channel dimensions of value to batch dimensions. See rename_dims().
def c2d(value: ~PhiTreeNodeType) ‑> ~PhiTreeNodeType
-
Change the type of all channel dimensions of value to dual dimensions. See rename_dims().
def cast(x: ~MagicType, dtype: Union[phiml.backend._dtype.DType, type]) ‑> ~OtherMagicType
-
Casts x to a different data type.
Implementations:
- NumPy: x.astype()
- PyTorch: x.to()
- TensorFlow: tf.cast
- Jax: jax.numpy.array
See Also: to_float(), to_int32(), to_int64(), to_complex().
Args
x
- Tensor
dtype
- New data type as DType or Python type.
Returns
Tensor with data type dtype.
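Example (a minimal sketch with illustrative values):
>>> from phiml.math import cast, wrap, spatial
>>> cast(wrap([1.7, 2.3], spatial('x')), int)  # 1, 2 as an integer tensor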
def ceil(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Computes ⌈x⌉ of the Tensor or PhiTreeNode x.
def channel(*args, **dims: Union[int, str, tuple, list, phiml.math._shape.Shape, ForwardRef('Tensor')])
-
Returns the channel dimensions of an existing Shape or creates a new Shape with only channel dimensions.
Usage for filtering channel dimensions:
>>> channel_dims = channel(shape)
>>> channel_dims = channel(tensor)
Usage for creating a Shape with only channel dimensions:
>>> channel_shape = channel('undef', vector=2)
(vector=2, undef=None)
Here, the dimension undef is created with an undefined size of None. Undefined sizes are automatically filled in by tensor(), wrap(), stack() and concat().
To create a shape with multiple types, use merge_shapes(), concat_shapes() or the syntax shape1 & shape2.
See Also: spatial(), batch(), instance()
Args
*args
- Either a Shape or Tensor to filter, or names of dimensions with undefined sizes.
**dims
- Dimension sizes and names. Must be empty when used as a filter operation.
Returns
Shape containing only dimensions of type channel.
def choose_backend(*values, prefer_default=False) ‑> phiml.backend._backend.Backend
-
Choose the backend for the given Tensor or native tensor values. Backends need to be registered to be available, e.g. via init() or set_global_default_backend().
Args
*values
- Sequence of Tensors, native tensors or constants.
prefer_default
- Whether to always select the default backend if it can work with values, see default_backend().
Returns
The selected phiml.math.backend.Backend
def clip(x: phiml.math._tensors.Tensor, lower_limit: Union[float, phiml.math._tensors.Tensor] = 0, upper_limit: Union[float, phiml.math._tensors.Tensor, phiml.math._shape.Shape] = 1)
-
Limits the values of the Tensor x to lie between lower_limit and upper_limit (inclusive).
def clip_length(vec: phiml.math._tensors.Tensor, min_len=0, max_len=1, vec_dim: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function channel>, eps: Union[float, phiml.math._tensors.Tensor] = None)
-
Clips the length of a vector to the interval [min_len, max_len] while keeping the direction. Zero-vectors remain zero-vectors.
Args
vec
- Tensor
min_len
- Lower clipping threshold.
max_len
- Upper clipping threshold.
vec_dim
- Dimensions to compute the length over. By default, all channel dimensions are used to compute the vector length.
eps
- Minimum vector length. Use to avoid inf gradients for zero-length vectors.
Returns
def close(*tensors, rel_tolerance: Union[float, phiml.math._tensors.Tensor] = 1e-05, abs_tolerance: Union[float, phiml.math._tensors.Tensor] = 0, equal_nan=False, reduce=<function shape>) ‑> bool
-
Checks whether all tensors have equal values within the specified tolerance.
Does not check that the shapes exactly match; if shapes are incompatible, returns False. Unlike with always_close(), all shapes must be compatible and tensors with different shapes are reshaped before comparing.
See Also: always_close().
Args
*tensors
- At least two Tensor or tensor-like objects or None. The shapes of all tensors must be compatible but not all tensors must have all dimensions. If any argument is None, returns True only if all are None.
rel_tolerance
- Relative tolerance
abs_tolerance
- Absolute tolerance
equal_nan
- If True, tensors are considered close if they are NaN in the same places.
Returns
bool, whether all given tensors are equal to the first tensor within the specified tolerance.
def closest_grid_values(grid: phiml.math._tensors.Tensor, coordinates: phiml.math._tensors.Tensor, extrap: e_.Extrapolation, stack_dim_prefix='closest_', **kwargs)
-
Finds the neighboring grid points in all directions and returns their values. The result will have 2^d values for each vector in coordinates in d dimensions.
If coordinates does not have a channel dimension with item names, the spatial dims of grid will be used.
Args
grid
- Grid data. The grid is spanned by the spatial dimensions of the tensor.
coordinates
- Tensor with 1 channel dimension holding vectors pointing to locations in grid index space.
extrap
- Grid extrapolation.
stack_dim_prefix
- For each spatial dimension dim, stacks lower and upper closest values along dimension stack_dim_prefix+dim.
kwargs
- Additional information for the extrapolation.
Returns
Tensor of shape (batch, coord_spatial, grid_spatial=(2, 2,…), grid_channel)
def concat(values: Sequence[~PhiTreeNodeType], dim: Union[str, phiml.math._shape.Shape], /, expand_values=False, **kwargs) ‑> ~PhiTreeNodeType
-
Concatenates a sequence of Shapable objects, e.g. Tensor, along one dimension. All values must have the same spatial, instance and channel dimensions and their sizes must be equal, except for dim. Batch dimensions will be added as needed.
Args
values
- Tuple or list of Shapable, such as Tensor
dim
- Concatenation dimension, must be present in all values. The size along dim is determined from values and can be set to undefined (None). Alternatively, a str of the form 't->name:t' can be specified, where t is one of b d i s c denoting the dimension type. This first packs all dimensions of the input into a new dim with the given name and type, then concatenates the values along this dim.
expand_values
- If True, will first add missing dimensions to all values, not just batch dimensions. This allows tensors with different dimensions to be concatenated. The resulting tensor will have all dimensions that are present in values.
**kwargs
- Additional keyword arguments required by specific implementations. Adding spatial dimensions to fields requires the bounds: Box argument specifying the physical extent of the new dimensions. Adding batch dimensions must always work without keyword arguments.
Returns
Concatenated Tensor
Examples
>>> concat([math.zeros(batch(b=10)), math.ones(batch(b=10))], 'b')
(bᵇ=20) 0.500 ± 0.500 (0e+00...1e+00)
>>> concat([vec(x=1, y=0), vec(z=2.)], 'vector')
(x=1.000, y=0.000, z=2.000) float64
def concat_shapes(*shapes: Union[phiml.math._shape.Shape, Any]) ‑> phiml.math._shape.Shape
-
Creates a Shape listing the dimensions of all shapes in the given order.
See Also: merge_shapes().
Args
*shapes
- Shapes to concatenate. No two shapes may contain a dimension with the same name.
Returns
Combined Shape.
def conjugate(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Args
x
- Real or complex Tensor or PhiTreeNode or native tensor.
Returns
Complex conjugate of x if x is complex, else x.
def const_vec(value: Union[float, phiml.math._tensors.Tensor], dim: Union[phiml.math._shape.Shape, tuple, list, str])
-
Creates a single-dimension tensor with all values equal to value. value is not converted to the default backend, even when it is a Python primitive.
Args
value
- Value for filling the vector.
dim
- Either a single-dimension non-spatial Shape or a Shape consisting of any number of spatial dimensions. In the latter case, a new channel dimension named 'vector' will be created from the spatial shape.
Returns
def contains(values: phiml.math._tensors.Tensor, query: phiml.math._tensors.Tensor, feature_dims: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function channel>) ‑> phiml.math._tensors.Tensor
-
For each query item, checks whether it is contained in values.
See Also: count_occurrences().
Args
values
- Data Tensor containing all feature_dims. All non-batch dims not specified as feature_dims are flattened.
query
- Items to check for. Must contain all feature_dims.
feature_dims
- One item is considered to be the set of all values along feature_dims. The number of items in a tensor is given by all dims except feature_dims.
Returns
Boolean Tensor matching query without feature_dims.
def convert(x, backend: phiml.backend._backend.Backend = None, use_dlpack=True)
-
Convert the native representation of a Tensor or PhiTreeNode to the native format of backend.
Warning: This operation breaks the automatic differentiation chain.
See Also: phiml.math.backend.convert().
Args
x
- Tensor to convert. If x is a PhiTreeNode, its variable attributes are converted.
backend
- Target backend. If None, uses the current default backend, see phiml.math.backend.default_backend().
Returns
Tensor with native representation belonging to backend.
def convolve(value: phiml.math._tensors.Tensor, kernel: phiml.math._tensors.Tensor, extrapolation: e_.Extrapolation = None, dims: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function spatial>) ‑> phiml.math._tensors.Tensor
-
Computes the convolution of value and kernel along the specified dims.
Dual dims of kernel are reduced against the corresponding primal dims of value. All other primal dims of value are treated as batch.
Args
value
- Tensor whose shape includes all spatial dimensions of kernel.
kernel
- Tensor used as convolutional filter.
dims
- Which dimensions to convolve over. Defaults to all spatial dims.
extrapolation
- If not None, pads value so that the result has the same shape as value.
Returns
Tensor with all non-reduced dims of value and additional non-dual dims from kernel.
def copy(value: phiml.math._tensors.Tensor)
def copy_with(obj: ~PhiTreeNodeType, **updates) ‑> ~PhiTreeNodeType
-
Creates a copy of the given PhiTreeNode with updated values as specified in updates.
If obj overrides __with_attrs__, the copy will be created via that specific implementation. Otherwise, the copy() module and setattr will be used.
Args
obj
- PhiTreeNode
**updates
- Values to be replaced.
Returns
Copy of obj with updated values.
def cos(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Computes cos(x) of the Tensor or PhiTreeNode x.
def cosh(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Computes cosh(x) of the Tensor or PhiTreeNode x.
def count_intersections(values: phiml.math._tensors.Tensor, arg_dims: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None], list_dims: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function instance>, feature_dims: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function channel>) ‑> phiml.math._tensors.Tensor
-
Counts the number of elements that are part of each pair of lists.
Args
values
arg_dims
- Dims enumerating the input lists.
list_dims
- Dims listing the elements.
feature_dims
- Vector dims of one element. Elements are equal if all values along feature_dims are equal.
Returns
def count_occurrences(values: phiml.math._tensors.Tensor, query: phiml.math._tensors.Tensor, feature_dims: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function channel>) ‑> phiml.math._tensors.Tensor
-
For each query item, counts how often this value occurs in values.
See Also: contains().
Args
values
- Data Tensor containing all feature_dims. All non-batch dims not specified as feature_dims are flattened.
query
- Items to count the occurrences of. Must contain all feature_dims.
feature_dims
- One item is considered to be the set of all values along feature_dims. The number of items in a tensor is given by all dims except feature_dims.
Returns
Integer Tensor matching query without feature_dims.
def cpack(value, packed_dim: Union[str, phiml.math._shape.Shape], pos: Optional[int] = None, **kwargs)
-
Short for pack_dims(…, dims=channel).
def cross(vec1: phiml.math._tensors.Tensor, vec2: phiml.math._tensors.Tensor) ‑> phiml.math._tensors.Tensor
def cross_product(vec1: phiml.math._tensors.Tensor, vec2: phiml.math._tensors.Tensor) ‑> phiml.math._tensors.Tensor
def cumulative_sum(x: phiml.math._tensors.Tensor, dim: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None], include_0=False, include_sum=True, index_dim: Union[str, phiml.math._shape.Shape, None] = None)
-
Performs a cumulative sum of x along dim.
Args
x
- Tensor
dim
- Dimension along which to sum, as str or Shape. If multiple dims are passed, the cumulative sum will be computed on the flattened array.
include_0
- If True, adds a 0 to the result before the first value.
include_sum
- If False, the total sum will be sliced off the result.
index_dim
- If given, adds an index dimension for dim.
Returns
Tensor with the same shape as x.
def custom_gradient(f: Callable, gradient: Callable, auxiliary_args: str = '')
-
Creates a function based on f that uses a custom gradient for the backpropagation pass.
Warning: This method can lead to memory leaks if the gradient function is not called. Make sure to pass tensors without gradients if the gradient is not required, see stop_gradient().
Args
f
- Forward function mapping Tensor arguments x to a single Tensor output or sequence of tensors y.
gradient
- Function to compute the vector-Jacobian product for backpropagation. Will be called as gradient(input_dict, *y, *dy) -> output_dict where input_dict contains all named arguments passed to the forward function and output_dict contains only those parameters for which a gradient is defined.
auxiliary_args
- Comma-separated parameter names of arguments that are not relevant to backpropagation.
Returns
Function with similar signature and return values as f. However, the returned function does not support keyword arguments.
def d2i(value: ~PhiTreeNodeType) ‑> ~PhiTreeNodeType
-
Change the type of all dual dimensions of value to instance dimensions. See rename_dims().
def d2s(value: ~PhiTreeNodeType) ‑> ~PhiTreeNodeType
-
Change the type of all dual dimensions of value to spatial dimensions. See rename_dims().
def degrees_to_radians(deg: ~TensorOrTree) ‑> ~TensorOrTree
-
Convert degrees to radians.
def dense(x: phiml.math._tensors.Tensor) ‑> phiml.math._tensors.Tensor
-
Convert a sparse tensor representation to an equivalent dense one in which all values are explicitly stored contiguously in memory.
Args
x
- Any Tensor. Python primitives like float, int or bool will be converted to Tensors in the process.
Returns
Dense tensor.
def dim_mask(all_dims: Union[phiml.math._shape.Shape, tuple, list], dims: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None], mask_dim=(vectorᶜ=None)) ‑> phiml.math._tensors.Tensor
-
Creates a mask vector with 1 for each dimension in dims and 0 for all other dimensions in all_dims.
Args
all_dims
- All dimensions for which the vector should have an entry.
dims
- Dimensions marked as 1.
mask_dim
- Dimension of the masked vector. Item names are assigned automatically.
Returns
def dot(x: phiml.math._tensors.Tensor, x_dims: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None], y: phiml.math._tensors.Tensor, y_dims: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None]) ‑> phiml.math._tensors.Tensor
-
Computes the dot product along the specified dimensions. Contracts x_dims with y_dims by first multiplying the elements and then summing them up.
For one dimension, this is equal to matrix-matrix or matrix-vector multiplication.
The function replaces the traditional dot / tensordot / matmul / einsum functions.
- NumPy: numpy.tensordot, numpy.einsum
- PyTorch: torch.tensordot, torch.einsum
- TensorFlow: tf.tensordot, tf.einsum
- Jax: jax.numpy.tensordot, jax.numpy.einsum
Args
x
- First Tensor
x_dims
- Dimensions of x to reduce against y
y
- Second Tensor
y_dims
- Dimensions of y to reduce against x.
Returns
Dot product as Tensor.
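Example (a minimal sketch; the dimension names rows/cols are illustrative):
>>> from phiml.math import dot, wrap, channel, dual
>>> matrix = wrap([[1, 2], [3, 4]], channel('rows'), dual('cols'))
>>> vector = wrap([1, 1], channel('cols'))
>>> dot(matrix, '~cols', vector, 'cols')  # 3, 7 along rows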
def downsample2x(grid: phiml.math._tensors.Tensor, padding: Extrapolation = zero-gradient, dims: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function spatial>) ‑> phiml.math._tensors.Tensor
-
Resamples a regular grid to half the number of spatial sample points per dimension. The grid values at the new points are determined via mean (linear interpolation).
Args
grid
- Full-size grid.
padding
- Grid extrapolation. Used to insert an additional value for odd spatial dims.
dims
- Dims along which down-sampling is applied. If None, down-sample along all spatial dims.
Returns
Half-size grid.
def dpack(value, packed_dim: Union[str, phiml.math._shape.Shape], pos: Optional[int] = None, **kwargs)
-
Short for pack_dims(…, dims=dual).
def dtype(x) ‑> phiml.backend._dtype.DType
def dual(*args, **dims: Union[int, str, tuple, list, phiml.math._shape.Shape, ForwardRef('Tensor')])
-
Returns the dual dimensions of an existing Shape or creates a new Shape with only dual dimensions.
Dual dimensions are assigned the prefix ~ to distinguish them from regular dimensions. This way, a regular and dual dimension of the same name can exist in one Shape.
Dual dimensions represent the input space and are typically only present on matrices or higher-order matrices. Dual dimensions behave like batch dimensions in regular operations, if supported. During matrix multiplication, they are matched against their regular counterparts by name (ignoring the ~ prefix).
Usage for filtering dual dimensions:
>>> dual_dims = dual(shape)
>>> dual_dims = dual(tensor)
Usage for creating a Shape with only dual dimensions:
>>> dual('undef', points=2)
(~undefᵈ=None, ~pointsᵈ=2)
Here, the dimension undef is created with an undefined size of None. Undefined sizes are automatically filled in by tensor(), wrap(), stack() and concat().
To create a shape with multiple types, use merge_shapes(), concat_shapes() or the syntax shape1 & shape2.
See Also: channel(), batch(), spatial()
Args
*args
- Either a Shape or Tensor to filter, or names of dimensions with undefined sizes.
**dims
- Dimension sizes and names. Must be empty when used as a filter operation.
Returns
Shape containing only dimensions of type dual.
def eigenvalues(matrix: phiml.math._tensors.Tensor, eigen_dim=(eigenvaluesᶜ=None))
-
Computes the eigenvalues of a square matrix. The matrix columns are listed along dual dimensions and the rows are listed along the corresponding non-dual dimensions. Row dims are matched by name if possible, else all primal dims are used.
Args
matrix
- Square matrix. Must have at least one dual dim and a corresponding non-dual dim.
eigen_dim
- Dimension along which eigenvalues should be listed.
Returns
Tensor listing the eigenvalues along eigen_dim.
def enable_debug_checks()
-
Once called, additional type checks are enabled. This may result in a noticeable drop in performance.
def equal(*objects, equal_nan=False) ‑> bool
-
Checks whether all objects are equal.
See Also: close(), always_close().
Args
*objects
- Objects to compare. Can be tensors or other objects or None.
equal_nan
- If all objects are tensor-like, whether to count NaN values as equal.
Returns
bool, whether all given objects are equal to the first one.
def erf(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Computes the error function erf(x) of the Tensor or PhiTreeNode x.
def exp(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Computes exp(x) of the Tensor or PhiTreeNode x.
def expand(value, *dims: Union[str, phiml.math._shape.Shape], **kwargs)
-
Adds dimensions to a Tensor or tensor-like object by implicitly repeating the tensor values along the new dimensions. If value already contains any of the new dimensions, a size and type check is performed for these instead.
If any of dims varies along a dimension that is present neither in value nor on dims, it will also be added to value.
This function replaces the usual tile / repeat functions of NumPy, PyTorch, TensorFlow and Jax.
Additionally, it replaces the traditional unsqueeze / expand_dims functions.
Args
value
- Shapable, such as Tensor. For tree nodes, expands all value attributes by dims or the first variable attribute if no value attributes are set.
*dims
- Dimensions to be added as Shape
**kwargs
- Additional keyword arguments required by specific implementations. Adding spatial dimensions to fields requires the bounds: Box argument specifying the physical extent of the new dimensions. Adding batch dimensions must always work without keyword arguments.
Returns
Same type as value.
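Example (a minimal sketch with illustrative dimension names):
>>> from phiml.math import expand, wrap, spatial, batch
>>> x = wrap([1, 2, 3], spatial('x'))
>>> expand(x, batch(b=4))  # shape (bᵇ=4, xˢ=3); values are repeated implicitly, not copied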
def factor_ilu(matrix: phiml.math._tensors.Tensor, iterations: int, safe=False)
-
Incomplete LU factorization for dense or sparse matrices.
For sparse matrices, keeps the sparsity pattern of matrix. L and U will be trimmed to the respective areas, i.e. stored upper elements in L will be dropped, unless this would lead to varying numbers of stored elements along a batch dimension.
Args
matrix
- Dense or sparse matrix to factor. Currently, compressed sparse matrices are decompressed before running the ILU algorithm.
iterations
- (Optional) Number of fixed-point iterations to perform. If not given, will be automatically determined from matrix size and sparsity.
safe
- If False (default), only matrices with a rank deficiency of up to 1 can be factored, as all values of L and U are uniquely determined. For matrices with higher rank deficiencies, the result includes NaN values. If True, the algorithm runs slightly slower but can factor highly rank-deficient matrices as well. However, then L is underdetermined and unused values of L are set to 0. Rank deficiencies of 1 occur frequently in periodic settings but higher ones are rare.
Returns
L
- Lower-triangular matrix as Tensor with all diagonal elements equal to 1.
U
- Upper-triangular matrix as Tensor.
Examples
>>> matrix = wrap([[-2, 1, 0],
>>>                [1, -2, 1],
>>>                [0, 1, -2]], channel('row'), dual('col'))
>>> L, U = math.factor_ilu(matrix)
>>> math.print(L)
row=0  1.   0.          0.  along ~col
row=1 -0.5  1.          0.  along ~col
row=2  0.  -0.6666667   1.  along ~col
>>> math.print(L @ U, "L @ U")
L @ U
row=0 -2.   1.   0.  along ~col
row=1  1.  -2.   1.  along ~col
row=2  0.   1.  -2.  along ~col
def factorial(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Computes factorial(x) of the Tensor or PhiTreeNode x. For floating-point numbers, computes the continuous factorial using the gamma function. For integer numbers, computes the exact factorial and returns the same integer type. However, this results in integer overflow for inputs larger than 12 (int32) or 19 (int64).
def fft(x: phiml.math._tensors.Tensor, dims: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function spatial>) ‑> phiml.math._tensors.Tensor
-
Performs a fast Fourier transform (FFT) on all spatial dimensions of x.
The inverse operation is ifft().
Implementations:
- NumPy: np.fft.fft, numpy.fft.fft2, numpy.fft.fftn
- PyTorch: torch.fft.fft
- TensorFlow: tf.signal.fft, tf.signal.fft2d, tf.signal.fft3d
- Jax: jax.numpy.fft.fft, jax.numpy.fft.fft2, jax.numpy.fft.fftn
Args
x
- Uniform complex or float Tensor with at least one spatial dimension.
dims
- Dimensions along which to perform the FFT. If None, performs the FFT along all spatial dimensions of x.
Returns
Ƒ(x) as complex Tensor
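Example (a minimal sketch):
>>> from phiml.math import fft, ifft, real, random_normal, spatial
>>> x = random_normal(spatial(x=8))
>>> k = fft(x)     # complex spectrum
>>> real(ifft(k))  # recovers x up to floating-point rounding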
def fftfreq(resolution: phiml.math._shape.Shape, dx: Union[float, phiml.math._tensors.Tensor] = 1, dtype: phiml.backend._dtype.DType = None)
-
Returns the discrete Fourier transform sample frequencies. These are the frequencies corresponding to the components of the result of math.fft on a tensor of shape resolution.
Args
resolution
- Grid resolution measured in cells.
dx
- Distance between sampling points in real space.
dtype
- Data type of the returned tensor (Default value = None).
Returns
Tensor holding the frequencies of the corresponding values computed by math.fft
def find_closest(vectors: phiml.math._tensors.Tensor, query: phiml.math._tensors.Tensor, method='kd', index_dim=(indexᶜ=None))
-
Finds the closest vector to query from vectors. This is implemented using a k-d tree built from vectors.
Args
vectors
- Points to find.
query
- Target locations.
method
-
One of the following:
- 'dense': compute the pair-wise distances between all vectors and query points, then return the index of the smallest distance for each query point.
- 'kd' (default): Build a k-d tree from vectors and use it to query all points in query. The tree will be cached if this call is jit-compiled and vectors is constant.
index_dim
- Dimension along which components should be listed as Shape. Pass None to get 1D indices as scalars.
Returns
Index tensor idx so that the closest points to query are vectors[idx].
def find_differences(tree1, tree2, compare_tensors_by_id=False, attr_type=<function value_attributes>, tensor_equality=None) ‑> Sequence[Tuple[str, str, Any, Any]]
-
Compares tree1 and tree2 and returns all differences in the form (difference_description: str, variable_identifier: str, value1, value2).
Args
tree1
- Nested tree or leaf
tree2
- Nested tree or leaf
compare_tensors_by_id
- Whether Tensor objects should be compared by identity or values.
attr_type
- What attributes to compare, either value_attributes or variable_attributes.
tensor_equality
- Function that compares two tensors for equality. None defaults to equal().
Returns
List of differences, each represented as a tuple.
def finite_fill(values: phiml.math._tensors.Tensor, dims: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function spatial>, distance: int = 1, diagonal: bool = True, padding=zero-gradient, padding_kwargs: dict = None) ‑> phiml.math._tensors.Tensor
-
Fills non-finite (NaN, inf, -inf) values from nearby finite values. Extrapolates the finite values of values for distance steps along dims. Where multiple finite values could fill an invalid value, the average is computed.
Args
values
- Floating-point Tensor. All non-numeric values (NaN, inf, -inf) are interpreted as invalid.
dims
- Dimensions along which to fill invalid values from finite ones.
distance
- Number of extrapolation steps, each extrapolating one cell out.
diagonal
- Whether to extrapolate values to their diagonal neighbors per step.
padding
- Extrapolation of values. Determines whether to extrapolate from the edges as well.
padding_kwargs
- Additional keyword arguments to be passed to pad().
Returns
Tensor of same shape as values.
def finite_max(value, dim: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function non_batch>, default: Union[complex, float] = nan)
-
Finds the maximum along dim ignoring all non-finite values.
Args
value
- Tensor or list / tuple of Tensors.
dim
-
Dimension or dimensions to be reduced. One of
- None to reduce all non-batch dimensions
- str containing a single dimension or a comma-separated list of dimensions
- Tuple[str] or List[str]
- Shape
- batch(), instance(), spatial(), channel() to select dimensions by type
- '0' when isinstance(value, (tuple, list)) to reduce the sequence of Tensors
default
- Value to use where no finite value was encountered.
Returns
Tensor without the reduced dimensions.
def finite_mean(value, dim: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function non_batch>, default: Union[complex, float] = nan)
-
Computes the mean value of all finite values in value along dim.
Args
value
- Tensor or list / tuple of Tensors.
dim
-
Dimension or dimensions to be reduced. One of
- None to reduce all non-batch dimensions
- str containing a single dimension or a comma-separated list of dimensions
- Tuple[str] or List[str]
- Shape
- batch(), instance(), spatial(), channel() to select dimensions by type
- '0' when isinstance(value, (tuple, list)) to reduce the sequence of Tensors
default
- Value to use where no finite value was encountered.
Returns
Tensor without the reduced dimensions.
def finite_min(value, dim: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function non_batch>, default: Union[complex, float] = nan)
-
Finds the minimum along dim ignoring all non-finite values.
Args
value
- Tensor or list / tuple of Tensors.
dim
-
Dimension or dimensions to be reduced. One of
- None to reduce all non-batch dimensions
- str containing a single dimension or a comma-separated list of dimensions
- Tuple[str] or List[str]
- Shape
- batch(), instance(), spatial(), channel() to select dimensions by type
- '0' when isinstance(value, (tuple, list)) to reduce the sequence of Tensors
default
- Value to use where no finite value was encountered.
Returns
Tensor without the reduced dimensions.
def finite_sum(value, dim: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function non_batch>, default: Union[complex, float] = nan)
-
Sums all finite values in value along dim.
Args
value
- Tensor or list / tuple of Tensors.
dim
-
Dimension or dimensions to be reduced. One of
- None to reduce all non-batch dimensions
- str containing a single dimension or a comma-separated list of dimensions
- Tuple[str] or List[str]
- Shape
- batch(), instance(), spatial(), channel() to select dimensions by type
- '0' when isinstance(value, (tuple, list)) to add up the sequence of Tensors
default
- Value to use where no finite value was encountered.
Returns
Tensor without the reduced dimensions.
def flatten(value, flat_dim: phiml.math._shape.Shape = (flatⁱ=None), flatten_batch=False, **kwargs)
-
Returns a Tensor with the same values as value but only a single dimension flat_dim. The order of the values in memory is not changed.
Args
value
- Shapable, such as Tensor. If a non-Shaped object or one with an empty Shape is passed, it is returned without alteration.
flat_dim
- Dimension name and type as Shape object. The size is ignored.
flatten_batch
- Whether to flatten batch dimensions as well. If False, batch dimensions are kept and only non-batch dimensions are flattened.
**kwargs
- Additional keyword arguments required by specific implementations. Adding spatial dimensions to fields requires the bounds: Box argument specifying the physical extent of the new dimensions. Adding batch dimensions must always work without keyword arguments.
Returns
Same type as value.
Examples
>>> flatten(math.zeros(spatial(x=4, y=3)))
(flatⁱ=12) const 0.0
def floor(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Computes ⌊x⌋ of the Tensor or PhiTreeNode x.
def fourier_laplace(grid: phiml.math._tensors.Tensor, dx: Union[phiml.math._tensors.Tensor, phiml.math._shape.Shape, float, list, tuple], times: int = 1)
-
Applies the spatial laplace operator to the given tensor with periodic boundary conditions.
Note: The results of fourier_laplace() and laplace() are close but not identical.
This implementation computes the laplace operator in Fourier space. The result for periodic fields is exact, i.e. no numerical instabilities can occur, even for higher-order derivatives.
Args
grid
- Tensor, assumed to have periodic boundary conditions.
dx
- Distance between grid points; tensor-like, scalar or vector.
times
- Number of times the laplace operator is applied. The computational cost is independent of this parameter. (Default value = 1)
Returns
Tensor of same shape as grid.
def fourier_poisson(grid: phiml.math._tensors.Tensor, dx: Union[phiml.math._tensors.Tensor, phiml.math._shape.Shape, float, list, tuple], times: int = 1)
-
Inverse operation to fourier_laplace().
Args
grid
- Tensor
dx
- Tensor, Shape, float, list or tuple
times
- int (Default value = 1)
Returns:
def frequency_loss(x, frequency_falloff: float = 100, threshold=1e-05, ignore_mean=False, n=2) ‑> phiml.math._tensors.Tensor
-
Penalizes the squared values in frequency (Fourier) space. Lower frequencies are weighted more strongly than higher frequencies, depending on frequency_falloff.
Args
x
- Tensor or PhiTreeNode. Values to penalize, typically actual - target.
frequency_falloff
- Large values put more emphasis on lower frequencies, 1.0 weights all frequencies equally. Note: The total loss is not normalized. Varying the value will result in losses of different magnitudes.
threshold
- Frequency amplitudes below this value are ignored. Setting this to zero may cause infinities or NaN values during backpropagation.
ignore_mean
- If True, does not penalize the mean value (frequency=0 component).
Returns
Scalar loss value
def from_dict(dict_: dict, convert=False)
def gather(values, indices: phiml.math._tensors.Tensor, dims: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = None, pref_index_dim='index')
-
Gathers the entries of values at positions described by indices. All non-channel dimensions of indices that are part of values but not indexed are treated as batch dimensions.
See Also: scatter().
Args
values
- Tensor or phiml.math.magic.PhiTreeNode containing values to gather.
indices
- int Tensor. Multidimensional position references in values. Must contain a single channel dimension for the index vector matching the number of dimensions to index. This channel dimension should list the dimension names to index as item names unless explicitly specified as dims.
dims
- (Optional) Dimensions indexed by indices. Alternatively, the dimensions can be specified as the item names of the channel dimension of indices. If None and no index item names are specified, will default to all spatial dimensions or all instance dimensions, depending on which ones are present (but not both).
pref_index_dim
- In case indices has multiple channel dims, use this dim as the index, treating the others as batch. Has no effect if indices only has one channel dim.
Returns
Tensor with combined batch dimensions, channel dimensions of values and spatial/instance dimensions of indices.
def get_format(x: phiml.math._tensors.Tensor) ‑> str
-
Returns the sparse storage format of a tensor.
Args
x
- Tensor
Returns
One of 'coo', 'csr', 'csc', 'dense'.
def get_precision() ‑> int
-
Gets the current target floating-point precision in bits. The precision can be set globally using set_global_precision() or locally using with precision(p):.
Any Backend method may convert floating-point values to this precision, even if the input had a different precision.
Returns
16 for half, 32 for single, 64 for double
def get_sparsity(x: phiml.math._tensors.Tensor)
-
Fraction of values currently stored on disk for the given Tensor x. For sparse tensors, this is nnz / shape.
This is a lower limit on the number of values that will need to be processed for operations involving x. The actual number is often higher since many operations require data to be laid out in a certain format. In these cases, missing values, such as zeros, are filled in before the operation.
The following operations may return tensors whose values are only partially stored:
- expand()
- phiml.math.pairwise_distance() with max_distance set.
- Tracers used in jit_compile_linear()
- Stacking any of the above.
Args
x
- Tensor
Returns
The number of values that are actually stored on disk. This does not include additional information, such as position information / indices. For sparse matrices, this is equal to the number of nonzero values.
def gradient(f: Callable, wrt: str = None, get_output=True) ‑> Callable
-
Creates a function which computes the gradient of
f
.Example:
def loss_function(x, y): prediction = f(x) loss = math.l2_loss(prediction - y) return loss, prediction dx = gradient(loss_function, 'x', get_output=False)(x, y) (loss, prediction), (dx, dy) = gradient(loss_function, 'x,y', get_output=True)(x, y)
Functional gradients are implemented for the following backends:
- PyTorch:
torch.autograd.grad
/torch.autograd.backward
- TensorFlow:
tf.GradientTape
- Jax:
jax.grad
When the gradient function is invoked,
f
is called with tensors that track the gradient. For PyTorch, arg.requires_grad = True
is set for all positional arguments of f
.
Args
f
- Function to be differentiated.
f
must return a floating point Tensor
with rank zero. It can return additional tensors which are treated as auxiliary data and will be returned by the gradient function if get_output=True
. All arguments for which the gradient is computed must be of dtype float or complex.
get_output
- Whether the gradient function should also return the return values of
f
. wrt
- Comma-separated parameter names of
f
with respect to which the gradient should be computed. If not specified, the gradient will be computed w.r.t. the first positional argument (highly discouraged).
Returns
Function with the same arguments as
f
that returns the value of f
, auxiliary data and gradient of f
if get_output=True
, else just the gradient of f
.
def grid_sample(grid: phiml.math._tensors.Tensor, coordinates: phiml.math._tensors.Tensor, extrap: Union[ForwardRef('e_.Extrapolation'), float, str], **kwargs)
-
Samples values of
grid
at the locations referenced by coordinates
. Values lying in between sample points are determined via linear interpolation.
If
coordinates
has a channel dimension, its item names are used to determine the grid dimensions of grid
. Otherwise, the spatial dims of grid
will be used.
For values outside the valid bounds of
grid
(coord < 0 or coord > grid.shape - 1
), extrap
is used to determine the neighboring grid values. If the extrapolation does not support resampling, the grid is padded by one cell layer before resampling. In that case, values lying further outside will not be sampled according to the extrapolation.
Args
grid
- Grid with at least one spatial dimension and no instance dimensions.
coordinates
- Coordinates with a single channel dimension called
'vector'
. The size of the vector
dimension must match the number of spatial dimensions of grid
. extrap
- Extrapolation used to determine the values of
grid
outside its valid bounds. kwargs
- Additional information for the extrapolation.
Returns
Tensor
with channel dimensions of grid
, spatial and instance dimensions of coordinates
and combined batch dimensions.
def histogram(values: phiml.math._tensors.Tensor, bins: phiml.math._shape.Shape = (binsˢ=30), weights=1, same_bins: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = None)
-
Compute a histogram of a distribution of values.
Important Note: In its current implementation, values outside the range of bins may or may not be added to the outermost bins.
Args
values
Tensor
listing the values to be binned along spatial or instance dimensions. values may not contain channel or dual dimensions.
bins
- Either
Shape
specifying the number of equally-spaced bins to use or bin edge positions as Tensor
with a spatial or instance dimension.
weights
Tensor
assigning a weight to every value in values
that will be added to the bin, default 1.
same_bins
- Only used if
bins
is given as a Shape
. Use the same bin sizes and positions across these batch dimensions. By default, bins will be chosen independently for each example.
Returns
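Example (a minimal sketch with hypothetical values; the exact bin edges are chosen by the function):
>>> from phiml.math import wrap, histogram, instance, spatial
>>> values = wrap([0.1, 0.2, 0.25, 0.8], instance('samples'))
>>> hist = histogram(values, spatial(bins=4))   # counts per bin over 4 equally-spaced bins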
def i2b(value: ~PhiTreeNodeType) ‑> ~PhiTreeNodeType
-
Change the type of all instance dimensions of
value
to batch dimensions. See rename_dims()
.
def identity(x)
-
Identity function for one argument. Vararg functions cannot be transformed as the argument names are unknown.
Args
x
- Positional argument.
Returns
x
def ifft(k: phiml.math._tensors.Tensor, dims: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function spatial>)
def imag(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Returns the imaginary part of
x
. If x
does not store complex numbers, returns a zero tensor with the same shape and dtype as this tensor.
See Also:
real()
, conjugate()
.
Args
x
Tensor
or PhiTreeNode
or native tensor.
Returns
Imaginary component of
x
if x
is complex, zeros otherwise.
def incomplete_gamma(a: ~TensorOrTree, x: ~TensorOrTree, upper=False, regularized=True) ‑> ~TensorOrTree
def index_shift(x: phiml.math._tensors.Tensor, offsets: Sequence[Union[int, phiml.math._tensors.Tensor]], padding: Union[Extrapolation, float, phiml.math._tensors.Tensor, str, None] = None) ‑> List[phiml.math._tensors.Tensor]
-
Returns shifted versions of
x
according to offsets
where each offset is an int
vector indexing some dimensions of x
.
See Also:
shift()
,neighbor_reduce()
.Args
x
- Input grid-like
Tensor
. offsets
- Sequence of offset vectors. Each offset is an
int
vector indexing some dimensions of x
. Offsets can have different subsets of the dimensions of x
. Missing dimensions count as 0. The value 0
can also be passed as a zero-shift.
padding
- Padding to be performed at the boundary so that the shifted versions have the same size as
x
. Must be one of the following: Extrapolation
, Tensor
or number for constant extrapolation, name of extrapolation as str
. Can be set to None
to disable padding. Then the result tensors will be smaller than x
.
Returns
list
of shifted tensors. The number of return tensors is equal to the number of offsets
.
def instance(*args, **dims: Union[int, str, tuple, list, phiml.math._shape.Shape, ForwardRef('Tensor')])
-
Returns the instance dimensions of an existing
Shape
or creates a new Shape
with only instance dimensions.
Usage for filtering instance dimensions:
>>> instance_dims = instance(shape) >>> instance_dims = instance(tensor)
Usage for creating a
Shape
with only instance dimensions:
>>> instance_shape = instance('undef', points=2)
(points=2, undef=None)
Here, the dimension
undef
is created with an undefined size of None
. Undefined sizes are automatically filled in by tensor()
,wrap()
,stack()
and concat()
.
To create a shape with multiple types, use
merge_shapes()
,concat_shapes()
or the syntax shape1 & shape2
.
See Also:
channel()
,batch()
,spatial()
Args
*args
-
Either an object (such as a Shape or Tensor) whose instance dimensions should be returned, or names of new dimensions with undefined sizes given as str.
**dims
- Dimension sizes and names. Must be empty when used as a filter operation.
Returns
Shape
containing only dimensions of type instance.
def ipack(value, packed_dim: Union[str, phiml.math._shape.Shape], pos: Optional[int] = None, **kwargs)
-
Short for pack_dims(…, dims=instance).
def is_finite(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Returns a
Tensor
or PhiTreeNode
matching x
with values True
where x
has a finite value and False
otherwise.
def is_inf(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Returns a
Tensor
or PhiTreeNode
matching x
with values True
where x
is +inf
or -inf
and False
otherwise.
def is_nan(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Returns a
Tensor
or PhiTreeNode
matching x
with values True
where x
is NaN
and False
otherwise.
def is_scalar(value) ‑> bool
-
Checks whether
value
has no dimensions.
Args
value
Tensor
or Python primitive or native tensor.
Returns
bool
def is_sparse(x: phiml.math._tensors.Tensor)
-
Checks whether a tensor is represented in COO, CSR or CSC format. If the tensor is neither sparse nor dense, this function raises an error.
Args
x
Tensor
to test.
Returns
True
if x
is sparse, False
if x
is dense.
Raises
AssertionError
if x
is neither sparse nor fully dense.
def iterate(map_function: Callable, iterations: Union[int, phiml.math._shape.Shape], *x0, f_kwargs: dict = None, range: Callable = builtins.range, measure: Callable = None, substeps: int = 1, **f_kwargs_)
-
Repeatedly call
map_function
, passing the previous output as the next input.
If the function outputs more values than the number of arguments in
x0
, only the first len(x0)
ones are passed to map_function
. However, all outputs will be returned by iterate()
.
Args
map_function
- Function to call. Must be callable as
f(x0, **f_kwargs)
andf(f(x0, **f_kwargs), **f_kwargs)
. iterations
- Number of iterations as
int
or single-dimension Shape
. If int
, returns the final output of map_function
. If Shape
, returns the trajectory (x0
and all outputs of map_function
), stacking the values along this dimension.
x0
- Initial positional arguments for
map_function
. Values that are initially None
are not stacked with the other values if iterations
is a Shape
.
range
- Range function. Can be used to generate tqdm output by passing
trange
.
measure
- Function without arguments to call at the start and end (and in between if
isinstance(iterations, Shape)
) calls to map_function
. The measure of each call to map_function
is measure()
after minus measure()
before the call.
substeps
- If > 1, iterates the function multiple times for each recorded step.
The returned trajectories as well as measurements only record the large steps, not the sub-steps.
The range
function is also only used on large steps, not sub-steps.
f_kwargs
- Additional keyword arguments to be passed to
map_function
. These arguments can be of any type.
f_kwargs_
- More keyword arguments.
Returns
final_or_trajectory
- Stacked trajectory or final output of
map_function
, depending on iterations
.
measured
- Only if
measure
was specified, returns the measured value or trajectory tensor.
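Example (a minimal sketch; an int iteration count returns only the final output):
>>> from phiml.math import iterate, wrap
>>> def halve(x):
...     return x / 2
>>> result = iterate(halve, 3, wrap(16.))   # applies halve three times: 16 → 8 → 4 → 2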
def jacobian(f: Callable, wrt: str = None, get_output=True) ‑> Callable
-
Creates a function which computes the Jacobian matrix of
f
. For scalar functions, consider using gradient()
instead.
Example:
def loss_function(x, y):
    prediction = f(x)
    loss = math.l2_loss(prediction - y)
    return loss, prediction

dx = jacobian(loss_function, wrt='x', get_output=False)(x, y)

(loss, prediction), (dx, dy) = jacobian(loss_function, wrt='x,y', get_output=True)(x, y)
Functional gradients are implemented for the following backends:
- PyTorch:
torch.autograd.grad
/torch.autograd.backward
- TensorFlow:
tf.GradientTape
- Jax:
jax.grad
When the gradient function is invoked,
f
is called with tensors that track the gradient. For PyTorch, arg.requires_grad = True
is set for all positional arguments of f
.
Args
f
- Function to be differentiated.
f
must return a floating point Tensor
with rank zero. It can return additional tensors which are treated as auxiliary data and will be returned by the gradient function if get_output=True
. All arguments for which the gradient is computed must be of dtype float or complex.
get_output
- Whether the gradient function should also return the return values of
f
. wrt
- Comma-separated parameter names of
f
with respect to which the gradient should be computed. If not specified, the gradient will be computed w.r.t. the first positional argument (highly discouraged).
Returns
Function with the same arguments as
f
that returns the value of f
, auxiliary data and Jacobian of f
if get_output=True
, else just the Jacobian of f
.
def jit_compile(f: Callable = None, auxiliary_args: str = '', forget_traces: bool = None) ‑> Callable
-
Compiles a graph based on the function
f
. The graph compilation is performed just-in-time (jit), i.e. when the returned function is called for the first time.
The traced function will compute the same result as
f
but may run much faster. Some checks may be disabled in the compiled function.
Can be used as a decorator:
@math.jit_compile
def my_function(x: math.Tensor) -> math.Tensor:
Invoking the returned function may invoke re-tracing / re-compiling
f
after the first call if either
- it is called with a different number of arguments,
- the tensor arguments have different dimension names or types (the dimension order also counts),
- any
Tensor
arguments require a different backend than previous invocations,
- PhiTreeNode
positional arguments do not match in non-variable properties.
Compilation is implemented for the following backends:
- PyTorch:
torch.jit.trace
- TensorFlow:
tf.function
- Jax:
jax.jit
Jit-compilations cannot be nested, i.e. you cannot call
jit_compile()
while another function is being compiled. An exception to this is jit_compile_linear()
which can be called from within a jit-compiled function.
See Also:
jit_compile_linear()
Args
f
- Function to be traced.
All positional arguments must be of type
Tensor
or PhiTreeNode
, returning a single Tensor
or PhiTreeNode
.
auxiliary_args
- Comma-separated parameter names of arguments that are not relevant to backpropagation.
forget_traces
- If
True
, only remembers the most recent compiled instance of this function. Upon tracing with a new instance (due to changed shapes or auxiliary args), the previous traces are deleted.
Returns
Function with similar signature and return values as
f
.
def jit_compile_linear(f: Callable[[~X], ~Y] = None, auxiliary_args: str = None, forget_traces: bool = None) ‑> phiml.math._functional.LinearFunction[~X, ~Y]
-
Compile an optimized representation of the linear function
f
. For backends that support sparse tensors, a sparse matrix will be constructed for f
.
Can be used as a decorator:
@math.jit_compile_linear
def my_linear_function(x: math.Tensor) -> math.Tensor:
Unlike
jit_compile()
, jit_compile_linear()
can be called during a regular jit compilation.
See Also:
jit_compile()
Args
f
- Function that is linear in its positional arguments.
All positional arguments must be of type
Tensor
and f
must return a Tensor
.
auxiliary_args
- Which parameters
f
is not linear in. These arguments are treated as conditioning arguments and will cause re-tracing on change.
forget_traces
- If
True
, only remembers the most recent compiled instance of this function. Upon tracing with a new instance (due to changed shapes or auxiliary args), the previous traces are deleted.
Returns
LinearFunction
with similar signature and return values as f
.
def l1_loss(x, reduce: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function non_batch>) ‑> phiml.math._tensors.Tensor
-
Computes ∑ᵢ ‖xᵢ‖₁, summing over all non-batch dimensions.
Args
x
Tensor
or PhiTreeNode
or 0D or 1D native tensor. For PhiTreeNode
objects, the sum over all value attributes is computed.
reduce
- Dimensions to reduce as
DimFilter
.
Returns
loss
Tensor
def l2_loss(x, reduce: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function non_batch>) ‑> phiml.math._tensors.Tensor
-
Computes ∑ᵢ ‖xᵢ‖₂² / 2, summing over all non-batch dimensions.
Args
x
Tensor
or PhiTreeNode
or 0D or 1D native tensor. For PhiTreeNode
objects, the sum over all value attributes is computed.
reduce
- Dimensions to reduce as
DimFilter
.
Returns
loss
Tensor
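Example (a quick numeric check of the definition):
>>> from phiml.math import wrap, l2_loss, channel
>>> x = wrap([3., 4.], channel(vector='x,y'))
>>> l2_loss(x)   # (3² + 4²) / 2 = 12.5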
def laplace(x: phiml.math._tensors.Tensor, dx: Union[float, phiml.math._tensors.Tensor] = 1, padding: Union[Extrapolation, float, phiml.math._tensors.Tensor, str, None] = zero-gradient, dims: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function spatial>, weights: phiml.math._tensors.Tensor = None, padding_kwargs: dict = None)
-
Spatial Laplace operator as defined for scalar fields. If a vector field is passed, the laplace is computed component-wise.
Args
x
- n-dimensional field of shape (batch, spatial dimensions…, components)
dx
- scalar or 1d tensor
padding
- Padding mode.
Must be one of the following:
Extrapolation
, Tensor
or number for constant extrapolation, name of extrapolation as str
.
dims
- The second derivative along these dimensions is summed over
weights
- (Optional) Multiply the axis terms by these factors before summation. Must be a Tensor with a single channel dimension that lists all laplace dims by name.
padding_kwargs
- Additional keyword arguments to be passed to
pad()
.
Returns
Tensor
of same shape as x
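Example (a 1D sketch on samples of x²; interior entries equal the second difference, 2, while edge values depend on padding):
>>> from phiml.math import wrap, laplace, spatial
>>> x = wrap([0., 1., 4., 9., 16.], spatial('x'))   # x² sampled at 0..4
>>> laplace(x)   # interior entries: 0 - 2·1 + 4 = 2, etc.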
def layout(objects, *shape: Union[str, phiml.math._shape.Shape]) ‑> phiml.math._tensors.Tensor
-
Wraps a Python tree in a
Tensor
, allowing elements to be accessed via dimensions. A Python tree is a structure of nested tuple
,list
,dict
and leaf objects where leaves can be any Python object.
All keys of
dict
containers must be of type str
. The keys are automatically assigned as item names along that dimension unless conflicting with other elements.
Strings may also be used as containers.
Example:
>>> t = layout({'a': 'text', 'b': [0, 1]}, channel('dict,inner'))
>>> t.inner[1].dict['a'].native()
'e'
Args
objects
- PyTree of
list
or tuple
.
*shape
- Tensor dimensions
Returns
Tensor
. Calling Tensor.native()
on the returned tensor will return objects
.
def length(*args, **kwargs)
-
Deprecated. Use
norm()
instead.
def linspace(start: Union[float, phiml.math._tensors.Tensor, tuple, list], stop: Union[float, phiml.math._tensors.Tensor, tuple, list], dim: phiml.math._shape.Shape) ‑> phiml.math._tensors.Tensor
-
Returns
evenly spaced numbers between start and stop along dim; the number of values is given by the size of dim.
If
dim
contains multiple dimensions, evenly spaces values along each dimension, then stacks the result along a new channel dimension called vector
.
See Also:
arange()
, meshgrid()
.
Args
start
- First value,
int
or Tensor
.
stop
- Last value,
int
or Tensor
.
dim
- Linspace dimension of integer size.
The size determines how many values to linearly space between
start
and stop
. The values will be laid out along dim
.
Returns
Examples
>>> math.linspace(0, 1, spatial(x=5))
(0.000, 0.250, 0.500, 0.750, 1.000) along xˢ
>>> math.linspace(0, (-1, 1), spatial(x=3))
(0.000, 0.000); (-0.500, 0.500); (-1.000, 1.000) (xˢ=3, vectorᶜ=2)
def load(file: str)
-
Loads a
Tensor
or tree from a file previously written using save()
.
All tensors are restored as NumPy arrays, not the backend-specific tensors they may have been written as. Use
convert()
to convert all or some of the tensors to a different backend.
Args
file
- File to read.
Returns
Same type as what was written.
def log(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Computes the natural logarithm of the
Tensor
or PhiTreeNode
x
.
def log10(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Computes log(x) of the
Tensor
or PhiTreeNode
x
with base 10.
def log2(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Computes log(x) of the
Tensor
or PhiTreeNode
x
with base 2.
def log_gamma(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Computes log(gamma(x)) of the
Tensor
or PhiTreeNode
x
.
def map(function: Callable[..., ~Y], *args, dims: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function shape>, range=builtins.range, unwrap_scalars=True, expand_results=False, simplify=False, **kwargs) ‑> Union[None, phiml.math._tensors.Tensor, ~Y]
-
Calls
function
on slices of the arguments and returns the stacked result.
Args
function
- Function to be called on slices of
args
and kwargs
. Must return one or multiple values that can be stacked. None
may be returned, but if any return value is None
, all calls to function
must return None
in that position.
*args
- Positional arguments for
function
. Values that are Sliceable
will be sliced along dims
.
**kwargs
- Keyword arguments for
function
. Values that are Sliceable
will be sliced along dims
.
dims
- Dimensions which should be sliced.
function
is called once for each element in dims
, i.e. dims.volume
times. If dims
is not specified, all dimensions from the Sliceable
values in args
and kwargs
will be mapped. Pass object
to map only objects, not tensors of primitives (dtype.kind == object
). This will select only layout()
-type dimensions.
range
- Optional range function. Can be used to generate
tqdm
output by passing trange
.
unwrap_scalars
- If
True
, passes the contents of scalar Tensor
s instead of the tensor objects.
simplify
- If
True
, reduces constant dims of output tensors that don't vary across mapped slices.
Returns
Tensor
of same shape as value
.
def map_pairs(map_function: Callable, values: phiml.math._tensors.Tensor, connections: phiml.math._tensors.Tensor)
-
Evaluates
map_function
on all pairs of elements present in the sparsity pattern of connections
.
Args
map_function
- Function with signature
(Tensor, Tensor) -> Tensor
.
values
- Values to evaluate
map_function
on. Needs to have a spatial or instance dimension but must not have a dual dimension.
connections
- Sparse tensor.
Returns
Tensor
with the sparse dimensions of connections
and all non-instance dimensions returned by map_function
.
def map_types(f: Callable, dims: Union[phiml.math._shape.Shape, tuple, list, str, Callable], dim_type: Union[str, Callable]) ‑> Callable
-
Wraps a function to change the dimension types of its
Tensor
and PhiTreeNode
arguments.
Args
f
- Function to wrap.
dims
- Concrete dimensions or dimension type, such as
spatial()
or batch()
. These dimensions will be mapped to dim_type
for all positional function arguments.
dim_type
- Dimension type, such as
spatial()
or batch()
. f
will be called with dimensions remapped to this type.
Returns
Function with signature matching
f
.
def masked_fill(values: phiml.math._tensors.Tensor, valid: phiml.math._tensors.Tensor, distance: int = 1) ‑> Tuple[phiml.math._tensors.Tensor, phiml.math._tensors.Tensor]
-
Extrapolates the values of
values
which are marked by the nonzero values of valid
for distance
steps in all spatial directions. Overlapping extrapolated values get averaged. Extrapolation also includes diagonals.
Args
values
- Tensor which holds the values for extrapolation
valid
- Tensor with same size as
values
marking the values for extrapolation with nonzero values.
distance
- Number of extrapolation steps
Returns
values
- Extrapolation result
valid
- mask marking all valid values after extrapolation
def matrix_from_function(f: Callable, *args, auxiliary_args=None, auto_compress=False, sparsify_batch=None, separate_independent=False, **kwargs) ‑> Tuple[phiml.math._tensors.Tensor, phiml.math._tensors.Tensor]
-
Trace a linear function and construct a matrix. Depending on the functional form of
f
, the returned matrix may be dense or sparse.
Args
f
- Function to trace.
*args
- Arguments for
f
.
auxiliary_args
- Arguments in which the function is not linear.
These parameters are not traced but passed on as given in
args
and kwargs
.
auto_compress
- If
True
, returns a compressed matrix if supported by the backend. sparsify_batch
- If
False
, the matrix will be batched. If True
, will create dual dimensions for the involved batch dimensions. This will result in one large matrix instead of a batch of matrices.
**kwargs
- Keyword arguments for
f
.
Returns
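Example (a minimal sketch; laplace with constant padding is linear, and the returned tuple is assumed to be (matrix, bias) as per the signature):
>>> from phiml import math
>>> from phiml.math import matrix_from_function, spatial
>>> def f(x):
...     return math.laplace(x, padding=0)   # linear in x
>>> matrix, bias = matrix_from_function(f, math.zeros(spatial(x=4)))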
def matrix_rank(matrix: phiml.math._tensors.Tensor) ‑> phiml.math._tensors.Tensor
-
Approximates the rank of a matrix. The tolerances used depend on the current precision.
Args
matrix
- Sparse or dense matrix, i.e.
Tensor
with primal and dual dims.
Returns
Matrix rank.
def max(value: ~TensorOrTree, dim: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function non_batch>, key: phiml.math._tensors.Tensor = None) ‑> ~TensorOrTree
-
Determines the maximum value of
values
along the specified dimensions.
Args
value
- (Sparse)
Tensor
or list
/ tuple
of Tensors.
dim
-
Dimension or dimensions to be reduced. One of
None
to reduce all non-batch dimensions
str
containing single dimension or comma-separated list of dimensions
Tuple[str]
or List[str]
Shape
batch()
, instance()
, spatial()
, channel()
to select dimensions by type
'0'
when isinstance(value, (tuple, list))
to reduce the sequence of Tensors
key
- Optional comparison values. If specified, returns the value where
key
is maximal, see at_max()
.
Returns
Tensor
without the reduced dimensions.
def maximum(x: Union[float, phiml.math._tensors.Tensor], y: Union[float, phiml.math._tensors.Tensor], allow_none=False)
-
Computes the element-wise maximum of
x
and y
.
def mean(value, dim: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function non_batch>, weight: Union[phiml.math._tensors.Tensor, list, tuple] = None, where_no_weight=nan, epsilon=1e-10) ‑> phiml.math._tensors.Tensor
-
Computes the mean over
values
along the specified dimensions.
Args
value
- (Sparse)
Tensor
or list
/ tuple
of Tensors.
dim
-
Dimension or dimensions to be reduced. One of
None
to reduce all non-batch dimensions
str
containing single dimension or comma-separated list of dimensions
Tuple[str]
or List[str]
Shape
batch()
, instance()
, spatial()
, channel()
to select dimensions by type
'0'
when isinstance(value, (tuple, list))
to reduce the sequence of Tensors
weight
- Optionally perform a weighted mean operation. Must broadcast to
value
.
where_no_weight
- Value to use when the sum of all weights is smaller than
epsilon
. epsilon
- Only if
where_no_weight
is specified. Threshold for using where_no_weight
.
Returns
Tensor
without the reduced dimensions.
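Example (a weighted-mean sketch, assuming the standard normalization by the total weight):
>>> from phiml.math import wrap, mean, instance
>>> v = wrap([1., 2., 3.], instance('points'))
>>> w = wrap([1., 1., 2.], instance('points'))
>>> mean(v, weight=w)   # (1·1 + 1·2 + 2·3) / 4 = 2.25
def median(value, dim: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function non_batch>)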
-
Reduces
dim
of value
by picking the median value. For even dimension sizes (ambiguous choice), the linear average of the two middle values is computed.
Currently implemented via
quantile()
.
Args
value
Tensor
dim
-
Dimension or dimensions to be reduced. One of
None
to reduce all non-batch dimensions
str
containing single dimension or comma-separated list of dimensions
Tuple[str]
or List[str]
Shape
batch()
, instance()
, spatial()
, channel()
to select dimensions by type
'0'
when isinstance(value, (tuple, list))
to reduce the sequence of Tensors
Returns
def merge_shapes(*objs: Union[phiml.math._shape.Shape, Any], order=(<function batch>, <function dual>, <function instance>, <function spatial>, <function channel>), allow_varying_sizes=False)
-
Combines
shapes
into a singleShape
, grouping dimensions by type. If dimensions with equal names are present in multiple shapes, their types and sizes must match.
The shorthand
shape1 & shape2
merges shapes with check_exact=[spatial]
.
See Also:
concat_shapes()
.
Args
*objs
Shape
or Shaped
objects to combine.
order
- Dimension type order as
tuple
of type filters (channel()
, batch()
, spatial()
or instance()
). Dimensions are grouped by type while merging.
Returns
Merged
Shape
Raises
IncompatibleShapes if the shapes are not compatible
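Example (the printed shape notation is indicative):
>>> merge_shapes(batch(b=10), spatial(x=4, y=3))
(bᵇ=10, xˢ=4, yˢ=3)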
def meshgrid(dims: Union[Callable, phiml.math._shape.Shape] = <function spatial>, stack_dim=(vectorᶜ=None), **dimensions: Union[int, phiml.math._tensors.Tensor, tuple, list, Any]) ‑> phiml.math._tensors.Tensor
-
Generate a mesh-grid
Tensor
from keyword dimensions.Args
**dimensions
- Mesh-grid dimensions, mapping names to values.
Values may be
int
, 1D Tensor
or 1D native tensor.
dims
- Dimension type of mesh-grid dimensions, one of
spatial()
, channel()
, batch()
, instance()
.
stack_dim
- Channel dim along which grids are stacked.
This is optional for 1D mesh-grids. In that case returns a
Tensor
without a stack dim if None
or an empty Shape
is passed.
Returns
Mesh-grid
Tensor
with the dimensions of dims
/ dimensions
and stack_dim
.
Examples
>>> math.meshgrid(x=2, y=2)
(xˢ=2, yˢ=2, vectorᶜ=x,y) 0.500 ± 0.500 (0e+00...1e+00)
>>> math.meshgrid(x=2, y=(-1, 1))
(xˢ=2, yˢ=2, vectorᶜ=x,y) 0.250 ± 0.829 (-1e+00...1e+00)
>>> math.meshgrid(x=2, stack_dim=None)
(0, 1) along xˢ
def min(value, dim: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function non_batch>, key: phiml.math._tensors.Tensor = None) ‑> phiml.math._tensors.Tensor
-
Determines the minimum value of
values
along the specified dimensions.
Args
value
- (Sparse)
Tensor
or list
/ tuple
of Tensors.
dim
-
Dimension or dimensions to be reduced. One of
None
to reduce all non-batch dimensions
str
containing single dimension or comma-separated list of dimensions
Tuple[str]
or List[str]
Shape
batch()
, instance()
, spatial()
, channel()
to select dimensions by type
'0'
when isinstance(value, (tuple, list))
to reduce the sequence of Tensors
key
- Optional comparison values. If specified, returns the value where
key
is minimal, seeat_min()
.
Returns
Tensor
without the reduced dimensions.
def minimize(f: Callable[[~X], ~Y], solve: phiml.math._optimize.Solve[~X, ~Y]) ‑> ~X
-
Finds a minimum of the scalar function f(x). The
method
argument of solve
determines which optimizer is used. All optimizers supported by scipy.optimize.minimize
are supported, see https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html . Additionally, a gradient descent solver with adaptive step size can be used with method='GD'
.
math.minimize()
is limited to backends that support jacobian()
, i.e. PyTorch, TensorFlow and Jax.
To obtain additional information about the performed solve, use a
SolveTape
.
See Also:
solve_nonlinear()
.
Args
f
- Function whose output is subject to minimization.
All positional arguments of
f
are optimized and must be Tensor
or PhiTreeNode
. If solve.x0
is a tuple
or list
, it will be passed to f as varargs, f(*x0)
. To minimize a subset of the positional arguments, define a new (lambda) function depending only on those. The first return value of f
must be a scalar float Tensor
or PhiTreeNode
.
solve
Solve
object to specify method type, parameters and initial guess for x
.
Returns
x
- solution, the minimum point
x
.
Raises
NotConverged
- If the desired accuracy was not reached within the maximum number of iterations.
Diverged
- If the optimization failed prematurely.
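Example (a minimal sketch, assuming Solve is exported by phiml.math and accepts the method name and initial guess x0 as shown, with tolerances left at their defaults):
>>> from phiml import math
>>> from phiml.math import Solve, wrap
>>> def loss(x):
...     return math.l2_loss(x - 2)
>>> x_min = math.minimize(loss, Solve('GD', x0=wrap(0.)))   # converges towards x = 2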
def minimum(x: Union[float, phiml.math._tensors.Tensor], y: Union[float, phiml.math._tensors.Tensor], allow_none=False)
-
Computes the element-wise minimum of
x
and y
.
def nan_to_0(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Replaces all NaN values in
x
with 0
.
def native(value: Union[phiml.math._tensors.Tensor, numbers.Number, tuple, list, Any])
-
Returns the native tensor representation of
value
. If value
is a Tensor
, this is equal to calling Tensor.native()
. Otherwise, checks that value
is a valid tensor object and returns it.
Args
value
Tensor
or native tensor or tensor-like.
Returns
Native tensor representation
Raises
ValueError if the tensor cannot be transposed to match target_shape
def native_call(f: Callable, *inputs: phiml.math._tensors.Tensor, channels_last=None, channel_dim='vector', spatial_dim=None)
-
Calls
f
with the native representations of the inputs
tensors in standard layout and returns the result as a Tensor
.
All inputs are converted to native tensors (including precision cast) depending on
channels_last
:
channels_last=True
: Dimension layout (total_batch_size, spatial_dims…, total_channel_size)
channels_last=False
: Dimension layout (total_batch_size, total_channel_size, spatial_dims…)
All batch dimensions are compressed into a single dimension with
total_batch_size = input.shape.batch.volume
. The same is done for all channel dimensions.
Additionally, missing batch and spatial dimensions are added so that all
inputs
have the same batch and spatial shape.
Args
f
- Function to be called on native tensors of
inputs
. The function output must have the same dimension layout as the inputs, unless overridden by spatial_dim
, and the batch size must be identical.
*inputs
- Uniform
Tensor
arguments.
channels_last
- (Optional) Whether to put channels as the last dimension of the native representation.
If
None
, the channels are put in the default position associated with the current backend, see phiml.math.backend.Backend.prefers_channels_last()
.
channel_dim
- Name of the channel dimension of the result.
spatial_dim
- Name of the spatial dimension of the result.
Returns
Tensor
with batch and spatial dimensions of inputs
, unless overridden by spatial_dim
, and single channel dimension channel_dim
.
def ncat(values: Sequence[~PhiTreeNodeType], dim: phiml.math._shape.Shape, expand_values=False) ‑> ~PhiTreeNodeType
-
Concatenate named components along
dim
.
Args
values
- Each value can contain multiple components of
dim
if dim
is present in its shape. Else, it is interpreted as a single component whose name will be determined from the leftover item names of dim
.
dim
- Single dimension that has item names matching components of
values
.
expand_values
- If
True
, will add all missing dimensions to values, not just batch dimensions. This allows tensors with different dimensions to be concatenated. The resulting tensor will have all dimensions that are present in values
. If False
, this may return a non-numeric object instead.
Returns
Same type as any value from
values
.
def neighbor_max(grid: phiml.math._tensors.Tensor, dims: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function spatial>, padding: Union[Extrapolation, float, phiml.math._tensors.Tensor, str, None] = None, extend_bounds=0) ‑> phiml.math._tensors.Tensor
-
neighbor_reduce()
with reduce_fun
set to max_()
.
def neighbor_mean(grid: phiml.math._tensors.Tensor, dims: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function spatial>, padding: Union[Extrapolation, float, phiml.math._tensors.Tensor, str, None] = None, extend_bounds=0) ‑> phiml.math._tensors.Tensor
-
neighbor_reduce()
with reduce_fun
set to mean()
.
def neighbor_min(grid: phiml.math._tensors.Tensor, dims: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function spatial>, padding: Union[Extrapolation, float, phiml.math._tensors.Tensor, str, None] = None, extend_bounds=0) ‑> phiml.math._tensors.Tensor
-
neighbor_reduce()
with reduce_fun
set to min_()
.
def neighbor_reduce(reduce_fun: Callable, grid: phiml.math._tensors.Tensor, dims: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function spatial>, padding: Union[Extrapolation, float, phiml.math._tensors.Tensor, str, None] = None, padding_kwargs: dict = None, extend_bounds=0) ‑> phiml.math._tensors.Tensor
-
Computes the sum/mean/min/max/prod/etc. of two neighboring values along each dimension in
dim
. The result tensor has one entry less than grid
in each averaged dimension unless padding
is specified.
With two
dims
, computes the mean of 4 values, in 3D, the mean of 8 values.Args
reduce_fun
- Reduction function, such as
sum_()
, mean()
, max_()
, min_()
, prod()
.
grid
- Values to reduce.
dims
- Dimensions along which neighbors should be reduced.
padding
- Padding at the upper edges of
grid
along dims. If not
None, the result tensor will have the same shape as
grid.
padding_kwargs
- Additional keyword arguments to be passed to
pad()
.
Returns
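Example (a 1D illustration via neighbor_sum; without padding the result has one entry less):
>>> from phiml.math import wrap, neighbor_sum, spatial
>>> x = wrap([1., 2., 4.], spatial('x'))
>>> neighbor_sum(x)   # sums of neighboring pairs: (3, 6) along xˢ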
def neighbor_sum(grid: phiml.math._tensors.Tensor, dims: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function spatial>, padding: Union[Extrapolation, float, phiml.math._tensors.Tensor, str, None] = None, extend_bounds=0) ‑> phiml.math._tensors.Tensor
-
neighbor_reduce()
with reduce_fun
set to sum_()
.
def non_batch(obj) ‑> phiml.math._shape.Shape
def non_channel(obj) ‑> phiml.math._shape.Shape
def non_dual(obj) ‑> phiml.math._shape.Shape
def non_instance(obj) ‑> phiml.math._shape.Shape
def non_primal(obj) ‑> phiml.math._shape.Shape
def non_spatial(obj) ‑> phiml.math._shape.Shape
def nonzero(value: phiml.math._tensors.Tensor, list_dim: Union[phiml.math._shape.Shape, str, int] = (nonzeroⁱ=None), index_dim: phiml.math._shape.Shape = (vectorᶜ=None), element_dims: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function channel>, list_dims: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function non_batch>, preserve_names=False)
-
Get spatial indices of non-zero / True values.
Batch dimensions are preserved by this operation. If channel dimensions are present, this method returns the indices where any component is nonzero.
Implementations:
- NumPy:
numpy.argwhere
- PyTorch:
torch.nonzero
- TensorFlow:
tf.where(tf.not_equal(values, 0))
- Jax:
jax.numpy.nonzero
Args
value
- spatial tensor to find non-zero / True values in.
list_dim
- Dimension listing non-zero values. If size specified, lists only the first
size
non-zero values. Special case: For retrieving only the first non-zero value, you may pass 1
instead of a Shape
of size 1.
index_dim
- Index dimension.
element_dims
- Dims listing components of one value. A value is only considered
zero
if all components are 0.
list_dims
- Dims in which non-zero elements are searched. These will be stored in the item names of
index_dim
.
Returns
Tensor
of shape (batch dims…, list_dim
=#non-zero, index_dim
=value.shape.spatial_rank)
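Example (a short sketch; one index vector is listed per non-zero entry):
>>> from phiml.math import wrap, nonzero, spatial
>>> v = wrap([0, 1, 0, 2], spatial('x'))
>>> nonzero(v)   # indices 1 and 3, listed along the nonzeroⁱ dim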
def norm(vec: phiml.math._tensors.Tensor, vec_dim: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function channel>, eps: Union[float, phiml.math._tensors.Tensor] = None)
-
Computes the vector norm (L2 norm) of
vec
defined as √∑v².
Args
eps
- Minimum valid vector length. Use to avoid
inf
gradients for zero-norm vectors. Lengths shorter than eps
are set to 0.
def normalize(vec: phiml.math._tensors.Tensor, vec_dim: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function channel>, epsilon=None, allow_infinite=False, allow_zero=False)
-
Normalizes the vectors in
vec
. If vec_dim
is None, the combined channel dimensions of vec
are interpreted as a vector.
Args
vec
Tensor
to normalize.
vec_dim
- Dimensions to normalize over. By default, all channel dimensions are used to compute the vector length.
epsilon
- (Optional) Zero-length threshold. Vectors shorter than this length yield the unit vector (1, 0, 0, …).
If not specified, the zero-vector yields
NaN
as it cannot be normalized. allow_infinite
- Allow infinite components in vectors. These vectors will then only point towards the infinite components.
allow_zero
- Whether to return zero vectors for inputs shorter than
epsilon
instead of a unit vector.
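Example:
>>> from phiml.math import vec, norm, normalize
>>> v = vec(x=3., y=4.)
>>> norm(v)        # 5.0
>>> normalize(v)   # (x=0.6, y=0.8)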
def normalize_to(target: phiml.math._tensors.Tensor, source: Union[float, phiml.math._tensors.Tensor], epsilon=1e-05)
def numpy(value: Union[phiml.math._tensors.Tensor, numbers.Number, tuple, list, Any])
-
Converts
value
to a numpy.ndarray
where value must be a Tensor
, backend tensor or tensor-like. If value
is a Tensor
, this is equal to calling Tensor.numpy()
.
Note: Using this function breaks the autograd chain. The returned tensor is not differentiable. To get a differentiable tensor, use
Tensor.native()
instead.
Transposes the underlying tensor to match the name order and adds singleton dimensions for new dimension names. If a dimension of the tensor is not listed in
order
, a ValueError
is raised.
If
value
is a NumPy array, it may be returned directly.
Returns
NumPy representation of
value
Raises
ValueError if the tensor cannot be transposed to match target_shape
def ones(*shape: phiml.math._shape.Shape, dtype: Union[phiml.backend._dtype.DType, tuple, type] = None) ‑> phiml.math._tensors.Tensor
-
Define a tensor with specified shape with value
1.0
/ 1
/ True
everywhere.
This method may not immediately allocate the memory to store the values.
See Also:
ones_like()
, zeros()
.
Args
*shape
- This (possibly empty) sequence of
Shape
s is concatenated, preserving the order. dtype
- Data type as
DType
object. Defaults to float
matching the current precision setting.
Returns
def ones_like(value: phiml.math._tensors.Tensor) ‑> phiml.math._tensors.Tensor
-
Create a
Tensor
containing only 1.0
/ 1
/ True
with the same shape and dtype as value
.
def pack_dims(value, dims: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None], packed_dim: Union[str, phiml.math._shape.Shape], pos: Optional[int] = None, **kwargs)
-
Compresses multiple dimensions into a single dimension by concatenating the elements. Elements along the new dimensions are laid out according to the order of
dims
. If the order of dims
differs from the current dimension order, the tensor is transposed accordingly. This function replaces the traditional reshape
for these cases.
The type of the new dimension will be equal to the types of
dims
. If dims
have varying types, the new dimension will be a batch dimension.
If none of
dims
exist on value
, packed_dim
will be added only if it is given with a definite size and value
is not a primitive type.
See Also:
unpack_dim()
Args
value
Shapable
, such as Tensor
.
dims
- Dimensions to be compressed in the specified order.
packed_dim
- Single-dimension
Shape
. pos
- Index of new dimension.
None
for automatic, -1
for last, 0
for first.
**kwargs
- Additional keyword arguments required by specific implementations.
Adding spatial dimensions to fields requires the
bounds: Box
argument specifying the physical extent of the new dimensions. Adding batch dimensions must always work without keyword arguments.
Returns
Same type as
value
.Examples
>>> pack_dims(math.zeros(spatial(x=4, y=3)), spatial, instance('points'))
(pointsⁱ=12) const 0.0
def pad(value: phiml.math._tensors.Tensor, widths: Union[dict, tuple, list], mode: Union[ForwardRef('e_.Extrapolation'), phiml.math._tensors.Tensor, numbers.Number, str, dict] = 0, **kwargs) ‑> phiml.math._tensors.Tensor
-
Pads a tensor along the specified dimensions, determining the added values using the given extrapolation. Unlike
Extrapolation.pad()
, this function can handle negative widths which slice off outer values.
Args
value
Tensor
to be padded.
widths
-
Number of values to add at the edge of
value
. Negative values can be used to slice off edge values. Must be one of the following:
tuple
containing (lower: int, upper: int)
. This will pad all non-batch dimensions by lower
and upper
at the lower and upper edge, respectively.
dict
mapping dim: str -> (lower: int, upper: int)
- Sequence of slicing
dict
s. This will add all values specified by the slicing dicts and is the inverse operation to slice_off()
. Exactly one value in each slicing dict must be a slice
object.
mode
- Padding mode used to determine values added from positive
widths
. Must be one of the following: Extrapolation
, Tensor
or number for constant extrapolation, name of extrapolation as str
.
kwargs
- Additional padding arguments.
These are ignored by the standard extrapolations defined in
phiml.math.extrapolation
but can be used to pass additional contextual information to custom extrapolations.
Returns
Padded
Tensor
Examples
>>> math.pad(math.ones(spatial(x=10, y=10)), {'x': (1, 1), 'y': (2, 1)}, 0)
(xˢ=12, yˢ=13) 0.641 ± 0.480 (0e+00...1e+00)
>>> math.pad(math.ones(spatial(x=10, y=10)), {'x': (1, -1)}, 0)
(xˢ=10, yˢ=10) 0.900 ± 0.300 (0e+00...1e+00)
def pairwise_differences(positions: phiml.math._tensors.Tensor, max_distance: Union[float, phiml.math._tensors.Tensor] = None, format: Union[str, phiml.math._tensors.Tensor] = 'dense', domain: Optional[Tuple[phiml.math._tensors.Tensor, phiml.math._tensors.Tensor]] = None, periodic: Union[bool, phiml.math._tensors.Tensor] = False, method: str = 'auto', default: float = nan, avg_neighbors=8.0) ‑> phiml.math._tensors.Tensor
-
Computes the distance matrix containing the pairwise position differences between each pair of points. The matrix will consist of the channel and batch dimension of
positions
and the primal dimensions plus their dual counterparts, spanning the matrix. Points that are further apart than max_distance
(if specified) are assigned an invalid value given by default
. The diagonal of the matrix (self-distance) consists purely of zero-vectors and is always stored explicitly. The neighbors of the positions are listed along the dual dimension(s) of the matrix, and vectors point towards the neighbors.
This function can operate in dense mode or sparse mode, depending on
format
. If format=='dense'
or a dense Tensor
, all possible pair-wise distances are considered and a full-rank tensor is returned. The value of method
is ignored in that case.
Otherwise, if
format
is a sparse format identifier or sparse Tensor
, only a subset of distances is considered, depending on method
. In this case, the result is a sparse matrix with the same dimensions as the dense tensor would have had.
JIT behavior: This function can be JIT compiled with all backends. However, as the exact number of neighbors is unknown beforehand, all sparse methods rely on a variable-size buffer. PyTorch and TensorFlow allow variable shapes and behave the same way with JIT compilation as without. JAX, however, requires all tensor shapes to be known beforehand. This function will guess the required buffer size based on
avg_neighbors
and track the actually required sizes. When using jit_compile()
, this will automatically trigger a re-tracing when a buffer overflow is detected. Users calling jax.jit
manually must retrieve these sizes from the buffer API and implement buffer overflow handling.
Args
positions
Tensor
. Channel dimensions are interpreted as position components. Instance and spatial dimensions list nodes.
max_distance
- Scalar or
Tensor
specifying a max_radius for each point separately. Can contain additional batch dimensions but spatial/instance dimensions must match positions
if present. If not specified, uses an infinite cutoff radius, i.e. all points will be considered neighbors.
format
- Matrix format as
str
or concrete sparsity pattern as Tensor
. Allowed strings are 'dense',
'sparse',
'csr',
'coo',
'csc'. When a Tensor
is passed, it needs to have all instance and spatial dims as positions
as well as corresponding dual dimensions. The distances will be evaluated at all stored entries of the format
tensor.
domain
- Lower and upper corner of the bounding box. All positions must lie within this box. This must be specified to use with periodic boundaries.
periodic
- Which domain boundaries should be treated as periodic, i.e. particles on opposite sides are neighbors.
Can be specified as a
bool
for all sides or as a vector-valued boolean Tensor
to specify periodicity by direction.
default
- Value for distances greater than
max_distance
. Only for dense distance matrices.
method
-
Neighbor search algorithm; only used if
format
is a sparse format or Tensor
. The default, 'auto'
, lets the runtime decide on the best method. Supported methods:
'sparse'
: GPU-supported hash grid implementation with fully sparse connectivity.
'scipy-kd'
: SciPy's kd-tree implementation.
avg_neighbors
- Expected average number of neighbors. This is only relevant for hash grid searches, where it influences the default buffer sizes.
Returns
Distance matrix as sparse or dense
Tensor
, depending on format
. For each spatial/instance dimension in positions
, the matrix also contains a dual dimension of the same name and size. The matrix also contains all batch dimensions of positions
and the channel dimension of positions
.
Examples
>>> pos = vec(x=0, y=tensor([0, 1, 2.5], instance('particles')))
>>> dx = pairwise_differences(pos, format='dense', max_distance=2)
>>> dx.particles[0]
(x=0.000, y=0.000); (x=0.000, y=1.000); (x=0.000, y=0.000) (~particlesᵈ=3, vectorᶜ=x,y)
def pairwise_distances(positions: phiml.math._tensors.Tensor, max_distance: Union[float, phiml.math._tensors.Tensor] = None, format: Union[str, phiml.math._tensors.Tensor] = 'dense', domain: Optional[Tuple[phiml.math._tensors.Tensor, phiml.math._tensors.Tensor]] = None, periodic: Union[bool, phiml.math._tensors.Tensor] = False, method: str = 'auto', default: float = nan, avg_neighbors=8.0) ‑> phiml.math._tensors.Tensor
-
Computes the distance matrix containing the pairwise position differences between each pair of points. The matrix will consist of the channel and batch dimension of
positions
and the primal dimensions plus their dual counterparts, spanning the matrix. Points that are further apart than max_distance
(if specified) are assigned an invalid value given by default
. The diagonal of the matrix (self-distance) consists purely of zero-vectors and is always stored explicitly. The neighbors of the positions are listed along the dual dimension(s) of the matrix, and vectors point towards the neighbors.
This function can operate in dense mode or sparse mode, depending on
format
. If format=='dense'
or a dense Tensor
, all possible pair-wise distances are considered and a full-rank tensor is returned. The value of method
is ignored in that case.
Otherwise, if
format
is a sparse format identifier or sparse Tensor
, only a subset of distances is considered, depending on method
. In this case, the result is a sparse matrix with the same dimensions as the dense tensor would have had.
JIT behavior: This function can be JIT compiled with all backends. However, as the exact number of neighbors is unknown beforehand, all sparse methods rely on a variable-size buffer. PyTorch and TensorFlow allow variable shapes and behave the same way with JIT compilation as without. JAX, however, requires all tensor shapes to be known beforehand. This function will guess the required buffer size based on
avg_neighbors
and track the actually required sizes. When using jit_compile()
, this will automatically trigger a re-tracing when a buffer overflow is detected. Users calling jax.jit
manually must retrieve these sizes from the buffer API and implement buffer overflow handling.
Args
positions
Tensor
. Channel dimensions are interpreted as position components. Instance and spatial dimensions list nodes.
max_distance
- Scalar or
Tensor
specifying a max_radius for each point separately. Can contain additional batch dimensions but spatial/instance dimensions must match positions
if present. If not specified, uses an infinite cutoff radius, i.e. all points will be considered neighbors.
format
- Matrix format as
str
or concrete sparsity pattern as Tensor
. Allowed strings are 'dense',
'sparse',
'csr',
'coo',
'csc'. When a Tensor
is passed, it needs to have all instance and spatial dims as positions
as well as corresponding dual dimensions. The distances will be evaluated at all stored entries of the format
tensor.
domain
- Lower and upper corner of the bounding box. All positions must lie within this box. This must be specified to use with periodic boundaries.
periodic
- Which domain boundaries should be treated as periodic, i.e. particles on opposite sides are neighbors.
Can be specified as a
bool
for all sides or as a vector-valued boolean Tensor
to specify periodicity by direction.
default
- Value for distances greater than
max_distance
. Only for dense distance matrices.
method
-
Neighbor search algorithm; only used if
format
is a sparse format or Tensor
. The default, 'auto'
, lets the runtime decide on the best method. Supported methods:
'sparse'
: GPU-supported hash grid implementation with fully sparse connectivity.
'scipy-kd'
: SciPy's kd-tree implementation.
avg_neighbors
- Expected average number of neighbors. This is only relevant for hash grid searches, where it influences the default buffer sizes.
Returns
Distance matrix as sparse or dense
Tensor
, depending on format
. For each spatial/instance dimension in positions
, the matrix also contains a dual dimension of the same name and size. The matrix also contains all batch dimensions of positions
and the channel dimension of positions
.
Examples
>>> pos = vec(x=0, y=tensor([0, 1, 2.5], instance('particles')))
>>> dx = pairwise_differences(pos, format='dense', max_distance=2)
>>> dx.particles[0]
(x=0.000, y=0.000); (x=0.000, y=1.000); (x=0.000, y=0.000) (~particlesᵈ=3, vectorᶜ=x,y)
def perf_counter(wait_for_tensor, *wait_for_tensors: phiml.math._tensors.Tensor) ‑> phiml.math._tensors.Tensor
-
Get the time (
time.perf_counter()
) at which all wait_for_tensors
are computed. If all tensors are already available, returns the current time.perf_counter()
.
Args
wait_for_tensor
Tensor
that needs to be computed before the time is measured.
*wait_for_tensors
- Additional tensors that need to be computed before the time is measured.
Returns
Time at which all
wait_for_tensors
are ready as a scalar Tensor
.
def pick_random(value: ~TensorOrTree, dim: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None], count: Union[int, phiml.math._shape.Shape, None] = 1, weight: Optional[phiml.math._tensors.Tensor] = None) ‑> ~TensorOrTree
-
Pick one or multiple random entries from
value
.
Args
value
- Tensor or tree. When containing multiple tensors, the corresponding entries are picked on all tensors that have
dim
. You can pass arange()
(the type) to retrieve the picked indices.
dim
- Dimension along which to pick random entries.
Shape
with one dim.
count
- Number of entries to pick. When specified as a
Shape
, lists picked values along count
instead of dim
.
weight
- Probability weight of each item along
dim
. Will be normalized to sum to 1.
Returns
Tensor
or tree equal to value
.
def precision(floating_point_bits: int)
-
Sets the floating point precision for the local context.
Usage:
with precision(p):
This overrides the global setting, see
set_global_precision()
.
Args
floating_point_bits
- 16 for half, 32 for single, 64 for double
def primal(obj) ‑> phiml.math._shape.Shape
def print(obj: Union[phiml.math._tensors.Tensor, PhiTreeNode, numbers.Number, tuple, list, None] = None, name: str = '')
-
Print a tensor with no more than two spatial dimensions, slicing it along all batch and channel dimensions.
Unlike NumPy's array printing, the dimensions are sorted. Elements along the alphabetically first dimension are printed to the right, the second dimension upward. Typically, this means x right, y up.
Args
obj
- tensor-like
name
- name of the tensor
Returns:
def print_gradient(value: phiml.math._tensors.Tensor, name='', detailed=False) ‑> phiml.math._tensors.Tensor
-
Prints the gradient vector of
value
when computed. The gradient at value
is the vector-Jacobian product of all operations between the output of this function and the loss value.
The gradient is not printed in jit mode, see
jit_compile()
.
Example
def f(x):
    x = math.print_gradient(x, 'dx')
    return math.l1_loss(x)

math.jacobian(f)(math.ones(x=6))
Args
value
Tensor
for which the gradient may be computed later.
name
- (Optional) Name to print along with the gradient values
detailed
- If
False
, prints a short summary of the gradient tensor.
Returns
identity()(value)
which, when differentiated, prints the gradient vector.
def prod(value, dim: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function non_batch>) ‑> phiml.math._tensors.Tensor
-
Multiplies
values
along the specified dimensions.
Args
value
Tensor
or list
/ tuple
of Tensors.
dim
-
Dimension or dimensions to be reduced. One of
None
to reduce all non-batch dimensions
str
containing single dimension or comma-separated list of dimensions
Tuple[str]
or List[str]
Shape
batch()
, instance()
, spatial()
, channel()
to select dimensions by type
'0'
when isinstance(value, (tuple, list))
to reduce the sequence of Tensors
Returns
Tensor
without the reduced dimensions.
def quantile(value: phiml.math._tensors.Tensor, quantiles: Union[float, phiml.math._tensors.Tensor, tuple, list], dim: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function non_batch>)
-
Compute the q-th quantile of
value
along dim
for each q in quantiles
.
Implementations:
- NumPy:
quantile
- PyTorch:
quantile
- TensorFlow:
tfp.stats.percentile
- Jax:
quantile
Args
value
Tensor
quantiles
- Single quantile or tensor of quantiles to compute.
Must be of type
float
, tuple
, list
or Tensor
.
dim
-
Dimension or dimensions to be reduced. One of
None
to reduce all non-batch dimensions
str
containing single dimension or comma-separated list of dimensions
Tuple[str]
or List[str]
Shape
batch()
, instance()
, spatial()
, channel()
to select dimensions by type
'0'
when isinstance(value, (tuple, list))
to reduce the sequence of Tensors
Returns
Tensor
with dimensions of quantiles
and non-reduced dimensions of value
.
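Example:
>>> from phiml.math import wrap, quantile, instance
>>> v = wrap([0., 1., 2., 3., 4.], instance('samples'))
>>> quantile(v, 0.5)   # median: 2.0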
def radians_to_degrees(rad: ~TensorOrTree) ‑> ~TensorOrTree
-
Convert radians to degrees.
def rand(*shape: phiml.math._shape.Shape, low: Union[float, phiml.math._tensors.Tensor] = 0, high: Union[float, phiml.math._tensors.Tensor] = 1, dtype: Union[phiml.backend._dtype.DType, tuple, type] = None) ‑> phiml.math._tensors.Tensor
-
Creates a
Tensor
with the specified shape, filled with random values sampled from a uniform distribution.
Args
*shape
- This (possibly empty) sequence of
Shape
s is concatenated, preserving the order. dtype
- (optional)
DType
or (kind, bits)
. The dtype kind must be one of float
, int
, complex
. If not specified, a float
tensor with the current default precision is created, see get_precision()
.
low
- Minimum value, included.
high
- Maximum value, excluded.
Returns
def randn(*shape: phiml.math._shape.Shape, dtype: Union[phiml.backend._dtype.DType, tuple, type] = None) ‑> phiml.math._tensors.Tensor
-
Creates a
Tensor
with the specified shape, filled with random values sampled from a normal / Gaussian distribution.
Implementations:
- NumPy:
numpy.random.standard_normal
- PyTorch:
torch.randn
- TensorFlow:
tf.random.normal
- Jax:
jax.random.normal
Args
*shape
- This (possibly empty) sequence of
Shape
s is concatenated, preserving the order. dtype
- (optional) floating point
DType
. If None
, a float tensor with the current default precision is created, see get_precision()
.
Returns
def random_normal(*shape: phiml.math._shape.Shape, dtype: Union[phiml.backend._dtype.DType, tuple, type] = None) ‑> phiml.math._tensors.Tensor
-
Creates a
Tensor
with the specified shape, filled with random values sampled from a normal / Gaussian distribution.
Implementations:
- NumPy:
numpy.random.standard_normal
- PyTorch:
torch.randn
- TensorFlow:
tf.random.normal
- Jax:
jax.random.normal
Args
*shape
- This (possibly empty) sequence of
Shape
s is concatenated, preserving the order. dtype
- (optional) floating point
DType
. If None
, a float tensor with the current default precision is created, see get_precision()
.
Returns
def random_permutation(*shape: Union[phiml.math._shape.Shape, Any], dims=<function non_batch>, index_dim=(indexᶜ=None)) ‑> phiml.math._tensors.Tensor
-
Generate random permutations of the integers between 0 and the size of
shape
.
When multiple dims are given, the permutation is randomized across all of them and a tensor of multi-indices is returned.
Batch dims result in batches of permutations.
Args
*shape
Shape
of the result tensor, including dims
and batches.
dims
- Sequence dims for an individual permutation. The total
Shape.volume
defines the maximum integer. All other dims from shape
are treated as batch.
Returns
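A minimal usage sketch (the concrete orderings are random):
>>> from phiml import math
>>> from phiml.math import batch, spatial
>>> math.random_permutation(spatial(x=5))              # e.g. (3, 0, 4, 1, 2) along x
>>> math.random_permutation(batch(b=2), spatial(x=5))  # one permutation per b index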
def random_uniform(*shape: phiml.math._shape.Shape, low: Union[float, phiml.math._tensors.Tensor] = 0, high: Union[float, phiml.math._tensors.Tensor] = 1, dtype: Union[phiml.backend._dtype.DType, tuple, type] = None) ‑> phiml.math._tensors.Tensor
-
Creates a `Tensor` with the specified shape, filled with random values sampled from a uniform distribution.
Args
*shape
- This (possibly empty) sequence of `Shape`s is concatenated, preserving the order.
dtype
- (optional) `DType` or `(kind, bits)`. The dtype kind must be one of `float`, `int`, `complex`. If not specified, a `float` tensor with the current default precision is created, see `get_precision()`.
low
- Minimum value, included.
high
- Maximum value, excluded.
Returns
def range(dim: phiml.math._shape.Shape, start_or_stop: Optional[int] = None, stop: Optional[int] = None, step=1, backend=None)
-
Returns evenly spaced values between `start` and `stop`. If only one limit is given, `0` is used for the start.
See Also:
`range_tensor()`, `linspace()`, `meshgrid()`.
Args
dim
- Dimension name and type as `Shape` object. The `size` of `dim` is interpreted as `stop` unless `start_or_stop` is specified.
start_or_stop
- (Optional) `int`. Interpreted as `start` if `stop` is specified as well. Otherwise this is `stop`.
stop
- (Optional) `int`. `stop` value.
step
- Distance between values.
backend
- Backend to use for creating the tensor. If unspecified, uses the current default.
Returns
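A minimal usage sketch (illustrative):
>>> from phiml import math
>>> from phiml.math import spatial
>>> math.range(spatial(x=4))           # 0, 1, 2, 3; the size of x is used as stop
>>> math.range(spatial('x'), 2, 8, 2)  # 2, 4, 6 along x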
def range_tensor(*shape: phiml.math._shape.Shape)
def ravel_index(index: phiml.math._tensors.Tensor, resolution: phiml.math._shape.Shape, dim=<function channel>, mode='undefined') ‑> phiml.math._tensors.Tensor
def real(x: ~TensorOrTree) ‑> ~TensorOrTree
-
See Also:
`imag()`, `conjugate()`.
Args
x
`Tensor` or `PhiTreeNode` or native tensor.
Returns
Real component of `x`.
def rename_dims(value: ~PhiTreeNodeType, dims: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None], names: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None], **kwargs) ‑> ~PhiTreeNodeType
-
Change the name and optionally the type of some dimensions of `value`.
Dimensions that are not present on `value` will be ignored. The corresponding new dimensions given by `names` will not be added.
Args
value
`Shape` or `Tensor` or `Shapable`.
dims
- Existing dimensions of `value` as comma-separated `str`, `tuple`, `list`, `Shape` or filter function.
names
-
Either
- Sequence of names matching `dims` as `tuple`, `list` or `str`. This replaces only the dimension names but leaves the types untouched.
- `Shape` matching `dims` to replace names and types.
- Dimension type function to replace only types.
**kwargs
- Additional keyword arguments required by specific implementations.
Adding spatial dimensions to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions. Adding batch dimensions must always work without keyword arguments.
Returns
Same type as `value`.
def replace(obj: ~PhiTreeNodeType, **updates) ‑> ~PhiTreeNodeType
-
Creates a copy of the given `PhiTreeNode` with updated values as specified in `updates`.
If `obj` overrides `__with_attrs__`, the copy will be created via that specific implementation. Otherwise, the `copy` module and `setattr` will be used.
Args
obj
PhiTreeNode
**updates
- Values to be replaced.
Returns
Copy of `obj` with updated values.
def replace_dims(value: ~PhiTreeNodeType, dims: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None], names: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None], **kwargs) ‑> ~PhiTreeNodeType
-
Change the name and optionally the type of some dimensions of `value`.
Dimensions that are not present on `value` will be ignored. The corresponding new dimensions given by `names` will not be added.
Args
value
`Shape` or `Tensor` or `Shapable`.
dims
- Existing dimensions of `value` as comma-separated `str`, `tuple`, `list`, `Shape` or filter function.
names
-
Either
- Sequence of names matching `dims` as `tuple`, `list` or `str`. This replaces only the dimension names but leaves the types untouched.
- `Shape` matching `dims` to replace names and types.
- Dimension type function to replace only types.
**kwargs
- Additional keyword arguments required by specific implementations.
Adding spatial dimensions to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions. Adding batch dimensions must always work without keyword arguments.
Returns
Same type as `value`.
def reshaped_native(value: phiml.math._tensors.Tensor, groups: Union[tuple, list], force_expand: Any = True, to_numpy=False)
-
Returns a native representation of `value` where dimensions are laid out according to `groups`.
See Also:
`native()`, `pack_dims()`, `reshaped_tensor()`, `reshaped_numpy()`.
Args
value
Tensor
groups
-
`tuple` or `list` of dimensions to be packed into one native dimension. Each entry must be one of the following:
- `str`: the name of one dimension that is present on `value`.
- `Shape`: Dimensions to be packed. If `force_expand`, missing dimensions are first added, otherwise they are ignored.
- Filter function: Packs all dimensions of this type that are present on `value`.
- Ellipsis `…`: Packs all remaining dimensions into this slot. Can only be passed once.
- `None` or `()`: Adds a singleton dimension.
Collections of or comma-separated dims may also be used but only if all dims are present on `value`.
force_expand
`bool` or sequence of dimensions. If `True`, repeats the tensor along missing dimensions. If `False`, puts singleton dimensions where possible. If a sequence of dimensions is provided, only forces the expansion for groups containing those dimensions.
to_numpy
- If True, converts the native tensor to a
numpy.ndarray
.
Returns
Native tensor with dimensions matching `groups`.
def reshaped_numpy(value: phiml.math._tensors.Tensor, groups: Union[tuple, list], force_expand: Any = True) ‑> numpy.ndarray
-
Returns the NumPy representation of `value` where dimensions are laid out according to `groups`.
See Also:
`numpy_()`, `reshaped_native()`, `pack_dims()`, `reshaped_tensor()`.
Args
value
Tensor
groups
- Sequence of dimension names as `str` or groups of dimensions to be packed as `Shape`.
force_expand
`bool` or sequence of dimensions. If `True`, repeats the tensor along missing dimensions. If `False`, puts singleton dimensions where possible. If a sequence of dimensions is provided, only forces the expansion for groups containing those dimensions.
Returns
NumPy `ndarray` with dimensions matching `groups`.
def reshaped_tensor(value: Any, groups: Union[tuple, list], check_sizes=False, convert=True)
-
Creates a `Tensor` from a native tensor or tensor-like whereby the dimensions of `value` are split according to `groups`.
See Also:
`tensor()`, `reshaped_native()`, `unpack_dim()`.
Args
value
- Native tensor or tensor-like.
groups
- Sequence of dimension groups to be packed, as `tuple[Shape]` or `list[Shape]`.
check_sizes
- If True, group sizes must match the sizes of `value` exactly. Otherwise, allows singleton dimensions.
convert
- If True, converts the data to the native format of the current default backend.
If False, wraps the data in a `Tensor` but keeps the given data reference if possible.
Returns
`Tensor` with all dimensions from `groups`.
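A minimal round-trip sketch pairing `reshaped_tensor()` with `reshaped_native()` (illustrative):
>>> import numpy as np
>>> from phiml import math
>>> from phiml.math import batch, spatial, channel
>>> native = np.zeros((10, 4, 4, 2))
>>> t = math.reshaped_tensor(native, [batch('b'), spatial('x'), spatial('y'), channel(vector='x,y')])
>>> math.reshaped_native(t, [batch, spatial, channel]).shape  # (10, 16, 2), spatial dims packed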
def rotate_vector(vector: phiml.math._tensors.Tensor, angle: Union[float, phiml.math._tensors.Tensor, None], invert=False, dim='vector') ‑> phiml.math._tensors.Tensor
-
Rotates `vector` around the origin.
Args
vector
- n-dimensional vector with exactly one channel dimension
angle
- Euler angle(s) or rotation matrix. `None` is interpreted as no rotation.
invert
- Whether to apply the inverse rotation.
Returns
Rotated vector as `Tensor`.
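A minimal 2D sketch (the result is (0, 1) up to floating-point error):
>>> from phiml import math
>>> from phiml.math import vec, PI
>>> math.rotate_vector(vec(x=1., y=0.), PI / 2)  # rotate 90° around the origin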
def rotation_matrix(x: Union[float, phiml.math._tensors.Tensor, None], matrix_dim=(vectorᶜ=None)) ‑> Optional[phiml.math._tensors.Tensor]
-
Create a 2D or 3D rotation matrix from the corresponding angle(s).
Args
x
- 2D: scalar angle.
- 3D: Either a vector pointing along the rotation axis with the rotation angle as length, or Euler angles. Euler angles need to be laid out along an `angle` channel dimension with dimension names listing the spatial dimensions. E.g. a 90° rotation about the z-axis is represented by `vec('angles', x=0, y=0, z=PI/2)`. If a rotation matrix is passed for `angle`, it is returned without modification.
matrix_dim
- Matrix dimension for 2D rotations. In 3D, the channel dimension of angle is used.
Returns
Matrix containing `matrix_dim` in primal and dual form as well as all non-channel dimensions of `x`.
def round(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Rounds the `Tensor` or `PhiTreeNode` `x` to the closest integer.
def s2b(value: ~PhiTreeNodeType) ‑> ~PhiTreeNodeType
-
Change the type of all spatial dimensions of `value` to batch dimensions. See `rename_dims()`.
def safe_div(x: Union[numbers.Number, phiml.math._tensors.Tensor], y: Union[numbers.Number, phiml.math._tensors.Tensor])
-
Computes x/y with the `Tensor`s `x` and `y` but returns 0 where y=0.
def safe_mul(x: Union[numbers.Number, phiml.math._tensors.Tensor], y: Union[numbers.Number, phiml.math._tensors.Tensor])
-
Multiplication for tensors with non-finite values. Computes x·y in the forward pass but drops gradient contributions from infinite and `NaN` values.
def sample_subgrid(grid: phiml.math._tensors.Tensor, start: phiml.math._tensors.Tensor, size: phiml.math._shape.Shape) ‑> phiml.math._tensors.Tensor
-
Samples a sub-grid from `grid` with equal distance between sampling points. The values at the new sample points are determined via linear interpolation.
Args
grid
`Tensor` to be resampled. Values are assumed to be sampled at cell centers.
start
- Origin point of sub-grid within `grid`, measured in number of cells. Must have a single dimension called `vector`. Example: `start=(1, 0.5)` would slice off the first grid point in dim 1 and take the mean of neighbouring points in dim 2. The order of dims must be equal to `size` and `grid.shape.spatial`.
size
- Resolution of the sub-grid. Must not be larger than the resolution of `grid`. The order of dims must be equal to `start` and `grid.shape.spatial`.
Returns
Sub-grid as `Tensor`.
def save(file: str, obj)
-
Saves a `Tensor` or tree using NumPy. This function converts all tensors contained in `obj` to NumPy tensors before storing. Each tensor is given a name corresponding to its path within `obj`, allowing reading only specific arrays from the file later on. Pickle is used for structures, but no reference to `Tensor` or its sub-classes is included.
See Also:
`load()`.
Args
file
- Target file, will be stored as `.npz`.
obj
- `Tensor` or tree to store.
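A minimal usage sketch (the file name data.npz is hypothetical):
>>> from phiml import math
>>> from phiml.math import spatial
>>> math.save('data.npz', {'velocity': math.random_normal(spatial(x=8))})
>>> math.load('data.npz')  # restores the tree with NumPy-backed tensors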
def scatter(base_grid: Union[phiml.math._tensors.Tensor, phiml.math._shape.Shape], indices: Union[phiml.math._tensors.Tensor, dict], values: Union[float, phiml.math._tensors.Tensor], mode: Union[str, Callable] = 'update', outside_handling: str = 'check', indices_gradient=False, default=None, treat_as_batch=None)
-
Scatters `values` into `base_grid` at `indices`. Instance dimensions of `indices` and/or `values` are reduced during scattering. Depending on `mode`, this method has one of the following effects:
- `mode='update'`: Replaces the values of `base_grid` at `indices` by `values`. The result is undefined if `indices` contains duplicates.
- `mode='add'`: Adds `values` to `base_grid` at `indices`. The values corresponding to duplicate indices are accumulated.
- `mode='mean'`: Replaces the values of `base_grid` at `indices` by the mean of all `values` with the same index.
Implementations:
- NumPy: Slice assignment / `numpy.add.at`
- PyTorch: `torch.scatter`, `torch.scatter_add`
- TensorFlow: `tf.tensor_scatter_nd_add`, `tf.tensor_scatter_nd_update`
- Jax: `jax.lax.scatter_add`, `jax.lax.scatter`
See Also:
`gather()`.
Args
base_grid
`Tensor` into which `values` are scattered.
indices
`Tensor` of n-dimensional indices at which to place `values`. Must have a single channel dimension with size matching the number of spatial dimensions of `base_grid`. This dimension is optional if the spatial rank is 1. Must also contain all `scatter_dims`.
values
`Tensor` of values to scatter at `indices`.
mode
- Scatter mode as `str` or function. Supported modes are 'add', 'mean', 'update', 'max', 'min', 'prod', 'any', 'all'. The corresponding functions are the built-in `sum`, `max`, `min`, as well as the reduce functions in `phiml.math`.
outside_handling
-
Defines how indices lying outside the bounds of `base_grid` are handled.
- `'check'`: Raise an error if any index is out of bounds.
- `'discard'`: Outside indices are ignored.
- `'clamp'`: Outside indices are projected onto the closest point inside the grid.
- `'undefined'`: All points are expected to lie inside the grid. Otherwise an error may be thrown or an undefined tensor may be returned.
indices_gradient
- Whether to allow the gradient of this operation to be backpropagated through `indices`.
default
- Default value to use for bins into which no value is scattered. By default, `NaN` is used for the modes `update` and `mean`, `0` for `sum`, `inf` for min and `-inf` for max. This will upgrade the data type to `float` if necessary.
treat_as_batch
- Dimensions which should be treated like batch dims by this operation. This can be used for scattering vectors along instance dims into a grid. Normally, instance dims on `values` and `indices` would not be matched to `base_grid` but when treated as batch, they will be.
Returns
Copy of `base_grid` with updated values at `indices`.
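A minimal sketch accumulating two values into a 4×4 grid (illustrative):
>>> from phiml import math
>>> from phiml.math import spatial, instance, channel
>>> base = math.zeros(spatial(x=4, y=4))
>>> idx = math.tensor([(0, 0), (1, 2)], instance('points'), channel(vector='x,y'))
>>> val = math.tensor([1., 2.], instance('points'))
>>> math.scatter(base, idx, val, mode='add')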
def seed(seed: int)
-
Sets the current seed of all backends and the built-in `random` package.
Calling this function with a fixed value at the start of an application yields reproducible results as long as the same backend is used.
Args
seed
- Seed to use.
def set_global_precision(floating_point_bits: int)
-
Sets the floating point precision of DYNAMIC_BACKEND which affects all registered backends.
If `floating_point_bits` is an integer, all floating point tensors created henceforth will be of the corresponding data type, float16, float32 or float64. Operations may also convert floating point values to this precision, even if the input had a different precision.
If `floating_point_bits` is None, new tensors will default to float32 unless specified otherwise. The output of math operations has the same precision as its inputs.
Args
floating_point_bits
- one of (16, 32, 64, None)
def shape(obj, allow_unshaped=False) ‑> phiml.math._shape.Shape
-
If `obj` is a `Tensor` or `Shaped`, returns its shape. If `obj` is a `Shape`, returns `obj`.
This function can be passed as a `dim` argument to an operation to specify that it should act upon all dimensions.
Args
obj
`Tensor` or `Shape` or `Shaped`
allow_unshaped
- If `True`, returns an empty shape for unsupported objects, else raises a `ValueError`.
Returns
def shift(x: phiml.math._tensors.Tensor, offsets: Sequence[int], dims: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function spatial>, padding: Union[Extrapolation, float, phiml.math._tensors.Tensor, str, None] = zero-gradient, stack_dim: Union[str, phiml.math._shape.Shape, None] = (shiftᶜ=None), extend_bounds: Union[int, tuple] = 0, padding_kwargs: dict = None) ‑> List[phiml.math._tensors.Tensor]
-
Shift the tensor `x` by a fixed offset, using `padding` for edge values.
This is similar to `numpy.roll()` but with major differences:
- Values shifted in from the boundary are defined by `padding`.
- Positive offsets represent negative shifts.
- Support for multi-dimensional shifts
See Also:
`index_shift()`, `neighbor_reduce()`.
Args
x
- Input grid-like `Tensor`.
offsets
- `tuple` listing shifts to compute, each must be an `int`. One `Tensor` will be returned for each entry.
dims
- Dimensions along which to shift, defaults to all spatial dims of `x`.
padding
- Padding to be performed at the boundary so that the shifted versions have the same size as `x`. Must be one of the following: `Extrapolation`, `Tensor` or number for constant extrapolation, name of extrapolation as `str`. Can be set to `None` to disable padding. Then the result tensors will be smaller than `x`.
stack_dim
- Dimension along which the components corresponding to each dim in `dims` should be stacked. This can be set to `None` only if `dims` is a single dimension.
extend_bounds
- Number of cells by which to pad the tensors in addition to the number required to maintain the size of `x`. Can only be used with a valid `padding`.
padding_kwargs
- Additional keyword arguments to be passed to `pad()`.
Returns
`list` of shifted tensors. The number of returned tensors is equal to the number of `offsets`.
def si2d(value: ~PhiTreeNodeType) ‑> ~PhiTreeNodeType
-
Change the type of all spatial and instance dimensions of `value` to dual dimensions. See `rename_dims()`.
def sigmoid(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Computes the sigmoid function of the `Tensor` or `PhiTreeNode` `x`.
def sign(x: ~TensorOrTree) ‑> ~TensorOrTree
-
The sign of positive numbers is 1 and -1 for negative numbers. The sign of 0 is undefined.
Args
x
`Tensor` or `PhiTreeNode`
Returns
`Tensor` or `PhiTreeNode` matching `x`.
def sin(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Computes sin(x) of the `Tensor` or `PhiTreeNode` `x`.
def sinh(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Computes sinh(x) of the `Tensor` or `PhiTreeNode` `x`.
def slice(value: ~PhiTreeNodeType, slices: Union[Dict[str, Union[int, slice_(), str, tuple, list, Any]], Any]) ‑> ~PhiTreeNodeType
-
Slices a `Tensor` or `PhiTreeNode` along named dimensions.
See Also:
`unstack()`.
Args
value
`Tensor` or `PhiTreeNode` or `Number` or `None`.
slices
-
`dict` mapping dimension names to slices. A slice can be one of the following:
- An index (`int`)
- A range (`slice`)
- An item name (`str`)
- Multiple item names (comma-separated `str`)
- Multiple indices or item names (`tuple` or `list`)
Returns
`Tensor` or `PhiTreeNode` of the same type as `value`.
Examples
>>> math.slice([vec(x=0, y=1), vec(x=2, y=3)], {'vector': 'y'}) [1, 3]
def slice_off(x, *slices: Dict[str, Union[slice_(), int, str]])
def soft_plus(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Computes softplus(x) of the `Tensor` or `PhiTreeNode` `x`.
def softmax(x, reduce: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None])
-
Compute the softmax of `x` over any dimension. The softmax is e^x / ∑ e^x.
def solve_linear(f: Union[Callable[[~X], ~Y], phiml.math._tensors.Tensor], y: ~Y, solve: phiml.math._optimize.Solve[~X, ~Y], *f_args, grad_for_f=False, f_kwargs: dict = None, **f_kwargs_) ‑> ~X
-
Solves the system of linear equations f(x) = y and returns x. This method will use the solver specified in `solve`. The following method identifiers are supported by all backends:
- `'auto'`: Automatically choose a solver.
- `'CG'`: Conjugate gradient, only for symmetric and positive definite matrices.
- `'CG-adaptive'`: Conjugate gradient with adaptive step size, only for symmetric and positive definite matrices.
- `'biCG'` or `'biCG-stab(0)'`: Biconjugate gradient.
- `'biCG-stab'` or `'biCG-stab(1)'`: Biconjugate gradient stabilized, first order.
- `'biCG-stab(2)'`, `'biCG-stab(4)'`, …: Biconjugate gradient stabilized, second or higher order.
- `'scipy-direct'`: SciPy direct solve, always run on the CPU using `scipy.sparse.linalg.spsolve`.
- `'scipy-CG'`, `'scipy-GMres'`, `'scipy-biCG'`, `'scipy-biCG-stab'`, `'scipy-CGS'`, `'scipy-QMR'`, `'scipy-GCrotMK'`: SciPy iterative solvers, always run on the CPU, both in eager execution and JIT mode.
For maximum performance, compile `f` using `jit_compile_linear()` beforehand. Then, an optimized representation of `f` (such as a sparse matrix) will be used to solve the linear system.
Caution: The matrix construction may potentially be performed each time `solve_linear()` is called if auxiliary arguments change. To prevent this, jit-compile the function that makes the call to `solve_linear()`.
To obtain additional information about the performed solve, perform the solve within a `SolveTape` context. The used implementation can be obtained as `SolveInfo.method`.
The gradient of this operation will perform another linear solve with the parameters specified by `Solve.gradient_solve`.
See Also:
`solve_nonlinear()`, `jit_compile_linear()`.
Args
f
-
One of the following:
- Linear function with `Tensor` or `PhiTreeNode` first parameter and return value. `f` can have additional auxiliary arguments and return auxiliary values.
- Dense matrix (`Tensor` with at least one dual dimension)
- Sparse matrix (sparse `Tensor` with at least one dual dimension)
- Native tensor (not yet supported)
y
- Desired output of `f(x)` as `Tensor` or `PhiTreeNode`.
solve
- `Solve` object specifying optimization method, parameters and initial guess for `x`.
*f_args
- Positional arguments to be passed to `f` after `solve.x0`. These arguments will not be solved for. Supports vararg mode or pass all arguments as a `tuple`.
f_kwargs
- Additional keyword arguments to be passed to `f`. These arguments are treated as auxiliary arguments and can be of any type.
Returns
x
- Solution of the linear system of equations `f(x) = y` as `Tensor` or `PhiTreeNode`.
Raises
NotConverged
- If the desired accuracy could not be reached within the maximum number of iterations.
Diverged
- If the solve failed prematurely.
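A sketch under the assumption that `Solve` can be constructed as `Solve(method, x0=...)`; a direct SciPy solve avoids tolerance settings:
>>> from phiml import math
>>> from phiml.math import spatial, Solve
>>> lin = math.jit_compile_linear(lambda x: 2 * x - math.mean(x, 'x'))  # linear in x
>>> y = math.random_normal(spatial(x=8))
>>> math.solve_linear(lin, y, Solve('scipy-direct', x0=math.zeros(spatial(x=8))))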
def solve_nonlinear(f: Callable, y, solve: phiml.math._optimize.Solve) ‑> phiml.math._tensors.Tensor
-
Solves the non-linear equation f(x) = y by minimizing the norm of the residual.
This method is limited to backends that support `jacobian()`, currently PyTorch, TensorFlow and Jax.
To obtain additional information about the performed solve, use a `SolveTape`.
See Also:
`minimize()`, `solve_linear()`.
Args
f
- Function whose output is optimized to match `y`. All positional arguments of `f` are optimized and must be `Tensor` or `PhiTreeNode`. The output of `f` must match `y`.
y
- Desired output of `f(x)` as `Tensor` or `PhiTreeNode`.
solve
- `Solve` object specifying optimization method, parameters and initial guess for `x`.
Returns
x
- Solution fulfilling `f(x) = y` within specified tolerance as `Tensor` or `PhiTreeNode`.
Raises
NotConverged
- If the desired accuracy could not be reached within the maximum number of iterations.
Diverged
- If the solve failed prematurely.
def sort(x: phiml.math._tensors.Tensor, dim: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function non_batch>) ‑> phiml.math._tensors.Tensor
-
Sort the values of `x` along `dim`. In order to sort a flattened array, use `pack_dims()` first.
Args
x
Tensor
dim
- Dimension to sort. If not present, sorting will be skipped. Defaults to non-batch dim.
Returns
Sorted `Tensor`, or `x` if `x` is constant along `dim`.
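A minimal usage sketch (illustrative):
>>> from phiml import math
>>> from phiml.math import spatial
>>> math.sort(math.tensor([3, 1, 2], spatial('x')))  # 1, 2, 3 along x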
def spack(value, packed_dim: Union[str, phiml.math._shape.Shape], pos: Optional[int] = None, **kwargs)
-
Short for `pack_dims(…, dims=spatial)`.
def sparse_tensor(indices: Optional[phiml.math._tensors.Tensor], values: Union[numbers.Number, phiml.math._tensors.Tensor], dense_shape: phiml.math._shape.Shape, can_contain_double_entries=True, indices_sorted=False, format=None, indices_constant: bool = True) ‑> phiml.math._tensors.Tensor
-
Construct a sparse tensor that stores `values` at the corresponding `indices` and is 0 everywhere else. In addition to the sparse dimensions indexed by `indices`, the tensor inherits all batch and channel dimensions from `values`.
Args
indices
-
`Tensor` encoding the positions of stored values. It can either list the individual stored indices (COO format) or encode only part of the index while containing other dimensions directly (compact format).
For COO, it has the following dimensions:
- One instance dimension exactly matching the instance dimension on `values`. It enumerates the positions of stored entries.
- One channel dimension. Its item names must match the dimension names of `dense_shape` but the order can be arbitrary.
- Any number of batch dimensions
You may pass `None` to create a sparse tensor with no entries.
values
-
`Tensor` containing the stored values at positions given by `indices`. It has the following dimensions:
- One instance dimension exactly matching the instance dimension on `indices`. It enumerates the values of stored entries.
- Any number of channel dimensions if multiple values are stored at each index.
- Any number of batch dimensions
dense_shape
- Dimensions listed in `indices`. The order can differ from the item names of `indices`.
can_contain_double_entries
- Whether some indices might occur more than once. If so, values at the same index will be summed.
indices_sorted
- Whether the indices are sorted in ascending order given the dimension order of the item names of `indices`.
indices_constant
- Whether the positions of the non-zero values are fixed. If `True`, JIT compilation will not create a placeholder for `indices`.
format
- Sparse format in which to store the data, such as `'coo'` or `'csr'`. See `get_format()`. If `None`, uses the format in which the indices were given.
Returns
Sparse `Tensor` with the specified `format`.
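A minimal COO sketch storing two values in a 3×4 grid (illustrative):
>>> from phiml import math
>>> from phiml.math import spatial, instance, channel
>>> idx = math.tensor([(0, 0), (1, 2)], instance('entries'), channel(vector='x,y'))
>>> val = math.tensor([1., 2.], instance('entries'))
>>> math.sparse_tensor(idx, val, spatial(x=3, y=4), can_contain_double_entries=False)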
def spatial(*args, **dims: Union[int, str, tuple, list, phiml.math._shape.Shape, ForwardRef('Tensor')])
-
Returns the spatial dimensions of an existing `Shape` or creates a new `Shape` with only spatial dimensions.
Usage for filtering spatial dimensions:
>>> spatial_dims = spatial(shape) >>> spatial_dims = spatial(tensor)
Usage for creating a `Shape` with only spatial dimensions:
>>> spatial_shape = spatial('undef', x=2, y=3) (x=2, y=3, undef=None)
Here, the dimension `undef` is created with an undefined size of `None`. Undefined sizes are automatically filled in by `tensor()`, `wrap()`, `stack()` and `concat()`.
To create a shape with multiple types, use `merge_shapes()`, `concat_shapes()` or the syntax `shape1 & shape2`.
See Also:
`channel()`, `batch()`, `instance()`
Args
*args
-
Either
**dims
- Dimension sizes and names. Must be empty when used as a filter operation.
Returns
`Shape` containing only dimensions of type spatial.
def spatial_gradient(grid: phiml.math._tensors.Tensor, dx: Union[float, phiml.math._tensors.Tensor] = 1, difference: str = 'central', padding: Union[Extrapolation, float, phiml.math._tensors.Tensor, str, None] = zero-gradient, dims: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function spatial>, stack_dim: Union[str, phiml.math._shape.Shape, None] = (gradientᶜ=None), pad=0, padding_kwargs: dict = None) ‑> phiml.math._tensors.Tensor
-
Calculates the spatial_gradient of a scalar channel from finite differences. The spatial_gradient vectors are in reverse order, lowest dimension first.
Args
grid
- grid values
dims
- (Optional) Dimensions along which the spatial derivative will be computed, as a sequence of dimension names.
dx
- Physical distance between grid points, `float` or `Tensor`. When passing a vector-valued `Tensor`, the dx values should be listed along `stack_dim`, matching `dims`.
difference
- Type of difference, one of ('forward', 'backward', 'central') (default 'central').
padding
- Padding mode.
Must be one of the following: `Extrapolation`, `Tensor` or number for constant extrapolation, name of extrapolation as `str`.
stack_dim
- name of the new vector dimension listing the spatial_gradient w.r.t. the various axes
pad
- How many cells to extend the result compared to `grid`. This value is added to the internal padding. For non-trivial extrapolations, this gives the correct result while manual padding before or after this operation would not respect the boundary locations.
padding_kwargs
- Additional keyword arguments to be passed to
pad()
.
Returns
def sqrt(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Computes sqrt(x) of the `Tensor` or `PhiTreeNode` `x`.
def squared_norm(vec: phiml.math._tensors.Tensor, vec_dim: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function channel>)
def squeeze(x: ~PhiTreeNodeType, dims: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None]) ‑> ~PhiTreeNodeType
-
Remove specific singleton (volume=1) dims from `x`.
Args
x
- Tensor or composite type / tree.
dims
- Singleton dims to remove.
Returns
Same type as `x`.
def stack(values: Union[Sequence[~PhiTreeNodeType], Dict[str, ~PhiTreeNodeType]], dim: Union[str, phiml.math._shape.Shape], expand_values=False, simplify=False, layout_non_matching=False, **kwargs) ‑> ~PhiTreeNodeType
-
Stacks `values` along the new dimension `dim`. All values must have the same spatial, instance and channel dimensions. If the dimension sizes vary, the resulting tensor will be non-uniform. Batch dimensions will be added as needed.
Stacking tensors is performed lazily, i.e. the memory is allocated only when needed. This makes repeated stacking and slicing along the same dimension very efficient, i.e. jit-compiled functions will not perform these operations.
Args
values
- Collection of `Shapable`, such as `Tensor`. If a `dict`, keys must be of type `str` and are used as item names along `dim`.
dim
- `Shape` with at least one dimension. None of these dimensions can be present with any of the `values`. If `dim` is a single-dimension shape, its size is determined from `len(values)` and can be left undefined (`None`). If `dim` is a multi-dimension shape, its volume must be equal to `len(values)`.
expand_values
- If `True`, will first add missing dimensions to all values, not just batch dimensions. This allows tensors with different dimensions to be stacked. The resulting tensor will have all dimensions that are present in `values`. If `False`, this may return a non-numeric object instead.
simplify
- If `True` and all values are equal, returns one value without adding the dimension.
layout_non_matching
- Whether non-matching values should be stacked using a Layout object, i.e. put into a named list instead.
**kwargs
- Additional keyword arguments required by specific implementations.
Adding spatial dimensions to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions. Adding batch dimensions must always work without keyword arguments.
Returns
`Tensor` containing `values` stacked along `dim`.
Examples
>>> stack({'x': 0, 'y': 1}, channel('vector')) (x=0, y=1)
>>> stack([math.zeros(batch(b=2)), math.ones(batch(b=2))], channel(c='x,y')) (x=0.000, y=1.000); (x=0.000, y=1.000) (bᵇ=2, cᶜ=x,y)
>>> stack([vec(x=1, y=0), vec(x=2, y=3.)], batch('b')) (x=1.000, y=0.000); (x=2.000, y=3.000) (bᵇ=2, vectorᶜ=x,y)
def std(value, dim: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function non_batch>) ‑> phiml.math._tensors.Tensor
-
Computes the standard deviation over `value` along the specified dimensions.
Warning: The standard deviation of non-uniform tensors along the stack dimension is undefined.
Args
value
`Tensor` or `list` / `tuple` of Tensors.
dim
-
Dimension or dimensions to be reduced. One of
- `None` to reduce all non-batch dimensions
- `str` containing single dimension or comma-separated list of dimensions
- `Tuple[str]` or `List[str]`
- `Shape`
- `batch()`, `instance()`, `spatial()`, `channel()` to select dimensions by type
- `'0'` when `isinstance(value, (tuple, list))` to reduce the sequence of Tensors
Returns
`Tensor` without the reduced dimensions.
def stop_gradient(x)
-
Disables gradients for the given tensor. This may switch off the gradients for `x` itself or create a copy of `x` with disabled gradients.
Implementations:
- PyTorch: `x.detach()`
- TensorFlow: `tf.stop_gradient`
- Jax: `jax.lax.stop_gradient`
Args
x
`Tensor` or `PhiTreeNode` for which gradients should be disabled.
Returns
Copy of `x`.
def stored_indices(x: phiml.math._tensors.Tensor, list_dim=(entriesⁱ=None), index_dim=(indexᶜ=None), invalid='discard') ‑> phiml.math._tensors.Tensor
-
Returns the indices of the stored values for a given `Tensor`. For sparse tensors, this will return the stored indices tensor. For collapsed tensors, only the stored dimensions will be returned.
Args
x
Tensor
list_dim
- Dimension along which stored indices should be laid out.
invalid
- One of `'discard'`, `'clamp'`, `'keep'`. Filter result by valid indices. Internally, invalid indices may be stored for performance reasons.
Returns
`Tensor` representing all indices of stored values.
def stored_values(x: phiml.math._tensors.Tensor, list_dim=(entriesⁱ=None), invalid='discard') ‑> phiml.math._tensors.Tensor
-
Returns the stored values for a given `Tensor`.
For sparse tensors, this will return only the stored entries.
Dense tensors are reshaped so that all non-batch dimensions are packed into `list_dim`. Batch dimensions are preserved.
Args
x
Tensor
list_dim
- Dimension along which stored values should be laid out.
invalid
- One of `'discard'`, `'clamp'`, `'keep'`. Filter result by valid indices. Internally, invalid indices may be stored for performance reasons.
Returns
`Tensor` representing all values stored to represent `x`.
def sum(value: ~TensorOrTree, dim: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function non_batch>) ‑> ~TensorOrTree
-
Sums `value` along the specified dimensions.
Args
value
- (Sparse) `Tensor` or `list` / `tuple` of Tensors.
dim
-
Dimension or dimensions to be reduced. One of
- `None` to reduce all non-batch dimensions
- `str` containing single dimension or comma-separated list of dimensions
- `Tuple[str]` or `List[str]`
- `Shape`
- `batch()`, `instance()`, `spatial()`, `channel()` to select dimensions by type
- `'0'` when `isinstance(value, (tuple, list))` to add up the sequence of Tensors
Returns
`Tensor` without the reduced dimensions.
def svd(x: phiml.math._tensors.Tensor, feature_dim: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function channel>, list_dim: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = None, latent_dim=(singularᶜ=None), full_matrices=False)
-
Singular value decomposition.
The original matrix is approximated by `(latent_to_value * singular.T) @ latents` or `latent_to_value @ (singular * latents)`.
Warning: Even for well-defined SVDs, different backends use different sign conventions, causing results to differ.
Args
x
- Matrix containing `feature_dim` and `list_dim`.
feature_dim
- Dimensions that list the features (columns).
list_dim
- Dimensions that list the data points (rows).
latent_dim
- Latent dimension. If a size is specified, truncates the SVD to this size.
full_matrices
- If `True`, return full-sized (square) matrices for latent_by_example and latent_to_value. These may not match the singular values.
Returns
latents
- Latent vectors of each item listed. `Tensor` with `list_dim` and `latent_dim`.
singular
- List of singular values. `Tensor` with `latent_dim`.
features
- Stacked normalized features / trends. This matrix can be used to compute the original value from a latent vector. `Tensor` with `latent_dim` and `feature_dim`.
def swap_axes(x, axes)
-
Swap the dimension order of `x`. This operation is generally not necessary for `Tensor`s because tensors will be reshaped under the hood or when getting the native/numpy representations. It can be used to transpose native tensors.
Implementations:
- NumPy: `numpy.transpose`
- PyTorch: `x.permute`
- TensorFlow: `tf.transpose`
- Jax: `jax.numpy.transpose`
Args
Returns
`Tensor` or native tensor, depending on `x`.
def tan(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Computes tan(x) of the `Tensor` or `PhiTreeNode` `x`.
def tanh(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Computes tanh(x) of the `Tensor` or `PhiTreeNode` `x`.
def tcat(values: Sequence[~PhiTreeNodeType], dim_type: Callable, expand_values=False, default_name='tcat') ‑> ~PhiTreeNodeType
-
Concatenate values by dim type. This function first packs all dimensions of `dim_type` into one dim, then concatenates all `values`. Values that do not have a dim of `dim_type` are considered a size-1 slice.
The name of the first matching dim of `dim_type` is used as the concatenated output dim name. If no value has a matching dim, `default_name` is used instead.
Args
values
- Values to be concatenated.
dim_type
- Dimension type along which to concatenate.
expand_values
- Whether to add missing other non-batch dims to values as needed.
default_name
- Concatenation dim name if none of the values have a matching dim.
Returns
Same type as any value.
def tensor(data, *shape: Union[phiml.math._shape.Shape, str, list], convert: bool = True, default_list_dim=(vectorᶜ=None)) ‑> phiml.math._tensors.Tensor
-
Create a Tensor from the specified `data`. If `convert=True`, converts `data` to the preferred format of the default backend.
`data` must be one of the following:
- Number: returns a dimensionless Tensor.
- Native tensor such as NumPy array, TensorFlow tensor or PyTorch tensor.
- `tuple` or `list` of numbers: backs the Tensor with a native tensor.
- `tuple` or `list` of non-numbers: creates tensors for the items and stacks them.
- Tensor: renames dimensions and dimension types if `names` is specified. Converts all internal native values of the tensor if `convert=True`.
- Shape: creates a 1D tensor listing the dimension sizes.
While specifying `names` is optional in some cases, it is recommended to always specify them.
Dimension types are always inferred from the dimension names if specified.
Implementations:
- NumPy: `numpy.array`
- PyTorch: `torch.tensor`, `torch.from_numpy`
- TensorFlow: `tf.convert_to_tensor`
- Jax: `jax.numpy.array`
See Also:
`wrap()` which uses `convert=False`, `layout()`.
Args
data
- Native tensor, sparse COO / CSR / CSC matrix, scalar, sequence, `Shape` or `Tensor`.
shape
- Ordered dimensions and types. If sizes are defined, they will be checked against `data`. You may also pass a single `str` specifying dimensions in the format `name:t` or `name:t=(item_names)` where `t` refers to the type letter, one of s, i, c, d, b. Alternatively, you can pass a `list` of shapes which will call `reshaped_tensor()`.
convert
- If True, converts the data to the native format of the current default backend.
If False, wraps the data in a `Tensor` but keeps the given data reference if possible.
Raises
AssertionError
- if dimension names are not provided and cannot automatically be inferred
ValueError
- if
data
is not tensor-like
Returns
Tensor containing same values as data
Examples
>>> tensor([1, 2, 3], channel(vector='x,y,z')) (x=1, y=2, z=3)
>>> tensor([1., 2, 3], channel(vector='x,y,z')) (x=1.000, y=2.000, z=3.000) float64
>>> tensor(numpy.zeros([10, 8, 6, 2]), batch('batch'), spatial('x,y'), channel(vector='x,y')) (batchᵇ=10, xˢ=8, yˢ=6, vectorᶜ=x,y) float64 const 0.0
>>> tensor([(0, 1), (0, 2), (1, 3)], instance('particles'), channel(vector='x,y')) (x=0, y=1); (x=0, y=2); (x=1, y=3) (particlesⁱ=3, vectorᶜ=x,y)
>>> tensor(numpy.random.randn(10)) (vectorᶜ=10) float64 -0.128 ± 1.197 (-2e+00...2e+00)
def tensor_like(existing_tensor: phiml.math._tensors.Tensor, values: Union[numbers.Number, phiml.math._tensors.Tensor, bool], value_order: str = None)
-
Creates a tensor with the same format and shape as `existing_tensor`.
Args
existing_tensor
- Any `Tensor`, sparse or dense.
values
- New values to replace the existing values by. If `existing_tensor` is sparse, `values` must broadcast to the instance dimension listing the stored indices.
value_order
- Order of `values` compared to `existing_tensor`, only relevant if `existing_tensor` is sparse. If `'original'`, the values are ordered like the values that were used to create the first tensor with this sparsity pattern. If `'as existing'`, the values match the current order of `existing_tensor`. Note that the order of values may be changed upon creating a sparse tensor.
Returns
def to_complex(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Converts the given tensor to complex floating point format with the currently specified precision.
The precision can be set globally using `math.set_global_precision()` and locally using `with math.precision()`.
See the documentation at https://tum-pbs.github.io/PhiML/Data_Types.html
See Also:
`cast()`.
Args
x
- values to convert
Returns
`Tensor` of same shape as `x`.
def to_device(value, device: phiml.backend._backend.ComputeDevice, convert=True, use_dlpack=True)
-
Allocates the tensors of `value` on `device`. If the value already exists on that device, this function may either create a copy of `value` or return `value` directly.
See Also:
`to_cpu()`.
Args
value
`Tensor` or `PhiTreeNode` or native tensor.
device
- Device to allocate value on. Either `ComputeDevice` or category `str`, such as `'CPU'` or `'GPU'`.
convert
- Whether to convert tensors that do not belong to the corresponding backend to compatible native tensors.
If `False`, this function has no effect on numpy tensors.
use_dlpack
- Only if `convert==True`. Whether to use the DLPack library to convert from one GPU-enabled backend to another.
Returns
Same type as `value`.
def to_dict(value: Union[phiml.math._tensors.Tensor, phiml.math._shape.Shape])
-
Returns a serializable form of a `Tensor` or `Shape`. The result can be written to a JSON file, for example.
See Also:
`from_dict()`.
Args
Returns
Serializable Python tree of primitives
def to_float(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Converts the given tensor to floating point format with the currently specified precision.
The precision can be set globally using `math.set_global_precision()` and locally using `with math.precision()`.
See the documentation at https://tum-pbs.github.io/PhiML/Data_Types.html
See Also:
`cast()`.
Args
x
`Tensor` or `PhiTreeNode` to convert
Returns
`Tensor` or `PhiTreeNode` matching `x`.
def to_format(x: phiml.math._tensors.Tensor, format: str)
-
Converts a `Tensor` to the specified sparse format or to a dense tensor.
Args
x
- Sparse or dense `Tensor`
format
- Target format. One of `'dense'`, `'coo'`, `'csr'`, or `'csc'`. Additionally, `'sparse'` can be passed to convert dense matrices to a sparse format, decided based on the backend for `x`.
Returns
`Tensor` of the specified format.
def to_int32(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Converts the `Tensor` or `PhiTreeNode` `x` to 32-bit integer.
def to_int64(x: ~TensorOrTree) ‑> ~TensorOrTree
-
Converts the `Tensor` or `PhiTreeNode` `x` to 64-bit integer.
def trace_check(traced_function, *args, **kwargs) ‑> Tuple[bool, str]
-
Tests if `f(*args, **kwargs)` has already been traced for arguments compatible with `args` and `kwargs`. If true, jit-compiled functions are very fast since the Python function is not actually called anymore.
Args
traced_function
- Transformed Function, e.g. jit-compiled or linear function.
*args
- Hypothetical arguments to be passed to
f
**kwargs
- Hypothetical keyword arguments to be passed to
f
Returns
def unpack_dim(value, dim: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None], *unpacked_dims: Union[phiml.math._shape.Shape, Sequence[phiml.math._shape.Shape]], **kwargs)
-
Decompresses a dimension by unstacking the elements along it. This function replaces the traditional `reshape` for these cases. The compressed dimension `dim` is assumed to contain elements laid out according to the order of `unpacked_dims`.
If `dim` does not exist on `value`, this function will return `value` as-is. This includes primitive types.
See Also:
`pack_dims()`
Args
value
`Shapable`, such as `Tensor`, for which one dimension should be split.
dim
- Single dimension to be decompressed.
*unpacked_dims
- Either vararg `Shape`, ordered dimensions to replace `dim`, fulfilling `unpacked_dims.volume == shape(self)[dim].rank`. This results in a single tensor output. Alternatively, pass a `tuple` or `list` of shapes to unpack a dim into multiple tensors whose combined volumes match `dim.size`.
**kwargs
- Additional keyword arguments required by specific implementations.
Adding spatial dimensions to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions. Adding batch dimensions must always work without keyword arguments.
Returns
Same type as `value`.
Examples
>>> unpack_dim(math.zeros(instance(points=12)), 'points', spatial(x=4, y=3)) (xˢ=4, yˢ=3) const 0.0
def unstack(value, dim: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None]) ‑> tuple
-
Un-stacks a `Sliceable` along one or multiple dimensions.
If multiple dimensions are given, the order of elements will be according to the dimension order in `dim`, i.e. elements along the last dimension will be neighbors in the returned `tuple`. If no dimension is given or none of the given dimensions exists on `value`, returns a list containing only `value`.
See Also:
`slice_()`.
Args
value
`Shapable`, such as `Tensor`
dim
- Dimensions as `Shape` or comma-separated `str` or dimension type, i.e. `channel()`, `spatial()`, `instance()`, `batch()`.
Returns
`tuple` of objects matching the type of `value`.
Examples
>>> unstack(expand(0, spatial(x=5)), 'x') (0.0, 0.0, 0.0, 0.0, 0.0)
def upsample2x(grid: phiml.math._tensors.Tensor, padding: Extrapolation = zero-gradient, dims: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function spatial>, padding_kwargs: dict = None) ‑> phiml.math._tensors.Tensor
-
Resamples a regular grid to double the number of spatial sample points per dimension. The grid values at the new points are determined via linear interpolation.
Args
grid
- half-size grid
padding
- grid extrapolation
dims
- dims along which up-sampling is applied. If None, up-sample along all spatial dims.
padding_kwargs
- Additional keyword arguments to be passed to `pad()`.
Returns
double-size grid
def use(backend: Union[str, phiml.backend._backend.Backend]) ‑> phiml.backend._backend.Backend
-
Sets the given backend as default. This setting can be overridden using `with backend:`.
See `default_backend()`, `choose_backend_t()`.
Args
backend
`Backend` or backend name to set as default. Possible names are `'torch'`, `'tensorflow'`, `'jax'`, `'numpy'`.
Returns
The chosen backend as a `Backend` instance.
def vec(name: Union[str, phiml.math._shape.Shape] = 'vector', *sequence, tuple_dim=(sequenceˢ=None), list_dim=(sequenceⁱ=None), **components) ‑> phiml.math._tensors.Tensor
-
Lay out the given values along a channel dimension without converting them to the current backend.
Args
name
- Dimension name.
*sequence
- Component values that will also be used as item names.
If specified, `components` must be empty.
**components
- Values by component name. If specified, no additional positional arguments must be given.
tuple_dim
- Dimension for `tuple` values passed as components, e.g. `vec(x=(0, 1), ...)`
list_dim
- Dimension for `list` values passed as components, e.g. `vec(x=[0, 1], ...)`
Returns
Examples
>>> vec(x=1, y=0, z=-1) (x=1, y=0, z=-1)
>>> vec(x=1., z=0) (x=1.000, z=0.000)
>>> vec(x=tensor([1, 2, 3], instance('particles')), y=0) (x=1, y=0); (x=2, y=0); (x=3, y=0) (particlesⁱ=3, vectorᶜ=x,y)
>>> vec(x=0, y=[0, 1]) (x=0, y=0); (x=0, y=1) (vectorᶜ=x,y, sequenceⁱ=2)
>>> vec(x=0, y=(0, 1)) (x=0, y=0); (x=0, y=1) (sequenceˢ=2, vectorᶜ=x,y)
def vec_length(*args, **kwargs)
-
Deprecated. Use `norm()` instead.
def vec_normalize(vec: phiml.math._tensors.Tensor, vec_dim: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = <function channel>, epsilon=None, allow_infinite=False, allow_zero=False)
-
Normalizes the vectors in `vec`. If `vec_dim` is None, the combined channel dimensions of `vec` are interpreted as a vector.
Args
vec
`Tensor` to normalize.
vec_dim
- Dimensions to normalize over. By default, all channel dimensions are used to compute the vector length.
epsilon
- (Optional) Zero-length threshold. Vectors shorter than this length yield the unit vector (1, 0, 0, …).
If not specified, the zero-vector yields `NaN` as it cannot be normalized.
allow_infinite
- Allow infinite components in vectors. These vectors will then only point towards the infinite components.
allow_zero
- Whether to return zero vectors for inputs smaller than `epsilon` instead of a unit vector.
def vec_squared(*args, **kwargs)
-
Deprecated. Use `squared_norm()` instead.
def when_available(runnable: Callable, *tensor_args: phiml.math._tensors.Tensor)
-
Calls `runnable(*tensor_args)` once the concrete values of all tensors are available. In eager mode, `runnable` is called immediately. When jit-compiled, `runnable` is called after the jit-compiled function has returned.
Args
runnable
- Function to call as `runnable(*tensor_args)`. This can be a `lambda` function.
*tensor_args
- `Tensor` values to pass to `runnable` with concrete values.
def where(condition: Union[phiml.math._tensors.Tensor, float, int], value_true: Union[phiml.math._tensors.Tensor, float, int, Any] = None, value_false: Union[phiml.math._tensors.Tensor, float, int, Any] = None)
-
Builds a tensor by choosing either values from `value_true` or `value_false` depending on `condition`. If `condition` is not of type boolean, non-zero values are interpreted as True.
This function requires non-None values for `value_true` and `value_false`. To get the indices of True / non-zero values, use `nonzero()`.
Args
condition
- determines where to choose values from value_true or from value_false
value_true
- Values to pick where `condition != 0 / True`
value_false
- Values to pick where `condition == 0 / False`
Returns
`Tensor` containing dimensions of all inputs.
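A minimal sketch implementing a ReLU-style clamp (illustrative):
>>> from phiml import math
>>> from phiml.math import spatial
>>> x = math.linspace(-1, 1, spatial(x=5))
>>> math.where(x > 0, x, 0)  # negative entries become 0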
def with_diagonal(matrix: phiml.math._tensors.Tensor, values: Union[float, phiml.math._tensors.Tensor], check_square=True)
-
Create a copy of `matrix`, replacing the diagonal elements. If `matrix` is sparse, diagonal zeros (and possibly other explicitly stored zeros) will be dropped from the sparse matrix.
This function currently only supports sparse COO, CSR, CSC SciPy matrices.
Args
matrix
`Tensor` with at least one dual dim.
values
- Diagonal values
check_square
- If `True`, allow this function only for square matrices.
Returns
def wrap(data, *shape: Union[phiml.math._shape.Shape, str, list], default_list_dim=(vectorᶜ=None)) ‑> phiml.math._tensors.Tensor
-
Short for `tensor()` with `convert=False`.
. def zeros(*shape: phiml.math._shape.Shape, dtype: Union[phiml.backend._dtype.DType, tuple, type] = None) ‑> phiml.math._tensors.Tensor
-
Define a tensor with specified shape with value `0.0` / `0` / `False` everywhere.
This method may not immediately allocate the memory to store the values.
See Also:
`zeros_like()`, `ones()`.
Args
*shape
- This (possibly empty) sequence of `Shape`s is concatenated, preserving the order.
dtype
- Data type as `DType` object. Defaults to `float` matching the current precision setting.
Returns
def zeros_like(obj: Union[phiml.math._tensors.Tensor, PhiTreeNode]) ‑> Union[phiml.math._tensors.Tensor, PhiTreeNode]
-
Create a `Tensor` containing only `0.0` / `0` / `False` with the same shape and dtype as `obj`.
Classes
class ConvergenceException
-
Base class for exceptions raised when a solve does not converge.
See Also:
`Diverged`, `NotConverged`.
Expand source code
class ConvergenceException(RuntimeError): """ Base class for exceptions raised when a solve does not converge. See Also: `Diverged`, `NotConverged`. """ def __init__(self, result: SolveInfo): RuntimeError.__init__(self, result.msg) self.result: SolveInfo = result """ `SolveInfo` holding information about the solve. """
Ancestors
- builtins.RuntimeError
- builtins.Exception
- builtins.BaseException
Subclasses
- phiml.math._optimize.Diverged
- phiml.math._optimize.NotConverged
Instance variables
var result
-
`SolveInfo` holding information about the solve.
class DType (kind: type, bits: int = None, precision: int = None)
-
Instances of `DType` represent the kind and size of data elements. The data type of tensors can be obtained via `Tensor.dtype`.
The following kinds of data types are supported:
- `float` with 32 / 64 bits
- `complex` with 64 / 128 bits
- `int` with 8 / 16 / 32 / 64 bits
- `bool` with 8 bits
- `str` with 8·n bits
Unlike with many computing libraries, there are no global variables corresponding to the available types. Instead, data types can simply be instantiated as needed.
Args
kind
- Python type, one of `(bool, int, float, complex, str)`
bits
- number of bits per element, a multiple of 8.
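A minimal sketch based on the constructor above (illustrative):
>>> from phiml.math import DType
>>> f32 = DType(float, 32)
>>> f32.precision, f32.itemsize   # (32, 4): 32 bits of precision, 4 bytes per value
>>> DType(complex, precision=64)  # 128-bit complex, 64-bit precision per component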
Expand source code
class DType: """ Instances of `DType` represent the kind and size of data elements. The data type of tensors can be obtained via `Tensor.dtype`. The following kinds of data types are supported: * `float` with 32 / 64 bits * `complex` with 64 / 128 bits * `int` with 8 / 16 / 32 / 64 bits * `bool` with 8 bits * `str` with 8*n* bits Unlike with many computing libraries, there are no global variables corresponding to the available types. Instead, data types can simply be instantiated as needed. """ def __init__(self, kind: type, bits: int = None, precision: int = None): """ Args: kind: Python type, one of `(bool, int, float, complex, str)` bits: number of bits per element, a multiple of 8. """ assert kind in (bool, int, float, complex, str, object) if kind is bool: assert bits is None, "Bits may not be set for bool or object" assert precision is None, f"Precision may only be specified for float or complex but got {kind}, precision={precision}" bits = 8 elif kind == object: assert bits is None, "bits may not be set for bool or object" assert precision is None, f"Precision may only be specified for float or complex but got {kind}, precision={precision}" bits = int(np.round(np.log2(sys.maxsize))) + 1 elif precision is not None: assert bits is None, "Specify either bits or precision when creating a DType but not both." assert kind in [float, complex], f"Precision may only be specified for float or complex but got {kind}, precision={precision}" if kind == float: bits = precision else: bits = precision * 2 else: assert isinstance(bits, int), f"bits must be an int but got {type(bits)}" self.kind = kind """ Python class corresponding to the type of data, ignoring precision. One of (bool, int, float, complex, str) """ self.bits = bits """ Number of bits used to store a single value of this type. See `DType.itemsize`. """ @property def precision(self): """ Floating point precision. Only defined if `kind in (float, complex)`. For complex values, returns half of `DType.bits`. """ if self.kind == float: return self.bits if self.kind == complex: return self.bits // 2 else: return None @property def itemsize(self): """ Number of bytes used to storea single value of this type. See `DType.bits`. """ assert self.bits % 8 == 0 return self.bits // 8 def __eq__(self, other): if isinstance(other, DType): return self.kind == other.kind and self.bits == other.bits elif other in {bool, int, float, complex, object}: return self.kind == other else: return False def __ne__(self, other): return not self == other def __hash__(self): return hash(self.kind) + hash(self.bits) def __repr__(self): return f"{self.kind.__name__}{self.bits}" @staticmethod def as_dtype(value: Union['DType', tuple, type, None]) -> Union['DType', None]: if isinstance(value, DType): return value elif value is int: return DType(int, 32) elif value is float: from . import get_precision return DType(float, get_precision()) elif value is complex: from . import get_precision return DType(complex, 2 * get_precision()) elif value is None: return None elif isinstance(value, tuple): return DType(*value) elif value is str: raise ValueError("str DTypes must specify precision") else: return DType(value) # bool, object
Static methods
def as_dtype(value: Union[ForwardRef('DType'), tuple, type, None]) ‑> Optional[phiml.backend._dtype.DType]
Instance variables
var bits
-
Number of bits used to store a single value of this type. See `DType.itemsize`.
prop itemsize
-
Number of bytes used to store a single value of this type. See `DType.bits`.
Expand source code
@property def itemsize(self): """ Number of bytes used to storea single value of this type. See `DType.bits`. """ assert self.bits % 8 == 0 return self.bits // 8
var kind
-
Python class corresponding to the type of data, ignoring precision. One of (bool, int, float, complex, str)
prop precision
-
Floating point precision. Only defined if `kind in (float, complex)`. For complex values, returns half of `DType.bits`.
Expand source code
@property def precision(self): """ Floating point precision. Only defined if `kind in (float, complex)`. For complex values, returns half of `DType.bits`. """ if self.kind == float: return self.bits if self.kind == complex: return self.bits // 2 else: return None
class Dict (*args, **kwargs)
-
Dictionary of `Tensor` or `PhiTreeNode` values. Dicts are not themselves tensors and do not have a shape. Use `layout()` to treat `dict` instances like tensors.
In addition to dictionary functions, supports mathematical operators with other `Dict`s and lookup via `.key` syntax.
`Dict` implements `PhiTreeNode` so instances can be passed to math operations like `sin()`.
Expand source code
class Dict(dict):
    """
    Dictionary of `Tensor` or `phiml.math.magic.PhiTreeNode` values.
    Dicts are not themselves tensors and do not have a shape.
    Use `layout()` to treat `dict` instances like tensors.

    In addition to dictionary functions, supports mathematical operators with other `Dict`s and lookup via `.key` syntax.

    `Dict` implements `phiml.math.magic.PhiTreeNode` so instances can be passed to math operations like `sin`.
    """

    def __value_attrs__(self):
        return tuple(self.keys())

    # --- Dict[key] ---

    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError as k:
            raise AttributeError(k)

    def __setattr__(self, key, value):
        self[key] = value

    def __delattr__(self, key):
        try:
            del self[key]
        except KeyError as k:
            raise AttributeError(k)

    # --- operators ---

    def __neg__(self):
        return Dict({k: -v for k, v in self.items()})

    def __invert__(self):
        return Dict({k: ~v for k, v in self.items()})

    def __abs__(self):
        return Dict({k: abs(v) for k, v in self.items()})

    def __round__(self, n=None):
        return Dict({k: round(v) for k, v in self.items()})

    def __add__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val + other[key] for key, val in self.items()})
        return Dict({key: val + other for key, val in self.items()})

    def __radd__(self, other):
        if isinstance(other, Dict):
            return Dict({key: other[key] + val for key, val in self.items()})
        return Dict({key: other + val for key, val in self.items()})

    def __sub__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val - other[key] for key, val in self.items()})
        return Dict({key: val - other for key, val in self.items()})

    def __rsub__(self, other):
        if isinstance(other, Dict):
            return Dict({key: other[key] - val for key, val in self.items()})
        return Dict({key: other - val for key, val in self.items()})

    def __mul__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val * other[key] for key, val in self.items()})
        return Dict({key: val * other for key, val in self.items()})

    def __rmul__(self, other):
        if isinstance(other, Dict):
            return Dict({key: other[key] * val for key, val in self.items()})
        return Dict({key: other * val for key, val in self.items()})

    def __truediv__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val / other[key] for key, val in self.items()})
        return Dict({key: val / other for key, val in self.items()})

    def __rtruediv__(self, other):
        if isinstance(other, Dict):
            return Dict({key: other[key] / val for key, val in self.items()})
        return Dict({key: other / val for key, val in self.items()})

    def __floordiv__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val // other[key] for key, val in self.items()})
        return Dict({key: val // other for key, val in self.items()})

    def __rfloordiv__(self, other):
        if isinstance(other, Dict):
            return Dict({key: other[key] // val for key, val in self.items()})
        return Dict({key: other // val for key, val in self.items()})

    def __pow__(self, power, modulo=None):
        assert modulo is None
        if isinstance(power, Dict):
            return Dict({key: val ** power[key] for key, val in self.items()})
        return Dict({key: val ** power for key, val in self.items()})

    def __rpow__(self, other):
        if isinstance(other, Dict):
            return Dict({key: other[key] ** val for key, val in self.items()})
        return Dict({key: other ** val for key, val in self.items()})

    def __mod__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val % other[key] for key, val in self.items()})
        return Dict({key: val % other for key, val in self.items()})

    def __rmod__(self, other):
        if isinstance(other, Dict):
            return Dict({key: other[key] % val for key, val in self.items()})
        return Dict({key: other % val for key, val in self.items()})

    def __eq__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val == other[key] for key, val in self.items()})
        return Dict({key: val == other for key, val in self.items()})

    def __ne__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val != other[key] for key, val in self.items()})
        return Dict({key: val != other for key, val in self.items()})

    def __lt__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val < other[key] for key, val in self.items()})
        return Dict({key: val < other for key, val in self.items()})

    def __le__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val <= other[key] for key, val in self.items()})
        return Dict({key: val <= other for key, val in self.items()})

    def __gt__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val > other[key] for key, val in self.items()})
        return Dict({key: val > other for key, val in self.items()})

    def __ge__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val >= other[key] for key, val in self.items()})
        return Dict({key: val >= other for key, val in self.items()})

    # --- overridden methods ---

    def copy(self):
        return Dict(self)
Ancestors
- builtins.dict
Methods
def copy(self)
-
D.copy() -> a shallow copy of D
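Example (an illustrative sketch; the keys x and t are placeholder names, not part of the API):

>>> from phiml import math
>>> from phiml.math import wrap, spatial
>>> state = math.Dict(x=wrap([0., 1., 2.], spatial(x=3)), t=0.)
>>> state.x                         # attribute-style lookup of the 'x' entry
>>> state * 2                       # operators are applied to every value
>>> state + math.Dict(x=1., t=10.)  # Dict-Dict operations match entries by key
>>> math.sin(state)                 # as a PhiTreeNode, Dict can be passed to math functions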
class Diverged
-
Raised if the optimization was stopped prematurely and cannot continue. This may indicate that no solution exists.
The values of the last estimate
x
may or may not be finite. This exception inherits from
ConvergenceException
. See Also:
NotConverged
. Expand source code
class Diverged(ConvergenceException):
    """
    Raised if the optimization was stopped prematurely and cannot continue.
    This may indicate that no solution exists.

    The values of the last estimate `x` may or may not be finite.

    This exception inherits from `ConvergenceException`.

    See Also:
        `NotConverged`.
    """

    def __init__(self, result: SolveInfo):
        ConvergenceException.__init__(self, result)
Ancestors
- phiml.math._optimize.ConvergenceException
- builtins.RuntimeError
- builtins.Exception
- builtins.BaseException
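Usage sketch (not part of the class itself): both convergence exceptions are typically caught around a solve. Here, lhs, the tolerance and the initial guess are placeholders.

>>> from phiml import math
>>> from phiml.math import spatial, Solve, Diverged, NotConverged
>>> def lhs(x):
...     return 2 * x  # placeholder linear operator
>>> y = math.ones(spatial(x=4))
>>> try:
...     x = math.solve_linear(lhs, y, Solve('CG', rel_tol=1e-5, x0=math.zeros(spatial(x=4))))
... except NotConverged as nc:
...     x = nc.result.x  # best estimate so far; may still be usable
... except Diverged:
...     raise  # no solution could be approached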
class IncompatibleShapes (message, *shapes: phiml.math._shape.Shape)
-
Raised when the shape of a tensor does not match the other arguments.
Expand source code
class IncompatibleShapes(Exception):
    """
    Raised when the shape of a tensor does not match the other arguments.
    """

    def __init__(self, message, *shapes: Shape):
        Exception.__init__(self, message)
        self.shapes = shapes
Ancestors
- builtins.Exception
- builtins.BaseException
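For illustration, a minimal sketch of a situation that raises this exception, using merge_shapes() with conflicting sizes:

>>> from phiml.math import spatial, merge_shapes, IncompatibleShapes
>>> try:
...     merge_shapes(spatial(x=4), spatial(x=5))  # size conflict for dim 'x'
... except IncompatibleShapes as exc:
...     print(exc.shapes)  # the offending shapes are stored on the exception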
class LinearFunction
-
Just-in-time compiled linear function of
Tensor
arguments and return values. Use
jit_compile_linear()
to create a linear function representation. Expand source code
class LinearFunction(Generic[X, Y], Callable[[X], Y]):
    """
    Just-in-time compiled linear function of `Tensor` arguments and return values.

    Use `jit_compile_linear()` to create a linear function representation.
    """

    def __init__(self, f, auxiliary_args: Set[str], forget_traces: bool):
        self.f = f
        self.f_params = function_parameters(f)
        self.auxiliary_args = auxiliary_args
        self.forget_traces = forget_traces
        self.matrices_and_biases: Dict[SignatureKey, Tuple[SparseCoordinateTensor, Tensor, Tuple]] = {}
        self.nl_jit = JitFunction(f, self.auxiliary_args, forget_traces)  # for backends that do not support sparse matrices

    def _get_or_trace(self, key: SignatureKey, args: tuple, f_kwargs: dict):
        if not key.tracing and key in self.matrices_and_biases:
            return self.matrices_and_biases[key]
        else:
            if self.forget_traces:
                self.matrices_and_biases.clear()
            _TRACING_LINEAR.append(self)
            try:
                matrix, bias, raw_out = matrix_from_function(self.f, *args, **f_kwargs, auto_compress=True, _return_raw_output=True)
            finally:
                assert _TRACING_LINEAR.pop(-1) is self
            if not key.tracing:
                self.matrices_and_biases[key] = matrix, bias, raw_out
                if len(self.matrices_and_biases) >= 4:
                    warnings.warn(f"""Φ-ML-lin: The compiled linear function '{f_name(self.f)}' was traced {len(self.matrices_and_biases)} times.
Performing many traces may be slow and cause memory leaks.
Tensors in auxiliary arguments (all except the first parameter unless specified otherwise) are compared by reference, not by tensor values.
Auxiliary arguments: {key.auxiliary_kwargs}
Multiple linear traces can be avoided by jit-compiling the code that calls the linear function or setting forget_traces=True.""", RuntimeWarning, stacklevel=3)
            return matrix, bias, raw_out

    def __call__(self, *args: X, **kwargs) -> Y:
        try:
            key, tensors, natives, x, aux_kwargs = key_from_args(args, kwargs, self.f_params, cache=False, aux=self.auxiliary_args)
        except LinearTraceInProgress:
            return self.f(*args, **kwargs)
        assert tensors, "Linear function requires at least one argument"
        if any(isinstance(t, ShiftLinTracer) for t in tensors):
            # TODO: if t is identity, use cached ShiftLinTracer, otherwise multiply two ShiftLinTracers
            return self.f(*args, **kwargs)
        if not key.backend.supports(Backend.sparse_coo_tensor):
            # This might be called inside a Jax linear solve
            # warnings.warn(f"Sparse matrices are not supported by {backend}. Falling back to regular jit compilation.", RuntimeWarning)
            if not math.all_available(*tensors):
                # Avoid nested tracing. Typical case: jax.scipy.sparse.cg(LinearFunction). Nested traces cannot be reused, which results in lots of traces per cg.
                ML_LOGGER.debug(f"Φ-ML-lin: Running '{f_name(self.f)}' as-is with {key.backend} because it is being traced.")
                return self.f(*args, **kwargs)
            else:
                return self.nl_jit(*args, **kwargs)
        matrix, bias, (out_tree, out_tensors) = self._get_or_trace(key, args, aux_kwargs)
        result = matrix @ tensors[0] + bias
        out_tensors = list(out_tensors)
        out_tensors[0] = result
        return assemble_tree(out_tree, out_tensors)

    def sparse_matrix(self, *args, **kwargs):
        """
        Create an explicit representation of this linear function as a sparse matrix.

        See Also:
            `sparse_matrix_and_bias()`.

        Args:
            *args: Function arguments. This determines the size of the matrix.
            **kwargs: Additional keyword arguments for the linear function.

        Returns:
            Sparse matrix representation with `values` property and `native()` method.
        """
        key, *_, aux_kwargs = key_from_args(args, kwargs, self.f_params, cache=False, aux=self.auxiliary_args)
        matrix, bias, *_ = self._get_or_trace(key, args, aux_kwargs)
        assert math.close(bias, 0), "This is an affine function and cannot be represented by a single matrix. Use sparse_matrix_and_bias() instead."
        return matrix

    def sparse_matrix_and_bias(self, *args, **kwargs):
        """
        Create an explicit representation of this affine function as a sparse matrix and a bias vector.

        Args:
            *args: Positional arguments to the linear function. This determines the size of the matrix.
            **kwargs: Additional keyword arguments for the linear function.

        Returns:
            matrix: Sparse matrix representation with `values` property and `native()` method.
            bias: `Tensor`
        """
        key, *_, aux_kwargs = key_from_args(args, kwargs, self.f_params, cache=False, aux=self.auxiliary_args)
        return self._get_or_trace(key, args, aux_kwargs)[:2]

    def __repr__(self):
        return f"lin({f_name(self.f)})"
Ancestors
- collections.abc.Callable
- typing.Generic
Methods
def sparse_matrix(self, *args, **kwargs)
-
Create an explicit representation of this linear function as a sparse matrix.
See Also:
sparse_matrix_and_bias()
. Args
*args
- Function arguments. This determines the size of the matrix.
**kwargs
- Additional keyword arguments for the linear function.
Returns
Sparse matrix representation with
values
property and
native()
method.
def sparse_matrix_and_bias(self, *args, **kwargs)
-
Create an explicit representation of this affine function as a sparse matrix and a bias vector.
Args
*args
- Positional arguments to the linear function. This determines the size of the matrix.
**kwargs
- Additional keyword arguments for the linear function.
Returns
matrix
- Sparse matrix representation with values property and native() method.
bias
- Tensor
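Usage sketch (the operator and resolution are placeholders): a function decorated with jit_compile_linear() exposes these methods.

>>> from phiml import math
>>> from phiml.math import spatial
>>> @math.jit_compile_linear
... def lhs(x):
...     return 2 * math.laplace(x)
>>> x0 = math.zeros(spatial(x=8))           # the argument only determines the matrix size
>>> A = lhs.sparse_matrix(x0)               # explicit sparse operator
>>> A2, b = lhs.sparse_matrix_and_bias(x0)  # here b is 0 since lhs has no constant part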
class NotConverged
-
Raised during optimization if the desired accuracy was not reached within the maximum number of iterations.
This exception inherits from
ConvergenceException
. See Also:
Diverged
. Expand source code
class NotConverged(ConvergenceException):
    """
    Raised during optimization if the desired accuracy was not reached within the maximum number of iterations.

    This exception inherits from `ConvergenceException`.

    See Also:
        `Diverged`.
    """

    def __init__(self, result: SolveInfo):
        ConvergenceException.__init__(self, result)
Ancestors
- phiml.math._optimize.ConvergenceException
- builtins.RuntimeError
- builtins.Exception
- builtins.BaseException
class Shape
-
Shapes enumerate dimensions, each consisting of a name, size and type.
There are five types of dimensions:
batch()
, dual()
, spatial()
, channel()
, and instance()
. To construct a
Shape
, use batch()
, dual()
, spatial()
, channel()
or instance()
, depending on the desired dimension type. To create a shape with multiple types, use merge_shapes()
, concat_shapes()
or the syntax shape1 & shape2
. The
__init__
constructor is for internal use only. Expand source code
class Shape: """ Shapes enumerate dimensions, each consisting of a name, size and type. There are five types of dimensions: `batch`, `dual`, `spatial`, `channel`, and `instance`. """ def __init__(self, sizes: tuple, names: tuple, types: tuple, item_names: tuple): """ To construct a `Shape`, use `batch`, `dual`, `spatial`, `channel` or `instance`, depending on the desired dimension type. To create a shape with multiple types, use `merge_shapes()`, `concat_shapes()` or the syntax `shape1 & shape2`. The `__init__` constructor is for internal use only. """ if len(sizes) > 0 and any(s is not None and not isinstance(s, int) for s in sizes): from ._tensors import Tensor sizes = tuple([s if isinstance(s, Tensor) or s is None else int(s) for s in sizes]) # TODO replace this by an assert self.sizes: tuple = sizes """ Ordered dimension sizes as `tuple`. The size of a dimension can be an `int` or a `Tensor` for [non-uniform shapes](https://tum-pbs.github.io/PhiML/Non_Uniform.html). See Also: `Shape.get_size()`, `Shape.size`, `Shape.shape`. """ self.names: Tuple[str, ...] = names """ Ordered dimension names as `tuple[str]`. See Also: `Shape.name`. """ self.types: Tuple[str, ...] = types # undocumented, may be private self.item_names: Tuple[Optional[Tuple[str, ...]], ...] = (None,) * len(sizes) if item_names is None else item_names # undocumented if DEBUG_CHECKS: assert len(sizes) == len(names) == len(types) == len(item_names), f"sizes={sizes}, names={names}, types={types}, item_names={item_names}" assert len(set(names)) == len(names), f"Duplicate dimension names: {names}" assert all(isinstance(n, str) for n in names), f"All names must be of type string but got {names}" assert isinstance(self.item_names, tuple) assert all([items is None or isinstance(items, tuple) for items in self.item_names]) assert all([items is None or all([isinstance(n, str) for n in items]) for items in self.item_names]) from ._tensors import Tensor for name, size in zip(names, sizes): if isinstance(size, Tensor): assert size.rank > 0 for name, size, item_names in zip(self.names, self.sizes, self.item_names): if item_names is not None: try: int(size) except Exception: raise AssertionError(f"When item names are present, the size must be an integer type") assert len(item_names) == size, f"Number of item names ({len(item_names)}) does not match size {size}" for item_name in item_names: assert item_name, f"Empty item name" assert len(set(item_names)) == len(item_names), f"Duplicate item names in shape {self} at dim '{name}': {item_names}" for name, type in zip(names, types): if type == DUAL_DIM: assert name.startswith('~'), f"Dual dimensions must start with '~' but got '{name}' in {self}" def _check_is_valid_tensor_shape(self): if DEBUG_CHECKS: from ._tensors import Tensor for name, size in zip(self.names, self.sizes): if size is not None and isinstance(size, Tensor): assert size.rank > 0 for dim in size.shape.names: assert dim in self.names, f"Dimension {name} varies along {dim} but {dim} is not part of the Shape {self}" def _to_dict(self, include_sizes=True): result = dict(names=self.names, types=self.types, item_names=self.item_names) if include_sizes: if not all([isinstance(s, int)] for s in self.sizes): raise NotImplementedError() result['sizes'] = self.sizes return result @staticmethod def _from_dict(dict_: dict): names = tuple(dict_['names']) sizes = list(dict_['sizes']) if 'sizes' in dict_ else [None] * len(names) item_names = tuple([None if n is None else tuple(n) for n in dict_['item_names']]) for i, n in 
enumerate(item_names): if n and sizes[i] is None: sizes[i] = len(n) return Shape(tuple(sizes), names, tuple(dict_['types']), item_names) @property def name_list(self): return list(self.names) @property def _named_sizes(self): return zip(self.names, self.sizes) @property def _dimensions(self): return zip(self.sizes, self.names, self.types, self.item_names) @property def untyped_dict(self): """ Returns: `dict` containing dimension names as keys. The values are either the item names as `tuple` if available, otherwise the size. """ return {name: self.get_item_names(i) or self.get_size(i) for i, name in enumerate(self.names)} def __len__(self): return len(self.sizes) def __contains__(self, item): if isinstance(item, (str, tuple, list)): dims = parse_dim_order(item) return all(dim in self.names for dim in dims) elif isinstance(item, Shape): return all([d in self.names for d in item.names]) else: raise ValueError(item) def isdisjoint(self, other: Union['Shape', tuple, list, str]): """ Shapes are disjoint if all dimension names of one shape do not occur in the other shape. """ other = parse_dim_order(other) return not any(dim in self.names for dim in other) def __iter__(self): return iter(self[i] for i in range(self.rank)) def index(self, dim: Union[str, 'Shape', None]) -> int: """ Finds the index of the dimension within this `Shape`. See Also: `Shape.indices()`. Args: dim: Dimension name or single-dimension `Shape`. Returns: Index as `int`. """ if dim is None: return None elif isinstance(dim, str): if dim not in self.names: raise ValueError(f"Shape {self} has no dimension '{dim}'") return self.names.index(dim) elif isinstance(dim, Shape): assert dim.rank == 1, f"index() requires a single dimension as input but got {dim}. Use indices() for multiple dimensions." return self.names.index(dim.name) else: raise ValueError(f"index() requires a single dimension as input but got {dim}") def indices(self, dims: Union[tuple, list, 'Shape']) -> Tuple[int]: """ Finds the indices of the given dimensions within this `Shape`. See Also: `Shape.index()`. Args: dims: Sequence of dimensions as `tuple`, `list` or `Shape`. Returns: Indices as `tuple[int]`. """ if isinstance(dims, (list, tuple, set)): return tuple([self.index(n) for n in dims if n in self.names]) elif isinstance(dims, Shape): return tuple([self.index(n) for n in dims.names if n in self.names]) else: raise ValueError(f"indices() requires a sequence of dimensions but got {dims}") def get_size(self, dim: Union[str, 'Shape', int], default=None): """ See Also: `Shape.get_sizes()`, `Shape.size` Args: dim: Dimension, either as name `str` or single-dimension `Shape` or index `int`. default: (Optional) If the dim does not exist, return this value instead of raising an error. Returns: Size associated with `dim` as `int` or `Tensor`. """ if isinstance(dim, int): assert default is None, "Cannot use a default value when passing an int for dim" return self.sizes[dim] if isinstance(dim, Shape): assert dim.rank == 1, f"get_size() requires a single dimension but got {dim}. Use indices() to get multiple sizes." dim = dim.name if isinstance(dim, str): if dim not in self.names: if default is None: raise KeyError(f"get_size() failed because '{dim}' is not part of Shape {self} and no default value was provided") else: return default return self.sizes[self.names.index(dim)] else: raise ValueError(f"get_size() requires a single dim name but got {dim}. 
Use indices() to get multiple sizes.") def get_sizes(self, dims: Union[tuple, list, 'Shape']) -> tuple: """ See Also: `Shape.get_size()` Args: dims: Dimensions as `tuple`, `list` or `Shape`. Returns: `tuple` """ assert isinstance(dims, (tuple, list, Shape)), f"get_sizes() requires a sequence of dimensions but got {dims}" return tuple([self.get_size(dim) for dim in dims]) def get_type(self, dim: Union[str, 'Shape']) -> str: # undocumented, use get_dim_type() instead. if isinstance(dim, str): return self.types[self.names.index(dim)] elif isinstance(dim, Shape): assert dim.rank == 1, f"Shape.get_type() only accepts single-dimension Shapes but got {dim}" return self.types[self.names.index(dim.name)] else: raise ValueError(dim) def get_dim_type(self, dim: Union[str, 'Shape']) -> Callable: """ Args: dim: Dimension, either as name `str` or single-dimension `Shape`. Returns: Dimension type, one of `batch`, `spatial`, `instance`, `channel`. """ return DIM_FUNCTIONS[self.get_type(dim)] def get_types(self, dims: Union[tuple, list, 'Shape']) -> tuple: # undocumented, do not use if isinstance(dims, (tuple, list)): return tuple(self.get_type(n) for n in dims) elif isinstance(dims, Shape): return tuple(self.get_type(n) for n in dims.names) else: raise ValueError(dims) def get_item_names(self, dim: Union[str, 'Shape', int], fallback_spatial=False) -> Union[tuple, None]: """ Args: fallback_spatial: If `True` and no item names are defined for `dim` and `dim` is a channel dimension, the spatial dimension names are interpreted as item names along `dim` in the order they are listed in this `Shape`. dim: Dimension, either as `int` index, `str` name or single-dimension `Shape`. Returns: Item names as `tuple` or `None` if not defined. """ if isinstance(dim, int): result = self.item_names[dim] elif isinstance(dim, str): result = self.item_names[self.index(dim)] elif isinstance(dim, Shape): assert dim.rank == 1, f"Shape.get_type() only accepts single-dimension Shapes but got {dim}" result = self.item_names[self.names.index(dim.name)] else: raise ValueError(dim) if result is not None: return result elif fallback_spatial and self.spatial_rank == self.get_size(dim) and self.get_type(dim) == CHANNEL_DIM: return self.spatial.names else: return None def flipped(self, dims: Union[List[str], Tuple[str]]): item_names = list(self.item_names) for dim in dims: if dim in self.names: dim_i_n = self.get_item_names(dim) if dim_i_n is not None: item_names[self.index(dim)] = tuple(reversed(dim_i_n)) return Shape(self.sizes, self.names, self.types, tuple(item_names)) def __getitem__(self, selection): if isinstance(selection, int): return Shape((self.sizes[selection],), (self.names[selection],), (self.types[selection],), (self.item_names[selection],)) elif isinstance(selection, slice): return Shape(self.sizes[selection], self.names[selection], self.types[selection], self.item_names[selection]) elif isinstance(selection, str): if ',' in selection: selection = [self.index(s.strip()) for s in selection.split(',')] else: selection = self.index(selection) return self[selection] elif isinstance(selection, Shape): selection = selection.names if isinstance(selection, (tuple, list)): selection = [self.index(s) if isinstance(s, str) else s for s in selection] return Shape(tuple([self.sizes[i] for i in selection]), tuple([self.names[i] for i in selection]), tuple([self.types[i] for i in selection]), tuple([self.item_names[i] for i in selection])) raise AssertionError("Can only access shape elements as shape[int], shape[str], shape[slice], 
shape[Sequence] or shape[Shape]") @property def reversed(self): return Shape(tuple(reversed(self.sizes)), tuple(reversed(self.names)), tuple(reversed(self.types)), tuple(reversed(self.item_names))) @property def batch(self) -> 'Shape': """ Filters this shape, returning only the batch dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t == BATCH_DIM]] @property def non_batch(self) -> 'Shape': """ Filters this shape, returning only the non-batch dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t != BATCH_DIM]] @property def spatial(self) -> 'Shape': """ Filters this shape, returning only the spatial dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t == SPATIAL_DIM]] @property def non_spatial(self) -> 'Shape': """ Filters this shape, returning only the non-spatial dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t != SPATIAL_DIM]] @property def instance(self) -> 'Shape': """ Filters this shape, returning only the instance dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t == INSTANCE_DIM]] @property def non_instance(self) -> 'Shape': """ Filters this shape, returning only the non-instance dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t != INSTANCE_DIM]] @property def channel(self) -> 'Shape': """ Filters this shape, returning only the channel dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t == CHANNEL_DIM]] @property def non_channel(self) -> 'Shape': """ Filters this shape, returning only the non-channel dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. 
Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t != CHANNEL_DIM]] @property def dual(self) -> 'Shape': """ Filters this shape, returning only the dual dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t == DUAL_DIM]] @property def non_dual(self) -> 'Shape': """ Filters this shape, returning only the non-dual dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t != DUAL_DIM]] @property def primal(self) -> 'Shape': """ Filters this shape, returning only the dual dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t not in [DUAL_DIM, BATCH_DIM]]] @property def non_primal(self) -> 'Shape': """ Filters this shape, returning only batch and dual dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t in [DUAL_DIM, BATCH_DIM]]] @property def transposed(self): if self.channel_rank > 0: replacement = {DUAL_DIM: CHANNEL_DIM, CHANNEL_DIM: DUAL_DIM} elif self.instance_rank > 0: replacement = {DUAL_DIM: INSTANCE_DIM, INSTANCE_DIM: DUAL_DIM} elif self.spatial_rank > 0: replacement = {DUAL_DIM: SPATIAL_DIM, SPATIAL_DIM: DUAL_DIM} elif self.dual_rank > 0: warnings.warn(f"Transposing {self} is ill-defined because there are not primal dims. Replacing dual dims by channel dims.", SyntaxWarning) replacement = {DUAL_DIM: CHANNEL_DIM} else: raise ValueError(f"Cannot transpose shape {self} as it has no channel or instance or spatial dims.") return self._with_types(tuple([replacement.get(t, t) for t in self.types])) def transpose(self, dims: DimFilter): if callable(dims) and dims in TYPE_BY_FUNCTION: dims = TYPE_BY_FUNCTION[dims] replacement = {DUAL_DIM: dims, dims: DUAL_DIM} return self._with_types(tuple([replacement.get(t, t) for t in self.types])) dims = self.only(dims) return self.replace(dims, dims.transposed) @property def non_singleton(self) -> 'Shape': """ Filters this shape, returning only non-singleton dimensions as a new `Shape` object. Dimensions are singleton if their size is exactly `1`. Returns: New `Shape` object """ return self[[i for i, s in enumerate(self.sizes) if not _size_equal(s, 1)]] @property def singleton(self) -> 'Shape': """ Filters this shape, returning only singleton dimensions as a new `Shape` object. Dimensions are singleton if their size is exactly `1`. Returns: New `Shape` object """ return self[[i for i, s in enumerate(self.sizes) if _size_equal(s, 1)]] def assert_all_sizes_defined(self): """ Filters this shape, returning only singleton dimensions as a new `Shape` object. 
Dimensions are singleton if their size is exactly `1`. Returns: New `Shape` object """ for n, s in zip(self.names, self.sizes): assert s is not None, f"All sizes must be defined but dim '{n}' is undefined in shape {self}" def as_channel(self): """Returns a copy of this `Shape` with all dimensions of type *channel*.""" return channel(**self.untyped_dict) def as_batch(self): """Returns a copy of this `Shape` with all dimensions of type *batch*.""" return batch(**self.untyped_dict) def as_spatial(self): """Returns a copy of this `Shape` with all dimensions of type *spatial*.""" return spatial(**self.untyped_dict) def as_instance(self): """Returns a copy of this `Shape` with all dimensions of type *instance*.""" return instance(**self.untyped_dict) def as_dual(self): """Returns a copy of this `Shape` with all dimensions of type *dual*.""" return dual(**self.untyped_dict) def as_type(self, new_type: Callable): """Returns a copy of this `Shape` with all dimensions of the given type, either `batch`, `dual`, `spatial`, `instance`, or `channel` .""" return new_type(**self.untyped_dict) def _more_dual(self): return Shape(self.sizes, tuple('~' + n for n in self.names), (DUAL_DIM,) * len(self.names), self.item_names) def _less_dual(self, default_type='unknown_primal'): names = tuple(n[1:] if n.startswith('~') else n for n in self.names) types = [t if t != DUAL_DIM else (DUAL_DIM if n.startswith('~~') else default_type) for n, t in zip(self.names, self.types)] return Shape(self.sizes, names, tuple(types), self.item_names) def unstack(self, dim='dims') -> Tuple['Shape']: """ Slices this `Shape` along a dimension. The dimension listing the sizes of the shape is referred to as `'dims'`. Non-uniform tensor shapes may be unstacked along other dimensions as well, see https://tum-pbs.github.io/PhiML/Non_Uniform.html Args: dim: dimension to unstack Returns: slices of this shape """ if dim == 'dims': return tuple(Shape((self.sizes[i],), (self.names[i],), (self.types[i],), (self.item_names[i],)) for i in range(self.rank)) if dim not in self and self.is_uniform: return tuple([self]) from ._tensors import Tensor if dim in self: inner = self.without(dim) dim_size = self.get_size(dim) else: inner = self dim_size = self.shape.get_size(dim) sizes = [] for size in inner.sizes: if isinstance(size, Tensor) and dim in size.shape: sizes.append(size._unstack(dim)) dim_size = size.shape.get_size(dim) else: sizes.append(size) assert isinstance(dim_size, int) shapes = tuple(Shape(tuple([int(size[i]) if isinstance(size, tuple) else size for size in sizes]), inner.names, inner.types, inner.item_names) for i in range(dim_size)) return shapes @property def name(self) -> str: """ Only for Shapes containing exactly one single dimension. Returns the name of the dimension. See Also: `Shape.names`. """ assert self.rank == 1, f"Shape.name is only defined for shapes of rank 1. shape={self}" return self.names[0] @property def size(self): """ Only for Shapes containing exactly one single dimension. Returns the size of the dimension. See Also: `Shape.sizes`, `Shape.get_size()`. """ assert self.rank == 1, f"Shape.size is only defined for shapes of rank 1 but has dims {self}" return self.sizes[0] @property def type(self) -> str: """ Only for Shapes containing exactly one single dimension. Returns the type of the dimension. See Also: `Shape.get_type()`. """ assert self.rank == 1, "Shape.type is only defined for shapes of rank 1." 
return self.types[0] @property def dim_type(self): types = set(self.types) assert len(types) == 1, f"Shape contains multiple types: {self}" return DIM_FUNCTIONS[next(iter(types))] def __int__(self): assert self.rank == 1, "int(Shape) is only defined for shapes of rank 1." return self.sizes[0] def mask(self, names: Union[tuple, list, set, 'Shape']): """ Returns a binary sequence corresponding to the names of this Shape. A value of 1 means that a dimension of this Shape is contained in `names`. Args: names: instance of dimension names: tuple or list or set: Returns: binary sequence """ if isinstance(names, str): names = [names] elif isinstance(names, Shape): names = names.names mask = [1 if name in names else 0 for name in self.names] return tuple(mask) def __repr__(self): def size_repr(size, items): if items is not None: items_str = ",".join(items) return items_str if len(items_str) <= 12 else f"{size}:{items[0][:5]}..." return size strings = [f"{name}{SUPERSCRIPT.get(dim_type, '?')}={size_repr(size, items)}" for size, name, dim_type, items in self._dimensions] return '(' + ', '.join(strings) + ')' def __eq__(self, other): if not isinstance(other, Shape): return False if self.names != other.names or self.types != other.types: return False for size1, size2 in zip(self.sizes, other.sizes): equal = size1 == size2 assert isinstance(equal, (bool, math.Tensor)) if isinstance(equal, math.Tensor): equal = equal.all if not equal: return False for names1, names2 in zip(self.item_names, other.item_names): if names1 != names2: return False return True def __ne__(self, other): return not self == other def __bool__(self): return self.rank > 0 def _reorder(self, names: Union[tuple, list, 'Shape']) -> 'Shape': assert len(names) == self.rank if isinstance(names, Shape): names = names.names order = [self.index(n) for n in names] return self[order] def _order_group(self, names: Union[tuple, list, 'Shape']) -> list: """ Reorders the dimensions of this `Shape` so that `names` are clustered together and occur in the specified order. """ if isinstance(names, Shape): names = names.names result = [] for dim in self.names: if dim not in result: if dim in names: result.extend(names) else: result.append(dim) return result def __and__(self, other): if other is dual: return concat_shapes(self, self.primal.as_dual()) return merge_shapes(self, other) def __rand__(self, other): if other is dual: return concat_shapes(self.primal.as_dual(), self) return merge_shapes(self, other) def _expand(self, dim: 'Shape', pos=None) -> 'Shape': """**Deprecated.** Use `phiml.math.merge_shapes()` or `phiml.math.concat_shapes()` instead. """ if not dim: return self assert dim.name not in self, f"Cannot expand shape {self} by {dim} because dimension already exists." 
assert isinstance(dim, Shape) and dim.rank == 1, f"Shape.expand() requires a single dimension as a Shape but got {dim}" if pos is None: same_type_dims = self[[i for i, t in enumerate(self.types) if t == dim.type]] if len(same_type_dims) > 0: pos = self.index(same_type_dims.names[0]) else: pos = { BATCH_DIM: 0, DUAL_DIM: self.batch_rank, INSTANCE_DIM: self.batch_rank + self.dual_rank, SPATIAL_DIM: self.batch.rank + self.dual_rank + self.instance_rank, CHANNEL_DIM: self.rank + 1 }[dim.type] elif pos < 0: pos += self.rank + 1 sizes = list(self.sizes) names = list(self.names) types = list(self.types) item_names = list(self.item_names) sizes.insert(pos, dim.size) names.insert(pos, dim.name) types.insert(pos, dim.type) item_names.insert(pos, dim.item_names[0]) return Shape(tuple(sizes), tuple(names), tuple(types), tuple(item_names)) def without(self, dims: 'DimFilter') -> 'Shape': """ Builds a new shape from this one that is missing all given dimensions. Dimensions in `dims` that are not part of this Shape are ignored. The complementary operation is `Shape.only()`. Args: dims: Single dimension (str) or instance of dimensions (tuple, list, Shape) dims: Dimensions to exclude as `str` or `tuple` or `list` or `Shape`. Dimensions that are not included in this shape are ignored. Returns: Shape without specified dimensions """ if dims is None: # subtract none return self elif callable(dims): dims = dims(self) if isinstance(dims, str): return self[[i for i in range(self.rank) if self.names[i] not in parse_dim_order(dims)]] elif isinstance(dims, Shape): return self[[i for i in range(self.rank) if self.names[i] not in dims.names]] if isinstance(dims, (tuple, list, set)) and all([isinstance(d, str) for d in dims]): return self[[i for i in range(self.rank) if self.names[i] not in dims]] elif isinstance(dims, (tuple, list, set)): result = self for wo in dims: result = result.without(wo) return result else: raise ValueError(dims) def only(self, dims: 'DimFilter', reorder=False): """ Builds a new shape from this one that only contains the given dimensions. Dimensions in `dims` that are not part of this Shape are ignored. The complementary operation is :func:`Shape.without`. Args: dims: comma-separated dimension names (str) or instance of dimensions (tuple, list, Shape) or filter function. reorder: If `False`, keeps the dimension order as defined in this shape. If `True`, reorders the dimensions of this shape to match the order of `dims`. 
Returns: Shape containing only specified dimensions """ if dims is None: # keep none return EMPTY_SHAPE if callable(dims): dims = dims(self) if isinstance(dims, str): dims = parse_dim_order(dims) if isinstance(dims, Shape): dims = dims.names if isinstance(dims, (tuple, list, set)): if all(isinstance(d, int) for d in dims): if not reorder: dims = tuple(sorted(dims)) return self[dims] dim_names = [] for d in dims: if callable(d): d = d(self) if isinstance(d, str): dim_names.append(d) elif isinstance(d, Shape): dim_names.extend(d.names) else: raise ValueError(f"Format not understood for Shape.only(): {dims}") if reorder: dim_names = [d.name if isinstance(d, Shape) else d for d in dim_names] assert all(isinstance(d, str) for d in dim_names) return self[[self.names.index(d) for d in dim_names if d in self.names]] else: dim_names = [d.name if isinstance(d, Shape) else d for d in dim_names] assert all(isinstance(d, str) for d in dim_names) return self[[i for i in range(self.rank) if self.names[i] in dim_names]] raise ValueError(dims) def is_compatible(self, *others: 'Shape'): """ Checks if this shape and the others can be broadcast. Args: others: Other shapes. Returns: `True` only if all shapes are compatible. """ try: merge_shapes(self, *others) return True except IncompatibleShapes: return False @property def rank(self) -> int: """ Returns the number of dimensions. Equal to `len(shape)`. See `Shape.is_empty`, `Shape.batch_rank`, `Shape.spatial_rank`, `Shape.channel_rank`. """ return len(self.sizes) @property def batch_rank(self) -> int: """ Number of batch dimensions """ return sum([1 for ty in self.types if ty == BATCH_DIM]) @property def instance_rank(self) -> int: return sum([1 for ty in self.types if ty == INSTANCE_DIM]) @property def spatial_rank(self) -> int: """ Number of spatial dimensions """ return sum([1 for ty in self.types if ty == SPATIAL_DIM]) @property def dual_rank(self) -> int: """ Number of spatial dimensions """ return sum([1 for ty in self.types if ty == DUAL_DIM]) @property def channel_rank(self) -> int: """ Number of channel dimensions """ return sum([1 for ty in self.types if ty == CHANNEL_DIM]) @property def well_defined(self): """ Returns `True` if no dimension size is `None`. Shapes with undefined sizes may be used in `phiml.math.tensor()`, `phiml.math.wrap()`, `phiml.math.stack()` or `phiml.math.concat()`. To create an undefined size, call a constructor function (`batch()`, `spatial()`, `channel()`, `instance()`) with positional `str` arguments, e.g. `spatial('x')`. """ for size in self.sizes: if size is None: return False return True @property def defined(self): return self[[i for i, size in enumerate(self.sizes) if size is not None]] @property def undefined(self): return self[[i for i, size in enumerate(self.sizes) if size is None]] @property def shape(self) -> 'Shape': """ Higher-order `Shape`. The returned shape will always contain the channel dimension `dims` with a size equal to the `Shape.rank` of this shape. For uniform shapes, `Shape.shape` will only contain the dimension `dims` but the shapes of [non-uniform shapes](https://tum-pbs.github.io/PhiML/Non_Uniform.html) may contain additional dimensions. See Also: `Shape.is_uniform`. Returns: `Shape`. """ from . import Tensor shape = Shape((self.rank,), ('dims',), (CHANNEL_DIM,), (self.names,)) for size in self.sizes: if isinstance(size, Tensor): shape = shape & size.shape return shape @property def is_uniform(self) -> bool: """ A shape is uniform if it all sizes have a single integer value. 
See Also: `Shape.is_non_uniform`, `Shape.shape`. """ from ._tensors import Tensor return all(not isinstance(s, Tensor) for s in self.sizes) @property def is_non_uniform(self) -> bool: """ A shape is non-uniform if the size of any dimension varies along another dimension. See Also: `Shape.is_uniform`, `Shape.shape`. """ return not self.is_uniform @property def non_uniform(self) -> 'Shape': """ Returns only the non-uniform dimensions of this shape, i.e. the dimensions whose size varies along another dimension. See Also `Shape.non_uniform_shape` """ from . import Tensor indices = [i for i, size in enumerate(self.sizes) if isinstance(size, Tensor) and size.rank > 0] return self[indices] @property def non_uniform_shape(self): """ Returns the stack dimensions of non-uniform shapes. This is equal to `Shape.shape` excluding the `dims` dimension. For example, when stacking `(x=3)` and `(x=2)` along `vector`, the resulting shape is non_uniform. Its `non_uniform_shape` is `vector` and its `non_uniform` dimension is `x`. See Also `Shape.non_uniform`. """ from . import Tensor shape = EMPTY_SHAPE for size in self.sizes: if isinstance(size, Tensor): shape = shape & size.shape return shape def with_size(self, size: Union[int, Sequence[str]]): """ Only for single-dimension shapes. Returns a `Shape` representing this dimension but with a different size. See Also: `Shape.with_sizes()`. Args: size: Replacement size for this dimension. Returns: `Shape` """ assert self.rank == 1, "Shape.with_size() is only defined for shapes of rank 1." return self.with_sizes([size]) def with_sizes(self, sizes: Union[Sequence[int], Sequence[Tuple[str, ...]], 'Shape', int], keep_item_names=True): """ Returns a new `Shape` matching the dimension names and types of `self` but with different sizes. See Also: `Shape.with_size()`. Args: sizes: One of * `tuple` / `list` of same length as `self` containing replacement sizes or replacement item names. * `Shape` of any rank. Replaces sizes for dimensions shared by `sizes` and `self`. * `int`: new size for all dimensions keep_item_names: If `False`, forgets all item names. If `True`, keeps item names where the size does not change. Returns: `Shape` with same names and types as `self`. """ if isinstance(sizes, (int, str)): sizes = [sizes] * len(self.sizes) if isinstance(sizes, Shape): item_names = [sizes.get_item_names(dim) if dim in sizes else self.get_item_names(dim) for dim in self.names] sizes = [sizes.get_size(dim) if dim in sizes else s for dim, s in self._named_sizes] return Shape(tuple(sizes), self.names, self.types, tuple(item_names)) else: assert len(sizes) == len(self.sizes), f"Failed to set sizes of Shape {self} to {sizes} because rank does not match." 
sizes_ = [] item_names = [] for i, obj in enumerate(sizes): new_size, new_item_names = Shape._size_and_item_names_from_obj(obj, self.sizes[i], self.item_names[i], keep_item_names) sizes_.append(new_size) item_names.append(new_item_names) return Shape(tuple(sizes_), self.names, self.types, tuple(item_names)) @staticmethod def _size_and_item_names_from_obj(obj, prev_size, prev_item_names, keep_item_names=True): if isinstance(obj, str): obj = [s.strip() for s in obj.split(',')] if isinstance(obj, (tuple, list)): return len(obj), tuple(obj) elif isinstance(obj, Number): return obj, prev_item_names if keep_item_names and (prev_size is None or _size_equal(obj, prev_size)) else None elif isinstance(obj, math.Tensor) or obj is None: return obj, None elif isinstance(obj, Shape): return obj.rank, obj.names else: raise ValueError(f"sizes can only contain int, str or Tensor but got {type(obj)}") def without_sizes(self): """ Returns: `Shape` with all sizes undefined (`None`) """ return Shape((None,) * self.rank, self.names, self.types, (None,) * self.rank) def _replace_single_size(self, dim: str, size: int, keep_item_names: bool = False): new_sizes = list(self.sizes) new_sizes[self.index(dim)] = size return self.with_sizes(new_sizes, keep_item_names=keep_item_names) def with_dim_size(self, dim: Union[str, 'Shape'], size: Union[int, 'math.Tensor', str, tuple, list], keep_item_names=True): """ Returns a new `Shape` that has a different size for `dim`. Args: dim: Dimension for which to replace the size, `Shape` or `str`. size: New size, `int` or `Tensor` Returns: `Shape` with same names and types as `self`. """ if isinstance(dim, Shape): dim = dim.name assert isinstance(dim, str) new_size, new_item_names = Shape._size_and_item_names_from_obj(size, self.get_size(dim), self.get_item_names(dim), keep_item_names) return self.replace(dim, Shape((new_size,), (dim,), (self.get_type(dim),), (new_item_names,)), keep_item_names=keep_item_names) def _with_names(self, names: Union[str, tuple, list]): if isinstance(names, str): names = parse_dim_names(names, self.rank) names = [n if n is not None else o for n, o in zip(names, self.names)] return Shape(self.sizes, tuple(names), self.types, self.item_names) def _replace_names_and_types(self, dims: Union['Shape', str, tuple, list], new: Union['Shape', str, tuple, list]) -> 'Shape': """ Returns a copy of `self` with `dims` replaced by `new`. Dimensions that are not present in `self` are ignored. The dimension order is preserved. Args: dims: Dimensions to replace. new: New dimensions, must have same length as `dims`. If a `Shape` is given, replaces the dimension types and item names as well. Returns: `Shape` with same rank and dimension order as `self`. 
""" dims = parse_dim_order(dims) sizes = [math.rename_dims(s, dims, new) if isinstance(s, math.Tensor) else s for s in self.sizes] new = parse_dim_order(new) if isinstance(new, str) else new names = list(self.names) types = list(self.types) item_names = list(self.item_names) for old_name, new_dim in zip(dims, new): if old_name in self: if isinstance(new_dim, Shape): names[self.index(old_name)] = new_dim.name types[self.index(old_name)] = new_dim.type item_names[self.index(old_name)] = new_dim.item_names[0] else: names[self.index(old_name)] = _apply_prefix(new_dim, types[self.index(old_name)]) return Shape(tuple(sizes), tuple(names), tuple(types), tuple(item_names)) def replace(self, dims: Union['Shape', str, tuple, list], new: 'Shape', keep_item_names=True, replace_item_names: DimFilter = None) -> 'Shape': """ Returns a copy of `self` with `dims` replaced by `new`. Dimensions that are not present in `self` are ignored. The dimension order is preserved. Args: dims: Dimensions to replace. new: New dimensions, must have same length as `dims`. If a `Shape` is given, replaces the dimension types and item names as well. keep_item_names: Keeps existing item names for dimensions where `new` does not specify item names if the new dimension has the same size. replace_item_names: For which dims the item names should be replaced as well. Returns: `Shape` with same rank and dimension order as `self`. """ dims = parse_dim_order(dims) assert isinstance(new, Shape), f"new must be a Shape but got {new}" names = list(self.names) sizes = list(self.sizes) types = list(self.types) item_names = list(self.item_names) for i in self.indices(self.only(replace_item_names)): if item_names[i]: if len(new) > len(dims): raise NotImplementedError else: name_map = {d: n for d, n in zip(dims, new.names)} item_names[i] = tuple([name_map.get(n, n) for n in item_names[i]]) if len(new) > len(dims): # Put all in one spot assert len(dims) == 1, "Cannot replace 2+ dims by more replacements" index = self.index(dims[0]) return concat_shapes(self[:index], new, self[index+1:]) for old_name, new_dim in zip(dims, new): if old_name in self: names[self.index(old_name)] = new_dim.name types[self.index(old_name)] = new_dim.type if new_dim.item_names[0]: item_names[self.index(old_name)] = new_dim.item_names[0] elif not _size_equal(new_dim.size, self.get_size(old_name)) or not keep_item_names: item_names[self.index(old_name)] = None # forget previous item names sizes[self.index(old_name)] = new_dim.size replaced = Shape(tuple(sizes), tuple(names), tuple(types), tuple(item_names)) if len(new) == len(dims): return replaced to_remove = dims[-(len(dims) - len(new)):] return replaced.without(to_remove) def _with_types(self, types: Union['Shape', str, Tuple[str, ...], List[str]]): """ Only for internal use. Note: This method does not rename dimensions to comply with type requirements (e.g. ~ for dual dims). 
""" if isinstance(types, Shape): types = tuple([types.get_type(name) if name in types else self_type for name, self_type in zip(self.names, self.types)]) elif isinstance(types, str): types = (types,) * self.rank elif isinstance(types, (tuple, list)): types = tuple(types) else: raise ValueError(types) names = tuple([_apply_prefix(name, t) for name, t in zip(self.names, types)]) return Shape(self.sizes, names, types, self.item_names) def _with_item_names(self, item_names: tuple): return Shape(self.sizes, self.names, self.types, item_names) def _with_item_name(self, dim: str, item_name: tuple): if dim not in self: return self item_names = list(self.item_names) item_names[self.index(dim)] = item_name return Shape(self.sizes, self.names, self.types, tuple(item_names)) def _perm(self, names: Tuple[str]) -> List[int]: assert len(set(names)) == len(names), f"No duplicates allowed but got {names}" assert len(names) >= len(self.names), f"Cannot find permutation for {self} given {names} because names {set(self.names) - set(names)} are missing" assert len(names) <= len(self.names), f"Cannot find permutation for {self} given {names} because too many names were passed: {names}" perm = [self.names.index(name) for name in names] return perm @property def volume(self) -> Union[int, None]: """ Returns the total number of values contained in a tensor of this shape. This is the product of all dimension sizes. Returns: volume as `int` or `Tensor` or `None` if the shape is not `Shape.well_defined` """ result = 1 for size in self.sizes: if size is None: return None result *= size from ._tensors import Tensor if not isinstance(result, Tensor): return result result /= self.non_uniform_shape.volume # We summed up the items -> undo multiplication return int(result.sum) @property def is_empty(self) -> bool: """ True if this shape has no dimensions. Equivalent to `Shape.rank` `== 0`. """ return len(self.sizes) == 0 def after_pad(self, widths: dict) -> 'Shape': sizes = list(self.sizes) item_names = list(self.item_names) for dim, (lo, up) in widths.items(): if dim in self.names: sizes[self.index(dim)] += lo + up item_names[self.index(dim)] = None return Shape(tuple(sizes), self.names, self.types, tuple(item_names)) def prepare_gather(self, dim: str, selection: Union[slice, int, 'Shape', str, tuple, list]) -> Union[slice, List[int]]: """ Parse a slice object for a specific dimension. Args: dim: Name of dimension to slice. selection: Slice object. Returns: """ if isinstance(selection, Shape): selection = selection.name if selection.rank == 1 else selection.names if isinstance(selection, str) and ',' in selection: selection = parse_dim_order(selection) if isinstance(selection, str): # single item name item_names = self.get_item_names(dim, fallback_spatial=True) assert item_names is not None, f"No item names defined for dim '{dim}' in tensor {self.shape} and dimension size does not match spatial rank." assert selection in item_names, f"Accessing tensor.{dim}['{selection}'] failed. Item names are {item_names}." selection = item_names.index(selection) if isinstance(selection, (tuple, list)): selection = list(selection) if any([isinstance(s, str) for s in selection]): item_names = self.get_item_names(dim, fallback_spatial=True) for i, s in enumerate(selection): if isinstance(s, str): assert item_names is not None, f"Accessing tensor.{dim}['{s}'] failed because no item names are present on tensor {self.shape}" assert s in item_names, f"Accessing tensor.{dim}['{s}'] failed. Item names are {item_names}." 
selection[i] = item_names.index(s) if not selection: # empty selection = slice(0, 0) return selection def prepare_renaming_gather(self, dim: str, selection: Union[slice, int, 'Shape', str, tuple, list]): if isinstance(selection, str) and '->' in selection: selection, new_names = selection.split('->') if new_names == '?': return self.prepare_gather(dim, selection), self[dim]._with_item_names((None,)) else: return self.prepare_gather(dim, selection), self[dim].with_size(new_names) else: return self.prepare_gather(dim, selection), None def resolve_index(self, index: Dict[str, Union[slice, int, 'Shape', str, tuple, list]]) -> Dict[str, Union[slice, int, tuple, list]]: """ Replaces item names by the corresponding indices. Args: index: n-dimensional index or slice. Returns: Same index but without any reference to item names. """ return {dim: self.prepare_gather(dim, s) for dim, s in index.items()} def after_gather(self, selection: dict) -> 'Shape': from . import Tensor if self.is_non_uniform: sizes = [(s[selection] if isinstance(s, Tensor) else s) for s in self.sizes] sizes = [(int(s) if isinstance(s, Tensor) and s.rank == 0 else s) for s in sizes] result = self.with_sizes(sizes) else: result = self for sel_dim, sel in selection.items(): if sel_dim not in self.names: continue sel = self.prepare_gather(sel_dim, sel) if isinstance(sel, int): result = result.without(sel_dim) elif isinstance(sel, slice): step = int(sel.step) if sel.step is not None else 1 start = int(sel.start) if sel.start is not None else (0 if step > 0 else self.get_size(sel_dim)-1) stop = int(sel.stop) if sel.stop is not None else (self.get_size(sel_dim) if step > 0 else -1) if stop < 0 and step > 0: stop += self.get_size(sel_dim) assert stop >= 0 if start < 0 and step > 0: start += self.get_size(sel_dim) assert start >= 0 stop = min(stop, self.get_size(sel_dim)) new_size = math.to_int64(math.ceil(math.wrap((stop - start) / step))) if new_size.rank == 0: new_size = int(new_size) # NumPy array not allowed because not hashable result = result._replace_single_size(sel_dim, new_size, keep_item_names=True) if step < 0: result = result.flipped([sel_dim]) if self.get_item_names(sel_dim) is not None: result = result._with_item_name(sel_dim, tuple(self.get_item_names(sel_dim)[sel])) elif isinstance(sel, (tuple, list)): result = result._replace_single_size(sel_dim, len(sel)) if self.get_item_names(sel_dim) is not None: result = result._with_item_name(sel_dim, tuple([self.get_item_names(sel_dim)[i] for i in sel])) elif isinstance(sel, Tensor): if sel.dtype.kind == bool: raise NotImplementedError("Shape.after_gather(Tensor[bool]) not yet implemented") # from ._ops import nonzero # sel = nonzero(sel) if sel.dtype.kind == int: assert len(selection) == 1, f"When slicing a Shape with Tensor[int], only one sel item is allowed but got {sel}" sel_shape = shape(sel) assert sel_shape.channel_rank == 1 and sel_shape.channel.item_names[0], f"Shape.after_gather(Tensor[int]) requires indices to have a single channel dim with item names but got {sel}" indexed = sel_shape.channel.item_names[0] assert indexed in self, f"All indexed dims {indexed} must be part of sliced Shape {self}" from ._ops import slice_ sizes = [slice_(s, sel) for s in self.sizes] return self.with_sizes(sizes).without(indexed) & sel_shape.non_channel else: raise NotImplementedError(f"{type(sel)} not supported. Only (int, slice) allowed.") return result def meshgrid(self, names=False): """ Builds a sequence containing all multi-indices within a tensor of this shape. 
All indices are returned as `dict` mapping dimension names to `int` indices. The corresponding values can be retrieved from Tensors and other Sliceables using `tensor[index]`. This function currently only supports uniform tensors. Args: names: If `True`, replace indices by their item names if available. Returns: `dict` iterator. """ assert self.is_uniform, f"Shape.meshgrid() is currently not supported for non-uniform tensors, {self}" indices = [0] * self.rank while True: if names: yield {dim: (names[index] if names is not None else index) for dim, index, names in zip(self.names, indices, self.item_names)} else: yield {dim: index for dim, index in zip(self.names, indices)} for i in range(self.rank-1, -1, -1): indices[i] = (indices[i] + 1) % self.sizes[i] if indices[i] != 0: break else: return def first_index(self, names=False): return next(iter(self.meshgrid(names=names))) def are_adjacent(self, dims: Union[str, tuple, list, set, 'Shape']): indices = self.indices(dims) return (max(indices) - min(indices)) == len(dims) - 1 def __add__(self, other): if isinstance(other, Shape) and self.isdisjoint(other): return concat_shapes(self, other) return self._op2(other, lambda s, o: s + o, 0) def __radd__(self, other): return self._op2(other, lambda s, o: o + s, 0) def __sub__(self, other): if isinstance(other, (str, Shape, tuple, list, set)) or callable(other): return self.without(other) return self._op2(other, lambda s, o: s - o, 0) def __rsub__(self, other): return self._op2(other, lambda s, o: o - s, 0) def __mul__(self, other): return self._op2(other, lambda s, o: s * o, 1) def __rmul__(self, other): return self._op2(other, lambda s, o: o * s, 1) def _op2(self, other, fun, default: int): if isinstance(other, int): return Shape(tuple([fun(s, other) for s in self.sizes]), self.names, self.types, (None,) * self.rank) else: return NotImplemented def __hash__(self): return hash(self.names) @staticmethod def __stack__(values, dim: 'Shape', **kwargs): return shape_stack(dim, *values)
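To illustrate the constructors and the & syntax described above (a sketch; all dimension names are placeholders):

>>> from phiml.math import batch, spatial, channel, merge_shapes
>>> s = batch(examples=32) & spatial(x=28, y=28) & channel(c='r,g,b')
>>> s.rank            # 4
>>> s.get_size('x')   # 28
>>> merge_shapes(batch(examples=32), spatial(x=28, y=28))  # equivalent to &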
Instance variables
prop batch : Shape
-
Filters this shape, returning only the batch dimensions as a new
Shape
object. See also:
Shape.batch
, Shape.spatial
, Shape.instance
, Shape.channel
, Shape.dual
, Shape.non_batch
, Shape.non_spatial
, Shape.non_instance
, Shape.non_channel
, Shape.non_dual
. Returns
New
Shape
object
Expand source code
@property
def batch(self) -> 'Shape':
    """
    Filters this shape, returning only the batch dimensions as a new `Shape` object.

    See also:
        `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`,
        `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.

    Returns:
        New `Shape` object
    """
    return self[[i for i, t in enumerate(self.types) if t == BATCH_DIM]]
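For example (a sketch with placeholder dim names), the type filters select dims of a mixed shape by type:

>>> from phiml.math import batch, spatial, channel
>>> s = batch(b=10) & spatial(x=28, y=28) & channel(c=3)
>>> s.batch       # only the batch dim b
>>> s.non_batch   # the spatial dims x, y and the channel dim c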
prop batch_rank : int
-
Number of batch dimensions
Expand source code
@property def batch_rank(self) -> int: """ Number of batch dimensions """ return sum([1 for ty in self.types if ty == BATCH_DIM])
prop channel : Shape
-
Filters this shape, returning only the channel dimensions as a new Shape object.
See also: Shape.batch, Shape.spatial, Shape.instance, Shape.channel, Shape.dual, Shape.non_batch, Shape.non_spatial, Shape.non_instance, Shape.non_channel, Shape.non_dual.
Returns
New Shape object.
Expand source code
@property def channel(self) -> 'Shape': """ Filters this shape, returning only the channel dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t == CHANNEL_DIM]]
prop channel_rank : int
-
Number of channel dimensions
Expand source code
@property def channel_rank(self) -> int: """ Number of channel dimensions """ return sum([1 for ty in self.types if ty == CHANNEL_DIM])
prop defined
-
Filters this shape, returning only the dimensions whose size is defined (not None), as a new Shape object.
Expand source code
@property def defined(self): return self[[i for i, size in enumerate(self.sizes) if size is not None]]
prop dim_type
-
Returns the type of this shape's dimensions as a dimension constructor function (batch, dual, instance, spatial or channel). All dimensions must share the same type.
Expand source code
@property def dim_type(self): types = set(self.types) assert len(types) == 1, f"Shape contains multiple types: {self}" return DIM_FUNCTIONS[next(iter(types))]
prop dual : Shape
-
Filters this shape, returning only the dual dimensions as a new Shape object.
See also: Shape.batch, Shape.spatial, Shape.instance, Shape.channel, Shape.dual, Shape.non_batch, Shape.non_spatial, Shape.non_instance, Shape.non_channel, Shape.non_dual.
Returns
New Shape object.
Expand source code
@property def dual(self) -> 'Shape': """ Filters this shape, returning only the dual dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t == DUAL_DIM]]
prop dual_rank : int
-
Number of dual dimensions
Expand source code
@property def dual_rank(self) -> int: """ Number of dual dimensions """ return sum([1 for ty in self.types if ty == DUAL_DIM])
prop instance : Shape
-
Filters this shape, returning only the instance dimensions as a new Shape object.
See also: Shape.batch, Shape.spatial, Shape.instance, Shape.channel, Shape.dual, Shape.non_batch, Shape.non_spatial, Shape.non_instance, Shape.non_channel, Shape.non_dual.
Returns
New Shape object.
Expand source code
@property def instance(self) -> 'Shape': """ Filters this shape, returning only the instance dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t == INSTANCE_DIM]]
prop instance_rank : int
-
Number of instance dimensions
Expand source code
@property def instance_rank(self) -> int: return sum([1 for ty in self.types if ty == INSTANCE_DIM])
prop is_empty : bool
-
True if this shape has no dimensions. Equivalent to Shape.rank == 0.
Expand source code
@property def is_empty(self) -> bool: """ True if this shape has no dimensions. Equivalent to `Shape.rank` `== 0`. """ return len(self.sizes) == 0
prop is_non_uniform : bool
-
A shape is non-uniform if the size of any dimension varies along another dimension.
See Also: Shape.is_uniform, Shape.shape.
Expand source code
@property def is_non_uniform(self) -> bool: """ A shape is non-uniform if the size of any dimension varies along another dimension. See Also: `Shape.is_uniform`, `Shape.shape`. """ return not self.is_uniform
prop is_uniform : bool
-
A shape is uniform if all its sizes are single integer values.
See Also: Shape.is_non_uniform, Shape.shape.
Expand source code
@property def is_uniform(self) -> bool: """ A shape is uniform if all its sizes are single integer values. See Also: `Shape.is_non_uniform`, `Shape.shape`. """ from ._tensors import Tensor return all(not isinstance(s, Tensor) for s in self.sizes)
prop name : str
-
Only for Shapes containing exactly one dimension. Returns the name of that dimension.
See Also: Shape.names.
Expand source code
@property def name(self) -> str: """ Only for Shapes containing exactly one single dimension. Returns the name of the dimension. See Also: `Shape.names`. """ assert self.rank == 1, f"Shape.name is only defined for shapes of rank 1. shape={self}" return self.names[0]
prop name_list
-
Expand source code
@property def name_list(self): return list(self.names)
var names
-
Ordered dimension names as tuple[str].
See Also: Shape.name.
prop non_batch : Shape
-
Filters this shape, returning only the non-batch dimensions as a new Shape object.
See also: Shape.batch, Shape.spatial, Shape.instance, Shape.channel, Shape.dual, Shape.non_batch, Shape.non_spatial, Shape.non_instance, Shape.non_channel, Shape.non_dual.
Returns
New Shape object.
Expand source code
@property def non_batch(self) -> 'Shape': """ Filters this shape, returning only the non-batch dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t != BATCH_DIM]]
prop non_channel : Shape
-
Filters this shape, returning only the non-channel dimensions as a new Shape object.
See also: Shape.batch, Shape.spatial, Shape.instance, Shape.channel, Shape.dual, Shape.non_batch, Shape.non_spatial, Shape.non_instance, Shape.non_channel, Shape.non_dual.
Returns
New Shape object.
Expand source code
@property def non_channel(self) -> 'Shape': """ Filters this shape, returning only the non-channel dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t != CHANNEL_DIM]]
prop non_dual : Shape
-
Filters this shape, returning only the non-dual dimensions as a new Shape object.
See also: Shape.batch, Shape.spatial, Shape.instance, Shape.channel, Shape.dual, Shape.non_batch, Shape.non_spatial, Shape.non_instance, Shape.non_channel, Shape.non_dual.
Returns
New Shape object.
Expand source code
@property def non_dual(self) -> 'Shape': """ Filters this shape, returning only the non-dual dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t != DUAL_DIM]]
prop non_instance : Shape
-
Filters this shape, returning only the non-instance dimensions as a new Shape object.
See also: Shape.batch, Shape.spatial, Shape.instance, Shape.channel, Shape.dual, Shape.non_batch, Shape.non_spatial, Shape.non_instance, Shape.non_channel, Shape.non_dual.
Returns
New Shape object.
Expand source code
@property def non_instance(self) -> 'Shape': """ Filters this shape, returning only the non-instance dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t != INSTANCE_DIM]]
prop non_primal : Shape
-
Filters this shape, returning only batch and dual dimensions as a new Shape object.
See also: Shape.batch, Shape.spatial, Shape.instance, Shape.channel, Shape.dual, Shape.non_batch, Shape.non_spatial, Shape.non_instance, Shape.non_channel, Shape.non_dual.
Returns
New Shape object.
Expand source code
@property def non_primal(self) -> 'Shape': """ Filters this shape, returning only batch and dual dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t in [DUAL_DIM, BATCH_DIM]]]
prop non_singleton : Shape
-
Filters this shape, returning only non-singleton dimensions as a new Shape object. Dimensions are singleton if their size is exactly 1.
Returns
New Shape object.
Expand source code
@property def non_singleton(self) -> 'Shape': """ Filters this shape, returning only non-singleton dimensions as a new `Shape` object. Dimensions are singleton if their size is exactly `1`. Returns: New `Shape` object """ return self[[i for i, s in enumerate(self.sizes) if not _size_equal(s, 1)]]
prop non_spatial : Shape
-
Filters this shape, returning only the non-spatial dimensions as a new Shape object.
See also: Shape.batch, Shape.spatial, Shape.instance, Shape.channel, Shape.dual, Shape.non_batch, Shape.non_spatial, Shape.non_instance, Shape.non_channel, Shape.non_dual.
Returns
New Shape object.
Expand source code
@property def non_spatial(self) -> 'Shape': """ Filters this shape, returning only the non-spatial dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t != SPATIAL_DIM]]
prop non_uniform : Shape
-
Returns only the non-uniform dimensions of this shape, i.e. the dimensions whose size varies along another dimension.
See Also: Shape.non_uniform_shape.
Expand source code
@property def non_uniform(self) -> 'Shape': """ Returns only the non-uniform dimensions of this shape, i.e. the dimensions whose size varies along another dimension. See Also `Shape.non_uniform_shape` """ from . import Tensor indices = [i for i, size in enumerate(self.sizes) if isinstance(size, Tensor) and size.rank > 0] return self[indices]
prop non_uniform_shape
-
Returns the stack dimensions of non-uniform shapes. This is equal to Shape.shape excluding the dims dimension.
For example, when stacking (x=3) and (x=2) along vector, the resulting shape is non-uniform. Its non_uniform_shape is vector and its non_uniform dimension is x.
See Also: Shape.non_uniform.
Expand source code
@property def non_uniform_shape(self): """ Returns the stack dimensions of non-uniform shapes. This is equal to `Shape.shape` excluding the `dims` dimension. For example, when stacking `(x=3)` and `(x=2)` along `vector`, the resulting shape is non_uniform. Its `non_uniform_shape` is `vector` and its `non_uniform` dimension is `x`. See Also `Shape.non_uniform`. """ from . import Tensor shape = EMPTY_SHAPE for size in self.sizes: if isinstance(size, Tensor): shape = shape & size.shape return shape
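A minimal sketch of the docstring's stacking example (dimension names are illustrative, assuming a standard phiml installation):
>>> from phiml import math
>>> from phiml.math import spatial, channel
>>> t = math.stack([math.zeros(spatial(x=3)), math.zeros(spatial(x=2))], channel('vector'))
>>> t.shape.is_uniform         # False: the size of x varies along vector
>>> t.shape.non_uniform_shape  # the stack dim, vector
>>> t.shape.non_uniform        # the x dim, whose size varies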
prop primal : Shape
-
Filters this shape, returning only the primal dimensions, i.e. spatial, instance and channel dimensions, as a new Shape object.
See also: Shape.batch, Shape.spatial, Shape.instance, Shape.channel, Shape.dual, Shape.non_batch, Shape.non_spatial, Shape.non_instance, Shape.non_channel, Shape.non_dual.
Returns
New Shape object.
Expand source code
@property def primal(self) -> 'Shape': """ Filters this shape, returning only the primal dimensions, i.e. spatial, instance and channel dimensions, as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t not in [DUAL_DIM, BATCH_DIM]]]
prop rank : int
-
Returns the number of dimensions. Equal to len(shape).
See Shape.is_empty, Shape.batch_rank, Shape.spatial_rank, Shape.channel_rank.
Expand source code
@property def rank(self) -> int: """ Returns the number of dimensions. Equal to `len(shape)`. See `Shape.is_empty`, `Shape.batch_rank`, `Shape.spatial_rank`, `Shape.channel_rank`. """ return len(self.sizes)
prop reversed
-
Returns a copy of this Shape with the dimension order reversed.
Expand source code
@property def reversed(self): return Shape(tuple(reversed(self.sizes)), tuple(reversed(self.names)), tuple(reversed(self.types)), tuple(reversed(self.item_names)))
prop shape : Shape
-
Higher-order Shape. The returned shape will always contain the channel dimension dims with a size equal to the Shape.rank of this shape.
For uniform shapes, Shape.shape will only contain the dimension dims, but the shapes of non-uniform shapes may contain additional dimensions.
See Also: Shape.is_uniform.
Returns
Shape.
Expand source code
@property def shape(self) -> 'Shape': """ Higher-order `Shape`. The returned shape will always contain the channel dimension `dims` with a size equal to the `Shape.rank` of this shape. For uniform shapes, `Shape.shape` will only contain the dimension `dims` but the shapes of [non-uniform shapes](https://tum-pbs.github.io/PhiML/Non_Uniform.html) may contain additional dimensions. See Also: `Shape.is_uniform`. Returns: `Shape`. """ from . import Tensor shape = Shape((self.rank,), ('dims',), (CHANNEL_DIM,), (self.names,)) for size in self.sizes: if isinstance(size, Tensor): shape = shape & size.shape return shape
prop singleton : Shape
-
Filters this shape, returning only singleton dimensions as a new Shape object. Dimensions are singleton if their size is exactly 1.
Returns
New Shape object.
Expand source code
@property def singleton(self) -> 'Shape': """ Filters this shape, returning only singleton dimensions as a new `Shape` object. Dimensions are singleton if their size is exactly `1`. Returns: New `Shape` object """ return self[[i for i, s in enumerate(self.sizes) if _size_equal(s, 1)]]
prop size
-
Only for Shapes containing exactly one dimension. Returns the size of that dimension.
See Also: Shape.sizes, Shape.get_size().
Expand source code
@property def size(self): """ Only for Shapes containing exactly one single dimension. Returns the size of the dimension. See Also: `Shape.sizes`, `Shape.get_size()`. """ assert self.rank == 1, f"Shape.size is only defined for shapes of rank 1 but has dims {self}" return self.sizes[0]
var sizes
-
Ordered dimension sizes as tuple. The size of a dimension can be an int or a Tensor for non-uniform shapes.
See Also: Shape.get_size(), Shape.size, Shape.shape.
prop spatial : Shape
-
Filters this shape, returning only the spatial dimensions as a new Shape object.
See also: Shape.batch, Shape.spatial, Shape.instance, Shape.channel, Shape.dual, Shape.non_batch, Shape.non_spatial, Shape.non_instance, Shape.non_channel, Shape.non_dual.
Returns
New Shape object.
Expand source code
@property def spatial(self) -> 'Shape': """ Filters this shape, returning only the spatial dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t == SPATIAL_DIM]]
prop spatial_rank : int
-
Number of spatial dimensions
Expand source code
@property def spatial_rank(self) -> int: """ Number of spatial dimensions """ return sum([1 for ty in self.types if ty == SPATIAL_DIM])
prop transposed
-
Returns a copy of this Shape with dual dimensions swapped with their primal counterparts (channel, instance or spatial dimensions, whichever of these types is present).
Expand source code
@property def transposed(self): if self.channel_rank > 0: replacement = {DUAL_DIM: CHANNEL_DIM, CHANNEL_DIM: DUAL_DIM} elif self.instance_rank > 0: replacement = {DUAL_DIM: INSTANCE_DIM, INSTANCE_DIM: DUAL_DIM} elif self.spatial_rank > 0: replacement = {DUAL_DIM: SPATIAL_DIM, SPATIAL_DIM: DUAL_DIM} elif self.dual_rank > 0: warnings.warn(f"Transposing {self} is ill-defined because there are no primal dims. Replacing dual dims by channel dims.", SyntaxWarning) replacement = {DUAL_DIM: CHANNEL_DIM} else: raise ValueError(f"Cannot transpose shape {self} as it has no channel or instance or spatial dims.") return self._with_types(tuple([replacement.get(t, t) for t in self.types]))
prop type : str
-
Only for Shapes containing exactly one dimension. Returns the type of that dimension.
See Also: Shape.get_type().
Expand source code
@property def type(self) -> str: """ Only for Shapes containing exactly one single dimension. Returns the type of the dimension. See Also: `Shape.get_type()`. """ assert self.rank == 1, "Shape.type is only defined for shapes of rank 1." return self.types[0]
prop undefined
-
Filters this shape, returning only the dimensions whose size is undefined (None), as a new Shape object.
Expand source code
@property def undefined(self): return self[[i for i, size in enumerate(self.sizes) if size is None]]
prop untyped_dict
-
Returns
dict containing dimension names as keys. The values are either the item names as tuple if available, otherwise the size.
Expand source code
@property def untyped_dict(self): """ Returns: `dict` containing dimension names as keys. The values are either the item names as `tuple` if available, otherwise the size. """ return {name: self.get_item_names(i) or self.get_size(i) for i, name in enumerate(self.names)}
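For illustration, a sketch showing that item names take precedence over sizes (names are made up):
>>> from phiml.math import spatial, channel
>>> (spatial(x=4) & channel(vector='x,y')).untyped_dict
{'x': 4, 'vector': ('x', 'y')}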
prop volume : Optional[int]
-
Returns the total number of values contained in a tensor of this shape. This is the product of all dimension sizes.
Returns
Volume as int or Tensor, or None if the shape is not Shape.well_defined.
Expand source code
@property def volume(self) -> Union[int, None]: """ Returns the total number of values contained in a tensor of this shape. This is the product of all dimension sizes. Returns: volume as `int` or `Tensor` or `None` if the shape is not `Shape.well_defined` """ result = 1 for size in self.sizes: if size is None: return None result *= size from ._tensors import Tensor if not isinstance(result, Tensor): return result result /= self.non_uniform_shape.volume # We summed up the items -> undo multiplication return int(result.sum)
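As a quick sketch, the volume of a uniform shape is simply the product of its sizes:
>>> from phiml.math import batch, spatial
>>> (batch(b=2) & spatial(x=4, y=3)).volume
24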
prop well_defined
-
Returns True if no dimension size is None.
Shapes with undefined sizes may be used in tensor(), wrap(), stack() or concat().
To create an undefined size, call a constructor function (batch(), spatial(), channel(), instance()) with positional str arguments, e.g. spatial('x').
Expand source code
@property def well_defined(self): """ Returns `True` if no dimension size is `None`. Shapes with undefined sizes may be used in `phiml.math.tensor()`, `phiml.math.wrap()`, `phiml.math.stack()` or `phiml.math.concat()`. To create an undefined size, call a constructor function (`batch()`, `spatial()`, `channel()`, `instance()`) with positional `str` arguments, e.g. `spatial('x')`. """ for size in self.sizes: if size is None: return False return True
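Sketch of defined vs. undefined sizes (assuming a standard phiml installation):
>>> from phiml.math import spatial
>>> spatial('x,y').well_defined  # positional str args leave sizes undefined
False
>>> spatial(x=8, y=8).well_defined
True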
Methods
def after_gather(self, selection: dict) ‑> phiml.math._shape.Shape
def after_pad(self, widths: dict) ‑> phiml.math._shape.Shape
def are_adjacent(self, dims: Union[str, tuple, list, set, ForwardRef('Shape')])
def as_batch(self)
-
Returns a copy of this Shape with all dimensions of type batch.
def as_channel(self)
-
Returns a copy of this Shape with all dimensions of type channel.
def as_dual(self)
-
Returns a copy of this Shape with all dimensions of type dual.
def as_instance(self)
-
Returns a copy of this Shape with all dimensions of type instance.
def as_spatial(self)
-
Returns a copy of this Shape with all dimensions of type spatial.
def as_type(self, new_type: Callable)
def assert_all_sizes_defined(self)
def first_index(self, names=False)
def flipped(self, dims: Union[List[str], Tuple[str]])
def get_dim_type(self, dim: Union[str, ForwardRef('Shape')]) ‑> Callable
-
Args
dim - Dimension, either as name str or single-dimension Shape.
Returns
Dimension type, one of batch(), spatial(), instance(), channel().
def get_item_names(self, dim: Union[str, ForwardRef('Shape'), int], fallback_spatial=False) ‑> Optional[tuple]
-
Args
fallback_spatial - If True and no item names are defined for dim and dim is a channel dimension, the spatial dimension names are interpreted as item names along dim in the order they are listed in this Shape.
dim - Dimension, either as int index, str name or single-dimension Shape.
Returns
Item names as tuple or None if not defined.
def get_size(self, dim: Union[str, ForwardRef('Shape'), int], default=None)
-
See Also: Shape.get_sizes(), Shape.size.
Args
dim - Dimension, either as name str or single-dimension Shape or index int.
default - (Optional) If the dim does not exist, return this value instead of raising an error.
Returns
Size associated with dim as int or Tensor.
def get_sizes(self, dims: Union[tuple, list, ForwardRef('Shape')]) ‑> tuple
def get_type(self, dim: Union[str, ForwardRef('Shape')]) ‑> str
def get_types(self, dims: Union[tuple, list, ForwardRef('Shape')]) ‑> tuple
def index(self, dim: Union[str, ForwardRef('Shape'), None]) ‑> int
-
Finds the index of the dimension within this Shape.
See Also: Shape.indices().
Args
dim - Dimension name or single-dimension Shape.
Returns
Index as int.
def indices(self, dims: Union[tuple, list, ForwardRef('Shape')]) ‑> Tuple[int]
-
Finds the indices of the given dimensions within this Shape.
See Also: Shape.index().
Args
dims - Sequence of dimensions as tuple, list or Shape.
Returns
Indices as tuple[int].
def is_compatible(self, *others: Shape)
-
Checks if this shape and the others can be broadcast.
Args
others - Other shapes.
Returns
True only if all shapes are compatible.
def isdisjoint(self, other: Union[ForwardRef('Shape'), tuple, list, str])
-
Shapes are disjoint if all dimension names of one shape do not occur in the other shape.
def mask(self, names: Union[tuple, list, set, ForwardRef('Shape')])
-
Returns a binary sequence corresponding to the names of this Shape. A value of 1 means that the corresponding dimension of this Shape is contained in names.
Args
names - Dimensions as tuple, list, set or Shape.
Returns
Binary sequence.
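For illustration, a hypothetical sketch of mask; the exact sequence type of the result is an assumption here:
>>> from phiml.math import batch, spatial
>>> s = batch(b=2) & spatial(x=4, y=3)
>>> s.mask(['x'])  # one entry per dim of s, e.g. (0, 1, 0)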
def meshgrid(self, names=False)
-
Builds a sequence containing all multi-indices within a tensor of this shape. All indices are returned as dict mapping dimension names to int indices.
The corresponding values can be retrieved from Tensors and other Sliceables using tensor[index].
This function currently only supports uniform tensors.
Args
names - If True, replace indices by their item names if available.
Returns
dict iterator.
def only(self, dims: DimFilter, reorder=False)
-
Builds a new shape from this one that only contains the given dimensions. Dimensions in dims that are not part of this Shape are ignored.
The complementary operation is Shape.without().
Args
dims - Comma-separated dimension names (str), instance of dimensions (tuple, list, Shape) or filter function.
reorder - If False, keeps the dimension order as defined in this shape. If True, reorders the dimensions of this shape to match the order of dims.
Returns
Shape containing only the specified dimensions.
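A short usage sketch (dimension names are made up):
>>> from phiml.math import batch, spatial
>>> s = batch(b=2) & spatial(x=4, y=3)
>>> s.only('x,y')    # keeps x and y
>>> s.only(spatial)  # dim-type filter functions work as well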
def prepare_gather(self, dim: str, selection: Union[slice, int, ForwardRef('Shape'), str, tuple, list]) ‑> Union[slice, List[int]]
-
Parses a slice object for a specific dimension.
Args
dim - Name of dimension to slice.
selection - Slice object.
Returns
Standardized selection as slice or list of int indices.
def prepare_renaming_gather(self, dim: str, selection: Union[slice, int, ForwardRef('Shape'), str, tuple, list])
def replace(self, dims: Union[ForwardRef('Shape'), tuple, list, str], new: Shape, keep_item_names=True, replace_item_names: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None] = None) ‑> phiml.math._shape.Shape
-
Returns a copy of self with dims replaced by new. Dimensions that are not present in self are ignored.
The dimension order is preserved.
Args
dims - Dimensions to replace.
new - New dimensions, must have same length as dims. If a Shape is given, replaces the dimension types and item names as well.
keep_item_names - Keeps existing item names for dimensions where new does not specify item names if the new dimension has the same size.
replace_item_names - For which dims the item names should be replaced as well.
Returns
Shape with same rank and dimension order as self.
def resolve_index(self, index: Dict[str, Union[slice, int, ForwardRef('Shape'), str, tuple, list]]) ‑> Dict[str, Union[slice, int, tuple, list]]
-
Replaces item names by the corresponding indices.
Args
index
- n-dimensional index or slice.
Returns
Same index but without any reference to item names.
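Sketch: with item names defined, string indices are translated to positions:
>>> from phiml.math import channel
>>> channel(vector='x,y,z').resolve_index({'vector': 'y'})
{'vector': 1}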
def transpose(self, dims: Union[str, Sequence[+T_co], set, ForwardRef('Shape'), Callable, None])
def unstack(self, dim='dims') ‑> Tuple[phiml.math._shape.Shape]
-
Slices this Shape along a dimension. The dimension listing the sizes of the shape is referred to as 'dims'.
Non-uniform tensor shapes may be unstacked along other dimensions as well, see https://tum-pbs.github.io/PhiML/Non_Uniform.html
Args
dim - Dimension to unstack.
Returns
Slices of this shape.
def with_dim_size(self, dim: Union[str, ForwardRef('Shape')], size: Union[int, ForwardRef('math.Tensor'), str, tuple, list], keep_item_names=True)
def with_size(self, size: Union[int, Sequence[str]])
-
Only for single-dimension shapes. Returns a Shape representing this dimension but with a different size.
See Also: Shape.with_sizes().
Args
size - Replacement size for this dimension.
Returns
Single-dimension Shape with the new size.
def with_sizes(self, sizes: Union[Sequence[int], Sequence[Tuple[str, ...]], ForwardRef('Shape'), int], keep_item_names=True)
-
Returns a new Shape matching the dimension names and types of self but with different sizes.
See Also: Shape.with_size().
Args
sizes - One of:
tuple / list of same length as self containing replacement sizes or replacement item names.
Shape of any rank. Replaces sizes for dimensions shared by sizes and self.
int: new size for all dimensions.
keep_item_names - If False, forgets all item names. If True, keeps item names where the size does not change.
Returns
Shape with same names and types as self.
def without(self, dims: DimFilter) ‑> phiml.math._shape.Shape
-
Builds a new shape from this one that is missing all given dimensions. Dimensions in dims that are not part of this Shape are ignored.
The complementary operation is Shape.only().
Args
dims - Dimensions to exclude as str, tuple, list, Shape or filter function. Dimensions that are not included in this shape are ignored.
Returns
Shape without the specified dimensions.
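A short usage sketch, complementing the only() example above:
>>> from phiml.math import batch, spatial
>>> s = batch(b=2) & spatial(x=4, y=3)
>>> s.without('x')      # drops x, keeps b and y
>>> s.without(spatial)  # drops all spatial dims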
def without_sizes(self)
-
Returns
Shape with all sizes undefined (None).
class Solve (method: Optional[str] = 'auto', rel_tol: Union[float, phiml.math._tensors.Tensor] = None, abs_tol: Union[float, phiml.math._tensors.Tensor] = None, x0: Union[~X, Any] = None, max_iterations: Union[int, phiml.math._tensors.Tensor] = 1000, suppress: Union[tuple, list] = (), preprocess_y: Callable = None, preprocess_y_args: tuple = (), preconditioner: Optional[str] = None, rank_deficiency: int = None, gradient_solve: Optional[ForwardRef('Solve[Y, X]')] = None)
-
Specifies parameters and stopping criteria for solving a minimization problem or system of equations.
Expand source code
class Solve(Generic[X, Y]): """ Specifies parameters and stopping criteria for solving a minimization problem or system of equations. """ def __init__(self, method: Union[str, None] = 'auto', rel_tol: Union[float, Tensor] = None, abs_tol: Union[float, Tensor] = None, x0: Union[X, Any] = None, max_iterations: Union[int, Tensor] = 1000, suppress: Union[tuple, list] = (), preprocess_y: Callable = None, preprocess_y_args: tuple = (), preconditioner: Optional[str] = None, rank_deficiency: int = None, gradient_solve: Union['Solve[Y, X]', None] = None): method = method or 'auto' assert isinstance(method, str) self.method: str = method """ Optimization method to use. Available solvers depend on the solve function that is used to perform the solve. """ self.rel_tol: Tensor = math.to_float(wrap(rel_tol)) if rel_tol is not None else None """Relative tolerance for linear solves only, defaults to 1e-5 for singe precision solves and 1e-12 for double precision solves. This must be unset or `0` for minimization problems. For systems of equations *f(x)=y*, the final tolerance is `max(rel_tol * norm(y), abs_tol)`. """ self.abs_tol: Tensor = math.to_float(wrap(abs_tol)) if abs_tol is not None else None """ Absolut tolerance for optimization problems and linear solves. Defaults to 1e-5 for singe precision solves and 1e-12 for double precision solves. For systems of equations *f(x)=y*, the final tolerance is `max(rel_tol * norm(y), abs_tol)`. """ self.max_iterations: Tensor = math.to_int32(wrap(max_iterations)) """ Maximum number of iterations to perform before raising a `NotConverged` error is raised. """ self.x0 = x0 """ Initial guess for the method, of same type and dimensionality as the solve result. This property must be set to a value compatible with the solution `x` before running a method. """ self.preprocess_y: Callable = preprocess_y """ Function to be applied to the right-hand-side vector of an equation system before solving the system. This property is propagated to gradient solves by default. """ self.preprocess_y_args: tuple = preprocess_y_args assert all(issubclass(err, ConvergenceException) for err in suppress) self.suppress: tuple = tuple(suppress) """ Error types to suppress; `tuple` of `ConvergenceException` types. For these errors, the solve function will instead return the partial result without raising the error. """ self.preconditioner = preconditioner self.rank_deficiency: int = rank_deficiency """Rank deficiency of matrix or linear function. If not specified, will be determined for (implicit or explicit) matrix solves and assumed 0 for function-based solves.""" self._gradient_solve: Solve[Y, X] = gradient_solve self.id = str(uuid.uuid4()) # not altered by copy_with(), so that the lookup SolveTape[Solve] works after solve has been copied @property def gradient_solve(self) -> 'Solve[Y, X]': """ Parameters to use for the gradient pass when an implicit gradient is computed. If `None`, a duplicate of this `Solve` is created for the gradient solve. In any case, the gradient solve information will be stored in `gradient_solve.result`. 
""" if self._gradient_solve is None: self._gradient_solve = Solve(self.method, self.rel_tol, self.abs_tol, None, self.max_iterations, self.suppress, self.preprocess_y, self.preprocess_y_args) return self._gradient_solve def __repr__(self): return f"{self.method} with tolerance {self.rel_tol} (rel), {self.abs_tol} (abs), max_iterations={self.max_iterations}" + (" including preprocessing" if self.preprocess_y else "") def __eq__(self, other): if not isinstance(other, Solve): return False if self.method != other.method \ or not math.equal(self.abs_tol, other.abs_tol) \ or not math.equal(self.rel_tol, other.rel_tol) \ or (self.max_iterations != other.max_iterations).any \ or self.preprocess_y is not other.preprocess_y \ or self.suppress != other.suppress: return False return self.x0 == other.x0 def __variable_attrs__(self): return 'x0', 'rel_tol', 'abs_tol', 'max_iterations' def __value_attrs__(self): return self.__variable_attrs__() def with_defaults(self, mode: str): assert mode in ('solve', 'optimization') result = self if result.rel_tol is None: result = copy_with(result, rel_tol=_default_tolerance() if mode == 'solve' else wrap(0.)) if result.abs_tol is None: result = copy_with(result, abs_tol=_default_tolerance()) return result def with_preprocessing(self, preprocess_y: Callable, *args) -> 'Solve': """ Adds preprocessing to this `Solve` and all corresponding gradient solves. Args: preprocess_y: Preprocessing function. *args: Arguments for the preprocessing function. Returns: Copy of this `Solve` with given preprocessing. """ assert self.preprocess_y is None, f"preprocessing for linear solve '{self}' already set" gradient_solve = self._gradient_solve.with_preprocessing(preprocess_y, *args) if self._gradient_solve is not None else None return copy_with(self, preprocess_y=preprocess_y, preprocess_y_args=args, _gradient_solve=gradient_solve)
Ancestors
- typing.Generic
Instance variables
var abs_tol
-
Absolute tolerance for optimization problems and linear solves. Defaults to 1e-5 for single precision solves and 1e-12 for double precision solves. For systems of equations f(x)=y, the final tolerance is max(rel_tol * norm(y), abs_tol).
prop gradient_solve : Solve[Y, X]
-
Parameters to use for the gradient pass when an implicit gradient is computed. If None, a duplicate of this Solve is created for the gradient solve.
In any case, the gradient solve information will be stored in gradient_solve.result.
Expand source code
@property def gradient_solve(self) -> 'Solve[Y, X]': """ Parameters to use for the gradient pass when an implicit gradient is computed. If `None`, a duplicate of this `Solve` is created for the gradient solve. In any case, the gradient solve information will be stored in `gradient_solve.result`. """ if self._gradient_solve is None: self._gradient_solve = Solve(self.method, self.rel_tol, self.abs_tol, None, self.max_iterations, self.suppress, self.preprocess_y, self.preprocess_y_args) return self._gradient_solve
var max_iterations
-
Maximum number of iterations to perform before a NotConverged error is raised.
var method
-
Optimization method to use. Available solvers depend on the solve function that is used to perform the solve.
var preprocess_y
-
Function to be applied to the right-hand-side vector of an equation system before solving the system. This property is propagated to gradient solves by default.
var rank_deficiency
-
Rank deficiency of matrix or linear function. If not specified, will be determined for (implicit or explicit) matrix solves and assumed 0 for function-based solves.
var rel_tol
-
Relative tolerance for linear solves only. Defaults to 1e-5 for single precision solves and 1e-12 for double precision solves. This must be unset or 0 for minimization problems. For systems of equations f(x)=y, the final tolerance is max(rel_tol * norm(y), abs_tol).
var suppress
-
Error types to suppress; tuple of ConvergenceException types. For these errors, the solve function will instead return the partial result without raising the error.
var x0
-
Initial guess for the method, of same type and dimensionality as the solve result. This property must be set to a value compatible with the solution
x
before running a method.
Methods
def with_defaults(self, mode: str)
def with_preprocessing(self, preprocess_y: Callable, *args) ‑> phiml.math._optimize.Solve
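The following is a hypothetical end-to-end sketch, not from the original documentation; the linear function, tolerances and initial guess are chosen purely for illustration:
>>> from phiml import math
>>> from phiml.math import Solve, spatial
>>> @math.jit_compile_linear
>>> def scale(x):
>>>     return 2 * x  # trivially invertible linear function
>>> y = math.ones(spatial(x=8))
>>> solve = Solve('CG', rel_tol=1e-5, x0=math.zeros(spatial(x=8)))
>>> x = math.solve_linear(scale, y, solve)  # expect x ≈ 0.5 everywhere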
class SolveInfo
-
Stores information about the solution or trajectory of a solve.
When representing the full optimization trajectory, all tracked quantities will have an additional trajectory batch dimension.
Expand source code
class SolveInfo(Generic[X, Y]): """ Stores information about the solution or trajectory of a solve. When representing the full optimization trajectory, all tracked quantities will have an additional `trajectory` batch dimension. """ def __init__(self, solve: Solve, x: X, residual: Union[Y, None], iterations: Union[Tensor, None], function_evaluations: Union[Tensor, None], converged: Tensor, diverged: Tensor, method: str, msg: Tensor, solve_time: float): # tuple.__new__(SolveInfo, (x, residual, iterations, function_evaluations, converged, diverged)) self.solve: Solve[X, Y] = solve """ `Solve`, Parameters specified for the solve. """ self.x: X = x """ `Tensor` or `phiml.math.magic.PhiTreeNode`, solution estimate. """ self.residual: Y = residual """ `Tensor` or `phiml.math.magic.PhiTreeNode`, residual vector for systems of equations or function value for minimization problems. """ self.iterations: Tensor = iterations """ `Tensor`, number of performed iterations to reach this state. """ self.function_evaluations: Tensor = function_evaluations """ `Tensor`, how often the function (or its gradient function) was called. """ self.converged: Tensor = converged """ `Tensor`, whether the residual is within the specified tolerance. """ self.diverged: Tensor = diverged """ `Tensor`, whether the solve has diverged at this point. """ self.method = method """ `str`, which method and implementation that was used. """ if all_available(diverged, converged, iterations): _, res_tensors = disassemble_tree(residual, cache=False) msg_fun = partial(_default_solve_info_msg, solve=solve) msg = map_(msg_fun, msg, converged.trajectory[-1], diverged.trajectory[-1], iterations.trajectory[-1], method=method, residual=res_tensors[0], dims=converged.shape.without('trajectory')) self.msg = msg """ `str`, termination message """ self.solve_time = solve_time """ Time spent in Backend solve function (in seconds) """ def __repr__(self): return f"{self.method}: {self.converged.trajectory[-1].sum} converged, {self.diverged.trajectory[-1].sum} diverged" def snapshot(self, index): return SolveInfo(self.solve, self.x.trajectory[index], self.residual.trajectory[index], self.iterations.trajectory[index], self.function_evaluations.trajectory[index], self.converged.trajectory[index], self.diverged.trajectory[index], self.method, self.msg, self.solve_time) def convergence_check(self, only_warn: bool): if not all_available(self.diverged, self.converged): return if self.diverged.any: if Diverged not in self.solve.suppress: if only_warn: warnings.warn(self.msg, ConvergenceWarning) else: raise Diverged(self) if not self.converged.trajectory[-1].all: if NotConverged not in self.solve.suppress: if only_warn: warnings.warn(self.msg, ConvergenceWarning) else: raise NotConverged(self)
Ancestors
- typing.Generic
Instance variables
var converged
-
Tensor, whether the residual is within the specified tolerance.
var diverged
-
Tensor, whether the solve has diverged at this point.
var function_evaluations
-
Tensor, how often the function (or its gradient function) was called.
var iterations
-
Tensor, number of iterations performed to reach this state.
var method
-
str, which method and implementation was used.
var msg
-
str, termination message.
var residual
-
Tensor or PhiTreeNode, residual vector for systems of equations or function value for minimization problems.
var solve
-
Solve, parameters specified for the solve.
var solve_time
-
Time spent in the backend solve function (in seconds).
var x
-
Tensor or PhiTreeNode, solution estimate.
Methods
def convergence_check(self, only_warn: bool)
def snapshot(self, index)
class SolveTape (*solves: phiml.math._optimize.Solve, record_trajectories=False)
-
Used to record additional information about solves invoked via solve_linear(), solve_nonlinear() or minimize(). While a SolveTape is active, certain performance optimizations and algorithm implementations may be disabled.
To access a SolveInfo of a recorded solve, use
>>> solve = Solve(method, ...)
>>> with SolveTape() as solves:
>>>     x = math.solve_linear(f, y, solve)
>>> result: SolveInfo = solves[solve]  # get by Solve
>>> result: SolveInfo = solves[0]  # get by index
Args
*solves - (Optional) Select specific solves to be recorded. If none is given, records all solves that occur within the scope of this SolveTape.
record_trajectories - When enabled, the entries of SolveInfo will contain an additional batch dimension named trajectory.
class SolveTape: """ Used to record additional information about solves invoked via `solve_linear()`, `solve_nonlinear()` or `minimize()`. While a `SolveTape` is active, certain performance optimizations and algorithm implementations may be disabled. To access a `SolveInfo` of a recorded solve, use >>> solve = Solve(method, ...) >>> with SolveTape() as solves: >>> x = math.solve_linear(f, y, solve) >>> result: SolveInfo = solves[solve] # get by Solve >>> result: SolveInfo = solves[0] # get by index """ def __init__(self, *solves: Solve, record_trajectories=False): """ Args: *solves: (Optional) Select specific `solves` to be recorded. If none is given, records all solves that occur within the scope of this `SolveTape`. record_trajectories: When enabled, the entries of `SolveInfo` will contain an additional batch dimension named `trajectory`. """ self.record_only_ids = [s.id for s in solves] self.record_trajectories = record_trajectories self.solves: List[SolveInfo] = [] def should_record_trajectory_for(self, solve: Solve): if not self.record_trajectories: return False if not self.record_only_ids: return True return solve.id in self.record_only_ids def __enter__(self): _SOLVE_TAPES.append(self) return self def __exit__(self, exc_type, exc_val, exc_tb): _SOLVE_TAPES.remove(self) def _add(self, solve: Solve, trj: bool, result: SolveInfo): if any(s.solve.id == solve.id for s in self.solves): warnings.warn("SolveTape contains two results for the same solve settings. SolveTape[solve] will return the first solve result.", RuntimeWarning) if self.record_only_ids and solve.id not in self.record_only_ids: return # this solve should not be recorded if self.record_trajectories: assert trj, "Solve did not record a trajectory." self.solves.append(result) elif trj: self.solves.append(result.snapshot(-1)) else: self.solves.append(result) def __getitem__(self, item) -> SolveInfo: if isinstance(item, int): return self.solves[item] else: assert isinstance(item, Solve) solves = [s for s in self.solves if s.solve.id == item.id] if len(solves) == 0: raise KeyError(f"No solve recorded with key '{item}'.") assert len(solves) == 1 return solves[0] def __iter__(self): return iter(self.solves) def __len__(self): return len(self.solves)
Methods
def should_record_trajectory_for(self, solve: phiml.math._optimize.Solve)
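As a follow-up sketch (continuing the hypothetical solve example above), recorded SolveInfo fields can be inspected after the solve:
>>> from phiml.math import SolveTape
>>> with SolveTape() as solves:
>>>     x = math.solve_linear(scale, y, solve)
>>> info = solves[solve]
>>> info.iterations, info.converged, info.solve_time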
class Tensor
-
Abstract base class to represent structured data of one data type. This class replaces the native tensor classes numpy.ndarray, torch.Tensor, tensorflow.Tensor or jax.numpy.ndarray as the main data container in Φ-ML.
Tensor instances are different from native tensors in two important ways:
- The dimensions of Tensors have names and types.
- Tensors can have non-uniform shapes, meaning that the size of dimensions can vary along other dimensions.
To check whether a value is a tensor, use isinstance(value, Tensor).
To construct a Tensor, use tensor(), wrap() or one of the basic tensor creation functions, see https://tum-pbs.github.io/PhiML/Tensors.html .
numpy.ndarray
, do not edit the underlying data structure.Expand source code
class Tensor: """ Abstract base class to represent structured data of one data type. This class replaces the native tensor classes `numpy.ndarray`, `torch.Tensor`, `tensorflow.Tensor` or `jax.numpy.ndarray` as the main data container in Φ-ML. `Tensor` instances are different from native tensors in two important ways: * The dimensions of Tensors have *names* and *types*. * Tensors can have non-uniform shapes, meaning that the size of dimensions can vary along other dimensions. To check whether a value is a tensor, use `isinstance(value, Tensor)`. To construct a Tensor, use `phiml.math.tensor()`, `phiml.math.wrap()` or one of the basic tensor creation functions, see https://tum-pbs.github.io/PhiML/Tensors.html . Tensors are not editable. When backed by an editable native tensor, e.g. a `numpy.ndarray`, do not edit the underlying data structure. """ def __init__(self): if DEBUG_CHECKS: self._init_stack = traceback.extract_stack() def native(self, order: Union[str, tuple, list, Shape] = None, force_expand=True, to_numpy=False): """ Returns a native tensor object with the dimensions ordered according to `order`. Transposes the underlying tensor to match the name order and adds singleton dimensions for new dimension names. If a dimension of the tensor is not listed in `order`, a `ValueError` is raised. Additionally, groups of dimensions can be specified to pack dims, see `phiml.math.reshaped_native()`. Args: order: (Optional) Order of dimension names as comma-separated string, list or `Shape`. force_expand: If `False`, dimensions along which values are guaranteed to be constant will not be expanded to their true size but returned as singleton dimensions. to_numpy: Whether to convert the tensor to a NumPy `ndarray`. Returns: Native tensor representation, such as PyTorch tensor or NumPy array. Raises: ValueError if the tensor cannot be transposed to match target_shape """ if isinstance(order, (tuple, list)): return reshaped_native(self, order, force_expand=force_expand, to_numpy=to_numpy) elif order is None: assert self.rank <= 1, f"When calling Tensor.native() or Tensor.numpy(), the dimension order must be specified for Tensors with more than one dimension, e.g. '{','.join(self._shape.names)}'. The listed default dimension order can vary depending on the chosen backend. Consider using math.reshaped_native(Tensor) instead." order = self._shape.names else: order = parse_dim_order(order) native = self._transposed_native(order, force_expand) return choose_backend(native).numpy(native) if to_numpy else native def _transposed_native(self, order: Sequence[str], force_expand: bool): raise NotImplementedError(self.__class__) def numpy(self, order: Union[str, tuple, list, Shape] = None, force_expand=True) -> np.ndarray: """ Converts this tensor to a `numpy.ndarray` with dimensions ordered according to `order`. *Note*: Using this function breaks the autograd chain. The returned tensor is not differentiable. To get a differentiable tensor, use `Tensor.native()` instead. Transposes the underlying tensor to match the name order and adds singleton dimensions for new dimension names. If a dimension of the tensor is not listed in `order`, a `ValueError` is raised. If this `Tensor` is backed by a NumPy array, a reference to this array may be returned. See Also: `phiml.math.numpy()` Args: order: (Optional) Order of dimension names as comma-separated string, list or `Shape`. 
Returns: NumPy representation Raises: ValueError if the tensor cannot be transposed to match target_shape """ return self.native(order, force_expand, to_numpy=True) def __array__(self, dtype=None): # NumPy conversion if self.rank > 1: warnings.warn("Automatic conversion of Φ-ML tensors to NumPy can cause problems because the dimension order is not guaranteed.", SyntaxWarning, stacklevel=3) return self.numpy(self._shape) def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): # NumPy interface if len(inputs) != 2: return NotImplemented if ufunc.__name__ == 'multiply': if inputs[0] is self: return self._op2(inputs[1], lambda x, y: x * y, lambda x, y: choose_backend(x, y).mul(x, y), 'mul', '*') else: return self._op2(inputs[0], lambda x, y: y * x, lambda x, y: choose_backend(x, y).mul(y, x), 'rmul', '*') if ufunc.__name__ == 'add': if inputs[0] is self: return self._op2(inputs[1], lambda x, y: x + y, lambda x, y: choose_backend(x, y).add(x, y), 'add', '+') else: return self._op2(inputs[0], lambda x, y: y + x, lambda x, y: choose_backend(x, y).add(y, x), 'radd', '+') if ufunc.__name__ == 'subtract': if inputs[0] is self: return self._op2(inputs[1], lambda x, y: x - y, lambda x, y: choose_backend(x, y).sub(x, y), 'add', '-') else: return self._op2(inputs[0], lambda x, y: y - x, lambda x, y: choose_backend(x, y).sub(y, x), 'rsub', '-') if ufunc.__name__ in ['divide', 'true_divide']: if inputs[0] is self: return self._op2(inputs[1], lambda x, y: x / y, lambda x, y: choose_backend(x, y).div(x, y), 'true_divide', '/') else: return self._op2(inputs[0], lambda x, y: y / x, lambda x, y: choose_backend(x, y).div(y, x), 'r_true_divide', '/') if ufunc.__name__ == 'floor_divide': if inputs[0] is self: return self._op2(inputs[1], lambda x, y: x // y, lambda x, y: choose_backend(x, y).floordiv(x, y), 'floor_divide', '//') else: return self._op2(inputs[0], lambda x, y: y // x, lambda x, y: choose_backend(x, y).floordiv(y, x), 'r_floor_divide', '//') if ufunc.__name__ == 'remainder': if inputs[0] is self: return self._op2(inputs[1], lambda x, y: x % y, lambda x, y: choose_backend(x, y).mod(x, y), 'remainder', '%') else: return self._op2(inputs[0], lambda x, y: y % x, lambda x, y: choose_backend(x, y).mod(y, x), 'r_remainder', '%') if ufunc.__name__ == 'power': if inputs[0] is self: return self._op2(inputs[1], lambda x, y: x ** y, lambda x, y: choose_backend(x, y).pow(x, y), 'power', '**') else: return self._op2(inputs[0], lambda x, y: y ** x, lambda x, y: choose_backend(x, y).pow(y, x), 'r_power', '**') if ufunc.__name__ == 'equal': return self.__eq__(inputs[1] if self is inputs[0] else inputs[0]) if ufunc.__name__ == 'not_equal': return self.__ne__(inputs[1] if self is inputs[0] else inputs[0]) if ufunc.__name__ == 'greater': if inputs[0] is self: return self._op2(inputs[1], lambda x, y: x > y, lambda x, y: choose_backend(x, y).greater_than(x, y), 'greater', '>') else: return self._op2(inputs[0], lambda x, y: y > x, lambda x, y: choose_backend(x, y).greater_than(y, x), 'r_greater', '>') if ufunc.__name__ == 'greater_equal': if inputs[0] is self: return self._op2(inputs[1], lambda x, y: x >= y, lambda x, y: choose_backend(x, y).greater_or_equal(x, y), 'greater_equal', '>=') else: return self._op2(inputs[0], lambda x, y: y >= x, lambda x, y: choose_backend(x, y).greater_or_equal(y, x), 'r_greater_equal', '>=') if ufunc.__name__ == 'less': if inputs[0] is self: return self._op2(inputs[1], lambda x, y: x < y, lambda x, y: choose_backend(x, y).greater_than(y, x), 'less', '<') else: return 
self._op2(inputs[0], lambda x, y: y < x, lambda x, y: choose_backend(x, y).greater_than(x, y), 'r_less', '<') if ufunc.__name__ == 'less_equal': if inputs[0] is self: return self._op2(inputs[1], lambda x, y: x <= y, lambda x, y: choose_backend(x, y).greater_or_equal(y, x), 'less_equal', '<=') else: return self._op2(inputs[0], lambda x, y: y <= x, lambda x, y: choose_backend(x, y).greater_or_equal(x, y), 'r_less_equal', '<=') if ufunc.__name__ == 'left_shift': if inputs[0] is self: return self._op2(inputs[1], lambda x, y: x << y, lambda x, y: choose_backend(x, y).shift_bits_left(x, y), 'left_shift', '<<') else: return self._op2(inputs[0], lambda x, y: y << x, lambda x, y: choose_backend(x, y).shift_bits_left(y, x), 'r_left_shift', '<<') if ufunc.__name__ == 'right_shift': if inputs[0] is self: return self._op2(inputs[1], lambda x, y: x >> y, lambda x, y: choose_backend(x, y).shift_bits_right(x, y), 'right_shift', '>>') else: return self._op2(inputs[0], lambda x, y: y >> x, lambda x, y: choose_backend(x, y).shift_bits_right(y, x), 'r_right_shift', '>>') raise NotImplementedError(f"NumPy function '{ufunc.__name__}' is not compatible with Φ-ML tensors.") @property def dtype(self) -> DType: """ Data type of the elements of this `Tensor`. """ raise NotImplementedError(self.__class__) @property def shape(self) -> Shape: """ The `Shape` lists the dimensions with their sizes, names and types. """ raise NotImplementedError(self.__class__) @property def backend(self) -> Backend: from ._ops import choose_backend_t return choose_backend_t(self) default_backend = backend def _with_shape_replaced(self, new_shape: Shape): raise NotImplementedError(self.__class__) def _with_natives_replaced(self, natives: list): """ Replaces all n _natives() of this Tensor with the first n elements of the list and removes them from the list. """ raise NotImplementedError(self.__class__) @property def rank(self) -> int: """ Number of explicit dimensions of this `Tensor`. Equal to `tensor.shape.rank`. This replaces [`numpy.ndarray.ndim`](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.ndim.html) / [`torch.Tensor.dim`](https://pytorch.org/docs/master/generated/torch.Tensor.dim.html) / [`tf.rank()`](https://www.tensorflow.org/api_docs/python/tf/rank) / [`jax.numpy.ndim()`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.ndim.html). """ return self.shape.rank @property def _is_tracer(self) -> bool: """ Tracers store additional internal information. They should not be converted to `native()` in intermediate operations. TensorStack prevents performing the actual stack operation if one of its component tensors is special. """ raise NotImplementedError(self.__class__) def _to_dict(self): return cached(self)._to_dict() def __len__(self): return self.shape.volume if self.rank == 1 else NotImplemented def __bool__(self): assert self.rank == 0, f"Cannot convert tensor with non-empty shape {self.shape} to bool. Use tensor.any or tensor.all instead." from ._ops import all_ if not self.default_backend.supports(Backend.jit_compile): # NumPy return bool(self.native()) if self.rank == 0 else bool(all_(self).native()) else: # __bool__ does not work with TensorFlow tracing. # TensorFlow needs to see a tf.Tensor in loop conditions but won't allow bool() invocations. # However, this function must always return a Python bool. raise AssertionError("To evaluate the boolean value of a Tensor, use 'Tensor.all'.") @property def all(self): """ Whether all values of this `Tensor` are `True` as a native bool. 
""" from ._ops import all_, cast if self.rank == 0: return cast(self, DType(bool)).native() else: return all_(self, dim=self.shape).native() @property def any(self): """ Whether this `Tensor` contains a `True` value as a native bool. """ from ._ops import any_, cast if self.rank == 0: return cast(self, DType(bool)).native() else: return any_(self, dim=self.shape).native() @property def mean(self): """ Mean value of this `Tensor` as a native scalar. """ from ._ops import mean return mean(self, dim=self.shape).native() @property def finite_mean(self): """ Mean value of all finite values in this `Tensor` as a native scalar. """ from ._ops import finite_mean return finite_mean(self, dim=self.shape).native() @property def std(self): """ Standard deviation of this `Tensor` as a native scalar. """ from ._ops import std return std(self, dim=self.shape).native() @property def sum(self): """ Sum of all values of this `Tensor` as a native scalar. """ from ._ops import sum_ return sum_(self, dim=self.shape).native() @property def finite_sum(self): """ Sum of all finite values of this `Tensor` as a native scalar. """ from ._ops import finite_sum return finite_sum(self, dim=self.shape).native() @property def min(self): """ Minimum value of this `Tensor` as a native scalar. """ from ._ops import min_ return min_(self, dim=self.shape).native() @property def finite_min(self): """ Minimum finite value of this `Tensor` as a native scalar. """ from ._ops import finite_min return finite_min(self, dim=self.shape).native() @property def max(self): """ Maximum value of this `Tensor` as a native scalar. """ from ._ops import max_ return max_(self, dim=self.shape).native() @property def finite_max(self): """ Maximum finite value of this `Tensor` as a native scalar. """ from ._ops import finite_max return finite_max(self, dim=self.shape).native() @property def real(self) -> 'Tensor': """ Returns the real part of this tensor. See Also: `phiml.math.real()` """ from ._ops import real return real(self) @property def imag(self) -> 'Tensor': """ Returns the imaginary part of this tensor. If this tensor does not store complex numbers, returns a zero tensor with the same shape and dtype as this tensor. See Also: `phiml.math.imag()` """ from ._ops import imag return imag(self) @property def available(self) -> bool: """ A tensor is available if it stores concrete values and these can currently be read. Tracers used inside jit compilation are typically not available. See Also: `phiml.math.jit_compile()`. """ if self._is_tracer: return False natives = self._natives() natives_available = [choose_backend(native).is_available(native) for native in natives] return all(natives_available) @property def device(self) -> Union[ComputeDevice, None]: """ Returns the `ComputeDevice` that this tensor is allocated on. The device belongs to this tensor's `default_backend`. See Also: `Tensor.default_backend`. 
""" natives = self._natives() if not natives: return None return self.default_backend.get_device(natives[0]) def __int__(self): return int(self.native()) if self.shape.volume == 1 else NotImplemented def __float__(self): return float(self.native()) if self.shape.volume == 1 else NotImplemented def __complex__(self): return complex(self.native()) if self.shape.volume == 1 else NotImplemented def __index__(self): assert self.shape.volume == 1, f"Only scalar tensors can be converted to index but has shape {self.shape}" assert self.dtype.kind == int, f"Only int tensors can be converted to index but dtype is {self.dtype}" return int(self.native()) def __contains__(self, item): if isinstance(item, Shape): return item in self.shape elif isinstance(item, BoundDim): return item.name in self.shape elif isinstance(item, _BoundDims): return item.dims in self.shape elif isinstance(item, str): assert self.dtype.kind != object, "str in Tensor not allowed for object-type Tensors" return item in self.shape raise ValueError(f"'dim in Tensor' requires dim to be a Shape or str but got {item}") def __repr__(self): return format_tensor(self, PrintOptions()) def _repr_pretty_(self, printer, cycle): printer.text(format_tensor(self, PrintOptions(colors=DEFAULT_COLORS))) def print(self, layout='full', float_format=None, threshold=8, include_shape=None, include_dtype=None): print(format_tensor(self, PrintOptions(layout=layout, float_format=float_format, threshold=threshold, colors=DEFAULT_COLORS, include_shape=include_shape, include_dtype=include_dtype))) def __format__(self, format_spec: str): if BROADCAST_FORMATTER.values is not None: return BROADCAST_FORMATTER.register_formatted(self, format_spec) specs = format_spec.split(':') layout_ = 'auto' for possible_layout in ['summary', 'full', 'row', 'numpy']: if possible_layout in specs: assert layout_ == 'auto', f"Two layout identifiers encountered in '{format_spec}'" layout_ = possible_layout include_shape = 'shape' in specs or (False if 'no-shape' in specs else None) include_dtype = 'dtype' in specs or (False if 'no-dtype' in specs else None) color = 'color' in specs or (False if 'no-color' in specs else None) threshold = 8 float_format = None for spec in specs: if spec.startswith('threshold='): threshold = int(spec[len('threshold='):]) elif '.' 
in spec: float_format = spec result = format_tensor(self, PrintOptions(layout_, float_format, threshold, color, include_shape, include_dtype)) return result def __getitem__(self, item) -> 'Tensor': if isinstance(item, Tensor): if item.dtype.kind == bool: from ._ops import boolean_mask return boolean_mask(self, item.shape.non_batch or item.shape, item) elif item.dtype.kind == int: from ._ops import gather return gather(self, item) else: raise AssertionError(f"Index tensor must be of dtype int (gather) or bool (boolean_mask) but got {item}") item = slicing_dict(self, item) selections = {} sliced = self for dim, selection in item.items(): if dim not in self.shape: continue selection, new_dim = self.shape.prepare_renaming_gather(dim, selection) # Either handle slicing directly or add it to the dict if isinstance(selection, (tuple, list)): result = [sliced[{dim: i}] for i in selection] stack_dim = sliced.shape[dim].after_gather({dim: selection}) sliced = stack(result, stack_dim) if new_dim is not None: sliced = rename_dims(sliced, dim, new_dim) elif isinstance(selection, Tensor) and selection.dtype.kind == bool: from ._ops import boolean_mask sliced = boolean_mask(sliced, dim, selection) elif isinstance(selection, Tensor) and selection.dtype.kind == int: from ._ops import gather sliced = gather(sliced, selection, dims=dim) else: selections[dim] = selection return sliced._getitem(selections) if selections else sliced def _getitem(self, selection: dict) -> 'Tensor': """ Slice the tensor along specified dimensions. Args: selection: dim_name: str -> Union[int, slice] selection: dict: Returns: """ raise NotImplementedError() def __setitem__(self, key, value): raise SyntaxError("Tensors are not editable to preserve the autodiff chain. This feature might be added in the future. To update part of a tensor, use math.where() or math.scatter()") def __unstack__(self, dims: Tuple[str, ...]) -> Tuple['Tensor', ...]: # from phiml.math.magic.Sliceable if len(dims) == 1: return self._unstack(dims[0]) else: return NotImplemented def _unstack(self, dim: str): """ Splits this tensor along the specified dimension. The returned tensors have the same dimensions as this tensor save the unstacked dimension. Raises an error if the dimension is not part of the `Shape` of this `Tensor`. 
See Also: `TensorDim.unstack()` Args: dim: name of dimension to unstack Returns: tuple of tensors """ raise NotImplementedError() @staticmethod def __stack__(values: tuple, dim: Shape, **_kwargs) -> 'Tensor': if any(isinstance(v, Layout) for v in values): layout_ = [v for v in values if isinstance(v, Layout)][0] return layout_.__stack__(values, dim, **_kwargs) from ._ops import stack_tensors return stack_tensors(values, dim) def __expand__(self, dims: Shape, **kwargs) -> 'Tensor': return expand_tensor(self, dims) @staticmethod def __concat__(values: tuple, dim: str, **kwargs) -> 'Tensor': from ._ops import concat_tensor return concat_tensor(values, dim) def __replace_dims__(self, dims: Tuple[str, ...], new_dims: Shape, **kwargs) -> 'Tensor': return self._with_shape_replaced(rename_dims(self.shape, dims, new_dims)) def __unpack_dim__(self, dim: str, unpacked_dims: Shape, **kwargs) -> 'Tensor': if self.shape.is_uniform: native = self._transposed_native(self.shape.names, True) new_shape = self.shape.replace(dim, unpacked_dims) if not new_shape.well_defined: assert new_shape.undefined.rank <= 1, f"At most one dim can have an undefined size to be inferred during un-packing but got {new_shape}" missing = self.shape.volume / new_shape.defined.volume sizes = [missing if s is None else s for s in new_shape.sizes] new_shape = new_shape.with_sizes(sizes) if new_shape.is_uniform: native_reshaped = choose_backend(native).reshape(native, new_shape.sizes) return NativeTensor(native_reshaped, new_shape) else: split_dim = new_shape.non_uniform_shape[-1] i = 0 result = [] for idx in split_dim.meshgrid(): s = new_shape.after_gather(idx).get_size(new_shape.non_uniform.name) sliced = self[{dim: slice(i, i + s)}] result.append(sliced._with_shape_replaced(sliced.shape.replace(dim, unpacked_dims - split_dim))) i += s return stack(result, split_dim) else: tensors = self._tensors if dim == self._stack_dim.name: for udim in unpacked_dims: tensors = [TensorStack(tensors[o::len(tensors)//udim.size], udim) for o in range(len(tensors)//udim.size)] assert len(tensors) == 1 return tensors[0] raise NotImplementedError def __pack_dims__(self, dims: Tuple[str, ...], packed_dim: Shape, pos: Union[int, None], **kwargs) -> 'Tensor': order = self.shape._order_group(dims) if self.shape.is_uniform: native = self._transposed_native(order, force_expand=True) if pos is None: pos = min(self.shape.indices(dims)) new_shape = self.shape.without(dims)._expand(packed_dim.with_sizes([self.shape.only(dims).volume]), pos) native = choose_backend(native).reshape(native, new_shape.sizes) return NativeTensor(native, new_shape) else: from ._ops import concat_tensor value = cached(self) assert isinstance(value, TensorStack) inner_packed = [pack_dims(t, dims, packed_dim) for t in value._tensors] return concat_tensor(inner_packed, packed_dim.name) def __cast__(self, dtype: DType): return self._op1(lambda native: choose_backend(native).cast(native, dtype=dtype)) def dimension(self, name: Union[str, Shape]) -> 'TensorDim': """ Returns a reference to a specific dimension of this tensor. This is equivalent to the syntax `tensor.<name>`. The dimension need not be part of the `Tensor.shape` in which case its size is 1. 
Args: name: dimension name Returns: `TensorDim` corresponding to a dimension of this tensor """ if isinstance(name, str): return TensorDim(self, name) elif isinstance(name, Shape): return TensorDim(self, name.name) else: raise ValueError(name) def pack(self, dims, packed_dim): """ See `pack_dims()` """ from ._ops import pack_dims return pack_dims(self, dims, packed_dim) def unpack(self, dim, unpacked_dims): """ See `unpack_dim()` """ from ._ops import unpack_dim return unpack_dim(self, dim, unpacked_dims) @property def T(self): return self._with_shape_replaced(self.shape.transposed) @property def Ti(self): return self._with_shape_replaced(self.shape.transpose(instance)) @property def Tc(self): return self._with_shape_replaced(self.shape.transpose(channel)) @property def Ts(self): return self._with_shape_replaced(self.shape.transpose(channel)) def map(self, function: Callable, dims=shape_, range=range, unwrap_scalars=True, **kwargs): from ._functional import map_ return map_(function, self, dims=dims, range=range, unwrap_scalars=unwrap_scalars, **kwargs) def __getattr__(self, name): if name.startswith('__'): # called by hasattr in magic ops raise AttributeError if name.startswith('_'): raise AttributeError(f"'{type(self)}' object has no attribute '{name}'") if name == 'is_tensor_like': # TensorFlow replaces abs() while tracing and checks for this attribute raise AttributeError(f"'{type(self)}' object has no attribute '{name}'") assert name not in ('shape', '_shape', 'tensor'), name return TensorDim(self, name) def __add__(self, other): return self._op2(other, lambda x, y: x + y, lambda x, y: choose_backend(x, y).add(x, y), 'add', '+') def __radd__(self, other): return self._op2(other, lambda x, y: y + x, lambda x, y: choose_backend(x, y).add(y, x), 'radd', '+') def __sub__(self, other): return self._op2(other, lambda x, y: x - y, lambda x, y: choose_backend(x, y).sub(x, y), 'sub', '-') def __rsub__(self, other): return self._op2(other, lambda x, y: y - x, lambda x, y: choose_backend(x, y).sub(y, x), 'rsub', '-') def __and__(self, other): return self._op2(other, lambda x, y: x & y, lambda x, y: choose_backend(x, y).and_(x, y), 'and', '&') def __rand__(self, other): return self._op2(other, lambda x, y: y & x, lambda x, y: choose_backend(x, y).and_(y, x), 'rand', '&') def __or__(self, other): return self._op2(other, lambda x, y: x | y, lambda x, y: choose_backend(x, y).or_(x, y), 'or', '|') def __ror__(self, other): return self._op2(other, lambda x, y: y | x, lambda x, y: choose_backend(x, y).or_(y, x), 'ror', '|') def __xor__(self, other): return self._op2(other, lambda x, y: x ^ y, lambda x, y: choose_backend(x, y).xor(x, y), 'xor', '^') def __rxor__(self, other): return self._op2(other, lambda x, y: y ^ x, lambda x, y: choose_backend(x, y).xor(y, x), 'rxor', '^') def __mul__(self, other): return self._op2(other, lambda x, y: x * y, lambda x, y: choose_backend(x, y).mul(x, y), 'mul', '*') def __rmul__(self, other): return self._op2(other, lambda x, y: y * x, lambda x, y: choose_backend(x, y).mul(y, x), 'rmul', '*') def __truediv__(self, other): return self._op2(other, lambda x, y: x / y, lambda x, y: choose_backend(x, y).div(x, y), 'truediv', '/') def __rtruediv__(self, other): return self._op2(other, lambda x, y: y / x, lambda x, y: choose_backend(x, y).div(y, x), 'rtruediv', '/') def __divmod__(self, other): return self._op2(other, lambda x, y: divmod(x, y), lambda x, y: divmod(x, y), 'divmod', 'divmod') def __rdivmod__(self, other): return self._op2(other, lambda x, y: divmod(y, x), 
lambda x, y: divmod(y, x), 'rdivmod', 'divmod') def __floordiv__(self, other): return self._op2(other, lambda x, y: x // y, lambda x, y: choose_backend(x, y).floordiv(x, y), 'floordiv', '//') def __rfloordiv__(self, other): return self._op2(other, lambda x, y: y // x, lambda x, y: choose_backend(x, y).floordiv(y, x), 'rfloordiv', '//') def __pow__(self, power, modulo=None): assert modulo is None return self._op2(power, lambda x, y: x ** y, lambda x, y: choose_backend(x, y).pow(x, y), 'pow', '**') def __rpow__(self, other): return self._op2(other, lambda x, y: y ** x, lambda x, y: choose_backend(x, y).pow(y, x), 'rpow', '**') def __mod__(self, other): return self._op2(other, lambda x, y: x % y, lambda x, y: choose_backend(x, y).mod(x, y), 'mod', '%') def __rmod__(self, other): return self._op2(other, lambda x, y: y % x, lambda x, y: choose_backend(x, y).mod(y, x), 'rmod', '%') def __eq__(self, other) -> 'Tensor': if self is other: return expand(True, self.shape) if _EQUALITY_REDUCE[-1]['type'] == 'ref': return wrap(self is other) elif _EQUALITY_REDUCE[-1]['type'] == 'shape_and_value': if set(self.shape) != set(other.shape): return wrap(False) from ._ops import close return wrap(close(self, other, rel_tolerance=_EQUALITY_REDUCE[-1]['rel_tolerance'], abs_tolerance=_EQUALITY_REDUCE[-1]['abs_tolerance'], equal_nan=_EQUALITY_REDUCE[-1]['equal_nan'])) if other is None: other = float('nan') if self.shape.is_compatible(shape(other)): return self._op2(other, lambda x, y: x == y, lambda x, y: choose_backend(x, y).equal(x, y), 'eq', '==') else: return wrap(False) def __ne__(self, other) -> 'Tensor': if _EQUALITY_REDUCE[-1]['type'] == 'ref': return wrap(self is not other) elif _EQUALITY_REDUCE[-1]['type'] == 'shape_and_value': if set(self.shape) != set(other.shape): return wrap(True) from ._ops import close return wrap(not close(self, other, rel_tolerance=_EQUALITY_REDUCE[-1]['rel_tolerance'], abs_tolerance=_EQUALITY_REDUCE[-1]['abs_tolerance'], equal_nan=_EQUALITY_REDUCE[-1]['equal_nan'])) if other is None: other = float('nan') if self.shape.is_compatible(shape(other)): return self._op2(other, lambda x, y: x != y, lambda x, y: choose_backend(x, y).not_equal(x, y), 'ne', '!=') else: return wrap(True) def __lt__(self, other): return self._op2(other, lambda x, y: x < y, lambda x, y: choose_backend(x, y).greater_than(y, x), 'lt', '<') def __le__(self, other): return self._op2(other, lambda x, y: x <= y, lambda x, y: choose_backend(x, y).greater_or_equal(y, x), 'le', '<=') def __gt__(self, other): return self._op2(other, lambda x, y: x > y, lambda x, y: choose_backend(x, y).greater_than(x, y), 'gt', '>') def __ge__(self, other): return self._op2(other, lambda x, y: x >= y, lambda x, y: choose_backend(x, y).greater_or_equal(x, y), 'ge', '>=') def __lshift__(self, other): return self._op2(other, lambda x, y: x << y, lambda x, y: choose_backend(x, y).shift_bits_left(x, y), 'lshift', '<<') def __rlshift__(self, other): return self._op2(other, lambda y, x: x << y, lambda y, x: choose_backend(x, y).shift_bits_left(x, y), 'lshift', '<<') def __rshift__(self, other): return self._op2(other, lambda x, y: x >> y, lambda x, y: choose_backend(x, y).shift_bits_right(x, y), 'rshift', '>>') def __rrshift__(self, other): return self._op2(other, lambda y, x: x >> y, lambda y, x: choose_backend(x, y).shift_bits_right(x, y), 'rshift', '>>') def __abs__(self): return self._op1(lambda t: choose_backend(t).abs(t)) def __round__(self, n=None): return self._op1(lambda t: choose_backend(t).round(t)) def __copy__(self): return 
self._op1(lambda t: choose_backend(t).copy(t, only_mutable=True)) def __deepcopy__(self, memodict={}): return self._op1(lambda t: choose_backend(t).copy(t, only_mutable=False)) def __neg__(self) -> 'Tensor': return self._op1(lambda t: -t) def __invert__(self) -> 'Tensor': return self._op1(lambda t: choose_backend(t).invert(t)) def __reversed__(self): assert self.shape.channel.rank == 1 return self[::-1] def __iter__(self): if self.rank == 1: return iter(self.native()) elif self.rank == 0: return iter([self.native()]) else: native = reshaped_native(self, [self.shape]) return iter(native) def __matmul__(self, other): from ._ops import dot assert isinstance(other, Tensor), f"Matmul '@' requires two Tensor arguments but got {type(other)}" if not self.shape.dual_rank and self.shape.channel_rank: match = self.shape.channel.only(other.shape.channel) if match: return dot(self, match, other, match) match_names = self.shape.dual.as_batch().names if not match_names: # this is not a matrix assert self.shape.primal.only(other.shape).is_empty, f"Cannot compute matmul {self.shape} @ {other.shape}. First argument is not a matrix; it has no dual dimensions." return self * other match_primal = other.shape.only(match_names, reorder=True) if not match_primal: assert non_batch(other).non_dual.rank == 1, f"Cannot multiply {self.shape} @ {other.shape} because arg2 does not have appropriate non-dual dimensions" assert non_batch(other).non_dual.size == match_primal.volume, f"Cannot multiply {self.shape} @ {other.shape} because dual dims of arg1 have no match" match_primal = non_batch(other).non_dual match_dual = self.shape.dual.only(match_primal.as_dual(), reorder=True) left_arg = pack_dims(self, match_dual, dual('_reduce')) right_arg = pack_dims(other, match_primal, channel('_reduce')) return dot(left_arg, '~_reduce', right_arg, '_reduce') # def __rmatmul__(self, other): def _tensor(self, other) -> 'Tensor': if isinstance(other, Tensor): return other elif isinstance(other, (tuple, list)) and any(isinstance(v, Tensor) for v in other): if 'vector' in self.shape: outer_dim = self.shape['vector'] elif self.shape.channel_rank == 1: outer_dim = self.shape.channel else: raise ValueError(f"Cannot combine tensor of shape {self.shape} with tuple {tuple([type(v).__name__ for v in other])}") remaining_shape = self.shape.without(outer_dim) other_items = [v if isinstance(v, Tensor) else compatible_tensor(v, compat_shape=remaining_shape, compat_natives=self._natives(), convert=False) for v in other] other_stacked = stack(other_items, outer_dim, expand_values=True) return other_stacked else: return compatible_tensor(other, compat_shape=self.shape, compat_natives=self._natives(), convert=False) def _op1(self, native_function) -> 'Tensor': """ Transform the values of this tensor given a function that can be applied to any native tensor. Args: native_function: Returns: """ raise NotImplementedError(self.__class__) def _op2(self, other, operator: Callable, native_function: Callable, op_name: str = 'unknown', op_symbol: str = '?') -> 'Tensor': """ Apply a broadcast operation on two tensors. Args: other: second argument operator: function (Tensor, Tensor) -> Tensor, used to propagate the operation to children tensors to have Python choose the callee native_function: function (native tensor, native tensor) -> native tensor op_name: Name of the python function without leading and trailing `__`. Examples: 'add', 'radd', 'sub', 'mul', 'and', 'eq', 'ge'. 
op_symbol: Operation symbol, such as '+', '-', '&', '%', '>=' Returns: `Tensor` """ raise NotImplementedError(self.__class__) def _natives(self) -> tuple: raise NotImplementedError(self.__class__) def _spec_dict(self) -> dict: raise NotImplementedError(self.__class__) @classmethod def _from_spec_and_natives(cls, spec: dict, natives: list): raise NotImplementedError(cls) def _simplify(self): """ Does not cache this value but if it is already cached, returns the cached version. """ return self
Subclasses
- phiml.math._sparse.CompactSparseTensor
- phiml.math._sparse.CompressedSparseMatrix
- phiml.math._sparse.SparseCoordinateTensor
- phiml.math._tensors.Layout
- phiml.math._tensors.NativeTensor
- phiml.math._tensors.TensorStack
- phiml.math._trace.GatherLinTracer
- phiml.math._trace.ShiftLinTracer
- phiml.math._trace.SparseLinTracer
Instance variables
prop T
-
Expand source code
@property
def T(self):
    return self._with_shape_replaced(self.shape.transposed)
prop Tc
-
Expand source code
@property
def Tc(self):
    return self._with_shape_replaced(self.shape.transpose(channel))
prop Ti
-
Expand source code
@property
def Ti(self):
    return self._with_shape_replaced(self.shape.transpose(instance))
prop Ts
-
Expand source code
@property
def Ts(self):
    return self._with_shape_replaced(self.shape.transpose(spatial))
prop all
-
Whether all values of this `Tensor` are `True` as a native bool.

Expand source code

@property
def all(self):
    """ Whether all values of this `Tensor` are `True` as a native bool. """
    from ._ops import all_, cast
    if self.rank == 0:
        return cast(self, DType(bool)).native()
    else:
        return all_(self, dim=self.shape).native()
prop any
-
Whether this `Tensor` contains a `True` value as a native bool.

Expand source code

@property
def any(self):
    """ Whether this `Tensor` contains a `True` value as a native bool. """
    from ._ops import any_, cast
    if self.rank == 0:
        return cast(self, DType(bool)).native()
    else:
        return any_(self, dim=self.shape).native()
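Example (editor's sketch, assuming the default NumPy backend; the dim name `vector` is illustrative):

>>> from phiml.math import wrap, channel
>>> b = wrap([True, False, True], channel('vector'))
>>> b.any   # -> True (native bool)
>>> b.all   # -> False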
prop available : bool
-
A tensor is available if it stores concrete values and these can currently be read.
Tracers used inside jit compilation are typically not available.
See Also: `phiml.math.jit_compile()`.

Expand source code

@property
def available(self) -> bool:
    """
    A tensor is available if it stores concrete values and these can currently be read.

    Tracers used inside jit compilation are typically not available.

    See Also:
        `phiml.math.jit_compile()`.
    """
    if self._is_tracer:
        return False
    natives = self._natives()
    natives_available = [choose_backend(native).is_available(native) for native in natives]
    return all(natives_available)
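Example (editor's sketch): a tensor holding concrete NumPy values is available, whereas a tracer inside a jit-compiled function would not be.

>>> from phiml.math import wrap, channel
>>> wrap([1., 2.], channel('vector')).available
True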
prop backend : phiml.backend._backend.Backend
-
Expand source code
@property
def backend(self) -> Backend:
    from ._ops import choose_backend_t
    return choose_backend_t(self)
prop default_backend : phiml.backend._backend.Backend
-
Expand source code
@property
def backend(self) -> Backend:
    from ._ops import choose_backend_t
    return choose_backend_t(self)
prop device : Optional[phiml.backend._backend.ComputeDevice]
-
Returns the `ComputeDevice` that this tensor is allocated on. The device belongs to this tensor's `default_backend`.

See Also: `Tensor.default_backend`.

Expand source code

@property
def device(self) -> Union[ComputeDevice, None]:
    """
    Returns the `ComputeDevice` that this tensor is allocated on.
    The device belongs to this tensor's `default_backend`.

    See Also:
        `Tensor.default_backend`.
    """
    natives = self._natives()
    if not natives:
        return None
    return self.default_backend.get_device(natives[0])
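Example (editor's sketch; the `device_type` attribute of `ComputeDevice` is assumed here, and the exact value depends on your machine):

>>> from phiml.math import wrap, channel
>>> t = wrap([1., 2.], channel('vector'))
>>> t.device.device_type   # e.g. 'CPU' for the NumPy backend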
prop dtype : phiml.backend._dtype.DType
-
Data type of the elements of this `Tensor`.

Expand source code

@property
def dtype(self) -> DType:
    """ Data type of the elements of this `Tensor`. """
    raise NotImplementedError(self.__class__)
prop finite_max
-
Maximum finite value of this `Tensor` as a native scalar.

Expand source code

@property
def finite_max(self):
    """ Maximum finite value of this `Tensor` as a native scalar. """
    from ._ops import finite_max
    return finite_max(self, dim=self.shape).native()
prop finite_mean
-
Mean value of all finite values in this `Tensor` as a native scalar.

Expand source code

@property
def finite_mean(self):
    """ Mean value of all finite values in this `Tensor` as a native scalar. """
    from ._ops import finite_mean
    return finite_mean(self, dim=self.shape).native()
prop finite_min
-
Minimum finite value of this `Tensor` as a native scalar.

Expand source code

@property
def finite_min(self):
    """ Minimum finite value of this `Tensor` as a native scalar. """
    from ._ops import finite_min
    return finite_min(self, dim=self.shape).native()
prop finite_sum
-
Sum of all finite values of this `Tensor` as a native scalar.

Expand source code

@property
def finite_sum(self):
    """ Sum of all finite values of this `Tensor` as a native scalar. """
    from ._ops import finite_sum
    return finite_sum(self, dim=self.shape).native()
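Example (editor's sketch): the `finite_*` reductions skip NaN and infinite entries, unlike their plain counterparts. Uses the exported `NAN` constant.

>>> from phiml.math import wrap, channel, NAN
>>> t = wrap([1., NAN, 3.], channel('vector'))
>>> t.sum          # nan, since NaN propagates through the plain reduction
>>> t.finite_sum   # 4.0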
prop imag : Tensor
-
Returns the imaginary part of this tensor. If this tensor does not store complex numbers, returns a zero tensor with the same shape and dtype as this tensor.
See Also: `phiml.math.imag()`

Expand source code

@property
def imag(self) -> 'Tensor':
    """
    Returns the imaginary part of this tensor.
    If this tensor does not store complex numbers, returns a zero tensor with the same shape and dtype as this tensor.

    See Also:
        `phiml.math.imag()`
    """
    from ._ops import imag
    return imag(self)
prop max
-
Maximum value of this `Tensor` as a native scalar.

Expand source code

@property
def max(self):
    """ Maximum value of this `Tensor` as a native scalar. """
    from ._ops import max_
    return max_(self, dim=self.shape).native()
prop mean
-
Mean value of this `Tensor` as a native scalar.

Expand source code

@property
def mean(self):
    """ Mean value of this `Tensor` as a native scalar. """
    from ._ops import mean
    return mean(self, dim=self.shape).native()
prop min
-
Minimum value of this `Tensor` as a native scalar.

Expand source code

@property
def min(self):
    """ Minimum value of this `Tensor` as a native scalar. """
    from ._ops import min_
    return min_(self, dim=self.shape).native()
prop rank : int
-
Number of explicit dimensions of this `Tensor`. Equal to `tensor.shape.rank`. This replaces `numpy.ndarray.ndim` / `torch.Tensor.dim` / `tf.rank()` / `jax.numpy.ndim()`.

Expand source code

@property
def rank(self) -> int:
    """
    Number of explicit dimensions of this `Tensor`. Equal to `tensor.shape.rank`.
    This replaces [`numpy.ndarray.ndim`](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.ndim.html) /
    [`torch.Tensor.dim`](https://pytorch.org/docs/master/generated/torch.Tensor.dim.html) /
    [`tf.rank()`](https://www.tensorflow.org/api_docs/python/tf/rank) /
    [`jax.numpy.ndim()`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.ndim.html).
    """
    return self.shape.rank
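Example (editor's sketch):

>>> from phiml.math import wrap, spatial
>>> wrap([[0, 1, 2], [3, 4, 5]], spatial('y,x')).rank
2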
prop real : Tensor
-
Returns the real part of this tensor.
See Also: `phiml.math.real()`

Expand source code

@property
def real(self) -> 'Tensor':
    """
    Returns the real part of this tensor.

    See Also:
        `phiml.math.real()`
    """
    from ._ops import real
    return real(self)
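Example (editor's sketch, assuming the default NumPy backend; `numpy()` is used to show deterministic output):

>>> from phiml.math import wrap, channel
>>> z = wrap([1+2j, 3-4j], channel('vector'))
>>> z.real.numpy('vector')
array([1., 3.])
>>> z.imag.numpy('vector')
array([ 2., -4.])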
prop shape : phiml.math._shape.Shape
-
The `Shape` lists the dimensions with their sizes, names and types.

Expand source code

@property
def shape(self) -> Shape:
    """ The `Shape` lists the dimensions with their sizes, names and types. """
    raise NotImplementedError(self.__class__)
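Example (editor's sketch): the shape records names, sizes and dim types together.

>>> from phiml.math import wrap, spatial
>>> t = wrap([[0, 1, 2], [3, 4, 5]], spatial('y,x'))
>>> t.shape.names
('y', 'x')
>>> t.shape.sizes
(2, 3)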
prop std
-
Standard deviation of this `Tensor` as a native scalar.

Expand source code

@property
def std(self):
    """ Standard deviation of this `Tensor` as a native scalar. """
    from ._ops import std
    return std(self, dim=self.shape).native()
prop sum
-
Sum of all values of this `Tensor` as a native scalar.

Expand source code

@property
def sum(self):
    """ Sum of all values of this `Tensor` as a native scalar. """
    from ._ops import sum_
    return sum_(self, dim=self.shape).native()
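Example (editor's sketch): these properties reduce over all dims and return a native scalar rather than a `Tensor`.

>>> from phiml.math import wrap, spatial
>>> t = wrap([[1., 2.], [3., 4.]], spatial('y,x'))
>>> t.sum    # 10.0
>>> t.mean   # 2.5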
Methods
def dimension(self, name: Union[str, phiml.math._shape.Shape]) -> phiml.math._tensors.TensorDim
-
Returns a reference to a specific dimension of this tensor. This is equivalent to the syntax `tensor.<name>`.

The dimension need not be part of the `Tensor.shape`, in which case its size is 1.

Args

name
- dimension name

Returns

`TensorDim` corresponding to a dimension of this tensor

def map(self, function: Callable, dims=shape, range=builtins.range, unwrap_scalars=True, **kwargs)
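Example (editor's sketch): with the default `dims` (the full shape), the function is applied once per element, receiving unwrapped Python scalars. This reading of `map` is our assumption based on the parameter names.

>>> from phiml.math import wrap, spatial
>>> t = wrap([[1, 2], [3, 4]], spatial('y,x'))
>>> t.map(lambda v: v * 10)   # element-wise; result has the same shape as t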
def native(self, order: Union[phiml.math._shape.Shape, tuple, list, str] = None, force_expand=True, to_numpy=False)
-
Returns a native tensor object with the dimensions ordered according to `order`.

Transposes the underlying tensor to match the name order and adds singleton dimensions for new dimension names. If a dimension of the tensor is not listed in `order`, a `ValueError` is raised.

Additionally, groups of dimensions can be specified to pack dims, see `reshaped_native()`.

Args

order
- (Optional) Order of dimension names as comma-separated string, list or `Shape`.
force_expand
- If `False`, dimensions along which values are guaranteed to be constant will not be expanded to their true size but returned as singleton dimensions.
to_numpy
- Whether to convert the tensor to a NumPy `ndarray`.
Returns
Native tensor representation, such as PyTorch tensor or NumPy array.
Raises
`ValueError` if the tensor cannot be transposed to match `order`
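Example (editor's sketch, assuming the default NumPy backend):

>>> from phiml.math import wrap, spatial
>>> t = wrap([[0, 1, 2], [3, 4, 5]], spatial('y,x'))
>>> t.native('x,y').shape   # a NumPy array, transposed to x-major order
(3, 2)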
def numpy(self, order: Union[phiml.math._shape.Shape, tuple, list, str] = None, force_expand=True) -> numpy.ndarray
-
Converts this tensor to a `numpy.ndarray` with dimensions ordered according to `order`.

Note: Using this function breaks the autograd chain. The returned tensor is not differentiable. To get a differentiable tensor, use `Tensor.native()` instead.

Transposes the underlying tensor to match the name order and adds singleton dimensions for new dimension names. If a dimension of the tensor is not listed in `order`, a `ValueError` is raised.

If this `Tensor` is backed by a NumPy array, a reference to this array may be returned.

See Also: `phiml.math.numpy_()`

Args

order
- (Optional) Order of dimension names as comma-separated string, list or `Shape`.
Returns
NumPy representation
Raises
`ValueError` if the tensor cannot be transposed to match `order`
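Example (editor's sketch):

>>> from phiml.math import wrap, spatial
>>> t = wrap([[0, 1], [2, 3]], spatial('y,x'))
>>> t.numpy('y,x')
array([[0, 1],
       [2, 3]])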
def pack(self, dims, packed_dim)
-
See `pack_dims()`
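Example (editor's sketch; the dim name `points` is illustrative):

>>> from phiml.math import wrap, spatial, instance
>>> t = wrap([[0, 1], [2, 3]], spatial('y,x'))
>>> t.pack('y,x', instance('points')).shape.sizes
(4,)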
def print(self, layout='full', float_format=None, threshold=8, include_shape=None, include_dtype=None)
def unpack(self, dim, unpacked_dims)
-
See `unpack_dim()`
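Example (editor's sketch, the inverse of the `pack` example above; sizes must be given for the unpacked dims):

>>> from phiml.math import wrap, spatial, instance
>>> flat = wrap([0, 1, 2, 3], instance('points'))
>>> flat.unpack('points', spatial(y=2, x=2)).shape.names
('y', 'x')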