Module phi.math
Vectorized operations, tensors with named dimensions.
This package provides a common interface for tensor operations. It internally uses NumPy, TensorFlow or PyTorch.
Main classes: Tensor, Shape, DType, Extrapolation.
The provided operations are not implemented directly. Instead, they delegate the actual computation to either NumPy, TensorFlow or PyTorch, depending on the configuration. This allows the user to write simulation code once and have it run with various computation backends.
See the documentation at https://tum-pbs.github.io/PhiFlow/Math.html
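For example, the following sketch (illustrative, not from the official docs; dimension names are arbitrary and NumPy is assumed as the default backend) runs unchanged on any registered backend:
>>> from phi import math
>>> from phi.math import spatial
>>> data = math.random_uniform(spatial(x=8, y=8))   # created with the default backend
>>> math.mean(data, 'x')                            # reduce by dimension name, no axis indices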
Expand source code
"""
Vectorized operations, tensors with named dimensions.
This package provides a common interface for tensor operations.
It internally uses NumPy, TensorFlow or PyTorch.
Main classes: `Tensor`, `Shape`, `DType`, `Extrapolation`.
The provided operations are not implemented directly.
Instead, they delegate the actual computation to either NumPy, TensorFlow or PyTorch, depending on the configuration.
This allows the user to write simulation code once and have it run with various computation backends.
See the documentation at https://tum-pbs.github.io/PhiFlow/Math.html
"""
from .backend._dtype import DType
from .backend import NUMPY, precision, set_global_precision, get_precision
from ._shape import (
shape, Shape, EMPTY_SHAPE, DimFilter,
spatial, channel, batch, instance, dual,
non_batch, non_spatial, non_instance, non_channel, non_dual, non_primal, primal,
merge_shapes, concat_shapes, IncompatibleShapes,
enable_debug_checks,
)
from ._magic_ops import (
slice_ as slice, unstack,
stack, concat, expand,
rename_dims, rename_dims as replace_dims, pack_dims, unpack_dim, flatten,
b2i, c2b, i2b, s2b, si2d,
copy_with, replace
)
from ._tensors import wrap, tensor, layout, Tensor, Dict, to_dict, from_dict, is_scalar, BROADCAST_FORMATTER as f
from ._sparse import dense, get_sparsity, get_format, sparse_tensor, stored_indices, stored_values, tensor_like
from .extrapolation import Extrapolation
from ._ops import (
choose_backend_t as choose_backend, all_available, convert, seed, to_device,
native, numpy, reshaped_native, reshaped_tensor, reshaped_numpy, copy, native_call,
print_ as print,
map_ as map,
zeros, ones, fftfreq, random_normal, random_uniform, meshgrid, linspace, arange as range, range_tensor, # creation operators (use default backend)
zeros_like, ones_like,
pad,
transpose, # reshape operations
safe_div, safe_div as divide_no_nan,
where, nonzero,
sum_ as sum, finite_sum, mean, finite_mean, std, prod, max_ as max, finite_max, min_ as min, finite_min, any_ as any, all_ as all, quantile, median, # reduce
dot,
abs_ as abs, sign,
round_ as round, ceil, floor,
maximum, minimum, clip,
sqrt, exp, log, log2, log10, sigmoid, soft_plus,
sin, cos, tan, sinh, cosh, tanh, arcsin, arccos, arctan, arcsinh, arccosh, arctanh, log_gamma, factorial,
to_float, to_int32, to_int64, to_complex, imag, real, conjugate,
degrees,
boolean_mask,
is_finite, is_finite as isfinite, is_nan, is_inf,
closest_grid_values, grid_sample, scatter, gather,
histogram,
fft, ifft, convolve, cumulative_sum,
dtype, cast,
close, assert_close,
stop_gradient,
pairwise_distances, map_pairs,
)
from ._nd import (
shift,
vec, const_vec, vec_abs, vec_abs as vec_length, vec_squared, vec_normalize, cross_product, rotate_vector, dim_mask,
normalize_to,
l1_loss, l2_loss, frequency_loss,
spatial_gradient, laplace,
fourier_laplace, fourier_poisson, abs_square,
downsample2x, upsample2x, sample_subgrid,
masked_fill, finite_fill
)
from ._trace import matrix_from_function
from ._functional import (
LinearFunction, jit_compile_linear, jit_compile,
jacobian, jacobian as gradient, functional_gradient, custom_gradient, print_gradient,
map_types, map_s2b, map_i2b, map_c2b,
broadcast,
iterate,
identity,
trace_check,
)
from ._optimize import solve_linear, solve_nonlinear, minimize, Solve, SolveInfo, ConvergenceException, NotConverged, Diverged, SolveTape, factor_ilu
PI = 3.14159265358979323846
"""Value of π to double precision """
pi = PI # intentionally undocumented, use PI instead. Exists only as an anlog to numpy.pi
INF = float("inf")
""" Floating-point representation of positive infinity. """
inf = INF  # intentionally undocumented, use INF instead. Exists only as an analog to numpy.inf
NAN = float("nan")
""" Floating-point representation of NaN (not a number). """
nan = NAN  # intentionally undocumented, use NAN instead. Exists only as an analog to numpy.nan
NUMPY = NUMPY # to show up in pdoc
"""Default backend for NumPy arrays and SciPy objects."""
f = f
"""
Automatic mapper for broadcast string formatting of tensors, resulting in tensors of strings.
Used with the special `-f-` syntax.
Examples:
>>> from phi.math import f
>>> -f-f'String containing {tensor1} and {tensor2:.1f}'
# Result is a str tensor containing all dims of tensor1 and tensor2
"""
__all__ = [key for key in globals().keys() if not key.startswith('_')]
__pdoc__ = {
'Extrapolation': False,
'Shape.__init__': False,
'SolveInfo.__init__': False,
'TensorDim.__init__': False,
'ConvergenceException.__init__': False,
'Diverged.__init__': False,
'NotConverged.__init__': False,
'LinearFunction.__init__': False,
}
Sub-modules
phi.math.backend
-
Low-level library wrappers for delegating vector operations.
phi.math.extrapolation
-
Extrapolations are used for padding tensors and sampling coordinates lying outside the tensor bounds. Standard extrapolations are listed as global …
phi.math.magic
-
Magic methods allow custom classes to be compatible with various functions defined in phi.math, analogous to how implementing __hash__ allows …
Global variables
var INF
-
Floating-point representation of positive infinity.
var NAN
-
Floating-point representation of NaN (not a number).
var NUMPY
-
Default backend for NumPy arrays and SciPy objects.
var PI
-
Value of π to double precision.
var f
-
Automatic mapper for broadcast string formatting of tensors, resulting in tensors of strings. Used with the special -f- syntax.
Examples
>>> from phi.math import f
>>> -f-f'String containing {tensor1} and {tensor2:.1f}'
# Result is a str tensor containing all dims of tensor1 and tensor2
Functions
def abs(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Computes ||x||₁. Complex x results in matching precision float values.
Note: The gradient of this operation is undefined for x=0. TensorFlow and PyTorch return 0 while Jax returns 1.
Args
x
- Tensor or PhiTreeNode
Returns
Absolute value of x of same type as x.
Expand source code
def abs_(x) -> Union[Tensor, PhiTreeNode]:
    """
    Computes *||x||<sub>1</sub>*. Complex `x` results in matching precision float values.

    *Note*: The gradient of this operation is undefined for *x=0*. TensorFlow and PyTorch return 0 while Jax returns 1.

    Args:
        x: `Tensor` or `phi.math.magic.PhiTreeNode`

    Returns:
        Absolute value of `x` of same type as `x`.
    """
    return _backend_op1(x, Backend.abs)
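A minimal usage sketch (illustrative; the dimension name x is arbitrary):
>>> from phi import math
>>> from phi.math import spatial
>>> math.abs(math.wrap([-1, 0, 2], spatial('x')))   # element-wise absolute value along x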
def abs_square(complex_values: phi.math._tensors.Tensor) ‑> phi.math._tensors.Tensor
-
Squared magnitude of complex values.
Args
complex_values
- complex Tensor
Returns
Tensor
- real-valued magnitude squared
Expand source code
def abs_square(complex_values: Tensor) -> Tensor:
    """
    Squared magnitude of complex values.

    Args:
        complex_values: complex `Tensor`

    Returns:
        Tensor: real-valued magnitude squared
    """
    return math.imag(complex_values) ** 2 + math.real(complex_values) ** 2
def all(boolean_tensor: Union[phi.math._tensors.Tensor, list, tuple, numbers.Number, bool], dim: Union[str, tuple, list, set, phi.math._shape.Shape, Callable] = <function non_batch>) ‑> phi.math._tensors.Tensor
-
Tests whether all entries of boolean_tensor are True along the specified dimensions.
Args
boolean_tensor
- Tensor or list / tuple of Tensors.
dim
- Dimension or dimensions to be reduced. One of
- None to reduce all non-batch dimensions
- str containing a single dimension or a comma-separated list of dimensions
- Tuple[str] or List[str]
- Shape
- batch(), instance(), spatial(), channel() to select dimensions by type
- '0' when isinstance(value, (tuple, list)) to reduce the sequence of Tensors
Returns
Tensor without the reduced dimensions.
Expand source code
def all_(boolean_tensor: Union[Tensor, list, tuple, Number, bool], dim: DimFilter = non_batch) -> Tensor:
    """
    Tests whether all entries of `boolean_tensor` are `True` along the specified dimensions.

    Args:
        boolean_tensor: `Tensor` or `list` / `tuple` of Tensors.
        dim: Dimension or dimensions to be reduced. One of

            * `None` to reduce all non-batch dimensions
            * `str` containing single dimension or comma-separated list of dimensions
            * `Tuple[str]` or `List[str]`
            * `Shape`
            * `batch`, `instance`, `spatial`, `channel` to select dimensions by type
            * `'0'` when `isinstance(value, (tuple, list))` to reduce the sequence of Tensors

    Returns:
        `Tensor` without the reduced dimensions.
    """
    return reduce_(_all, boolean_tensor, dim)
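A minimal usage sketch (illustrative dimension names):
>>> from phi import math
>>> from phi.math import spatial
>>> t = math.wrap([True, False, True], spatial('x'))
>>> math.all(t)         # reduces all non-batch dimensions -> False
>>> math.all(t, 'x')    # reduce an explicitly named dimension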
def all_available(*values: phi.math._tensors.Tensor) ‑> bool
-
Tests if the values of all given tensors are known and can be read at this point. Tracing placeholders are considered not available, even when they hold example values.
Tensors are not available during jit_compile(), jit_compile_linear() or while using TensorFlow's legacy graph mode.
Tensors are typically available when the backend operates in eager mode and is not currently tracing a function.
This can be used instead of the native checks
- PyTorch: torch._C._get_tracing_state()
- TensorFlow: tf.executing_eagerly()
- Jax: isinstance(x, jax.core.Tracer)
Args
values
- Tensors to check.
Returns
True if no value is a placeholder or being traced, False otherwise.
Expand source code
def all_available(*values: Tensor) -> bool:
    """
    Tests if the values of all given tensors are known and can be read at this point.
    Tracing placeholders are considered not available, even when they hold example values.

    Tensors are not available during `jit_compile()`, `jit_compile_linear()` or while using TensorFlow's legacy graph mode.

    Tensors are typically available when the backend operates in eager mode and is not currently tracing a function.

    This can be used instead of the native checks

    * PyTorch: `torch._C._get_tracing_state()`
    * TensorFlow: `tf.executing_eagerly()`
    * Jax: `isinstance(x, jax.core.Tracer)`

    Args:
        values: Tensors to check.

    Returns:
        `True` if no value is a placeholder or being traced, `False` otherwise.
    """
    return all([v.available for v in values])
def any(boolean_tensor: Union[phi.math._tensors.Tensor, list, tuple], dim: Union[str, tuple, list, set, phi.math._shape.Shape, Callable] = <function non_batch>) ‑> phi.math._tensors.Tensor
-
Tests whether any entry of boolean_tensor is True along the specified dimensions.
Args
boolean_tensor
- Tensor or list / tuple of Tensors.
dim
- Dimension or dimensions to be reduced. One of
- None to reduce all non-batch dimensions
- str containing a single dimension or a comma-separated list of dimensions
- Tuple[str] or List[str]
- Shape
- batch(), instance(), spatial(), channel() to select dimensions by type
- '0' when isinstance(value, (tuple, list)) to reduce the sequence of Tensors
Returns
Tensor without the reduced dimensions.
Expand source code
def any_(boolean_tensor: Union[Tensor, list, tuple], dim: DimFilter = non_batch) -> Tensor:
    """
    Tests whether any entry of `boolean_tensor` is `True` along the specified dimensions.

    Args:
        boolean_tensor: `Tensor` or `list` / `tuple` of Tensors.
        dim: Dimension or dimensions to be reduced. One of

            * `None` to reduce all non-batch dimensions
            * `str` containing single dimension or comma-separated list of dimensions
            * `Tuple[str]` or `List[str]`
            * `Shape`
            * `batch`, `instance`, `spatial`, `channel` to select dimensions by type
            * `'0'` when `isinstance(value, (tuple, list))` to reduce the sequence of Tensors

    Returns:
        `Tensor` without the reduced dimensions.
    """
    return reduce_(_any, boolean_tensor, dim)
def arccos(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Computes the inverse of cos(x) of the Tensor or PhiTreeNode x. For real arguments, the result lies in the range [0, π].
Expand source code
def arccos(x) -> Union[Tensor, PhiTreeNode]:
    """ Computes the inverse of *cos(x)* of the `Tensor` or `phi.math.magic.PhiTreeNode` `x`.
    For real arguments, the result lies in the range [0, π]. """
    return _backend_op1(x, Backend.arccos)
def arccosh(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Computes the inverse of cosh(x) of the Tensor or PhiTreeNode x.
Expand source code
def arccosh(x) -> Union[Tensor, PhiTreeNode]:
    """ Computes the inverse of *cosh(x)* of the `Tensor` or `phi.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.arccosh)
def arcsin(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Computes the inverse of sin(x) of the Tensor or PhiTreeNode x. For real arguments, the result lies in the range [-π/2, π/2].
Expand source code
def arcsin(x) -> Union[Tensor, PhiTreeNode]:
    """ Computes the inverse of *sin(x)* of the `Tensor` or `phi.math.magic.PhiTreeNode` `x`.
    For real arguments, the result lies in the range [-π/2, π/2]. """
    return _backend_op1(x, Backend.arcsin)
def arcsinh(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Computes the inverse of sinh(x) of the Tensor or PhiTreeNode x.
Expand source code
def arcsinh(x) -> Union[Tensor, PhiTreeNode]:
    """ Computes the inverse of *sinh(x)* of the `Tensor` or `phi.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.arcsinh)
def arctan(x, divide_by=None) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Computes the inverse of tan(x) of the Tensor or PhiTreeNode x.
Args
x
- Input. The single-argument arctan() function cannot output π/2 or -π/2 since tan(π/2) is infinite.
divide_by
- If specified, computes arctan(x/divide_by) so that it can return π/2 and -π/2. This is equivalent to the common arctan2 function.
Expand source code
def arctan(x, divide_by=None) -> Union[Tensor, PhiTreeNode]:
    """
    Computes the inverse of *tan(x)* of the `Tensor` or `phi.math.magic.PhiTreeNode` `x`.

    Args:
        x: Input. The single-argument `arctan` function cannot output π/2 or -π/2 since tan(π/2) is infinite.
        divide_by: If specified, computes `arctan(x/divide_by)` so that it can return π/2 and -π/2.
            This is equivalent to the common `arctan2` function.
    """
    if divide_by is None:
        return _backend_op1(x, Backend.arctan)
    else:
        divide_by = to_float(divide_by)
        return custom_op2(x, divide_by, arctan, lambda a, b: choose_backend(a, b).arctan2(a, b), 'arctan')
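A short sketch of the two calling modes (illustrative values):
>>> from phi import math
>>> math.arctan(math.wrap(1.))                    # single-argument form, ≈ π/4
>>> math.arctan(math.wrap(1.), divide_by=0.)      # arctan2-style form, can return π/2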
def arctanh(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Computes the inverse of tanh(x) of the Tensor or PhiTreeNode x.
Expand source code
def arctanh(x) -> Union[Tensor, PhiTreeNode]:
    """ Computes the inverse of *tanh(x)* of the `Tensor` or `phi.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.arctanh)
def assert_close(*values, rel_tolerance: float = 1e-05, abs_tolerance: float = 0, msg: str = '', verbose: bool = True)
-
Checks that all given tensors have equal values within the specified tolerance. Raises an AssertionError if the values of this tensor are not within tolerance of any of the other tensors.
Does not check that the shapes match as long as they can be broadcast to a common shape.
Args
values
- Tensors or native tensors or numbers or sequences of numbers.
rel_tolerance
- Relative tolerance.
abs_tolerance
- Absolute tolerance.
msg
- Optional error message.
verbose
- Whether to print conflicting values.
Expand source code
def assert_close(*values, rel_tolerance: float = 1e-5, abs_tolerance: float = 0, msg: str = "", verbose: bool = True):
    """
    Checks that all given tensors have equal values within the specified tolerance.
    Raises an AssertionError if the values of this tensor are not within tolerance of any of the other tensors.

    Does not check that the shapes match as long as they can be broadcast to a common shape.

    Args:
        values: Tensors or native tensors or numbers or sequences of numbers.
        rel_tolerance: Relative tolerance.
        abs_tolerance: Absolute tolerance.
        msg: Optional error message.
        verbose: Whether to print conflicting values.
    """
    if not values:
        return
    phi_tensors = [t for t in values if isinstance(t, Tensor)]
    if phi_tensors:
        values = [compatible_tensor(t, phi_tensors[0].shape)._simplify() for t in values]  # use Tensor to infer dimensions
        for other in values[1:]:
            _assert_close(values[0], other, rel_tolerance, abs_tolerance, msg, verbose)
    elif all(isinstance(v, PhiTreeNode) for v in values):
        tree0, tensors0 = disassemble_tree(values[0])
        for value in values[1:]:
            tree, tensors_ = disassemble_tree(value)
            assert tree0 == tree, f"Tree structures do not match: {tree0} and {tree}"
            for t0, t in zip(tensors0, tensors_):
                _assert_close(t0, t, rel_tolerance, abs_tolerance, msg, verbose)
    else:
        np_values = [choose_backend(t).numpy(t) for t in values]
        for other in np_values[1:]:
            np.testing.assert_allclose(np_values[0], other, rel_tolerance, abs_tolerance, err_msg=msg, verbose=verbose)
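A minimal usage sketch (illustrative; tolerances as in the signature above):
>>> from phi import math
>>> from phi.math import spatial
>>> math.assert_close(math.zeros(spatial(x=4)), 0, abs_tolerance=1e-6)    # passes silently
>>> math.assert_close(math.zeros(spatial(x=4)), 0.1, msg="mismatch")      # raises AssertionError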
def b2i(value)
-
Change the type of all batch dimensions of value to instance dimensions. See rename_dims().
Expand source code
def b2i(value):
    """ Change the type of all *batch* dimensions of `value` to *instance* dimensions. See `rename_dims`. """
    return rename_dims(value, batch, instance)
def batch(*args, **dims: Union[int, str, tuple, list, phi.math._shape.Shape]) ‑> phi.math._shape.Shape
-
Returns the batch dimensions of an existing Shape or creates a new Shape with only batch dimensions.
Usage for filtering batch dimensions:
>>> batch_dims = batch(shape)
>>> batch_dims = batch(tensor)
Usage for creating a Shape with only batch dimensions:
>>> batch_shape = batch('undef', batch=2)
(batch=2, undef=None)
Here, the dimension undef is created with an undefined size of None. Undefined sizes are automatically filled in by tensor(), wrap(), stack() and concat().
To create a shape with multiple types, use merge_shapes(), concat_shapes() or the syntax shape1 & shape2.
See Also: channel(), spatial(), instance()
Args
*args
- Either Shape or Tensor to filter, or names of dimensions with undefined sizes as str.
**dims
- Dimension sizes and names. Must be empty when used as a filter operation.
Returns
Shape containing only dimensions of type batch.
Expand source code
def batch(*args, **dims: Union[int, str, tuple, list, Shape]) -> Shape:
    """
    Returns the batch dimensions of an existing `Shape` or creates a new `Shape` with only batch dimensions.

    Usage for filtering batch dimensions:

    >>> batch_dims = batch(shape)
    >>> batch_dims = batch(tensor)

    Usage for creating a `Shape` with only batch dimensions:

    >>> batch_shape = batch('undef', batch=2)
    (batch=2, undef=None)

    Here, the dimension `undef` is created with an undefined size of `None`.
    Undefined sizes are automatically filled in by `tensor`, `wrap`, `stack` and `concat`.

    To create a shape with multiple types, use `merge_shapes()`, `concat_shapes()` or the syntax `shape1 & shape2`.

    See Also:
        `channel`, `spatial`, `instance`

    Args:
        *args: Either

            * `Shape` or `Tensor` to filter or
            * Names of dimensions with undefined sizes as `str`.

        **dims: Dimension sizes and names. Must be empty when used as a filter operation.

    Returns:
        `Shape` containing only dimensions of type batch.
    """
    from .magic import Shaped
    if all(isinstance(arg, str) for arg in args) or dims:
        return _construct_shape(BATCH_DIM, '', *args, **dims)
    elif len(args) == 1 and isinstance(args[0], Shape):
        return args[0].batch
    elif len(args) == 1 and isinstance(args[0], Shaped):
        return shape(args[0]).batch
    else:
        raise AssertionError(f"batch() must be called either as a selector batch(Shape) or batch(Tensor) or as a constructor batch(*names, **dims). Got *args={args}, **dims={dims}")
def boolean_mask(x: phi.math._tensors.Tensor, dim: Union[str, tuple, list, set, phi.math._shape.Shape, Callable], mask: phi.math._tensors.Tensor)
-
Discards values x.dim[i] where mask.dim[i]=False. All dimensions of mask that are not dim are treated as batch dimensions.
Alternative syntax: x.dim[mask].
Implementations:
- NumPy: Slicing
- PyTorch: masked_select
- TensorFlow: tf.boolean_mask
- Jax: Slicing
Args
x
- Tensor of values.
dim
- Dimension of x along which to discard slices.
mask
- Boolean Tensor marking which values to keep. Must have the dimension dim matching x.
Returns
Selected values of x as Tensor with dimensions from x and mask.
Expand source code
def boolean_mask(x: Tensor, dim: DimFilter, mask: Tensor):
    """
    Discards values `x.dim[i]` where `mask.dim[i]=False`.
    All dimensions of `mask` that are not `dim` are treated as batch dimensions.

    Alternative syntax: `x.dim[mask]`.

    Implementations:

    * NumPy: Slicing
    * PyTorch: [`masked_select`](https://pytorch.org/docs/stable/generated/torch.masked_select.html)
    * TensorFlow: [`tf.boolean_mask`](https://www.tensorflow.org/api_docs/python/tf/boolean_mask)
    * Jax: Slicing

    Args:
        x: `Tensor` of values.
        dim: Dimension of `x` along which to discard slices.
        mask: Boolean `Tensor` marking which values to keep. Must have the dimension `dim` matching `x`.

    Returns:
        Selected values of `x` as `Tensor` with dimensions from `x` and `mask`.
    """
    dim, original_dim = mask.shape.only(dim), dim  # ToDo
    assert dim, f"mask dimension '{original_dim}' must be present on the mask {mask.shape}"
    assert dim.rank == 1, f"boolean mask only supports 1D selection"

    def uniform_boolean_mask(x: Tensor, mask_1d: Tensor):
        if dim in x.shape:
            x_native = x.native(x.shape.names)  # order does not matter
            mask_native = mask_1d.native()  # only has 1 dim
            backend = choose_backend(x_native, mask_native)
            result_native = backend.boolean_mask(x_native, mask_native, axis=x.shape.index(dim))
            new_shape = x.shape.with_sizes(backend.staticshape(result_native))
            return NativeTensor(result_native, new_shape)
        else:
            total = int(sum_(to_int64(mask_1d), mask_1d.shape))
            new_shape = mask_1d.shape.with_sizes([total])
            return expand(x, new_shape)

    return broadcast_op(uniform_boolean_mask, [x, mask], iter_dims=mask.shape.without(dim))
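A minimal usage sketch (illustrative dimension name x):
>>> from phi import math
>>> from phi.math import spatial
>>> x = math.wrap([10, 20, 30, 40], spatial('x'))
>>> keep = math.wrap([True, False, True, False], spatial('x'))
>>> math.boolean_mask(x, 'x', keep)   # keeps the values 10 and 30
>>> x.x[keep]                         # equivalent alternative syntax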
def broadcast(f)
-
Function decorator for non-vectorized functions. When passing a Tensor argument to a broadcast function, the function is called once for each element of the tensor.
Only positional arguments, not keyword arguments, are broadcast.
See Also: map_()
Args
f
- Function.
Returns
Broadcast function
Expand source code
def broadcast(f):
    """
    Function decorator for non-vectorized functions.
    When passing a `Tensor` argument to a broadcast function, the function is called once for each element of the tensor.

    Only positional arguments, not keyword arguments, are broadcast.

    See Also:
        `phi.math.map`

    Args:
        f: Function.

    Returns:
        Broadcast function
    """
    @wraps(f)
    def broadcast_(*args, **kwargs):
        return math.map_(f, *args, **kwargs)
    return broadcast_
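A sketch of the decorator (illustrative; ramp is a plain, non-vectorized Python function):
>>> from phi import math
>>> from phi.math import spatial
>>> @math.broadcast
... def ramp(x: float) -> float:
...     return x if x > 0 else 0.     # plain Python branching, not vectorized
>>> ramp(math.wrap([-1., 2.], spatial('x')))   # called once per element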
def c2b(value)
-
Change the type of all channel dimensions of value to batch dimensions. See rename_dims().
Expand source code
def c2b(value):
    """ Change the type of all *channel* dimensions of `value` to *batch* dimensions. See `rename_dims`. """
    return rename_dims(value, channel, batch)
def cast(x: ~MagicType, dtype: Union[phi.math.backend._dtype.DType, type]) ‑> ~OtherMagicType
-
Casts x to a different data type.
Implementations:
- NumPy: x.astype()
- PyTorch: x.to()
- TensorFlow: tf.cast
- Jax: jax.numpy.array
See Also: to_float(), to_int32(), to_int64(), to_complex().
Args
x
- Tensor
dtype
- New data type as phi.math.DType, e.g. DType(int, 16).
Returns
Tensor with data type dtype
Expand source code
def cast(x: MagicType, dtype: Union[DType, type]) -> OtherMagicType:
    """
    Casts `x` to a different data type.

    Implementations:

    * NumPy: [`x.astype()`](numpy.ndarray.astype)
    * PyTorch: [`x.to()`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.to)
    * TensorFlow: [`tf.cast`](https://www.tensorflow.org/api_docs/python/tf/cast)
    * Jax: [`jax.numpy.array`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.array.html)

    See Also:
        `to_float`, `to_int32`, `to_int64`, `to_complex`.

    Args:
        x: `Tensor`
        dtype: New data type as `phi.math.DType`, e.g. `DType(int, 16)`.

    Returns:
        `Tensor` with data type `dtype`
    """
    if not isinstance(dtype, DType):
        dtype = DType.as_dtype(dtype)
    if hasattr(x, '__cast__'):
        return x.__cast__(dtype)
    elif isinstance(x, (Number, bool)):
        return dtype.kind(x)
    elif isinstance(x, PhiTreeNode):
        attrs = {key: getattr(x, key) for key in value_attributes(x)}
        new_attrs = {k: cast(v, dtype) for k, v in attrs.items()}
        return copy_with(x, **new_attrs)
    try:
        backend = choose_backend(x)
        return backend.cast(x, dtype)
    except NoBackendFound:
        if dtype.kind == bool:
            return bool(x)
        raise ValueError(f"Cannot cast object of type '{type(x).__name__}'")
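A minimal usage sketch (illustrative):
>>> from phi import math
>>> from phi.math import spatial, DType
>>> t = math.ones(spatial(x=3))
>>> math.cast(t, DType(int, 32)).dtype   # float32 -> int32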
def ceil(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Computes ⌈x⌉ of the Tensor or PhiTreeNode x.
Expand source code
def ceil(x) -> Union[Tensor, PhiTreeNode]:
    """ Computes *⌈x⌉* of the `Tensor` or `phi.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.ceil)
def channel(*args, **dims: Union[int, str, tuple, list, phi.math._shape.Shape]) ‑> phi.math._shape.Shape
-
Returns the channel dimensions of an existing Shape or creates a new Shape with only channel dimensions.
Usage for filtering channel dimensions:
>>> channel_dims = channel(shape)
>>> channel_dims = channel(tensor)
Usage for creating a Shape with only channel dimensions:
>>> channel_shape = channel('undef', vector=2)
(vector=2, undef=None)
Here, the dimension undef is created with an undefined size of None. Undefined sizes are automatically filled in by tensor(), wrap(), stack() and concat().
To create a shape with multiple types, use merge_shapes(), concat_shapes() or the syntax shape1 & shape2.
See Also: spatial(), batch(), instance()
Args
*args
- Either Shape or Tensor to filter, or names of dimensions with undefined sizes as str.
**dims
- Dimension sizes and names. Must be empty when used as a filter operation.
Returns
Shape containing only dimensions of type channel.
Expand source code
def channel(*args, **dims: Union[int, str, tuple, list, Shape]) -> Shape:
    """
    Returns the channel dimensions of an existing `Shape` or creates a new `Shape` with only channel dimensions.

    Usage for filtering channel dimensions:

    >>> channel_dims = channel(shape)
    >>> channel_dims = channel(tensor)

    Usage for creating a `Shape` with only channel dimensions:

    >>> channel_shape = channel('undef', vector=2)
    (vector=2, undef=None)

    Here, the dimension `undef` is created with an undefined size of `None`.
    Undefined sizes are automatically filled in by `tensor`, `wrap`, `stack` and `concat`.

    To create a shape with multiple types, use `merge_shapes()`, `concat_shapes()` or the syntax `shape1 & shape2`.

    See Also:
        `spatial`, `batch`, `instance`

    Args:
        *args: Either

            * `Shape` or `Tensor` to filter or
            * Names of dimensions with undefined sizes as `str`.

        **dims: Dimension sizes and names. Must be empty when used as a filter operation.

    Returns:
        `Shape` containing only dimensions of type channel.
    """
    from .magic import Shaped
    if all(isinstance(arg, str) for arg in args) or dims:
        return _construct_shape(CHANNEL_DIM, '', *args, **dims)
    elif len(args) == 1 and isinstance(args[0], Shape):
        return args[0].channel
    elif len(args) == 1 and isinstance(args[0], Shaped):
        return shape(args[0]).channel
    else:
        raise AssertionError(f"channel() must be called either as a selector channel(Shape) or channel(Tensor) or as a constructor channel(*names, **dims). Got *args={args}, **dims={dims}")
def choose_backend(*values, prefer_default=False) ‑> phi.math.backend._backend.Backend
-
Choose backend for given Tensor or native tensor values. Backends need to be registered to be available, e.g. via the global import phi.<backend> or detect_backends().
Args
*values
- Sequence of Tensors, native tensors or constants.
prefer_default
- Whether to always select the default backend if it can work with values, see default_backend().
Returns
The selected Backend
Expand source code
def choose_backend_t(*values, prefer_default=False) -> Backend:
    """
    Choose backend for given `Tensor` or native tensor values.
    Backends need to be registered to be available, e.g. via the global import `phi.<backend>` or `phi.detect_backends()`.

    Args:
        *values: Sequence of `Tensor`s, native tensors or constants.
        prefer_default: Whether to always select the default backend if it can work with `values`, see `default_backend()`.

    Returns:
        The selected `phi.math.backend.Backend`
    """
    natives = sum([v._natives() if isinstance(v, Tensor) else (v,) for v in values], ())
    return choose_backend(*natives, prefer_default=prefer_default)
def clip(x: phi.math._tensors.Tensor, lower_limit: Union[phi.math._tensors.Tensor, float], upper_limit: Union[phi.math._tensors.Tensor, float])
-
Limits the values of the Tensor x to lie between lower_limit and upper_limit (inclusive).
Expand source code
def clip(x: Tensor, lower_limit: Union[float, Tensor], upper_limit: Union[float, Tensor]):
    """ Limits the values of the `Tensor` `x` to lie between `lower_limit` and `upper_limit` (inclusive). """
    if isinstance(lower_limit, Number) and isinstance(upper_limit, Number):

        def clip_(x):
            return x._op1(lambda native: choose_backend(native).clip(native, lower_limit, upper_limit))

        return broadcast_op(clip_, [x])
    else:
        return maximum(lower_limit, minimum(x, upper_limit))
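A minimal usage sketch (illustrative values):
>>> from phi import math
>>> from phi.math import spatial
>>> math.clip(math.wrap([-0.5, 0.3, 1.7], spatial('x')), 0., 1.)   # -> 0.0, 0.3, 1.0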
def close(*tensors, rel_tolerance=1e-05, abs_tolerance=0) ‑> bool
-
Checks whether all tensors have equal values within the specified tolerance.
Does not check that the shapes exactly match. Tensors with different shapes are reshaped before comparing.
Args
*tensors
- Tensor or tensor-like (constant) each
- relative tolerance (Default value = 1e-5)
abs_tolerance
- absolute tolerance (Default value = 0)
Returns
Whether all given tensors are equal to the first tensor within the specified tolerance.
Expand source code
def close(*tensors, rel_tolerance=1e-5, abs_tolerance=0) -> bool:
    """
    Checks whether all tensors have equal values within the specified tolerance.

    Does not check that the shapes exactly match.
    Tensors with different shapes are reshaped before comparing.

    Args:
        *tensors: `Tensor` or tensor-like (constant) each
        rel_tolerance: relative tolerance (Default value = 1e-5)
        abs_tolerance: absolute tolerance (Default value = 0)

    Returns:
        Whether all given tensors are equal to the first tensor within the specified tolerance.
    """
    tensors = [wrap(t) for t in tensors]
    for other in tensors[1:]:
        if not _close(tensors[0], other, rel_tolerance=rel_tolerance, abs_tolerance=abs_tolerance):
            return False
    return True
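A minimal usage sketch (illustrative):
>>> from phi import math
>>> from phi.math import spatial
>>> math.close(math.ones(spatial(x=3)), 1.0)                        # True
>>> math.close(math.ones(spatial(x=3)), 1.001, rel_tolerance=1e-2)  # True, within tolerance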
def closest_grid_values(grid: phi.math._tensors.Tensor, coordinates: phi.math._tensors.Tensor, extrap: e_.Extrapolation, stack_dim_prefix='closest_', **kwargs)
-
Finds the neighboring grid points in all spatial directions and returns their values. The result will have 2^d values for each vector in coordinates in d dimensions.
Args
grid
- grid data. The grid is spanned by the spatial dimensions of the tensor
coordinates
- tensor with 1 channel dimension holding vectors pointing to locations in grid index space
extrap
- grid extrapolation
stack_dim_prefix
- For each spatial dimension dim, stacks lower and upper closest values along dimension stack_dim_prefix+dim.
kwargs
- Additional information for the extrapolation.
Returns
Tensor of shape (batch, coord_spatial, grid_spatial=(2, 2, …), grid_channel)
Expand source code
def closest_grid_values(grid: Tensor, coordinates: Tensor, extrap: 'e_.Extrapolation', stack_dim_prefix='closest_', **kwargs):
    """
    Finds the neighboring grid points in all spatial directions and returns their values.
    The result will have 2^d values for each vector in coordinates in d dimensions.

    Args:
        grid: grid data. The grid is spanned by the spatial dimensions of the tensor
        coordinates: tensor with 1 channel dimension holding vectors pointing to locations in grid index space
        extrap: grid extrapolation
        stack_dim_prefix: For each spatial dimension `dim`, stacks lower and upper closest values along dimension `stack_dim_prefix+dim`.
        kwargs: Additional information for the extrapolation.

    Returns:
        Tensor of shape (batch, coord_spatial, grid_spatial=(2, 2,...), grid_channel)
    """
    return broadcast_op(functools.partial(_closest_grid_values, extrap=extrap, stack_dim_prefix=stack_dim_prefix, pad_kwargs=kwargs), [grid, coordinates])
def concat(values: Union[tuple, list], dim: Union[str, phi.math._shape.Shape], expand_values=False, **kwargs)
-
Concatenates a sequence of Shapable objects, e.g. Tensor, along one dimension. All values must have the same spatial, instance and channel dimensions and their sizes must be equal, except for dim. Batch dimensions will be added as needed.
Args
values
- Tuple or list of Shapable, such as Tensor
dim
- Concatenation dimension, must be present in all values. The size along dim is determined from values and can be set to undefined (None).
expand_values
- If True, will first add missing dimensions to all values, not just batch dimensions. This allows tensors with different dimensions to be concatenated. The resulting tensor will have all dimensions that are present in values.
**kwargs
- Additional keyword arguments required by specific implementations. Adding spatial dimensions to fields requires the bounds: Box argument specifying the physical extent of the new dimensions. Adding batch dimensions must always work without keyword arguments.
Returns
Concatenated Tensor
Examples
>>> concat([math.zeros(batch(b=10)), math.ones(batch(b=10))], 'b')
(bᵇ=20) 0.500 ± 0.500 (0e+00...1e+00)
>>> concat([vec(x=1, y=0), vec(z=2.)], 'vector')
(x=1.000, y=0.000, z=2.000) float64
Expand source code
def concat(values: Union[tuple, list], dim: Union[str, Shape], expand_values=False, **kwargs):
    """
    Concatenates a sequence of `phi.math.magic.Shapable` objects, e.g. `Tensor`, along one dimension.
    All values must have the same spatial, instance and channel dimensions and their sizes must be equal, except for `dim`.
    Batch dimensions will be added as needed.

    Args:
        values: Tuple or list of `phi.math.magic.Shapable`, such as `phi.math.Tensor`
        dim: Concatenation dimension, must be present in all `values`.
            The size along `dim` is determined from `values` and can be set to undefined (`None`).
        expand_values: If `True`, will first add missing dimensions to all values, not just batch dimensions.
            This allows tensors with different dimensions to be concatenated.
            The resulting tensor will have all dimensions that are present in `values`.
        **kwargs: Additional keyword arguments required by specific implementations.
            Adding spatial dimensions to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions.
            Adding batch dimensions must always work without keyword arguments.

    Returns:
        Concatenated `Tensor`

    Examples:
        >>> concat([math.zeros(batch(b=10)), math.ones(batch(b=10))], 'b')
        (bᵇ=20) 0.500 ± 0.500 (0e+00...1e+00)

        >>> concat([vec(x=1, y=0), vec(z=2.)], 'vector')
        (x=1.000, y=0.000, z=2.000) float64
    """
    assert len(values) > 0, f"concat() got empty sequence {values}"
    if isinstance(dim, Shape):
        dim = dim.name
    assert isinstance(dim, str), f"dim must be a str or Shape but got '{dim}' of type {type(dim)}"
    # Add missing dimensions
    if expand_values:
        all_dims = merge_shapes(*values, allow_varying_sizes=True)
        all_dims = all_dims.with_dim_size(dim, 1, keep_item_names=False)
        values = [expand(v, all_dims.without(shape(v))) for v in values]
    else:
        for v in values:
            assert dim in shape(v), f"dim must be present in the shapes of all values but got value {type(v).__name__} with shape {shape(v)}"
        for v in values[1:]:
            assert set(non_batch(v).names) == set(non_batch(values[0]).names), f"Concatenated values must have the same non-batch dimensions but got {non_batch(values[0])} and {non_batch(v)}"
        all_batch_dims = merge_shapes(*[batch(v) for v in values])
        values = [expand(v, all_batch_dims) for v in values]
    # --- First try __concat__ ---
    for v in values:
        if isinstance(v, Shapable):
            if hasattr(v, '__concat__'):
                result = v.__concat__(values, dim, **kwargs)
                if result is not NotImplemented:
                    assert isinstance(result, Shapable), f"__concat__ must return a Shapable object but got {type(result).__name__} from {type(v).__name__} {v}"
                    return result
    # --- Next: try concat attributes for tree nodes ---
    if all(isinstance(v, PhiTreeNode) for v in values):
        attributes = all_attributes(values[0])
        if attributes and all(all_attributes(v) == attributes for v in values):
            new_attrs = {}
            for a in attributes:
                common_shape = merge_shapes(*[shape(getattr(v, a)).without(dim) for v in values])
                a_values = [expand(getattr(v, a), common_shape & shape(v).only(dim)) for v in values]  # expand by dim if missing, and dims of others
                new_attrs[a] = concat(a_values, dim, **kwargs)
            return copy_with(values[0], **new_attrs)
        else:
            warnings.warn(f"Failed to concat values using value attributes because attributes differ among values {values}")
    # --- Fallback: slice and stack ---
    try:
        unstacked = sum([unstack(v, dim) for v in values], ())
    except MagicNotImplemented:
        raise MagicNotImplemented(f"concat: No value implemented __concat__ and not all values were Sliceable along {dim}. values = {[type(v) for v in values]}")
    if len(unstacked) > 8:
        warnings.warn(f"concat() default implementation is slow on large dimensions ({dim}={len(unstacked)}). Please implement __concat__()", RuntimeWarning, stacklevel=2)
    dim = shape(values[0])[dim].with_size(None)
    try:
        return stack(unstacked, dim, **kwargs)
    except MagicNotImplemented:
        raise MagicNotImplemented(f"concat: No value implemented __concat__ and slices could not be stacked. values = {[type(v) for v in values]}")
def concat_shapes(*shapes: Union[phi.math._shape.Shape, Any]) ‑> phi.math._shape.Shape
-
Creates a Shape listing the dimensions of all shapes in the given order.
See Also: merge_shapes().
Args
*shapes
- Shapes to concatenate. No two shapes may contain a dimension with the same name.
Returns
Combined Shape.
Expand source code
def concat_shapes(*shapes: Union[Shape, Any]) -> Shape:
    """
    Creates a `Shape` listing the dimensions of all `shapes` in the given order.

    See Also:
        `merge_shapes()`.

    Args:
        *shapes: Shapes to concatenate. No two shapes may contain a dimension with the same name.

    Returns:
        Combined `Shape`.
    """
    shapes = [obj if isinstance(obj, Shape) else shape(obj) for obj in shapes]
    names = sum([s.names for s in shapes], ())
    if len(set(names)) != len(names):
        raise IncompatibleShapes(f"Cannot concatenate shapes {list(shapes)}. Duplicate dimension names are not allowed.")
    sizes = sum([s.sizes for s in shapes], ())
    types = sum([s.types for s in shapes], ())
    item_names = sum([s.item_names for s in shapes], ())
    return Shape(sizes, names, types, item_names)
def conjugate(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
See Also: imag(), real().
Args
x
- Real or complex Tensor or PhiTreeNode or native tensor.
Returns
Complex conjugate of x if x is complex, else x.
Expand source code
def conjugate(x) -> Union[Tensor, PhiTreeNode]:
    """
    See Also:
        `imag()`, `real()`.

    Args:
        x: Real or complex `Tensor` or `phi.math.magic.PhiTreeNode` or native tensor.

    Returns:
        Complex conjugate of `x` if `x` is complex, else `x`.
    """
    return _backend_op1(x, Backend.conj)
def const_vec(value: Union[phi.math._tensors.Tensor, float], dim: Union[phi.math._shape.Shape, tuple, list, str])
-
Creates a single-dimension tensor with all values equal to value. value is not converted to the default backend, even when it is a Python primitive.
Args
value
- Value for filling the vector.
dim
- Either single-dimension non-spatial Shape or Shape consisting of any number of spatial dimensions. In the latter case, a new channel dimension named 'vector' will be created from the spatial shape.
Returns
Tensor
Expand source code
def const_vec(value: Union[float, Tensor], dim: Union[Shape, tuple, list, str]):
    """
    Creates a single-dimension tensor with all values equal to `value`.
    `value` is not converted to the default backend, even when it is a Python primitive.

    Args:
        value: Value for filling the vector.
        dim: Either single-dimension non-spatial Shape or `Shape` consisting of any number of spatial dimensions.
            In the latter case, a new channel dimension named `'vector'` will be created from the spatial shape.

    Returns:
        `Tensor`
    """
    if isinstance(dim, Shape):
        if dim.spatial:
            assert not dim.non_spatial, f"When creating a vector given spatial dimensions, the shape may only contain spatial dimensions but got {dim}"
            shape = channel(vector=dim.names)
        else:
            assert dim.rank == 1, f"Cannot create vector from {dim}"
            shape = dim
    else:
        dims = parse_dim_order(dim)
        shape = channel(vector=dims)
    return wrap([value] * shape.size, shape)
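A minimal usage sketch (illustrative dimension names):
>>> from phi import math
>>> from phi.math import spatial
>>> math.const_vec(1., spatial('x,y'))   # channel dim 'vector' with item names x, y, both set to 1.0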
def convert(x, backend: phi.math.backend._backend.Backend = None, use_dlpack=True)
-
Convert the native representation of a Tensor or PhiTreeNode to the native format of backend.
Warning: This operation breaks the automatic differentiation chain.
See Also: phi.math.backend.convert().
Args
x
- Tensor to convert. If x is a PhiTreeNode, its variable attributes are converted.
backend
- Target backend. If None, uses the current default backend, see default_backend().
Returns
Tensor with native representation belonging to backend.
Expand source code
def convert(x, backend: Backend = None, use_dlpack=True):
    """
    Convert the native representation of a `Tensor` or `phi.math.magic.PhiTreeNode` to the native format of `backend`.

    *Warning*: This operation breaks the automatic differentiation chain.

    See Also:
        `phi.math.backend.convert()`.

    Args:
        x: `Tensor` to convert. If `x` is a `phi.math.magic.PhiTreeNode`, its variable attributes are converted.
        backend: Target backend. If `None`, uses the current default backend, see `phi.math.backend.default_backend()`.

    Returns:
        `Tensor` with native representation belonging to `backend`.
    """
    if isinstance(x, Tensor):
        return x._op1(lambda native: b_convert(native, backend, use_dlpack=use_dlpack))
    elif isinstance(x, PhiTreeNode):
        return copy_with(x, **{a: convert(getattr(x, a), backend, use_dlpack=use_dlpack) for a in variable_attributes(x)})
    else:
        return b_convert(x, backend, use_dlpack=use_dlpack)
def convolve(value: phi.math._tensors.Tensor, kernel: phi.math._tensors.Tensor, extrapolation: e_.Extrapolation = None) ‑> phi.math._tensors.Tensor
-
Computes the convolution of value and kernel along the spatial axes of kernel.
The channel dimensions of value are reduced against the equally named dimensions of kernel. The result will have the non-reduced channel dimensions of kernel.
Args
value
- Tensor whose shape includes all spatial dimensions of kernel.
kernel
- Tensor used as convolutional filter.
extrapolation
- If not None, pads value so that the result has the same shape as value.
Returns
Tensor
Expand source code
def convolve(value: Tensor, kernel: Tensor, extrapolation: 'e_.Extrapolation' = None) -> Tensor:
    """
    Computes the convolution of `value` and `kernel` along the spatial axes of `kernel`.

    The channel dimensions of `value` are reduced against the equally named dimensions of `kernel`.
    The result will have the non-reduced channel dimensions of `kernel`.

    Args:
        value: `Tensor` whose shape includes all spatial dimensions of `kernel`.
        kernel: `Tensor` used as convolutional filter.
        extrapolation: If not None, pads `value` so that the result has the same shape as `value`.

    Returns:
        `Tensor`
    """
    assert all(dim in value.shape for dim in kernel.shape.spatial.names), f"Value must have all spatial dimensions of kernel but got value {value} kernel {kernel}"
    conv_shape = kernel.shape.spatial
    in_channels = value.shape.channel
    out_channels = kernel.shape.channel.without(in_channels)
    batch = value.shape.batch & kernel.shape.batch
    if extrapolation is not None and extrapolation != e_.ZERO:
        value = pad(value, {dim: (kernel.shape.get_size(dim) // 2, (kernel.shape.get_size(dim) - 1) // 2) for dim in conv_shape.names}, extrapolation)
    native_kernel = reshaped_native(kernel, (batch, out_channels, in_channels, *conv_shape.names), force_expand=in_channels)
    native_value = reshaped_native(value, (batch, in_channels, *conv_shape.names), force_expand=batch)
    backend = choose_backend(native_value, native_kernel)
    native_result = backend.conv(native_value, native_kernel, zero_padding=extrapolation == e_.ZERO)
    result = reshaped_tensor(native_result, (batch, out_channels, *conv_shape))
    return result
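A minimal 1D smoothing sketch (illustrative sizes and kernel; assumes the default backend):
>>> from phi import math
>>> from phi.math import spatial, extrapolation
>>> values = math.random_uniform(spatial(x=64))
>>> kernel = math.wrap([0.25, 0.5, 0.25], spatial('x'))
>>> smoothed = math.convolve(values, kernel, extrapolation.ZERO)   # same shape as values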
def copy(value: phi.math._tensors.Tensor)
-
Copies the data buffer and encapsulating Tensor object.
Args
value
- Tensor to be copied.
Returns
Copy of value.
Expand source code
def copy(value: Tensor):
    """
    Copies the data buffer and encapsulating `Tensor` object.

    Args:
        value: `Tensor` to be copied.

    Returns:
        Copy of `value`.
    """
    if value._is_tracer:
        warnings.warn("Tracing tensors cannot be copied.", RuntimeWarning)
        return value
    return value._op1(lambda native: choose_backend(native).copy(native))
def copy_with(obj: ~PhiTreeNodeType, **updates) ‑> ~PhiTreeNodeType
-
Creates a copy of the given PhiTreeNode with updated values as specified in updates.
If obj overrides __with_attrs__, the copy will be created via that specific implementation. Otherwise, the copy module and setattr will be used.
Args
obj
- PhiTreeNode
**updates
- Values to be replaced.
Returns
Copy of obj with updated values.
Expand source code
def replace(obj: PhiTreeNodeType, **updates) -> PhiTreeNodeType:
    """
    Creates a copy of the given `phi.math.magic.PhiTreeNode` with updated values as specified in `updates`.

    If `obj` overrides `__with_attrs__`, the copy will be created via that specific implementation.
    Otherwise, the `copy` module and `setattr` will be used.

    Args:
        obj: `phi.math.magic.PhiTreeNode`
        **updates: Values to be replaced.

    Returns:
        Copy of `obj` with updated values.
    """
    if hasattr(obj, '__with_attrs__'):
        return obj.__with_attrs__(**updates)
    elif isinstance(obj, (Number, bool)):
        return obj
    elif dataclasses.is_dataclass(obj):
        return dataclasses.replace(obj, **updates)
    else:
        cpy = copy.copy(obj)
        for attr, value in updates.items():
            setattr(cpy, attr, value)
        return cpy
def cos(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Computes cos(x) of the Tensor or PhiTreeNode x.
Expand source code
def cos(x) -> Union[Tensor, PhiTreeNode]:
    """ Computes *cos(x)* of the `Tensor` or `phi.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.cos)
def cosh(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Computes cosh(x) of the Tensor or PhiTreeNode x.
Expand source code
def cosh(x) -> Union[Tensor, PhiTreeNode]:
    """ Computes *cosh(x)* of the `Tensor` or `phi.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.cosh)
def cross_product(vec1: phi.math._tensors.Tensor, vec2: phi.math._tensors.Tensor) ‑> phi.math._tensors.Tensor
-
Computes the cross product of two vectors in 2D.
Args
vec1
- Tensor with a single channel dimension called 'vector'
vec2
- Tensor with a single channel dimension called 'vector'
Returns
Tensor
Expand source code
def cross_product(vec1: Tensor, vec2: Tensor) -> Tensor:
    """
    Computes the cross product of two vectors in 2D.

    Args:
        vec1: `Tensor` with a single channel dimension called `'vector'`
        vec2: `Tensor` with a single channel dimension called `'vector'`

    Returns:
        `Tensor`
    """
    vec1 = math.tensor(vec1)
    vec2 = math.tensor(vec2)
    spatial_rank = vec1.vector.size if 'vector' in vec1.shape else vec2.vector.size
    if spatial_rank == 2:  # Curl in 2D
        assert vec2.vector.exists
        if vec1.vector.exists:
            v1_x, v1_y = vec1.vector
            v2_x, v2_y = vec2.vector
            return v1_x * v2_y - v1_y * v2_x
        else:
            v2_x, v2_y = vec2.vector
            return vec1 * math.stack_tensors([-v2_y, v2_x], channel('vector'))
    elif spatial_rank == 3:  # Curl in 3D
        raise NotImplementedError(f'spatial_rank={spatial_rank} not yet implemented')
    else:
        raise AssertionError(f'dims = {spatial_rank}. Vector product not available in > 3 dimensions')
def cumulative_sum(x: phi.math._tensors.Tensor, dim: Union[str, tuple, list, set, phi.math._shape.Shape, Callable])
-
Performs a cumulative sum of x along dim.
Implementations:
- NumPy: cumsum
- PyTorch: cumsum
- TensorFlow: cumsum
- Jax: cumsum
Args
x
- Tensor
dim
- Dimension along which to sum, as str or Shape.
Returns
Tensor with the same shape as x.
Expand source code
def cumulative_sum(x: Tensor, dim: DimFilter):
    """
    Performs a cumulative sum of `x` along `dim`.

    Implementations:

    * NumPy: [`cumsum`](https://numpy.org/doc/stable/reference/generated/numpy.cumsum.html)
    * PyTorch: [`cumsum`](https://pytorch.org/docs/stable/generated/torch.cumsum.html)
    * TensorFlow: [`cumsum`](https://www.tensorflow.org/api_docs/python/tf/math/cumsum)
    * Jax: [`cumsum`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.cumsum.html)

    Args:
        x: `Tensor`
        dim: Dimension along which to sum, as `str` or `Shape`.

    Returns:
        `Tensor` with the same shape as `x`.
    """
    dim = x.shape.only(dim)
    assert len(dim) == 1, f"dim must be a single dimension but got {dim}"
    native_x = x.native(x.shape)
    native_result = choose_backend(native_x).cumsum(native_x, x.shape.index(dim))
    return NativeTensor(native_result, x.shape)
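A minimal usage sketch (illustrative):
>>> from phi import math
>>> from phi.math import spatial
>>> math.cumulative_sum(math.wrap([1, 2, 3], spatial('x')), 'x')   # -> 1, 3, 6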
def custom_gradient(f: Callable, gradient: Callable, auxiliary_args: str = '')
-
Creates a function based on f that uses a custom gradient for the backpropagation pass.
Warning: This method can lead to memory leaks if the gradient function is not called. Make sure to pass tensors without gradients if the gradient is not required, see stop_gradient().
Args
f
- Forward function mapping Tensor arguments x to a single Tensor output or sequence of tensors y.
gradient
- Function to compute the vector-Jacobian product for backpropagation. Will be called as gradient(input_dict, *y, *dy) -> output_dict where input_dict contains all named arguments passed to the forward function and output_dict contains only those parameters for which a gradient is defined.
auxiliary_args
- Comma-separated parameter names of arguments that are not relevant to backpropagation.
Returns
Function with similar signature and return values as f. However, the returned function does not support keyword arguments.
Expand source code
def custom_gradient(f: Callable, gradient: Callable, auxiliary_args: str = ''):
    """
    Creates a function based on `f` that uses a custom gradient for the backpropagation pass.

    *Warning* This method can lead to memory leaks if the gradient function is not called.
    Make sure to pass tensors without gradients if the gradient is not required, see `stop_gradient()`.

    Args:
        f: Forward function mapping `Tensor` arguments `x` to a single `Tensor` output or sequence of tensors `y`.
        gradient: Function to compute the vector-Jacobian product for backpropagation.
            Will be called as `gradient(input_dict, *y, *dy) -> output_dict` where `input_dict` contains all named arguments passed to the forward function
            and `output_dict` contains only those parameters for which a gradient is defined.
        auxiliary_args: Comma-separated parameter names of arguments that are not relevant to backpropagation.

    Returns:
        Function with similar signature and return values as `f`. However, the returned function does not support keyword arguments.
    """
    auxiliary_args = set(s.strip() for s in auxiliary_args.split(',') if s.strip())
    return CustomGradientFunction(f, gradient, auxiliary_args)
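A sketch of the calling convention (illustrative function names; the custom gradient clamps the sqrt singularity at x=0):
>>> from phi import math
>>> def f(x):
...     return math.sqrt(x)
>>> def f_grad(inputs, y, dy):
...     return {'x': dy / (2 * math.maximum(y, 1e-6))}   # dict keyed by the forward argument name
>>> safe_sqrt = math.custom_gradient(f, f_grad)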
def degrees(deg)
-
Convert degrees to radians.
Expand source code
def degrees(deg):
    """ Convert degrees to radians. """
    return deg * (np.pi / 180.)
def dense(x: phi.math._tensors.Tensor) ‑> phi.math._tensors.Tensor
-
Convert a sparse tensor representation to an equivalent dense one in which all values are explicitly stored contiguously in memory.
Args
x
- Any Tensor. Python primitives like float, int or bool will be converted to Tensors in the process.
Returns
Dense tensor.
Expand source code
def dense(x: Tensor) -> Tensor:
    """
    Convert a sparse tensor representation to an equivalent dense one in which all values are explicitly stored contiguously in memory.

    Args:
        x: Any `Tensor`. Python primitives like `float`, `int` or `bool` will be converted to `Tensors` in the process.

    Returns:
        Dense tensor.
    """
    from phi.math import reshaped_tensor
    if isinstance(x, SparseCoordinateTensor):
        from ._ops import scatter
        return scatter(x.shape, x._indices, x._values, mode='add', outside_handling='undefined')
    elif isinstance(x, CompressedSparseMatrix):
        ind_batch, channels, native_indices, native_pointers, native_values, native_shape = x._native_csr_components()
        native_dense = x.default_backend.csr_to_dense(native_indices, native_pointers, native_values, native_shape)
        return reshaped_tensor(native_dense, [ind_batch, x._compressed_dims, x._uncompressed_dims, channels])
    elif isinstance(x, NativeTensor):
        return x
    elif isinstance(x, Tensor):
        return cached(x)
    elif isinstance(x, (Number, bool)):
        return wrap(x)
def dim_mask(all_dims: Union[phi.math._shape.Shape, tuple, list], dims: Union[str, tuple, list, set, phi.math._shape.Shape, Callable], mask_dim=(vectorᶜ=None)) ‑> phi.math._tensors.Tensor
-
Creates a masked vector with entries 1 for dims and 0 for all other dimensions in all_dims.
Args
all_dims
- All dimensions for which the vector should have an entry.
dims
- Dimensions marked as 1.
mask_dim
- Dimension of the masked vector. Item names are assigned automatically.
Returns
Tensor
Expand source code
def dim_mask(all_dims: Union[Shape, tuple, list], dims: DimFilter, mask_dim=channel('vector')) -> Tensor:
    """
    Creates a masked vector with entries 1 for `dims` and 0 for all other dimensions in `all_dims`.

    Args:
        all_dims: All dimensions for which the vector should have an entry.
        dims: Dimensions marked as 1.
        mask_dim: Dimension of the masked vector. Item names are assigned automatically.

    Returns:
        `Tensor`
    """
    assert isinstance(all_dims, (Shape, tuple, list)), f"all_dims must be a tuple or Shape but got {type(all_dims)}"
    assert isinstance(mask_dim, Shape) and mask_dim.rank == 1, f"mask_dim must be a single-dimension Shape but got {mask_dim}"
    if isinstance(all_dims, (tuple, list)):
        all_dims = spatial(*all_dims)
    dims = all_dims.only(dims)
    mask = [1 if dim in dims else 0 for dim in all_dims]
    mask_dim = mask_dim.with_size(all_dims.names)
    return wrap(mask, mask_dim)
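A minimal usage sketch (illustrative dimension names):
>>> from phi import math
>>> from phi.math import spatial
>>> math.dim_mask(spatial('x,y,z'), 'y')   # -> (0, 1, 0) along channel dim 'vector'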
def divide_no_nan(x: Union[phi.math._tensors.Tensor, float], y: Union[phi.math._tensors.Tensor, float])
-
Computes x/y with the Tensors x and y but returns 0 where y=0.
Expand source code
def safe_div(x: Union[float, Tensor], y: Union[float, Tensor]):
    """ Computes *x/y* with the `Tensor`s `x` and `y` but returns 0 where *y=0*. """
    return custom_op2(x, y,
                      l_operator=safe_div,
                      l_native_function=lambda x_, y_: choose_backend(x_, y_).divide_no_nan(x_, y_),
                      r_operator=lambda y_, x_: safe_div(x_, y_),
                      r_native_function=lambda y_, x_: choose_backend(x_, y_).divide_no_nan(x_, y_),
                      op_name='divide_no_nan')
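A minimal usage sketch (illustrative values):
>>> from phi import math
>>> from phi.math import spatial
>>> math.safe_div(math.wrap([1., 2.], spatial('x')), math.wrap([0., 4.], spatial('x')))   # -> 0.0, 0.5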
def dot(x: phi.math._tensors.Tensor, x_dims: Union[str, tuple, list, set, phi.math._shape.Shape, Callable], y: phi.math._tensors.Tensor, y_dims: Union[str, tuple, list, set, phi.math._shape.Shape, Callable]) ‑> phi.math._tensors.Tensor
-
Computes the dot product along the specified dimensions. Contracts x_dims with y_dims by first multiplying the elements and then summing them up.
For one dimension, this is equal to matrix-matrix or matrix-vector multiplication.
The function replaces the traditional dot / tensordot / matmul / einsum functions.
- NumPy: numpy.tensordot, numpy.einsum
- PyTorch: torch.tensordot, torch.einsum
- TensorFlow: tf.tensordot, tf.einsum
- Jax: jax.numpy.tensordot, jax.numpy.einsum
Args
x
- First Tensor
x_dims
- Dimensions of x to reduce against y
y
- Second Tensor
y_dims
- Dimensions of y to reduce against x.
Returns
Dot product as Tensor.
Expand source code
def dot(x: Tensor, x_dims: DimFilter, y: Tensor, y_dims: DimFilter) -> Tensor: """ Computes the dot product along the specified dimensions. Contracts `x_dims` with `y_dims` by first multiplying the elements and then summing them up. For one dimension, this is equal to matrix-matrix or matrix-vector multiplication. The function replaces the traditional `dot` / `tensordot` / `matmul` / `einsum` functions. * NumPy: [`numpy.tensordot`](https://numpy.org/doc/stable/reference/generated/numpy.tensordot.html), [`numpy.einsum`](https://numpy.org/doc/stable/reference/generated/numpy.einsum.html) * PyTorch: [`torch.tensordot`](https://pytorch.org/docs/stable/generated/torch.tensordot.html#torch.tensordot), [`torch.einsum`](https://pytorch.org/docs/stable/generated/torch.einsum.html) * TensorFlow: [`tf.tensordot`](https://www.tensorflow.org/api_docs/python/tf/tensordot), [`tf.einsum`](https://www.tensorflow.org/api_docs/python/tf/einsum) * Jax: [`jax.numpy.tensordot`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.tensordot.html), [`jax.numpy.einsum`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.einsum.html) Args: x: First `Tensor` x_dims: Dimensions of `x` to reduce against `y` y: Second `Tensor` y_dims: Dimensions of `y` to reduce against `x`. Returns: Dot product as `Tensor`. """ x_dims = x.shape.only(x_dims) y_dims = y.shape.only(y_dims) if not x_dims: assert y_dims.volume == 1, f"Cannot compute dot product between dimensions {x_dims} on {x.shape} and {y_dims} on {y.shape}" y = y[{d: 0 for d in y_dims.names}] return x * y if not y_dims: assert x_dims.volume == 1, f"Cannot compute dot product between dimensions {x_dims} on {x.shape} and {y_dims} on {y.shape}" x = x[{d: 0 for d in x_dims.names}] return x * y if isinstance(x, CompressedSparseMatrix): if isinstance(y, (CompressedSparseMatrix, SparseCoordinateTensor)): if x_dims.isdisjoint(sparse_dims(x)) and y_dims.isdisjoint(sparse_dims(y)): return x._op2(y, lambda vx, vy: dot(vx, x_dims, vy, y_dims), None, 'dot', '@') if x_dims.only(sparse_dims(x)) and y_dims.only(sparse_dims(y)): raise NotImplementedError("sparse-sparse multiplication not yet supported") raise NotImplementedError return dot_compressed_dense(x, x_dims, y, y_dims) elif isinstance(y, CompressedSparseMatrix): if isinstance(x, (CompressedSparseMatrix, SparseCoordinateTensor)): raise NotImplementedError("sparse-sparse multiplication not yet supported") return dot_compressed_dense(y, y_dims, x, x_dims) if isinstance(x, SparseCoordinateTensor): if isinstance(y, (CompressedSparseMatrix, SparseCoordinateTensor)): if x_dims.isdisjoint(sparse_dims(x)) and y_dims.isdisjoint(sparse_dims(y)): return x._op2(y, lambda vx, vy: dot(vx, x_dims, vy, y_dims), None, 'dot', '@') raise NotImplementedError("sparse-sparse multiplication not yet supported") return dot_coordinate_dense(x, x_dims, y, y_dims) elif isinstance(y, SparseCoordinateTensor): if isinstance(x, (CompressedSparseMatrix, SparseCoordinateTensor)): raise NotImplementedError("sparse-sparse multiplication not yet supported") return dot_coordinate_dense(y, y_dims, x, x_dims) x_native = x.native(x.shape) y_native = y.native(y.shape) backend = choose_backend(x_native, y_native) remaining_shape_x = x.shape.without(x_dims) remaining_shape_y = y.shape.without(y_dims) assert x_dims.volume == y_dims.volume, f"Failed to reduce {x_dims} against {y_dims} in dot product of {x.shape} and {y.shape}. Sizes do not match." 
if remaining_shape_y.isdisjoint(remaining_shape_x): # no shared batch dimensions -> tensordot result_native = backend.tensordot(x_native, x.shape.indices(x_dims), y_native, y.shape.indices(y_dims)) result_shape = concat_shapes(remaining_shape_x, remaining_shape_y) else: # shared batch dimensions -> einsum result_shape = merge_shapes(x.shape.without(x_dims), y.shape.without(y_dims)) REDUCE_LETTERS = list('ijklmn') KEEP_LETTERS = list('abcdefgh') x_letters = [(REDUCE_LETTERS if dim in x_dims else KEEP_LETTERS).pop(0) for dim in x.shape.names] letter_map = {dim: letter for dim, letter in zip(x.shape.names, x_letters)} REDUCE_LETTERS = list('ijklmn') y_letters = [] for dim in y.shape.names: if dim in y_dims: y_letters.append(REDUCE_LETTERS.pop(0)) else: if dim in x.shape and dim not in x_dims: y_letters.append(letter_map[dim]) else: next_letter = KEEP_LETTERS.pop(0) letter_map[dim] = next_letter y_letters.append(next_letter) keep_letters = [letter_map[dim] for dim in result_shape.names] subscripts = f'{"".join(x_letters)},{"".join(y_letters)}->{"".join(keep_letters)}' result_native = backend.einsum(subscripts, x_native, y_native) return NativeTensor(result_native, result_shape)
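For illustration, a minimal sketch of a matrix-vector product via dot() (the tensors and the '~col' dual-dimension selector below are assumed example values, not part of the API reference):

from phi import math
from phi.math import wrap, channel, dual

# Sketch: contract the dual 'col' dimension of the matrix with the
# channel 'col' dimension of the vector (matrix-vector multiplication).
matrix = wrap([[2., 0.], [0., 3.]], channel('row'), dual('col'))
vector = wrap([1., 4.], channel('col'))
result = math.dot(matrix, '~col', vector, 'col')
print(result)  # should give (2.0, 12.0) along 'row'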
def downsample2x(grid: phi.math._tensors.Tensor, padding: Extrapolation = boundary, dims: Union[str, tuple, list, set, phi.math._shape.Shape, Callable] = <function spatial>) ‑> phi.math._tensors.Tensor
-
Resamples a regular grid to half the number of spatial sample points per dimension. The grid values at the new points are determined via mean (linear interpolation).
Args
grid
- full size grid
padding
- grid extrapolation. Used to insert an additional value for odd spatial dims
dims
- dims along which down-sampling is applied. If None, down-sample along all spatial dims.
grid
- Tensor:
padding
- Extrapolation: (Default value = extrapolation.BOUNDARY)
dims
- tuple or None: (Default value = None)
Returns
half-size grid
Expand source code
def downsample2x(grid: Tensor, padding: Extrapolation = extrapolation.BOUNDARY, dims: DimFilter = spatial) -> Tensor: """ Resamples a regular grid to half the number of spatial sample points per dimension. The grid values at the new points are determined via mean (linear interpolation). Args: grid: full size grid padding: grid extrapolation. Used to insert an additional value for odd spatial dims dims: dims along which down-sampling is applied. If None, down-sample along all spatial dims. grid: Tensor: padding: Extrapolation: (Default value = extrapolation.BOUNDARY) dims: tuple or None: (Default value = None) Returns: half-size grid """ dims = grid.shape.only(dims).names odd_dimensions = [dim for dim in dims if grid.shape.get_size(dim) % 2 != 0] grid = math.pad(grid, {dim: (0, 1) for dim in odd_dimensions}, padding) for dim in dims: grid = (grid[{dim: slice(1, None, 2)}] + grid[{dim: slice(0, None, 2)}]) / 2 return grid
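A minimal usage sketch (example values assumed, not from the reference): downsampling a 4x4 grid yields a 2x2 grid whose entries are averages of the input values.

from phi import math
from phi.math import spatial

grid = math.random_uniform(spatial(x=4, y=4))  # example input
half = math.downsample2x(grid)  # averages pairs of values along x and y
print(half.shape)  # should give (xˢ=2, yˢ=2)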
def dtype(x) ‑> phi.math.backend._dtype.DType
-
Returns the data type of x.
Args
x
- Tensor or native tensor.
Returns
DType
Expand source code
def dtype(x) -> DType: """ Returns the data type of `x`. Args: x: `Tensor` or native tensor. Returns: `DType` """ if isinstance(x, Tensor): return x.dtype else: return choose_backend(x).dtype(x)
def dual(*args, **dims: Union[int, str, tuple, list, phi.math._shape.Shape]) ‑> phi.math._shape.Shape
-
Returns the dual dimensions of an existing Shape or creates a new Shape with only dual dimensions.
Dual dimensions are assigned the prefix ~ to distinguish them from regular dimensions. This way, a regular and a dual dimension of the same name can exist in one Shape.
Dual dimensions represent the input space and are typically only present on matrices or higher-order matrices. Dual dimensions behave like batch dimensions in regular operations, if supported. During matrix multiplication, they are matched against their regular counterparts by name (ignoring the ~ prefix).
Usage for filtering dual dimensions:
>>> dual_dims = dual(shape)
>>> dual_dims = dual(tensor)
Usage for creating a Shape with only dual dimensions:
>>> dual('undef', points=2)
(~undefᵈ=None, ~pointsᵈ=2)
Here, the dimension undef is created with an undefined size of None. Undefined sizes are automatically filled in by tensor(), wrap(), stack() and concat().
To create a shape with multiple types, use merge_shapes(), concat_shapes() or the syntax shape1 & shape2.
See Also: channel(), batch(), spatial()
Args
*args
- Either a Shape or Tensor to filter, or names of dimensions with undefined sizes as str.
**dims
- Dimension sizes and names. Must be empty when used as a filter operation.
Returns
Shape containing only dimensions of type dual.
Expand source code
def dual(*args, **dims: Union[int, str, tuple, list, Shape]) -> Shape: """ Returns the dual dimensions of an existing `Shape` or creates a new `Shape` with only dual dimensions. Dual dimensions are assigned the prefix `~` to distinguish them from regular dimensions. This way, a regular and dual dimension of the same name can exist in one `Shape`. Dual dimensions represent the input space and are typically only present on matrices or higher-order matrices. Dual dimensions behave like batch dimensions in regular operations, if supported. During matrix multiplication, they are matched against their regular counterparts by name (ignoring the `~` prefix). Usage for filtering dual dimensions: >>> dual_dims = dual(shape) >>> dual_dims = dual(tensor) Usage for creating a `Shape` with only dual dimensions: >>> dual('undef', points=2) (~undefᵈ=None, ~pointsᵈ=2) Here, the dimension `undef` is created with an undefined size of `None`. Undefined sizes are automatically filled in by `tensor`, `wrap`, `stack` and `concat`. To create a shape with multiple types, use `merge_shapes()`, `concat_shapes()` or the syntax `shape1 & shape2`. See Also: `channel`, `batch`, `spatial` Args: *args: Either * `Shape` or `Tensor` to filter or * Names of dimensions with undefined sizes as `str`. **dims: Dimension sizes and names. Must be empty when used as a filter operation. Returns: `Shape` containing only dimensions of type dual. """ from .magic import Shaped if all(isinstance(arg, str) for arg in args) or dims: return _construct_shape(DUAL_DIM, '~', *args, **dims) elif len(args) == 1 and isinstance(args[0], Shape): return args[0].dual elif len(args) == 1 and isinstance(args[0], Shaped): return shape(args[0]).dual else: raise AssertionError(f"dual() must be called either as a selector dual(Shape) or dual(Tensor) or as a constructor dual(*names, **dims). Got *args={args}, **dims={dims}")
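To illustrate the name matching described above, a minimal sketch (example tensors assumed): during @, the dual dimension ~vec of the matrix is contracted against the channel dimension vec of the vector.

from phi.math import wrap, channel, dual

matrix = wrap([[1., 2.], [3., 4.]], channel('out'), dual('vec'))
vector = wrap([1., 1.], channel('vec'))
print(matrix @ vector)  # should give (3.0, 7.0) along 'out'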
def enable_debug_checks()
-
Once called, additional type checks are enabled. This may result in a noticeable drop in performance.
Expand source code
def enable_debug_checks(): """ Once called, additional type checks are enabled. This may result in a noticeable drop in performance. """ DEBUG_CHECKS.append(True)
def exp(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Computes exp(x) of the Tensor or PhiTreeNode x.
Expand source code
def exp(x) -> Union[Tensor, PhiTreeNode]: """ Computes *exp(x)* of the `Tensor` or `phi.math.magic.PhiTreeNode` `x`. """ return _backend_op1(x, Backend.exp)
def expand(value, *dims: phi.math._shape.Shape, **kwargs)
-
Adds dimensions to a Tensor or tensor-like object by implicitly repeating the tensor values along the new dimensions. If value already contains any of the new dimensions, a size and type check is performed for these instead.
If any of dims varies along a dimension that is present neither in value nor in dims, it will also be added to value.
This function replaces the usual tile / repeat functions of NumPy, PyTorch, TensorFlow and Jax.
Additionally, it replaces the traditional unsqueeze / expand_dims functions.
Args
value
- Shapable, such as Tensor. For tree nodes, expands all value attributes by dims or the first variable attribute if no value attributes are set.
*dims
- Dimensions to be added as Shape
**kwargs
- Additional keyword arguments required by specific implementations. Adding spatial dimensions to fields requires the bounds: Box argument specifying the physical extent of the new dimensions. Adding batch dimensions must always work without keyword arguments.
Returns
Same type as value.
Expand source code
def expand(value, *dims: Shape, **kwargs):
    """
    Adds dimensions to a `Tensor` or tensor-like object by implicitly repeating the tensor values along the new dimensions.
    If `value` already contains any of the new dimensions, a size and type check is performed for these instead.

    If any of `dims` varies along a dimension that is present neither in `value` nor in `dims`, it will also be added to `value`.

    This function replaces the usual `tile` / `repeat` functions of
    [NumPy](https://numpy.org/doc/stable/reference/generated/numpy.tile.html),
    [PyTorch](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.repeat),
    [TensorFlow](https://www.tensorflow.org/api_docs/python/tf/tile) and
    [Jax](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.tile.html).

    Additionally, it replaces the traditional `unsqueeze` / `expand_dims` functions.

    Args:
        value: `phi.math.magic.Shapable`, such as `phi.math.Tensor`
            For tree nodes, expands all value attributes by `dims` or the first variable attribute if no value attributes are set.
        *dims: Dimensions to be added as `Shape`
        **kwargs: Additional keyword arguments required by specific implementations.
            Adding spatial dimensions to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions.
            Adding batch dimensions must always work without keyword arguments.

    Returns:
        Same type as `value`.
    """
    dims = concat_shapes(*dims)
    combined = merge_shapes(value, dims)  # check that existing sizes match
    if not dims.without(shape(value)):  # no new dims to add
        if set(dims) == set(shape(value).only(dims)):  # sizes and item names might differ, though
            return value
    dims &= combined.shape.without('dims')  # add missing non-uniform dims
    # --- First try __expand__ ---
    if hasattr(value, '__expand__'):
        result = value.__expand__(dims, **kwargs)
        if result is not NotImplemented:
            return result
    # --- Next try Tree Node ---
    if isinstance(value, PhiTreeNode):
        attributes = value_attributes(value) if hasattr(value, '__value_attrs__') else [variable_attributes(value)[0]]
        new_attributes = {a: expand(getattr(value, a), dims, **kwargs) for a in attributes}
        return copy_with(value, **new_attributes)
    # --- Fallback: stack ---
    if hasattr(value, '__stack__'):
        if dims.volume > 8:
            warnings.warn(f"expand() default implementation is slow on large shapes {dims}. Please implement __expand__() for {type(value).__name__} as defined in phi.math.magic", RuntimeWarning, stacklevel=2)
        for dim in reversed(dims):
            value = stack((value,) * dim.size, dim, **kwargs)
            assert value is not NotImplemented, "Value must implement either __expand__ or __stack__"
        return value
    try:  # value may be a native scalar
        from ._tensors import expand_tensor, wrap
        value = wrap(value)
    except ValueError:
        raise AssertionError(f"Cannot expand non-shapable object {type(value)}")
    return expand_tensor(value, dims)
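A minimal usage sketch (example shapes assumed): adding a batch dimension repeats the tensor implicitly, without copying data.

from phi import math
from phi.math import batch, spatial

x = math.zeros(spatial(x=3))
y = math.expand(x, batch(b=10))  # values are repeated implicitly
print(y.shape)  # should give (bᵇ=10, xˢ=3)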
def factor_ilu(matrix: phi.math._tensors.Tensor, iterations: int, safe=False)
-
Incomplete LU factorization for dense or sparse matrices.
For sparse matrices, keeps the sparsity pattern of matrix. L and U will be trimmed to the respective areas, i.e. stored upper elements in L will be dropped, unless this would lead to varying numbers of stored elements along a batch dimension.
Args
matrix
- Dense or sparse matrix to factor. Currently, compressed sparse matrices are decompressed before running the ILU algorithm.
iterations
- (Optional) Number of fixed-point iterations to perform. If not given, will be automatically determined from matrix size and sparsity.
safe
- If False (default), only matrices with a rank deficiency of up to 1 can be factored as all values of L and U are uniquely determined. For matrices with higher rank deficiencies, the result includes NaN values. If True, the algorithm runs slightly slower but can factor highly rank-deficient matrices as well. However, then L is underdetermined and unused values of L are set to 0. Rank deficiencies of 1 occur frequently in periodic settings but higher ones are rare.
Returns
L
- Lower-triangular matrix as Tensor with all diagonal elements equal to 1.
U
- Upper-triangular matrix as Tensor.
Examples
>>> matrix = wrap([[-2, 1, 0],
>>>                [1, -2, 1],
>>>                [0, 1, -2]], channel('row'), dual('col'))
>>> L, U = math.factor_ilu(matrix)
>>> math.print(L)
row=0      1.          0.          0.         along ~col
row=1     -0.5         1.          0.         along ~col
row=2      0.         -0.6666667   1.         along ~col
>>> math.print(L @ U, "L @ U")
L @ U
row=0     -2.   1.   0.  along ~col
row=1      1.  -2.   1.  along ~col
row=2      0.   1.  -2.  along ~col
Expand source code
def factor_ilu(matrix: Tensor, iterations: int, safe=False):
    """
    Incomplete LU factorization for dense or sparse matrices.

    For sparse matrices, keeps the sparsity pattern of `matrix`.
    L and U will be trimmed to the respective areas, i.e. stored upper elements in L will be dropped,
    unless this would lead to varying numbers of stored elements along a batch dimension.

    Args:
        matrix: Dense or sparse matrix to factor.
            Currently, compressed sparse matrices are decompressed before running the ILU algorithm.
        iterations: (Optional) Number of fixed-point iterations to perform.
            If not given, will be automatically determined from matrix size and sparsity.
        safe: If `False` (default), only matrices with a rank deficiency of up to 1 can be factored as all values of L and U are uniquely determined.
            For matrices with higher rank deficiencies, the result includes `NaN` values.
            If `True`, the algorithm runs slightly slower but can factor highly rank-deficient matrices as well.
            However, then L is underdetermined and unused values of L are set to 0.
            Rank deficiencies of 1 occur frequently in periodic settings but higher ones are rare.

    Returns:
        L: Lower-triangular matrix as `Tensor` with all diagonal elements equal to 1.
        U: Upper-triangular matrix as `Tensor`.

    Examples:
        >>> matrix = wrap([[-2, 1, 0],
        >>>                [1, -2, 1],
        >>>                [0, 1, -2]], channel('row'), dual('col'))
        >>> L, U = math.factor_ilu(matrix)
        >>> math.print(L)
        row=0      1.          0.          0.         along ~col
        row=1     -0.5         1.          0.         along ~col
        row=2      0.         -0.6666667   1.         along ~col
        >>> math.print(L @ U, "L @ U")
        L @ U
        row=0     -2.   1.   0.  along ~col
        row=1      1.  -2.   1.  along ~col
        row=2      0.   1.  -2.  along ~col
    """
    if isinstance(matrix, CompressedSparseMatrix):
        matrix = matrix.decompress()
    if isinstance(matrix, SparseCoordinateTensor):
        ind_batch, channels, indices, values, shape = matrix._native_coo_components(dual, matrix=True)
        (l_idx_nat, l_val_nat), (u_idx_nat, u_val_nat) = incomplete_lu_coo(indices, values, shape, iterations, safe)
        col_dims = matrix._shape.only(dual)
        row_dims = matrix._dense_shape.without(col_dims)
        l_indices = matrix._unpack_indices(l_idx_nat[..., 0], l_idx_nat[..., 1], row_dims, col_dims, ind_batch)
        u_indices = matrix._unpack_indices(u_idx_nat[..., 0], u_idx_nat[..., 1], row_dims, col_dims, ind_batch)
        l_values = reshaped_tensor(l_val_nat, [ind_batch, instance(matrix._values), channels], convert=False)
        u_values = reshaped_tensor(u_val_nat, [ind_batch, instance(matrix._values), channels], convert=False)
        lower = SparseCoordinateTensor(l_indices, l_values, matrix._dense_shape, matrix._can_contain_double_entries, matrix._indices_sorted, matrix._default)
        upper = SparseCoordinateTensor(u_indices, u_values, matrix._dense_shape, matrix._can_contain_double_entries, matrix._indices_sorted, matrix._default)
    else:  # dense matrix
        native_matrix = reshaped_native(matrix, [batch, non_batch(matrix).non_dual, dual, EMPTY_SHAPE])
        l_native, u_native = incomplete_lu_dense(native_matrix, iterations, safe)
        lower = reshaped_tensor(l_native, [batch(matrix), non_batch(matrix).non_dual, dual(matrix), EMPTY_SHAPE])
        upper = reshaped_tensor(u_native, [batch(matrix), non_batch(matrix).non_dual, dual(matrix), EMPTY_SHAPE])
    return lower, upper
def factorial(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Computes factorial(x) of the Tensor or PhiTreeNode x.
For floating-point numbers computes the continuous factorial using the gamma function. For integer numbers computes the exact factorial and returns the same integer type. However, this results in integer overflow for inputs larger than 12 (int32) or 19 (int64).
Expand source code
def factorial(x) -> Union[Tensor, PhiTreeNode]: """ Computes *factorial(x)* of the `Tensor` or `phi.math.magic.PhiTreeNode` `x`. For floating-point numbers computes the continuous factorial using the gamma function. For integer numbers computes the exact factorial and returns the same integer type. However, this results in integer overflow for inputs larger than 12 (int32) or 19 (int64). """ return _backend_op1(x, Backend.factorial)
def fft(x: phi.math._tensors.Tensor, dims: Union[str, tuple, list, set, phi.math._shape.Shape, Callable] = <function spatial>) ‑> phi.math._tensors.Tensor
-
Performs a fast Fourier transform (FFT) on all spatial dimensions of x.
The inverse operation is ifft().
Implementations:
- NumPy: np.fft.fft, numpy.fft.fft2, numpy.fft.fftn
- PyTorch: torch.fft.fft
- TensorFlow: tf.signal.fft, tf.signal.fft2d, tf.signal.fft3d
- Jax: jax.numpy.fft.fft, jax.numpy.fft.fft2, jax.numpy.fft.fftn
Args
x
- Uniform complex or float Tensor with at least one spatial dimension.
dims
- Dimensions along which to perform the FFT. If None, performs the FFT along all spatial dimensions of x.
Returns
Ƒ(x) as complex Tensor
Expand source code
def fft(x: Tensor, dims: DimFilter = spatial) -> Tensor:
    """
    Performs a fast Fourier transform (FFT) on all spatial dimensions of x.

    The inverse operation is `ifft()`.

    Implementations:

    * NumPy: [`np.fft.fft`](https://numpy.org/doc/stable/reference/generated/numpy.fft.fft.html),
      [`numpy.fft.fft2`](https://numpy.org/doc/stable/reference/generated/numpy.fft.fft2.html),
      [`numpy.fft.fftn`](https://numpy.org/doc/stable/reference/generated/numpy.fft.fftn.html)
    * PyTorch: [`torch.fft.fft`](https://pytorch.org/docs/stable/fft.html)
    * TensorFlow: [`tf.signal.fft`](https://www.tensorflow.org/api_docs/python/tf/signal/fft),
      [`tf.signal.fft2d`](https://www.tensorflow.org/api_docs/python/tf/signal/fft2d),
      [`tf.signal.fft3d`](https://www.tensorflow.org/api_docs/python/tf/signal/fft3d)
    * Jax: [`jax.numpy.fft.fft`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.fft.fft.html),
      [`jax.numpy.fft.fft2`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.fft.fft2.html),
      [`jax.numpy.fft.fftn`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.fft.fftn.html)

    Args:
        x: Uniform complex or float `Tensor` with at least one spatial dimension.
        dims: Dimensions along which to perform the FFT.
            If `None`, performs the FFT along all spatial dimensions of `x`.

    Returns:
        *Ƒ(x)* as complex `Tensor`
    """
    dims = x.shape.only(dims)
    x_native = x.native(x.shape)
    result_native = choose_backend(x_native).fft(x_native, x.shape.indices(dims))
    return NativeTensor(result_native, x.shape)
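A minimal round-trip sketch (example signal assumed): ifft() inverts fft() up to floating-point error.

from phi import math
from phi.math import spatial

signal = math.random_normal(spatial(x=8))
k = math.fft(signal)
restored = math.real(math.ifft(k))
print(math.close(signal, restored))  # should print True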
def fftfreq(resolution: phi.math._shape.Shape, dx: Union[phi.math._tensors.Tensor, float] = 1, dtype: phi.math.backend._dtype.DType = None)
-
Returns the discrete Fourier transform sample frequencies. These are the frequencies corresponding to the components of the result of math.fft on a tensor of shape resolution.
Args
resolution
- Grid resolution measured in cells
dx
- Distance between sampling points in real space.
dtype
- Data type of the returned tensor (Default value = None)
Returns
Tensor holding the frequencies of the corresponding values computed by math.fft
Expand source code
def fftfreq(resolution: Shape, dx: Union[Tensor, float] = 1, dtype: DType = None): """ Returns the discrete Fourier transform sample frequencies. These are the frequencies corresponding to the components of the result of `math.fft` on a tensor of shape `resolution`. Args: resolution: Grid resolution measured in cells dx: Distance between sampling points in real space. dtype: Data type of the returned tensor (Default value = None) Returns: `Tensor` holding the frequencies of the corresponding values computed by math.fft """ k = meshgrid(**{dim: np.fft.fftfreq(int(n)) for dim, n in resolution.spatial._named_sizes}) k /= dx return to_float(k) if dtype is None else cast(k, dtype)
def finite_fill(values: phi.math._tensors.Tensor, dims: Union[str, tuple, list, set, phi.math._shape.Shape, Callable] = <function spatial>, distance: int = 1, diagonal: bool = True, padding=boundary) ‑> Tuple[phi.math._tensors.Tensor, phi.math._tensors.Tensor]
-
Fills non-finite (NaN, inf, -inf) values from nearby finite values. Extrapolates the finite values of values for distance steps along dims. Where multiple finite values could fill an invalid value, the average is computed.
Args
values
- Floating-point Tensor. All non-numeric values (NaN, inf, -inf) are interpreted as invalid.
dims
- Dimensions along which to fill invalid values from finite ones.
distance
- Number of extrapolation steps, each extrapolating one cell out.
diagonal
- Whether to extrapolate values to their diagonal neighbors per step.
padding
- Extrapolation of values. Determines whether to extrapolate from the edges as well.
Returns
Tensor of same shape as values.
Expand source code
def finite_fill(values: Tensor, dims: DimFilter = spatial, distance: int = 1, diagonal: bool = True, padding=extrapolation.BOUNDARY) -> Tuple[Tensor, Tensor]: """ Fills non-finite (NaN, inf, -inf) values from nearby finite values. Extrapolates the finite values of `values` for `distance` steps along `dims`. Where multiple finite values could fill an invalid value, the average is computed. Args: values: Floating-point `Tensor`. All non-numeric values (`NaN`, `inf`, `-inf`) are interpreted as invalid. dims: Dimensions along which to fill invalid values from finite ones. distance: Number of extrapolation steps, each extrapolating one cell out. diagonal: Whether to extrapolate values to their diagonal neighbors per step. padding: Extrapolation of `values`. Determines whether to extrapolate from the edges as well. Returns: `Tensor` of same shape as `values`. """ if diagonal: distance = min(distance, max(values.shape.sizes)) dims = values.shape.only(dims) for _ in range(distance): valid = math.is_finite(values) valid_values = math.where(valid, values, 0) overlap = valid for dim in dims: values_l, values_r = shift(valid_values, (-1, 1), dims=dim, padding=padding) valid_values = math.sum_(values_l + values_r + valid_values, dim='shift') mask_l, mask_r = shift(overlap, (-1, 1), dims=dim, padding=padding) overlap = math.sum_(mask_l + mask_r + overlap, dim='shift') values = math.where(valid, values, valid_values / overlap) else: distance = min(distance, sum(values.shape.sizes)) for _ in range(distance): neighbors = concat(shift(values, (-1, 1), dims, padding=padding, stack_dim=channel('neighbors')), 'neighbors') finite = math.is_finite(neighbors) avg_neighbors = math.sum_(math.where(finite, neighbors, 0), 'neighbors') / math.sum_(finite, 'neighbors') values = math.where(math.is_finite(values), values, avg_neighbors) return values
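A minimal usage sketch (example values assumed): a single NaN is replaced by the average of its finite neighbors.

from phi import math
from phi.math import wrap, spatial

data = wrap([1., float('nan'), 3.], spatial(x=3))
print(math.finite_fill(data))  # should give (1.0, 2.0, 3.0) along x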
def finite_max(value, dim: Union[str, tuple, list, set, phi.math._shape.Shape, Callable] = <function non_batch>, default: Union[complex, float] = nan)
-
Finds the maximum along dim ignoring all non-finite values.
Args
value
- Tensor or list / tuple of Tensors.
dim
- Dimension or dimensions to be reduced. One of:
- None to reduce all non-batch dimensions
- str containing single dimension or comma-separated list of dimensions
- Tuple[str] or List[str]
- Shape
- batch(), instance(), spatial(), channel() to select dimensions by type
- '0' when isinstance(value, (tuple, list)) to reduce the sequence of Tensors
default
- Value to use where no finite value was encountered.
Returns
Tensor without the reduced dimensions.
Expand source code
def finite_max(value, dim: DimFilter = non_batch, default: Union[complex, float] = float('NaN')): """ Finds the maximum along `dim` ignoring all non-finite values. Args: value: `Tensor` or `list` / `tuple` of Tensors. dim: Dimension or dimensions to be reduced. One of * `None` to reduce all non-batch dimensions * `str` containing single dimension or comma-separated list of dimensions * `Tuple[str]` or `List[str]` * `Shape` * `batch`, `instance`, `spatial`, `channel` to select dimensions by type * `'0'` when `isinstance(value, (tuple, list))` to add up the sequence of Tensors default: Value to use where no finite value was encountered. Returns: `Tensor` without the reduced dimensions. """ value_inf = where(is_finite(value), value, float('-inf')) result_inf = max_(value_inf, dim) return where(is_finite(result_inf), result_inf, default)
def finite_mean(value, dim: Union[str, tuple, list, set, phi.math._shape.Shape, Callable] = <function non_batch>, default: Union[complex, float] = nan)
-
Computes the mean value of all finite values in value along dim.
Args
value
- Tensor or list / tuple of Tensors.
dim
- Dimension or dimensions to be reduced. One of:
- None to reduce all non-batch dimensions
- str containing single dimension or comma-separated list of dimensions
- Tuple[str] or List[str]
- Shape
- batch(), instance(), spatial(), channel() to select dimensions by type
- '0' when isinstance(value, (tuple, list)) to reduce the sequence of Tensors
default
- Value to use where no finite value was encountered.
Returns
Tensor without the reduced dimensions.
Expand source code
def finite_mean(value, dim: DimFilter = non_batch, default: Union[complex, float] = float('NaN')): """ Computes the mean value of all finite values in `value` along `dim`. Args: value: `Tensor` or `list` / `tuple` of Tensors. dim: Dimension or dimensions to be reduced. One of * `None` to reduce all non-batch dimensions * `str` containing single dimension or comma-separated list of dimensions * `Tuple[str]` or `List[str]` * `Shape` * `batch`, `instance`, `spatial`, `channel` to select dimensions by type * `'0'` when `isinstance(value, (tuple, list))` to add up the sequence of Tensors default: Value to use where no finite value was encountered. Returns: `Tensor` without the reduced dimensions. """ finite = is_finite(value) summed = sum_(where(finite, value, 0), dim) count = sum_(finite, dim) mean_nan = summed / count return where(is_finite(mean_nan), mean_nan, default)
def finite_min(value, dim: Union[str, tuple, list, set, phi.math._shape.Shape, Callable] = <function non_batch>, default: Union[complex, float] = nan)
-
Finds the minimum along dim ignoring all non-finite values.
Args
value
- Tensor or list / tuple of Tensors.
dim
- Dimension or dimensions to be reduced. One of:
- None to reduce all non-batch dimensions
- str containing single dimension or comma-separated list of dimensions
- Tuple[str] or List[str]
- Shape
- batch(), instance(), spatial(), channel() to select dimensions by type
- '0' when isinstance(value, (tuple, list)) to reduce the sequence of Tensors
default
- Value to use where no finite value was encountered.
Returns
Tensor without the reduced dimensions.
Expand source code
def finite_min(value, dim: DimFilter = non_batch, default: Union[complex, float] = float('NaN')): """ Finds the minimum along `dim` ignoring all non-finite values. Args: value: `Tensor` or `list` / `tuple` of Tensors. dim: Dimension or dimensions to be reduced. One of * `None` to reduce all non-batch dimensions * `str` containing single dimension or comma-separated list of dimensions * `Tuple[str]` or `List[str]` * `Shape` * `batch`, `instance`, `spatial`, `channel` to select dimensions by type * `'0'` when `isinstance(value, (tuple, list))` to add up the sequence of Tensors default: Value to use where no finite value was encountered. Returns: `Tensor` without the reduced dimensions. """ value_inf = where(is_finite(value), value, float('inf')) result_inf = min_(value_inf, dim) return where(is_finite(result_inf), result_inf, default)
def finite_sum(value, dim: Union[str, tuple, list, set, phi.math._shape.Shape, Callable] = <function non_batch>, default: Union[complex, float] = nan)
-
Sums all finite values in value along dim.
Args
value
- Tensor or list / tuple of Tensors.
dim
- Dimension or dimensions to be reduced. One of:
- None to reduce all non-batch dimensions
- str containing single dimension or comma-separated list of dimensions
- Tuple[str] or List[str]
- Shape
- batch(), instance(), spatial(), channel() to select dimensions by type
- '0' when isinstance(value, (tuple, list)) to add up the sequence of Tensors
default
- Value to use where no finite value was encountered.
Returns
Tensor without the reduced dimensions.
Expand source code
def finite_sum(value, dim: DimFilter = non_batch, default: Union[complex, float] = float('NaN')): """ Sums all finite values in `value` along `dim`. Args: value: `Tensor` or `list` / `tuple` of Tensors. dim: Dimension or dimensions to be reduced. One of * `None` to reduce all non-batch dimensions * `str` containing single dimension or comma-separated list of dimensions * `Tuple[str]` or `List[str]` * `Shape` * `batch`, `instance`, `spatial`, `channel` to select dimensions by type * `'0'` when `isinstance(value, (tuple, list))` to add up the sequence of Tensors default: Value to use where no finite value was encountered. Returns: `Tensor` without the reduced dimensions. """ finite = is_finite(value) summed = sum_(where(finite, value, 0), dim) return where(any_(finite, dim), summed, default)
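A minimal sketch of the finite_* reductions (example values assumed): non-finite entries are simply ignored.

from phi import math
from phi.math import wrap, spatial

data = wrap([1., float('nan'), 3.], spatial(x=3))
print(math.finite_mean(data))  # should give 2.0
print(math.finite_sum(data))   # should give 4.0
print(math.finite_max(data))   # should give 3.0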
def flatten(value, flat_dim: phi.math._shape.Shape = (flatⁱ=None), flatten_batch=False, **kwargs)
-
Returns a Tensor with the same values as value but only a single dimension flat_dim. The order of the values in memory is not changed.
Args
value
- Shapable, such as Tensor.
flat_dim
- Dimension name and type as Shape object. The size is ignored.
flatten_batch
- Whether to flatten batch dimensions as well. If False, batch dimensions are kept, only non-batch dimensions are flattened.
**kwargs
- Additional keyword arguments required by specific implementations. Adding spatial dimensions to fields requires the bounds: Box argument specifying the physical extent of the new dimensions. Adding batch dimensions must always work without keyword arguments.
Returns
Same type as value.
Examples
>>> flatten(math.zeros(spatial(x=4, y=3)))
(flatⁱ=12) const 0.0
Expand source code
def flatten(value, flat_dim: Shape = instance('flat'), flatten_batch=False, **kwargs):
    """
    Returns a `Tensor` with the same values as `value` but only a single dimension `flat_dim`.
    The order of the values in memory is not changed.

    Args:
        value: `phi.math.magic.Shapable`, such as `Tensor`.
        flat_dim: Dimension name and type as `Shape` object. The size is ignored.
        flatten_batch: Whether to flatten batch dimensions as well.
            If `False`, batch dimensions are kept, only non-batch dimensions are flattened.
        **kwargs: Additional keyword arguments required by specific implementations.
            Adding spatial dimensions to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions.
            Adding batch dimensions must always work without keyword arguments.

    Returns:
        Same type as `value`.

    Examples:
        >>> flatten(math.zeros(spatial(x=4, y=3)))
        (flatⁱ=12) const 0.0
    """
    assert isinstance(flat_dim, Shape) and flat_dim.rank == 1, flat_dim
    assert isinstance(value, Shapable) and isinstance(value, Shaped), f"value must be Shapable but got {type(value)}"
    # --- First try __flatten__ ---
    if hasattr(value, '__flatten__'):
        result = value.__flatten__(flat_dim, flatten_batch, **kwargs)
        if result is not NotImplemented:
            return result
    # There is no tree node implementation for flatten because pack_dims is just as fast
    # --- Fallback: pack_dims ---
    return pack_dims(value, shape(value) if flatten_batch else non_batch(value), flat_dim, **kwargs)
def floor(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Computes ⌊x⌋ of the Tensor or PhiTreeNode x.
Expand source code
def floor(x) -> Union[Tensor, PhiTreeNode]: """ Computes *⌊x⌋* of the `Tensor` or `phi.math.magic.PhiTreeNode` `x`. """ return _backend_op1(x, Backend.floor)
def fourier_laplace(grid: phi.math._tensors.Tensor, dx: Union[phi.math._tensors.Tensor, phi.math._shape.Shape, float, list, tuple], times: int = 1)
-
Applies the spatial laplace operator to the given tensor with periodic boundary conditions.
Note: The results of fourier_laplace() and laplace() are close but not identical.
This implementation computes the laplace operator in Fourier space. The result for periodic fields is exact, i.e. no numerical instabilities can occur, even for higher-order derivatives.
Args
grid
- tensor, assumed to have periodic boundary conditions
dx
- distance between grid points, tensor-like, scalar or vector
times
- number of times the laplace operator is applied. The computational cost is independent of this parameter.
grid
- Tensor:
dx
- Tensor or Shape or float or list or tuple:
times
- int: (Default value = 1)
Returns
tensor of same shape as grid
Expand source code
def fourier_laplace(grid: Tensor, dx: Union[Tensor, Shape, float, list, tuple], times: int = 1): """ Applies the spatial laplace operator to the given tensor with periodic boundary conditions. *Note:* The results of `fourier_laplace` and `laplace` are close but not identical. This implementation computes the laplace operator in Fourier space. The result for periodic fields is exact, i.e. no numerical instabilities can occur, even for higher-order derivatives. Args: grid: tensor, assumed to have periodic boundary conditions dx: distance between grid points, tensor-like, scalar or vector times: number of times the laplace operator is applied. The computational cost is independent of this parameter. grid: Tensor: dx: Tensor or Shape or float or list or tuple: times: int: (Default value = 1) Returns: tensor of same shape as `tensor` """ frequencies = math.fft(math.to_complex(grid)) k_squared = math.sum_(math.fftfreq(grid.shape) ** 2, 'vector') fft_laplace = -(2 * np.pi) ** 2 * k_squared result = math.real(math.ifft(frequencies * fft_laplace ** times)) return math.cast(result / wrap(dx) ** 2, grid.dtype)
def fourier_poisson(grid: phi.math._tensors.Tensor, dx: Union[phi.math._tensors.Tensor, phi.math._shape.Shape, float, list, tuple], times: int = 1)
-
Inverse operation to fourier_laplace().
Args
grid
- Tensor:
dx
- Tensor or Shape or float or list or tuple:
times
- int: (Default value = 1)
Returns:
Expand source code
def fourier_poisson(grid: Tensor, dx: Union[Tensor, Shape, float, list, tuple], times: int = 1): """ Inverse operation to `fourier_laplace`. Args: grid: Tensor: dx: Tensor or Shape or float or list or tuple: times: int: (Default value = 1) Returns: """ frequencies = math.fft(math.to_complex(grid)) k_squared = math.sum_(math.fftfreq(grid.shape) ** 2, 'vector') fft_laplace = -(2 * np.pi) ** 2 * k_squared # fft_laplace.tensor[(0,) * math.ndims(k_squared)] = math.inf # assume NumPy array to edit result = math.real(math.ifft(math.safe_div(frequencies, math.to_complex(fft_laplace ** times)))) return math.cast(result * wrap(dx) ** 2, grid.dtype)
def frequency_loss(x, frequency_falloff: float = 100, threshold=1e-05, ignore_mean=False, n=2) ‑> phi.math._tensors.Tensor
-
Penalizes the squared values in frequency (Fourier) space. Lower frequencies are weighted more strongly than higher frequencies, depending on frequency_falloff.
Args
x
- Tensor or PhiTreeNode. Values to penalize, typically actual - target.
frequency_falloff
- Large values put more emphasis on lower frequencies, 1.0 weights all frequencies equally. Note: The total loss is not normalized. Varying the value will result in losses of different magnitudes.
threshold
- Frequency amplitudes below this value are ignored. Setting this to zero may cause infinities or NaN values during backpropagation.
ignore_mean
- If True, does not penalize the mean value (frequency=0 component).
Returns
Scalar loss value
Expand source code
def frequency_loss(x, frequency_falloff: float = 100, threshold=1e-5, ignore_mean=False, n=2) -> Tensor:
    """
    Penalizes the squared `values` in frequency (Fourier) space.
    Lower frequencies are weighted more strongly than higher frequencies, depending on `frequency_falloff`.

    Args:
        x: `Tensor` or `phi.math.magic.PhiTreeNode` Values to penalize, typically `actual - target`.
        frequency_falloff: Large values put more emphasis on lower frequencies, 1.0 weights all frequencies equally.
            *Note*: The total loss is not normalized. Varying the value will result in losses of different magnitudes.
        threshold: Frequency amplitudes below this value are ignored.
            Setting this to zero may cause infinities or NaN values during backpropagation.
        ignore_mean: If `True`, does not penalize the mean value (frequency=0 component).

    Returns:
        Scalar loss value
    """
    assert n in (1, 2)
    if isinstance(x, Tensor):
        if ignore_mean:
            x -= math.mean(x, x.shape.non_batch)
        k_squared = vec_squared(math.fftfreq(x.shape.spatial))
        weights = math.exp(-0.5 * k_squared * frequency_falloff ** 2)
        diff_fft = abs_square(math.fft(x) * weights)
        diff_fft = math.sqrt(math.maximum(diff_fft, threshold))
        return l2_loss(diff_fft) if n == 2 else l1_loss(diff_fft)
    elif isinstance(x, PhiTreeNode):
        losses = [frequency_loss(getattr(x, a), frequency_falloff, threshold, ignore_mean, n) for a in variable_values(x)]
        return sum(losses)
    else:
        raise ValueError(x)
def from_dict(dict_: dict, convert=False)
-
Loads a Tensor or Shape from a serialized form.
See Also: to_dict().
Args
dict_
- Serialized tensor properties.
convert
- Whether to convert the data to the current backend format or keep it as a Numpy array.
Returns
Tensor or Shape.
Expand source code
def from_dict(dict_: dict, convert=False): """ Loads a `Tensor` or `Shape` from a serialized form. See Also: `to_dict()`. Args: dict_: Serialized tensor properties. convert: Whether to convert the data to the current backend format or keep it as a Numpy array. Returns: `Tensor` or `Shape`. """ shape = Shape._from_dict(dict_) if 'data' in dict_: return tensor(dict_['data'], shape, convert=convert) else: return shape
def functional_gradient(f: Callable, wrt: str = None, get_output=True) ‑> Callable
-
Creates a function which computes the gradient of f.
Example:
def loss_function(x, y):
    prediction = f(x)
    loss = math.l2_loss(prediction - y)
    return loss, prediction

dx = functional_gradient(loss_function, 'x', get_output=False)(x, y)
(loss, prediction), (dx, dy) = functional_gradient(loss_function, 'x,y', get_output=True)(x, y)
Functional gradients are implemented for the following backends:
- PyTorch: torch.autograd.grad / torch.autograd.backward
- TensorFlow: tf.GradientTape
- Jax: jax.grad
When the gradient function is invoked, f is called with tensors that track the gradient. For PyTorch, arg.requires_grad = True for all positional arguments of f.
Args
f
- Function to be differentiated. f must return a floating point Tensor with rank zero. It can return additional tensors which are treated as auxiliary data and will be returned by the gradient function if get_output=True. All arguments for which the gradient is computed must be of dtype float or complex.
get_output
- Whether the gradient function should also return the return values of f.
wrt
- Comma-separated parameter names of f with respect to which the gradient should be computed. If not specified, the gradient will be computed w.r.t. the first positional argument (highly discouraged).
Returns
Function with the same arguments as f that returns the value of f, auxiliary data and gradient of f if get_output=True, else just the gradient of f.
Expand source code
def functional_gradient(f: Callable, wrt: str = None, get_output=True) -> Callable: """ Creates a function which computes the gradient of `f`. Example: ```python def loss_function(x, y): prediction = f(x) loss = math.l2_loss(prediction - y) return loss, prediction dx = functional_gradient(loss_function, 'x', get_output=False)(x, y) (loss, prediction), (dx, dy) = functional_gradient(loss_function, 'x,y', get_output=True)(x, y) ``` Functional gradients are implemented for the following backends: * PyTorch: [`torch.autograd.grad`](https://pytorch.org/docs/stable/autograd.html#torch.autograd.grad) / [`torch.autograd.backward`](https://pytorch.org/docs/stable/autograd.html#torch.autograd.backward) * TensorFlow: [`tf.GradientTape`](https://www.tensorflow.org/api_docs/python/tf/GradientTape) * Jax: [`jax.grad`](https://jax.readthedocs.io/en/latest/jax.html#jax.grad) When the gradient function is invoked, `f` is called with tensors that track the gradient. For PyTorch, `arg.requires_grad = True` for all positional arguments of `f`. Args: f: Function to be differentiated. `f` must return a floating point `Tensor` with rank zero. It can return additional tensors which are treated as auxiliary data and will be returned by the gradient function if `return_values=True`. All arguments for which the gradient is computed must be of dtype float or complex. get_output: Whether the gradient function should also return the return values of `f`. wrt: Comma-separated parameter names of `f` with respect to which the gradient should be computed. If not specified, the gradient will be computed w.r.t. the first positional argument (highly discouraged). Returns: Function with the same arguments as `f` that returns the value of `f`, auxiliary data and gradient of `f` if `get_output=True`, else just the gradient of `f`. """ f_params, wrt = simplify_wrt(f, wrt) return GradientFunction(f, f_params, wrt, get_output, is_f_scalar=True)
def gather(values: phi.math._tensors.Tensor, indices: phi.math._tensors.Tensor, dims: Union[str, tuple, list, set, phi.math._shape.Shape, Callable, None] = None)
-
Gathers the entries of values at positions described by indices. All non-channel dimensions of indices that are part of values but not indexed are treated as batch dimensions.
See Also: scatter().
Args
values
- Tensor containing values to gather.
indices
- int Tensor. Multidimensional position references in values. Must contain a single channel dimension for the index vector matching the number of dimensions to index. This channel dimension should list the dimension names to index as item names unless explicitly specified as dims.
dims
- (Optional) Dimensions indexed by indices. Alternatively, the dimensions can be specified as the item names of the channel dimension of indices. If None and no index item names are specified, will default to all spatial dimensions or all instance dimensions, depending on which ones are present (but not both).
Returns
Tensor with combined batch dimensions, channel dimensions of values and spatial/instance dimensions of indices.
Expand source code
def gather(values: Tensor, indices: Tensor, dims: Union[DimFilter, None] = None):
    """
    Gathers the entries of `values` at positions described by `indices`.
    All non-channel dimensions of `indices` that are part of `values` but not indexed are treated as batch dimensions.

    See Also:
        `scatter()`.

    Args:
        values: `Tensor` containing values to gather.
        indices: `int` `Tensor`. Multidimensional position references in `values`.
            Must contain a single channel dimension for the index vector matching the number of dimensions to index.
            This channel dimension should list the dimension names to index as item names unless explicitly specified as `dims`.
        dims: (Optional) Dimensions indexed by `indices`.
            Alternatively, the dimensions can be specified as the item names of the channel dimension of `indices`.
            If `None` and no index item names are specified, will default to all spatial dimensions or all instance dimensions, depending on which ones are present (but not both).

    Returns:
        `Tensor` with combined batch dimensions, channel dimensions of `values` and spatial/instance dimensions of `indices`.
    """
    assert channel(indices).rank < 2, f"indices can at most have one channel dimension but got {indices.shape}"
    if dims is None:
        if channel(indices) and channel(indices).item_names[0]:
            dims = channel(indices).item_names[0]
        else:  # Fallback to spatial / instance
            warnings.warn(f"Indexing without item names is not recommended. Got indices {indices.shape}", SyntaxWarning, stacklevel=2)
            assert values.shape.instance.is_empty or values.shape.spatial.is_empty, f"Specify gather dimensions for values with both instance and spatial dimensions. Got {values.shape}"
            dims = values.shape.instance if values.shape.spatial.is_empty else values.shape.spatial
    if indices.dtype.kind == bool:
        indices = to_int32(indices)
    dims = parse_dim_order(dims)
    assert dims in values.shape, f"Trying to index non-existent dimensions with indices {indices.shape} into values {values.shape}"
    treat_as_batch = non_channel(indices).only(values.shape).without(dims)
    batch_ = (values.shape.batch & indices.shape.batch).without(dims) & treat_as_batch
    channel_ = values.shape.without(dims).without(batch_)
    index_list_dims = indices.shape.non_channel.without(batch_)
    squeeze_index_list = False
    if not index_list_dims:
        index_list_dims = instance('_single_index')
        squeeze_index_list = True
    native_values = reshaped_native(values, [batch_, *dims, channel_])
    native_indices = reshaped_native(indices, [batch_, *index_list_dims, channel(indices)])
    backend = choose_backend(native_values, native_indices)
    native_result = backend.batched_gather_nd(native_values, native_indices)
    result = reshaped_tensor(native_result, [batch_, *index_list_dims, channel_], convert=False)
    if squeeze_index_list:
        result = result[{'_single_index': 0}]
    return result
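A minimal usage sketch (example tensors assumed): the item name 'x' on the channel dimension 'vector' tells gather() which dimension is indexed.

from phi import math
from phi.math import wrap, spatial, channel, instance

values = wrap([10, 20, 30, 40], spatial(x=4))
indices = wrap([[0], [3]], instance('points'), channel(vector='x'))
print(math.gather(values, indices))  # should give (10, 40) along 'points'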
def get_format(x: phi.math._tensors.Tensor) ‑> str
-
Returns the sparse storage format of a tensor.
Args
x
Tensor
Returns
One of 'coo', 'csr', 'csc', 'dense'.
Expand source code
def get_format(x: Tensor) -> str: """ Returns the sparse storage format of a tensor. Args: x: `Tensor` Returns: One of `'coo'`, `'csr'`, `'csc'`, `'dense'`. """ if isinstance(x, SparseCoordinateTensor): return 'coo' elif isinstance(x, CompressedSparseMatrix): if dual(x._uncompressed_dims): return 'csr' else: assert not dual(x._uncompressed_dims), f"Compressed matrix {x.shape} does not match 'csr' or 'csc' because dual dimensions are present in rows and columns." return 'csc' elif isinstance(x, TensorStack): formats = [get_format(t) for t in x._tensors] if all(f == formats[0] for f in formats): return formats[0] return 'mixed' else: return 'dense'
def get_precision() ‑> int
-
Gets the current target floating point precision in bits. The precision can be set globally using set_global_precision() or locally using with precision(p):.
Any Backend method may convert floating point values to this precision, even if the input had a different precision.
Returns
16 for half, 32 for single, 64 for double
Expand source code
def get_precision() -> int: """ Gets the current target floating point precision in bits. The precision can be set globally using `set_global_precision()` or locally using `with precision(p):`. Any Backend method may convert floating point values to this precision, even if the input had a different precision. Returns: 16 for half, 32 for single, 64 for double """ return _PRECISION[-1]
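A minimal usage sketch: the precision(p) context manager mentioned above temporarily overrides the global setting.

from phi import math

print(math.get_precision())  # 32 unless changed globally
with math.precision(64):
    print(math.get_precision())  # 64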
def get_sparsity(x: phi.math._tensors.Tensor)
-
Fraction of values currently stored on disk for the given Tensor x. For sparse tensors, this is nnz / shape.
This is a lower limit on the number of values that will need to be processed for operations involving x. The actual number is often higher since many operations require data be laid out in a certain format. In these cases, missing values, such as zeros, are filled in before the operation.
The following operations may return tensors whose values are only partially stored:
- expand()
- phi.math.pairwise_distance() with max_distance set.
- Tracers used in jit_compile_linear()
- Stacking any of the above.
Args
x
- Tensor
Returns
The number of values that are actually stored on disk. This does not include additional information, such as position information / indices. For sparse matrices, this is equal to the number of nonzero values.
Expand source code
def get_sparsity(x: Tensor): """ Fraction of values currently stored on disk for the given `Tensor` `x`. For sparse tensors, this is `nnz / shape`. This is a lower limit on the number of values that will need to be processed for operations involving `x`. The actual number is often higher since many operations require data be laid out in a certain format. In these cases, missing values, such as zeros, are filled in before the operation. The following operations may return tensors whose values are only partially stored: * `phi.math.expand()` * `phi.math.pairwise_distance()` with `max_distance` set. * Tracers used in `phi.math.jit_compile_linear()` * Stacking any of the above. Args: x: `Tensor` Returns: The number of values that are actually stored on disk. This does not include additional information, such as position information / indices. For sparse matrices, this is equal to the number of nonzero values. """ return stored_values(x, invalid='keep').shape.volume / x.shape.volume
def gradient(f: Callable, wrt: str = None, get_output=True) ‑> Callable
-
Creates a function which computes the Jacobian matrix of f. For scalar functions, consider using functional_gradient() instead.
Example:
def loss_function(x, y):
    prediction = f(x)
    loss = math.l2_loss(prediction - y)
    return loss, prediction

dx = jacobian(loss_function, wrt='x', get_output=False)(x, y)
(loss, prediction), (dx, dy) = jacobian(loss_function, wrt='x,y', get_output=True)(x, y)
Functional gradients are implemented for the following backends:
- PyTorch: torch.autograd.grad / torch.autograd.backward
- TensorFlow: tf.GradientTape
- Jax: jax.grad
When the gradient function is invoked, f is called with tensors that track the gradient. For PyTorch, arg.requires_grad = True for all positional arguments of f.
Args
f
- Function to be differentiated. f must return a floating point Tensor with rank zero. It can return additional tensors which are treated as auxiliary data and will be returned by the gradient function if get_output=True. All arguments for which the gradient is computed must be of dtype float or complex.
get_output
- Whether the gradient function should also return the return values of f.
wrt
- Comma-separated parameter names of f with respect to which the gradient should be computed. If not specified, the gradient will be computed w.r.t. the first positional argument (highly discouraged).
Returns
Function with the same arguments as f that returns the value of f, auxiliary data and Jacobian of f if get_output=True, else just the Jacobian of f.
Expand source code
def jacobian(f: Callable, wrt: str = None, get_output=True) -> Callable:
    """
    Creates a function which computes the Jacobian matrix of `f`.
    For scalar functions, consider using `functional_gradient()` instead.

    Example:
    ```python
    def loss_function(x, y):
        prediction = f(x)
        loss = math.l2_loss(prediction - y)
        return loss, prediction

    dx = jacobian(loss_function, wrt='x', get_output=False)(x, y)
    (loss, prediction), (dx, dy) = jacobian(loss_function, wrt='x,y', get_output=True)(x, y)
    ```

    Functional gradients are implemented for the following backends:

    * PyTorch: [`torch.autograd.grad`](https://pytorch.org/docs/stable/autograd.html#torch.autograd.grad) / [`torch.autograd.backward`](https://pytorch.org/docs/stable/autograd.html#torch.autograd.backward)
    * TensorFlow: [`tf.GradientTape`](https://www.tensorflow.org/api_docs/python/tf/GradientTape)
    * Jax: [`jax.grad`](https://jax.readthedocs.io/en/latest/jax.html#jax.grad)

    When the gradient function is invoked, `f` is called with tensors that track the gradient.
    For PyTorch, `arg.requires_grad = True` for all positional arguments of `f`.

    Args:
        f: Function to be differentiated.
            `f` must return a floating point `Tensor` with rank zero.
            It can return additional tensors which are treated as auxiliary data and will be returned by the gradient function if `get_output=True`.
            All arguments for which the gradient is computed must be of dtype float or complex.
        get_output: Whether the gradient function should also return the return values of `f`.
        wrt: Comma-separated parameter names of `f` with respect to which the gradient should be computed.
            If not specified, the gradient will be computed w.r.t. the first positional argument (highly discouraged).

    Returns:
        Function with the same arguments as `f` that returns the value of `f`, auxiliary data and Jacobian of `f` if `get_output=True`, else just the Jacobian of `f`.
    """
    f_params, wrt = simplify_wrt(f, wrt)
    return GradientFunction(f, f_params, wrt, get_output, is_f_scalar=False)
def grid_sample(grid: phi.math._tensors.Tensor, coordinates: phi.math._tensors.Tensor, extrap: e_.Extrapolation, **kwargs)
-
Samples values of grid at the locations referenced by coordinates. Values lying in between sample points are determined via linear interpolation.
For values outside the valid bounds of grid (coord < 0 or coord > grid.shape - 1), extrap is used to determine the neighboring grid values. If the extrapolation does not support resampling, the grid is padded by one cell layer before resampling. In that case, values lying further outside will not be sampled according to the extrapolation.
Args
grid
- Grid with at least one spatial dimension and no instance dimensions.
coordinates
- Coordinates with a single channel dimension called 'vector'. The size of the vector dimension must match the number of spatial dimensions of grid.
extrap
- Extrapolation used to determine the values of grid outside its valid bounds.
kwargs
- Additional information for the extrapolation.
Returns
Tensor with channel dimensions of grid, spatial and instance dimensions of coordinates and combined batch dimensions.
Expand source code
def grid_sample(grid: Tensor, coordinates: Tensor, extrap: 'e_.Extrapolation', **kwargs): """ Samples values of `grid` at the locations referenced by `coordinates`. Values lying in between sample points are determined via linear interpolation. For values outside the valid bounds of `grid` (`coord < 0 or coord > grid.shape - 1`), `extrap` is used to determine the neighboring grid values. If the extrapolation does not support resampling, the grid is padded by one cell layer before resampling. In that case, values lying further outside will not be sampled according to the extrapolation. Args: grid: Grid with at least one spatial dimension and no instance dimensions. coordinates: Coordinates with a single channel dimension called `'vector'`. The size of the `vector` dimension must match the number of spatial dimensions of `grid`. extrap: Extrapolation used to determine the values of `grid` outside its valid bounds. kwargs: Additional information for the extrapolation. Returns: `Tensor` with channel dimensions of `grid`, spatial and instance dimensions of `coordinates` and combined batch dimensions. """ result = broadcast_op(functools.partial(_grid_sample, extrap=extrap, pad_kwargs=kwargs), [grid, coordinates]) return result
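A minimal usage sketch (example grid and coordinates assumed): sampling at fractional positions linearly interpolates between neighboring grid values.

from phi import math
from phi.math import wrap, spatial, channel, instance, extrapolation

grid = wrap([0., 10., 20.], spatial(x=3))
coords = wrap([[0.5], [1.5]], instance('points'), channel(vector='x'))
print(math.grid_sample(grid, coords, extrapolation.ZERO))  # should give (5.0, 15.0)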
def histogram(values: phi.math._tensors.Tensor, bins: phi.math._shape.Shape = (binsˢ=30), weights=1, same_bins: Union[str, tuple, list, set, phi.math._shape.Shape, Callable] = None)
-
Compute a histogram of a distribution of values.
Important Note: In its current implementation, values outside the range of bins may or may not be added to the outermost bins.
Args
values
- Tensor listing the values to be binned along spatial or instance dimensions. values may not contain channel or dual dimensions.
bins
- Either Shape specifying the number of equally-spaced bins to use or bin edge positions as Tensor with a spatial or instance dimension.
weights
- Tensor assigning a weight to every value in values that will be added to the bin, default 1.
same_bins
- Only used if bins is given as a Shape. Use the same bin sizes and positions across these batch dimensions. By default, bins will be chosen independently for each example.
Returns
hist
- Tensor containing all batch dimensions and the bins dimension with dtype matching weights.
bin_edges
- Tensor
bin_center
- Tensor
Expand source code
def histogram(values: Tensor, bins: Shape or Tensor = spatial(bins=30), weights=1, same_bins: DimFilter = None):
    """
    Compute a histogram of a distribution of values.

    *Important Note:* In its current implementation, values outside the range of bins may or may not be added to the outermost bins.

    Args:
        values: `Tensor` listing the values to be binned along spatial or instance dimensions.
            `values` may not contain channel or dual dimensions.
        bins: Either `Shape` specifying the number of equally-spaced bins to use or bin edge positions as `Tensor` with a spatial or instance dimension.
        weights: `Tensor` assigning a weight to every value in `values` that will be added to the bin, default 1.
        same_bins: Only used if `bins` is given as a `Shape`.
            Use the same bin sizes and positions across these batch dimensions.
            By default, bins will be chosen independently for each example.

    Returns:
        hist: `Tensor` containing all batch dimensions and the `bins` dimension with dtype matching `weights`.
        bin_edges: `Tensor`
        bin_center: `Tensor`
    """
    assert isinstance(values, Tensor), f"values must be a Tensor but got {type(values)}"
    assert channel(values).is_empty, f"Only 1D histograms supported but values have a channel dimension: {values.shape}"
    assert dual(values).is_empty, f"values cannot contain dual dimensions but got shape {values.shape}"
    weights = wrap(weights)
    if isinstance(bins, Shape):
        def equal_bins(v):
            return linspace(finite_min(v, shape), finite_max(v, shape), bins.with_size(bins.size + 1))
        bins = broadcast_op(equal_bins, [values], iter_dims=(batch(values) & batch(weights)).without(same_bins))
    assert isinstance(bins, Tensor), f"bins must be a Tensor but got {type(bins)}"
    assert non_batch(bins).rank == 1, f"bins must contain exactly one spatial or instance dimension listing the bin edges but got shape {bins.shape}"
    assert channel(bins).rank == dual(bins).rank == 0, f"bins cannot have any channel or dual dimensions but got shape {bins.shape}"
    tensors = [values, bins] if weights is None else [values, weights, bins]
    backend = choose_backend_t(*tensors)

    def histogram_uniform(values: Tensor, bin_edges: Tensor, weights):
        batch_dims = batch(values) & batch(bin_edges) & batch(weights)
        value_dims = non_batch(values) & non_batch(weights)
        values_native = reshaped_native(values, [batch_dims, value_dims])
        weights_native = reshaped_native(weights, [batch_dims, value_dims])
        bin_edges_native = reshaped_native(bin_edges, [batch_dims, non_batch(bin_edges)])
        hist_native = backend.histogram1d(values_native, weights_native, bin_edges_native)
        hist = reshaped_tensor(hist_native, [batch_dims, non_batch(bin_edges).with_size(non_batch(bin_edges).size - 1)])
        return hist
        # return stack_tensors([bin_edges, hist], channel(vector=[bin_edges.shape.name, 'hist']))

    bin_center = (bins[{non_batch(bins).name: slice(1, None)}] + bins[{non_batch(bins).name: slice(0, -1)}]) / 2
    bin_center = expand(bin_center, channel(vector=non_batch(bins).names))
    bin_edges = stack_tensors([bins], channel(values)) if channel(values) else bins
    return broadcast_op(histogram_uniform, [values, bins, weights]), bin_edges, bin_center
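A minimal usage sketch (example data assumed): binning 1000 uniform samples into 10 equally-spaced bins.

from phi import math
from phi.math import instance, spatial

data = math.random_uniform(instance(samples=1000))
hist, bin_edges, bin_center = math.histogram(data, spatial(bins=10))
print(hist.shape)  # should give (binsˢ=10)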
def i2b(value)
-
Change the type of all instance dimensions of `value` to batch dimensions. See `rename_dims()`.
Expand source code
def i2b(value):
    """ Change the type of all *instance* dimensions of `value` to *batch* dimensions. See `rename_dims`. """
    return rename_dims(value, instance, batch)
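A minimal sketch (the shape representation is indicative):
>>> from phi import math
>>> from phi.math import instance
>>> t = math.ones(instance(points=3))
>>> math.i2b(t).shape
(pointsᵇ=3)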
def identity(x)
-
Identity function for one argument. Vararg functions cannot be transformed as the argument names are unknown.
Args
    x: Positional argument.
Returns
    `x`
Expand source code
def identity(x):
    """
    Identity function for one argument. Vararg functions cannot be transformed as the argument names are unknown.

    Args:
        x: Positional argument.

    Returns:
        `x`
    """
    return x
def ifft(k: phi.math._tensors.Tensor, dims: Union[str, tuple, list, set, phi.math._shape.Shape, Callable] = <function spatial>)
-
Inverse of `fft()`.
Args
    k: Complex or float `Tensor` with at least one spatial dimension.
    dims: Dimensions along which to perform the inverse FFT. If `None`, performs the inverse FFT along all spatial dimensions of `k`.
Returns
    Ƒ⁻¹(k) as complex `Tensor`
Expand source code
def ifft(k: Tensor, dims: DimFilter = spatial): """ Inverse of `fft()`. Args: k: Complex or float `Tensor` with at least one spatial dimension. dims: Dimensions along which to perform the inverse FFT. If `None`, performs the inverse FFT along all spatial dimensions of `k`. Returns: *Ƒ<sup>-1</sup>(k)* as complex `Tensor` """ dims = k.shape.only(dims) k_native = k.native(k.shape) result_native = choose_backend(k_native).ifft(k_native, k.shape.indices(dims)) return NativeTensor(result_native, k.shape)
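A round-trip sketch showing that `ifft` inverts `fft` up to floating-point error (assuming the default NumPy backend):
>>> from phi import math
>>> from phi.math import spatial
>>> x = math.random_uniform(spatial(x=8))
>>> math.assert_close(x, math.real(math.ifft(math.fft(x))))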
def imag(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Returns the imaginary part of `x`. If `x` does not store complex numbers, returns a zero tensor with the same shape and dtype as `x`.
See Also: `real()`, `conjugate()`.
Args
    x: `Tensor` or `PhiTreeNode` or native tensor.
Returns
    Imaginary component of `x` if `x` is complex, zeros otherwise.
Expand source code
def imag(x) -> Union[Tensor, PhiTreeNode]: """ Returns the imaginary part of `x`. If `x` does not store complex numbers, returns a zero tensor with the same shape and dtype as this tensor. See Also: `real()`, `conjugate()`. Args: x: `Tensor` or `phi.math.magic.PhiTreeNode` or native tensor. Returns: Imaginary component of `x` if `x` is complex, zeros otherwise. """ return _backend_op1(x, Backend.imag)
def instance(*args, **dims: Union[int, str, tuple, list, phi.math._shape.Shape]) ‑> phi.math._shape.Shape
-
Returns the instance dimensions of an existing `Shape` or creates a new `Shape` with only instance dimensions.
Usage for filtering instance dimensions:
>>> instance_dims = instance(shape)
>>> instance_dims = instance(tensor)
Usage for creating a `Shape` with only instance dimensions:
>>> instance_shape = instance('undef', points=2)
(points=2, undef=None)
Here, the dimension `undef` is created with an undefined size of `None`. Undefined sizes are automatically filled in by `tensor()`, `wrap()`, `stack()` and `concat()`.
To create a shape with multiple types, use `merge_shapes()`, `concat_shapes()` or the syntax `shape1 & shape2`.
See Also: `channel()`, `batch()`, `spatial()`
Args
    *args: Either a `Shape` or `Tensor` to filter, or names of dimensions with undefined sizes as `str`.
    **dims: Dimension sizes and names. Must be empty when used as a filter operation.
Returns
    `Shape` containing only dimensions of type instance.
Expand source code
def instance(*args, **dims: Union[int, str, tuple, list, Shape]) -> Shape: """ Returns the instance dimensions of an existing `Shape` or creates a new `Shape` with only instance dimensions. Usage for filtering instance dimensions: >>> instance_dims = instance(shape) >>> instance_dims = instance(tensor) Usage for creating a `Shape` with only instance dimensions: >>> instance_shape = instance('undef', points=2) (points=2, undef=None) Here, the dimension `undef` is created with an undefined size of `None`. Undefined sizes are automatically filled in by `tensor`, `wrap`, `stack` and `concat`. To create a shape with multiple types, use `merge_shapes()`, `concat_shapes()` or the syntax `shape1 & shape2`. See Also: `channel`, `batch`, `spatial` Args: *args: Either * `Shape` or `Tensor` to filter or * Names of dimensions with undefined sizes as `str`. **dims: Dimension sizes and names. Must be empty when used as a filter operation. Returns: `Shape` containing only dimensions of type instance. """ from .magic import Shaped if all(isinstance(arg, str) for arg in args) or dims: return _construct_shape(INSTANCE_DIM, '', *args, **dims) elif len(args) == 1 and isinstance(args[0], Shape): return args[0].instance elif len(args) == 1 and isinstance(args[0], Shaped): return shape(args[0]).instance else: raise AssertionError(f"instance() must be called either as a selector instance(Shape) or instance(Tensor) or as a constructor instance(*names, **dims). Got *args={args}, **dims={dims}")
def is_finite(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Returns a `Tensor` or `PhiTreeNode` matching `x` with values `True` where `x` has a finite value and `False` otherwise.
Expand source code
def is_finite(x) -> Union[Tensor, PhiTreeNode]:
    """ Returns a `Tensor` or `phi.math.magic.PhiTreeNode` matching `x` with values `True` where `x` has a finite value and `False` otherwise. """
    return _backend_op1(x, Backend.isfinite)
def is_inf(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Returns a `Tensor` or `PhiTreeNode` matching `x` with values `True` where `x` is `+inf` or `-inf` and `False` otherwise.
Expand source code
def is_inf(x) -> Union[Tensor, PhiTreeNode]:
    """ Returns a `Tensor` or `phi.math.magic.PhiTreeNode` matching `x` with values `True` where `x` is `+inf` or `-inf` and `False` otherwise. """
    # Fixed: the extracted source passed Backend.isnan here, contradicting the docstring.
    # Assumes Backend.isinf exists analogous to Backend.isnan / Backend.isfinite.
    return _backend_op1(x, Backend.isinf)
def is_nan(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Returns a `Tensor` or `PhiTreeNode` matching `x` with values `True` where `x` is `NaN` and `False` otherwise.
Expand source code
def is_nan(x) -> Union[Tensor, PhiTreeNode]:
    """ Returns a `Tensor` or `phi.math.magic.PhiTreeNode` matching `x` with values `True` where `x` is `NaN` and `False` otherwise. """
    return _backend_op1(x, Backend.isnan)
def is_scalar(value) ‑> bool
-
Checks whether `value` has no dimensions.
Args
    value: `Tensor` or Python primitive or native tensor.
Returns
    `bool`
Expand source code
def is_scalar(value) -> bool: """ Checks whether `value` has no dimensions. Args: value: `Tensor` or Python primitive or native tensor. Returns: `bool` """ if isinstance(value, Tensor): return value.shape.rank == 0 elif isinstance(value, numbers.Number): return True else: return len(choose_backend(value).staticshape(value)) == 0
def isfinite(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Alias for `is_finite()`. Returns a `Tensor` or `PhiTreeNode` matching `x` with values `True` where `x` has a finite value and `False` otherwise.
Expand source code
def is_finite(x) -> Union[Tensor, PhiTreeNode]:
    """ Returns a `Tensor` or `phi.math.magic.PhiTreeNode` matching `x` with values `True` where `x` has a finite value and `False` otherwise. """
    return _backend_op1(x, Backend.isfinite)
def iterate(f: Callable, iterations: Union[int, phi.math._shape.Shape], *x0, f_kwargs: dict = None, range: Callable = builtins.range, measure: Callable = None, **f_kwargs_)
-
Repeatedly calls `f`, passing the previous output as the next input.
Args
    f: Function to call. Must be callable as `f(x0, **f_kwargs)` and `f(f(x0, **f_kwargs), **f_kwargs)`.
    iterations: Number of iterations as `int` or single-dimension `Shape`. If `int`, returns the final output of `f`. If `Shape`, returns the trajectory (`x0` and all outputs of `f`), stacking the values along this dimension.
    x0: Initial positional arguments for `f`.
    range: Range function. Can be used to generate tqdm output by passing `trange`.
    measure: Function without arguments to call before and after (and, if `isinstance(iterations, Shape)`, in between) the calls to `f`. The measure of each call to `f` is `measure()` after the call minus `measure()` before it.
    f_kwargs: Additional keyword arguments to be passed to `f`. These arguments can be of any type.
    f_kwargs_: More keyword arguments.
Returns
    trajectory: Final output or trajectory of `f`, depending on `iterations`.
    measured: Only if `measure` was specified, the measured value or trajectory tensor.
Expand source code
def iterate(f: Callable, iterations: Union[int, Shape], *x0, f_kwargs: dict = None, range: Callable = range, measure: Callable = None, **f_kwargs_): """ Repeatedly call `function`, passing the previous output as the next input. Args: f: Function to call. Must be callable as `f(x0, **f_kwargs)` and `f(f(x0, **f_kwargs), **f_kwargs)`. iterations: Number of iterations as `int` or single-dimension `Shape`. If `int`, returns the final output of `f`. If `Shape`, returns the trajectory (`x0` and all outputs of `f`), stacking the values along this dimension. x0: Initial positional arguments for `f`. range: Range function. Can be used to generate tqdm output by passing `trange`. measure: Function without arguments to call at the start and end (and in between if `isinstance(iterations, Shape)`) calls to `f`. The measure of each call to `f` is `measure()` after minus `measure()` before the call. f_kwargs: Additional keyword arguments to be passed to `f`. These arguments can be of any type. f_kwargs_: More keyword arguments. Returns: trajectory: Trajectory of final output of `f`, depending on `iterations`. measured: Only if `measure` was specified, returns the measured value or trajectory tensor. """ if f_kwargs is None: f_kwargs = {} f_kwargs.update(f_kwargs_) x = x0 if isinstance(iterations, int): start_time = measure() if measure else None for _ in range(iterations): x = f(*x, **f_kwargs) if not isinstance(x, tuple): x = (x,) assert len(x) == len(x0), f"Function to iterate must return {len(x0)} outputs to match input but got {x}" result = x[0] if len(x0) == 1 else x return (result, measure() - start_time) if measure else result elif isinstance(iterations, Shape): xs = [x0] ts = [measure()] if measure else None for _ in range(iterations.size): x = f(*x, **f_kwargs) if not isinstance(x, tuple): x = (x,) assert len(x) == len(x0), f"Function to iterate must return {len(x0)} outputs to match input but got {x}" xs.append(x) if measure: ts.append(measure()) xs = [stack(item, iterations.with_size(None)) for item in zip(*xs)] result = xs[0] if len(x0) == 1 else xs ts = np.asarray(ts) return (result, wrap(ts[1:] - ts[:-1], iterations.with_size(None))) if measure else result else: raise ValueError(f"iterations must be an int or Shape but got {type(iterations)}")
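A minimal sketch of both calling modes; the trajectory dimension name `steps` is arbitrary and the output formatting is indicative:
>>> from phi import math
>>> from phi.math import batch
>>> math.iterate(lambda x: 2 * x, 3, math.wrap(1.))  # int iterations: final value only
8.0
>>> math.iterate(lambda x: 2 * x, batch(steps=3), math.wrap(1.))  # Shape: x0 plus 3 outputs
(1.000, 2.000, 4.000, 8.000) along stepsᵇ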
def jacobian(f: Callable, wrt: str = None, get_output=True) ‑> Callable
-
Creates a function which computes the Jacobian matrix of `f`. For scalar functions, consider using `functional_gradient()` instead.
Example:
def loss_function(x, y):
    prediction = f(x)
    loss = math.l2_loss(prediction - y)
    return loss, prediction

dx = jacobian(loss_function, wrt='x', get_output=False)(x, y)
(loss, prediction), (dx, dy) = jacobian(loss_function, wrt='x,y', get_output=True)(x, y)
Functional gradients are implemented for the following backends:
- PyTorch: `torch.autograd.grad` / `torch.autograd.backward`
- TensorFlow: `tf.GradientTape`
- Jax: `jax.grad`
When the gradient function is invoked, `f` is called with tensors that track the gradient. For PyTorch, `arg.requires_grad = True` is set for all positional arguments of `f`.
Args
    f: Function to be differentiated. `f` must return a floating point `Tensor` with rank zero. It can return additional tensors, which are treated as auxiliary data and will be returned by the gradient function if `get_output=True`. All arguments for which the gradient is computed must be of dtype float or complex.
    get_output: Whether the gradient function should also return the return values of `f`.
    wrt: Comma-separated parameter names of `f` with respect to which the gradient should be computed. If not specified, the gradient will be computed w.r.t. the first positional argument (highly discouraged).
Returns
    Function with the same arguments as `f` that returns the value of `f`, auxiliary data and the Jacobian of `f` if `get_output=True`, else just the Jacobian of `f`.
Expand source code
def jacobian(f: Callable, wrt: str = None, get_output=True) -> Callable: """ Creates a function which computes the Jacobian matrix of `f`. For scalar functions, consider using `functional_gradient()` instead. Example: ```python def f(x, y): prediction = f(x) loss = math.l2_loss(prediction - y) return loss, prediction dx = jacobian(loss_function, wrt='x', get_output=False)(x, y) (loss, prediction), (dx, dy) = jacobian(loss_function, wrt='x,y', get_output=True)(x, y) ``` Functional gradients are implemented for the following backends: * PyTorch: [`torch.autograd.grad`](https://pytorch.org/docs/stable/autograd.html#torch.autograd.grad) / [`torch.autograd.backward`](https://pytorch.org/docs/stable/autograd.html#torch.autograd.backward) * TensorFlow: [`tf.GradientTape`](https://www.tensorflow.org/api_docs/python/tf/GradientTape) * Jax: [`jax.grad`](https://jax.readthedocs.io/en/latest/jax.html#jax.grad) When the gradient function is invoked, `f` is called with tensors that track the gradient. For PyTorch, `arg.requires_grad = True` for all positional arguments of `f`. Args: f: Function to be differentiated. `f` must return a floating point `Tensor` with rank zero. It can return additional tensors which are treated as auxiliary data and will be returned by the gradient function if `return_values=True`. All arguments for which the gradient is computed must be of dtype float or complex. get_output: Whether the gradient function should also return the return values of `f`. wrt: Comma-separated parameter names of `f` with respect to which the gradient should be computed. If not specified, the gradient will be computed w.r.t. the first positional argument (highly discouraged). Returns: Function with the same arguments as `f` that returns the value of `f`, auxiliary data and Jacobian of `f` if `get_output=True`, else just the Jacobian of `f`. """ f_params, wrt = simplify_wrt(f, wrt) return GradientFunction(f, f_params, wrt, get_output, is_f_scalar=False)
def jit_compile(f: Callable = None, auxiliary_args: str = '', forget_traces: bool = None) ‑> Callable
-
Compiles a graph based on the function `f`. The graph compilation is performed just-in-time (jit), i.e. when the returned function is called for the first time.
The traced function will compute the same result as `f` but may run much faster. Some checks may be disabled in the compiled function.
Can be used as a decorator:
@math.jit_compile
def my_function(x: math.Tensor) -> math.Tensor:
    ...
Invoking the returned function may trigger re-tracing / re-compiling of `f` after the first call if
- it is called with a different number of arguments,
- the tensor arguments have different dimension names or types (the dimension order also counts),
- any `Tensor` arguments require a different backend than previous invocations, or
- `PhiTreeNode` positional arguments do not match in non-variable properties.
Compilation is implemented for the following backends:
- PyTorch: `torch.jit.trace`
- TensorFlow: `tf.function`
- Jax: `jax.jit`
Jit compilations cannot be nested, i.e. you cannot call `jit_compile()` while another function is being compiled. An exception to this is `jit_compile_linear()`, which can be called from within a jit-compiled function.
See Also: `jit_compile_linear()`
Args
    f: Function to be traced. All positional arguments must be of type `Tensor` or `PhiTreeNode`, and `f` must return a single `Tensor` or `PhiTreeNode`.
    auxiliary_args: Comma-separated parameter names of arguments that are not relevant to backpropagation.
    forget_traces: If `True`, only remembers the most recent compiled instance of this function. Upon tracing a new instance (due to changed shapes or auxiliary args), previous traces are deleted.
Returns
    Function with similar signature and return values as `f`.
Expand source code
def jit_compile(f: Callable = None, auxiliary_args: str = '', forget_traces: bool = None) -> Callable: """ Compiles a graph based on the function `f`. The graph compilation is performed just-in-time (jit), e.g. when the returned function is called for the first time. The traced function will compute the same result as `f` but may run much faster. Some checks may be disabled in the compiled function. Can be used as a decorator: ```python @math.jit_compile def my_function(x: math.Tensor) -> math.Tensor: ``` Invoking the returned function may invoke re-tracing / re-compiling `f` after the first call if either * it is called with a different number of arguments, * the tensor arguments have different dimension names or types (the dimension order also counts), * any `Tensor` arguments require a different backend than previous invocations, * `phi.math.magic.PhiTreeNode` positional arguments do not match in non-variable properties. Compilation is implemented for the following backends: * PyTorch: [`torch.jit.trace`](https://pytorch.org/docs/stable/jit.html) * TensorFlow: [`tf.function`](https://www.tensorflow.org/guide/function) * Jax: [`jax.jit`](https://jax.readthedocs.io/en/latest/notebooks/quickstart.html#using-jit-to-speed-up-functions) Jit-compilations cannot be nested, i.e. you cannot call `jit_compile()` while another function is being compiled. An exception to this is `jit_compile_linear()` which can be called from within a jit-compiled function. See Also: `jit_compile_linear()` Args: f: Function to be traced. All positional arguments must be of type `Tensor` or `phi.math.magic.PhiTreeNode` returning a single `Tensor` or `phi.math.magic.PhiTreeNode`. auxiliary_args: Comma-separated parameter names of arguments that are not relevant to backpropagation. forget_traces: If `True`, only remembers the most recent compiled instance of this function. Upon tracing with new instance (due to changed shapes or auxiliary args), deletes the previous traces. Returns: Function with similar signature and return values as `f`. """ if f is None: kwargs = {k: v for k, v in locals().items() if v is not None} return partial(jit_compile, **kwargs) auxiliary_args = set(s.strip() for s in auxiliary_args.split(',') if s.strip()) return f if isinstance(f, (JitFunction, LinearFunction)) and f.auxiliary_args == auxiliary_args else JitFunction(f, auxiliary_args, forget_traces or False)
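A minimal decorator sketch; the update rule is a placeholder, and with the NumPy backend the function simply runs eagerly:
from phi import math

@math.jit_compile
def step(x: math.Tensor) -> math.Tensor:
    return x - 0.1 * math.laplace(x)  # placeholder update rule

x = math.random_uniform(math.spatial(x=64))
x = step(x)  # first call traces and compiles; subsequent calls reuse the trace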
def jit_compile_linear(f: Callable[[~X], ~Y] = None, auxiliary_args: str = None, forget_traces: bool = None) ‑> phi.math._functional.LinearFunction[~X, ~Y]
-
Compile an optimized representation of the linear function `f`. For backends that support sparse tensors, a sparse matrix will be constructed for `f`.
Can be used as a decorator:
@math.jit_compile_linear
def my_linear_function(x: math.Tensor) -> math.Tensor:
    ...
Unlike `jit_compile()`, `jit_compile_linear()` can be called during a regular jit compilation.
See Also: `jit_compile()`
Args
    f: Function that is linear in its positional arguments. All positional arguments must be of type `Tensor` and `f` must return a `Tensor`.
    auxiliary_args: Which parameters `f` is not linear in. These arguments are treated as conditioning arguments and will cause re-tracing on change.
    forget_traces: If `True`, only remembers the most recent compiled instance of this function. Upon tracing a new instance (due to changed shapes or auxiliary args), previous traces are deleted.
Returns
    `LinearFunction` with similar signature and return values as `f`.
Expand source code
def jit_compile_linear(f: Callable[[X], Y] = None, auxiliary_args: str = None, forget_traces: bool = None) -> 'LinearFunction[X, Y]': """ Compile an optimized representation of the linear function `f`. For backends that support sparse tensors, a sparse matrix will be constructed for `f`. Can be used as a decorator: ```python @math.jit_compile_linear def my_linear_function(x: math.Tensor) -> math.Tensor: ``` Unlike `jit_compile()`, `jit_compile_linear()` can be called during a regular jit compilation. See Also: `jit_compile()` Args: f: Function that is linear in its positional arguments. All positional arguments must be of type `Tensor` and `f` must return a `Tensor`. auxiliary_args: Which parameters `f` is not linear in. These arguments are treated as conditioning arguments and will cause re-tracing on change. forget_traces: If `True`, only remembers the most recent compiled instance of this function. Upon tracing with new instance (due to changed shapes or auxiliary args), deletes the previous traces. Returns: `LinearFunction` with similar signature and return values as `f`. """ if f is None: kwargs = {k: v for k, v in locals().items() if v is not None} return partial(jit_compile_linear, **kwargs) if isinstance(f, JitFunction): f = f.f # cannot trace linear function from jitted version if isinstance(auxiliary_args, str): auxiliary_args = set(s.strip() for s in auxiliary_args.split(',') if s.strip()) else: assert auxiliary_args is None f_params = function_parameters(f) auxiliary_args = f_params[1:] return f if isinstance(f, LinearFunction) and f.auxiliary_args == auxiliary_args else LinearFunction(f, auxiliary_args, forget_traces or False)
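A minimal sketch; `laplace` with a fixed extrapolation is linear in `x`, so it qualifies:
from phi import math

@math.jit_compile_linear
def lhs(x: math.Tensor) -> math.Tensor:
    return math.laplace(x)  # linear in x for a fixed padding / extrapolation

y = lhs(math.random_uniform(math.spatial(x=16)))  # traced as a linear function on first call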
def l1_loss(x, reduce: Union[str, tuple, list, set, phi.math._shape.Shape, Callable] = <function non_batch>) ‑> phi.math._tensors.Tensor
-
Computes ∑ᵢ ‖xᵢ‖₁, summing over all non-batch dimensions.
Args
    x: `Tensor` or `PhiTreeNode` or 0D or 1D native tensor. For `PhiTreeNode` objects, the sum over all value attributes is computed.
    reduce: Dimensions to reduce as `DimFilter`.
Returns
    loss: `Tensor`
Expand source code
def l1_loss(x, reduce: DimFilter = math.non_batch) -> Tensor: """ Computes *∑<sub>i</sub> ||x<sub>i</sub>||<sub>1</sub>*, summing over all non-batch dimensions. Args: x: `Tensor` or `phi.math.magic.PhiTreeNode` or 0D or 1D native tensor. For `phi.math.magic.PhiTreeNode` objects, only value the sum over all value attributes is computed. reduce: Dimensions to reduce as `DimFilter`. Returns: loss: `Tensor` """ if isinstance(x, Tensor): return math.sum_(abs(x), reduce) elif isinstance(x, PhiTreeNode): return sum([l1_loss(getattr(x, a), reduce) for a in variable_values(x)]) else: try: backend = math.choose_backend(x) shape = backend.staticshape(x) if len(shape) == 0: return abs(x) elif len(shape) == 1: return backend.sum(abs(x)) else: raise ValueError("l2_loss is only defined for 0D and 1D native tensors. For higher-dimensional data, use Φ-Flow tensors.") except math.NoBackendFound: raise ValueError(x)
def l2_loss(x, reduce: Union[str, tuple, list, set, phi.math._shape.Shape, Callable] = <function non_batch>) ‑> phi.math._tensors.Tensor
-
Computes ∑ᵢ ‖xᵢ‖₂² / 2, summing over all non-batch dimensions.
Args
    x: `Tensor` or `PhiTreeNode` or 0D or 1D native tensor. For `PhiTreeNode` objects, the sum over all value attributes is computed.
    reduce: Dimensions to reduce as `DimFilter`.
Returns
    loss: `Tensor`
Expand source code
def l2_loss(x, reduce: DimFilter = math.non_batch) -> Tensor: """ Computes *∑<sub>i</sub> ||x<sub>i</sub>||<sub>2</sub><sup>2</sup> / 2*, summing over all non-batch dimensions. Args: x: `Tensor` or `phi.math.magic.PhiTreeNode` or 0D or 1D native tensor. For `phi.math.magic.PhiTreeNode` objects, only value the sum over all value attributes is computed. reduce: Dimensions to reduce as `DimFilter`. Returns: loss: `Tensor` """ if isinstance(x, Tensor): if x.dtype.kind == complex: x = abs(x) return math.sum_(x ** 2, reduce) * 0.5 elif isinstance(x, PhiTreeNode): return sum([l2_loss(getattr(x, a), reduce) for a in variable_values(x)]) else: try: backend = math.choose_backend(x) shape = backend.staticshape(x) if len(shape) == 0: return x ** 2 * 0.5 elif len(shape) == 1: return backend.sum(x ** 2) * 0.5 else: raise ValueError("l2_loss is only defined for 0D and 1D native tensors. For higher-dimensional data, use Φ-Flow tensors.") except math.NoBackendFound: raise ValueError(x)
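A worked example covering both `l1_loss` (above) and `l2_loss`; the printed scalars are indicative:
>>> from phi import math
>>> from phi.math import instance
>>> x = math.wrap([3., 4.], instance(points=2))
>>> math.l1_loss(x)  # |3| + |4|
7.0
>>> math.l2_loss(x)  # (3² + 4²) / 2
12.5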
def laplace(x: phi.math._tensors.Tensor, dx: Union[phi.math._tensors.Tensor, float] = 1, padding: Union[Extrapolation, float, phi.math._tensors.Tensor] = boundary, dims: Union[str, tuple, list, set, phi.math._shape.Shape, Callable] = <function spatial>, weights: phi.math._tensors.Tensor = None)
-
Spatial Laplace operator as defined for scalar fields. If a vector field is passed, the Laplace is computed component-wise.
Args
    x: n-dimensional field of shape (batch, spatial dimensions..., components)
    dx: Scalar or 1D tensor.
    padding: Extrapolation.
    dims: The second derivative along these dimensions is summed over.
    weights: (Optional) Multiply the axis terms by these factors before summation. Must be a `Tensor` with a single channel dimension that lists all Laplace dims by name.
Returns
    `Tensor` of same shape as `x`
Expand source code
def laplace(x: Tensor, dx: Union[Tensor, float] = 1, padding: Union[Extrapolation, float, Tensor] = extrapolation.BOUNDARY, dims: DimFilter = spatial, weights: Tensor = None): """ Spatial Laplace operator as defined for scalar fields. If a vector field is passed, the laplace is computed component-wise. Args: x: n-dimensional field of shape (batch, spacial dimensions..., components) dx: scalar or 1d tensor padding: extrapolation dims: The second derivative along these dimensions is summed over weights: (Optional) Multiply the axis terms by these factors before summation. Must be a Tensor with a single channel dimension that lists all laplace dims by name. Returns: `phi.math.Tensor` of same shape as `x` """ if isinstance(dx, (tuple, list)): dx = wrap(dx, batch('_laplace')) elif isinstance(dx, Tensor) and dx.vector.exists: dx = rename_dims(dx, 'vector', batch('_laplace')) if isinstance(x, Extrapolation): return x.spatial_gradient() left, center, right = shift(wrap(x), (-1, 0, 1), dims, padding, stack_dim=batch('_laplace')) result = (left + right - 2 * center) / (dx ** 2) if weights is not None: dim_names = x.shape.only(dims).names assert channel(weights).rank == 1 and channel(weights).item_names is not None, f"weights must have one channel dimension listing the laplace dims but got {shape(weights)}" assert set(channel(weights).item_names[0]) >= set(dim_names), f"the channel dim of weights must contain all laplace dims {dim_names} but only has {channel(weights).item_names}" result *= rename_dims(weights, channel, batch('_laplace')) result = math.sum_(result, '_laplace') return result
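A 1D worked example; interior values follow x[i-1] + x[i+1] - 2·x[i], and the output formatting is indicative:
>>> from phi import math
>>> from phi.math import spatial, extrapolation
>>> x = math.wrap([0., 1., 4., 9.], spatial(x=4))
>>> math.laplace(x, padding=extrapolation.ZERO)
(1.000, 2.000, 2.000, -14.000) along xˢ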
def layout(objects, *shape: phi.math._shape.Shape) ‑> phi.math._tensors.Tensor
-
Wraps a Python tree in a `Tensor`, allowing elements to be accessed via dimensions. A Python tree is a structure of nested `tuple`, `list`, `dict` and leaf objects where leaves can be any Python object.
All keys of `dict` containers must be of type `str`. The keys are automatically assigned as item names along that dimension unless conflicting with other elements.
Strings may also be used as containers.
Example:
>>> t = layout({'a': 'text', 'b': [0, 1]}, channel('dict,inner'))
>>> t.inner[1].dict['a'].native()
'e'
See Also: `tensor()`, `wrap()`.
Args
    objects: PyTree of `list` or `tuple`.
    *shape: Tensor dimensions.
Returns
    `Tensor`. Calling `Tensor.native()` on the returned tensor will return `objects`.
Expand source code
def layout(objects, *shape: Shape) -> Tensor: """ Wraps a Python tree in a `Tensor`, allowing elements to be accessed via dimensions. A python tree is a structure of nested `tuple`, `list`, `dict` and *leaf* objects where leaves can be any Python object. All keys of `dict` containers must be of type `str`. The keys are automatically assigned as item names along that dimension unless conflicting with other elements. Strings may also be used as containers. Example: >>> t = layout({'a': 'text', 'b': [0, 1]}, channel('dict,inner')) >>> t.inner[1].dict['a'].native() 'e' See Also: `tensor()`, `wrap()`. Args: objects: PyTree of `list` or `tuple`. *shape: Tensor dimensions Returns: `Tensor`. Calling `Tensor.native()` on the returned tensor will return `objects`. """ assert all(isinstance(s, Shape) for s in shape), f"shape needs to be one or multiple Shape instances but got {shape}" shape = EMPTY_SHAPE if len(shape) == 0 else concat_shapes(*shape) if isinstance(objects, Layout): assert objects.shape == shape return objects if not shape.well_defined: def recursive_determine_shape(native, shape: Shape): if not shape: return shape if isinstance(native, dict): assert all([isinstance(k, str) for k in native.keys()]), f"All dict keys in PyTrees must be str but got {tuple(native.keys())}" shape = shape.replace(shape[0], shape[0].with_size(tuple(native.keys()))) if shape.rank == 1: return shape.with_sizes((len(native),)) inner_shape = shape[1:] if isinstance(native, (tuple, list)): inner_shapes = [recursive_determine_shape(n, inner_shape) for n in native] elif isinstance(native, dict): inner_shapes = [recursive_determine_shape(n, inner_shape) for n in native.values()] else: raise ValueError(native) return shape_stack(shape[0], *inner_shapes) shape = recursive_determine_shape(objects, shape) return Layout(objects, shape) # if shape.volume == 1: # objects = np.asarray(objects, dtype=object) # # if isinstance(objects, (tuple, list)): # objects = np.asarray(objects, dtype=object) # if isinstance(objects, np.ndarray) and objects.dtype == object: # return Layout(objects, shape) # else: # assert shape.volume == 1, f"Cannot layout object of type {objects} along {shape}, a tuple, list or object array is required."
def linspace(start: Union[phi.math._tensors.Tensor, float], stop: Union[phi.math._tensors.Tensor, float], dim: phi.math._shape.Shape) ‑> phi.math._tensors.Tensor
-
Returns evenly spaced numbers between `start` and `stop`, laid out along `dim`.
See Also: `arange()`, `meshgrid()`.
Args
    start: First value, `int` or `Tensor`.
    stop: Last value, `int` or `Tensor`.
    dim: Linspace dimension of integer size. The size determines how many values to linearly space between `start` and `stop`. The values will be laid out along `dim`.
Returns
    `Tensor`
Examples
>>> math.linspace(0, 1, spatial(x=5))
(0.000, 0.250, 0.500, 0.750, 1.000) along xˢ
>>> math.linspace(0, (-1, 1), spatial(x=3))
(0.000, 0.000); (-0.500, 0.500); (-1.000, 1.000) (xˢ=3, vectorᶜ=2)
Expand source code
def linspace(start: Union[float, Tensor], stop: Union[float, Tensor], dim: Shape) -> Tensor: """ Returns `number` evenly spaced numbers between `start` and `stop`. See Also: `arange()`, `meshgrid()`. Args: start: First value, `int` or `Tensor`. stop: Last value, `int` or `Tensor`. dim: Linspace dimension of integer size. The size determines how many values to linearly space between `start` and `stop`. The values will be laid out along `dim`. Returns: `Tensor` Examples: >>> math.linspace(0, 1, spatial(x=5)) (0.000, 0.250, 0.500, 0.750, 1.000) along xˢ >>> math.linspace(0, (-1, 1), spatial(x=3)) (0.000, 0.000); (-0.500, 0.500); (-1.000, 1.000) (xˢ=3, vectorᶜ=2) """ assert isinstance(dim, Shape) and dim.rank == 1, f"dim must be a single-dimension Shape but got {dim}" if is_scalar(start) and is_scalar(stop): if isinstance(start, Tensor): start = start.native() if isinstance(stop, Tensor): stop = stop.native() native_linspace = choose_backend(start, stop, prefer_default=True).linspace(start, stop, dim.size) return NativeTensor(native_linspace, dim) else: return map_(linspace, start, stop, dim=dim)
def log(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Computes the natural logarithm of the `Tensor` or `PhiTreeNode` `x`.
Expand source code
def log(x) -> Union[Tensor, PhiTreeNode]:
    """ Computes the natural logarithm of the `Tensor` or `phi.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.log)
def log10(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Computes log(x) of the `Tensor` or `PhiTreeNode` `x` with base 10.
Expand source code
def log10(x) -> Union[Tensor, PhiTreeNode]:
    """ Computes *log(x)* of the `Tensor` or `phi.math.magic.PhiTreeNode` `x` with base 10. """
    return _backend_op1(x, Backend.log10)
def log2(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Computes log(x) of the `Tensor` or `PhiTreeNode` `x` with base 2.
Expand source code
def log2(x) -> Union[Tensor, PhiTreeNode]:
    """ Computes *log(x)* of the `Tensor` or `phi.math.magic.PhiTreeNode` `x` with base 2. """
    return _backend_op1(x, Backend.log2)
def log_gamma(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Computes log(gamma(x)) of the `Tensor` or `PhiTreeNode` `x`.
Expand source code
def log_gamma(x) -> Union[Tensor, PhiTreeNode]:
    """ Computes *log(gamma(x))* of the `Tensor` or `phi.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.log_gamma)
def map(function, *values, range=builtins.range, **kwargs) ‑> Optional[phi.math._tensors.Tensor]
-
Calls `function` on all elements of `values`.
Args
    function: Function to be called on single elements contained in `values`. Must return a value that can be stored in tensors.
    *values: `Tensor`s containing positional arguments for `function`. The number of tensors must match the `function` signature.
    range: Range function. Can be used to generate tqdm output by passing `trange`.
    **kwargs: Non-`Tensor` keyword arguments for `function`. Their shapes are not broadcast with the positional arguments.
Returns
    `Tensor` of the same shape as `values`.
Expand source code
def map_(function, *values, range=range, **kwargs) -> Union[Tensor, None]: """ Calls `function` on all elements of `values`. Args: function: Function to be called on single elements contained in `value`. Must return a value that can be stored in tensors. *values: `Tensors` containing positional arguments for `function`. Number of tensors must match `function` signature. range: Range function. Can be used to generate tqdm output by passing `trange`. **kwargs: Non-`Tensor` keyword arguments for `function`. Their shapes are not broadcast with the positional arguments. Returns: `Tensor` of same shape as `value`. """ if not values: return function(**kwargs) values = [v if isinstance(v, Shapable) else wrap(v) for v in values] shape = merge_shapes(*[v.shape for v in values]) flat = [pack_dims(expand(v, shape), shape, channel(flat=shape.volume)) for v in values] result = [] results = None for _, items in zip(range(flat[0].flat.size_or_1), zip(*flat)): f_output = function(*items, **kwargs) if isinstance(f_output, tuple): if results is None: results = [[] for _ in f_output] for result_i, output_i in zip(results, f_output): result_i.append(output_i) else: result.append(f_output) if results is None: if any(r is None for r in result): assert all(r is None for r in result), f"map function returned None for some elements, {result}" return None return unpack_dim(stack(result, channel('_c')) if isinstance(result, Shapable) else wrap(result, channel('_c')), '_c', shape) else: for i, result_i in enumerate(results): if any(r is None for r in result_i): assert all(r is None for r in result_i), f"map function returned None for some elements at output index {i}, {result_i}" results[i] = None return tuple([unpack_dim(stack(result_i, channel('_c')) if isinstance(result_i, Shapable) else wrap(result_i, channel('_c')), '_c', shape) for result_i in results])
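A minimal sketch; `function` receives scalar elements and the results are reassembled into a tensor of the input shape (output formatting is indicative):
>>> from phi import math
>>> from phi.math import spatial
>>> math.map(lambda v: v ** 2, math.wrap([1, 2, 3], spatial(x=3)))
(1, 4, 9) along xˢ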
def map_c2b(f: Callable) ‑> Callable
-
Map channel dimensions to batch dimensions. Short for `map_types(f, channel, batch)`.
Expand source code
def map_c2b(f: Callable) -> Callable:
    """ Map channel dimensions to batch dimensions. Short for `map_types(f, channel, batch)`. """
    # Docstring fixed to match the code: the extracted source said `map_types(f, instance, batch)`.
    return map_types(f, channel, batch)
def map_i2b(f: Callable) ‑> Callable
-
Map instance dimensions to batch dimensions. Short for `map_types(f, instance, batch)`.
Expand source code
def map_i2b(f: Callable) -> Callable:
    """ Map instance dimensions to batch dimensions. Short for `map_types(f, instance, batch)`. """
    return map_types(f, instance, batch)
def map_pairs(map_function: Callable, values: phi.math._tensors.Tensor, connections: phi.math._tensors.Tensor)
-
Evaluates `map_function` on all pairs of elements present in the sparsity pattern of `connections`.
Args
    map_function: Function with signature `(Tensor, Tensor) -> Tensor`.
    values: Values to evaluate `map_function` on. Needs to have a spatial or instance dimension but must not have a dual dimension.
    connections: Sparse tensor.
Returns
    `Tensor` with the sparse dimensions of `connections` and all non-instance dimensions returned by `map_function`.
Expand source code
def map_pairs(map_function: Callable, values: Tensor, connections: Tensor): """ Evaluates `map_function` on all pairs of elements present in the sparsity pattern of `connections`. Args: map_function: Function with signature `(Tensor, Tensor) -> Tensor`. values: Values to evaluate `map_function` on. Needs to have a spatial or instance dimension but must not have a dual dimension. connections: Sparse tensor. Returns: `Tensor` with the sparse dimensions of `connections` and all non-instance dimensions returned by `map_function`. """ assert dual(values).is_empty, f"values must not have a dual dimension but got {values.shape}" inst_dim = non_batch(values).non_channel.non_dual.name indices = stored_indices(connections, invalid='clamp') origin = values[{inst_dim: indices[inst_dim]}] target = values[{inst_dim: indices['~' + inst_dim]}] result = map_function(origin, target) return tensor_like(connections, result, value_order='as existing')
def map_s2b(f: Callable) ‑> Callable
-
Map spatial dimensions to batch dimensions. Short for `map_types(f, spatial, batch)`.
Expand source code
def map_s2b(f: Callable) -> Callable:
    """ Map spatial dimensions to batch dimensions. Short for `map_types(f, spatial, batch)`. """
    return map_types(f, spatial, batch)
def map_types(f: Callable, dims: Union[phi.math._shape.Shape, tuple, list, str, Callable], dim_type: Union[str, Callable]) ‑> Callable
-
Wraps a function to change the dimension types of its `Tensor` and `PhiTreeNode` arguments.
Args
    f: Function to wrap.
    dims: Concrete dimensions or dimension type, such as `spatial` or `batch`. These dimensions will be mapped to `dim_type` for all positional function arguments.
    dim_type: Dimension type, such as `spatial` or `batch`. `f` will be called with dimensions remapped to this type.
Returns
    Function with signature matching `f`.
Expand source code
def map_types(f: Callable, dims: Union[Shape, tuple, list, str, Callable], dim_type: Union[Callable, str]) -> Callable: """ Wraps a function to change the dimension types of its `Tensor` and `phi.math.magic.PhiTreeNode` arguments. Args: f: Function to wrap. dims: Concrete dimensions or dimension type, such as `spatial` or `batch`. These dimensions will be mapped to `dim_type` for all positional function arguments. dim_type: Dimension type, such as `spatial` or `batch`. `f` will be called with dimensions remapped to this type. Returns: Function with signature matching `f`. """ def forward_retype(obj, input_types: Shape): tree, tensors = disassemble_tree(obj) retyped = [] for t in tensors: for dim in t.shape.only(dims): t = t.dimension(dim).as_type(dim_type) input_types = math.merge_shapes(input_types, dim.with_size(None)) retyped.append(t) return assemble_tree(tree, retyped), input_types def reverse_retype(obj, input_types: Shape): tree, tensors = disassemble_tree(obj) retyped = [] for t in tensors: for dim in t.shape.only(input_types.names): t = t.dimension(dim).as_type(input_types.get_type(dim)) retyped.append(t) return assemble_tree(tree, retyped) @wraps(f) def retyped_f(*args, **kwargs): input_types = EMPTY_SHAPE retyped_args = [] for arg in args: retyped_arg, input_types = forward_retype(arg, input_types) retyped_args.append(retyped_arg) output = f(*retyped_args, **kwargs) restored_output = reverse_retype(output, input_types) return restored_output return retyped_f
def masked_fill(values: phi.math._tensors.Tensor, valid: phi.math._tensors.Tensor, distance: int = 1) ‑> Tuple[phi.math._tensors.Tensor, phi.math._tensors.Tensor]
-
Extrapolates the values of `values` marked by the nonzero entries of `valid` for `distance` steps in all spatial directions. Overlapping extrapolated values are averaged. Extrapolation also includes diagonals.
Args
    values: `Tensor` holding the values for extrapolation.
    valid: `Tensor` of the same size as `values`, marking the values to extrapolate from with nonzero entries.
    distance: Number of extrapolation steps.
Returns
    values: Extrapolation result.
    valid: Mask marking all valid values after extrapolation.
Expand source code
def masked_fill(values: Tensor, valid: Tensor, distance: int = 1) -> Tuple[Tensor, Tensor]: """ Extrapolates the values of `values` which are marked by the nonzero values of `valid` for `distance` steps in all spatial directions. Overlapping extrapolated values get averaged. Extrapolation also includes diagonals. Args: values: Tensor which holds the values for extrapolation valid: Tensor with same size as `x` marking the values for extrapolation with nonzero values distance: Number of extrapolation steps Returns: values: Extrapolation result valid: mask marking all valid values after extrapolation """ def binarize(x): return math.safe_div(x, x) distance = min(distance, max(values.shape.sizes)) for _ in range(distance): valid = binarize(valid) valid_values = valid * values overlap = valid # count how many values we are adding for dim in values.shape.spatial.names: values_l, values_r = shift(valid_values, (-1, 1), dims=dim, padding=extrapolation.ZERO) valid_values = math.sum_(values_l + values_r + valid_values, dim='shift') mask_l, mask_r = shift(overlap, (-1, 1), dims=dim, padding=extrapolation.ZERO) overlap = math.sum_(mask_l + mask_r + overlap, dim='shift') extp = math.safe_div(valid_values, overlap) # take mean where extrapolated values overlap values = math.where(valid, values, math.where(binarize(overlap), extp, values)) valid = overlap return values, binarize(valid)
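A 1D sketch: one valid cell spreads its value to its neighbors within one step (output formatting is indicative):
>>> from phi import math
>>> from phi.math import spatial
>>> values = math.wrap([0., 5., 0., 0.], spatial(x=4))
>>> valid = math.wrap([0., 1., 0., 0.], spatial(x=4))
>>> filled, new_valid = math.masked_fill(values, valid, distance=1)
>>> filled
(5.000, 5.000, 5.000, 0.000) along xˢ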
def matrix_from_function(f: Callable, *args, auxiliary_args=None, auto_compress=False, sparsify_batch=None, separate_independent=False, **kwargs) ‑> Tuple[phi.math._tensors.Tensor, phi.math._tensors.Tensor]
-
Trace a linear function and construct a matrix. Depending on the functional form of `f`, the returned matrix may be dense or sparse.
Args
    f: Function to trace.
    *args: Arguments for `f`.
    auxiliary_args: Arguments in which the function is not linear. These parameters are not traced but passed on as given in `args` and `kwargs`.
    auto_compress: If `True`, returns a compressed matrix if supported by the backend.
    sparsify_batch: If `False`, the matrix will be batched. If `True`, dual dimensions are created for the involved batch dimensions, resulting in one large matrix instead of a batch of matrices.
    **kwargs: Keyword arguments for `f`.
Returns
    matrix: Matrix representing the linear dependency of the output of `f` on the input of `f`. Input dimensions will be dual dimensions of the matrix while output dimensions will be regular.
    bias: Bias for affine functions, or a zero vector if the function is purely linear.
Expand source code
def matrix_from_function(f: Callable, *args, auxiliary_args=None, auto_compress=False, sparsify_batch=None, separate_independent=False, # not fully implemented, requires auto_compress=False **kwargs) -> Tuple[Tensor, Tensor]: """ Trace a linear function and construct a matrix. Depending on the functional form of `f`, the returned matrix may be dense or sparse. Args: f: Function to trace. *args: Arguments for `f`. auxiliary_args: Arguments in which the function is not linear. These parameters are not traced but passed on as given in `args` and `kwargs`. auto_compress: If `True`, returns a compressed matrix if supported by the backend. sparsify_batch: If `False`, the matrix will be batched. If `True`, will create dual dimensions for the involved batch dimensions. This will result in one large matrix instead of a batch of matrices. **kwargs: Keyword arguments for `f`. Returns: matrix: Matrix representing the linear dependency of the output `f` on the input of `f`. Input dimensions will be `dual` dimensions of the matrix while output dimensions will be regular. bias: Bias for affine functions or zero-vector if the function is purely linear. """ assert isinstance(auxiliary_args, str) or auxiliary_args is None, f"auxiliary_args must be a comma-separated str but got {auxiliary_args}" from ._functional import function_parameters, f_name f_params = function_parameters(f) aux = set(s.strip() for s in auxiliary_args.split(',') if s.strip()) if isinstance(auxiliary_args, str) else f_params[1:] all_args = {**kwargs, **{f_params[i]: v for i, v in enumerate(args)}} aux_args = {k: v for k, v in all_args.items() if k in aux} trace_args = {k: v for k, v in all_args.items() if k not in aux} tree, tensors = disassemble_tree(trace_args) target_backend = choose_backend_t(*tensors) # --- Trace function --- with NUMPY: src = TracerSource(tensors[0].shape, tensors[0].dtype, tuple(trace_args.keys())[0], 0) tracer = ShiftLinTracer(src, {EMPTY_SHAPE: math.ones()}, tensors[0].shape, math.zeros(tensors[0].shape, dtype=tensors[0].dtype)) x_kwargs = assemble_tree(tree, [tracer]) result = f(**x_kwargs, **aux_args) _, result_tensors = disassemble_tree(result) assert len(result_tensors) == 1, f"Linear function output must be or contain a single Tensor but got {result}" tracer = result_tensors[0]._simplify() assert tracer._is_tracer, f"Tracing linear function '{f_name(f)}' failed. Make sure only linear operations are used. Output: {tracer.shape}" # --- Convert to COO --- if sparsify_batch is None: if auto_compress: sparsify_batch = not target_backend.supports(Backend.csr_matrix_batched) else: sparsify_batch = not target_backend.supports(Backend.sparse_coo_tensor_batched) matrix, bias = tracer_to_coo(tracer, sparsify_batch, separate_independent) # --- Compress --- if not auto_compress: return matrix, bias if matrix.default_backend.supports(Backend.mul_csr_dense) and target_backend.supports(Backend.mul_csr_dense): return matrix.compress_rows(), bias # elif backend.supports(Backend.mul_csc_dense): # return matrix.compress_cols(), tracer.bias else: return matrix, bias
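A minimal sketch tracing a hypothetical affine function; the input dimension appears as a dual dimension of the matrix:
>>> from phi import math
>>> from phi.math import spatial
>>> matrix, bias = math.matrix_from_function(lambda x: 2 * x + 1, math.ones(spatial(x=3)))
>>> matrix.shape  # contains x and its dual counterpart ~x
>>> bias          # 1 for every output element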
def max(value: Union[phi.math._tensors.Tensor, list, tuple, numbers.Number, bool], dim: Union[str, tuple, list, set, phi.math._shape.Shape, Callable] = <function non_batch>) ‑> phi.math._tensors.Tensor
-
Determines the maximum value of `value` along the specified dimensions.
Args
    value: `Tensor` or `list` / `tuple` of Tensors.
    dim: Dimension or dimensions to be reduced. One of
    - `None` to reduce all non-batch dimensions
    - `str` containing a single dimension or a comma-separated list of dimensions
    - `Tuple[str]` or `List[str]`
    - `Shape`
    - `batch()`, `instance()`, `spatial()`, `channel()` to select dimensions by type
    - `'0'` when `isinstance(value, (tuple, list))` to reduce the sequence of Tensors
Returns
    `Tensor` without the reduced dimensions.
Expand source code
def max_(value: Union[Tensor, list, tuple, Number, bool], dim: DimFilter = non_batch) -> Tensor: """ Determines the maximum value of `values` along the specified dimensions. Args: value: `Tensor` or `list` / `tuple` of Tensors. dim: Dimension or dimensions to be reduced. One of * `None` to reduce all non-batch dimensions * `str` containing single dimension or comma-separated list of dimensions * `Tuple[str]` or `List[str]` * `Shape` * `batch`, `instance`, `spatial`, `channel` to select dimensions by type * `'0'` when `isinstance(value, (tuple, list))` to add up the sequence of Tensors Returns: `Tensor` without the reduced dimensions. """ return reduce_(_max, value, dim)
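A sketch of the default reduction over non-batch dimensions versus an explicit `dim` (output formatting is indicative):
>>> from phi import math
>>> from phi.math import batch, spatial
>>> t = math.wrap([[1, 5], [4, 2]], batch(b=2), spatial(x=2))
>>> math.max(t)  # reduces the non-batch dim x
(5, 4) along bᵇ
>>> math.max(t, 'b,x')
5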
def maximum(x: Union[phi.math._tensors.Tensor, float], y: Union[phi.math._tensors.Tensor, float])
-
Computes the element-wise maximum of `x` and `y`.
Expand source code
def maximum(x: Union[Tensor, float], y: Union[Tensor, float]):
    """ Computes the element-wise maximum of `x` and `y`. """
    return custom_op2(x, y, maximum, lambda x_, y_: choose_backend(x_, y_).maximum(x_, y_), op_name='maximum')
def mean(value: Union[phi.math._tensors.Tensor, list, tuple, numbers.Number, bool], dim: Union[str, tuple, list, set, phi.math._shape.Shape, Callable] = <function non_batch>) ‑> phi.math._tensors.Tensor
-
Computes the mean over `value` along the specified dimensions.
Args
    value: `Tensor` or `list` / `tuple` of Tensors.
    dim: Dimension or dimensions to be reduced. One of
    - `None` to reduce all non-batch dimensions
    - `str` containing a single dimension or a comma-separated list of dimensions
    - `Tuple[str]` or `List[str]`
    - `Shape`
    - `batch()`, `instance()`, `spatial()`, `channel()` to select dimensions by type
    - `'0'` when `isinstance(value, (tuple, list))` to reduce the sequence of Tensors
Returns
    `Tensor` without the reduced dimensions.
Expand source code
def mean(value: Union[Tensor, list, tuple, Number, bool], dim: DimFilter = non_batch) -> Tensor: """ Computes the mean over `values` along the specified dimensions. Args: value: `Tensor` or `list` / `tuple` of Tensors. dim: Dimension or dimensions to be reduced. One of * `None` to reduce all non-batch dimensions * `str` containing single dimension or comma-separated list of dimensions * `Tuple[str]` or `List[str]` * `Shape` * `batch`, `instance`, `spatial`, `channel` to select dimensions by type * `'0'` when `isinstance(value, (tuple, list))` to add up the sequence of Tensors Returns: `Tensor` without the reduced dimensions. """ return reduce_(_mean, value, dim)
def median(value, dim: Union[str, tuple, list, set, phi.math._shape.Shape, Callable] = <function non_batch>)
-
Reduces `dim` of `value` by picking the median value. For even dimension sizes (ambiguous choice), the linear average of the two central values is computed.
Currently implemented via `quantile()`.
Args
    value: `Tensor`
    dim: Dimension or dimensions to be reduced. One of
    - `None` to reduce all non-batch dimensions
    - `str` containing a single dimension or a comma-separated list of dimensions
    - `Tuple[str]` or `List[str]`
    - `Shape`
    - `batch()`, `instance()`, `spatial()`, `channel()` to select dimensions by type
    - `'0'` when `isinstance(value, (tuple, list))` to reduce the sequence of Tensors
Returns
    `Tensor`
Expand source code
def median(value, dim: DimFilter = non_batch): """ Reduces `dim` of `value` by picking the median value. For odd dimension sizes (ambigous choice), the linear average of the two median values is computed. Currently implemented via `quantile()`. Args: value: `Tensor` dim: Dimension or dimensions to be reduced. One of * `None` to reduce all non-batch dimensions * `str` containing single dimension or comma-separated list of dimensions * `Tuple[str]` or `List[str]` * `Shape` * `batch`, `instance`, `spatial`, `channel` to select dimensions by type * `'0'` when `isinstance(value, (tuple, list))` to add up the sequence of Tensors Returns: `Tensor` """ return quantile(value, 0.5, dim)
def merge_shapes(*objs: Union[phi.math._shape.Shape, Any], order=(<function batch>, <function dual>, <function instance>, <function spatial>, <function channel>), allow_varying_sizes=False)
-
Combines `shapes` into a single `Shape`, grouping dimensions by type. If dimensions with equal names are present in multiple shapes, their types and sizes must match.
The shorthand `shape1 & shape2` merges shapes with `check_exact=[spatial]`.
See Also: `concat_shapes()`.
Args
    *objs: `Shape` or `Shaped` objects to combine.
    order: Dimension type order as a `tuple` of type filters (`channel`, `batch`, `spatial` or `instance`). Dimensions are grouped by type while merging.
Returns
    Merged `Shape`
Raises
    `IncompatibleShapes` if the shapes are not compatible.
Expand source code
def merge_shapes(*objs: Union[Shape, Any], order=(batch, dual, instance, spatial, channel), allow_varying_sizes=False): """ Combines `shapes` into a single `Shape`, grouping dimensions by type. If dimensions with equal names are present in multiple shapes, their types and sizes must match. The shorthand `shape1 & shape2` merges shapes with `check_exact=[spatial]`. See Also: `concat_shapes()`. Args: *objs: `Shape` or `Shaped` objects to combine. order: Dimension type order as `tuple` of type filters (`channel`, `batch`, `spatial` or `instance`). Dimensions are grouped by type while merging. Returns: Merged `Shape` Raises: IncompatibleShapes if the shapes are not compatible """ if not objs: return EMPTY_SHAPE shapes = [obj if isinstance(obj, Shape) else shape(obj) for obj in objs] merged = [] for dim_type in order: type_group = dim_type(shapes[0]) for sh in shapes[1:]: sh = dim_type(sh) for dim in sh: if dim not in type_group: type_group = type_group._expand(dim, pos=-1) else: # check size match sizes_match = _size_equal(dim.size, type_group.get_size(dim.name)) if allow_varying_sizes: if not sizes_match: type_group = type_group.with_dim_size(dim, None) else: if not sizes_match: raise IncompatibleShapes(f"Cannot merge shapes {shapes} because dimension '{dim.name}' exists with different sizes.", *shapes) names1 = type_group.get_item_names(dim) names2 = sh.get_item_names(dim) if names1 is not None and names2 is not None and len(names1) > 1: if names1 != names2: if set(names1) == set(names2): raise IncompatibleShapes(f"Inconsistent component order: '{','.join(names1)}' vs '{','.join(names2)}' in dimension '{dim.name}'. Failed to merge shapes {shapes}", *shapes) else: raise IncompatibleShapes(f"Cannot merge shapes {shapes} because dimension '{dim.name}' exists with different item names.", *shapes) elif names1 is None and names2 is not None: type_group = type_group._with_item_name(dim, tuple(names2)) merged.append(type_group) return concat_shapes(*merged)
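A minimal sketch; dimensions are regrouped by type according to `order`, so the batch dimension comes first regardless of argument order:
>>> from phi.math import batch, spatial, merge_shapes
>>> merge_shapes(spatial(x=4), batch(b=10))
(bᵇ=10, xˢ=4)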
def meshgrid(dims: Union[Callable, phi.math._shape.Shape] = <function spatial>, stack_dim=(vectorᶜ=None), **dimensions: Union[int, phi.math._tensors.Tensor]) ‑> phi.math._tensors.Tensor
-
Generate a mesh-grid `Tensor` from keyword dimensions.
Args
    **dimensions: Mesh-grid dimensions, mapping names to values. Values may be `int`, 1D `Tensor` or 1D native tensor.
    dims: Dimension type of the mesh-grid dimensions, one of `spatial()`, `channel()`, `batch()`, `instance()`.
    stack_dim: Channel dim along which grids are stacked. This is optional for 1D mesh-grids. In that case, a `Tensor` without a stack dim is returned if `None` or an empty `Shape` is passed.
Returns
    Mesh-grid `Tensor` with the dimensions of `dims` / `dimensions` and `stack_dim`.
Examples
>>> math.meshgrid(x=2, y=2)
(xˢ=2, yˢ=2, vectorᶜ=x,y) 0.500 ± 0.500 (0e+00...1e+00)
>>> math.meshgrid(x=2, y=(-1, 1))
(xˢ=2, yˢ=2, vectorᶜ=x,y) 0.250 ± 0.829 (-1e+00...1e+00)
>>> math.meshgrid(x=2, stack_dim=None)
(0, 1) along xˢ
Expand source code
def meshgrid(dims: Union[Callable, Shape] = spatial, stack_dim=channel('vector'), **dimensions: Union[int, Tensor]) -> Tensor: """ Generate a mesh-grid `Tensor` from keyword dimensions. Args: **dimensions: Mesh-grid dimensions, mapping names to values. Values may be `int`, 1D `Tensor` or 1D native tensor. dims: Dimension type of mesh-grid dimensions, one of `spatial`, `channel`, `batch`, `instance`. stack_dim: Channel dim along which grids are stacked. This is optional for 1D mesh-grids. In that case returns a `Tensor` without a stack dim if `None` or an empty `Shape` is passed. Returns: Mesh-grid `Tensor` with the dimensions of `dims` / `dimensions` and `stack_dim`. Examples: >>> math.meshgrid(x=2, y=2) (xˢ=2, yˢ=2, vectorᶜ=x,y) 0.500 ± 0.500 (0e+00...1e+00) >>> math.meshgrid(x=2, y=(-1, 1)) (xˢ=2, yˢ=2, vectorᶜ=x,y) 0.250 ± 0.829 (-1e+00...1e+00) >>> math.meshgrid(x=2, stack_dim=None) (0, 1) along xˢ """ assert 'dim_type' not in dimensions, f"dim_type has been renamed to dims" assert not stack_dim or stack_dim.name not in dimensions if isinstance(dims, Shape): assert not dimensions, f"When passing a Shape to meshgrid(), no kwargs are allowed" dimensions = {d: s for d, s in zip(dims.names, dims.sizes)} grid_shape = dims dim_values = [tuple(range(s)) for s in dims.sizes] else: dim_type = dims assert callable(dim_type), f"dims must be a Shape or dimension type but got {dims}" dim_values = [] dim_sizes = [] for dim, spec in dimensions.items(): if isinstance(spec, int): dim_values.append(tuple(range(spec))) dim_sizes.append(spec) elif isinstance(spec, Tensor): assert spec.rank == 1, f"Only 1D sequences allowed, got {spec} for dimension '{dim}'." dim_values.append(spec.native()) dim_sizes.append(spec.shape.volume) else: backend = choose_backend(spec) shape = backend.staticshape(spec) assert len(shape) == 1, "Only 1D sequences allowed, got {spec} for dimension '{dim}'." dim_values.append(spec) dim_sizes.append(shape[0]) grid_shape = dim_type(**{dim: size for dim, size in zip(dimensions.keys(), dim_sizes)}) backend = choose_backend(*dim_values, prefer_default=True) indices_list = backend.meshgrid(*dim_values) channels = [NativeTensor(t, grid_shape) for t in indices_list] if not stack_dim: assert len(channels) == 1, f"meshgrid with multiple dimension requires a valid stack_dim but got {stack_dim}" return channels[0] if stack_dim.item_names[0] is None: stack_dim = stack_dim.with_size(tuple(dimensions.keys())) return stack_tensors(channels, stack_dim)
def min(value: Union[phi.math._tensors.Tensor, list, tuple, numbers.Number, bool], dim: Union[str, tuple, list, set, phi.math._shape.Shape, Callable] = <function non_batch>) ‑> phi.math._tensors.Tensor
-
Determines the minimum value of `value` along the specified dimensions.
Args
    value: `Tensor` or `list` / `tuple` of Tensors.
    dim: Dimension or dimensions to be reduced. One of
    - `None` to reduce all non-batch dimensions
    - `str` containing a single dimension or a comma-separated list of dimensions
    - `Tuple[str]` or `List[str]`
    - `Shape`
    - `batch()`, `instance()`, `spatial()`, `channel()` to select dimensions by type
    - `'0'` when `isinstance(value, (tuple, list))` to reduce the sequence of Tensors
Returns
    `Tensor` without the reduced dimensions.
Expand source code
def min_(value: Union[Tensor, list, tuple, Number, bool], dim: DimFilter = non_batch) -> Tensor: """ Determines the minimum value of `values` along the specified dimensions. Args: value: `Tensor` or `list` / `tuple` of Tensors. dim: Dimension or dimensions to be reduced. One of * `None` to reduce all non-batch dimensions * `str` containing single dimension or comma-separated list of dimensions * `Tuple[str]` or `List[str]` * `Shape` * `batch`, `instance`, `spatial`, `channel` to select dimensions by type * `'0'` when `isinstance(value, (tuple, list))` to add up the sequence of Tensors Returns: `Tensor` without the reduced dimensions. """ return reduce_(_min, value, dim)
def minimize(f: Callable[[~X], ~Y], solve: phi.math._optimize.Solve[~X, ~Y]) ‑> ~X
-
Finds a minimum of the scalar function f(x). The `method` argument of `solve` determines which optimizer is used. All optimizers supported by `scipy.optimize.minimize` are supported, see https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html . Additionally, a gradient descent solver with adaptive step size can be used with `method='GD'`.
`math.minimize()` is limited to backends that support `jacobian()`, i.e. PyTorch, TensorFlow and Jax.
To obtain additional information about the performed solve, use a `SolveTape`.
See Also: `solve_nonlinear()`.
Args
f: Function whose output is subject to minimization. All positional arguments of `f` are optimized and must be `Tensor` or `PhiTreeNode`. If `solve.x0` is a `tuple` or `list`, it will be passed to f as varargs, `f(*x0)`. To minimize a subset of the positional arguments, define a new (lambda) function depending only on those. The first return value of `f` must be a scalar float `Tensor` or `PhiTreeNode`.
solve: `Solve` object to specify method type, parameters and initial guess for `x`.
Returns
x: solution, the minimum point `x`.
Raises
NotConverged: If the desired accuracy was not reached within the maximum number of iterations.
Diverged: If the optimization failed prematurely.
Expand source code
def minimize(f: Callable[[X], Y], solve: Solve[X, Y]) -> X: """ Finds a minimum of the scalar function *f(x)*. The `method` argument of `solve` determines which optimizer is used. All optimizers supported by `scipy.optimize.minimize` are supported, see https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html . Additionally a gradient descent solver with adaptive step size can be used with `method='GD'`. `math.minimize()` is limited to backends that support `jacobian()`, i.e. PyTorch, TensorFlow and Jax. To obtain additional information about the performed solve, use a `SolveTape`. See Also: `solve_nonlinear()`. Args: f: Function whose output is subject to minimization. All positional arguments of `f` are optimized and must be `Tensor` or `phi.math.magic.PhiTreeNode`. If `solve.x0` is a `tuple` or `list`, it will be passed to *f* as varargs, `f(*x0)`. To minimize a subset of the positional arguments, define a new (lambda) function depending only on those. The first return value of `f` must be a scalar float `Tensor` or `phi.math.magic.PhiTreeNode`. solve: `Solve` object to specify method type, parameters and initial guess for `x`. Returns: x: solution, the minimum point `x`. Raises: NotConverged: If the desired accuracy was not reached within the maximum number of iterations. Diverged: If the optimization failed prematurely. """ solve = solve.with_defaults('optimization') assert (solve.rel_tol == 0).all, f"rel_tol must be zero for minimize() but got {solve.rel_tol}" assert solve.preprocess_y is None, "minimize() does not allow preprocess_y" x0_nest, x0_tensors = disassemble_tree(solve.x0) x0_tensors = [to_float(t) for t in x0_tensors] backend = choose_backend_t(*x0_tensors, prefer_default=True) batch_dims = merge_shapes(*[t.shape for t in x0_tensors]).batch x0_natives = [] for t in x0_tensors: t._expand() assert t.shape.is_uniform x0_natives.append(reshaped_native(t, [batch_dims, t.shape.non_batch])) x0_flat = backend.concat(x0_natives, -1) def unflatten_assemble(x_flat, additional_dims: Shape = EMPTY_SHAPE, convert=True): i = 0 x_tensors = [] for x0_native, x0_tensor in zip(x0_natives, x0_tensors): vol = backend.shape(x0_native)[-1] flat_native = x_flat[..., i:i + vol] x_tensors.append(reshaped_tensor(flat_native, [*additional_dims, batch_dims, x0_tensor.shape.non_batch], convert=convert)) i += vol x = assemble_tree(x0_nest, x_tensors) return x def native_function(x_flat): x = unflatten_assemble(x_flat) if isinstance(x, (tuple, list)): y = f(*x) else: y = f(x) _, y_tensors = disassemble_tree(y) assert not non_batch(y_tensors[0]), f"Failed to minimize '{f.__name__}' because it returned a non-scalar output {shape(y_tensors[0])}. Reduce all non-batch dimensions, e.g. using math.l2_loss()" try: loss_native = reshaped_native(y_tensors[0], [batch_dims], force_expand=False) except AssertionError: raise AssertionError(f"Failed to minimize '{f.__name__}' because its output loss {shape(y_tensors[0])} has more batch dimensions than the initial guess {batch_dims}.") return y_tensors[0].sum, (loss_native,) atol = backend.to_float(reshaped_native(solve.abs_tol, [batch_dims])) maxi = reshaped_numpy(solve.max_iterations, [batch_dims]) trj = _SOLVE_TAPES and any(t.should_record_trajectory_for(solve) for t in _SOLVE_TAPES) t = time.perf_counter() ret = backend.minimize(solve.method, native_function, x0_flat, atol, maxi, trj) t = time.perf_counter() - t if not trj: assert isinstance(ret, SolveResult) converged = reshaped_tensor(ret.converged, [batch_dims]) diverged = reshaped_tensor(ret.diverged, [batch_dims]) x = unflatten_assemble(ret.x) iterations = reshaped_tensor(ret.iterations, [batch_dims]) function_evaluations = reshaped_tensor(ret.function_evaluations, [batch_dims]) residual = reshaped_tensor(ret.residual, [batch_dims]) result = SolveInfo(solve, x, residual, iterations, function_evaluations, converged, diverged, ret.method, ret.message, t) else: # trajectory assert isinstance(ret, (tuple, list)) and all(isinstance(r, SolveResult) for r in ret) converged = reshaped_tensor(ret[-1].converged, [batch_dims]) diverged = reshaped_tensor(ret[-1].diverged, [batch_dims]) x = unflatten_assemble(ret[-1].x) x_ = unflatten_assemble(numpy.stack([r.x for r in ret]), additional_dims=batch('trajectory'), convert=False) residual = stack([reshaped_tensor(r.residual, [batch_dims]) for r in ret], batch('trajectory')) iterations = reshaped_tensor(ret[-1].iterations, [batch_dims]) function_evaluations = stack([reshaped_tensor(r.function_evaluations, [batch_dims]) for r in ret], batch('trajectory')) result = SolveInfo(solve, x_, residual, iterations, function_evaluations, converged, diverged, ret[-1].method, ret[-1].message, t) for tape in _SOLVE_TAPES: tape._add(solve, trj, result) result.convergence_check(False) # raises ConvergenceException return x
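A hedged usage sketch (not from the generated docs). It assumes `Solve` accepts `(method, rel_tol, abs_tol, x0=...)` as used elsewhere in this module, with `rel_tol=0` as required by the assertion in the source above:

```python
from phi import math
from phi.math import Solve, spatial

def loss(x):
    return math.l2_loss(x - 1.0)  # scalar loss, minimum at x == 1

x0 = math.zeros(spatial(x=4))
x_opt = math.minimize(loss, Solve('L-BFGS-B', 0, 1e-5, x0=x0))
```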
def minimum(x: Union[phi.math._tensors.Tensor, float], y: Union[phi.math._tensors.Tensor, float])
-
Computes the element-wise minimum of `x` and `y`.
Expand source code
def minimum(x: Union[Tensor, float], y: Union[Tensor, float]): """ Computes the element-wise minimum of `x` and `y`. """ return custom_op2(x, y, minimum, lambda x_, y_: choose_backend(x_, y_).minimum(x_, y_), op_name='minimum')
def native(value: Union[phi.math._tensors.Tensor, numbers.Number, tuple, list, Any])
-
Returns the native tensor representation of `value`. If `value` is a `Tensor`, this is equal to calling `Tensor.native()`. Otherwise, checks that `value` is a valid tensor object and returns it.
Args
value: `Tensor` or native tensor or tensor-like.
Returns
Native tensor representation
Raises
ValueError if the tensor cannot be transposed to match target_shape
Expand source code
def native(value: Union[Tensor, Number, tuple, list, Any]): """ Returns the native tensor representation of `value`. If `value` is a `phi.math.Tensor`, this is equal to calling `phi.math.Tensor.native()`. Otherwise, checks that `value` is a valid tensor object and returns it. Args: value: `Tensor` or native tensor or tensor-like. Returns: Native tensor representation Raises: ValueError if the tensor cannot be transposed to match target_shape """ if isinstance(value, Tensor): return value.native() else: choose_backend(value) # check that value is a native tensor return value
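A short usage sketch (not from the generated docs), assuming the NumPy backend is active:

```python
from phi import math
from phi.math import spatial

t = math.zeros(spatial(x=3))
raw = math.native(t)       # same as t.native(); a backend array, e.g. numpy.ndarray
raw is math.native(raw)    # True: non-Tensor inputs are validated and returned unchanged
```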
def native_call(f: Callable, *inputs: phi.math._tensors.Tensor, channels_last=None, channel_dim='vector', spatial_dim=None)
-
Calls `f` with the native representations of the `inputs` tensors in standard layout and returns the result as a `Tensor`.
All inputs are converted to native tensors (including precision cast) depending on `channels_last`:
* `channels_last=True`: Dimension layout `(total_batch_size, spatial_dims..., total_channel_size)`
* `channels_last=False`: Dimension layout `(total_batch_size, total_channel_size, spatial_dims...)`
All batch dimensions are compressed into a single dimension with `total_batch_size = input.shape.batch.volume`. The same is done for all channel dimensions.
Additionally, missing batch and spatial dimensions are added so that all `inputs` have the same batch and spatial shape.
Args
f: Function to be called on native tensors of `inputs`. The function output must have the same dimension layout as the inputs, unless overridden by `spatial_dim`, and the batch size must be identical.
*inputs: Uniform `Tensor` arguments
channels_last: (Optional) Whether to put channels as the last dimension of the native representation. If `None`, the channels are put in the default position associated with the current backend, see `Backend.prefers_channels_last()`.
channel_dim: Name of the channel dimension of the result.
spatial_dim: Name of the spatial dimension of the result.
Returns
`Tensor` with batch and spatial dimensions of `inputs`, unless overridden by `spatial_dim`, and single channel dimension `channel_dim`.
Expand source code
def native_call(f: Callable, *inputs: Tensor, channels_last=None, channel_dim='vector', spatial_dim=None): """ Calls `f` with the native representations of the `inputs` tensors in standard layout and returns the result as a `Tensor`. All inputs are converted to native tensors (including precision cast) depending on `channels_last`: * `channels_last=True`: Dimension layout `(total_batch_size, spatial_dims..., total_channel_size)` * `channels_last=False`: Dimension layout `(total_batch_size, total_channel_size, spatial_dims...)` All batch dimensions are compressed into a single dimension with `total_batch_size = input.shape.batch.volume`. The same is done for all channel dimensions. Additionally, missing batch and spatial dimensions are added so that all `inputs` have the same batch and spatial shape. Args: f: Function to be called on native tensors of `inputs`. The function output must have the same dimension layout as the inputs, unless overridden by `spatial_dim`, and the batch size must be identical. *inputs: Uniform `Tensor` arguments channels_last: (Optional) Whether to put channels as the last dimension of the native representation. If `None`, the channels are put in the default position associated with the current backend, see `phi.math.backend.Backend.prefers_channels_last()`. channel_dim: Name of the channel dimension of the result. spatial_dim: Name of the spatial dimension of the result. Returns: `Tensor` with batch and spatial dimensions of `inputs`, unless overridden by `spatial_dim`, and single channel dimension `channel_dim`. """ if channels_last is None: try: backend = choose_backend(f) except NoBackendFound: backend = choose_backend_t(*inputs, prefer_default=True) channels_last = backend.prefers_channels_last() batch = merge_shapes(*[i.shape.batch for i in inputs]) spatial = merge_shapes(*[i.shape.spatial for i in inputs]) natives = [] for i in inputs: groups = (batch, *i.shape.spatial.names, i.shape.channel) if channels_last else (batch, i.shape.channel, *i.shape.spatial.names) natives.append(reshaped_native(i, groups, force_expand=False)) output = f(*natives) if isinstance(channel_dim, str): channel_dim = channel(channel_dim) assert isinstance(channel_dim, Shape), "channel_dim must be a Shape or str" if isinstance(output, (tuple, list)): raise NotImplementedError() else: if spatial_dim is None: groups = (batch, *spatial, channel_dim) if channels_last else (batch, channel_dim, *spatial) else: if isinstance(spatial_dim, str): spatial_dim = spatial(spatial_dim) assert isinstance(spatial_dim, Shape), "spatial_dim must be a Shape or str" groups = (batch, *spatial_dim, channel_dim) if channels_last else (batch, channel_dim, *spatial_dim) result = reshaped_tensor(output, groups, convert=False) if result.shape.get_size(channel_dim.name) == 1 and not channel_dim.item_names[0]: result = result.dimension(channel_dim.name)[0] # remove vector dim if not required return result
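A minimal sketch (not from the generated docs) of wrapping a layout-preserving native function, assuming a standard `phi` install:

```python
from phi import math
from phi.math import spatial

t = math.ones(spatial(x=8, y=8))
# f receives one native array per input, shaped (batch, spatial..., channels) or
# (batch, channels, spatial...) depending on the backend's preference.
doubled = math.native_call(lambda x: x * 2, t)  # valid: output layout matches input
```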
def non_batch(obj) ‑> phi.math._shape.Shape
-
Returns the non-batch dimensions of an object.
Args
obj: `Shape` or object with a valid `shape` property.
Returns
`Shape`
Expand source code
def non_batch(obj) -> Shape: """ Returns the non-batch dimensions of an object. Args: obj: `Shape` or object with a valid `shape` property. Returns: `Shape` """ from .magic import Shaped if isinstance(obj, Shape): return obj.non_batch elif isinstance(obj, Shaped): return shape(obj).non_batch else: raise AssertionError(f"non_batch() must be called either on a Shape or an object with a 'shape' property but got {obj}")
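A short usage sketch (not from the generated docs); the sibling filters below (`non_channel`, `non_dual`, `non_instance`, `non_primal`, `non_spatial`, `primal`) follow the same calling convention:

```python
from phi import math
from phi.math import batch, spatial, non_batch

t = math.zeros(batch(b=2), spatial(x=4, y=3))
non_batch(t)        # Shape containing x and y only
non_batch(t.shape)  # same result when called on the Shape directly
```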
def non_channel(obj) ‑> phi.math._shape.Shape
-
Returns the non-channel dimensions of an object.
Args
obj: `Shape` or object with a valid `shape` property.
Returns
`Shape`
Expand source code
def non_channel(obj) -> Shape: """ Returns the non-channel dimensions of an object. Args: obj: `Shape` or object with a valid `shape` property. Returns: `Shape` """ from .magic import Shaped if isinstance(obj, Shape): return obj.non_channel elif isinstance(obj, Shaped): return shape(obj).non_channel else: raise AssertionError(f"non_channel() must be called either on a Shape or an object with a 'shape' property but got {obj}")
def non_dual(obj) ‑> phi.math._shape.Shape
-
Returns the non-dual dimensions of an object.
Args
obj: `Shape` or object with a valid `shape` property.
Returns
`Shape`
Expand source code
def non_dual(obj) -> Shape: """ Returns the non-dual dimensions of an object. Args: obj: `Shape` or object with a valid `shape` property. Returns: `Shape` """ from .magic import Shaped if isinstance(obj, Shape): return obj.non_dual elif isinstance(obj, Shaped): return shape(obj).non_dual else: raise AssertionError(f"non_dual() must be called either on a Shape or an object with a 'shape' property but got {obj}")
def non_instance(obj) ‑> phi.math._shape.Shape
-
Returns the non-instance dimensions of an object.
Args
obj: `Shape` or object with a valid `shape` property.
Returns
`Shape`
Expand source code
def non_instance(obj) -> Shape: """ Returns the non-instance dimensions of an object. Args: obj: `Shape` or object with a valid `shape` property. Returns: `Shape` """ from .magic import Shaped if isinstance(obj, Shape): return obj.non_instance elif isinstance(obj, Shaped): return shape(obj).non_instance else: raise AssertionError(f"non_instance() must be called either on a Shape or an object with a 'shape' property but got {obj}")
def non_primal(obj) ‑> phi.math._shape.Shape
-
Returns the batch and dual dimensions of an object.
Args
obj: `Shape` or object with a valid `shape` property.
Returns
`Shape`
Expand source code
def non_primal(obj) -> Shape: """ Returns the batch and dual dimensions of an object. Args: obj: `Shape` or object with a valid `shape` property. Returns: `Shape` """ from .magic import Shaped if isinstance(obj, Shape): return obj.non_primal elif isinstance(obj, Shaped): return shape(obj).non_primal else: raise AssertionError(f"non_primal() must be called either on a Shape or an object with a 'shape' property but got {obj}")
def non_spatial(obj) ‑> phi.math._shape.Shape
-
Returns the non-spatial dimensions of an object.
Args
obj: `Shape` or object with a valid `shape` property.
Returns
`Shape`
Expand source code
def non_spatial(obj) -> Shape: """ Returns the non-spatial dimensions of an object. Args: obj: `Shape` or object with a valid `shape` property. Returns: `Shape` """ from .magic import Shaped if isinstance(obj, Shape): return obj.non_spatial elif isinstance(obj, Shaped): return shape(obj).non_spatial else: raise AssertionError(f"non_spatial() must be called either on a Shape or an object with a 'shape' property but got {obj}")
def nonzero(value: phi.math._tensors.Tensor, list_dim: Union[str, phi.math._shape.Shape] = (nonzeroⁱ=None), index_dim: phi.math._shape.Shape = (vectorᶜ=None))
-
Get spatial indices of non-zero / True values.
Batch dimensions are preserved by this operation. If channel dimensions are present, this method returns the indices where any component is nonzero.
Implementations:
* NumPy: `numpy.argwhere`
* PyTorch: `torch.nonzero`
* TensorFlow: `tf.where(tf.not_equal(values, 0))`
* Jax: `jax.numpy.nonzero`
Args
value: spatial tensor to find non-zero / True values in.
list_dim: Dimension listing non-zero values.
index_dim: Index dimension.
Returns
`Tensor` of shape (batch dims..., `list_dim`=#non-zero, `index_dim`=value.shape.spatial_rank)
Expand source code
def nonzero(value: Tensor, list_dim: Union[Shape, str] = instance('nonzero'), index_dim: Shape = channel('vector')): """ Get spatial indices of non-zero / True values. Batch dimensions are preserved by this operation. If channel dimensions are present, this method returns the indices where any component is nonzero. Implementations: * NumPy: [`numpy.argwhere`](https://numpy.org/doc/stable/reference/generated/numpy.argwhere.html) * PyTorch: [`torch.nonzero`](https://pytorch.org/docs/stable/generated/torch.nonzero.html) * TensorFlow: [`tf.where(tf.not_equal(values, 0))`](https://www.tensorflow.org/api_docs/python/tf/where) * Jax: [`jax.numpy.nonzero`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.nonzero.html) Args: value: spatial tensor to find non-zero / True values in. list_dim: Dimension listing non-zero values. index_dim: Index dimension. Returns: `Tensor` of shape (batch dims..., `list_dim`=#non-zero, `index_dim`=value.shape.spatial_rank) """ if value.shape.channel_rank > 0: value = sum_(abs(value), value.shape.channel) if isinstance(list_dim, str): list_dim = instance(list_dim) def unbatched_nonzero(value: Tensor): if isinstance(value, CompressedSparseMatrix): value = value.decompress() if isinstance(value, SparseCoordinateTensor): nonzero_values = nonzero(value._values) nonzero_indices = value._indices[nonzero_values] return nonzero_indices else: dims = value.shape.non_channel native = reshaped_native(value, [*dims]) backend = choose_backend(native) indices = backend.nonzero(native) indices_shape = Shape(backend.staticshape(indices), (list_dim.name, index_dim.name), (list_dim.type, index_dim.type), (None, dims.names)) return NativeTensor(indices, indices_shape) return broadcast_op(unbatched_nonzero, [value], iter_dims=value.shape.batch.names)
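A short usage sketch (not from the generated docs), assuming a standard `phi` install:

```python
from phi import math
from phi.math import spatial, wrap

mask = wrap([[0, 1], [1, 0]], spatial('y,x'))
idx = math.nonzero(mask)
# instance dim 'nonzero' lists the two hits; channel 'vector' holds each (y, x) index
```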
def normalize_to(target: phi.math._tensors.Tensor, source: Union[phi.math._tensors.Tensor, float], epsilon=1e-05)
-
Multiplies the target so that its sum matches the source.
Args
target: `Tensor`
source: `Tensor` or constant
epsilon: Small number to prevent division by zero.
Returns
Normalized tensor of the same shape as target
Expand source code
def normalize_to(target: Tensor, source: Union[float, Tensor], epsilon=1e-5): """ Multiplies the target so that its sum matches the source. Args: target: `Tensor` source: `Tensor` or constant epsilon: Small number to prevent division by zero. Returns: Normalized tensor of the same shape as target """ target_total = math.sum_(target) denominator = math.maximum(target_total, epsilon) if epsilon is not None else target_total source_total = math.sum_(source) return target * (source_total / denominator)
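A short usage sketch (not from the generated docs):

```python
from phi import math
from phi.math import spatial, wrap

weights = wrap([1., 3.], spatial('x'))
probs = math.normalize_to(weights, 1.)  # rescaled so the sum is 1 -> (0.25, 0.75)
```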
def numpy(value: Union[phi.math._tensors.Tensor, numbers.Number, tuple, list, Any])
-
Converts `value` to a `numpy.ndarray` where value must be a `Tensor`, backend tensor or tensor-like. If `value` is a `Tensor`, this is equal to calling `Tensor.numpy()`.
Note: Using this function breaks the autograd chain. The returned tensor is not differentiable. To get a differentiable tensor, use `Tensor.native()` instead.
Transposes the underlying tensor to match the name order and adds singleton dimensions for new dimension names. If a dimension of the tensor is not listed in `order`, a `ValueError` is raised.
If `value` is a NumPy array, it may be returned directly.
Returns
NumPy representation of `value`
Raises
ValueError if the tensor cannot be transposed to match target_shape
Expand source code
def numpy(value: Union[Tensor, Number, tuple, list, Any]): """ Converts `value` to a `numpy.ndarray` where value must be a `Tensor`, backend tensor or tensor-like. If `value` is a `phi.math.Tensor`, this is equal to calling `phi.math.Tensor.numpy()`. *Note*: Using this function breaks the autograd chain. The returned tensor is not differentiable. To get a differentiable tensor, use `Tensor.native()` instead. Transposes the underlying tensor to match the name order and adds singleton dimensions for new dimension names. If a dimension of the tensor is not listed in `order`, a `ValueError` is raised. If `value` is a NumPy array, it may be returned directly. Returns: NumPy representation of `value` Raises: ValueError if the tensor cannot be transposed to match target_shape """ if isinstance(value, Tensor): return value.numpy() else: backend = choose_backend(value) return backend.numpy(value)
def ones(*shape: phi.math._shape.Shape, dtype=None) ‑> phi.math._tensors.Tensor
-
Define a tensor with specified shape with value `1.0` / `1` / `True` everywhere.
This method may not immediately allocate the memory to store the values.
See Also: `ones_like()`, `zeros()`.
Args
*shape: This (possibly empty) sequence of `Shape`s is concatenated, preserving the order.
dtype: Data type as `DType` object. Defaults to `float` matching the current precision setting.
Returns
`Tensor`
Expand source code
def ones(*shape: Shape, dtype=None) -> Tensor: """ Define a tensor with specified shape with value `1.0`/ `1` / `True` everywhere. This method may not immediately allocate the memory to store the values. See Also: `ones_like()`, `zeros()`. Args: *shape: This (possibly empty) sequence of `Shape`s is concatenated, preserving the order. dtype: Data type as `DType` object. Defaults to `float` matching the current precision setting. Returns: `Tensor` """ return _initialize(lambda shape: expand_tensor(NativeTensor(default_backend().ones((), dtype=DType.as_dtype(dtype)), EMPTY_SHAPE), shape), shape)
def ones_like(value: phi.math._tensors.Tensor) ‑> phi.math._tensors.Tensor
-
Create a `Tensor` containing only `1.0` / `1` / `True` with the same shape and dtype as `value`.
Expand source code
def ones_like(value: Tensor) -> Tensor: """ Create a `Tensor` containing only `1.0` / `1` / `True` with the same shape and dtype as `obj`. """ return zeros_like(value) + 1
def pack_dims(value, dims: Union[str, tuple, list, set, phi.math._shape.Shape, Callable], packed_dim: phi.math._shape.Shape, pos: Optional[int] = None, **kwargs)
-
Compresses multiple dimensions into a single dimension by concatenating the elements. Elements along the new dimensions are laid out according to the order of `dims`. If the order of `dims` differs from the current dimension order, the tensor is transposed accordingly. This function replaces the traditional `reshape` for these cases.
The type of the new dimension will be equal to the types of `dims`. If `dims` have varying types, the new dimension will be a batch dimension.
If none of `dims` exist on `value`, `packed_dim` will be added only if it is given with a definite size and `value` is not a primitive type.
See Also: `unpack_dim()`
Args
value: `Shapable`, such as `Tensor`.
dims: Dimensions to be compressed in the specified order.
packed_dim: Single-dimension `Shape`.
pos: Index of new dimension. `None` for automatic, `-1` for last, `0` for first.
**kwargs: Additional keyword arguments required by specific implementations. Adding spatial dimensions to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions. Adding batch dimensions must always work without keyword arguments.
Returns
Same type as `value`.
Examples
>>> pack_dims(math.zeros(spatial(x=4, y=3)), spatial, instance('points'))
(pointsⁱ=12) const 0.0
Expand source code
def pack_dims(value, dims: DimFilter, packed_dim: Shape, pos: Optional[int] = None, **kwargs): """ Compresses multiple dimensions into a single dimension by concatenating the elements. Elements along the new dimensions are laid out according to the order of `dims`. If the order of `dims` differs from the current dimension order, the tensor is transposed accordingly. This function replaces the traditional `reshape` for these cases. The type of the new dimension will be equal to the types of `dims`. If `dims` have varying types, the new dimension will be a batch dimension. If none of `dims` exist on `value`, `packed_dim` will be added only if it is given with a definite size and `value` is not a primitive type. See Also: `unpack_dim()` Args: value: `phi.math.magic.Shapable`, such as `phi.math.Tensor`. dims: Dimensions to be compressed in the specified order. packed_dim: Single-dimension `Shape`. pos: Index of new dimension. `None` for automatic, `-1` for last, `0` for first. **kwargs: Additional keyword arguments required by specific implementations. Adding spatial dimensions to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions. Adding batch dimensions must always work without keyword arguments. Returns: Same type as `value`. Examples: >>> pack_dims(math.zeros(spatial(x=4, y=3)), spatial, instance('points')) (pointsⁱ=12) const 0.0 """ if isinstance(value, (Number, bool)): return value assert isinstance(value, Shapable) and isinstance(value, Sliceable) and isinstance(value, Shaped), f"value must be Shapable but got {type(value)}" dims = shape(value).only(dims, reorder=True) if packed_dim in shape(value): assert packed_dim in dims, f"Cannot pack dims into new dimension {packed_dim} because it already exists on value {value} and is not packed." if len(dims) == 0 or all(dim not in shape(value) for dim in dims): return value if packed_dim.size is None else expand(value, packed_dim, **kwargs) # Inserting size=1 can cause shape errors elif len(dims) == 1: return rename_dims(value, dims, packed_dim, **kwargs) # --- First try __pack_dims__ --- if hasattr(value, '__pack_dims__'): result = value.__pack_dims__(dims.names, packed_dim, pos, **kwargs) if result is not NotImplemented: return result # --- Next try Tree Node --- if isinstance(value, PhiTreeNode): new_attributes = {a: pack_dims(getattr(value, a), dims, packed_dim, pos=pos, **kwargs) for a in all_attributes(value)} return copy_with(value, **new_attributes) # --- Fallback: unstack and stack --- if shape(value).only(dims).volume > 8: warnings.warn(f"pack_dims() default implementation is slow on large dimensions ({shape(value).only(dims)}). Please implement __pack_dims__() for {type(value).__name__} as defined in phi.math.magic", RuntimeWarning, stacklevel=2) return stack(unstack(value, dims), packed_dim, **kwargs)
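A short roundtrip sketch (not from the generated docs) pairing `pack_dims()` with `unpack_dim()`:

```python
from phi import math
from phi.math import spatial, instance

t = math.zeros(spatial(x=4, y=3))
flat = math.pack_dims(t, spatial, instance('points'))      # (pointsⁱ=12)
back = math.unpack_dim(flat, 'points', spatial(x=4, y=3))  # restores the x, y layout
```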
def pad(value: phi.math._tensors.Tensor, widths: dict, mode: Union[ForwardRef('e_.Extrapolation'), phi.math._tensors.Tensor, numbers.Number], **kwargs) ‑> phi.math._tensors.Tensor
-
Pads a tensor along the specified dimensions, determining the added values using the given extrapolation. Unlike `Extrapolation.pad()`, this function can handle negative widths which slice off outer values.
Args
value: `Tensor` to be padded
widths: `dict` mapping dimension name (`str`) to `(lower, upper)` where `lower` and `upper` are `int` that can be positive (pad), negative (slice) or zero (pass).
mode: `Extrapolation` used to determine values added from positive `widths`. Assumes constant extrapolation if given a number or `Tensor` instead.
kwargs: Additional padding arguments. These are ignored by the standard extrapolations defined in `phi.math.extrapolation` but can be used to pass additional contextual information to custom extrapolations. Grid classes from `phi.field` will pass the argument `bounds: Box`.
Returns
Padded `Tensor`
Examples
>>> math.pad(math.ones(spatial(x=10, y=10)), {'x': (1, 1), 'y': (2, 1)}, 0)
(xˢ=12, yˢ=13) 0.641 ± 0.480 (0e+00...1e+00)
>>> math.pad(math.ones(spatial(x=10, y=10)), {'x': (1, -1)}, 0)
(xˢ=10, yˢ=10) 0.900 ± 0.300 (0e+00...1e+00)
Expand source code
def pad(value: Tensor, widths: dict, mode: Union['e_.Extrapolation', Tensor, Number], **kwargs) -> Tensor: """ Pads a tensor along the specified dimensions, determining the added values using the given extrapolation. Unlike `Extrapolation.pad()`, this function can handle negative widths which slice off outer values. Args: value: `Tensor` to be padded widths: `dict` mapping dimension name (`str`) to `(lower, upper)` where `lower` and `upper` are `int` that can be positive (pad), negative (slice) or zero (pass). mode: `Extrapolation` used to determine values added from positive `widths`. Assumes constant extrapolation if given a number or `Tensor` instead. kwargs: Additional padding arguments. These are ignored by the standard extrapolations defined in `phi.math.extrapolation` but can be used to pass additional contextual information to custom extrapolations. Grid classes from `phi.field` will pass the argument `bounds: Box`. Returns: Padded `Tensor` Examples: >>> math.pad(math.ones(spatial(x=10, y=10)), {'x': (1, 1), 'y': (2, 1)}, 0) (xˢ=12, yˢ=13) 0.641 ± 0.480 (0e+00...1e+00) >>> math.pad(math.ones(spatial(x=10, y=10)), {'x': (1, -1)}, 0) (xˢ=10, yˢ=10) 0.900 ± 0.300 (0e+00...1e+00) """ mode = mode if isinstance(mode, e_.Extrapolation) else e_.ConstantExtrapolation(mode) has_negative_widths = any(w0 < 0 or w1 < 0 for w0, w1 in widths.values()) has_positive_widths = any(w0 > 0 or w1 > 0 for w0, w1 in widths.values()) slices = None if has_negative_widths: slices = {dim: slice(max(0, -w[0]), min(0, w[1]) or None) for dim, w in widths.items()} widths = {dim: (max(0, w[0]), max(0, w[1])) for dim, w in widths.items()} result_padded = mode.pad(value, widths, **kwargs) if has_positive_widths else value result_sliced = result_padded[slices] if has_negative_widths else result_padded return result_sliced
def pairwise_distances(positions: phi.math._tensors.Tensor, max_distance: Union[phi.math._tensors.Tensor, float] = None, format: str = 'dense', default: Optional[float] = None, method: str = 'sparse') ‑> phi.math._tensors.Tensor
-
Computes the distance matrix containing the pairwise position differences between each pair of points. Points that are further apart than `max_distance` (if specified) are assigned a distance value of `0`. The diagonal of the matrix (self-distance) also consists purely of zero-vectors and may or may not be stored explicitly.
Args
positions: `Tensor`. Channel dimensions are interpreted as position components. Instance and spatial dimensions list nodes.
max_distance: Scalar or `Tensor` specifying a max_radius for each point separately. Can contain additional batch dimensions but spatial/instance dimensions must match `positions` if present. If not specified, uses an infinite cutoff radius, i.e. all points will be considered neighbors.
format: Matrix format as `str` or concrete sparsity pattern as `Tensor`. Allowed strings are `'dense'`, `'csr'`, `'coo'`, `'csc'`. When a `Tensor` is passed, it needs to have all instance and spatial dims as `positions` as well as corresponding dual dimensions. The distances will be evaluated at all stored entries of the `format` tensor.
default: Value the sparse tensor returns for non-stored values. Must be `0` or `None`.
Returns
Distance matrix as sparse or dense `Tensor`, depending on `format`. For each spatial/instance dimension in `positions`, the matrix also contains a dual dimension of the same name and size. The matrix also contains all batch dimensions of `positions` and one channel dimension called `vector`.
Examples
>>> pos = vec(x=0, y=tensor([0, 1, 2.5], instance('particles')))
>>> dx = pairwise_distances(pos, format='dense', max_distance=2)
>>> dx.particles[0]
(x=0.000, y=0.000); (x=0.000, y=1.000); (x=0.000, y=0.000) (~particlesᵈ=3, vectorᶜ=x,y)
Expand source code
def pairwise_distances(positions: Tensor, max_distance: Union[float, Tensor] = None, format: str = 'dense', default: Optional[float] = None, method: str = 'sparse') -> Tensor: """ Computes the distance matrix containing the pairwise position differences between each pair of points. Points that are further apart than `max_distance` (if specified) are assigned a distance value of `0`. The diagonal of the matrix (self-distance) also consists purely of zero-vectors and may or may not be stored explicitly. Args: positions: `Tensor`. Channel dimensions are interpreted as position components. Instance and spatial dimensions list nodes. max_distance: Scalar or `Tensor` specifying a max_radius for each point separately. Can contain additional batch dimensions but spatial/instance dimensions must match `positions` if present. If not specified, uses an infinite cutoff radius, i.e. all points will be considered neighbors. format: Matrix format as `str` or concrete sparsity pattern as `Tensor`. Allowed strings are `'dense', `'csr'`, `'coo'`, `'csc'`. When a `Tensor` is passed, it needs to have all instance and spatial dims as `positions` as well as corresponding dual dimensions. The distances will be evaluated at all stored entries of the `format` tensor. default: Value the sparse tensor returns for non-stored values. Must be `0` or `None`. Returns: Distance matrix as sparse or dense `Tensor`, depending on `format`. For each spatial/instance dimension in `positions`, the matrix also contains a dual dimension of the same name and size. The matrix also contains all batch dimensions of `positions` and one channel dimension called `vector`. Examples: >>> pos = vec(x=0, y=tensor([0, 1, 2.5], instance('particles'))) >>> dx = pairwise_distances(pos, format='dense', max_distance=2) >>> dx.particles[0] (x=0.000, y=0.000); (x=0.000, y=1.000); (x=0.000, y=0.000) (~particlesᵈ=3, vectorᶜ=x,y) """ assert isinstance(positions, Tensor), f"positions must be a Tensor but got {type(positions)}" assert default in [0, None], f"default value must be either 0 or None but got '{default}'" primal_dims = positions.shape.non_batch.non_channel.non_dual dual_dims = dual(**primal_dims.untyped_dict) if isinstance(format, Tensor): # sparse connectivity specified, no neighborhood search required assert max_distance is None, "max_distance not allowed when connectivity is specified (passing a Tensor for format)" return map_pairs(lambda p1, p2: p2 - p1, positions, format) # --- Dense --- elif format == 'dense': dx = unpack_dim(pack_dims(positions, non_batch(positions).non_channel.non_dual, instance('_tmp')), '_tmp', dual_dims) - positions if max_distance is not None: neighbors = sum_(dx ** 2, channel) <= max_distance ** 2 default = float('nan') if default is None else default dx = where(neighbors, dx, default) return dx # --- Sparse neighbor search from here on --- assert max_distance is not None, "max_distance must be specified when computing distance in sparse format" max_distance = wrap(max_distance) index_dtype = DType(int, 32) backend = choose_backend_t(positions, max_distance) batch_shape = batch(positions) & batch(max_distance) if not dual_dims.well_defined: assert dual_dims.rank == 1, f"others_dims sizes must be specified when passing more then one dimension but got {dual_dims}" dual_dims = dual_dims.with_size(primal_dims.volume) # --- Determine mode --- tmp_pair_count = None pair_count = None table_len = None mode = 'vectorize' if batch_shape.volume > 1 and batch_shape.is_uniform else 'loop' if backend.is_available(positions): 
if mode == 'vectorize': # ToDo determine limits from positions? build_cells+bincount would be enough pair_count = 7 else: # tracing if backend.requires_fixed_shapes_when_tracing(): # ToDo use fixed limits (set by user) pair_count = 7 mode = 'vectorize' # --- Run neighborhood search --- from .backend._partition import find_neighbors, find_neighbors_matscipy, find_neighbors_sklearn if mode == 'loop': indices = [] values = [] for b in batch_shape.meshgrid(): native_positions = reshaped_native(positions[b], [primal_dims, channel(positions)]) native_max_dist = max_distance[b].native() if method == 'sparse': nat_rows, nat_cols, nat_vals = find_neighbors(native_positions, native_max_dist, None, periodic=False, default=default) elif method == 'matscipy': assert positions.available, f"Cannot jit-compile matscipy neighborhood search" nat_rows, nat_cols, nat_vals = find_neighbors_matscipy(native_positions, native_max_dist, None, periodic=False) elif method == 'sklearn': assert positions.available, f"Cannot jit-compile matscipy neighborhood search" nat_rows, nat_cols, nat_vals = find_neighbors_sklearn(native_positions, native_max_dist) else: raise ValueError(method) nat_indices = backend.stack([nat_rows, nat_cols], -1) indices.append(reshaped_tensor(nat_indices, [instance('pairs'), channel(vector=primal_dims.names + dual_dims.names)], convert=False)) values.append(reshaped_tensor(nat_vals, [instance('pairs'), channel(positions)])) indices = stack(indices, batch_shape) values = stack(values, batch_shape) elif mode == 'vectorize': raise NotImplementedError # native_positions = reshaped_native(positions, [batch_shape, primal_dims, channel(positions)]) # native_max_dist = reshaped_native(max_distance, [batch_shape, primal_dims], force_expand=False) # def single_search(pos, r): # return find_neighbors(pos, r, None, periodic=False, pair_count=pair_count, default=default) # nat_rows, nat_cols, nat_vals = backend.vectorized_call(single_search, native_positions, native_max_dist, output_dtypes=(index_dtype, index_dtype, positions.dtype)) # nat_indices = backend.stack([nat_rows, nat_cols], -1) # indices = reshaped_tensor(nat_indices, [batch_shape, instance('pairs'), channel(vector=primal_dims.names + dual_dims.names)], convert=False) # values = reshaped_tensor(nat_vals, [batch_shape, instance('pairs'), channel(positions)]) else: raise RuntimeError # --- Assemble sparse matrix --- dense_shape = primal_dims & dual_dims coo = SparseCoordinateTensor(indices, values, dense_shape, can_contain_double_entries=False, indices_sorted=False, default=default) return to_format(coo, format)
def precision(floating_point_bits: int)
-
Sets the floating point precision for the local context.
Usage: `with precision(p):`
This overrides the global setting, see `set_global_precision()`.
Args
floating_point_bits: 16 for half, 32 for single, 64 for double
Expand source code
@contextmanager def precision(floating_point_bits: int): """ Sets the floating point precision for the local context. Usage: `with precision(p):` This overrides the global setting, see `set_global_precision()`. Args: floating_point_bits: 16 for half, 32 for single, 64 for double """ _PRECISION.append(floating_point_bits) try: yield None finally: _PRECISION.pop(-1)
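A short usage sketch (not from the generated docs):

```python
from phi import math
from phi.math import spatial

with math.precision(64):
    t = math.ones(spatial(x=4))  # created with float64 inside the context
print(math.get_precision())      # outside, the enclosing setting applies again
```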
def primal(obj) ‑> phi.math._shape.Shape
-
Returns the instance, spatial and channel dimensions of an object.
Args
obj: `Shape` or object with a valid `shape` property.
Returns
`Shape`
Expand source code
def primal(obj) -> Shape: """ Returns the instance, spatial and channel dimensions of an object. Args: obj: `Shape` or object with a valid `shape` property. Returns: `Shape` """ from .magic import Shaped if isinstance(obj, Shape): return obj.primal elif isinstance(obj, Shaped): return shape(obj).primal else: raise AssertionError(f"primal() must be called either on a Shape or an object with a 'shape' property but got {obj}")
def print(obj: Union[phi.math._tensors.Tensor, PhiTreeNode, numbers.Number, tuple, list, None] = None, name: str = '')
-
Print a tensor with no more than two spatial dimensions, slicing it along all batch and channel dimensions.
Unlike NumPy's array printing, the dimensions are sorted. Elements along the alphabetically first dimension are printed to the right, the second dimension upward. Typically, this means x right, y up.
Args
obj: tensor-like
name: name of the tensor
Expand source code
def print_(obj: Union[Tensor, PhiTreeNode, Number, tuple, list, None] = None, name: str = ""): """ Print a tensor with no more than two spatial dimensions, slicing it along all batch and channel dimensions. Unlike NumPy's array printing, the dimensions are sorted. Elements along the alphabetically first dimension is printed to the right, the second dimension upward. Typically, this means x right, y up. Args: obj: tensor-like name: name of the tensor Returns: """ def variables(obj) -> dict: if hasattr(obj, '__variable_attrs__') or hasattr(obj, '__value_attrs__'): return {f".{a}": getattr(obj, a) for a in variable_attributes(obj)} elif isinstance(obj, (tuple, list)): return {f"[{i}]": item for i, item in enumerate(obj)} elif isinstance(obj, dict): return obj else: raise ValueError(f"Not PhiTreeNode: {type(obj)}") if name: print(" " * 12 + name) if obj is None: print("None") elif isinstance(obj, Tensor): print(f"{obj:full}") elif isinstance(obj, PhiTreeNode): for n, val in variables(obj).items(): print_(val, name + n) else: print(f"{wrap(obj):full}")
def print_gradient(value: phi.math._tensors.Tensor, name='', detailed=False) ‑> phi.math._tensors.Tensor
-
Prints the gradient vector of `value` when computed. The gradient at `value` is the vector-Jacobian product of all operations between the output of this function and the loss value.
The gradient is not printed in jit mode, see `jit_compile()`.
Example:
```python
def f(x):
    x = math.print_gradient(x, 'dx')
    return math.l1_loss(x)
math.jacobian(f)(math.ones(x=6))
```
Args
value: `Tensor` for which the gradient may be computed later.
name: (Optional) Name to print along with the gradient values
detailed: If `False`, prints a short summary of the gradient tensor.
Returns
`identity(value)` which when differentiated, prints the gradient vector.
Expand source code
def print_gradient(value: Tensor, name="", detailed=False) -> Tensor: """ Prints the gradient vector of `value` when computed. The gradient at `value` is the vector-Jacobian product of all operations between the output of this function and the loss value. The gradient is not printed in jit mode, see `jit_compile()`. Example: ```python def f(x): x = math.print_gradient(x, 'dx') return math.l1_loss(x) math.jacobian(f)(math.ones(x=6)) ``` Args: value: `Tensor` for which the gradient may be computed later. name: (Optional) Name to print along with the gradient values detailed: If `False`, prints a short summary of the gradient tensor. Returns: `identity(value)` which when differentiated, prints the gradient vector. """ def print_grad(params: dict, _y, dx): param_name, x = next(iter(params.items())) if math.all_available(x, dx): if detailed: math.print_(dx, name=name) else: print(f"{name}: \t{dx}") else: print(f"Cannot print gradient for {param_name}, data not available.") return {param_name: dx} identity = custom_gradient(lambda x: x, print_grad) return identity(value)
def prod(value: Union[phi.math._tensors.Tensor, list, tuple, numbers.Number, bool], dim: Union[str, tuple, list, set, phi.math._shape.Shape, Callable] = <function non_batch>) ‑> phi.math._tensors.Tensor
-
Multiplies `values` along the specified dimensions.
Args
value: `Tensor` or `list` / `tuple` of Tensors.
dim: Dimension or dimensions to be reduced. One of
* `None` to reduce all non-batch dimensions
* `str` containing a single dimension or comma-separated list of dimensions
* `Tuple[str]` or `List[str]`
* `Shape`
* `batch`, `instance`, `spatial`, `channel` to select dimensions by type
* `'0'` when `isinstance(value, (tuple, list))` to reduce the sequence of Tensors
Returns
`Tensor` without the reduced dimensions.
Expand source code
def prod(value: Union[Tensor, list, tuple, Number, bool], dim: DimFilter = non_batch) -> Tensor: """ Multiplies `values` along the specified dimensions. Args: value: `Tensor` or `list` / `tuple` of Tensors. dim: Dimension or dimensions to be reduced. One of * `None` to reduce all non-batch dimensions * `str` containing single dimension or comma-separated list of dimensions * `Tuple[str]` or `List[str]` * `Shape` * `batch`, `instance`, `spatial`, `channel` to select dimensions by type * `'0'` when `isinstance(value, (tuple, list))` to add up the sequence of Tensors Returns: `Tensor` without the reduced dimensions. """ return reduce_(_prod, value, dim, require_all_dims_present=True)
def quantile(value: phi.math._tensors.Tensor, quantiles: Union[float, tuple, list, phi.math._tensors.Tensor], dim: Union[str, tuple, list, set, phi.math._shape.Shape, Callable] = <function non_batch>)
-
Compute the q-th quantile of `value` along `dim` for each q in `quantiles`.
Implementations:
* NumPy: `quantile`
* PyTorch: `quantile`
* TensorFlow: `tfp.stats.percentile`
* Jax: `quantile`
Args
value: `Tensor`
quantiles: Single quantile or tensor of quantiles to compute. Must be of type `float`, `tuple`, `list` or `Tensor`.
dim: Dimension or dimensions to be reduced. One of
* `None` to reduce all non-batch dimensions
* `str` containing a single dimension or comma-separated list of dimensions
* `Tuple[str]` or `List[str]`
* `Shape`
* `batch`, `instance`, `spatial`, `channel` to select dimensions by type
* `'0'` when `isinstance(value, (tuple, list))` to reduce the sequence of Tensors
Returns
`Tensor` with dimensions of `quantiles` and non-reduced dimensions of `value`.
Expand source code
def quantile(value: Tensor, quantiles: Union[float, tuple, list, Tensor], dim: DimFilter = non_batch): """ Compute the q-th quantile of `value` along `dim` for each q in `quantiles`. Implementations: * NumPy: [`quantile`](https://numpy.org/doc/stable/reference/generated/numpy.quantile.html) * PyTorch: [`quantile`](https://pytorch.org/docs/stable/generated/torch.quantile.html#torch.quantile) * TensorFlow: [`tfp.stats.percentile`](https://www.tensorflow.org/probability/api_docs/python/tfp/stats/percentile) * Jax: [`quantile`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.quantile.html) Args: value: `Tensor` quantiles: Single quantile or tensor of quantiles to compute. Must be of type `float`, `tuple`, `list` or `Tensor`. dim: Dimension or dimensions to be reduced. One of * `None` to reduce all non-batch dimensions * `str` containing single dimension or comma-separated list of dimensions * `Tuple[str]` or `List[str]` * `Shape` * `batch`, `instance`, `spatial`, `channel` to select dimensions by type * `'0'` when `isinstance(value, (tuple, list))` to reduce the sequence of Tensors Returns: `Tensor` with dimensions of `quantiles` and non-reduced dimensions of `value`. """ dims = value.shape.only(dim) native_values = reshaped_native(value, [*value.shape.without(dims), value.shape.only(dims)]) backend = choose_backend(native_values) q = tensor(quantiles, default_list_dim=instance('quantiles')) native_quantiles = reshaped_native(q, [q.shape]) native_result = backend.quantile(native_values, native_quantiles) return reshaped_tensor(native_result, [q.shape, *value.shape.without(dims)])
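A short usage sketch (not from the generated docs):

```python
from phi import math
from phi.math import spatial, wrap

t = wrap([1., 2., 3., 4.], spatial('x'))
math.quantile(t, 0.5)           # median over all non-batch dims
math.quantile(t, (0.25, 0.75))  # adds a dim listing one value per requested quantile
```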
def random_normal(*shape: phi.math._shape.Shape, dtype=None) ‑> phi.math._tensors.Tensor
-
Creates a `Tensor` with the specified shape, filled with random values sampled from a normal / Gaussian distribution.
Implementations:
* NumPy: `numpy.random.standard_normal`
* PyTorch: `torch.randn`
* TensorFlow: `tf.random.normal`
* Jax: `jax.random.normal`
Args
*shape: This (possibly empty) sequence of `Shape`s is concatenated, preserving the order.
dtype: (optional) floating point `DType`. If `None`, a float tensor with the current default precision is created, see `get_precision()`.
Returns
`Tensor`
Expand source code
def random_normal(*shape: Shape, dtype=None) -> Tensor: """ Creates a `Tensor` with the specified shape, filled with random values sampled from a normal / Gaussian distribution. Implementations: * NumPy: [`numpy.random.standard_normal`](https://numpy.org/doc/stable/reference/random/generated/numpy.random.standard_normal.html) * PyTorch: [`torch.randn`](https://pytorch.org/docs/stable/generated/torch.randn.html) * TensorFlow: [`tf.random.normal`](https://www.tensorflow.org/api_docs/python/tf/random/normal) * Jax: [`jax.random.normal`](https://jax.readthedocs.io/en/latest/_autosummary/jax.random.normal.html) Args: *shape: This (possibly empty) sequence of `Shape`s is concatenated, preserving the order. dtype: (optional) floating point `DType`. If `None`, a float tensor with the current default precision is created, see `get_precision()`. Returns: `Tensor` """ def uniform_random_normal(shape): native = choose_backend(*shape.sizes, prefer_default=True).random_normal(shape.sizes, DType.as_dtype(dtype)) return NativeTensor(native, shape) return _initialize(uniform_random_normal, shape)
def random_uniform(*shape: phi.math._shape.Shape, low: Union[phi.math._tensors.Tensor, float] = 0, high: Union[phi.math._tensors.Tensor, float] = 1, dtype: Union[phi.math.backend._dtype.DType, tuple] = None) ‑> phi.math._tensors.Tensor
-
Creates a `Tensor` with the specified shape, filled with random values sampled from a uniform distribution.
Args
*shape: This (possibly empty) sequence of `Shape`s is concatenated, preserving the order.
dtype: (optional) `DType` or `(kind, bits)`. The dtype kind must be one of `float`, `int`, `complex`. If not specified, a `float` tensor with the current default precision is created, see `get_precision()`.
low: Minimum value, included.
high: Maximum value, excluded.
Returns
`Tensor`
Expand source code
def random_uniform(*shape: Shape, low: Union[Tensor, float] = 0, high: Union[Tensor, float] = 1, dtype: Union[DType, tuple] = None) -> Tensor: """ Creates a `Tensor` with the specified shape, filled with random values sampled from a uniform distribution. Args: *shape: This (possibly empty) sequence of `Shape`s is concatenated, preserving the order. dtype: (optional) `DType` or `(kind, bits)`. The dtype kind must be one of `float`, `int`, `complex`. If not specified, a `float` tensor with the current default precision is created, see `get_precision()`. low: Minimum value, included. high: Maximum value, excluded. Returns: `Tensor` """ def uniform_random_uniform(shape): native = choose_backend(low, high, *shape.sizes, prefer_default=True).random_uniform(shape.sizes, low, high, DType.as_dtype(dtype)) return NativeTensor(native, shape) return _initialize(uniform_random_uniform, shape)
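A short usage sketch (not from the generated docs); the `(kind, bits)` dtype tuple follows the Args description above:

```python
from phi import math
from phi.math import spatial

noise = math.random_uniform(spatial(x=4), low=-1, high=1)           # float, default precision
ints = math.random_uniform(spatial(x=4), high=10, dtype=(int, 32))  # int32 values in [0, 10)
```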
def range(dim: phi.math._shape.Shape, start_or_stop: Optional[int] = None, stop: Optional[int] = None, step=1)
-
Returns evenly spaced values between `start` and `stop`. If only one limit is given, `0` is used for the start.
See Also: `range_tensor()`, `linspace()`, `meshgrid()`.
Args
dim: Dimension name and type as `Shape` object. The `size` of `dim` is interpreted as `stop` unless `start_or_stop` is specified.
start_or_stop: (Optional) `int`. Interpreted as `start` if `stop` is specified as well. Otherwise this is `stop`.
stop: (Optional) `int`. `stop` value.
step: Distance between values.
Returns
`Tensor`
Expand source code
def arange(dim: Shape, start_or_stop: Union[int, None] = None, stop: Union[int, None] = None, step=1): """ Returns evenly spaced values between `start` and `stop`. If only one limit is given, `0` is used for the start. See Also: `range_tensor()`, `linspace()`, `meshgrid()`. Args: dim: Dimension name and type as `Shape` object. The `size` of `dim` is interpreted as `stop` unless `start_or_stop` is specified. start_or_stop: (Optional) `int`. Interpreted as `start` if `stop` is specified as well. Otherwise this is `stop`. stop: (Optional) `int`. `stop` value. step: Distance between values. Returns: `Tensor` """ if start_or_stop is None: assert stop is None, "start_or_stop must be specified when stop is given." assert isinstance(dim.size, int), "When start_or_stop is not specified, dim.size must be an integer." start, stop = 0, dim.size elif stop is None: start, stop = 0, start_or_stop else: start = start_or_stop native = choose_backend(start, stop, prefer_default=True).range(start, stop, step, DType(int, 32)) return NativeTensor(native, dim.with_sizes([stop - start]))
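A short usage sketch (not from the generated docs); `math.range` is the exported name of `arange`:

```python
from phi import math
from phi.math import spatial

math.range(spatial(x=4))        # 0, 1, 2, 3 along x (dim size taken as stop)
math.range(spatial('x'), 2, 7)  # 2, 3, 4, 5, 6
```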
def range_tensor(*shape: phi.math._shape.Shape)
-
Returns a `Tensor` with given `shape` containing the linear indices of each element. For 1D tensors, this is equivalent to `arange()` with `step=1`.
See Also: `arange()`, `meshgrid()`.
Args
shape: Tensor shape.
Returns
`Tensor`
Expand source code
def range_tensor(*shape: Shape): """ Returns a `Tensor` with given `shape` containing the linear indices of each element. For 1D tensors, this equivalent to `arange()` with `step=1`. See Also: `arange()`, `meshgrid()`. Args: shape: Tensor shape. Returns: `Tensor` """ shape = concat_shapes(*shape) data = arange(spatial('range'), 0, shape.volume) return unpack_dim(data, 'range', shape)
def real(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
See Also: `imag()`, `conjugate()`.
Args
x: `Tensor` or `PhiTreeNode` or native tensor.
Returns
Real component of `x`.
Expand source code
def real(x) -> Union[Tensor, PhiTreeNode]: """ See Also: `imag()`, `conjugate()`. Args: x: `Tensor` or `phi.math.magic.PhiTreeNode` or native tensor. Returns: Real component of `x`. """ return _backend_op1(x, Backend.real)
def rename_dims(value, dims: Union[str, tuple, list, set, phi.math._shape.Shape, Callable], names: Union[str, tuple, list, set, phi.math._shape.Shape, Callable], **kwargs)
-
Change the name and optionally the type of some dimensions of `value`.
Dimensions that are not present on value will be ignored. The corresponding new dimensions given by `names` will not be added.
Args
value: `Shape` or `Tensor` or `Shapable`.
dims: Existing dimensions of `value` as comma-separated `str`, `tuple`, `list`, `Shape` or filter function.
names: Either
* Sequence of names matching `dims` as `tuple`, `list` or `str`. This replaces only the dimension names but leaves the types untouched.
* `Shape` matching `dims` to replace names and types.
* Dimension type function to replace only types.
**kwargs: Additional keyword arguments required by specific implementations. Adding spatial dimensions to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions. Adding batch dimensions must always work without keyword arguments.
Returns
Same type as `value`.
Expand source code
def rename_dims(value, dims: DimFilter, names: DimFilter, **kwargs): """ Change the name and optionally the type of some dimensions of `value`. Dimensions that are not present on value will be ignored. The corresponding new dimensions given by `names` will not be added. Args: value: `Shape` or `Tensor` or `Shapable`. dims: Existing dimensions of `value` as comma-separated `str`, `tuple`, `list`, `Shape` or filter function. names: Either * Sequence of names matching `dims` as `tuple`, `list` or `str`. This replaces only the dimension names but leaves the types untouched. * `Shape` matching `dims` to replace names and types. * Dimension type function to replace only types. **kwargs: Additional keyword arguments required by specific implementations. Adding spatial dimensions to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions. Adding batch dimensions must always work without keyword arguments. Returns: Same type as `value`. """ if isinstance(value, Shape): return value._replace_names_and_types(dims, names) elif isinstance(value, (Number, bool)): return value assert isinstance(value, Shapable) and isinstance(value, Shaped), f"value must be a Shape or Shapable but got {type(value).__name__}" dims = shape(value).only(dims).names if callable(dims) else parse_dim_order(dims) if isinstance(names, str): names = parse_dim_order(names) elif callable(names): names: Shape = names(*dims).with_sizes(shape(value)) assert len(dims) == len(names), f"names and dims must be of equal length but got #dims={len(dims)} and #names={len(names)}" existing_dims = shape(value).only(dims, reorder=True) if not existing_dims: return value existing_names = [n for i, n in enumerate(names) if dims[i] in existing_dims] existing_names = existing_dims._replace_names_and_types(existing_dims, existing_names) # --- First try __replace_dims__ --- if hasattr(value, '__replace_dims__'): result = value.__replace_dims__(existing_dims.names, existing_names, **kwargs) if result is not NotImplemented: return result # --- Next try Tree Node --- if isinstance(value, PhiTreeNode): new_attributes = {a: rename_dims(getattr(value, a), existing_dims, existing_names, **kwargs) for a in all_attributes(value)} return copy_with(value, **new_attributes) # --- Fallback: unstack and stack --- if shape(value).only(existing_dims).volume > 8: warnings.warn(f"rename_dims() default implementation is slow on large dimensions ({shape(value).only(dims)}). Please implement __replace_dims__() for {type(value).__name__} as defined in phi.math.magic", RuntimeWarning, stacklevel=2) for old_name, new_dim in zip(existing_dims.names, existing_names): value = stack(unstack(value, old_name), new_dim, **kwargs) return value
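A short sketch (not from the generated docs) of the three `names` variants listed above:

```python
from phi import math
from phi.math import spatial, instance

t = math.zeros(spatial(x=4))
math.rename_dims(t, 'x', 'points')            # rename only, type stays spatial
math.rename_dims(t, 'x', instance('points'))  # rename and retype via a Shape
math.rename_dims(t, spatial, instance)        # retype all spatial dims, keep names
```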
def replace(obj: ~PhiTreeNodeType, **updates) ‑> ~PhiTreeNodeType
-
Creates a copy of the given `PhiTreeNode` with updated values as specified in `updates`.
If `obj` overrides `__with_attrs__`, the copy will be created via that specific implementation. Otherwise, the `copy` module and `setattr` will be used.
Args
obj: `PhiTreeNode`
**updates: Values to be replaced.
Returns
Copy of `obj` with updated values.
Expand source code
def replace(obj: PhiTreeNodeType, **updates) -> PhiTreeNodeType: """ Creates a copy of the given `phi.math.magic.PhiTreeNode` with updated values as specified in `updates`. If `obj` overrides `__with_attrs__`, the copy will be created via that specific implementation. Otherwise, the `copy` module and `setattr` will be used. Args: obj: `phi.math.magic.PhiTreeNode` **updates: Values to be replaced. Returns: Copy of `obj` with updated values. """ if hasattr(obj, '__with_attrs__'): return obj.__with_attrs__(**updates) elif isinstance(obj, (Number, bool)): return obj elif dataclasses.is_dataclass(obj): return dataclasses.replace(obj, **updates) else: cpy = copy.copy(obj) for attr, value in updates.items(): setattr(cpy, attr, value) return cpy
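A short sketch (not from the generated docs) of the dataclass path visible in the source above; `Config` is a hypothetical stand-in class:

```python
import dataclasses
from phi.math import replace

@dataclasses.dataclass
class Config:  # hypothetical dataclass standing in for a PhiTreeNode
    size: int
    name: str

c = replace(Config(4, 'a'), size=8)  # dataclasses.replace path -> Config(size=8, name='a')
```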
def replace_dims(value, dims: Union[str, tuple, list, set, phi.math._shape.Shape, Callable], names: Union[str, tuple, list, set, phi.math._shape.Shape, Callable], **kwargs)
-
Change the name and optionally the type of some dimensions of `value`.
Dimensions that are not present on `value` will be ignored. The corresponding new dimensions given by `names` will not be added.
Args
value: `Shape` or `Tensor` or `Shapable`.
dims: Existing dimensions of `value` as comma-separated `str`, `tuple`, `list`, `Shape` or filter function.
names: Either
* Sequence of names matching `dims` as `tuple`, `list` or `str`. This replaces only the dimension names but leaves the types untouched.
* `Shape` matching `dims` to replace names and types.
* Dimension type function to replace only types.
**kwargs: Additional keyword arguments required by specific implementations. Adding spatial dimensions to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions. Adding batch dimensions must always work without keyword arguments.
Returns
Same type as `value`.
Expand source code
def rename_dims(value, dims: DimFilter, names: DimFilter, **kwargs):
    """
    Change the name and optionally the type of some dimensions of `value`.

    Dimensions that are not present on `value` will be ignored. The corresponding new dimensions given by `names` will not be added.

    Args:
        value: `Shape` or `Tensor` or `Shapable`.
        dims: Existing dimensions of `value` as comma-separated `str`, `tuple`, `list`, `Shape` or filter function.
        names: Either

            * Sequence of names matching `dims` as `tuple`, `list` or `str`. This replaces only the dimension names but leaves the types untouched.
            * `Shape` matching `dims` to replace names and types.
            * Dimension type function to replace only types.

        **kwargs: Additional keyword arguments required by specific implementations.
            Adding spatial dimensions to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions.
            Adding batch dimensions must always work without keyword arguments.

    Returns:
        Same type as `value`.
    """
    if isinstance(value, Shape):
        return value._replace_names_and_types(dims, names)
    elif isinstance(value, (Number, bool)):
        return value
    assert isinstance(value, Shapable) and isinstance(value, Shaped), f"value must be a Shape or Shapable but got {type(value).__name__}"
    dims = shape(value).only(dims).names if callable(dims) else parse_dim_order(dims)
    if isinstance(names, str):
        names = parse_dim_order(names)
    elif callable(names):
        names: Shape = names(*dims).with_sizes(shape(value))
    assert len(dims) == len(names), f"names and dims must be of equal length but got #dims={len(dims)} and #names={len(names)}"
    existing_dims = shape(value).only(dims, reorder=True)
    if not existing_dims:
        return value
    existing_names = [n for i, n in enumerate(names) if dims[i] in existing_dims]
    existing_names = existing_dims._replace_names_and_types(existing_dims, existing_names)
    # --- First try __replace_dims__ ---
    if hasattr(value, '__replace_dims__'):
        result = value.__replace_dims__(existing_dims.names, existing_names, **kwargs)
        if result is not NotImplemented:
            return result
    # --- Next try Tree Node ---
    if isinstance(value, PhiTreeNode):
        new_attributes = {a: rename_dims(getattr(value, a), existing_dims, existing_names, **kwargs) for a in all_attributes(value)}
        return copy_with(value, **new_attributes)
    # --- Fallback: unstack and stack ---
    if shape(value).only(existing_dims).volume > 8:
        warnings.warn(f"rename_dims() default implementation is slow on large dimensions ({shape(value).only(dims)}). Please implement __replace_dims__() for {type(value).__name__} as defined in phi.math.magic", RuntimeWarning, stacklevel=2)
    for old_name, new_dim in zip(existing_dims.names, existing_names):
        value = stack(unstack(value, old_name), new_dim, **kwargs)
    return value
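A minimal usage sketch, assuming the default NumPy backend: renaming a spatial dimension (via this alias or `rename_dims()` directly) relabels the shape and leaves the values untouched.

from phi import math
from phi.math import spatial

t = math.zeros(spatial(x=4, y=3))
t2 = math.rename_dims(t, 'x', 'u')  # comma-separated str selects existing dims
assert 'u' in t2.shape.names and 'x' not in t2.shape.names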
def reshaped_native(value: phi.math._tensors.Tensor, groups: Union[tuple, list], force_expand: Any = True, to_numpy=False)
-
Returns a native representation of `value` where dimensions are laid out according to `groups`.
See Also: `native()`, `pack_dims()`, `reshaped_tensor()`, `reshaped_numpy()`.
Args
value: `Tensor`
groups: `tuple` or `list` of dimensions to be packed into one native dimension. Each entry must be one of the following:
* `str`: the name of one dimension that is present on `value`.
* `Shape`: Dimensions to be packed. If `force_expand`, missing dimensions are first added, otherwise they are ignored.
* Filter function: Packs all dimensions of this type that are present on `value`.
force_expand: `bool` or sequence of dimensions. If `True`, repeats the tensor along missing dimensions. If `False`, puts singleton dimensions where possible. If a sequence of dimensions is provided, only forces the expansion for groups containing those dimensions.
to_numpy: If True, converts the native tensor to a `numpy.ndarray`.
Returns
Native tensor with dimensions matching `groups`.
Expand source code
def reshaped_native(value: Tensor, groups: Union[tuple, list], force_expand: Any = True, to_numpy=False):
    """
    Returns a native representation of `value` where dimensions are laid out according to `groups`.

    See Also:
        `native()`, `pack_dims()`, `reshaped_tensor()`, `reshaped_numpy()`.

    Args:
        value: `Tensor`
        groups: `tuple` or `list` of dimensions to be packed into one native dimension. Each entry must be one of the following:

            * `str`: the name of one dimension that is present on `value`.
            * `Shape`: Dimensions to be packed. If `force_expand`, missing dimensions are first added, otherwise they are ignored.
            * Filter function: Packs all dimensions of this type that are present on `value`.

        force_expand: `bool` or sequence of dimensions. If `True`, repeats the tensor along missing dimensions. If `False`, puts singleton dimensions where possible. If a sequence of dimensions is provided, only forces the expansion for groups containing those dimensions.
        to_numpy: If True, converts the native tensor to a `numpy.ndarray`.

    Returns:
        Native tensor with dimensions matching `groups`.
    """
    assert isinstance(value, Tensor), f"value must be a Tensor but got {type(value)}"
    assert value.shape.is_uniform, f"Only uniform (homogeneous) tensors can be converted to native but got shape {value.shape}"
    assert isinstance(groups, (tuple, list)), f"groups must be a tuple or list but got {type(groups)}"
    order = []
    groups = [group(value) if callable(group) else group for group in groups]
    for i, group in enumerate(groups):
        if isinstance(group, Shape):
            present = value.shape.only(group)
            if force_expand is True or present.volume > 1 or (force_expand is not False and group.only(force_expand).volume > 1):
                value = expand(value, group)
            value = pack_dims(value, group, batch(f"group{i}"))
            order.append(f"group{i}")
        else:
            assert isinstance(group, str), f"Groups must be either single-dim str or Shape but got {group}"
            assert ',' not in group, f"When packing multiple dimensions, pass a well-defined Shape instead of a comma-separated str. Got {group}"
            order.append(group)
    return value.numpy(order) if to_numpy else value.native(order)
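A sketch of typical usage, assuming the default NumPy backend: packing the batch dimensions and all spatial dimensions into two native axes, e.g. to hand the data to an external library.

from phi import math
from phi.math import batch, spatial

t = math.random_uniform(batch(b=2), spatial(x=4, y=4))
native = math.reshaped_native(t, [batch, spatial(t)], to_numpy=True)
assert native.shape == (2, 16)  # batch dims packed first, then the 16 spatial cells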
def reshaped_numpy(value: phi.math._tensors.Tensor, groups: Union[tuple, list], force_expand: Any = True)
-
Returns the NumPy representation of `value` where dimensions are laid out according to `groups`.
See Also: `numpy()`, `reshaped_native()`, `pack_dims()`, `reshaped_tensor()`.
Args
value: `Tensor`
groups: Sequence of dimension names as `str` or groups of dimensions to be packed as `Shape`.
force_expand: `bool` or sequence of dimensions. If `True`, repeats the tensor along missing dimensions. If `False`, puts singleton dimensions where possible. If a sequence of dimensions is provided, only forces the expansion for groups containing those dimensions.
Returns
NumPy `ndarray` with dimensions matching `groups`.
Expand source code
def reshaped_numpy(value: Tensor, groups: Union[tuple, list], force_expand: Any = True):
    """
    Returns the NumPy representation of `value` where dimensions are laid out according to `groups`.

    See Also:
        `numpy()`, `reshaped_native()`, `pack_dims()`, `reshaped_tensor()`.

    Args:
        value: `Tensor`
        groups: Sequence of dimension names as `str` or groups of dimensions to be packed as `Shape`.
        force_expand: `bool` or sequence of dimensions. If `True`, repeats the tensor along missing dimensions. If `False`, puts singleton dimensions where possible. If a sequence of dimensions is provided, only forces the expansion for groups containing those dimensions.

    Returns:
        NumPy `ndarray` with dimensions matching `groups`.
    """
    return reshaped_native(value, groups, force_expand=force_expand, to_numpy=True)
def reshaped_tensor(value: Any, groups: Union[tuple, list], check_sizes=False, convert=True)
-
Creates a `Tensor` from a native tensor or tensor-like whereby the dimensions of `value` are split according to `groups`.
See Also: `tensor()`, `reshaped_native()`, `unpack_dim()`.
Args
value: Native tensor or tensor-like.
groups: Sequence of dimension groups to be packed as `tuple[Shape]` or `list[Shape]`.
check_sizes: If True, group sizes must match the sizes of `value` exactly. Otherwise, allows singleton dimensions.
convert: If True, converts the data to the native format of the current default backend. If False, wraps the data in a `Tensor` but keeps the given data reference if possible.
Returns
`Tensor` with all dimensions from `groups`.
Expand source code
def reshaped_tensor(value: Any, groups: Union[tuple, list], check_sizes=False, convert=True):
    """
    Creates a `Tensor` from a native tensor or tensor-like whereby the dimensions of `value` are split according to `groups`.

    See Also:
        `phi.math.tensor()`, `reshaped_native()`, `unpack_dim()`.

    Args:
        value: Native tensor or tensor-like.
        groups: Sequence of dimension groups to be packed as `tuple[Shape]` or `list[Shape]`.
        check_sizes: If True, group sizes must match the sizes of `value` exactly. Otherwise, allows singleton dimensions.
        convert: If True, converts the data to the native format of the current default backend.
            If False, wraps the data in a `Tensor` but keeps the given data reference if possible.

    Returns:
        `Tensor` with all dimensions from `groups`
    """
    assert all(isinstance(g, Shape) for g in groups), "groups must be a sequence of Shapes"
    dims = [batch(f'group{i}') for i, group in enumerate(groups)]
    try:
        value = tensor(value, *dims, convert=convert)
    except IncompatibleShapes:
        raise IncompatibleShapes(f"Cannot reshape native tensor {type(value)} with sizes {value.shape} given groups {groups}")
    for i, group in enumerate(groups):
        if value.shape.get_size(f'group{i}') == group.volume:
            value = unpack_dim(value, f'group{i}', group)
        elif check_sizes:
            raise AssertionError(f"Group {group} does not match dimension {i} of value {value.shape}")
        else:
            value = unpack_dim(value, f'group{i}', group)
    return value
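The inverse direction, sketched under the assumption that the first native axis is a batch and the second lists vector components:

import numpy as np
from phi import math
from phi.math import batch, channel

data = np.zeros((2, 3))
t = math.reshaped_tensor(data, [batch(b=2), channel(vector='x,y,z')])
assert t.shape.get_size('vector') == 3  # second axis unpacked into a named channel dim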
def rotate_vector(vector: phi.math._tensors.Tensor, angle: Union[phi.math._tensors.Tensor, float]) ‑> phi.math._tensors.Tensor
-
Rotates `vector` around the origin.
Args
vector: n-dimensional vector with a channel dimension called `'vector'`
angle: Euler angle. The direction is the rotation axis and the length is the amount (in radians).
Returns
Rotated vector as `Tensor`.
Expand source code
def rotate_vector(vector: math.Tensor, angle: Union[float, math.Tensor]) -> Tensor:
    """
    Rotates `vector` around the origin.

    Args:
        vector: n-dimensional vector with a channel dimension called `'vector'`
        angle: Euler angle. The direction is the rotation axis and the length is the amount (in radians).

    Returns:
        Rotated vector as `Tensor`
    """
    assert 'vector' in vector.shape, "vector must have 'vector' dimension."
    if vector.vector.size == 2:
        sin = wrap(math.sin(angle))
        cos = wrap(math.cos(angle))
        x, y = vector.vector
        rot_x = cos * x - sin * y
        rot_y = sin * x + cos * y
        return math.stack_tensors([rot_x, rot_y], channel(vector=vector.vector.item_names))
    elif vector.vector.size == 1:
        raise AssertionError(f"Cannot rotate a 1D vector. shape={vector.shape}")
    else:
        raise NotImplementedError(f"Rotation in {vector.vector.size}D not yet implemented.")
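A short sketch for the 2D case: rotating the unit x-vector by π/2 yields (approximately) the unit y-vector.

from math import pi
from phi import math

v = math.vec(x=1.0, y=0.0)
rotated = math.rotate_vector(v, pi / 2)
math.assert_close(rotated, math.vec(x=0., y=1.), abs_tolerance=1e-6)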
def round(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Rounds the `Tensor` or `PhiTreeNode` `x` to the closest integer.
Expand source code
def round_(x) -> Union[Tensor, PhiTreeNode]:
    """ Rounds the `Tensor` or `phi.math.magic.PhiTreeNode` `x` to the closest integer. """
    return _backend_op1(x, Backend.round)
def s2b(value)
-
Change the type of all spatial dimensions of `value` to batch dimensions. See `rename_dims()`.
Expand source code
def s2b(value):
    """ Change the type of all *spatial* dimensions of `value` to *batch* dimensions. See `rename_dims`. """
    return rename_dims(value, spatial, batch)
def safe_div(x: Union[phi.math._tensors.Tensor, float], y: Union[phi.math._tensors.Tensor, float])
-
Computes x/y with the `Tensor`s `x` and `y` but returns 0 where y=0.
Expand source code
def safe_div(x: Union[float, Tensor], y: Union[float, Tensor]):
    """ Computes *x/y* with the `Tensor`s `x` and `y` but returns 0 where *y=0*. """
    return custom_op2(x, y,
                      l_operator=safe_div,
                      l_native_function=lambda x_, y_: choose_backend(x_, y_).divide_no_nan(x_, y_),
                      r_operator=lambda y_, x_: safe_div(x_, y_),
                      r_native_function=lambda y_, x_: choose_backend(x_, y_).divide_no_nan(x_, y_),
                      op_name='divide_no_nan')
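A sketch of the zero-denominator behaviour:

from phi import math
from phi.math import spatial

x = math.ones(spatial(x=3))
y = math.wrap([0., 1., 2.], spatial('x'))
result = math.safe_div(x, y)  # 0 where y == 0, ordinary division elsewhere
math.assert_close(result, math.wrap([0., 1., .5], spatial('x')))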
def sample_subgrid(grid: phi.math._tensors.Tensor, start: phi.math._tensors.Tensor, size: phi.math._shape.Shape) ‑> phi.math._tensors.Tensor
-
Samples a sub-grid from `grid` with equal distance between sampling points. The values at the new sample points are determined via linear interpolation.
Args
grid: `Tensor` to be resampled. Values are assumed to be sampled at cell centers.
start: Origin point of sub-grid within `grid`, measured in number of cells. Must have a single dimension called `vector`. Example: `start=(1, 0.5)` would slice off the first grid point in dim 1 and take the mean of neighbouring points in dim 2. The order of dims must be equal to `size` and `grid.shape.spatial`.
size: Resolution of the sub-grid. Must not be larger than the resolution of `grid`. The order of dims must be equal to `start` and `grid.shape.spatial`.
Returns
Sub-grid as `Tensor`.
Expand source code
def sample_subgrid(grid: Tensor, start: Tensor, size: Shape) -> Tensor:
    """
    Samples a sub-grid from `grid` with equal distance between sampling points.
    The values at the new sample points are determined via linear interpolation.

    Args:
        grid: `Tensor` to be resampled. Values are assumed to be sampled at cell centers.
        start: Origin point of sub-grid within `grid`, measured in number of cells.
            Must have a single dimension called `vector`.
            Example: `start=(1, 0.5)` would slice off the first grid point in dim 1 and take the mean of neighbouring points in dim 2.
            The order of dims must be equal to `size` and `grid.shape.spatial`.
        size: Resolution of the sub-grid. Must not be larger than the resolution of `grid`.
            The order of dims must be equal to `start` and `grid.shape.spatial`.

    Returns:
        Sub-grid as `Tensor`
    """
    assert start.shape.names == ('vector',)
    assert grid.shape.spatial.names == size.names
    assert math.all_available(start), "Cannot perform sample_subgrid() during tracing, 'start' must be known."
    crop = {}
    for dim, d_start, d_size in zip(grid.shape.spatial.names, start, size.sizes):
        crop[dim] = slice(int(d_start), int(d_start) + d_size + (0 if d_start % 1 in (0, 1) else 1))
    grid = grid[crop]
    upper_weight = start % 1
    lower_weight = 1 - upper_weight
    for i, dim in enumerate(grid.shape.spatial.names):
        if upper_weight[i].native() not in (0, 1):
            lower, upper = shift(grid, (0, 1), [dim], padding=None, stack_dim=None)
            grid = upper * upper_weight[i] + lower * lower_weight[i]
    return grid
def scatter(base_grid: Union[phi.math._tensors.Tensor, phi.math._shape.Shape], indices: Union[phi.math._tensors.Tensor, dict], values: Union[phi.math._tensors.Tensor, float], mode: str = 'update', outside_handling: str = 'discard', indices_gradient=False)
-
Scatters `values` into `base_grid` at `indices`. Instance dimensions of `indices` and/or `values` are reduced during scattering. Depending on `mode`, this method has one of the following effects:
* `mode='update'`: Replaces the values of `base_grid` at `indices` by `values`. The result is undefined if `indices` contains duplicates.
* `mode='add'`: Adds `values` to `base_grid` at `indices`. The values corresponding to duplicate indices are accumulated.
* `mode='mean'`: Replaces the values of `base_grid` at `indices` by the mean of all `values` with the same index.
Implementations:
* NumPy: Slice assignment / `numpy.add.at`
* PyTorch: `torch.scatter`, `torch.scatter_add`
* TensorFlow: `tf.tensor_scatter_nd_add`, `tf.tensor_scatter_nd_update`
* Jax: `jax.lax.scatter_add`, `jax.lax.scatter`
See Also: `gather()`.
Args
base_grid: `Tensor` into which `values` are scattered.
indices: `Tensor` of n-dimensional indices at which to place `values`. Must have a single channel dimension with size matching the number of spatial dimensions of `base_grid`. This dimension is optional if the spatial rank is 1. Must also contain all `scatter_dims`.
values: `Tensor` of values to scatter at `indices`.
mode: Scatter mode as `str`. One of ('add', 'mean', 'update').
outside_handling: Defines how indices lying outside the bounds of `base_grid` are handled.
* `'discard'`: outside indices are ignored.
* `'clamp'`: outside indices are projected onto the closest point inside the grid.
* `'undefined'`: All points are expected to lie inside the grid. Otherwise an error may be thrown or an undefined tensor may be returned.
indices_gradient: Whether to allow the gradient of this operation to be backpropagated through `indices`.
Returns
Copy of `base_grid` with updated values at `indices`.
Expand source code
def scatter(base_grid: Union[Tensor, Shape],
            indices: Union[Tensor, dict],
            values: Union[Tensor, float],
            mode: str = 'update',
            outside_handling: str = 'discard',
            indices_gradient=False):
    """
    Scatters `values` into `base_grid` at `indices`.
    Instance dimensions of `indices` and/or `values` are reduced during scattering.
    Depending on `mode`, this method has one of the following effects:

    * `mode='update'`: Replaces the values of `base_grid` at `indices` by `values`. The result is undefined if `indices` contains duplicates.
    * `mode='add'`: Adds `values` to `base_grid` at `indices`. The values corresponding to duplicate indices are accumulated.
    * `mode='mean'`: Replaces the values of `base_grid` at `indices` by the mean of all `values` with the same index.

    Implementations:

    * NumPy: Slice assignment / `numpy.add.at`
    * PyTorch: [`torch.scatter`](https://pytorch.org/docs/stable/generated/torch.scatter.html), [`torch.scatter_add`](https://pytorch.org/docs/stable/generated/torch.scatter_add.html)
    * TensorFlow: [`tf.tensor_scatter_nd_add`](https://www.tensorflow.org/api_docs/python/tf/tensor_scatter_nd_add), [`tf.tensor_scatter_nd_update`](https://www.tensorflow.org/api_docs/python/tf/tensor_scatter_nd_update)
    * Jax: [`jax.lax.scatter_add`](https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.scatter_add.html), [`jax.lax.scatter`](https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.scatter.html)

    See Also:
        `gather()`.

    Args:
        base_grid: `Tensor` into which `values` are scattered.
        indices: `Tensor` of n-dimensional indices at which to place `values`.
            Must have a single channel dimension with size matching the number of spatial dimensions of `base_grid`.
            This dimension is optional if the spatial rank is 1.
            Must also contain all `scatter_dims`.
        values: `Tensor` of values to scatter at `indices`.
        mode: Scatter mode as `str`. One of ('add', 'mean', 'update')
        outside_handling: Defines how indices lying outside the bounds of `base_grid` are handled.

            * `'discard'`: outside indices are ignored.
            * `'clamp'`: outside indices are projected onto the closest point inside the grid.
            * `'undefined'`: All points are expected to lie inside the grid. Otherwise an error may be thrown or an undefined tensor may be returned.

        indices_gradient: Whether to allow the gradient of this operation to be backpropagated through `indices`.

    Returns:
        Copy of `base_grid` with updated values at `indices`.
    """
    assert mode in ('update', 'add', 'mean')
    assert outside_handling in ('discard', 'clamp', 'undefined')
    assert isinstance(indices_gradient, bool)
    if isinstance(indices, dict):  # update a slice
        if len(indices) == 1 and isinstance(next(iter(indices.values())), (str, int, slice)):  # update a range
            dim, sel = next(iter(indices.items()))
            full_dim = base_grid.shape[dim]
            if isinstance(sel, str):
                sel = full_dim.item_names[0].index(sel)
            if isinstance(sel, int):
                sel = slice(sel, sel + 1)
            assert isinstance(sel, slice), f"Selection must be a str, int or slice but got {type(sel)}"
            values = expand(values, full_dim.after_gather({dim: sel}))
            parts = [
                base_grid[{dim: slice(sel.start)}],
                values,
                base_grid[{dim: slice(sel.stop, None)}]
            ]
            return concat(parts, dim)
        else:
            raise NotImplementedError("scattering into non-continuous values not yet supported by dimension")
    grid_shape = base_grid if isinstance(base_grid, Shape) else base_grid.shape
    assert channel(indices).rank < 2
    if channel(indices) and channel(indices).item_names[0]:
        indexed_dims = channel(indices).item_names[0]
        assert indexed_dims in grid_shape, f"Scatter indices {indices.shape} point to missing dimensions in grid {grid_shape}"
        if indexed_dims != grid_shape.only(indexed_dims).names:
            indices = indices.vector[grid_shape.only(indexed_dims).names]
        indexed_dims = grid_shape.only(indexed_dims)
    else:
        assert channel(indices).rank == 1 or (grid_shape.spatial_rank + grid_shape.instance_rank == 1 and indices.shape.channel_rank == 0)
        indexed_dims = grid_shape.spatial or grid_shape.instance
        assert channel(indices).volume == indexed_dims.rank
    values = wrap(values)
    batches = values.shape.non_channel.non_instance & indices.shape.non_channel.non_instance
    channels = grid_shape.without(indexed_dims).without(batches) & values.shape.channel
    # --- Set up grid ---
    if isinstance(base_grid, Shape):
        with choose_backend_t(indices, values):
            base_grid = zeros(base_grid & batches & values.shape.channel, dtype=values.dtype)
        if mode != 'add':
            base_grid += math.nan
    # --- Handle outside indices ---
    if outside_handling == 'clamp':
        indices = clip(indices, 0, tensor(indexed_dims, channel('vector')) - 1)
    elif outside_handling == 'discard':
        indices_linear = pack_dims(indices, instance, instance(_scatter_instance=1))
        indices_inside = min_((round_(indices_linear) >= 0) & (round_(indices_linear) < tensor(indexed_dims, channel('vector'))), 'vector')
        indices_linear = boolean_mask(indices_linear, '_scatter_instance', indices_inside)
        if instance(values).rank > 0:
            values_linear = pack_dims(values, instance, instance(_scatter_instance=1))
            values_linear = boolean_mask(values_linear, '_scatter_instance', indices_inside)
            values = unpack_dim(values_linear, '_scatter_instance', instance(values))
        indices = unpack_dim(indices_linear, '_scatter_instance', instance(indices))
        if indices.shape.is_non_uniform:
            raise NotImplementedError()
    lists = indices.shape.instance & values.shape.instance

    def scatter_forward(base_grid, indices, values):
        indices = to_int32(round_(indices))
        native_grid = reshaped_native(base_grid, [batches, *indexed_dims, channels])
        native_values = reshaped_native(values, [batches, lists, channels])
        native_indices = reshaped_native(indices, [batches, lists, 'vector'])
        backend = choose_backend(native_indices, native_values, native_grid)
        if mode in ('add', 'update'):
            native_result = backend.scatter(native_grid, native_indices, native_values, mode=mode)
        else:  # mean
            zero_grid = backend.zeros_like(native_grid)
            summed = backend.scatter(zero_grid, native_indices, native_values, mode='add')
            count = backend.scatter(zero_grid, native_indices, backend.ones_like(native_values), mode='add')
            native_result = summed / backend.maximum(count, 1)
            native_result = backend.where(count == 0, native_grid, native_result)
        return reshaped_tensor(native_result, [batches, *indexed_dims, channels], check_sizes=True)

    def scatter_backward(args: dict, _output, d_output):
        from ._nd import spatial_gradient
        values_grad = gather(d_output, args['indices'])
        spatial_gradient_indices = gather(spatial_gradient(d_output, dims=indexed_dims), args['indices'])
        indices_grad = mean(spatial_gradient_indices * args['values'], 'vector_')
        return None, indices_grad, values_grad

    from ._functional import custom_gradient
    scatter_function = custom_gradient(scatter_forward, scatter_backward) if indices_gradient else scatter_forward
    result = scatter_function(base_grid, indices, values)
    return result
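A 1D sketch, assuming the default NumPy backend: with `mode='add'`, duplicate indices accumulate.

from phi import math
from phi.math import spatial, instance

base = math.zeros(spatial(x=5))
indices = math.wrap([0, 2, 2], instance('points'))  # channel dim is optional for spatial rank 1
values = math.wrap([1., 1., 1.], instance('points'))
result = math.scatter(base, indices, values, mode='add')
math.assert_close(result, math.wrap([1., 0., 2., 0., 0.], spatial('x')))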
def seed(seed: int)
-
Sets the current seed of all backends and the built-in `random` package.
Calling this function with a fixed value at the start of an application yields reproducible results as long as the same backend is used.
Args
seed: Seed to use.
Expand source code
def seed(seed: int):
    """
    Sets the current seed of all backends and the built-in `random` package.

    Calling this function with a fixed value at the start of an application yields reproducible results
    as long as the same backend is used.

    Args:
        seed: Seed to use.
    """
    for backend in BACKENDS:
        backend.seed(seed)
    import random
    random.seed(seed)
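A reproducibility sketch:

from phi import math
from phi.math import spatial

math.seed(0)
a = math.random_normal(spatial(x=4))
math.seed(0)
b = math.random_normal(spatial(x=4))
math.assert_close(a, b)  # same seed and same backend yield identical samples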
def set_global_precision(floating_point_bits: int)
-
Sets the floating point precision of DYNAMIC_BACKEND, which affects all registered backends.
If `floating_point_bits` is an integer, all floating point tensors created henceforth will be of the corresponding data type, float16, float32 or float64. Operations may also convert floating point values to this precision, even if the input had a different precision.
If `floating_point_bits` is None, new tensors will default to float32 unless specified otherwise. The output of math operations has the same precision as its inputs.
Args
floating_point_bits: one of (16, 32, 64, None)
Expand source code
def set_global_precision(floating_point_bits: int):
    """
    Sets the floating point precision of DYNAMIC_BACKEND which affects all registered backends.

    If `floating_point_bits` is an integer, all floating point tensors created henceforth will be of the corresponding data type, float16, float32 or float64.
    Operations may also convert floating point values to this precision, even if the input had a different precision.

    If `floating_point_bits` is None, new tensors will default to float32 unless specified otherwise.
    The output of math operations has the same precision as its inputs.

    Args:
        floating_point_bits: one of (16, 32, 64, None)
    """
    _PRECISION[0] = floating_point_bits
def shape(obj) ‑> phi.math._shape.Shape
-
If `obj` is a `Tensor` or `Shaped`, returns its shape. If `obj` is a `Shape`, returns `obj`.
This function can be passed as a `dim` argument to an operation to specify that it should act upon all dimensions.
Args
obj: `Tensor` or `Shape` or `Shaped`
Returns
`Shape`
Expand source code
def shape(obj) -> Shape:
    """
    If `obj` is a `Tensor` or `phi.math.magic.Shaped`, returns its shape.
    If `obj` is a `Shape`, returns `obj`.

    This function can be passed as a `dim` argument to an operation to specify that it should act upon all dimensions.

    Args:
        obj: `Tensor` or `Shape` or `Shaped`

    Returns:
        `Shape`
    """
    from phi.math.magic import PhiTreeNode, Shaped
    if isinstance(obj, Shape):
        return obj
    elif hasattr(obj, '__shape__'):
        return obj.__shape__()
    elif hasattr(obj, 'shape') and isinstance(obj.shape, Shape):
        return obj.shape
    elif isinstance(obj, (int, float, complex, bool)):
        return EMPTY_SHAPE
    elif isinstance(obj, (tuple, list)) and all(isinstance(item, (int, float, complex, bool)) for item in obj):
        return channel('vector')
    elif isinstance(obj, (Number, bool)):
        return EMPTY_SHAPE
    elif isinstance(obj, (tuple, list)) and all(isinstance(item, (PhiTreeNode, Shaped)) for item in obj):
        return merge_shapes(*obj, allow_varying_sizes=True)
    if isinstance(obj, dict) and all(isinstance(item, (PhiTreeNode, Shaped)) for item in obj):
        return merge_shapes(*obj.values(), allow_varying_sizes=True)
    elif isinstance(obj, PhiTreeNode):
        from phi.math._magic_ops import all_attributes
        return merge_shapes(*[getattr(obj, a) for a in all_attributes(obj, assert_any=True)], allow_varying_sizes=True)
    else:
        from .backend import choose_backend, NoBackendFound
        try:
            backend = choose_backend(obj)
            shape_tuple = backend.staticshape(obj)
            if len(shape_tuple) == 0:
                return EMPTY_SHAPE
            elif len(shape_tuple) == 1:
                return channel('vector')
            else:
                raise ValueError(f"Cannot auto-complete shape of {backend} tensor with shape {shape_tuple}. Only 0D and 1D tensors have a Φ-Flow shape by default.")
        except NoBackendFound:
            raise ValueError(f'shape() requires Shaped or Shape argument but got {type(obj)}')
def shift(x: phi.math._tensors.Tensor, offsets: tuple, dims: Union[str, tuple, list, set, phi.math._shape.Shape, Callable] = <function spatial>, padding: Union[Extrapolation, phi.math._tensors.Tensor, float, None] = boundary, stack_dim: Optional[phi.math._shape.Shape] = (shiftᶜ=None), extend_bounds=0) ‑> list
-
Shifts a `Tensor` by a fixed offset, handling the boundary according to the given extrapolation.
Args
x: Input data
offsets: Shift size
dims: Dimensions along which to shift; defaults to all spatial dimensions.
padding: padding to be performed at the boundary, defaults to `extrapolation.BOUNDARY`
stack_dim: dimensions to be stacked, defaults to 'shift'
Returns
List of shifted tensors, one per entry in `offsets`.
Expand source code
def shift(x: Tensor,
          offsets: tuple,
          dims: DimFilter = math.spatial,
          padding: Union[Extrapolation, Tensor, float, None] = extrapolation.BOUNDARY,
          stack_dim: Optional[Shape] = channel('shift'),
          extend_bounds=0) -> list:
    """
    Shifts a `Tensor` by a fixed offset, handling the boundary according to the given extrapolation.

    Args:
        x: Input data
        offsets: Shift size
        dims: Dimensions along which to shift; defaults to all spatial dimensions.
        padding: padding to be performed at the boundary, defaults to `extrapolation.BOUNDARY`
        stack_dim: dimensions to be stacked, defaults to 'shift'

    Returns:
        list: one shifted tensor per entry in `offsets`
    """
    if dims is None:
        raise ValueError("dims=None is not supported anymore.")
    dims = x.shape.only(dims).names
    if stack_dim is None:
        assert len(dims) == 1
    x = wrap(x)
    pad_lower = max(0, -min(offsets))
    pad_upper = max(0, max(offsets))
    if padding is not None:
        x = math.pad(x, {axis: (pad_lower + extend_bounds, pad_upper + extend_bounds) for axis in dims}, mode=padding)
    if extend_bounds:
        assert padding is not None
    offset_tensors = []
    for offset in offsets:
        components = []
        for dimension in dims:
            if padding:
                slices = {dim: slice(pad_lower + offset, (-pad_upper + offset) or None) if dim == dimension else slice(pad_lower, -pad_upper or None) for dim in dims}
            else:
                slices = {dim: slice(pad_lower + offset, (-pad_upper + offset) or None) if dim == dimension else slice(None, None) for dim in dims}
            components.append(x[slices])
        offset_tensors.append(stack(components, stack_dim) if stack_dim is not None else components[0])
    return offset_tensors
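A sketch of a forward difference built from two shifted views; with `padding=None`, the result only covers the interior.

from phi import math
from phi.math import spatial

t = math.wrap([0., 1., 4., 9.], spatial('x'))
unshifted, shifted = math.shift(t, (0, 1), 'x', padding=None, stack_dim=None)
forward_diff = shifted - unshifted  # (1, 3, 5) along x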
def si2d(value)
-
Change the type of all spatial and instance dimensions of `value` to dual dimensions. See `rename_dims()`.
Expand source code
def si2d(value):
    """ Change the type of all *spatial* and *instance* dimensions of `value` to *dual* dimensions. See `rename_dims`. """
    return rename_dims(value, lambda s: s.non_channel.non_dual.non_batch, dual)
def sigmoid(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Computes the sigmoid function of the `Tensor` or `PhiTreeNode` `x`.
Expand source code
def sigmoid(x) -> Union[Tensor, PhiTreeNode]:
    """ Computes the sigmoid function of the `Tensor` or `phi.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.sigmoid)
def sign(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
The sign of positive numbers is 1 and -1 for negative numbers. The sign of 0 is undefined.
Args
x: `Tensor` or `PhiTreeNode`
Returns
`Tensor` or `PhiTreeNode` matching `x`.
Expand source code
def sign(x) -> Union[Tensor, PhiTreeNode]:
    """
    The sign of positive numbers is 1 and -1 for negative numbers.
    The sign of 0 is undefined.

    Args:
        x: `Tensor` or `phi.math.magic.PhiTreeNode`

    Returns:
        `Tensor` or `phi.math.magic.PhiTreeNode` matching `x`.
    """
    return _backend_op1(x, Backend.sign)
def sin(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Computes sin(x) of the `Tensor` or `PhiTreeNode` `x`.
Expand source code
def sin(x) -> Union[Tensor, PhiTreeNode]:
    """ Computes *sin(x)* of the `Tensor` or `phi.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.sin)
def sinh(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Computes sinh(x) of the `Tensor` or `PhiTreeNode` `x`.
Expand source code
def sinh(x) -> Union[Tensor, PhiTreeNode]:
    """ Computes *sinh(x)* of the `Tensor` or `phi.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.sinh)
def slice(value, slices: Dict[str, Union[int, slice, str, tuple, list]])
-
Slices a `Tensor` or `PhiTreeNode` along named dimensions.
See Also: `unstack()`.
Args
value: `Tensor` or `PhiTreeNode`
slices: `dict` mapping dimension names to slices. A slice can be one of the following:
* An index (`int`)
* A range (`slice`)
* An item name (`str`)
* Multiple item names (comma-separated `str`)
* Multiple indices or item names (`tuple` or `list`)
Returns
`Tensor` or `PhiTreeNode` of the same type as `value`.
Examples
>>> math.slice([vec(x=0, y=1), vec(x=2, y=3)], {'vector': 'y'})
[1, 3]
Expand source code
def slice_(value, slices: Dict[str, Union[int, slice, str, tuple, list]]):
    """
    Slices a `Tensor` or `phi.math.magic.PhiTreeNode` along named dimensions.

    See Also:
        `unstack`.

    Args:
        value: `Tensor` or `phi.math.magic.PhiTreeNode`
        slices: `dict` mapping dimension names to slices. A slice can be one of the following:

            * An index (`int`)
            * A range (`slice`)
            * An item name (`str`)
            * Multiple item names (comma-separated `str`)
            * Multiple indices or item names (`tuple` or `list`)

    Returns:
        `Tensor` or `phi.math.magic.PhiTreeNode` of the same type as `value`.

    Examples:
        >>> math.slice([vec(x=0, y=1), vec(x=2, y=3)], {'vector': 'y'})
        [1, 3]
    """
    if isinstance(value, (bool, Number)):
        return value
    if isinstance(value, tuple):
        return tuple([slice_(v, slices) for v in value])
    if isinstance(value, list):
        return [slice_(v, slices) for v in value]
    if isinstance(value, dict):
        return {k: slice_(v, slices) for k, v in value.items()}
    if isinstance(value, Shape):
        raise NotImplementedError
    if hasattr(value, '__getitem__'):
        return value[slices]
    if isinstance(value, PhiTreeNode):
        attrs = {key: getattr(value, key) for key in value_attributes(value)}
        new_attrs = {k: slice_(v, slices) for k, v in attrs.items()}
        return copy_with(value, **new_attrs)
    raise ValueError(f"value must be a PhiTreeNode but got {type(value)}")
def soft_plus(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Computes softplus(x) of the `Tensor` or `PhiTreeNode` `x`.
Expand source code
def soft_plus(x) -> Union[Tensor, PhiTreeNode]:
    """ Computes *softplus(x)* of the `Tensor` or `phi.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.softplus)
def solve_linear(f: Union[Callable[[~X], ~Y], phi.math._tensors.Tensor], y: ~Y, solve: phi.math._optimize.Solve[~X, ~Y], *f_args, grad_for_f=False, f_kwargs: dict = None, **f_kwargs_) ‑> ~X
-
Solves the system of linear equations f(x) = y and returns x. This method will use the solver specified in `solve`. The following method identifiers are supported by all backends:
* `'auto'`: Automatically choose a solver
* `'CG'`: Conjugate gradient, only for symmetric and positive definite matrices.
* `'CG-adaptive'`: Conjugate gradient with adaptive step size, only for symmetric and positive definite matrices.
* `'biCG'` or `'biCG-stab(0)'`: Biconjugate gradient
* `'biCG-stab'` or `'biCG-stab(1)'`: Biconjugate gradient stabilized, first order
* `'biCG-stab(2)'`, `'biCG-stab(4)'`, …: Biconjugate gradient stabilized, second or higher order
* `'scipy-direct'`: SciPy direct solve, always run on the CPU using `scipy.sparse.linalg.spsolve`.
* `'scipy-CG'`, `'scipy-GMres'`, `'scipy-biCG'`, `'scipy-biCG-stab'`, `'scipy-CGS'`, `'scipy-QMR'`, `'scipy-GCrotMK'`: SciPy iterative solvers, always run on the CPU.
Caution: SciPy solvers cannot be jit-compiled and should only be used for debugging purposes.
For maximum performance, compile `f` using `jit_compile_linear()` beforehand. Then, an optimized representation of `f` (such as a sparse matrix) will be used to solve the linear system.
To obtain additional information about the performed solve, perform the solve within a `SolveTape` context. The used implementation can be obtained as `SolveInfo.method`.
The gradient of this operation will perform another linear solve with the parameters specified by `Solve.gradient_solve`.
See Also: `solve_nonlinear()`, `jit_compile_linear()`.
Args
f: One of the following:
* Linear function with `Tensor` or `PhiTreeNode` first parameter and return value. `f` can have additional auxiliary arguments and return auxiliary values.
* Dense matrix (`Tensor` with at least one dual dimension)
* Sparse matrix (sparse `Tensor` with at least one dual dimension)
* Native tensor (not yet supported)
y: Desired output of `f(x)` as `Tensor` or `PhiTreeNode`.
solve: `Solve` object specifying optimization method, parameters and initial guess for `x`.
*f_args: Positional arguments to be passed to `f` after `solve.x0`. These arguments will not be solved for. Supports vararg mode, or pass all arguments as a `tuple`.
f_kwargs: Additional keyword arguments to be passed to `f`. These arguments are treated as auxiliary arguments and can be of any type.
Returns
x: Solution of the linear system of equations `f(x) = y` as `Tensor` or `PhiTreeNode`.
Raises
NotConverged: If the desired accuracy could not be reached within the maximum number of iterations.
Diverged: If the solve failed prematurely.
Expand source code
def solve_linear(f: Union[Callable[[X], Y], Tensor], y: Y, solve: Solve[X, Y], *f_args, grad_for_f=False, f_kwargs: dict = None, **f_kwargs_) -> X:
    """
    Solves the system of linear equations *f(x) = y* and returns *x*.
    This method will use the solver specified in `solve`.
    The following method identifiers are supported by all backends:

    * `'auto'`: Automatically choose a solver
    * `'CG'`: Conjugate gradient, only for symmetric and positive definite matrices.
    * `'CG-adaptive'`: Conjugate gradient with adaptive step size, only for symmetric and positive definite matrices.
    * `'biCG'` or `'biCG-stab(0)'`: Biconjugate gradient
    * `'biCG-stab'` or `'biCG-stab(1)'`: Biconjugate gradient stabilized, first order
    * `'biCG-stab(2)'`, `'biCG-stab(4)'`, ...: Biconjugate gradient stabilized, second or higher order
    * `'scipy-direct'`: SciPy direct solve, always run on the CPU using `scipy.sparse.linalg.spsolve`.
    * `'scipy-CG'`, `'scipy-GMres'`, `'scipy-biCG'`, `'scipy-biCG-stab'`, `'scipy-CGS'`, `'scipy-QMR'`, `'scipy-GCrotMK'`: SciPy iterative solvers, always run on the CPU.

    **Caution**: SciPy solvers cannot be jit-compiled and should only be used for debugging purposes.

    For maximum performance, compile `f` using `jit_compile_linear()` beforehand.
    Then, an optimized representation of `f` (such as a sparse matrix) will be used to solve the linear system.

    To obtain additional information about the performed solve, perform the solve within a `SolveTape` context.
    The used implementation can be obtained as `SolveInfo.method`.

    The gradient of this operation will perform another linear solve with the parameters specified by `Solve.gradient_solve`.

    See Also:
        `solve_nonlinear()`, `jit_compile_linear()`.

    Args:
        f: One of the following:

            * Linear function with `Tensor` or `phi.math.magic.PhiTreeNode` first parameter and return value. `f` can have additional auxiliary arguments and return auxiliary values.
            * Dense matrix (`Tensor` with at least one dual dimension)
            * Sparse matrix (sparse `Tensor` with at least one dual dimension)
            * Native tensor (not yet supported)

        y: Desired output of `f(x)` as `Tensor` or `phi.math.magic.PhiTreeNode`.
        solve: `Solve` object specifying optimization method, parameters and initial guess for `x`.
        *f_args: Positional arguments to be passed to `f` after `solve.x0`. These arguments will not be solved for.
            Supports vararg mode, or pass all arguments as a `tuple`.
        f_kwargs: Additional keyword arguments to be passed to `f`.
            These arguments are treated as auxiliary arguments and can be of any type.

    Returns:
        x: solution of the linear system of equations `f(x) = y` as `Tensor` or `phi.math.magic.PhiTreeNode`.

    Raises:
        NotConverged: If the desired accuracy could not be reached within the maximum number of iterations.
        Diverged: If the solve failed prematurely.
    """
    # --- Handle parameters ---
    f_kwargs = f_kwargs or {}
    f_kwargs.update(f_kwargs_)
    f_args = f_args[0] if len(f_args) == 1 and isinstance(f_args[0], tuple) else f_args
    # --- Get input and output tensors ---
    y_tree, y_tensors = disassemble_tree(y)
    x0_tree, x0_tensors = disassemble_tree(solve.x0)
    assert solve.x0 is not None, "Please specify the initial guess as Solve(..., x0=initial_guess)"
    assert len(x0_tensors) == len(y_tensors) == 1, "Only single-tensor linear solves are currently supported"
    backend = choose_backend_t(*y_tensors, *x0_tensors)
    prefer_explicit = backend.supports(Backend.sparse_coo_tensor) or backend.supports(Backend.csr_matrix) or grad_for_f
    if isinstance(f, Tensor) or (isinstance(f, LinearFunction) and prefer_explicit):  # Matrix solve
        if isinstance(f, LinearFunction):
            matrix, bias = f.sparse_matrix_and_bias(solve.x0, *f_args, **f_kwargs)
        else:
            matrix = f
            bias = 0
        preconditioner = compute_preconditioner(solve.preconditioner, matrix, safe=False, target_backend=NUMPY if solve.method.startswith('scipy-') else backend, solver=solve.method) if solve.preconditioner is not None else None

        def _matrix_solve_forward(y, solve: Solve, matrix: Tensor, is_backprop=False):
            backend_matrix = native_matrix(matrix, choose_backend_t(*y_tensors, matrix))
            pattern_dims_in = channel(**dual(matrix).untyped_dict).names
            pattern_dims_out = non_dual(matrix).names  # batch dims can be sparse or batched matrices
            result = _linear_solve_forward(y, solve, backend_matrix, pattern_dims_in, pattern_dims_out, preconditioner, backend, is_backprop)
            return result  # must return exactly `x` so gradient isn't computed w.r.t. other quantities

        _matrix_solve = attach_gradient_solve(_matrix_solve_forward, auxiliary_args=f'is_backprop,solve{",matrix" if matrix.default_backend == NUMPY else ""}', matrix_adjoint=grad_for_f)
        return _matrix_solve(y - bias, solve, matrix)
    else:  # Matrix-free solve
        f_args = cached(f_args)
        solve = cached(solve)
        assert not grad_for_f, f"grad_for_f=True can only be used for math.jit_compile_linear functions but got '{f_name(f)}'. Please decorate the linear function with @jit_compile_linear"
        assert solve.preconditioner is None, f"Preconditioners not currently supported for matrix-free solves. Decorate '{f_name(f)}' with @math.jit_compile_linear to perform a matrix solve."

        def _function_solve_forward(y, solve: Solve, f_args: tuple, f_kwargs: dict = None, is_backprop=False):
            y_nest, (y_tensor,) = disassemble_tree(y)
            x0_nest, (x0_tensor,) = disassemble_tree(solve.x0)
            # active_dims = (y_tensor.shape & x0_tensor.shape).non_batch  # assumes batch dimensions are not active
            batches = (y_tensor.shape & x0_tensor.shape).batch

            def native_lin_f(native_x, batch_index=None):
                if batch_index is not None and batches.volume > 1:
                    native_x = backend.tile(backend.expand_dims(native_x), [batches.volume, 1])
                x = assemble_tree(x0_nest, [reshaped_tensor(native_x, [batches, non_batch(x0_tensor)] if backend.ndims(native_x) >= 2 else [non_batch(x0_tensor)], convert=False)])
                y = f(x, *f_args, **f_kwargs)
                _, (y_tensor,) = disassemble_tree(y)
                y_native = reshaped_native(y_tensor, [batches, non_batch(y_tensor)] if backend.ndims(native_x) >= 2 else [non_batch(y_tensor)])
                if batch_index is not None and batches.volume > 1:
                    y_native = y_native[batch_index]
                return y_native

            result = _linear_solve_forward(y, solve, native_lin_f, pattern_dims_in=non_batch(x0_tensor).names, pattern_dims_out=non_batch(y_tensor).names, preconditioner=None, backend=backend, is_backprop=is_backprop)
            return result  # must return exactly `x` so gradient isn't computed w.r.t. other quantities

        _function_solve = attach_gradient_solve(_function_solve_forward, auxiliary_args='is_backprop,f_kwargs,solve', matrix_adjoint=grad_for_f)
        return _function_solve(y, solve, f_args, f_kwargs=f_kwargs)
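A minimal sketch, assuming `math.jit_compile_linear` and `math.Solve` as referenced in the docstring above: solving the trivially linear system 2x = 1 with conjugate gradient. The tolerances chosen here are arbitrary.

from phi import math
from phi.math import spatial

@math.jit_compile_linear
def f(x):
    return 2 * x  # symmetric positive definite, so CG applies

y = math.ones(spatial(x=4))
solve = math.Solve('CG', rel_tol=1e-5, abs_tol=1e-5, x0=math.zeros(spatial(x=4)))
x = math.solve_linear(f, y, solve)
math.assert_close(x, 0.5, abs_tolerance=1e-4)  # the exact solution is x = 0.5 everywhere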
def solve_nonlinear(f: Callable, y, solve: phi.math._optimize.Solve) ‑> phi.math._tensors.Tensor
-
Solves the non-linear equation f(x) = y by minimizing the norm of the residual.
This method is limited to backends that support `jacobian()`, currently PyTorch, TensorFlow and Jax.
To obtain additional information about the performed solve, use a `SolveTape`.
See Also: `minimize()`, `solve_linear()`.
Args
f: Function whose output is optimized to match `y`. All positional arguments of `f` are optimized and must be `Tensor` or `PhiTreeNode`. The output of `f` must match `y`.
y: Desired output of `f(x)` as `Tensor` or `PhiTreeNode`.
solve: `Solve` object specifying optimization method, parameters and initial guess for `x`.
Returns
x: Solution fulfilling `f(x) = y` within specified tolerance as `Tensor` or `PhiTreeNode`.
Raises
NotConverged: If the desired accuracy could not be reached within the maximum number of iterations.
Diverged: If the solve failed prematurely.
Expand source code
def solve_nonlinear(f: Callable, y, solve: Solve) -> Tensor:
    """
    Solves the non-linear equation *f(x) = y* by minimizing the norm of the residual.

    This method is limited to backends that support `jacobian()`, currently PyTorch, TensorFlow and Jax.

    To obtain additional information about the performed solve, use a `SolveTape`.

    See Also:
        `minimize()`, `solve_linear()`.

    Args:
        f: Function whose output is optimized to match `y`.
            All positional arguments of `f` are optimized and must be `Tensor` or `phi.math.magic.PhiTreeNode`.
            The output of `f` must match `y`.
        y: Desired output of `f(x)` as `Tensor` or `phi.math.magic.PhiTreeNode`.
        solve: `Solve` object specifying optimization method, parameters and initial guess for `x`.

    Returns:
        x: Solution fulfilling `f(x) = y` within specified tolerance as `Tensor` or `phi.math.magic.PhiTreeNode`.

    Raises:
        NotConverged: If the desired accuracy could not be reached within the maximum number of iterations.
        Diverged: If the solve failed prematurely.
    """
    def min_func(x):
        diff = f(x) - y
        l2 = l2_loss(diff)
        return l2
    if solve.preprocess_y is not None:
        y = solve.preprocess_y(y)
    from ._nd import l2_loss
    solve = solve.with_defaults('solve')
    tol = math.maximum(solve.rel_tol * l2_loss(y), solve.abs_tol)
    min_solve = copy_with(solve, abs_tol=tol, rel_tol=0, preprocess_y=None)
    return minimize(min_func, min_solve)
def sparse_tensor(indices: phi.math._tensors.Tensor, values: phi.math._tensors.Tensor, dense_shape: phi.math._shape.Shape, can_contain_double_entries=True, indices_sorted=False, format='auto', default: numbers.Number = 0) ‑> phi.math._tensors.Tensor
-
Construct a sparse tensor that stores `values` at the corresponding `indices` and is 0 everywhere else. In addition to the sparse dimensions indexed by `indices`, the tensor inherits all batch and channel dimensions from `values`.
Args
indices: `Tensor` encoding the positions of stored values. It has the following dimensions:
* One instance dimension exactly matching the instance dimension on `values`. It enumerates the positions of stored entries.
* One channel dimension called `vector`. Its item names must match the dimension names of `dense_shape` but the order can be arbitrary.
* Any number of batch dimensions
values: `Tensor` containing the stored values at positions given by `indices`. It has the following dimensions:
* One instance dimension exactly matching the instance dimension on `indices`. It enumerates the values of stored entries.
* Any number of channel dimensions if multiple values are stored at each index.
* Any number of batch dimensions
dense_shape: Dimensions listed in `indices`. The order can differ from the item names of `indices`.
can_contain_double_entries: Whether some indices might occur more than once. If so, values at the same index will be summed.
indices_sorted: Whether the indices are sorted in ascending order given the dimension order of the item names of `indices`.
format: Sparse format in which to store the data, such as `'coo'` or `'csr'`. See `get_format()`.
default: Value the sparse tensor returns for non-stored values. Must be `0` or `None`.
Returns
Sparse `Tensor` with the specified `format`.
Expand source code
def sparse_tensor(indices: Tensor,
                  values: Tensor,
                  dense_shape: Shape,
                  can_contain_double_entries=True,
                  indices_sorted=False,
                  format='auto',
                  default: Number or None = 0) -> Tensor:
    """
    Construct a sparse tensor that stores `values` at the corresponding `indices` and is 0 everywhere else.
    In addition to the sparse dimensions indexed by `indices`, the tensor inherits all batch and channel dimensions from `values`.

    Args:
        indices: `Tensor` encoding the positions of stored values. It has the following dimensions:

            * One instance dimension exactly matching the instance dimension on `values`. It enumerates the positions of stored entries.
            * One channel dimension called `vector`. Its item names must match the dimension names of `dense_shape` but the order can be arbitrary.
            * Any number of batch dimensions

        values: `Tensor` containing the stored values at positions given by `indices`. It has the following dimensions:

            * One instance dimension exactly matching the instance dimension on `indices`. It enumerates the values of stored entries.
            * Any number of channel dimensions if multiple values are stored at each index.
            * Any number of batch dimensions

        dense_shape: Dimensions listed in `indices`. The order can differ from the item names of `indices`.
        can_contain_double_entries: Whether some indices might occur more than once. If so, values at the same index will be summed.
        indices_sorted: Whether the indices are sorted in ascending order given the dimension order of the item names of `indices`.
        format: Sparse format in which to store the data, such as `'coo'` or `'csr'`. See `phi.math.get_format`.
        default: Value the sparse tensor returns for non-stored values. Must be `0` or `None`.

    Returns:
        Sparse `Tensor` with the specified `format`.
    """
    assert default in [0, None], f"default value must be either 0 or None but got '{default}'"
    coo = SparseCoordinateTensor(indices, values, dense_shape, can_contain_double_entries, indices_sorted, default)
    return to_format(coo, format)
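A sketch constructing a 3×3 sparse tensor with two stored entries and converting it back to a dense `Tensor`:

from phi import math
from phi.math import instance, channel, spatial

indices = math.wrap([(0, 0), (2, 1)], instance('entries'), channel(vector='x,y'))
values = math.wrap([1., 2.], instance('entries'))
s = math.sparse_tensor(indices, values, spatial(x=3, y=3), can_contain_double_entries=False)
d = math.dense(s)  # 0 everywhere except at (0, 0) and (2, 1)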
def spatial(*args, **dims: Union[int, str, tuple, list, phi.math._shape.Shape]) ‑> phi.math._shape.Shape
-
Returns the spatial dimensions of an existing `Shape` or creates a new `Shape` with only spatial dimensions.
Usage for filtering spatial dimensions:
>>> spatial_dims = spatial(shape)
>>> spatial_dims = spatial(tensor)
Usage for creating a `Shape` with only spatial dimensions:
>>> spatial_shape = spatial('undef', x=2, y=3)
(x=2, y=3, undef=None)
Here, the dimension `undef` is created with an undefined size of `None`. Undefined sizes are automatically filled in by `tensor()`, `wrap()`, `stack()` and `concat()`.
To create a shape with multiple types, use `merge_shapes()`, `concat_shapes()` or the syntax `shape1 & shape2`.
See Also: `channel()`, `batch()`, `instance()`
Args
*args: Either
* `Shape` or `Tensor` to filter or
* Names of dimensions with undefined sizes as `str`.
**dims: Dimension sizes and names. Must be empty when used as a filter operation.
Returns
`Shape` containing only dimensions of type spatial.
Expand source code
def spatial(*args, **dims: Union[int, str, tuple, list, Shape]) -> Shape:
    """
    Returns the spatial dimensions of an existing `Shape` or creates a new `Shape` with only spatial dimensions.

    Usage for filtering spatial dimensions:

        >>> spatial_dims = spatial(shape)
        >>> spatial_dims = spatial(tensor)

    Usage for creating a `Shape` with only spatial dimensions:

        >>> spatial_shape = spatial('undef', x=2, y=3)
        (x=2, y=3, undef=None)

    Here, the dimension `undef` is created with an undefined size of `None`.
    Undefined sizes are automatically filled in by `tensor`, `wrap`, `stack` and `concat`.

    To create a shape with multiple types, use `merge_shapes()`, `concat_shapes()` or the syntax `shape1 & shape2`.

    See Also:
        `channel`, `batch`, `instance`

    Args:
        *args: Either

            * `Shape` or `Tensor` to filter or
            * Names of dimensions with undefined sizes as `str`.

        **dims: Dimension sizes and names. Must be empty when used as a filter operation.

    Returns:
        `Shape` containing only dimensions of type spatial.
    """
    from .magic import Shaped
    if all(isinstance(arg, str) for arg in args) or dims:
        return _construct_shape(SPATIAL_DIM, '', *args, **dims)
    elif len(args) == 1 and isinstance(args[0], Shape):
        return args[0].spatial
    elif len(args) == 1 and isinstance(args[0], Shaped):
        return shape(args[0]).spatial
    else:
        raise AssertionError(f"spatial() must be called either as a selector spatial(Shape) or spatial(Tensor) or as a constructor spatial(*names, **dims). Got *args={args}, **dims={dims}")
def spatial_gradient(grid: phi.math._tensors.Tensor, dx: Union[phi.math._tensors.Tensor, float] = 1, difference: str = 'central', padding: Optional[Extrapolation] = boundary, dims: Union[str, tuple, list, set, phi.math._shape.Shape, Callable] = <function spatial>, stack_dim: Optional[phi.math._shape.Shape] = (gradientᶜ=None), pad=0) ‑> phi.math._tensors.Tensor
-
Calculates the spatial gradient of a scalar channel from finite differences. The spatial gradient vectors are in reverse order, lowest dimension first.
Args
grid: grid values
dims: (Optional) Dimensions along which the spatial derivative will be computed. Sequence of dimension names.
dx: Physical distance between grid points, `float` or `Tensor`. When passing a vector-valued `Tensor`, the dx values should be listed along `stack_dim`, matching `dims`.
difference: type of difference, one of ('forward', 'backward', 'central') (default 'central')
padding: tensor padding mode
stack_dim: name of the new vector dimension listing the spatial gradient w.r.t. the various axes
pad: How many cells to extend the result compared to `grid`. This value is added to the internal padding. For non-trivial extrapolations, this gives the correct result while manual padding before or after this operation would not respect the boundary locations.
Returns
`Tensor`
Expand source code
def spatial_gradient(grid: Tensor,
                     dx: Union[float, Tensor] = 1,
                     difference: str = 'central',
                     padding: Union[Extrapolation, None] = extrapolation.BOUNDARY,
                     dims: DimFilter = spatial,
                     stack_dim: Union[Shape, None] = channel('gradient'),
                     pad=0) -> Tensor:
    """
    Calculates the spatial gradient of a scalar channel from finite differences.
    The spatial gradient vectors are in reverse order, lowest dimension first.

    Args:
        grid: grid values
        dims: (Optional) Dimensions along which the spatial derivative will be computed. Sequence of dimension names.
        dx: Physical distance between grid points, `float` or `Tensor`.
            When passing a vector-valued `Tensor`, the dx values should be listed along `stack_dim`, matching `dims`.
        difference: type of difference, one of ('forward', 'backward', 'central') (default 'central')
        padding: tensor padding mode
        stack_dim: name of the new vector dimension listing the spatial gradient w.r.t. the various axes
        pad: How many cells to extend the result compared to `grid`.
            This value is added to the internal padding. For non-trivial extrapolations, this gives the correct result while manual padding before or after this operation would not respect the boundary locations.

    Returns:
        `Tensor`
    """
    grid = wrap(grid)
    if stack_dim is not None and stack_dim in grid.shape:
        assert grid.shape.only(stack_dim).size == 1, f"spatial_gradient() cannot list components along {stack_dim.name} because that dimension already exists on grid {grid}"
        grid = grid[{stack_dim.name: 0}]
    dims = grid.shape.only(dims)
    dx = wrap(dx)
    if dx.vector.exists:
        dx = dx.vector[dims]
        if dx.vector.size in (None, 1):
            dx = dx.vector[0]
    if difference.lower() == 'central':
        left, right = shift(grid, (-1, 1), dims, padding, stack_dim=stack_dim, extend_bounds=pad)
        return (right - left) / (dx * 2)
    elif difference.lower() == 'forward':
        left, right = shift(grid, (0, 1), dims, padding, stack_dim=stack_dim, extend_bounds=pad)
        return (right - left) / dx
    elif difference.lower() == 'backward':
        left, right = shift(grid, (-1, 0), dims, padding, stack_dim=stack_dim, extend_bounds=pad)
        return (right - left) / dx
    else:
        raise ValueError('Invalid difference type: {}. Can be CENTRAL, FORWARD or BACKWARD'.format(difference))
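A 1D sketch using central differences with the default boundary extrapolation:

from phi import math
from phi.math import spatial

grid = math.wrap([0., 1., 4., 9.], spatial('x'))
grad = math.spatial_gradient(grid, dx=1, difference='central', stack_dim=None)
# interior values: (4-0)/2 = 2 and (9-1)/2 = 4; the edges use boundary padding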
def sqrt(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Computes sqrt(x) of the `Tensor` or `PhiTreeNode` `x`.
Expand source code
def sqrt(x) -> Union[Tensor, PhiTreeNode]:
    """ Computes *sqrt(x)* of the `Tensor` or `phi.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.sqrt)
def stack(values: Union[tuple, list, dict], dim: phi.math._shape.Shape, expand_values=False, **kwargs)
-
Stacks `values` along the new dimension `dim`. All values must have the same spatial, instance and channel dimensions. If the dimension sizes vary, the resulting tensor will be non-uniform. Batch dimensions will be added as needed.
Stacking tensors is performed lazily, i.e. the memory is allocated only when needed. This makes repeated stacking and slicing along the same dimension very efficient, i.e. jit-compiled functions will not perform these operations.
Args
values: Collection of `Shapable`, such as `Tensor`. If a `dict`, keys must be of type `str` and are used as item names along `dim`.
dim: `Shape` with at least one dimension. None of these dimensions can be present with any of the `values`. If `dim` is a single-dimension shape, its size is determined from `len(values)` and can be left undefined (`None`). If `dim` is a multi-dimension shape, its volume must be equal to `len(values)`.
expand_values: If `True`, will first add missing dimensions to all values, not just batch dimensions. This allows tensors with different dimensions to be stacked. The resulting tensor will have all dimensions that are present in `values`.
**kwargs: Additional keyword arguments required by specific implementations. Adding spatial dimensions to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions. Adding batch dimensions must always work without keyword arguments.
Returns
`Tensor` containing `values` stacked along `dim`.
Examples
>>> stack({'x': 0, 'y': 1}, channel('vector'))
(x=0, y=1)
>>> stack([math.zeros(batch(b=2)), math.ones(batch(b=2))], channel(c='x,y'))
(x=0.000, y=1.000); (x=0.000, y=1.000) (bᵇ=2, cᶜ=x,y)
>>> stack([vec(x=1, y=0), vec(x=2, y=3.)], batch('b'))
(x=1.000, y=0.000); (x=2.000, y=3.000) (bᵇ=2, vectorᶜ=x,y)
Expand source code
def stack(values: Union[tuple, list, dict], dim: Shape, expand_values=False, **kwargs):
    """
    Stacks `values` along the new dimension `dim`.
    All values must have the same spatial, instance and channel dimensions. If the dimension sizes vary, the resulting tensor will be non-uniform.
    Batch dimensions will be added as needed.

    Stacking tensors is performed lazily, i.e. the memory is allocated only when needed.
    This makes repeated stacking and slicing along the same dimension very efficient, i.e. jit-compiled functions will not perform these operations.

    Args:
        values: Collection of `phi.math.magic.Shapable`, such as `phi.math.Tensor`
            If a `dict`, keys must be of type `str` and are used as item names along `dim`.
        dim: `Shape` with at least one dimension. None of these dimensions can be present with any of the `values`.
            If `dim` is a single-dimension shape, its size is determined from `len(values)` and can be left undefined (`None`).
            If `dim` is a multi-dimension shape, its volume must be equal to `len(values)`.
        expand_values: If `True`, will first add missing dimensions to all values, not just batch dimensions.
            This allows tensors with different dimensions to be stacked.
            The resulting tensor will have all dimensions that are present in `values`.
        **kwargs: Additional keyword arguments required by specific implementations.
            Adding spatial dimensions to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions.
            Adding batch dimensions must always work without keyword arguments.

    Returns:
        `Tensor` containing `values` stacked along `dim`.

    Examples:
        >>> stack({'x': 0, 'y': 1}, channel('vector'))
        (x=0, y=1)

        >>> stack([math.zeros(batch(b=2)), math.ones(batch(b=2))], channel(c='x,y'))
        (x=0.000, y=1.000); (x=0.000, y=1.000) (bᵇ=2, cᶜ=x,y)

        >>> stack([vec(x=1, y=0), vec(x=2, y=3.)], batch('b'))
        (x=1.000, y=0.000); (x=2.000, y=3.000) (bᵇ=2, vectorᶜ=x,y)
    """
    assert len(values) > 0, f"stack() got empty sequence {values}"
    assert isinstance(dim, Shape)
    values_ = tuple(values.values()) if isinstance(values, dict) else values
    if not expand_values:
        for v in values_[1:]:
            assert set(non_batch(v).names) == set(non_batch(values_[0]).names), f"Stacked values must have the same non-batch dimensions but got {non_batch(values_[0])} and {non_batch(v)}"
    # --- Add missing dimensions ---
    if expand_values:
        all_dims = merge_shapes(*values_, allow_varying_sizes=True)
        if isinstance(values, dict):
            values = {k: expand(v, all_dims.without(shape(v))) for k, v in values.items()}
        else:
            values = [expand(v, all_dims.without(shape(v))) for v in values]
    else:
        all_batch_dims = merge_shapes(*[batch(v) for v in values_], allow_varying_sizes=True)
        if isinstance(values, dict):
            values = {k: expand(v, all_batch_dims.without(shape(v))) for k, v in values.items()}
        else:
            values = [expand(v, all_batch_dims.without(shape(v))) for v in values]
    if dim.rank == 1:
        assert dim.size == len(values) or dim.size is None, f"stack dim size must match len(values) or be undefined but got {dim} for {len(values)} values"
        if dim.size is None:
            dim = dim.with_size(len(values))
        if isinstance(values, dict):
            dim_item_names = tuple(values.keys())
            values = tuple(values.values())
            dim = dim.with_size(dim_item_names)
        # --- First try __stack__ ---
        for v in values:
            if hasattr(v, '__stack__'):
                result = v.__stack__(values, dim, **kwargs)
                if result is not NotImplemented:
                    assert isinstance(result, Shapable), "__stack__ must return a Shapable object"
                    return result
        # --- Next: try stacking attributes for tree nodes ---
        if all(isinstance(v, PhiTreeNode) for v in values):
            attributes = all_attributes(values[0])
            if attributes and all(all_attributes(v) == attributes for v in values):
                new_attrs = {}
                for a in attributes:
                    assert all(dim not in shape(getattr(v, a)) for v in values), f"Cannot stack attribute {a} because one of the values contains the stack dimension {dim}."
                    a_values = [getattr(v, a) for v in values]
                    if all(v is a_values[0] for v in a_values[1:]):
                        new_attrs[a] = expand(a_values[0], dim, **kwargs)
                    else:
                        new_attrs[a] = stack(a_values, dim, expand_values=expand_values, **kwargs)
                return copy_with(values[0], **new_attrs)
            else:
                warnings.warn(f"Failed to stack values using value attributes because attributes differ among values {values}")
        # --- Fallback: use expand and concat ---
        for v in values:
            if not hasattr(v, '__stack__') and hasattr(v, '__concat__') and hasattr(v, '__expand__'):
                expanded_values = tuple([expand(v, dim.with_size(1 if dim.item_names[0] is None else dim.item_names[0][i]), **kwargs) for i, v in enumerate(values)])
                if len(expanded_values) > 8:
                    warnings.warn(f"stack() default implementation is slow on large dimensions ({dim.name}={len(expanded_values)}). Please implement __stack__()", RuntimeWarning, stacklevel=2)
                result = v.__concat__(expanded_values, dim.name, **kwargs)
                if result is not NotImplemented:
                    assert isinstance(result, Shapable), "__concat__ must return a Shapable object"
                    return result
        # --- else maybe all values are native scalars ---
        from ._tensors import wrap
        try:
            values = tuple([wrap(v) for v in values])
        except ValueError:
            raise MagicNotImplemented(f"At least one item in values must be Shapable but got types {[type(v) for v in values]}")
        return values[0].__stack__(values, dim, **kwargs)
    else:  # multi-dim stack
        assert dim.volume == len(values), f"When passing multiple stack dims, their volume must equal len(values) but got {dim} for {len(values)} values"
        if isinstance(values, dict):
            warnings.warn(f"When stacking a dict along multiple dimensions, the key names are discarded. Got keys {tuple(values.keys())}", RuntimeWarning, stacklevel=2)
            values = tuple(values.values())
        # --- if any value implements Shapable, use stack and unpack_dim ---
        for v in values:
            if hasattr(v, '__stack__') and hasattr(v, '__unpack_dim__'):
                stack_dim = batch('_stack')
                stacked = v.__stack__(values, stack_dim, **kwargs)
                if stacked is not NotImplemented:
                    assert isinstance(stacked, Shapable), "__stack__ must return a Shapable object"
                    assert hasattr(stacked, '__unpack_dim__'), "If a value supports __unpack_dim__, the result of __stack__ must also support it."
                    reshaped = stacked.__unpack_dim__(stack_dim.name, dim, **kwargs)
                    if reshaped is NotImplemented:
                        warnings.warn("__unpack_dim__ is overridden but returned NotImplemented during multi-dimensional stack. This results in unnecessary stack operations.", RuntimeWarning, stacklevel=2)
                    else:
                        return reshaped
        # --- Fallback: multi-level stack ---
        for dim_ in reversed(dim):
            values = [stack(values[i:i + dim_.size], dim_, **kwargs) for i in range(0, len(values), dim_.size)]
        return values[0]
def std(value: Union[phi.math._tensors.Tensor, list, tuple, numbers.Number, bool], dim: Union[str, tuple, list, set, phi.math._shape.Shape, Callable] = <function non_batch>) ‑> phi.math._tensors.Tensor
-
Computes the standard deviation over value along the specified dimensions.
Warning: The standard deviation of non-uniform tensors along the stack dimension is undefined.
Args
value
- Tensor or list / tuple of Tensors.
dim
- Dimension or dimensions to be reduced. One of
* None to reduce all non-batch dimensions
* str containing single dimension or comma-separated list of dimensions
* Tuple[str] or List[str]
* Shape
* batch(), instance(), spatial(), channel() to select dimensions by type
* '0' when isinstance(value, (tuple, list)) to reduce the sequence of Tensors
Returns
Tensor without the reduced dimensions.
Expand source code
def std(value: Union[Tensor, list, tuple, Number, bool], dim: DimFilter = non_batch) -> Tensor:
    """
    Computes the standard deviation over `value` along the specified dimensions.

    *Warning*: The standard deviation of non-uniform tensors along the stack dimension is undefined.

    Args:
        value: `Tensor` or `list` / `tuple` of Tensors.
        dim: Dimension or dimensions to be reduced. One of

            * `None` to reduce all non-batch dimensions
            * `str` containing single dimension or comma-separated list of dimensions
            * `Tuple[str]` or `List[str]`
            * `Shape`
            * `batch`, `instance`, `spatial`, `channel` to select dimensions by type
            * `'0'` when `isinstance(value, (tuple, list))` to reduce the sequence of Tensors

    Returns:
        `Tensor` without the reduced dimensions.
    """
    if not dim:
        warnings.warn("std along empty shape returns 0", RuntimeWarning, stacklevel=2)
        return zeros_like(value)
    if not callable(dim) and set(parse_dim_order(dim)) - set(value.shape.names):
        return zeros_like(value)  # std along constant dim is 0
    return reduce_(_std, value, dim)
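Illustrative example (not from the original docs): by default, std reduces all non-batch dimensions.
>>> from phi import math
>>> data = math.random_normal(math.batch(b=2), math.spatial(x=100))
>>> per_batch = math.std(data)       # reduces 'x', keeps the batch dim 'b'
>>> overall = math.std(data, 'x,b')  # reduces both dimensions to a scalar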
def stop_gradient(x)
-
Disables gradients for the given tensor. This may switch off the gradients for x itself or create a copy of x with disabled gradients.
Implementations:
- PyTorch: x.detach()
- TensorFlow: tf.stop_gradient
- Jax: jax.lax.stop_gradient
Args
x
- Tensor or PhiTreeNode for which gradients should be disabled.
Returns
Copy of x.
Expand source code
def stop_gradient(x):
    """
    Disables gradients for the given tensor.
    This may switch off the gradients for `x` itself or create a copy of `x` with disabled gradients.

    Implementations:

    * PyTorch: [`x.detach()`](https://pytorch.org/docs/stable/autograd.html#torch.Tensor.detach)
    * TensorFlow: [`tf.stop_gradient`](https://www.tensorflow.org/api_docs/python/tf/stop_gradient)
    * Jax: [`jax.lax.stop_gradient`](https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.stop_gradient.html)

    Args:
        x: `Tensor` or `phi.math.magic.PhiTreeNode` for which gradients should be disabled.

    Returns:
        Copy of `x`.
    """
    if isinstance(x, Tensor):
        return x._op1(lambda native: choose_backend(native).stop_gradient(native))
    elif isinstance(x, PhiTreeNode):
        nest, values = disassemble_tree(x)
        new_values = [stop_gradient(v) for v in values]
        return assemble_tree(nest, new_values)
    else:
        return wrap(choose_backend(x).stop_gradient(x))
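Illustrative example (not from the original docs): detaching a branch so no gradients flow through it.
>>> from phi import math
>>> x = math.tensor([1., 2., 3.], math.spatial('x'))
>>> target = math.stop_gradient(x * 2)  # treated as a constant by differentiation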
def stored_indices(x: phi.math._tensors.Tensor, list_dim=(entriesⁱ=None), index_dim=(indexᶜ=None), invalid='discard') ‑> phi.math._tensors.Tensor
-
Returns the indices of the stored values for a given Tensor. For sparse tensors, this will return the stored indices tensor. For collapsed tensors, only the stored dimensions will be returned.
Args
x
- Tensor
list_dim
- Dimension along which stored indices should be laid out.
invalid
- One of 'discard', 'clamp', 'keep'. Filter result by valid indices. Internally, invalid indices may be stored for performance reasons.
Returns
Tensor representing all indices of stored values.
Expand source code
def stored_indices(x: Tensor, list_dim=instance('entries'), index_dim=channel('index'), invalid='discard') -> Tensor:
    """
    Returns the indices of the stored values for a given `Tensor`.
    For sparse tensors, this will return the stored indices tensor.
    For collapsed tensors, only the stored dimensions will be returned.

    Args:
        x: `Tensor`
        list_dim: Dimension along which stored indices should be laid out.
        invalid: One of `'discard'`, `'clamp'`, `'keep'` Filter result by valid indices.
            Internally, invalid indices may be stored for performance reasons.

    Returns:
        `Tensor` representing all indices of stored values.
    """
    assert invalid in ['discard', 'clamp', 'keep'], f"invalid handling must be one of 'discard', 'clamp', 'keep' but got {invalid}"
    if isinstance(x, NativeTensor):
        from ._ops import meshgrid
        if batch(x):
            raise NotImplementedError
        indices = meshgrid(x._native_shape.non_batch.non_channel)
        return pack_dims(indices, non_channel, list_dim)
    if isinstance(x, TensorStack):
        if x.is_cached or not x.requires_broadcast:
            return stored_indices(cached(x))
        raise NotImplementedError
        return stack([stored_indices(t, list_dim) for t in x._tensors], x._stack_dim)  # ToDo add index for stack dim
    elif isinstance(x, CompressedSparseMatrix):
        return rename_dims(x._coo_indices(invalid, stack_dim=index_dim), instance, list_dim)
    if isinstance(x, SparseCoordinateTensor):
        if x._can_contain_double_entries:
            warnings.warn(f"stored_values of sparse tensor {x.shape} may contain multiple values for the same position.")
        new_index_dim = index_dim.with_size(channel(x._indices).item_names[0])
        return rename_dims(x._indices, [instance(x._indices).name, channel(x._indices).name], [list_dim, new_index_dim])
    raise ValueError(x)
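Illustrative example (not from the original docs; for dense tensors, every position counts as stored):
>>> from phi import math
>>> x = math.tensor([[1., 0.], [0., 2.]], math.spatial('y,x'))
>>> idx = math.stored_indices(x)  # lists one index per stored value along the entries dimension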
def stored_values(x: phi.math._tensors.Tensor, list_dim=(entriesⁱ=None), invalid='discard') ‑> phi.math._tensors.Tensor
-
Returns the stored values for a given Tensor. For sparse tensors, this will return only the stored entries. For collapsed tensors, only the stored dimensions will be returned. Dense tensors are returned as-is.
Args
x
- Tensor
list_dim
- Dimension along which stored values should be laid out.
invalid
- One of 'discard', 'clamp', 'keep'. Filter result by valid indices. Internally, invalid indices may be stored for performance reasons.
Returns
Tensor representing all values stored to represent x.
Expand source code
def stored_values(x: Tensor, list_dim=instance('entries'), invalid='discard') -> Tensor:
    """
    Returns the stored values for a given `Tensor`.
    For sparse tensors, this will return only the stored entries.
    For collapsed tensors, only the stored dimensions will be returned.
    Dense tensors are returned as-is.

    Args:
        x: `Tensor`
        list_dim: Dimension along which stored values should be laid out.
        invalid: One of `'discard'`, `'clamp'`, `'keep'` Filter result by valid indices.
            Internally, invalid indices may be stored for performance reasons.

    Returns:
        `Tensor` representing all values stored to represent `x`.
    """
    assert invalid in ['discard', 'clamp', 'keep'], f"invalid handling must be one of 'discard', 'clamp', 'keep' but got {invalid}"
    if isinstance(x, NativeTensor):
        return expand(NativeTensor(x._native, x._native_shape, x._native_shape), list_dim.with_size(1))
    if isinstance(x, TensorStack):
        if x.is_cached:
            return stored_values(cached(x))
        return stack([stored_values(t, list_dim) for t in x._tensors], x._stack_dim)
    elif isinstance(x, CompressedSparseMatrix):
        if invalid in ['keep', 'clamp']:
            return rename_dims(x._values, instance, list_dim)
        else:
            x = x.decompress()  # or apply slices, then return values
    if isinstance(x, SparseCoordinateTensor):
        if x._can_contain_double_entries:
            warnings.warn(f"stored_values of sparse tensor {x.shape} may contain multiple values for the same position.")
        return rename_dims(x._values, instance, list_dim)
    raise ValueError(x)
def sum(value: Union[phi.math._tensors.Tensor, list, tuple, numbers.Number, bool], dim: Union[str, tuple, list, set, phi.math._shape.Shape, Callable] = <function non_batch>) ‑> phi.math._tensors.Tensor
-
Sums values along the specified dimensions.
Args
value
- Tensor or list / tuple of Tensors.
dim
- Dimension or dimensions to be reduced. One of
* None to reduce all non-batch dimensions
* str containing single dimension or comma-separated list of dimensions
* Tuple[str] or List[str]
* Shape
* batch(), instance(), spatial(), channel() to select dimensions by type
* '0' when isinstance(value, (tuple, list)) to add up the sequence of Tensors
Returns
Tensor without the reduced dimensions.
Expand source code
def sum_(value: Union[Tensor, list, tuple, Number, bool], dim: DimFilter = non_batch) -> Tensor:
    """
    Sums `values` along the specified dimensions.

    Args:
        value: `Tensor` or `list` / `tuple` of Tensors.
        dim: Dimension or dimensions to be reduced. One of

            * `None` to reduce all non-batch dimensions
            * `str` containing single dimension or comma-separated list of dimensions
            * `Tuple[str]` or `List[str]`
            * `Shape`
            * `batch`, `instance`, `spatial`, `channel` to select dimensions by type
            * `'0'` when `isinstance(value, (tuple, list))` to add up the sequence of Tensors

    Returns:
        `Tensor` without the reduced dimensions.
    """
    return reduce_(_sum, bool_to_int(value), dim, require_all_dims_present=True)
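Illustrative example (not from the original docs):
>>> from phi import math
>>> x = math.tensor([[1, 2], [3, 4]], math.spatial('y,x'))
>>> total = math.sum(x)         # 10: all non-batch dims reduced by default
>>> per_row = math.sum(x, 'x')  # reduces only 'x', keeping 'y': [3, 7]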
def tan(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Computes tan(x) of the Tensor or PhiTreeNode x.
Expand source code
def tan(x) -> Union[Tensor, PhiTreeNode]:
    """ Computes *tan(x)* of the `Tensor` or `phi.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.tan)
def tanh(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Computes tanh(x) of the Tensor or PhiTreeNode x.
Expand source code
def tanh(x) -> Union[Tensor, PhiTreeNode]:
    """ Computes *tanh(x)* of the `Tensor` or `phi.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.tanh)
def tensor(data, *shape: phi.math._shape.Shape, convert: bool = True, default_list_dim=(vectorᶜ=None)) ‑> phi.math._tensors.Tensor
-
Create a Tensor from the specified data. If convert=True, converts data to the preferred format of the default backend.
data must be one of the following:
- Number: returns a dimensionless Tensor.
- Native tensor such as NumPy array, TensorFlow tensor or PyTorch tensor.
- tuple or list of numbers: backs the Tensor with a native tensor.
- tuple or list of non-numbers: creates tensors for the items and stacks them.
- Tensor: renames dimensions and dimension types if names is specified. Converts all internal native values of the tensor if convert=True.
- Shape: creates a 1D tensor listing the dimension sizes.
While specifying names is optional in some cases, it is recommended to always specify them.
Dimension types are always inferred from the dimension names if specified.
Implementations:
- NumPy: numpy.array
- PyTorch: torch.tensor, torch.from_numpy
- TensorFlow: tf.convert_to_tensor
- Jax: jax.numpy.array
See Also: wrap() which uses convert=False, layout().
Args
data
- native tensor, scalar, sequence, Shape or Tensor
shape
- Ordered dimensions and types. If sizes are defined, they will be checked against data.
convert
- If True, converts the data to the native format of the current default backend. If False, wraps the data in a Tensor but keeps the given data reference if possible.
Raises
AssertionError
- if dimension names are not provided and cannot automatically be inferred
ValueError
- if data is not tensor-like
Returns
Tensor containing the same values as data.
Examples
>>> tensor([1, 2, 3], channel(vector='x,y,z'))
(x=1, y=2, z=3)

>>> tensor([1., 2, 3], channel(vector='x,y,z'))
(x=1.000, y=2.000, z=3.000) float64

>>> tensor(numpy.zeros([10, 8, 6, 2]), batch('batch'), spatial('x,y'), channel(vector='x,y'))
(batchᵇ=10, xˢ=8, yˢ=6, vectorᶜ=x,y) float64 const 0.0

>>> tensor([(0, 1), (0, 2), (1, 3)], instance('particles'), channel(vector='x,y'))
(x=0, y=1); (x=0, y=2); (x=1, y=3) (particlesⁱ=3, vectorᶜ=x,y)

>>> tensor(numpy.random.randn(10))
(vectorᶜ=10) float64 -0.128 ± 1.197 (-2e+00...2e+00)
Expand source code
def tensor(data,
           *shape: Shape,
           convert: bool = True,
           default_list_dim=channel('vector')) -> Tensor:  # TODO assume convert_unsupported, add convert_external=False for constants
    """
    Create a Tensor from the specified `data`.
    If `convert=True`, converts `data` to the preferred format of the default backend.

    `data` must be one of the following:

    * Number: returns a dimensionless Tensor.
    * Native tensor such as NumPy array, TensorFlow tensor or PyTorch tensor.
    * `tuple` or `list` of numbers: backs the Tensor with a native tensor.
    * `tuple` or `list` of non-numbers: creates tensors for the items and stacks them.
    * Tensor: renames dimensions and dimension types if `names` is specified. Converts all internal native values of the tensor if `convert=True`.
    * Shape: creates a 1D tensor listing the dimension sizes.

    While specifying `names` is optional in some cases, it is recommended to always specify them.

    Dimension types are always inferred from the dimension names if specified.

    Implementations:

    * NumPy: [`numpy.array`](https://numpy.org/doc/stable/reference/generated/numpy.array.html)
    * PyTorch: [`torch.tensor`](https://pytorch.org/docs/stable/generated/torch.tensor.html), [`torch.from_numpy`](https://pytorch.org/docs/stable/generated/torch.from_numpy.html)
    * TensorFlow: [`tf.convert_to_tensor`](https://www.tensorflow.org/api_docs/python/tf/convert_to_tensor)
    * Jax: [`jax.numpy.array`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.array.html)

    See Also:
        `phi.math.wrap()` which uses `convert=False`, `layout()`.

    Args:
        data: native tensor, scalar, sequence, Shape or Tensor
        shape: Ordered dimensions and types. If sizes are defined, they will be checked against `data`.
        convert: If True, converts the data to the native format of the current default backend.
            If False, wraps the data in a `Tensor` but keeps the given data reference if possible.

    Raises:
        AssertionError: if dimension names are not provided and cannot automatically be inferred
        ValueError: if `data` is not tensor-like

    Returns:
        Tensor containing the same values as `data`.

    Examples:
        >>> tensor([1, 2, 3], channel(vector='x,y,z'))
        (x=1, y=2, z=3)

        >>> tensor([1., 2, 3], channel(vector='x,y,z'))
        (x=1.000, y=2.000, z=3.000) float64

        >>> tensor(numpy.zeros([10, 8, 6, 2]), batch('batch'), spatial('x,y'), channel(vector='x,y'))
        (batchᵇ=10, xˢ=8, yˢ=6, vectorᶜ=x,y) float64 const 0.0

        >>> tensor([(0, 1), (0, 2), (1, 3)], instance('particles'), channel(vector='x,y'))
        (x=0, y=1); (x=0, y=2); (x=1, y=3) (particlesⁱ=3, vectorᶜ=x,y)

        >>> tensor(numpy.random.randn(10))
        (vectorᶜ=10) float64 -0.128 ± 1.197 (-2e+00...2e+00)
    """
    assert all(isinstance(s, Shape) for s in shape), f"Cannot create tensor because shape needs to be one or multiple Shape instances but got {shape}"
    shape = None if len(shape) == 0 else concat_shapes(*shape)
    if isinstance(data, Tensor):
        if convert:
            backend = data.default_backend
            if backend != default_backend():
                data = data._op1(lambda n: convert_(n, use_dlpack=False))
        if shape is None:
            return data
        else:
            if None in shape.sizes:
                shape = shape.with_sizes(data.shape.sizes)
            return data._with_shape_replaced(shape)
    elif isinstance(data, Shape):
        if shape is None:
            shape = channel('dims')
        else:
            assert shape.rank == 1, "Can only convert 1D shapes to Tensors"
        shape = shape.with_size(data.names)
        data = data.sizes
    elif isinstance(data, str) or data is None:
        return layout(data)
    elif isinstance(data, (numbers.Number, bool)):
        assert not shape, f"Trying to create a zero-dimensional Tensor from value '{data}' but shape={shape}"
        if convert:
            data = default_backend().as_tensor(data, convert_external=True)
        return NativeTensor(data, EMPTY_SHAPE)
    if isinstance(data, (tuple, list)):
        if all(isinstance(d, (bool, int, float, complex)) for d in data):
            array = np.array(data)
            assert array.dtype != object
            data = array
        elif all(isinstance(d, str) for d in data):
            return layout(data, shape or default_list_dim)
        else:
            try:
                inner_shape = [] if shape is None else [shape[1:]]
                tensors = [d if isinstance(d, Tensor) else tensor(d, *inner_shape, convert=convert) for d in data]
                return stack(tensors, default_list_dim if shape is None else shape[0].with_sizes([len(tensors)]), expand_values=True)
            except IncompatibleShapes:
                assert not convert, f"Cannot convert {data} to tensor given shape {shape}"
                return layout(data, shape or default_list_dim)
            except ValueError:
                assert not convert, f"Cannot convert {data} to tensor"
                return layout(data, shape or default_list_dim)
    try:
        backend = choose_backend(data)
        if shape is None:
            assert backend.ndims(data) <= 1, "Specify dimension names for tensors with more than 1 dimension"
            shape = default_list_dim if backend.ndims(data) == 1 else EMPTY_SHAPE
            shape = shape.with_sizes(backend.staticshape(data))
        else:  # fill in sizes or check them
            sizes = backend.staticshape(data)
            if len(sizes) != len(shape):
                raise IncompatibleShapes(f"Rank of given shape {shape} does not match data with sizes {sizes}")
            for size, s in zip(sizes, shape.sizes):
                if s is not None:
                    assert s == size, f"Given shape {shape} does not match data with sizes {sizes}. Consider leaving the sizes undefined."
            shape = shape.with_sizes(sizes, keep_item_names=True)
        if convert:
            data = convert_(data, use_dlpack=False)
        return NativeTensor(data, shape)
    except NoBackendFound:
        raise ValueError(f"{type(data)} is not supported. Only (Tensor, tuple, list, np.ndarray, native tensors) are allowed.\nCurrent backends: {BACKENDS}")
def tensor_like(existing_tensor: phi.math._tensors.Tensor, values: Union[phi.math._tensors.Tensor, numbers.Number, bool], value_order: str = None)
-
Creates a tensor with the same format and shape as existing_tensor.
Args
existing_tensor
- Any Tensor, sparse or dense.
values
- New values to replace the existing values by. If existing_tensor is sparse, values must have an instance dimension to list the stored values, matching the sparse indices.
value_order
- Order of values compared to existing_tensor. If 'original', the values are ordered like the values that were used to create the first tensor with this sparsity pattern. If 'as existing', the values match the current order of existing_tensor. Note that the order of values may be changed upon creating a sparse tensor.
Returns
Tensor
Expand source code
def tensor_like(existing_tensor: Tensor, values: Union[Tensor, Number, bool], value_order: str = None):
    """
    Creates a tensor with the same format and shape as `existing_tensor`.

    Args:
        existing_tensor: Any `Tensor`, sparse or dense.
        values: New values to replace the existing values by.
            If `existing_tensor` is sparse, `values` must have an instance dimension to list the stored values, matching the sparse indices.
        value_order: Order of `values` compared to `existing_tensor`.
            If `'original'`, the values are ordered like the values that were used to create the first tensor with this sparsity pattern.
            If `'as existing'`, the values match the current order of `existing_tensor`.
            Note that the order of values may be changed upon creating a sparse tensor.

    Returns:
        `Tensor`
    """
    assert value_order in ['original', 'as existing', None]
    if isinstance(existing_tensor, (SparseCoordinateTensor, CompressedSparseMatrix)):
        if value_order is None:
            assert not instance(values), f"When creating a sparse tensor from a list of values, value_order must be specified."
        if instance(values):
            values = rename_dims(values, instance, instance(existing_tensor._values))
        values = expand(values, instance(existing_tensor._values))
        if value_order == 'original' and isinstance(existing_tensor, CompressedSparseMatrix) and existing_tensor._uncompressed_indices_perm is not None:
            values = values[existing_tensor._uncompressed_indices_perm]
        if isinstance(existing_tensor, CompressedSparseMatrix) and existing_tensor._uncompressed_offset is not None:
            from ._ops import where
            values = where(existing_tensor._valid_mask(), values, 0)
        return existing_tensor._with_values(values)
    if not is_sparse(existing_tensor):
        return unpack_dim(values, instance, existing_tensor.shape.non_channel.non_batch)
    raise NotImplementedError
def to_complex(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Converts the given tensor to complex floating point format with the currently specified precision.
The precision can be set globally using math.set_global_precision() and locally using with math.precision().
See the phi.math module documentation at https://tum-pbs.github.io/PhiFlow/Math.html
See Also: cast().
Args
x
- values to convert
Returns
Tensor of same shape as x
Expand source code
def to_complex(x) -> Union[Tensor, PhiTreeNode]:
    """
    Converts the given tensor to complex floating point format with the currently specified precision.

    The precision can be set globally using `math.set_global_precision()` and locally using `with math.precision()`.

    See the `phi.math` module documentation at https://tum-pbs.github.io/PhiFlow/Math.html

    See Also:
        `cast()`.

    Args:
        x: values to convert

    Returns:
        `Tensor` of same shape as `x`
    """
    return _backend_op1(x, Backend.to_complex)
def to_device(value, device: phi.math.backend._backend.ComputeDevice, convert=True, use_dlpack=True)
-
Allocates the tensors of value on device. If the value already exists on that device, this function may either create a copy of value or return value directly.
See Also: to_cpu().
Args
value
- Tensor or PhiTreeNode or native tensor.
device
- Device to allocate value on. Either ComputeDevice or category str, such as 'CPU' or 'GPU'.
convert
- Whether to convert tensors that do not belong to the corresponding backend to compatible native tensors. If False, this function has no effect on NumPy tensors.
use_dlpack
- Only if convert==True. Whether to use the DLPack library to convert from one GPU-enabled backend to another.
Returns
Same type as value.
Expand source code
def to_device(value, device: ComputeDevice or str, convert=True, use_dlpack=True):
    """
    Allocates the tensors of `value` on `device`.
    If the value already exists on that device, this function may either create a copy of `value` or return `value` directly.

    See Also:
        `to_cpu()`.

    Args:
        value: `Tensor` or `phi.math.magic.PhiTreeNode` or native tensor.
        device: Device to allocate value on.
            Either `ComputeDevice` or category `str`, such as `'CPU'` or `'GPU'`.
        convert: Whether to convert tensors that do not belong to the corresponding backend to compatible native tensors.
            If `False`, this function has no effect on numpy tensors.
        use_dlpack: Only if `convert==True`.
            Whether to use the DLPack library to convert from one GPU-enabled backend to another.

    Returns:
        Same type as `value`.
    """
    assert isinstance(device, (ComputeDevice, str)), f"device must be a ComputeDevice or str but got {type(device)}"
    return tree_map(_to_device, value, device=device, convert_to_backend=convert, use_dlpack=use_dlpack)
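Illustrative example (not from the original docs; assumes a GPU is visible to the current backend):
>>> from phi import math
>>> x = math.random_uniform(math.spatial(x=64))
>>> x_gpu = math.to_device(x, 'GPU')  # select by category string; pass a ComputeDevice for a specific card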
def to_dict(value: Union[phi.math._tensors.Tensor, phi.math._shape.Shape])
-
Returns a serializable form of a Tensor or Shape. The result can be written to a JSON file, for example.
See Also: from_dict().
Args
value
- Tensor or Shape
Returns
Serializable Python tree of primitives
Expand source code
def to_dict(value: Union[Tensor, Shape]):
    """
    Returns a serializable form of a `Tensor` or `Shape`.
    The result can be written to a JSON file, for example.

    See Also:
        `from_dict()`.

    Args:
        value: `Tensor` or `Shape`

    Returns:
        Serializable Python tree of primitives
    """
    if isinstance(value, Shape):
        return value._to_dict(include_sizes=True)
    elif isinstance(value, Tensor):
        return value._to_dict()
    raise ValueError(f"Cannot convert {value} to a dict")
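Illustrative example (not from the original docs): round trip through a serializable tree.
>>> from phi import math
>>> v = math.vec(x=1., y=2.)
>>> d = math.to_dict(v)     # plain Python primitives, e.g. for json.dump()
>>> v2 = math.from_dict(d)  # reconstructs an equivalent Tensor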
def to_float(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Converts the given tensor to floating point format with the currently specified precision.
The precision can be set globally using math.set_global_precision() and locally using with math.precision().
See the phi.math module documentation at https://tum-pbs.github.io/PhiFlow/Math.html
See Also: cast().
Args
x
- Tensor or PhiTreeNode to convert
Returns
Tensor or PhiTreeNode matching x.
Expand source code
def to_float(x) -> Union[Tensor, PhiTreeNode]:
    """
    Converts the given tensor to floating point format with the currently specified precision.

    The precision can be set globally using `math.set_global_precision()` and locally using `with math.precision()`.

    See the `phi.math` module documentation at https://tum-pbs.github.io/PhiFlow/Math.html

    See Also:
        `cast()`.

    Args:
        x: `Tensor` or `phi.math.magic.PhiTreeNode` to convert

    Returns:
        `Tensor` or `phi.math.magic.PhiTreeNode` matching `x`.
    """
    return _backend_op1(x, Backend.to_float)
def to_int32(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Converts the Tensor or PhiTreeNode x to 32-bit integer.
Expand source code
def to_int32(x) -> Union[Tensor, PhiTreeNode]:
    """ Converts the `Tensor` or `phi.math.magic.PhiTreeNode` `x` to 32-bit integer. """
    return _backend_op1(x, Backend.to_int32)
def to_int64(x) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Converts the Tensor or PhiTreeNode x to 64-bit integer.
Expand source code
def to_int64(x) -> Union[Tensor, PhiTreeNode]:
    """ Converts the `Tensor` or `phi.math.magic.PhiTreeNode` `x` to 64-bit integer. """
    return _backend_op1(x, Backend.to_int64)
def trace_check(f, *args, **kwargs)
-
Tests if f(*args, **kwargs) has already been traced. If true, jit-compiled functions are very fast since the Python function is not actually called anymore.
Args
f
- Transformed function, e.g. jit-compiled or linear function.
*args
- Hypothetical arguments to be passed to f
**kwargs
- Hypothetical keyword arguments to be passed to f
Returns
result
- True if there is an existing trace that can be used, False if f would have to be re-traced.
reason
- Message giving hints as to why f needs to be re-traced given args and kwargs.
Expand source code
def trace_check(f, *args, **kwargs):
    """
    Tests if `f(*args, **kwargs)` has already been traced.
    If true, jit-compiled functions are very fast since the Python function is not actually called anymore.

    Args:
        f: Transformed function, e.g. jit-compiled or linear function.
        *args: Hypothetical arguments to be passed to `f`
        **kwargs: Hypothetical keyword arguments to be passed to `f`

    Returns:
        result: `True` if there is an existing trace that can be used, `False` if `f` would have to be re-traced.
        reason: Message giving hints as to why `f` needs to be re-traced given `args` and `kwargs`.
    """
    if isinstance(f, (JitFunction, GradientFunction, HessianFunction, CustomGradientFunction)):
        keys = f.traces.keys()
    elif isinstance(f, LinearFunction):
        keys = f.matrices_and_biases.keys()
    else:
        raise ValueError(f"{f_name(f)} is not a traceable function. Only supports jit_compile, jit_compile_linear, functional_gradient, custom_gradient, jacobian, hessian")
    key, *_ = key_from_args(args, kwargs, f.f_params, aux=f.auxiliary_args)
    if not keys:
        return False, "Function has not yet been traced"
    if key in keys:
        return True, ""
    traced_key = next(iter(keys))  # ToDo compare against all
    cond_equal = key.auxiliary_kwargs == traced_key.auxiliary_kwargs
    if isinstance(cond_equal, Tensor):
        cond_equal = cond_equal.all
    if not cond_equal:
        return False, "Auxiliary arguments do not match"
    # shapes need not be compared because they are included in specs
    if traced_key.tree.keys() != key.tree.keys():
        return False, f"Different primary arguments passed: {set(traced_key.tree.keys())} vs {set(key.tree.keys())}"
    for name in traced_key.tree.keys():
        if traced_key.tree[name] != key.tree[name]:
            return False, f"Primary argument '{name}' differs in non-traced variables: {traced_key.tree[name]} vs {key.tree[name]}. Make sure the corresponding class overrides __eq__()."
    if traced_key.specs != key.specs:
        return False, "Traced variables differ in shape"
    if traced_key.backend != key.backend:
        return False, f"Function was not traced with backend {key.backend}"
    if traced_key.spatial_derivative_order != key.spatial_derivative_order:
        return False, f"Different spatial_derivative_order. This is likely an internal problem."
    return True, ""
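Illustrative example (not from the original docs; exact reason strings may differ):
>>> from phi import math
>>> fast_mul = math.jit_compile(lambda x: x * 2)
>>> x = math.zeros(math.spatial(x=8))
>>> check1 = math.trace_check(fast_mul, x)  # (False, 'Function has not yet been traced')
>>> y = fast_mul(x)                         # first call traces and compiles
>>> check2 = math.trace_check(fast_mul, x)  # now reports that an existing trace can be reused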
def transpose(x: phi.math._tensors.Tensor, axes)
-
Swap the dimension order of x. This operation is superfluous since tensors will be reshaped under the hood or when getting the native/numpy representations.
Implementations:
- NumPy: numpy.transpose
- PyTorch: x.permute
- TensorFlow: tf.transpose
- Jax: jax.numpy.transpose
Args
x
- Tensor or native tensor.
axes
- tuple or list
Returns
Tensor or native tensor, depending on x.
Expand source code
def transpose(x: Tensor, axes):
    """
    Swap the dimension order of `x`.
    This operation is superfluous since tensors will be reshaped under the hood or when getting the native/numpy representations.

    Implementations:

    * NumPy: [`numpy.transpose`](https://numpy.org/doc/stable/reference/generated/numpy.transpose.html)
    * PyTorch: [`x.permute`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.permute)
    * TensorFlow: [`tf.transpose`](https://www.tensorflow.org/api_docs/python/tf/transpose)
    * Jax: [`jax.numpy.transpose`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.transpose.html)

    Args:
        x: `Tensor` or native tensor.
        axes: `tuple` or `list`

    Returns:
        `Tensor` or native tensor, depending on `x`.
    """
    if isinstance(x, Tensor):
        return expand(x, x.shape[axes])
    else:
        return choose_backend(x).transpose(x, axes)
def unpack_dim(value, dim: Union[str, tuple, list, set, phi.math._shape.Shape, Callable], *unpacked_dims: phi.math._shape.Shape, **kwargs)
-
Decompresses a dimension by unstacking the elements along it. This function replaces the traditional reshape for these cases. The compressed dimension dim is assumed to contain elements laid out according to the order of unpacked_dims.
If dim does not exist on value, this function will return value as-is. This includes primitive types.
See Also: pack_dims()
Args
value
- Shapable, such as Tensor, for which one dimension should be split.
dim
- Single dimension to be decompressed.
*unpacked_dims
- Vararg Shape, ordered dimensions to replace dim, fulfilling unpacked_dims.volume == shape(value)[dim].size.
**kwargs
- Additional keyword arguments required by specific implementations. Adding spatial dimensions to fields requires the bounds: Box argument specifying the physical extent of the new dimensions. Adding batch dimensions must always work without keyword arguments.
Returns
Same type as value.
Examples
>>> unpack_dim(math.zeros(instance(points=12)), 'points', spatial(x=4, y=3))
(xˢ=4, yˢ=3) const 0.0
Expand source code
def unpack_dim(value, dim: DimFilter, *unpacked_dims: Shape, **kwargs):
    """
    Decompresses a dimension by unstacking the elements along it.
    This function replaces the traditional `reshape` for these cases.
    The compressed dimension `dim` is assumed to contain elements laid out according to the order of `unpacked_dims`.

    If `dim` does not exist on `value`, this function will return `value` as-is. This includes primitive types.

    See Also:
        `pack_dims()`

    Args:
        value: `phi.math.magic.Shapable`, such as `Tensor`, for which one dimension should be split.
        dim: Single dimension to be decompressed.
        *unpacked_dims: Vararg `Shape`, ordered dimensions to replace `dim`, fulfilling `unpacked_dims.volume == shape(value)[dim].size`.
        **kwargs: Additional keyword arguments required by specific implementations.
            Adding spatial dimensions to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions.
            Adding batch dimensions must always work without keyword arguments.

    Returns:
        Same type as `value`.

    Examples:
        >>> unpack_dim(math.zeros(instance(points=12)), 'points', spatial(x=4, y=3))
        (xˢ=4, yˢ=3) const 0.0
    """
    if isinstance(value, (Number, bool)):
        return value
    assert isinstance(value, Shapable) and isinstance(value, Sliceable) and isinstance(value, Shaped), f"value must be Shapable but got {type(value)}"
    dim = shape(value).only(dim)
    if dim.is_empty:
        return value  # Nothing to do, maybe expand?
    assert dim.rank == 1, f"unpack_dim requires a single dimension to be unpacked but got {dim}"
    dim = dim.name
    unpacked_dims = concat_shapes(*unpacked_dims)
    if unpacked_dims.rank == 0:
        return value[{dim: 0}]  # remove dim
    elif unpacked_dims.rank == 1:
        return rename_dims(value, dim, unpacked_dims, **kwargs)
    # --- First try __unpack_dim__ ---
    if hasattr(value, '__unpack_dim__'):
        result = value.__unpack_dim__(dim, unpacked_dims, **kwargs)
        if result is not NotImplemented:
            return result
    # --- Next try Tree Node ---
    if isinstance(value, PhiTreeNode) and all_attributes(value):
        new_attributes = {a: unpack_dim(getattr(value, a), dim, unpacked_dims, **kwargs) for a in all_attributes(value)}
        return copy_with(value, **new_attributes)
    # --- Fallback: unstack and stack ---
    if shape(value).only(dim).volume > 8:
        warnings.warn(f"unpack_dim() default implementation is slow on large dimensions ({shape(value).only(dim)}). Please implement __unpack_dim__() for {type(value).__name__} as defined in phi.math.magic", RuntimeWarning, stacklevel=2)
    unstacked = unstack(value, dim)
    for dim in reversed(unpacked_dims):
        unstacked = [stack(unstacked[i:i+dim.size], dim, **kwargs) for i in range(0, len(unstacked), dim.size)]
    return unstacked[0]
def unstack(value, dim: Union[str, tuple, list, set, phi.math._shape.Shape, Callable])
-
Un-stacks a Sliceable along one or multiple dimensions.
If multiple dimensions are given, the order of elements will be according to the dimension order in dim, i.e. elements along the last dimension will be neighbors in the returned tuple.
See Also: slice_().
Args
value
- Shapable, such as Tensor
dim
- Dimensions as Shape or comma-separated str or dimension type, i.e. channel(), spatial(), instance(), batch().
Returns
tuple of Tensor objects.
Examples
>>> unstack(expand(0, spatial(x=5)), 'x')
(0.0, 0.0, 0.0, 0.0, 0.0)
Expand source code
def unstack(value, dim: DimFilter):
    """
    Un-stacks a `Sliceable` along one or multiple dimensions.

    If multiple dimensions are given, the order of elements will be according to the dimension order in `dim`,
    i.e. elements along the last dimension will be neighbors in the returned `tuple`.

    See Also:
        `phi.math.slice`.

    Args:
        value: `phi.math.magic.Shapable`, such as `phi.math.Tensor`
        dim: Dimensions as `Shape` or comma-separated `str` or dimension type, i.e. `channel`, `spatial`, `instance`, `batch`.

    Returns:
        `tuple` of `Tensor` objects.

    Examples:
        >>> unstack(expand(0, spatial(x=5)), 'x')
        (0.0, 0.0, 0.0, 0.0, 0.0)
    """
    assert isinstance(value, Sliceable) and isinstance(value, Shaped), f"Cannot unstack {type(value).__name__}. Must be Sliceable and Shaped, see https://tum-pbs.github.io/PhiFlow/phi/math/magic.html"
    dims = shape(value).only(dim)
    assert dims.rank > 0, "unstack() requires at least one dimension"
    if dims.rank == 1:
        if hasattr(value, '__unstack__'):
            result = value.__unstack__(dims.names)
            if result is not NotImplemented:
                assert isinstance(result, tuple), f"__unstack__ must return a tuple but got {type(result)}"
                assert all([isinstance(item, Sliceable) for item in result]), f"__unstack__ must return a tuple of Sliceable objects but not all items were sliceable in {result}"
                return result
        return tuple([slice_(value, {dims.name: i}) for i in range(dims.size)])
    else:  # multiple dimensions
        if hasattr(value, '__pack_dims__'):
            packed_dim = batch('_unstack')
            value_packed = value.__pack_dims__(dims.names, packed_dim, pos=None)
            if value_packed is not NotImplemented:
                return unstack(value_packed, packed_dim)
        unstack_dim = _any_uniform_dim(dims)
        first_unstacked = unstack(value, unstack_dim)
        inner_unstacked = [unstack(v, dims.without(unstack_dim)) for v in first_unstacked]
        return sum(inner_unstacked, ())
def upsample2x(grid: phi.math._tensors.Tensor, padding: Extrapolation = boundary, dims: Union[str, tuple, list, set, phi.math._shape.Shape, Callable] = <function spatial>) ‑> phi.math._tensors.Tensor
-
Resamples a regular grid to double the number of spatial sample points per dimension. The grid values at the new points are determined via linear interpolation.
Args
grid
- half-size grid
padding
- grid extrapolation (default: extrapolation.BOUNDARY)
dims
- dims along which up-sampling is applied, defaults to all spatial dims
Returns
double-size grid
Expand source code
def upsample2x(grid: Tensor, padding: Extrapolation = extrapolation.BOUNDARY, dims: DimFilter = spatial) -> Tensor:
    """
    Resamples a regular grid to double the number of spatial sample points per dimension.
    The grid values at the new points are determined via linear interpolation.

    Args:
        grid: half-size grid
        padding: grid extrapolation (default: extrapolation.BOUNDARY)
        dims: dims along which up-sampling is applied, defaults to all spatial dims

    Returns:
        double-size grid
    """
    for dim in grid.shape.only(dims):
        left, center, right = shift(grid, (-1, 0, 1), dim.names, padding, None)
        interp_left = 0.25 * left + 0.75 * center
        interp_right = 0.75 * center + 0.25 * right
        stacked = math.stack_tensors([interp_left, interp_right], channel(_interleave='left,right'))
        grid = math.pack_dims(stacked, (dim.name, '_interleave'), dim)
    return grid
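Illustrative example (not from the original docs): doubling and halving the resolution of a 2D grid.
>>> from phi import math
>>> coarse = math.random_uniform(math.spatial(x=8, y=8))
>>> fine = math.upsample2x(coarse)     # spatial size (xˢ=16, yˢ=16)
>>> coarse2 = math.downsample2x(fine)  # back to (xˢ=8, yˢ=8)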
def vec(name: Union[str, phi.math._shape.Shape] = 'vector', *sequence, tuple_dim=(sequenceˢ=None), list_dim=(sequenceⁱ=None), **components) ‑> phi.math._tensors.Tensor
-
Lay out the given values along a channel dimension without converting them to the current backend.
Args
name
- Dimension name.
*sequence
- Component values that will also be used as item names. If specified, components must be empty.
**components
- Values by component name. If specified, no additional positional arguments must be given.
tuple_dim
- Dimension for tuple values passed as components, e.g. vec(x=(0, 1), ...)
list_dim
- Dimension for list values passed as components, e.g. vec(x=[0, 1], ...)
Returns
Tensor
Examples
>>> vec(x=1, y=0, z=-1)
(x=1, y=0, z=-1)

>>> vec(x=1., z=0)
(x=1.000, z=0.000)

>>> vec(x=tensor([1, 2, 3], instance('particles')), y=0)
(x=1, y=0); (x=2, y=0); (x=3, y=0) (particlesⁱ=3, vectorᶜ=x,y)

>>> vec(x=0, y=[0, 1])
(x=0, y=0); (x=0, y=1) (vectorᶜ=x,y, sequenceⁱ=2)

>>> vec(x=0, y=(0, 1))
(x=0, y=0); (x=0, y=1) (sequenceˢ=2, vectorᶜ=x,y)
Expand source code
def vec(name: Union[str, Shape] = 'vector', *sequence, tuple_dim=spatial('sequence'), list_dim=instance('sequence'), **components) -> Tensor:
    """
    Lay out the given values along a channel dimension without converting them to the current backend.

    Args:
        name: Dimension name.
        *sequence: Component values that will also be used as item names.
            If specified, `components` must be empty.
        **components: Values by component name.
            If specified, no additional positional arguments must be given.
        tuple_dim: Dimension for `tuple` values passed as components, e.g. `vec(x=(0, 1), ...)`
        list_dim: Dimension for `list` values passed as components, e.g. `vec(x=[0, 1], ...)`

    Returns:
        `Tensor`

    Examples:
        >>> vec(x=1, y=0, z=-1)
        (x=1, y=0, z=-1)

        >>> vec(x=1., z=0)
        (x=1.000, z=0.000)

        >>> vec(x=tensor([1, 2, 3], instance('particles')), y=0)
        (x=1, y=0); (x=2, y=0); (x=3, y=0) (particlesⁱ=3, vectorᶜ=x,y)

        >>> vec(x=0, y=[0, 1])
        (x=0, y=0); (x=0, y=1) (vectorᶜ=x,y, sequenceⁱ=2)

        >>> vec(x=0, y=(0, 1))
        (x=0, y=0); (x=0, y=1) (sequenceˢ=2, vectorᶜ=x,y)
    """
    dim = channel(name) if isinstance(name, str) else name
    assert isinstance(dim, Shape), f"name must be a str or Shape but got '{type(name)}'"
    if sequence:
        assert not components, "vec() must be given either positional or keyword arguments but not both"
        if len(sequence) == 1 and isinstance(sequence[0], (tuple, list)):
            sequence = sequence[0]
        dim = dim.with_size([str(v) for v in sequence])
        return wrap(sequence, dim)
    else:
        def wrap_sequence(value):
            if isinstance(value, tuple):
                return wrap(value, tuple_dim)
            elif isinstance(value, list):
                return wrap(value, list_dim)
            else:
                return value
        components = {n: wrap_sequence(v) for n, v in components.items()}
        return stack(components, dim, expand_values=True)
def vec_abs(vec: phi.math._tensors.Tensor, vec_dim: Union[str, tuple, list, set, phi.math._shape.Shape, Callable] = <function channel>, eps: Union[phi.math._tensors.Tensor, float] = None)
-
Computes the vector length of vec.
Args
eps
- Minimum vector length. Use to avoid inf gradients for zero-length vectors.
Expand source code
def vec_abs(vec: Tensor, vec_dim: DimFilter = channel, eps: Union[float, Tensor] = None):
    """
    Computes the vector length of `vec`.

    Args:
        eps: Minimum vector length. Use to avoid `inf` gradients for zero-length vectors.
    """
    if vec.dtype.kind == complex:
        vec = stack([vec.real, vec.imag], channel('_ReIm'))
    squared = vec_squared(vec, vec_dim)
    if eps is not None:
        squared = math.maximum(squared, eps)
    return math.sqrt(squared)
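Illustrative example (not from the original docs):
>>> from phi import math
>>> v = math.vec(x=3., y=4.)
>>> length = math.vec_abs(v)          # 5.0
>>> safe = math.vec_abs(v, eps=1e-5)  # clamps the squared length away from 0 for stable gradients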
def vec_length(vec: phi.math._tensors.Tensor, vec_dim: Union[str, tuple, list, set, phi.math._shape.Shape, Callable] = <function channel>, eps: Union[phi.math._tensors.Tensor, float] = None)
-
Computes the vector length of vec. Alias for vec_abs().
Args
eps
- Minimum vector length. Use to avoid inf gradients for zero-length vectors.
Expand source code
Expand source code
def vec_abs(vec: Tensor, vec_dim: DimFilter = channel, eps: Union[float, Tensor] = None):
    """
    Computes the vector length of `vec`.

    Args:
        eps: Minimum vector length. Use to avoid `inf` gradients for zero-length vectors.
    """
    if vec.dtype.kind == complex:
        vec = stack([vec.real, vec.imag], channel('_ReIm'))
    squared = vec_squared(vec, vec_dim)
    if eps is not None:
        squared = math.maximum(squared, eps)
    return math.sqrt(squared)
def vec_normalize(vec: phi.math._tensors.Tensor, vec_dim: Union[str, tuple, list, set, phi.math._shape.Shape, Callable] = <function channel>)
-
Normalizes the vectors in vec. If vec_dim is None, the combined channel dimensions of vec are interpreted as a vector.
Expand source code
def vec_normalize(vec: Tensor, vec_dim: DimFilter = channel):
    """ Normalizes the vectors in `vec`. If `vec_dim` is None, the combined channel dimensions of `vec` are interpreted as a vector. """
    return vec / vec_abs(vec, vec_dim=vec_dim)
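Illustrative example (not from the original docs):
>>> from phi import math
>>> v = math.vec(x=3., y=4.)
>>> unit = math.vec_normalize(v)  # (x=0.6, y=0.8)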
def vec_squared(vec: phi.math._tensors.Tensor, vec_dim: Union[str, tuple, list, set, phi.math._shape.Shape, Callable] = <function channel>)
-
Computes the squared length of vec. If vec_dim is None, the combined channel dimensions of vec are interpreted as a vector.
Expand source code
def vec_squared(vec: Tensor, vec_dim: DimFilter = channel):
    """ Computes the squared length of `vec`. If `vec_dim` is None, the combined channel dimensions of `vec` are interpreted as a vector. """
    return math.sum_(vec ** 2, dim=vec_dim)
def where(condition: Union[phi.math._tensors.Tensor, float, int], value_true: Union[phi.math._tensors.Tensor, float, int], value_false: Union[phi.math._tensors.Tensor, float, int])
-
Builds a tensor by choosing either values from value_true or value_false depending on condition. If condition is not of type boolean, non-zero values are interpreted as True.
This function requires non-None values for value_true and value_false. To get the indices of True / non-zero values, use nonzero().
Args
condition
- determines where to choose values from value_true or from value_false
value_true
- Values to pick where condition != 0 / True
value_false
- Values to pick where condition == 0 / False
Returns
Tensor containing dimensions of all inputs.
Expand source code
def where(condition: Union[Tensor, float, int], value_true: Union[Tensor, float, int], value_false: Union[Tensor, float, int]):
    """
    Builds a tensor by choosing either values from `value_true` or `value_false` depending on `condition`.
    If `condition` is not of type boolean, non-zero values are interpreted as True.

    This function requires non-None values for `value_true` and `value_false`.
    To get the indices of True / non-zero values, use :func:`nonzero`.

    Args:
        condition: determines where to choose values from value_true or from value_false
        value_true: Values to pick where `condition != 0 / True`
        value_false: Values to pick where `condition == 0 / False`

    Returns:
        `Tensor` containing dimensions of all inputs.
    """
    condition = wrap(condition)
    value_true = wrap(value_true)
    value_false = wrap(value_false)

    def inner_where(c: Tensor, vt: Tensor, vf: Tensor):
        if vt._is_tracer or vf._is_tracer or c._is_tracer:
            return c * vt + (1 - c) * vf  # ToDo this does not take NaN into account
        if is_sparse(vt) or is_sparse(vf):
            if same_sparsity_pattern(vt, vf, allow_const=True) and same_sparsity_pattern(c, vt, allow_const=True):
                c_values = c._values if is_sparse(c) else c
                vt_values = vt._values if is_sparse(vt) else vt
                vf_values = vf._values if is_sparse(vf) else vf
                result_values = where(c_values, vt_values, vf_values)
                return c._with_values(result_values)
            raise NotImplementedError
        shape, (c, vt, vf) = broadcastable_native_tensors(c, vt, vf)
        result = choose_backend(c, vt, vf).where(c, vt, vf)
        return NativeTensor(result, shape)

    return broadcast_op(inner_where, [condition, value_true, value_false])
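Illustrative example (not from the original docs): a ReLU built from where().
>>> from phi import math
>>> x = math.tensor([-1., 2., -3.], math.spatial('x'))
>>> relu = math.where(x > 0, x, 0.)  # [0, 2, 0]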
def wrap(data, *shape: phi.math._shape.Shape) ‑> phi.math._tensors.Tensor
-
Short for tensor() with convert=False.
Expand source code
def wrap(data, *shape: Shape) -> Tensor:
    """ Short for `phi.math.tensor()` with `convert=False`. """
    return tensor(data, *shape, convert=False)  # TODO inline, simplify
def zeros(*shape: phi.math._shape.Shape, dtype=None) ‑> phi.math._tensors.Tensor
-
Define a tensor with specified shape with value 0.0 / 0 / False everywhere.
This method may not immediately allocate the memory to store the values.
See Also: zeros_like(), ones().
Args
*shape
- This (possibly empty) sequence of Shapes is concatenated, preserving the order.
dtype
- Data type as DType object. Defaults to float matching the current precision setting.
Returns
Tensor
Expand source code
def zeros(*shape: Shape, dtype=None) -> Tensor:
    """
    Define a tensor with specified shape with value `0.0` / `0` / `False` everywhere.

    This method may not immediately allocate the memory to store the values.

    See Also:
        `zeros_like()`, `ones()`.

    Args:
        *shape: This (possibly empty) sequence of `Shape`s is concatenated, preserving the order.
        dtype: Data type as `DType` object. Defaults to `float` matching the current precision setting.

    Returns:
        `Tensor`
    """
    return _initialize(lambda shape: expand_tensor(NativeTensor(default_backend().zeros((), dtype=DType.as_dtype(dtype)), EMPTY_SHAPE), shape), shape)
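Illustrative example (not from the original docs):
>>> from phi import math
>>> z = math.zeros(math.batch(b=2), math.spatial(x=4, y=3))  # lazily expanded constant tensor
>>> zi = math.zeros(math.spatial(x=4), dtype=(int, 32))      # DType.as_dtype accepts (kind, bits) tuples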
def zeros_like(obj: Union[phi.math._tensors.Tensor, PhiTreeNode]) ‑> Union[phi.math._tensors.Tensor, PhiTreeNode]
-
Create a Tensor containing only 0.0 / 0 / False with the same shape and dtype as obj.
Expand source code
def zeros_like(obj: Union[Tensor, PhiTreeNode]) -> Union[Tensor, PhiTreeNode]:
    """ Create a `Tensor` containing only `0.0` / `0` / `False` with the same shape and dtype as `obj`. """
    nest, values = disassemble_tree(obj)
    zeros_ = []
    for val in values:
        val = wrap(val)
        with val.default_backend:
            zeros_.append(zeros(val.shape, dtype=val.dtype))
    return assemble_tree(nest, zeros_)
Classes
class ConvergenceException
-
Base class for exceptions raised when a solve does not converge.
See Also: Diverged, NotConverged.
Expand source code
class ConvergenceException(RuntimeError):
    """
    Base class for exceptions raised when a solve does not converge.

    See Also:
        `Diverged`, `NotConverged`.
    """

    def __init__(self, result: SolveInfo):
        RuntimeError.__init__(self, result.msg)
        self.result: SolveInfo = result
        """ `SolveInfo` holding information about the solve. """
Ancestors
- builtins.RuntimeError
- builtins.Exception
- builtins.BaseException
Subclasses
- phi.math._optimize.Diverged
- phi.math._optimize.NotConverged
Instance variables
var result
-
SolveInfo holding information about the solve.
class DType (kind: type, bits: int = None, precision: int = None)
-
Instances of DType represent the kind and size of data elements. The data type of a Tensor can be obtained via Tensor.dtype.
The following kinds of data types are supported:
- float with 32 / 64 bits
- complex with 64 / 128 bits
- int with 8 / 16 / 32 / 64 bits
- bool with 8 bits
- str with 8·n bits
Unlike with many computing libraries, there are no global variables corresponding to the available types. Instead, data types can simply be instantiated as needed.
Args
kind
- Python type, one of (bool, int, float, complex, str)
bits
- number of bits per element, a multiple of 8.
Expand source code
class DType:
    """
    Instances of `DType` represent the kind and size of data elements.
    The data type of a `Tensor` can be obtained via `phi.math.Tensor.dtype`.

    The following kinds of data types are supported:

    * `float` with 32 / 64 bits
    * `complex` with 64 / 128 bits
    * `int` with 8 / 16 / 32 / 64 bits
    * `bool` with 8 bits
    * `str` with 8*n* bits

    Unlike with many computing libraries, there are no global variables corresponding to the available types.
    Instead, data types can simply be instantiated as needed.
    """

    def __init__(self, kind: type, bits: int = None, precision: int = None):
        """
        Args:
            kind: Python type, one of `(bool, int, float, complex, str)`
            bits: number of bits per element, a multiple of 8.
        """
        assert kind in (bool, int, float, complex, str, object)
        if kind is bool:
            assert bits is None, "Bits may not be set for bool or object"
            assert precision is None, f"Precision may only be specified for float or complex but got {kind}, precision={precision}"
            bits = 8
        elif kind == object:
            assert bits is None, "bits may not be set for bool or object"
            assert precision is None, f"Precision may only be specified for float or complex but got {kind}, precision={precision}"
            bits = int(np.round(np.log2(sys.maxsize))) + 1
        elif precision is not None:
            assert bits is None, "Specify either bits or precision when creating a DType but not both."
            assert kind in [float, complex], f"Precision may only be specified for float or complex but got {kind}, precision={precision}"
            if kind == float:
                bits = precision
            else:
                bits = precision * 2
        else:
            assert isinstance(bits, int)
        self.kind = kind
        """ Python class corresponding to the type of data, ignoring precision. One of (bool, int, float, complex, str) """
        self.bits = bits
        """ Number of bits used to store a single value of this type. See `DType.itemsize`. """

    @property
    def precision(self):
        """ Floating point precision. Only defined if `kind in (float, complex)`. For complex values, returns half of `DType.bits`. """
        if self.kind == float:
            return self.bits
        if self.kind == complex:
            return self.bits // 2
        else:
            return None

    @property
    def itemsize(self):
        """ Number of bytes used to store a single value of this type. See `DType.bits`. """
        assert self.bits % 8 == 0
        return self.bits // 8

    def __eq__(self, other):
        return isinstance(other, DType) and self.kind == other.kind and self.bits == other.bits

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash(self.kind) + hash(self.bits)

    def __repr__(self):
        return f"{self.kind.__name__}{self.bits}"

    @staticmethod
    def as_dtype(value: Union['DType', tuple, type, None]) -> Union['DType', None]:
        if isinstance(value, DType):
            return value
        elif value is int:
            return DType(int, 32)
        elif value is float:
            from phi.math import get_precision
            return DType(float, get_precision())
        elif value is complex:
            from phi.math import get_precision
            return DType(complex, 2 * get_precision())
        elif value is None:
            return None
        elif isinstance(value, tuple):
            return DType(*value)
        elif value is str:
            raise ValueError("str DTypes must specify precision")
        else:
            return DType(value)  # bool, object
Static methods
def as_dtype(value: Union[ForwardRef('DType'), tuple, type, None]) -> Optional[phi.math.backend._dtype.DType]
-
Converts value to a DType: DType instances pass through, Python types map to default-precision DTypes, tuples are unpacked as constructor arguments, and None is returned unchanged.
Expand source code
@staticmethod
def as_dtype(value: Union['DType', tuple, type, None]) -> Union['DType', None]:
    if isinstance(value, DType):
        return value
    elif value is int:
        return DType(int, 32)
    elif value is float:
        from phi.math import get_precision
        return DType(float, get_precision())
    elif value is complex:
        from phi.math import get_precision
        return DType(complex, 2 * get_precision())
    elif value is None:
        return None
    elif isinstance(value, tuple):
        return DType(*value)
    elif value is str:
        raise ValueError("str DTypes must specify precision")
    else:
        return DType(value)  # bool, object
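As a sketch of the normalization performed above (assuming the default integer width of 32 bits):

from phi.math import DType

assert DType.as_dtype(int) == DType(int, 32)            # plain Python type
assert DType.as_dtype((float, 64)) == DType(float, 64)  # tuple is unpacked
assert DType.as_dtype(None) is None                     # None passes through
# DType.as_dtype(str) raises ValueError because str DTypes must specify precision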
Instance variables
var bits
-
Number of bits used to store a single value of this type. See
DType.itemsize
.
var itemsize
-
Number of bytes used to store a single value of this type. See
DType.bits
.
Expand source code
@property
def itemsize(self):
    """ Number of bytes used to store a single value of this type. See `DType.bits`. """
    assert self.bits % 8 == 0
    return self.bits // 8
var kind
-
Python class corresponding to the type of data, ignoring precision. One of (bool, int, float, complex, str)
var precision
-
Floating point precision. Only defined if
kind in (float, complex)
. For complex values, returns half of DType.bits.
Expand source code
@property
def precision(self):
    """ Floating point precision. Only defined if `kind in (float, complex)`. For complex values, returns half of `DType.bits`. """
    if self.kind == float:
        return self.bits
    if self.kind == complex:
        return self.bits // 2
    else:
        return None
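Combining the two properties, a brief sketch:

from phi.math import DType

c128 = DType(complex, 128)
assert c128.precision == 64              # half of bits for complex
assert c128.itemsize == 16               # 128 bits = 16 bytes
assert DType(int, 32).precision is None  # only defined for float and complex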
class Dict (*args, **kwargs)
-
Dictionary of Tensor or PhiTreeNode values.
Dicts are not themselves tensors and do not have a shape.
Use layout() to treat dict instances like tensors.
In addition to dictionary functions, supports mathematical operators with other Dicts and lookup via .key syntax.
Dict implements PhiTreeNode so instances can be passed to math operations like sin().
Expand source code
class Dict(dict):
    """
    Dictionary of `Tensor` or `phi.math.magic.PhiTreeNode` values.
    Dicts are not themselves tensors and do not have a shape.
    Use `layout()` to treat `dict` instances like tensors.

    In addition to dictionary functions, supports mathematical operators with other `Dict`s and lookup via `.key` syntax.
    `Dict` implements `phi.math.magic.PhiTreeNode` so instances can be passed to math operations like `sin`.
    """

    def __value_attrs__(self):
        return tuple(self.keys())

    # --- Dict[key] ---

    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError as k:
            raise AttributeError(k)

    def __setattr__(self, key, value):
        self[key] = value

    def __delattr__(self, key):
        try:
            del self[key]
        except KeyError as k:
            raise AttributeError(k)

    # --- operators ---

    def __neg__(self):
        return Dict({k: -v for k, v in self.items()})

    def __invert__(self):
        return Dict({k: ~v for k, v in self.items()})

    def __abs__(self):
        return Dict({k: abs(v) for k, v in self.items()})

    def __round__(self, n=None):
        return Dict({k: round(v) for k, v in self.items()})

    def __add__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val + other[key] for key, val in self.items()})
        else:
            return Dict({key: val + other for key, val in self.items()})

    def __radd__(self, other):
        if isinstance(other, Dict):
            return Dict({key: other[key] + val for key, val in self.items()})
        else:
            return Dict({key: other + val for key, val in self.items()})

    def __sub__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val - other[key] for key, val in self.items()})
        else:
            return Dict({key: val - other for key, val in self.items()})

    def __rsub__(self, other):
        if isinstance(other, Dict):
            return Dict({key: other[key] - val for key, val in self.items()})
        else:
            return Dict({key: other - val for key, val in self.items()})

    def __mul__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val * other[key] for key, val in self.items()})
        else:
            return Dict({key: val * other for key, val in self.items()})

    def __rmul__(self, other):
        if isinstance(other, Dict):
            return Dict({key: other[key] * val for key, val in self.items()})
        else:
            return Dict({key: other * val for key, val in self.items()})

    def __truediv__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val / other[key] for key, val in self.items()})
        else:
            return Dict({key: val / other for key, val in self.items()})

    def __rtruediv__(self, other):
        if isinstance(other, Dict):
            return Dict({key: other[key] / val for key, val in self.items()})
        else:
            return Dict({key: other / val for key, val in self.items()})

    def __floordiv__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val // other[key] for key, val in self.items()})
        else:
            return Dict({key: val // other for key, val in self.items()})

    def __rfloordiv__(self, other):
        if isinstance(other, Dict):
            return Dict({key: other[key] // val for key, val in self.items()})
        else:
            return Dict({key: other // val for key, val in self.items()})

    def __pow__(self, power, modulo=None):
        assert modulo is None
        if isinstance(power, Dict):
            return Dict({key: val ** power[key] for key, val in self.items()})
        else:
            return Dict({key: val ** power for key, val in self.items()})

    def __rpow__(self, other):
        if isinstance(other, Dict):
            return Dict({key: other[key] ** val for key, val in self.items()})
        else:
            return Dict({key: other ** val for key, val in self.items()})

    def __mod__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val % other[key] for key, val in self.items()})
        else:
            return Dict({key: val % other for key, val in self.items()})

    def __rmod__(self, other):
        if isinstance(other, Dict):
            return Dict({key: other[key] % val for key, val in self.items()})
        else:
            return Dict({key: other % val for key, val in self.items()})

    def __eq__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val == other[key] for key, val in self.items()})
        else:
            return Dict({key: val == other for key, val in self.items()})

    def __ne__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val != other[key] for key, val in self.items()})
        else:
            return Dict({key: val != other for key, val in self.items()})

    def __lt__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val < other[key] for key, val in self.items()})
        else:
            return Dict({key: val < other for key, val in self.items()})

    def __le__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val <= other[key] for key, val in self.items()})
        else:
            return Dict({key: val <= other for key, val in self.items()})

    def __gt__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val > other[key] for key, val in self.items()})
        else:
            return Dict({key: val > other for key, val in self.items()})

    def __ge__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val >= other[key] for key, val in self.items()})
        else:
            return Dict({key: val >= other for key, val in self.items()})

    # --- overridden methods ---

    def copy(self):
        return Dict(self)
Ancestors
- builtins.dict
Methods
def copy(self)
-
D.copy() -> a shallow copy of D
Expand source code
def copy(self):
    return Dict(self)
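A minimal sketch of element-wise arithmetic on Dict values (the keys used here are illustrative):

from phi import math

state = math.Dict(x=math.ones(math.spatial(x=4)), t=0.5)
doubled = state * 2        # operators apply to every value
print(doubled.t)           # 1.0, retrieved via .key syntax
total = doubled + state    # Dict-Dict operations match entries by key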
class Diverged
-
Raised if the optimization was stopped prematurely and cannot continue. This may indicate that no solution exists.
The values of the last estimate x may or may not be finite.
This exception inherits from ConvergenceException.
See Also: NotConverged.
Expand source code
class Diverged(ConvergenceException):
    """
    Raised if the optimization was stopped prematurely and cannot continue.
    This may indicate that no solution exists.

    The values of the last estimate `x` may or may not be finite.

    This exception inherits from `ConvergenceException`.

    See Also:
        `NotConverged`.
    """

    def __init__(self, result: SolveInfo):
        ConvergenceException.__init__(self, result)
Ancestors
- phi.math._optimize.ConvergenceException
- builtins.RuntimeError
- builtins.Exception
- builtins.BaseException
class IncompatibleShapes (message, *shapes: phi.math._shape.Shape)
-
Raised when the shape of a tensor does not match the other arguments.
Expand source code
class IncompatibleShapes(Exception):
    """
    Raised when the shape of a tensor does not match the other arguments.
    """

    def __init__(self, message, *shapes: Shape):
        Exception.__init__(self, message)
        self.shapes = shapes
Ancestors
- builtins.Exception
- builtins.BaseException
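For illustration, a hedged sketch of an operation that raises this error because a shared dimension has conflicting sizes:

from phi import math
from phi.math import spatial

a = math.zeros(spatial(x=4))
b = math.zeros(spatial(x=5))
try:
    a + b  # dimension 'x' has size 4 in a but 5 in b
except math.IncompatibleShapes as err:
    print(err.shapes)  # the conflicting Shape objects are stored on the exception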
class LinearFunction
-
Just-in-time compiled linear function of Tensor arguments and return values.
Use jit_compile_linear() to create a linear function representation.
Expand source code
class LinearFunction(Generic[X, Y], Callable[[X], Y]): """ Just-in-time compiled linear function of `Tensor` arguments and return values. Use `jit_compile_linear()` to create a linear function representation. """ def __init__(self, f, auxiliary_args: Set[str], forget_traces: bool): self.f = f self.f_params = function_parameters(f) self.auxiliary_args = auxiliary_args self.forget_traces = forget_traces self.matrices_and_biases: Dict[SignatureKey, Tuple[SparseCoordinateTensor, Tensor]] = {} self.nl_jit = JitFunction(f, self.auxiliary_args, forget_traces) # for backends that do not support sparse matrices # def _trace(self, in_key: SignatureKey, prefer_numpy: bool) -> 'ShiftLinTracer': # assert in_key.shapes[0].is_uniform, f"math.jit_compile_linear() only supports uniform tensors for function input and output but input shape was {in_key.shapes[0]}" # with NUMPY if prefer_numpy else in_key.backend: # x = math.ones(in_key.shapes[0]) # tracer = ShiftLinTracer(x, {EMPTY_SHAPE: math.ones()}, x.shape, math.zeros(x.shape)) # _TRACING_JIT.append(self) # x_kwargs = assemble_tree(in_key.tree, [tracer]) # result = self.f(**x_kwargs, **in_key.auxiliary_kwargs) # _, result_tensors = disassemble_tree(result) # assert len(result_tensors) == 1, f"Linear function must return a single Tensor or tensor-like but got {result}" # result_tensor = result_tensors[0] # assert isinstance(result_tensor, ShiftLinTracer), f"Tracing linear function '{f_name(self.f)}' failed. Make sure only linear operations are used." # assert _TRACING_JIT.pop(-1) is self # return result_tensor def _get_or_trace(self, key: SignatureKey, args: tuple, f_kwargs: dict): if not key.tracing and key in self.matrices_and_biases: return self.matrices_and_biases[key] else: if self.forget_traces: self.matrices_and_biases.clear() matrix, bias = matrix_from_function(self.f, *args, **f_kwargs, auto_compress=True) if not key.tracing: self.matrices_and_biases[key] = matrix, bias if len(self.matrices_and_biases) >= 4: warnings.warn(f"""Φ-lin: The compiled linear function '{f_name(self.f)}' was traced {len(self.matrices_and_biases)} times. Performing many traces may be slow and cause memory leaks. Tensors in auxiliary arguments (all except the first parameter unless specified otherwise) are compared by reference, not by tensor values. Auxiliary arguments: {key.auxiliary_kwargs} Multiple linear traces can be avoided by jit-compiling the code that calls the linear function or setting forget_traces=True.""", RuntimeWarning, stacklevel=3) return matrix, bias def __call__(self, *args: X, **kwargs) -> Y: try: key, tensors, natives, x = key_from_args(args, kwargs, self.f_params, cache=False, aux=self.auxiliary_args) except LinearTraceInProgress: return self.f(*args, **kwargs) assert tensors, "Linear function requires at least one argument" if any(isinstance(t, ShiftLinTracer) for t in tensors): # TODO: if t is identity, use cached ShiftLinTracer, otherwise multiply two ShiftLinTracers return self.f(*args, **kwargs) if not key.backend.supports(Backend.sparse_coo_tensor): # This might be called inside a Jax linear solve # warnings.warn(f"Sparse matrices are not supported by {backend}. Falling back to regular jit compilation.", RuntimeWarning) if not math.all_available(*tensors): # avoid nested tracing, Typical case jax.scipy.sparse.cg(LinearFunction). Nested traces cannot be reused which results in lots of traces per cg. 
PHI_LOGGER.debug(f"Φ-lin: Running '{f_name(self.f)}' as-is with {key.backend} because it is being traced.") return self.f(*args, **kwargs) else: return self.nl_jit(*args, **kwargs) matrix, bias = self._get_or_trace(key, args, kwargs) return matrix @ tensors[0] + bias def sparse_matrix(self, *args, **kwargs): """ Create an explicit representation of this linear function as a sparse matrix. See Also: `sparse_matrix_and_bias()`. Args: *args: Function arguments. This determines the size of the matrix. **kwargs: Additional keyword arguments for the linear function. Returns: Sparse matrix representation with `values` property and `native()` method. """ key, *_ = key_from_args(args, kwargs, self.f_params, cache=False, aux=self.auxiliary_args) matrix, bias = self._get_or_trace(key, args, kwargs) assert math.close(bias, 0), "This is an affine function and cannot be represented by a single matrix. Use sparse_matrix_and_bias() instead." return matrix def sparse_matrix_and_bias(self, *args, **kwargs): """ Create an explicit representation of this affine function as a sparse matrix and a bias vector. Args: *args: Positional arguments to the linear function. This determines the size of the matrix. **kwargs: Additional keyword arguments for the linear function. Returns: matrix: Sparse matrix representation with `values` property and `native()` method. bias: `Tensor` """ key, *_ = key_from_args(args, kwargs, self.f_params, cache=False, aux=self.auxiliary_args) return self._get_or_trace(key, args, kwargs) def __repr__(self): return f"lin({f_name(self.f)})"
Ancestors
- collections.abc.Callable
- typing.Generic
Methods
def sparse_matrix(self, *args, **kwargs)
-
Create an explicit representation of this linear function as a sparse matrix.
See Also: sparse_matrix_and_bias().
Args
*args
- Function arguments. This determines the size of the matrix.
**kwargs
- Additional keyword arguments for the linear function.
Returns
Sparse matrix representation with values property and native() method.
Expand source code
def sparse_matrix(self, *args, **kwargs):
    """
    Create an explicit representation of this linear function as a sparse matrix.

    See Also:
        `sparse_matrix_and_bias()`.

    Args:
        *args: Function arguments. This determines the size of the matrix.
        **kwargs: Additional keyword arguments for the linear function.

    Returns:
        Sparse matrix representation with `values` property and `native()` method.
    """
    key, *_ = key_from_args(args, kwargs, self.f_params, cache=False, aux=self.auxiliary_args)
    matrix, bias = self._get_or_trace(key, args, kwargs)
    assert math.close(bias, 0), "This is an affine function and cannot be represented by a single matrix. Use sparse_matrix_and_bias() instead."
    return matrix
def sparse_matrix_and_bias(self, *args, **kwargs)
-
Create an explicit representation of this affine function as a sparse matrix and a bias vector.
Args
*args
- Positional arguments to the linear function. This determines the size of the matrix.
**kwargs
- Additional keyword arguments for the linear function.
Returns
matrix
- Sparse matrix representation with values property and native() method.
bias
- Tensor
Expand source code
def sparse_matrix_and_bias(self, *args, **kwargs):
    """
    Create an explicit representation of this affine function as a sparse matrix and a bias vector.

    Args:
        *args: Positional arguments to the linear function. This determines the size of the matrix.
        **kwargs: Additional keyword arguments for the linear function.

    Returns:
        matrix: Sparse matrix representation with `values` property and `native()` method.
        bias: `Tensor`
    """
    key, *_ = key_from_args(args, kwargs, self.f_params, cache=False, aux=self.auxiliary_args)
    return self._get_or_trace(key, args, kwargs)
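A short sketch of obtaining and querying a LinearFunction via jit_compile_linear(); the traced function below is deliberately trivial and purely illustrative:

from phi import math
from phi.math import spatial

@math.jit_compile_linear
def scale(x):
    return 2 * x  # any purely linear computation can be traced

x0 = math.ones(spatial(x=3))
y = scale(x0)                                    # evaluates via the traced matrix
matrix = scale.sparse_matrix(x0)                 # explicit sparse representation
matrix, bias = scale.sparse_matrix_and_bias(x0)  # bias is 0 for a linear function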
class NotConverged
-
Raised during optimization if the desired accuracy was not reached within the maximum number of iterations.
This exception inherits from ConvergenceException.
See Also: Diverged.
Expand source code
class NotConverged(ConvergenceException):
    """
    Raised during optimization if the desired accuracy was not reached within the maximum number of iterations.

    This exception inherits from `ConvergenceException`.

    See Also:
        `Diverged`.
    """

    def __init__(self, result: SolveInfo):
        ConvergenceException.__init__(self, result)
Ancestors
- phi.math._optimize.ConvergenceException
- builtins.RuntimeError
- builtins.Exception
- builtins.BaseException
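A hedged sketch of handling both failure modes around a linear solve; the Solve arguments shown are illustrative and should be checked against the phi.math.Solve documentation:

from phi import math
from phi.math import spatial

def f(x):
    return 2 * x  # simple linear operator for illustration

y = math.ones(spatial(x=4))
try:
    x = math.solve_linear(f, y, math.Solve('CG', 1e-5, 1e-5, x0=math.zeros_like(y)))
except math.NotConverged as exc:
    print(exc.result)  # SolveInfo describing the failed solve
except math.Diverged as exc:
    print(exc.result)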
class Shape
-
Shapes enumerate dimensions, each consisting of a name, size and type.
There are five types of dimensions:
batch(), dual(), spatial(), channel(), and instance().
To construct a Shape, use batch(), dual(), spatial(), channel() or instance(), depending on the desired dimension type.
To create a shape with multiple types, use merge_shapes(), concat_shapes() or the syntax shape1 & shape2.
The __init__ constructor is for internal use only.
Expand source code
class Shape: """ Shapes enumerate dimensions, each consisting of a name, size and type. There are five types of dimensions: `batch`, `dual`, `spatial`, `channel`, and `instance`. """ def __init__(self, sizes: tuple, names: tuple, types: tuple, item_names: tuple): """ To construct a `Shape`, use `batch`, `dual`, `spatial`, `channel` or `instance`, depending on the desired dimension type. To create a shape with multiple types, use `merge_shapes()`, `concat_shapes()` or the syntax `shape1 & shape2`. The `__init__` constructor is for internal use only. """ if len(sizes) > 0 and any(s is not None and not isinstance(s, int) for s in sizes): from ._tensors import Tensor sizes = tuple([s if isinstance(s, Tensor) or s is None else int(s) for s in sizes]) # TODO replace this by an assert self.sizes: tuple = sizes """ Ordered dimension sizes as `tuple`. The size of a dimension can be an `int` or a `Tensor` for [non-uniform shapes](https://tum-pbs.github.io/PhiFlow/Math.html#non-uniform-tensors). See Also: `Shape.get_size()`, `Shape.size`, `Shape.shape`. """ self.names: Tuple[str] = names """ Ordered dimension names as `tuple[str]`. See Also: `Shape.name`. """ self.types: Tuple[str] = types # undocumented, may be private self.item_names: Tuple[Optional[Tuple[str, ...]]] = (None,) * len(sizes) if item_names is None else item_names # undocumented if DEBUG_CHECKS: assert len(sizes) == len(names) == len(types) == len(item_names), f"sizes={sizes}, names={names}, types={types}, item_names={item_names}" assert len(set(names)) == len(names), f"Duplicate dimension names: {names}" assert all(isinstance(n, str) for n in names), f"All names must be of type string but got {names}" assert isinstance(self.item_names, tuple) assert all([items is None or isinstance(items, tuple) for items in self.item_names]) assert all([items is None or all([isinstance(n, str) for n in items]) for items in self.item_names]) from ._tensors import Tensor for name, size in zip(names, sizes): if isinstance(size, Tensor): assert size.rank > 0 for size, item_names in zip(self.sizes, self.item_names): if item_names is not None: assert len(item_names) == size, f"Number of item names ({len(item_names)}) does not match size {size}" def _check_is_valid_tensor_shape(self): if DEBUG_CHECKS: from ._tensors import Tensor for name, size in zip(self.names, self.sizes): if size is not None and isinstance(size, Tensor): assert size.rank > 0 for dim in size.shape.names: assert dim in self.names, f"Dimension {name} varies along {dim} but {dim} is not part of the Shape {self}" def _to_dict(self, include_sizes=True): result = dict(names=self.names, types=self.types, item_names=self.item_names) if include_sizes: if not all([isinstance(s, int)] for s in self.sizes): raise NotImplementedError() result['sizes'] = self.sizes return result @staticmethod def _from_dict(dict_: dict): names = tuple(dict_['names']) sizes = tuple(dict_['sizes']) if 'sizes' in dict_ else (None,) * len(names) item_names = tuple([None if n is None else tuple(n) for n in dict_['item_names']]) return Shape(sizes, names, tuple(dict_['types']), item_names) @property def _named_sizes(self): return zip(self.names, self.sizes) @property def _dimensions(self): return zip(self.sizes, self.names, self.types, self.item_names) @property def untyped_dict(self): """ Returns: `dict` containing dimension names as keys. The values are either the item names as `tuple` if available, otherwise the size. 
""" return {name: self.get_item_names(i) or self.get_size(i) for i, name in enumerate(self.names)} def __len__(self): return len(self.sizes) def __contains__(self, item): if isinstance(item, (str, tuple, list)): dims = parse_dim_order(item) return all(dim in self.names for dim in dims) elif isinstance(item, Shape): return all([d in self.names for d in item.names]) else: raise ValueError(item) def isdisjoint(self, other: Union['Shape', tuple, list, str]): """ Shapes are disjoint if all dimension names of one shape do not occur in the other shape. """ other = parse_dim_order(other) return not any(dim in self.names for dim in other) def __iter__(self): return iter(self[i] for i in range(self.rank)) def index(self, dim: Union[str, 'Shape', None]) -> int: """ Finds the index of the dimension within this `Shape`. See Also: `Shape.indices()`. Args: dim: Dimension name or single-dimension `Shape`. Returns: Index as `int`. """ if dim is None: return None elif isinstance(dim, str): if dim not in self.names: raise ValueError(f"Shape {self} has no dimension '{dim}'") return self.names.index(dim) elif isinstance(dim, Shape): assert dim.rank == 1, f"index() requires a single dimension as input but got {dim}. Use indices() for multiple dimensions." return self.names.index(dim.name) else: raise ValueError(f"index() requires a single dimension as input but got {dim}") def indices(self, dims: Union[tuple, list, 'Shape']) -> Tuple[int]: """ Finds the indices of the given dimensions within this `Shape`. See Also: `Shape.index()`. Args: dims: Sequence of dimensions as `tuple`, `list` or `Shape`. Returns: Indices as `tuple[int]`. """ if isinstance(dims, (list, tuple, set)): return tuple([self.index(n) for n in dims if n in self.names]) elif isinstance(dims, Shape): return tuple([self.index(n) for n in dims.names if n in self.names]) else: raise ValueError(f"indices() requires a sequence of dimensions but got {dims}") def get_size(self, dim: Union[str, 'Shape', int], default=None): """ See Also: `Shape.get_sizes()`, `Shape.size` Args: dim: Dimension, either as name `str` or single-dimension `Shape` or index `int`. default: (Optional) If the dim does not exist, return this value instead of raising an error. Returns: Size associated with `dim` as `int` or `Tensor`. """ if isinstance(dim, int): assert default is None, "Cannot use a default value when passing an int for dim" return self.sizes[dim] if isinstance(dim, Shape): assert dim.rank == 1, f"get_size() requires a single dimension but got {dim}. Use indices() to get multiple sizes." dim = dim.name if isinstance(dim, str): if dim not in self.names: if default is None: raise KeyError(f"get_size() failed because '{dim}' is not part of Shape {self} and no default value was provided") else: return default return self.sizes[self.names.index(dim)] else: raise ValueError(f"get_size() requires a single dimension but got {dim}. Use indices() to get multiple sizes.") def get_sizes(self, dims: Union[tuple, list, 'Shape']) -> tuple: """ See Also: `Shape.get_size()` Args: dims: Dimensions as `tuple`, `list` or `Shape`. Returns: `tuple` """ assert isinstance(dims, (tuple, list, Shape)), f"get_sizes() requires a sequence of dimensions but got {dims}" return tuple([self.get_size(dim) for dim in dims]) def get_type(self, dim: Union[str, 'Shape']) -> str: # undocumented, use get_dim_type() instead. 
if isinstance(dim, str): return self.types[self.names.index(dim)] elif isinstance(dim, Shape): assert dim.rank == 1, f"Shape.get_type() only accepts single-dimension Shapes but got {dim}" return self.types[self.names.index(dim.name)] else: raise ValueError(dim) def get_dim_type(self, dim: Union[str, 'Shape']) -> Callable: """ Args: dim: Dimension, either as name `str` or single-dimension `Shape`. Returns: Dimension type, one of `batch`, `spatial`, `instance`, `channel`. """ return DIM_FUNCTIONS[self.get_type(dim)] def get_types(self, dims: Union[tuple, list, 'Shape']) -> tuple: # undocumented, do not use if isinstance(dims, (tuple, list)): return tuple(self.get_type(n) for n in dims) elif isinstance(dims, Shape): return tuple(self.get_type(n) for n in dims.names) else: raise ValueError(dims) def get_item_names(self, dim: Union[str, 'Shape', int], fallback_spatial=False) -> Union[tuple, None]: """ Args: fallback_spatial: If `True` and no item names are defined for `dim` and `dim` is a channel dimension, the spatial dimension names are interpreted as item names along `dim` in the order they are listed in this `Shape`. dim: Dimension, either as `int` index, `str` name or single-dimension `Shape`. Returns: Item names as `tuple` or `None` if not defined. """ if isinstance(dim, int): result = self.item_names[dim] elif isinstance(dim, str): result = self.item_names[self.index(dim)] elif isinstance(dim, Shape): assert dim.rank == 1, f"Shape.get_type() only accepts single-dimension Shapes but got {dim}" result = self.item_names[self.names.index(dim.name)] else: raise ValueError(dim) if result is not None: return result elif fallback_spatial and self.spatial_rank == self.get_size(dim) and self.get_type(dim) == CHANNEL_DIM: return self.spatial.names else: return None def flipped(self, dims: Union[List[str], Tuple[str]]): item_names = list(self.item_names) for dim in dims: if dim in self.names: dim_i_n = self.get_item_names(dim) if dim_i_n is not None: item_names[self.index(dim)] = tuple(reversed(dim_i_n)) return Shape(self.sizes, self.names, self.types, tuple(item_names)) def __getitem__(self, selection): if isinstance(selection, int): return Shape((self.sizes[selection],), (self.names[selection],), (self.types[selection],), (self.item_names[selection],)) elif isinstance(selection, slice): return Shape(self.sizes[selection], self.names[selection], self.types[selection], self.item_names[selection]) elif isinstance(selection, str): if ',' in selection: selection = [self.index(s.strip()) for s in selection.split(',')] else: selection = self.index(selection) return self[selection] elif isinstance(selection, (tuple, list)): selection = [self.index(s) if isinstance(s, str) else s for s in selection] return Shape(tuple([self.sizes[i] for i in selection]), tuple([self.names[i] for i in selection]), tuple([self.types[i] for i in selection]), tuple([self.item_names[i] for i in selection])) raise AssertionError("Can only access shape elements as shape[int] or shape[slice]") @property def reversed(self): return Shape(tuple(reversed(self.sizes)), tuple(reversed(self.names)), tuple(reversed(self.types)), tuple(reversed(self.item_names))) @property def batch(self) -> 'Shape': """ Filters this shape, returning only the batch dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. 
Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t == BATCH_DIM]] @property def non_batch(self) -> 'Shape': """ Filters this shape, returning only the non-batch dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t != BATCH_DIM]] @property def spatial(self) -> 'Shape': """ Filters this shape, returning only the spatial dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t == SPATIAL_DIM]] @property def non_spatial(self) -> 'Shape': """ Filters this shape, returning only the non-spatial dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t != SPATIAL_DIM]] @property def instance(self) -> 'Shape': """ Filters this shape, returning only the instance dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t == INSTANCE_DIM]] @property def non_instance(self) -> 'Shape': """ Filters this shape, returning only the non-instance dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t != INSTANCE_DIM]] @property def channel(self) -> 'Shape': """ Filters this shape, returning only the channel dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t == CHANNEL_DIM]] @property def non_channel(self) -> 'Shape': """ Filters this shape, returning only the non-channel dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t != CHANNEL_DIM]] @property def dual(self) -> 'Shape': """ Filters this shape, returning only the dual dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. 
Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t == DUAL_DIM]] @property def non_dual(self) -> 'Shape': """ Filters this shape, returning only the non-dual dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t != DUAL_DIM]] @property def primal(self) -> 'Shape': """ Filters this shape, returning only the dual dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t not in [DUAL_DIM, BATCH_DIM]]] @property def non_primal(self) -> 'Shape': """ Filters this shape, returning only batch and dual dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t in [DUAL_DIM, BATCH_DIM]]] @property def non_singleton(self) -> 'Shape': """ Filters this shape, returning only non-singleton dimensions as a new `Shape` object. Dimensions are singleton if their size is exactly `1`. Returns: New `Shape` object """ return self[[i for i, s in enumerate(self.sizes) if not _size_equal(s, 1)]] @property def singleton(self) -> 'Shape': """ Filters this shape, returning only singleton dimensions as a new `Shape` object. Dimensions are singleton if their size is exactly `1`. Returns: New `Shape` object """ return self[[i for i, s in enumerate(self.sizes) if _size_equal(s, 1)]] def unstack(self, dim='dims') -> Tuple['Shape']: """ Slices this `Shape` along a dimension. The dimension listing the sizes of the shape is referred to as `'dims'`. Non-uniform tensor shapes may be unstacked along other dimensions as well, see https://tum-pbs.github.io/PhiFlow/Math.html#non-uniform-tensors Args: dim: dimension to unstack Returns: slices of this shape """ if dim == 'dims': return tuple(Shape((self.sizes[i],), (self.names[i],), (self.types[i],), (self.item_names[i],)) for i in range(self.rank)) if dim not in self and self.is_uniform: return tuple([self]) from ._tensors import Tensor if dim in self: inner = self.without(dim) dim_size = self.get_size(dim) else: inner = self dim_size = self.shape.get_size(dim) sizes = [] for size in inner.sizes: if isinstance(size, Tensor) and dim in size.shape: sizes.append(size.unstack(dim)) dim_size = size.shape.get_size(dim) else: sizes.append(size) assert isinstance(dim_size, int) shapes = tuple(Shape(tuple([int(size[i]) if isinstance(size, tuple) else size for size in sizes]), inner.names, inner.types, inner.item_names) for i in range(dim_size)) return shapes @property def name(self) -> str: """ Only for Shapes containing exactly one single dimension. Returns the name of the dimension. See Also: `Shape.names`. """ assert self.rank == 1, f"Shape.name is only defined for shapes of rank 1. shape={self}" return self.names[0] @property def size(self) -> int: """ Only for Shapes containing exactly one single dimension. Returns the size of the dimension. See Also: `Shape.sizes`, `Shape.get_size()`. 
""" assert self.rank == 1, f"Shape.size is only defined for shapes of rank 1 but has dims {self}" return self.sizes[0] @property def type(self) -> str: """ Only for Shapes containing exactly one single dimension. Returns the type of the dimension. See Also: `Shape.get_type()`. """ assert self.rank == 1, "Shape.type is only defined for shapes of rank 1." return self.types[0] @property def dim_type(self): types = set(self.types) assert len(types) == 1, f"Shape contains multiple types: {self}" return DIM_FUNCTIONS[next(iter(types))] def __int__(self): assert self.rank == 1, "int(Shape) is only defined for shapes of rank 1." return self.sizes[0] def mask(self, names: Union[tuple, list, set, 'Shape']): """ Returns a binary sequence corresponding to the names of this Shape. A value of 1 means that a dimension of this Shape is contained in `names`. Args: names: instance of dimension names: tuple or list or set: Returns: binary sequence """ if isinstance(names, str): names = [names] elif isinstance(names, Shape): names = names.names mask = [1 if name in names else 0 for name in self.names] return tuple(mask) def __repr__(self): def size_repr(size, items): if items is not None: items_str = ",".join(items) return items_str if len(items_str) <= 20 else f"{size}:{items[0]}..{items[-1]}" return size strings = [f"{name}{TYPE_ABBR.get(dim_type, '?')}={size_repr(size, items)}" for size, name, dim_type, items in self._dimensions] return '(' + ', '.join(strings) + ')' def __eq__(self, other): if not isinstance(other, Shape): return False if self.names != other.names or self.types != other.types: return False for size1, size2 in zip(self.sizes, other.sizes): equal = size1 == size2 assert isinstance(equal, (bool, math.Tensor)) if isinstance(equal, math.Tensor): equal = equal.all if not equal: return False for names1, names2 in zip(self.item_names, other.item_names): if names1 != names2: return False return True def __ne__(self, other): return not self == other def __bool__(self): return self.rank > 0 def _reorder(self, names: Union[tuple, list, 'Shape']) -> 'Shape': assert len(names) == self.rank if isinstance(names, Shape): names = names.names order = [self.index(n) for n in names] return self[order] def _order_group(self, names: Union[tuple, list, 'Shape']) -> list: """ Reorders the dimensions of this `Shape` so that `names` are clustered together and occur in the specified order. """ if isinstance(names, Shape): names = names.names result = [] for dim in self.names: if dim not in result: if dim in names: result.extend(names) else: result.append(dim) return result def __and__(self, other): return merge_shapes(self, other) def _expand(self, dim: 'Shape', pos=None) -> 'Shape': """**Deprecated.** Use `phi.math.merge_shapes()` or `phi.math.concat_shapes()` instead. """ warnings.warn("Shape.expand() is deprecated. Use merge_shapes() or concat_shapes() instead.", DeprecationWarning) if not dim: return self assert dim.name not in self, f"Cannot expand shape {self} by {dim} because dimension already exists." 
assert isinstance(dim, Shape) and dim.rank == 1, f"Shape.expand() requires a single dimension as a Shape but got {dim}" if pos is None: same_type_dims = self[[i for i, t in enumerate(self.types) if t == dim.type]] if len(same_type_dims) > 0: pos = self.index(same_type_dims.names[0]) else: pos = {BATCH_DIM: 0, INSTANCE_DIM: self.batch_rank, SPATIAL_DIM: self.batch.rank + self.instance_rank, CHANNEL_DIM: self.rank + 1}[dim.type] elif pos < 0: pos += self.rank + 1 sizes = list(self.sizes) names = list(self.names) types = list(self.types) item_names = list(self.item_names) sizes.insert(pos, dim.size) names.insert(pos, dim.name) types.insert(pos, dim.type) item_names.insert(pos, dim.item_names[0]) return Shape(tuple(sizes), tuple(names), tuple(types), tuple(item_names)) def without(self, dims: 'DimFilter') -> 'Shape': """ Builds a new shape from this one that is missing all given dimensions. Dimensions in `dims` that are not part of this Shape are ignored. The complementary operation is `Shape.only()`. Args: dims: Single dimension (str) or instance of dimensions (tuple, list, Shape) dims: Dimensions to exclude as `str` or `tuple` or `list` or `Shape`. Dimensions that are not included in this shape are ignored. Returns: Shape without specified dimensions """ if callable(dims): dims = dims(self) if isinstance(dims, str): dims = parse_dim_order(dims) if isinstance(dims, (tuple, list, set)): return self[[i for i in range(self.rank) if self.names[i] not in dims]] elif isinstance(dims, Shape): return self[[i for i in range(self.rank) if self.names[i] not in dims.names]] elif dims is None: # subtract none return self else: raise ValueError(dims) def only(self, dims: 'DimFilter', reorder=False): """ Builds a new shape from this one that only contains the given dimensions. Dimensions in `dims` that are not part of this Shape are ignored. The complementary operation is :func:`Shape.without`. Args: dims: comma-separated dimension names (str) or instance of dimensions (tuple, list, Shape) or filter function. reorder: If `False`, keeps the dimension order as defined in this shape. If `True`, reorders the dimensions of this shape to match the order of `dims`. Returns: Shape containing only specified dimensions """ if dims is None: # keep none return EMPTY_SHAPE if callable(dims): dims = dims(self) if isinstance(dims, str): dims = parse_dim_order(dims) if isinstance(dims, Shape): dims = dims.names if not isinstance(dims, (tuple, list, set)): raise ValueError(dims) if reorder: return self[[self.names.index(d) for d in dims if d in self.names]] else: return self[[i for i in range(self.rank) if self.names[i] in dims]] @property def rank(self) -> int: """ Returns the number of dimensions. Equal to `len(shape)`. See `Shape.is_empty`, `Shape.batch_rank`, `Shape.spatial_rank`, `Shape.channel_rank`. 
""" return len(self.sizes) @property def batch_rank(self) -> int: """ Number of batch dimensions """ return sum([1 for ty in self.types if ty == BATCH_DIM]) @property def instance_rank(self) -> int: return sum([1 for ty in self.types if ty == INSTANCE_DIM]) @property def spatial_rank(self) -> int: """ Number of spatial dimensions """ return sum([1 for ty in self.types if ty == SPATIAL_DIM]) @property def dual_rank(self) -> int: """ Number of spatial dimensions """ return sum([1 for ty in self.types if ty == DUAL_DIM]) @property def channel_rank(self) -> int: """ Number of channel dimensions """ return sum([1 for ty in self.types if ty == CHANNEL_DIM]) @property def well_defined(self): """ Returns `True` if no dimension size is `None`. Shapes with undefined sizes may be used in `phi.math.tensor()`, `phi.math.wrap()`, `phi.math.stack()` or `phi.math.concat()`. To create an undefined size, call a constructor function (`batch()`, `spatial()`, `channel()`, `instance()`) with positional `str` arguments, e.g. `spatial('x')`. """ for size in self.sizes: if size is None: return False return True @property def shape(self) -> 'Shape': """ Higher-order `Shape`. The returned shape will always contain the channel dimension `dims` with a size equal to the `Shape.rank` of this shape. For uniform shapes, `Shape.shape` will only contain the dimension `dims` but the shapes of [non-uniform shapes](https://tum-pbs.github.io/PhiFlow/Math.html#non-uniform-tensors) may contain additional dimensions. See Also: `Shape.is_uniform`. Returns: `Shape`. """ from phi.math import Tensor shape = Shape((self.rank,), ('dims',), (CHANNEL_DIM,), (self.names,)) for size in self.sizes: if isinstance(size, Tensor): shape = shape & size.shape return shape @property def is_uniform(self) -> bool: """ A shape is uniform if it all sizes have a single integer value. See Also: `Shape.is_non_uniform`, `Shape.shape`. """ return all(isinstance(s, int) for s in self.sizes) @property def is_non_uniform(self) -> bool: """ A shape is non-uniform if the size of any dimension varies along another dimension. See Also: `Shape.is_uniform`, `Shape.shape`. """ return not self.is_uniform @property def non_uniform(self) -> 'Shape': """ Returns only the non-uniform dimensions of this shape, i.e. the dimensions whose size varies along another dimension. """ from phi.math import Tensor indices = [i for i, size in enumerate(self.sizes) if isinstance(size, Tensor) and size.rank > 0] return self[indices] def with_size(self, size: Union[int, Tuple[str, ...]]): """ Only for single-dimension shapes. Returns a `Shape` representing this dimension but with a different size. See Also: `Shape.with_sizes()`. Args: size: Replacement size for this dimension. Returns: `Shape` """ assert self.rank == 1, "Shape.with_size() is only defined for shapes of rank 1." return self.with_sizes([size]) def with_sizes(self, sizes: Union[Sequence[int], Sequence[Tuple[str, ...]], 'Shape', int], keep_item_names=True): """ Returns a new `Shape` matching the dimension names and types of `self` but with different sizes. See Also: `Shape.with_size()`. Args: sizes: One of * `tuple` / `list` of same length as `self` containing replacement sizes or replacement item names. * `Shape` of any rank. Replaces sizes for dimensions shared by `sizes` and `self`. * `int`: new size for all dimensions keep_item_names: If `False`, forgets all item names. If `True`, keeps item names where the size does not change. Returns: `Shape` with same names and types as `self`. 
""" if isinstance(sizes, int): sizes = [sizes] * len(self.sizes) if isinstance(sizes, Shape): item_names = [sizes.get_item_names(dim) if dim in sizes else self.get_item_names(dim) for dim in self.names] sizes = [sizes.get_size(dim) if dim in sizes else s for dim, s in self._named_sizes] return Shape(tuple(sizes), self.names, self.types, tuple(item_names)) else: assert len(sizes) == len(self.sizes), f"Cannot create shape from {self} with sizes {sizes}" sizes_ = [] item_names = [] for i, obj in enumerate(sizes): new_size, new_item_names = Shape._size_and_item_names_from_obj(obj, self.sizes[i], self.item_names[i], keep_item_names) sizes_.append(new_size) item_names.append(new_item_names) return Shape(tuple(sizes_), self.names, self.types, tuple(item_names)) @staticmethod def _size_and_item_names_from_obj(obj, prev_size, prev_item_names, keep_item_names=True): if isinstance(obj, str): obj = [s.strip() for s in obj.split(',')] if isinstance(obj, (tuple, list)): return len(obj), tuple(obj) elif isinstance(obj, Number): return obj, prev_item_names if keep_item_names and (prev_size is None or _size_equal(obj, prev_size)) else None elif isinstance(obj, math.Tensor) or obj is None: return obj, None else: raise ValueError(f"sizes can only contain int, str or Tensor but got {type(obj)}") def without_sizes(self): """ Returns: `Shape` with all sizes undefined (`None`) """ return Shape((None,) * self.rank, self.names, self.types, (None,) * self.rank) def _replace_single_size(self, dim: str, size: int, keep_item_names: bool = False): new_sizes = list(self.sizes) new_sizes[self.index(dim)] = size return self.with_sizes(new_sizes, keep_item_names=keep_item_names) def with_dim_size(self, dim: Union[str, 'Shape'], size: Union[int, 'math.Tensor', str, tuple, list], keep_item_names=True): """ Returns a new `Shape` that has a different size for `dim`. Args: dim: Dimension for which to replace the size, `Shape` or `str`. size: New size, `int` or `Tensor` Returns: `Shape` with same names and types as `self`. """ if isinstance(dim, Shape): dim = dim.name assert isinstance(dim, str) new_size, new_item_names = Shape._size_and_item_names_from_obj(size, self.get_size(dim), self.get_item_names(dim), keep_item_names) return self.replace(dim, Shape((new_size,), (dim,), (self.get_type(dim),), (new_item_names,))) def _with_names(self, names: Union[str, tuple, list]): if isinstance(names, str): names = parse_dim_names(names, self.rank) names = [n if n is not None else o for n, o in zip(names, self.names)] return Shape(self.sizes, tuple(names), self.types, self.item_names) def _replace_names_and_types(self, dims: Union['Shape', str, tuple, list], new: Union['Shape', str, tuple, list]) -> 'Shape': """ Returns a copy of `self` with `dims` replaced by `new`. Dimensions that are not present in `self` are ignored. The dimension order is preserved. Args: dims: Dimensions to replace. new: New dimensions, must have same length as `dims`. If a `Shape` is given, replaces the dimension types and item names as well. Returns: `Shape` with same rank and dimension order as `self`. 
""" dims = parse_dim_order(dims) sizes = [math.rename_dims(s, dims, new) if isinstance(s, math.Tensor) else s for s in self.sizes] new = parse_dim_order(new) if isinstance(new, str) else new names = list(self.names) types = list(self.types) item_names = list(self.item_names) for old_name, new_dim in zip(dims, new): if old_name in self: if isinstance(new_dim, Shape): names[self.index(old_name)] = new_dim.name types[self.index(old_name)] = new_dim.type item_names[self.index(old_name)] = new_dim.item_names[0] else: names[self.index(old_name)] = new_dim return Shape(tuple(sizes), tuple(names), tuple(types), tuple(item_names)) def replace(self, dims: Union['Shape', str, tuple, list], new: 'Shape') -> 'Shape': """ Returns a copy of `self` with `dims` replaced by `new`. Dimensions that are not present in `self` are ignored. The dimension order is preserved. Args: dims: Dimensions to replace. new: New dimensions, must have same length as `dims`. If a `Shape` is given, replaces the dimension types and item names as well. Returns: `Shape` with same rank and dimension order as `self`. """ dims = parse_dim_order(dims) assert isinstance(new, Shape), f"new must be a Shape but got {new}" names = list(self.names) sizes = list(self.sizes) types = list(self.types) item_names = list(self.item_names) if len(new) > len(dims): # Put all in one spot assert len(dims) == 1, "Cannot replace 2+ dims by more replacements" index = self.index(dims[0]) return concat_shapes(self[:index], new, self[index+1:]) for old_name, new_dim in zip(dims, new): if old_name in self: names[self.index(old_name)] = new_dim.name types[self.index(old_name)] = new_dim.type item_names[self.index(old_name)] = new_dim.item_names[0] sizes[self.index(old_name)] = new_dim.size replaced = Shape(tuple(sizes), tuple(names), tuple(types), tuple(item_names)) if len(new) == len(dims): return replaced to_remove = dims[-(len(dims) - len(new)):] return replaced.without(to_remove) def _with_types(self, types: Union['Shape', str]): """ Only for internal use. Note: This method does not rename dimensions to comply with type requirements (e.g. ~ for dual dims). """ if isinstance(types, Shape): return Shape(self.sizes, self.names, tuple([types.get_type(name) if name in types else self_type for name, self_type in zip(self.names, self.types)]), self.item_names) elif isinstance(types, str): return Shape(self.sizes, self.names, (types,) * self.rank, self.item_names) else: raise ValueError(types) def _with_item_names(self, item_names: tuple): return Shape(self.sizes, self.names, self.types, item_names) def _with_item_name(self, dim: str, item_name: tuple): if dim not in self: return self item_names = list(self.item_names) item_names[self.index(dim)] = item_name return Shape(self.sizes, self.names, self.types, tuple(item_names)) def _perm(self, names: Tuple[str]) -> List[int]: assert len(set(names)) == len(names), f"No duplicates allowed but got {names}" assert len(names) >= len(self.names), f"Cannot find permutation for {self} given {names} because names {set(self.names) - set(names)} are missing" assert len(names) <= len(self.names), f"Cannot find permutation for {self} given {names} because too many names were passed: {names}" perm = [self.names.index(name) for name in names] return perm @property def volume(self) -> Union[int, None]: """ Returns the total number of values contained in a tensor of this shape. This is the product of all dimension sizes. 
Returns: volume as `int` or `Tensor` or `None` if the shape is not `Shape.well_defined` """ from phi.math import Tensor for dim, size in self._named_sizes: if isinstance(size, Tensor) and size.rank > 0: non_uniform_dim = size.shape.names[0] shapes = self.unstack(non_uniform_dim) return sum(s.volume for s in shapes) result = 1 for size in self.sizes: if size is None: return None result *= size return int(result) @property def is_empty(self) -> bool: """ True if this shape has no dimensions. Equivalent to `Shape.rank` `== 0`. """ return len(self.sizes) == 0 def after_pad(self, widths: dict) -> 'Shape': sizes = list(self.sizes) item_names = list(self.item_names) for dim, (lo, up) in widths.items(): if dim in self.names: sizes[self.index(dim)] += lo + up item_names[self.index(dim)] = None return Shape(tuple(sizes), self.names, self.types, tuple(item_names)) def prepare_gather(self, dim: str, selection): if isinstance(selection, Shape): selection = selection.name if selection.rank == 1 else selection.names if isinstance(selection, str) and ',' in selection: selection = parse_dim_order(selection) if isinstance(selection, str): # single item name item_names = self.get_item_names(dim, fallback_spatial=True) assert item_names is not None, f"No item names defined for dim '{dim}' in tensor {self.shape} and dimension size does not match spatial rank." assert selection in item_names, f"Accessing tensor.{dim}['{selection}'] failed. Item names are {item_names}." selection = item_names.index(selection) if isinstance(selection, (tuple, list)): selection = list(selection) if any([isinstance(s, str) for s in selection]): item_names = self.get_item_names(dim, fallback_spatial=True) for i, s in enumerate(selection): if isinstance(s, str): assert item_names is not None, f"Accessing tensor.{dim}['{s}'] failed because no item names are present on tensor {self.shape}" assert s in item_names, f"Accessing tensor.{dim}['{s}'] failed. Item names are {item_names}." 
selection[i] = item_names.index(s) if not selection: # empty selection = slice(0, 0) return selection def after_gather(self, selection: dict) -> 'Shape': result = self for sel_dim, selection in selection.items(): if sel_dim not in self.names: continue selection = self.prepare_gather(sel_dim, selection) if isinstance(selection, int): if result.is_uniform: result = result.without(sel_dim) else: from phi.math import Tensor gathered_sizes = [(s[{sel_dim: selection}] if isinstance(s, Tensor) else s) for s in result.sizes] gathered_sizes = [(int(s) if isinstance(s, Tensor) and s.rank == 0 else s) for s in gathered_sizes] result = result.with_sizes(gathered_sizes, keep_item_names=True).without(sel_dim) elif isinstance(selection, slice): assert isinstance(selection.step, int) or selection.step is None, f"slice step must be an int or None but got {type(selection.step).__name__}" assert isinstance(selection.start, int) or selection.start is None, f"slice start must be an int or None but got {type(selection.start).__name__}" assert isinstance(selection.stop, int) or selection.stop is None, f"slice stop must be an int or None but got {type(selection.stop).__name__}" step = selection.step or 1 start = selection.start if isinstance(selection.start, int) else (0 if step > 0 else self.get_size(sel_dim)-1) stop = selection.stop if isinstance(selection.stop, int) else (self.get_size(sel_dim) if step > 0 else -1) if stop < 0 and step > 0: stop += self.get_size(sel_dim) assert stop >= 0 if start < 0 and step > 0: start += self.get_size(sel_dim) assert start >= 0 stop = min(stop, self.get_size(sel_dim)) new_size = math.to_int64(math.ceil(math.wrap((stop - start) / step))) if new_size.rank == 0: new_size = int(new_size) # NumPy array not allowed because not hashable result = result._replace_single_size(sel_dim, new_size, keep_item_names=True) if step < 0: result = result.flipped([sel_dim]) if self.get_item_names(sel_dim) is not None: result = result._with_item_name(sel_dim, tuple(self.get_item_names(sel_dim)[selection])) elif isinstance(selection, (tuple, list)): result = result._replace_single_size(sel_dim, len(selection)) if self.get_item_names(sel_dim) is not None: result = result._with_item_name(sel_dim, tuple([self.get_item_names(sel_dim)[i] for i in selection])) else: raise NotImplementedError(f"{type(selection)} not supported. Only (int, slice) allowed.") return result def meshgrid(self, names=False): """ Builds a sequence containing all multi-indices within a tensor of this shape. All indices are returned as `dict` mapping dimension names to `int` indices. The corresponding values can be retrieved from Tensors and other Sliceables using `tensor[index]`. This function currently only supports uniform tensors. Args: names: If `True`, replace indices by their item names if available. Returns: `dict` iterator. 
""" assert self.is_uniform, f"Shape.meshgrid() is currently not supported for non-uniform tensors, {self}" indices = [0] * self.rank while True: if names: yield {dim: (names[index] if names is not None else index) for dim, index, names in zip(self.names, indices, self.item_names)} else: yield {dim: index for dim, index in zip(self.names, indices)} for i in range(self.rank-1, -1, -1): indices[i] = (indices[i] + 1) % self.sizes[i] if indices[i] != 0: break else: return def first_index(self, names=False): return next(iter(self.meshgrid(names=names))) def are_adjacent(self, dims: Union[str, tuple, list, set, 'Shape']): indices = self.indices(dims) return (max(indices) - min(indices)) == len(dims) - 1 def __add__(self, other): return self._op2(other, lambda s, o: s + o, 0) def __radd__(self, other): return self._op2(other, lambda s, o: o + s, 0) def __sub__(self, other): return self._op2(other, lambda s, o: s - o, 0) def __rsub__(self, other): return self._op2(other, lambda s, o: o - s, 0) def __mul__(self, other): return self._op2(other, lambda s, o: s * o, 1) def __rmul__(self, other): return self._op2(other, lambda s, o: o * s, 1) def _op2(self, other, fun, default: int): if isinstance(other, int): return Shape(tuple([fun(s, other) for s in self.sizes]), self.names, self.types, (None,) * self.rank) elif isinstance(other, Shape): merged = self.without_sizes() & other.without_sizes() sizes = () for dim in merged.names: self_val = self.get_size(dim) if dim in self else default other_val = other.get_size(dim) if dim in other else default sizes += (fun(self_val, other_val),) return merged.with_sizes(sizes) else: return NotImplemented def __hash__(self): return hash(self.names)
Instance variables
var batch : phi.math._shape.Shape
-
Filters this shape, returning only the batch dimensions as a new Shape object.
See also: Shape.batch, Shape.spatial, Shape.instance, Shape.channel, Shape.dual, Shape.non_batch, Shape.non_spatial, Shape.non_instance, Shape.non_channel, Shape.non_dual.
Returns
New Shape object
Expand source code
@property
def batch(self) -> 'Shape':
    """
    Filters this shape, returning only the batch dimensions as a new `Shape` object.

    See also:
        `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.

    Returns:
        New `Shape` object
    """
    return self[[i for i, t in enumerate(self.types) if t == BATCH_DIM]]
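For example, a minimal sketch of the filtering properties:

from phi.math import batch, spatial, channel

s = batch(examples=8) & spatial(x=32, y=32) & channel(vector='x,y')
print(s.batch.names)      # ('examples',)
print(s.non_batch.names)  # ('x', 'y', 'vector')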
var batch_rank : int
-
Number of batch dimensions
Expand source code
@property def batch_rank(self) -> int: """ Number of batch dimensions """ return sum([1 for ty in self.types if ty == BATCH_DIM])
var channel : phi.math._shape.Shape
-
Filters this shape, returning only the channel dimensions as a new `Shape` object.
See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.
Returns
New `Shape` object.
Expand source code
@property def channel(self) -> 'Shape': """ Filters this shape, returning only the channel dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t == CHANNEL_DIM]]
var channel_rank : int
-
Number of channel dimensions
Expand source code
@property def channel_rank(self) -> int: """ Number of channel dimensions """ return sum([1 for ty in self.types if ty == CHANNEL_DIM])
var dim_type
-
Expand source code
@property def dim_type(self): types = set(self.types) assert len(types) == 1, f"Shape contains multiple types: {self}" return DIM_FUNCTIONS[next(iter(types))]
var dual : phi.math._shape.Shape
-
Filters this shape, returning only the dual dimensions as a new `Shape` object.
See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.
Returns
New `Shape` object.
Expand source code
@property def dual(self) -> 'Shape': """ Filters this shape, returning only the dual dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t == DUAL_DIM]]
var dual_rank : int
-
Number of dual dimensions
Expand source code
@property def dual_rank(self) -> int: """ Number of dual dimensions """ return sum([1 for ty in self.types if ty == DUAL_DIM])
var instance : phi.math._shape.Shape
-
Filters this shape, returning only the instance dimensions as a new `Shape` object.
See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.
Returns
New `Shape` object.
Expand source code
@property def instance(self) -> 'Shape': """ Filters this shape, returning only the instance dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t == INSTANCE_DIM]]
var instance_rank : int
-
Expand source code
@property def instance_rank(self) -> int: return sum([1 for ty in self.types if ty == INSTANCE_DIM])
var is_empty : bool
-
True if this shape has no dimensions. Equivalent to `Shape.rank == 0`.
Expand source code
@property def is_empty(self) -> bool: """ True if this shape has no dimensions. Equivalent to `Shape.rank` `== 0`. """ return len(self.sizes) == 0
var is_non_uniform : bool
-
A shape is non-uniform if the size of any dimension varies along another dimension.
See Also: `Shape.is_uniform`, `Shape.shape`.
Expand source code
@property def is_non_uniform(self) -> bool: """ A shape is non-uniform if the size of any dimension varies along another dimension. See Also: `Shape.is_uniform`, `Shape.shape`. """ return not self.is_uniform
var is_uniform : bool
-
A shape is uniform if all of its sizes are single integer values.
See Also: `Shape.is_non_uniform`, `Shape.shape`.
Expand source code
@property def is_uniform(self) -> bool: """ A shape is uniform if all of its sizes are single integer values. See Also: `Shape.is_non_uniform`, `Shape.shape`. """ return all(isinstance(s, int) for s in self.sizes)
var name : str
-
Only for Shapes containing exactly one single dimension. Returns the name of the dimension.
See Also: `Shape.names`.
Expand source code
@property def name(self) -> str: """ Only for Shapes containing exactly one single dimension. Returns the name of the dimension. See Also: `Shape.names`. """ assert self.rank == 1, f"Shape.name is only defined for shapes of rank 1. shape={self}" return self.names[0]
var names
-
Ordered dimension names as `tuple[str]`.
See Also: `Shape.name`.
var non_batch : phi.math._shape.Shape
-
Filters this shape, returning only the non-batch dimensions as a new `Shape` object.
See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.
Returns
New `Shape` object.
Expand source code
@property def non_batch(self) -> 'Shape': """ Filters this shape, returning only the non-batch dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t != BATCH_DIM]]
var non_channel : phi.math._shape.Shape
-
Filters this shape, returning only the non-channel dimensions as a new `Shape` object.
See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.
Returns
New `Shape` object.
Expand source code
@property def non_channel(self) -> 'Shape': """ Filters this shape, returning only the non-channel dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t != CHANNEL_DIM]]
var non_dual : phi.math._shape.Shape
-
Filters this shape, returning only the non-dual dimensions as a new `Shape` object.
See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.
Returns
New `Shape` object.
Expand source code
@property def non_dual(self) -> 'Shape': """ Filters this shape, returning only the non-dual dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t != DUAL_DIM]]
var non_instance : phi.math._shape.Shape
-
Filters this shape, returning only the non-instance dimensions as a new `Shape` object.
See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.
Returns
New `Shape` object.
Expand source code
@property def non_instance(self) -> 'Shape': """ Filters this shape, returning only the non-instance dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t != INSTANCE_DIM]]
var non_primal : phi.math._shape.Shape
-
Filters this shape, returning only batch and dual dimensions as a new `Shape` object.
See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.
Returns
New `Shape` object.
Expand source code
@property def non_primal(self) -> 'Shape': """ Filters this shape, returning only batch and dual dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t in [DUAL_DIM, BATCH_DIM]]]
var non_singleton : phi.math._shape.Shape
-
Filters this shape, returning only non-singleton dimensions as a new `Shape` object. Dimensions are singleton if their size is exactly `1`.
Returns
New `Shape` object.
Expand source code
@property def non_singleton(self) -> 'Shape': """ Filters this shape, returning only non-singleton dimensions as a new `Shape` object. Dimensions are singleton if their size is exactly `1`. Returns: New `Shape` object """ return self[[i for i, s in enumerate(self.sizes) if not _size_equal(s, 1)]]
var non_spatial : phi.math._shape.Shape
-
Filters this shape, returning only the non-spatial dimensions as a new `Shape` object.
See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.
Returns
New `Shape` object.
Expand source code
@property def non_spatial(self) -> 'Shape': """ Filters this shape, returning only the non-spatial dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t != SPATIAL_DIM]]
var non_uniform : phi.math._shape.Shape
-
Returns only the non-uniform dimensions of this shape, i.e. the dimensions whose size varies along another dimension.
Expand source code
@property def non_uniform(self) -> 'Shape': """ Returns only the non-uniform dimensions of this shape, i.e. the dimensions whose size varies along another dimension. """ from phi.math import Tensor indices = [i for i, size in enumerate(self.sizes) if isinstance(size, Tensor) and size.rank > 0] return self[indices]
var primal : phi.math._shape.Shape
-
Filters this shape, returning only the primal dimensions, i.e. all but batch and dual dimensions, as a new `Shape` object.
See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.
Returns
New `Shape` object.
Expand source code
@property def primal(self) -> 'Shape': """ Filters this shape, returning only the primal dimensions, i.e. all but batch and dual dimensions, as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t not in [DUAL_DIM, BATCH_DIM]]]
var rank : int
-
Returns the number of dimensions. Equal to `len(shape)`.
See `Shape.is_empty`, `Shape.batch_rank`, `Shape.spatial_rank`, `Shape.channel_rank`.
Expand source code
@property def rank(self) -> int: """ Returns the number of dimensions. Equal to `len(shape)`. See `Shape.is_empty`, `Shape.batch_rank`, `Shape.spatial_rank`, `Shape.channel_rank`. """ return len(self.sizes)
var reversed
-
Expand source code
@property def reversed(self): return Shape(tuple(reversed(self.sizes)), tuple(reversed(self.names)), tuple(reversed(self.types)), tuple(reversed(self.item_names)))
var shape : phi.math._shape.Shape
-
Higher-order `Shape`. The returned shape will always contain the channel dimension `dims` with a size equal to the `Shape.rank` of this shape.
For uniform shapes, `Shape.shape` will only contain the dimension `dims` but the shapes of non-uniform shapes may contain additional dimensions.
See Also: `Shape.is_uniform`.
Returns
`Shape`.
Expand source code
@property def shape(self) -> 'Shape': """ Higher-order `Shape`. The returned shape will always contain the channel dimension `dims` with a size equal to the `Shape.rank` of this shape. For uniform shapes, `Shape.shape` will only contain the dimension `dims` but the shapes of [non-uniform shapes](https://tum-pbs.github.io/PhiFlow/Math.html#non-uniform-tensors) may contain additional dimensions. See Also: `Shape.is_uniform`. Returns: `Shape`. """ from phi.math import Tensor shape = Shape((self.rank,), ('dims',), (CHANNEL_DIM,), (self.names,)) for size in self.sizes: if isinstance(size, Tensor): shape = shape & size.shape return shape
var singleton : phi.math._shape.Shape
-
Filters this shape, returning only singleton dimensions as a new `Shape` object. Dimensions are singleton if their size is exactly `1`.
Returns
New `Shape` object.
Expand source code
@property def singleton(self) -> 'Shape': """ Filters this shape, returning only singleton dimensions as a new `Shape` object. Dimensions are singleton if their size is exactly `1`. Returns: New `Shape` object """ return self[[i for i, s in enumerate(self.sizes) if _size_equal(s, 1)]]
var size : int
-
Only for Shapes containing exactly one single dimension. Returns the size of the dimension.
See Also: `Shape.sizes`, `Shape.get_size()`.
Expand source code
@property def size(self) -> int: """ Only for Shapes containing exactly one single dimension. Returns the size of the dimension. See Also: `Shape.sizes`, `Shape.get_size()`. """ assert self.rank == 1, f"Shape.size is only defined for shapes of rank 1 but has dims {self}" return self.sizes[0]
var sizes
-
Ordered dimension sizes as `tuple`. The size of a dimension can be an `int` or a `Tensor` for non-uniform shapes.
See Also: `Shape.get_size()`, `Shape.size`, `Shape.shape`.
var spatial : phi.math._shape.Shape
-
Filters this shape, returning only the spatial dimensions as a new `Shape` object.
See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.
Returns
New `Shape` object.
Expand source code
@property def spatial(self) -> 'Shape': """ Filters this shape, returning only the spatial dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ return self[[i for i, t in enumerate(self.types) if t == SPATIAL_DIM]]
var spatial_rank : int
-
Number of spatial dimensions
Expand source code
@property def spatial_rank(self) -> int: """ Number of spatial dimensions """ return sum([1 for ty in self.types if ty == SPATIAL_DIM])
var type : str
-
Only for Shapes containing exactly one single dimension. Returns the type of the dimension.
See Also: `Shape.get_type()`.
Expand source code
@property def type(self) -> str: """ Only for Shapes containing exactly one single dimension. Returns the type of the dimension. See Also: `Shape.get_type()`. """ assert self.rank == 1, "Shape.type is only defined for shapes of rank 1." return self.types[0]
var untyped_dict
-
Returns
`dict` containing dimension names as keys. The values are either the item names as `tuple` if available, otherwise the size.
Expand source code
@property def untyped_dict(self): """ Returns: `dict` containing dimension names as keys. The values are either the item names as `tuple` if available, otherwise the size. """ return {name: self.get_item_names(i) or self.get_size(i) for i, name in enumerate(self.names)}
var volume : Optional[int]
-
Returns the total number of values contained in a tensor of this shape. This is the product of all dimension sizes.
Returns
Volume as `int` or `Tensor`, or `None` if the shape is not `Shape.well_defined`.
Expand source code
@property def volume(self) -> Union[int, None]: """ Returns the total number of values contained in a tensor of this shape. This is the product of all dimension sizes. Returns: volume as `int` or `Tensor` or `None` if the shape is not `Shape.well_defined` """ from phi.math import Tensor for dim, size in self._named_sizes: if isinstance(size, Tensor) and size.rank > 0: non_uniform_dim = size.shape.names[0] shapes = self.unstack(non_uniform_dim) return sum(s.volume for s in shapes) result = 1 for size in self.sizes: if size is None: return None result *= size return int(result)
var well_defined
-
Returns `True` if no dimension size is `None`.
Shapes with undefined sizes may be used in `tensor()`, `wrap()`, `stack()` or `concat()`.
To create an undefined size, call a constructor function (`batch()`, `spatial()`, `channel()`, `instance()`) with positional `str` arguments, e.g. `spatial('x')`.
Expand source code
@property def well_defined(self): """ Returns `True` if no dimension size is `None`. Shapes with undefined sizes may be used in `phi.math.tensor()`, `phi.math.wrap()`, `phi.math.stack()` or `phi.math.concat()`. To create an undefined size, call a constructor function (`batch()`, `spatial()`, `channel()`, `instance()`) with positional `str` arguments, e.g. `spatial('x')`. """ for size in self.sizes: if size is None: return False return True
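A short sketch of `volume` and `well_defined`, assuming the `spatial` constructor:

>>> from phi.math import spatial
>>> spatial(x=4, y=3).volume     # product of all sizes -> 12
>>> spatial('x').well_defined    # size is undefined (None) -> False
>>> spatial(x=4).well_defined    # -> True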
Methods
def after_gather(self, selection: dict) ‑> phi.math._shape.Shape
-
Expand source code
def after_gather(self, selection: dict) -> 'Shape': result = self for sel_dim, selection in selection.items(): if sel_dim not in self.names: continue selection = self.prepare_gather(sel_dim, selection) if isinstance(selection, int): if result.is_uniform: result = result.without(sel_dim) else: from phi.math import Tensor gathered_sizes = [(s[{sel_dim: selection}] if isinstance(s, Tensor) else s) for s in result.sizes] gathered_sizes = [(int(s) if isinstance(s, Tensor) and s.rank == 0 else s) for s in gathered_sizes] result = result.with_sizes(gathered_sizes, keep_item_names=True).without(sel_dim) elif isinstance(selection, slice): assert isinstance(selection.step, int) or selection.step is None, f"slice step must be an int or None but got {type(selection.step).__name__}" assert isinstance(selection.start, int) or selection.start is None, f"slice start must be an int or None but got {type(selection.start).__name__}" assert isinstance(selection.stop, int) or selection.stop is None, f"slice stop must be an int or None but got {type(selection.stop).__name__}" step = selection.step or 1 start = selection.start if isinstance(selection.start, int) else (0 if step > 0 else self.get_size(sel_dim)-1) stop = selection.stop if isinstance(selection.stop, int) else (self.get_size(sel_dim) if step > 0 else -1) if stop < 0 and step > 0: stop += self.get_size(sel_dim) assert stop >= 0 if start < 0 and step > 0: start += self.get_size(sel_dim) assert start >= 0 stop = min(stop, self.get_size(sel_dim)) new_size = math.to_int64(math.ceil(math.wrap((stop - start) / step))) if new_size.rank == 0: new_size = int(new_size) # NumPy array not allowed because not hashable result = result._replace_single_size(sel_dim, new_size, keep_item_names=True) if step < 0: result = result.flipped([sel_dim]) if self.get_item_names(sel_dim) is not None: result = result._with_item_name(sel_dim, tuple(self.get_item_names(sel_dim)[selection])) elif isinstance(selection, (tuple, list)): result = result._replace_single_size(sel_dim, len(selection)) if self.get_item_names(sel_dim) is not None: result = result._with_item_name(sel_dim, tuple([self.get_item_names(sel_dim)[i] for i in selection])) else: raise NotImplementedError(f"{type(selection)} not supported. Only (int, slice) allowed.") return result
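A hedged sketch of how `after_gather` computes the shape that remains after slicing, assuming the `spatial` constructor:

>>> from phi.math import spatial
>>> s = spatial(x=10, y=8)
>>> s.after_gather({'x': slice(0, 10, 2)}).get_size('x')  # strided slice -> 5
>>> s.after_gather({'y': 3}).names  # int selection drops the dim -> ('x',)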
def after_pad(self, widths: dict) ‑> phi.math._shape.Shape
-
Expand source code
def after_pad(self, widths: dict) -> 'Shape': sizes = list(self.sizes) item_names = list(self.item_names) for dim, (lo, up) in widths.items(): if dim in self.names: sizes[self.index(dim)] += lo + up item_names[self.index(dim)] = None return Shape(tuple(sizes), self.names, self.types, tuple(item_names))
def are_adjacent(self, dims: Union[str, tuple, list, set, ForwardRef('Shape')])
-
Expand source code
def are_adjacent(self, dims: Union[str, tuple, list, set, 'Shape']): indices = self.indices(dims) return (max(indices) - min(indices)) == len(dims) - 1
def first_index(self, names=False)
-
Expand source code
def first_index(self, names=False): return next(iter(self.meshgrid(names=names)))
def flipped(self, dims: Union[List[str], Tuple[str]])
-
Expand source code
def flipped(self, dims: Union[List[str], Tuple[str]]): item_names = list(self.item_names) for dim in dims: if dim in self.names: dim_i_n = self.get_item_names(dim) if dim_i_n is not None: item_names[self.index(dim)] = tuple(reversed(dim_i_n)) return Shape(self.sizes, self.names, self.types, tuple(item_names))
def get_dim_type(self, dim: Union[str, ForwardRef('Shape')]) ‑> Callable
-
Args
dim - Dimension, either as name `str` or single-dimension `Shape`.
Returns
Dimension type, one of `batch()`, `spatial()`, `instance()`, `channel()`.
Expand source code
def get_dim_type(self, dim: Union[str, 'Shape']) -> Callable: """ Args: dim: Dimension, either as name `str` or single-dimension `Shape`. Returns: Dimension type, one of `batch`, `spatial`, `instance`, `channel`. """ return DIM_FUNCTIONS[self.get_type(dim)]
def get_item_names(self, dim: Union[str, ForwardRef('Shape'), int], fallback_spatial=False) ‑> Optional[tuple]
-
Args
fallback_spatial - If `True` and no item names are defined for `dim` and `dim` is a channel dimension, the spatial dimension names are interpreted as item names along `dim` in the order they are listed in this `Shape`.
dim - Dimension, either as `int` index, `str` name or single-dimension `Shape`.
Returns
Item names as `tuple` or `None` if not defined.
Expand source code
def get_item_names(self, dim: Union[str, 'Shape', int], fallback_spatial=False) -> Union[tuple, None]: """ Args: fallback_spatial: If `True` and no item names are defined for `dim` and `dim` is a channel dimension, the spatial dimension names are interpreted as item names along `dim` in the order they are listed in this `Shape`. dim: Dimension, either as `int` index, `str` name or single-dimension `Shape`. Returns: Item names as `tuple` or `None` if not defined. """ if isinstance(dim, int): result = self.item_names[dim] elif isinstance(dim, str): result = self.item_names[self.index(dim)] elif isinstance(dim, Shape): assert dim.rank == 1, f"Shape.get_type() only accepts single-dimension Shapes but got {dim}" result = self.item_names[self.names.index(dim.name)] else: raise ValueError(dim) if result is not None: return result elif fallback_spatial and self.spatial_rank == self.get_size(dim) and self.get_type(dim) == CHANNEL_DIM: return self.spatial.names else: return None
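A minimal example, assuming `channel` accepts item names as a comma-separated string (as used elsewhere in this documentation):

>>> from phi.math import channel
>>> c = channel(vector='x,y,z')
>>> c.get_item_names('vector')  # -> ('x', 'y', 'z')
>>> c.get_size('vector')        # -> 3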
def get_size(self, dim: Union[str, ForwardRef('Shape'), int], default=None)
-
See Also: `Shape.get_sizes()`, `Shape.size`.
Args
dim - Dimension, either as name `str` or single-dimension `Shape` or index `int`.
default - (Optional) If the dim does not exist, return this value instead of raising an error.
Returns
Size associated with `dim` as `int` or `Tensor`.
Expand source code
def get_size(self, dim: Union[str, 'Shape', int], default=None): """ See Also: `Shape.get_sizes()`, `Shape.size` Args: dim: Dimension, either as name `str` or single-dimension `Shape` or index `int`. default: (Optional) If the dim does not exist, return this value instead of raising an error. Returns: Size associated with `dim` as `int` or `Tensor`. """ if isinstance(dim, int): assert default is None, "Cannot use a default value when passing an int for dim" return self.sizes[dim] if isinstance(dim, Shape): assert dim.rank == 1, f"get_size() requires a single dimension but got {dim}. Use indices() to get multiple sizes." dim = dim.name if isinstance(dim, str): if dim not in self.names: if default is None: raise KeyError(f"get_size() failed because '{dim}' is not part of Shape {self} and no default value was provided") else: return default return self.sizes[self.names.index(dim)] else: raise ValueError(f"get_size() requires a single dimension but got {dim}. Use indices() to get multiple sizes.")
def get_sizes(self, dims: Union[tuple, list, ForwardRef('Shape')]) ‑> tuple
-
Expand source code
def get_sizes(self, dims: Union[tuple, list, 'Shape']) -> tuple: """ See Also: `Shape.get_size()` Args: dims: Dimensions as `tuple`, `list` or `Shape`. Returns: `tuple` """ assert isinstance(dims, (tuple, list, Shape)), f"get_sizes() requires a sequence of dimensions but got {dims}" return tuple([self.get_size(dim) for dim in dims])
def get_type(self, dim: Union[str, ForwardRef('Shape')]) ‑> str
-
Expand source code
def get_type(self, dim: Union[str, 'Shape']) -> str: # undocumented, use get_dim_type() instead. if isinstance(dim, str): return self.types[self.names.index(dim)] elif isinstance(dim, Shape): assert dim.rank == 1, f"Shape.get_type() only accepts single-dimension Shapes but got {dim}" return self.types[self.names.index(dim.name)] else: raise ValueError(dim)
def get_types(self, dims: Union[tuple, list, ForwardRef('Shape')]) ‑> tuple
-
Expand source code
def get_types(self, dims: Union[tuple, list, 'Shape']) -> tuple: # undocumented, do not use if isinstance(dims, (tuple, list)): return tuple(self.get_type(n) for n in dims) elif isinstance(dims, Shape): return tuple(self.get_type(n) for n in dims.names) else: raise ValueError(dims)
def index(self, dim: Union[str, ForwardRef('Shape'), None]) ‑> int
-
Finds the index of the dimension within this `Shape`.
See Also: `Shape.indices()`.
Args
dim - Dimension name or single-dimension `Shape`.
Returns
Index as `int`.
Expand source code
def index(self, dim: Union[str, 'Shape', None]) -> int: """ Finds the index of the dimension within this `Shape`. See Also: `Shape.indices()`. Args: dim: Dimension name or single-dimension `Shape`. Returns: Index as `int`. """ if dim is None: return None elif isinstance(dim, str): if dim not in self.names: raise ValueError(f"Shape {self} has no dimension '{dim}'") return self.names.index(dim) elif isinstance(dim, Shape): assert dim.rank == 1, f"index() requires a single dimension as input but got {dim}. Use indices() for multiple dimensions." return self.names.index(dim.name) else: raise ValueError(f"index() requires a single dimension as input but got {dim}")
def indices(self, dims: Union[tuple, list, ForwardRef('Shape')]) ‑> Tuple[int]
-
Finds the indices of the given dimensions within this `Shape`.
See Also: `Shape.index()`.
Args
dims - Sequence of dimensions as `tuple`, `list` or `Shape`.
Returns
Indices as `tuple[int]`.
Expand source code
def indices(self, dims: Union[tuple, list, 'Shape']) -> Tuple[int]: """ Finds the indices of the given dimensions within this `Shape`. See Also: `Shape.index()`. Args: dims: Sequence of dimensions as `tuple`, `list` or `Shape`. Returns: Indices as `tuple[int]`. """ if isinstance(dims, (list, tuple, set)): return tuple([self.index(n) for n in dims if n in self.names]) elif isinstance(dims, Shape): return tuple([self.index(n) for n in dims.names if n in self.names]) else: raise ValueError(f"indices() requires a sequence of dimensions but got {dims}")
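A short sketch covering both `index()` and `indices()`:

>>> from phi.math import spatial, channel
>>> s = spatial(x=8, y=8) & channel(vector='x,y')
>>> s.index('y')                # -> 1
>>> s.indices(('x', 'vector'))  # -> (0, 2)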
def isdisjoint(self, other: Union[ForwardRef('Shape'), tuple, list, str])
-
Shapes are disjoint if all dimension names of one shape do not occur in the other shape.
Expand source code
def isdisjoint(self, other: Union['Shape', tuple, list, str]): """ Shapes are disjoint if all dimension names of one shape do not occur in the other shape. """ other = parse_dim_order(other) return not any(dim in self.names for dim in other)
def mask(self, names: Union[tuple, list, set, ForwardRef('Shape')])
-
Returns a binary sequence corresponding to the names of this Shape. A value of 1 means that a dimension of this Shape is contained in `names`.
Args
names - Dimension names as `tuple`, `list`, `set`, `str` or `Shape`.
Returns
Binary sequence as `tuple`.
Expand source code
def mask(self, names: Union[tuple, list, set, 'Shape']): """ Returns a binary sequence corresponding to the names of this Shape. A value of 1 means that a dimension of this Shape is contained in `names`. Args: names: instance of dimension names: tuple or list or set: Returns: binary sequence """ if isinstance(names, str): names = [names] elif isinstance(names, Shape): names = names.names mask = [1 if name in names else 0 for name in self.names] return tuple(mask)
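For illustration:

>>> from phi.math import batch, spatial
>>> s = batch(b=2) & spatial(x=4, y=4)
>>> s.mask('x')         # -> (0, 1, 0)
>>> s.mask(('b', 'y'))  # -> (1, 0, 1)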
def meshgrid(self, names=False)
-
Builds a sequence containing all multi-indices within a tensor of this shape. All indices are returned as `dict` mapping dimension names to `int` indices.
The corresponding values can be retrieved from Tensors and other Sliceables using `tensor[index]`.
This function currently only supports uniform tensors.
Args
names - If `True`, replace indices by their item names if available.
Returns
`dict` iterator.
Expand source code
def meshgrid(self, names=False): """ Builds a sequence containing all multi-indices within a tensor of this shape. All indices are returned as `dict` mapping dimension names to `int` indices. The corresponding values can be retrieved from Tensors and other Sliceables using `tensor[index]`. This function currently only supports uniform tensors. Args: names: If `True`, replace indices by their item names if available. Returns: `dict` iterator. """ assert self.is_uniform, f"Shape.meshgrid() is currently not supported for non-uniform tensors, {self}" indices = [0] * self.rank while True: if names: yield {dim: (names[index] if names is not None else index) for dim, index, names in zip(self.names, indices, self.item_names)} else: yield {dim: index for dim, index in zip(self.names, indices)} for i in range(self.rank-1, -1, -1): indices[i] = (indices[i] + 1) % self.sizes[i] if indices[i] != 0: break else: return
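As the source above shows, the last dimension varies fastest:

>>> from phi.math import spatial
>>> list(spatial(x=2, y=2).meshgrid())
[{'x': 0, 'y': 0}, {'x': 0, 'y': 1}, {'x': 1, 'y': 0}, {'x': 1, 'y': 1}]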
def only(self, dims: DimFilter, reorder=False)
-
Builds a new shape from this one that only contains the given dimensions. Dimensions in `dims` that are not part of this Shape are ignored.
The complementary operation is `Shape.without()`.
Args
dims - comma-separated dimension names (str) or instance of dimensions (tuple, list, Shape) or filter function.
reorder - If `False`, keeps the dimension order as defined in this shape. If `True`, reorders the dimensions of this shape to match the order of `dims`.
Returns
Shape containing only specified dimensions.
Expand source code
def only(self, dims: 'DimFilter', reorder=False): """ Builds a new shape from this one that only contains the given dimensions. Dimensions in `dims` that are not part of this Shape are ignored. The complementary operation is :func:`Shape.without`. Args: dims: comma-separated dimension names (str) or instance of dimensions (tuple, list, Shape) or filter function. reorder: If `False`, keeps the dimension order as defined in this shape. If `True`, reorders the dimensions of this shape to match the order of `dims`. Returns: Shape containing only specified dimensions """ if dims is None: # keep none return EMPTY_SHAPE if callable(dims): dims = dims(self) if isinstance(dims, str): dims = parse_dim_order(dims) if isinstance(dims, Shape): dims = dims.names if not isinstance(dims, (tuple, list, set)): raise ValueError(dims) if reorder: return self[[self.names.index(d) for d in dims if d in self.names]] else: return self[[i for i in range(self.rank) if self.names[i] in dims]]
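A hedged sketch, assuming the dimension-type constructors can also serve as filter functions (per the `DimFilter` argument type):

>>> from phi.math import batch, spatial, channel
>>> s = batch(b=4) & spatial(x=8, y=8) & channel(vector='x,y')
>>> s.only('x,y').names    # -> ('x', 'y')
>>> s.only(spatial).names  # filter function -> ('x', 'y')
>>> s.without('b').names   # complementary operation -> ('x', 'y', 'vector')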
def prepare_gather(self, dim: str, selection)
-
Expand source code
def prepare_gather(self, dim: str, selection): if isinstance(selection, Shape): selection = selection.name if selection.rank == 1 else selection.names if isinstance(selection, str) and ',' in selection: selection = parse_dim_order(selection) if isinstance(selection, str): # single item name item_names = self.get_item_names(dim, fallback_spatial=True) assert item_names is not None, f"No item names defined for dim '{dim}' in tensor {self.shape} and dimension size does not match spatial rank." assert selection in item_names, f"Accessing tensor.{dim}['{selection}'] failed. Item names are {item_names}." selection = item_names.index(selection) if isinstance(selection, (tuple, list)): selection = list(selection) if any([isinstance(s, str) for s in selection]): item_names = self.get_item_names(dim, fallback_spatial=True) for i, s in enumerate(selection): if isinstance(s, str): assert item_names is not None, f"Accessing tensor.{dim}['{s}'] failed because no item names are present on tensor {self.shape}" assert s in item_names, f"Accessing tensor.{dim}['{s}'] failed. Item names are {item_names}." selection[i] = item_names.index(s) if not selection: # empty selection = slice(0, 0) return selection
def replace(self, dims: Union[ForwardRef('Shape'), tuple, list, str], new: Shape) ‑> phi.math._shape.Shape
-
Returns a copy of `self` with `dims` replaced by `new`. Dimensions that are not present in `self` are ignored.
The dimension order is preserved.
Args
dims - Dimensions to replace.
new - New dimensions, must have same length as `dims`. If a `Shape` is given, replaces the dimension types and item names as well.
Returns
`Shape` with same rank and dimension order as `self`.
Expand source code
def replace(self, dims: Union['Shape', str, tuple, list], new: 'Shape') -> 'Shape': """ Returns a copy of `self` with `dims` replaced by `new`. Dimensions that are not present in `self` are ignored. The dimension order is preserved. Args: dims: Dimensions to replace. new: New dimensions, must have same length as `dims`. If a `Shape` is given, replaces the dimension types and item names as well. Returns: `Shape` with same rank and dimension order as `self`. """ dims = parse_dim_order(dims) assert isinstance(new, Shape), f"new must be a Shape but got {new}" names = list(self.names) sizes = list(self.sizes) types = list(self.types) item_names = list(self.item_names) if len(new) > len(dims): # Put all in one spot assert len(dims) == 1, "Cannot replace 2+ dims by more replacements" index = self.index(dims[0]) return concat_shapes(self[:index], new, self[index+1:]) for old_name, new_dim in zip(dims, new): if old_name in self: names[self.index(old_name)] = new_dim.name types[self.index(old_name)] = new_dim.type item_names[self.index(old_name)] = new_dim.item_names[0] sizes[self.index(old_name)] = new_dim.size replaced = Shape(tuple(sizes), tuple(names), tuple(types), tuple(item_names)) if len(new) == len(dims): return replaced to_remove = dims[-(len(dims) - len(new)):] return replaced.without(to_remove)
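For illustration, replacing a spatial dimension by a channel dimension of a different size:

>>> from phi.math import spatial, channel
>>> s = spatial(x=8, y=8)
>>> s2 = s.replace('x', channel(c=3))
>>> s2.names          # order preserved -> ('c', 'y')
>>> s2.get_size('c')  # -> 3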
def unstack(self, dim='dims') ‑> Tuple[phi.math._shape.Shape]
-
Slices this `Shape` along a dimension. The dimension listing the sizes of the shape is referred to as `'dims'`.
Non-uniform tensor shapes may be unstacked along other dimensions as well, see https://tum-pbs.github.io/PhiFlow/Math.html#non-uniform-tensors
Args
dim - dimension to unstack
Returns
Slices of this shape.
Expand source code
def unstack(self, dim='dims') -> Tuple['Shape']: """ Slices this `Shape` along a dimension. The dimension listing the sizes of the shape is referred to as `'dims'`. Non-uniform tensor shapes may be unstacked along other dimensions as well, see https://tum-pbs.github.io/PhiFlow/Math.html#non-uniform-tensors Args: dim: dimension to unstack Returns: slices of this shape """ if dim == 'dims': return tuple(Shape((self.sizes[i],), (self.names[i],), (self.types[i],), (self.item_names[i],)) for i in range(self.rank)) if dim not in self and self.is_uniform: return tuple([self]) from ._tensors import Tensor if dim in self: inner = self.without(dim) dim_size = self.get_size(dim) else: inner = self dim_size = self.shape.get_size(dim) sizes = [] for size in inner.sizes: if isinstance(size, Tensor) and dim in size.shape: sizes.append(size.unstack(dim)) dim_size = size.shape.get_size(dim) else: sizes.append(size) assert isinstance(dim_size, int) shapes = tuple(Shape(tuple([int(size[i]) if isinstance(size, tuple) else size for size in sizes]), inner.names, inner.types, inner.item_names) for i in range(dim_size)) return shapes
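Unstacking along the default `'dims'` dimension yields one single-dimension `Shape` per dimension:

>>> from phi.math import spatial
>>> for dim in spatial(x=4, y=3).unstack():
...     print(dim.name, dim.size)
x 4
y 3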
def with_dim_size(self, dim: Union[str, ForwardRef('Shape')], size: Union[int, ForwardRef('math.Tensor'), str, tuple, list], keep_item_names=True)
-
Returns a new `Shape` that has a different size for `dim`.
Args
dim - Dimension for which to replace the size, `Shape` or `str`.
size - New size, `int` or `Tensor`.
Returns
`Shape` with same names and types as `self`.
Expand source code
def with_dim_size(self, dim: Union[str, 'Shape'], size: Union[int, 'math.Tensor', str, tuple, list], keep_item_names=True): """ Returns a new `Shape` that has a different size for `dim`. Args: dim: Dimension for which to replace the size, `Shape` or `str`. size: New size, `int` or `Tensor` Returns: `Shape` with same names and types as `self`. """ if isinstance(dim, Shape): dim = dim.name assert isinstance(dim, str) new_size, new_item_names = Shape._size_and_item_names_from_obj(size, self.get_size(dim), self.get_item_names(dim), keep_item_names) return self.replace(dim, Shape((new_size,), (dim,), (self.get_type(dim),), (new_item_names,)))
def with_size(self, size: Union[int, Tuple[str, ...]])
-
Only for single-dimension shapes. Returns a `Shape` representing this dimension but with a different size.
See Also: `Shape.with_sizes()`.
Args
size - Replacement size for this dimension.
Returns
`Shape`
Expand source code
def with_size(self, size: Union[int, Tuple[str, ...]]): """ Only for single-dimension shapes. Returns a `Shape` representing this dimension but with a different size. See Also: `Shape.with_sizes()`. Args: size: Replacement size for this dimension. Returns: `Shape` """ assert self.rank == 1, "Shape.with_size() is only defined for shapes of rank 1." return self.with_sizes([size])
def with_sizes(self, sizes: Union[Sequence[int], Sequence[Tuple[str, ...]], ForwardRef('Shape'), int], keep_item_names=True)
-
Returns a new `Shape` matching the dimension names and types of `self` but with different sizes.
See Also: `Shape.with_size()`.
Args
sizes - One of:
- `tuple` / `list` of same length as `self` containing replacement sizes or replacement item names.
- `Shape` of any rank. Replaces sizes for dimensions shared by `sizes` and `self`.
- `int`: new size for all dimensions.
keep_item_names - If `False`, forgets all item names. If `True`, keeps item names where the size does not change.
Returns
`Shape` with same names and types as `self`.
Expand source code
def with_sizes(self, sizes: Union[Sequence[int], Sequence[Tuple[str, ...]], 'Shape', int], keep_item_names=True): """ Returns a new `Shape` matching the dimension names and types of `self` but with different sizes. See Also: `Shape.with_size()`. Args: sizes: One of * `tuple` / `list` of same length as `self` containing replacement sizes or replacement item names. * `Shape` of any rank. Replaces sizes for dimensions shared by `sizes` and `self`. * `int`: new size for all dimensions keep_item_names: If `False`, forgets all item names. If `True`, keeps item names where the size does not change. Returns: `Shape` with same names and types as `self`. """ if isinstance(sizes, int): sizes = [sizes] * len(self.sizes) if isinstance(sizes, Shape): item_names = [sizes.get_item_names(dim) if dim in sizes else self.get_item_names(dim) for dim in self.names] sizes = [sizes.get_size(dim) if dim in sizes else s for dim, s in self._named_sizes] return Shape(tuple(sizes), self.names, self.types, tuple(item_names)) else: assert len(sizes) == len(self.sizes), f"Cannot create shape from {self} with sizes {sizes}" sizes_ = [] item_names = [] for i, obj in enumerate(sizes): new_size, new_item_names = Shape._size_and_item_names_from_obj(obj, self.sizes[i], self.item_names[i], keep_item_names) sizes_.append(new_size) item_names.append(new_item_names) return Shape(tuple(sizes_), self.names, self.types, tuple(item_names))
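For illustration:

>>> from phi.math import spatial
>>> s = spatial(x=64, y=32)
>>> s.with_sizes([128, 128]).sizes  # positional replacement -> (128, 128)
>>> s.with_sizes(1).sizes           # single int for all dims -> (1, 1)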
def without(self, dims: DimFilter) ‑> phi.math._shape.Shape
-
Builds a new shape from this one that is missing all given dimensions. Dimensions in `dims` that are not part of this Shape are ignored.
The complementary operation is `Shape.only()`.
Args
dims - Dimensions to exclude as `str`, `tuple`, `list` or `Shape`. Dimensions that are not included in this shape are ignored.
Returns
Shape without specified dimensions.
Expand source code
def without(self, dims: 'DimFilter') -> 'Shape': """ Builds a new shape from this one that is missing all given dimensions. Dimensions in `dims` that are not part of this Shape are ignored. The complementary operation is `Shape.only()`. Args: dims: Single dimension (str) or instance of dimensions (tuple, list, Shape) dims: Dimensions to exclude as `str` or `tuple` or `list` or `Shape`. Dimensions that are not included in this shape are ignored. Returns: Shape without specified dimensions """ if callable(dims): dims = dims(self) if isinstance(dims, str): dims = parse_dim_order(dims) if isinstance(dims, (tuple, list, set)): return self[[i for i in range(self.rank) if self.names[i] not in dims]] elif isinstance(dims, Shape): return self[[i for i in range(self.rank) if self.names[i] not in dims.names]] elif dims is None: # subtract none return self else: raise ValueError(dims)
def without_sizes(self)
-
Returns
`Shape` with all sizes undefined (`None`).
Expand source code
def without_sizes(self): """ Returns: `Shape` with all sizes undefined (`None`) """ return Shape((None,) * self.rank, self.names, self.types, (None,) * self.rank)
class Solve (method: Optional[str] = 'auto', rel_tol: Union[phi.math._tensors.Tensor, float] = None, abs_tol: Union[phi.math._tensors.Tensor, float] = None, x0: Union[~X, Any] = None, max_iterations: Union[int, phi.math._tensors.Tensor] = 1000, suppress: Union[tuple, list] = (), preprocess_y: Callable = None, preprocess_y_args: tuple = (), preconditioner: Optional[str] = None, gradient_solve: Optional[ForwardRef('Solve[Y, X]')] = None)
-
Specifies parameters and stopping criteria for solving a minimization problem or system of equations.
Expand source code
class Solve(Generic[X, Y]): """ Specifies parameters and stopping criteria for solving a minimization problem or system of equations. """ def __init__(self, method: Union[str, None] = 'auto', rel_tol: Union[float, Tensor] = None, abs_tol: Union[float, Tensor] = None, x0: Union[X, Any] = None, max_iterations: Union[int, Tensor] = 1000, suppress: Union[tuple, list] = (), preprocess_y: Callable = None, preprocess_y_args: tuple = (), preconditioner: Optional[str] = None, gradient_solve: Union['Solve[Y, X]', None] = None): method = method or 'auto' assert isinstance(method, str) self.method: str = method """ Optimization method to use. Available solvers depend on the solve function that is used to perform the solve. """ self.rel_tol: Tensor = math.to_float(wrap(rel_tol)) if rel_tol is not None else None """Relative tolerance for linear solves only, defaults to 1e-5 for singe precision solves and 1e-12 for double precision solves. This must be unset or `0` for minimization problems. For systems of equations *f(x)=y*, the final tolerance is `max(rel_tol * norm(y), abs_tol)`. """ self.abs_tol: Tensor = math.to_float(wrap(abs_tol)) if abs_tol is not None else None """ Absolut tolerance for optimization problems and linear solves. Defaults to 1e-5 for singe precision solves and 1e-12 for double precision solves. For systems of equations *f(x)=y*, the final tolerance is `max(rel_tol * norm(y), abs_tol)`. """ self.max_iterations: Tensor = math.to_int32(wrap(max_iterations)) """ Maximum number of iterations to perform before raising a `NotConverged` error is raised. """ self.x0 = x0 """ Initial guess for the method, of same type and dimensionality as the solve result. This property must be set to a value compatible with the solution `x` before running a method. """ self.preprocess_y: Callable = preprocess_y """ Function to be applied to the right-hand-side vector of an equation system before solving the system. This property is propagated to gradient solves by default. """ self.preprocess_y_args: tuple = preprocess_y_args assert all(issubclass(err, ConvergenceException) for err in suppress) self.suppress: tuple = tuple(suppress) """ Error types to suppress; `tuple` of `ConvergenceException` types. For these errors, the solve function will instead return the partial result without raising the error. """ self.preconditioner = preconditioner self._gradient_solve: Solve[Y, X] = gradient_solve self.id = str(uuid.uuid4()) # not altered by copy_with(), so that the lookup SolveTape[Solve] works after solve has been copied @property def gradient_solve(self) -> 'Solve[Y, X]': """ Parameters to use for the gradient pass when an implicit gradient is computed. If `None`, a duplicate of this `Solve` is created for the gradient solve. In any case, the gradient solve information will be stored in `gradient_solve.result`. 
""" if self._gradient_solve is None: self._gradient_solve = Solve(self.method, self.rel_tol, self.abs_tol, None, self.max_iterations, self.suppress, self.preprocess_y, self.preprocess_y_args) return self._gradient_solve def __repr__(self): return f"{self.method} with tolerance {self.rel_tol} (rel), {self.abs_tol} (abs), max_iterations={self.max_iterations}" + (" including preprocessing" if self.preprocess_y else "") def __eq__(self, other): if not isinstance(other, Solve): return False if self.method != other.method \ or (self.abs_tol != other.abs_tol).any \ or (self.rel_tol != other.rel_tol).any \ or (self.max_iterations != other.max_iterations).any \ or self.preprocess_y is not other.preprocess_y \ or self.suppress != other.suppress: return False return self.x0 == other.x0 def __variable_attrs__(self): return 'x0', 'preprocess_y_args' def with_defaults(self, mode: str): assert mode in ('solve', 'optimization') result = self if result.rel_tol is None: result = copy_with(result, rel_tol=_default_tolerance() if mode == 'solve' else wrap(0.)) if result.abs_tol is None: result = copy_with(result, abs_tol=_default_tolerance()) return result def with_preprocessing(self, preprocess_y: Callable, *args) -> 'Solve': """ Adds preprocessing to this `Solve` and all corresponding gradient solves. Args: preprocess_y: Preprocessing function. *args: Arguments for the preprocessing function. Returns: Copy of this `Solve` with given preprocessing. """ assert self.preprocess_y is None, f"preprocessing for linear solve '{self}' already set" gradient_solve = self._gradient_solve.with_preprocessing(preprocess_y, *args) if self._gradient_solve is not None else None return copy_with(self, preprocess_y=preprocess_y, preprocess_y_args=args, _gradient_solve=gradient_solve)
Ancestors
- typing.Generic
Instance variables
var abs_tol
-
Absolute tolerance for optimization problems and linear solves. Defaults to 1e-5 for single precision solves and 1e-12 for double precision solves. For systems of equations f(x)=y, the final tolerance is `max(rel_tol * norm(y), abs_tol)`.
var gradient_solve : phi.math._optimize.Solve[~Y, ~X]
-
Parameters to use for the gradient pass when an implicit gradient is computed. If `None`, a duplicate of this `Solve` is created for the gradient solve.
In any case, the gradient solve information will be stored in `gradient_solve.result`.
Expand source code
@property def gradient_solve(self) -> 'Solve[Y, X]': """ Parameters to use for the gradient pass when an implicit gradient is computed. If `None`, a duplicate of this `Solve` is created for the gradient solve. In any case, the gradient solve information will be stored in `gradient_solve.result`. """ if self._gradient_solve is None: self._gradient_solve = Solve(self.method, self.rel_tol, self.abs_tol, None, self.max_iterations, self.suppress, self.preprocess_y, self.preprocess_y_args) return self._gradient_solve
var max_iterations
-
Maximum number of iterations to perform before a `NotConverged` error is raised.
var method
-
Optimization method to use. Available solvers depend on the solve function that is used to perform the solve.
var preprocess_y
-
Function to be applied to the right-hand-side vector of an equation system before solving the system. This property is propagated to gradient solves by default.
var rel_tol
-
Relative tolerance for linear solves only, defaults to 1e-5 for single precision solves and 1e-12 for double precision solves. This must be unset or `0` for minimization problems. For systems of equations f(x)=y, the final tolerance is `max(rel_tol * norm(y), abs_tol)`.
var suppress
-
Error types to suppress; `tuple` of `ConvergenceException` types. For these errors, the solve function will instead return the partial result without raising the error.
var x0
-
Initial guess for the method, of same type and dimensionality as the solve result. This property must be set to a value compatible with the solution `x` before running a method.
Methods
def with_defaults(self, mode: str)
-
Expand source code
def with_defaults(self, mode: str): assert mode in ('solve', 'optimization') result = self if result.rel_tol is None: result = copy_with(result, rel_tol=_default_tolerance() if mode == 'solve' else wrap(0.)) if result.abs_tol is None: result = copy_with(result, abs_tol=_default_tolerance()) return result
def with_preprocessing(self, preprocess_y: Callable, *args) ‑> phi.math._optimize.Solve
-
Adds preprocessing to this `Solve` and all corresponding gradient solves.
preprocess_y
- Preprocessing function.
*args
- Arguments for the preprocessing function.
Returns
Copy of this `Solve` with given preprocessing.
Expand source code
def with_preprocessing(self, preprocess_y: Callable, *args) -> 'Solve': """ Adds preprocessing to this `Solve` and all corresponding gradient solves. Args: preprocess_y: Preprocessing function. *args: Arguments for the preprocessing function. Returns: Copy of this `Solve` with given preprocessing. """ assert self.preprocess_y is None, f"preprocessing for linear solve '{self}' already set" gradient_solve = self._gradient_solve.with_preprocessing(preprocess_y, *args) if self._gradient_solve is not None else None return copy_with(self, preprocess_y=preprocess_y, preprocess_y_args=args, _gradient_solve=gradient_solve)
class SolveInfo
-
Stores information about the solution or trajectory of a solve.
When representing the full optimization trajectory, all tracked quantities will have an additional `trajectory` batch dimension.
Expand source code
class SolveInfo(Generic[X, Y]): """ Stores information about the solution or trajectory of a solve. When representing the full optimization trajectory, all tracked quantities will have an additional `trajectory` batch dimension. """ def __init__(self, solve: Solve, x: X, residual: Union[Y, None], iterations: Union[Tensor, None], function_evaluations: Union[Tensor, None], converged: Tensor, diverged: Tensor, method: str, msg: Tensor, solve_time: float): # tuple.__new__(SolveInfo, (x, residual, iterations, function_evaluations, converged, diverged)) self.solve: Solve[X, Y] = solve """ `Solve`, Parameters specified for the solve. """ self.x: X = x """ `Tensor` or `phi.math.magic.PhiTreeNode`, solution estimate. """ self.residual: Y = residual """ `Tensor` or `phi.math.magic.PhiTreeNode`, residual vector for systems of equations or function value for minimization problems. """ self.iterations: Tensor = iterations """ `Tensor`, number of performed iterations to reach this state. """ self.function_evaluations: Tensor = function_evaluations """ `Tensor`, how often the function (or its gradient function) was called. """ self.converged: Tensor = converged """ `Tensor`, whether the residual is within the specified tolerance. """ self.diverged: Tensor = diverged """ `Tensor`, whether the solve has diverged at this point. """ self.method = method """ `str`, which method and implementation that was used. """ if all_available(diverged, converged, iterations): msg = math.map_(_default_solve_info_msg, msg, converged.trajectory[-1], diverged.trajectory[-1], iterations.trajectory[-1], solve=solve, method=method, residual=residual) self.msg = msg """ `str`, termination message """ self.solve_time = solve_time """ Time spent in Backend solve function (in seconds) """ def __repr__(self): return f"{self.method}: {self.converged.trajectory[-1].sum} converged, {self.diverged.trajectory[-1].sum} diverged" def snapshot(self, index): return SolveInfo(self.solve, self.x.trajectory[index], self.residual.trajectory[index], self.iterations.trajectory[index], self.function_evaluations.trajectory[index], self.converged.trajectory[index], self.diverged.trajectory[index], self.method, self.msg, self.solve_time) def convergence_check(self, only_warn: bool): if not all_available(self.diverged, self.converged): return if self.diverged.any: if Diverged not in self.solve.suppress: if only_warn: warnings.warn(self.msg, ConvergenceWarning) else: raise Diverged(self) if not self.converged.trajectory[-1].all: if NotConverged not in self.solve.suppress: if only_warn: warnings.warn(self.msg, ConvergenceWarning) else: raise NotConverged(self)
Ancestors
- typing.Generic
Instance variables
var converged
-
`Tensor`, whether the residual is within the specified tolerance.
var diverged
-
`Tensor`, whether the solve has diverged at this point.
var function_evaluations
-
`Tensor`, how often the function (or its gradient function) was called.
var iterations
-
`Tensor`, number of performed iterations to reach this state.
var method
-
`str`, which method and implementation was used.
var msg
-
`str`, termination message.
var residual
-
`Tensor` or `PhiTreeNode`, residual vector for systems of equations or function value for minimization problems.
var solve
-
`Solve`, parameters specified for the solve.
var solve_time
-
Time spent in Backend solve function (in seconds).
var x
-
`Tensor` or `PhiTreeNode`, solution estimate.
Methods
def convergence_check(self, only_warn: bool)
-
Expand source code
def convergence_check(self, only_warn: bool): if not all_available(self.diverged, self.converged): return if self.diverged.any: if Diverged not in self.solve.suppress: if only_warn: warnings.warn(self.msg, ConvergenceWarning) else: raise Diverged(self) if not self.converged.trajectory[-1].all: if NotConverged not in self.solve.suppress: if only_warn: warnings.warn(self.msg, ConvergenceWarning) else: raise NotConverged(self)
def snapshot(self, index)
-
Expand source code
def snapshot(self, index): return SolveInfo(self.solve, self.x.trajectory[index], self.residual.trajectory[index], self.iterations.trajectory[index], self.function_evaluations.trajectory[index], self.converged.trajectory[index], self.diverged.trajectory[index], self.method, self.msg, self.solve_time)
class SolveTape (*solves: phi.math._optimize.Solve, record_trajectories=False)
-
Used to record additional information about solves invoked via `solve_linear()`, `solve_nonlinear()` or `minimize()`. While a `SolveTape` is active, certain performance optimizations and algorithm implementations may be disabled.
To access a `SolveInfo` of a recorded solve, use
>>> solve = Solve(method, ...)
>>> with SolveTape() as solves:
>>>     x = math.solve_linear(f, y, solve)
>>> result: SolveInfo = solves[solve]  # get by Solve
>>> result: SolveInfo = solves[0]      # get by index
Args
*solves - (Optional) Select specific solves to be recorded. If none are given, records all solves that occur within the scope of this `SolveTape`.
record_trajectories - When enabled, the entries of `SolveInfo` will contain an additional batch dimension named `trajectory`.
Expand source code
class SolveTape: """ Used to record additional information about solves invoked via `solve_linear()`, `solve_nonlinear()` or `minimize()`. While a `SolveTape` is active, certain performance optimizations and algorithm implementations may be disabled. To access a `SolveInfo` of a recorded solve, use >>> solve = Solve(method, ...) >>> with SolveTape() as solves: >>> x = math.solve_linear(f, y, solve) >>> result: SolveInfo = solves[solve] # get by Solve >>> result: SolveInfo = solves[0] # get by index """ def __init__(self, *solves: Solve, record_trajectories=False): """ Args: *solves: (Optional) Select specific `solves` to be recorded. If none is given, records all solves that occur within the scope of this `SolveTape`. record_trajectories: When enabled, the entries of `SolveInfo` will contain an additional batch dimension named `trajectory`. """ self.record_only_ids = [s.id for s in solves] self.record_trajectories = record_trajectories self.solves: List[SolveInfo] = [] def should_record_trajectory_for(self, solve: Solve): if not self.record_trajectories: return False if not self.record_only_ids: return True return solve.id in self.record_only_ids def __enter__(self): _SOLVE_TAPES.append(self) return self def __exit__(self, exc_type, exc_val, exc_tb): _SOLVE_TAPES.remove(self) def _add(self, solve: Solve, trj: bool, result: SolveInfo): if any(s.solve.id == solve.id for s in self.solves): warnings.warn("SolveTape contains two results for the same solve settings. SolveTape[solve] will return the first solve result.", RuntimeWarning) if self.record_only_ids and solve.id not in self.record_only_ids: return # this solve should not be recorded if self.record_trajectories: assert trj, "Solve did not record a trajectory." self.solves.append(result) elif trj: self.solves.append(result.snapshot(-1)) else: self.solves.append(result) def __getitem__(self, item) -> SolveInfo: if isinstance(item, int): return self.solves[item] else: assert isinstance(item, Solve) solves = [s for s in self.solves if s.solve.id == item.id] if len(solves) == 0: raise KeyError(f"No solve recorded with key '{item}'.") assert len(solves) == 1 return solves[0] def __iter__(self): return iter(self.solves) def __len__(self): return len(self.solves)
Methods
def should_record_trajectory_for(self, solve: phi.math._optimize.Solve)
-
Expand source code
def should_record_trajectory_for(self, solve: Solve):
    if not self.record_trajectories:
        return False
    if not self.record_only_ids:
        return True
    return solve.id in self.record_only_ids
class Tensor
-
Abstract base class to represent structured data of one data type. This class replaces the native tensor classes numpy.ndarray, torch.Tensor, tensorflow.Tensor or jax.numpy.ndarray as the main data container in ΦFlow.
Tensor instances are different from native tensors in two important ways:
- The dimensions of Tensors have names and types.
- Tensors can have non-uniform shapes, meaning that the size of dimensions can vary along other dimensions.
To check whether a value is a tensor, use isinstance(value, Tensor).
To construct a Tensor, use tensor(), wrap() or one of the basic tensor creation functions, see https://tum-pbs.github.io/PhiFlow/Math.html#tensor-creation .
Tensors are not editable. When backed by an editable native tensor, e.g. a numpy.ndarray, do not edit the underlying data structure.
Expand source code
class Tensor:
    """
    Abstract base class to represent structured data of one data type.
    This class replaces the native tensor classes `numpy.ndarray`, `torch.Tensor`, `tensorflow.Tensor` or `jax.numpy.ndarray` as the main data container in Φ<sub>Flow</sub>.

    `Tensor` instances are different from native tensors in two important ways:

    * The dimensions of Tensors have *names* and *types*.
    * Tensors can have non-uniform shapes, meaning that the size of dimensions can vary along other dimensions.

    To check whether a value is a tensor, use `isinstance(value, Tensor)`.

    To construct a Tensor, use `phi.math.tensor()`, `phi.math.wrap()` or one of the basic tensor creation functions,
    see https://tum-pbs.github.io/PhiFlow/Math.html#tensor-creation .

    Tensors are not editable.
    When backed by an editable native tensor, e.g. a `numpy.ndarray`, do not edit the underlying data structure.
    """

    def native(self, order: Union[str, tuple, list, Shape] = None, singleton_for_const=False):
        """
        Returns a native tensor object with the dimensions ordered according to `order`.

        Transposes the underlying tensor to match the name order and adds singleton dimensions for new dimension names.
        If a dimension of the tensor is not listed in `order`, a `ValueError` is raised.

        Args:
            order: (Optional) Order of dimension names as comma-separated string, list or `Shape`.
            singleton_for_const: If `True`, dimensions along which values are guaranteed to be constant will not be expanded to their true size but returned as singleton dimensions.

        Returns:
            Native tensor representation, such as PyTorch tensor or NumPy array.

        Raises:
            ValueError if the tensor cannot be transposed to match target_shape
        """
        raise NotImplementedError(self.__class__)

    def numpy(self, order: Union[str, tuple, list, Shape] = None) -> np.ndarray:
        """
        Converts this tensor to a `numpy.ndarray` with dimensions ordered according to `order`.

        *Note*: Using this function breaks the autograd chain. The returned tensor is not differentiable.
        To get a differentiable tensor, use `Tensor.native()` instead.

        Transposes the underlying tensor to match the name order and adds singleton dimensions for new dimension names.
        If a dimension of the tensor is not listed in `order`, a `ValueError` is raised.

        If this `Tensor` is backed by a NumPy array, a reference to this array may be returned.

        See Also:
            `phi.math.numpy()`

        Args:
            order: (Optional) Order of dimension names as comma-separated string, list or `Shape`.

        Returns:
            NumPy representation

        Raises:
            ValueError if the tensor cannot be transposed to match target_shape
        """
        native = self.native(order=order)
        return choose_backend(native).numpy(native)

    def __array__(self, dtype=None):  # NumPy conversion
        if self.rank > 1:
            warnings.warn("Automatic conversion of Φ-Flow tensors to NumPy can cause problems because the dimension order is not guaranteed.", SyntaxWarning, stacklevel=3)
        return self.numpy(self._shape)

    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):  # NumPy interface
        if len(inputs) != 2:
            return NotImplemented
        if ufunc.__name__ == 'multiply':
            if inputs[0] is self: return self._op2(inputs[1], lambda x, y: x * y, lambda x, y: choose_backend(x, y).mul(x, y), 'mul', '*')
            else: return self._op2(inputs[0], lambda x, y: y * x, lambda x, y: choose_backend(x, y).mul(y, x), 'rmul', '*')
        if ufunc.__name__ == 'add':
            if inputs[0] is self: return self._op2(inputs[1], lambda x, y: x + y, lambda x, y: choose_backend(x, y).add(x, y), 'add', '+')
            else: return self._op2(inputs[0], lambda x, y: y + x, lambda x, y: choose_backend(x, y).add(y, x), 'radd', '+')
        if ufunc.__name__ == 'subtract':
            if inputs[0] is self: return self._op2(inputs[1], lambda x, y: x - y, lambda x, y: choose_backend(x, y).sub(x, y), 'add', '-')
            else: return self._op2(inputs[0], lambda x, y: y - x, lambda x, y: choose_backend(x, y).sub(y, x), 'rsub', '-')
        if ufunc.__name__ in ['divide', 'true_divide']:
            if inputs[0] is self: return self._op2(inputs[1], lambda x, y: x / y, lambda x, y: choose_backend(x, y).div(x, y), 'true_divide', '/')
            else: return self._op2(inputs[0], lambda x, y: y / x, lambda x, y: choose_backend(x, y).div(y, x), 'r_true_divide', '/')
        if ufunc.__name__ == 'floor_divide':
            if inputs[0] is self: return self._op2(inputs[1], lambda x, y: x // y, lambda x, y: choose_backend(x, y).floordiv(x, y), 'floor_divide', '//')
            else: return self._op2(inputs[0], lambda x, y: y // x, lambda x, y: choose_backend(x, y).floordiv(y, x), 'r_floor_divide', '//')
        if ufunc.__name__ == 'remainder':
            if inputs[0] is self: return self._op2(inputs[1], lambda x, y: x % y, lambda x, y: choose_backend(x, y).mod(x, y), 'remainder', '%')
            else: return self._op2(inputs[0], lambda x, y: y % x, lambda x, y: choose_backend(x, y).mod(y, x), 'r_remainder', '%')
        if ufunc.__name__ == 'power':
            if inputs[0] is self: return self._op2(inputs[1], lambda x, y: x ** y, lambda x, y: choose_backend(x, y).pow(x, y), 'power', '**')
            else: return self._op2(inputs[0], lambda x, y: y ** x, lambda x, y: choose_backend(x, y).pow(y, x), 'r_power', '**')
        if ufunc.__name__ == 'equal':
            if _EQUALITY_BY_REF:
                return wrap(inputs[0] is inputs[1])
            if inputs[0] is self: return self._op2(inputs[1], lambda x, y: x == y, lambda x, y: choose_backend(x, y).equal(x, y), 'equal', '==')
            else: return self._op2(inputs[0], lambda x, y: y == x, lambda x, y: choose_backend(x, y).equal(y, x), 'r_equal', '==')
        if ufunc.__name__ == 'not_equal':
            if _EQUALITY_BY_REF:
                return wrap(inputs[0] is not inputs[1])
            if inputs[0] is self: return self._op2(inputs[1], lambda x, y: x != y, lambda x, y: choose_backend(x, y).not_equal(x, y), 'equal', '!=')
            else: return self._op2(inputs[0], lambda x, y: y != x, lambda x, y: choose_backend(x, y).not_equal(y, x), 'r_equal', '!=')
        if ufunc.__name__ == 'greater':
            if inputs[0] is self: return self._op2(inputs[1], lambda x, y: x > y, lambda x, y: choose_backend(x, y).greater_than(x, y), 'greater', '>')
            else: return self._op2(inputs[0], lambda x, y: y > x, lambda x, y: choose_backend(x, y).greater_than(y, x), 'r_greater', '>')
        if ufunc.__name__ == 'greater_equal':
            if inputs[0] is self: return self._op2(inputs[1], lambda x, y: x >= y, lambda x, y: choose_backend(x, y).greater_or_equal(x, y), 'greater_equal', '>=')
            else: return self._op2(inputs[0], lambda x, y: y >= x, lambda x, y: choose_backend(x, y).greater_or_equal(y, x), 'r_greater_equal', '>=')
        if ufunc.__name__ == 'less':
            if inputs[0] is self: return self._op2(inputs[1], lambda x, y: x < y, lambda x, y: choose_backend(x, y).greater_than(y, x), 'less', '<')
            else: return self._op2(inputs[0], lambda x, y: y < x, lambda x, y: choose_backend(x, y).greater_than(x, y), 'r_less', '<')
        if ufunc.__name__ == 'less_equal':
            if inputs[0] is self: return self._op2(inputs[1], lambda x, y: x <= y, lambda x, y: choose_backend(x, y).greater_or_equal(y, x), 'less_equal', '<=')
            else: return self._op2(inputs[0], lambda x, y: y <= x, lambda x, y: choose_backend(x, y).greater_or_equal(x, y), 'r_less_equal', '<=')
        if ufunc.__name__ == 'left_shift':
            if inputs[0] is self: return self._op2(inputs[1], lambda x, y: x << y, lambda x, y: choose_backend(x, y).shift_bits_left(x, y), 'left_shift', '<<')
            else: return self._op2(inputs[0], lambda x, y: y << x, lambda x, y: choose_backend(x, y).shift_bits_left(y, x), 'r_left_shift', '<<')
        if ufunc.__name__ == 'right_shift':
            if inputs[0] is self: return self._op2(inputs[1], lambda x, y: x >> y, lambda x, y: choose_backend(x, y).shift_bits_right(x, y), 'right_shift', '>>')
            else: return self._op2(inputs[0], lambda x, y: y >> x, lambda x, y: choose_backend(x, y).shift_bits_right(y, x), 'r_right_shift', '>>')
        raise NotImplementedError(f"NumPy function '{ufunc.__name__}' is not compatible with Φ-Flow tensors.")

    @property
    def dtype(self) -> DType:
        """ Data type of the elements of this `Tensor`. """
        raise NotImplementedError()

    @property
    def shape(self) -> Shape:
        """ The `Shape` lists the dimensions with their sizes, names and types. """
        raise NotImplementedError()

    @property
    def default_backend(self) -> Backend:
        from ._ops import choose_backend_t
        return choose_backend_t(self)

    def _with_shape_replaced(self, new_shape: Shape):
        raise NotImplementedError()

    def _with_natives_replaced(self, natives: list):
        """ Replaces all n _natives() of this Tensor with the first n elements of the list and removes them from the list. """
        raise NotImplementedError()

    @property
    def rank(self) -> int:
        """
        Number of explicit dimensions of this `Tensor`. Equal to `tensor.shape.rank`.
        This replaces [`numpy.ndarray.ndim`](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.ndim.html) /
        [`torch.Tensor.dim`](https://pytorch.org/docs/master/generated/torch.Tensor.dim.html) /
        [`tf.rank()`](https://www.tensorflow.org/api_docs/python/tf/rank) /
        [`jax.numpy.ndim()`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.ndim.html).
        """
        return self.shape.rank

    @property
    def _is_tracer(self) -> bool:
        """
        Tracers store additional internal information.
        They should not be converted to `native()` in intermediate operations.

        TensorStack prevents performing the actual stack operation if one of its component tensors is special.
        """
        raise NotImplementedError(self.__class__)

    def _to_dict(self):
        return cached(self)._to_dict()

    def __len__(self):
        return self.shape.volume if self.rank == 1 else NotImplemented

    def __bool__(self):
        assert self.rank == 0, f"Cannot convert tensor with non-empty shape {self.shape} to bool. Use tensor.any or tensor.all instead."
        from ._ops import all_
        if not self.default_backend.supports(Backend.jit_compile):  # NumPy
            return bool(self.native()) if self.rank == 0 else bool(all_(self).native())
        else:
            # __bool__ does not work with TensorFlow tracing.
            # TensorFlow needs to see a tf.Tensor in loop conditions but won't allow bool() invocations.
            # However, this function must always return a Python bool.
            raise AssertionError("To evaluate the boolean value of a Tensor, use 'Tensor.all'.")

    @property
    def all(self):
        """ Whether all values of this `Tensor` are `True` as a native bool. """
        from ._ops import all_, cast
        if self.rank == 0:
            return cast(self, DType(bool)).native()
        else:
            return all_(self, dim=self.shape).native()

    @property
    def any(self):
        """ Whether this `Tensor` contains a `True` value as a native bool. """
        from ._ops import any_, cast
        if self.rank == 0:
            return cast(self, DType(bool)).native()
        else:
            return any_(self, dim=self.shape).native()

    @property
    def mean(self):
        """ Mean value of this `Tensor` as a native scalar. """
        from ._ops import mean
        return mean(self, dim=self.shape).native()

    @property
    def finite_mean(self):
        """ Mean value of all finite values in this `Tensor` as a native scalar. """
        from ._ops import finite_mean
        return finite_mean(self, dim=self.shape).native()

    @property
    def std(self):
        """ Standard deviation of this `Tensor` as a native scalar. """
        from ._ops import std
        return std(self, dim=self.shape).native()

    @property
    def sum(self):
        """ Sum of all values of this `Tensor` as a native scalar. """
        from ._ops import sum_
        return sum_(self, dim=self.shape).native()

    @property
    def finite_sum(self):
        """ Sum of all finite values of this `Tensor` as a native scalar. """
        from ._ops import finite_sum
        return finite_sum(self, dim=self.shape).native()

    @property
    def min(self):
        """ Minimum value of this `Tensor` as a native scalar. """
        from ._ops import min_
        return min_(self, dim=self.shape).native()

    @property
    def finite_min(self):
        """ Minimum finite value of this `Tensor` as a native scalar. """
        from ._ops import finite_min
        return finite_min(self, dim=self.shape).native()

    @property
    def max(self):
        """ Maximum value of this `Tensor` as a native scalar. """
        from ._ops import max_
        return max_(self, dim=self.shape).native()

    @property
    def finite_max(self):
        """ Maximum finite value of this `Tensor` as a native scalar. """
        from ._ops import finite_max
        return finite_max(self, dim=self.shape).native()

    @property
    def real(self) -> 'Tensor':
        """
        Returns the real part of this tensor.

        See Also:
            `phi.math.real()`
        """
        from ._ops import real
        return real(self)

    @property
    def imag(self) -> 'Tensor':
        """
        Returns the imaginary part of this tensor.
        If this tensor does not store complex numbers, returns a zero tensor with the same shape and dtype as this tensor.

        See Also:
            `phi.math.imag()`
        """
        from ._ops import imag
        return imag(self)

    @property
    def available(self) -> bool:
        """
        A tensor is available if it stores concrete values and these can currently be read.

        Tracers used inside jit compilation are typically not available.

        See Also:
            `phi.math.jit_compile()`.
        """
        if self._is_tracer:
            return False
        natives = self._natives()
        natives_available = [choose_backend(native).is_available(native) for native in natives]
        return all(natives_available)

    @property
    def device(self) -> Union[ComputeDevice, None]:
        """
        Returns the `ComputeDevice` that this tensor is allocated on.
        The device belongs to this tensor's `default_backend`.

        See Also:
            `Tensor.default_backend`.
        """
        natives = self._natives()
        if not natives:
            return None
        return self.default_backend.get_device(natives[0])

    def __int__(self): return int(self.native()) if self.shape.volume == 1 else NotImplemented

    def __float__(self): return float(self.native()) if self.shape.volume == 1 else NotImplemented

    def __complex__(self): return complex(self.native()) if self.shape.volume == 1 else NotImplemented

    def __index__(self):
        assert self.shape.volume == 1, f"Only scalar tensors can be converted to index but has shape {self.shape}"
        assert self.dtype.kind == int, f"Only int tensors can be converted to index but dtype is {self.dtype}"
        return int(self.native())

    def __repr__(self): return format_tensor(self, PrintOptions())

    def _repr_pretty_(self, printer, cycle): printer.text(format_tensor(self, PrintOptions(colors=DEFAULT_COLORS)))

    def __format__(self, format_spec: str):
        if BROADCAST_FORMATTER.values is not None:
            return BROADCAST_FORMATTER.register_formatted(self, format_spec)
        specs = format_spec.split(':')
        layout_ = 'auto'
        for possible_layout in ['summary', 'full', 'row', 'numpy']:
            if possible_layout in specs:
                assert layout_ == 'auto', f"Two layout identifiers encountered in '{format_spec}'"
                layout_ = possible_layout
        include_shape = 'shape' in specs or (False if 'no-shape' in specs else None)
        include_dtype = 'dtype' in specs or (False if 'no-dtype' in specs else None)
        color = 'color' in specs or (False if 'no-color' in specs else None)
        threshold = 8
        float_format = None
        for spec in specs:
            if spec.startswith('threshold='):
                threshold = int(spec[len('threshold='):])
            elif '.' in spec:
                float_format = spec
        result = format_tensor(self, PrintOptions(layout_, float_format, threshold, color, include_shape, include_dtype))
        return result

    def __getitem__(self, item) -> 'Tensor':
        if isinstance(item, Tensor):
            from ._ops import gather
            return gather(self, item)
        item = slicing_dict(self, item)
        selections = {}
        sliced = self
        for dim, selection in item.items():
            if dim not in self.shape:
                continue
            selection = self.shape.prepare_gather(dim, selection)
            # Either handle slicing directly or add it to the dict
            if isinstance(selection, (tuple, list)):
                from ._magic_ops import stack
                result = [sliced[{dim: i}] for i in selection]
                stack_dim = sliced.shape[dim].after_gather({dim: selection})
                sliced = stack(result, stack_dim)
            elif isinstance(selection, Tensor) and selection.dtype.kind == bool:
                from ._ops import boolean_mask
                sliced = boolean_mask(sliced, dim, selection)
            elif isinstance(selection, Tensor) and selection.dtype.kind == int:
                from ._ops import gather
                sliced = gather(sliced, selection, dims=dim)
            else:
                selections[dim] = selection
        return sliced._getitem(selections) if selections else sliced

    def _getitem(self, selection: dict) -> 'Tensor':
        """
        Slice the tensor along specified dimensions.

        Args:
            selection: dim_name: str -> Union[int, slice]
            selection: dict:

        Returns:

        """
        raise NotImplementedError()

    def __setitem__(self, key, value):
        raise SyntaxError("Tensors are not editable to preserve the autodiff chain. This feature might be added in the future. To update part of a tensor, use math.where() or math.scatter()")

    def flip(self, *dims: str) -> 'Tensor':
        """
        Reverses the order of elements along one or multiple dimensions.

        Args:
            *dims: dimensions to flip

        Returns:
            `Tensor` of the same `Shape`
        """
        raise NotImplementedError()

    def __unstack__(self, dims: Tuple[str, ...]) -> Tuple['Tensor', ...]:  # from phi.math.magic.Sliceable
        if len(dims) == 1:
            return self.unstack(dims[0])
        else:
            return NotImplemented

    def unstack(self, dim: str):
        """
        Splits this tensor along the specified dimension.
        The returned tensors have the same dimensions as this tensor save the unstacked dimension.

        Raises an error if the dimension is not part of the `Shape` of this `Tensor`.

        See Also:
            `TensorDim.unstack()`

        Args:
            dim: name of dimension to unstack

        Returns:
            tuple of tensors
        """
        raise NotImplementedError()

    @staticmethod
    def __stack__(values: tuple, dim: Shape, **_kwargs) -> 'Tensor':
        from ._ops import stack_tensors
        return stack_tensors(values, dim)

    def __expand__(self, dims: Shape, **kwargs) -> 'Tensor':
        return expand_tensor(self, dims)

    @staticmethod
    def __concat__(values: tuple, dim: str, **kwargs) -> 'Tensor':
        from ._ops import concat_tensor
        return concat_tensor(values, dim)

    def __replace_dims__(self, dims: Tuple[str, ...], new_dims: Shape, **kwargs) -> 'Tensor':
        from ._magic_ops import rename_dims
        return self._with_shape_replaced(rename_dims(self.shape, dims, new_dims))

    def __unpack_dim__(self, dim: str, unpacked_dims: Shape, **kwargs) -> 'Tensor':
        if self.shape.is_uniform:
            native = self.native(self.shape.names)
            new_shape = self.shape.without(dim)
            i = self.shape.index(dim)
            for d in unpacked_dims:
                new_shape = new_shape._expand(d, pos=i)
                i += 1
            native_reshaped = choose_backend(native).reshape(native, new_shape.sizes)
            return NativeTensor(native_reshaped, new_shape)
        else:
            tensors = self._tensors
            if dim == self._stack_dim.name:
                for udim in unpacked_dims:
                    tensors = [TensorStack(tensors[o::len(tensors)//udim.size], udim) for o in range(len(tensors)//udim.size)]
                assert len(tensors) == 1
                return tensors[0]
            raise NotImplementedError

    def __pack_dims__(self, dims: Tuple[str, ...], packed_dim: Shape, pos: Union[int, None], **kwargs) -> 'Tensor':
        order = self.shape._order_group(dims)
        if self.shape.is_uniform:
            native = self.native(order)
            if pos is None:
                pos = min(self.shape.indices(dims))
            new_shape = self.shape.without(dims)._expand(packed_dim.with_sizes([self.shape.only(dims).volume]), pos)
            native = choose_backend(native).reshape(native, new_shape.sizes)
            return NativeTensor(native, new_shape)
        else:
            from ._ops import concat_tensor
            from ._magic_ops import pack_dims
            value = cached(self)
            assert isinstance(value, TensorStack)
            assert value._stack_dim.name in dims
            inner_packed = [pack_dims(t, dims, packed_dim) for t in value._tensors]
            return concat_tensor(inner_packed, packed_dim.name)

    def __cast__(self, dtype: DType):
        return self._op1(lambda native: choose_backend(native).cast(native, dtype=dtype))

    def dimension(self, name: Union[str, Shape]) -> 'TensorDim':
        """
        Returns a reference to a specific dimension of this tensor.
        This is equivalent to the syntax `tensor.<name>`.

        The dimension need not be part of the `Tensor.shape` in which case its size is 1.

        Args:
            name: dimension name

        Returns:
            `TensorDim` corresponding to a dimension of this tensor
        """
        if isinstance(name, str):
            return TensorDim(self, name)
        elif isinstance(name, Shape):
            return TensorDim(self, name.name)
        else:
            raise ValueError(name)

    def pack(self, dims, packed_dim):
        """ See `pack_dims()` """
        from ._ops import pack_dims
        return pack_dims(self, dims, packed_dim)

    def unpack(self, dim, unpacked_dims):
        """ See `unpack_dim()` """
        from ._ops import unpack_dim
        return unpack_dim(self, dim, unpacked_dims)

    def __getattr__(self, name):
        if name.startswith('__'):  # called by hasattr in magic ops
            raise AttributeError
        if name.startswith('_'):
            raise AttributeError(f"'{type(self)}' object has no attribute '{name}'")
        if name == 'is_tensor_like':  # TensorFlow replaces abs() while tracing and checks for this attribute
            raise AttributeError(f"'{type(self)}' object has no attribute '{name}'")
        assert name not in ('shape', '_shape', 'tensor'), name
        return TensorDim(self, name)

    def __add__(self, other): return self._op2(other, lambda x, y: x + y, lambda x, y: choose_backend(x, y).add(x, y), 'add', '+')

    def __radd__(self, other): return self._op2(other, lambda x, y: y + x, lambda x, y: choose_backend(x, y).add(y, x), 'radd', '+')

    def __sub__(self, other): return self._op2(other, lambda x, y: x - y, lambda x, y: choose_backend(x, y).sub(x, y), 'sub', '-')

    def __rsub__(self, other): return self._op2(other, lambda x, y: y - x, lambda x, y: choose_backend(x, y).sub(y, x), 'rsub', '-')

    def __and__(self, other): return self._op2(other, lambda x, y: x & y, lambda x, y: choose_backend(x, y).and_(x, y), 'and', '&')

    def __rand__(self, other): return self._op2(other, lambda x, y: y & x, lambda x, y: choose_backend(x, y).and_(y, x), 'rand', '&')

    def __or__(self, other): return self._op2(other, lambda x, y: x | y, lambda x, y: choose_backend(x, y).or_(x, y), 'or', '|')

    def __ror__(self, other): return self._op2(other, lambda x, y: y | x, lambda x, y: choose_backend(x, y).or_(y, x), 'ror', '|')

    def __xor__(self, other): return self._op2(other, lambda x, y: x ^ y, lambda x, y: choose_backend(x, y).xor(x, y), 'xor', '^')

    def __rxor__(self, other): return self._op2(other, lambda x, y: y ^ x, lambda x, y: choose_backend(x, y).xor(y, x), 'rxor', '^')

    def __mul__(self, other): return self._op2(other, lambda x, y: x * y, lambda x, y: choose_backend(x, y).mul(x, y), 'mul', '*')

    def __rmul__(self, other): return self._op2(other, lambda x, y: y * x, lambda x, y: choose_backend(x, y).mul(y, x), 'rmul', '*')

    def __truediv__(self, other): return self._op2(other, lambda x, y: x / y, lambda x, y: choose_backend(x, y).div(x, y), 'truediv', '/')

    def __rtruediv__(self, other): return self._op2(other, lambda x, y: y / x, lambda x, y: choose_backend(x, y).div(y, x), 'rtruediv', '/')

    def __divmod__(self, other): return self._op2(other, lambda x, y: divmod(x, y), lambda x, y: divmod(x, y), 'divmod', 'divmod')

    def __rdivmod__(self, other): return self._op2(other, lambda x, y: divmod(y, x), lambda x, y: divmod(y, x), 'rdivmod', 'divmod')

    def __floordiv__(self, other): return self._op2(other, lambda x, y: x // y, lambda x, y: choose_backend(x, y).floordiv(x, y), 'floordiv', '//')

    def __rfloordiv__(self, other): return self._op2(other, lambda x, y: y // x, lambda x, y: choose_backend(x, y).floordiv(y, x), 'rfloordiv', '//')

    def __pow__(self, power, modulo=None):
        assert modulo is None
        return self._op2(power, lambda x, y: x ** y, lambda x, y: choose_backend(x, y).pow(x, y), 'pow', '**')

    def __rpow__(self, other): return self._op2(other, lambda x, y: y ** x, lambda x, y: choose_backend(x, y).pow(y, x), 'rpow', '**')

    def __mod__(self, other): return self._op2(other, lambda x, y: x % y, lambda x, y: choose_backend(x, y).mod(x, y), 'mod', '%')

    def __rmod__(self, other): return self._op2(other, lambda x, y: y % x, lambda x, y: choose_backend(x, y).mod(y, x), 'rmod', '%')

    def __eq__(self, other):
        if _EQUALITY_BY_REF:
            return wrap(self is other)
        if other is None:
            other = float('nan')
        return self._op2(other, lambda x, y: x == y, lambda x, y: choose_backend(x, y).equal(x, y), 'eq', '==')

    def __ne__(self, other):
        if _EQUALITY_BY_REF:
            return wrap(self is not other)
        if other is None:
            other = float('nan')
        return self._op2(other, lambda x, y: x != y, lambda x, y: choose_backend(x, y).not_equal(x, y), 'ne', '!=')

    def __lt__(self, other): return self._op2(other, lambda x, y: x < y, lambda x, y: choose_backend(x, y).greater_than(y, x), 'lt', '<')

    def __le__(self, other): return self._op2(other, lambda x, y: x <= y, lambda x, y: choose_backend(x, y).greater_or_equal(y, x), 'le', '<=')

    def __gt__(self, other): return self._op2(other, lambda x, y: x > y, lambda x, y: choose_backend(x, y).greater_than(x, y), 'gt', '>')

    def __ge__(self, other): return self._op2(other, lambda x, y: x >= y, lambda x, y: choose_backend(x, y).greater_or_equal(x, y), 'ge', '>=')

    def __lshift__(self, other): return self._op2(other, lambda x, y: x << y, lambda x, y: choose_backend(x, y).shift_bits_left(x, y), 'lshift', '<<')

    def __rlshift__(self, other): return self._op2(other, lambda y, x: x << y, lambda y, x: choose_backend(x, y).shift_bits_left(x, y), 'lshift', '<<')

    def __rshift__(self, other): return self._op2(other, lambda x, y: x >> y, lambda x, y: choose_backend(x, y).shift_bits_right(x, y), 'rshift', '>>')

    def __rrshift__(self, other): return self._op2(other, lambda y, x: x >> y, lambda y, x: choose_backend(x, y).shift_bits_right(x, y), 'rshift', '>>')

    def __abs__(self): return self._op1(lambda t: choose_backend(t).abs(t))

    def __round__(self, n=None): return self._op1(lambda t: choose_backend(t).round(t))

    def __copy__(self): return self._op1(lambda t: choose_backend(t).copy(t, only_mutable=True))

    def __deepcopy__(self, memodict={}): return self._op1(lambda t: choose_backend(t).copy(t, only_mutable=False))

    def __neg__(self): return self._op1(lambda t: -t)

    def __invert__(self): return self._op1(lambda t: ~t)

    def __reversed__(self):
        assert self.shape.channel.rank == 1
        return self[::-1]

    def __iter__(self):
        if self.rank == 1:
            return iter(self.native())
        elif self.rank == 0:
            return iter([self.native()])
        else:
            from ._ops import reshaped_native
            native = reshaped_native(self, [self.shape])
            return iter(native)

    def __matmul__(self, other):
        assert isinstance(other, Tensor), f"Matmul '@' requires two Tensor arguments but got {type(other)}"
        dims = batch(**self.shape.dual.untyped_dict).names
        if not dims:  # this is not a matrix
            assert self.shape.primal.only(other.shape).is_empty, f"Cannot compute matmul {self.shape} @ {other.shape}. First argument is not a matrix; it has no dual dimensions."
            return self * other
        match = other.shape.only(dims, reorder=True)
        if not match:
            assert non_batch(other).non_dual.rank == 1, f"Cannot multiply {self.shape} @ {other.shape} because arg2 does not have appropriate non-dual dimensions"
            match = non_batch(other).non_dual
        assert len(dims) == match.rank, f"Dual dimensions {dual} do not match shape of second argument {other.shape}"
        left_arg = pack_dims(self, dual, dual('_reduce')) if len(dims) > 1 else self
        right_arg = pack_dims(other, match, channel('_reduce'))
        from ._ops import dot
        return dot(left_arg, dual, right_arg, '_reduce')

    # def __rmatmul__(self, other):

    def _tensor(self, other) -> 'Tensor':
        if isinstance(other, Tensor):
            return other
        elif isinstance(other, (tuple, list)) and any(isinstance(v, Tensor) for v in other):
            if 'vector' in self.shape:
                outer_dim = self.shape['vector']
            elif self.shape.channel_rank == 1:
                outer_dim = self.shape.channel
            else:
                raise ValueError(f"Cannot combine tensor of shape {self.shape} with tuple {tuple([type(v).__name__ for v in other])}")
            remaining_shape = self.shape.without(outer_dim)
            other_items = [v if isinstance(v, Tensor) else compatible_tensor(v, compat_shape=remaining_shape, compat_natives=self._natives(), convert=False) for v in other]
            other_stacked = stack(other_items, outer_dim, expand_values=True)
            return other_stacked
        else:
            return compatible_tensor(other, compat_shape=self.shape, compat_natives=self._natives(), convert=False)

    def _op1(self, native_function):
        """
        Transform the values of this tensor given a function that can be applied to any native tensor.

        Args:
            native_function:

        Returns:

        """
        raise NotImplementedError(self.__class__)

    def _op2(self, other, operator: Callable, native_function: Callable, op_name: str = 'unknown', op_symbol: str = '?') -> 'Tensor':
        """
        Apply a broadcast operation on two tensors.

        Args:
            other: second argument
            operator: function (Tensor, Tensor) -> Tensor, used to propagate the operation to children tensors to have Python choose the callee
            native_function: function (native tensor, native tensor) -> native tensor
            op_name: Name of the python function without leading and trailing `__`. Examples: 'add', 'radd', 'sub', 'mul', 'and', 'eq', 'ge'.
            op_symbol: Operation symbol, such as '+', '-', '&', '%', '>='

        Returns:
            `Tensor`
        """
        raise NotImplementedError(self.__class__)

    def _natives(self) -> tuple:
        raise NotImplementedError(self.__class__)

    def _spec_dict(self) -> dict:
        raise NotImplementedError(self.__class__)

    @classmethod
    def _from_spec_and_natives(cls, spec: dict, natives: list):
        raise NotImplementedError(cls)

    def _expand(self):
        """ Expands all compressed tensors to their defined size as if they were being used in `Tensor.native()`. """
        warnings.warn("Tensor._expand() is deprecated, use cached(Tensor) instead.", DeprecationWarning)
        raise NotImplementedError(self.__class__)

    def _simplify(self):
        """ Does not cache this value but if it is already cached, returns the cached version. """
        return self
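The following illustrative sketch demonstrates the named-dimension behavior described above; it is not part of the library documentation and the commented values are indicative only:
>>> from phi import math
>>> from phi.math import spatial
>>> t = math.tensor([[0., 1., 2.], [3., 4., 5.]], spatial('y', 'x'))
>>> isinstance(t, math.Tensor)  # True
>>> t * math.wrap([2., 1., 0.], spatial('x'))  # operands align by dimension name, not by position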
Subclasses
- phi.math._sparse.CompressedSparseMatrix
- phi.math._sparse.SparseCoordinateTensor
- phi.math._tensors.Layout
- phi.math._tensors.NativeTensor
- phi.math._tensors.TensorStack
- phi.math._trace.ShiftLinTracer
Instance variables
var all
-
Whether all values of this Tensor are True as a native bool.
Expand source code
@property
def all(self):
    """ Whether all values of this `Tensor` are `True` as a native bool. """
    from ._ops import all_, cast
    if self.rank == 0:
        return cast(self, DType(bool)).native()
    else:
        return all_(self, dim=self.shape).native()
var any
-
Whether this Tensor contains a True value as a native bool.
Expand source code
@property
def any(self):
    """ Whether this `Tensor` contains a `True` value as a native bool. """
    from ._ops import any_, cast
    if self.rank == 0:
        return cast(self, DType(bool)).native()
    else:
        return any_(self, dim=self.shape).native()
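Because both properties return native bools, they can be used directly in Python control flow, unlike Tensor.__bool__, which is disallowed for non-scalar tensors. An illustrative sketch, not part of the library documentation:
>>> mask = math.wrap([True, False, True], spatial('x'))
>>> mask.any  # True
>>> mask.all  # False
>>> if mask.any:
>>>     ...  # runs, since at least one value is True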
var available : bool
-
A tensor is available if it stores concrete values and these can currently be read.
Tracers used inside jit compilation are typically not available.
See Also:
jit_compile().
Expand source code
@property
def available(self) -> bool:
    """
    A tensor is available if it stores concrete values and these can currently be read.

    Tracers used inside jit compilation are typically not available.

    See Also:
        `phi.math.jit_compile()`.
    """
    if self._is_tracer:
        return False
    natives = self._natives()
    natives_available = [choose_backend(native).is_available(native) for native in natives]
    return all(natives_available)
var default_backend : phi.math.backend._backend.Backend
-
Expand source code
@property
def default_backend(self) -> Backend:
    from ._ops import choose_backend_t
    return choose_backend_t(self)
var device : Optional[phi.math.backend._backend.ComputeDevice]
-
Returns the ComputeDevice that this tensor is allocated on. The device belongs to this tensor's default_backend.
See Also:
Tensor.default_backend
Expand source code
@property
def device(self) -> Union[ComputeDevice, None]:
    """
    Returns the `ComputeDevice` that this tensor is allocated on.
    The device belongs to this tensor's `default_backend`.

    See Also:
        `Tensor.default_backend`.
    """
    natives = self._natives()
    if not natives:
        return None
    return self.default_backend.get_device(natives[0])
var dtype : phi.math.backend._dtype.DType
-
Data type of the elements of this Tensor.
Expand source code
@property
def dtype(self) -> DType:
    """ Data type of the elements of this `Tensor`. """
    raise NotImplementedError()
var finite_max
-
Maximum finite value of this Tensor as a native scalar.
Expand source code
@property
def finite_max(self):
    """ Maximum finite value of this `Tensor` as a native scalar. """
    from ._ops import finite_max
    return finite_max(self, dim=self.shape).native()
var finite_mean
-
Mean value of all finite values in this Tensor as a native scalar.
Expand source code
@property
def finite_mean(self):
    """ Mean value of all finite values in this `Tensor` as a native scalar. """
    from ._ops import finite_mean
    return finite_mean(self, dim=self.shape).native()
var finite_min
-
Minimum finite value of this Tensor as a native scalar.
Expand source code
@property
def finite_min(self):
    """ Minimum finite value of this `Tensor` as a native scalar. """
    from ._ops import finite_min
    return finite_min(self, dim=self.shape).native()
var finite_sum
-
Sum of all finite values of this Tensor as a native scalar.
Expand source code
@property
def finite_sum(self):
    """ Sum of all finite values of this `Tensor` as a native scalar. """
    from ._ops import finite_sum
    return finite_sum(self, dim=self.shape).native()
var imag : phi.math._tensors.Tensor
-
Returns the imaginary part of this tensor. If this tensor does not store complex numbers, returns a zero tensor with the same shape and dtype as this tensor.
See Also:
imag()
Expand source code
@property
def imag(self) -> 'Tensor':
    """
    Returns the imaginary part of this tensor.
    If this tensor does not store complex numbers, returns a zero tensor with the same shape and dtype as this tensor.

    See Also:
        `phi.math.imag()`
    """
    from ._ops import imag
    return imag(self)
var max
-
Maximum value of this Tensor as a native scalar.
Expand source code
@property
def max(self):
    """ Maximum value of this `Tensor` as a native scalar. """
    from ._ops import max_
    return max_(self, dim=self.shape).native()
var mean
-
Mean value of this Tensor as a native scalar.
Expand source code
@property
def mean(self):
    """ Mean value of this `Tensor` as a native scalar. """
    from ._ops import mean
    return mean(self, dim=self.shape).native()
var min
-
Minimum value of this Tensor as a native scalar.
Expand source code
@property
def min(self):
    """ Minimum value of this `Tensor` as a native scalar. """
    from ._ops import min_
    return min_(self, dim=self.shape).native()
var rank : int
-
Number of explicit dimensions of this Tensor. Equal to tensor.shape.rank. This replaces numpy.ndarray.ndim / torch.Tensor.dim / tf.rank() / jax.numpy.ndim().
Expand source code
@property
def rank(self) -> int:
    """
    Number of explicit dimensions of this `Tensor`. Equal to `tensor.shape.rank`.
    This replaces [`numpy.ndarray.ndim`](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.ndim.html) /
    [`torch.Tensor.dim`](https://pytorch.org/docs/master/generated/torch.Tensor.dim.html) /
    [`tf.rank()`](https://www.tensorflow.org/api_docs/python/tf/rank) /
    [`jax.numpy.ndim()`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.ndim.html).
    """
    return self.shape.rank
var real : phi.math._tensors.Tensor
-
Returns the real part of this tensor.
See Also:
real()
Expand source code
@property
def real(self) -> 'Tensor':
    """
    Returns the real part of this tensor.

    See Also:
        `phi.math.real()`
    """
    from ._ops import real
    return real(self)
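An illustrative sketch using a complex tensor, not part of the library documentation; for real dtypes, imag instead returns zeros of matching shape and dtype:
>>> c = math.tensor([1 + 2j, 3 - 1j], spatial('x'))
>>> c.real  # (1.0, 3.0) along x
>>> c.imag  # (2.0, -1.0) along x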
var shape : phi.math._shape.Shape
-
The Shape lists the dimensions with their sizes, names and types.
Expand source code
@property
def shape(self) -> Shape:
    """ The `Shape` lists the dimensions with their sizes, names and types. """
    raise NotImplementedError()
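Since the shape carries names, types and sizes, most introspection goes through it rather than through positional axes. An illustrative sketch using the shape constructors batch, spatial and channel from this module; the commented values are indicative only:
>>> t = math.zeros(batch(examples=10), spatial(x=32, y=32), channel(vector='x,y'))
>>> t.rank  # 4
>>> t.shape.spatial  # the spatial dims only: x and y
>>> t.shape.get_size('x')  # 32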
var std
-
Standard deviation of this Tensor as a native scalar.
Expand source code
@property
def std(self):
    """ Standard deviation of this `Tensor` as a native scalar. """
    from ._ops import std
    return std(self, dim=self.shape).native()
var sum
-
Sum of all values of this Tensor as a native scalar.
Expand source code
@property
def sum(self):
    """ Sum of all values of this `Tensor` as a native scalar. """
    from ._ops import sum_
    return sum_(self, dim=self.shape).native()
Methods
def dimension(self, name: Union[str, phi.math._shape.Shape]) ‑> phi.math._tensors.TensorDim
-
Returns a reference to a specific dimension of this tensor. This is equivalent to the syntax tensor.<name>.
The dimension need not be part of the Tensor.shape in which case its size is 1.
Args
name
- dimension name
Returns
TensorDim corresponding to a dimension of this tensor
Expand source code
def dimension(self, name: Union[str, Shape]) -> 'TensorDim':
    """
    Returns a reference to a specific dimension of this tensor.
    This is equivalent to the syntax `tensor.<name>`.

    The dimension need not be part of the `Tensor.shape` in which case its size is 1.

    Args:
        name: dimension name

    Returns:
        `TensorDim` corresponding to a dimension of this tensor
    """
    if isinstance(name, str):
        return TensorDim(self, name)
    elif isinstance(name, Shape):
        return TensorDim(self, name.name)
    else:
        raise ValueError(name)
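Attribute access is the more common spelling of the same operation. An illustrative sketch, not part of the library documentation; it assumes the TensorDim size property and TensorDim slicing shown here:
>>> t = math.zeros(spatial(x=4, y=3))
>>> t.dimension('x').size  # 4, equivalent to t.x.size
>>> t.x[0]  # slice at x=0, removing the x dimension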
def flip(self, *dims: str) ‑> phi.math._tensors.Tensor
-
Reverses the order of elements along one or multiple dimensions.
Args
*dims
- dimensions to flip
Returns
Tensor of the same Shape
Expand source code
def flip(self, *dims: str) -> 'Tensor':
    """
    Reverses the order of elements along one or multiple dimensions.

    Args:
        *dims: dimensions to flip

    Returns:
        `Tensor` of the same `Shape`
    """
    raise NotImplementedError()
def native(self, order: Union[phi.math._shape.Shape, tuple, list, str] = None, singleton_for_const=False)
-
Returns a native tensor object with the dimensions ordered according to order.
Transposes the underlying tensor to match the name order and adds singleton dimensions for new dimension names. If a dimension of the tensor is not listed in order, a ValueError is raised.
Args
order
- (Optional) Order of dimension names as comma-separated string, list or Shape.
singleton_for_const
- If True, dimensions along which values are guaranteed to be constant will not be expanded to their true size but returned as singleton dimensions.
Returns
Native tensor representation, such as PyTorch tensor or NumPy array.
Raises
ValueError if the tensor cannot be transposed to match target_shape
Expand source code
def native(self, order: Union[str, tuple, list, Shape] = None, singleton_for_const=False):
    """
    Returns a native tensor object with the dimensions ordered according to `order`.

    Transposes the underlying tensor to match the name order and adds singleton dimensions for new dimension names.
    If a dimension of the tensor is not listed in `order`, a `ValueError` is raised.

    Args:
        order: (Optional) Order of dimension names as comma-separated string, list or `Shape`.
        singleton_for_const: If `True`, dimensions along which values are guaranteed to be constant will not be expanded to their true size but returned as singleton dimensions.

    Returns:
        Native tensor representation, such as PyTorch tensor or NumPy array.

    Raises:
        ValueError if the tensor cannot be transposed to match target_shape
    """
    raise NotImplementedError(self.__class__)
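An illustrative sketch of the ordering behavior described above, not part of the library documentation; per the docstring, names not present in the tensor yield singleton dimensions:
>>> t = math.tensor([[0., 1.], [2., 3.]], spatial('y', 'x'))
>>> t.native('x,y')  # native array transposed to (x, y) order
>>> t.native('b,y,x')  # unknown name 'b' is added as a singleton dimension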
def numpy(self, order: Union[phi.math._shape.Shape, tuple, list, str] = None) ‑> numpy.ndarray
-
Converts this tensor to a numpy.ndarray with dimensions ordered according to order.
Note: Using this function breaks the autograd chain. The returned tensor is not differentiable. To get a differentiable tensor, use Tensor.native() instead.
Transposes the underlying tensor to match the name order and adds singleton dimensions for new dimension names. If a dimension of the tensor is not listed in order, a ValueError is raised.
If this Tensor is backed by a NumPy array, a reference to this array may be returned.
See Also:
numpy()
Args
order
- (Optional) Order of dimension names as comma-separated string, list or Shape.
Returns
NumPy representation
Raises
ValueError if the tensor cannot be transposed to match target_shape
Expand source code
def numpy(self, order: Union[str, tuple, list, Shape] = None) -> np.ndarray:
    """
    Converts this tensor to a `numpy.ndarray` with dimensions ordered according to `order`.

    *Note*: Using this function breaks the autograd chain. The returned tensor is not differentiable.
    To get a differentiable tensor, use `Tensor.native()` instead.

    Transposes the underlying tensor to match the name order and adds singleton dimensions for new dimension names.
    If a dimension of the tensor is not listed in `order`, a `ValueError` is raised.

    If this `Tensor` is backed by a NumPy array, a reference to this array may be returned.

    See Also:
        `phi.math.numpy()`

    Args:
        order: (Optional) Order of dimension names as comma-separated string, list or `Shape`.

    Returns:
        NumPy representation

    Raises:
        ValueError if the tensor cannot be transposed to match target_shape
    """
    native = self.native(order=order)
    return choose_backend(native).numpy(native)
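Since the conversion detaches the value from autograd, numpy() is best reserved for I/O, analysis and plotting. A minimal illustrative sketch, not part of the library documentation:
>>> t = math.random_uniform(spatial(x=8, y=6))
>>> a = t.numpy('y,x')  # numpy.ndarray with shape (6, 8)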
def pack(self, dims, packed_dim)
-
See
pack_dims()
Expand source code
def pack(self, dims, packed_dim):
    """ See `pack_dims()` """
    from ._ops import pack_dims
    return pack_dims(self, dims, packed_dim)
def unpack(self, dim, unpacked_dims)
-
See
unpack_dim()
Expand source code
def unpack(self, dim, unpacked_dims):
    """ See `unpack_dim()` """
    from ._ops import unpack_dim
    return unpack_dim(self, dim, unpacked_dims)
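pack() and unpack() are inverses of each other. An illustrative sketch, not part of the library documentation, flattening two spatial dimensions into an instance dimension and restoring them:
>>> from phi.math import instance, spatial
>>> t = math.zeros(spatial(x=4, y=3))
>>> flat = t.pack('x,y', instance('points'))  # single dim 'points' of size 12
>>> back = flat.unpack('points', spatial(x=4, y=3))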
def unstack(self, dim: str)
-
Splits this tensor along the specified dimension. The returned tensors have the same dimensions as this tensor save the unstacked dimension.
Raises an error if the dimension is not part of the Shape of this Tensor.
See Also:
TensorDim.unstack()
Args
dim
- name of dimension to unstack
Returns
tuple of tensors
Expand source code
def unstack(self, dim: str):
    """
    Splits this tensor along the specified dimension.
    The returned tensors have the same dimensions as this tensor save the unstacked dimension.

    Raises an error if the dimension is not part of the `Shape` of this `Tensor`.

    See Also:
        `TensorDim.unstack()`

    Args:
        dim: name of dimension to unstack

    Returns:
        tuple of tensors
    """
    raise NotImplementedError()
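An illustrative sketch, not part of the library documentation; each slice along the unstacked dimension keeps all other dimensions:
>>> t = math.tensor([[0., 1., 2.], [3., 4., 5.]], spatial('y', 'x'))
>>> rows = t.unstack('y')  # tuple of 2 tensors, each with shape (xˢ=3)
>>> len(rows)  # 2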