Module phiml.math
Vectorized operations, tensors with named dimensions.
This package provides a common interface for tensor operations. It internally uses NumPy, TensorFlow, or PyTorch.
Main classes: Tensor, Shape, DType, Extrapolation.
The provided operations are not implemented directly. Instead, they delegate the actual computation to either NumPy, TensorFlow or PyTorch, depending on the configuration. This allows the user to write simulation code once and have it run with various computation backends.
See the documentation at https://tum-pbs.github.io/PhiML/
Sub-modules
phiml.math.extrapolation
Extrapolations are used for padding tensors and sampling coordinates lying outside the tensor bounds. Standard extrapolations are listed as global …
phiml.math.magic
Magic methods allow custom classes to be compatible with various functions defined in phiml.math, analogous to how implementing __hash__ allows …
phiml.math.perm
Functions related to tensor permutation.
Global variables
var INF
Floating-point representation of positive infinity.
var NAN
Floating-point representation of NaN (not a number).
var NUMPY
Default backend for NumPy arrays and SciPy objects.
var PI
Value of π to double precision.
var f
Automatic mapper for broadcast string formatting of tensors, resulting in tensors of strings. Used with the special -f- syntax.
Examples
>>> from phiml.math import f
>>> -f-f'String containing {tensor1} and {tensor2:.1f}'
# Result is a str tensor containing all dims of tensor1 and tensor2
var math
Convenience alias for the module phiml.math. This way, you can import the module and contained items in one line.
from phiml.math import math, Tensor, wrap, extrapolation, l2_loss
Functions
def abs(x: ~TensorOrTree) ‑> ~TensorOrTree
Computes ||x||₁. Complex x results in float values of matching precision.
Note: The gradient of this operation is undefined for x=0. TensorFlow and PyTorch return 0 while Jax returns 1.
Args
x: Tensor or PhiTreeNode
Returns
Absolute value of x of same type as x.
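A minimal usage sketch (added for illustration; the values shown in comments are indicative):
>>> from phiml import math
>>> from phiml.math import wrap, spatial
>>> math.abs(wrap([-1.5, 0.0, 2.0], spatial('x')))  # (xˢ=3) 1.5, 0.0, 2.0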
def abs_square(complex_values: phiml.math._tensors.Tensor | complex) ‑> phiml.math._tensors.Tensor
Squared magnitude of complex values.
Args
complex_values: complex Tensor
Returns
Tensor: real-valued magnitude squared
def all(boolean_value,
dim: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function non_batch>) ‑> phiml.math._tensors.Tensor
Tests whether all entries of boolean_value are True along the specified dimensions.
Args
boolean_value: Tensor or list / tuple of Tensors.
dim: Dimension or dimensions to be reduced. One of
* None to reduce all non-batch dimensions
* str containing a single dimension or a comma-separated list of dimensions
* Tuple[str] or List[str]
* Shape
* batch, instance, spatial, channel to select dimensions by type
* '0' when isinstance(value, (tuple, list)) to reduce over the sequence of Tensors
Returns
Tensor without the reduced dimensions.
def all_available(*values) ‑> bool
Tests if all tensors contained in the given values are currently known and can be read. Placeholder tensors used to trace functions for just-in-time compilation or matrix construction are considered not available, even when they hold example values like with PyTorch's JIT.
Tensors are not available during jit_compile(), jit_compile_linear() or while using TensorFlow's legacy graph mode.
Tensors are typically available when the backend operates in eager mode and is not currently tracing a function.
This can be used instead of the native checks
* PyTorch: torch._C._get_tracing_state()
* TensorFlow: tf.executing_eagerly()
* Jax: isinstance(x, jax.core.Tracer)
Args
values: Tensors to check.
Returns
True if no value is a placeholder or being traced, False otherwise.
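Illustrative sketch (added; behavior follows the description above):
>>> from phiml import math
>>> x = math.ones(math.spatial(x=4))
>>> math.all_available(x)  # True in eager mode; False while tracing inside jit_compile()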
def always_close(t1: numbers.Number | phiml.math._tensors.Tensor | bool,
t2: numbers.Number | phiml.math._tensors.Tensor | bool,
rel_tolerance=1e-05,
abs_tolerance=0,
equal_nan=False) ‑> bool
Checks whether two tensors are guaranteed to be close in all values. Unlike close(), this function can be used with JIT compilation and with tensors of incompatible shapes. Incompatible tensors are never close.
If one of the given tensors is being traced, the tensors are only equal if they reference the same native tensor. Otherwise, an element-wise equality check is performed.
See Also: close().
Args
t1: First tensor or number to compare.
t2: Second tensor or number to compare.
rel_tolerance: Relative tolerance, only used if neither tensor is traced.
abs_tolerance: Absolute tolerance, only used if neither tensor is traced.
equal_nan: If True, tensors are considered close if they are NaN in the same places.
Returns
bool
def angle(x: ~TensorOrTree) ‑> ~TensorOrTree
Compute the angle of a complex number. This is equal to atan(Im/Re) for most values.
Args
x: Tensor or PhiTreeNode
Returns
Angle of complex number in radians.
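A small example (added for illustration; values are indicative):
>>> from phiml.math import wrap, angle
>>> angle(wrap(1j))      # π/2 for a purely imaginary number
>>> angle(wrap(1 + 0j))  # 0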
def any(boolean_value,
dim: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function non_batch>) ‑> phiml.math._tensors.Tensor
Tests whether any entry of boolean_value is True along the specified dimensions.
Args
boolean_value: Tensor or list / tuple of Tensors.
dim: Dimension or dimensions to be reduced. One of
* None to reduce all non-batch dimensions
* str containing a single dimension or a comma-separated list of dimensions
* Tuple[str] or List[str]
* Shape
* batch, instance, spatial, channel to select dimensions by type
* '0' when isinstance(value, (tuple, list)) to reduce over the sequence of Tensors
Returns
Tensor without the reduced dimensions.
def arange(dim: phiml.math._shape.Shape,
start_or_stop: int | None = None,
stop: int | None = None,
step=1,
backend=None) ‑> phiml.math._tensors.Tensor[int]
Returns evenly spaced values between start and stop. If only one limit is given, 0 is used for the start.
See Also: range_tensor(), linspace(), meshgrid().
Args
dim: Dimension name and type as Shape object. The size of dim is interpreted as stop unless start_or_stop is specified.
start_or_stop: (Optional) int. Interpreted as start if stop is specified as well. Otherwise this is stop.
stop: (Optional) int. stop value.
step: Distance between values.
backend: Backend to use for creating the tensor. If unspecified, uses the current default.
Returns
Tensor
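Usage sketch (added; mirrors the argument description above):
>>> from phiml.math import arange, spatial
>>> arange(spatial(x=4))            # 0, 1, 2, 3 — the size of dim is used as stop
>>> arange(spatial('x'), 2, 10, 2)  # 2, 4, 6, 8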
def arccos(x: ~TensorOrTree) ‑> ~TensorOrTree
Computes the inverse of cos(x) of the Tensor or PhiTreeNode x. For real arguments, the result lies in the range [0, π].
def arccosh(x: ~TensorOrTree) ‑> ~TensorOrTree
Computes the inverse of cosh(x) of the Tensor or PhiTreeNode x.
def arcsin(x: ~TensorOrTree) ‑> ~TensorOrTree
Computes the inverse of sin(x) of the Tensor or PhiTreeNode x. For real arguments, the result lies in the range [-π/2, π/2].
def arcsinh(x: ~TensorOrTree) ‑> ~TensorOrTree
Computes the inverse of sinh(x) of the Tensor or PhiTreeNode x.
def arctan(x: ~TensorOrTree, divide_by=None) ‑> ~TensorOrTree
Computes the inverse of tan(x) of the Tensor or PhiTreeNode x.
Args
x: Input. The single-argument arctan function cannot output π/2 or -π/2 since tan(π/2) is infinite.
divide_by: If specified, computes arctan(x/divide_by) so that it can return π/2 and -π/2. This is equivalent to the common arctan2 function.
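Example (added for illustration; results are indicative):
>>> from phiml.math import arctan, wrap
>>> arctan(wrap(1.))                      # π/4
>>> arctan(wrap(1.), divide_by=wrap(0.))  # π/2, equivalent to arctan2(1, 0)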
def arctanh(x: ~TensorOrTree) ‑> ~TensorOrTree
Computes the inverse of tanh(x) of the Tensor or PhiTreeNode x.
def argmax(x: phiml.math._tensors.Tensor,
dim: str | Sequence | set | phiml.math._shape.Shape | Callable | None,
index_dim=(indexᶜ))
Finds the maximum value along one or multiple dimensions and returns the corresponding index.
See Also: argmin(), at_max().
Args
x: Tensor
dim: Dimensions along which the maximum should be determined. These are reduced in the operation.
index_dim: Dimension listing the index components for multidimensional argmax.
Returns
Index tensor idx, such that x[idx] = max(x).
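Usage sketch (added for illustration):
>>> from phiml.math import wrap, spatial, argmax
>>> x = wrap([3, 1, 4, 1], spatial('x'))
>>> idx = argmax(x, 'x')  # index tensor pointing at the entry with value 4
>>> x[idx]                # 4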
def argmin(x: phiml.math._tensors.Tensor,
dim: str | Sequence | set | phiml.math._shape.Shape | Callable | None,
index_dim=(indexᶜ))
Finds the minimum value along one or multiple dimensions and returns the corresponding index.
See Also: argmax(), at_min().
Args
x: Tensor
dim: Dimensions along which the minimum should be determined. These are reduced in the operation.
index_dim: Dimension listing the index components for multidimensional argmin.
Returns
Index tensor idx, such that x[idx] = min(x).
def as_extrapolation(obj) ‑> Extrapolation
Creates an Extrapolation from a descriptor object.
Args
obj: Extrapolation specification, one of the following:
* Extrapolation
* Primitive name as str: periodic, zero, one, zero-gradient, symmetric, symmetric-gradient, antisymmetric, reflect, antireflect
* dict containing exactly the keys 'normal' and 'tangential'
* dict mapping spatial dimension names to extrapolations
Returns
Extrapolation
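Illustrative calls (added; all descriptor forms are listed above):
>>> from phiml.math import as_extrapolation
>>> as_extrapolation('periodic')  # primitive by name
>>> as_extrapolation(0)           # constant extrapolation with value 0
>>> as_extrapolation({'normal': 0, 'tangential': 'zero-gradient'})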
def assert_close(*values,
rel_tolerance: float = 1e-05,
abs_tolerance: float = 0,
msg: str = '',
verbose: bool = True,
equal_nan=True)
Checks that all given tensors have equal values within the specified tolerance. Raises an AssertionError if the values of this tensor are not within tolerance of any of the other tensors.
Does not check that the shapes match as long as they can be broadcast to a common shape.
Args
values: Tensors or native tensors or numbers or sequences of numbers.
rel_tolerance: Relative tolerance.
abs_tolerance: Absolute tolerance.
msg: Optional error message.
verbose: Whether to print conflicting values.
equal_nan: If False, NaN values will always trigger an assertion error.
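Example (added for illustration):
>>> from phiml.math import wrap, spatial, assert_close
>>> assert_close(wrap([1., 2.], spatial('x')), [1., 2.])  # passes
>>> assert_close(1., 1. + 1e-7, rel_tolerance=1e-5)       # passes within tolerance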
def at_max(value,
key: phiml.math._tensors.Tensor,
dim: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function non_batch>)
Looks up the values of value at the positions where the maximum values in key are located along dim.
See Also: at_min(), phiml.math.max().
Args
value: Tensors or trees from which to look up and return values. These tensors are indexed at the maximum index in key. You can pass range (the built-in type) to retrieve the picked indices.
key: Tensor containing at least one dimension of dim. The maximum index of key is determined.
dim: Dimensions along which to compute the maximum of key.
Returns
The values of value at the positions where the maximum values in key are located along dim.
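Usage sketch (added; values in comments are indicative):
>>> from phiml.math import wrap, spatial, at_max
>>> key = wrap([1, 5, 3], spatial('x'))
>>> values = wrap([10, 20, 30], spatial('x'))
>>> at_max(values, key, 'x')  # 20 — the entry of values where key is maximal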
def at_max_neighbor(values,
key_grid: phiml.math._tensors.Tensor,
dims: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function spatial>,
padding: Extrapolation | float | phiml.math._tensors.Tensor | str | None = None,
offsets=(0, 1),
diagonal=True) ‑> phiml.math._tensors.Tensor
Computes the max of neighboring values in key_grid along each dimension in dims and retrieves the corresponding values from values.
Args
values: Values to look up and return. Tensor or tree structure.
key_grid: Values to compare.
dims: Dimensions along which neighbors should be compared.
padding: Padding at the upper edges of key_grid along dims. If not None, the result tensor will have the same shape as key_grid.
offsets: Relative neighbor indices as int. 0 refers to self, negative values to earlier (left) neighbors and positive values to later (right) neighbors.
diagonal: If True, performs sequential reductions along each axis, determining the maximum value along each axis independently. If the values of key_grid depend on values or their position in the grid, this can lead to undesired behavior.
Returns
Tree or Tensor like values.
def at_min(value,
key: phiml.math._tensors.Tensor,
dim: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function non_batch>)
Looks up the values of value at the positions where the minimum values in key are located along dim.
See Also: at_max(), phiml.math.min().
Args
value: Tensors or trees from which to look up and return values. These tensors are indexed at the minimum index in key. You can pass range (the built-in type) to retrieve the picked indices.
key: Tensor containing at least one dimension of dim. The minimum index of key is determined.
dim: Dimensions along which to compute the minimum of key.
Returns
The values of value at the positions where the minimum values in key are located along dim.
def at_min_neighbor(values,
key_grid: phiml.math._tensors.Tensor,
dims: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function spatial>,
padding: Extrapolation | float | phiml.math._tensors.Tensor | str | None = None,
offsets=(0, 1),
diagonal=True) ‑> phiml.math._tensors.Tensor
Computes the min of neighboring values in key_grid along each dimension in dims and retrieves the corresponding values from values.
Args
values: Values to look up and return. Tensor or tree structure.
key_grid: Values to compare.
dims: Dimensions along which neighbors should be compared.
padding: Padding at the upper edges of key_grid along dims. If not None, the result tensor will have the same shape as key_grid.
offsets: Relative neighbor indices as int. 0 refers to self, negative values to earlier (left) neighbors and positive values to later (right) neighbors.
diagonal: If True, performs sequential reductions along each axis, determining the minimum value along each axis independently. If the values of key_grid depend on values or their position in the grid, this can lead to undesired behavior.
Returns
Tree or Tensor like values.
def b2i(value: ~PhiTreeNodeType) ‑> ~PhiTreeNodeType
Change the type of all batch dims of value to instance dimensions. See rename_dims().
def batch(*args,
**dims: int | str | tuple | list | phiml.math._shape.Shape | ForwardRef('Tensor')) ‑> phiml.math._shape.Shape
Returns the batch dimensions of an existing Shape or creates a new Shape with only batch dimensions.
Usage for filtering batch dimensions:
>>> batch_dims = batch(shape)
>>> batch_dims = batch(tensor)
Usage for creating a Shape with only batch dimensions:
>>> batch_shape = batch('undef', batch=2)
(batch=2, undef=None)
Here, the dimension undef is created with an undefined size of None. Undefined sizes are automatically filled in by tensor(), wrap(), stack() and concat().
To create a shape with multiple types, use merge_shapes(), concat_shapes() or the syntax shape1 & shape2.
See Also: channel(), spatial(), instance()
Args
*args: Either
* Shape or Tensor to filter, or
* names of dimensions with undefined sizes as str.
**dims: Dimension sizes and names. Must be empty when used as a filter operation.
Returns
Shape containing only dimensions of type batch.
def boolean_mask(x,
dim: str | Sequence | set | phiml.math._shape.Shape | Callable | None,
mask: phiml.math._tensors.Tensor,
preserve_names=False)
Discards values x.dim[i] where mask.dim[i]=False. All dimensions of mask that are not dim are treated as batch dimensions.
Alternative syntax: x.dim[mask].
Implementations:
* NumPy: Slicing
* PyTorch: masked_select
* TensorFlow: tf.boolean_mask
* Jax: Slicing
Args
x: Tensor or Sliceable.
dim: Dimension of x along which to discard slices.
mask: Boolean Tensor marking which values to keep. Must have the dimension dim matching x.
preserve_names: This only supports uniform 1D slicing. Batched slicing will remove labels if incompatible.
Returns
Selected values of x as Tensor with dimensions from x and mask.
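Example (added for illustration):
>>> from phiml.math import wrap, spatial, boolean_mask
>>> x = wrap([10, 20, 30, 40], spatial('x'))
>>> keep = wrap([True, False, True, False], spatial('x'))
>>> boolean_mask(x, 'x', keep)  # (xˢ=2) 10, 30
>>> x.x[keep]                   # equivalent slicing syntax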
def brange(start: int = 0, **stop: int) ‑> phiml.math._tensors.Tensor[int]
Construct a range Tensor along one batch dim.
def broadcast(function=None,
dims=<function shape>,
range=builtins.range,
unwrap_scalars=True,
simplify=False,
name: str | bool = True)
Function decorator for non-vectorized functions. When passing Tensor arguments to a broadcast function, the function is called once for each slice of the tensor. How tensors are sliced is determined by dims. Decorating a function with broadcast is equal to passing the function to phiml.math.map().
See Also: phiml.math.map()
Args
function: Function to broadcast.
dims: Dimensions which should be sliced. function is called once for each element in dims, i.e. dims.volume times. If dims is not specified, all dimensions from the Sliceable values in args and kwargs will be mapped.
range: Optional range function. Can be used to generate tqdm output by passing trange.
unwrap_scalars: If True, passes the contents of scalar Tensors instead of the tensor objects.
simplify: If True, reduces constant dims of output tensors that don't vary across broadcast slices.
name: Name to pass to phiml.math.map(). This may be displayed using tqdm. If True, uses the function name.
Returns
Broadcast function
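A minimal decorator sketch (added for illustration; py_sin is a hypothetical user function, and unwrap_scalars=True by default passes plain floats):
>>> import math as pymath
>>> from phiml.math import wrap, spatial, broadcast
>>> @broadcast
... def py_sin(x):  # plain Python function, not vectorized
...     return pymath.sin(x)
>>> py_sin(wrap([0.0, 1.5708], spatial('x')))  # called once per element → ≈ 0, 1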
def c2b(value: ~PhiTreeNodeType) ‑> ~PhiTreeNodeType
Change the type of all channel dims of value to batch dimensions. See rename_dims().
def c2d(value: ~PhiTreeNodeType) ‑> ~PhiTreeNodeType
Change the type of all channel dims of value to dual dimensions. See rename_dims().
def cast(x: ~MagicType, dtype: phiml.backend._dtype.DType | type) ‑> ~OtherMagicType
Casts x to a different data type.
Implementations:
* NumPy: x.astype()
* PyTorch: x.to()
* TensorFlow: tf.cast
* Jax: jax.numpy.array
See Also: to_float(), to_int32(), to_int64(), to_complex().
Args
x: Tensor
dtype: New data type as phiml.math.DType, e.g. DType(int, 16).
Returns
Tensor with data type dtype
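Example (added for illustration; results are indicative):
>>> from phiml.math import wrap, spatial, cast, DType
>>> x = wrap([1.7, 2.3], spatial('x'))
>>> cast(x, DType(int, 32))  # int32 tensor
>>> cast(x, bool)            # plain Python types are converted to a DType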
def ceil(x: ~TensorOrTree) ‑> ~TensorOrTree
Computes ⌈x⌉ of the Tensor or PhiTreeNode x.
def channel(*args,
**dims: int | str | tuple | list | phiml.math._shape.Shape | ForwardRef('Tensor')) ‑> phiml.math._shape.Shape
Returns the channel dimensions of an existing Shape or creates a new Shape with only channel dimensions.
Usage for filtering channel dimensions:
>>> channel_dims = channel(shape)
>>> channel_dims = channel(tensor)
Usage for creating a Shape with only channel dimensions:
>>> channel_shape = channel('undef', vector=2)
(vector=2, undef=None)
Here, the dimension undef is created with an undefined size of None. Undefined sizes are automatically filled in by tensor(), wrap(), stack() and concat().
To create a shape with multiple types, use merge_shapes(), concat_shapes() or the syntax shape1 & shape2.
See Also: spatial(), batch(), instance()
Args
*args: Either
* Shape or Tensor to filter, or
* names of dimensions with undefined sizes as str.
**dims: Dimension sizes and names. Must be empty when used as a filter operation.
Returns
Shape containing only dimensions of type channel.
def choose_backend(*values: phiml.math._tensors.Tensor) ‑> phiml.backend._backend.Backend
Chooses an appropriate backend based on the backends of values.
Args
*values: Input tensors to some operation.
Returns
Backend that is compatible with all values.
Raises
NoBackendFound: If no backend exists that can handle all values.
def clip(x: phiml.math._tensors.Tensor,
lower_limit: float | phiml.math._tensors.Tensor = 0,
upper_limit: float | phiml.math._tensors.Tensor | phiml.math._shape.Shape = 1)
Limits the values of the Tensor x to lie between lower_limit and upper_limit (inclusive).
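Usage sketch (added for illustration):
>>> from phiml.math import wrap, spatial, clip
>>> x = wrap([-0.5, 0.3, 1.8], spatial('x'))
>>> clip(x)            # default limits [0, 1] → 0.0, 0.3, 1.0
>>> clip(x, -1, 0.5)   # limits [-1, 0.5] → -0.5, 0.3, 0.5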
def clip_length(vec: phiml.math._tensors.Tensor,
min_len=0,
max_len=1,
vec_dim: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function channel>,
eps: float | phiml.math._tensors.Tensor = None)
Clips the length of a vector to the interval [min_len, max_len] while keeping the direction. Zero-vectors remain zero-vectors. Deprecated: use PhiFlow's geometry functions instead.
Args
vec: Tensor
min_len: Lower clipping threshold.
max_len: Upper clipping threshold.
vec_dim: Dimensions to compute the length over. By default, all channel dimensions are used to compute the vector length.
eps: Minimum vector length. Use to avoid inf gradients for zero-length vectors.
Returns
Tensor with same shape as vec.
def close(*tensors,
rel_tolerance: float | phiml.math._tensors.Tensor = 1e-05,
abs_tolerance: float | phiml.math._tensors.Tensor = 0,
equal_nan=False,
reduce=<function shape>) ‑> bool-
Expand source code
def close(*tensors, rel_tolerance: Union[float, Tensor] = 1e-5, abs_tolerance: Union[float, Tensor] = 0, equal_nan=False, reduce=shape) -> bool: """ Checks whether all tensors have equal values within the specified tolerance. Does not check that the shapes exactly match but if shapes are incompatible, returns `False`. Unlike with `always_close()`, all shapes must be compatible and tensors with different shapes are reshaped before comparing. See Also: `always_close()`. Args: *tensors: At least two `Tensor` or tensor-like objects or `None`. The shapes of all tensors must be compatible but not all tensors must have all dimensions. If any argument is `None`, returns `True` only if all are `None`. rel_tolerance: Relative tolerance abs_tolerance: Absolute tolerance equal_nan: If `True`, tensors are considered close if they are NaN in the same places. Returns: `bool`, whether all given tensors are equal to the first tensor within the specified tolerance. """ if tensors[0] is None: return all(o is None for o in tensors) if any(o is None for o in tensors): return False if all(t is tensors[0] for t in tensors): return True tensors = [wrap(t) for t in tensors] if any([not tensors[0].shape.is_compatible(t.shape) for t in tensors[1:]]): return False c = True abs_tolerance = stop_gradient(abs_tolerance) if isinstance(abs_tolerance, Tensor) else abs_tolerance rel_tolerance = stop_gradient(rel_tolerance) if isinstance(rel_tolerance, Tensor) else rel_tolerance for other in tensors[1:]: c &= _close(tensors[0], other, rel_tolerance=rel_tolerance, abs_tolerance=abs_tolerance, equal_nan=equal_nan, reduce=reduce) return cChecks whether all tensors have equal values within the specified tolerance.
Does not check that the shapes exactly match: if shapes are incompatible, returns False. Unlike always_close(), all shapes must be compatible, and tensors with different shapes are reshaped before comparing.
See Also: always_close().
Args
*tensors: At least two Tensor or tensor-like objects or None. The shapes of all tensors must be compatible but not all tensors must have all dimensions. If any argument is None, returns True only if all are None.
rel_tolerance: Relative tolerance
abs_tolerance: Absolute tolerance
equal_nan: If True, tensors are considered close if they are NaN in the same places.
Returns
bool, whether all given tensors are equal to the first tensor within the specified tolerance.
def closest_grid_values(grid: phiml.math._tensors.Tensor,
coordinates: phiml.math._tensors.Tensor,
extrap: e_.Extrapolation,
stack_dim_prefix='closest_',
**kwargs)-
def closest_grid_values(grid: Tensor, coordinates: Tensor, extrap: 'e_.Extrapolation', stack_dim_prefix='closest_', **kwargs): """ Finds the neighboring grid points in all directions and returns their values. The result will have 2^d values for each vector in coordinates in d dimensions. If `coordinates` does not have a channel dimension with labels, the spatial dims of `grid` will be used. Args: grid: grid data. The grid is spanned by the spatial dimensions of the tensor coordinates: tensor with 1 channel dimension holding vectors pointing to locations in grid index space extrap: grid extrapolation stack_dim_prefix: For each spatial dimension `dim`, stacks lower and upper closest values along dimension `stack_dim_prefix+dim`. kwargs: Additional information for the extrapolation. Returns: `Tensor` of shape (batch, coord_spatial, grid_spatial=(2, 2,...), grid_channel) """ return broadcast_op(functools.partial(_closest_grid_values, extrap=extrap, stack_dim_prefix=stack_dim_prefix, pad_kwargs=kwargs), [grid, coordinates])Finds the neighboring grid points in all directions and returns their values. The result will have 2^d values for each vector in coordinates in d dimensions.
If coordinates does not have a channel dimension with labels, the spatial dims of grid will be used.
Args
grid: Grid data. The grid is spanned by the spatial dimensions of the tensor.
coordinates: Tensor with one channel dimension holding vectors pointing to locations in grid index space.
extrap: Grid extrapolation.
stack_dim_prefix: For each spatial dimension dim, stacks lower and upper closest values along dimension stack_dim_prefix+dim.
kwargs: Additional information for the extrapolation.
Returns
Tensor of shape (batch, coord_spatial, grid_spatial=(2, 2, ...), grid_channel)
def concat(values: Sequence[~PhiTreeNodeType],
dim: str | phiml.math._shape.Shape,
expand_values=False,
**kwargs) ‑> ~PhiTreeNodeType-
def concat(values: Sequence[PhiTreeNodeType], dim: Union[str, Shape], expand_values=False, **kwargs) -> PhiTreeNodeType: """ Concatenates a sequence of `phiml.math.magic.Shapable` objects, e.g. `Tensor`, along one dimension. All values must have the same spatial, instance and channel dims and their sizes must be equal, except for `dim`. Batch dims will be added as needed. Args: values: Tuple or list of `phiml.math.magic.Shapable`, such as `phiml.math.Tensor` dim: Concatenation dimension, must be present in all `values`. The size along `dim` is determined from `values` and can be set to undefined (`None`). Alternatively, a `str` of the form `'t->name:t'` can be specified, where `t` is on of `b d i s c` denoting the dimension type. This first packs all dims of the input into a new dim with given name and type, then concatenates the values along this dim. expand_values: If `True`, will first add missing dims to all values, not just batch dimensions. This allows tensors with different dims to be concatenated. The resulting tensor will have all dims that are present in `values`. **kwargs: Additional keyword arguments required by specific implementations. Adding spatial dims to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions. Adding batch dims must always work without keyword arguments. Returns: Concatenated `Tensor` Examples: >>> concat([math.zeros(batch(b=10)), math.ones(batch(b=10))], 'b') (bᵇ=20) 0.500 ± 0.500 (0e+00...1e+00) >>> concat([vec(x=1, y=0), vec(z=2.)], 'vector') (x=1.000, y=0.000, z=2.000) float64 """ assert len(values) > 0, f"concat() got empty sequence {values}" if isinstance(dim, SHAPE_TYPES): dim = dim.name assert isinstance(dim, str), f"dim must be a str or Shape but got '{dim}' of type {type(dim)}" if '->' in dim: dim_type, dim = [s.strip() for s in dim.split('->', 1)] dim_type = DIM_FUNCTIONS[INV_CHAR[dim_type]] dim = auto(dim, dim_type) values = [pack_dims(v, dim_type, dim) for v in values] dim = dim.name else: dim = auto(dim, channel).name # --- Filter 0-length values --- def is_non_zero(s: Shape): if dim not in s: return True size = s.get_size(dim) if isinstance(size, int): return size > 0 return True # --- Add missing dimensions --- shapes = [shape(v) for v in values] if expand_values: all_other_dims = merge_shapes(*[s - dim for s in shapes], allow_varying_sizes=True) dim_candidate = merge_shapes(*[s.only(dim) for s in shapes], allow_varying_sizes=True, allow_varying_labels=True) all_dims = all_other_dims + dim_candidate.with_dim_size(dim_candidate.name, 1, keep_labels=False) values = [expand(v, all_dims - s) for v, s in zip(values, shapes)] else: for v, s in zip(values, shapes): assert dim in s, f"concat dim '{dim}' must be present in the shapes of all values bot got value {type(v).__name__} with shape {s}" for v in values[1:]: assert set(non_batch(v).names) == set(non_batch(values[0]).names), f"Concatenated values must have the same non-batch dims but got {non_batch(values[0])} and {non_batch(v)}" all_batch_dims = merge_shapes(*[s.batch - dim for s in shapes]) values = [expand(v, all_batch_dims) for v in values] shapes = [shape(v) for v in values] # --- Filter out 0-size tensors --- filtered_values_and_shapes = [(v, s) for v, s in zip(values, shapes) if is_non_zero(s)] if not filtered_values_and_shapes: return values[0] values, shapes = zip(*filtered_values_and_shapes) if len(values) == 1: return values[0] # --- First try __concat__ --- for v in values: if isinstance(v, Shapable): if hasattr(v, '__concat__'): result = 
v.__concat__(values, dim, **kwargs) if result is not NotImplemented: assert isinstance(result, Shapable), f"__concat__ must return a Shapable object but got {type(result).__name__} from {type(v).__name__} {v}" return result # --- Next: try concat attributes for tree nodes --- if all(isinstance(v, PhiTreeNode) for v in values): attributes = all_attributes(values[0]) if attributes and all(all_attributes(v) == attributes for v in values): new_attrs = {} for a in attributes: common_shape = merge_shapes(*[shape(getattr(v, a)).without(dim) for v in values]) a_values = [expand(getattr(v, a), common_shape & shape(v).only(dim)) for v in values] # expand by dim if missing, and dims of others new_attrs[a] = concat(a_values, dim, expand_values=expand_values, **kwargs) return copy_with(values[0], **new_attrs) else: warnings.warn(f"Failed to concat values using value attributes because attributes differ among values {values}") # --- Fallback: slice and stack --- try: unstacked = sum([unstack(v, dim) for v in values], ()) except MagicNotImplemented: raise MagicNotImplemented(f"concat: No value implemented __concat__ and not all values were Sliceable along {dim}. values = {[type(v) for v in values]}") if len(unstacked) > 8: warnings.warn(f"concat() default implementation is slow on large dims ({dim}={len(unstacked)}). Please implement __concat__()", RuntimeWarning, stacklevel=2) dim = shapes[0][dim].with_size(None) try: return stack(unstacked, dim, **kwargs) except MagicNotImplemented: raise MagicNotImplemented(f"concat: No value implemented __concat__ and slices could not be stacked. values = {[type(v) for v in values]}")Concatenates a sequence of
Shapable objects, e.g. Tensor, along one dimension. All values must have the same spatial, instance and channel dims and their sizes must be equal, except for dim. Batch dims will be added as needed.
Args
values: Tuple or list of Shapable, such as Tensor
dim: Concatenation dimension, must be present in all values. The size along dim is determined from values and can be set to undefined (None). Alternatively, a str of the form 't->name:t' can be specified, where t is one of b d i s c denoting the dimension type. This first packs all dims of the input into a new dim with the given name and type, then concatenates the values along this dim.
expand_values: If True, will first add missing dims to all values, not just batch dimensions. This allows tensors with different dims to be concatenated. The resulting tensor will have all dims that are present in values.
**kwargs: Additional keyword arguments required by specific implementations. Adding spatial dims to fields requires the bounds: Box argument specifying the physical extent of the new dimensions. Adding batch dims must always work without keyword arguments.
Returns
Concatenated Tensor
Examples
>>> concat([math.zeros(batch(b=10)), math.ones(batch(b=10))], 'b')
(bᵇ=20) 0.500 ± 0.500 (0e+00...1e+00)
>>> concat([vec(x=1, y=0), vec(z=2.)], 'vector')
(x=1.000, y=0.000, z=2.000) float64
def concat_shapes(*shapes: phiml.math._shape.Shape | Any) ‑> phiml.math._shape.Shape-
def concat_shapes(*shapes: Union[Shape, Any]) -> Shape:
    """
    Creates a `Shape` listing the dimensions of all `shapes` in the given order.

    See Also:
        `merge_shapes()`.

    Args:
        *shapes: Shapes to concatenate. No two shapes may contain a dimension with the same name.

    Returns:
        Combined `Shape`.
    """
    shapes = [obj if isinstance(obj, SHAPE_TYPES) else shape(obj) for obj in shapes]
    return concat_shapes_(*shapes)
Creates a
Shape listing the dimensions of all shapes in the given order.
See Also: merge_shapes().
Args
*shapes: Shapes to concatenate. No two shapes may contain a dimension with the same name.
Returns
Combined Shape.
def conjugate(x: ~TensorOrTree) ‑> ~TensorOrTree-
def conjugate(x: TensorOrTree) -> TensorOrTree:
    """
    See Also:
        `imag()`, `real()`.

    Args:
        x: Real or complex `Tensor` or `phiml.math.magic.PhiTreeNode` or native tensor.

    Returns:
        Complex conjugate of `x` if `x` is complex, else `x`.
    """
    return _backend_op1(x, Backend.conj, conjugate)
See Also: imag(), real().
Args
x: Real or complex Tensor or PhiTreeNode or native tensor.
Returns
Complex conjugate of x if x is complex, else x.
def const_vec(value: float | phiml.math._tensors.Tensor,
dim: phiml.math._shape.Shape | tuple | list | str)-
def const_vec(value: Union[float, Tensor], dim: Union[Shape, tuple, list, str]): """ Creates a single-dimension tensor with all values equal to `value`. `value` is not converted to the default backend, even when it is a Python primitive. Args: value: Value for filling the vector. dim: Either single-dimension non-spatial Shape or `Shape` consisting of any number of spatial dimensions. In the latter case, a new channel dimension named `'vector'` will be created from the spatial shape. Returns: `Tensor` """ if isinstance(dim, SHAPE_TYPES): if dim.spatial: assert not dim.non_spatial, f"When creating a vector given spatial dimensions, the shape may only contain spatial dimensions but got {dim}" shape = channel(vector=dim.names) else: assert dim.rank == 1, f"Cannot create vector from {dim}" shape = dim else: dims = parse_dim_order(dim) shape = channel(vector=dims) return wrap([value] * shape.size, shape)Creates a single-dimension tensor with all values equal to
value. value is not converted to the default backend, even when it is a Python primitive.
Args
value: Value for filling the vector.
dim: Either a single-dimension non-spatial Shape or a Shape consisting of any number of spatial dimensions. In the latter case, a new channel dimension named 'vector' will be created from the spatial shape.
Returns
Tensor
def contains(values: phiml.math._tensors.Tensor,
query: phiml.math._tensors.Tensor,
feature_dims: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function channel>) ‑> phiml.math._tensors.Tensor-
def contains(values: Tensor, query: Tensor, feature_dims: DimFilter = channel) -> Tensor: """ For each query item, checks whether it is contained in `values`. See Also: `count_occurrences()`. Args: values: Data `Tensor` containing all `feature_dims`. All non-batch and dims not specified as `feature_dims` are flattened. query: Items to count the occurrences of. Must contain all `feature_dims`. feature_dims: One item is considered to be the set of all values along `feature_dims`. The number of items in a tensor is given by all dims except `feature_dims`. Returns: Integer `Tensor` matching `query` without `feature_dims`. """ return count_occurrences(values, query, feature_dims=feature_dims) > 0For each query item, checks whether it is contained in
values.
See Also: count_occurrences().
Args
values: Data Tensor containing all feature_dims. All non-batch dims not specified as feature_dims are flattened.
query: Items to check for. Must contain all feature_dims.
feature_dims: One item is considered to be the set of all values along feature_dims. The number of items in a tensor is given by all dims except feature_dims.
Returns
Boolean Tensor matching query without feature_dims.
def convert(x, backend: phiml.backend._backend.Backend = None, use_dlpack=True)-
def convert(x, backend: Backend = None, use_dlpack=True): """ Convert the native representation of a `Tensor` or `phiml.math.magic.PhiTreeNode` to the native format of `backend`. *Warning*: This operation breaks the automatic differentiation chain. See Also: `phiml.math.backend.convert()`. Args: x: `Tensor` to convert. If `x` is a `phiml.math.magic.PhiTreeNode`, its variable attributes are converted. backend: Target backend. If `None`, uses the current default backend, see `phiml.math.backend.backend()`. Returns: `Tensor` with native representation belonging to `backend`. """ if x is None: return x if isinstance(x, Layout): return tree_map(convert, x, backend=backend, use_dlpack=use_dlpack) if isinstance(x, Tensor): return x._from_spec_and_natives(x._spec_dict(), [b_convert(n, backend=backend, use_dlpack=False) for n in x._natives()]) elif isinstance(x, PhiTreeNode): return tree_map(convert, x, backend=backend, use_dlpack=use_dlpack) else: return b_convert(x, backend, use_dlpack=use_dlpack)Convert the native representation of a
Tensor or PhiTreeNode to the native format of backend.
Warning: This operation breaks the automatic differentiation chain.
See Also: phiml.math.backend.convert().
Args
x: Tensor to convert. If x is a PhiTreeNode, its variable attributes are converted.
backend: Target backend. If None, uses the current default backend, see phiml.math.backend.backend().
Returns
Tensor with native representation belonging to backend.
def convolve(value: phiml.math._tensors.Tensor,
kernel: phiml.math._tensors.Tensor,
size: str | phiml.math._shape.Shape = 'same',
extrapolation: Union[e_.Extrapolation, float] = 0,
dims: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function spatial>,
strides: int | Dict[str, int] = 1,
transpose=False) ‑> phiml.math._tensors.Tensor-
def convolve(value: Tensor, kernel: Tensor, size: Union[str, Shape] = 'same', extrapolation: 'Union[e_.Extrapolation, float]' = 0, dims: DimFilter = spatial, strides: Union[int, Dict[str, int]] = 1, transpose=False) -> Tensor: """ Computes the convolution of `value` and `kernel` along the specified dims. Dual dims of `kernel` are reduced against the corresponding primal dims of `value`. All other primal dims of `value` are treated as batch. Args: value: `Tensor` whose shape includes all spatial dimensions of `kernel`. kernel: `Tensor` used as convolutional filter. size: Either a `Shape` specifying the desired output resolution or one of the following predefined strings: `('valid', 'same', 'full')`. `valid`: Only those values are returned where the full kernel can be applied on valid values without using the `extrapolation`. `same` the output is the same size as the input. `full`: the output contains all values of the convolution, including those where the kernel extends beyond the input. extrapolation: If `None`, convolve only where `kernel` fits into `value`, i.e. 'valid'. Otherwise, pads `value` with the specified extrapolation. The amount of padding depends on `full`. dims: Which dimensions to convolve over. Defaults to all spatial dims. strides: Convolution strides for applying `kernel` to a subset of `value` only. This will result in a smaller output. The stride can be specified per dim, with missing dims defaulting to `1`. transpose: If `True`, the kernel is transposed before convolution, and strides are replaced by up-sampling. Returns: `Tensor` with all non-reduced dims of `value` and additional non-dual dims from `kernel`. """ assert all(dim in value.shape for dim in kernel.shape.spatial.names), f"Value must have all spatial dimensions of kernel but got value {value} kernel {kernel}" dims = kernel.shape.only(dims) assert dims.dual_rank == 0, f"convolve dims must not be of type dual but got {dims}" in_dims = value.shape.only(dual(kernel).as_batch().names) out_dims = non_dual(kernel) - dims - batch(value) batch_dims = (value.shape - dims - in_dims) & (non_dual(kernel) - dims - out_dims) extrapolation = e_.as_extrapolation(extrapolation) # --- Resolve output size --- native_strides = (strides,) * len(dims) if isinstance(strides, int) else [strides.get(dim, 1) for dim in dims.names] if isinstance(size, str): if size == 'valid': if not transpose: out_sizes = [int(np.ceil((abs(value.shape.get_size(d) - d.size) + 1) / st)) for d, st in zip(dims, native_strides)] else: raise NotImplementedError elif size == 'same': if not transpose: out_sizes = [int(np.ceil(value.shape.get_size(d) / st)) for d, st in zip(dims, native_strides)] else: out_sizes = [value.shape.get_size(d) * st for d, st in zip(dims, native_strides)] elif size == 'full': if not transpose: out_sizes = [(value.shape.get_size(d) + d.size - 1) // st for d, st in zip(dims, native_strides)] else: out_sizes = [(value.shape.get_size(d) + 1) * st - d.size for d, st in zip(dims, native_strides)] else: raise ValueError(f"Unsupported output size: {size}") elif isinstance(size, Shape): out_sizes = [size.get_size(d) for d in dims.names] else: raise ValueError(f"size must be of type str or Shape but got {size}") # --- Apply extrapolation if not 0 --- if extrapolation is None: ... # check that out_sizes does not exceed valid size if extrapolation == e_.PERIODIC: ... 
# limit to same size, else we are repeating computations if extrapolation is not None and extrapolation != e_.ZERO: # custom padding, cannot be handled by backend pad_widths = {} for dim, st, os in zip(dims, native_strides, out_sizes): vs = value.shape.get_size(dim) ks = kernel.shape.get_size(dim) if not transpose: padding = max(0, st * (os - 1) - vs + ks) pad_widths[dim.name] = (padding//2, (padding+1)//2) else: default_size = (vs + 1) * st - ks # size if no padding is used if default_size < os: pad_widths[dim.name] = os - default_size # ToDo raise NotImplementedError value = pad(value, pad_widths, extrapolation) # value = pad(value, {dim: (kernel.shape.get_size(dim) // 2, (kernel.shape.get_size(dim) - 1) // 2) for dim in dims.names}, extrapolation) if value._is_tracer: if strides != 1 or transpose: raise NotImplementedError result = [] widths = {dim.name: s - value.shape.get_size(dim) + kernel.shape.get_size(dim) - 1 for dim, s in zip(dims, out_sizes)} value = pad(value, {dim: (s//2, s//2 + (s%2)) for dim, s in widths.items()}, extrapolation) for idx in dims.meshgrid(): kernel_i = kernel[idx] value_i = value[{dim: slice(offset, s + offset) for (dim, offset), s in zip(idx.items(), out_sizes)}] result.append(kernel_i * value_i) result = stack(result, '_reduce:b') return sum_(result, '_reduce') # --- Perform conv --- backend = backend_for(value, kernel) native_kernel = kernel.native((batch_dims if batch(kernel) else EMPTY_SHAPE, out_dims, dual(kernel), *dims)) native_value = value.native((batch_dims, in_dims, *dims.names)) native_result = backend.conv(native_value, native_kernel, native_strides, out_sizes, transpose) assert tuple(out_sizes) == backend.staticshape(native_result)[2:], f"Internal shape mismatch in conv(). Expected shape {out_sizes} but got {backend.staticshape(native_result)[2:]} from {backend}" result = reshaped_tensor(native_result, (batch_dims, out_dims, *dims), convert=False) return resultComputes the convolution of
value and kernel along the specified dims.
Dual dims of kernel are reduced against the corresponding primal dims of value. All other primal dims of value are treated as batch.
Args
value: Tensor whose shape includes all spatial dimensions of kernel.
kernel: Tensor used as convolutional filter.
size: Either a Shape specifying the desired output resolution or one of the predefined strings ('valid', 'same', 'full'). 'valid': only those values are returned where the full kernel can be applied on valid values without using the extrapolation. 'same': the output is the same size as the input. 'full': the output contains all values of the convolution, including those where the kernel extends beyond the input.
extrapolation: If None, convolve only where kernel fits into value, i.e. 'valid'. Otherwise, pads value with the specified extrapolation. The amount of padding depends on size.
dims: Which dimensions to convolve over. Defaults to all spatial dims.
strides: Convolution strides for applying kernel to a subset of value only. This will result in a smaller output. The stride can be specified per dim, with missing dims defaulting to 1.
transpose: If True, the kernel is transposed before convolution, and strides are replaced by up-sampling.
Returns
Tensor with all non-reduced dims of value and additional non-dual dims from kernel.
def copy(value: phiml.math._tensors.Tensor)-
def copy(value: Tensor):
    """
    Copies the data buffer and encapsulating `Tensor` object.

    Args:
        value: `Tensor` to be copied.

    Returns:
        Copy of `value`.
    """
    if value._is_tracer:
        warnings.warn("Tracing tensors cannot be copied.", RuntimeWarning)
        return value
    return value._op1(lambda native: choose_backend(native).copy(native), 'copy')
Copies the data buffer and encapsulating Tensor object.
def copy_with(obj: ~PhiTreeNodeType, **updates) ‑> ~PhiTreeNodeType-
def replace(obj: PhiTreeNodeType, **updates) -> PhiTreeNodeType: """ Creates a copy of the given `phiml.math.magic.PhiTreeNode` with updated values as specified in `updates`. If `obj` overrides `__with_attrs__`, the copy will be created via that specific implementation. Otherwise, the `copy` module and `setattr` will be used. Args: obj: `phiml.math.magic.PhiTreeNode` **updates: Values to be replaced. Returns: Copy of `obj` with updated values. """ if isinstance(obj, (Number, bool)): return obj elif hasattr(obj, '__with_attrs__'): result = obj.__with_attrs__(**updates) if result is not NotImplemented: return result if dataclasses.is_dataclass(obj): return dataclasses.replace(obj, **updates) else: cpy = copy.copy(obj) for attr, value in updates.items(): setattr(cpy, attr, value) return cpyCreates a copy of the given
PhiTreeNode with updated values as specified in updates.
If obj overrides __with_attrs__, the copy will be created via that specific implementation. Otherwise, the copy module and setattr will be used.
Args
obj: PhiTreeNode
**updates: Values to be replaced.
Returns
Copy of obj with updated values.
def cos(x: ~TensorOrTree) ‑> ~TensorOrTree-
def cos(x: TensorOrTree) -> TensorOrTree:
    """ Computes *cos(x)* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.cos, cos)
Computes cos(x) of the Tensor or PhiTreeNode x.
def cosh(x: ~TensorOrTree) ‑> ~TensorOrTree-
def cosh(x: TensorOrTree) -> TensorOrTree:
    """ Computes *cosh(x)* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.cosh, cosh)
Computes cosh(x) of the Tensor or PhiTreeNode x.
def count_intersections(values: phiml.math._tensors.Tensor,
arg_dims: str | Sequence | set | phiml.math._shape.Shape | Callable | None,
list_dims: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function instance>,
feature_dims: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function channel>) ‑> phiml.math._tensors.Tensor-
def count_intersections(values: Tensor, arg_dims: DimFilter, list_dims: DimFilter = instance, feature_dims: DimFilter = channel) -> Tensor: """ Counts the number of elements that are part of each pair of lists. Args: values: arg_dims: Dims enumerating the input lists. list_dims: Dims listing the elements. feature_dims: Vector dims of one element. Elements are equal if all values along `feature_dims` are equal. Returns: `Tensor`. """ assert arg_dims is not batch feature_dims = values.shape.only(feature_dims) arg_dims = values.shape.only(arg_dims) if feature_dims: if feature_dims.volume == 1: values = unstack(values, feature_dims)[0] else: raise NotImplementedError batch_dims = values.shape - arg_dims - list_dims - feature_dims result = [] for b in batch_dims.meshgrid(): lists = unstack(values[b], arg_dims) np_lists = [l.numpy([list_dims]) for l in lists] n = len(np_lists) shared_counts = np.zeros((n, n), dtype=int) for i in range(n): for j in range(i + 1, n): intersection = np.intersect1d(np_lists[i], np_lists[j]) shared_counts[i, j] = shared_counts[j, i] = len(intersection) result.append(wrap(shared_counts, arg_dims & arg_dims.as_dual())) return stack(result, batch_dims)Counts the number of elements that are part of each pair of lists.
Args
values: Tensor holding the elements of each list.
arg_dims: Dims enumerating the input lists.
list_dims: Dims listing the elements.
feature_dims: Vector dims of one element. Elements are equal if all values along feature_dims are equal.
Returns
Tensor.
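A hypothetical sketch of count_intersections(); a spatial dim enumerates the two lists here so that the default feature_dims=channel stays empty:
>>> from phiml import math
>>> from phiml.math import wrap, spatial, instance
>>> values = wrap([[1, 2, 3], [3, 4, 5]], spatial('lists'), instance('items'))
>>> math.count_intersections(values, 'lists')  # pairwise shared-element counts along 'lists' and its dual; lists 0 and 1 share one element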
def count_occurrences(values: phiml.math._tensors.Tensor,
query: phiml.math._tensors.Tensor,
feature_dims: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function channel>) ‑> phiml.math._tensors.Tensor-
def count_occurrences(values: Tensor, query: Tensor, feature_dims: DimFilter = channel) -> Tensor: """ For each query item, counts how often this value occurs in `values`. See Also: `contains()`. Args: values: Data `Tensor` containing all `feature_dims`. All non-batch and dims not specified as `feature_dims` are flattened. query: Items to count the occurrences of. Must contain all `feature_dims`. feature_dims: One item is considered to be the set of all values along `feature_dims`. The number of items in a tensor is given by all dims except `feature_dims`. Returns: Integer `Tensor` matching `query` without `feature_dims`. """ feature_dims = values.shape.only(feature_dims) assert feature_dims in query batches = batch(values) & batch(query) values_nat = values.native([batches, ..., feature_dims]) query_nat = query.native([batches, ..., feature_dims]) def np_count(query_np: np.ndarray, values_np: np.ndarray): query_and_values = np.concatenate([query_np, values_np], 1) result_np = [] for i in range(batches.volume): unique, inverse, counts = np.unique(query_and_values[i], axis=0, return_counts=True, return_inverse=True) combined_occurrences = counts[inverse][:query_np.shape[1]] unique, inverse, counts = np.unique(query_np[i], axis=0, return_counts=True, return_inverse=True) query_occurrences = counts[inverse] result_np.append(combined_occurrences - query_occurrences) return np.stack(result_np).astype(np.int32) result_nat = choose_backend(query_nat, values_nat).numpy_call(np_count, (batches.volume, (non_batch(query) - feature_dims).volume), INT32, query_nat, values_nat) return reshaped_tensor(result_nat, [batches, non_batch(query) - feature_dims], convert=False)For each query item, counts how often this value occurs in
values.
See Also: contains().
Args
values: Data Tensor containing all feature_dims. All non-batch dims not specified as feature_dims are flattened.
query: Items to count the occurrences of. Must contain all feature_dims.
feature_dims: One item is considered to be the set of all values along feature_dims. The number of items in a tensor is given by all dims except feature_dims.
Returns
Integer Tensor matching query without feature_dims.
def cpack(value,
packed_dim: str | phiml.math._shape.Shape,
pos: int | None = None,
**kwargs)-
def cpack(value, packed_dim: Union[Shape, str], pos: Optional[int] = None, **kwargs):
    """Short for `pack_dims(..., dims=channel)`"""
    return pack_dims(value, channel, packed_dim, pos=pos, **kwargs)
Short for pack_dims(..., dims=channel).
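A one-line sketch of cpack(); the dim names are made up:
>>> from phiml import math
>>> from phiml.math import zeros, channel
>>> t = zeros(channel(a=2, b=3))
>>> math.cpack(t, 'ab')  # packs the channel dims a and b into a single channel dim 'ab' of size 6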
def crange(start: int = 0, **stop: int) ‑> phiml.math._tensors.Tensor[int]-
def crange(start: int = 0, **stop: int) -> Tensor[int]:
    """ Construct a range `Tensor` along one channel dim. """
    assert len(stop) == 1, f"crange() requires exactly one stop dimension but got {stop}"
    return arange(channel(next(iter(stop))), start, next(iter(stop.values())))
Construct a range Tensor along one channel dim.
def cross(vec1: phiml.math._tensors.Tensor, vec2: phiml.math._tensors.Tensor) ‑> phiml.math._tensors.Tensor-
Alias for cross_product(), documented below.
def cross_product(vec1: phiml.math._tensors.Tensor, vec2: phiml.math._tensors.Tensor) ‑> phiml.math._tensors.Tensor-
def cross_product(vec1: Tensor, vec2: Tensor) -> Tensor:
    """
    Computes the cross product of two vectors in 2D or 3D.

    Args:
        vec1: `Tensor` with a single channel dimension called `'vector'`
        vec2: `Tensor` with a single channel dimension called `'vector'`

    Returns:
        `Tensor`
    """
    warnings.warn("phiml.math.cross_product() is deprecated. Use PhiFlow's geometry functions instead.", DeprecationWarning)
    vec1 = tensor(vec1)
    vec2 = tensor(vec2)
    spatial_rank = vec1.vector.size if 'vector' in vec1.shape else vec2.vector.size
    if spatial_rank == 2:  # Curl in 2D
        assert 'vector' in vec2.shape
        if 'vector' in vec1.shape:
            v1_x, v1_y = vec1.vector
            v2_x, v2_y = vec2.vector
            return v1_x * v2_y - v1_y * v2_x
        else:
            v2_x, v2_y = vec2.vector
            return vec1 * stack_tensors([-v2_y, v2_x], channel(vec2))
    elif spatial_rank == 3:  # Curl in 3D
        assert 'vector' in vec1.shape and 'vector' in vec2.shape, f"Both vectors must have a 'vector' dimension but got shapes {vec1.shape}, {vec2.shape}"
        v1_x, v1_y, v1_z = vec1.vector
        v2_x, v2_y, v2_z = vec2.vector
        return stack_tensors([
            v1_y * v2_z - v1_z * v2_y,
            v1_z * v2_x - v1_x * v2_z,
            v1_x * v2_y - v1_y * v2_x,
        ], vec1.shape['vector'])
    else:
        raise AssertionError(f'dims = {spatial_rank}. Vector product not available in > 3 dimensions')
Computes the cross product of two vectors in 2D or 3D.
def csize(obj) ‑> int | None-
def csize(obj) -> Optional[int]:
    """
    Returns the total number of elements listed along channel dims of an object, equal to the product of the sizes of all channel dims.

    Args:
        obj: `Shape` or object with a valid `shape` property.

    Returns:
        Size as `int`. If `obj` is an undefined `Shape`, returns `None`.
    """
    return channel(obj).volume
Returns the total number of elements listed along channel dims of an object, equal to the product of the sizes of all channel dims.
def cumulative_sum(x: phiml.math._tensors.Tensor,
dim: str | Sequence | set | phiml.math._shape.Shape | Callable | None,
include_0=False,
include_sum=True,
index_dim: str | phiml.math._shape.Shape | None = None)-
def cumulative_sum(x: Tensor, dim: DimFilter, include_0=False, include_sum=True, index_dim: Union[str, Shape, None] = None): """ Performs a cumulative sum of `x` along `dim`. Implementations: * NumPy: [`cumsum`](https://numpy.org/doc/stable/reference/generated/numpy.cumsum.html) * PyTorch: [`cumsum`](https://pytorch.org/docs/stable/generated/torch.cumsum.html) * TensorFlow: [`cumsum`](https://www.tensorflow.org/api_docs/python/tf/math/cumsum) * Jax: [`cumsum`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.cumsum.html) Args: x: `Tensor` dim: Dimension along which to sum, as `str` or `Shape`. If multiple dims are passed, `x` the cumulative sum will be computed on the flattened array. include_0: If `True`, adds a 0 to the result before the first value. include_sum: If `False`, the total sum will be sliced off the result. index_dim: If given, adds an index dimension for `dim`. Returns: `Tensor` with the same shape as `x`. """ dim = x.shape.only(dim, reorder=True) assert dim.rank >= 1, f"dim must contain at least one dimension." assert dim.rank == 1 or include_0 + include_sum == 1, f"When summing over multiple flattened dims, exaclty one of (include_0, include_sum) must be True but got include_0={include_0}, include_sum={include_sum}" broadcast = broadcast_dims(x) assert dim.only(broadcast).is_empty, f"Cannot compute cumulative sum along {dim} because input is not uniform along that dimension." def uniform_cumulative_sum(x: Tensor, index_dim=index_dim, dim=dim.names): dim = x.shape.only(dim, reorder=True) native_x = x._reshaped_native([x.shape - dim, dim]) b = choose_backend(native_x) native_result = b.cumsum(native_x, 1) if include_0: native_result = b.pad(native_result, ((0, 0), (1, 0))) if not include_sum: native_result = native_result[:, :-1] result = reshaped_tensor(native_result, [x.shape - dim, dim + (include_0 + include_sum) - 1]) if index_dim is not None: assert dim.rank == 1, f"multi-dimensional indices not yet supported" if isinstance(index_dim, str): index_dim = auto(index_dim, channel) index_dim = index_dim.with_size(dim.name_list) result = expand(result, index_dim) return result return broadcast_op(uniform_cumulative_sum, [x], broadcast)Performs a cumulative sum of
x along dim.
Implementations:
- NumPy: cumsum
- PyTorch: cumsum
- TensorFlow: cumsum
- Jax: cumsum
Args
x: Tensor
dim: Dimension along which to sum, as str or Shape. If multiple dims are passed, the cumulative sum will be computed on the flattened array.
include_0: If True, adds a 0 to the result before the first value.
include_sum: If False, the total sum will be sliced off the result.
index_dim: If given, adds an index dimension for dim.
Returns
Tensor with the same shape as x.
def custom_gradient(f: Callable, gradient: Callable, auxiliary_args: str = '')-
def custom_gradient(f: Callable, gradient: Callable, auxiliary_args: str = ''): """ Creates a function based on `f` that uses a custom gradient for the backpropagation pass. *Warning* This method can lead to memory leaks if the gradient function is not called. Make sure to pass tensors without gradients if the gradient is not required, see `stop_gradient()`. Args: f: Forward function mapping `Tensor` arguments `x` to a single `Tensor` output or sequence of tensors `y`. gradient: Function to compute the vector-Jacobian product for backpropagation. Will be called as `gradient(input_dict, *y, *dy) -> output_dict` where `input_dict` contains all named arguments passed to the forward function and `output_dict` contains only those parameters for which a gradient is defined. auxiliary_args: Comma-separated parameter names of arguments that are not relevant to backpropagation. Returns: Function with similar signature and return values as `f`. However, the returned function does not support keyword arguments. """ auxiliary_args = set(s.strip() for s in auxiliary_args.split(',') if s.strip()) return CustomGradientFunction(f, gradient, auxiliary_args)Creates a function based on
f that uses a custom gradient for the backpropagation pass.
Warning: This method can lead to memory leaks if the gradient function is not called. Make sure to pass tensors without gradients if the gradient is not required, see stop_gradient().
Args
f: Forward function mapping Tensor arguments x to a single Tensor output or sequence of tensors y.
gradient: Function to compute the vector-Jacobian product for backpropagation. Will be called as gradient(input_dict, *y, *dy) -> output_dict where input_dict contains all named arguments passed to the forward function and output_dict contains only those parameters for which a gradient is defined.
auxiliary_args: Comma-separated parameter names of arguments that are not relevant to backpropagation.
Returns
Function with similar signature and return values as f. However, the returned function does not support keyword arguments.
def d2i(value: ~PhiTreeNodeType) ‑> ~PhiTreeNodeType-
def d2i(value: PhiTreeNodeType) -> PhiTreeNodeType:
    """ Change the type of all *dual* dims of `value` to *instance* dimensions. See `rename_dims`. """
    return rename_dims(value, dual, instance)
Change the type of all dual dims of value to instance dimensions. See rename_dims().
def d2s(value: ~PhiTreeNodeType) ‑> ~PhiTreeNodeType-
def d2s(value: PhiTreeNodeType) -> PhiTreeNodeType:
    """ Change the type of all *dual* dims of `value` to *spatial* dimensions. See `rename_dims`. """
    return rename_dims(value, dual, spatial)
Change the type of all dual dims of value to spatial dimensions. See rename_dims().
def degrees_to_radians(deg: ~TensorOrTree) ‑> ~TensorOrTree-
def degrees_to_radians(deg: TensorOrTree) -> TensorOrTree:
    """ Convert degrees to radians. """
    return tree_map(lambda x: x * (3.14159265358979323846 / 180), deg)
Convert degrees to radians.
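A quick sketch pairing degrees_to_radians() with radians_to_degrees() from this module; the dim name is illustrative:
>>> from phiml import math
>>> from phiml.math import wrap, channel
>>> deg = wrap([0., 90., 180.], channel('angle'))
>>> rad = math.degrees_to_radians(deg)  # approximately [0, 1.5708, 3.1416]
>>> math.radians_to_degrees(rad)        # back to [0, 90, 180]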
def dense(x: phiml.math._tensors.Tensor) ‑> phiml.math._tensors.Tensor-
def dense(x: Tensor) -> Tensor: """ Convert a sparse tensor representation to an equivalent dense one in which all values are explicitly stored contiguously in memory. Args: x: Any `Tensor`. Python primitives like `float`, `int` or `bool` will be converted to `Tensors` in the process. Returns: Dense tensor. """ from . import reshaped_tensor if isinstance(x, CompactSparseTensor): x = x.to_coo() if isinstance(x, SparseCoordinateTensor): from ._ops import scatter return scatter(x.shape, x._indices, x._values, mode='add', outside_handling='undefined') elif isinstance(x, CompressedSparseMatrix): ind_batch, channels, native_indices, native_pointers, native_values, native_shape = x._native_csr_components() native_dense = x.default_backend.csr_to_dense(native_indices, native_pointers, native_values, native_shape, contains_duplicates=x._uncompressed_offset is not None) return reshaped_tensor(native_dense, [ind_batch, x._compressed_dims, x._uncompressed_dims, channels]) elif isinstance(x, Dense): return x elif isinstance(x, TensorStack): inner_dense = [dense(inner) for inner in x._tensors] from ._ops import stack_tensors return stack_tensors(inner_dense, x._stack_dim) elif isinstance(x, Tensor): return x._cached() elif isinstance(x, (Number, bool)): return wrap(x)Convert a sparse tensor representation to an equivalent dense one in which all values are explicitly stored contiguously in memory.
Args
x: Any Tensor. Python primitives like float, int or bool will be converted to Tensors in the process.
Returns
Dense tensor.
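A minimal sketch of dense(); per the Args above, Python primitives are wrapped and sparse tensors are stored explicitly:
>>> from phiml import math
>>> math.dense(1.5)  # Python primitive becomes a scalar Tensor
>>> # for any sparse tensor s, e.g. from sparse_tensor() or to_format(), math.dense(s) stores all entries, including zeros, contiguously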
def dim_mask(all_dims: phiml.math._shape.Shape | tuple | list,
dims: str | Sequence | set | phiml.math._shape.Shape | Callable | None,
mask_dim=(vectorᶜ)) ‑> phiml.math._tensors.Tensor-
def dim_mask(all_dims: Union[Shape, tuple, list], dims: DimFilter, mask_dim=channel('vector')) -> Tensor:
    """
    Creates a masked vector with 1 elements for `dims` and 0 for all other dimensions in `all_dims`.

    Args:
        all_dims: All dimensions for which the vector should have an entry.
        dims: Dimensions marked as 1.
        mask_dim: Dimension of the masked vector. Item names are assigned automatically.

    Returns:
        `Tensor`
    """
    assert isinstance(all_dims, (Shape, tuple, list)), f"all_dims must be a tuple or Shape but got {type(all_dims)}"
    assert isinstance(mask_dim, SHAPE_TYPES) and mask_dim.rank == 1, f"mask_dim must be a single-dimension Shape but got {mask_dim}"
    if isinstance(all_dims, (tuple, list)):
        all_dims = spatial(*all_dims)
    dims = all_dims.only(dims)
    mask = [dim in dims for dim in all_dims]
    mask_dim = mask_dim.with_size(all_dims.names)
    return wrap(mask, mask_dim)
Creates a masked vector with entries 1 for dims and 0 for all other dimensions in all_dims.
Args
all_dims: All dimensions for which the vector should have an entry.
dims: Dimensions marked as 1.
mask_dim: Dimension of the masked vector. Item names are assigned automatically.
Returns
Tensor
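A usage sketch for dim_mask(); the dim names are illustrative:
>>> from phiml import math
>>> from phiml.math import spatial
>>> math.dim_mask(spatial(x=8, y=8), 'x')  # boolean vector (x=True, y=False) along channel dim 'vector'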
def dot(x: phiml.math._tensors.Tensor,
x_dims: str | Sequence | set | phiml.math._shape.Shape | Callable | None,
y: phiml.math._tensors.Tensor,
y_dims: str | Sequence | set | phiml.math._shape.Shape | Callable | None) ‑> phiml.math._tensors.Tensor-
def dot(x: Tensor, x_dims: DimFilter, y: Tensor, y_dims: DimFilter) -> Tensor: """ Computes the dot product along the specified dimensions. Contracts `x_dims` with `y_dims` by first multiplying the elements and then summing them up. For one dimension, this is equal to matrix-matrix or matrix-vector multiplication. The function replaces the traditional `dot` / `tensordot` / `matmul` / `einsum` functions. * NumPy: [`numpy.tensordot`](https://numpy.org/doc/stable/reference/generated/numpy.tensordot.html), [`numpy.einsum`](https://numpy.org/doc/stable/reference/generated/numpy.einsum.html) * PyTorch: [`torch.tensordot`](https://pytorch.org/docs/stable/generated/torch.tensordot.html#torch.tensordot), [`torch.einsum`](https://pytorch.org/docs/stable/generated/torch.einsum.html) * TensorFlow: [`tf.tensordot`](https://www.tensorflow.org/api_docs/python/tf/tensordot), [`tf.einsum`](https://www.tensorflow.org/api_docs/python/tf/einsum) * Jax: [`jax.numpy.tensordot`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.tensordot.html), [`jax.numpy.einsum`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.einsum.html) Args: x: First `Tensor` x_dims: Dimensions of `x` to reduce against `y` y: Second `Tensor` y_dims: Dimensions of `y` to reduce against `x`. Returns: Dot product as `Tensor`. """ x_dims = x.shape.only(x_dims) y_dims = y.shape.only(y_dims) if not x_dims: return x * sum_(y, y_dims) if not y_dims: return sum_(x, x_dims) * y def tensor_dot(x, y): if is_sparse(x) or is_sparse(y): if x_dims.isdisjoint(sparse_dims(x)) and y_dims.isdisjoint(sparse_dims(y)): # dot only dense dims if is_sparse(x): return x._op2(y, lambda vx, vy: dot(vx, x_dims, vy, y_dims), False) else: return y._op2(x, lambda vy, vx: dot(vx, x_dims, vy, y_dims), False) else: return sparse_dot(x, x_dims, y, y_dims) if x._is_tracer: return x._dot(x_dims, y, y_dims) if y._is_tracer: return y._dot(y_dims, x, x_dims) x_native = x.native(x.shape) y_native = y.native(y.shape) backend = choose_backend(x_native, y_native) remaining_shape_x = x.shape.without(x_dims) remaining_shape_y = y.shape.without(y_dims) assert x_dims.volume == y_dims.volume, f"Failed to reduce {x_dims} against {y_dims} in dot product of {x.shape} and {y.shape}. Sizes do not match." 
if remaining_shape_y.isdisjoint(remaining_shape_x): # no shared batch dimensions -> tensordot result_native = backend.tensordot(x_native, x.shape.indices(x_dims.names), y_native, y.shape.indices(y_dims.names)) result_shape = remaining_shape_x + remaining_shape_y else: # shared batch dimensions -> einsum result_shape = merge_shapes(x.shape.without(x_dims), y.shape.without(y_dims)) REDUCE_LETTERS = list('ijklmn') KEEP_LETTERS = list('abcdefgh') x_letters = [(REDUCE_LETTERS if dim in x_dims else KEEP_LETTERS).pop(0) for dim in x.shape.names] letter_map = {dim: letter for dim, letter in zip(x.shape.names, x_letters)} REDUCE_LETTERS = list('ijklmn') y_letters = [] for dim in y.shape.names: if dim in y_dims: y_letters.append(REDUCE_LETTERS.pop(0)) else: if dim in x.shape and dim not in x_dims: y_letters.append(letter_map[dim]) else: next_letter = KEEP_LETTERS.pop(0) letter_map[dim] = next_letter y_letters.append(next_letter) keep_letters = [letter_map[dim] for dim in result_shape.names] subscripts = f'{"".join(x_letters)},{"".join(y_letters)}->{"".join(keep_letters)}' result_native = backend.einsum(subscripts, x_native, y_native) return Dense(result_native, result_shape.names, result_shape, backend) broadcast = broadcast_dims(x, y) assert x_dims.only(broadcast).is_empty and y_dims.only(broadcast).is_empty, f"Broadcasting reduction dims not supported for dot product along {x_dims} and {y_dims}." return broadcast_op(tensor_dot, [x, y])Computes the dot product along the specified dimensions. Contracts
x_dims with y_dims by first multiplying the elements and then summing them up.
For one dimension, this is equal to matrix-matrix or matrix-vector multiplication.
The function replaces the traditional dot / tensordot / matmul / einsum functions.
- NumPy: numpy.tensordot, numpy.einsum
- PyTorch: torch.tensordot, torch.einsum
- TensorFlow: tf.tensordot, tf.einsum
- Jax: jax.numpy.tensordot, jax.numpy.einsum
Args
x: First Tensor
x_dims: Dimensions of x to reduce against y
y: Second Tensor
y_dims: Dimensions of y to reduce against x.
Returns
Dot product as Tensor.
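A matrix-vector sketch for dot(), contracting a dual dim against its primal counterpart; the names are illustrative and '~cols' refers to the dual dim by its prefixed name:
>>> from phiml import math
>>> from phiml.math import random_uniform, channel, dual
>>> mat = random_uniform(channel(rows=2), dual(cols=3))
>>> vec = random_uniform(channel(cols=3))
>>> math.dot(mat, '~cols', vec, 'cols')  # sums mat.~cols * vec.cols, leaving dims (rows=2)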
def downsample2x(grid: phiml.math._tensors.Tensor,
padding: Extrapolation = zero-gradient,
dims: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function spatial>) ‑> phiml.math._tensors.Tensor-
def downsample2x(grid: Tensor, padding: Extrapolation = extrapolation.BOUNDARY, dims: DimFilter = spatial) -> Tensor: """ Resamples a regular grid to half the number of spatial sample points per dimension. The grid values at the new points are determined via mean (linear interpolation). Args: grid: full size grid padding: grid extrapolation. Used to insert an additional value for odd spatial dims dims: dims along which down-sampling is applied. If None, down-sample along all spatial dims. grid: Tensor: padding: Extrapolation: (Default value = extrapolation.BOUNDARY) dims: tuple or None: (Default value = None) Returns: half-size grid """ if grid is None: return None dims = grid.shape.only(dims).names odd_dimensions = [dim for dim in dims if grid.shape.get_size(dim) % 2 != 0] grid = math.pad(grid, {dim: (0, 1) for dim in odd_dimensions}, padding) for dim in dims: grid = (grid[{dim: slice(1, None, 2)}] + grid[{dim: slice(0, None, 2)}]) / 2 return gridResamples a regular grid to half the number of spatial sample points per dimension. The grid values at the new points are determined via mean (linear interpolation).
Args
grid: Full size grid.
padding: Grid extrapolation. Used to insert an additional value for odd spatial dims. (Default: extrapolation.BOUNDARY)
dims: Dims along which down-sampling is applied. If None, down-sample along all spatial dims.
Returns
Half-size grid.
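A quick sketch for downsample2x(); the sizes are illustrative:
>>> from phiml import math
>>> from phiml.math import random_uniform, spatial
>>> grid = random_uniform(spatial(x=8, y=6))
>>> math.downsample2x(grid)  # averages neighboring pairs, yielding spatial dims (x=4, y=3)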
def dpack(value,
packed_dim: str | phiml.math._shape.Shape,
pos: int | None = None,
**kwargs)-
def dpack(value, packed_dim: Union[Shape, str], pos: Optional[int] = None, **kwargs):
    """Short for `pack_dims(..., dims=dual)`"""
    return pack_dims(value, dual, packed_dim, pos=pos, **kwargs)
Short for pack_dims(..., dims=dual).
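The dual-dim analog of the cpack() sketch above; the dim names are made up:
>>> from phiml import math
>>> from phiml.math import zeros, dual, channel
>>> m = zeros(dual(i=2, j=3), channel(c=4))
>>> math.dpack(m, 'ij')  # packs the dual dims ~i and ~j into one dual dim of size 6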
def drange(start: int = 0, **stop: int) ‑> phiml.math._tensors.Tensor[int]-
def drange(start: int = 0, **stop: int) -> Tensor[int]:
    """ Construct a range `Tensor` along one dual dim. """
    assert len(stop) == 1, f"drange() requires exactly one stop dimension but got {stop}"
    return arange(dual(next(iter(stop))), start, next(iter(stop.values())))
Construct a range Tensor along one dual dim.
def dsize(obj) ‑> int | None-
def dsize(obj) -> Optional[int]:
    """
    Returns the total number of elements listed along dual dims of an object, equal to the product of the sizes of all dual dims.

    Args:
        obj: `Shape` or object with a valid `shape` property.

    Returns:
        Size as `int`. If `obj` is an undefined `Shape`, returns `None`.
    """
    return dual(obj).volume
Returns the total number of elements listed along dual dims of an object, equal to the product of the sizes of all dual dims.
def dtype(x) ‑> phiml.backend._dtype.DType-
def dtype(x) -> DType:
    """
    Returns the data type of `x`.

    Args:
        x: `Tensor` or native tensor.

    Returns:
        `DType`
    """
    if isinstance(x, Tensor):
        return x.dtype
    else:
        return choose_backend(x).dtype(x)
Returns the data type of x.
def dual(*args,
**dims: int | str | tuple | list | phiml.math._shape.Shape | ForwardRef('Tensor')) ‑> phiml.math._shape.Shape-
def dual(*args, **dims: Union[int, str, tuple, list, Shape, 'Tensor']) -> Shape: """ Returns the dual dimensions of an existing `Shape` or creates a new `Shape` with only dual dimensions. Dual dimensions are assigned the prefix `~` to distinguish them from regular dimensions. This way, a regular and dual dimension of the same name can exist in one `Shape`. Dual dimensions represent the input space and are typically only present on matrices or higher-order matrices. Dual dimensions behave like batch dimensions in regular operations, if supported. During matrix multiplication, they are matched against their regular counterparts by name (ignoring the `~` prefix). Usage for filtering dual dimensions: >>> dual_dims = dual(shape) >>> dual_dims = dual(tensor) Usage for creating a `Shape` with only dual dimensions: >>> dual('undef', points=2) (~undefᵈ=None, ~pointsᵈ=2) Here, the dimension `undef` is created with an undefined size of `None`. Undefined sizes are automatically filled in by `tensor`, `wrap`, `stack` and `concat`. To create a shape with multiple types, use `merge_shapes()`, `concat_shapes()` or the syntax `shape1 & shape2`. See Also: `channel`, `batch`, `spatial` Args: *args: Either * `Shape` or `Tensor` to filter or * Names of dimensions with undefined sizes as `str`. **dims: Dimension sizes and names. Must be empty when used as a filter operation. Returns: `Shape` containing only dimensions of type dual. """ if all(isinstance(arg, str) for arg in args) or dims: return _construct_shape(DUAL_DIM, *args, **dims) elif len(args) == 1 and isinstance(args[0], SHAPE_TYPES): return args[0].dual assert len(args) == 1, f"dual() must be called either as a selector dual(Shape) or dual(Tensor) or as a constructor dual(*names, **dims). Got *args={args}, **dims={dims}" return shape(args[0]).dualReturns the dual dimensions of an existing
Shape or creates a new Shape with only dual dimensions.
Dual dimensions are assigned the prefix ~ to distinguish them from regular dimensions. This way, a regular and dual dimension of the same name can exist in one Shape.
Dual dimensions represent the input space and are typically only present on matrices or higher-order matrices. Dual dimensions behave like batch dimensions in regular operations, if supported. During matrix multiplication, they are matched against their regular counterparts by name (ignoring the ~ prefix).
Usage for filtering dual dimensions:
>>> dual_dims = dual(shape)
>>> dual_dims = dual(tensor)
Usage for creating a Shape with only dual dimensions:
>>> dual('undef', points=2)
(~undefᵈ=None, ~pointsᵈ=2)
Here, the dimension undef is created with an undefined size of None. Undefined sizes are automatically filled in by tensor, wrap, stack and concat.
To create a shape with multiple types, use merge_shapes(), concat_shapes() or the syntax shape1 & shape2.
See Also: channel, batch, spatial
Args
*args: Either a Shape or Tensor to filter, or names of dimensions with undefined sizes as str.
**dims: Dimension sizes and names. Must be empty when used as a filter operation.
Returns
Shape containing only dimensions of type dual.
def eigenvalues(matrix: phiml.math._tensors.Tensor, eigen_dim=(eigenvaluesᶜ))-
def eigenvalues(matrix: Tensor, eigen_dim=channel('eigenvalues')): """ Computes the eigenvalues of a square matrix. The matrix columns are listed along dual dimensions and the rows are listed along the corresponding non-dual dimensions. Row dims are matched by name if possible, else all primal dims are used. Args: matrix: Square matrix. Must have at least one dual dim and corresponding non-dual dim. eigen_dim: Dimension along which eigenvalues should be listed. Returns: `Tensor` listing the eigenvalues along `eigen_dim`. """ cols = dual(matrix) assert cols, f"Matrix must have at least one dual dim listing the columns" rows = matrix.shape.only(cols.as_batch().name_list) if not rows: rows = primal(matrix) assert rows.volume == cols.volume, f"Matrix rows {rows} don't match cols {cols}" batch_dims = matrix.shape.without(cols).without(rows) native_matrix = matrix._reshaped_native([*batch_dims, rows, cols]) native_result = matrix.backend.eigvals(native_matrix) return reshaped_tensor(native_result, [*batch_dims, eigen_dim], convert=False)Computes the eigenvalues of a square matrix. The matrix columns are listed along dual dimensions and the rows are listed along the corresponding non-dual dimensions. Row dims are matched by name if possible, else all primal dims are used.
Args
matrix: Square matrix. Must have at least one dual dim and corresponding non-dual dim.
eigen_dim: Dimension along which eigenvalues should be listed.
Returns
Tensor listing the eigenvalues along eigen_dim.
def enable_debug_checks()-
def enable_debug_checks():
    """ Once called, additional type checks are enabled. This may result in a noticeable drop in performance. """
    DEBUG_CHECKS.append(True)
Once called, additional type checks are enabled. This may result in a noticeable drop in performance.
def equal(*objects, equal_nan=False) ‑> bool-
def equal(*objects, equal_nan=False) -> bool: """ Checks whether all objects are equal. See Also: `close()`, `always_close()`. Args: *objects: Objects to compare. Can be tensors or other objects or `None` equal_nan: If all objects are tensor-like, whether to count `NaN` values as equal. Returns: `bool`, whether all given objects are equal to the first one. """ if objects[0] is None: return all(o is None for o in objects) if any(o is None for o in objects): return False if all(o is objects[0] for o in objects): return True try: tensors = [wrap(o) for o in objects] if any(t.dtype.kind == object for t in tensors): raise ValueError except ValueError: # not all are tensor-like if any(isinstance(o, Tensor) and o.dtype.kind != object for o in objects): return False # numeric tensor mixed not equal to non-tensor return all(o == objects[0] for o in objects[1:]) return close(*tensors, rel_tolerance=0, abs_tolerance=0, equal_nan=equal_nan)Checks whether all objects are equal.
See Also: close(), always_close().
Args
*objects: Objects to compare. Can be tensors or other objects or None.
equal_nan: If all objects are tensor-like, whether to count NaN values as equal.
Returns
bool, whether all given objects are equal to the first one.
def erf(x: ~TensorOrTree) ‑> ~TensorOrTree-
def erf(x: TensorOrTree) -> TensorOrTree:
    """ Computes the error function *erf(x)* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.erf, erf)
Computes the error function erf(x) of the Tensor or PhiTreeNode x.
def exp(x: ~TensorOrTree) ‑> ~TensorOrTree-
def exp(x: TensorOrTree) -> TensorOrTree:
    """ Computes *exp(x)* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.exp, exp)
Computes exp(x) of the Tensor or PhiTreeNode x.
def expand(value, *dims: str | phiml.math._shape.Shape, **kwargs)-
def expand(value, *dims: Union[Shape, str], **kwargs):
    """
    Adds dims to a `Tensor` or tensor-like object by implicitly repeating the tensor values along the new dimensions.
    If `value` already contains any of the new dimensions, a size and type check is performed for these instead.

    If any of `dims` varies along a dimension that is present neither in `value` nor on `dims`, it will also be added to `value`.

    This function replaces the usual `tile` / `repeat` functions of
    [NumPy](https://numpy.org/doc/stable/reference/generated/numpy.tile.html),
    [PyTorch](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.repeat),
    [TensorFlow](https://www.tensorflow.org/api_docs/python/tf/tile) and
    [Jax](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.tile.html).

    Additionally, it replaces the traditional `unsqueeze` / `expand_dims` functions.

    Args:
        value: `phiml.math.magic.Shapable`, such as `phiml.math.Tensor`
            For tree nodes, expands all value attributes by `dims` or the first variable attribute if no value attributes are set.
        *dims: Dimensions to be added as `Shape`
        **kwargs: Additional keyword arguments required by specific implementations.
            Adding spatial dims to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions.
            Adding batch dims must always work without keyword arguments.

    Returns:
        Same type as `value`.
    """
    if not dims:
        return value
    dims = concat_shapes_(*[d if isinstance(d, SHAPE_TYPES) else parse_shape_spec(d) for d in dims])
    combined = merge_shapes(value, dims)  # check that existing sizes match
    if not dims.without(shape(value)):  # no new dims to add
        if set(dims) == set(shape(value).only(dims)):  # sizes and labels might differ, though
            return value
        dims &= combined.non_uniform_shape  # add missing non-uniform dims
    # --- First try __expand__ ---
    if hasattr(value, '__expand__'):
        result = value.__expand__(dims, **kwargs)
        if result is not NotImplemented:
            return result
    # --- Next try Tree Node ---
    if isinstance(value, PhiTreeNode):
        new_attributes = {a: expand(getattr(value, a), dims, **kwargs) for a in all_attributes(value)}
        return copy_with(value, **new_attributes)
    # --- Fallback: stack ---
    if hasattr(value, '__stack__'):
        if dims.volume > 8:
            warnings.warn(f"expand() default implementation is slow on large shapes {dims}. Please implement __expand__() for {type(value).__name__} as defined in phiml.math.magic", RuntimeWarning, stacklevel=2)
        for dim in reversed(dims):
            value = stack((value,) * dim.size, dim, **kwargs)
            assert value is not NotImplemented, "Value must implement either __expand__ or __stack__"
        return value
    try:  # value may be a native scalar
        from ._tensors import expand_tensor, wrap
        value = wrap(value)
    except ValueError:
        raise AssertionError(f"Cannot expand non-shapable object {type(value)}")
    return expand_tensor(value, dims)

Adds dims to a Tensor or tensor-like object by implicitly repeating the tensor values along the new dimensions. If value already contains any of the new dimensions, a size and type check is performed for these instead.

If any of dims varies along a dimension that is present neither in value nor on dims, it will also be added to value.

This function replaces the usual tile / repeat functions of NumPy, PyTorch, TensorFlow and Jax. Additionally, it replaces the traditional unsqueeze / expand_dims functions.

Args
value: Shapable, such as Tensor. For tree nodes, expands all value attributes by dims or the first variable attribute if no value attributes are set.
*dims: Dimensions to be added as Shape
**kwargs: Additional keyword arguments required by specific implementations. Adding spatial dims to fields requires the bounds: Box argument specifying the physical extent of the new dimensions. Adding batch dims must always work without keyword arguments.

Returns
Same type as value.
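A minimal usage sketch (not part of the original docstring; the shapes in the comments are indicative and assume the default NumPy backend):

>>> from phiml import math
>>> from phiml.math import wrap, spatial, batch
>>> t = wrap([1., 2., 3.], spatial('x'))   # shape (xˢ=3)
>>> t_b = math.expand(t, batch(b=4))       # values are repeated along the new dim
>>> t_b.shape                              # (bᵇ=4, xˢ=3)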
def factor_ilu(matrix: phiml.math._tensors.Tensor, iterations: int, safe=False)-
Expand source code
def factor_ilu(matrix: Tensor, iterations: int, safe=False):
    """
    Incomplete LU factorization for dense or sparse matrices.

    For sparse matrices, keeps the sparsity pattern of `matrix`.
    L and U will be trimmed to the respective areas, i.e. stored upper elements in L will be dropped,
    unless this would lead to varying numbers of stored elements along a batch dimension.

    Args:
        matrix: Dense or sparse matrix to factor.
            Currently, compressed sparse matrices are decompressed before running the ILU algorithm.
            Sparse matrices must not store any zeros in their values explicitly.
            If in doubt, convert the matrix to `csr` or `csc` beforehand to eliminate zeros.
        iterations: (Optional) Number of fixed-point iterations to perform.
            If not given, will be automatically determined from matrix size and sparsity.
        safe: If `False` (default), only matrices with a rank deficiency of up to 1 can be factored
            as all values of L and U are uniquely determined.
            For matrices with higher rank deficiencies, the result includes `NaN` values.
            If `True`, the algorithm runs slightly slower but can factor highly rank-deficient matrices as well.
            However, then L is underdetermined and unused values of L are set to 0.
            Rank deficiencies of 1 occur frequently in periodic settings but higher ones are rare.

    Returns:
        L: Lower-triangular matrix as `Tensor` with all diagonal elements equal to 1.
        U: Upper-triangular matrix as `Tensor`.

    Examples:
        >>> matrix = wrap([[-2, 1, 0],
        >>>                [1, -2, 1],
        >>>                [0, 1, -2]], channel('row'), dual('col'))
        >>> L, U = math.factor_ilu(matrix)
        >>> math.print(L)
        row=0    1.    0.          0.   along ~col
        row=1   -0.5   1.          0.   along ~col
        row=2    0.   -0.6666667   1.   along ~col
        >>> math.print(L @ U, "L @ U")
        L @ U
        row=0   -2.   1.   0.   along ~col
        row=1    1.  -2.   1.   along ~col
        row=2    0.   1.  -2.   along ~col
    """
    if isinstance(matrix, CompressedSparseMatrix):
        matrix = matrix.decompress()
    if isinstance(matrix, SparseCoordinateTensor):
        ind_batch, channels, indices, values, shape = matrix._native_coo_components(dual, matrix=True)
        (l_idx_nat, l_val_nat), (u_idx_nat, u_val_nat) = incomplete_lu_coo(indices, values, shape, iterations, safe)
        col_dims = matrix._shape.only(dual)
        row_dims = matrix._dense_shape.without(col_dims)
        l_indices = matrix._unpack_indices(l_idx_nat[..., 0], l_idx_nat[..., 1], row_dims, col_dims, ind_batch)
        u_indices = matrix._unpack_indices(u_idx_nat[..., 0], u_idx_nat[..., 1], row_dims, col_dims, ind_batch)
        l_values = reshaped_tensor(l_val_nat, [ind_batch, instance(matrix._values), channels], convert=False)
        u_values = reshaped_tensor(u_val_nat, [ind_batch, instance(matrix._values), channels], convert=False)
        lower = SparseCoordinateTensor(l_indices, l_values, matrix._dense_shape, matrix._can_contain_double_entries, matrix._indices_sorted, matrix._indices_constant)
        upper = SparseCoordinateTensor(u_indices, u_values, matrix._dense_shape, matrix._can_contain_double_entries, matrix._indices_sorted, matrix._indices_constant)
    else:  # dense matrix
        native_matrix = matrix.native([batch, non_batch(matrix).non_dual, dual, EMPTY_SHAPE])
        l_native, u_native = incomplete_lu_dense(native_matrix, iterations, safe)
        lower = reshaped_tensor(l_native, [batch(matrix), non_batch(matrix).non_dual, dual(matrix), EMPTY_SHAPE])
        upper = reshaped_tensor(u_native, [batch(matrix), non_batch(matrix).non_dual, dual(matrix), EMPTY_SHAPE])
    return lower, upper

Incomplete LU factorization for dense or sparse matrices.

For sparse matrices, keeps the sparsity pattern of matrix. L and U will be trimmed to the respective areas, i.e. stored upper elements in L will be dropped, unless this would lead to varying numbers of stored elements along a batch dimension.

Args
matrix: Dense or sparse matrix to factor. Currently, compressed sparse matrices are decompressed before running the ILU algorithm. Sparse matrices must not store any zeros in their values explicitly. If in doubt, convert the matrix to csr or csc beforehand to eliminate zeros.
iterations: (Optional) Number of fixed-point iterations to perform. If not given, will be automatically determined from matrix size and sparsity.
safe: If False (default), only matrices with a rank deficiency of up to 1 can be factored as all values of L and U are uniquely determined. For matrices with higher rank deficiencies, the result includes NaN values. If True, the algorithm runs slightly slower but can factor highly rank-deficient matrices as well. However, then L is underdetermined and unused values of L are set to 0. Rank deficiencies of 1 occur frequently in periodic settings but higher ones are rare.

Returns
L: Lower-triangular matrix as Tensor with all diagonal elements equal to 1.
U: Upper-triangular matrix as Tensor.

Examples
>>> matrix = wrap([[-2, 1, 0],
>>>                [1, -2, 1],
>>>                [0, 1, -2]], channel('row'), dual('col'))
>>> L, U = math.factor_ilu(matrix)
>>> math.print(L)
row=0    1.    0.          0.   along ~col
row=1   -0.5   1.          0.   along ~col
row=2    0.   -0.6666667   1.   along ~col
>>> math.print(L @ U, "L @ U")
L @ U
row=0   -2.   1.   0.   along ~col
row=1    1.  -2.   1.   along ~col
row=2    0.   1.  -2.   along ~col
def factorial(x: ~TensorOrTree) ‑> ~TensorOrTree-
Expand source code
def factorial(x: TensorOrTree) -> TensorOrTree:
    """
    Computes *factorial(x)* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`.

    For floating-point numbers computes the continuous factorial using the gamma function.

    For integer numbers computes the exact factorial and returns the same integer type.
    However, this results in integer overflow for inputs larger than 12 (int32) or 19 (int64).
    """
    return _backend_op1(x, Backend.factorial, factorial)

Computes factorial(x) of the Tensor or PhiTreeNode x. For floating-point numbers computes the continuous factorial using the gamma function. For integer numbers computes the exact factorial and returns the same integer type. However, this results in integer overflow for inputs larger than 12 (int32) or 19 (int64).
def fft(x: phiml.math._tensors.Tensor,
dims: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function spatial>) ‑> phiml.math._tensors.Tensor-
Expand source code
def fft(x: Tensor, dims: DimFilter = spatial) -> Tensor:
    """
    Performs a fast Fourier transform (FFT) on all spatial dimensions of x.

    The inverse operation is `ifft()`.

    Implementations:

    * NumPy: [`np.fft.fft`](https://numpy.org/doc/stable/reference/generated/numpy.fft.fft.html),
      [`numpy.fft.fft2`](https://numpy.org/doc/stable/reference/generated/numpy.fft.fft2.html),
      [`numpy.fft.fftn`](https://numpy.org/doc/stable/reference/generated/numpy.fft.fftn.html)
    * PyTorch: [`torch.fft.fft`](https://pytorch.org/docs/stable/fft.html)
    * TensorFlow: [`tf.signal.fft`](https://www.tensorflow.org/api_docs/python/tf/signal/fft),
      [`tf.signal.fft2d`](https://www.tensorflow.org/api_docs/python/tf/signal/fft2d),
      [`tf.signal.fft3d`](https://www.tensorflow.org/api_docs/python/tf/signal/fft3d)
    * Jax: [`jax.numpy.fft.fft`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.fft.fft.html),
      [`jax.numpy.fft.fft2`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.fft.fft2.html),
      [`jax.numpy.fft.fftn`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.fft.fftn.html)

    Args:
        x: Uniform complex or float `Tensor` with at least one spatial dimension.
        dims: Dimensions along which to perform the FFT.
            If `None`, performs the FFT along all spatial dimensions of `x`.

    Returns:
        *Ƒ(x)* as complex `Tensor`
    """
    dims = x.shape.only(dims)
    x_native = x.native(x.shape)
    result_native = x.backend.fft(x_native, x.shape.indices(dims.names))
    return Dense(result_native, x.shape.names, x.shape, x.backend)

Performs a fast Fourier transform (FFT) on all spatial dimensions of x.

The inverse operation is ifft().

Implementations:
- NumPy: np.fft.fft, numpy.fft.fft2, numpy.fft.fftn
- PyTorch: torch.fft.fft
- TensorFlow: tf.signal.fft, tf.signal.fft2d, tf.signal.fft3d
- Jax: jax.numpy.fft.fft, jax.numpy.fft.fft2, jax.numpy.fft.fftn

Args
x: Uniform complex or float Tensor with at least one spatial dimension.
dims: Dimensions along which to perform the FFT. If None, performs the FFT along all spatial dimensions of x.

Returns
Ƒ(x) as complex Tensor
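A small round-trip sketch (added for illustration; assumes a real-valued input so the result dtype is complex):

>>> from phiml import math
>>> from phiml.math import spatial
>>> x = math.random_normal(spatial(x=64))
>>> k = math.fft(x)                     # complex Tensor, same shape as x
>>> x_back = math.real(math.ifft(k))    # recovers x up to floating-point error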
def fftfreq(resolution: phiml.math._shape.Shape,
dx: float | phiml.math._tensors.Tensor = 1,
dtype: phiml.backend._dtype.DType = None)-
Expand source code
def fftfreq(resolution: Shape, dx: Union[Tensor, float] = 1, dtype: DType = None):
    """
    Returns the discrete Fourier transform sample frequencies.
    These are the frequencies corresponding to the components of the result of `math.fft` on a tensor of shape `resolution`.

    Args:
        resolution: Grid resolution measured in cells
        dx: Distance between sampling points in real space.
        dtype: Data type of the returned tensor (Default value = None)

    Returns:
        `Tensor` holding the frequencies of the corresponding values computed by math.fft
    """
    assert resolution.spatial, f"resolution must contain at least one spatial dimension"
    k = meshgrid(**{dim.name: np.fft.fftfreq(int(dim.size)) for dim in resolution.spatial})
    k /= dx
    return to_float(k) if dtype is None else cast(k, dtype)

Returns the discrete Fourier transform sample frequencies. These are the frequencies corresponding to the components of the result of math.fft on a tensor of shape resolution.

Args
resolution: Grid resolution measured in cells
dx: Distance between sampling points in real space.
dtype: Data type of the returned tensor (Default value = None)

Returns
Tensor holding the frequencies of the corresponding values computed by math.fft
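For illustration, the frequencies matching an 8-cell grid with spacing 0.5 (a sketch; the result follows the layout of math.meshgrid, with one frequency component per spatial dim listed along a channel dim):

>>> from phiml import math
>>> from phiml.math import spatial
>>> freq = math.fftfreq(spatial(x=8), dx=0.5)   # matches math.fft on an x=8 tensor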
def find_closest(vectors: phiml.math._tensors.Tensor,
query: phiml.math._tensors.Tensor = None,
/,
method='kd',
index_dim=(indexᶜ))-
Expand source code
def find_closest(vectors: Tensor, query: Tensor = None, /, method='kd', index_dim=channel('index')):
    """
    Finds the closest vector to `query` from `vectors`.
    This is implemented using a k-d tree built from `vectors`.

    Args:
        vectors: Points to find.
        query: (Optional) Target locations.
            If not specified, returns a function (query) -> index which caches the acceleration structure.
            Otherwise, returns the index tensor.
        method: One of the following:

            * `'dense'`: compute the pair-wise distances between all vectors and query points, then return the index of the smallest distance for each query point.
            * `'kd'` (default): Build a k-d tree from `vectors` and use it to query all points in `query`. The tree will be cached if this call is jit-compiled and `vectors` is constant.
        index_dim: Dimension along which components should be listed as `Shape`. Pass `None` to get 1D indices as scalars.

    Returns:
        Index tensor `idx` so that the closest points to `query` are `vectors[idx]`.
    """
    index_dim = None if index_dim is None else index_dim.with_size(non_batch(vectors).non_channel.names)
    if method == 'dense':
        def find_fun(query: Tensor):
            dist = math.sum_((query - vectors) ** 2, channel)
            idx = math.argmin(dist, non_batch(vectors).non_channel)
            return rename_dims(idx, '_index', index_dim) if index_dim is not None else idx._index[0]
    elif method == 'kd':
        # try:
        #     from sklearn.neighbors import KDTree
        # except ImportError:
        from scipy.spatial import cKDTree as KDTree
        def find_fun(query: Tensor):
            result = []
            for i in batch(vectors).meshgrid():
                query_i = query[i]
                native_query = query_i.native([..., channel])
                if vectors.available:
                    kd_tree = KDTree(vectors[i].numpy([..., channel]))
                    def perform_query(np_query):
                        return kd_tree.query(np_query)[1]
                    native_idx = query.default_backend.numpy_call(perform_query, (query_i.shape.non_channel.volume,), INT64, native_query)
                else:
                    b = backend_for(vectors, query)
                    native_vectors = vectors[i].native([..., channel])
                    def perform_query(np_vectors, np_query):
                        return KDTree(np_vectors).query(np_query)[1]
                    native_idx = b.numpy_call(perform_query, (query.shape.without(batch(vectors)).non_channel.volume,), INT64, native_vectors, native_query)
                native_multi_idx = choose_backend(native_idx).unravel_index(native_idx, after_gather(vectors.shape, i).non_channel.sizes)
                result.append(reshaped_tensor(native_multi_idx, [query_i.shape.non_channel, index_dim or math.EMPTY_SHAPE]))
            return stack(result, batch(vectors))
    else:
        raise ValueError(f"Unsupported method: {method}")
    if query is not None:
        return find_fun(query)
    return find_fun

Finds the closest vector to query from vectors. This is implemented using a k-d tree built from vectors.

Args
vectors: Points to find.
query: (Optional) Target locations. If not specified, returns a function (query) -> index which caches the acceleration structure. Otherwise, returns the index tensor.
method: One of the following:
- 'dense': compute the pair-wise distances between all vectors and query points, then return the index of the smallest distance for each query point.
- 'kd' (default): Build a k-d tree from vectors and use it to query all points in query. The tree will be cached if this call is jit-compiled and vectors is constant.
index_dim: Dimension along which components should be listed as Shape. Pass None to get 1D indices as scalars.

Returns
Index tensor idx so that the closest points to query are vectors[idx].
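A usage sketch (illustrative only; the dimension names are arbitrary):

>>> from phiml import math
>>> from phiml.math import wrap, instance, channel
>>> points = wrap([(0., 0.), (1., 0.), (0., 1.)], instance('points'), channel(vector='x,y'))
>>> query = wrap([(0.9, 0.2)], instance('query'), channel(vector='x,y'))
>>> idx = math.find_closest(points, query)   # nearest-point index per query location
>>> nearest = points[idx]                    # gather the closest vectors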
def find_differences(tree1,
tree2,
compare_tensors_by_id=False,
attr_type=<function value_attributes>,
tensor_equality=None) ‑> Sequence[Tuple[str, str, Any, Any]]-
Expand source code
def find_differences(tree1, tree2, compare_tensors_by_id=False, attr_type=value_attributes, tensor_equality=None) -> Sequence[Tuple[str, str, Any, Any]]:
    """
    Compares `tree1` and `tree2` and returns all differences in the form
    `(difference_description: str, variable_identifier: str, value1, value2)`.

    Args:
        tree1: Nested tree or leaf
        tree2: Nested tree or leaf
        compare_tensors_by_id: Whether `phiml.math.Tensor` objects should be compared by identity or values.
        attr_type: What attributes to compare, either `value_attributes` or `variable_attributes`.
        tensor_equality: Function that compares two tensors for equality. `None` defaults to `equal`.

    Returns:
        List of differences, each represented as a `tuple`.
    """
    result = []
    _recursive_diff(tree1, tree2, '', result, compare_tensors_by_id, attr_type, tensor_equality)
    return result

Compares tree1 and tree2 and returns all differences in the form (difference_description: str, variable_identifier: str, value1, value2).

Args
tree1: Nested tree or leaf
tree2: Nested tree or leaf
compare_tensors_by_id: Whether Tensor objects should be compared by identity or values.
attr_type: What attributes to compare, either value_attributes or variable_attributes.
tensor_equality: Function that compares two tensors for equality. None defaults to equal().

Returns
List of differences, each represented as a tuple.
def finite_fill(values: phiml.math._tensors.Tensor,
dims: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function spatial>,
distance: int = 1,
diagonal: bool = True,
padding=zero-gradient,
padding_kwargs: dict = None) ‑> phiml.math._tensors.Tensor-
Expand source code
def finite_fill(values: Tensor, dims: DimFilter = spatial, distance: int = 1, diagonal: bool = True, padding=extrapolation.BOUNDARY, padding_kwargs: dict = None) -> Tensor:
    """
    Fills non-finite (NaN, inf, -inf) values from nearby finite values.
    Extrapolates the finite values of `values` for `distance` steps along `dims`.
    Where multiple finite values could fill an invalid value, the average is computed.

    Args:
        values: Floating-point `Tensor`. All non-numeric values (`NaN`, `inf`, `-inf`) are interpreted as invalid.
        dims: Dimensions along which to fill invalid values from finite ones.
        distance: Number of extrapolation steps, each extrapolating one cell out.
        diagonal: Whether to extrapolate values to their diagonal neighbors per step.
        padding: Extrapolation of `values`. Determines whether to extrapolate from the edges as well.
        padding_kwargs: Additional keyword arguments to be passed to `phiml.math.pad()`.

    Returns:
        `Tensor` of same shape as `values`.
    """
    if diagonal:
        distance = min(distance, max(values.shape.sizes))
        dims = values.shape.only(dims)
        for _ in range(distance):
            valid = math.is_finite(values)
            valid_values = math.where(valid, values, 0)
            overlap = valid
            for dim in dims:
                values_l, values_r = shift(valid_values, (-1, 1), dims=dim, padding=padding, padding_kwargs=padding_kwargs)
                valid_values = math.sum_(values_l + values_r + valid_values, dim='shift')
                mask_l, mask_r = shift(overlap, (-1, 1), dims=dim, padding=padding, padding_kwargs=padding_kwargs)
                overlap = math.sum_(mask_l + mask_r + overlap, dim='shift')
            values = math.where(valid, values, valid_values / overlap)
    else:
        distance = min(distance, sum(values.shape.sizes))
        for _ in range(distance):
            neighbors = concat(shift(values, (-1, 1), dims, padding=padding, stack_dim=channel('neighbors'), padding_kwargs=padding_kwargs), 'neighbors')
            finite = math.is_finite(neighbors)
            avg_neighbors = math.sum_(math.where(finite, neighbors, 0), 'neighbors') / math.sum_(finite, 'neighbors')
            values = math.where(math.is_finite(values), values, avg_neighbors)
    return values

Fills non-finite (NaN, inf, -inf) values from nearby finite values. Extrapolates the finite values of values for distance steps along dims. Where multiple finite values could fill an invalid value, the average is computed.

Args
values: Floating-point Tensor. All non-numeric values (NaN, inf, -inf) are interpreted as invalid.
dims: Dimensions along which to fill invalid values from finite ones.
distance: Number of extrapolation steps, each extrapolating one cell out.
diagonal: Whether to extrapolate values to their diagonal neighbors per step.
padding: Extrapolation of values. Determines whether to extrapolate from the edges as well.
padding_kwargs: Additional keyword arguments to be passed to pad().

Returns
Tensor of same shape as values.
def finite_max(value,
dim: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function non_batch>,
default: complex | float = nan)-
Expand source code
def finite_max(value, dim: DimFilter = non_batch, default: Union[complex, float] = float('NaN')):
    """
    Finds the maximum along `dim` ignoring all non-finite values.

    Args:
        value: `Tensor` or `list` / `tuple` of Tensors.
        dim: Dimension or dimensions to be reduced. One of

            * `None` to reduce all non-batch dimensions
            * `str` containing single dimension or comma-separated list of dimensions
            * `Tuple[str]` or `List[str]`
            * `Shape`
            * `batch`, `instance`, `spatial`, `channel` to select dimensions by type
            * `'0'` when `isinstance(value, (tuple, list))` to add up the sequence of Tensors

        default: Value to use where no finite value was encountered.

    Returns:
        `Tensor` without the reduced dimensions.
    """
    value_inf = where(is_finite(value), value, float('-inf'))
    result_inf = max_(value_inf, dim)
    return where(is_finite(result_inf), result_inf, default)

Finds the maximum along dim ignoring all non-finite values.

Args
value: Tensor or list / tuple of Tensors.
dim: Dimension or dimensions to be reduced. One of
- None to reduce all non-batch dimensions
- str containing single dimension or comma-separated list of dimensions
- Tuple[str] or List[str]
- Shape
- batch(), instance(), spatial(), channel() to select dimensions by type
- '0' when isinstance(value, (tuple, list)) to add up the sequence of Tensors
default: Value to use where no finite value was encountered.

Returns
Tensor without the reduced dimensions.
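A quick sketch of the NaN-ignoring behavior (illustrative values):

>>> from phiml import math
>>> from phiml.math import wrap, spatial
>>> v = wrap([1., float('nan'), 3.], spatial('x'))
>>> math.finite_max(v)                # 3.0 — the NaN entry is ignored
>>> math.finite_max(v, default=-1.)   # default only applies if no finite value exists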
def finite_mean(value,
dim: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function non_batch>,
default: complex | float = nan)-
Expand source code
def finite_mean(value, dim: DimFilter = non_batch, default: Union[complex, float] = float('NaN')):
    """
    Computes the mean value of all finite values in `value` along `dim`.

    Args:
        value: `Tensor` or `list` / `tuple` of Tensors.
        dim: Dimension or dimensions to be reduced. One of

            * `None` to reduce all non-batch dimensions
            * `str` containing single dimension or comma-separated list of dimensions
            * `Tuple[str]` or `List[str]`
            * `Shape`
            * `batch`, `instance`, `spatial`, `channel` to select dimensions by type
            * `'0'` when `isinstance(value, (tuple, list))` to add up the sequence of Tensors

        default: Value to use where no finite value was encountered.

    Returns:
        `Tensor` without the reduced dimensions.
    """
    finite = is_finite(value)
    summed = sum_(where(finite, value, 0), dim)
    count = sum_(finite, dim)
    mean_nan = summed / count
    if isinstance(default, Number) and np.isnan(default):
        return mean_nan
    return where(is_finite(mean_nan), mean_nan, default)

Computes the mean value of all finite values in value along dim.

Args
value: Tensor or list / tuple of Tensors.
dim: Dimension or dimensions to be reduced. One of
- None to reduce all non-batch dimensions
- str containing single dimension or comma-separated list of dimensions
- Tuple[str] or List[str]
- Shape
- batch(), instance(), spatial(), channel() to select dimensions by type
- '0' when isinstance(value, (tuple, list)) to add up the sequence of Tensors
default: Value to use where no finite value was encountered.

Returns
Tensor without the reduced dimensions.
def finite_min(value,
dim: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function non_batch>,
default: complex | float = nan)-
Expand source code
def finite_min(value, dim: DimFilter = non_batch, default: Union[complex, float] = float('NaN')):
    """
    Finds the minimum along `dim` ignoring all non-finite values.

    Args:
        value: `Tensor` or `list` / `tuple` of Tensors.
        dim: Dimension or dimensions to be reduced. One of

            * `None` to reduce all non-batch dimensions
            * `str` containing single dimension or comma-separated list of dimensions
            * `Tuple[str]` or `List[str]`
            * `Shape`
            * `batch`, `instance`, `spatial`, `channel` to select dimensions by type
            * `'0'` when `isinstance(value, (tuple, list))` to add up the sequence of Tensors

        default: Value to use where no finite value was encountered.

    Returns:
        `Tensor` without the reduced dimensions.
    """
    value_inf = where(is_finite(value), value, float('inf'))
    result_inf = min_(value_inf, dim)
    return where(is_finite(result_inf), result_inf, default)

Finds the minimum along dim ignoring all non-finite values.

Args
value: Tensor or list / tuple of Tensors.
dim: Dimension or dimensions to be reduced. One of
- None to reduce all non-batch dimensions
- str containing single dimension or comma-separated list of dimensions
- Tuple[str] or List[str]
- Shape
- batch(), instance(), spatial(), channel() to select dimensions by type
- '0' when isinstance(value, (tuple, list)) to add up the sequence of Tensors
default: Value to use where no finite value was encountered.

Returns
Tensor without the reduced dimensions.
def finite_std(value,
dim: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function non_batch>,
default: complex | float = nan)-
Expand source code
def finite_std(value, dim: DimFilter = non_batch, default: Union[complex, float] = float('NaN')):
    """
    Computes the standard deviation of all finite values in `value` along `dim`.

    Args:
        value: `Tensor` or `list` / `tuple` of Tensors.
        dim: Dimension or dimensions to be reduced. One of

            * `None` to reduce all non-batch dimensions
            * `str` containing single dimension or comma-separated list of dimensions
            * `Tuple[str]` or `List[str]`
            * `Shape`
            * `batch`, `instance`, `spatial`, `channel` to select dimensions by type
            * `'0'` when `isinstance(value, (tuple, list))` to add up the sequence of Tensors

        default: Value to use where no finite value was encountered.

    Returns:
        `Tensor` without the reduced dimensions.
    """
    return sqrt(finite_mean(value**2, dim, default) - finite_mean(value, dim, default)**2)

Computes the standard deviation of all finite values in value along dim.

Args
value: Tensor or list / tuple of Tensors.
dim: Dimension or dimensions to be reduced. One of
- None to reduce all non-batch dimensions
- str containing single dimension or comma-separated list of dimensions
- Tuple[str] or List[str]
- Shape
- batch(), instance(), spatial(), channel() to select dimensions by type
- '0' when isinstance(value, (tuple, list)) to add up the sequence of Tensors
default: Value to use where no finite value was encountered.

Returns
Tensor without the reduced dimensions.
def finite_sum(value,
dim: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function non_batch>,
default: complex | float = nan)-
Expand source code
def finite_sum(value, dim: DimFilter = non_batch, default: Union[complex, float] = float('NaN')):
    """
    Sums all finite values in `value` along `dim`.

    Args:
        value: `Tensor` or `list` / `tuple` of Tensors.
        dim: Dimension or dimensions to be reduced. One of

            * `None` to reduce all non-batch dimensions
            * `str` containing single dimension or comma-separated list of dimensions
            * `Tuple[str]` or `List[str]`
            * `Shape`
            * `batch`, `instance`, `spatial`, `channel` to select dimensions by type
            * `'0'` when `isinstance(value, (tuple, list))` to add up the sequence of Tensors

        default: Value to use where no finite value was encountered.

    Returns:
        `Tensor` without the reduced dimensions.
    """
    finite = is_finite(value)
    summed = sum_(where(finite, value, 0), dim)
    return where(any_(finite, dim), summed, default)

Sums all finite values in value along dim.

Args
value: Tensor or list / tuple of Tensors.
dim: Dimension or dimensions to be reduced. One of
- None to reduce all non-batch dimensions
- str containing single dimension or comma-separated list of dimensions
- Tuple[str] or List[str]
- Shape
- batch(), instance(), spatial(), channel() to select dimensions by type
- '0' when isinstance(value, (tuple, list)) to add up the sequence of Tensors
default: Value to use where no finite value was encountered.

Returns
Tensor without the reduced dimensions.
def flatten(value, flat_dim: phiml.math._shape.Shape = (flatⁱ), flatten_batch=False, **kwargs)-
Expand source code
def flatten(value, flat_dim: Shape = instance('flat'), flatten_batch=False, **kwargs):
    """
    Returns a `Tensor` with the same values as `value` but only a single dimension `flat_dim`.
    The order of the values in memory is not changed.

    Args:
        value: `phiml.math.magic.Shapable`, such as `Tensor`.
            If a non-`phiml.math.magic.Shaped` object or one with an empty `Shape` is passed, it is returned without alteration.
        flat_dim: Dimension name and type as `Shape` object. The size is ignored.
        flatten_batch: Whether to flatten batch dims as well.
            If `False`, batch dims are kept, only non-batch dims are flattened.
        **kwargs: Additional keyword arguments required by specific implementations.
            Adding spatial dims to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions.
            Adding batch dims must always work without keyword arguments.

    Returns:
        Same type as `value`.

    Examples:
        >>> flatten(math.zeros(spatial(x=4, y=3)))
        (flatⁱ=12) const 0.0
    """
    assert isinstance(flat_dim, SHAPE_TYPES) and flat_dim.rank == 1, flat_dim
    if not isinstance(value, Shaped):
        return value
    if shape(value).is_empty:
        return value
    assert isinstance(value, Shapable) and isinstance(value, Shaped), f"value must be Shapable but got {type(value)}"
    # --- First try __flatten__ ---
    if hasattr(value, '__flatten__'):
        result = value.__flatten__(flat_dim, flatten_batch, **kwargs)
        if result is not NotImplemented:
            return result
    # There is no tree node implementation for flatten because pack_dims is just as fast
    # --- Fallback: pack_dims ---
    return pack_dims(value, shape(value) if flatten_batch else non_batch(value), flat_dim, **kwargs)

Returns a Tensor with the same values as value but only a single dimension flat_dim. The order of the values in memory is not changed.

Args
value: Shapable, such as Tensor. If a non-Shaped object or one with an empty Shape is passed, it is returned without alteration.
flat_dim: Dimension name and type as Shape object. The size is ignored.
flatten_batch: Whether to flatten batch dims as well. If False, batch dims are kept, only non-batch dims are flattened.
**kwargs: Additional keyword arguments required by specific implementations. Adding spatial dims to fields requires the bounds: Box argument specifying the physical extent of the new dimensions. Adding batch dims must always work without keyword arguments.

Returns
Same type as value.

Examples
>>> flatten(math.zeros(spatial(x=4, y=3)))
(flatⁱ=12) const 0.0
def floor(x: ~TensorOrTree) ‑> ~TensorOrTree-
Expand source code
def floor(x: TensorOrTree) -> TensorOrTree:
    """ Computes *⌊x⌋* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.floor, floor)

Computes ⌊x⌋ of the Tensor or PhiTreeNode x.
def fourier_laplace(grid: phiml.math._tensors.Tensor,
dx: phiml.math._tensors.Tensor | phiml.math._shape.Shape | float | list | tuple,
times: int = 1)-
Expand source code
def fourier_laplace(grid: Tensor, dx: Union[Tensor, Shape, float, list, tuple], times: int = 1):
    """
    Applies the spatial laplace operator to the given tensor with periodic boundary conditions.

    *Note:* The results of `fourier_laplace` and `laplace` are close but not identical.

    This implementation computes the laplace operator in Fourier space.
    The result for periodic fields is exact, i.e. no numerical instabilities can occur, even for higher-order derivatives.

    Args:
        grid: tensor, assumed to have periodic boundary conditions
        dx: distance between grid points, tensor-like, scalar or vector
        times: number of times the laplace operator is applied. The computational cost is independent of this parameter.

    Returns:
        tensor of same shape as `grid`
    """
    frequencies = math.fft(math.to_complex(grid))
    k_squared = math.sum_(math.fftfreq(grid.shape) ** 2, 'vector')
    fft_laplace = -(2 * np.pi) ** 2 * k_squared
    result = math.real(math.ifft(frequencies * fft_laplace ** times))
    return math.cast(result / wrap(dx) ** 2, grid.dtype)

Applies the spatial laplace operator to the given tensor with periodic boundary conditions.

Note: The results of fourier_laplace() and laplace() are close but not identical.

This implementation computes the laplace operator in Fourier space. The result for periodic fields is exact, i.e. no numerical instabilities can occur, even for higher-order derivatives.

Args
grid: tensor, assumed to have periodic boundary conditions
dx: distance between grid points, tensor-like, scalar or vector
times: number of times the laplace operator is applied. The computational cost is independent of this parameter.

Returns
Tensor of same shape as grid.
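A minimal sketch (illustrative; the grid is treated as periodic):

>>> from phiml import math
>>> from phiml.math import spatial
>>> grid = math.random_normal(spatial(x=32, y=32))
>>> lap = math.fourier_laplace(grid, dx=1.)   # spectral Laplacian, same shape as grid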
def fourier_poisson(grid: phiml.math._tensors.Tensor,
dx: phiml.math._tensors.Tensor | phiml.math._shape.Shape | float | list | tuple,
times: int = 1)-
Expand source code
def fourier_poisson(grid: Tensor, dx: Union[Tensor, Shape, float, list, tuple], times: int = 1):
    """
    Inverse operation to `fourier_laplace`.

    Args:
        grid: `Tensor`, assumed to have periodic boundary conditions.
        dx: Distance between grid points, scalar or vector.
        times: Number of times the inverse laplace operator is applied (default 1).

    Returns:
        `Tensor` of same shape as `grid`.
    """
    frequencies = math.fft(math.to_complex(grid))
    k_squared = math.sum_(math.fftfreq(grid.shape) ** 2, 'vector')
    fft_laplace = -(2 * np.pi) ** 2 * k_squared
    # fft_laplace.tensor[(0,) * math.ndims(k_squared)] = math.inf  # assume NumPy array to edit
    result = math.real(math.ifft(math.safe_div(frequencies, math.to_complex(fft_laplace ** times))))
    return math.cast(result * wrap(dx) ** 2, grid.dtype)

Inverse operation to fourier_laplace().

Args
grid: Tensor, assumed to have periodic boundary conditions.
dx: Distance between grid points, scalar or vector.
times: Number of times the inverse laplace operator is applied (default 1).

Returns
Tensor of same shape as grid.
def frequency_loss(x, frequency_falloff: float = 100, threshold=1e-05, ignore_mean=False, n=2) ‑> phiml.math._tensors.Tensor-
Expand source code
def frequency_loss(x, frequency_falloff: float = 100, threshold=1e-5, ignore_mean=False, n=2) -> Tensor:
    """
    Penalizes the squared `values` in frequency (Fourier) space.
    Lower frequencies are weighted more strongly than higher frequencies, depending on `frequency_falloff`.

    Args:
        x: `Tensor` or `phiml.math.magic.PhiTreeNode` Values to penalize, typically `actual - target`.
        frequency_falloff: Large values put more emphasis on lower frequencies, 1.0 weights all frequencies equally.
            *Note*: The total loss is not normalized. Varying the value will result in losses of different magnitudes.
        threshold: Frequency amplitudes below this value are ignored.
            Setting this to zero may cause infinities or NaN values during backpropagation.
        ignore_mean: If `True`, does not penalize the mean value (frequency=0 component).

    Returns:
        Scalar loss value
    """
    assert n in (1, 2)
    if isinstance(x, Tensor):
        if ignore_mean:
            x -= math.mean(x, x.shape.non_batch)
        k_squared = math.sum_(math.fftfreq(x.shape.spatial) ** 2, channel)
        weights = math.exp(-0.5 * k_squared * frequency_falloff ** 2)
        diff_fft = abs_square(math.fft(x) * weights)
        diff_fft = math.sqrt(math.maximum(diff_fft, threshold))
        return l2_loss(diff_fft) if n == 2 else l1_loss(diff_fft)
    elif isinstance(x, PhiTreeNode):
        losses = [frequency_loss(getattr(x, a), frequency_falloff, threshold, ignore_mean, n) for a in value_attributes(x)]
        return sum(losses)
    else:
        raise ValueError(x)

Penalizes the squared values in frequency (Fourier) space. Lower frequencies are weighted more strongly than higher frequencies, depending on frequency_falloff.

Args
x: Tensor or PhiTreeNode. Values to penalize, typically actual - target.
frequency_falloff: Large values put more emphasis on lower frequencies, 1.0 weights all frequencies equally. Note: The total loss is not normalized. Varying the value will result in losses of different magnitudes.
threshold: Frequency amplitudes below this value are ignored. Setting this to zero may cause infinities or NaN values during backpropagation.
ignore_mean: If True, does not penalize the mean value (frequency=0 component).
n: Norm order; 2 (default) applies an L2 loss to the weighted spectrum, 1 an L1 loss.

Returns
Scalar loss value
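A usage sketch (illustrative; diff stands in for actual - target):

>>> from phiml import math
>>> from phiml.math import spatial
>>> diff = math.random_normal(spatial(x=32)) * 0.1
>>> loss = math.frequency_loss(diff, frequency_falloff=50)   # scalar Tensor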
def from_dict(dict_: dict, convert=False)-
Expand source code
def from_dict(dict_: dict, convert=False):
    """
    Loads a `Tensor` or `Shape` from a serialized form.

    See Also:
        `to_dict()`.

    Args:
        dict_: Serialized tensor properties.
        convert: Whether to convert the data to the current backend format or keep it as a Numpy array.

    Returns:
        `Tensor` or `Shape`.
    """
    shape = Shape._from_dict(dict_)
    if 'data' in dict_:
        return tensor(dict_['data'], shape, convert=convert)
    else:
        return shape

Loads a Tensor or Shape from a serialized form.

See Also: to_dict().

Args
dict_: Serialized tensor properties.
convert: Whether to convert the data to the current backend format or keep it as a Numpy array.

Returns
Tensor or Shape.
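A round-trip sketch using the counterpart to_dict() (illustrative):

>>> from phiml import math
>>> from phiml.math import wrap, spatial
>>> t = wrap([1., 2., 3.], spatial('x'))
>>> serialized = math.to_dict(t)            # plain dict, e.g. for JSON storage
>>> restored = math.from_dict(serialized)   # reconstructs the tensor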
def gather(values,
indices: phiml.math._tensors.Tensor,
dims: str | Sequence | set | phiml.math._shape.Shape | Callable | None = None,
pref_index_dim='index')-
Expand source code
def gather(values, indices: Tensor, dims: Union[DimFilter, None] = None, pref_index_dim='index'): """ Gathers the entries of `values` at positions described by `indices`. All non-channel dimensions of `indices` that are part of `values` but not indexed are treated as batch dimensions. See Also: `scatter()`. Args: values: `Tensor` or `phiml.math.matic.PhiTreeNode` containing values to gather. indices: `int` `Tensor`. Multidimensional position references in `values`. Must contain a single channel dimension for the index vector matching the number of dimensions to index. This channel dimension should list the dimension names to index as labels unless explicitly specified as `dims`. dims: (Optional) Dimensions indexed by `indices`. Alternatively, the dimensions can be specified as the labels of the channel dimension of `indices`. If `None` and no index labels are specified, will default to all spatial dimensions or all instance dimensions, depending on which ones are present (but not both). pref_index_dim: In case `indices` has multiple channel dims, use this dim as the index, treating the others as batch. Has no effect if `indices` only has one channel dim. Returns: `Tensor` with combined batch dimensions, channel dimensions of `values` and spatial/instance dimensions of `indices`. """ if values is None: return None if not isinstance(values, Tensor): return tree_map(lambda v: gather(v, indices, dims), values, attr_type=all_attributes) index_dim = channel(indices) if index_dim.rank >= 2: assert pref_index_dim in index_dim, f"When indices has multiple channel dims, pref_index_dim must select one of them but got {pref_index_dim} which is not in {index_dim}" index_dim = index_dim.only(pref_index_dim) if dims is None: if index_dim and index_dim.labels[0]: dims = index_dim.labels[0] else: # Fallback to spatial / instance assert values.shape.instance.is_empty or values.shape.spatial.is_empty, f"Specify gather dimensions for values with both instance and spatial dimensions. Got {values.shape}" dims = values.shape.instance if values.shape.spatial.is_empty else values.shape.spatial assert dims, f"Specify gather dimensions for values with neither instance nor spatial dimensions. Got {values.shape}" dims = parse_dim_order(dims) assert dims, f"No indexing dimensions for tensor {values.shape} given indices {indices.shape}" if not values.shape.only(dims): # no indexed dim in values return expand(values, indices.shape - index_dim) elif dims not in values.shape: # Only some dims indexed dims = [d for d in dims if d in values.shape] indices = indices[{index_dim: dims}] index_dim = channel(indices) if len(dims) > 1: assert index_dim.rank == 1, f"indices must have a single channel dimension listing the indexed dims {dims} but got {indices.shape}." 
assert index_dim.volume == len(dims), f"channel dim of indices must have size equal to the number of indexed dims {dims} but got {index_dim} which has {index_dim.volume} entries" if indices.dtype.kind == bool: indices = to_int32(indices) if isinstance(values, Layout): inner_dims = values.shape.only(dims) - values._stack_dim if not inner_dims: index_list = unstack(rename_dims(indices, index_dim, 'index_'), indices.shape - index_dim) v_list = [values[{n: int(v) for n, v in zip(index_dim.labels[0], i)}] for i in index_list] return stack(v_list, indices.shape - index_dim) if values._stack_dim.only(dims).is_empty: assert len(values._stack_dim) == 1 value_slices = values._unstack(values._stack_dim.name) index_slices = indices._unstack(values._stack_dim.name) if values._stack_dim in indices.shape else [indices] * len(value_slices) inner_gathered = [gather(v, i, inner_dims, pref_index_dim) for i, v in zip(index_slices, value_slices)] return Layout(inner_gathered, values._stack_dim) if values._is_tracer or is_sparse(values): if not index_dim: index_dim = channel(gather=dims) indices = expand(indices, index_dim) if not index_dim.labels[0]: indices = indices._with_shape_replaced(indices.shape.with_dim_size(index_dim, dims)) if values._is_tracer: return values._gather(indices) if is_sparse(values): if isinstance(values, TensorStack): if dims in values._stack_dim: gathered = [values[{dims[0]: i}] for i in indices] return stack(gathered, indices.shape-index_dim) raise NotImplementedError return sparse_gather(values, indices, index_dim) elif is_sparse(indices): # only indices sparse -> gather on sparse pattern gathered = gather(values, indices._values, dims=dims, pref_index_dim=index_dim) return indices._with_values(gathered) broadcast = broadcast_dims(values, indices) treat_as_batch = indices.shape.only(values.shape) - dims - index_dim batch_ = ((values.shape.batch & indices.shape.batch).without(dims) & treat_as_batch) - broadcast channel_ = values.shape - dims - batch_ - broadcast if broadcast.intersection(set(dims)): # Cannot broadcast because that would iterate over dims! if values.shape.is_uniform: broadcast = broadcast - set(dims) else: # We have to slice the items, then stack the results # if batch_ or treat_as_batch: # raise NotImplementedError # ToDo iterate over batches result = [] for single_index in unstack(indices, indices.shape - index_dim): index_slice = {d: i for d, i in zip(index_dim.labels[0], single_index)} result.append(values[index_slice]) return stack(result, indices.shape - index_dim) def uniform_gather(values: Tensor, indices: Tensor): index_list_dims = indices.shape - index_dim - batch_ channel_ = values.shape - dims - batch_ - broadcast squeeze_index_list = False if not index_list_dims: index_list_dims = instance(_single_index=1) squeeze_index_list = True backend = backend_for(values, indices) native_values = values.native([batch_, *dims, channel_], True) native_indices = indices._reshaped_native([batch_, *index_list_dims, index_dim]) native_result = backend.batched_gather_nd(native_values, native_indices) result = reshaped_tensor(native_result, [batch_, *index_list_dims, channel_], convert=False) if squeeze_index_list: result = result[{'_single_index': 0}] return result return broadcast_op(uniform_gather, [values, indices], iter_dims=broadcast)

Gathers the entries of values at positions described by indices. All non-channel dimensions of indices that are part of values but not indexed are treated as batch dimensions.

See Also: scatter().

Args
values: Tensor or PhiTreeNode containing values to gather.
indices: int Tensor. Multidimensional position references in values. Must contain a single channel dimension for the index vector matching the number of dimensions to index. This channel dimension should list the dimension names to index as labels unless explicitly specified as dims.
dims: (Optional) Dimensions indexed by indices. Alternatively, the dimensions can be specified as the labels of the channel dimension of indices. If None and no index labels are specified, will default to all spatial dimensions or all instance dimensions, depending on which ones are present (but not both).
pref_index_dim: In case indices has multiple channel dims, use this dim as the index, treating the others as batch. Has no effect if indices only has one channel dim.

Returns
Tensor with combined batch dimensions, channel dimensions of values and spatial/instance dimensions of indices.
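A usage sketch (illustrative; the channel labels of indices name the indexed dims):

>>> from phiml import math
>>> from phiml.math import wrap, spatial, instance, channel
>>> values = wrap([[1, 2, 3], [4, 5, 6]], spatial('y,x'))
>>> indices = wrap([(0, 0), (1, 2)], instance('pts'), channel(index='y,x'))
>>> math.gather(values, indices)   # entries at (y=0,x=0) and (y=1,x=2) -> 1, 6 along 'pts'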
def get_format(x: phiml.math._tensors.Tensor) ‑> str-
Expand source code
def get_format(x: Tensor) -> str:
    """
    Returns the sparse storage format of a tensor.

    Args:
        x: `Tensor`

    Returns:
        One of `'coo'`, `'csr'`, `'csc'`, `'dense'`.
    """
    if isinstance(x, SparseCoordinateTensor):
        return 'coo'
    elif isinstance(x, CompressedSparseMatrix):
        if dual(x._uncompressed_dims):
            return 'csr'
        elif dual(x._compressed_dims):
            return 'csc'
        else:
            return 'compressed'
    elif isinstance(x, CompactSparseTensor):
        if dual(x._compressed_dims):
            return 'compact-cols'
        elif dual(x._uncompressed_dims):
            return 'compact-rows'
        else:
            return 'compact'
    elif isinstance(x, TensorStack):
        formats = [get_format(t) for t in x._tensors]
        if all(f == formats[0] for f in formats):
            return formats[0]
        return 'mixed'
    elif isinstance(x, BlockTensor):
        formats = [get_format(t) for t, _ in x._blo]
        if all(f == formats[0] for f in formats):
            return formats[0]
        return 'mixed'
    elif isinstance(x, Tensor):
        return 'dense'
    else:  # assume native tensor
        b = choose_backend(x)
        if not b.is_sparse(x):
            return 'dense'
        return b.get_sparse_format(x)

Returns the sparse storage format of a tensor.

Args
x: Tensor

Returns
One of 'coo', 'csr', 'csc', 'dense'.
def get_precision() ‑> int-
Expand source code
def get_precision() -> int:
    """
    Gets the current target floating point precision in bits.
    The precision can be set globally using `set_global_precision()` or locally using `with precision(p):`.

    Any Backend method may convert floating point values to this precision, even if the input had a different precision.

    Returns:
        16 for half, 32 for single, 64 for double
    """
    return _PRECISION[-1]

Gets the current target floating point precision in bits. The precision can be set globally using set_global_precision() or locally using with precision(p):.

Any Backend method may convert floating point values to this precision, even if the input had a different precision.

Returns
16 for half, 32 for single, 64 for double
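The precision can be queried inside a local precision context (a sketch; 32 is the usual global default):

>>> from phiml import math
>>> with math.precision(64):
...     math.get_precision()   # 64 inside the context
>>> math.get_precision()       # back to the enclosing precision, typically 32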
def get_sparsity(x: phiml.math._tensors.Tensor)-
Expand source code
def get_sparsity(x: Tensor):
    """
    Fraction of values currently stored in memory for the given `Tensor` `x`. For sparse tensors, this is `nnz / shape`.

    This is a lower limit on the number of values that will need to be processed for operations involving `x`.
    The actual number is often higher since many operations require data to be laid out in a certain format.
    In these cases, missing values, such as zeros, are filled in before the operation.

    The following operations may return tensors whose values are only partially stored:

    * `phiml.math.expand()`
    * `phiml.math.pairwise_distance()` with `max_distance` set.
    * Tracers used in `phiml.math.jit_compile_linear()`
    * Stacking any of the above.

    Args:
        x: `Tensor`

    Returns:
        The fraction of values that are actually stored, relative to the dense size.
        This does not include additional information, such as position information / indices.
        For sparse matrices, the stored count is equal to the number of nonzero values.
    """
    return stored_values(x, invalid='keep').shape.volume / x.shape.volume

Fraction of values currently stored in memory for the given Tensor x. For sparse tensors, this is nnz / shape.

This is a lower limit on the number of values that will need to be processed for operations involving x. The actual number is often higher since many operations require data to be laid out in a certain format. In these cases, missing values, such as zeros, are filled in before the operation.

The following operations may return tensors whose values are only partially stored:
- expand()
- phiml.math.pairwise_distance() with max_distance set.
- Tracers used in jit_compile_linear()
- Stacking any of the above.

Args
x: Tensor

Returns
The fraction of values that are actually stored, relative to the dense size. This does not include additional information, such as position information / indices. For sparse matrices, the stored count is equal to the number of nonzero values.
def gradient(f: Callable, wrt: str | int | tuple | list = None, get_output=True) ‑> Callable-
Expand source code
def gradient(f: Callable, wrt: Union[str, int, tuple, list] = None, get_output=True) -> Callable:
    """
    Creates a function which computes the gradient of `f`.

    Example:

    ```python
    def loss_function(x, y):
        prediction = f(x)
        loss = math.l2_loss(prediction - y)
        return loss, prediction

    dx = gradient(loss_function, 'x', get_output=False)(x, y)

    (loss, prediction), (dx, dy) = gradient(loss_function, 'x,y', get_output=True)(x, y)
    ```

    Functional gradients are implemented for the following backends:

    * PyTorch: [`torch.autograd.grad`](https://pytorch.org/docs/stable/autograd.html#torch.autograd.grad) / [`torch.autograd.backward`](https://pytorch.org/docs/stable/autograd.html#torch.autograd.backward)
    * TensorFlow: [`tf.GradientTape`](https://www.tensorflow.org/api_docs/python/tf/GradientTape)
    * Jax: [`jax.grad`](https://jax.readthedocs.io/en/latest/jax.html#jax.grad)

    When the gradient function is invoked, `f` is called with tensors that track the gradient.
    For PyTorch, `arg.requires_grad = True` for all positional arguments of `f`.

    Args:
        f: Function to be differentiated. `f` must return a floating point `Tensor` with rank zero.
            It can return additional tensors which are treated as auxiliary data and will be returned by the gradient function if `get_output=True`.
            All arguments for which the gradient is computed must be of dtype float or complex.
        get_output: Whether the gradient function should also return the return values of `f`.
        wrt: Comma-separated parameter names of `f` with respect to which the gradient should be computed.
            If not specified, the gradient will be computed w.r.t. the first positional argument (highly discouraged).

    Returns:
        Function with the same arguments as `f` that returns the value of `f`, auxiliary data and gradient of `f` if `get_output=True`, else just the gradient of `f`.
    """
    f_params, wrt = simplify_wrt(f, wrt)
    return GradientFunction(f, f_params, wrt, get_output, is_f_scalar=True)

Creates a function which computes the gradient of f.

Example:

def loss_function(x, y):
    prediction = f(x)
    loss = math.l2_loss(prediction - y)
    return loss, prediction

dx = gradient(loss_function, 'x', get_output=False)(x, y)
(loss, prediction), (dx, dy) = gradient(loss_function, 'x,y', get_output=True)(x, y)

Functional gradients are implemented for the following backends:
- PyTorch: torch.autograd.grad / torch.autograd.backward
- TensorFlow: tf.GradientTape
- Jax: jax.grad

When the gradient function is invoked, f is called with tensors that track the gradient. For PyTorch, arg.requires_grad = True for all positional arguments of f.

Args
f: Function to be differentiated. f must return a floating point Tensor with rank zero. It can return additional tensors which are treated as auxiliary data and will be returned by the gradient function if get_output=True. All arguments for which the gradient is computed must be of dtype float or complex.
get_output: Whether the gradient function should also return the return values of f.
wrt: Comma-separated parameter names of f with respect to which the gradient should be computed. If not specified, the gradient will be computed w.r.t. the first positional argument (highly discouraged).

Returns
Function with the same arguments as f that returns the value of f, auxiliary data and gradient of f if get_output=True, else just the gradient of f.
def grid_sample(grid: phiml.math._tensors.Tensor,
coordinates: phiml.math._tensors.Tensor,
extrap: ForwardRef('e_.Extrapolation') | float | str,
**kwargs)-
Expand source code
def grid_sample(grid: Tensor, coordinates: Tensor, extrap: Union['e_.Extrapolation', float, str], **kwargs):
    """
    Samples values of `grid` at the locations referenced by `coordinates`.
    Values lying in between sample points are determined via linear interpolation.

    If `coordinates` has a channel dimension, its labels are used to determine the grid dimensions of `grid`.
    Otherwise, the spatial dims of `grid` will be used.

    For values outside the valid bounds of `grid` (`coord < 0 or coord > grid.shape - 1`), `extrap` is used to determine the neighboring grid values.
    If the extrapolation does not support resampling, the grid is padded by one cell layer before resampling.
    In that case, values lying further outside will not be sampled according to the extrapolation.

    Args:
        grid: Grid with at least one spatial dimension and no instance dimensions.
        coordinates: Coordinates with a single channel dimension called `'vector'`.
            The size of the `vector` dimension must match the number of spatial dimensions of `grid`.
        extrap: Extrapolation used to determine the values of `grid` outside its valid bounds.
        kwargs: Additional information for the extrapolation.

    Returns:
        `Tensor` with channel dimensions of `grid`, spatial and instance dimensions of `coordinates` and combined batch dimensions.
    """
    extrap = e_.as_extrapolation(extrap) if extrap is not None else None
    if not channel(coordinates):
        assert spatial(grid).rank == 1, f"grid must have 1 spatial dimension if coordinates does not have a channel dimension"
        coordinates = expand(coordinates, channel(vector=spatial(grid)))
    assert channel(coordinates).rank == 1, f"coordinates must have at most one channel dimension but got {channel(coordinates)}"
    coordinates = rename_dims(coordinates, channel, 'vector')
    result = broadcast_op(functools.partial(_grid_sample, extrap=extrap, pad_kwargs=kwargs), [grid, coordinates])
    return result

Samples values of grid at the locations referenced by coordinates. Values lying in between sample points are determined via linear interpolation.

If coordinates has a channel dimension, its labels are used to determine the grid dimensions of grid. Otherwise, the spatial dims of grid will be used.

For values outside the valid bounds of grid (coord < 0 or coord > grid.shape - 1), extrap is used to determine the neighboring grid values. If the extrapolation does not support resampling, the grid is padded by one cell layer before resampling. In that case, values lying further outside will not be sampled according to the extrapolation.

Args
grid: Grid with at least one spatial dimension and no instance dimensions.
coordinates: Coordinates with a single channel dimension called 'vector'. The size of the vector dimension must match the number of spatial dimensions of grid.
extrap: Extrapolation used to determine the values of grid outside its valid bounds.
kwargs: Additional information for the extrapolation.

Returns
Tensor with channel dimensions of grid, spatial and instance dimensions of coordinates and combined batch dimensions.
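A 1D interpolation sketch (illustrative; the expected results are shown as comments):

>>> from phiml import math
>>> from phiml.math import wrap, spatial, instance, channel
>>> grid = wrap([0., 10., 20., 30.], spatial('x'))
>>> coords = wrap([(0.5,), (2.25,)], instance('points'), channel(vector='x'))
>>> math.grid_sample(grid, coords, 'zero-gradient')   # linear interpolation -> 5.0, 22.5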
def histogram(values: phiml.math._tensors.Tensor,
bins: phiml.math._shape.Shape = (binsˢ=30),
weights=1,
same_bins: str | Sequence | set | phiml.math._shape.Shape | Callable | None = None,
eps=1e-05)-
Expand source code
def histogram(values: Tensor, bins: Shape or Tensor = spatial(bins=30), weights=1, same_bins: DimFilter = None, eps=1e-5):
    """
    Compute a histogram of a distribution of values.

    *Important Note:* In its current implementation, values outside the range of bins may or may not be added to the outermost bins.

    Args:
        values: `Tensor` listing the values to be binned along spatial or instance dimensions.
            `values` may not contain channel or dual dimensions.
        bins: Either `Shape` specifying the number of equally-spaced bins to use or bin edge positions as `Tensor` with a spatial or instance dimension.
        weights: `Tensor` assigning a weight to every value in `values` that will be added to the bin, default 1.
        same_bins: Only used if `bins` is given as a `Shape`.
            Use the same bin sizes and positions across these batch dimensions.
            By default, bins will be chosen independently for each example.

    Returns:
        hist: `Tensor` containing all batch dimensions and the `bins` dimension with dtype matching `weights`.
        bin_edges: `Tensor`
        bin_center: `Tensor`
    """
    assert isinstance(values, Tensor), f"values must be a Tensor but got {type(values)}"
    assert channel(values).is_empty, f"Only 1D histograms supported but values have a channel dimension: {values.shape}"
    assert dual(values).is_empty, f"values cannot contain dual dimensions but got shape {values.shape}"
    weights = wrap(weights)
    if isinstance(bins, SHAPE_TYPES):
        def equal_bins(v):
            lo, up = finite_min(v, shape), finite_max(v, shape)
            margin = eps * (up - lo)
            return linspace(lo, up + margin, bins.with_size(bins.size + 1))
        bins = broadcast_op(equal_bins, [values], iter_dims=(batch(values) & batch(weights)).without(same_bins))
    assert isinstance(bins, Tensor), f"bins must be a Tensor but got {type(bins)}"
    assert non_batch(bins).rank == 1, f"bins must contain exactly one spatial or instance dimension listing the bin edges but got shape {bins.shape}"
    assert channel(bins).rank == dual(bins).rank == 0, f"bins cannot have any channel or dual dimensions but got shape {bins.shape}"
    tensors = [values, bins] if weights is None else [values, weights, bins]
    backend = backend_for(*tensors)
    def histogram_uniform(values: Tensor, bin_edges: Tensor, weights):
        batch_dims = batch(values) & batch(bin_edges) & batch(weights)
        value_dims = non_batch(values) & non_batch(weights)
        values_native = values._reshaped_native([batch_dims, value_dims])
        weights_native = weights._reshaped_native([batch_dims, value_dims])
        bin_edges_native = bin_edges._reshaped_native([batch_dims, non_batch(bin_edges)])
        hist_native = backend.histogram1d(values_native, weights_native, bin_edges_native)
        hist = reshaped_tensor(hist_native, [batch_dims, non_batch(bin_edges).with_size(non_batch(bin_edges).size - 1)])
        return hist
        # return stack_tensors([bin_edges, hist], channel(vector=[bin_edges.shape.name, 'hist']))
    bin_center = (bins[{non_batch(bins).name: slice(1, None)}] + bins[{non_batch(bins).name: slice(0, -1)}]) / 2
    bin_center = expand(bin_center, channel(vector=non_batch(bins).names))
    bin_edges = stack_tensors([bins], channel(values)) if channel(values) else bins
    return broadcast_op(histogram_uniform, [values, bins, weights]), bin_edges, bin_center

Compute a histogram of a distribution of values.

Important Note: In its current implementation, values outside the range of bins may or may not be added to the outermost bins.

Args
values: Tensor listing the values to be binned along spatial or instance dimensions. values may not contain channel or dual dimensions.
bins: Either Shape specifying the number of equally-spaced bins to use or bin edge positions as Tensor with a spatial or instance dimension.
weights: Tensor assigning a weight to every value in values that will be added to the bin, default 1.
same_bins: Only used if bins is given as a Shape. Use the same bin sizes and positions across these batch dimensions. By default, bins will be chosen independently for each example.

Returns
hist: Tensor containing all batch dimensions and the bins dimension with dtype matching weights.
bin_edges: Tensor
bin_center: Tensor
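A usage sketch (illustrative):

>>> from phiml import math
>>> from phiml.math import instance, spatial
>>> values = math.random_normal(instance(samples=1000))
>>> hist, bin_edges, bin_center = math.histogram(values, spatial(bins=20))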
def i2b(value: ~PhiTreeNodeType) ‑> ~PhiTreeNodeType-
Expand source code
def i2b(value: PhiTreeNodeType) -> PhiTreeNodeType:
    """ Change the type of all *instance* dims of `value` to *batch* dimensions. See `rename_dims`. """
    return rename_dims(value, instance, batch)

Change the type of all instance dims of value to batch dimensions. See rename_dims().
def identity(x)-
Expand source code
def identity(x): """ Identity function for one argument. Vararg functions cannot be transformed as the argument names are unknown. Args: x: Positional argument. Returns: `x` """ return x
Identity function for one argument. Vararg functions cannot be transformed as the argument names are unknown.
Args
x- Positional argument.
Returns
`x`
def ifft(k: phiml.math._tensors.Tensor,
dims: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function spatial>)-
Expand source code
def ifft(k: Tensor, dims: DimFilter = spatial): """ Inverse of `fft()`. Args: k: Complex or float `Tensor` with at least one spatial dimension. dims: Dimensions along which to perform the inverse FFT. If `None`, performs the inverse FFT along all spatial dimensions of `k`. Returns: *Ƒ<sup>-1</sup>(k)* as complex `Tensor` """ dims = k.shape.only(dims) k_native = k.native(k.shape) result_native = k.backend.ifft(k_native, k.shape.indices(dims.names)) return Dense(result_native, k.shape.names, k.shape, k.backend)
Inverse of `fft()`. Returns *Ƒ⁻¹(k)* as a complex `Tensor`.
def imag(x: ~TensorOrTree) ‑> ~TensorOrTree-
Expand source code
def imag(x: TensorOrTree) -> TensorOrTree: """ Returns the imaginary part of `x`. If `x` does not store complex numbers, returns a zero tensor with the same shape and dtype as this tensor. See Also: `real()`, `conjugate()`. Args: x: `Tensor` or `phiml.math.magic.PhiTreeNode` or native tensor. Returns: Imaginary component of `x` if `x` is complex, zeros otherwise. """ return _backend_op1(x, Backend.imag, imag)
Returns the imaginary part of `x`. If `x` does not store complex numbers, returns a zero tensor with the same shape and dtype as this tensor.
See Also: `real()`, `conjugate()`.
Args
x - `Tensor` or `PhiTreeNode` or native tensor.
Returns
Imaginary component of `x` if `x` is complex, zeros otherwise.
def incomplete_gamma(a: ~TensorOrTree, x: ~TensorOrTree, upper=False, regularized=True) ‑> ~TensorOrTree-
Expand source code
def incomplete_gamma(a: TensorOrTree, x: TensorOrTree, upper=False, regularized=True) -> TensorOrTree: """ Computes the incomplete gamma function. Args: a: Positive parameter, `Tensor` or tree. x: Non-negative argument, `Tensor` or tree. upper: Whether to compute the upper integral (x to infinity) or the lower integral (0 to x). regularized: Whether the integral is divided by Γ(a). """ if upper: reg = custom_op2(a, x, xops.gamma_inc_u) else: reg = custom_op2(a, x, xops.gamma_inc_l) return reg if regularized else reg * exp(log_gamma(a))
Computes the incomplete gamma function.
def index_shift(x: phiml.math._tensors.Tensor,
offsets: Sequence[int | phiml.math._tensors.Tensor],
padding: Extrapolation | float | phiml.math._tensors.Tensor | str | None = None) ‑> List[phiml.math._tensors.Tensor]-
Expand source code
def index_shift(x: Tensor, offsets: Sequence[Union[int, Tensor]], padding: Union[Extrapolation, float, Tensor, str, None] = None) -> List[Tensor]: """ Returns shifted versions of `x` according to `offsets` where each offset is an `int` vector indexing some dimensions of `x`. See Also: `shift`, `neighbor_reduce`. Args: x: Input grid-like `Tensor`. offsets: Sequence of offset vectors. Each offset is an `int` vector indexing some dimensions of `x`. Offsets can have different subsets of the dimensions of `x`. Missing dimensions count as 0. The value `0` can also be passed as a zero-shift. padding: Padding to be performed at the boundary so that the shifted versions have the same size as `x`. Must be one of the following: `Extrapolation`, `Tensor` or number for constant extrapolation, name of extrapolation as `str`. Can be set to `None` to disable padding. Then the result tensors will be smaller than `x`. Returns: `list` of shifted tensors. The number of return tensors is equal to the number of `offsets`. """ _, widths_list, min_by_dim, max_by_dim = join_index_offsets(offsets, negate=True) if padding is not None: pad_lower = {d: max(0, -m) for d, m in min_by_dim.items()} pad_upper = {d: max(0, m) for d, m in max_by_dim.items()} widths = {d: (pad_lower[d], pad_upper[d]) for d in pad_lower.keys()} x = math.pad(x, widths, mode=padding) return [math.pad(x, w, extrapolation.NONE) for w in widths_list]Returns shifted versions of
`x` according to `offsets` where each offset is an `int` vector indexing some dimensions of `x`.
See Also: `shift()`, `neighbor_reduce()`.
Args
x - Input grid-like `Tensor`.
offsets - Sequence of offset vectors. Each offset is an `int` vector indexing some dimensions of `x`. Offsets can have different subsets of the dimensions of `x`. Missing dimensions count as 0. The value `0` can also be passed as a zero-shift.
padding - Padding to be performed at the boundary so that the shifted versions have the same size as `x`. Must be one of the following: `Extrapolation`, `Tensor` or number for constant extrapolation, name of extrapolation as `str`. Can be set to `None` to disable padding. Then the result tensors will be smaller than `x`.
Returns
`list` of shifted tensors. The number of return tensors is equal to the number of `offsets`.
def instance(*args,
**dims: int | str | tuple | list | phiml.math._shape.Shape | ForwardRef('Tensor')) ‑> phiml.math._shape.Shape-
Expand source code
def instance(*args, **dims: Union[int, str, tuple, list, Shape, 'Tensor']) -> Shape: """ Returns the instance dimensions of an existing `Shape` or creates a new `Shape` with only instance dimensions. Usage for filtering instance dimensions: >>> instance_dims = instance(shape) >>> instance_dims = instance(tensor) Usage for creating a `Shape` with only instance dimensions: >>> instance_shape = instance('undef', points=2) (points=2, undef=None) Here, the dimension `undef` is created with an undefined size of `None`. Undefined sizes are automatically filled in by `tensor`, `wrap`, `stack` and `concat`. To create a shape with multiple types, use `merge_shapes()`, `concat_shapes()` or the syntax `shape1 & shape2`. See Also: `channel`, `batch`, `spatial` Args: *args: Either * `Shape` or `Tensor` to filter or * Names of dimensions with undefined sizes as `str`. **dims: Dimension sizes and names. Must be empty when used as a filter operation. Returns: `Shape` containing only dimensions of type instance. """ if all(isinstance(arg, str) for arg in args) or dims: return _construct_shape(INSTANCE_DIM, *args, **dims) elif len(args) == 1 and isinstance(args[0], SHAPE_TYPES): return args[0].instance assert len(args) == 1, f"instance() must be called either as a selector instance(Shape) or instance(Tensor) or as a constructor instance(*names, **dims). Got *args={args}, **dims={dims}" return shape(args[0]).instanceReturns the instance dimensions of an existing
`Shape` or creates a new `Shape` with only instance dimensions.
Usage for filtering instance dimensions:
>>> instance_dims = instance(shape)
>>> instance_dims = instance(tensor)
Usage for creating a `Shape` with only instance dimensions:
>>> instance_shape = instance('undef', points=2)
(points=2, undef=None)
Here, the dimension `undef` is created with an undefined size of `None`. Undefined sizes are automatically filled in by `tensor()`, `wrap()`, `stack()` and `concat()`.
To create a shape with multiple types, use `merge_shapes()`, `concat_shapes()` or the syntax `shape1 & shape2`.
See Also: `channel()`, `batch()`, `spatial()`
Args
*args - Either a `Shape` or `Tensor` to filter, or names of dimensions with undefined sizes as `str`.
**dims - Dimension sizes and names. Must be empty when used as a filter operation.
Returns
`Shape` containing only dimensions of type instance.
def ipack(value,
packed_dim: str | phiml.math._shape.Shape,
pos: int | None = None,
**kwargs)-
Expand source code
def ipack(value, packed_dim: Union[Shape, str], pos: Optional[int] = None, **kwargs): """Short for `pack_dims(..., dims=instance)`""" return pack_dims(value, instance, packed_dim, pos=pos, **kwargs)
Short for `pack_dims(..., dims=instance)`.
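A short sketch of what this shorthand does (dim names and sizes are illustrative):
>>> from phiml.math import ipack, random_normal, instance, channel
>>> t = random_normal(instance(a=2, b=3), channel(vector=2))
>>> ipack(t, instance('points'))  # packs instance dims a,b into one instance dim 'points' of size 6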
def irange(start: int = 0, **stop: int) ‑> phiml.math._tensors.Tensor[int]-
Expand source code
def irange(start: int = 0, **stop: int) -> Tensor[int]: """ Construct a range `Tensor` along one instance dim. """ assert len(stop) == 1, f"irange() requires exactly one stop dimension but got {stop}" return arange(instance(next(iter(stop))), start, next(iter(stop.values())))Construct a range
`Tensor` along one instance dim.
def is_composite(x: Any) ‑> bool-
Expand source code
def is_composite(x: Any) -> bool: """ Args: x: Object to check. Returns: `True` if `x` is a composite type / container, e.g. a dataclass or pytree. Sparse tensors are treated as non-composite. """ if x is None: return False elif isinstance(x, Layout): return True elif isinstance(x, Tensor): return False elif dataclasses.is_dataclass(x): return True elif isinstance(x, (tuple, list, dict)): return True try: backend = choose_backend(x) return not backend.is_tensor(x) except NoBackendFound as err: raise ValueError(x) from err
Args
x- Object to check.
Returns
`True` if `x` is a composite type / container, e.g. a dataclass or pytree. Sparse tensors are treated as non-composite.
def is_finite(x: ~TensorOrTree) ‑> ~TensorOrTree-
Expand source code
def is_finite(x: TensorOrTree) -> TensorOrTree: """ Returns a `Tensor` or `phiml.math.magic.PhiTreeNode` matching `x` with values `True` where `x` has a finite value and `False` otherwise. """ return _backend_op1(x, Backend.isfinite, is_finite)Returns a
`Tensor` or `PhiTreeNode` matching `x` with values `True` where `x` has a finite value and `False` otherwise.
def is_inf(x: ~TensorOrTree) ‑> ~TensorOrTree-
Expand source code
def is_inf(x: TensorOrTree) -> TensorOrTree: """ Returns a `Tensor` or `phiml.math.magic.PhiTreeNode` matching `x` with values `True` where `x` is `+inf` or `-inf` and `False` otherwise. """ return _backend_op1(x, Backend.isinf, is_inf)
Returns a `Tensor` or `PhiTreeNode` matching `x` with values `True` where `x` is `+inf` or `-inf` and `False` otherwise.
def is_nan(x: ~TensorOrTree) ‑> ~TensorOrTree-
Expand source code
def is_nan(x: TensorOrTree) -> TensorOrTree: """ Returns a `Tensor` or `phiml.math.magic.PhiTreeNode` matching `x` with values `True` where `x` is `NaN` and `False` otherwise. """ return _backend_op1(x, Backend.isnan, is_nan)Returns a
`Tensor` or `PhiTreeNode` matching `x` with values `True` where `x` is `NaN` and `False` otherwise.
def is_none(x: phiml.math._tensors.Tensor | None) ‑> bool-
Expand source code
def is_none(x: Optional[Tensor]) -> bool: """Returns `True` if `x is None or x == wrap(None)`.""" if x is None: return True if isinstance(x, Layout): return x._obj is None return FalseReturns
`True` if `x is None or x == wrap(None)`.
def is_numeric(x: Any) ‑> bool-
Expand source code
def is_numeric(x: Any) -> bool: """ Args: x: Object to test. Returns: `True` if `x` is a primitive number, native number tensor or numeric `Tensor`. """ if x is None: return False if isinstance(x, Tensor): return x.dtype.kind in {int, float, complex} try: backend = choose_backend(x) return backend.dtype(x) in {int, float, complex} except NoBackendFound: return FalseArgs
x- Object to test.
Returns
`True` if `x` is a primitive number, native number tensor or numeric `Tensor`.
def is_scalar(value) ‑> bool-
Expand source code
def is_scalar(value) -> bool: """ Checks whether `value` has no dimensions. Args: value: `Tensor` or Python primitive or native tensor. Returns: `bool` """ if isinstance(value, Tensor): return value.shape.rank == 0 elif isinstance(value, Number): return True else: return len(choose_backend(value).staticshape(value)) == 0Checks whether
`value` has no dimensions.
Args
value - `Tensor` or Python primitive or native tensor.
Returns
`bool`
def is_sparse(x: phiml.math._tensors.Tensor)-
Expand source code
def is_sparse(x: Tensor): """ Checks whether a tensor is represented in COO, CSR or CSC format. If the tensor is neither sparse nor dense, this function raises an error. Args: x: `Tensor` to test. Returns: `True` if `x` is sparse, `False` if `x` is dense. Raises: `AssertionError` if `x` is neither sparse nor fully dense. """ f = get_format(x) if f == 'dense': return False if f in ['csr', 'csc', 'coo', 'compressed', 'compact', 'compact-rows', 'compact-cols']: return True raise AssertionError(f"Tensor {x} is neither sparse nor dense")Checks whether a tensor is represented in COO, CSR or CSC format. If the tensor is neither sparse nor dense, this function raises an error.
Args
x - `Tensor` to test.
Returns
`True` if `x` is sparse, `False` if `x` is dense.
Raises
`AssertionError` if `x` is neither sparse nor fully dense.
def isize(obj) ‑> int | None-
Expand source code
def isize(obj) -> Optional[int]: """ Returns the total number of elements listed along instance dims of an object, equal to the product of the sizes of all instance dims. Args: obj: `Shape` or object with a valid `shape` property. Returns: Size as `int`. If `obj` is an undefined `Shape`, returns `None`. """ return instance(obj).volume
Returns the total number of elements listed along instance dims of an object, equal to the product of the sizes of all instance dims.
def iterate(map_function: Callable,
iterations: int | phiml.math._shape.Shape,
*x0,
f_kwargs: dict = None,
range: Callable = builtins.range,
measure: Callable = None,
substeps: int = 1,
**f_kwargs_)-
Expand source code
def iterate(map_function: Callable, iterations: Union[int, Shape], *x0, f_kwargs: dict = None, range: Callable = range, measure: Callable = None, substeps: int = 1, **f_kwargs_): """ Repeatedly call `function`, passing the previous output as the next input. If the function outputs more values than the number of arguments in `x0`, only the first `len(x0)` ones are passed to `map_function`. However, all outputs will be returned by `iterate`. Args: map_function: Function to call. Must be callable as `f(x0, **f_kwargs)` and `f(f(x0, **f_kwargs), **f_kwargs)`. iterations: Number of iterations as `int` or single-dimension `Shape`. If `int`, returns the final output of `map_function`. If `Shape`, returns the trajectory (`x0` and all outputs of `map_function`), stacking the values along this dimension. x0: Initial positional arguments for `map_function`. Values that are initially `None` are not stacked with the other values if `iterations` is a `Shape`. range: Range function. Can be used to generate tqdm output by passing `trange`. measure: Function without arguments to call at the start and end (and in between if `isinstance(iterations, Shape)`) calls to `map_function`. The measure of each call to `map_function` is `measure()` after minus `measure()` before the call. substeps: If > 1, iterates the function multiple times for each recorded step. The returned trajectories as well as measurements only record the large steps, not the sub-steps. The `range` is also only used on large steps, not sub-steps. f_kwargs: Additional keyword arguments to be passed to `map_function`. These arguments can be of any type. f_kwargs_: More keyword arguments. Returns: final_or_trajectory: Stacked trajectory or final output of `map_function`, depending on `iterations`. measured: Only if `measure` was specified, returns the measured value or trajectory tensor. """ if f_kwargs is None: f_kwargs = {} f_kwargs.update(f_kwargs_) assert isinstance(substeps, int), f"substeps must be an int but got {type(substeps)}" assert substeps >= 1, f"substeps must be >= 1" x = x0 if isinstance(iterations, int): start_time = measure() if measure else None for _i in range(iterations): for _sub_i in builtin_range(substeps): x = map_function(*x[:len(x0)], **f_kwargs) x = x if isinstance(x, tuple) else (x,) if len(x) < len(x0): raise AssertionError(f"Function to iterate must return at least {len(x0)} outputs to match input but got {x}") result = x[0] if len(x) == 1 else x return (result, measure() - start_time) if measure else result elif isinstance(iterations, Shape): xs = [x0] ts = [measure()] if measure else None for _i in range(iterations.size): for _sub_i in builtin_range(substeps): x = map_function(*x[:len(x0)], **f_kwargs) x = x if isinstance(x, tuple) else (x,) if len(x) < len(x0): raise AssertionError(f"Function to iterate must return at least {len(x0)} outputs to match input but got {x}") elif len(x) > len(x0): xs[0] = xs[0] + (None,) * (len(x) - len(x0)) xs.append(x) if measure: ts.append(measure()) xs = [stack(item[1:] if item[0] is None else item, iterations.with_size(None)) for item in zip(*xs)] result = xs[0] if len(xs) == 1 else xs ts = np.asarray(ts) return (result, wrap(ts[1:] - ts[:-1], iterations.with_size(None))) if measure else result else: raise ValueError(f"iterations must be an int or Shape but got {type(iterations)}")Repeatedly call
`map_function`, passing the previous output as the next input.
If the function outputs more values than the number of arguments in `x0`, only the first `len(x0)` ones are passed to `map_function`. However, all outputs will be returned by `iterate()`.
Args
map_function - Function to call. Must be callable as `f(x0, **f_kwargs)` and `f(f(x0, **f_kwargs), **f_kwargs)`.
iterations - Number of iterations as `int` or single-dimension `Shape`. If `int`, returns the final output of `map_function`. If `Shape`, returns the trajectory (`x0` and all outputs of `map_function`), stacking the values along this dimension.
x0 - Initial positional arguments for `map_function`. Values that are initially `None` are not stacked with the other values if `iterations` is a `Shape`.
range - Range function. Can be used to generate tqdm output by passing `trange`.
measure - Function without arguments to call at the start and end (and in between if `isinstance(iterations, Shape)`) calls to `map_function`. The measure of each call to `map_function` is `measure()` after minus `measure()` before the call.
substeps - If > 1, iterates the function multiple times for each recorded step. The returned trajectories as well as measurements only record the large steps, not the sub-steps. The `range` is also only used on large steps, not sub-steps.
f_kwargs - Additional keyword arguments to be passed to `map_function`. These arguments can be of any type.
f_kwargs_ - More keyword arguments.
Returns
final_or_trajectory - Stacked trajectory or final output of `map_function`, depending on `iterations`.
measured - Only if `measure` was specified, returns the measured value or trajectory tensor.
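A minimal sketch of both calling conventions (assuming a simple halving step; all values are illustrative):
>>> from phiml.math import iterate, wrap, batch
>>> step = lambda x: 0.5 * x
>>> iterate(step, 10, wrap(1.))               # final value after 10 steps: 2⁻¹⁰
>>> iterate(step, batch(steps=10), wrap(1.))  # trajectory of x0 plus 10 outputs, stacked along 'steps'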
def jacobian(f: Callable, wrt: str = None, get_output=True) ‑> Callable-
Expand source code
def jacobian(f: Callable, wrt: str = None, get_output=True) -> Callable: """ Creates a function which computes the Jacobian matrix of `f`. For scalar functions, consider using `gradient()` instead. Example: ```python def f(x, y): prediction = f(x) loss = math.l2_loss(prediction - y) return loss, prediction dx = jacobian(loss_function, wrt='x', get_output=False)(x, y) (loss, prediction), (dx, dy) = jacobian(loss_function, wrt='x,y', get_output=True)(x, y) ``` Functional gradients are implemented for the following backends: * PyTorch: [`torch.autograd.grad`](https://pytorch.org/docs/stable/autograd.html#torch.autograd.grad) / [`torch.autograd.backward`](https://pytorch.org/docs/stable/autograd.html#torch.autograd.backward) * TensorFlow: [`tf.GradientTape`](https://www.tensorflow.org/api_docs/python/tf/GradientTape) * Jax: [`jax.grad`](https://jax.readthedocs.io/en/latest/jax.html#jax.grad) When the gradient function is invoked, `f` is called with tensors that track the gradient. For PyTorch, `arg.requires_grad = True` for all positional arguments of `f`. Args: f: Function to be differentiated. `f` must return a floating point `Tensor` with rank zero. It can return additional tensors which are treated as auxiliary data and will be returned by the gradient function if `return_values=True`. All arguments for which the gradient is computed must be of dtype float or complex. get_output: Whether the gradient function should also return the return values of `f`. wrt: Comma-separated parameter names of `f` with respect to which the gradient should be computed. If not specified, the gradient will be computed w.r.t. the first positional argument (highly discouraged). Returns: Function with the same arguments as `f` that returns the value of `f`, auxiliary data and Jacobian of `f` if `get_output=True`, else just the Jacobian of `f`. """ f_params, wrt = simplify_wrt(f, wrt) return GradientFunction(f, f_params, wrt, get_output, is_f_scalar=False)Creates a function which computes the Jacobian matrix of
`f`. For scalar functions, consider using `gradient()` instead.
Example:
def loss_function(x, y):
    prediction = model(x)  # 'model' is a placeholder predictor, not part of this API
    loss = math.l2_loss(prediction - y)
    return loss, prediction

dx = jacobian(loss_function, wrt='x', get_output=False)(x, y)
(loss, prediction), (dx, dy) = jacobian(loss_function, wrt='x,y', get_output=True)(x, y)
Functional gradients are implemented for the following backends:
- PyTorch: torch.autograd.grad / torch.autograd.backward
- TensorFlow: tf.GradientTape
- Jax: jax.grad
When the gradient function is invoked, `f` is called with tensors that track the gradient. For PyTorch, `arg.requires_grad = True` for all positional arguments of `f`.
Args
f - Function to be differentiated. `f` must return a floating point `Tensor` with rank zero. It can return additional tensors which are treated as auxiliary data and will be returned by the gradient function if `return_values=True`. All arguments for which the gradient is computed must be of dtype float or complex.
get_output - Whether the gradient function should also return the return values of `f`.
wrt - Comma-separated parameter names of `f` with respect to which the gradient should be computed. If not specified, the gradient will be computed w.r.t. the first positional argument (highly discouraged).
Returns
Function with the same arguments as `f` that returns the value of `f`, auxiliary data and Jacobian of `f` if `get_output=True`, else just the Jacobian of `f`.
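A runnable mini-example in addition to the one above (assuming `l2_loss` as documented; since l2_loss(x) = ∑ᵢ xᵢ²/2, its Jacobian w.r.t. `x` is `x` itself):
>>> from phiml import math
>>> from phiml.math import wrap, spatial
>>> def loss(x):
...     return math.l2_loss(x)
>>> dx = math.jacobian(loss, wrt='x', get_output=False)(wrap([1., 2.], spatial('x')))
>>> dx  # expected: (1.000, 2.000) along xˢ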
def jit_compile(f: Callable = None, auxiliary_args: str = '', forget_traces: bool = None) ‑> Callable-
Expand source code
def jit_compile(f: Callable = None, auxiliary_args: str = '', forget_traces: bool = None) -> Callable: """ Compiles a graph based on the function `f`. The graph compilation is performed just-in-time (jit), e.g. when the returned function is called for the first time. The traced function will compute the same result as `f` but may run much faster. Some checks may be disabled in the compiled function. Can be used as a decorator: ```python @math.jit_compile def my_function(x: math.Tensor) -> math.Tensor: ``` Invoking the returned function may invoke re-tracing / re-compiling `f` after the first call if either * it is called with a different number of arguments, * the tensor arguments have different dimension names or types (the dimension order also counts), * any `Tensor` arguments require a different backend than previous invocations, * `phiml.math.magic.PhiTreeNode` positional arguments do not match in non-variable properties. Compilation is implemented for the following backends: * PyTorch: [`torch.jit.trace`](https://pytorch.org/docs/stable/jit.html) * TensorFlow: [`tf.function`](https://www.tensorflow.org/guide/function) * Jax: [`jax.jit`](https://jax.readthedocs.io/en/latest/notebooks/quickstart.html#using-jit-to-speed-up-functions) Jit-compilations cannot be nested, i.e. you cannot call `jit_compile()` while another function is being compiled. An exception to this is `jit_compile_linear()` which can be called from within a jit-compiled function. See Also: `jit_compile_linear()` Args: f: Function to be traced. All positional arguments must be of type `Tensor` or `phiml.math.magic.PhiTreeNode` returning a single `Tensor` or `phiml.math.magic.PhiTreeNode`. auxiliary_args: Comma-separated parameter names of arguments that are not relevant to backpropagation. forget_traces: If `True`, only remembers the most recent compiled instance of this function. Upon tracing with new instance (due to changed shapes or auxiliary args), deletes the previous traces. Returns: Function with similar signature and return values as `f`. """ if f is None: kwargs = {k: v for k, v in locals().items() if v is not None} return partial(jit_compile, **kwargs) auxiliary_args = set(s.strip() for s in auxiliary_args.split(',') if s.strip()) return f if isinstance(f, (JitFunction, LinearFunction)) and f.auxiliary_args == auxiliary_args else JitFunction(f, auxiliary_args, forget_traces or False)Compiles a graph based on the function
`f`. The graph compilation is performed just-in-time (jit), e.g. when the returned function is called for the first time.
The traced function will compute the same result as `f` but may run much faster. Some checks may be disabled in the compiled function.
Can be used as a decorator:
@math.jit_compile
def my_function(x: math.Tensor) -> math.Tensor:
Invoking the returned function may invoke re-tracing / re-compiling `f` after the first call if either
- it is called with a different number of arguments,
- the tensor arguments have different dimension names or types (the dimension order also counts),
- any `Tensor` arguments require a different backend than previous invocations,
- `PhiTreeNode` positional arguments do not match in non-variable properties.
Compilation is implemented for the following backends:
- PyTorch: torch.jit.trace
- TensorFlow: tf.function
- Jax: jax.jit
Jit-compilations cannot be nested, i.e. you cannot call `jit_compile()` while another function is being compiled. An exception to this is `jit_compile_linear()` which can be called from within a jit-compiled function.
See Also: `jit_compile_linear()`
Args
f - Function to be traced. All positional arguments must be of type `Tensor` or `PhiTreeNode` returning a single `Tensor` or `PhiTreeNode`.
auxiliary_args - Comma-separated parameter names of arguments that are not relevant to backpropagation.
forget_traces - If `True`, only remembers the most recent compiled instance of this function. Upon tracing with new instance (due to changed shapes or auxiliary args), deletes the previous traces.
Returns
Function with similar signature and return values as `f`.
def jit_compile_linear(f: Callable[[~X], ~Y] = None,
auxiliary_args: str = None,
forget_traces: bool = None) ‑> LinearFunction[X, Y]-
Expand source code
def jit_compile_linear(f: Callable[[X], Y] = None, auxiliary_args: str = None, forget_traces: bool = None) -> 'LinearFunction[X, Y]': """ Compile an optimized representation of the linear function `f`. For backends that support sparse tensors, a sparse matrix will be constructed for `f`. Can be used as a decorator: ```python @math.jit_compile_linear def my_linear_function(x: math.Tensor) -> math.Tensor: ``` Unlike `jit_compile()`, `jit_compile_linear()` can be called during a regular jit compilation. See Also: `jit_compile()` Args: f: Function that is linear in its positional arguments. All positional arguments must be of type `Tensor` and `f` must return a `Tensor`. auxiliary_args: Which parameters `f` is not linear in. These arguments are treated as conditioning arguments and will cause re-tracing on change. forget_traces: If `True`, only remembers the most recent compiled instance of this function. Upon tracing with new instance (due to changed shapes or auxiliary args), deletes the previous traces. Returns: `LinearFunction` with similar signature and return values as `f`. """ if f is None: kwargs = {k: v for k, v in locals().items() if v is not None} return partial(jit_compile_linear, **kwargs) if isinstance(f, JitFunction): f = f.f # cannot trace linear function from jitted version if isinstance(auxiliary_args, str): auxiliary_args = set(s.strip() for s in auxiliary_args.split(',') if s.strip()) else: assert auxiliary_args is None f_params = function_parameters(f) auxiliary_args = f_params[1:] return f if isinstance(f, LinearFunction) and f.auxiliary_args == auxiliary_args else LinearFunction(f, auxiliary_args, forget_traces or False)Compile an optimized representation of the linear function
`f`. For backends that support sparse tensors, a sparse matrix will be constructed for `f`.
Can be used as a decorator:
@math.jit_compile_linear
def my_linear_function(x: math.Tensor) -> math.Tensor:
Unlike `jit_compile()`, `jit_compile_linear()` can be called during a regular jit compilation.
See Also: `jit_compile()`
Args
f - Function that is linear in its positional arguments. All positional arguments must be of type `Tensor` and `f` must return a `Tensor`.
auxiliary_args - Which parameters `f` is not linear in. These arguments are treated as conditioning arguments and will cause re-tracing on change.
forget_traces - If `True`, only remembers the most recent compiled instance of this function. Upon tracing with new instance (due to changed shapes or auxiliary args), deletes the previous traces.
Returns
`LinearFunction` with similar signature and return values as `f`.
def l1_loss(x,
reduce: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function non_batch>) ‑> phiml.math._tensors.Tensor-
Expand source code
def l1_loss(x, reduce: DimFilter = math.non_batch) -> Tensor: """ Computes *∑<sub>i</sub> ||x<sub>i</sub>||<sub>1</sub>*, summing over all non-batch dimensions. Args: x: `Tensor` or `phiml.math.magic.PhiTreeNode` or 0D or 1D native tensor. For `phiml.math.magic.PhiTreeNode` objects, only the sum over all value attributes is computed. reduce: Dimensions to reduce as `DimFilter`. Returns: loss: `Tensor` """ if isinstance(x, Tensor): return math.sum_(abs(x), reduce) elif isinstance(x, PhiTreeNode): return sum([l1_loss(getattr(x, a), reduce) for a in value_attributes(x)]) else: try: backend = choose_backend(x) shape = backend.staticshape(x) if len(shape) == 0: return abs(x) elif len(shape) == 1: return backend.sum(abs(x)) else: raise ValueError("l1_loss is only defined for 0D and 1D native tensors. For higher-dimensional data, use Φ-ML tensors.") except math.NoBackendFound: raise ValueError(x)
Computes ∑ᵢ ‖xᵢ‖₁, summing over all non-batch dimensions.
Args
x - `Tensor` or `PhiTreeNode` or 0D or 1D native tensor. For `PhiTreeNode` objects, only the sum over all value attributes is computed.
reduce - Dimensions to reduce as
DimFilter.
Returns
loss - `Tensor`
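A worked example (values chosen so the result can be checked by hand):
>>> from phiml.math import l1_loss, wrap, channel
>>> x = wrap([3., -4.], channel('vector'))
>>> l1_loss(x)  # |3| + |-4| = 7.0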
def l2_loss(x,
reduce: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function non_batch>) ‑> phiml.math._tensors.Tensor-
Expand source code
def l2_loss(x, reduce: DimFilter = math.non_batch) -> Tensor: """ Computes *∑<sub>i</sub> ||x<sub>i</sub>||<sub>2</sub><sup>2</sup> / 2*, summing over all non-batch dimensions. Args: x: `Tensor` or `phiml.math.magic.PhiTreeNode` or 0D or 1D native tensor. For `phiml.math.magic.PhiTreeNode` objects, only the sum over all value attributes is computed. reduce: Dimensions to reduce as `DimFilter`. Returns: loss: `Tensor` """ if isinstance(x, Tensor): if x.dtype.kind == complex: x = abs(x) return math.sum_(x ** 2, reduce) * 0.5 elif isinstance(x, PhiTreeNode): return sum([l2_loss(getattr(x, a), reduce) for a in value_attributes(x)]) else: try: backend = choose_backend(x) shape = backend.staticshape(x) if len(shape) == 0: return x ** 2 * 0.5 elif len(shape) == 1: return backend.sum(x ** 2) * 0.5 else: raise ValueError("l2_loss is only defined for 0D and 1D native tensors. For higher-dimensional data, use Φ-ML tensors.") except math.NoBackendFound: raise ValueError(x)
Computes ∑ᵢ ‖xᵢ‖₂² / 2, summing over all non-batch dimensions.
Args
x - `Tensor` or `PhiTreeNode` or 0D or 1D native tensor. For `PhiTreeNode` objects, only the sum over all value attributes is computed.
reduce - Dimensions to reduce as
DimFilter.
Returns
loss - `Tensor`
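A worked example (same input as the `l1_loss()` sketch above):
>>> from phiml.math import l2_loss, wrap, channel
>>> x = wrap([3., -4.], channel('vector'))
>>> l2_loss(x)  # (3² + 4²) / 2 = 12.5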
def laplace(x: phiml.math._tensors.Tensor,
dx: float | phiml.math._tensors.Tensor = 1,
padding: Extrapolation | float | phiml.math._tensors.Tensor | str | None = zero-gradient,
dims: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function spatial>,
weights: phiml.math._tensors.Tensor = None,
padding_kwargs: dict = None)-
Expand source code
def laplace(x: Tensor, dx: Union[Tensor, float] = 1, padding: Union[Extrapolation, float, Tensor, str, None] = extrapolation.BOUNDARY, dims: DimFilter = spatial, weights: Tensor = None, padding_kwargs: dict = None): """ Spatial Laplace operator as defined for scalar fields. If a vector field is passed, the laplace is computed component-wise. Args: x: n-dimensional field of shape (batch, spacial dimensions..., components) dx: scalar or 1d tensor padding: Padding mode. Must be one of the following: `Extrapolation`, `Tensor` or number for constant extrapolation, name of extrapolation as `str`. dims: The second derivative along these dimensions is summed over weights: (Optional) Multiply the axis terms by these factors before summation. Must be a Tensor with a single channel dimension that lists all laplace dims by name. padding_kwargs: Additional keyword arguments to be passed to `phiml.math.pad()`. Returns: `phiml.math.Tensor` of same shape as `x` """ if isinstance(dx, (tuple, list)): dx = wrap(dx, batch('_laplace')) elif isinstance(dx, Tensor) and 'vector' in dx.shape: dx = rename_dims(dx, 'vector', batch('_laplace')) if isinstance(x, Extrapolation): return x.spatial_gradient() left, center, right = shift(wrap(x), (-1, 0, 1), dims, padding, stack_dim=batch('_laplace'), padding_kwargs=padding_kwargs) result = (left + right - 2 * center) / (dx ** 2) if weights is not None: dim_names = x.shape.only(dims).names if channel(weights): assert set(channel(weights).labels[0]) >= set(dim_names), f"the channel dim of weights must contain all laplace dims {dim_names} but only has {channel(weights).labels}" weights = rename_dims(weights, channel, batch('_laplace')) result *= weights result = math.sum_(result, '_laplace') return resultSpatial Laplace operator as defined for scalar fields. If a vector field is passed, the laplace is computed component-wise.
Args
x- n-dimensional field of shape (batch, spatial dimensions…, components)
dx- scalar or 1d tensor
padding- Padding mode.
Must be one of the following:
`Extrapolation`, `Tensor` or number for constant extrapolation, name of extrapolation as `str`.
dims - The second derivative along these dimensions is summed over
weights- (Optional) Multiply the axis terms by these factors before summation. Must be a Tensor with a single channel dimension that lists all laplace dims by name.
padding_kwargs- Additional keyword arguments to be passed to
pad().
Returns
`Tensor` of same shape as `x`.
def layout(objects: Sequence[~T] | ~T, *shape: str | phiml.math._shape.Shape) ‑> phiml.math._tensors.Tensor[~T]-
Expand source code
def layout(objects: Union[Sequence[T], T], *shape: Union[Shape, str]) -> Tensor[T]: """ Wraps a Python tree in a `Tensor`, allowing elements to be accessed via dimensions. A python tree is a structure of nested `tuple`, `list`, `dict` and *leaf* objects where leaves can be any Python object. All keys of `dict` containers must be of type `str`. The keys are automatically assigned as labels along that dimension unless conflicting with other elements. Strings may also be used as containers. Example: >>> t = layout({'a': 'text', 'b': [0, 1]}, channel('dict,inner')) >>> t.inner[1].dict['a'].native() 'e' See Also: `tensor()`, `wrap()`. Args: objects: PyTree of `list` or `tuple`. *shape: Tensor dimensions Returns: `Tensor`. Calling `Tensor.native()` on the returned tensor will return `objects`. """ shape = [parse_shape_spec(s) if isinstance(s, str) else s for s in shape] assert all(isinstance(s, SHAPE_TYPES) for s in shape), f"shape needs to be one or multiple Shape instances but got {shape}" shape = EMPTY_SHAPE if len(shape) == 0 else concat_shapes_(*shape) if isinstance(objects, Layout): assert objects.shape == shape return objects if not shape.well_defined: def recursive_determine_shape(native, shape: Shape): if not shape: return shape if isinstance(native, dict): assert all([isinstance(k, str) for k in native.keys()]), f"All dict keys in PyTrees must be str but got {tuple(native.keys())}" shape = shape.replace(shape[0], shape[0].with_size(tuple(native.keys()))) if shape.rank == 1: return shape.with_sizes((len(native),)) inner_shape = shape[1:] if isinstance(native, (tuple, list)): inner_shapes = [recursive_determine_shape(n, inner_shape) for n in native] elif isinstance(native, dict): inner_shapes = [recursive_determine_shape(n, inner_shape) for n in native.values()] else: raise ValueError(native) return shape_stack(shape[0], *inner_shapes) shape = recursive_determine_shape(objects, shape) return Layout(objects, shape)Wraps a Python tree in a
`Tensor`, allowing elements to be accessed via dimensions. A python tree is a structure of nested `tuple`, `list`, `dict` and leaf objects where leaves can be any Python object.
All keys of `dict` containers must be of type `str`. The keys are automatically assigned as labels along that dimension unless conflicting with other elements.
Strings may also be used as containers.
Example:
>>> t = layout({'a': 'text', 'b': [0, 1]}, channel('dict,inner'))
>>> t.inner[1].dict['a'].native()
'e'
Args
objects - PyTree of `list` or `tuple`.
*shape - Tensor dimensions
Returns
`Tensor`. Calling `Tensor.native()` on the returned tensor will return `objects`.
def length(*args, **kwargs)-
Expand source code
def length(*args, **kwargs): """Deprecated. Use `norm` instead.""" warnings.warn("phiml.math.length is deprecated in favor of phiml.math.norm", DeprecationWarning, stacklevel=2) return norm(*args, **kwargs)Deprecated. Use
`norm()` instead.
def linspace(start: float | phiml.math._tensors.Tensor | tuple | list,
stop: float | phiml.math._tensors.Tensor | tuple | list,
dim: phiml.math._shape.Shape) ‑> phiml.math._tensors.Tensor[float]-
Expand source code
def linspace(start: Union[float, Tensor, tuple, list], stop: Union[float, Tensor, tuple, list], dim: Shape) -> Tensor[float]: """ Returns `number` evenly spaced numbers between `start` and `stop` along `dim`. If `dim` contains multiple dimensions, evenly spaces values along each dimension, then stacks the result along a new channel dimension called `vector`. See Also: `arange()`, `meshgrid()`. Args: start: First value, `int` or `Tensor`. stop: Last value, `int` or `Tensor`. dim: Linspace dimension of integer size. The size determines how many values to linearly space between `start` and `stop`. The values will be laid out along `dim`. Returns: `Tensor` Examples: >>> math.linspace(0, 1, spatial(x=5)) (0.000, 0.250, 0.500, 0.750, 1.000) along xˢ >>> math.linspace(0, (-1, 1), spatial(x=3)) (0.000, 0.000); (-0.500, 0.500); (-1.000, 1.000) (xˢ=3, vectorᶜ=2) """ assert isinstance(dim, SHAPE_TYPES), f"dim must be a Shape but got {dim}" assert dim.is_uniform, f"dim must be uniform but got {dim}" start = wrap(start) stop = wrap(stop) if dim.rank > 1: return meshgrid(dim) / (dim - 1) * (stop - start) + start if is_scalar(start) and is_scalar(stop): backend = preferred_backend_for(start, stop) native_linspace = backend.linspace(start.native(), stop.native(), dim.size) return Dense(native_linspace, dim.names, dim, backend) else: from ._functional import map_ return map_(linspace, start, stop, dim=dim)Returns
evenly spaced numbers between `start` and `stop` along `dim`; the number of values equals the size of `dim`.
If `dim` contains multiple dimensions, evenly spaces values along each dimension, then stacks the result along a new channel dimension called `vector`.
See Also: `arange()`, `meshgrid()`.
Args
start - First value, `int` or `Tensor`.
stop - Last value, `int` or `Tensor`.
dim - Linspace dimension of integer size. The size determines how many values to linearly space between `start` and `stop`. The values will be laid out along `dim`.
Returns
`Tensor`
Examples
>>> math.linspace(0, 1, spatial(x=5))
(0.000, 0.250, 0.500, 0.750, 1.000) along xˢ
>>> math.linspace(0, (-1, 1), spatial(x=3))
(0.000, 0.000); (-0.500, 0.500); (-1.000, 1.000) (xˢ=3, vectorᶜ=2)
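A further sketch with a tensor endpoint, which the implementation handles by mapping over the endpoint's dims (dim names are illustrative):
>>> from phiml.math import wrap, batch
>>> stop = wrap([1., 2.], batch('b'))
>>> math.linspace(0, stop, spatial(x=3))  # (0, 0.5, 1) for b=0 and (0, 1, 2) for b=1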
Expand source code
def load(file: Union[str, Tensor]): """ Loads a `Tensor` or tree from one or multiple files previously written using `save`. All tensors are restored as NumPy arrays, not the backend-specific tensors they may have been written as. Use `convert()` to convert all or some of the tensors to a different backend. Examples: >>> B = batch(b=3) >>> files = -f-f"data/test_{arange(B)}.npz" >>> data = randn(B, spatial(x=10)) >>> save(files, data) # store 10 values per file >>> assert_close(data, load(files)) See Also: `save()`. Args: file: Either single file to read as `str` or a batch of files as a string `Tensor`. When a batch of paths is provided, each file is loaded and the results are stacked according to the dims of `file`. For obtaining a batch of files, see `wrap()`, `phiml.os.listdir()`, `phiml.math.f`. Returns: Same type as what was written. """ def load_single(file: str): data = np.load(file, allow_pickle=True) all_np = {k: data[k] for k in data if k not in ['tree', 'specs', 'paths']} specs = [unserialize_spec(spec) for spec in data['specs'].tolist()] tensors = assemble_tensors(list(all_np.values()), specs) tree = data['tree'].tolist()['tree'] # this may require outside classes via pickle stored_paths = data['paths'].tolist() new_paths = attr_paths_from_container(tree, all_attributes, 'root') if tuple(stored_paths) != tuple(new_paths): lookup = {path: t for path, t in zip(stored_paths, tensors)} tensors = [lookup[p] for p in new_paths] return assemble_tree(tree, tensors, attr_type=all_attributes) if isinstance(file, str): return load_single(file) from ._functional import map_ return map_(load_single, file)Loads a
`Tensor` or tree from one or multiple files previously written using `save()`.
All tensors are restored as NumPy arrays, not the backend-specific tensors they may have been written as. Use `convert()` to convert all or some of the tensors to a different backend.
Examples
>>> B = batch(b=3)
>>> files = -f-f"data/test_{arange(B)}.npz"
>>> data = randn(B, spatial(x=10))
>>> save(files, data)  # store 10 values per file
>>> assert_close(data, load(files))
See Also: `save()`.
Args
file - Either single file to read as `str` or a batch of files as a string `Tensor`. When a batch of paths is provided, each file is loaded and the results are stacked according to the dims of `file`. For obtaining a batch of files, see `wrap()`, `listdir()`, `f`.
Returns
Same type as what was written.
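A single-file round trip as a minimal sketch (the file name is illustrative):
>>> from phiml.math import save, load, wrap, spatial
>>> data = wrap([1., 2., 3.], spatial('x'))
>>> save("example.npz", data)       # hypothetical path
>>> restored = load("example.npz")  # NumPy-backed Tensor equal to data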
def log(x: ~TensorOrTree) ‑> ~TensorOrTree-
Expand source code
def log(x: TensorOrTree) -> TensorOrTree: """ Computes the natural logarithm of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`. """ return _backend_op1(x, Backend.log, log)Computes the natural logarithm of the
`Tensor` or `PhiTreeNode` `x`.
def log10(x: ~TensorOrTree) ‑> ~TensorOrTree-
Expand source code
def log10(x: TensorOrTree) -> TensorOrTree: """ Computes *log(x)* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x` with base 10. """ return _backend_op1(x, Backend.log10, log10)Computes log(x) of the
`Tensor` or `PhiTreeNode` `x` with base 10.
def log2(x: ~TensorOrTree) ‑> ~TensorOrTree-
Expand source code
def log2(x: TensorOrTree) -> TensorOrTree: """ Computes *log(x)* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x` with base 2. """ return _backend_op1(x, Backend.log2, log2)Computes log(x) of the
`Tensor` or `PhiTreeNode` `x` with base 2.
def log_gamma(x: ~TensorOrTree) ‑> ~TensorOrTree-
Expand source code
def log_gamma(x: TensorOrTree) -> TensorOrTree: """ Computes *log(gamma(x))* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`. """ return _backend_op1(x, Backend.log_gamma, log_gamma)Computes log(gamma(x)) of the
`Tensor` or `PhiTreeNode` `x`.
def map(function: Callable[..., ~Y],
*args,
dims: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function shape>,
range=builtins.range,
unwrap_scalars=True,
expand_results=False,
simplify=False,
map_name=None,
**kwargs) ‑> phiml.math._tensors.Tensor | ~Y | None-
Expand source code
def map_(function: Callable[..., Y], *args, dims: DimFilter = shape, range=range, unwrap_scalars=True, expand_results=False, simplify=False, map_name=None, **kwargs) -> Union[None, Tensor, Y]: """ Calls `function` on slices of the arguments and returns the stacked result. Args: function: Function to be called on slices of `args` and `kwargs`. Must return one or multiple values that can be stacked. `None` may be returned but if any return value is `None`, all calls to `function` must return `None` in that position. *args: Positional arguments for `function`. Values that are `phiml.math.magic.Sliceable` will be sliced along `dims`. **kwargs: Keyword arguments for `function`. Values that are `phiml.math.magic.Sliceable` will be sliced along `dims`. dims: Dimensions which should be sliced. `function` is called once for each element in `dims`, i.e. `dims.volume` times. If `dims` is not specified, all dimensions from the `phiml.math.magic.Sliceable` values in `args` and `kwargs` will be mapped. Pass `object` to map only objects, not tensors of primitives (`dtype.kind == object`). This will select only `layout`-type dimensions. range: Optional range function. Can be used to generate `tqdm` output by passing `trange`. unwrap_scalars: If `True`, passes the contents of scalar `Tensor`s instead of the tensor objects. simplify: If `True`, reduces constant dims of output tensors that don't vary across mapped slices. Returns: `Tensor` of same shape as `value`. """ sliceable_args = [v for v in args if isinstance(v, Shapable)] sliceable_kwargs = {k: v for k, v in kwargs.items() if isinstance(v, Shapable)} extra_args = [v for v in args if not isinstance(v, Shapable)] extra_kwargs = {k: v for k, v in kwargs.items() if not isinstance(v, Shapable)} if dims is object: dims_ = merge_shapes(*[object_dims(a) for a in sliceable_args], *[object_dims(a) for a in sliceable_kwargs.values()], allow_varying_sizes=True) else: dims_ = merge_shapes(*sliceable_args, *sliceable_kwargs.values(), allow_varying_sizes=True).only(dims) assert dims_.well_defined, f"All arguments must have consistent sizes for all mapped dimensions. Trying to map along {dims} but some have varying sizes (marked as None)." 
assert dims_.volume > 0, f"map dims must have volume > 0 but got {dims_}" results = [] if _DEFAULT_RANGE is not None and map_name is not None and range is builtin_range and dims_.volume > 1: range = partial(_DEFAULT_RANGE, desc=map_name) for _, idx in zip(range(dims_.volume), dims_.meshgrid()): idx_args = [slice_(v, idx) for v in sliceable_args] idx_kwargs = {k: slice_(v, idx) for k, v in sliceable_kwargs.items()} if unwrap_scalars: idx_args = [v.native() if isinstance(v, Tensor) and v.rank == 0 else v for v in idx_args] idx_kwargs = {k: v.native() if isinstance(v, Tensor) and v.rank == 0 else v for k, v in idx_kwargs.items()} idx_extra_args = list(extra_args) idx_all_args = [idx_args.pop(0) if isinstance(v, Shapable) else idx_extra_args.pop(0) for v in args] f_output = function(*idx_all_args, **idx_kwargs, **extra_kwargs) results.append(f_output) if isinstance(results[0], tuple): stacked: List[Optional[Tensor]] = [] for i in builtin_range(len(results[0])): if any(r[i] is None for r in results): assert all(r[i] is None for r in results), f"map function returned None for some elements, {results}" stacked.append(None) else: stacked.append(math.stack([r[i] for r in results], dims_, expand_values=expand_results, simplify=simplify, layout_non_matching=True)) return tuple(stacked) else: if any(r is None for r in results): assert all(r is None for r in results), f"map function returned None for some elements, {results}" return None return stack(results, dims_, expand_values=expand_results, simplify=simplify, layout_non_matching=True)Calls
`function` on slices of the arguments and returns the stacked result.
Args
function - Function to be called on slices of `args` and `kwargs`. Must return one or multiple values that can be stacked. `None` may be returned but if any return value is `None`, all calls to `function` must return `None` in that position.
*args - Positional arguments for `function`. Values that are `Sliceable` will be sliced along `dims`.
**kwargs - Keyword arguments for `function`. Values that are `Sliceable` will be sliced along `dims`.
dims - Dimensions which should be sliced. `function` is called once for each element in `dims`, i.e. `dims.volume` times. If `dims` is not specified, all dimensions from the `Sliceable` values in `args` and `kwargs` will be mapped. Pass `object` to map only objects, not tensors of primitives (`dtype.kind == object`). This will select only `layout()`-type dimensions.
range - Optional range function. Can be used to generate `tqdm` output by passing `trange`.
unwrap_scalars - If `True`, passes the contents of scalar `Tensor`s instead of the tensor objects.
simplify - If `True`, reduces constant dims of output tensors that don't vary across mapped slices.
Returns
`Tensor` of same shape as `value`.
def map_pairs(map_function: Callable,
values: phiml.math._tensors.Tensor,
connections: phiml.math._tensors.Tensor)-
Expand source code
def map_pairs(map_function: Callable, values: Tensor, connections: Tensor): """ Evaluates `map_function` on all pairs of elements present in the sparsity pattern of `connections`. Args: map_function: Function with signature `(Tensor, Tensor) -> Tensor`. values: Values to evaluate `map_function` on. Needs to have a spatial or instance dimension but must not have a dual dimension. connections: Sparse tensor. Returns: `Tensor` with the sparse dimensions of `connections` and all non-instance dimensions returned by `map_function`. """ assert dual(values).is_empty, f"values must not have a dual dimension but got {values.shape}" if isinstance(connections, CompactSparseTensor): rows = connections._uncompressed_dims if instance(connections._uncompressed_dims) else connections._compressed_dims target = values[{rows: connections._indices}] result = map_function(values, target) return connections._with_values(result) indices = stored_indices(connections, invalid='clamp') origin_dim, neighbors_dim = channel(indices).labels[0] if origin_dim not in values.shape: origin_dim, neighbors_dim = neighbors_dim, origin_dim assert origin_dim in values.shape, f"No dimension of connections {connections.shape} is present in values {values.shape}" origin = values[{origin_dim: indices[origin_dim]}] target = values[{origin_dim: indices[neighbors_dim]}] result = map_function(origin, target) return tensor_like(connections, result, value_order='as existing')Evaluates
`map_function` on all pairs of elements present in the sparsity pattern of `connections`.
Args
map_function - Function with signature `(Tensor, Tensor) -> Tensor`.
values - Values to evaluate `map_function` on. Needs to have a spatial or instance dimension but must not have a dual dimension.
connections - Sparse tensor.
Returns
`Tensor` with the sparse dimensions of `connections` and all non-instance dimensions returned by `map_function`.
def map_types(f: Callable,
dims: phiml.math._shape.Shape | tuple | list | str | Callable,
dim_type: str | Callable) ‑> Callable-
Expand source code
def map_types(f: Callable, dims: Union[Shape, tuple, list, str, Callable], dim_type: Union[Callable, str]) -> Callable: """ Wraps a function to change the dimension types of its `Tensor` and `phiml.math.magic.PhiTreeNode` arguments. Args: f: Function to wrap. dims: Concrete dimensions or dimension type, such as `spatial` or `batch`. These dimensions will be mapped to `dim_type` for all positional function arguments. dim_type: Dimension type, such as `spatial` or `batch`. `f` will be called with dimensions remapped to this type. Returns: Function with signature matching `f`. """ def forward_retype(obj, input_types: Dict[str, Callable]): tree, tensors = disassemble_tree(obj, False, all_attributes) retyped = [] for t in tensors: originals = t.shape.only(dims) new_dims = originals.as_type(dim_type) for o, n in zip(originals, new_dims): input_types[n.name] = o.type retyped.append(rename_dims(t, originals, new_dims)) return assemble_tree(tree, retyped), input_types def reverse_retype(obj, input_types: Dict[str, Callable]): tree, tensors = disassemble_tree(obj, False, all_attributes) retyped = [] for t in tensors: output = t.shape.only(set(input_types)) to_dims = [o.as_type(input_types[o.name]) for o in output] retyped.append(rename_dims(t, output, to_dims)) return assemble_tree(tree, retyped) @wraps(f) def retyped_f(*args, **kwargs): input_types = {} retyped_args = [] for arg in args: retyped_arg, input_types = forward_retype(arg, input_types) retyped_args.append(retyped_arg) retyped_kwargs = {} for k, v in kwargs.items(): retyped_kwarg, input_types = forward_retype(v, input_types) retyped_kwargs[k] = retyped_kwarg output = f(*retyped_args, **retyped_kwargs) restored_output = reverse_retype(output, input_types) return restored_output return retyped_fWraps a function to change the dimension types of its
`Tensor` and `PhiTreeNode` arguments.
Args
f - Function to wrap.
dims - Concrete dimensions or dimension type, such as `spatial` or `batch`. These dimensions will be mapped to `dim_type` for all positional function arguments.
dim_type - Dimension type, such as `spatial` or `batch`. `f` will be called with dimensions remapped to this type.
Returns
Function with signature matching `f`.
def masked_fill(values: phiml.math._tensors.Tensor,
valid: phiml.math._tensors.Tensor,
distance: int = 1) ‑> Tuple[phiml.math._tensors.Tensor, phiml.math._tensors.Tensor]-
Expand source code
def masked_fill(values: Tensor, valid: Tensor, distance: int = 1) -> Tuple[Tensor, Tensor]: """ Extrapolates the values of `values` which are marked by the nonzero values of `valid` for `distance` steps in all spatial directions. Overlapping extrapolated values get averaged. Extrapolation also includes diagonals. Args: values: Tensor which holds the values for extrapolation valid: Tensor with same size as `x` marking the values for extrapolation with nonzero values distance: Number of extrapolation steps Returns: values: Extrapolation result valid: mask marking all valid values after extrapolation """ def binarize(x): return math.safe_div(x, x) distance = min(distance, max(values.shape.sizes)) for _ in range(distance): valid = binarize(valid) valid_values = valid * values overlap = valid # count how many values we are adding for dim in values.shape.spatial.names: values_l, values_r = shift(valid_values, (-1, 1), dims=dim, padding=extrapolation.ZERO) valid_values = math.sum_(values_l + values_r + valid_values, dim='shift') mask_l, mask_r = shift(overlap, (-1, 1), dims=dim, padding=extrapolation.ZERO) overlap = math.sum_(mask_l + mask_r + overlap, dim='shift') extp = math.safe_div(valid_values, overlap) # take mean where extrapolated values overlap values = math.where(valid, values, math.where(binarize(overlap), extp, values)) valid = overlap return values, binarize(valid)Extrapolates the values of
`values` which are marked by the nonzero values of `valid` for `distance` steps in all spatial directions. Overlapping extrapolated values get averaged. Extrapolation also includes diagonals.
Args
values - Tensor which holds the values for extrapolation
valid - Tensor with same size as `values` marking the values for extrapolation with nonzero values
distance - Number of extrapolation steps
Returns
values- Extrapolation result
valid- mask marking all valid values after extrapolation
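A small sketch (assuming a 2D grid with a single valid cell; the nonzero mask marks the seed value that is spread to its neighbors, including diagonals):
>>> from phiml.math import masked_fill, wrap, spatial
>>> values = wrap([[0., 0.], [0., 5.]], spatial('y,x'))
>>> valid = wrap([[0, 0], [0, 1]], spatial('y,x'))
>>> filled, valid_after = masked_fill(values, valid, distance=1)  # 5.0 fills all four cells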
def matrix_from_function(f: Callable,
*args,
auxiliary_args=None,
auto_compress=False,
target_backend: phiml.backend._backend.Backend = None,
debug_checks=False,
**kwargs) ‑> Tuple[phiml.math._tensors.Tensor, phiml.math._tensors.Tensor]-
Expand source code
def matrix_from_function(f: Callable, *args, auxiliary_args=None, auto_compress=False, target_backend: Backend = None, debug_checks=False, **kwargs) -> Tuple[Tensor, Tensor]: """ Trace a linear function and construct a matrix. Depending on the functional form of `f`, the returned matrix may be dense or sparse. Args: f: Function to trace. *args: Arguments for `f`. auxiliary_args: Arguments in which the function is not linear. These parameters are not traced but passed on as given in `args` and `kwargs`. auto_compress: If `True`, returns a compressed matrix if supported by the backend. sparsify_batch: If `False`, the matrix will be batched. If `True`, will create dual dimensions for the involved batch dimensions. This will result in one large matrix instead of a batch of matrices. **kwargs: Keyword arguments for `f`. Returns: matrix: Matrix representing the linear dependency of the output `f` on the input of `f`. Input dimensions will be `dual` dimensions of the matrix while output dimensions will be regular. bias: Bias for affine functions or zero-vector if the function is purely linear. """ _, tracer = trace_linear(f, *args, auxiliary_args=auxiliary_args, debug_checks=debug_checks, **kwargs) return matrix_and_bias_from_tracer(tracer, auto_compress=auto_compress, target_backend=target_backend)Trace a linear function and construct a matrix. Depending on the functional form of
`f`, the returned matrix may be dense or sparse.
Args
f - Function to trace.
*args - Arguments for `f`.
auxiliary_args - Arguments in which the function is not linear. These parameters are not traced but passed on as given in `args` and `kwargs`.
auto_compress - If `True`, returns a compressed matrix if supported by the backend.
sparsify_batch - If `False`, the matrix will be batched. If `True`, will create dual dimensions for the involved batch dimensions. This will result in one large matrix instead of a batch of matrices.
**kwargs - Keyword arguments for `f`.
Returns
matrix - Matrix representing the linear dependency of the output `f` on the input of `f`. Input dimensions will be `dual` dimensions of the matrix while output dimensions will be regular.
bias - Bias for affine functions or zero-vector if the function is purely linear.
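A minimal sketch tracing a trivially linear function (the scaling function and dim name are illustrative):
>>> from phiml.math import matrix_from_function, wrap, spatial
>>> def f(x):
...     return 2 * x
>>> matrix, bias = matrix_from_function(f, wrap([1., 2.], spatial('x')))
>>> matrix  # 2·identity with dual dim '~x' for the input; bias is a zero-vector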
def matrix_rank(matrix: phiml.math._tensors.Tensor) ‑> phiml.math._tensors.Tensor-
Expand source code
def matrix_rank(matrix: Tensor) -> Tensor: """ Approximates the rank of a matrix. The tolerances used depend on the current precision. Args: matrix: Sparse or dense matrix, i.e. `Tensor` with primal and dual dims. Returns: Matrix rank. """ if is_sparse(matrix): # stored_rank = matrix._matrix_rank # if (stored_rank >= 0).all: # return stored_rank warnings.warn("Matrix rank for sparse matrices is experimental and may not be accurate for large matrices.") from scipy.linalg.interpolative import estimate_rank eps = {16: 1e-2, 32: 1e-5, 64: 1e-10}[get_precision()] def single_sparse_rank(matrix: Tensor) -> Tensor: def scipy_determine_rank(scipy_matrix): if min(scipy_matrix.shape) <= 100: rank = np.linalg.matrix_rank(scipy_matrix.todense()) return np.array(rank, dtype=np.int64) if scipy_matrix.dtype not in (np.float64, np.complex128): scipy_matrix = scipy_matrix.astype(np.complex128 if scipy_matrix.dtype.kind == 'c' else np.float64) rank = estimate_rank(aslinearoperator(scipy_matrix), eps) return np.array(rank, dtype=np.int64) nat_mat = native_matrix(matrix, matrix.default_backend) scipy_result = matrix.default_backend.numpy_call(scipy_determine_rank, (), INT64, nat_mat) return wrap(scipy_result) from ._ops import broadcast_op return broadcast_op(single_sparse_rank, [matrix], batch(matrix)) else: # dense native = matrix.native([batch, primal, dual], force_expand=True) ranks_native = choose_backend(native).matrix_rank_dense(native) return reshaped_tensor(ranks_native, [batch(matrix)], convert=False)Approximates the rank of a matrix. The tolerances used depend on the current precision.
Args
    matrix: Sparse or dense matrix, i.e. `Tensor` with primal and dual dims.

Returns
    Matrix rank.
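An illustrative check on a small dense matrix, built with `wrap` using a `channel` (primal) and a `dual` dim; a matrix whose second row is a multiple of the first should report rank 1:

    >>> from phiml import math
    >>> from phiml.math import wrap, channel, dual
    >>> m = wrap([[1., 2.], [2., 4.]], channel(rows=2), dual(cols=2))
    >>> math.matrix_rank(m)   # rank 1: the rows are linearly dependent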
def max(value: ~TensorOrTree,
dim: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function non_batch>,
key: phiml.math._tensors.Tensor = None) ‑> ~TensorOrTree
def max_(value: TensorOrTree, dim: DimFilter = non_batch, key: Tensor = None) -> TensorOrTree: """ Determines the maximum value of `values` along the specified dimensions. Args: value: (Sparse) `Tensor` or `list` / `tuple` of Tensors. dim: Dimension or dimensions to be reduced. One of * `None` to reduce all non-batch dimensions * `str` containing single dimension or comma-separated list of dimensions * `Tuple[str]` or `List[str]` * `Shape` * `batch`, `instance`, `spatial`, `channel` to select dimensions by type * `'0'` when `isinstance(value, (tuple, list))` to add up the sequence of Tensors key: Optional comparison values. If specified, returns the value where `key` is maximal, see `at_max()`. Returns: `Tensor` without the reduced dimensions. """ if key is not None: return at_max(value, key, dim) return reduce_(_max, value, dim)Determines the maximum value of
`values` along the specified dimensions.

Args
    value: (Sparse) `Tensor` or `list` / `tuple` of Tensors.
    dim: Dimension or dimensions to be reduced. One of
        * `None` to reduce all non-batch dimensions
        * `str` containing single dimension or comma-separated list of dimensions
        * `Tuple[str]` or `List[str]`
        * `Shape`
        * `batch`, `instance`, `spatial`, `channel` to select dimensions by type
        * `'0'` when `isinstance(value, (tuple, list))` to reduce the sequence of Tensors
    key: Optional comparison values. If specified, returns the value where `key` is maximal, see `at_max()`.

Returns
    `Tensor` without the reduced dimensions.

def maximum(x: float | phiml.math._tensors.Tensor,
y: float | phiml.math._tensors.Tensor,
allow_none=False)
def maximum(x: Union[Tensor, float], y: Union[Tensor, float], allow_none=False): """ Computes the element-wise maximum of `x` and `y`. """ if isinstance(x, Shape) or isinstance(y, Shape): if isinstance(y, Shape) and not isinstance(x, Shape): x, y = y, x if isinstance(y, Number): return x.with_sizes([max(s, y) for s in x.sizes]) elif isinstance(y, Tensor): x = y._tensor(x) if not isinstance(x, Tensor) and not isinstance(y, Tensor): return choose_backend(x, y).maximum(x, y) if allow_none: if x is None: return y elif y is None: return x if is_sparse(x): return x._op2(y, maximum, False) elif is_sparse(y): return y._op2(x, maximum, True) return custom_op2(x, y, xops.maximum)Computes the element-wise maximum of
`x` and `y`.

def mean(value,
dim: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function non_batch>,
weight: phiml.math._tensors.Tensor | list | tuple = None,
where_no_weight=nan,
epsilon=1e-10) ‑> phiml.math._tensors.Tensor
def mean(value, dim: DimFilter = non_batch, weight: Union[Tensor, list, tuple] = None, where_no_weight=float('nan'), epsilon=1e-10) -> Tensor: """ Computes the mean over `values` along the specified dimensions. Args: value: (Sparse) `Tensor` or `list` / `tuple` of Tensors. dim: Dimension or dimensions to be reduced. One of * `None` to reduce all non-batch dimensions * `str` containing single dimension or comma-separated list of dimensions * `Tuple[str]` or `List[str]` * `Shape` * `batch`, `instance`, `spatial`, `channel` to select dimensions by type * `'0'` when `isinstance(value, (tuple, list))` to add up the sequence of Tensors weight: Optionally perform a weighted mean operation. Must broadcast to `value`. where_no_weight: Value to use when the sum of all weights are smaller than `epsilon`. epsilon: Only if `where_no_weight`. Threshold for using `where_no_weight`. Returns: `Tensor` without the reduced dimensions. """ if weight is not None: if isinstance(value, (tuple, list)): assert isinstance(weight, (tuple, list)), f"When computing mean over tuples or lists, the weight must also be a tuple or list" value = stack_tensors([wrap(v) for v in value], instance(**{'0': len(value)})) weight = stack_tensors([wrap(w) for w in weight], instance(**{'0': len(weight)})) dim = value.shape.only(dim) assert '0' in dim, "When passing a sequence of tensors to be reduced, the sequence dimension '0' must be reduced." weight_sum = sum_(weight, dim) if not np.isnan(where_no_weight): weight_sum = where(abs(weight_sum) < epsilon, 1, weight_sum) result = sum_(value * weight, dim) / weight_sum if not np.isnan(where_no_weight): result = where(weight_sum == 0, where_no_weight, result) return result return reduce_(_mean, value, dim)Computes the mean over
`values` along the specified dimensions.

Args
    value: (Sparse) `Tensor` or `list` / `tuple` of Tensors.
    dim: Dimension or dimensions to be reduced. One of
        * `None` to reduce all non-batch dimensions
        * `str` containing single dimension or comma-separated list of dimensions
        * `Tuple[str]` or `List[str]`
        * `Shape`
        * `batch`, `instance`, `spatial`, `channel` to select dimensions by type
        * `'0'` when `isinstance(value, (tuple, list))` to reduce the sequence of Tensors
    weight: Optionally perform a weighted mean operation. Must broadcast to `value`.
    where_no_weight: Value to use when the sum of all weights is smaller than `epsilon`.
    epsilon: Only used if `where_no_weight` is specified. Threshold for using `where_no_weight`.

Returns
    `Tensor` without the reduced dimensions.

def median(value,
dim: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function non_batch>)
def median(value, dim: DimFilter = non_batch): """ Reduces `dim` of `value` by picking the median value. For odd dimension sizes (ambigous choice), the linear average of the two median values is computed. Currently implemented via `quantile()`. Args: value: `Tensor` dim: Dimension or dimensions to be reduced. One of * `None` to reduce all non-batch dimensions * `str` containing single dimension or comma-separated list of dimensions * `Tuple[str]` or `List[str]` * `Shape` * `batch`, `instance`, `spatial`, `channel` to select dimensions by type * `'0'` when `isinstance(value, (tuple, list))` to add up the sequence of Tensors Returns: `Tensor` """ return reduce_(_median, value, dim)Reduces
`dim` of `value` by picking the median value. For even dimension sizes (ambiguous choice), the linear average of the two median values is computed.

Currently implemented via `quantile()`.

Args
    value: `Tensor`
    dim: Dimension or dimensions to be reduced. One of
        * `None` to reduce all non-batch dimensions
        * `str` containing single dimension or comma-separated list of dimensions
        * `Tuple[str]` or `List[str]`
        * `Shape`
        * `batch`, `instance`, `spatial`, `channel` to select dimensions by type
        * `'0'` when `isinstance(value, (tuple, list))` to reduce the sequence of Tensors

Returns
    `Tensor`
def merge_shapes(*objs: phiml.math._shape.Shape | Any,
allow_varying_sizes=False,
allow_varying_labels=False) ‑> phiml.math._shape.Shape
def merge_shapes(*objs: Union[Shape, Any], allow_varying_sizes=False, allow_varying_labels=False) -> Shape: """ Combines `shapes` into a single `Shape`, grouping dimensions by type. If dimensions with equal names are present in multiple shapes, their types and sizes must match. The shorthand `shape1 & shape2` merges shapes with `check_exact=[spatial]`. See Also: `concat_shapes()`. Args: *objs: `Shape` or `Shaped` objects to combine. allow_varying_sizes: If `True`, merges incompatible dims by setting their size to `None` and erasing their labels. If `False`, raises an error for incompatible dims. allow_varying_labels: If `True`, removes labels in case of conflict. Else raises an error. Returns: Merged `Shape` Raises: IncompatibleShapes if the shapes are not compatible """ if not objs: return EMPTY_SHAPE shapes = [shape(obj) for obj in objs] is_pure = not any(isinstance(s, MixedShape) for s in shapes) allow_varying_labels = True if allow_varying_sizes else allow_varying_labels if is_pure: is_pure = len(set([s.dim_type for s in shapes])) == 1 if is_pure: return pure_merge(*shapes, allow_varying_sizes=allow_varying_sizes, allow_varying_labels=allow_varying_labels) else: b = pure_merge(*[s.batch for s in shapes], allow_varying_sizes=allow_varying_sizes, allow_varying_labels=allow_varying_labels) d = pure_merge(*[s.dual for s in shapes], allow_varying_sizes=allow_varying_sizes, allow_varying_labels=allow_varying_labels) i = pure_merge(*[s.instance for s in shapes], allow_varying_sizes=allow_varying_sizes, allow_varying_labels=allow_varying_labels) s = pure_merge(*[s.spatial for s in shapes], allow_varying_sizes=allow_varying_sizes, allow_varying_labels=allow_varying_labels) c = pure_merge(*[s.channel for s in shapes], allow_varying_sizes=allow_varying_sizes, allow_varying_labels=allow_varying_labels) dims = {**b.dims, **d.dims, **i.dims, **s.dims, **c.dims} return MixedShape(b, d, i, s, c, dims) if dims else EMPTY_SHAPECombines
`shapes` into a single `Shape`, grouping dimensions by type. If dimensions with equal names are present in multiple shapes, their types and sizes must match.

The shorthand `shape1 & shape2` merges shapes with `check_exact=[spatial]`.

See Also: `concat_shapes()`.

Args
    *objs: `Shape` or `Shaped` objects to combine.
    allow_varying_sizes: If `True`, merges incompatible dims by setting their size to `None` and erasing their labels. If `False`, raises an error for incompatible dims.
    allow_varying_labels: If `True`, removes labels in case of conflict. Else raises an error.

Returns
    Merged `Shape`

Raises
    `IncompatibleShapes` if the shapes are not compatible
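For example, merging a batch shape with a spatial shape groups the dims by type (illustrative; the exact repr may vary between versions):

    >>> from phiml.math import merge_shapes, batch, spatial
    >>> merge_shapes(batch(examples=10), spatial(x=4, y=3))
    (examplesᵇ=10, xˢ=4, yˢ=3)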
def meshgrid(dims: Callable | phiml.math._shape.Shape = <function spatial>,
stack_dim: str | phiml.math._shape.Shape | None = (vectorᶜ),
**dimensions: int | phiml.math._tensors.Tensor | tuple | list | Any) ‑> phiml.math._tensors.Tensor
def meshgrid(dims: Union[Callable, Shape] = spatial, stack_dim: Union[Shape, str, None] = channel('vector'), **dimensions: Union[int, Tensor, tuple, list, Any]) -> Tensor: """ Generate a mesh-grid `Tensor` from keyword dimensions. Args: **dimensions: Mesh-grid dimensions, mapping names to values. Values may be `int`, 1D `Tensor` or 1D native tensor. dims: Dimension type of mesh-grid dimensions, one of `spatial`, `channel`, `batch`, `instance`. stack_dim: Channel dim along which grids are stacked. This is optional for 1D mesh-grids. In that case returns a `Tensor` without a stack dim if `None` or an empty `Shape` is passed. Returns: Mesh-grid `Tensor` with the dimensions of `dims` / `dimensions` and `stack_dim`. Examples: >>> math.meshgrid(x=2, y=2) (xˢ=2, yˢ=2, vectorᶜ=x,y) 0.500 ± 0.500 (0e+00...1e+00) >>> math.meshgrid(x=2, y=(-1, 1)) (xˢ=2, yˢ=2, vectorᶜ=x,y) 0.250 ± 0.829 (-1e+00...1e+00) >>> math.meshgrid(x=2, stack_dim=None) (0, 1) along xˢ """ assert 'dim_type' not in dimensions, f"dim_type has been renamed to dims" if isinstance(stack_dim, str): stack_dim = auto(stack_dim, channel) assert not stack_dim or stack_dim.name not in dimensions if isinstance(dims, SHAPE_TYPES): assert not dimensions, f"When passing a Shape to meshgrid(), no kwargs are allowed" dimensions = {d: s for d, s in zip(dims.names, dims.sizes)} grid_shape = dims dim_values = [tuple(range(s)) for s in dims.sizes] else: dim_type = dims assert callable(dim_type), f"dims must be a Shape or dimension type but got {dims}" dim_values = [] dim_sizes = [] for dim, spec in dimensions.items(): if isinstance(spec, int) or (isinstance(spec, Tensor) and spec.rank == 0 and spec.dtype.kind == int): dim_values.append(tuple(range(int(spec)))) dim_sizes.append(spec) elif isinstance(spec, Tensor): assert spec.rank == 1, f"Only 1D sequences allowed, got {spec} for dimension '{dim}'." dim_values.append(spec.native()) dim_sizes.append(spec.shape.volume) else: backend = choose_backend(spec) shape = backend.staticshape(spec) assert len(shape) == 1, "Only 1D sequences allowed, got {spec} for dimension '{dim}'." dim_values.append(spec) dim_sizes.append(shape[0]) grid_shape = dim_type(**{dim: size for dim, size in zip(dimensions.keys(), dim_sizes)}) backend = choose_backend(*dim_values, prefer_default=True) indices_list = backend.meshgrid(*dim_values) channels = [Dense(t, grid_shape.names, grid_shape, backend) for t in indices_list] if not stack_dim: assert len(channels) == 1, f"meshgrid with multiple dimension requires a valid stack_dim but got {stack_dim}" return channels[0] if stack_dim.labels[0] is None: stack_dim = stack_dim.with_size(tuple(dimensions.keys())) return stack_tensors(channels, stack_dim)Generate a mesh-grid
`Tensor` from keyword dimensions.

Args
    **dimensions: Mesh-grid dimensions, mapping names to values. Values may be `int`, 1D `Tensor` or 1D native tensor.
    dims: Dimension type of mesh-grid dimensions, one of `spatial`, `channel`, `batch`, `instance`.
    stack_dim: Channel dim along which grids are stacked. This is optional for 1D mesh-grids. In that case returns a `Tensor` without a stack dim if `None` or an empty `Shape` is passed.

Returns
    Mesh-grid `Tensor` with the dimensions of `dims` / `dimensions` and `stack_dim`.

Examples
    >>> math.meshgrid(x=2, y=2)
    (xˢ=2, yˢ=2, vectorᶜ=x,y) 0.500 ± 0.500 (0e+00...1e+00)

    >>> math.meshgrid(x=2, y=(-1, 1))
    (xˢ=2, yˢ=2, vectorᶜ=x,y) 0.250 ± 0.829 (-1e+00...1e+00)

    >>> math.meshgrid(x=2, stack_dim=None)
    (0, 1) along xˢ

def min(value,
dim: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function non_batch>,
key: phiml.math._tensors.Tensor = None) ‑> phiml.math._tensors.Tensor
def min_(value, dim: DimFilter = non_batch, key: Tensor = None) -> Tensor: """ Determines the minimum value of `values` along the specified dimensions. Args: value: (Sparse) `Tensor` or `list` / `tuple` of Tensors. dim: Dimension or dimensions to be reduced. One of * `None` to reduce all non-batch dimensions * `str` containing single dimension or comma-separated list of dimensions * `Tuple[str]` or `List[str]` * `Shape` * `batch`, `instance`, `spatial`, `channel` to select dimensions by type * `'0'` when `isinstance(value, (tuple, list))` to add up the sequence of Tensors key: Optional comparison values. If specified, returns the value where `key` is minimal, see `at_min()`. Returns: `Tensor` without the reduced dimensions. """ if key is not None: return at_min(value, key, dim) return reduce_(_min, value, dim)Determines the minimum value of
`values` along the specified dimensions.

Args
    value: (Sparse) `Tensor` or `list` / `tuple` of Tensors.
    dim: Dimension or dimensions to be reduced. One of
        * `None` to reduce all non-batch dimensions
        * `str` containing single dimension or comma-separated list of dimensions
        * `Tuple[str]` or `List[str]`
        * `Shape`
        * `batch`, `instance`, `spatial`, `channel` to select dimensions by type
        * `'0'` when `isinstance(value, (tuple, list))` to reduce the sequence of Tensors
    key: Optional comparison values. If specified, returns the value where `key` is minimal, see `at_min()`.

Returns
    `Tensor` without the reduced dimensions.

def minimize(f: Callable[[~X], ~Y], solve: phiml.math._optimize.Solve[~X, ~Y]) ‑> ~X
def minimize(f: Callable[[X], Y], solve: Solve[X, Y]) -> X: """ Finds a minimum of the scalar function *f(x)*. The `method` argument of `solve` determines which optimizer is used. All optimizers supported by `scipy.optimize.minimize` are supported, see https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html . Additionally a gradient descent solver with adaptive step size can be used with `method='GD'`. `math.minimize()` is limited to backends that support `jacobian()`, i.e. PyTorch, TensorFlow and Jax. To obtain additional information about the performed solve, use a `SolveTape`. See Also: `solve_nonlinear()`. Args: f: Function whose output is subject to minimization. All positional arguments of `f` are optimized and must be `Tensor` or `phiml.math.magic.PhiTreeNode`. If `solve.x0` is a `tuple` or `list`, it will be passed to *f* as varargs, `f(*x0)`. To minimize a subset of the positional arguments, define a new (lambda) function depending only on those. The first return value of `f` must be a scalar float `Tensor` or `phiml.math.magic.PhiTreeNode`. solve: `Solve` object to specify method type, parameters and initial guess for `x`. Returns: x: solution, the minimum point `x`. Raises: NotConverged: If the desired accuracy was not be reached within the maximum number of iterations. Diverged: If the optimization failed prematurely. """ solve = solve.with_defaults('optimization') assert (solve.rel_tol == 0).all, f"rel_tol must be zero for minimize() but got {solve.rel_tol}" assert solve.preprocess_y is None, "minimize() does not allow preprocess_y" x0_nest, x0_tensors = disassemble_tree(solve.x0, cache=True, attr_type=variable_attributes) x0_tensors = [to_float(t) for t in x0_tensors] backend = preferred_backend_for(*x0_tensors) batch_dims = merge_shapes(*[batch(t) for t in x0_tensors]) x0_natives = [] x0_native_shapes = [] for t in x0_tensors: t = t._cached() if t.shape.is_uniform: x0_natives.append(t.native([batch_dims, t.shape.non_batch])) x0_native_shapes.append(t.shape.non_batch) else: for ut in unstack(t, t.shape.non_uniform_shape): x0_natives.append(ut.native([batch_dims, ut.shape.non_batch])) x0_native_shapes.append(ut.shape.non_batch) x0_flat = backend.concat(x0_natives, -1) def unflatten_assemble(x_flat, additional_dims: Shape = EMPTY_SHAPE, convert=True): partial_tensors = [] i = 0 for x0_native, t_shape in zip(x0_natives, x0_native_shapes): vol = backend.staticshape(x0_native)[-1] flat_native = x_flat[..., i:i + vol] partial_tensor = reshaped_tensor(flat_native, [*additional_dims, batch_dims, t_shape], convert=convert) partial_tensors.append(partial_tensor) i += vol # --- assemble non-uniform tensors --- x_tensors = [] for t in x0_tensors: if t.shape.is_uniform: x_tensors.append(partial_tensors.pop(0)) else: stack_dims = t.shape.non_uniform_shape x_tensors.append(stack(partial_tensors[:stack_dims.volume], stack_dims)) partial_tensors = partial_tensors[stack_dims.volume:] x = assemble_tree(x0_nest, x_tensors, attr_type=variable_attributes) return x def native_function(x_flat): x = unflatten_assemble(x_flat) if isinstance(x, (tuple, list)): y = f(*x) else: y = f(x) _, y_tensors = disassemble_tree(y, cache=False) loss_tensor = y_tensors[0] assert not non_batch(loss_tensor), f"Failed to minimize '{f.__name__}' because it returned a non-scalar output {shape(loss_tensor)}. Reduce all non-batch dimensions, e.g. using math.l2_loss()" extra_batch = loss_tensor.shape.without(batch_dims) if extra_batch: # output added more batch dims. 
We should expand the initial guess if extra_batch.volume > 1: raise NewBatchDims(loss_tensor.shape, extra_batch) else: loss_tensor = loss_tensor[next(iter(extra_batch.meshgrid()))] loss_native = loss_tensor.native([batch_dims], force_expand=False) return loss_tensor.sum, (loss_native,) atol = backend.to_float(solve.abs_tol.native([batch_dims])) maxi = solve.max_iterations.numpy([batch_dims]) trj = _SOLVE_TAPES and any(t.should_record_trajectory_for(solve) for t in _SOLVE_TAPES) t = time.perf_counter() try: ret = backend.minimize(solve.method, native_function, x0_flat, atol, maxi, trj) except NewBatchDims as new_dims: # try again with expanded initial guess warnings.warn(f"Function returned objective value with dims {new_dims.output_shape} but initial guess was missing {new_dims.missing}. Trying again with expanded initial guess.", RuntimeWarning, stacklevel=2) x0 = expand(solve.x0, new_dims.missing) solve = copy_with(solve, x0=x0) return minimize(f, solve) t = time.perf_counter() - t if not trj: assert isinstance(ret, SolveResult) converged = reshaped_tensor(ret.converged, [batch_dims]) diverged = reshaped_tensor(ret.diverged, [batch_dims]) x = unflatten_assemble(ret.x) iterations = reshaped_tensor(ret.iterations, [batch_dims]) function_evaluations = reshaped_tensor(ret.function_evaluations, [batch_dims]) residual = reshaped_tensor(ret.residual, [batch_dims]) result = SolveInfo(solve, x, residual, iterations, function_evaluations, converged, diverged, ret.method, ret.message, t) else: # trajectory assert isinstance(ret, (tuple, list)) and all(isinstance(r, SolveResult) for r in ret) converged = reshaped_tensor(ret[-1].converged, [batch_dims]) diverged = reshaped_tensor(ret[-1].diverged, [batch_dims]) x = unflatten_assemble(ret[-1].x) x_ = unflatten_assemble(numpy.stack([r.x for r in ret]), additional_dims=batch('trajectory'), convert=False) residual = stack([reshaped_tensor(r.residual, [batch_dims]) for r in ret], batch('trajectory')) iterations = reshaped_tensor(ret[-1].iterations, [batch_dims]) function_evaluations = stack([reshaped_tensor(r.function_evaluations, [batch_dims]) for r in ret], batch('trajectory')) result = SolveInfo(solve, x_, residual, iterations, function_evaluations, converged, diverged, ret[-1].method, ret[-1].message, t) for tape in _SOLVE_TAPES: tape._add(solve, trj, result) result.convergence_check(False) # raises ConvergenceException return xFinds a minimum of the scalar function f(x). The
`method` argument of `solve` determines which optimizer is used. All optimizers supported by `scipy.optimize.minimize` are supported, see https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html . Additionally, a gradient descent solver with adaptive step size can be used with `method='GD'`.

`math.minimize()` is limited to backends that support `jacobian()`, i.e. PyTorch, TensorFlow and Jax.

To obtain additional information about the performed solve, use a `SolveTape`.

See Also: `solve_nonlinear()`.

Args
    f: Function whose output is subject to minimization. All positional arguments of `f` are optimized and must be `Tensor` or `PhiTreeNode`. If `solve.x0` is a `tuple` or `list`, it will be passed to `f` as varargs, `f(*x0)`. To minimize a subset of the positional arguments, define a new (lambda) function depending only on those. The first return value of `f` must be a scalar float `Tensor` or `PhiTreeNode`.
    solve: `Solve` object to specify method type, parameters and initial guess for `x`.

Returns
    x: Solution, the minimum point `x`.

Raises
    NotConverged: If the desired accuracy was not reached within the maximum number of iterations.
    Diverged: If the optimization failed prematurely.
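A minimal sketch, assuming `Solve` is importable from `phiml.math` and an autodiff backend such as PyTorch is active. `'L-BFGS-B'` is one of the `scipy.optimize.minimize` methods mentioned above; the objective below has its minimum at x = 2:

    >>> from phiml import math
    >>> from phiml.math import Solve, wrap
    >>> math.use('torch')   # minimize() requires a backend that supports jacobian()
    >>> loss = lambda x: math.l2_loss(x - 2)   # scalar objective, minimal at x = 2
    >>> x_opt = math.minimize(loss, Solve('L-BFGS-B', abs_tol=1e-6, x0=wrap(0.)))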
def minimum(x: float | phiml.math._tensors.Tensor,
y: float | phiml.math._tensors.Tensor,
allow_none=False)
def minimum(x: Union[Tensor, float], y: Union[Tensor, float], allow_none=False): """ Computes the element-wise minimum of `x` and `y`. """ if isinstance(x, Shape) or isinstance(y, Shape): if isinstance(y, Shape) and not isinstance(x, Shape): x, y = y, x if isinstance(y, Number): return x.with_sizes([min(s, y) for s in x.sizes]) elif isinstance(y, Tensor): x = y._tensor(x) if allow_none: if x is None: return y elif y is None: return x if not isinstance(x, Tensor) and not isinstance(y, Tensor): return choose_backend(x, y).minimum(x, y) if is_sparse(x): return x._op2(y, minimum, False) elif is_sparse(y): return y._op2(x, minimum, True) return custom_op2(x, y, xops.minimum)Computes the element-wise minimum of
`x` and `y`.

def nan_to_0(x: ~TensorOrTree) ‑> ~TensorOrTree
def nan_to_0(x: TensorOrTree) -> TensorOrTree: """Replaces all NaN values in `x` with `0`.""" return where(is_nan(x), 0, x)Replaces all NaN values in
`x` with `0`.

def native(value: phiml.math._tensors.Tensor | numbers.Number | tuple | list | Any)
def native(value: Union[Tensor, Number, tuple, list, Any]): """ Returns the native tensor representation of `value`. If `value` is a `phiml.math.Tensor`, this is equal to calling `phiml.math.Tensor.native()`. Otherwise, checks that `value` is a valid tensor object and returns it. Args: value: `Tensor` or native tensor or tensor-like. Returns: Native tensor representation Raises: ValueError if the tensor cannot be transposed to match target_shape """ if isinstance(value, Tensor): return value.native() else: choose_backend(value) # check that value is a native tensor return valueReturns the native tensor representation of
`value`. If `value` is a `Tensor`, this is equal to calling `Tensor.native()`. Otherwise, checks that `value` is a valid tensor object and returns it.

Args
    value: `Tensor` or native tensor or tensor-like.

Returns
    Native tensor representation

Raises
    `ValueError` if the tensor cannot be transposed to match `target_shape`
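For instance, under the default NumPy backend (illustrative):

    >>> from phiml import math
    >>> from phiml.math import spatial
    >>> math.native(math.ones(spatial(x=3)))   # returns the backend array, here a NumPy ndarray of ones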
def native_call(f: Callable,
*inputs: phiml.math._tensors.Tensor,
channels_last=None,
channel_dim='vector',
spatial_dim=None,
**f_kwargs)
def native_call(f: Callable, *inputs: Tensor, channels_last=None, channel_dim='vector', spatial_dim=None, **f_kwargs): """ Calls `f` with the native representations of the `inputs` tensors in standard layout and returns the result as a `Tensor`. All inputs are converted to native tensors (including precision cast) depending on `channels_last`: * `channels_last=True`: Dimension layout `(total_batch_size, spatial_dims..., total_channel_size)` * `channels_last=False`: Dimension layout `(total_batch_size, total_channel_size, spatial_dims...)` All batch dimensions are compressed into a single dimension with `total_batch_size = input.shape.batch.volume`. The same is done for all channel dimensions. Additionally, missing batch and spatial dimensions are added so that all `inputs` have the same batch and spatial shape. Args: f: Function to be called on native tensors of `inputs`. The function output must have the same dimension layout as the inputs, unless overridden by `spatial_dim`, and the batch size must be identical. *inputs: Uniform `Tensor` arguments channels_last: (Optional) Whether to put channels as the last dimension of the native representation. If `None`, the channels are put in the default position associated with the current backend, see `phiml.math.backend.Backend.prefers_channels_last()`. channel_dim: Name of the channel dimension of the result. spatial_dim: Name of the spatial dimension of the result. Returns: `Tensor` with batch and spatial dimensions of `inputs`, unless overridden by `spatial_dim`, and single channel dimension `channel_dim`. """ try: backend = choose_backend(f) except NoBackendFound: backend = preferred_backend_for(*inputs) if channels_last is None: channels_last = backend.prefers_channels_last() b_dims = merge_shapes(*[i.shape.batch & i.shape.dual for i in inputs]) s_dims = merge_shapes(*[i.shape.spatial for i in inputs]) natives = [] for i in inputs: groups = [b_dims, *i.shape.spatial.names, i.shape.channel] if channels_last else [b_dims, i.shape.channel, *i.shape.spatial.names] natives.append(backend.as_tensor(i.native(groups, force_expand=False), True)) output = f(*natives, **f_kwargs) if not channel_dim: channel_dim = EMPTY_SHAPE elif isinstance(channel_dim, str): channel_dim = channel(channel_dim) assert isinstance(channel_dim, SHAPE_TYPES), "channel_dim must be a Shape or str" if isinstance(output, (tuple, list)): raise NotImplementedError() if spatial_dim is None: ndim = choose_backend(output).ndims(output) if ndim == 1: groups = [b_dims] elif ndim == 2: groups = [b_dims, *channel_dim] else: groups = [b_dims, *s_dims, *channel_dim] if channels_last else [b_dims, *channel_dim, *s_dims] else: if isinstance(spatial_dim, str): spatial_dim = spatial(spatial_dim) assert isinstance(spatial_dim, SHAPE_TYPES), "spatial_dim must be a Shape or str" groups = [b_dims, *spatial_dim, *channel_dim] if channels_last else [b_dims, *channel_dim, *spatial_dim] result = reshaped_tensor(output, groups, convert=False) if channel_dim.rank == 1 and result.shape.get_size(channel_dim.name) == 1 and not channel_dim.labels[0]: result = result.dimension(channel_dim.name)[0] # remove vector dim if not required return resultCalls
`f` with the native representations of the `inputs` tensors in standard layout and returns the result as a `Tensor`.

All inputs are converted to native tensors (including precision cast) depending on `channels_last`:

* `channels_last=True`: Dimension layout `(total_batch_size, spatial_dims..., total_channel_size)`
* `channels_last=False`: Dimension layout `(total_batch_size, total_channel_size, spatial_dims...)`

All batch dimensions are compressed into a single dimension with `total_batch_size = input.shape.batch.volume`. The same is done for all channel dimensions.

Additionally, missing batch and spatial dimensions are added so that all `inputs` have the same batch and spatial shape.

Args
    f: Function to be called on native tensors of `inputs`. The function output must have the same dimension layout as the inputs, unless overridden by `spatial_dim`, and the batch size must be identical.
    *inputs: Uniform `Tensor` arguments
    channels_last: (Optional) Whether to put channels as the last dimension of the native representation. If `None`, the channels are put in the default position associated with the current backend, see `phiml.math.backend.Backend.prefers_channels_last()`.
    channel_dim: Name of the channel dimension of the result.
    spatial_dim: Name of the spatial dimension of the result.

Returns
    `Tensor` with batch and spatial dimensions of `inputs`, unless overridden by `spatial_dim`, and single channel dimension `channel_dim`.

def ncat(values: Sequence[~PhiTreeNodeType],
dim: phiml.math._shape.Shape,
expand_values=False) ‑> ~PhiTreeNodeType
def ncat(values: Sequence[PhiTreeNodeType], dim: Shape, expand_values=False) -> PhiTreeNodeType: """ Concatenate named components along `dim`. Args: values: Each value can contain multiple components of `dim` if `dim` is present in its shape. Else, it is interpreted as a single component whose name will be determined from the leftover labels of `dim`. dim: Single dimension that has labels matching components of `values`. expand_values: If `True`, will add all missing dims to values, not just batch dimensions. This allows tensors with different dims to be concatenated. The resulting tensor will have all dims that are present in `values`. If `False`, this may return a non-numeric object instead. Returns: Same type as any value from `values`. """ order = dim.labels[0] assert dim.rank == 1 and order, f"dim needs to be a single dimension with labels but got {dim}" named = {} unnamed = [] for value in values: s = shape(value) if dim in s: for n in s[dim].labels[0]: named[n] = value[{dim.name: n}] else: unnamed.append(value) missing = [n for n in order if n not in named] assert len(missing) == len(unnamed), f"Components do not match dim {dim}. Given: {len(unnamed)} for remaining names {missing}" named.update({n: v for v, n in zip(unnamed, missing)}) components = [named[n] for n in order] return stack(components, dim, expand_values=expand_values)Concatenate named components along
`dim`.

Args
    values: Each value can contain multiple components of `dim` if `dim` is present in its shape. Else, it is interpreted as a single component whose name will be determined from the leftover labels of `dim`.
    dim: Single dimension that has labels matching components of `values`.
    expand_values: If `True`, will add all missing dims to values, not just batch dimensions. This allows tensors with different dims to be concatenated. The resulting tensor will have all dims that are present in `values`. If `False`, this may return a non-numeric object instead.

Returns
    Same type as any value from `values`.

def neighbor_max(grid: phiml.math._tensors.Tensor,
dims: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function spatial>,
padding: Extrapolation | float | phiml.math._tensors.Tensor | str | None = None,
extend_bounds=0) ‑> phiml.math._tensors.Tensor
def neighbor_max(grid: Tensor, dims: DimFilter = spatial, padding: Union[Extrapolation, float, Tensor, str, None] = None, extend_bounds=0) -> Tensor: """`neighbor_reduce` with `reduce_fun` set to `phiml.math.max`.""" return neighbor_reduce(math.max_, grid, dims, padding, extend_bounds=extend_bounds)

`neighbor_reduce()` with `reduce_fun` set to `max_()`.

def neighbor_mean(grid: phiml.math._tensors.Tensor,
dims: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function spatial>,
padding: Extrapolation | float | phiml.math._tensors.Tensor | str | None = None,
extend_bounds=0) ‑> phiml.math._tensors.Tensor
def neighbor_mean(grid: Tensor, dims: DimFilter = spatial, padding: Union[Extrapolation, float, Tensor, str, None] = None, extend_bounds=0) -> Tensor: """`neighbor_reduce` with `reduce_fun` set to `phiml.math.mean`.""" return neighbor_reduce(math.mean, grid, dims, padding, extend_bounds=extend_bounds)

`neighbor_reduce()` with `reduce_fun` set to `mean()`.

def neighbor_min(grid: phiml.math._tensors.Tensor,
dims: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function spatial>,
padding: Extrapolation | float | phiml.math._tensors.Tensor | str | None = None,
extend_bounds=0) ‑> phiml.math._tensors.Tensor
def neighbor_min(grid: Tensor, dims: DimFilter = spatial, padding: Union[Extrapolation, float, Tensor, str, None] = None, extend_bounds=0) -> Tensor: """`neighbor_reduce` with `reduce_fun` set to `phiml.math.min`.""" return neighbor_reduce(math.min_, grid, dims, padding, extend_bounds=extend_bounds)

`neighbor_reduce()` with `reduce_fun` set to `min_()`.

def neighbor_reduce(reduce_fun: Callable,
grid: phiml.math._tensors.Tensor,
dims: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function spatial>,
padding: Extrapolation | float | phiml.math._tensors.Tensor | str | None = None,
padding_kwargs: dict = None,
extend_bounds=0) ‑> phiml.math._tensors.Tensor
def neighbor_reduce(reduce_fun: Callable, grid: Tensor, dims: DimFilter = spatial, padding: Union[Extrapolation, float, Tensor, str, None] = None, padding_kwargs: dict = None, extend_bounds=0) -> Tensor: """ Computes the sum/mean/min/max/prod/etc. of two neighboring values along each dimension in `dim`. The result tensor has one entry less than `grid` in each averaged dimension unless `padding` is specified. With two `dims`, computes the mean of 4 values, in 3D, the mean of 8 values. Args: reduce_fun: Reduction function, such as `sum`, `mean`, `max`, `min`, `prod`. grid: Values to reduce. dims: Dimensions along which neighbors should be reduced. padding: Padding at the upper edges of `grid` along `dims'. If not `None`, the result tensor will have the same shape as `grid`. padding_kwargs: Additional keyword arguments to be passed to `phiml.math.pad()`. Returns: `Tensor` """ result = grid dims = grid.shape.only(dims) for dim in dims: l, r = shift(result, (0, 1), dim, padding, None, extend_bounds=extend_bounds, padding_kwargs=padding_kwargs) lr = stack([l, r], batch('_reduce')) result = reduce_fun(lr, '_reduce') return resultComputes the sum/mean/min/max/prod/etc. of two neighboring values along each dimension in
`dim`. The result tensor has one entry less than `grid` in each averaged dimension unless `padding` is specified.

With two `dims`, computes the mean of 4 values, in 3D, the mean of 8 values.

Args
    reduce_fun: Reduction function, such as `sum_()`, `mean()`, `max_()`, `min_()`, `prod()`.
    grid: Values to reduce.
    dims: Dimensions along which neighbors should be reduced.
    padding: Padding at the upper edges of `grid` along `dims`. If not `None`, the result tensor will have the same shape as `grid`.
    padding_kwargs: Additional keyword arguments to be passed to `pad()`.

Returns
    `Tensor`
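An illustrative 1D sketch: summing each pair of neighbors shortens the dim by one entry unless `padding` is specified:

    >>> from phiml import math
    >>> from phiml.math import wrap, spatial
    >>> values = wrap([1., 2., 4.], spatial('x'))
    >>> math.neighbor_reduce(math.sum, values, 'x')   # pairwise sums (3, 6), x has size 2
    >>> math.neighbor_reduce(math.sum, values, 'x', padding=0)   # padding keeps x at size 3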
def neighbor_sum(grid: phiml.math._tensors.Tensor,
dims: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function spatial>,
padding: Extrapolation | float | phiml.math._tensors.Tensor | str | None = None,
extend_bounds=0) ‑> phiml.math._tensors.Tensor
def neighbor_sum(grid: Tensor, dims: DimFilter = spatial, padding: Union[Extrapolation, float, Tensor, str, None] = None, extend_bounds=0) -> Tensor: """`neighbor_reduce` with `reduce_fun` set to `phiml.math.sum`.""" return neighbor_reduce(math.sum_, grid, dims, padding, extend_bounds=extend_bounds)

`neighbor_reduce()` with `reduce_fun` set to `sum_()`.

def non_batch(obj) ‑> phiml.math._shape.Shape
def non_batch(obj) -> Shape: """ Returns the non-batch dimensions of an object. Args: obj: `Shape` or object with a valid `shape` property. Returns: `Shape` """ return shape(obj).non_batch

Returns the non-batch dimensions of an object.

def non_channel(obj) ‑> phiml.math._shape.Shape
def non_channel(obj) -> Shape: """ Returns the non-channel dimensions of an object. Args: obj: `Shape` or object with a valid `shape` property. Returns: `Shape` """ return shape(obj).non_channel

Returns the non-channel dimensions of an object.

def non_dual(obj) ‑> phiml.math._shape.Shape
def non_dual(obj) -> Shape: """ Returns the non-dual dimensions of an object. Args: obj: `Shape` or object with a valid `shape` property. Returns: `Shape` """ return shape(obj).non_dual

Returns the non-dual dimensions of an object.

def non_instance(obj) ‑> phiml.math._shape.Shape
def non_instance(obj) -> Shape: """ Returns the non-instance dimensions of an object. Args: obj: `Shape` or object with a valid `shape` property. Returns: `Shape` """ return shape(obj).non_instance

Returns the non-instance dimensions of an object.

def non_primal(obj) ‑> phiml.math._shape.Shape
def non_primal(obj) -> Shape: """ Returns the batch and dual dimensions of an object. Args: obj: `Shape` or object with a valid `shape` property. Returns: `Shape` """ return shape(obj).non_primal

Returns the batch and dual dimensions of an object.

def non_spatial(obj) ‑> phiml.math._shape.Shape
def non_spatial(obj) -> Shape: """ Returns the non-spatial dimensions of an object. Args: obj: `Shape` or object with a valid `shape` property. Returns: `Shape` """ return shape(obj).non_spatial

Returns the non-spatial dimensions of an object.

def nonzero(value: phiml.math._tensors.Tensor | bool,
list_dim: str | phiml.math._shape.Shape | int = (nonzeroⁱ),
index_dim: phiml.math._shape.Shape = (vectorᶜ),
element_dims: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function channel>,
list_dims: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function non_batch>,
preserve_names=False)
def nonzero(value: Union[Tensor, bool], list_dim: Union[Shape, str, int] = instance('nonzero'), index_dim: Shape = channel('vector'), element_dims: DimFilter = channel, list_dims: DimFilter = non_batch, preserve_names=False): """ Get spatial indices of non-zero / True values. Batch dimensions are preserved by this operation. If channel dimensions are present, this method returns the indices where any component is nonzero. Implementations: * NumPy: [`numpy.argwhere`](https://numpy.org/doc/stable/reference/generated/numpy.argwhere.html) * PyTorch: [`torch.nonzero`](https://pytorch.org/docs/stable/generated/torch.nonzero.html) * TensorFlow: [`tf.where(tf.not_equal(values, 0))`](https://www.tensorflow.org/api_docs/python/tf/where) * Jax: [`jax.numpy.nonzero`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.nonzero.html) Args: value: spatial tensor to find non-zero / True values in. list_dim: Dimension listing non-zero values. If size specified, lists only the first `size` non-zero values. Special case: For retrieving only the first non-zero value, you may pass `1` instead of a `Shape` of size 1. index_dim: Index dimension. element_dims: Dims listing components of one value. A value is only considered `zero` if all components are 0. list_dims: Dims in which non-zero elements are searched. These will be stored in the labels of `index_dim`. Returns: `Tensor` of shape (batch dims..., `list_dim`=#non-zero, `index_dim`=value.shape.spatial_rank) """ element_dims = value.shape.only(element_dims) if element_dims: value = sum_(abs(value), element_dims) list_dims = value.shape.only(list_dims) - element_dims if isinstance(list_dim, str): list_dim = auto(list_dim, instance) cutoff = list_dim if isinstance(list_dim, int) else list_dim.size if isinstance(list_dim, int) and list_dim == 1: list_dim = EMPTY_SHAPE elif isinstance(list_dim, int): assert list_dims.rank == 1 list_dim = list_dims.without_sizes() broadcast = value.shape - list_dims - sparse_matrix_dims(value) def unbatched_nonzero(value: Tensor): if isinstance(value, CompressedSparseMatrix): if cutoff is not None and (sparse_dims(value) - list_dims) in value._compressed_dims: v0 = value._pointers[:-1] vs = v0 + arange(instance(_offset=cutoff)) col = value._indices[vs] return ipack(rename_dims(col, '_offset', list_dim), value._compressed_dims) else: value = value.decompress() elif isinstance(value, CompactSparseTensor): if list_dims in value._compressed_dims and value._uncompressed_dims not in list_dims: result = value._indices if result.shape.only(value._compressed_dims).volume == cutoff: return result else: return result[{value._compressed_dims: slice(cutoff)}] else: raise NotImplementedError if isinstance(value, SparseCoordinateTensor) and cutoff is None: assert cutoff is None, f"Cut-off Not implemented for sparse tensors" nonzero_values = nonzero(value._values) nonzero_indices = value._indices[nonzero_values] index_dim_ = index_dim.with_size(channel(value._indices).labels[0]) return rename_dims(rename_dims(nonzero_indices, instance, list_dim), channel, index_dim_) elif isinstance(value, SparseCoordinateTensor): # value = value.compress(sparse_dims(value) - list_dims) raise NotImplementedError else: native = value._reshaped_native([*value.shape]) b = choose_backend(native) indices = b.nonzero(native) if cutoff is not None: indices = indices[:cutoff, :] new_list_dim = list_dim if preserve_names and list_dims.rank == 1 and list_dims.labels[0]: names = [list_dims.labels[0][i] for i in indices[:, 0]] new_list_dim = 
new_list_dim.with_size(names) return reshaped_tensor(indices, [new_list_dim, index_dim.with_size(value.shape.name_list)], convert=False) return broadcast_op(unbatched_nonzero, [value], iter_dims=broadcast.names)Get spatial indices of non-zero / True values.
Batch dimensions are preserved by this operation. If channel dimensions are present, this method returns the indices where any component is nonzero.
Implementations:

* NumPy: [`numpy.argwhere`](https://numpy.org/doc/stable/reference/generated/numpy.argwhere.html)
* PyTorch: [`torch.nonzero`](https://pytorch.org/docs/stable/generated/torch.nonzero.html)
* TensorFlow: [`tf.where(tf.not_equal(values, 0))`](https://www.tensorflow.org/api_docs/python/tf/where)
* Jax: [`jax.numpy.nonzero`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.nonzero.html)

Args
    value: Spatial tensor to find non-zero / True values in.
    list_dim: Dimension listing non-zero values. If a size is specified, lists only the first `size` non-zero values. Special case: For retrieving only the first non-zero value, you may pass `1` instead of a `Shape` of size 1.
    index_dim: Index dimension.
    element_dims: Dims listing components of one value. A value is only considered zero if all components are 0.
    list_dims: Dims in which non-zero elements are searched. These will be stored in the labels of `index_dim`.

Returns
    `Tensor` of shape (batch dims..., `list_dim`=#non-zero, `index_dim`=value.shape.spatial_rank)
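An illustrative 1D call; the non-zero entries sit at x=1 and x=3 and are listed along the instance dim `nonzero`:

    >>> from phiml import math
    >>> from phiml.math import wrap, spatial
    >>> math.nonzero(wrap([0, 1, 0, 2], spatial('x')))   # lists indices 1 and 3 along 'nonzero'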
def norm(vec: phiml.math._tensors.Tensor,
vec_dim: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function channel>,
eps: float | phiml.math._tensors.Tensor = None)
def norm(vec: Tensor, vec_dim: DimFilter = channel, eps: Union[float, Tensor] = None): """ Computes the vector norm (L2 norm) of `vec` defined as √∑v². Args: eps: Minimum valid vector length. Use to avoid `inf` gradients for zero-norm vectors. Lengths shorter than `eps` are set to 0. """ if vec.dtype.kind == complex: vec = stack([vec.real, vec.imag], channel('_ReIm')) squared = math.sum_(vec ** 2, dim=vec_dim) if eps is not None: squared = math.maximum(squared, eps) return math.where(squared < eps**2, 0, math.sqrt(squared)) return math.sqrt(squared)Computes the vector norm (L2 norm) of
`vec` defined as √∑v².

Args
    eps: Minimum valid vector length. Use to avoid `inf` gradients for zero-norm vectors. Lengths shorter than `eps` are set to 0.
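For example, the 3-4-5 right triangle (illustrative):

    >>> from phiml import math
    >>> from phiml.math import wrap, channel
    >>> math.norm(wrap([3., 4.], channel('vector')))   # 5.0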
def normalize(vec: phiml.math._tensors.Tensor,
vec_dim: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function channel>,
epsilon=None,
allow_infinite=False,
allow_zero=False)
def normalize(vec: Tensor, vec_dim: DimFilter = channel, epsilon=None, allow_infinite=False, allow_zero=False): """ Normalizes the vectors in `vec`. If `vec_dim` is None, the combined channel dimensions of `vec` are interpreted as a vector. Args: vec: `Tensor` to normalize. vec_dim: Dimensions to normalize over. By default, all channel dimensions are used to compute the vector length. epsilon: (Optional) Zero-length threshold. Vectors shorter than this length yield the unit vector (1, 0, 0, ...). If not specified, the zero-vector yields `NaN` as it cannot be normalized. allow_infinite: Allow infinite components in vectors. These vectors will then only points towards the infinite components. allow_zero: Whether to return zero vectors for inputs smaller `epsilon` instead of a unit vector. """ vec_dim = vec.shape.only(vec_dim) if allow_infinite: # replace inf by 1, finite by 0 is_infinite = ~math.is_finite(vec) inf_mask = is_infinite & ~math.is_nan(vec) vec = math.where(math.any_(is_infinite, vec_dim), inf_mask, vec) if epsilon is None: return vec / norm(vec, vec_dim=vec_dim) le = norm(vec, vec_dim=vec_dim, eps=epsilon**2 * .99) unit_vec = 0 if allow_zero else stack([1] + [0] * (vec_dim.volume - 1), vec_dim) return math.where(abs(le) <= epsilon, unit_vec, vec / le)Normalizes the vectors in
`vec`. If `vec_dim` is None, the combined channel dimensions of `vec` are interpreted as a vector.

Args
    vec: `Tensor` to normalize.
    vec_dim: Dimensions to normalize over. By default, all channel dimensions are used to compute the vector length.
    epsilon: (Optional) Zero-length threshold. Vectors shorter than this length yield the unit vector (1, 0, 0, ...). If not specified, the zero-vector yields `NaN` as it cannot be normalized.
    allow_infinite: Allow infinite components in vectors. These vectors will then only point towards the infinite components.
    allow_zero: Whether to return zero vectors for inputs smaller than `epsilon` instead of a unit vector.
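An illustrative sketch: normalizing rescales a vector to unit length, and `epsilon` controls the fallback for (near-)zero inputs:

    >>> from phiml import math
    >>> from phiml.math import wrap, channel
    >>> math.normalize(wrap([3., 4.], channel('vector')))   # (0.6, 0.8)
    >>> math.normalize(wrap([0., 0.], channel('vector')), epsilon=1e-5)   # yields the unit vector (1, 0)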
def normalize_to(target: phiml.math._tensors.Tensor,
source: float | phiml.math._tensors.Tensor,
epsilon=1e-05)
def normalize_to(target: Tensor, source: Union[float, Tensor], epsilon=1e-5): """ Multiplies the target so that its sum matches the source. Args: target: `Tensor` source: `Tensor` or constant epsilon: Small number to prevent division by zero. Returns: Normalized tensor of the same shape as target """ target_total = math.sum_(target) denominator = math.maximum(target_total, epsilon) if epsilon is not None else target_total source_total = math.sum_(source) return target * (source_total / denominator)

Multiplies the target so that its sum matches the source.

Args
    target: `Tensor`
    source: `Tensor` or constant
    epsilon: Small number to prevent division by zero.

Returns
    Normalized tensor of the same shape as target

def numpy(value: phiml.math._tensors.Tensor | numbers.Number | tuple | list | Any)
def numpy_(value: Union[Tensor, Number, tuple, list, Any]): """ Converts `value` to a `numpy.ndarray` where value must be a `Tensor`, backend tensor or tensor-like. If `value` is a `phiml.math.Tensor`, this is equal to calling `phiml.math.Tensor.numpy()`. *Note*: Using this function breaks the autograd chain. The returned tensor is not differentiable. To get a differentiable tensor, use `Tensor.native()` instead. Transposes the underlying tensor to match the name order and adds singleton dimensions for new dimension names. If a dimension of the tensor is not listed in `order`, a `ValueError` is raised. If `value` is a NumPy array, it may be returned directly. Returns: NumPy representation of `value` Raises: ValueError if the tensor cannot be transposed to match target_shape """ if isinstance(value, Tensor): return value.numpy() else: backend = choose_backend(value) return backend.numpy(value)Converts
`value` to a `numpy.ndarray` where `value` must be a `Tensor`, backend tensor or tensor-like. If `value` is a `Tensor`, this is equal to calling `Tensor.numpy()`.

*Note*: Using this function breaks the autograd chain. The returned tensor is not differentiable. To get a differentiable tensor, use `Tensor.native()` instead.

Transposes the underlying tensor to match the name order and adds singleton dimensions for new dimension names. If a dimension of the tensor is not listed in `order`, a `ValueError` is raised.

If `value` is a NumPy array, it may be returned directly.

Returns
    NumPy representation of `value`

Raises
    `ValueError` if the tensor cannot be transposed to match `target_shape`
def object_dims(value)
def object_dims(value): """For composite types, returns the dims along which objects are arranged, excluding numeric tensor dims.""" if isinstance(value, Layout): return value._stack_dim return EMPTY_SHAPE

For composite types, returns the dims along which objects are arranged, excluding numeric tensor dims.
def ones(*shape: phiml.math._shape.Shape,
dtype: phiml.backend._dtype.DType | tuple | type = builtins.float) ‑> phiml.math._tensors.Tensor
def ones(*shape: Shape, dtype: Union[DType, tuple, type] = float) -> Tensor: """ Define a tensor with specified shape with value `1.0`/ `1` / `True` everywhere. This method may not immediately allocate the memory to store the values. See Also: `ones_like()`, `zeros()`. Args: *shape: This (possibly empty) sequence of `Shape`s is concatenated, preserving the order. dtype: Data type as `DType` object. Defaults to `float` matching the current precision setting. Returns: `Tensor` """ uinit = lambda shape: expand_tensor(Dense(default_backend().ones((), dtype=DType.as_dtype(dtype)), (), EMPTY_SHAPE, default_backend()), shape) return _initialize(uinit, shape, dtype, ones, {})Define a tensor with specified shape with value
`1.0` / `1` / `True` everywhere.

This method may not immediately allocate the memory to store the values.

See Also: `ones_like()`, `zeros()`.

Args
    *shape: This (possibly empty) sequence of `Shape`s is concatenated, preserving the order.
    dtype: Data type as `DType` object. Defaults to `float` matching the current precision setting.

Returns
    `Tensor`
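For instance (illustrative, matching the repr conventions shown elsewhere on this page):

    >>> from phiml import math
    >>> from phiml.math import spatial
    >>> math.ones(spatial(x=2, y=2))
    (xˢ=2, yˢ=2) const 1.0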
def ones_like(value: phiml.math._tensors.Tensor) ‑> phiml.math._tensors.Tensor
def ones_like(value: Tensor) -> Tensor: """ Create a `Tensor` containing only `1.0` / `1` / `True` with the same shape and dtype as `obj`. """ return zeros_like(value) + 1Create a
`Tensor` containing only `1.0` / `1` / `True` with the same shape and dtype as `value`.

def p2d(value: ~PhiTreeNodeType) ‑> ~PhiTreeNodeType
def p2d(value: PhiTreeNodeType) -> PhiTreeNodeType: """ Change the type of all *primal* dims (instance, spatial, channel) of `value` to *dual* dimensions. See `rename_dims`. """ return rename_dims(value, primal, dual)Change the type of all primal dims (instance, spatial, channel) of
`value` to *dual* dimensions. See `rename_dims()`.

def pack_dims(value,
dims: str | Sequence | set | phiml.math._shape.Shape | Callable | None,
packed_dim: str | phiml.math._shape.Shape,
pos: int | None = None,
**kwargs)
def pack_dims(value, dims: DimFilter, packed_dim: Union[Shape, str], pos: Optional[int] = None, **kwargs): """ Compresses multiple dims into a single dimension by concatenating the elements. Elements along the new dims are laid out according to the order of `dims`. If the order of `dims` differs from the current dimension order, the tensor is transposed accordingly. This function replaces the traditional `reshape` for these cases. The type of the new dimension will be equal to the types of `dims`. If `dims` have varying types, the new dimension will be a batch dimension. If none of `dims` exist on `value`, `packed_dim` will be added only if it is given with a definite size and `value` is not a primitive type. See Also: `unpack_dim()` Args: value: `phiml.math.magic.Shapable`, such as `phiml.math.Tensor`. dims: Dimensions to be compressed in the specified order. packed_dim: Single-dimension `Shape`. pos: Index of new dimension. `None` for automatic, `-1` for last, `0` for first. **kwargs: Additional keyword arguments required by specific implementations. Adding spatial dims to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions. Adding batch dims must always work without keyword arguments. Returns: Same type as `value`. Examples: >>> pack_dims(math.zeros(spatial(x=4, y=3)), spatial, instance('points')) (pointsⁱ=12) const 0.0 """ if isinstance(value, (Number, bool)): return value if DEBUG_CHECKS: assert isinstance(value, Shapable) and isinstance(value, Sliceable) and isinstance(value, Shaped), f"value must be Shapable but got {type(value)}" packed_dim = auto(packed_dim, dims if callable(dims) else None) if isinstance(packed_dim, str) else packed_dim dims = shape(value).only(dims, reorder=True) if packed_dim in shape(value): assert packed_dim in dims, f"Cannot pack dims into new dimension {packed_dim} because it already exists on value {value} and is not packed." if len(dims) == 0 or all(dim not in shape(value) for dim in dims): return value if packed_dim.size is None else expand(value, packed_dim, **kwargs) # Inserting size=1 can cause shape errors elif len(dims) == 1 and packed_dim.rank == 1: return rename_dims(value, dims, packed_dim, **kwargs) elif len(dims) == 1 and packed_dim.rank > 1: return unpack_dim(value, dims, packed_dim, **kwargs) # --- First try __pack_dims__ --- if hasattr(value, '__pack_dims__'): result = value.__pack_dims__(dims, packed_dim, pos, **kwargs) if result is not NotImplemented: return result # --- Next try Tree Node --- if isinstance(value, PhiTreeNode): return tree_map(pack_dims, value, attr_type=all_attributes, dims=dims, packed_dim=packed_dim, pos=pos, **kwargs) # --- Fallback: unstack and stack --- if shape(value).only(dims).volume > 8: warnings.warn(f"pack_dims() default implementation is slow on large dims ({shape(value).only(dims)}). Please implement __pack_dims__() for {type(value).__name__} as defined in phiml.math.magic", RuntimeWarning, stacklevel=2) return stack(unstack(value, dims), packed_dim, **kwargs)Compresses multiple dims into a single dimension by concatenating the elements. Elements along the new dims are laid out according to the order of
`dims`. If the order of `dims` differs from the current dimension order, the tensor is transposed accordingly. This function replaces the traditional `reshape` for these cases.

The type of the new dimension will be equal to the types of `dims`. If `dims` have varying types, the new dimension will be a batch dimension.

If none of `dims` exist on `value`, `packed_dim` will be added only if it is given with a definite size and `value` is not a primitive type.

See Also: `unpack_dim()`

Args
    value: `Shapable`, such as `Tensor`.
    dims: Dimensions to be compressed in the specified order.
    packed_dim: Single-dimension `Shape`.
    pos: Index of new dimension. `None` for automatic, `-1` for last, `0` for first.
    **kwargs: Additional keyword arguments required by specific implementations. Adding spatial dims to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions. Adding batch dims must always work without keyword arguments.

Returns
    Same type as `value`.

Examples
    >>> pack_dims(math.zeros(spatial(x=4, y=3)), spatial, instance('points'))
    (pointsⁱ=12) const 0.0

def pad(value: phiml.math._tensors.Tensor,
widths: dict | tuple | list,
mode: ForwardRef('e_.Extrapolation') | phiml.math._tensors.Tensor | numbers.Number | str | dict = 0,
**kwargs) ‑> phiml.math._tensors.Tensor
def pad(value: Tensor, widths: Union[dict, tuple, list], mode: Union['e_.Extrapolation', Tensor, Number, str, dict] = 0, **kwargs) -> Tensor: """ Pads a tensor along the specified dimensions, determining the added values using the given extrapolation. Unlike `Extrapolation.pad()`, this function can handle negative widths which slice off outer values. Args: value: `Tensor` to be padded widths: Number of values to add at the edge of `value`. Negative values can be used to slice off edge values. Must be one of the following: * `tuple` containing `(lower: int, upper: int)`. This will pad all non-batch dimensions by `lower` and `upper` at the lower and upper edge, respectively. * `dict` mapping `dim: str -> (lower: int, upper: int)` * Sequence of slicing `dict`s. This will add all values specified by the slicing dicts and is the inverse operation to `slice_off`. Exactly one value in each slicing dict must be a `slice` object. mode: Padding mode used to determine values added from positive `widths`. Must be one of the following: `Extrapolation`, `Tensor` or number for constant extrapolation, name of extrapolation as `str`. kwargs: Additional padding arguments. These are ignored by the standard extrapolations defined in `phiml.math.extrapolation` but can be used to pass additional contextual information to custom extrapolations. Returns: Padded `Tensor` Examples: >>> math.pad(math.ones(spatial(x=10, y=10)), {'x': (1, 1), 'y': (2, 1)}, 0) (xˢ=12, yˢ=13) 0.641 ± 0.480 (0e+00...1e+00) >>> math.pad(math.ones(spatial(x=10, y=10)), {'x': (1, -1)}, 0) (xˢ=10, yˢ=10) 0.900 ± 0.300 (0e+00...1e+00) """ mode = e_.as_extrapolation(mode) if isinstance(widths, (tuple, list)): if len(widths) == 0 or isinstance(widths[0], dict): # add sliced-off slices return _pad_slices(value, widths, mode, **kwargs) if len(widths) == 2 and isinstance(widths[0], int) and isinstance(widths[1], int): # (lower, upper) assert non_batch(value).rank == 1, f"Can only pad 1D tensors (excluding batch dims) when widths=(lower, upper) but got {shape(value)} and widths={widths}" widths = {non_batch(value).name: widths} else: # ((lo0, up0), (lo1, up1), ...) assert len(widths) == non_batch(value), f"Cannot pad tensor with non-batch dims {non_batch(value)} by widths {widths}. Sizes must match." warnings.warn("Padding by sequence of (lower, upper) is not recommended. Please use a dict instead.", SyntaxWarning, stacklevel=2) widths = {dim: w for dim, w in zip(non_batch(value).names, widths)} if isinstance(widths, dict): for k_old, v in dict(widths).items(): k = k_old if callable(k): k = k(value) if isinstance(k, Shape): k = k.names if isinstance(k, tuple): del widths[k_old] for k_ in k: widths[k_] = v has_negative_widths = any(w0 < 0 or w1 < 0 for w0, w1 in widths.values()) has_positive_widths = any(w0 > 0 or w1 > 0 for w0, w1 in widths.values()) slices = None if has_negative_widths: slices = {dim: slice(max(0, -w[0]), min(0, w[1]) or None) for dim, w in widths.items()} widths = {dim: (max(0, w[0]), max(0, w[1])) for dim, w in widths.items()} result_padded = mode.pad(value, widths, **kwargs) if has_positive_widths else value result_sliced = result_padded[slices] if has_negative_widths else result_padded return result_slicedPads a tensor along the specified dimensions, determining the added values using the given extrapolation. Unlike
Extrapolation.pad(), this function can handle negative widths which slice off outer values.Args
valueTensorto be paddedwidths-
Number of values to add at the edge of
value. Negative values can be used to slice off edge values. Must be one of the following:tuplecontaining(lower: int, upper: int). This will pad all non-batch dimensions bylowerandupperat the lower and upper edge, respectively.dictmappingdim: str -> (lower: int, upper: int)- Sequence of slicing
dicts. This will add all values specified by the slicing dicts and is the inverse operation toslice_off(). Exactly one value in each slicing dict must be aslice_()object.
mode- Padding mode used to determine values added from positive
widths. Must be one of the following:Extrapolation,Tensoror number for constant extrapolation, name of extrapolation asstr. kwargs- Additional padding arguments.
These are ignored by the standard extrapolations defined in
phiml.math.extrapolationbut can be used to pass additional contextual information to custom extrapolations.
Returns
Padded
TensorExamples
>>> math.pad(math.ones(spatial(x=10, y=10)), {'x': (1, 1), 'y': (2, 1)}, 0) (xˢ=12, yˢ=13) 0.641 ± 0.480 (0e+00...1e+00)>>> math.pad(math.ones(spatial(x=10, y=10)), {'x': (1, -1)}, 0) (xˢ=10, yˢ=10) 0.900 ± 0.300 (0e+00...1e+00) def pad_to_uniform(x: phiml.math._tensors.Tensor,
def pad_to_uniform(x: phiml.math._tensors.Tensor,
        target_shape: phiml.math._shape.Shape = EMPTY_SHAPE,
        pad_value=0)

Pads a possibly non-uniform tensor to the smallest uniform shape that fits all elements.

Args
    x: Value to pad. Can be uniform or non-uniform.
    target_shape: Override size along any dims in order to add more padding.
    pad_value: Value to insert at the end of padded dims.

Returns
    Uniform `Tensor`.
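Usage sketch (not part of the original docs; the output comment is illustrative, assuming a uniform input tensor):
    >>> x = math.ones(spatial(x=3, y=3))
    >>> math.pad_to_uniform(x, spatial(x=5, y=3))  # pads dim x from 3 to 5 with pad_value=0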
def pairwise_differences(positions: phiml.math._tensors.Tensor,
        max_distance: float | phiml.math._tensors.Tensor = None,
        format: str | phiml.math._tensors.Tensor = 'dense',
        domain: Tuple[phiml.math._tensors.Tensor, phiml.math._tensors.Tensor] | None = None,
        periodic: phiml.math._tensors.Tensor | bool = False,
        method: str = 'auto',
        default: float = nan,
        avg_neighbors=8.0) -> phiml.math._tensors.Tensor

Computes the distance matrix containing the pairwise position differences between each pair of points. The matrix will consist of the channel and batch dimensions of `positions` and the primal dimensions plus their dual counterparts, spanning the matrix. Points that are further apart than `max_distance` (if specified) are assigned an invalid value given by `default`. The diagonal of the matrix (self-distance) consists purely of zero-vectors and is always stored explicitly. The neighbors of the positions are listed along the dual dimension(s) of the matrix, and vectors point *towards* the neighbors.

This function can operate in *dense* mode or *sparse* mode, depending on `format`. If `format == 'dense'` or a dense `Tensor`, all possible pair-wise distances are considered and a full-rank tensor is returned. The value of `method` is ignored in that case. Otherwise, if `format` is a sparse format identifier or sparse `Tensor`, only a subset of distances is considered, depending on `method`. In this case, the result is a sparse matrix with the same dimensions as the dense tensor would have had.

**JIT behavior:** This function can be JIT compiled with all backends. However, as the exact number of neighbors is unknown beforehand, all sparse methods rely on a variable-size buffer. PyTorch and TensorFlow allow variable shapes and behave the same way with JIT compilation as without. JAX, however, requires all tensor shapes to be known beforehand. This function will guess the required buffer size based on `avg_neighbors` and track the actually required sizes. When using `phiml.math.jit_compile`, this will automatically trigger a re-tracing when a buffer overflow is detected. Users calling `jax.jit` manually must retrieve these sizes from the buffer API and implement buffer overflow handling.

Args
    positions: `Tensor`. Channel dimensions are interpreted as position components. Instance and spatial dimensions list nodes.
    max_distance: Scalar or `Tensor` specifying a max_radius for each point separately. Can contain additional batch dimensions but spatial/instance dimensions must match `positions` if present. If not specified, uses an infinite cutoff radius, i.e. all points will be considered neighbors.
    format: Matrix format as `str` or concrete sparsity pattern as `Tensor`. Allowed strings are `'dense'`, `'sparse'`, `'csr'`, `'coo'`, `'csc'`. When a `Tensor` is passed, it needs to have all instance and spatial dims as `positions` as well as corresponding dual dimensions. The distances will be evaluated at all stored entries of the `format` tensor.
    domain: Lower and upper corner of the bounding box. All positions must lie within this box. This must be specified to use with periodic boundaries.
    periodic: Which domain boundaries should be treated as periodic, i.e. particles on opposite sides are neighbors. Can be specified as a `bool` for all sides or as a vector-valued boolean `Tensor` to specify periodicity by direction.
    default: Value for distances greater than `max_distance`. Only for dense distance matrices.
    method: Neighbor search algorithm; only used if `format` is a sparse format or `Tensor`. The default, `'auto'`, lets the runtime decide on the best method. Supported methods:
        * `'sparse'`: GPU-supported hash grid implementation with fully sparse connectivity.
        * `'scipy-kd'`: SciPy's kd-tree implementation (https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.KDTree.query_ball_point.html#scipy.spatial.KDTree.query_ball_point).
    avg_neighbors: Expected average number of neighbors. This is only relevant for hash grid searches, where it influences the default buffer sizes.

Returns
    Distance matrix as sparse or dense `Tensor`, depending on `format`. For each spatial/instance dimension in `positions`, the matrix also contains a dual dimension of the same name and size. The matrix also contains all batch dimensions of `positions` and the channel dimension of `positions`.

Examples
    >>> pos = vec(x=0, y=tensor([0, 1, 2.5], instance('particles')))
    >>> dx = pairwise_differences(pos, format='dense', max_distance=2)
    >>> dx.particles[0]
    (x=0.000, y=0.000); (x=0.000, y=1.000); (x=0.000, y=0.000) (~particlesᵈ=3, vectorᶜ=x,y)
def pairwise_distances(positions: phiml.math._tensors.Tensor,
        max_distance: float | phiml.math._tensors.Tensor = None,
        format: str | phiml.math._tensors.Tensor = 'dense',
        domain: Tuple[phiml.math._tensors.Tensor, phiml.math._tensors.Tensor] | None = None,
        periodic: phiml.math._tensors.Tensor | bool = False,
        method: str = 'auto',
        default: float = nan,
        avg_neighbors=8.0) -> phiml.math._tensors.Tensor

Alias for `pairwise_differences()`; see above.
def perf_counter(wait_for_tensor, *wait_for_tensors: phiml.math._tensors.Tensor) -> phiml.math._tensors.Tensor

Get the time (`time.perf_counter()`) at which all `wait_for_tensors` are computed. If all tensors are already available, returns the current `time.perf_counter()`.

Args
    wait_for_tensor: `Tensor` that needs to be computed before the time is measured.
    *wait_for_tensors: Additional tensors that need to be computed before the time is measured.

Returns
    Time at which all `wait_for_tensors` are ready, as a scalar `Tensor`.
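Timing sketch (not from the original docs; assumes eager execution with the default backend):
    >>> x = math.random_normal(spatial(x=1000))
    >>> t_start = math.perf_counter(x)       # time after x is computed
    >>> y = math.exp(x)
    >>> duration = math.perf_counter(y) - t_start  # scalar Tensor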
def precision(floating_point_bits: int)

Sets the floating point precision for the local context. Usage: `with precision(p):`

This overrides the global setting, see `set_global_precision()`.

Args
    floating_point_bits: 16 for half, 32 for single, 64 for double
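For example (a minimal sketch; tensors created inside the context use the selected precision):
    >>> with math.precision(64):
    ...     x = math.ones(spatial(x=4))  # created as a 64-bit float tensor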
def primal(obj) -> phiml.math._shape.Shape

Returns the instance, spatial and channel dimensions of an object.

Args
    obj: `Shape` or object with a valid `shape` property.

Returns
    `Shape`
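A short illustration (not from the original docs; the shape in the comment is illustrative):
    >>> t = math.zeros(batch(b=2), spatial(x=4), channel(vector='x,y'))
    >>> math.primal(t)  # spatial and channel dims only: (xˢ=4, vectorᶜ=x,y)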
def print(obj: phiml.math._tensors.Tensor | PhiTreeNode | numbers.Number | tuple | list | None = None,
        name: str = '')

Print a tensor with no more than two spatial dimensions, slicing it along all batch and channel dimensions. Unlike NumPy's array printing, the dimensions are sorted. Elements along the alphabetically first dimension are printed to the right, the second dimension upward. Typically, this means x right, y up.

Args
    obj: tensor-like
    name: name of the tensor
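Usage sketch (not from the original docs; prints the full tensor with the layout described above):
    >>> math.print(math.ones(spatial(x=3, y=2)), name='ones')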
def print_gradient(value: phiml.math._tensors.Tensor, name='', detailed=False) -> phiml.math._tensors.Tensor

Prints the gradient vector of `value` when computed. The gradient at `value` is the vector-Jacobian product of all operations between the output of this function and the loss value.

The gradient is not printed in jit mode, see `jit_compile()`.

Example
    def f(x):
        x = math.print_gradient(x, 'dx')
        return math.l1_loss(x)

    math.jacobian(f)(math.ones(x=6))

Args
    value: `Tensor` for which the gradient may be computed later.
    name: (Optional) Name to print along with the gradient values.
    detailed: If `False`, prints a short summary of the gradient tensor.

Returns
    `identity(value)` which, when differentiated, prints the gradient vector.
def prod(value,
        dim: str | Sequence | set | phiml.math._shape.Shape | Callable | None = non_batch) -> phiml.math._tensors.Tensor

Multiplies `value` along the specified dimensions.

Args
    value: `Tensor` or `list` / `tuple` of Tensors.
    dim: Dimension or dimensions to be reduced. One of
        * `None` to reduce all non-batch dimensions
        * `str` containing single dimension or comma-separated list of dimensions
        * `Tuple[str]` or `List[str]`
        * `Shape`
        * `batch`, `instance`, `spatial`, `channel` to select dimensions by type
        * `'0'` when `isinstance(value, (tuple, list))` to multiply the sequence of Tensors

Returns
    `Tensor` without the reduced dimensions.
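A quick sketch (not from the original docs; the result in the comment is illustrative):
    >>> math.prod(math.tensor([1, 2, 3, 4], spatial('x')))  # → 24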
def quantile(value: phiml.math._tensors.Tensor,
        quantiles: float | phiml.math._tensors.Tensor | tuple | list,
        dim: str | Sequence | set | phiml.math._shape.Shape | Callable | None = non_batch)

Compute the q-th quantile of `value` along `dim` for each q in `quantiles`.

Implementations:
    * NumPy: `quantile` (https://numpy.org/doc/stable/reference/generated/numpy.quantile.html)
    * PyTorch: `quantile` (https://pytorch.org/docs/stable/generated/torch.quantile.html#torch.quantile)
    * TensorFlow: `tfp.stats.percentile` (https://www.tensorflow.org/probability/api_docs/python/tfp/stats/percentile)
    * Jax: `quantile` (https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.quantile.html)

Args
    value: `Tensor`
    quantiles: Single quantile or tensor of quantiles to compute. Must be of type `float`, `tuple`, `list` or `Tensor`.
    dim: Dimension or dimensions to be reduced. One of
        * `None` to reduce all non-batch dimensions
        * `str` containing single dimension or comma-separated list of dimensions
        * `Tuple[str]` or `List[str]`
        * `Shape`
        * `batch`, `instance`, `spatial`, `channel` to select dimensions by type
        * `'0'` when `isinstance(value, (tuple, list))` to reduce the sequence of Tensors

Returns
    `Tensor` with dimensions of `quantiles` and non-reduced dimensions of `value`.
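A minimal sketch (not from the original docs; the median value in the comment assumes linear interpolation, as in NumPy's default):
    >>> math.quantile(math.tensor([0., 1., 2., 3.], spatial('x')), 0.5)  # → 1.5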
def radians_to_degrees(rad: ~TensorOrTree) -> ~TensorOrTree

Converts radians to degrees.
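For illustration (not from the original docs; the result in the comment is illustrative):
    >>> math.radians_to_degrees(math.wrap(math.PI))  # → 180.0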
def rand(*shape: phiml.math._shape.Shape,
        low: float | phiml.math._tensors.Tensor = 0,
        high: float | phiml.math._tensors.Tensor = 1,
        dtype: phiml.backend._dtype.DType | tuple | type = builtins.float) -> phiml.math._tensors.Tensor

Alias for `random_uniform()`; see below.
def randn(*shape: phiml.math._shape.Shape,
        dtype: phiml.backend._dtype.DType | tuple | type = builtins.float) -> phiml.math._tensors.Tensor

Alias for `random_normal()`; see below.
def random_normal(*shape: phiml.math._shape.Shape,
        dtype: phiml.backend._dtype.DType | tuple | type = builtins.float) -> phiml.math._tensors.Tensor

Creates a `Tensor` with the specified shape, filled with random values sampled from a normal / Gaussian distribution.

Implementations:
    * NumPy: `numpy.random.standard_normal` (https://numpy.org/doc/stable/reference/random/generated/numpy.random.standard_normal.html)
    * PyTorch: `torch.randn` (https://pytorch.org/docs/stable/generated/torch.randn.html)
    * TensorFlow: `tf.random.normal` (https://www.tensorflow.org/api_docs/python/tf/random/normal)
    * Jax: `jax.random.normal` (https://jax.readthedocs.io/en/latest/_autosummary/jax.random.normal.html)

Args
    *shape: This (possibly empty) sequence of `Shape`s is concatenated, preserving the order.
    dtype: (optional) floating point `DType`. If `None`, a float tensor with the current default precision is created, see `get_precision()`.

Returns
    `Tensor`
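Usage sketch (not from the original docs; the shape in the comment is illustrative):
    >>> math.random_normal(batch(b=2), spatial(x=3))  # float tensor with dims (bᵇ=2, xˢ=3)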
def random_uniform(*shape: phiml.math._shape.Shape,
        low: float | phiml.math._tensors.Tensor = 0,
        high: float | phiml.math._tensors.Tensor = 1,
        dtype: phiml.backend._dtype.DType | tuple | type = builtins.float) -> phiml.math._tensors.Tensor

Creates a `Tensor` with the specified shape, filled with random values sampled from a uniform distribution.

Args
    *shape: This (possibly empty) sequence of `Shape`s is concatenated, preserving the order.
    dtype: (optional) `DType` or `(kind, bits)`. The dtype kind must be one of `float`, `int`, `complex`. If not specified, a `float` tensor with the current default precision is created, see `get_precision()`.
    low: Minimum value, included.
    high: Maximum value, excluded.

Returns
    `Tensor`
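Usage sketch (not from the original docs):
    >>> math.random_uniform(spatial(x=4), low=-1, high=1)  # values sampled from [-1, 1)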
def range(dim: phiml.math._shape.Shape,
        start_or_stop: int | None = None,
        stop: int | None = None,
        step=1,
        backend=None) -> phiml.math._tensors.Tensor[int]

Alias for `arange()`. Returns evenly spaced values between `start` and `stop`. If only one limit is given, `0` is used for the start.

See Also: `range_tensor()`, `linspace()`, `meshgrid()`.

Args
    dim: Dimension name and type as `Shape` object. The `size` of `dim` is interpreted as `stop` unless `start_or_stop` is specified.
    start_or_stop: (Optional) `int`. Interpreted as `start` if `stop` is specified as well. Otherwise this is `stop`.
    stop: (Optional) `int`. `stop` value.
    step: Distance between values.
    backend: Backend to use for creating the tensor. If unspecified, uses the current default.

Returns
    `Tensor`
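Minimal sketches (not from the original docs; values in the comments are illustrative):
    >>> math.arange(spatial(x=4))           # 0, 1, 2, 3 along dim x
    >>> math.arange(spatial('x'), 2, 8, 2)  # 2, 4, 6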
def range_tensor(*shape: phiml.math._shape.Shape) -> phiml.math._tensors.Tensor[int]

Returns a `Tensor` with given `shape` containing the linear indices of each element. For 1D tensors, this is equivalent to `arange()` with `step=1`.

See Also: `arange()`, `meshgrid()`.

Args
    shape: Tensor shape.

Returns
    `Tensor`
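For illustration (not from the original docs):
    >>> math.range_tensor(spatial(x=2, y=2))  # linear indices 0..3, arranged in an (xˢ=2, yˢ=2) tensor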
def ravel_index(index: phiml.math._tensors.Tensor,
        resolution: phiml.math._shape.Shape,
        dim=channel,
        mode='undefined') -> phiml.math._tensors.Tensor

Computes a scalar index from a vector index.

Args
    index: `Tensor` with one channel dim.
    resolution: `Shape`
    mode: `'undefined'`, `'periodic'`, `'clamp'` or an `int` to use for all invalid indices.

Returns
    `Tensor`
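A hedged sketch (not from the original docs; the flat index in the comment assumes row-major ordering over the resolution):
    >>> math.ravel_index(vec(x=1, y=2), spatial(x=4, y=4))  # → 1 * 4 + 2 = 6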
def real(x: ~TensorOrTree) -> ~TensorOrTree

See Also: `imag()`, `conjugate()`.

Args
    x: `Tensor` or `phiml.math.magic.PhiTreeNode` or native tensor.

Returns
    Real component of `x`.
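For illustration (not from the original docs; the result in the comment is illustrative):
    >>> math.real(math.wrap(1 + 2j))  # → 1.0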
dims: str | Sequence | set | phiml.math._shape.Shape | Callable | None,
names: str | Sequence | set | phiml.math._shape.Shape | Callable | None,
**kwargs) ‑> ~PhiTreeNodeType-
Expand source code
def rename_dims(value: PhiTreeNodeType, dims: DimFilter, names: DimFilter, **kwargs) -> PhiTreeNodeType: """ Change the name and optionally the type of some dims of `value`. Dimensions that are not present on value will be ignored. The corresponding new dims given by `names` will not be added. Args: value: `Shape` or `Tensor` or `Shapable`. dims: Existing dims of `value` as comma-separated `str`, `tuple`, `list`, `Shape` or filter function. names: Either * Sequence of names matching `dims` as `tuple`, `list` or `str`. This replaces only the dimension names but leaves the types untouched. * `Shape` matching `dims` to replace names and types. * Dimension type function to replace only types. **kwargs: Additional keyword arguments required by specific implementations. Adding spatial dims to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions. Adding batch dims must always work without keyword arguments. Returns: Same type as `value`. """ if isinstance(value, SHAPE_TYPES): return replace_dims(value, dims, names) elif isinstance(value, (Number, bool)): return value if DEBUG_CHECKS: assert isinstance(value, Shapable) and isinstance(value, Shaped), f"value must be a Shape or Shapable but got {type(value).__name__}" old_dims, new_dims = _shape_replace(shape(value), dims, names) if not new_dims: return value if new_dims.names == old_dims.names and new_dims == old_dims: return value # --- First try __replace_dims__ --- if hasattr(value, '__replace_dims__'): result = value.__replace_dims__(old_dims.names, new_dims, **kwargs) if result is not NotImplemented: return result # --- Next try Tree Node --- if isinstance(value, PhiTreeNode): return tree_map(rename_dims, value, all_attributes, treat_layout_as_leaf=True, dims=old_dims, names=new_dims, **kwargs) # --- Fallback: unstack and stack --- if shape(value).only(old_dims).volume > 8: warnings.warn(f"rename_dims() default implementation is slow on large dims ({old_dims}). Please implement __replace_dims__() for {type(value).__name__} as defined in phiml.math.magic", RuntimeWarning, stacklevel=2) for old_name, new_dim in zip(old_dims.names, new_dims): value = stack(unstack(value, old_name), new_dim, **kwargs) return valueChange the name and optionally the type of some dims of
`value`. Dimensions that are not present on `value` will be ignored; the corresponding new dims given by `names` will not be added.

Args
- value: `Shape` or `Tensor` or `Shapable`.
- dims: Existing dims of `value` as comma-separated `str`, `tuple`, `list`, `Shape` or filter function.
- names: Either
  - a sequence of names matching `dims` as `tuple`, `list` or `str`. This replaces only the dimension names but leaves the types untouched.
  - a `Shape` matching `dims` to replace names and types.
  - a dimension type function to replace only types.
- **kwargs: Additional keyword arguments required by specific implementations. Adding spatial dims to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions. Adding batch dims must always work without keyword arguments.

Returns
Same type as `value`.
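Example: a minimal sketch of renaming vs. retyping, assuming `random_normal`, `rename_dims`, `spatial` and `instance` from `phiml.math` (dim names illustrative):

>>> t = random_normal(spatial(x=4, y=3))
>>> renamed = rename_dims(t, 'x,y', 'u,v')       # rename only, dims stay spatial
>>> retyped = rename_dims(t, spatial, instance)  # retype all spatial dims as instance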
def replace(obj: ~PhiTreeNodeType, **updates) ‑> ~PhiTreeNodeType-
Expand source code
def replace(obj: PhiTreeNodeType, **updates) -> PhiTreeNodeType:
    """
    Creates a copy of the given `phiml.math.magic.PhiTreeNode` with updated values as specified in `updates`.

    If `obj` overrides `__with_attrs__`, the copy will be created via that specific implementation.
    Otherwise, the `copy` module and `setattr` will be used.

    Args:
        obj: `phiml.math.magic.PhiTreeNode`
        **updates: Values to be replaced.

    Returns:
        Copy of `obj` with updated values.
    """
    if isinstance(obj, (Number, bool)):
        return obj
    elif hasattr(obj, '__with_attrs__'):
        result = obj.__with_attrs__(**updates)
        if result is not NotImplemented:
            return result
    if dataclasses.is_dataclass(obj):
        return dataclasses.replace(obj, **updates)
    else:
        cpy = copy.copy(obj)
        for attr, value in updates.items():
            setattr(cpy, attr, value)
        return cpy
Creates a copy of the given `PhiTreeNode` with updated values as specified in `updates`.

If `obj` overrides `__with_attrs__`, the copy will be created via that specific implementation. Otherwise, the `copy` module and `setattr` will be used.

Args
- obj: `PhiTreeNode`
- **updates: Values to be replaced.

Returns
Copy of `obj` with updated values.
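Example: a minimal sketch with a hypothetical dataclass; for dataclasses, `replace()` delegates to `dataclasses.replace()`. Assumes `import dataclasses` and `Tensor`, `ones`, `random_normal`, `instance`, `channel`, `replace` from `phiml.math`:

>>> @dataclasses.dataclass
... class Particles:  # hypothetical tree node
...     pos: Tensor
...     mass: Tensor
>>> p = Particles(pos=random_normal(instance(points=10), channel(vector='x,y')),
...               mass=ones(instance(points=10)))
>>> heavier = replace(p, mass=p.mass * 2)  # copy with mass doubled, pos unchanged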
def replace_dims(value: ~PhiTreeNodeType,
dims: str | Sequence | set | phiml.math._shape.Shape | Callable | None,
names: str | Sequence | set | phiml.math._shape.Shape | Callable | None,
**kwargs) ‑> ~PhiTreeNodeType-
Expand source code
def rename_dims(value: PhiTreeNodeType, dims: DimFilter, names: DimFilter, **kwargs) -> PhiTreeNodeType: """ Change the name and optionally the type of some dims of `value`. Dimensions that are not present on value will be ignored. The corresponding new dims given by `names` will not be added. Args: value: `Shape` or `Tensor` or `Shapable`. dims: Existing dims of `value` as comma-separated `str`, `tuple`, `list`, `Shape` or filter function. names: Either * Sequence of names matching `dims` as `tuple`, `list` or `str`. This replaces only the dimension names but leaves the types untouched. * `Shape` matching `dims` to replace names and types. * Dimension type function to replace only types. **kwargs: Additional keyword arguments required by specific implementations. Adding spatial dims to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions. Adding batch dims must always work without keyword arguments. Returns: Same type as `value`. """ if isinstance(value, SHAPE_TYPES): return replace_dims(value, dims, names) elif isinstance(value, (Number, bool)): return value if DEBUG_CHECKS: assert isinstance(value, Shapable) and isinstance(value, Shaped), f"value must be a Shape or Shapable but got {type(value).__name__}" old_dims, new_dims = _shape_replace(shape(value), dims, names) if not new_dims: return value if new_dims.names == old_dims.names and new_dims == old_dims: return value # --- First try __replace_dims__ --- if hasattr(value, '__replace_dims__'): result = value.__replace_dims__(old_dims.names, new_dims, **kwargs) if result is not NotImplemented: return result # --- Next try Tree Node --- if isinstance(value, PhiTreeNode): return tree_map(rename_dims, value, all_attributes, treat_layout_as_leaf=True, dims=old_dims, names=new_dims, **kwargs) # --- Fallback: unstack and stack --- if shape(value).only(old_dims).volume > 8: warnings.warn(f"rename_dims() default implementation is slow on large dims ({old_dims}). Please implement __replace_dims__() for {type(value).__name__} as defined in phiml.math.magic", RuntimeWarning, stacklevel=2) for old_name, new_dim in zip(old_dims.names, new_dims): value = stack(unstack(value, old_name), new_dim, **kwargs) return valueChange the name and optionally the type of some dims of
`value`. Dimensions that are not present on `value` will be ignored; the corresponding new dims given by `names` will not be added.

`replace_dims` is an alias for `rename_dims()`; see there for the full description of `dims`, `names` and `**kwargs`.

Returns
Same type as `value`.
def reshaped_native(value: phiml.math._tensors.Tensor,
groups: tuple | list,
force_expand: Any = True,
to_numpy=False)-
Expand source code
def reshaped_native(value: Tensor, groups: Union[tuple, list], force_expand: Any = True, to_numpy=False): """ Returns a native representation of `value` where dimensions are laid out according to `groups`. See Also: `native()`, `pack_dims()`. Args: value: `Tensor` groups: `tuple` or `list` of dimensions to be packed into one native dimension. Each entry must be one of the following: * `str`: the name of one dimension that is present on `value`. * `Shape`: Dimensions to be packed. If `force_expand`, missing dimensions are first added, otherwise they are ignored. * Filter function: Packs all dimensions of this type that are present on `value`. * Ellipsis `...`: Packs all remaining dimensions into this slot. Can only be passed once. * `None` or `()`: Adds a singleton dimension. Collections of or comma-separated dims may also be used but only if all dims are present on `value`. force_expand: `bool` or sequence of dimensions. If `True`, repeats the tensor along missing dimensions. If `False`, puts singleton dimensions where possible. If a sequence of dimensions is provided, only forces the expansion for groups containing those dimensions. to_numpy: If True, converts the native tensor to a `numpy.ndarray`. Returns: Native tensor with dimensions matching `groups`. """ warnings.warn("phiml.math.reshaped_native() is deprecated. Use Tensor.native() instead.", DeprecationWarning, stacklevel=2) assert isinstance(value, Tensor), f"value must be a Tensor but got {value} {type(value)}" assert not value._is_tracer, f"Failed accessing native values because tensor {value.shape} is a tracer" return value.numpy(groups, force_expand=force_expand) if to_numpy else value.native(groups, force_expand=force_expand)Returns a native representation of
`value` where dimensions are laid out according to `groups`. Deprecated: use `Tensor.native()` instead.

See Also: `native()`, `pack_dims()`.

Args
- value: `Tensor`
- groups: `tuple` or `list` of dimensions to be packed into one native dimension. Each entry must be one of the following:
  - `str`: the name of one dimension that is present on `value`.
  - `Shape`: Dimensions to be packed. If `force_expand`, missing dimensions are first added, otherwise they are ignored.
  - Filter function: Packs all dimensions of this type that are present on `value`.
  - Ellipsis `...`: Packs all remaining dimensions into this slot. Can only be passed once.
  - `None` or `()`: Adds a singleton dimension.
  Collections of or comma-separated dims may also be used, but only if all dims are present on `value`.
- force_expand: `bool` or sequence of dimensions. If `True`, repeats the tensor along missing dimensions. If `False`, puts singleton dimensions where possible. If a sequence of dimensions is provided, only forces the expansion for groups containing those dimensions.
- to_numpy: If `True`, converts the native tensor to a `numpy.ndarray`.

Returns
Native tensor with dimensions matching `groups`.
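Example: since this function is deprecated, a minimal sketch of the equivalent `Tensor.native()` call, assuming `random_normal`, `batch`, `spatial`, `channel` from `phiml.math` (dim names illustrative):

>>> t = random_normal(batch(b=2), spatial(x=4, y=4), channel(vector='x,y'))
>>> nat = t.native([batch, spatial, 'vector'])  # native array of shape (2, 16, 2)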
def reshaped_numpy(value: phiml.math._tensors.Tensor,
groups: tuple | list,
force_expand: Any = True) ‑> numpy.ndarray-
Expand source code
def reshaped_numpy(value: Tensor, groups: Union[tuple, list], force_expand: Any = True) -> np.ndarray:
    """
    Returns the NumPy representation of `value` where dimensions are laid out according to `groups`.

    See Also:
        `numpy()`, `reshaped_native()`, `pack_dims()`, `reshaped_tensor()`.

    Args:
        value: `Tensor`
        groups: Sequence of dimension names as `str` or groups of dimensions to be packed as `Shape`.
        force_expand: `bool` or sequence of dimensions. If `True`, repeats the tensor along missing dimensions. If `False`, puts singleton dimensions where possible. If a sequence of dimensions is provided, only forces the expansion for groups containing those dimensions.

    Returns:
        NumPy `ndarray` with dimensions matching `groups`.
    """
    warnings.warn("phiml.math.reshaped_numpy() is deprecated. Use Tensor.numpy() instead.", DeprecationWarning, stacklevel=2)
    return reshaped_native(value, groups, force_expand=force_expand, to_numpy=True)
Returns the NumPy representation of `value` where dimensions are laid out according to `groups`. Deprecated: use `Tensor.numpy()` instead.

See Also: `numpy()`, `reshaped_native()`, `pack_dims()`, `reshaped_tensor()`.

Args
- value: `Tensor`
- groups: Sequence of dimension names as `str` or groups of dimensions to be packed as `Shape`.
- force_expand: `bool` or sequence of dimensions. If `True`, repeats the tensor along missing dimensions. If `False`, puts singleton dimensions where possible. If a sequence of dimensions is provided, only forces the expansion for groups containing those dimensions.

Returns
NumPy `ndarray` with dimensions matching `groups`.
def reshaped_tensor(value: Any,
groups: Sequence[phiml.math._shape.Shape | str],
check_sizes=False,
convert=True)-
Expand source code
def reshaped_tensor(value: Any, groups: Sequence[Union[Shape, str]], check_sizes=False, convert=True): """ Creates a `Tensor` from a native tensor or tensor-like whereby the dimensions of `value` are split according to `groups`. See Also: `phiml.math.tensor()`, `reshaped_native()`, `unpack_dim()`. Args: value: Native tensor or tensor-like. groups: Sequence of dimension groups to be packed_dim as `tuple[Shape]` or `list[Shape]`. check_sizes: If True, group sizes must match the sizes of `value` exactly. Otherwise, allows singleton dimensions. convert: If True, converts the data to the native format of the current default backend. If False, wraps the data in a `Tensor` but keeps the given data reference if possible. Returns: `Tensor` with all dimensions from `groups` """ v_shape = choose_backend(value).staticshape(value) groups = [g if isinstance(g, Shape) else (EMPTY_SHAPE if not g else auto(g)) for g in groups] dims = [batch(f'group{i}') if group.rank != 1 else (group if check_sizes else group.with_size(v_shape[i])) for i, group in enumerate(groups)] try: value = tensor(value, *dims, convert=convert) except IncompatibleShapes: raise IncompatibleShapes(f"Cannot reshape native tensor {type(value)} with sizes {value.shape} given groups {groups}") for i, group in enumerate(groups): if group.rank != 1: from ._magic_ops import unpack_dim if value.shape.get_size(f'group{i}') == group.volume: value = unpack_dim(value, f'group{i}', group) elif check_sizes: raise AssertionError(f"Group {group} does not match dimension {i} of value {value.shape}") else: value = unpack_dim(value, f'group{i}', group) return valueCreates a
`Tensor` from a native tensor or tensor-like, whereby the dimensions of `value` are split according to `groups`.

See Also: `tensor()`, `reshaped_native()`, `unpack_dim()`.

Args
- value: Native tensor or tensor-like.
- groups: Sequence of dimension groups to be packed, as `tuple[Shape]` or `list[Shape]`.
- check_sizes: If `True`, group sizes must match the sizes of `value` exactly. Otherwise, allows singleton dimensions.
- convert: If `True`, converts the data to the native format of the current default backend. If `False`, wraps the data in a `Tensor` but keeps the given data reference if possible.

Returns
`Tensor` with all dimensions from `groups`.
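Example: a minimal sketch wrapping a raw NumPy array, assuming `import numpy as np` and `reshaped_tensor`, `batch`, `spatial`, `channel` from `phiml.math` (sizes and dim names illustrative):

>>> raw = np.zeros((2, 16, 3))
>>> t = reshaped_tensor(raw, [batch('b'), spatial(x=4, y=4), channel(vector='x,y,z')])
>>> # the native axis of size 16 is unpacked into the spatial dims x=4, y=4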
def rotate_vector(vector: phiml.math._tensors.Tensor,
angle: float | phiml.math._tensors.Tensor | None,
invert=False,
dim='vector') ‑> phiml.math._tensors.Tensor-
Expand source code
def rotate_vector(vector: math.Tensor, angle: Optional[Union[float, math.Tensor]], invert=False, dim='vector') -> Tensor:
    """
    Rotates `vector` around the origin.

    Args:
        vector: n-dimensional vector with exactly one channel dimension
        angle: Euler angle(s) or rotation matrix. `None` is interpreted as no rotation.
        invert: Whether to apply the inverse rotation.

    Returns:
        Rotated vector as `Tensor`
    """
    warnings.warn("phiml.math.rotate_vector() is deprecated. Use PhiFlow's geometry functions instead.", DeprecationWarning)
    assert 'vector' in vector.shape, "vector must have exactly one channel dimension named 'vector'"
    if angle is None:
        return vector
    matrix = rotation_matrix(angle, matrix_dim=channel(vector))
    if invert:
        matrix = rename_dims(matrix, '~vector,vector', matrix.shape['vector'] + matrix.shape['~vector'])
    assert matrix.vector.dual.size == vector.vector.size, f"Rotation matrix from {shape(angle)} is {matrix.vector.dual.size}D but vector {vector.shape} is {vector.vector.size}D."
    dim = vector.shape.only(dim)
    return math.dot(matrix, dim.as_dual(), vector, dim)
Rotates `vector` around the origin. Deprecated: use PhiFlow's geometry functions instead.

Args
- vector: n-dimensional vector with exactly one channel dimension
- angle: Euler angle(s) or rotation matrix. `None` is interpreted as no rotation.
- invert: Whether to apply the inverse rotation.

Returns
Rotated vector as `Tensor`.
def rotation_matrix(x: float | phiml.math._tensors.Tensor | None, matrix_dim=(vectorᶜ)) ‑> phiml.math._tensors.Tensor | None-
Expand source code
def rotation_matrix(x: Union[float, math.Tensor, None], matrix_dim=channel('vector')) -> Optional[Tensor]: """ Create a 2D or 3D rotation matrix from the corresponding angle(s). Args: x: 2D: scalar angle 3D: Either vector pointing along the rotation axis with rotation angle as length or Euler angles. Euler angles need to be laid out along a `angle` channel dimension with dimension names listing the spatial dimensions. E.g. a 90° rotation about the z-axis is represented by `vec('angles', x=0, y=0, z=PI/2)`. If a rotation matrix is passed for `angle`, it is returned without modification. matrix_dim: Matrix dimension for 2D rotations. In 3D, the channel dimension of angle is used. Returns: Matrix containing `matrix_dim` in primal and dual form as well as all non-channel dimensions of `x`. """ warnings.warn("phiml.math.rotation_matrix() is deprecated. Use PhiFlow's geometry functions instead.", DeprecationWarning) if x is None: return None if isinstance(x, Tensor) and '~vector' in x.shape and 'vector' in x.shape.channel and x.shape.get_size('~vector') == x.shape.get_size('vector'): return x # already a rotation matrix elif 'angle' in shape(x) and shape(x).get_size('angle') == 3: # 3D Euler angles assert channel(x).rank == 1 and channel(x).size == 3, f"x for 3D rotations needs to be a 3-vector but got {x}" s1, s2, s3 = math.sin(x).angle # x, y, z c1, c2, c3 = math.cos(x).angle matrix_dim = matrix_dim.with_size(shape(x).get_item_names('angle')) return wrap([[c3 * c2, c3 * s2 * s1 - s3 * c1, c3 * s2 * c1 + s3 * s1], [s3 * c2, s3 * s2 * s1 + c3 * c1, s3 * s2 * c1 - c3 * s1], [-s2, c2 * s1, c2 * c1]], matrix_dim, matrix_dim.as_dual()) # Rz * Ry * Rx (1. rotate about X by first angle) elif 'vector' in shape(x) and shape(x).get_size('vector') == 3: # 3D axis + x angle = length(x) s, c = math.sin(angle), math.cos(angle) t = 1 - c k1, k2, k3 = normalize(x, epsilon=1e-12).vector matrix_dim = matrix_dim.with_size(shape(x).get_item_names('vector')) return wrap([[c + k1**2 * t, k1 * k2 * t - k3 * s, k1 * k3 * t + k2 * s], [k2 * k1 * t + k3 * s, c + k2**2 * t, k2 * k3 * t - k1 * s], [k3 * k1 * t - k2 * s, k3 * k2 * t + k1 * s, c + k3**2 * t]], matrix_dim, matrix_dim.as_dual()) else: # 2D rotation sin = wrap(math.sin(x)) cos = wrap(math.cos(x)) return wrap([[cos, -sin], [sin, cos]], matrix_dim, matrix_dim.as_dual())Create a 2D or 3D rotation matrix from the corresponding angle(s).
Deprecated: use PhiFlow's geometry functions instead.

Args
- x:
  - 2D: scalar angle.
  - 3D: Either a vector pointing along the rotation axis, with the rotation angle as its length, or Euler angles. Euler angles need to be laid out along an `angle` channel dimension with labels listing the spatial dimensions, e.g. a 90° rotation about the z-axis is represented by `vec('angle', x=0, y=0, z=PI/2)`. If a rotation matrix is passed for `x`, it is returned without modification.
- matrix_dim: Matrix dimension for 2D rotations. In 3D, the channel dimension of `x` is used.

Returns
Matrix containing `matrix_dim` in primal and dual form as well as all non-channel dimensions of `x`.
def round(x: ~TensorOrTree) ‑> ~TensorOrTree-
Expand source code
def round_(x: TensorOrTree) -> TensorOrTree:
    """ Rounds the `Tensor` or `phiml.math.magic.PhiTreeNode` `x` to the closest integer. """
    return _backend_op1(x, Backend.round, round_)
Rounds the `Tensor` or `PhiTreeNode` `x` to the closest integer.

def s2b(value: ~PhiTreeNodeType) ‑> ~PhiTreeNodeType-
Expand source code
def s2b(value: PhiTreeNodeType) -> PhiTreeNodeType:
    """ Change the type of all *spatial* dims of `value` to *batch* dimensions. See `rename_dims`. """
    return rename_dims(value, spatial, batch)
Change the type of all *spatial* dims of `value` to *batch* dimensions. See `rename_dims()`.

def safe_div(x: numbers.Number | phiml.math._tensors.Tensor,
y: numbers.Number | phiml.math._tensors.Tensor)-
Expand source code
def safe_div(x: Union[Number, Tensor], y: Union[Number, Tensor]):
    """ Computes *x/y* with the `Tensor`s `x` and `y` but returns 0 where *y=0*. """
    return custom_op2(x, y, xops.save_div)
Computes *x/y* with the `Tensor`s `x` and `y` but returns 0 where *y=0*.
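Example: a quick sketch of the zero-safe behavior, assuming `wrap`, `channel`, `safe_div` from `phiml.math` (dim names illustrative):

>>> x = wrap([1., 2., 3.], channel(c='a,b,c'))
>>> y = wrap([2., 0., 4.], channel(c='a,b,c'))
>>> result = safe_div(x, y)  # (0.5, 0, 0.75) along c; the division by zero yields 0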
def safe_mul(x: numbers.Number | phiml.math._tensors.Tensor,
y: numbers.Number | phiml.math._tensors.Tensor)-
Expand source code
def safe_mul(x: Union[Number, Tensor], y: Union[Number, Tensor]):
    """Multiplication for tensors with non-finite values. Computes *x·y* in the forward pass but drops gradient contributions from infinite and `NaN` values."""
    return _safe_mul(x, y)
Multiplication for tensors with non-finite values. Computes *x·y* in the forward pass but drops gradient contributions from infinite and `NaN` values.

def sample_subgrid(grid: phiml.math._tensors.Tensor,
start: phiml.math._tensors.Tensor,
size: phiml.math._shape.Shape) ‑> phiml.math._tensors.Tensor-
Expand source code
def sample_subgrid(grid: Tensor, start: Tensor, size: Shape) -> Tensor: """ Samples a sub-grid from `grid` with equal distance between sampling points. The values at the new sample points are determined via linear interpolation. Args: grid: `Tensor` to be resampled. Values are assumed to be sampled at cell centers. start: Origin point of sub-grid within `grid`, measured in number of cells. Must have a single dimension called `vector`. Example: `start=(1, 0.5)` would slice off the first grid point in dim 1 and take the mean of neighbouring points in dim 2. The order of dims must be equal to `size` and `grid.shape.spatial`. size: Resolution of the sub-grid. Must not be larger than the resolution of `grid`. The order of dims must be equal to `start` and `grid.shape.spatial`. Returns: Sub-grid as `Tensor` """ assert start.shape.names == ('vector',) assert grid.shape.spatial.names == size.names assert math.all_available(start), "Cannot perform sample_subgrid() during tracing, 'start' must be known." crop = {} for dim, d_start, d_size in zip(grid.shape.spatial.names, start, size.sizes): crop[dim] = slice(int(d_start), int(d_start) + d_size + (0 if d_start % 1 in (0, 1) else 1)) grid = grid[crop] upper_weight = start % 1 lower_weight = 1 - upper_weight for i, dim in enumerate(grid.shape.spatial.names): if upper_weight[i].native() not in (0, 1): lower, upper = shift(grid, (0, 1), [dim], padding=None, stack_dim=None) grid = upper * upper_weight[i] + lower * lower_weight[i] return gridSamples a sub-grid from
`grid` with equal distance between sampling points. The values at the new sample points are determined via linear interpolation.

Args
- grid: `Tensor` to be resampled. Values are assumed to be sampled at cell centers.
- start: Origin point of the sub-grid within `grid`, measured in number of cells. Must have a single dimension called `vector`. Example: `start=(1, 0.5)` would slice off the first grid point in dim 1 and take the mean of neighbouring points in dim 2. The order of dims must be equal to `size` and `grid.shape.spatial`.
- size: Resolution of the sub-grid. Must not be larger than the resolution of `grid`. The order of dims must be equal to `start` and `grid.shape.spatial`.

Returns
Sub-grid as `Tensor`.
def save(file: str | phiml.math._tensors.Tensor, obj: ~PhiTreeNodeType, mkdir=True)-
Expand source code
def save(file: Union[Tensor, str], obj: PhiTreeNodeType, mkdir=True): """ Saves a `Tensor` or tree using NumPy. This function converts all tensors contained in `obj` to NumPy tensors before storing. Each tensor is given a name corresponding to its path within `obj`, allowing reading only specific arrays from the file later on. Pickle is used for structures, but no reference to `Tensor` or its sub-classes is included. Examples: >>> B = batch(b=3) >>> files = -f-f"data/test_{arange(B)}.npz" >>> data = randn(B, spatial(x=10)) >>> save(files, data) # store 10 values per file >>> assert_close(data, load(files)) See Also: `load()`. Args: file: Either single file to read as `str` or a batch of files as a string `Tensor`. The file ending will be completed to `.npz`. When a batch of paths is provided, the data `obj` is sliced along the dims of `file` and broken up to be stored among the multiple files. For obtaining a batch of files, see `wrap()`, `phiml.os.listdir()`, `phiml.math.f`. obj: `Tensor` or tree to store. mkdir: Whether to create the file's directory if it doesn't exist. """ tree, tensors = disassemble_tree(obj, False, all_attributes) paths = attr_paths(obj, all_attributes, 'root') assert len(paths) == len(tensors) for idx in shape(file).meshgrid(): file_i = file[idx].native() if isinstance(file, Tensor) else file tensors_i = [t[idx] for t in tensors] if idx else tensors natives = [t._natives() for t in tensors_i] specs = [serialize_spec(t._spec_dict()) for t in tensors_i] native_paths = [[f'{p}:{i}' for i in range(len(ns))] for p, ns in zip(paths, natives)] all_natives = sum(natives, ()) all_paths = sum(native_paths, []) all_np = [choose_backend(n).numpy(n) for n in all_natives] if mkdir and os.path.dirname(file_i): os.makedirs(os.path.dirname(file_i), exist_ok=True) np.savez(file_i, tree=np.asarray({'tree': tree}, dtype=object), specs=specs, paths=paths, **{p: n for p, n in zip(all_paths, all_np)})Saves a
`Tensor` or tree using NumPy. This function converts all tensors contained in `obj` to NumPy tensors before storing. Each tensor is given a name corresponding to its path within `obj`, allowing reading only specific arrays from the file later on. Pickle is used for structures, but no reference to `Tensor` or its sub-classes is included.

Examples
>>> B = batch(b=3)
>>> files = -f-f"data/test_{arange(B)}.npz"
>>> data = randn(B, spatial(x=10))
>>> save(files, data)  # store 10 values per file
>>> assert_close(data, load(files))

See Also: `load()`.

Args
- file: Either a single file to read as `str` or a batch of files as a string `Tensor`. The file ending will be completed to `.npz`. When a batch of paths is provided, the data `obj` is sliced along the dims of `file` and broken up to be stored among the multiple files. For obtaining a batch of files, see `wrap()`, `phiml.os.listdir()`, `phiml.math.f`.
- obj: `Tensor` or tree to store.
- mkdir: Whether to create the file's directory if it doesn't exist.
def scatter(base_grid: phiml.math._tensors.Tensor | phiml.math._shape.Shape,
indices: phiml.math._tensors.Tensor | dict,
values: float | phiml.math._tensors.Tensor,
mode: str | Callable = 'update',
outside_handling: str = 'check',
indices_gradient=False,
default=None,
treat_as_batch=None,
pref_index_dim='index')-
Expand source code
def scatter(base_grid: Union[Tensor, Shape], indices: Union[Tensor, dict], values: Union[Tensor, float], mode: Union[str, Callable] = 'update', outside_handling: str = 'check', indices_gradient=False, default=None, treat_as_batch=None, pref_index_dim='index'): """ Scatters `values` into `base_grid` at `indices`. instance dimensions of `indices` and/or `values` are reduced during scattering. Depending on `mode`, this method has one of the following effects: * `mode='update'`: Replaces the values of `base_grid` at `indices` by `values`. The result is undefined if `indices` contains duplicates. * `mode='add'`: Adds `values` to `base_grid` at `indices`. The values corresponding to duplicate indices are accumulated. * `mode='mean'`: Replaces the values of `base_grid` at `indices` by the mean of all `values` with the same index. Implementations: * NumPy: Slice assignment / `numpy.add.at` * PyTorch: [`torch.scatter`](https://pytorch.org/docs/stable/generated/torch.scatter.html), [`torch.scatter_add`](https://pytorch.org/docs/stable/generated/torch.scatter_add.html) * TensorFlow: [`tf.tensor_scatter_nd_add`](https://www.tensorflow.org/api_docs/python/tf/tensor_scatter_nd_add), [`tf.tensor_scatter_nd_update`](https://www.tensorflow.org/api_docs/python/tf/tensor_scatter_nd_update) * Jax: [`jax.lax.scatter_add`](https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.scatter_add.html), [`jax.lax.scatter`](https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.scatter.html) See Also: `gather()`. Args: base_grid: `Tensor` into which `values` are scattered. indices: `Tensor` of n-dimensional indices at which to place `values`. Must have a single channel dimension with size matching the number of spatial dimensions of `base_grid`. This dimension is optional if the spatial rank is 1. Must also contain all `scatter_dims`. values: `Tensor` of values to scatter at `indices`. mode: Scatter mode as `str` or function. Supported modes are 'add', 'mean', 'update', 'max', 'min', 'prod', 'any', 'all'. The corresponding functions are the built-in `sum`, `max´, `min`, as well as the reduce functions in `phiml.math`. outside_handling: Defines how indices lying outside the bounds of `base_grid` are handled. * `'check'`: Raise an error if any index is out of bounds. * `'discard'`: Outside indices are ignored. * `'clamp'`: Outside indices are projected onto the closest point inside the grid. * `'undefined'`: All points are expected to lie inside the grid. Otherwise an error may be thrown or an undefined tensor may be returned. indices_gradient: Whether to allow the gradient of this operation to be backpropagated through `indices`. default: Default value to use for bins into which no value is scattered. By default, `NaN` is used for the modes `update` and `mean`, `0` for `sum`, `inf` for min and `-inf` for max. This will upgrade the data type to `float` if necessary. treat_as_batch: Dimensions which should be treated like dims by this operation. This can be used for scattering vectors along instance dims into a grid. Normally, instance dims on `values` and `indices` would not be matched to `base_grid` but when treated as batch, they will be. Returns: Copy of `base_grid` with updated values at `indices`. 
""" if callable(mode): mode = {sum: 'add', max: 'max', min: 'min', sum_: 'add', max_: 'max', min_: 'min', mean: 'mean', prod: 'prod', any_: 'any', all_: 'all'}[mode] if mode == 'prod': log_base_grid = log(base_grid) if isinstance(base_grid, Tensor) else base_grid log_default = None if default is None else log(default) log_result = scatter(log_base_grid, indices, log(values), 'add', outside_handling, indices_gradient, log_default) return exp(log_result) elif mode == 'any': b_base_grid = cast(base_grid, bool) if isinstance(base_grid, Tensor) else base_grid b_values = cast(values, bool) i_result = scatter(b_base_grid, indices, b_values, 'add', outside_handling, indices_gradient, False) return cast(i_result, bool) elif mode == 'all': not_base_grid = ~cast(base_grid, bool) if isinstance(base_grid, Tensor) else base_grid not_values = ~cast(values, bool) i_result = scatter(not_base_grid, indices, not_values, 'add', outside_handling, indices_gradient, False) return ~cast(i_result, bool) assert mode in ('update', 'add', 'mean', 'max', 'min'), f"Invalid scatter mode: '{mode}'" assert outside_handling in ('discard', 'clamp', 'undefined', 'check') assert isinstance(indices_gradient, bool) if isinstance(indices, dict): # update a slice if len(indices) == 1 and isinstance(next(iter(indices.values())), (str, int, slice)): # update a range dim, sel = next(iter(indices.items())) full_dim = base_grid.shape[dim] if isinstance(sel, str): sel = full_dim.labels[0].index(sel) if isinstance(sel, int): sel = slice(sel, sel+1) assert isinstance(sel, slice), f"Selection must be a str, int or slice but got {type(sel)}" values = expand(values, full_dim.after_gather({dim: sel})) parts = [ base_grid[{dim: slice(sel.start)}], values, base_grid[{dim: slice(sel.stop, None)}] ] return concat(parts, dim) else: raise NotImplementedError("scattering into non-continuous values not yet supported by dimension") grid_shape = base_grid if isinstance(base_grid, SHAPE_TYPES) else base_grid.shape values = wrap(values) # --- Determine index_dim, indexed_dim --- if channel(indices).rank > 1: assert pref_index_dim in channel(indices) index_dim = indices.shape[pref_index_dim] else: index_dim = channel(indices) if index_dim and index_dim.labels[0]: indexed_dims = index_dim.labels[0] assert indexed_dims in grid_shape, f"Scatter indices {indices.shape} point to missing dimensions in grid {grid_shape}" if indexed_dims != grid_shape.only(indexed_dims).names: indices = indices[{index_dim: grid_shape.only(indexed_dims).names}] indexed_dims = grid_shape.only(indexed_dims) else: indexed_dims = grid_shape.spatial or grid_shape.instance assert index_dim.rank == 1 or (grid_shape.spatial_rank + grid_shape.instance_rank == 1 and indices.shape.channel_rank == 0), f"indices must have a channel dimension listing the indexed dims {indexed_dims} but got {indices.shape}. You can create it via vec({', '.join([d+'=...' for d in indexed_dims.names])}) or channel(index='{','.join(indexed_dims.names)}'). If you have raveled indices, use unpack_dim(indices, channel, base_grid.shape['{','.join(indexed_dims.names)}'])." 
assert not index_dim or index_dim.size == indexed_dims.rank if not index_dim: index_dim = channel(_index=indexed_dims.name_list) indices = expand(indices, index_dim) # --- Determine batches --- batches = values.shape.non_channel.non_instance & indices.shape.non_channel.non_instance batches &= values.shape.only(treat_as_batch) & indices.shape.only(treat_as_batch) # --- Set up grid --- if isinstance(base_grid, SHAPE_TYPES): with backend_for(indices, values): base_grid = zeros(base_grid & batches & values.shape.channel, dtype=values.dtype) if default is not None: if base_grid.dtype.kind != bool or default: base_grid += default elif mode in ['update', 'mean']: base_grid += float('nan') elif mode == 'max': base_grid -= float('inf') elif mode == 'min': base_grid += float('inf') else: assert mode == 'add' # initialize with zeros # --- Handle outside indices --- limit = wrap(indexed_dims, index_dim) - 1 if outside_handling == 'check': from ._functional import when_available def check(indices): assert_close(True, (indices >= 0) & (indices < (limit+1))) when_available(check, indices) elif outside_handling == 'clamp': indices = clip(indices, 0, limit) elif outside_handling == 'discard': indices_linear = pack_dims(indices, instance, instance(_scatter_instance=1)) indices_inside = min_((round_(indices_linear) >= 0) & (round_(indices_linear) < wrap(indexed_dims, index_dim)), channel) indices_linear = boolean_mask(indices_linear, '_scatter_instance', indices_inside) if instance(values).rank > 0: values_linear = pack_dims(values, instance, instance(_scatter_instance=1)) values_linear = boolean_mask(values_linear, '_scatter_instance', indices_inside) values = unpack_dim(values_linear, '_scatter_instance', instance(values)) indices = unpack_dim(indices_linear, '_scatter_instance', instance(indices)) if indices.shape.is_non_uniform: raise NotImplementedError() broadcast = broadcast_dims(base_grid, indices, values) def scatter_forward(base_grid: Tensor, indices: Tensor, values: Tensor, indexed_dims=indexed_dims): indexed_dims = base_grid.shape[indexed_dims] - broadcast batches = (values.shape.non_instance & indices.shape.non_instance.non_channel & (indices.shape.instance.only(base_grid.shape.instance))) - indexed_dims - index_dim batches &= values.shape.only(treat_as_batch) & indices.shape.only(treat_as_batch) batches -= broadcast channels = (base_grid.shape & values.shape.channel) - indexed_dims - batches - broadcast lists = ((indices.shape - index_dim) & values.shape.non_channel) - batches - broadcast - channels if values._is_tracer: if indices._is_tracer or base_grid._is_tracer: raise NotImplementedError("scattering linear tracer into linear tracer not supported") return values._scatter(base_grid, indices, mode, index_dim, indexed_dims, batches, channels, lists) indices = to_int32(round_(indices)) backend = backend_for(indices, values, base_grid) native_grid = base_grid._reshaped_native([batches, *indexed_dims, channels]) if lists.undefined or lists.volume > 0: native_values = values._reshaped_native([batches, lists, channels]) native_indices = indices._reshaped_native([batches, lists, index_dim]) if mode != 'mean': native_result = backend.scatter(native_grid, native_indices, native_values, mode=mode) else: # mean zero_grid = backend.zeros_like(native_grid) summed = backend.scatter(zero_grid, native_indices, native_values, mode='add') count = backend.scatter(zero_grid, native_indices, backend.ones_like(native_values), mode='add') native_result = summed / backend.maximum(count, 1) native_result = 
backend.where(count == 0, native_grid, native_result) else: native_result = native_grid return reshaped_tensor(native_result, [batches, *indexed_dims, channels], check_sizes=True, convert=False) def scatter_backward(args: dict, _output, d_output): from ._nd import spatial_gradient values_grad = gather(d_output, args['indices']) spatial_gradient_indices = gather(spatial_gradient(d_output, dims=indexed_dims), args['indices']) indices_grad = mean(spatial_gradient_indices * args['values'], 'vector_') return None, indices_grad, values_grad from ._functional import custom_gradient scatter_function = custom_gradient(scatter_forward, scatter_backward) if indices_gradient else scatter_forward result = broadcast_op(scatter_function, [base_grid, indices, values], broadcast) return resultScatters
`values` into `base_grid` at `indices`. Instance dimensions of `indices` and/or `values` are reduced during scattering. Depending on `mode`, this method has one of the following effects:

- `mode='update'`: Replaces the values of `base_grid` at `indices` by `values`. The result is undefined if `indices` contains duplicates.
- `mode='add'`: Adds `values` to `base_grid` at `indices`. The values corresponding to duplicate indices are accumulated.
- `mode='mean'`: Replaces the values of `base_grid` at `indices` by the mean of all `values` with the same index.

Implementations:
- NumPy: Slice assignment / `numpy.add.at`
- PyTorch: `torch.scatter`, `torch.scatter_add`
- TensorFlow: `tf.tensor_scatter_nd_add`, `tf.tensor_scatter_nd_update`
- Jax: `jax.lax.scatter_add`, `jax.lax.scatter`

See Also: `gather()`.

Args
- base_grid: `Tensor` into which `values` are scattered.
- indices: `Tensor` of n-dimensional indices at which to place `values`. Must have a single channel dimension with size matching the number of spatial dimensions of `base_grid`. This dimension is optional if the spatial rank is 1. Must also contain all `scatter_dims`.
- values: `Tensor` of values to scatter at `indices`.
- mode: Scatter mode as `str` or function. Supported modes are `'add'`, `'mean'`, `'update'`, `'max'`, `'min'`, `'prod'`, `'any'`, `'all'`. The corresponding functions are the built-in `sum`, `max`, `min`, as well as the reduce functions in `phiml.math`.
- outside_handling: Defines how indices lying outside the bounds of `base_grid` are handled.
  - `'check'`: Raise an error if any index is out of bounds.
  - `'discard'`: Outside indices are ignored.
  - `'clamp'`: Outside indices are projected onto the closest point inside the grid.
  - `'undefined'`: All points are expected to lie inside the grid. Otherwise an error may be thrown or an undefined tensor may be returned.
- indices_gradient: Whether to allow the gradient of this operation to be backpropagated through `indices`.
- default: Default value to use for bins into which no value is scattered. By default, `NaN` is used for the modes `update` and `mean`, `0` for `sum`, `inf` for min and `-inf` for max. This will upgrade the data type to `float` if necessary.
- treat_as_batch: Dimensions which should be treated like batch dims by this operation. This can be used for scattering vectors along instance dims into a grid. Normally, instance dims on `values` and `indices` would not be matched to `base_grid` but when treated as batch, they will be.

Returns
Copy of `base_grid` with updated values at `indices`.
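Example: a minimal histogram-style sketch on a 1D grid, assuming `zeros`, `wrap`, `spatial`, `instance`, `scatter` from `phiml.math` (dim names illustrative):

>>> base = zeros(spatial(x=5))
>>> idx = wrap([0, 2, 2], instance(points=3))      # 1D grid, so the channel index dim is optional
>>> vals = wrap([1., 1., 1.], instance(points=3))
>>> counts = scatter(base, idx, vals, mode='add')  # duplicates accumulate: (1, 0, 2, 0, 0) along x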
def seed(seed: int)-
Expand source code
def seed(seed: int):
    """
    Sets the current seed of all backends and the built-in `random` package.

    Calling this function with a fixed value at the start of an application yields reproducible results
    as long as the same backend is used.

    Args:
        seed: Seed to use.
    """
    for backend in BACKENDS:
        backend.seed(seed)
    import random
    random.seed(seed)
Sets the current seed of all backends and the built-in `random` package.

Calling this function with a fixed value at the start of an application yields reproducible results as long as the same backend is used.

Args
- seed: Seed to use.
def set_global_precision(floating_point_bits: int)-
Expand source code
def set_global_precision(floating_point_bits: int):
    """
    Sets the floating point precision of DYNAMIC_BACKEND which affects all registered backends.

    If `floating_point_bits` is an integer, all floating point tensors created henceforth will be of the corresponding data type, float16, float32 or float64.
    Operations may also convert floating point values to this precision, even if the input had a different precision.

    If `floating_point_bits` is None, new tensors will default to float32 unless specified otherwise.
    The output of math operations has the same precision as its inputs.

    Args:
        floating_point_bits: one of (16, 32, 64, None)
    """
    _PRECISION[0] = floating_point_bits
Sets the floating point precision of DYNAMIC_BACKEND, which affects all registered backends.

If `floating_point_bits` is an integer, all floating point tensors created henceforth will be of the corresponding data type, float16, float32 or float64. Operations may also convert floating point values to this precision, even if the input had a different precision.

If `floating_point_bits` is `None`, new tensors will default to float32 unless specified otherwise. The output of math operations has the same precision as its inputs.

Args
- floating_point_bits: one of (16, 32, 64, None)
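Example: a short sketch, assuming `set_global_precision`, `to_float`, `wrap` and the `precision` context manager are imported from `phiml.math`:

>>> set_global_precision(64)     # new float tensors use float64 from here on
>>> with precision(32):
...     t32 = to_float(wrap(1))  # float32 inside the context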
def shape(obj, allow_unshaped=False) ‑> phiml.math._shape.Shape-
Expand source code
def shape(obj, allow_unshaped=False) -> Shape: """ If `obj` is a `Tensor` or `phiml.math.magic.Shaped`, returns its shape. If `obj` is a `Shape`, returns `obj`. This function can be passed as a `dim` argument to an operation to specify that it should act upon all dimensions. Args: obj: `Tensor` or `Shape` or `Shaped` allow_unshaped: If `True`, returns an empty shape for unsupported objects, else raises a `ValueError`. Returns: `Shape` """ if isinstance(obj, SHAPE_TYPES): return obj if hasattr(obj, 'shape') and isinstance(obj.shape, SHAPE_TYPES): return obj.shape if hasattr(obj, '__shape__'): return obj.__shape__() if isinstance(obj, (Number, bool)): return EMPTY_SHAPE if obj is None: return EMPTY_SHAPE if isinstance(obj, (tuple, list)) and all(isinstance(item, (int, float, complex, bool)) for item in obj): return channel(vector=len(obj)) from .magic import PhiTreeNode, Shaped, BoundDim if isinstance(obj, BoundDim): return shape(obj.obj)[obj.name] if isinstance(obj, (tuple, list)) and all(isinstance(item, (PhiTreeNode, Shaped)) for item in obj): return merge_shapes(*obj, allow_varying_sizes=True) if isinstance(obj, dict) and all(isinstance(item, (PhiTreeNode, Shaped)) for item in obj): return merge_shapes(*obj.values(), allow_varying_sizes=True) if isinstance(obj, PhiTreeNode): from ._magic_ops import all_attributes return merge_shapes(*[getattr(obj, a) for a in all_attributes(obj, assert_any=True)], allow_varying_sizes=True, allow_varying_labels=True) from ..backend import choose_backend, NoBackendFound try: backend = choose_backend(obj) shape_tuple = backend.staticshape(obj) if len(shape_tuple) == 0: return EMPTY_SHAPE elif len(shape_tuple) == 1: return channel('vector') else: raise ValueError(f"Cannot auto-complete shape of {backend} tensor with shape {shape_tuple}. Only 0D and 1D tensors have a Φ-ML shape by default.") except NoBackendFound: if allow_unshaped: return EMPTY_SHAPE raise ValueError(f'shape() requires Shaped or Shape argument but got {type(obj)}')If
`obj` is a `Tensor` or `Shaped`, returns its shape. If `obj` is a `Shape`, returns `obj`.

This function can be passed as a `dim` argument to an operation to specify that it should act upon all dimensions.

Args
- obj: `Tensor` or `Shape` or `Shaped`
- allow_unshaped: If `True`, returns an empty shape for unsupported objects, else raises a `ValueError`.

Returns
`Shape`
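Example: a minimal sketch, assuming `random_uniform`, `batch`, `spatial`, `shape` and phiml's `sum` from `phiml.math` (dim names illustrative):

>>> t = random_uniform(batch(b=2), spatial(x=4))
>>> s = shape(t)           # (bᵇ=2, xˢ=4)
>>> total = sum(t, shape)  # passing shape as the dim argument reduces all dims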
def shift(x: phiml.math._tensors.Tensor,
offsets: Sequence[int],
dims: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function spatial>,
padding: Extrapolation | float | phiml.math._tensors.Tensor | str | None = zero-gradient,
stack_dim: str | phiml.math._shape.Shape | None = (shiftᶜ),
extend_bounds: int | tuple = 0,
padding_kwargs: dict = None) ‑> List[phiml.math._tensors.Tensor]-
Expand source code
def shift(x: Tensor, offsets: Sequence[int], dims: DimFilter = math.spatial, padding: Union[Extrapolation, float, Tensor, str, None] = extrapolation.BOUNDARY, stack_dim: Union[Shape, str, None] = channel('shift'), extend_bounds: Union[tuple, int] = 0, padding_kwargs: dict = None) -> List[Tensor]: """ Shift the tensor `x` by a fixed offset, using `padding` for edge values. This is similar to `numpy.roll()` but with major differences: * Values shifted in from the boundary are defined by `padding`. * Positive offsets represent negative shifts. * Support for multi-dimensional shifts See Also: `index_shift`, `neighbor_reduce`. Args: x: Input grid-like `Tensor`. offsets: `tuple` listing shifts to compute, each must be an `int`. One `Tensor` will be returned for each entry. dims: Dimensions along which to shift, defaults to all *spatial* dims of `x`. padding: Padding to be performed at the boundary so that the shifted versions have the same size as `x`. Must be one of the following: `Extrapolation`, `Tensor` or number for constant extrapolation, name of extrapolation as `str`. Can be set to `None` to disable padding. Then the result tensors will be smaller than `x`. stack_dim: Dimension along which the components corresponding to each dim in `dims` should be stacked. This can be set to `None` only if `dims` is a single dimension. extend_bounds: Number of cells by which to pad the tensors in addition to the number required to maintain the size of `x`. Can only be used with a valid `padding`. padding_kwargs: Additional keyword arguments to be passed to `phiml.math.pad()`. Returns: `list` of shifted tensors. The number of return tensors is equal to the number of `offsets`. """ if dims is None: raise ValueError("dims=None is not supported anymore.") dims = x.shape.only(dims, reorder=True).names if stack_dim is None: assert len(dims) == 1 elif stack_dim.labels[0]: assert set(stack_dim.labels[0]) == set(dims), f"stack_dim labels {stack_dim.labels[0]} not compatible with shift dims {dims}." dims = stack_dim.labels[0] x = wrap(x) pad_lower = max(0, -min(offsets)) pad_upper = max(0, max(offsets)) extend_tuple = (extend_bounds,)*2 if isinstance(extend_bounds, int) else extend_bounds if padding is not None: x = math.pad(x, {axis: (pad_lower + extend_tuple[0], pad_upper + extend_tuple[1]) for axis in dims}, mode=padding, **(padding_kwargs or {})) if extend_bounds: assert padding is not None offset_tensors = [] for offset in offsets: components = {} for dimension in dims: if padding is not None: slices = {dim: slice(pad_lower + offset, (-pad_upper + offset) or None) if dim == dimension else slice(pad_lower, -pad_upper or None) for dim in dims} else: slices = {dim: slice(pad_lower + offset, (-pad_upper + offset) or None) if dim == dimension else slice(None, None) for dim in dims} components[dimension] = x[slices] offset_tensors.append(stack(components, stack_dim) if stack_dim is not None else next(iter(components.values()))) return offset_tensorsShift the tensor
`x` by a fixed offset, using `padding` for edge values. This is similar to `numpy.roll()` but with major differences:

- Values shifted in from the boundary are defined by `padding`.
- Positive offsets represent negative shifts.
- Support for multi-dimensional shifts.

See Also: `index_shift()`, `neighbor_reduce()`.

Args
- x: Input grid-like `Tensor`.
- offsets: `tuple` listing shifts to compute, each must be an `int`. One `Tensor` will be returned for each entry.
- dims: Dimensions along which to shift, defaults to all *spatial* dims of `x`.
- padding: Padding to be performed at the boundary so that the shifted versions have the same size as `x`. Must be one of the following: `Extrapolation`, `Tensor` or number for constant extrapolation, name of extrapolation as `str`. Can be set to `None` to disable padding. Then the result tensors will be smaller than `x`.
- stack_dim: Dimension along which the components corresponding to each dim in `dims` should be stacked. This can be set to `None` only if `dims` is a single dimension.
- extend_bounds: Number of cells by which to pad the tensors in addition to the number required to maintain the size of `x`. Can only be used with a valid `padding`.
- padding_kwargs: Additional keyword arguments to be passed to `pad()`.

Returns
`list` of shifted tensors. The number of returned tensors is equal to the number of `offsets`.
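Example: a minimal central-difference sketch built from two shifts, assuming `wrap`, `spatial`, `shift` from `phiml.math` (dim name illustrative):

>>> x = wrap([0., 1., 4., 9., 16.], spatial(u=5))
>>> left, right = shift(x, (-1, 1), 'u', padding='zero-gradient', stack_dim=None)
>>> central_diff = (right - left) / 2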
def si2d(value: ~PhiTreeNodeType) ‑> ~PhiTreeNodeType-
Expand source code
def si2d(value: PhiTreeNodeType) -> PhiTreeNodeType:
    """ Change the type of all *spatial* and *instance* dims of `value` to *dual* dimensions. See `rename_dims`. """
    return rename_dims(value, lambda s: s.non_channel.non_dual.non_batch, dual)
Change the type of all *spatial* and *instance* dims of `value` to *dual* dimensions. See `rename_dims()`.

def sigmoid(x: ~TensorOrTree) ‑> ~TensorOrTree-
Expand source code
def sigmoid(x: TensorOrTree) -> TensorOrTree:
    """ Computes the sigmoid function of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.sigmoid, sigmoid)
Computes the sigmoid function of the `Tensor` or `PhiTreeNode` `x`.

def sign(x: ~TensorOrTree) ‑> ~TensorOrTree-
Expand source code
def sign(x: TensorOrTree) -> TensorOrTree:
    """
    The sign of positive numbers is 1 and -1 for negative numbers. The sign of 0 is undefined.

    Args:
        x: `Tensor` or `phiml.math.magic.PhiTreeNode`

    Returns:
        `Tensor` or `phiml.math.magic.PhiTreeNode` matching `x`.
    """
    return _backend_op1(x, Backend.sign, sign)
The sign of positive numbers is 1 and that of negative numbers is -1. The sign of 0 is undefined.

Args
- x: `Tensor` or `PhiTreeNode`

Returns
`Tensor` or `PhiTreeNode` matching `x`.

def sin(x: ~TensorOrTree) ‑> ~TensorOrTree-
Expand source code
def sin(x: TensorOrTree) -> TensorOrTree:
    """ Computes *sin(x)* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.sin, sin)
Computes *sin(x)* of the `Tensor` or `PhiTreeNode` `x`.

def sinh(x: ~TensorOrTree) ‑> ~TensorOrTree-
Expand source code
def sinh(x: TensorOrTree) -> TensorOrTree:
    """ Computes *sinh(x)* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.sinh, sinh)
Computes *sinh(x)* of the `Tensor` or `PhiTreeNode` `x`.

def slice(value: ~PhiTreeNodeType,
slices: Dict[str, int | slice | str | tuple | list | Any] | Any) ‑> ~PhiTreeNodeType-
Expand source code
def slice_(value: PhiTreeNodeType, slices: Union[Dict[str, Union[int, slice, str, tuple, list, Any]], Any]) -> PhiTreeNodeType: """ Slices a `Tensor` or `phiml.math.magic.PhiTreeNode` along named dimensions. See Also: `unstack`. Args: value: `Tensor` or `phiml.math.magic.PhiTreeNode` or `Number` or `None`. slices: `dict` mapping dimension names to slices. A slice can be one of the following: * An index (`int`) * A range (`slice`) * An item name (`str`) * Multiple labels (comma-separated `str`) * Multiple indices or labels (`tuple` or `list`) Returns: `Tensor` or `phiml.math.magic.PhiTreeNode` of the same type as `value`. Examples: >>> math.slice([vec(x=0, y=1), vec(x=2, y=3)], {'vector': 'y'}) [1, 3] """ if slices is None: return value if isinstance(value, (bool, Number, str)) or value is None: return value if isinstance(value, tuple): return tuple([slice_(v, slices) for v in value]) if isinstance(value, list): return [slice_(v, slices) for v in value] if isinstance(value, dict): return {k: slice_(v, slices) for k, v in value.items()} if isinstance(value, SHAPE_TYPES): return value.after_gather(slices) if value is range: from ._tensors import Tensor if isinstance(slices, Tensor): return slices raise NotImplementedError("range only supported for index slicing") if hasattr(value, '__getitem__'): return value[slices] if isinstance(value, PhiTreeNode): attrs = {key: getattr(value, key) for key in all_attributes(value)} new_attrs = {k: slice_(v, slices) for k, v in attrs.items()} return copy_with(value, **new_attrs) raise ValueError(f"value must be a PhiTreeNode but got {type(value)}")Slices a
`Tensor` or `PhiTreeNode` along named dimensions.

See Also: `unstack()`.

Args
- value: `Tensor` or `PhiTreeNode` or `Number` or `None`.
- slices: `dict` mapping dimension names to slices. A slice can be one of the following:
  - An index (`int`)
  - A range (`slice`)
  - An item name (`str`)
  - Multiple labels (comma-separated `str`)
  - Multiple indices or labels (`tuple` or `list`)

Returns
`Tensor` or `PhiTreeNode` of the same type as `value`.

Examples
>>> math.slice([vec(x=0, y=1), vec(x=2, y=3)], {'vector': 'y'})
[1, 3]

def slice_off(x,
*slices: Dict[str, slice | int | str])-
Expand source code
def slice_off(x, *slices: Dict[str, Union[slice, int, str]]): """ Args: x: Any instance of `phiml.math.magic.Shapable` *slices: Returns: """ if not slices: return x x_shape = shape(x) def to_slices(s): if isinstance(s, Tensor): assert len(s.shape.channel) == 1, f"Indices tensors must have a single channel dim but got {s}" dims = s.shape.channel.labels[0] indices = s.numpy([..., channel]) slices = [{d: i for d, i in zip(dims, idx)} for idx in indices] return slices assert isinstance(s, dict), f"Not a valid slice: {s}" return [s] slices = sum([to_slices(s) for s in slices], []) dims = set().union(*[s.keys() for s in slices]) dims = x_shape.only(dims).names depth = max(len(s) for s in slices) if depth == 1: if len(dims) == 1: d = dims[0] if all(all(_edge_slice(x_shape, dim, s) for dim, s in s_dict.items()) for s_dict in slices): # only edges edge_slices = [_edge_slice(x_shape, dim, s) for s_dict in slices for dim, s in s_dict.items()] if any(s.start == 0 and s.stop is None for s in edge_slices): # everything sliced off return x[{d: slice(0, 0)}] start_slices = [s for s in edge_slices if s.start == 0] end_slices = [s for s in edge_slices if s.stop is None] start = max(s.stop for s in start_slices) if start_slices else 0 # at this point, s.stop must be an int end = min(s.start for s in end_slices) if end_slices else None return x[{d: slice(start, end)}] else: size = x_shape.get_size(d) mask = np.ones(size, dtype=np.bool_) for s_dict in slices: s = next(iter(s_dict.values())) if isinstance(s, str): names = x_shape.get_labels(d) s = [names.index(n.strip()) for n in s.split(',')] mask[s] = 0 return boolean_mask(x, d, wrap(mask, x_shape[d])) unstack_dim = x_shape.only(_preferred_unstack_dim(x, dims)) x_slices = unstack(x, unstack_dim) x_slices_out = [] for i, x_slice in enumerate(x_slices): slices_without_unstack_dim = [{k: v for k, v in s_dict.items() if k != unstack_dim.name} for s_dict in slices if _includes_slice(s_dict, unstack_dim, i)] sliced_x_slice = slice_off(x_slice, *slices_without_unstack_dim) x_slices_out.append(sliced_x_slice) assembled = stack(x_slices_out, unstack_dim) slices_for_unstack_dim_only = [s_dict for s_dict in slices if len(s_dict) == 1 and unstack_dim.name in s_dict] result = slice_off(assembled, *slices_for_unstack_dim_only) return result def soft_plus(x: ~TensorOrTree) ‑> ~TensorOrTree-
Expand source code
def soft_plus(x: TensorOrTree) -> TensorOrTree:
    """ Computes *softplus(x)* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.softplus, soft_plus)
Computes *softplus(x)* of the `Tensor` or `PhiTreeNode` `x`.

def softmax(x, reduce: str | Sequence | set | phiml.math._shape.Shape | Callable | None)-
Expand source code
def softmax(x, reduce: DimFilter):
    """Compute the softmax of `x` over any dimension. The softmax is e^x / ∑ e^x ."""
    e = exp(x)
    return e / sum_(e, reduce)
Computes the softmax of `x` over the dimensions given by `reduce`. The softmax is e^x / ∑ e^x.
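Example: a quick sketch, assuming `wrap`, `channel`, `softmax` from `phiml.math` (dim names illustrative):

>>> logits = wrap([1., 2., 3.], channel(classes='a,b,c'))
>>> probs = softmax(logits, 'classes')  # positive values that sum to 1 over 'classes'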
def solve_linear(f: Callable[[~X], ~Y] | phiml.math._tensors.Tensor,
y: ~Y,
solve: phiml.math._optimize.Solve[~X, ~Y],
*f_args,
grad_for_f=False,
f_kwargs: dict = None,
**f_kwargs_) ‑> ~X-
Expand source code
def solve_linear(f: Union[Callable[[X], Y], Tensor], y: Y, solve: Solve[X, Y], *f_args, grad_for_f=False, f_kwargs: dict = None, **f_kwargs_) -> X: """ Solves the system of linear equations *f(x) = y* and returns *x*. This method will use the solver specified in `solve`. The following method identifiers are supported by all backends: * `'auto'`: Automatically choose a solver * `'CG'`: Conjugate gradient, only for symmetric and positive definite matrices. * `'CG-adaptive'`: Conjugate gradient with adaptive step size, only for symmetric and positive definite matrices. * `'biCG'` or `'biCG-stab(0)'`: Biconjugate gradient * `'biCG-stab'` or `'biCG-stab(1)'`: Biconjugate gradient stabilized, first order * `'biCG-stab(2)'`, `'biCG-stab(4)'`, ...: Biconjugate gradient stabilized, second or higher order * `'scipy-direct'`: SciPy direct solve always run on the CPU using `scipy.sparse.linalg.spsolve`. * `'scipy-CG'`, `'scipy-GMres'`, `'scipy-biCG'`, `'scipy-biCG-stab'`, `'scipy-CGS'`, `'scipy-QMR'`, `'scipy-GCrotMK'`, `'scipy-lsqr'`: SciPy iterative solvers always run on the CPU, both in eager execution and JIT mode. For maximum performance, compile `f` using `jit_compile_linear()` beforehand. Then, an optimized representation of `f` (such as a sparse matrix) will be used to solve the linear system. **Caution:** The matrix construction may potentially be performed each time `solve_linear` is called if auxiliary arguments change. To prevent this, jit-compile the function that makes the call to `solve_linear`. To obtain additional information about the performed solve, perform the solve within a `SolveTape` context. The used implementation can be obtained as `SolveInfo.method`. The gradient of this operation will perform another linear solve with the parameters specified by `Solve.gradient_solve`. See Also: `solve_nonlinear()`, `jit_compile_linear()`. Args: f: One of the following: * Linear function with `Tensor` or `phiml.math.magic.PhiTreeNode` first parameter and return value. `f` can have additional auxiliary arguments and return auxiliary values. * Dense matrix (`Tensor` with at least one dual dimension) * Sparse matrix (Sparse `Tensor` with at least one dual dimension) * Native tensor (not yet supported) y: Desired output of `f(x)` as `Tensor` or `phiml.math.magic.PhiTreeNode`. solve: `Solve` object specifying optimization method, parameters and initial guess for `x`. *f_args: Positional arguments to be passed to `f` after `solve.x0`. These arguments will not be solved for. Supports vararg mode or pass all arguments as a `tuple`. f_kwargs: Additional keyword arguments to be passed to `f`. These arguments are treated as auxiliary arguments and can be of any type. Returns: x: solution of the linear system of equations `f(x) = y` as `Tensor` or `phiml.math.magic.PhiTreeNode`. Raises: NotConverged: If the desired accuracy was not be reached within the maximum number of iterations. Diverged: If the solve failed prematurely. 
""" assert solve.x0 is not None, "Please specify the initial guess as Solve(..., x0=initial_guess)" if solve.method == 'auto' and solve.rank_deficiency: solve = copy_with(solve, method='scipy-direct') # --- Handle parameters --- f_kwargs = f_kwargs or {} f_kwargs.update(f_kwargs_) f_args = f_args[0] if len(f_args) == 1 and isinstance(f_args[0], tuple) else f_args # --- Get input and output tensors --- y_tree, y_tensors = disassemble_tree(y, cache=False, attr_type=value_attributes) x0_tree, x0_tensors = disassemble_tree(solve.x0, cache=False, attr_type=variable_attributes) _, other_tensors = disassemble_tree(f_kwargs, cache=False, attr_type=variable_attributes) assert len(x0_tensors) == len(y_tensors) == 1, "Only single-tensor linear solves are currently supported" # --- If native tensors passed, return native tensor --- if isinstance(y_tree, str) and y_tree == NATIVE_TENSOR and isinstance(x0_tree, str) and x0_tree == NATIVE_TENSOR: if callable(f): # assume batch + 1 dim rank = y_tensors[0].rank assert x0_tensors[0].rank == rank, f"y and x0 must have the same rank but got {y_tensors[0].shape.sizes} for y and {x0_tensors[0].shape.sizes} for x0" if rank == 0: y = wrap(y) x0 = wrap(solve.x0) else: y = wrap(y, *[batch(f'batch{i}') for i in range(rank - 1)], channel('vector')) x0 = wrap(solve.x0, *[batch(f'batch{i}') for i in range(rank - 1)], channel('vector')) solve = copy_with(solve, x0=x0) solution = solve_linear(f, y, solve, *f_args, grad_for_f=grad_for_f, f_kwargs=f_kwargs, **f_kwargs_) return solution.native(','.join([f'batch{i}' for i in range(rank - 1)]) + ',vector') else: b = choose_backend(y, solve.x0, f) f_dims = b.staticshape(f) y_dims = b.staticshape(y) x_dims = b.staticshape(solve.x0) rank = len(f_dims) - 2 assert rank >= 0, f"f must be a matrix but got shape {f_dims}" f = wrap(f, *[batch(f'batch{i}') for i in range(rank - 1)], channel('vector'), dual('vector')) if len(x_dims) == len(f_dims): # matrix solve assert len(x_dims) == len(f_dims) assert x_dims[-2] == f_dims[-1] assert y_dims[-2] == f_dims[-2] y = wrap(y, *[batch(f'batch{i}') for i in range(rank - 1)], channel('vector'), batch('extra_batch')) x0 = wrap(solve.x0, *[batch(f'batch{i}') for i in range(rank - 1)], channel('vector'), batch('extra_batch')) solve = copy_with(solve, x0=x0) solution = solve_linear(f, y, solve, *f_args, grad_for_f=grad_for_f, f_kwargs=f_kwargs, **f_kwargs_) return solution.native(','.join([f'batch{i}' for i in range(rank - 1)]) + ',vector,extra_batch') else: assert len(x_dims) == len(f_dims) - 1 assert x_dims[-1] == f_dims[-1] assert y_dims[-1] == f_dims[-2] y = wrap(y, *[batch(f'batch{i}') for i in range(rank - 1)], channel('vector')) x0 = wrap(solve.x0, *[batch(f'batch{i}') for i in range(rank - 1)], channel('vector')) solve = copy_with(solve, x0=x0) solution = solve_linear(f, y, solve, *f_args, grad_for_f=grad_for_f, f_kwargs=f_kwargs, **f_kwargs_) return solution.native(','.join([f'batch{i}' for i in range(rank - 1)]) + ',vector') # --- PhiML Tensors --- backend = backend_for(*y_tensors, *x0_tensors, *other_tensors) prefer_explicit = backend.supports(Backend.sparse_coo_tensor) or backend.supports(Backend.csr_matrix) or grad_for_f if isinstance(f, Tensor) or (isinstance(f, LinearFunction) and prefer_explicit): # Matrix solve expand_x = expand_y = None if isinstance(f, LinearFunction): x0 = math.convert(solve.x0, backend) matrix, bias = f.sparse_matrix_and_bias(x0, *f_args, **f_kwargs) y_tensors = [y - bias for y in y_tensors] else: matrix = f bias = 0 if solve.rank_deficiency is None: deficiency 
= min_rank_deficiency(matrix) # None for unknown solve = copy_with(solve, rank_deficiency=deficiency) reduce_x = reduce_y = expand_x = expand_y = None if (wrap(solve.rank_deficiency) > 0).any: empty_rows = guaranteed_empty_rows(matrix) empty_cols = guaranteed_empty_cols(matrix) if isize(empty_rows) == isize(empty_cols) and isize(empty_rows) > 0: # We can eliminate some rank deficiency by reducing the matrix size matrix, reduce_x, reduce_y, expand_x, expand_y = drop_rows_and_cols_from_system(matrix, x0_tensors[0].shape, empty_rows, empty_cols) remaining_deficiency = solve.rank_deficiency - isize(empty_rows) if (remaining_deficiency > 0).any: warnings.warn(f"Matrix is rank-deficient after removing {isize(empty_rows)} empty rows and columns: rank deficiency >= {remaining_deficiency} in linear solve. Matrix might be singular which can lead to convergence problems. Please specify using Solve(rank_deficiency=...).", RuntimeWarning, stacklevel=2) solve = copy_with(solve, rank_deficiency=remaining_deficiency) else: warnings.warn(f"Rank deficiency >= {solve.rank_deficiency} detected in linear solve. Matrix might be singular which can lead to convergence problems. Please specify using Solve(rank_deficiency=...).", RuntimeWarning, stacklevel=2) preconditioner = compute_preconditioner(solve.preconditioner, matrix, rank_deficiency=solve.rank_deficiency, target_backend=NUMPY if solve.method.startswith('scipy-') else backend, solver=solve.method) if solve.preconditioner is not None else None def _matrix_solve_forward(y, solve: Solve, matrix: Tensor, is_backprop=False): pattern_dims_in = dual(matrix).as_channel().names pattern_dims_out = non_dual(matrix).names # batch dims can be sparse or batched matrices _, y_tensors = disassemble_tree(y, False, value_attributes) b = backend_for(*y_tensors, matrix) nat_matrix = native_matrix(matrix, b) if solve.rank_deficiency: if is_sparse(matrix): N = dual(matrix).volume if b.get_sparse_format(nat_matrix) == 'csr': _, (data, idx, ptr) = b.disassemble(nat_matrix) idx = b.csr_to_coo(idx[None, :], ptr[None, :])[0, :] elif b.get_sparse_format(nat_matrix) == 'csc': _, (data, ptr, idx) = b.disassemble(nat_matrix) idx = b.csr_to_coo(idx[None, :], ptr[None, :])[0, :] idx = b.flip(idx, (-1,)) elif b.get_sparse_format(nat_matrix) == 'coo': _, (idx, data) = b.disassemble(nat_matrix) else: raise NotImplementedError(b.get_sparse_format(nat_matrix)) # --- Add a row and column of ones to the matrix to make the system non-singular --- data = b.pad(data, [(0, 2*N)], constant_values=1) i = b.range(N, dtype=b.dtype(idx)) j = N + b.zeros((N,), dtype=b.dtype(idx)) new_col = b.stack([i, j], -1) new_row = b.stack([j, i], -1) idx = b.concat([idx, new_col, new_row], 0) nat_matrix = b.sparse_coo_tensor(idx, data, (N+1, N+1)) else: nat_matrix = b.pad(nat_matrix, [(0, 1), (0, 1)], constant_values=1) rx, ry, ex, ey = reduce_x, reduce_y, expand_x, expand_y if is_backprop: rx, ry, ex, ey = reduce_y, reduce_x, expand_y, expand_x # pattern_dims are already switched due to matrix transposition result = _linear_solve_forward(y, solve, nat_matrix, pattern_dims_in, pattern_dims_out, preconditioner, backend, is_backprop, rx, ry, ex, ey) return result # must return exactly `x` so gradient isn't computed w.r.t. 
other quantities _matrix_solve = attach_gradient_solve(_matrix_solve_forward, auxiliary_args=f'is_backprop,solve{",matrix" if matrix.backend == NUMPY else ""}', matrix_adjoint=grad_for_f) return _matrix_solve(assemble_tree(y_tree, y_tensors), solve, matrix) else: # Matrix-free solve from ._ops import cached f_args = cached(f_args) solve = cached(solve) assert not grad_for_f, f"grad_for_f=True can only be used for math.jit_compile_linear functions but got '{f_name(f)}'. Please decorate the linear function with @jit_compile_linear" assert solve.preconditioner is None, f"Preconditioners not currently supported for matrix-free solves. Decorate '{f_name(f)}' with @math.jit_compile_linear to perform a matrix solve." def _function_solve_forward(y, solve: Solve, f_args: tuple, f_kwargs: dict = None, is_backprop=False): y_nest, (y_tensor,) = disassemble_tree(y, cache=False, attr_type=value_attributes) x0_nest, (x0_tensor,) = disassemble_tree(solve.x0, cache=False, attr_type=variable_attributes) # active_dims = (y_tensor.shape & x0_tensor.shape).non_batch # assumes batch dimensions are not active batches = (y_tensor.shape & x0_tensor.shape).batch def native_lin_f(native_x, batch_index=None, is_trajectory=False): assert not solve.rank_deficiency # ToDo add and remove zeros around function call if batch_index is not None and batches.volume > 1: native_x = backend.tile(backend.expand_dims(native_x), [batches.volume, 1]) if is_trajectory: x_tensor = reshaped_tensor(native_x, [non_batch(x0_tensor), batch('trajectory') + batches]) else: x_tensor = reshaped_tensor(native_x, [batches, non_batch(x0_tensor)] if backend.ndims(native_x) >= 2 else [non_batch(x0_tensor)], convert=False) x = assemble_tree(x0_nest, [x_tensor], attr_type=variable_attributes) y_ = f(x, *f_args, **f_kwargs) _, (y_tensor_,) = disassemble_tree(y_, cache=False, attr_type=value_attributes) assert set(non_batch(y_tensor_)) == set(non_batch(y_tensor)), f"Function returned dimensions {y_tensor_.shape} but right-hand-side has shape {y_tensor.shape}" if is_trajectory: y_native = y_tensor_.native([non_batch(y_tensor), batch(x_tensor)]) else: y_native = y_tensor_.native([batches, non_batch(y_tensor)] if backend.ndims(native_x) >= 2 else [non_batch(y_tensor)]) # order like right-hand-side if batch_index is not None and batches.volume > 1: y_native = y_native[batch_index] return y_native result = _linear_solve_forward(y, solve, native_lin_f, pattern_dims_in=non_batch(x0_tensor).names, pattern_dims_out=non_batch(y_tensor).names, preconditioner=None, backend=backend, is_backprop=is_backprop) return result # must return exactly `x` so gradient isn't computed w.r.t. other quantities _function_solve = attach_gradient_solve(_function_solve_forward, auxiliary_args='is_backprop,f_kwargs,solve', matrix_adjoint=grad_for_f) return _function_solve(y, solve, f_args, f_kwargs=f_kwargs)Solves the system of linear equations f(x) = y and returns x. This method will use the solver specified in
`solve`. The following method identifiers are supported by all backends:
- `'auto'`: Automatically choose a solver.
- `'CG'`: Conjugate gradient, only for symmetric and positive-definite matrices.
- `'CG-adaptive'`: Conjugate gradient with adaptive step size, only for symmetric and positive-definite matrices.
- `'biCG'` or `'biCG-stab(0)'`: Biconjugate gradient.
- `'biCG-stab'` or `'biCG-stab(1)'`: Biconjugate gradient stabilized, first order.
- `'biCG-stab(2)'`, `'biCG-stab(4)'`, …: Biconjugate gradient stabilized, second or higher order.
- `'scipy-direct'`: SciPy direct solve, always run on the CPU using `scipy.sparse.linalg.spsolve`.
- `'scipy-CG'`, `'scipy-GMres'`, `'scipy-biCG'`, `'scipy-biCG-stab'`, `'scipy-CGS'`, `'scipy-QMR'`, `'scipy-GCrotMK'`, `'scipy-lsqr'`: SciPy iterative solvers, always run on the CPU, both in eager execution and JIT mode.
For maximum performance, compile `f` using `jit_compile_linear()` beforehand. Then, an optimized representation of `f` (such as a sparse matrix) will be used to solve the linear system.
Caution: The matrix construction may potentially be performed each time `solve_linear()` is called if auxiliary arguments change. To prevent this, jit-compile the function that makes the call to `solve_linear()`.
To obtain additional information about the performed solve, perform the solve within a `SolveTape` context. The used implementation can be obtained as `SolveInfo.method`.
The gradient of this operation will perform another linear solve with the parameters specified by `Solve.gradient_solve`.
See Also:
`solve_nonlinear()`, `jit_compile_linear()`.
Args
f- One of the following:
- Linear function with `Tensor` or `PhiTreeNode` first parameter and return value. `f` can have additional auxiliary arguments and return auxiliary values.
- Dense matrix (`Tensor` with at least one dual dimension)
- Sparse matrix (sparse `Tensor` with at least one dual dimension)
- Native tensor (not yet supported)
y- Desired output of `f(x)` as `Tensor` or `PhiTreeNode`.
solve- `Solve` object specifying optimization method, parameters and initial guess for `x`.
*f_args- Positional arguments to be passed to `f` after `solve.x0`. These arguments will not be solved for. Supports vararg mode or pass all arguments as a `tuple`.
f_kwargs- Additional keyword arguments to be passed to `f`. These arguments are treated as auxiliary arguments and can be of any type.
Returns
x- Solution of the linear system of equations `f(x) = y` as `Tensor` or `PhiTreeNode`.
Raises
NotConverged- If the desired accuracy was not reached within the maximum number of iterations.
Diverged- If the solve failed prematurely.
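A minimal matrix-solve sketch (dim names and solver settings here are illustrative, not prescribed by this function):
>>> from phiml import math
>>> from phiml.math import wrap, channel, dual, Solve
>>> A = wrap([[2., 0.], [0., 4.]], channel('b'), dual('b'))  # diagonal matrix: primal rows, dual columns
>>> y = wrap([2., 8.], channel('b'))
>>> math.solve_linear(A, y, Solve(x0=math.zeros_like(y)))  # solution ≈ (1.000, 2.000)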
def solve_nonlinear(f: Callable, y, solve: phiml.math._optimize.Solve) ‑> phiml.math._tensors.Tensor-
Expand source code
def solve_nonlinear(f: Callable, y, solve: Solve) -> Tensor: """ Solves the non-linear equation *f(x) = y* by minimizing the norm of the residual. This method is limited to backends that support `jacobian()`, currently PyTorch, TensorFlow and Jax. To obtain additional information about the performed solve, use a `SolveTape`. See Also: `minimize()`, `solve_linear()`. Args: f: Function whose output is optimized to match `y`. All positional arguments of `f` are optimized and must be `Tensor` or `phiml.math.magic.PhiTreeNode`. The output of `f` must match `y`. y: Desired output of `f(x)` as `Tensor` or `phiml.math.magic.PhiTreeNode`. solve: `Solve` object specifying optimization method, parameters and initial guess for `x`. Returns: x: Solution fulfilling `f(x) = y` within specified tolerance as `Tensor` or `phiml.math.magic.PhiTreeNode`. Raises: NotConverged: If the desired accuracy was not be reached within the maximum number of iterations. Diverged: If the solve failed prematurely. """ def min_func(x): diff = f(x) - y l2 = l2_loss(diff) return l2 if solve.preprocess_y is not None: y = solve.preprocess_y(y) from ._nd import l2_loss solve = solve.with_defaults('solve') tol = math.maximum(solve.rel_tol * l2_loss(y), solve.abs_tol) min_solve = copy_with(solve, abs_tol=tol, rel_tol=0, preprocess_y=None) return minimize(min_func, min_solve)Solves the non-linear equation f(x) = y by minimizing the norm of the residual.
This method is limited to backends that support
`jacobian()`, currently PyTorch, TensorFlow and Jax.
To obtain additional information about the performed solve, use a `SolveTape`.
See Also:
`minimize()`, `solve_linear()`.
Args
f- Function whose output is optimized to match `y`. All positional arguments of `f` are optimized and must be `Tensor` or `PhiTreeNode`. The output of `f` must match `y`.
y- Desired output of `f(x)` as `Tensor` or `PhiTreeNode`.
solve- `Solve` object specifying optimization method, parameters and initial guess for `x`.
Returns
x- Solution fulfilling `f(x) = y` within the specified tolerance as `Tensor` or `PhiTreeNode`.
Raises
NotConverged- If the desired accuracy was not reached within the maximum number of iterations.
Diverged- If the solve failed prematurely.
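A minimal sketch, assuming a backend with `jacobian()` support (PyTorch, TensorFlow or Jax); `'L-BFGS-B'` is one possible method choice, not mandated here:
>>> from phiml import math
>>> from phiml.math import wrap, Solve
>>> def f(x):
...     return x ** 2
>>> solve = Solve('L-BFGS-B', rel_tol=1e-5, abs_tol=1e-5, x0=wrap(1.))
>>> math.solve_nonlinear(f, wrap(4.), solve)  # converges towards x ≈ 2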
def sort(x: ~TensorOrTree,
dim: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function non_batch>,
key: phiml.math._tensors.Tensor = None) ‑> phiml.math._tensors.Tensor-
Expand source code
def sort(x: TensorOrTree, dim: DimFilter = non_batch, key: Tensor = None) -> Tensor: """ Sort the values of `x` along `dim`. If `key` is specified, sorts `x` according to the corresponding values in the `key` tensor. When sorting by key, you can pass pytrees and dataclasses for `x`. The value `range` for `x` returns the sorting permutation. In order to sort a flattened array, use `pack_dims` first. Examples: >>> x = tensor([1, 3, 2, -1], spatial('x')) >>> math.sort(x) >>> # Out: (-1, 1, 2, 3) along xˢ >>> math.sort(range, 'x', key=x) >>> # Out: (3, 0, 2, 1) along xˢ int64 >>> result, perm = math.sort((x, range), key=x) Args: x: `Tensor` to sort. If `key` is specified, can be a tree as well. dim: Dimension to sort. If not present, sorting will be skipped. Defaults to non-batch dim. key: `Tensor` holding values to compare during sorting. Returns: Sorted `Tensor` or `x` if `x` is constant along `dims`. """ if key is None: x_shape = x.shape dim = x_shape.only(dim) var_names = variable_dim_names(x) if not dim or dim.name not in var_names: return x # nothing to do; x is constant along dim assert dim.rank == 1, f"Can only sort one dimension at a time. Use pack_dims() to jointly sort over multiple dimensions." axis = var_names.index(dim.name) x_native = x._native if isinstance(x, Dense) else x.native(x.shape) sorted_native = x.backend.sort(x_native, axis=axis) if x.shape.get_labels(dim): warnings.warn(f"sort() removes labels along sorted axis '{dim}'. Was {x.shape.get_labels(dim)}", RuntimeWarning, stacklevel=2) x_shape = x_shape.with_dim_size(dim, x_shape.get_size(dim), keep_labels=False) return Dense(sorted_native, var_names, x_shape, x.backend) else: k_shape = key.shape dim = k_shape.only(dim) var_names = variable_dim_names(key) if not dim or dim.name not in var_names: return x # nothing to do; key is constant along dim assert dim.rank == 1, f"Can only sort one dimension at a time. Use pack_dims() to jointly sort over multiple dimensions." axis = var_names.index(dim.name) x_native = key._native if isinstance(key, Dense) else key.native(key.shape) native_perm = key.backend.argsort(x_native, axis=axis) if key.shape.get_labels(dim): warnings.warn(f"sort() removes labels along sorted axis '{dim}'. Was {x.shape.get_labels(dim)}", RuntimeWarning, stacklevel=2) k_shape = k_shape.with_dim_size(dim, k_shape.get_size(dim), keep_labels=False) & channel(index=dim.name) perm = Dense(native_perm, var_names, k_shape, key.backend) return slice_(x, perm)Sort the values of
`x` along `dim`. If `key` is specified, sorts `x` according to the corresponding values in the `key` tensor. When sorting by key, you can pass pytrees and dataclasses for `x`. The value `range` (i.e. `arange`) for `x` returns the sorting permutation.
In order to sort a flattened array, use `pack_dims()` first.
Examples:
>>> x = tensor([1, 3, 2, -1], spatial('x'))
>>> math.sort(x)
>>> # Out: (-1, 1, 2, 3) along xˢ
>>> math.sort(range, 'x', key=x)
>>> # Out: (3, 0, 2, 1) along xˢ int64
>>> result, perm = math.sort((x, range), key=x)
Args
x- `Tensor` to sort. If `key` is specified, can be a tree as well.
dim- Dimension to sort. If not present, sorting will be skipped. Defaults to non-batch dim.
key- `Tensor` holding values to compare during sorting.
Returns
Sorted `Tensor` or `x` if `x` is constant along `dims`.
def spack(value,
packed_dim: str | phiml.math._shape.Shape,
pos: int | None = None,
**kwargs)-
Expand source code
def spack(value, packed_dim: Union[Shape, str], pos: Optional[int] = None, **kwargs): """Short for `pack_dims(..., dims=spatial)`""" return pack_dims(value, spatial, packed_dim, pos=pos, **kwargs)
Short for `pack_dims(…, dims=spatial)`.
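A minimal sketch, packing all spatial dims into a single instance dim (names are illustrative):
>>> from phiml import math
>>> from phiml.math import spatial, instance
>>> x = math.zeros(spatial(x=4, y=3))
>>> math.spack(x, instance('points')).shape
(pointsⁱ=12)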
def sparse_tensor(indices: phiml.math._tensors.Tensor | None,
values: numbers.Number | phiml.math._tensors.Tensor,
dense_shape: phiml.math._shape.Shape,
can_contain_double_entries=True,
indices_sorted=False,
format=None,
indices_constant: bool = True) ‑> phiml.math._tensors.Tensor-
Expand source code
def sparse_tensor(indices: Optional[Tensor], values: Union[Tensor, Number], dense_shape: Shape, can_contain_double_entries=True, indices_sorted=False, format=None, indices_constant: bool = True) -> Tensor: """ Construct a sparse tensor that stores `values` at the corresponding `indices` and is 0 everywhere else. Duplicate entries (entries with the same indices) are identical to one entry with the sum of the corresponding values. This can be performed explicitly using `sum_equal_entries()`. In addition to the sparse dimensions indexed by `indices`, the tensor inherits all batch and channel dimensions from `values`. Sparse tensors can be used to implement `bincount`, i.e. `bincount = dense(sparse_tensor(indices, weights, dims))`. Args: indices: `Tensor` encoding the positions of stored values. It can either list the individual stored indices (COO format) or encode only part of the index while containing other dimensions directly (compact format). For COO, it has the following dimensions: * One instance dimension exactly matching the instance dimension on `values`. It enumerates the positions of stored entries. * One channel dimension. Its labels must match the dimension names of `dense_shape` but the order can be arbitrary. * Any number of batch dimensions You may pass `None` to create a sparse tensor with no entries. values: `Tensor` containing the stored values at positions given by `indices`. It has the following dimensions: * One instance dimension exactly matching the instance dimension on `indices`. It enumerates the values of stored entries. * Any number of channel dimensions if multiple values are stored at each index. * Any number of batch dimensions dense_shape: Dimensions listed in `indices`. The order can differ from the labels of `indices`. can_contain_double_entries: Whether some indices might occur more than once. If so, values at the same index will be summed. indices_sorted: Whether the indices are sorted in ascending order given the dimension order of the labels of `indices`. indices_constant: Whether the positions of the non-zero values are fixed. If `True`, JIT compilation will not create a placeholder for `indices`. format: Sparse format in which to store the data, such as `'coo'` or `'csr'`. See `phiml.math.get_format`. If `None`, uses the format in which the indices were given. Returns: Sparse `Tensor` with the specified `format`. """ assert values is not None, f"values must be a number of Tensor but got None. Pass values=1 for unit values." assert dense_shape.well_defined, f"Dense shape must be well-defined but got {dense_shape}" if indices_constant is None: indices_constant = indices.default_backend.name == 'numpy' assert isinstance(indices_constant, bool) if indices is None: from ._ops import ones indices = ones(instance(entries=0), channel(idx=dense_shape.name_list), dtype=int) can_contain_double_entries = False indices_constant = True # --- type of sparse tensor --- if dense_shape in indices: # compact compressed = concat_shapes_(*[dim for dim in dense_shape if dim.size > indices.shape.get_size(dim)]) values = expand(1, non_batch(indices)) sparse = CompactSparseTensor(indices, values, compressed, indices_constant) else: values = expand(values, instance(indices)) sparse = SparseCoordinateTensor(indices, values, dense_shape, can_contain_double_entries, indices_sorted, indices_constant) return to_format(sparse, format) if format is not None else sparseConstruct a sparse tensor that stores
`values` at the corresponding `indices` and is 0 everywhere else. Duplicate entries (entries with the same indices) are equivalent to a single entry holding the sum of the corresponding values. This summation can be performed explicitly using `sum_equal_entries()`.
In addition to the sparse dimensions indexed by `indices`, the tensor inherits all batch and channel dimensions from `values`.
Sparse tensors can be used to implement `bincount`, i.e. `bincount = dense(sparse_tensor(indices, weights, dims))`.
Args
indices- `Tensor` encoding the positions of stored values. It can either list the individual stored indices (COO format) or encode only part of the index while containing other dimensions directly (compact format). For COO, it has the following dimensions:
- One instance dimension exactly matching the instance dimension on `values`. It enumerates the positions of stored entries.
- One channel dimension. Its labels must match the dimension names of `dense_shape` but the order can be arbitrary.
- Any number of batch dimensions
You may pass `None` to create a sparse tensor with no entries.
values- `Tensor` containing the stored values at positions given by `indices`. It has the following dimensions:
- One instance dimension exactly matching the instance dimension on `indices`. It enumerates the values of stored entries.
- Any number of channel dimensions if multiple values are stored at each index.
- Any number of batch dimensions
dense_shape- Dimensions listed in `indices`. The order can differ from the labels of `indices`.
can_contain_double_entries- Whether some indices might occur more than once. If so, values at the same index will be summed.
indices_sorted- Whether the indices are sorted in ascending order given the dimension order of the labels of `indices`.
indices_constant- Whether the positions of the non-zero values are fixed. If `True`, JIT compilation will not create a placeholder for `indices`.
format- Sparse format in which to store the data, such as `'coo'` or `'csr'`. See `get_format()`. If `None`, uses the format in which the indices were given.
Returns
Sparse `Tensor` with the specified `format`.
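A minimal COO sketch with two stored entries (dim names are illustrative):
>>> from phiml.math import wrap, instance, channel, spatial, sparse_tensor, dense
>>> idx = wrap([(0, 0), (1, 2)], instance('entries'), channel(index='x,y'))
>>> vals = wrap([10., 20.], instance('entries'))
>>> s = sparse_tensor(idx, vals, spatial(x=2, y=3))
>>> dense(s)  # 2x3 grid: 10 at (x=0, y=0), 20 at (x=1, y=2), zeros elsewhere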
def spatial(*args, **dims: int | str | tuple | list | phiml.math._shape.Shape | ForwardRef('Tensor')) ‑> phiml.math._shape.Shape-
Expand source code
def spatial(*args, **dims: Union[int, str, tuple, list, Shape, 'Tensor']) -> Shape: """ Returns the spatial dimensions of an existing `Shape` or creates a new `Shape` with only spatial dimensions. Usage for filtering spatial dimensions: >>> spatial_dims = spatial(shape) >>> spatial_dims = spatial(tensor) Usage for creating a `Shape` with only spatial dimensions: >>> spatial_shape = spatial('undef', x=2, y=3) (x=2, y=3, undef=None) Here, the dimension `undef` is created with an undefined size of `None`. Undefined sizes are automatically filled in by `tensor`, `wrap`, `stack` and `concat`. To create a shape with multiple types, use `merge_shapes()`, `concat_shapes()` or the syntax `shape1 & shape2`. See Also: `channel`, `batch`, `instance` Args: *args: Either * `Shape` or `Tensor` to filter or * Names of dimensions with undefined sizes as `str`. **dims: Dimension sizes and names. Must be empty when used as a filter operation. Returns: `Shape` containing only dimensions of type spatial. """ if all(isinstance(arg, str) for arg in args) or dims: return _construct_shape(SPATIAL_DIM, *args, **dims) elif len(args) == 1 and isinstance(args[0], SHAPE_TYPES): return args[0].spatial assert len(args) == 1, f"spatial() must be called either as a selector spatial(Shape) or spatial(Tensor) or as a constructor spatial(*names, **dims). Got *args={args}, **dims={dims}" return shape(args[0]).spatialReturns the spatial dimensions of an existing
`Shape` or creates a new `Shape` with only spatial dimensions.
Usage for filtering spatial dimensions:
>>> spatial_dims = spatial(shape)
>>> spatial_dims = spatial(tensor)
Usage for creating a `Shape` with only spatial dimensions:
>>> spatial_shape = spatial('undef', x=2, y=3)
(x=2, y=3, undef=None)
Here, the dimension `undef` is created with an undefined size of `None`. Undefined sizes are automatically filled in by `tensor()`, `wrap()`, `stack()` and `concat()`.
To create a shape with multiple types, use `merge_shapes()`, `concat_shapes()` or the syntax `shape1 & shape2`.
See Also:
`channel()`, `batch()`, `instance()`
Args
*args- Either
- `Shape` or `Tensor` to filter, or
- names of dimensions with undefined sizes as `str`.
**dims- Dimension sizes and names. Must be empty when used as a filter operation.
Returns
`Shape` containing only dimensions of type spatial.
def spatial_gradient(grid: phiml.math._tensors.Tensor,
dx: float | phiml.math._tensors.Tensor = 1,
difference: str = 'central',
padding: Extrapolation | float | phiml.math._tensors.Tensor | str | None = zero-gradient,
dims: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function spatial>,
stack_dim: str | phiml.math._shape.Shape | None = (gradientᶜ),
pad=0,
padding_kwargs: dict = None) ‑> phiml.math._tensors.Tensor-
Expand source code
def spatial_gradient(grid: Tensor, dx: Union[float, Tensor] = 1, difference: str = 'central', padding: Union[Extrapolation, float, Tensor, str, None] = extrapolation.BOUNDARY, dims: DimFilter = spatial, stack_dim: Union[Shape, str, None] = channel('gradient'), pad=0, padding_kwargs: dict = None) -> Tensor: """ Calculates the spatial_gradient of a scalar channel from finite differences. The spatial_gradient vectors are in reverse order, lowest dimension first. Args: grid: grid values dims: (Optional) Dimensions along which the spatial derivative will be computed. sequence of dimension names dx: Physical distance between grid points, `float` or `Tensor`. When passing a vector-valued `Tensor`, the dx values should be listed along `stack_dim`, matching `dims`. difference: type of difference, one of ('forward', 'backward', 'central') (default 'forward') padding: Padding mode. Must be one of the following: `Extrapolation`, `Tensor` or number for constant extrapolation, name of extrapolation as `str`. stack_dim: name of the new vector dimension listing the spatial_gradient w.r.t. the various axes pad: How many cells to extend the result compared to `grid`. This value is added to the internal padding. For non-trivial extrapolations, this gives the correct result while manual padding before or after this operation would not respect the boundary locations. padding_kwargs: Additional keyword arguments to be passed to `phiml.math.pad()`. Returns: `Tensor` """ grid = wrap(grid) if stack_dim and stack_dim in grid.shape: assert grid.shape.only(stack_dim).size == 1, f"spatial_gradient() cannot list components along {stack_dim.name} because that dimension already exists on grid {grid}" grid = grid[{stack_dim.name: 0}] dims = grid.shape.only(dims) if stack_dim and stack_dim.labels[0]: assert set(stack_dim.labels[0]) == set(dims.names), f"stack_dim labels {stack_dim.labels[0]} not compatible with gradient dims {dims.names}." dims = grid.shape.only(stack_dim.labels[0], reorder=True) dx = wrap(dx) if 'vector' in dx.shape: dx = dx.vector[dims] if dx.vector.size in (None, 1): dx = dx.vector[0] if difference.lower() == 'central': left, right = shift(grid, (-1, 1), dims, padding, stack_dim=stack_dim, extend_bounds=pad, padding_kwargs=padding_kwargs) return (right - left) / (dx * 2) elif difference.lower() == 'forward': left, right = shift(grid, (0, 1), dims, padding, stack_dim=stack_dim, extend_bounds=pad, padding_kwargs=padding_kwargs) return (right - left) / dx elif difference.lower() == 'backward': left, right = shift(grid, (-1, 0), dims, padding, stack_dim=stack_dim, extend_bounds=pad, padding_kwargs=padding_kwargs) return (right - left) / dx else: raise ValueError('Invalid difference type: {}. Can be CENTRAL or FORWARD'.format(difference))Calculates the spatial_gradient of a scalar channel from finite differences. The spatial_gradient vectors are in reverse order, lowest dimension first.
Args
grid- Grid values.
dims- (Optional) Dimensions along which the spatial derivative will be computed, as a sequence of dimension names.
dx- Physical distance between grid points, `float` or `Tensor`. When passing a vector-valued `Tensor`, the dx values should be listed along `stack_dim`, matching `dims`.
difference- Type of difference, one of ('forward', 'backward', 'central'). The default is 'central', matching the signature above.
padding- Padding mode. Must be one of the following: `Extrapolation`, `Tensor` or number for constant extrapolation, name of extrapolation as `str`.
stack_dim- Name of the new vector dimension listing the spatial_gradient w.r.t. the various axes.
pad- How many cells to extend the result compared to `grid`. This value is added to the internal padding. For non-trivial extrapolations, this gives the correct result while manual padding before or after this operation would not respect the boundary locations.
padding_kwargs- Additional keyword arguments to be passed to `pad()`.
Returns
`Tensor`
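A minimal 1D sketch using central differences without padding (values chosen for illustration):
>>> from phiml import math
>>> from phiml.math import wrap, spatial
>>> grid = wrap([0., 1., 4., 9.], spatial('x'))
>>> math.spatial_gradient(grid, difference='central', padding=None)
>>> # interior gradients: (4-0)/2 = 2 and (9-1)/2 = 4, listed along the new 'gradient' dim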
def sqrt(x: ~TensorOrTree) ‑> ~TensorOrTree-
Expand source code
def sqrt(x: TensorOrTree) -> TensorOrTree: """ Computes *sqrt(x)* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`. """ return _backend_op1(x, Backend.sqrt, sqrt)Computes sqrt(x) of the
`Tensor` or `PhiTreeNode` `x`.
def squared_norm(vec: phiml.math._tensors.Tensor,
vec_dim: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function channel>)-
Expand source code
def squared_norm(vec: Tensor, vec_dim: DimFilter = channel): """ Computes the squared norm of `vec`. If `vec_dim` is None, the combined channel dimensions of `vec` are interpreted as a vector. """ return math.sum_(vec ** 2, dim=vec_dim)
Computes the squared norm of `vec`. If `vec_dim` is None, the combined channel dimensions of `vec` are interpreted as a vector.
def squeeze(x: ~PhiTreeNodeType,
dims: str | Sequence | set | phiml.math._shape.Shape | Callable | None) ‑> ~PhiTreeNodeType-
Expand source code
def squeeze(x: PhiTreeNodeType, dims: DimFilter) -> PhiTreeNodeType: """ Remove specific singleton (volume=1) dims from `x`. Args: x: Tensor or composite type / tree. dims: Singleton dims to remove. Returns: Same type as `x`. """ dims = shape(x).only(dims) if not dims: return x assert dims.volume == 1, f"Cannot squeeze non-singleton dims {dims} from {x}" return x[{d: 0 for d in dims.names}]Remove specific singleton (volume=1) dims from
`x`.
Args
x- Tensor or composite type / tree.
dims- Singleton dims to remove.
Returns
Same type as `x`.
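A minimal sketch removing a singleton batch dim (names are illustrative):
>>> from phiml import math
>>> from phiml.math import batch, spatial
>>> x = math.zeros(batch(b=1), spatial(x=4))
>>> math.squeeze(x, 'b').shape
(xˢ=4)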
def srange(start: int = 0, **stop: int) ‑> phiml.math._tensors.Tensor[int]-
Expand source code
def srange(start: int = 0, **stop: int) -> Tensor[int]: """ Construct a range `Tensor` along one spatial dim. """ assert len(stop) == 1, f"srange() requires exactly one stop dimension but got {stop}" return arange(spatial(next(iter(stop))), start, next(iter(stop.values())))Construct a range
`Tensor` along one spatial dim.
def ssize(obj) ‑> int | None-
Expand source code
def ssize(obj) -> Optional[int]: """ Returns the total number of elements listed along spatial dims of an object, equal to the product of the sizes of all spatial dims. Args: obj: `Shape` or object with a valid `shape` property. Returns: Size as `int`. If `obj` is an undefined `Shape`, returns `None`. """ return spatial(obj).volume
Returns the total number of elements listed along spatial dims of an object, equal to the product of the sizes of all spatial dims.
Args
obj- `Shape` or object with a valid `shape` property.
Returns
Size as `int`. If `obj` is an undefined `Shape`, returns `None`.
def stack(values: Sequence[~PhiTreeNodeType] | Dict[str, ~PhiTreeNodeType],
dim: str | phiml.math._shape.Shape,
expand_values=False,
simplify=False,
layout_non_matching=False,
allow_varying_labels=True,
**kwargs) ‑> ~PhiTreeNodeType-
Expand source code
def stack(values: Union[Sequence[PhiTreeNodeType], Dict[str, PhiTreeNodeType]], dim: Union[Shape, str], expand_values=False, simplify=False, layout_non_matching=False, allow_varying_labels=True, **kwargs) -> PhiTreeNodeType: """ Stacks `values` along the new dimension `dim`. All values must have the same spatial, instance and channel dimensions. If the dimension sizes vary, the resulting tensor will be non-uniform. Batch dims will be added as needed. Stacking tensors is performed lazily, i.e. the memory is allocated only when needed. This makes repeated stacking and slicing along the same dimension very efficient, i.e. jit-compiled functions will not perform these operations. Args: values: Collection of `phiml.math.magic.Shapable`, such as `phiml.math.Tensor` If a `dict`, keys must be of type `str` and are used as labels along `dim`. dim: `Shape` with a least one dimension. None of these dims can be present with any of the `values`. If `dim` is a single-dimension shape, its size is determined from `len(values)` and can be left undefined (`None`). If `dim` is a multi-dimension shape, its volume must be equal to `len(values)`. expand_values: If `True`, will first add missing dims to all values, not just batch dimensions. This allows tensors with different dims to be stacked. The resulting tensor will have all dims that are present in `values`. If `False`, this may return a non-numeric object instead. simplify: If `True` and all values are equal, returns one value without adding the dimension. layout_non_matching: If non-matching values should be stacked using a Layout object, i.e. should be put into a named list instead. **kwargs: Additional keyword arguments required by specific implementations. Adding spatial dims to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions. Adding batch dims must always work without keyword arguments. Returns: `Tensor` containing `values` stacked along `dim`. Examples: >>> stack({'x': 0, 'y': 1}, channel('vector')) (x=0, y=1) >>> stack([math.zeros(batch(b=2)), math.ones(batch(b=2))], channel(c='x,y')) (x=0.000, y=1.000); (x=0.000, y=1.000) (bᵇ=2, cᶜ=x,y) >>> stack([vec(x=1, y=0), vec(x=2, y=3.)], batch('b')) (x=1.000, y=0.000); (x=2.000, y=3.000) (bᵇ=2, vectorᶜ=x,y) """ assert len(values) > 0, f"stack() got empty sequence {values}" if simplify and len(values) == 1: return next(iter(values.values())) if isinstance(values, dict) else values[0] if not dim: assert len(values) == 1, f"Only one element can be passed as `values` if no dim is passed but got {values}" return next(iter(values.values())) if isinstance(values, dict) else values[0] if not isinstance(dim, SHAPE_TYPES): dim = auto(dim) values_ = tuple(values.values()) if isinstance(values, dict) else values if simplify: if all(v is None for v in values_): return None if all(type(v) == type(values_[0]) for v in values_[1:]): from ._tensors import equality_by_shape_and_value with equality_by_shape_and_value(equal_nan=True): if all(v == values_[0] for v in values_[1:]): return values_[0] shapes = [shape(v) for v in values_] if not expand_values: v0_dims = set(shapes[0].non_batch.names) for s in shapes[1:]: if set(s.non_batch.names) != v0_dims: # shapes don't match if layout_non_matching: from ._tensors import layout return layout(values, dim) raise ValueError(f"Non-batch dims must match but got: {v0_dims} and {s.non_batch.names}. 
Manually expand tensors or set expand_values=True") # --- Add missing dims --- if expand_values: all_dims = merge_shapes(*shapes, allow_varying_sizes=True) if isinstance(values, dict): values = {k: expand(v, all_dims.with_sizes(s)) for (k, v), s in zip(values.items(), shapes)} else: values = [expand(v, all_dims.with_sizes(s)) for v, s in zip(values, shapes)] else: all_batch_dims = merge_shapes(*[s.batch for s in shapes], allow_varying_sizes=True) if isinstance(values, dict): values = {k: expand(v, all_batch_dims - s) for (k, v), s in zip(values.items(), shapes)} else: values = [expand(v, all_batch_dims - s) for v, s in zip(values, shapes)] if dim.rank == 1: assert dim.size == len(values) or dim.size is None, f"stack dim size must match len(values) or be undefined but got {dim} for {len(values)} values" if dim.size is None: dim = dim.with_size(len(values)) if isinstance(values, dict): dim_labels = tuple([k.name if isinstance(k, SHAPE_TYPES) else k for k in values.keys()]) assert all(isinstance(k, str) for k in dim_labels), f"dict keys must be of type str but got {dim_labels}" values = tuple(values.values()) dim = dim.with_size(dim_labels) # --- First try __stack__ --- for v in values: if hasattr(v, '__stack__'): result = v.__stack__(values, dim, allow_varying_labels=allow_varying_labels, **kwargs) if result is not NotImplemented: if DEBUG_CHECKS: assert isinstance(result, SHAPE_TYPES) if isinstance(v, SHAPE_TYPES) else isinstance(result, Shapable), "__stack__ must return a Shapable object" return result # --- Next: try stacking attributes for tree nodes --- from ._tensors import Tensor if any(dataclasses.is_dataclass(v) and not isinstance(v, Tensor) for v in values): from ..dataclasses._merge import dc_stack try: return dc_stack(values, dim, expand_values=expand_values, simplify=simplify, layout_non_matching=layout_non_matching, **kwargs) except NotCompatible as err: if layout_non_matching: from ._tensors import layout return layout(values, dim) raise err if all(isinstance(v, dict) for v in values): keys = set(values[0]) if all(set(v) == keys for v in values[1:]): new_dict = {} for k in keys: k_values = [v[k] for v in values] new_dict[k] = stack(k_values, dim, expand_values=expand_values, simplify=simplify, **kwargs) return new_dict raise NotImplementedError if any(isinstance(v, (tuple, list, dict)) for v in values_): from ._tensors import wrap, layout if _is_data_array(values_): tensors = [wrap(v) for v in values_] return stack(tensors, dim) elif all(isinstance(v, (tuple, list, dict)) for v in values_) and _contains_tensor(values_): if all(isinstance(v, (tuple, list)) for v in values_): return [stack([v[i] for v in values_], dim, expand_values=expand_values, simplify=simplify, layout_non_matching=layout_non_matching, **kwargs) for i in range(len(values_[0]))] # the case of dicts is handled above else: assert len(dim) == 1, f"Cannot stack values with nested tuples, lists or dicts along multiple dimensions {dim}" return layout(values_, dim) if all(isinstance(v, PhiTreeNode) for v in values): attributes = all_attributes(values[0]) if attributes and all(all_attributes(v) == attributes for v in values): new_attrs = {} for a in attributes: a_values = [getattr(v, a) for v in values] if all(v is a_values[0] for v in a_values[1:]): new_attrs[a] = expand(a_values[0], dim, **kwargs) if a_values[0] is not None else a_values[0] else: new_attrs[a] = stack(a_values, dim, expand_values=expand_values, simplify=simplify, **kwargs) return copy_with(values[0], **new_attrs) else: warnings.warn(f"Failed to 
concat values using value attributes because attributes differ among values {values}") # --- Fallback: use expand and concat --- for v in values: if not hasattr(v, '__stack__') and hasattr(v, '__concat__') and hasattr(v, '__expand__'): expanded_values = tuple([expand(v, dim.with_size(1 if dim.labels[0] is None else dim.labels[0][i]), **kwargs) for i, v in enumerate(values)]) if len(expanded_values) > 8: warnings.warn(f"stack() default implementation is slow on large dims ({dim.name}={len(expanded_values)}). Please implement __stack__()", RuntimeWarning, stacklevel=2) result = v.__concat__(expanded_values, dim.name, **kwargs) if result is not NotImplemented: assert isinstance(result, Shapable), "__concat__ must return a Shapable object" return result # --- else maybe all values are native scalars --- from ._tensors import wrap try: values = tuple([wrap(v) for v in values]) except ValueError: raise MagicNotImplemented(f"At least one item in values must be Shapable but got types {[type(v) for v in values]}") return values[0].__stack__(values, dim, **kwargs) else: # multi-dim stack assert dim.volume == len(values), f"When passing multiple stack dims, their volume must equal len(values) but got {dim} for {len(values)} values" if isinstance(values, dict): warnings.warn(f"When stacking a dict along multiple dimensions, the key names are discarded. Got keys {tuple(values.keys())}", RuntimeWarning, stacklevel=2) values = tuple(values.values()) # --- if any value implements Shapable, use stack and unpack_dim --- for v in values: if hasattr(v, '__stack__') and hasattr(v, '__unpack_dim__'): stack_dim = batch('_stack') stacked = v.__stack__(values, stack_dim, **kwargs) if stacked is not NotImplemented: assert isinstance(stacked, Shapable), "__stack__ must return a Shapable object" assert hasattr(stacked, '__unpack_dim__'), "If a value supports __unpack_dim__, the result of __stack__ must also support it." reshaped = stacked.__unpack_dim__(stack_dim.name, dim, **kwargs) if reshaped is NotImplemented: warnings.warn("__unpack_dim__ is overridden but returned NotImplemented during multi-dimensional stack. This results in unnecessary stack operations.", RuntimeWarning, stacklevel=2) else: return reshaped # --- Fallback: multi-level stack --- for dim_ in reversed(dim): values = [stack(values[i:i + dim_.size], dim_, **kwargs) for i in range(0, len(values), dim_.size)] return values[0]Stacks
`values` along the new dimension `dim`. All values must have the same spatial, instance and channel dimensions. If the dimension sizes vary, the resulting tensor will be non-uniform. Batch dims will be added as needed.
Stacking tensors is performed lazily, i.e. the memory is allocated only when needed. This makes repeated stacking and slicing along the same dimension very efficient, i.e. jit-compiled functions will not perform these operations.
Args
values- Collection of `Shapable`, such as `Tensor`. If a `dict`, keys must be of type `str` and are used as labels along `dim`.
dim- `Shape` with at least one dimension. None of these dims can be present with any of the `values`. If `dim` is a single-dimension shape, its size is determined from `len(values)` and can be left undefined (`None`). If `dim` is a multi-dimension shape, its volume must be equal to `len(values)`.
expand_values- If `True`, will first add missing dims to all values, not just batch dimensions. This allows tensors with different dims to be stacked. The resulting tensor will have all dims that are present in `values`. If `False`, this may return a non-numeric object instead.
simplify- If `True` and all values are equal, returns one value without adding the dimension.
layout_non_matching- Whether non-matching values should be stacked using a Layout object, i.e. put into a named list instead.
**kwargs- Additional keyword arguments required by specific implementations. Adding spatial dims to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions. Adding batch dims must always work without keyword arguments.
Returns
`Tensor` containing `values` stacked along `dim`.
Examples
>>> stack({'x': 0, 'y': 1}, channel('vector'))
(x=0, y=1)
>>> stack([math.zeros(batch(b=2)), math.ones(batch(b=2))], channel(c='x,y'))
(x=0.000, y=1.000); (x=0.000, y=1.000) (bᵇ=2, cᶜ=x,y)
>>> stack([vec(x=1, y=0), vec(x=2, y=3.)], batch('b'))
(x=1.000, y=0.000); (x=2.000, y=3.000) (bᵇ=2, vectorᶜ=x,y)
def std(value,
dim: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function non_batch>) ‑> phiml.math._tensors.Tensor-
Expand source code
def std(value, dim: DimFilter = non_batch) -> Tensor: """ Computes the standard deviation over `values` along the specified dimensions. *Warning*: The standard deviation of non-uniform tensors along the stack dimension is undefined. Args: value: `Tensor` or `list` / `tuple` of Tensors. dim: Dimension or dimensions to be reduced. One of * `None` to reduce all non-batch dimensions * `str` containing single dimension or comma-separated list of dimensions * `Tuple[str]` or `List[str]` * `Shape` * `batch`, `instance`, `spatial`, `channel` to select dimensions by type * `'0'` when `isinstance(value, (tuple, list))` to add up the sequence of Tensors Returns: `Tensor` without the reduced dimensions. """ if not dim: warnings.warn("std along empty shape returns 0", RuntimeWarning, stacklevel=2) return zeros_like(value) return reduce_(_std, value, dim)Computes the standard deviation over
`values` along the specified dimensions.
Warning: The standard deviation of non-uniform tensors along the stack dimension is undefined.
Args
value- `Tensor` or `list` / `tuple` of Tensors.
dim- Dimension or dimensions to be reduced. One of
- `None` to reduce all non-batch dimensions
- `str` containing a single dimension or a comma-separated list of dimensions
- `Tuple[str]` or `List[str]`
- `Shape`
- `batch()`, `instance()`, `spatial()`, `channel()` to select dimensions by type
- `'0'` when `isinstance(value, (tuple, list))` to add up the sequence of Tensors
Returns
`Tensor` without the reduced dimensions.
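A minimal sketch (population standard deviation over a single instance dim):
>>> from phiml import math
>>> from phiml.math import wrap, instance
>>> math.std(wrap([1., 2., 3., 4.], instance('points')))  # ≈ 1.118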
def stop_gradient(x)-
Expand source code
def stop_gradient(x): """ Disables gradients for the given tensor. This may switch off the gradients for `x` itself or create a copy of `x` with disabled gradients. Implementations: * PyTorch: [`x.detach()`](https://pytorch.org/docs/stable/autograd.html#torch.Tensor.detach) * TensorFlow: [`tf.stop_gradient`](https://www.tensorflow.org/api_docs/python/tf/stop_gradient) * Jax: [`jax.lax.stop_gradient`](https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.stop_gradient.html) Args: x: `Tensor` or `phiml.math.magic.PhiTreeNode` for which gradients should be disabled. Returns: Copy of `x`. """ if isinstance(x, SHAPE_TYPES): return x return _backend_op1(x, Backend.stop_gradient, stop_gradient, attr_type=variable_attributes)Disables gradients for the given tensor. This may switch off the gradients for
`x` itself or create a copy of `x` with disabled gradients.
Implementations:
- PyTorch: `x.detach()`
- TensorFlow: `tf.stop_gradient`
- Jax: `jax.lax.stop_gradient`
Args
x- `Tensor` or `PhiTreeNode` for which gradients should be disabled.
Returns
Copy of `x`.
def stored_indices(x: phiml.math._tensors.Tensor,
list_dim=(entriesⁱ),
index_dim=(indexᶜ),
invalid='discard') ‑> phiml.math._tensors.Tensor-
Expand source code
def stored_indices(x: Tensor, list_dim=instance('entries'), index_dim=channel('index'), invalid='discard') -> Tensor: """ Returns the indices of the stored values for a given `Tensor``. For sparse tensors, this will return the stored indices tensor. For collapsed tensors, only the stored dimensions will be returned. Args: x: `Tensor` list_dim: Dimension along which stored indices should be laid out. invalid: One of `'discard'`, `'clamp'`, `'keep'` Filter result by valid indices. Internally, invalid indices may be stored for performance reasons. Returns: `Tensor` representing all indices of stored values. """ assert invalid in ['discard', 'clamp', 'keep'], f"invalid handling must be one of 'discard', 'clamp', 'keep' but got {invalid}" if isinstance(x, Dense): from ._ops import meshgrid if batch(x): raise NotImplementedError indices = meshgrid(x._shape[x._names].non_batch.non_channel, stack_dim=index_dim) return pack_dims(indices, non_channel, list_dim) if isinstance(x, TensorStack): if x.is_cached or not x.requires_broadcast: return stored_indices(x._cached()) if x._stack_dim.batch_rank: return stack([stored_indices(t, list_dim, index_dim, invalid) for t in x._tensors], x._stack_dim) raise NotImplementedError # ToDo add index for stack dim elif isinstance(x, CompressedSparseMatrix): return rename_dims(x._coo_indices(invalid, stack_dim=index_dim), instance, list_dim) elif isinstance(x, CompactSparseTensor): # col = pack_dims(x._indices, x._compressed_dims + x._uncompressed_dims, list_dim) x = to_format(x, 'coo') if isinstance(x, SparseCoordinateTensor): if x._can_contain_double_entries: warnings.warn(f"stored_values of sparse tensor {x.shape} may contain multiple values for the same position.") new_index_dim = index_dim.with_size(channel(x._indices).labels[0]) return rename_dims(x._indices, [instance(x._indices).name, channel(x._indices).name], [list_dim, new_index_dim]) raise ValueError(x)Returns the indices of the stored values for a given `Tensor``. For sparse tensors, this will return the stored indices tensor. For collapsed tensors, only the stored dimensions will be returned.
Args
x- `Tensor`
list_dim- Dimension along which stored indices should be laid out.
invalid- One of `'discard'`, `'clamp'`, `'keep'`. Filters the result by valid indices. Internally, invalid indices may be stored for performance reasons.
Returns
`Tensor` representing all indices of stored values.
def stored_values(x: phiml.math._tensors.Tensor, list_dim=(entriesⁱ), invalid='discard') ‑> phiml.math._tensors.Tensor-
Expand source code
def stored_values(x: Tensor, list_dim=instance('entries'), invalid='discard') -> Tensor: """ Returns the stored values for a given `Tensor``. For sparse tensors, this will return only the stored entries. Dense tensors are reshaped so that all non-batch dimensions are packed into `list_dim`. Batch dimensions are preserved. Args: x: `Tensor` list_dim: Dimension along which stored values should be laid out. invalid: One of `'discard'`, `'clamp'`, `'keep'` Filter result by valid indices. Internally, invalid indices may be stored for performance reasons. Returns: `Tensor` representing all values stored to represent `x`. """ assert invalid in ['discard', 'clamp', 'keep'], f"invalid handling must be one of 'discard', 'clamp', 'keep' but got {invalid}" if isinstance(x, Dense): x = Dense(x._native, x._names, x._shape[x._names], x._backend) entries_dims = x.shape.non_batch return pack_dims(x, entries_dims, list_dim) if isinstance(x, TensorStack): if x.is_cached: return stored_values(x._cached()) return stack([stored_values(t, list_dim) for t in x._tensors], x._stack_dim) elif isinstance(x, CompressedSparseMatrix): if invalid in ['keep', 'clamp']: return rename_dims(x._values, instance, list_dim) else: x = x.decompress() # or apply slices, then return values elif isinstance(x, CompactSparseTensor): x = to_format(x, 'coo') if isinstance(x, SparseCoordinateTensor): if x._can_contain_double_entries: warnings.warn(f"stored_values of sparse tensor {x.shape} may contain multiple values for the same position.") return rename_dims(x._values, instance, list_dim) raise ValueError(type(x))Returns the stored values for a given `Tensor``.
For sparse tensors, this will return only the stored entries.
Dense tensors are reshaped so that all non-batch dimensions are packed into
`list_dim`. Batch dimensions are preserved.
Args
x- `Tensor`
list_dim- Dimension along which stored values should be laid out.
invalid- One of `'discard'`, `'clamp'`, `'keep'`. Filters the result by valid indices. Internally, invalid indices may be stored for performance reasons.
Returns
`Tensor` representing all values stored to represent `x`.
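A minimal sketch; for a dense tensor, all non-batch dims are packed into `list_dim`:
>>> from phiml.math import wrap, spatial, stored_values
>>> stored_values(wrap([[1, 2], [3, 4]], spatial('x,y')))
>>> # Out: (1, 2, 3, 4) along entriesⁱ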
def sum(value: ~TensorOrTree, dim: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function non_batch>) ‑> ~TensorOrTree-
Expand source code
def sum_(value: TensorOrTree, dim: DimFilter = non_batch) -> TensorOrTree: """ Sums `values` along the specified dimensions. Args: value: (Sparse) `Tensor` or `list` / `tuple` of Tensors. dim: Dimension or dimensions to be reduced. One of * `None` to reduce all non-batch dimensions * `str` containing single dimension or comma-separated list of dimensions * `Tuple[str]` or `List[str]` * `Shape` * `batch`, `instance`, `spatial`, `channel` to select dimensions by type * `'0'` when `isinstance(value, (tuple, list))` to add up the sequence of Tensors Returns: `Tensor` without the reduced dimensions. """ return reduce_(_sum, bool_to_int(value), dim, require_all_dims_present=True)Sums
`values` along the specified dimensions.
Args
value- (Sparse) `Tensor` or `list` / `tuple` of Tensors.
dim- Dimension or dimensions to be reduced. One of
- `None` to reduce all non-batch dimensions
- `str` containing a single dimension or a comma-separated list of dimensions
- `Tuple[str]` or `List[str]`
- `Shape`
- `batch()`, `instance()`, `spatial()`, `channel()` to select dimensions by type
- `'0'` when `isinstance(value, (tuple, list))` to add up the sequence of Tensors
Returns
`Tensor` without the reduced dimensions.
def svd(x: phiml.math._tensors.Tensor,
feature_dim: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function channel>,
list_dim: str | Sequence | set | phiml.math._shape.Shape | Callable | None = None,
latent_dim=(singularᶜ),
full_matrices=False)-
Expand source code
def svd(x: Tensor, feature_dim: DimFilter = channel, list_dim: DimFilter = None, latent_dim=channel('singular'), full_matrices=False): """ Singular value decomposition. The original matrix is approximated by `(latent_to_value * singular.T) @ latents` or `latent_to_value @ (singular * latents)`. **Warning:** Even for well-defined SVDs, different backend use different sign conventions, causing results to differ. Args: x: Matrix containing `feature_dim` and `list_dim`. feature_dim: Dimensions that list the features (columns). list_dim: Dimensions that list the data points (rows). latent_dim: Latent dimension. If a size is specified, truncates the SVD to this size. full_matrices: If `True`, return full-sized (square) matrices for latent_by_example and latent_to_value. These may not match the singular values. Returns: latents: Latent vectors of each item listed. `Tensor` with `list_dim` and `latent_dim`. singular: List of singular values. `Tensor` with `latent_dim`. features: Stacked normalized features / trends. This matrix can be used to compute the original value from a latent vector. `Tensor` with `latent_dim` and `feature_dim`. """ feature_dim = x.shape.only(feature_dim) if list_dim is not None: list_dim = x.shape.only(list_dim) else: if non_batch(x) - feature_dim: list_dim = non_batch(x) - feature_dim else: list_dim = x.shape - feature_dim assert feature_dim, f"No valid feature dim specified: {feature_dim} for data {x}" assert list_dim, f"No valid list dim specified: {list_dim} for data {x}" batch_dims = x.shape - feature_dim - list_dim latent_dim = auto(latent_dim, channel) if isinstance(latent_dim, str) else latent_dim native = x._reshaped_native([batch_dims, list_dim, feature_dim]) u, s, v = x.backend.svd(native, full_matrices=full_matrices) truncate = latent_dim.size if truncate is not None: if s.shape[1] < truncate: warnings.warn(f"Trying to truncate SVD but there are too few values: {s.shape[1]} < {truncate}") u = u[:, :, :truncate] s = s[:, :truncate] v = v[:, :truncate, :] latent_by_example = reshaped_tensor(u, [batch_dims, list_dim, latent_dim]) singular_values = reshaped_tensor(s, [batch_dims, latent_dim]) latent_to_value = reshaped_tensor(v, [batch_dims, latent_dim.as_dual(), feature_dim]) return latent_by_example, singular_values, latent_to_valueSingular value decomposition.
The original matrix is approximated by
`(latent_to_value * singular.T) @ latents` or `latent_to_value @ (singular * latents)`.
Warning: Even for well-defined SVDs, different backends use different sign conventions, causing results to differ.
Args
x- Matrix containing `feature_dim` and `list_dim`.
feature_dim- Dimensions that list the features (columns).
list_dim- Dimensions that list the data points (rows).
latent_dim- Latent dimension. If a size is specified, truncates the SVD to this size.
full_matrices- If `True`, return full-sized (square) matrices for latent_by_example and latent_to_value. These may not match the singular values.
Returns
latents- Latent vectors of each item listed. `Tensor` with `list_dim` and `latent_dim`.
singular- List of singular values. `Tensor` with `latent_dim`.
features- Stacked normalized features / trends. This matrix can be used to compute the original value from a latent vector. `Tensor` with `latent_dim` and `feature_dim`.
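A minimal sketch on random 2D points (dim and argument names are illustrative):
>>> from phiml import math
>>> from phiml.math import instance, channel
>>> data = math.random_normal(instance(points=100), channel(vector='x,y'))
>>> latents, singular, features = math.svd(data, feature_dim='vector', list_dim='points')
>>> singular.shape
(singularᶜ=2)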
def swap_axes(x, axes)-
Expand source code
def swap_axes(x, axes): """ Swap the dimension order of `x`. This operation is generally not necessary for `Tensor`s because tensors will be reshaped under the hood or when getting the native/numpy representations. It can be used to transpose native tensors. Implementations: * NumPy: [`numpy.transpose`](https://numpy.org/doc/stable/reference/generated/numpy.transpose.html) * PyTorch: [`x.permute`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.permute) * TensorFlow: [`tf.transpose`](https://www.tensorflow.org/api_docs/python/tf/transpose) * Jax: [`jax.numpy.transpose`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.transpose.html) Args: x: `Tensor` or native tensor or `phiml.math.magic.Shapable`. axes: `tuple` or `list` Returns: `Tensor` or native tensor, depending on `x`. """ if isinstance(x, Tensor): if isinstance(axes, (tuple, list)) and all(isinstance(a, int) for a in axes): axes = [x.shape.names[a] for a in axes] if x.shape[axes].names == x.shape.only(axes).names: # order is correct return x new_shape = x.shape[axes] packed = x.__pack_dims__(new_shape, instance('_t_flat'), None) return unpack_dim(packed, '_t_flat', new_shape) else: return choose_backend(x).transpose(x, axes)Swap the dimension order of
`x`. This operation is generally not necessary for `Tensor`s because tensors will be reshaped under the hood or when getting the native/numpy representations. It can be used to transpose native tensors.
Implementations:
- NumPy: `numpy.transpose`
- PyTorch: `x.permute`
- TensorFlow: `tf.transpose`
- Jax: `jax.numpy.transpose`
Args
x- `Tensor` or native tensor or `Shapable`.
axes- `tuple` or `list`
Returns
`Tensor` or native tensor, depending on `x`.
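A minimal sketch transposing a native NumPy array:
>>> import numpy as np
>>> from phiml import math
>>> math.swap_axes(np.zeros((2, 3)), (1, 0)).shape
(3, 2)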
def tan(x: ~TensorOrTree) ‑> ~TensorOrTree-
Expand source code
def tan(x: TensorOrTree) -> TensorOrTree: """ Computes *tan(x)* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`. """ return _backend_op1(x, Backend.tan, tan)Computes tan(x) of the
`Tensor` or `PhiTreeNode` `x`.
def tanh(x: ~TensorOrTree) ‑> ~TensorOrTree-
Expand source code
def tanh(x: TensorOrTree) -> TensorOrTree: """ Computes *tanh(x)* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`. """ return _backend_op1(x, Backend.tanh, tanh)Computes tanh(x) of the
`Tensor` or `PhiTreeNode` `x`.
def tcat(values: Sequence[~PhiTreeNodeType],
dim_type: Callable,
expand_values=False,
default_name='tcat') ‑> ~PhiTreeNodeType-
Expand source code
def tcat(values: Sequence[PhiTreeNodeType], dim_type: Callable, expand_values=False, default_name='tcat') -> PhiTreeNodeType: """ Concatenate values by dim type. This function first packs all dims of `dim_type` into one dim, then concatenates all `values`. Values that do not have a dim of `dim_type` are considered a size-1 slice. The name of the first matching dim of `dim_type` is used as the concatenated output dim name. If no value has a matching dim, `default_name` is used instead. Args: values: Values to be concatenated. dim_type: Dimension type along which to concatenate. expand_values: Whether to add missing other non-batch dims to values as needed. default_name: Concatenation dim name if none of the values have a matching dim. Returns: Same type as any value. """ dims = [dim_type(v) for v in values] present_names = tuple(set(sum([s.names for s in dims], ()))) if len(present_names) == 1: dim_name = present_names[0] elif len(present_names) > 1: if default_name in present_names: dim_name = default_name else: dim_name = present_names[0] else: dim_name = default_name single = dim_type(**{dim_name: 1}) flat_values = [pack_dims(v, dim_type, dim_type(dim_name)) if s else expand(v, single) for v, s in zip(values, dims)] return concat(flat_values, dim_name, expand_values=expand_values)Concatenate values by dim type. This function first packs all dims of
`dim_type` into one dim, then concatenates all `values`. Values that do not have a dim of `dim_type` are considered a size-1 slice.

The name of the first matching dim of `dim_type` is used as the concatenated output dim name. If no value has a matching dim, `default_name` is used instead.

Args
values- Values to be concatenated.
dim_type- Dimension type along which to concatenate.
expand_values- Whether to add missing other non-batch dims to values as needed.
default_name- Concatenation dim name if none of the values have a matching dim.

Returns
Same type as any value.
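A minimal sketch with hypothetical values, concatenating two tensors that share an instance dim name:

>>> from phiml.math import tensor, instance, tcat
>>> a = tensor([1, 2, 3], instance('points'))
>>> b = tensor([4, 5], instance('points'))
>>> tcat([a, b], instance).shape
(pointsⁱ=5)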
def tensor(data: Sequence[~T] | ~T,
*shape: phiml.math._shape.Shape | str | list,
convert: bool = True,
default_list_dim=(vectorᶜ)) ‑> phiml.math._tensors.Tensor[~T]-
def tensor(data: Union[Sequence[T], T], *shape: Union[Shape, str, list], convert: bool = True, default_list_dim=channel('vector')) -> Tensor[T]: # TODO assume convert_unsupported, add convert_external=False for constants """ Create a Tensor from the specified `data`. If `convert=True`, converts `data` to the preferred format of the default backend. `data` must be one of the following: * Number: returns a dimensionless Tensor. * Native tensor such as NumPy array, TensorFlow tensor or PyTorch tensor. * `tuple` or `list` of numbers: backs the Tensor with native tensor. * `tuple` or `list` of non-numbers: creates tensors for the items and stacks them. * Tensor: renames dimensions and dimension types if `names` is specified. Converts all internal native values of the tensor if `convert=True`. * Shape: creates a 1D tensor listing the dimension sizes. While specifying `names` is optional in some cases, it is recommended to always specify them. Dimension types are always inferred from the dimension names if specified. Implementations: * NumPy: [`numpy.array`](https://numpy.org/doc/stable/reference/generated/numpy.array.html) * PyTorch: [`torch.tensor`](https://pytorch.org/docs/stable/generated/torch.tensor.html), [`torch.from_numpy`](https://pytorch.org/docs/stable/generated/torch.from_numpy.html) * TensorFlow: [`tf.convert_to_tensor`](https://www.tensorflow.org/api_docs/python/tf/convert_to_tensor) * Jax: [`jax.numpy.array`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.array.html) See Also: `phiml.math.wrap()` which uses `convert=False`, `layout()`. Args: data: native tensor, sparse COO / CSR / CSC matrix, scalar, sequence, `Shape` or `Tensor` shape: Ordered dimensions and types. If sizes are defined, they will be checked against `data`.` When passing multiple shapes, they will be concatenated. Duplicate names are not allowed. Instead of `Shape` instances, you may pass strings specifying dims in the format `name:t` or `name:t=(labels)` where `t` refers to the type letter, one of s,i,c,d,b. Alternatively, you can pass a single `list` of shapes which will call `reshaped_tensor`. This allows for unpacking native dims into multiple dims. convert: If True, converts the data to the native format of the current default backend. If False, wraps the data in a `Tensor` but keeps the given data reference if possible. 
Raises: AssertionError: if dimension names are not provided and cannot automatically be inferred ValueError: if `data` is not tensor-like Returns: Tensor containing same values as data Examples: >>> tensor([1, 2, 3], channel(vector='x,y,z')) (x=1, y=2, z=3) >>> tensor([1., 2, 3], channel(vector='x,y,z')) (x=1.000, y=2.000, z=3.000) float64 >>> tensor(numpy.zeros([10, 8, 6, 2]), batch('batch'), spatial('x,y'), channel(vector='x,y')) (batchᵇ=10, xˢ=8, yˢ=6, vectorᶜ=x,y) float64 const 0.0 >>> tensor([(0, 1), (0, 2), (1, 3)], instance('particles'), channel(vector='x,y')) (x=0, y=1); (x=0, y=2); (x=1, y=3) (particlesⁱ=3, vectorᶜ=x,y) >>> tensor(numpy.random.randn(10)) (vectorᶜ=10) float64 -0.128 ± 1.197 (-2e+00...2e+00) """ if len(shape) == 1 and isinstance(shape[0], list): return reshaped_tensor(data, shape[0], convert=convert, check_sizes=True) shape = [parse_shape_spec(s) if isinstance(s, str) else s for s in shape] shape = None if len(shape) == 0 else concat_shapes_(*shape) if isinstance(data, SHAPE_TYPES): if shape is None: shape = channel('dims') shape = shape.with_size(data.names) data = data.sizes elif not shape: assert data.rank == 1, f"When wrapping a Shape as a scalar tensor, it must be a rank-1 shape but got {data}" data = data.size else: assert shape.rank == 1, "Can only convert 1D shapes to Tensors" shape = shape.with_size(data.names) data = data.sizes if isinstance(data, Tensor): if convert: backend = data.backend if backend != default_backend(): data = data._from_spec_and_natives(data._spec_dict(), [convert_(n, use_dlpack=False) for n in data._natives()]) if shape is None: return data else: if None in shape.sizes: shape = shape.with_sizes(data.shape.sizes) return data._with_shape_replaced(shape) from ._tree import layout if isinstance(data, str) or data is None: return layout(data) elif isinstance(data, (Number, bool)): assert not shape, f"Trying to create a zero-dimensional Tensor from value '{data}' but shape={shape}" if convert: data = default_backend().as_tensor(data, convert_external=True) return Dense(data, (), EMPTY_SHAPE, default_backend() if convert else NUMPY) if isinstance(data, (tuple, list)): if all(isinstance(d, (bool, int, float, complex, np.generic)) for d in data): data = np.array(data) assert data.dtype != object data = NUMPY.auto_cast1(data) elif all(isinstance(d, str) for d in data): return layout(data, shape or default_list_dim) else: try: inner_shape = [] if shape is None else [shape[1:]] tensors = [d if isinstance(d, Tensor) else tensor(d, *inner_shape, convert=convert) for d in data] sh = merge_shapes(*tensors) from ._magic_ops import expand, stack tensors = [expand(t, sh) for t in tensors] return stack(tensors, default_list_dim if shape is None else shape[0].with_sizes([len(tensors)])) except IncompatibleShapes: assert not convert, f"Cannot convert {data} to tensor given shape {shape}" return layout(data, shape or default_list_dim) except ValueError: assert not convert, f"Cannot convert {data} to tensor" return layout(data, shape or default_list_dim) try: backend = choose_backend(data) sizes = backend.staticshape(data) if backend.is_sparse(data): from ._sparse import from_sparse_native return from_sparse_native(data, shape, indices_constant=backend == NUMPY, convert=convert) if shape is None: assert backend.ndims(data) <= 1, "Specify dimension names for tensors with more than 1 dimension" shape = default_list_dim if backend.ndims(data) == 1 else EMPTY_SHAPE shape = shape.with_sizes(sizes) elif 0 not in sizes: # fill in sizes or check them if len(sizes) 
!= len(shape): raise IncompatibleShapes(f"Rank of given shape {shape} does not match data with sizes {sizes}") for size, s in zip(sizes, shape.sizes): if s is not None: assert s == size, f"Given shape {shape} does not match data with sizes {sizes}. Consider leaving the sizes undefined." shape = shape.with_sizes(sizes, keep_labels=True) if convert: data = convert_(data, use_dlpack=False) backend = default_backend() if 0 in sizes: present_shape = shape[:len(sizes)].with_sizes(sizes) return Dense(data, present_shape.names, shape.with_sizes(shape.undefined.with_sizes(0)).with_sizes(present_shape), backend) return Dense(data, shape.names, shape, backend) except NoBackendFound: raise ValueError(f"{type(data)} is not supported. Only (Tensor, tuple, list, np.ndarray, native tensors) are allowed.\nCurrent backends: {BACKENDS}")Create a Tensor from the specified
`data`. If `convert=True`, converts `data` to the preferred format of the default backend.

`data` must be one of the following:

- Number: returns a dimensionless Tensor.
- Native tensor such as NumPy array, TensorFlow tensor or PyTorch tensor.
- `tuple` or `list` of numbers: backs the Tensor with a native tensor.
- `tuple` or `list` of non-numbers: creates tensors for the items and stacks them.
- Tensor: renames dimensions and dimension types if `names` is specified. Converts all internal native values of the tensor if `convert=True`.
- Shape: creates a 1D tensor listing the dimension sizes.

While specifying `names` is optional in some cases, it is recommended to always specify them. Dimension types are always inferred from the dimension names if specified.

Implementations:

- NumPy: `numpy.array`
- PyTorch: `torch.tensor`, `torch.from_numpy`
- TensorFlow: `tf.convert_to_tensor`
- Jax: `jax.numpy.array`

See Also:
`wrap()` which uses `convert=False`, `layout()`.

Args
data- Native tensor, sparse COO / CSR / CSC matrix, scalar, sequence, `Shape` or `Tensor`.
shape- Ordered dimensions and types. If sizes are defined, they will be checked against `data`. When passing multiple shapes, they will be concatenated. Duplicate names are not allowed. Instead of `Shape` instances, you may pass strings specifying dims in the format `name:t` or `name:t=(labels)` where `t` refers to the type letter, one of s,i,c,d,b. Alternatively, you can pass a single `list` of shapes which will call `reshaped_tensor()`. This allows for unpacking native dims into multiple dims.
convert- If True, converts the data to the native format of the current default backend. If False, wraps the data in a `Tensor` but keeps the given data reference if possible.

Raises
AssertionError- If dimension names are not provided and cannot automatically be inferred.
ValueError- If `data` is not tensor-like.

Returns
Tensor containing the same values as `data`.

Examples
>>> tensor([1, 2, 3], channel(vector='x,y,z'))
(x=1, y=2, z=3)
>>> tensor([1., 2, 3], channel(vector='x,y,z'))
(x=1.000, y=2.000, z=3.000) float64
>>> tensor(numpy.zeros([10, 8, 6, 2]), batch('batch'), spatial('x,y'), channel(vector='x,y'))
(batchᵇ=10, xˢ=8, yˢ=6, vectorᶜ=x,y) float64 const 0.0
>>> tensor([(0, 1), (0, 2), (1, 3)], instance('particles'), channel(vector='x,y'))
(x=0, y=1); (x=0, y=2); (x=1, y=3) (particlesⁱ=3, vectorᶜ=x,y)
>>> tensor(numpy.random.randn(10))
(vectorᶜ=10) float64 -0.128 ± 1.197 (-2e+00...2e+00)

def tensor_like(existing_tensor: phiml.math._tensors.Tensor,
values: numbers.Number | phiml.math._tensors.Tensor | bool,
value_order: str = None)-
def tensor_like(existing_tensor: Tensor, values: Union[Tensor, Number, bool], value_order: str = None): """ Creates a tensor with the same format and shape as `existing_tensor`. Args: existing_tensor: Any `Tensor`, sparse or dense. values: New values to replace the existing values by. If `existing_tensor` is sparse, `values` must broadcast to the instance dimension listing the stored indices. value_order: Order of `values` compared to `existing_tensor`, only relevant if `existing_tensor` is sparse. If `'original'`, the values are ordered like the values that was used to create the first tensor with this sparsity pattern. If `'as existing'`, the values match the current order of `existing_tensor`. Note that the order of values may be changed upon creating a sparse tensor. Returns: `Tensor` """ assert value_order in ['original', 'as existing', None] if isinstance(existing_tensor, (SparseCoordinateTensor, CompressedSparseMatrix, CompactSparseTensor)): if value_order is None: assert not instance(values), f"When creating a sparse tensor from a list of values, value_order must be specified." if instance(values): values = rename_dims(values, instance, instance(existing_tensor._values)) values = expand(values, existing_tensor._values.shape.only([instance, dual]) if isinstance(existing_tensor, CompactSparseTensor) else instance(existing_tensor._values)) if value_order == 'original' and isinstance(existing_tensor, CompressedSparseMatrix) and existing_tensor._uncompressed_indices_perm is not None: values = values[existing_tensor._uncompressed_indices_perm] if isinstance(existing_tensor, CompressedSparseMatrix) and existing_tensor._uncompressed_offset is not None: from ._ops import where values = where(existing_tensor._valid_mask(), values, 0) return existing_tensor._with_values(values) if not is_sparse(existing_tensor): if instance(values): return unpack_dim(values, instance, existing_tensor.shape.non_batch) else: return expand(values, existing_tensor.shape.non_batch) raise NotImplementedErrorCreates a tensor with the same format and shape as
`existing_tensor`.

Args
existing_tensor- Any `Tensor`, sparse or dense.
values- New values to replace the existing values by. If `existing_tensor` is sparse, `values` must broadcast to the instance dimension listing the stored indices.
value_order- Order of `values` compared to `existing_tensor`, only relevant if `existing_tensor` is sparse. If `'original'`, the values are ordered like the values that were used to create the first tensor with this sparsity pattern. If `'as existing'`, the values match the current order of `existing_tensor`. Note that the order of values may be changed upon creating a sparse tensor.

Returns
`Tensor`
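A minimal sketch for the dense case (hypothetical values): the scalar is expanded to the non-batch shape of the existing tensor.

>>> from phiml.math import tensor, tensor_like, instance
>>> t = tensor([1., 2., 3.], instance('points'))
>>> tensor_like(t, 0.).shape
(pointsⁱ=3)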
def to_complex(x: ~TensorOrTree) ‑> ~TensorOrTree-
def to_complex(x: TensorOrTree) -> TensorOrTree: """ Converts the given tensor to complex floating point format with the currently specified precision. The precision can be set globally using `math.set_global_precision()` and locally using `with math.precision()`. See the documentation at https://tum-pbs.github.io/PhiML/Data_Types.html See Also: `cast()`. Args: x: values to convert Returns: `Tensor` of same shape as `x` """ dtype = default_backend().complex_type return tree_map(lambda t: t.__cast__(dtype) if hasattr(t, '__cast__') else choose_backend(t).cast(t, dtype), x, all_attributes, op_name='cast')Converts the given tensor to complex floating point format with the currently specified precision.
The precision can be set globally using `math.set_global_precision()` and locally using `with math.precision()`.

See the documentation at https://tum-pbs.github.io/PhiML/Data_Types.html

See Also:
`cast()`.

Args
x- values to convert

Returns
`Tensor` of same shape as `x`

def to_device(value,
device: phiml.backend._backend.ComputeDevice | str,
convert=True,
use_dlpack=True)-
def to_device(value, device: Union[ComputeDevice, str], convert=True, use_dlpack=True): """ Allocates the tensors of `value` on `device`. If the value already exists on that device, this function may either create a copy of `value` or return `value` directly. See Also: `to_cpu()`, `to_gpu()`. Args: value: `Tensor` or `phiml.math.magic.PhiTreeNode` or native tensor. device: Device to allocate value on. Either `ComputeDevice` or category `str`, such as `'CPU'` or `'GPU'`. convert: Whether to convert tensors that do not belong to the corresponding backend to compatible native tensors. If `False`, this function has no effect on numpy tensors. use_dlpack: Only if `convert==True`. Whether to use the DLPack library to convert from one GPU-enabled backend to another. Returns: Same type as `value`. """ assert isinstance(device, (ComputeDevice, str)), f"device must be a ComputeDevice or str but got {type(device)}" return tree_map(_to_device, value, device=device, convert_to_backend=convert, use_dlpack=use_dlpack)Allocates the tensors of
`value` on `device`. If the value already exists on that device, this function may either create a copy of `value` or return `value` directly.

See Also:
`to_cpu()`, `to_gpu()`.

Args
value- `Tensor` or `PhiTreeNode` or native tensor.
device- Device to allocate value on. Either `ComputeDevice` or category `str`, such as `'CPU'` or `'GPU'`.
convert- Whether to convert tensors that do not belong to the corresponding backend to compatible native tensors. If `False`, this function has no effect on NumPy tensors.
use_dlpack- Only if `convert==True`. Whether to use the DLPack library to convert from one GPU-enabled backend to another.

Returns
Same type as `value`.

def to_dict(value: phiml.math._tensors.Tensor | phiml.math._shape.Shape)-
def to_dict(value: Union[Tensor, Shape]): """ Returns a serializable form of a `Tensor` or `Shape`. The result can be written to a JSON file, for example. See Also: `from_dict()`. Args: value: `Tensor` or `Shape` Returns: Serializable Python tree of primitives """ if isinstance(value, SHAPE_TYPES): return value._to_dict(include_sizes=True) elif isinstance(value, Tensor): return value._to_dict() raise ValueError(f"Cannot convert {value} to a dict")Returns a serializable form of a
`Tensor` or `Shape`. The result can be written to a JSON file, for example.

See Also:
`from_dict()`.

Args
value- `Tensor` or `Shape`

Returns
Serializable Python tree of primitives
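A minimal round-trip sketch with `from_dict()` (hypothetical values; the exact printed form may vary):

>>> from phiml.math import tensor, to_dict, from_dict, channel
>>> spec = to_dict(tensor([1, 2, 3], channel(vector='x,y,z')))  # JSON-serializable
>>> from_dict(spec)
(x=1, y=2, z=3)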
def to_float(x: ~TensorOrTree) ‑> ~TensorOrTree-
def to_float(x: TensorOrTree) -> TensorOrTree: """ Converts the given tensor to floating point format with the currently specified precision. The precision can be set globally using `math.set_global_precision()` and locally using `with math.precision()`. See the documentation at https://tum-pbs.github.io/PhiML/Data_Types.html See Also: `cast()`. Args: x: `Tensor` or `phiml.math.magic.PhiTreeNode` to convert Returns: `Tensor` or `phiml.math.magic.PhiTreeNode` matching `x`. """ dtype = default_backend().float_type return tree_map(lambda t: t.__cast__(dtype) if hasattr(t, '__cast__') else choose_backend(t).cast(t, dtype), x, all_attributes, op_name='cast')Converts the given tensor to floating point format with the currently specified precision.
The precision can be set globally using `math.set_global_precision()` and locally using `with math.precision()`.

See the documentation at https://tum-pbs.github.io/PhiML/Data_Types.html

See Also:
`cast()`.

Args
x- `Tensor` or `PhiTreeNode` to convert

Returns
`Tensor` or `PhiTreeNode` matching `x`.
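A minimal sketch (with the default 32-bit precision, integers become float32):

>>> from phiml.math import tensor, to_float
>>> to_float(tensor([1, 2, 3])).dtype
float32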
def to_format(x: phiml.math._tensors.Tensor, format: str)-
def to_format(x: Tensor, format: str): """ Converts a `Tensor` to the specified sparse format or to a dense tensor. See Also: `dense`, `sparse`, `to_coo`, `to_csr`, `to_csc`, `to_compact_rows`, `to_compact_cols`. Args: x: Sparse or dense `Tensor` format: Target format. One of `('dense', 'sparse', 'coo', 'csr', 'csc', 'compact-rows', 'compact-cols')`. Returns: `Tensor` of the specified format. """ assert format in ('dense', 'sparse', 'coo', 'csr', 'csc', 'compact-rows', 'compact-cols'), f"Invalid format: '{format}'. Must be one of 'coo', 'csr', 'csc', 'dense'" if format == 'sparse': if is_sparse(x): return x else: format = 'csr' if x.default_backend.supports(Backend.mul_csr_dense) else 'coo' if get_format(x) == format: return x if format == 'dense': return dense(x) if isinstance(x, SparseCoordinateTensor): if format == 'csr': return x.compress_rows() elif format == 'csc': return x.compress_cols() elif format == 'compact-cols': x = x.compress_rows() elif format == 'compact-rows': x = x.compress_cols() if isinstance(x, CompressedSparseMatrix): if format == 'coo': return x.decompress() elif format == 'compact-cols' and get_format(x) == 'csr': return compressed_to_compact(x, False) elif format == 'compact-rows' and get_format(x) == 'csc': return compressed_to_compact(x, False) else: return to_format(x.decompress(), format) elif isinstance(x, CompactSparseTensor): if format == 'coo': return x.to_coo() elif format == 'csr' and dual(x._compressed_dims): return x.to_cs() elif format == 'csc' and primal(x._compressed_dims): return x.to_cs() else: return to_format(x.to_coo(), format) elif isinstance(x, TensorStack): converted = [to_format(t, format) for t in x._tensors] return TensorStack(converted, x._stack_dim) else: # dense to sparse from ._ops import nonzero indices = nonzero(rename_dims(x, channel, instance)) values = x[indices] coo = SparseCoordinateTensor(indices, values, x.shape, can_contain_double_entries=False, indices_sorted=False, indices_constant=x.default_backend.name == 'numpy') return to_format(coo, format)Converts a
`Tensor` to the specified sparse format or to a dense tensor.

See Also:
`dense()`, `sparse`, `to_coo`, `to_csr`, `to_csc`, `to_compact_rows`, `to_compact_cols`.

Args
x- Sparse or dense `Tensor`
format- Target format. One of `('dense', 'sparse', 'coo', 'csr', 'csc', 'compact-rows', 'compact-cols')`.

Returns
`Tensor` of the specified format.
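A minimal sketch converting a small dense matrix to COO and back (hypothetical values):

>>> from phiml.math import wrap, spatial, to_format, get_format
>>> m = wrap([[1., 0.], [0., 2.]], spatial('y,x'))
>>> coo = to_format(m, 'coo')
>>> get_format(coo)
'coo'
>>> get_format(to_format(coo, 'dense'))
'dense'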
def to_int32(x: ~TensorOrTree) ‑> ~TensorOrTree-
def to_int32(x: TensorOrTree) -> TensorOrTree: """ Converts the `Tensor` or `phiml.math.magic.PhiTreeNode` `x` to 32-bit integer. """ return tree_map(lambda t: t.__cast__(INT32) if hasattr(t, '__cast__') else choose_backend(t).cast(t, INT32), x, all_attributes, op_name='cast')Converts the
`Tensor` or `PhiTreeNode` `x` to 32-bit integer.

def to_int64(x: ~TensorOrTree) ‑> ~TensorOrTree-
def to_int64(x: TensorOrTree) -> TensorOrTree: """ Converts the `Tensor` or `phiml.math.magic.PhiTreeNode` `x` to 64-bit integer. """ return tree_map(lambda t: t.__cast__(INT64) if hasattr(t, '__cast__') else choose_backend(t).cast(t, INT64), x, all_attributes, op_name='cast')Converts the
`Tensor` or `PhiTreeNode` `x` to 64-bit integer.

def trace_check(traced_function, *args, **kwargs) ‑> Tuple[bool, str]-
def trace_check(traced_function, *args, **kwargs) -> Tuple[bool, str]: """ Tests if `f(*args, **kwargs)` has already been traced for arguments compatible with `args` and `kwargs`. If true, jit-compiled functions are very fast since the Python function is not actually called anymore. Args: traced_function: Transformed Function, e.g. jit-compiled or linear function. *args: Hypothetical arguments to be passed to `f` **kwargs: Hypothetical keyword arguments to be passed to `f` Returns: result: `True` if there is an existing trace that can be used, `False` if `f` would have to be re-traced. message: A `str` that, if `result == False`, gives hints as to why `f` needs to be re-traced given `args` and `kwargs`. """ assert args or kwargs, f"Please pass the hypothetical function arguments to trace_check()" f = traced_function if isinstance(f, (JitFunction, GradientFunction, HessianFunction, CustomGradientFunction)): keys = f.traces.keys() use = 'jit' if isinstance(f, JitFunction) else 'gradient' elif isinstance(f, LinearFunction): keys = f.matrices_and_biases.keys() use = 'linear' else: raise ValueError(f"{f_name(f)} is not a traceable function. Only supports jit_compile, jit_compile_linear, gradient, custom_gradient, jacobian, hessian") key, *_ = key_from_args(args, kwargs, f.f_params, aux=f.auxiliary_args, use=use) if not keys: return False, "Function has not yet been traced" if key in keys: return True, "" traced_key = next(iter(keys)) # ToDo compare against all with equality_by_shape_and_value(): cond_equal = key.auxiliary_kwargs == traced_key.auxiliary_kwargs if isinstance(cond_equal, Tensor): cond_equal = cond_equal.all if not cond_equal: return False, "Auxiliary arguments do not match" # shapes need not be compared because they are included in specs if traced_key.tree.keys() != key.tree.keys(): return False, f"Different primary arguments passed: {set(traced_key.tree.keys())} vs {set(key.tree.keys())}" for name in traced_key.tree.keys(): if traced_key.tree[name] != key.tree[name]: return False, f"Primary argument '{name}' differs in non-traced variables: {traced_key.tree[name]} vs {key.tree[name]}. Make sure the corresponding class overrides __eq__()." with equality_by_shape_and_value(): if traced_key.specs != key.specs: return False, "Traced variables differ in shape" if traced_key.backend != key.backend: return False, f"Function was not traced with backend {key.backend}" if traced_key.spatial_derivative_order != key.spatial_derivative_order: return False, f"Different in spatial_derivative_order. This is likely an internal problem." return True, ""Tests if
`f(*args, **kwargs)` has already been traced for arguments compatible with `args` and `kwargs`. If true, jit-compiled functions are very fast since the Python function is not actually called anymore.

Args
traced_function- Transformed function, e.g. jit-compiled or linear function.
*args- Hypothetical arguments to be passed to `f`
**kwargs- Hypothetical keyword arguments to be passed to `f`

Returns
result- `True` if there is an existing trace that can be used, `False` if `f` would have to be re-traced.
message- A `str` that, if `result == False`, gives hints as to why `f` needs to be re-traced given `args` and `kwargs`.
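A minimal sketch, assuming `jit_compile` from this module (only the boolean result is shown, since the message text may vary):

>>> from phiml.math import jit_compile, tensor, trace_check
>>> f = jit_compile(lambda x: x * 2)
>>> trace_check(f, tensor([1., 2.]))[0]  # not yet traced
False
>>> _ = f(tensor([1., 2.]))  # first call traces f
>>> trace_check(f, tensor([1., 2.]))[0]
True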
def unpack_dim(value,
dim: str | Sequence | set | phiml.math._shape.Shape | Callable | None,
*unpacked_dims: phiml.math._shape.Shape | Sequence[phiml.math._shape.Shape],
**kwargs)-
def unpack_dim(value, dim: DimFilter, *unpacked_dims: Union[Shape, Sequence[Shape]], **kwargs): """ Decompresses a dimension by unstacking the elements along it. This function replaces the traditional `reshape` for these cases. The compressed dimension `dim` is assumed to contain elements laid out according to the order of `unpacked_dims`. If `dim` does not exist on `value`, this function will return `value` as-is. This includes primitive types. See Also: `pack_dims()` Args: value: `phiml.math.magic.Shapable`, such as `Tensor`, for which one dimension should be split. dim: Single dimension to be decompressed. *unpacked_dims: Either vararg `Shape`, ordered dims to replace `dim`, fulfilling `unpacked_dims.volume == shape(self)[dim].rank`. This results in a single tensor output. Alternatively, pass a `tuple` or `list` of shapes to unpack a dim into multiple tensors whose combined volumes match `dim.size`. **kwargs: Additional keyword arguments required by specific implementations. Adding spatial dims to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions. Adding batch dims must always work without keyword arguments. Returns: Same type as `value`. Examples: >>> unpack_dim(math.zeros(instance(points=12)), 'points', spatial(x=4, y=3)) (xˢ=4, yˢ=3) const 0.0 """ if len(unpacked_dims) == 1 and isinstance(unpacked_dims[0], (tuple, list)): from ._ops import unflatten_unpack return unflatten_unpack(value, dim, unpacked_dims[0]) if isinstance(value, (Number, bool)): return value if DEBUG_CHECKS: assert isinstance(value, Shapable) and isinstance(value, Sliceable) and isinstance(value, Shaped), f"value must be Shapable but got {type(value)}" dim = shape(value).only(dim) if dim.is_empty: return value # Nothing to do, maybe expand? assert dim.rank == 1, f"unpack_dim requires as single dimension to be unpacked but got {dim}" dim = dim.name unpacked_dims = concat_shapes_(*unpacked_dims) if unpacked_dims.rank == 0: return value[{dim: 0}] # remove dim elif unpacked_dims.rank == 1: return rename_dims(value, dim, unpacked_dims, **kwargs) # --- First try __unpack_dim__ if hasattr(value, '__unpack_dim__'): result = value.__unpack_dim__(dim, unpacked_dims, **kwargs) if result is not NotImplemented: return result # --- Next try Tree Node --- if isinstance(value, PhiTreeNode) and all_attributes(value): new_attributes = {a: unpack_dim(getattr(value, a), dim, unpacked_dims, **kwargs) for a in all_attributes(value)} return copy_with(value, **new_attributes) # --- Fallback: unstack and stack --- if shape(value).only(dim).volume > 8: warnings.warn(f"pack_dims() default implementation is slow on large dims ({shape(value).only(dim)}). Please implement __unpack_dim__() for {type(value).__name__} as defined in phiml.math.magic", RuntimeWarning, stacklevel=2) unstacked = unstack(value, dim) for dim in reversed(unpacked_dims): unstacked = [stack(unstacked[i:i+dim.size], dim, **kwargs) for i in range(0, len(unstacked), dim.size)] return unstacked[0]Decompresses a dimension by unstacking the elements along it. This function replaces the traditional
`reshape` for these cases. The compressed dimension `dim` is assumed to contain elements laid out according to the order of `unpacked_dims`.

If `dim` does not exist on `value`, this function will return `value` as-is. This includes primitive types.

See Also:
`pack_dims()`

Args
value- `Shapable`, such as `Tensor`, for which one dimension should be split.
dim- Single dimension to be decompressed.
*unpacked_dims- Either vararg `Shape`, ordered dims to replace `dim`, fulfilling `unpacked_dims.volume == shape(self)[dim].size`. This results in a single tensor output. Alternatively, pass a `tuple` or `list` of shapes to unpack a dim into multiple tensors whose combined volumes match `dim.size`.
**kwargs- Additional keyword arguments required by specific implementations. Adding spatial dims to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions. Adding batch dims must always work without keyword arguments.

Returns
Same type as `value`.

Examples
>>> unpack_dim(math.zeros(instance(points=12)), 'points', spatial(x=4, y=3))
(xˢ=4, yˢ=3) const 0.0

def unravel_index(index: phiml.math._tensors.Tensor,
resolution: phiml.math._shape.Shape,
index_dim=(indexᶜ))-
def unravel_index(index: Tensor, resolution: Shape, index_dim=channel('index')):
    """
    Computes a vector index from a scalar index.

    Args:
        index: Scalar index. May have a channel dimension of size 1.
        resolution: `Shape`

    Returns:
        `Tensor` like `index` but with `index_dim` listing the dims in `resolution`.
    """
    index = squeeze(index, channel)
    if not isinstance(index, Dense):
        raise NotImplementedError
    nat_v_idx = index.backend.unravel_index(index._native, resolution.sizes)
    v_idx = wrap(nat_v_idx, index.shape + index_dim.with_size(resolution.names))
    return v_idx

Computes a vector index from a scalar index.

Args
index- Scalar index. May have a channel dimension of size 1.
resolution- `Shape`

Returns
`Tensor` like `index` but with `index_dim` listing the dims in `resolution`.

def unstack(value: ~MagicType,
dim: str | Sequence | set | phiml.math._shape.Shape | Callable | None,
expand=False) ‑> Tuple[~MagicType, ...]-
def unstack(value: MagicType, dim: DimFilter, expand=False) -> Tuple[MagicType, ...]: """ Un-stacks a `Sliceable` along one or multiple dimensions. If multiple dims are given, the order of elements will be according to the dimension order in `dim`, i.e. elements along the last dimension will be neighbors in the returned `tuple`. If no dimension is given or none of the given dims exists on `value`, returns a list containing only `value`. See Also: `phiml.math.slice`. Args: value: `phiml.math.magic.Shapable`, such as `phiml.math.Tensor` dim: Dimensions as `Shape` or comma-separated `str` or dimension type, i.e. `channel`, `spatial`, `instance`, `batch`. expand: If `True`, `dim` must be a `Shape` and the returned tuple will have length `dim.volume`. Otherwise, only existing dims are unstacked. Returns: `tuple` of objects matching the type of `value`. Examples: >>> unstack(expand(0, spatial(x=5)), 'x') (0.0, 0.0, 0.0, 0.0, 0.0) """ if DEBUG_CHECKS: assert isinstance(value, Sliceable) and isinstance(value, Shaped), f"Cannot unstack {type(value).__name__}. Must be Sliceable and Shaped, see https://tum-pbs.github.io/PhiML/phiml/math/magic.html" dims = shape(value).only(dim, reorder=True) if expand: assert isinstance(dim, Shape) if dim not in dims: value = expand_(value, dim) dims = dim if dims.rank == 0: return value, if dims.rank == 1: if hasattr(value, '__unstack__'): result = value.__unstack__(dims.names) if result is not NotImplemented: if DEBUG_CHECKS: assert isinstance(result, tuple), f"__unstack__ must return a tuple but got {type(result)}" assert all([isinstance(item, Sliceable) for item in result]), f"__unstack__ must return a tuple of Sliceable objects but not all items were sliceable in {result}" return result from ._tree import slice_ return tuple([slice_(value, {dims.name: i}) for i in range(dims.size)]) else: # multiple dimensions if hasattr(value, '__pack_dims__'): packed_dim = batch('_unstack') value_packed = value.__pack_dims__(dims, packed_dim, pos=None) if value_packed is not NotImplemented: return unstack(value_packed, packed_dim) unstack_dim = _any_uniform_dim(dims) first_unstacked = unstack(value, unstack_dim) inner_unstacked = [unstack(v, dims.without(unstack_dim)) for v in first_unstacked] return sum(inner_unstacked, ())Un-stacks a
`Sliceable` along one or multiple dimensions.

If multiple dims are given, the order of elements will be according to the dimension order in `dim`, i.e. elements along the last dimension will be neighbors in the returned `tuple`. If no dimension is given or none of the given dims exists on `value`, returns a list containing only `value`.

See Also:
`phiml.math.slice`.

Args
value- `Shapable`, such as `Tensor`
dim- Dimensions as `Shape` or comma-separated `str` or dimension type, i.e. `channel`, `spatial`, `instance`, `batch`.
expand- If `True`, `dim` must be a `Shape` and the returned tuple will have length `dim.volume`. Otherwise, only existing dims are unstacked.

Returns
`tuple` of objects matching the type of `value`.

Examples
>>> unstack(expand(0, spatial(x=5)), 'x')
(0.0, 0.0, 0.0, 0.0, 0.0)

def upsample2x(grid: phiml.math._tensors.Tensor,
padding: Extrapolation = zero-gradient,
dims: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function spatial>,
padding_kwargs: dict = None) ‑> phiml.math._tensors.Tensor-
def upsample2x(grid: Tensor, padding: Extrapolation = extrapolation.BOUNDARY, dims: DimFilter = spatial, padding_kwargs: dict = None) -> Tensor: """ Resamples a regular grid to double the number of spatial sample points per dimension. The grid values at the new points are determined via linear interpolation. Args: grid: half-size grid padding: grid extrapolation dims: dims along which up-sampling is applied. If None, up-sample along all spatial dims. grid: Tensor: padding: Extrapolation: (Default value = extrapolation.BOUNDARY) dims: tuple or None: (Default value = None) padding_kwargs: Additional keyword arguments to be passed to `phiml.math.pad()`. Returns: double-size grid """ if grid is None: return None for dim in grid.shape.only(dims): left, center, right = shift(grid, (-1, 0, 1), dim.names, padding, None, padding_kwargs=padding_kwargs) interp_left = 0.25 * left + 0.75 * center interp_right = 0.75 * center + 0.25 * right stacked = math.stack_tensors([interp_left, interp_right], channel(_interleave='left,right')) grid = pack_dims(stacked, (dim.name, '_interleave'), dim) return gridResamples a regular grid to double the number of spatial sample points per dimension. The grid values at the new points are determined via linear interpolation.
Args
grid- half-size grid
padding- grid extrapolation (default: `extrapolation.BOUNDARY`)
dims- dims along which up-sampling is applied. If None, up-sample along all spatial dims.
padding_kwargs- Additional keyword arguments to be passed to `pad()`.

Returns
double-size grid
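A minimal sketch (hypothetical values): each spatial dim doubles in size.

>>> from phiml.math import wrap, spatial, upsample2x
>>> grid = wrap([0., 1., 2., 3.], spatial('x'))
>>> upsample2x(grid).shape
(xˢ=8)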
def use(backend: str | phiml.backend._backend.Backend) ‑> phiml.backend._backend.Backend-
def set_global_default_backend(backend: Union[str, Backend]) -> Backend: """ Sets the given backend as default. This setting can be overridden using `with backend:`. See `default_backend()`, `choose_backend()`. Args: backend: `Backend` or backend name to set as default. Possible names are `'torch'`, `'tensorflow'`, `'jax'`, `'numpy'`. Returns: The chosen backend as a `Backend´ instance. """ if isinstance(backend, ModuleType): backend = str(backend) if isinstance(backend, str): init_backend(backend) matches = [b for b in BACKENDS if b.name == backend.lower()] if not matches: raise ValueError(f"Illegal backend: '{backend}'") backend = matches[0] assert isinstance(backend, Backend), backend if backend not in BACKENDS: BACKENDS.append(backend) if _DEFAULT[0] is not backend: _DEFAULT[0] = backend ML_LOGGER.info(f"Φ-ML's default backend is now {backend}") return backendSets the given backend as default. This setting can be overridden using
`with backend:`.

See `default_backend()`, `backend_for()`.

Args
backend- `Backend` or backend name to set as default. Possible names are `'torch'`, `'tensorflow'`, `'jax'`, `'numpy'`.

Returns
The chosen backend as a `Backend` instance.
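A minimal sketch (the NumPy backend is always available; `'torch'`, `'tensorflow'` and `'jax'` require the corresponding packages):

>>> from phiml.math import use
>>> backend = use('numpy')  # tensors created from here on are backed by NumPy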
def vec(name: str | phiml.math._shape.Shape = 'vector',
*sequence,
tuple_dim=(sequenceˢ),
list_dim=(sequenceⁱ),
**components) ‑> phiml.math._tensors.Tensor-
def vec(name: Union[str, Shape] = 'vector', *sequence, tuple_dim=spatial('sequence'), list_dim=instance('sequence'), **components) -> Tensor: """ Lay out the given values along a channel dimension without converting them to the current backend. Args: name: Dimension name. *sequence: Component values that will also be used as labels. If specified, `components` must be empty. **components: Values by component name. If specified, no additional positional arguments must be given. tuple_dim: Dimension for `tuple` values passed as components, e.g. `vec(x=(0, 1), ...)` list_dim: Dimension for `list` values passed as components, e.g. `vec(x=[0, 1], ...)` Returns: `Tensor` Examples: >>> vec(x=1, y=0, z=-1) (x=1, y=0, z=-1) >>> vec(x=1., z=0) (x=1.000, z=0.000) >>> vec(x=tensor([1, 2, 3], instance('particles')), y=0) (x=1, y=0); (x=2, y=0); (x=3, y=0) (particlesⁱ=3, vectorᶜ=x,y) >>> vec(x=0, y=[0, 1]) (x=0, y=0); (x=0, y=1) (vectorᶜ=x,y, sequenceⁱ=2) >>> vec(x=0, y=(0, 1)) (x=0, y=0); (x=0, y=1) (sequenceˢ=2, vectorᶜ=x,y) """ dim = auto(name, channel) assert isinstance(dim, SHAPE_TYPES), f"name must be a str or Shape but got '{type(name)}'" if sequence: assert not components, "vec() must be given either positional or keyword arguments but not both" if len(sequence) == 1 and isinstance(sequence[0], (tuple, list)): sequence = sequence[0] labels = [str(v) for v in sequence] if len(set(labels)) == len(labels): dim = dim.with_size(labels) return wrap(sequence, dim) else: def wrap_sequence(value): if isinstance(value, tuple): return wrap(value, tuple_dim) elif isinstance(value, list): return wrap(value, list_dim) else: return value components = {n: wrap_sequence(v) for n, v in components.items()} if not components: return wrap([], dim) return stack(components, dim, expand_values=True)Lay out the given values along a channel dimension without converting them to the current backend.
Args
name- Dimension name.
*sequence- Component values that will also be used as labels. If specified, `components` must be empty.
**components- Values by component name. If specified, no additional positional arguments must be given.
tuple_dim- Dimension for `tuple` values passed as components, e.g. `vec(x=(0, 1), ...)`
list_dim- Dimension for `list` values passed as components, e.g. `vec(x=[0, 1], ...)`

Returns
`Tensor`

Examples
>>> vec(x=1, y=0, z=-1)
(x=1, y=0, z=-1)
>>> vec(x=1., z=0)
(x=1.000, z=0.000)
>>> vec(x=tensor([1, 2, 3], instance('particles')), y=0)
(x=1, y=0); (x=2, y=0); (x=3, y=0) (particlesⁱ=3, vectorᶜ=x,y)
>>> vec(x=0, y=[0, 1])
(x=0, y=0); (x=0, y=1) (vectorᶜ=x,y, sequenceⁱ=2)
>>> vec(x=0, y=(0, 1))
(x=0, y=0); (x=0, y=1) (sequenceˢ=2, vectorᶜ=x,y)

def vec_length(*args, **kwargs)-
def length(*args, **kwargs): """Deprecated. Use `norm` instead.""" warnings.warn("phiml.math.length is deprecated in favor of phiml.math.norm", DeprecationWarning, stacklevel=2) return norm(*args, **kwargs)Deprecated. Use
`norm()` instead.

def vec_normalize(vec: phiml.math._tensors.Tensor,
vec_dim: str | Sequence | set | phiml.math._shape.Shape | Callable | None = <function channel>,
epsilon=None,
allow_infinite=False,
allow_zero=False)-
def normalize(vec: Tensor, vec_dim: DimFilter = channel, epsilon=None, allow_infinite=False, allow_zero=False): """ Normalizes the vectors in `vec`. If `vec_dim` is None, the combined channel dimensions of `vec` are interpreted as a vector. Args: vec: `Tensor` to normalize. vec_dim: Dimensions to normalize over. By default, all channel dimensions are used to compute the vector length. epsilon: (Optional) Zero-length threshold. Vectors shorter than this length yield the unit vector (1, 0, 0, ...). If not specified, the zero-vector yields `NaN` as it cannot be normalized. allow_infinite: Allow infinite components in vectors. These vectors will then only points towards the infinite components. allow_zero: Whether to return zero vectors for inputs smaller `epsilon` instead of a unit vector. """ vec_dim = vec.shape.only(vec_dim) if allow_infinite: # replace inf by 1, finite by 0 is_infinite = ~math.is_finite(vec) inf_mask = is_infinite & ~math.is_nan(vec) vec = math.where(math.any_(is_infinite, vec_dim), inf_mask, vec) if epsilon is None: return vec / norm(vec, vec_dim=vec_dim) le = norm(vec, vec_dim=vec_dim, eps=epsilon**2 * .99) unit_vec = 0 if allow_zero else stack([1] + [0] * (vec_dim.volume - 1), vec_dim) return math.where(abs(le) <= epsilon, unit_vec, vec / le)Normalizes the vectors in
`vec`. If `vec_dim` is None, the combined channel dimensions of `vec` are interpreted as a vector.

Args
vec- `Tensor` to normalize.
vec_dim- Dimensions to normalize over. By default, all channel dimensions are used to compute the vector length.
epsilon- (Optional) Zero-length threshold. Vectors shorter than this length yield the unit vector (1, 0, 0, …). If not specified, the zero-vector yields `NaN` as it cannot be normalized.
allow_infinite- Allow infinite components in vectors. These vectors will then only point towards the infinite components.
allow_zero- Whether to return zero vectors for inputs smaller than `epsilon` instead of a unit vector.
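A minimal sketch (hypothetical values):

>>> from phiml.math import vec, vec_normalize
>>> vec_normalize(vec(x=3., y=4.))
(x=0.600, y=0.800)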
def vec_squared(*args, **kwargs)-
def vec_squared(*args, **kwargs): """Deprecated. Use `squared_norm` instead.""" warnings.warn("phiml.math.vec_squared is deprecated in favor of phiml.math.squared_norm", DeprecationWarning, stacklevel=2) return squared_norm(*args, **kwargs)Deprecated. Use
`squared_norm()` instead.

def when_available(runnable: Callable, *tensor_args: phiml.math._tensors.Tensor)-
def when_available(runnable: Callable, *tensor_args: Tensor): """ Calls `runnable(*tensor_args)` once the concrete values of all tensors are available. In eager mode, `runnable` is called immediately. When jit-compiled, `runnable` is called after the jit-compiled function has returned. Args: runnable: Function to call as `runnable(*tensor_args)`. This can be a `lambda` function. *tensor_args: `Tensor` values to pass to `runnable` with concrete values. """ if _TRACING_LINEAR: raise RuntimeError(f"when_available() cannot be called inside a function marked as @jit_compile_linear") if all_available(*tensor_args): # eager or NumPy runnable(*tensor_args) else: assert _TRACING_JIT, f"tensors are not available but no JIT function is being traced. Maybe you are using external jit?" for jit_f in _TRACING_JIT: jit_f.extract_and_call(tensor_args, runnable)Calls
`runnable(*tensor_args)` once the concrete values of all tensors are available. In eager mode, `runnable` is called immediately. When jit-compiled, `runnable` is called after the jit-compiled function has returned.

Args
runnable- Function to call as `runnable(*tensor_args)`. This can be a `lambda` function.
*tensor_args- `Tensor` values to pass to `runnable` with concrete values.
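A minimal sketch: in eager mode the callback runs immediately; inside a jit-compiled function it is deferred until concrete values exist.

>>> from phiml.math import tensor, when_available
>>> t = tensor([1., 2., 3.])
>>> when_available(lambda concrete: print(concrete.shape), t)  # eager: prints right away
(vectorᶜ=3)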
def where(condition: phiml.math._tensors.Tensor | bool,
value_true: phiml.math._tensors.Tensor | float | int | Any = None,
value_false: phiml.math._tensors.Tensor | float | int | Any = None)-
def where(condition: Union[Tensor, bool], value_true: Union[Tensor, float, int, Any] = None, value_false: Union[Tensor, float, int, Any] = None): """ Builds a tensor by choosing either values from `value_true` or `value_false` depending on `condition`. If `condition` is not of type boolean, non-zero values are interpreted as True. This function requires non-None values for `value_true` and `value_false`. To get the indices of True / non-zero values, use :func:`nonzero`. Args: condition: determines where to choose values from value_true or from value_false value_true: Values to pick where `condition != 0 / True` value_false: Values to pick where `condition == 0 / False` Returns: `Tensor` containing dimensions of all inputs. """ if isinstance(condition, bool): return value_true if condition else value_false if value_true is None: assert value_false is None, f"where can be used either with value_true and value_false or without both but got only value_false" warnings.warn("Use nonzero() instead of where() to get indices of non-zero elements.", SyntaxWarning, stacklevel=2) return nonzero(condition) if not isinstance(value_true, Tensor) or not isinstance(value_false, Tensor): from .extrapolation import Extrapolation, where as ext_where if isinstance(value_true, Extrapolation) or isinstance(value_false, Extrapolation): return ext_where(condition, value_true, value_false) elif dataclasses.is_dataclass(value_true) or dataclasses.is_dataclass(value_false): assert type(value_true) is type(value_false), f"Dataclasses must have the same type but got {type(value_true)} and {type(value_false)}" from ..dataclasses import data_fields, replace new_values = {f.name: where(condition, getattr(value_true, f.name), getattr(value_false, f.name)) for f in data_fields(value_true)} return replace(value_true, **new_values) elif isinstance(value_true, dict): return {k: where(condition, value_true[k], value_false[k]) for k in value_true} condition = wrap(condition) value_true = wrap(value_true) value_false = wrap(value_false) def inner_where(c: Tensor, vt: Tensor, vf: Tensor): if isinstance(value_true, Layout) or isinstance(value_false, Layout): # result must be a Layout shape = merge_shapes(c, vt, vf) result = [] for idx in shape.meshgrid(): result.append(vt[idx] if c[idx].any else vf[idx]) return stack(result, shape) if isinstance(vt, Tracer) or isinstance(vf, Tracer) or isinstance(c, Tracer): tracer = [t for t in [vt, vf, c] if isinstance(t, Tracer)][0] trace = tracer._trace op = trace.add_op('fun', 'where', (c, vt, vf), {}, EMPTY_SHAPE) return op.add_output(c.shape & vt.shape & vf.shape, vt.dtype & vf.dtype, tracer._renamed) if vt._is_tracer or vf._is_tracer or c._is_tracer: return c * vt + (1 - c) * vf # ToDo this does not take NaN into account if is_sparse(c) or is_sparse(vt) or is_sparse(vf): if not same_sparsity_pattern(vt, vf, allow_const=True) or not same_sparsity_pattern(c, vt, allow_const=True): raise NotImplementedError(f"When calling where() on sparse tensors, all arguments must have the same sparsity pattern or be dense") sp_dims = sparse_dims(c) & sparse_dims(vt) & sparse_dims(vf) d_dims = dense_dims(c) & dense_dims(vt) & dense_dims(vf) if d_dims and d_dims in sp_dims: # sparse / dense conflict -> first apply sparse format any_sparse = c if is_sparse(c) else vt if is_sparse(vt) else vf sparse_ones = tensor_like(any_sparse, 1) c = c if is_sparse(c) else sparse_ones * c vt = vt if is_sparse(vt) else sparse_ones * vt vf = vf if is_sparse(vf) else sparse_ones * vf c_values = c._values if is_sparse(c) else c 
vt_values = vt._values if is_sparse(vt) else vt vf_values = vf._values if is_sparse(vf) else vf return c._with_values(where(c_values, vt_values, vf_values)) names, shape, (c, vt, vf) = broadcastable_native_tensors(c, vt, vf) backend = choose_backend(c, vt, vf) result = backend.where(c, vt, vf) return Dense(result, names, shape, backend) return broadcast_op(inner_where, [condition, value_true, value_false])Builds a tensor by choosing either values from
`value_true` or `value_false` depending on `condition`. If `condition` is not of type boolean, non-zero values are interpreted as True.

This function requires non-None values for `value_true` and `value_false`. To get the indices of True / non-zero values, use `nonzero()`.

Args
condition- Determines where to choose values from `value_true` or from `value_false`.
value_true- Values to pick where `condition != 0 / True`
value_false- Values to pick where `condition == 0 / False`

Returns
`Tensor` containing dimensions of all inputs.
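A minimal sketch (hypothetical values):

>>> from phiml.math import tensor, where, instance
>>> cond = tensor([True, False, True], instance('points'))
>>> result = where(cond, 1, -1)  # values 1, -1, 1 along pointsⁱ
>>> result.shape
(pointsⁱ=3)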
def with_diagonal(matrix: phiml.math._tensors.Tensor,
values: float | phiml.math._tensors.Tensor,
check_square=True)-
def with_diagonal(matrix: Tensor, values: Union[float, Tensor], check_square=True): """ Create a copy of `matrix`, replacing the diagonal elements. If `matrix` is sparse, diagonal zeros (and possibly other explicitly stored zeros) will be dropped from the sparse matrix. This function currently only supports sparse COO,CSR,CSC SciPy matrices. Args: matrix: `Tensor` with at least one dual dim. values: Diagonal values check_square: If `True` allow this function only for square matrices. Returns: `Tensor` """ col_dims = matrix.shape.dual row_dims = matrix.shape.only(col_dims.as_channel()) if not row_dims: row_dims = primal(matrix) if not row_dims: row_dims = batch(matrix) if check_square: assert row_dims.volume == col_dims.volume, f"matrix is not square (check_square=True). rows={row_dims}, cols={col_dims}" if is_sparse(matrix): assert matrix.backend.name == 'numpy', f"with_diagonal currently only supports SciPy matrices" values = wrap(values) result = [] for idx in (batch(values) & batch(matrix)).meshgrid(): scipy_matrix = matrix[idx].native() values = values[idx].native() scipy_matrix.setdiag(values) if close(0, values): scipy_matrix.eliminate_zeros() result.append(wrap(scipy_matrix, row_dims.after_gather(idx), col_dims.after_gather(idx))) return stack(result, batch(values) & batch(matrix)) else: raise NotImplementedError("with_diagonal currently only supports sparse matrices")Create a copy of
`matrix`, replacing the diagonal elements. If `matrix` is sparse, diagonal zeros (and possibly other explicitly stored zeros) will be dropped from the sparse matrix.

This function currently only supports sparse COO, CSR and CSC SciPy matrices.

Args
matrix- `Tensor` with at least one dual dim.
values- Diagonal values
check_square- If `True`, allow this function only for square matrices.

Returns
`Tensor`
def wrap(data: Sequence[~T] | ~T,
*shape: phiml.math._shape.Shape | str | list,
default_list_dim=(vectorᶜ)) ‑> phiml.math._tensors.Tensor[~T]-
def wrap(data: Union[Sequence[T], T], *shape: Union[Shape, str, list], default_list_dim=channel('vector')) -> Tensor[T]:
    """ Short for `phiml.math.tensor()` with `convert=False`. """
    return tensor(data, *shape, convert=False, default_list_dim=default_list_dim)

Short for `tensor()` with `convert=False`.
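A minimal sketch (assuming a NumPy array as input): the array is referenced, not converted.

>>> import numpy as np
>>> from phiml.math import wrap, spatial
>>> wrap(np.zeros((4, 3)), spatial('y,x')).shape
(yˢ=4, xˢ=3)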
def zeros(*shape: phiml.math._shape.Shape,
dtype: phiml.backend._dtype.DType | tuple | type = builtins.float) ‑> phiml.math._tensors.Tensor-
def zeros(*shape: Shape, dtype: Union[DType, tuple, type] = float) -> Tensor: """ Define a tensor with specified shape with value `0.0` / `0` / `False` everywhere. This method may not immediately allocate the memory to store the values. See Also: `zeros_like()`, `ones()`. Args: *shape: This (possibly empty) sequence of `Shape`s is concatenated, preserving the order. dtype: Data type as `DType` object. Defaults to `float` matching the current precision setting. Returns: `Tensor` """ uinit = lambda shape: expand_tensor(Dense(default_backend().zeros((), dtype=DType.as_dtype(dtype)), (), EMPTY_SHAPE, default_backend()), shape) return _initialize(uinit, shape, dtype, zeros, {})Define a tensor with specified shape with value
`0.0` / `0` / `False` everywhere.

This method may not immediately allocate the memory to store the values.

See Also:
`zeros_like()`, `ones()`.

Args
*shape- This (possibly empty) sequence of `Shape`s is concatenated, preserving the order.
dtype- Data type as `DType` object. Defaults to `float` matching the current precision setting.

Returns
`Tensor`
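A minimal sketch: the given shapes are concatenated in order.

>>> from phiml.math import zeros, batch, spatial
>>> zeros(batch(b=2), spatial(x=4, y=3)).shape
(bᵇ=2, xˢ=4, yˢ=3)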
def zeros_like(obj: phiml.math._tensors.Tensor | PhiTreeNode) ‑> phiml.math._tensors.Tensor | PhiTreeNode-
def zeros_like(obj: Union[Tensor, PhiTreeNode]) -> Union[Tensor, PhiTreeNode]: """ Create a `Tensor` containing only `0.0` / `0` / `False` with the same shape and dtype as `obj`. """ nest, values = disassemble_tree(obj, cache=False, attr_type=value_attributes) zeros_ = [] for val in values: val = wrap(val) with val.backend: zeros_.append(zeros(val.shape, dtype=val.dtype)) return assemble_tree(nest, zeros_, attr_type=value_attributes)Create a
`Tensor` containing only `0.0` / `0` / `False` with the same shape and dtype as `obj`.
Classes
class ConvergenceException-
class ConvergenceException(RuntimeError): """ Base class for exceptions raised when a solve does not converge. See Also: `Diverged`, `NotConverged`. """ def __init__(self, result: SolveInfo): RuntimeError.__init__(self, result.msg) self.result: SolveInfo = result """ `SolveInfo` holding information about the solve. """Base class for exceptions raised when a solve does not converge.
See Also:
`Diverged`, `NotConverged`.

Ancestors
- builtins.RuntimeError
- builtins.Exception
- builtins.BaseException
Subclasses
- phiml.math._optimize.Diverged
- phiml.math._optimize.NotConverged
Instance variables
var result-
`SolveInfo` holding information about the solve.
class DType (kind: type,
bits: int,
unsigned: bool,
exponent_bits: int,
mantissa_bits: int,
finite_only: bool,
unsigned_zero: bool)-
@dataclass(frozen=True) class DType(metaclass=DTypeMeta): """ Instances of `DType` represent the kind and size of data elements. The data type of tensors can be obtained via `Tensor.dtype`. The following kinds of data types are supported: * `float` with 32 / 64 bits * `complex` with 64 / 128 bits * `int` with 8 / 16 / 32 / 64 bits * `bool` with 8 bits * `str` with 8*n* bits Unlike with many computing libraries, there are no global variables corresponding to the available types. Instead, data types can simply be instantiated as needed. """ kind: type """Python type, one of `(bool, int, float, complex, str, object)`""" bits: int """Number of bits per element, typically a multiple of 8.""" unsigned: bool """If `True`, the data type is unsigned, meaning it can only represent non-negative values.""" exponent_bits: int """Number of bits used for the exponent in floating point types. 0 for integers.""" mantissa_bits: int """Number of bits used for the mantissa in floating point types. Same as `bits` for integers.""" finite_only: bool """If `True`, the data type can only represent finite values, i.e., no NaN or Inf.""" unsigned_zero: bool """If `True`, the data type cannot represent signed zeros. This is `True` for integers and `False` for most floating point types.""" @property def precision(self): """ Floating point precision. Only defined if `kind in (float, complex)`. For complex values, returns half of `DType.bits`. """ if self.kind == float: return self.bits if self.kind == complex: return self.bits // 2 else: return None @property def itemsize(self): """ Number of bytes used to storea single value of this type. See `DType.bits`. """ return self.bits // 8 if self.bits % 8 == 0 else self.bits / 8 def __eq__(self, other): if isinstance(other, DType): return (self.kind == other.kind and self.bits == other.bits and self.unsigned == other.unsigned and self.exponent_bits == other.exponent_bits and self.mantissa_bits == other.mantissa_bits and self.finite_only == other.finite_only and self.unsigned_zero == other.unsigned_zero) elif other in {bool, int, float, complex, object}: return self.kind == other else: return False def __ne__(self, other): return not self == other def __hash__(self): return hash(self.kind) def __repr__(self): if self.kind == int: if self.unsigned: return f"uint{self.bits}" else: return f"int{self.bits}" elif self.kind == float: if self == FLOAT16: return "float16" elif self == FLOAT32: return "float32" elif self == FLOAT64: return "float64" else: return f"float{self.bits}_e{self.exponent_bits}_m{self.mantissa_bits}{'fn' if self.finite_only else ''}{'uz' if self.unsigned_zero else ''}" elif self.kind == complex: if self == COMPLEX64: return "complex64" elif self == COMPLEX128: return "complex128" else: return f"complex{self.bits}_e{self.exponent_bits}_m{self.mantissa_bits}{'fn' if self.finite_only else ''}{'uz' if self.unsigned_zero else ''}" elif self.kind == str: return f"str{self.bits}" elif self.kind == bool: return "bool" elif self.kind == object: return "object" else: return f"{self.kind.__name__}{self.bits}" @staticmethod def as_dtype(value: Union['DType', tuple, type, None]) -> Union['DType', None]: if isinstance(value, DType): return value elif value is int: return INT32 elif value is float: from . import get_precision return DType.by_precision(float, get_precision()) elif value is complex: from . 
import get_precision return DType.by_precision(complex, get_precision()) elif value is None: return None elif isinstance(value, tuple): if len(value) == 2: return { (int, 8): INT8, (int, 16): INT16, (int, 32): INT32, (int, 64): INT64, (float, 16): FLOAT16, (float, 32): FLOAT32, (float, 64): FLOAT64, (complex, 64): COMPLEX64, (complex, 128): COMPLEX128, }[value] return DType(*value) elif value is str: raise ValueError("str DTypes must specify bits") return {bool: BOOL, object: OBJECT}[value] @staticmethod def by_precision(kind: type, precision: int) -> 'DType': if kind == float: return {16: FLOAT16, 32: FLOAT32, 64: FLOAT64}[precision] elif kind == complex: return {32: COMPLEX64, 64: COMPLEX128}[precision] else: raise ValueError(f"Unsupported kind: {kind}") @staticmethod def int_by_bits(bits: int): return {8: INT8, 16: INT16, 32: INT32, 64: INT64}[bits] @staticmethod def by_bits(kind: type, bits: int): if kind is int: return {8: INT8, 16: INT16, 32: INT32, 64: INT64}[bits] elif kind is float: return {16: FLOAT16, 32: FLOAT32, 64: FLOAT64}[bits] elif kind is complex: return {64: COMPLEX64, 128: COMPLEX128}[bits] elif kind is str: return DType(str, bits, False, 0, 0, True, True) raise ValueError @staticmethod def from_name(name: str): dtypes = {str(d): d for d in _TO_NUMPY} return dtypes[name] def __and__(self, other): return combine_types(self, other)Instances of
`DType` represent the kind and size of data elements. The data type of tensors can be obtained via `Tensor.dtype`.

The following kinds of data types are supported:

- `float` with 32 / 64 bits
- `complex` with 64 / 128 bits
- `int` with 8 / 16 / 32 / 64 bits
- `bool` with 8 bits
- `str` with 8*n* bits

Unlike with many computing libraries, there are no global variables corresponding to the available types. Instead, data types can simply be instantiated as needed.
Static methods
def as_dtype(value: ForwardRef('DType') | tuple | type | None) ‑> phiml.backend._dtype.DType | None-
@staticmethod
def as_dtype(value: Union['DType', tuple, type, None]) -> Union['DType', None]:
    if isinstance(value, DType):
        return value
    elif value is int:
        return INT32
    elif value is float:
        from . import get_precision
        return DType.by_precision(float, get_precision())
    elif value is complex:
        from . import get_precision
        return DType.by_precision(complex, get_precision())
    elif value is None:
        return None
    elif isinstance(value, tuple):
        if len(value) == 2:
            return {
                (int, 8): INT8, (int, 16): INT16, (int, 32): INT32, (int, 64): INT64,
                (float, 16): FLOAT16, (float, 32): FLOAT32, (float, 64): FLOAT64,
                (complex, 64): COMPLEX64, (complex, 128): COMPLEX128,
            }[value]
        return DType(*value)
    elif value is str:
        raise ValueError("str DTypes must specify bits")
    return {bool: BOOL, object: OBJECT}[value]

def by_bits(kind: type, bits: int)-
@staticmethod
def by_bits(kind: type, bits: int):
    if kind is int:
        return {8: INT8, 16: INT16, 32: INT32, 64: INT64}[bits]
    elif kind is float:
        return {16: FLOAT16, 32: FLOAT32, 64: FLOAT64}[bits]
    elif kind is complex:
        return {64: COMPLEX64, 128: COMPLEX128}[bits]
    elif kind is str:
        return DType(str, bits, False, 0, 0, True, True)
    raise ValueError

def by_precision(kind: type, precision: int) ‑> phiml.backend._dtype.DType-
@staticmethod
def by_precision(kind: type, precision: int) -> 'DType':
    if kind == float:
        return {16: FLOAT16, 32: FLOAT32, 64: FLOAT64}[precision]
    elif kind == complex:
        return {32: COMPLEX64, 64: COMPLEX128}[precision]
    else:
        raise ValueError(f"Unsupported kind: {kind}")

def from_name(name: str)-
@staticmethod
def from_name(name: str):
    dtypes = {str(d): d for d in _TO_NUMPY}
    return dtypes[name]

def int_by_bits(bits: int)-
@staticmethod
def int_by_bits(bits: int):
    return {8: INT8, 16: INT16, 32: INT32, 64: INT64}[bits]
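A brief sketch of the conversion helpers above; the expected results are noted as comments:
>>> from phiml.math import DType
>>> DType.as_dtype((int, 16))  # (kind, bits) tuple -> int16
>>> DType.by_bits(float, 64)   # -> float64
>>> DType.int_by_bits(8)       # -> int8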
Instance variables
var bits : int-
Number of bits per element, typically a multiple of 8.
var exponent_bits : int-
Number of bits used for the exponent in floating point types. 0 for integers.
var finite_only : bool-
If `True`, the data type can only represent finite values, i.e., no NaN or Inf.
prop itemsize-
@property
def itemsize(self):
    """ Number of bytes used to store a single value of this type. See `DType.bits`. """
    return self.bits // 8 if self.bits % 8 == 0 else self.bits / 8

Number of bytes used to store a single value of this type. See `DType.bits`.
var kind : type-
Python type, one of `(bool, int, float, complex, str, object)`
var mantissa_bits : int-
Number of bits used for the mantissa in floating point types. Same as `bits` for integers.
prop precision-
@property
def precision(self):
    """ Floating point precision. Only defined if `kind in (float, complex)`. For complex values, returns half of `DType.bits`. """
    if self.kind == float:
        return self.bits
    if self.kind == complex:
        return self.bits // 2
    else:
        return None

Floating point precision. Only defined if `kind in (float, complex)`. For complex values, returns half of `DType.bits`.
var unsigned : bool-
If `True`, the data type is unsigned, meaning it can only represent non-negative values.
var unsigned_zero : bool-
If `True`, the data type cannot represent signed zeros. This is `True` for integers and `False` for most floating point types.
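A short sketch relating `bits`, `precision` and `itemsize`, using the complex type as an example:
>>> from phiml.math import DType
>>> dt = DType.by_precision(complex, 64)  # complex128: two 64-bit floats
>>> dt.bits
128
>>> dt.precision  # half of bits for complex types
64
>>> dt.itemsize   # bytes per element
16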
class Dict (*args, **kwargs)-
class Dict(dict): """ Dictionary of `Tensor` or `phiml.math.magic.PhiTreeNode` values. Dicts are not themselves tensors and do not have a shape. Use `layout()` to treat `dict` instances like tensors. In addition to dictionary functions, supports mathematical operators with other `Dict`s and lookup via `.key` syntax. `Dict` implements `phiml.math.magic.PhiTreeNode` so instances can be passed to math operations like `sin`. """ def __value_attrs__(self): return tuple(self.keys()) # --- Dict[key] --- def __getattr__(self, key): try: return self[key] except KeyError as k: raise AttributeError(k) def __setattr__(self, key, value): self[key] = value def __delattr__(self, key): try: del self[key] except KeyError as k: raise AttributeError(k) # --- operators --- def __neg__(self): return Dict({k: -v for k, v in self.items()}) def __invert__(self): return Dict({k: ~v for k, v in self.items()}) def __abs__(self): return Dict({k: abs(v) for k, v in self.items()}) def __round__(self, n=None): return Dict({k: round(v) for k, v in self.items()}) def __add__(self, other): if isinstance(other, Dict): return Dict({key: val + other[key] for key, val in self.items()}) else: return Dict({key: val + other for key, val in self.items()}) def __radd__(self, other): if isinstance(other, Dict): return Dict({key: other[key] + val for key, val in self.items()}) else: return Dict({key: other + val for key, val in self.items()}) def __sub__(self, other): if isinstance(other, Dict): return Dict({key: val - other[key] for key, val in self.items()}) else: return Dict({key: val - other for key, val in self.items()}) def __rsub__(self, other): if isinstance(other, Dict): return Dict({key: other[key] - val for key, val in self.items()}) else: return Dict({key: other - val for key, val in self.items()}) def __mul__(self, other): if isinstance(other, Dict): return Dict({key: val * other[key] for key, val in self.items()}) else: return Dict({key: val * other for key, val in self.items()}) def __rmul__(self, other): if isinstance(other, Dict): return Dict({key: other[key] * val for key, val in self.items()}) else: return Dict({key: other * val for key, val in self.items()}) def __truediv__(self, other): if isinstance(other, Dict): return Dict({key: val / other[key] for key, val in self.items()}) else: return Dict({key: val / other for key, val in self.items()}) def __rtruediv__(self, other): if isinstance(other, Dict): return Dict({key: other[key] / val for key, val in self.items()}) else: return Dict({key: other / val for key, val in self.items()}) def __floordiv__(self, other): if isinstance(other, Dict): return Dict({key: val // other[key] for key, val in self.items()}) else: return Dict({key: val // other for key, val in self.items()}) def __rfloordiv__(self, other): if isinstance(other, Dict): return Dict({key: other[key] // val for key, val in self.items()}) else: return Dict({key: other // val for key, val in self.items()}) def __pow__(self, power, modulo=None): assert modulo is None if isinstance(power, Dict): return Dict({key: val ** power[key] for key, val in self.items()}) else: return Dict({key: val ** power for key, val in self.items()}) def __rpow__(self, other): if isinstance(other, Dict): return Dict({key: other[key] ** val for key, val in self.items()}) else: return Dict({key: other ** val for key, val in self.items()}) def __mod__(self, other): if isinstance(other, Dict): return Dict({key: val % other[key] for key, val in self.items()}) else: return Dict({key: val % other for key, val in self.items()}) def 
__rmod__(self, other): if isinstance(other, Dict): return Dict({key: other[key] % val for key, val in self.items()}) else: return Dict({key: other % val for key, val in self.items()}) def __eq__(self, other): if isinstance(other, Dict): return Dict({key: val == other[key] for key, val in self.items()}) else: return Dict({key: val == other for key, val in self.items()}) def __ne__(self, other): if isinstance(other, Dict): return Dict({key: val != other[key] for key, val in self.items()}) else: return Dict({key: val != other for key, val in self.items()}) def __lt__(self, other): if isinstance(other, Dict): return Dict({key: val < other[key] for key, val in self.items()}) else: return Dict({key: val < other for key, val in self.items()}) def __le__(self, other): if isinstance(other, Dict): return Dict({key: val <= other[key] for key, val in self.items()}) else: return Dict({key: val <= other for key, val in self.items()}) def __gt__(self, other): if isinstance(other, Dict): return Dict({key: val > other[key] for key, val in self.items()}) else: return Dict({key: val > other for key, val in self.items()}) def __ge__(self, other): if isinstance(other, Dict): return Dict({key: val >= other[key] for key, val in self.items()}) else: return Dict({key: val >= other for key, val in self.items()}) # --- overridden methods --- def copy(self): return Dict(self)Dictionary of
`Tensor` or `PhiTreeNode` values. Dicts are not themselves tensors and do not have a shape. Use `layout()` to treat `dict` instances like tensors.
In addition to dictionary functions, supports mathematical operators with other `Dict`s and lookup via `.key` syntax. `Dict` implements `PhiTreeNode` so instances can be passed to math operations like `sin()`.
Ancestors
- builtins.dict
Methods
def copy(self)-
def copy(self):
    return Dict(self)

D.copy() -> a shallow copy of D
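A short usage sketch based on the operators defined above:
>>> from phiml.math import Dict
>>> d = Dict(x=1., y=2.)
>>> d.x  # attribute-style lookup
1.0
>>> (d * 2 + 1).y  # operators apply to all values
5.0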
class Diverged-
class Diverged(ConvergenceException):
    """
    Raised if the optimization was stopped prematurely and cannot continue.
    This may indicate that no solution exists.

    The values of the last estimate `x` may or may not be finite.

    This exception inherits from `ConvergenceException`.

    See Also:
        `NotConverged`.
    """

    def __init__(self, result: SolveInfo):
        ConvergenceException.__init__(self, result)

Raised if the optimization was stopped prematurely and cannot continue. This may indicate that no solution exists.
The values of the last estimate `x` may or may not be finite.
This exception inherits from `ConvergenceException`.
See Also: `NotConverged`.
Ancestors
- phiml.math._optimize.ConvergenceException
- builtins.RuntimeError
- builtins.Exception
- builtins.BaseException
class IncompatibleShapes (message, *shapes: phiml.math._shape.Shape)-
class IncompatibleShapes(NotCompatible):
    """ Raised when the shape of a tensor does not match the other arguments. """

    def __init__(self, message, *shapes: Shape):
        Exception.__init__(self, message)
        self.shapes = shapes

Raised when the shape of a tensor does not match the other arguments.
Ancestors
- phiml.math._shape.NotCompatible
- builtins.Exception
- builtins.BaseException
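A hedged sketch of a situation that typically raises this exception; the failing call and the use of `exc.shapes` are illustrative:
>>> from phiml.math import channel, merge_shapes, IncompatibleShapes
>>> try:
...     merge_shapes(channel(vector=2), channel(vector=3))  # conflicting sizes for 'vector'
... except IncompatibleShapes as exc:
...     print(exc.shapes)  # the offending shapes, if provided by the caller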
class LinearFunction-
class LinearFunction(Generic[X, Y], Callable[[X], Y]): """ Just-in-time compiled linear function of `Tensor` arguments and return values. Use `jit_compile_linear()` to create a linear function representation. """ def __init__(self, f, auxiliary_args: Set[str], forget_traces: bool): self.f = f self.f_params = function_parameters(f) self.auxiliary_args = auxiliary_args self.forget_traces = forget_traces self.matrices_and_biases: Dict[SignatureKey, Tuple[SparseCoordinateTensor, Tensor, Tuple]] = {} self.nl_jit = JitFunction(f, self.auxiliary_args, forget_traces) # for backends that do not support sparse matrices def _get_or_trace(self, key: SignatureKey, args: tuple, f_kwargs: dict): if not key.tracing and key in self.matrices_and_biases: return self.matrices_and_biases[key] else: if self.forget_traces: self.matrices_and_biases.clear() _TRACING_LINEAR.append(self) try: raw_out = trace_linear(self.f, *args, **f_kwargs) matrix, bias = matrix_and_bias_from_tracer(raw_out[1], auto_compress=True) finally: assert _TRACING_LINEAR.pop(-1) is self if not key.tracing: self.matrices_and_biases[key] = matrix, bias, raw_out if len(self.matrices_and_biases) >= 4: warnings.warn(f"""Φ-ML-lin: The compiled linear function '{f_name(self.f)}' was traced {len(self.matrices_and_biases)} times. Performing many traces may be slow and cause memory leaks. Tensors in auxiliary arguments (all except the first parameter unless specified otherwise) are compared by reference, not by tensor values. Auxiliary arguments: {key.auxiliary_kwargs} Multiple linear traces can be avoided by jit-compiling the code that calls the linear function or setting forget_traces=True.""", RuntimeWarning, stacklevel=3) return matrix, bias, raw_out def __call__(self, *args: X, **kwargs) -> Y: try: key, tensors, natives, x, aux_kwargs = key_from_args(args, kwargs, self.f_params, cache=False, aux=self.auxiliary_args, use='linear') except LinearTraceInProgress: return self.f(*args, **kwargs) assert tensors, "Linear function requires at least one argument" if any(isinstance(t, LinTracer) for t in tensors): # TODO: if t is identity, use cached ShiftLinTracer, otherwise multiply two ShiftLinTracers return self.f(*args, **kwargs) if not key.backend.supports(Backend.sparse_coo_tensor): # This might be called inside a Jax linear solve # warnings.warn(f"Sparse matrices are not supported by {backend}. Falling back to regular jit compilation.", RuntimeWarning) if not math.all_available(*tensors): # avoid nested tracing, Typical case jax.scipy.sparse.cg(LinearFunction). Nested traces cannot be reused which results in lots of traces per cg. ML_LOGGER.debug(f"Φ-ML-lin: Running '{f_name(self.f)}' as-is with {key.backend} because it is being traced.") return self.f(*args, **kwargs) else: return self.nl_jit(*args, **kwargs) matrix, bias, (out_tree, out_tracer) = self._get_or_trace(key, args, aux_kwargs) result = matrix @ tensors[0] + bias return assemble_tree(out_tree, [result], value_attributes) def sparse_matrix(self, *args, **kwargs): """ Create an explicit representation of this linear function as a sparse matrix. See Also: `sparse_matrix_and_bias()`. Args: *args: Function arguments. This determines the size of the matrix. **kwargs: Additional keyword arguments for the linear function. Returns: Sparse matrix representation with `values` property and `native()` method. 
""" key, *_, aux_kwargs = key_from_args(args, kwargs, self.f_params, cache=False, aux=self.auxiliary_args, use='linear') matrix, bias, *_ = self._get_or_trace(key, args, aux_kwargs) assert math.close(bias, 0), "This is an affine function and cannot be represented by a single matrix. Use sparse_matrix_and_bias() instead." return matrix def sparse_matrix_and_bias(self, *args, **kwargs): """ Create an explicit representation of this affine function as a sparse matrix and a bias vector. Args: *args: Positional arguments to the linear function. This determines the size of the matrix. **kwargs: Additional keyword arguments for the linear function. Returns: matrix: Sparse matrix representation with `values` property and `native()` method. bias: `Tensor` """ key, *_, aux_kwargs = key_from_args(args, kwargs, self.f_params, cache=False, aux=self.auxiliary_args, use='linear') return self._get_or_trace(key, args, aux_kwargs)[:2] def __repr__(self): return f"lin({f_name(self.f)})"Just-in-time compiled linear function of
`Tensor` arguments and return values.
Use `jit_compile_linear()` to create a linear function representation.
Ancestors
- collections.abc.Callable
- typing.Generic
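A minimal sketch of creating a `LinearFunction` through `jit_compile_linear()`; the traced function here is illustrative:
>>> from phiml import math
>>> from phiml.math import spatial
>>> double = math.jit_compile_linear(lambda x: 2 * x)
>>> matrix = double.sparse_matrix(math.ones(spatial(x=3)))  # sparse representation of the doubling operator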
Methods
def sparse_matrix(self, *args, **kwargs)-
def sparse_matrix(self, *args, **kwargs):
    """
    Create an explicit representation of this linear function as a sparse matrix.

    See Also:
        `sparse_matrix_and_bias()`.

    Args:
        *args: Function arguments. This determines the size of the matrix.
        **kwargs: Additional keyword arguments for the linear function.

    Returns:
        Sparse matrix representation with `values` property and `native()` method.
    """
    key, *_, aux_kwargs = key_from_args(args, kwargs, self.f_params, cache=False, aux=self.auxiliary_args, use='linear')
    matrix, bias, *_ = self._get_or_trace(key, args, aux_kwargs)
    assert math.close(bias, 0), "This is an affine function and cannot be represented by a single matrix. Use sparse_matrix_and_bias() instead."
    return matrix

Create an explicit representation of this linear function as a sparse matrix.
See Also:
`sparse_matrix_and_bias()`.
Args
*args- Function arguments. This determines the size of the matrix.
**kwargs- Additional keyword arguments for the linear function.
Returns
Sparse matrix representation with `values` property and `native()` method.
def sparse_matrix_and_bias(self, *args, **kwargs)-
def sparse_matrix_and_bias(self, *args, **kwargs):
    """
    Create an explicit representation of this affine function as a sparse matrix and a bias vector.

    Args:
        *args: Positional arguments to the linear function. This determines the size of the matrix.
        **kwargs: Additional keyword arguments for the linear function.

    Returns:
        matrix: Sparse matrix representation with `values` property and `native()` method.
        bias: `Tensor`
    """
    key, *_, aux_kwargs = key_from_args(args, kwargs, self.f_params, cache=False, aux=self.auxiliary_args, use='linear')
    return self._get_or_trace(key, args, aux_kwargs)[:2]

Create an explicit representation of this affine function as a sparse matrix and a bias vector.
Args
*args- Positional arguments to the linear function. This determines the size of the matrix.
**kwargs- Additional keyword arguments for the linear function.
Returns
matrix- Sparse matrix representation with `values` property and `native()` method.
bias- `Tensor`
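Continuing the sketch from `sparse_matrix()`, an affine function yields both parts (illustrative):
>>> from phiml import math
>>> from phiml.math import spatial
>>> affine = math.jit_compile_linear(lambda x: 2 * x + 1)
>>> matrix, bias = affine.sparse_matrix_and_bias(math.ones(spatial(x=3)))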
class NotConverged-
class NotConverged(ConvergenceException):
    """
    Raised during optimization if the desired accuracy was not reached within the maximum number of iterations.

    This exception inherits from `ConvergenceException`.

    See Also:
        `Diverged`.
    """

    def __init__(self, result: SolveInfo):
        ConvergenceException.__init__(self, result)

Raised during optimization if the desired accuracy was not reached within the maximum number of iterations.
This exception inherits from `ConvergenceException`.
See Also: `Diverged`.
Ancestors
- phiml.math._optimize.ConvergenceException
- builtins.RuntimeError
- builtins.Exception
- builtins.BaseException
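Both exceptions are typically caught around a solve call. A hedged sketch; the solver arguments and the names `laplace`, `y` and `x0` are placeholders, not part of this reference:
>>> from phiml import math
>>> try:
...     result = math.solve_linear(laplace, y, solve=math.Solve('CG', 1e-5, x0=x0))
... except math.NotConverged as exc:
...     best = exc.result.x  # best estimate from the unfinished solve (assuming `result` holds the SolveInfo)
... except math.Diverged:
...     raise  # no usable solution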
class Shape-
@runtime_checkable class Shape(Protocol, metaclass=ShapeMeta): @property def names(self) -> Tuple[str]: """ Ordered dimension names as `tuple[str]`. See Also: `Shape.name`. """ ... @property def sizes(self) -> Sequence: """ Ordered dimension sizes as `tuple`. The size of a dimension can be an `int` or a `Tensor` for [non-uniform shapes](https://tum-pbs.github.io/PhiML/Non_Uniform.html). See Also: `Shape.get_size()`, `Shape.size`, `Shape.shape`. """ ... @property def types(self) -> Sequence[Callable]: ... @property def dim_types(self) -> Sequence[str]: ... @property def labels(self) -> Sequence[Optional[Sequence[str]]]: ... @property def name_list(self) -> List[str]: ... @property def untyped_dict(self) -> dict: """ Returns: `dict` containing dimension names as keys. The values are either the labels as `tuple` if available, otherwise the size. """ ... def __len__(self): ... def __contains__(self, item): ... def isdisjoint(self, other: Union['Shape', tuple, list, str]): """ Shapes are disjoint if all dimension names of one shape do not occur in the other shape. """ ... def __iter__(self): ... def index(self, dim: Union[str, 'Shape', None]) -> int: """ Finds the index of the dimension within this `Shape`. See Also: `Shape.indices()`. Args: dim: Dimension name or single-dimension `Shape`. Returns: Index as `int`. """ ... def indices(self, names: Sequence[str]) -> Tuple[int, ...]: """ Finds the indices of the given dimensions within this `Shape`. See Also: `Shape.index()`. Args: names: Sequence of dim names as `tuple` or `list`. No name can occur in `names` more than once. Returns: Indices as `tuple[int]`. """ ... def get_size(self, dim: Union[str, 'Shape', int], default=None): """ Args: dim: Dimension, either as name `str` or single-dimension `Shape` or index `int`. default: (Optional) If the dim does not exist, return this value instead of raising an error. Returns: Size associated with `dim` as `int` or `Tensor`. """ ... def get_dim_type(self, dim: str) -> str: """ Args: dim: Dimension, either as name `str` or single-dimension `Shape`. Returns: Dimension type, one of `batch`, `spatial`, `instance`, `channel`. """ ... def get_labels(self, dim: Union[str, 'Shape', int], fallback_spatial=False) -> Union[tuple, None]: """ Args: fallback_spatial: If `True` and no labels are defined for `dim` and `dim` is a channel dimension, the spatial dimension names are interpreted as labels along `dim` in the order they are listed in this `Shape`. dim: Dimension, either as `int` index, `str` name or single-dimension `Shape`. Returns: Item names as `tuple` or `None` if not defined. """ ... def flipped(self, dims: Union[List[str], Tuple[str]]): ... def __getitem__(self, selection): ... @property def reversed(self): return ... @property def batch(self) -> 'Shape': """ Filters this shape, returning only the batch dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ ... @property def non_batch(self) -> 'Shape': """ Filters this shape, returning only the non-batch dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ ... 
@property def spatial(self) -> 'Shape': """ Filters this shape, returning only the spatial dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ ... @property def non_spatial(self) -> 'Shape': """ Filters this shape, returning only the non-spatial dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ ... @property def instance(self) -> 'Shape': """ Filters this shape, returning only the instance dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ ... @property def non_instance(self) -> 'Shape': """ Filters this shape, returning only the non-instance dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ ... @property def channel(self) -> 'Shape': """ Filters this shape, returning only the channel dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ ... @property def non_channel(self) -> 'Shape': """ Filters this shape, returning only the non-channel dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ ... @property def dual(self) -> 'Shape': """ Filters this shape, returning only the dual dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ ... @property def non_dual(self) -> 'Shape': """ Filters this shape, returning only the non-dual dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ ... @property def primal(self) -> 'Shape': """ Filters this shape, returning only the dual dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ ... @property def non_primal(self) -> 'Shape': """ Filters this shape, returning only batch and dual dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. 
Returns: New `Shape` object """ ... @property def non_singleton(self) -> 'Shape': """ Filters this shape, returning only non-singleton dimensions as a new `Shape` object. Dimensions are singleton if their size is exactly `1`. Returns: New `Shape` object """ ... @property def singleton(self) -> 'Shape': """ Filters this shape, returning only singleton dimensions as a new `Shape` object. Dimensions are singleton if their size is exactly `1`. Returns: New `Shape` object """ ... def as_channel(self) -> 'Shape': """Returns a copy of this `Shape` with all dimensions of type *channel*.""" ... def as_batch(self) -> 'Shape': """Returns a copy of this `Shape` with all dimensions of type *batch*.""" ... def as_spatial(self) -> 'Shape': """Returns a copy of this `Shape` with all dimensions of type *spatial*.""" ... def as_instance(self) -> 'Shape': """Returns a copy of this `Shape` with all dimensions of type *instance*.""" ... def as_dual(self) -> 'Shape': """Returns a copy of this `Shape` with all dimensions of type *dual*.""" ... def as_type(self, new_type: Callable) -> 'Shape': """Returns a copy of this `Shape` with all dimensions of the given type, either `batch`, `dual`, `spatial`, `instance`, or `channel` .""" ... def transpose(self, dim_type: str) -> 'Shape': ... @property def name(self) -> str: """ Only for Shapes containing exactly one single dimension. Returns the name of the dimension. See Also: `Shape.names`. """ ... @property def size(self): """ Only for Shapes containing exactly one single dimension. Returns the size of the dimension. See Also: `Shape.sizes`, `Shape.get_size()`. """ ... @property def type(self) -> Callable: """ Only for Shapes containing exactly one single dimension. Returns the type of the dimension. See Also: `Shape.get_type()`. """ ... @property def dim_type(self) -> str: ... def mask(self, names: Union[tuple, list, set, 'Shape']): """ Returns a binary sequence corresponding to the names of this Shape. A value of 1 means that a dimension of this Shape is contained in `names`. Args: names: instance of dimension names: tuple or list or set: Returns: binary sequence """ ... def without(self, dims: 'DimFilter') -> 'Shape': """ Builds a new shape from this one that is missing all given dimensions. Dimensions in `dims` that are not part of this Shape are ignored. The complementary operation is `Shape.only()`. Args: dims: Single dimension (str) or instance of dimensions (tuple, list, Shape) dims: Dimensions to exclude as `str` or `tuple` or `list` or `Shape`. Dimensions that are not included in this shape are ignored. Returns: Shape without specified dimensions """ ... def __and__(self, other) -> 'Shape': ... def __add__(self, other) -> 'Shape': ... def __sub__(self, other) -> 'Shape': ... def __mul__(self, other) -> 'Shape': ... def __rmul__(self, other) -> 'Shape': ... def only(self, dims: 'DimFilter', reorder=False) -> 'Shape': """ Builds a new shape from this one that only contains the given dimensions. Dimensions in `dims` that are not part of this Shape are ignored. The complementary operation is :func:`Shape.without`. Args: dims: comma-separated dimension names (str) or instance of dimensions (tuple, list, Shape) or filter function. reorder: If `False`, keeps the dimension order as defined in this shape. If `True`, reorders the dimensions of this shape to match the order of `dims`. Returns: Shape containing only specified dimensions """ ... def is_compatible(self, *others: 'Shape') -> bool: """ Checks if this shape and the others can be broadcast. 
Args: others: Other shapes. Returns: `True` only if all shapes are compatible. """ ... @property def rank(self) -> int: """ Returns the number of dimensions. Equal to `len(shape)`. See `Shape.is_empty`, `Shape.batch_rank`, `Shape.spatial_rank`, `Shape.channel_rank`. """ ... @property def batch_rank(self) -> int: """ Number of batch dimensions """ ... @property def instance_rank(self) -> int: ... @property def spatial_rank(self) -> int: """ Number of spatial dimensions """ ... @property def dual_rank(self) -> int: """ Number of spatial dimensions """ ... @property def channel_rank(self) -> int: """ Number of channel dimensions """ ... @property def well_defined(self): """ Returns `True` if no dimension size is `None`. Shapes with undefined sizes may be used in `phiml.math.tensor()`, `phiml.math.wrap()`, `phiml.math.stack()` or `phiml.math.concat()`. To create an undefined size, call a constructor function (`batch()`, `spatial()`, `channel()`, `instance()`) with positional `str` arguments, e.g. `spatial('x')`. """ ... @property def defined(self): ... @property def undefined(self): ... @property def shape(self) -> 'Shape': """ Higher-order `Shape`. The returned shape will always contain the channel dimension `dims` with a size equal to the `Shape.rank` of this shape. For uniform shapes, `Shape.shape` will only contain the dimension `dims` but the shapes of [non-uniform shapes](https://tum-pbs.github.io/PhiML/Non_Uniform.html) may contain additional dimensions. See Also: `Shape.is_uniform`. Returns: `Shape`. """ ... @property def is_uniform(self) -> bool: """ A shape is uniform if it all sizes have a single integer value. See Also: `Shape.is_non_uniform`, `Shape.shape`. """ ... @property def is_non_uniform(self) -> bool: """ A shape is non-uniform if the size of any dimension varies along another dimension. See Also: `Shape.is_uniform`, `Shape.shape`. """ ... @property def non_uniform(self) -> 'Shape': """ Returns only the non-uniform dimensions of this shape, i.e. the dimensions whose size varies along another dimension. See Also `Shape.non_uniform_shape` """ ... @property def non_uniform_shape(self) -> 'Shape': """ Returns the stack dimensions of non-uniform shapes. This is equal to `Shape.shape` excluding the `dims` dimension. For example, when stacking `(x=3)` and `(x=2)` along `vector`, the resulting shape is non_uniform. Its `non_uniform_shape` is `vector` and its `non_uniform` dimension is `x`. See Also `Shape.non_uniform`. """ ... def with_size(self, size: Union[int, Sequence[str]]): """ Only for single-dimension shapes. Returns a `Shape` representing this dimension but with a different size. See Also: `Shape.with_sizes()`. Args: size: Replacement size for this dimension. Returns: `Shape` """ ... def with_sizes(self, sizes: Union[Sequence[int], Sequence[Tuple[str, ...]], 'Shape', int], keep_labels=True): """ Returns a new `Shape` matching the dimension names and types of `self` but with different sizes. See Also: `Shape.with_size()`. Args: sizes: One of * `tuple` / `list` of same length as `self` containing replacement sizes or replacement labels. * `Shape` of any rank. Replaces sizes for dimensions shared by `sizes` and `self`. * `int`: new size for all dimensions keep_labels: If `False`, forgets all labels. If `True`, keeps labels where the size does not change. Returns: `Shape` with same names and types as `self`. """ ... def without_sizes(self): """ Returns: `Shape` with all sizes undefined (`None`) """ ... 
def with_dim_size(self, dim: Union[str, 'Shape'], size: Union[int, 'math.Tensor', str, tuple, list, None], keep_labels=True): """ Returns a new `Shape` that has a different size for `dim`. Args: dim: Dimension for which to replace the size, `Shape` or `str`. size: New size, `int` or `Tensor` Returns: `Shape` with same names and types as `self`. """ ... def replace(self, dims: Union['Shape', str, tuple, list], new: 'Shape') -> 'Shape': """ Returns a copy of `self` with `dims` replaced by `new`. Dimensions that are not present in `self` are ignored. The dimension order is preserved. Args: dims: Dimensions to replace. new: New dimensions, must have same length as `dims` if `len(dims) > 1`. If a `Shape` is given, replaces the dimension types and labels as well. Returns: `Shape` with same rank and dimension order as `self`. """ ... def replace_selection(self, names: Sequence[str], new: 'Shape') -> 'Shape': """ Replace some of the dims of this shape. Args: names: Sequence of dim names. new: Replacement dims, must have same length as `old`. Returns: Copy of `self` with replaced dims. """ ... @property def volume(self) -> Union[int, None]: """ Returns the total number of values contained in a tensor of this shape. This is the product of all dimension sizes. Returns: volume as `int` or `Tensor` or `None` if the shape is not `Shape.well_defined` """ ... @property def is_empty(self) -> bool: """ True if this shape has no dimensions. Equivalent to `Shape.rank` `== 0`. """ ... def prepare_gather(self, dim: str, selection: Union[slice, int, 'Shape', str, tuple, list]) -> Union[slice, List[int]]: """ Parse a slice object for a specific dimension. Args: dim: Name of dimension to slice. selection: Slice object. Returns: """ ... def prepare_renaming_gather(self, dim: str, selection: Union[slice, int, 'Shape', str, tuple, list]): ... def after_gather(self, selection: dict) -> 'Shape': ... def meshgrid(self, names=False): """ Builds a sequence containing all multi-indices within a tensor of this shape. All indices are returned as `dict` mapping dimension names to `int` indices. The corresponding values can be retrieved from Tensors and other Sliceables using `tensor[index]`. This function currently only supports uniform tensors. Args: names: If `True`, replace indices by their labels if available. Returns: `dict` iterator. """ ...Base class for protocol classes.
Protocol classes are defined as::
class Proto(Protocol):
    def meth(self) -> int: ...

Such classes are primarily used with static type checkers that recognize structural subtyping (static duck-typing).
For example::
class C:
    def meth(self) -> int:
        return 0

def func(x: Proto) -> int:
    return x.meth()

func(C())  # Passes static type check

See PEP 544 for details. Protocol classes decorated with @typing.runtime_checkable act as simple-minded runtime protocols that check only the presence of given attributes, ignoring their type signatures. Protocol classes can be generic, they are defined as::
class GenProto(Protocol[T]):
    def meth(self) -> T: ...

Ancestors
- typing.Protocol
- typing.Generic
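Since `Shape` is a protocol, shapes are built with the constructor functions rather than instantiated directly. A brief sketch (results shown as comments are illustrative):
>>> from phiml.math import batch, spatial, channel
>>> s = batch(examples=10) & spatial(x=28, y=28) & channel(vector='x,y')
>>> s.names      # ('examples', 'x', 'y', 'vector')
>>> s.spatial    # only the spatial dims
>>> s.non_batch  # all but the batch dims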
Instance variables
prop batch : Shape-
@property def batch(self) -> 'Shape': """ Filters this shape, returning only the batch dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ ...Filters this shape, returning only the batch dimensions as a new
Shapeobject.See also:
Shape.batch,Shape.spatial,Shape.instance,Shape.channel,Shape.dual,Shape.non_batch,Shape.non_spatial,Shape.non_instance,Shape.non_channel,Shape.non_dual.Returns
New
Shapeobject prop batch_rank : int-
@property def batch_rank(self) -> int: """ Number of batch dimensions """ ...Number of batch dimensions
prop channel : Shape-
@property def channel(self) -> 'Shape': """ Filters this shape, returning only the channel dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ ...Filters this shape, returning only the channel dimensions as a new
Shapeobject.See also:
Shape.batch,Shape.spatial,Shape.instance,Shape.channel,Shape.dual,Shape.non_batch,Shape.non_spatial,Shape.non_instance,Shape.non_channel,Shape.non_dual.Returns
New
Shapeobject prop channel_rank : int-
@property def channel_rank(self) -> int: """ Number of channel dimensions """ ...Number of channel dimensions
prop defined-
@property def defined(self): ... prop dim_type : str-
@property def dim_type(self) -> str: ... prop dim_types : Sequence[str]-
@property def dim_types(self) -> Sequence[str]: ... prop dual : Shape-
@property def dual(self) -> 'Shape': """ Filters this shape, returning only the dual dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ ...Filters this shape, returning only the dual dimensions as a new
Shapeobject.See also:
Shape.batch,Shape.spatial,Shape.instance,Shape.channel,Shape.dual,Shape.non_batch,Shape.non_spatial,Shape.non_instance,Shape.non_channel,Shape.non_dual.Returns
New
Shapeobject prop dual_rank : int-
@property
def dual_rank(self) -> int:
    """ Number of dual dimensions """
    ...

Number of dual dimensions
prop instance : Shape-
@property def instance(self) -> 'Shape': """ Filters this shape, returning only the instance dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ ...Filters this shape, returning only the instance dimensions as a new
Shapeobject.See also:
Shape.batch,Shape.spatial,Shape.instance,Shape.channel,Shape.dual,Shape.non_batch,Shape.non_spatial,Shape.non_instance,Shape.non_channel,Shape.non_dual.Returns
New
Shapeobject prop instance_rank : int-
@property def instance_rank(self) -> int: ... prop is_empty : bool-
@property def is_empty(self) -> bool: """ True if this shape has no dimensions. Equivalent to `Shape.rank` `== 0`. """ ...True if this shape has no dimensions. Equivalent to
Shape.rank== 0. prop is_non_uniform : bool-
@property def is_non_uniform(self) -> bool: """ A shape is non-uniform if the size of any dimension varies along another dimension. See Also: `Shape.is_uniform`, `Shape.shape`. """ ...A shape is non-uniform if the size of any dimension varies along another dimension.
See Also:
Shape.is_uniform,Shape.shape. prop is_uniform : bool-
@property
def is_uniform(self) -> bool:
    """ A shape is uniform if all sizes have a single integer value. See Also: `Shape.is_non_uniform`, `Shape.shape`. """
    ...

A shape is uniform if all sizes have a single integer value.
See Also:
Shape.is_non_uniform,Shape.shape. prop labels : Sequence[Sequence[str] | None]-
@property def labels(self) -> Sequence[Optional[Sequence[str]]]: ... prop name : str-
@property def name(self) -> str: """ Only for Shapes containing exactly one single dimension. Returns the name of the dimension. See Also: `Shape.names`. """ ...Only for Shapes containing exactly one single dimension. Returns the name of the dimension.
See Also:
Shape.names. prop name_list : List[str]-
@property def name_list(self) -> List[str]: ... prop names : Tuple[str]-
@property def names(self) -> Tuple[str]: """ Ordered dimension names as `tuple[str]`. See Also: `Shape.name`. """ ...Ordered dimension names as
tuple[str].See Also:
Shape.name. prop non_batch : Shape-
@property def non_batch(self) -> 'Shape': """ Filters this shape, returning only the non-batch dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ ...Filters this shape, returning only the non-batch dimensions as a new
Shapeobject.See also:
Shape.batch,Shape.spatial,Shape.instance,Shape.channel,Shape.dual,Shape.non_batch,Shape.non_spatial,Shape.non_instance,Shape.non_channel,Shape.non_dual.Returns
New
Shapeobject prop non_channel : Shape-
@property def non_channel(self) -> 'Shape': """ Filters this shape, returning only the non-channel dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ ...Filters this shape, returning only the non-channel dimensions as a new
Shapeobject.See also:
Shape.batch,Shape.spatial,Shape.instance,Shape.channel,Shape.dual,Shape.non_batch,Shape.non_spatial,Shape.non_instance,Shape.non_channel,Shape.non_dual.Returns
New
Shapeobject prop non_dual : Shape-
@property def non_dual(self) -> 'Shape': """ Filters this shape, returning only the non-dual dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ ...Filters this shape, returning only the non-dual dimensions as a new
Shapeobject.See also:
Shape.batch,Shape.spatial,Shape.instance,Shape.channel,Shape.dual,Shape.non_batch,Shape.non_spatial,Shape.non_instance,Shape.non_channel,Shape.non_dual.Returns
New
Shapeobject prop non_instance : Shape-
@property def non_instance(self) -> 'Shape': """ Filters this shape, returning only the non-instance dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ ...Filters this shape, returning only the non-instance dimensions as a new
Shapeobject.See also:
Shape.batch,Shape.spatial,Shape.instance,Shape.channel,Shape.dual,Shape.non_batch,Shape.non_spatial,Shape.non_instance,Shape.non_channel,Shape.non_dual.Returns
New
Shapeobject prop non_primal : Shape-
@property def non_primal(self) -> 'Shape': """ Filters this shape, returning only batch and dual dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ ...Filters this shape, returning only batch and dual dimensions as a new
Shapeobject.See also:
Shape.batch,Shape.spatial,Shape.instance,Shape.channel,Shape.dual,Shape.non_batch,Shape.non_spatial,Shape.non_instance,Shape.non_channel,Shape.non_dual.Returns
New
Shapeobject prop non_singleton : Shape-
@property def non_singleton(self) -> 'Shape': """ Filters this shape, returning only non-singleton dimensions as a new `Shape` object. Dimensions are singleton if their size is exactly `1`. Returns: New `Shape` object """ ... prop non_spatial : Shape-
@property def non_spatial(self) -> 'Shape': """ Filters this shape, returning only the non-spatial dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ ...Filters this shape, returning only the non-spatial dimensions as a new
Shapeobject.See also:
Shape.batch,Shape.spatial,Shape.instance,Shape.channel,Shape.dual,Shape.non_batch,Shape.non_spatial,Shape.non_instance,Shape.non_channel,Shape.non_dual.Returns
New
Shapeobject prop non_uniform : Shape-
@property def non_uniform(self) -> 'Shape': """ Returns only the non-uniform dimensions of this shape, i.e. the dimensions whose size varies along another dimension. See Also `Shape.non_uniform_shape` """ ...Returns only the non-uniform dimensions of this shape, i.e. the dimensions whose size varies along another dimension.
See Also
Shape.non_uniform_shape prop non_uniform_shape : Shape-
@property def non_uniform_shape(self) -> 'Shape': """ Returns the stack dimensions of non-uniform shapes. This is equal to `Shape.shape` excluding the `dims` dimension. For example, when stacking `(x=3)` and `(x=2)` along `vector`, the resulting shape is non_uniform. Its `non_uniform_shape` is `vector` and its `non_uniform` dimension is `x`. See Also `Shape.non_uniform`. """ ...Returns the stack dimensions of non-uniform shapes. This is equal to
Shape.shapeexcluding thedimsdimension.For example, when stacking
(x=3)and(x=2)alongvector, the resulting shape is non_uniform. Itsnon_uniform_shapeisvectorand itsnon_uniformdimension isx.See Also
Shape.non_uniform. prop primal : Shape-
@property
def primal(self) -> 'Shape':
    """
    Filters this shape, returning only the primal (spatial, instance, and channel) dimensions as a new `Shape` object.

    See also:
        `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.

    Returns:
        New `Shape` object
    """
    ...

Filters this shape, returning only the primal (spatial, instance, and channel) dimensions as a new `Shape` object.
See also:
Shape.batch,Shape.spatial,Shape.instance,Shape.channel,Shape.dual,Shape.non_batch,Shape.non_spatial,Shape.non_instance,Shape.non_channel,Shape.non_dual.Returns
New
Shapeobject prop rank : int-
@property def rank(self) -> int: """ Returns the number of dimensions. Equal to `len(shape)`. See `Shape.is_empty`, `Shape.batch_rank`, `Shape.spatial_rank`, `Shape.channel_rank`. """ ...Returns the number of dimensions. Equal to
`len(shape)`.
See
Shape.is_empty,Shape.batch_rank,Shape.spatial_rank,Shape.channel_rank. prop reversed-
@property def reversed(self): return ... prop shape : Shape-
@property def shape(self) -> 'Shape': """ Higher-order `Shape`. The returned shape will always contain the channel dimension `dims` with a size equal to the `Shape.rank` of this shape. For uniform shapes, `Shape.shape` will only contain the dimension `dims` but the shapes of [non-uniform shapes](https://tum-pbs.github.io/PhiML/Non_Uniform.html) may contain additional dimensions. See Also: `Shape.is_uniform`. Returns: `Shape`. """ ...Higher-order
Shape. The returned shape will always contain the channel dimensiondimswith a size equal to theShape.rankof this shape.For uniform shapes,
Shape.shapewill only contain the dimensiondimsbut the shapes of non-uniform shapes may contain additional dimensions.See Also:
Shape.is_uniform.Returns
prop singleton : Shape-
@property def singleton(self) -> 'Shape': """ Filters this shape, returning only singleton dimensions as a new `Shape` object. Dimensions are singleton if their size is exactly `1`. Returns: New `Shape` object """ ... prop size-
@property def size(self): """ Only for Shapes containing exactly one single dimension. Returns the size of the dimension. See Also: `Shape.sizes`, `Shape.get_size()`. """ ...Only for Shapes containing exactly one single dimension. Returns the size of the dimension.
See Also:
Shape.sizes,Shape.get_size(). prop sizes : Sequence-
@property def sizes(self) -> Sequence: """ Ordered dimension sizes as `tuple`. The size of a dimension can be an `int` or a `Tensor` for [non-uniform shapes](https://tum-pbs.github.io/PhiML/Non_Uniform.html). See Also: `Shape.get_size()`, `Shape.size`, `Shape.shape`. """ ...Ordered dimension sizes as
tuple. The size of a dimension can be anintor aTensorfor non-uniform shapes.See Also:
Shape.get_size(),Shape.size,Shape.shape. prop spatial : Shape-
@property def spatial(self) -> 'Shape': """ Filters this shape, returning only the spatial dimensions as a new `Shape` object. See also: `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`. Returns: New `Shape` object """ ...Filters this shape, returning only the spatial dimensions as a new
Shapeobject.See also:
Shape.batch,Shape.spatial,Shape.instance,Shape.channel,Shape.dual,Shape.non_batch,Shape.non_spatial,Shape.non_instance,Shape.non_channel,Shape.non_dual.Returns
New
Shapeobject prop spatial_rank : int-
@property def spatial_rank(self) -> int: """ Number of spatial dimensions """ ...Number of spatial dimensions
prop type : Callable-
@property def type(self) -> Callable: """ Only for Shapes containing exactly one single dimension. Returns the type of the dimension. See Also: `Shape.get_type()`. """ ...Only for Shapes containing exactly one single dimension. Returns the type of the dimension.
See Also:
Shape.get_type(). prop types : Sequence[Callable]-
@property def types(self) -> Sequence[Callable]: ... prop undefined-
@property def undefined(self): ... prop untyped_dict : dict-
@property def untyped_dict(self) -> dict: """ Returns: `dict` containing dimension names as keys. The values are either the labels as `tuple` if available, otherwise the size. """ ...Returns
dictcontaining dimension names as keys. The values are either the labels astupleif available, otherwise the size. prop volume : int | None-
@property def volume(self) -> Union[int, None]: """ Returns the total number of values contained in a tensor of this shape. This is the product of all dimension sizes. Returns: volume as `int` or `Tensor` or `None` if the shape is not `Shape.well_defined` """ ...Returns the total number of values contained in a tensor of this shape. This is the product of all dimension sizes.
Returns
volume as
intorTensororNoneif the shape is notShape.well_defined prop well_defined-
@property def well_defined(self): """ Returns `True` if no dimension size is `None`. Shapes with undefined sizes may be used in `phiml.math.tensor()`, `phiml.math.wrap()`, `phiml.math.stack()` or `phiml.math.concat()`. To create an undefined size, call a constructor function (`batch()`, `spatial()`, `channel()`, `instance()`) with positional `str` arguments, e.g. `spatial('x')`. """ ...
Methods
def after_gather(self, selection: dict) ‑> phiml.math._shape.Shape-
def after_gather(self, selection: dict) -> 'Shape': ... def as_batch(self) ‑> phiml.math._shape.Shape-
def as_batch(self) -> 'Shape': """Returns a copy of this `Shape` with all dimensions of type *batch*.""" ...Returns a copy of this
`Shape` with all dimensions of type *batch*.
def as_channel(self) ‑> phiml.math._shape.Shape-
def as_channel(self) -> 'Shape': """Returns a copy of this `Shape` with all dimensions of type *channel*.""" ...Returns a copy of this
`Shape` with all dimensions of type *channel*.
def as_dual(self) ‑> phiml.math._shape.Shape-
def as_dual(self) -> 'Shape': """Returns a copy of this `Shape` with all dimensions of type *dual*.""" ...Returns a copy of this
`Shape` with all dimensions of type *dual*.
def as_instance(self) ‑> phiml.math._shape.Shape-
def as_instance(self) -> 'Shape': """Returns a copy of this `Shape` with all dimensions of type *instance*.""" ...Returns a copy of this
`Shape` with all dimensions of type *instance*.
def as_spatial(self) ‑> phiml.math._shape.Shape-
def as_spatial(self) -> 'Shape': """Returns a copy of this `Shape` with all dimensions of type *spatial*.""" ...Returns a copy of this
`Shape` with all dimensions of type *spatial*.
def as_type(self, new_type: Callable) ‑> phiml.math._shape.Shape-
def as_type(self, new_type: Callable) -> 'Shape': """Returns a copy of this `Shape` with all dimensions of the given type, either `batch`, `dual`, `spatial`, `instance`, or `channel` .""" ... def flipped(self, dims: List[str] | Tuple[str])-
def flipped(self, dims: Union[List[str], Tuple[str]]): ... def get_dim_type(self, dim: str) ‑> str-
def get_dim_type(self, dim: str) -> str: """ Args: dim: Dimension, either as name `str` or single-dimension `Shape`. Returns: Dimension type, one of `batch`, `spatial`, `instance`, `channel`. """ ...Args
dim- Dimension, either as name
stror single-dimensionShape.
Returns
Dimension type, one of
`batch`, `spatial`, `instance`, `channel`.
def get_labels(self,
dim: str | ForwardRef('Shape') | int,
fallback_spatial=False) ‑> tuple | None-
def get_labels(self, dim: Union[str, 'Shape', int], fallback_spatial=False) -> Union[tuple, None]: """ Args: fallback_spatial: If `True` and no labels are defined for `dim` and `dim` is a channel dimension, the spatial dimension names are interpreted as labels along `dim` in the order they are listed in this `Shape`. dim: Dimension, either as `int` index, `str` name or single-dimension `Shape`. Returns: Item names as `tuple` or `None` if not defined. """ ...Args
fallback_spatial- If
Trueand no labels are defined fordimanddimis a channel dimension, the spatial dimension names are interpreted as labels alongdimin the order they are listed in thisShape. dim- Dimension, either as
intindex,strname or single-dimensionShape.
Returns
Item names as
tupleorNoneif not defined. def get_size(self,
dim: str | ForwardRef('Shape') | int,
default=None)-
def get_size(self, dim: Union[str, 'Shape', int], default=None): """ Args: dim: Dimension, either as name `str` or single-dimension `Shape` or index `int`. default: (Optional) If the dim does not exist, return this value instead of raising an error. Returns: Size associated with `dim` as `int` or `Tensor`. """ ... def index(self,
dim: str | ForwardRef('Shape') | None) ‑> int-
def index(self, dim: Union[str, 'Shape', None]) -> int: """ Finds the index of the dimension within this `Shape`. See Also: `Shape.indices()`. Args: dim: Dimension name or single-dimension `Shape`. Returns: Index as `int`. """ ...Finds the index of the dimension within this
Shape.See Also:
Shape.indices().Args
dim- Dimension name or single-dimension
Shape.
Returns
Index as
int. def indices(self, names: Sequence[str]) ‑> Tuple[int, ...]-
def indices(self, names: Sequence[str]) -> Tuple[int, ...]: """ Finds the indices of the given dimensions within this `Shape`. See Also: `Shape.index()`. Args: names: Sequence of dim names as `tuple` or `list`. No name can occur in `names` more than once. Returns: Indices as `tuple[int]`. """ ...Finds the indices of the given dimensions within this
Shape.See Also:
Shape.index().Args
names- Sequence of dim names as
tupleorlist. No name can occur innamesmore than once.
Returns
Indices as
tuple[int]. def is_compatible(self, *others: Shape) ‑> bool-
def is_compatible(self, *others: 'Shape') -> bool: """ Checks if this shape and the others can be broadcast. Args: others: Other shapes. Returns: `True` only if all shapes are compatible. """ ...Checks if this shape and the others can be broadcast.
Args
others- Other shapes.
Returns
`True` only if all shapes are compatible.
def isdisjoint(self,
other: ForwardRef('Shape') | tuple | list | str)-
def isdisjoint(self, other: Union['Shape', tuple, list, str]): """ Shapes are disjoint if all dimension names of one shape do not occur in the other shape. """ ...Shapes are disjoint if all dimension names of one shape do not occur in the other shape.
def mask(self,
names: tuple | list | set | ForwardRef('Shape'))-
def mask(self, names: Union[tuple, list, set, 'Shape']): """ Returns a binary sequence corresponding to the names of this Shape. A value of 1 means that a dimension of this Shape is contained in `names`. Args: names: instance of dimension names: tuple or list or set: Returns: binary sequence """ ...Returns a binary sequence corresponding to the names of this Shape. A value of 1 means that a dimension of this Shape is contained in
names.Args
names- Dimension names as a `tuple`, `list`, `set`, or `Shape`.
Returns
binary sequence
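A small sketch of `mask()`; the exact sequence type of the result is not fixed here:
>>> from phiml.math import spatial
>>> s = spatial(x=4, y=3)
>>> s.mask(['y'])  # one entry per dim of s, e.g. (0, 1)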
def meshgrid(self, names=False)-
def meshgrid(self, names=False): """ Builds a sequence containing all multi-indices within a tensor of this shape. All indices are returned as `dict` mapping dimension names to `int` indices. The corresponding values can be retrieved from Tensors and other Sliceables using `tensor[index]`. This function currently only supports uniform tensors. Args: names: If `True`, replace indices by their labels if available. Returns: `dict` iterator. """ ...Builds a sequence containing all multi-indices within a tensor of this shape. All indices are returned as
`dict` mapping dimension names to `int` indices.
The corresponding values can be retrieved from Tensors and other Sliceables using `tensor[index]`.
This function currently only supports uniform tensors.
Args
names- If `True`, replace indices by their labels if available.
Returns
`dict` iterator.
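Example (sketch, not from the original docs; the iteration order shown here is assumed, with the last dim varying fastest):
>>> from phiml.math import spatial
>>> list(spatial(x=2, y=2).meshgrid())  # [{'x': 0, 'y': 0}, {'x': 0, 'y': 1}, {'x': 1, 'y': 0}, {'x': 1, 'y': 1}]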
def only(self, dims: DimFilter, reorder=False) ‑> phiml.math._shape.Shape-
Expand source code
def only(self, dims: 'DimFilter', reorder=False) -> 'Shape': """ Builds a new shape from this one that only contains the given dimensions. Dimensions in `dims` that are not part of this Shape are ignored. The complementary operation is :func:`Shape.without`. Args: dims: comma-separated dimension names (str) or instance of dimensions (tuple, list, Shape) or filter function. reorder: If `False`, keeps the dimension order as defined in this shape. If `True`, reorders the dimensions of this shape to match the order of `dims`. Returns: Shape containing only specified dimensions """ ...Builds a new shape from this one that only contains the given dimensions. Dimensions in
`dims` that are not part of this Shape are ignored.
The complementary operation is `Shape.without()`.
Args
dims- comma-separated dimension names (str) or instance of dimensions (tuple, list, Shape) or filter function.
reorder- If `False`, keeps the dimension order as defined in this shape. If `True`, reorders the dimensions of this shape to match the order of `dims`.
Returns
Shape containing only specified dimensions
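Example (sketch, not from the original docs; the shapes in the comments are abbreviated reprs):
>>> from phiml.math import batch, spatial, channel
>>> s = batch(b=10) & spatial(x=4, y=3) & channel(vector='x,y')
>>> s.only(spatial)  # spatial dims only: (x=4, y=3)
>>> s.only('vector,b', reorder=True)  # (vector=2, b=10), reordered to match dims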
def prepare_gather(self,
dim: str,
selection: slice | int | ForwardRef('Shape') | str | tuple | list) ‑> slice | List[int]-
Expand source code
def prepare_gather(self, dim: str, selection: Union[slice, int, 'Shape', str, tuple, list]) -> Union[slice, List[int]]: """ Parse a slice object for a specific dimension. Args: dim: Name of dimension to slice. selection: Slice object. Returns: """ ...Parse a slice object for a specific dimension.
Args
dim- Name of dimension to slice.
selection- Slice object.
Returns:
def prepare_renaming_gather(self,
dim: str,
selection: slice | int | ForwardRef('Shape') | str | tuple | list)-
Expand source code
def prepare_renaming_gather(self, dim: str, selection: Union[slice, int, 'Shape', str, tuple, list]): ... def replace(self,
dims: ForwardRef('Shape') | tuple | list | str,
new: Shape) ‑> phiml.math._shape.Shape-
Expand source code
def replace(self, dims: Union['Shape', str, tuple, list], new: 'Shape') -> 'Shape': """ Returns a copy of `self` with `dims` replaced by `new`. Dimensions that are not present in `self` are ignored. The dimension order is preserved. Args: dims: Dimensions to replace. new: New dimensions, must have same length as `dims` if `len(dims) > 1`. If a `Shape` is given, replaces the dimension types and labels as well. Returns: `Shape` with same rank and dimension order as `self`. """ ...Returns a copy of
`self` with `dims` replaced by `new`. Dimensions that are not present in `self` are ignored.
The dimension order is preserved.
Args
dims- Dimensions to replace.
new- New dimensions, must have same length as `dims` if `len(dims) > 1`. If a `Shape` is given, replaces the dimension types and labels as well.
Returns
`Shape` with same rank and dimension order as `self`.
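Example (sketch, not from the original docs; type and labels are taken from the replacement dim):
>>> from phiml.math import spatial
>>> spatial(x=4, y=3).replace('x', spatial(u=4))  # (u=4, y=3)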
def replace_selection(self,
names: Sequence[str],
new: Shape) ‑> phiml.math._shape.Shape-
Expand source code
def replace_selection(self, names: Sequence[str], new: 'Shape') -> 'Shape': """ Replace some of the dims of this shape. Args: names: Sequence of dim names. new: Replacement dims, must have same length as `old`. Returns: Copy of `self` with replaced dims. """ ...Replace some of the dims of this shape.
Args
names- Sequence of dim names.
new- Replacement dims, must have same length as `names`.
Returns
Copy of `self` with replaced dims.
def transpose(self, dim_type: str) ‑> phiml.math._shape.Shape-
Expand source code
def transpose(self, dim_type: str) -> 'Shape': ... def with_dim_size(self,
dim: str | ForwardRef('Shape'),
size: int | ForwardRef('math.Tensor') | str | tuple | list | None,
keep_labels=True)-
Expand source code
def with_dim_size(self, dim: Union[str, 'Shape'], size: Union[int, 'math.Tensor', str, tuple, list, None], keep_labels=True): """ Returns a new `Shape` that has a different size for `dim`. Args: dim: Dimension for which to replace the size, `Shape` or `str`. size: New size, `int` or `Tensor` Returns: `Shape` with same names and types as `self`. """ ... def with_size(self, size: int | Sequence[str])-
Expand source code
def with_size(self, size: Union[int, Sequence[str]]): """ Only for single-dimension shapes. Returns a `Shape` representing this dimension but with a different size. See Also: `Shape.with_sizes()`. Args: size: Replacement size for this dimension. Returns: `Shape` """ ...Only for single-dimension shapes. Returns a
`Shape` representing this dimension but with a different size.
See Also:
`Shape.with_sizes()`.
Args
size- Replacement size for this dimension.
Returns
`Shape`
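Example (sketch, not from the original docs; passing a sequence of labels instead of an `int` is assumed per the signature):
>>> from phiml.math import spatial, channel
>>> spatial(x=4).with_size(8)  # (x=8)
>>> channel(vector=2).with_size(('x', 'y'))  # labels as replacement size: (vector=x,y)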
def with_sizes(self,
sizes: Sequence[int] | Sequence[Tuple[str, ...]] | ForwardRef('Shape') | int,
keep_labels=True)-
Expand source code
def with_sizes(self, sizes: Union[Sequence[int], Sequence[Tuple[str, ...]], 'Shape', int], keep_labels=True): """ Returns a new `Shape` matching the dimension names and types of `self` but with different sizes. See Also: `Shape.with_size()`. Args: sizes: One of * `tuple` / `list` of same length as `self` containing replacement sizes or replacement labels. * `Shape` of any rank. Replaces sizes for dimensions shared by `sizes` and `self`. * `int`: new size for all dimensions keep_labels: If `False`, forgets all labels. If `True`, keeps labels where the size does not change. Returns: `Shape` with same names and types as `self`. """ ...Returns a new
`Shape` matching the dimension names and types of `self` but with different sizes.
See Also:
`Shape.with_size()`.
Args
sizes- One of:
- `tuple` / `list` of same length as `self` containing replacement sizes or replacement labels.
- `Shape` of any rank: replaces sizes for dimensions shared by `sizes` and `self`.
- `int`: new size for all dimensions.
keep_labels- If `False`, forgets all labels. If `True`, keeps labels where the size does not change.
Returns
`Shape` with same names and types as `self`.
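Example (sketch, not from the original docs; shapes in the comments are abbreviated reprs):
>>> from phiml.math import spatial
>>> s = spatial(x=4, y=3)
>>> s.with_sizes(2)  # int resizes all dims: (x=2, y=2)
>>> s.with_sizes(spatial(x=8))  # Shape resizes shared dims only: (x=8, y=3)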
def without(self, dims: DimFilter) ‑> phiml.math._shape.Shape-
Expand source code
def without(self, dims: 'DimFilter') -> 'Shape': """ Builds a new shape from this one that is missing all given dimensions. Dimensions in `dims` that are not part of this Shape are ignored. The complementary operation is `Shape.only()`. Args: dims: Single dimension (str) or instance of dimensions (tuple, list, Shape) dims: Dimensions to exclude as `str` or `tuple` or `list` or `Shape`. Dimensions that are not included in this shape are ignored. Returns: Shape without specified dimensions """ ...Builds a new shape from this one that is missing all given dimensions. Dimensions in
`dims` that are not part of this Shape are ignored.
The complementary operation is `Shape.only()`.
Args
dims- Dimensions to exclude as `str`, `tuple`, `list` or `Shape`. Dimensions that are not included in this shape are ignored.
Returns
Shape without specified dimensions
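Example (sketch, not from the original docs, complementing only(); shapes in the comments are abbreviated reprs):
>>> from phiml.math import batch, spatial
>>> s = batch(b=10) & spatial(x=4, y=3)
>>> s.without('x')  # (b=10, y=3)
>>> s.without(batch)  # (x=4, y=3)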
def without_sizes(self)-
Expand source code
def without_sizes(self): """ Returns: `Shape` with all sizes undefined (`None`) """ ...Returns
`Shape` with all sizes undefined (`None`).
class Solve (method: str | None = 'auto',
rel_tol: float | phiml.math._tensors.Tensor = None,
abs_tol: float | phiml.math._tensors.Tensor = None,
x0: ~X | Any = None,
max_iterations: int | phiml.math._tensors.Tensor = 1000,
suppress: tuple | list = (),
preprocess_y: Callable | None = None,
preprocess_y_args: tuple = (),
preconditioner: str | None = None,
rank_deficiency: int = None,
gradient_solve: ForwardRef('Solve[Y, X]') | None = None)-
Expand source code
class Solve(Generic[X, Y]): """ Specifies parameters and stopping criteria for solving a minimization problem or system of equations. """ def __init__(self, method: Union[str, None] = 'auto', rel_tol: Union[float, Tensor] = None, abs_tol: Union[float, Tensor] = None, x0: Union[X, Any] = None, max_iterations: Union[int, Tensor] = 1000, suppress: Union[tuple, list] = (), preprocess_y: Optional[Callable] = None, preprocess_y_args: tuple = (), preconditioner: Optional[str] = None, rank_deficiency: int = None, gradient_solve: Union['Solve[Y, X]', None] = None): method = method or 'auto' assert isinstance(method, str) self.method: str = method """ Optimization method to use. Available solvers depend on the solve function that is used to perform the solve. """ self.rel_tol: Tensor = math.to_float(wrap(rel_tol)) if rel_tol is not None else None """Relative tolerance for linear solves only, defaults to 1e-5 for singe precision solves and 1e-12 for double precision solves. This must be unset or `0` for minimization problems. For systems of equations *f(x)=y*, the final tolerance is `max(rel_tol * norm(y), abs_tol)`. """ self.abs_tol: Tensor = math.to_float(wrap(abs_tol)) if abs_tol is not None else None """ Absolut tolerance for optimization problems and linear solves. Defaults to 1e-5 for singe precision solves and 1e-12 for double precision solves. For systems of equations *f(x)=y*, the final tolerance is `max(rel_tol * norm(y), abs_tol)`. """ self.max_iterations: Tensor = math.to_int32(wrap(max_iterations)) """ Maximum number of iterations to perform before raising a `NotConverged` error is raised. """ self.x0 = x0 """ Initial guess for the method, of same type and dimensionality as the solve result. This property must be set to a value compatible with the solution `x` before running a method. """ self.preprocess_y: Callable = preprocess_y """ Function to be applied to the right-hand-side vector of an equation system before solving the system. This property is propagated to gradient solves by default. """ self.preprocess_y_args: tuple = preprocess_y_args assert all(issubclass(err, ConvergenceException) for err in suppress) self.suppress: tuple = tuple(suppress) """ Error types to suppress; `tuple` of `ConvergenceException` types. For these errors, the solve function will instead return the partial result without raising the error. """ self.preconditioner = preconditioner assert isinstance(rank_deficiency, int) or rank_deficiency is None, f"rank_deficiency must be an integer but got {rank_deficiency}" self.rank_deficiency: int = rank_deficiency """Rank deficiency of matrix or linear function. If not specified, will be determined for (implicit or explicit) matrix solves and assumed 0 for function-based solves.""" self._gradient_solve: Solve[Y, X] = gradient_solve self.id = str(uuid.uuid4()) # not altered by copy_with(), so that the lookup SolveTape[Solve] works after solve has been copied @property def gradient_solve(self) -> 'Solve[Y, X]': """ Parameters to use for the gradient pass when an implicit gradient is computed. If `None`, a duplicate of this `Solve` is created for the gradient solve. In any case, the gradient solve information will be stored in `gradient_solve.result`. 
""" if self._gradient_solve is None: self._gradient_solve = copy_with(self, x0=None) return self._gradient_solve def __repr__(self): return f"{self.method} with tolerance {self.rel_tol} (rel), {self.abs_tol} (abs), max_iterations={self.max_iterations}" + (" including preprocessing" if self.preprocess_y else "") def __eq__(self, other): if not isinstance(other, Solve): return False if self.method != other.method \ or not math.equal(self.abs_tol, other.abs_tol) \ or not math.equal(self.rel_tol, other.rel_tol) \ or (self.max_iterations != other.max_iterations).any \ or self.preprocess_y is not other.preprocess_y \ or self.suppress != other.suppress \ or self.preconditioner != other.preconditioner \ or self.rank_deficiency != other.rank_deficiency: return False return self.x0 == other.x0 def __variable_attrs__(self): return 'x0', 'rel_tol', 'abs_tol', 'max_iterations' def __value_attrs__(self): return self.__variable_attrs__() def with_defaults(self, mode: str): assert mode in ('solve', 'optimization') result = self if result.rel_tol is None: result = copy_with(result, rel_tol=_default_tolerance() if mode == 'solve' else wrap(0.)) if result.abs_tol is None: result = copy_with(result, abs_tol=_default_tolerance()) return result def with_preprocessing(self, preprocess_y: Callable, *args) -> 'Solve': """ Adds preprocessing to this `Solve` and all corresponding gradient solves. Args: preprocess_y: Preprocessing function. *args: Arguments for the preprocessing function. Returns: Copy of this `Solve` with given preprocessing. """ assert self.preprocess_y is None, f"preprocessing for linear solve '{self}' already set" gradient_solve = self._gradient_solve.with_preprocessing(preprocess_y, *args) if self._gradient_solve is not None else None return copy_with(self, preprocess_y=preprocess_y, preprocess_y_args=args, _gradient_solve=gradient_solve)Specifies parameters and stopping criteria for solving a minimization problem or system of equations.
Ancestors
- typing.Generic
Instance variables
var abs_tol-
Absolute tolerance for optimization problems and linear solves. Defaults to 1e-5 for single precision solves and 1e-12 for double precision solves. For systems of equations f(x)=y, the final tolerance is
`max(rel_tol * norm(y), abs_tol)`.
prop gradient_solve : Solve[Y, X]-
Expand source code
@property def gradient_solve(self) -> 'Solve[Y, X]': """ Parameters to use for the gradient pass when an implicit gradient is computed. If `None`, a duplicate of this `Solve` is created for the gradient solve. In any case, the gradient solve information will be stored in `gradient_solve.result`. """ if self._gradient_solve is None: self._gradient_solve = copy_with(self, x0=None) return self._gradient_solveParameters to use for the gradient pass when an implicit gradient is computed. If
`None`, a duplicate of this `Solve` is created for the gradient solve.
In any case, the gradient solve information will be stored in `gradient_solve.result`.
var max_iterations-
Maximum number of iterations to perform before a `NotConverged` error is raised.
var method-
Optimization method to use. Available solvers depend on the solve function that is used to perform the solve.
var preprocess_y-
Function to be applied to the right-hand-side vector of an equation system before solving the system. This property is propagated to gradient solves by default.
var rank_deficiency-
Rank deficiency of matrix or linear function. If not specified, will be determined for (implicit or explicit) matrix solves and assumed 0 for function-based solves.
var rel_tol-
Relative tolerance for linear solves only, defaults to 1e-5 for single precision solves and 1e-12 for double precision solves. This must be unset or `0` for minimization problems. For systems of equations f(x)=y, the final tolerance is `max(rel_tol * norm(y), abs_tol)`.
var suppress-
Error types to suppress;
`tuple` of `ConvergenceException` types. For these errors, the solve function will instead return the partial result without raising the error.
var x0-
Initial guess for the method, of same type and dimensionality as the solve result. This property must be set to a value compatible with the solution
`x` before running a method.
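Example (a minimal sketch, not from the original docs; assumes the 'CG' method is available for the chosen solve function and uses math.jit_compile_linear to mark the function as linear):
>>> from phiml import math
>>> from phiml.math import Solve, spatial
>>> y = math.ones(spatial(x=4))
>>> solve = Solve('CG', rel_tol=1e-5, x0=math.zeros(spatial(x=4)))
>>> x = math.solve_linear(math.jit_compile_linear(lambda v: 2 * v), y, solve)  # x ≈ 0.5 everywhere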
Methods
def with_defaults(self, mode: str)-
Expand source code
def with_defaults(self, mode: str): assert mode in ('solve', 'optimization') result = self if result.rel_tol is None: result = copy_with(result, rel_tol=_default_tolerance() if mode == 'solve' else wrap(0.)) if result.abs_tol is None: result = copy_with(result, abs_tol=_default_tolerance()) return result def with_preprocessing(self, preprocess_y: Callable, *args) ‑> phiml.math._optimize.Solve-
Expand source code
def with_preprocessing(self, preprocess_y: Callable, *args) -> 'Solve': """ Adds preprocessing to this `Solve` and all corresponding gradient solves. Args: preprocess_y: Preprocessing function. *args: Arguments for the preprocessing function. Returns: Copy of this `Solve` with given preprocessing. """ assert self.preprocess_y is None, f"preprocessing for linear solve '{self}' already set" gradient_solve = self._gradient_solve.with_preprocessing(preprocess_y, *args) if self._gradient_solve is not None else None return copy_with(self, preprocess_y=preprocess_y, preprocess_y_args=args, _gradient_solve=gradient_solve)
class SolveInfo-
Expand source code
class SolveInfo(Generic[X, Y]): """ Stores information about the solution or trajectory of a solve. When representing the full optimization trajectory, all tracked quantities will have an additional `trajectory` batch dimension. """ def __init__(self, solve: Solve, x: X, residual: Union[Y, None], iterations: Union[Tensor, None], function_evaluations: Union[Tensor, None], converged: Tensor, diverged: Tensor, method: str, msg: Tensor, solve_time: float): # tuple.__new__(SolveInfo, (x, residual, iterations, function_evaluations, converged, diverged)) self.solve: Solve[X, Y] = solve """ `Solve`, Parameters specified for the solve. """ self.x: X = x """ `Tensor` or `phiml.math.magic.PhiTreeNode`, solution estimate. """ self.residual: Y = residual """ `Tensor` or `phiml.math.magic.PhiTreeNode`, residual vector for systems of equations or function value for minimization problems. """ self.iterations: Tensor = iterations """ `Tensor`, number of performed iterations to reach this state. """ self.function_evaluations: Tensor = function_evaluations """ `Tensor`, how often the function (or its gradient function) was called. """ self.converged: Tensor = converged """ `Tensor`, whether the residual is within the specified tolerance. """ self.diverged: Tensor = diverged """ `Tensor`, whether the solve has diverged at this point. """ self.method = method """ `str`, which method and implementation that was used. """ if all_available(diverged, converged, iterations): _, res_tensors = disassemble_tree(residual, cache=False) msg_fun = partial(_default_solve_info_msg, solve=solve) msg = map_(msg_fun, msg, converged.trajectory[-1], diverged.trajectory[-1], iterations.trajectory[-1], method=method, residual=res_tensors[0], dims=converged.shape.without('trajectory')) self.msg = msg """ `str`, termination message """ self.solve_time = solve_time """ Time spent in Backend solve function (in seconds) """ def __repr__(self): return f"{self.method}: {self.converged.trajectory[-1].sum} converged, {self.diverged.trajectory[-1].sum} diverged" def snapshot(self, index): return SolveInfo(self.solve, self.x.trajectory[index], self.residual.trajectory[index], self.iterations.trajectory[index], self.function_evaluations.trajectory[index], self.converged.trajectory[index], self.diverged.trajectory[index], self.method, self.msg, self.solve_time) def convergence_check(self, only_warn: bool): if not all_available(self.diverged, self.converged): return if self.diverged.any: if Diverged not in self.solve.suppress: if only_warn: warnings.warn(self.msg, ConvergenceWarning) else: raise Diverged(self) if not self.converged.trajectory[-1].all: if NotConverged not in self.solve.suppress: if only_warn: warnings.warn(self.msg, ConvergenceWarning) else: raise NotConverged(self)Stores information about the solution or trajectory of a solve.
When representing the full optimization trajectory, all tracked quantities will have an additional
`trajectory` batch dimension.
Ancestors
- typing.Generic
Instance variables
var converged-
`Tensor`, whether the residual is within the specified tolerance.
var diverged-
`Tensor`, whether the solve has diverged at this point.
var function_evaluations-
`Tensor`, how often the function (or its gradient function) was called.
var iterations-
`Tensor`, number of performed iterations to reach this state.
var method-
`str`, which method and implementation was used.
var msg-
`str`, termination message.
var residual-
`Tensor` or `PhiTreeNode`, residual vector for systems of equations or function value for minimization problems.
var solve-
`Solve`, parameters specified for the solve.
var solve_time-
Time spent in Backend solve function (in seconds)
var x-
`Tensor` or `PhiTreeNode`, solution estimate.
Methods
def convergence_check(self, only_warn: bool)-
Expand source code
def convergence_check(self, only_warn: bool): if not all_available(self.diverged, self.converged): return if self.diverged.any: if Diverged not in self.solve.suppress: if only_warn: warnings.warn(self.msg, ConvergenceWarning) else: raise Diverged(self) if not self.converged.trajectory[-1].all: if NotConverged not in self.solve.suppress: if only_warn: warnings.warn(self.msg, ConvergenceWarning) else: raise NotConverged(self) def snapshot(self, index)-
Expand source code
def snapshot(self, index): return SolveInfo(self.solve, self.x.trajectory[index], self.residual.trajectory[index], self.iterations.trajectory[index], self.function_evaluations.trajectory[index], self.converged.trajectory[index], self.diverged.trajectory[index], self.method, self.msg, self.solve_time)
class SolveTape (*solves: phiml.math._optimize.Solve, record_trajectories=False)-
Expand source code
class SolveTape: """ Used to record additional information about solves invoked via `solve_linear()`, `solve_nonlinear()` or `minimize()`. While a `SolveTape` is active, certain performance optimizations and algorithm implementations may be disabled. To access a `SolveInfo` of a recorded solve, use >>> solve = Solve(method, ...) >>> with SolveTape() as solves: >>> x = math.solve_linear(f, y, solve) >>> result: SolveInfo = solves[solve] # get by Solve >>> result: SolveInfo = solves[0] # get by index """ def __init__(self, *solves: Solve, record_trajectories=False): """ Args: *solves: (Optional) Select specific `solves` to be recorded. If none is given, records all solves that occur within the scope of this `SolveTape`. record_trajectories: When enabled, the entries of `SolveInfo` will contain an additional batch dimension named `trajectory`. """ self.record_only_ids = [s.id for s in solves] self.record_trajectories = record_trajectories self.solves: List[SolveInfo] = [] def should_record_trajectory_for(self, solve: Solve): if not self.record_trajectories: return False if not self.record_only_ids: return True return solve.id in self.record_only_ids def __enter__(self): _SOLVE_TAPES.append(self) return self def __exit__(self, exc_type, exc_val, exc_tb): _SOLVE_TAPES.remove(self) def _add(self, solve: Solve, trj: bool, result: SolveInfo): if any(s.solve.id == solve.id for s in self.solves): warnings.warn("SolveTape contains two results for the same solve settings. SolveTape[solve] will return the first solve result.", RuntimeWarning) if self.record_only_ids and solve.id not in self.record_only_ids: return # this solve should not be recorded if self.record_trajectories: assert trj, "Solve did not record a trajectory." self.solves.append(result) elif trj: self.solves.append(result.snapshot(-1)) else: self.solves.append(result) def __getitem__(self, item) -> SolveInfo: if isinstance(item, int): return self.solves[item] else: assert isinstance(item, Solve) solves = [s for s in self.solves if s.solve.id == item.id] if len(solves) == 0: raise KeyError(f"No solve recorded with key '{item}'.") assert len(solves) == 1 return solves[0] def __iter__(self): return iter(self.solves) def __len__(self): return len(self.solves)Used to record additional information about solves invoked via
`solve_linear()`, `solve_nonlinear()` or `minimize()`. While a `SolveTape` is active, certain performance optimizations and algorithm implementations may be disabled.
To access a
`SolveInfo` of a recorded solve, use
>>> solve = Solve(method, ...)
>>> with SolveTape() as solves:
>>>     x = math.solve_linear(f, y, solve)
>>> result: SolveInfo = solves[solve]  # get by Solve
>>> result: SolveInfo = solves[0]  # get by index
Args
*solves- (Optional) Select specific solves to be recorded. If none is given, records all solves that occur within the scope of this `SolveTape`.
record_trajectories- When enabled, the entries of `SolveInfo` will contain an additional batch dimension named `trajectory`.
Methods
def should_record_trajectory_for(self, solve: phiml.math._optimize.Solve)-
Expand source code
def should_record_trajectory_for(self, solve: Solve): if not self.record_trajectories: return False if not self.record_only_ids: return True return solve.id in self.record_only_ids
class Tensor (properties: TensorProperties = None)-
Expand source code
class Tensor(Generic[T]): """ Abstract base class to represent structured data of one data type. This class replaces the native tensor classes `numpy.ndarray`, `torch.Tensor`, `tensorflow.Tensor` or `jax.numpy.ndarray` as the main data container in Φ-ML. `Tensor` instances are different from native tensors in two important ways: * The dimensions of Tensors have *names* and *types*. * Tensors can have non-uniform shapes, meaning that the size of dimensions can vary along other dimensions. To check whether a value is a tensor, use `isinstance(value, Tensor)`. To construct a Tensor, use `phiml.math.tensor()`, `phiml.math.wrap()` or one of the basic tensor creation functions, see https://tum-pbs.github.io/PhiML/Tensors.html . Tensors are not editable. When backed by an editable native tensor, e.g. a `numpy.ndarray`, do not edit the underlying data structure. """ def __init__(self, properties: 'TensorProperties' = None): self._prop = properties or EMPTY_TENSOR_PROPERTIES if DEBUG_CHECKS: self._init_stack = traceback.extract_stack() def native(self, order: Union[str, tuple, list, Shape] = None, force_expand=True): """ Returns a native tensor object with the dimensions ordered according to `order`. Transposes the underlying tensor to match the name order and adds singleton dimensions for new dimension names. If a dimension of the tensor is not listed in `order`, a `ValueError` is raised. Additionally, groups of dims can be specified for `order` to pack dims. To do this, pass a `tuple` or `list` of dims to be packed into one native axis. Each entry must be one of the following: * `str`: the name of one dimension that is present on `value`. * `Shape`: Dimensions to be packed. If `force_expand`, missing dimensions are first added, otherwise they are ignored. * Filter function: Packs all dimensions of this type that are present on `value`. * Ellipsis `...`: Packs all remaining dimensions into this slot. Can only be passed once. * `None` or `()`: Adds a singleton dimension. Collections of or comma-separated dims may also be used but only if all dims are present on `value`. Args: order: (Optional) Order of dimension names as comma-separated string, list or `Shape`. force_expand: If `False`, dimensions along which values are guaranteed to be constant will not be expanded to their true size but returned as singleton dimensions. If `True`, repeats the tensor along missing dimensions. If `False`, puts singleton dimensions where possible. If a sequence of dimensions is provided, only forces the expansion for groups containing those dimensions. Returns: Native tensor representation, such as PyTorch tensor or NumPy array. Raises: `ValueError` if the tensor cannot be transposed to match target_shape """ raise NotImplementedError def _reshaped_native(self, groups: Sequence[Shape]): """constant (collapsed) dims may be left out of groups to avoid expanding. All present dims will be expanded to match `groups`""" raise NotImplementedError def _transposed_native(self, order: Sequence[str], force_expand: bool): """Returns the native tensor, transposing it to match `order`. New names in `order` are added as singleton dims. Constant dims may be dropped to avoid expanding. If present, they will be included as singleton, not expanded to their full size.""" raise NotImplementedError def numpy(self, order: Union[str, tuple, list, Shape] = None, force_expand=True) -> np.ndarray: """ Converts this tensor to a `numpy.ndarray` with dimensions ordered according to `order`. *Note*: Using this function breaks the autograd chain. 
The returned tensor is not differentiable. To get a differentiable tensor, use `Tensor.native()` instead. Transposes the underlying tensor to match the name order and adds singleton dimensions for new dimension names. If a dimension of the tensor is not listed in `order`, a `ValueError` is raised. If this `Tensor` is backed by a NumPy array, a reference to this array may be returned. See Also: `phiml.math.numpy()` Args: order: (Optional) Order of dimension names as comma-separated string, list or `Shape`. force_expand: If `False`, dimensions along which values are guaranteed to be constant will not be expanded to their true size but returned as singleton dimensions. Returns: NumPy representation Raises: ValueError if the tensor cannot be transposed to match target_shape """ return self.backend.numpy(self.native(order, force_expand)) def __array__(self, dtype=None): # NumPy conversion if self.rank > 1: warnings.warn("Automatic conversion of Φ-ML tensors to NumPy can cause problems because the dimension order is not guaranteed.", SyntaxWarning, stacklevel=3) return self.numpy(self._shape) def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): # NumPy interface if len(inputs) != 2: return NotImplemented switch_args = self is inputs[1] other = inputs[0] if switch_args else inputs[1] if ufunc.__name__ == 'multiply': return self._op2(other, operator.mul, switch_args) if ufunc.__name__ == 'add': return self._op2(other, operator.add, switch_args) if ufunc.__name__ == 'subtract': return self._op2(other, operator.sub, switch_args) if ufunc.__name__ in ['divide', 'true_divide']: return self._op2(other, operator.truediv, switch_args) if ufunc.__name__ == 'floor_divide': return self._op2(other, operator.floordiv, switch_args) if ufunc.__name__ == 'remainder': return self._op2(other, operator.mod, switch_args) if ufunc.__name__ == 'power': return self._op2(other, operator.pow, switch_args) if ufunc.__name__ == 'equal': return self.__eq__(inputs[1] if self is inputs[0] else inputs[0]) if ufunc.__name__ == 'not_equal': return self.__ne__(inputs[1] if self is inputs[0] else inputs[0]) if ufunc.__name__ == 'greater': return self._op2(other, operator.gt, switch_args) if ufunc.__name__ == 'greater_equal': return self._op2(other, operator.ge, switch_args) if ufunc.__name__ == 'less': return self._op2(other, operator.gt, not switch_args) if ufunc.__name__ == 'less_equal': return self._op2(other, operator.ge, not switch_args) if ufunc.__name__ == 'left_shift': return self._op2(other, operator.lshift, switch_args) if ufunc.__name__ == 'right_shift': return self._op2(other, operator.rshift, switch_args) raise NotImplementedError(f"NumPy function '{ufunc.__name__}' is not compatible with Φ-ML tensors.") def __torch_function__(self, func, types, args=(), kwargs=None): from ..backend.torch._torch_hooks import handle_torch_function return handle_torch_function(func, types, args, kwargs) @property def dtype(self) -> DType: """ Data type of the elements of this `Tensor`. """ raise NotImplementedError(self.__class__) @property def shape(self) -> Shape: """ The `Shape` lists the dimensions with their sizes, names and types. 
""" raise NotImplementedError(self.__class__) @property def backend(self) -> Backend: raise NotImplementedError(self.__class__) @property def default_backend(self) -> Backend: return self.backend def _with_shape_replaced(self, new_shape: Shape): raise NotImplementedError(self.__class__) def _with_natives_replaced(self, natives: list): """ Replaces all n _natives() of this Tensor with the first n elements of the list and removes them from the list. """ raise NotImplementedError(self.__class__) @property def _var_dims(self) -> Tuple[str, ...]: """Returns the names of all non-constant dims.""" return self.shape.names @property def rank(self) -> int: """ Number of explicit dimensions of this `Tensor`. Equal to `tensor.shape.rank`. This replaces [`numpy.ndarray.ndim`](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.ndim.html) / [`torch.Tensor.dim`](https://pytorch.org/docs/master/generated/torch.Tensor.dim.html) / [`tf.rank()`](https://www.tensorflow.org/api_docs/python/tf/rank) / [`jax.numpy.ndim()`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.ndim.html). """ return self.shape.rank @property def _is_tracer(self) -> bool: """ Tracers store additional internal information. They should not be converted to `native()` in intermediate operations. TensorStack prevents performing the actual stack operation if one of its component tensors is special. """ raise NotImplementedError(self.__class__) def _cached(self): raise NotImplementedError(self.__class__) def _to_dict(self): return self._cached()._to_dict() def __len__(self): return self.shape.volume if self.rank == 1 else NotImplemented def __bool__(self): assert self.rank == 0, f"Cannot convert tensor with non-empty shape {self.shape} to bool. Use tensor.any or tensor.all instead." from ._ops import all_ if not self.default_backend.supports(Backend.jit_compile): # NumPy return bool(self.native()) if self.rank == 0 else bool(all_(self).native()) else: # __bool__ does not work with TensorFlow tracing. # TensorFlow needs to see a tf.Tensor in loop conditions but won't allow bool() invocations. # However, this function must always return a Python bool. raise AssertionError("To evaluate the boolean value of a Tensor, use 'Tensor.all'.") @property def all(self): """ Whether all values of this `Tensor` are `True` as a native bool. """ from ._ops import all_, cast if self.rank == 0: return cast(self, BOOL).native() else: return all_(self, dim=self.shape).native() @property def any(self): """ Whether this `Tensor` contains a `True` value as a native bool. """ from ._ops import any_, cast if self.rank == 0: return cast(self, BOOL).native() else: return any_(self, dim=self.shape).native() @property def mean(self): """ Mean value of this `Tensor` as a native scalar. """ from ._ops import mean return mean(self, dim=self.shape).native() @property def finite_mean(self): """ Mean value of all finite values in this `Tensor` as a native scalar. """ from ._ops import finite_mean return finite_mean(self, dim=self.shape).native() @property def std(self): """ Standard deviation of this `Tensor` as a native scalar. """ from ._ops import std return std(self, dim=self.shape).native() @property def finite_std(self): """ Standard deviation of all finite values in this `Tensor` as a native scalar. """ from ._ops import finite_std return finite_std(self, dim=self.shape).native() @property def sum(self): """ Sum of all values of this `Tensor` as a native scalar. 
""" from ._ops import sum_ return sum_(self, dim=self.shape).native() @property def finite_sum(self): """ Sum of all finite values of this `Tensor` as a native scalar. """ from ._ops import finite_sum return finite_sum(self, dim=self.shape).native() @property def min(self): """ Minimum value of this `Tensor` as a native scalar. """ from ._ops import min_ return min_(self, dim=self.shape).native() @property def finite_min(self): """ Minimum finite value of this `Tensor` as a native scalar. """ from ._ops import finite_min return finite_min(self, dim=self.shape).native() @property def max(self): """ Maximum value of this `Tensor` as a native scalar. """ from ._ops import max_ return max_(self, dim=self.shape).native() @property def finite_max(self): """ Maximum finite value of this `Tensor` as a native scalar. """ from ._ops import finite_max return finite_max(self, dim=self.shape).native() @property def real(self) -> 'Tensor': """ Returns the real part of this tensor. See Also: `phiml.math.real()` """ from ._ops import real return real(self) @property def imag(self) -> 'Tensor': """ Returns the imaginary part of this tensor. If this tensor does not store complex numbers, returns a zero tensor with the same shape and dtype as this tensor. See Also: `phiml.math.imag()` """ from ._ops import imag return imag(self) @property def available(self) -> bool: """ A tensor is available if it stores concrete values and these can currently be read. Tracers used inside jit compilation are typically not available. See Also: `phiml.math.jit_compile()`. """ if self._is_tracer: return False natives = self._natives() natives_available = [choose_backend(native).is_available(native) for native in natives] return all(natives_available) @property def device(self) -> Union[ComputeDevice, None]: """ Returns the `ComputeDevice` that this tensor is allocated on. The device belongs to this tensor's `default_backend`. See Also: `Tensor.default_backend`. 
""" natives = self._natives() if not natives: return None return self.default_backend.get_device(natives[0]) def __int__(self): return int(self.native()) if self.shape.volume == 1 else NotImplemented def __float__(self): return float(self.native()) if self.shape.volume == 1 else NotImplemented def __complex__(self): return complex(self.native()) if self.shape.volume == 1 else NotImplemented def __index__(self): assert self.shape.volume == 1, f"Only scalar tensors can be converted to index but has shape {self.shape}" assert self.dtype.kind == int, f"Only int tensors can be converted to index but dtype is {self.dtype}" return int(self.native()) def __contains__(self, item): if isinstance(item, SHAPE_TYPES): return item in self.shape elif isinstance(item, BoundDim): return item.name in self.shape elif isinstance(item, _BoundDims): return item.dims in self.shape elif isinstance(item, str): assert self.dtype.kind != object, "str in Tensor not allowed for object-type Tensors" return item in self.shape raise ValueError(f"'dim in Tensor' requires dim to be a Shape or str but got {item}") def __repr__(self): try: return format_tensor(self, PrintOptions()) except Exception: return f"Tensor {self.shape}" def _repr_pretty_(self, printer, cycle): printer.text(format_tensor(self, PrintOptions(colors=DEFAULT_COLORS))) def print(self, layout='full', float_format=None, threshold=8, include_shape=None, include_dtype=None): print(format_tensor(self, PrintOptions(layout=layout, float_format=float_format, threshold=threshold, colors=DEFAULT_COLORS, include_shape=include_shape, include_dtype=include_dtype))) def __format__(self, format_spec: str): if BROADCAST_FORMATTER.values is not None: return BROADCAST_FORMATTER.register_formatted(self, format_spec) specs = format_spec.split(':') layout_ = 'auto' for possible_layout in ['summary', 'full', 'row', 'numpy']: if possible_layout in specs: assert layout_ == 'auto', f"Two layout identifiers encountered in '{format_spec}'" layout_ = possible_layout include_shape = 'shape' in specs or (False if 'no-shape' in specs else None) include_dtype = 'dtype' in specs or (False if 'no-dtype' in specs else None) color = 'color' in specs or (False if 'no-color' in specs else None) threshold = 8 float_format = None for spec in specs: if spec.startswith('threshold='): threshold = int(spec[len('threshold='):]) elif '.' 
in spec: float_format = spec result = format_tensor(self, PrintOptions(layout_, float_format, threshold, color, include_shape, include_dtype)) return result def __getitem__(self, item) -> 'Tensor': if item is None: return self if isinstance(item, Tensor): if item.dtype.kind == bool: from ._ops import boolean_mask return boolean_mask(self, item.shape.non_batch or item.shape, item) elif item.dtype.kind == int: from ._ops import gather return gather(self, item) else: raise AssertionError(f"Index tensor must be of dtype int (gather) or bool (boolean_mask) but got {item}") item = slicing_dict(self, item) selections = {} sliced = self for dim, selection in item.items(): if dim not in self.shape: continue selection, new_dim = prepare_renaming_gather(self.shape, dim, selection) # Either handle slicing directly or add it to the dict if isinstance(selection, (tuple, list)): result = [sliced[{dim: i}] for i in selection] stack_dim = after_gather(sliced.shape[dim], {dim: selection}) from ._ops import stack_tensors sliced = stack_tensors(result, stack_dim) if new_dim is not None: sliced = sliced.__replace_dims__(dim, new_dim) elif isinstance(selection, Tensor) and selection.dtype.kind == bool: from ._ops import boolean_mask sliced = boolean_mask(sliced, dim, selection) elif isinstance(selection, Tensor) and selection.dtype.kind == int: from ._ops import gather sliced = gather(sliced, selection, dims=dim) elif isinstance(selection, slice): if selection.start in (0, None) and selection.stop is not None and isinstance(self.shape.get_size(dim), int) and selection.stop >= self.shape.get_size(dim): continue if selection.start is not None and isinstance(self.shape.get_size(dim), int) and selection.start >= self.shape.get_size(dim) and (selection.step is None or selection.step > 0): return Dense(0, (), self.shape.after_gather(item), self.backend) selections[dim] = selection else: selections[dim] = selection return sliced._getitem(selections) if selections else sliced def _getitem(self, selection: dict) -> 'Tensor': """ Slice the tensor along specified dimensions. Args: selection: dim_name: str -> Union[int, slice] """ raise NotImplementedError() def __setitem__(self, key, value): raise SyntaxError("Tensors are not editable to preserve the autodiff chain. This feature might be added in the future. To update part of a tensor, use math.where() or math.scatter()") def __unstack__(self, dims: Tuple[str, ...]) -> Tuple['Tensor[T]', ...]: # from phiml.math.magic.Sliceable if len(dims) == 1: return self._unstack(dims[0]) else: return NotImplemented def _unstack(self, dim: str) -> Tuple['Tensor[T]', ...]: """ Splits this tensor along the specified dimension. The returned tensors have the same dimensions as this tensor save the unstacked dimension. Raises an error if the dimension is not part of the `Shape` of this `Tensor`. 
See Also: `TensorDim.unstack()` Args: dim: name of dimension to unstack Returns: tuple of tensors """ raise NotImplementedError() @staticmethod def __stack__(values: tuple, dim: Shape, **_kwargs) -> 'Tensor': from ._tree import Layout if any(isinstance(v, Layout) for v in values): layout_ = [v for v in values if isinstance(v, Layout)][0] return layout_.__stack__(values, dim, **_kwargs) from ._ops import stack_tensors return stack_tensors(values, dim, **_kwargs) def __expand__(self, dims: Shape, **kwargs) -> 'Tensor': return expand_tensor(self, dims) @staticmethod def __concat__(values: tuple, dim: str, **kwargs) -> 'Tensor': from ._ops import concat_tensor return concat_tensor(values, dim) def __replace_dims__(self, dims: Tuple[str, ...], new_dims: Shape, **kwargs) -> 'Tensor': return self._with_shape_replaced(replace_dims(self.shape, dims, new_dims)) def __pack_dims__(self, dims: Shape, packed_dim: Shape, pos: Union[int, None], **kwargs) -> 'Tensor': raise NotImplementedError(self.__class__) def __unpack_dim__(self, dim: str, unpacked_dims: Shape, **kwargs) -> 'Tensor': if self.shape.is_uniform: native = self.native(shape_, True) new_shape = self.shape.replace(dim, unpacked_dims) if not new_shape.well_defined: assert new_shape.undefined.rank <= 1, f"At most one dim can have an undefined size to be inferred during un-packing but got {new_shape}" missing = int(self.shape.volume / new_shape.defined.volume) sizes = [missing if s is None else s for s in new_shape.sizes] new_shape = new_shape.with_sizes(sizes) if new_shape.is_uniform: native_reshaped = choose_backend(native).reshape(native, new_shape.sizes) return Dense(native_reshaped, new_shape.names, new_shape, self.backend) else: split_dim = new_shape.non_uniform_shape[-1] i = 0 result = [] for idx in split_dim.meshgrid(): s = after_gather(new_shape, idx).get_size(new_shape.non_uniform.name) sliced = self[{dim: slice(i, i + s)}] result.append(sliced._with_shape_replaced(sliced.shape.replace(dim, unpacked_dims - split_dim))) i += s from ._ops import stack_tensors return stack_tensors(result, split_dim) else: tensors = self._tensors if dim == self._stack_dim.name: for udim in unpacked_dims: tensors = [TensorStack(tensors[o::len(tensors)//udim.size], udim) for o in range(len(tensors)//udim.size)] assert len(tensors) == 1 return tensors[0] raise NotImplementedError def __cast__(self, dtype: DType): if self.dtype == dtype: return self return self._op1(lambda native: choose_backend(native).cast(native, dtype=dtype), 'cast') def dimension(self, name: Union[str, Shape]) -> 'TensorDim': """ Returns a reference to a specific dimension of this tensor. This is equivalent to the syntax `tensor.<name>`. The dimension need not be part of the `Tensor.shape` in which case its size is 1. Args: name: dimension name Returns: `TensorDim` corresponding to a dimension of this tensor """ if isinstance(name, str): return TensorDim(self, name) elif isinstance(name, SHAPE_TYPES): return TensorDim(self, name.name) else: raise ValueError(name) def pack(self, dims, packed_dim) -> 'Tensor[T]': """ See `pack_dims()` """ from ._ops import pack_dims return pack_dims(self, dims, packed_dim) def unpack(self, dim, unpacked_dims) -> 'Tensor[T]': """ See `unpack_dim()` """ from ._ops import unpack_dim return unpack_dim(self, dim, unpacked_dims) @property def T(self): raise NotImplementedError("Tensor.T is deprecated. 
Use dim.Ti .Tc or .Ts instead") # return self._with_shape_replaced(self.shape.transposed()) @property def Ti(self) -> 'Tensor[T]': return self._with_shape_replaced(self.shape.transpose(INSTANCE_DIM)) @property def Tc(self) -> 'Tensor[T]': return self._with_shape_replaced(self.shape.transpose(CHANNEL_DIM)) @property def Ts(self) -> 'Tensor[T]': return self._with_shape_replaced(self.shape.transpose(SPATIAL_DIM)) def map(self, function: Callable, dims=shape_, range=range, unwrap_scalars=True, **kwargs): from ._functional import map_ return map_(function, self, dims=dims, range=range, unwrap_scalars=unwrap_scalars, **kwargs) def __getattr__(self, name): if name.startswith('__'): # called by hasattr in magic ops raise AttributeError if name.startswith('_'): raise AttributeError(f"'{type(self)}' object has no attribute '{name}'") if name == 'is_tensor_like': # TensorFlow replaces abs() while tracing and checks for this attribute raise AttributeError(f"'{type(self)}' object has no attribute '{name}'") assert name not in ('shape', '_shape', 'tensor'), name return TensorDim(self, name) def __add__(self, other): return self._op2(other, operator.add, False) def __radd__(self, other): return self._op2(other, operator.add, True) def __sub__(self, other): return self._op2(other, operator.sub, False) def __rsub__(self, other): return self._op2(other, operator.sub, True) def __and__(self, other): return self._op2(other, operator.and_, False) def __rand__(self, other): return self._op2(other, operator.and_, True) def __or__(self, other): return self._op2(other, operator.or_, False) def __ror__(self, other): return self._op2(other, operator.or_, True) def __xor__(self, other): return self._op2(other, operator.xor, False) def __rxor__(self, other): return self._op2(other, operator.xor, True) def __mul__(self, other): return self._op2(other, operator.mul, False) def __rmul__(self, other): return self._op2(other, operator.mul, True) def __truediv__(self, other): return self._op2(other, operator.truediv, False) def __rtruediv__(self, other): return self._op2(other, operator.truediv, True) def __divmod__(self, other): return self._op2(other, divmod, False) def __rdivmod__(self, other): return self._op2(other, divmod, True) def __floordiv__(self, other): return self._op2(other, operator.floordiv, False) def __rfloordiv__(self, other): return self._op2(other, operator.floordiv, True) def __pow__(self, power, modulo=None): assert modulo is None return self._op2(power, operator.pow, False) def __rpow__(self, other): return self._op2(other, operator.pow, True) def __mod__(self, other): return self._op2(other, operator.mod, False) def __rmod__(self, other): return self._op2(other, operator.mod, True) def __eq__(self, other) -> 'Tensor[bool]': if self is other: return TRUE if _EQUALITY_REDUCE[-1]['type'] == 'ref': return wrap(self is other) elif _EQUALITY_REDUCE[-1]['type'] == 'shape_and_value': if set(self.shape) != set(other.shape): return wrap(False) from ._ops import close return wrap(close(self, other, rel_tolerance=_EQUALITY_REDUCE[-1]['rel_tolerance'], abs_tolerance=_EQUALITY_REDUCE[-1]['abs_tolerance'], equal_nan=_EQUALITY_REDUCE[-1]['equal_nan'])) if other is None: other = float('nan') if self.shape.is_compatible(shape(other)): return self._op2(other, operator.eq, False) else: return wrap(False) def __hash__(self): return hash((self.shape, self.dtype)) def __ne__(self, other) -> 'Tensor[bool]': if _EQUALITY_REDUCE[-1]['type'] == 'ref': return wrap(self is not other) elif _EQUALITY_REDUCE[-1]['type'] == 
'shape_and_value': if set(self.shape) != set(other.shape): return wrap(True) from ._ops import close return wrap(not close(self, other, rel_tolerance=_EQUALITY_REDUCE[-1]['rel_tolerance'], abs_tolerance=_EQUALITY_REDUCE[-1]['abs_tolerance'], equal_nan=_EQUALITY_REDUCE[-1]['equal_nan'])) if other is None: other = float('nan') if self.shape.is_compatible(shape(other)): return self._op2(other, operator.ne, False) else: return wrap(True) def __lt__(self, other) -> 'Tensor[bool]': return self._op2(other, operator.gt, True) def __le__(self, other) -> 'Tensor[bool]': return self._op2(other, operator.ge, True) def __gt__(self, other) -> 'Tensor[bool]': return self._op2(other, operator.gt, False) def __ge__(self, other) -> 'Tensor[bool]': return self._op2(other, operator.ge, False) def __lshift__(self, other) -> 'Tensor[T]': return self._op2(other, operator.lshift, False) def __rlshift__(self, other) -> 'Tensor[T]': return self._op2(other, operator.lshift, True) def __rshift__(self, other) -> 'Tensor[T]': return self._op2(other, operator.rshift, False) def __rrshift__(self, other) -> 'Tensor[T]': return self._op2(other, operator.rshift, True) def __abs__(self) -> 'Tensor[T]': return self._op1(lambda t: choose_backend(t).abs(t), 'abs') def __round__(self, n=None) -> 'Tensor[int]': return self._op1(lambda t: choose_backend(t).round(t), 'round') def __copy__(self) -> 'Tensor[T]': return self._op1(lambda t: choose_backend(t).copy(t, only_mutable=True), 'copy') def __deepcopy__(self, memodict: Dict) -> 'Tensor[T]': return self._op1(lambda t: choose_backend(t).copy(t, only_mutable=False), 'deepcopy') def __neg__(self) -> 'Tensor[T]': return self._op1(operator.neg, 'neg') def __invert__(self) -> 'Tensor[T]': return self._op1(lambda t: choose_backend(t).invert(t), 'invert') def __reversed__(self) -> 'Tensor[T]': assert self.shape.channel.rank == 1 return self[::-1] def __iter__(self): if self.rank == 1: return iter(self.native()) elif self.rank == 0: return iter([self.native()]) else: native = self.native([self.shape]) return iter(native) def item(self) -> T: assert self.shape.volume == 1, f"Tensor.item() is only available for single-element Tensors but got {self.shape}" return next(iter(self)) def __matmul__(self, other) -> 'Tensor[bool]': from ._ops import dot assert isinstance(other, Tensor), f"Matmul '@' requires two Tensor arguments but got {type(other)}" if not self.shape.dual_rank and self.shape.channel_rank: match = self.shape.channel.only(other.shape.channel) if match: return dot(self, match, other, match) match_names = self.shape.dual.as_batch().names if not match_names: # this is not a matrix assert self.shape.primal.only(other.shape).is_empty, f"Cannot compute matmul {self.shape} @ {other.shape}. First argument is not a matrix; it has no dual dimensions." 
return self * other match_primal = other.shape.only(match_names, reorder=True) if not match_primal: assert non_batch(other).non_dual.rank == 1, f"Cannot multiply {self.shape} @ {other.shape} because arg2 does not have appropriate non-dual dimensions" assert non_batch(other).non_dual.size == match_primal.volume, f"Cannot multiply {self.shape} @ {other.shape} because dual dims of arg1 have no match" match_primal = non_batch(other).non_dual match_dual = self.shape.dual.only(match_primal.as_dual(), reorder=True) if match_dual.rank == 1: left_arg, l_name = self, match_dual.name else: left_arg, l_name = self.__pack_dims__(match_dual, dual('_reduce'), None), '~_reduce' if match_primal.rank == 1: right_arg, r_name = other, match_primal.name else: right_arg, r_name = other.__pack_dims__(match_primal, channel('_reduce'), None), '_reduce' return dot(left_arg, l_name, right_arg, r_name) # def __rmatmul__(self, other): def _tensor(self, other) -> 'Tensor': if isinstance(other, Tensor): return other elif isinstance(other, (tuple, list)) and any(isinstance(v, Tensor) for v in other): if 'vector' in self.shape: outer_dim = self.shape['vector'] elif self.shape.channel_rank == 1: outer_dim = self.shape.channel else: raise ValueError(f"Cannot combine tensor of shape {self.shape} with tuple {tuple([type(v).__name__ for v in other])}") remaining_shape = self.shape.without(outer_dim) other_items = [v if isinstance(v, Tensor) else compatible_tensor(v, compat_shape=remaining_shape, compat_natives=self._natives(), convert=False) for v in other] sh = merge_shapes(*other_items) from ._ops import stack_tensors other_stacked = stack_tensors([expand_tensor(t, sh) for t in other_items], outer_dim) return other_stacked else: return compatible_tensor(other, compat_shape=self.shape, compat_natives=self._natives(), convert=False) def _op1(self, native_function, op_name: str) -> 'Tensor': """ Transform the values of this tensor given a function that can be applied to any native tensor. Args: native_function: Returns: """ raise NotImplementedError(self.__class__) def _op2(self, other, op: Callable, switch_args: bool) -> 'Tensor': """ Apply a broadcast operation on two tensors. Args: other: second argument op: Operator function (a, b) -> c, used to propagate the operation to children tensors to have Python choose the callee Returns: `Tensor` """ raise NotImplementedError(self.__class__) def _disassemble(self, include_constants: bool): """ Args: include_constants: True for JIT, False for gradient Returns: spec_dict: dict natives: Sequence """ return self._spec_dict(), self._natives() def _natives(self) -> tuple: raise NotImplementedError(self.__class__) def _spec_dict(self) -> dict: raise NotImplementedError(self.__class__) @classmethod def _from_spec_and_natives(cls, spec: dict, natives: list): raise NotImplementedError(cls) def _simplify(self): """ Does not cache this value but if it is already cached, returns the cached version. """ return selfAbstract base class to represent structured data of one data type. This class replaces the native tensor classes
`numpy.ndarray`, `torch.Tensor`, `tensorflow.Tensor` or `jax.numpy.ndarray` as the main data container in Φ-ML.
`Tensor` instances are different from native tensors in two important ways:
- The dimensions of Tensors have names and types.
- Tensors can have non-uniform shapes, meaning that the size of dimensions can vary along other dimensions.
To check whether a value is a tensor, use `isinstance(value, Tensor)`.
To construct a Tensor, use `tensor()`, `wrap()` or one of the basic tensor creation functions, see https://tum-pbs.github.io/PhiML/Tensors.html .
Tensors are not editable. When backed by an editable native tensor, e.g. a `numpy.ndarray`, do not edit the underlying data structure.
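Example (a minimal sketch, not from the original docs; assumes the default NumPy backend):
>>> from phiml import math
>>> from phiml.math import spatial
>>> t = math.wrap([[1, 2, 3], [4, 5, 6]], spatial(y=2, x=3))
>>> t.y[0]  # slice by dim name
>>> t.native('x,y')  # native array, transposed to the requested dim order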
Ancestors
- typing.Generic
Subclasses
- phiml.math._lin_trace.LinTracer
- phiml.math._sparse.CompactSparseTensor
- phiml.math._sparse.CompressedSparseMatrix
- phiml.math._sparse.SparseCoordinateTensor
- phiml.math._tensors.BlockTensor
- phiml.math._tensors.Dense
- phiml.math._tensors.TensorStack
- phiml.math._trace.Tracer
- phiml.math._tree.Layout
Instance variables
prop T-
Expand source code
@property def T(self): raise NotImplementedError("Tensor.T is deprecated. Use dim.Ti .Tc or .Ts instead") # return self._with_shape_replaced(self.shape.transposed()) prop Tc : Tensor[T]-
Expand source code
@property def Tc(self) -> 'Tensor[T]': return self._with_shape_replaced(self.shape.transpose(CHANNEL_DIM)) prop Ti : Tensor[T]-
Expand source code
@property def Ti(self) -> 'Tensor[T]': return self._with_shape_replaced(self.shape.transpose(INSTANCE_DIM)) prop Ts : Tensor[T]-
Expand source code
@property def Ts(self) -> 'Tensor[T]': return self._with_shape_replaced(self.shape.transpose(SPATIAL_DIM)) prop all-
Expand source code
@property def all(self): """ Whether all values of this `Tensor` are `True` as a native bool. """ from ._ops import all_, cast if self.rank == 0: return cast(self, BOOL).native() else: return all_(self, dim=self.shape).native()Whether all values of this
`Tensor` are `True` as a native bool.
prop any-
Expand source code
@property def any(self): """ Whether this `Tensor` contains a `True` value as a native bool. """ from ._ops import any_, cast if self.rank == 0: return cast(self, BOOL).native() else: return any_(self, dim=self.shape).native()Whether this
`Tensor` contains a `True` value as a native bool.
prop available : bool-
Expand source code
@property
def available(self) -> bool:
    """
    A tensor is available if it stores concrete values and these can currently be read.

    Tracers used inside jit compilation are typically not available.

    See Also:
        `phiml.math.jit_compile()`.
    """
    if self._is_tracer:
        return False
    natives = self._natives()
    natives_available = [choose_backend(native).is_available(native) for native in natives]
    return all(natives_available)

A tensor is available if it stores concrete values and these can currently be read. Tracers used inside jit compilation are typically not available.
See Also:
`phiml.math.jit_compile()`.
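As a sketch, a tensor holding concrete values reports `True`, while tracers inside `jit_compile()` would report `False`:

>>> from phiml import math
>>> math.wrap([1., 2.]).available  # -> True outside of jit tracing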
prop backend : phiml.backend._backend.Backend-
Expand source code
@property
def backend(self) -> Backend:
    raise NotImplementedError(self.__class__)

prop default_backend : phiml.backend._backend.Backend-
Expand source code
@property
def default_backend(self) -> Backend:
    return self.backend

prop device : phiml.backend._backend.ComputeDevice | None-
Expand source code
@property
def device(self) -> Union[ComputeDevice, None]:
    """
    Returns the `ComputeDevice` that this tensor is allocated on.
    The device belongs to this tensor's `default_backend`.

    See Also:
        `Tensor.default_backend`.
    """
    natives = self._natives()
    if not natives:
        return None
    return self.default_backend.get_device(natives[0])

Returns the `ComputeDevice` that this tensor is allocated on. The device belongs to this tensor's `default_backend`.
See Also:
`Tensor.default_backend`.
prop dtype : phiml.backend._dtype.DType-
Expand source code
@property
def dtype(self) -> DType:
    """ Data type of the elements of this `Tensor`. """
    raise NotImplementedError(self.__class__)

Data type of the elements of this `Tensor`.
prop finite_max-
Expand source code
@property
def finite_max(self):
    """ Maximum finite value of this `Tensor` as a native scalar. """
    from ._ops import finite_max
    return finite_max(self, dim=self.shape).native()

Maximum finite value of this `Tensor` as a native scalar.
prop finite_mean-
Expand source code
@property
def finite_mean(self):
    """ Mean value of all finite values in this `Tensor` as a native scalar. """
    from ._ops import finite_mean
    return finite_mean(self, dim=self.shape).native()

Mean value of all finite values in this `Tensor` as a native scalar.
prop finite_min-
Expand source code
@property
def finite_min(self):
    """ Minimum finite value of this `Tensor` as a native scalar. """
    from ._ops import finite_min
    return finite_min(self, dim=self.shape).native()

Minimum finite value of this `Tensor` as a native scalar.
prop finite_std-
Expand source code
@property
def finite_std(self):
    """ Standard deviation of all finite values in this `Tensor` as a native scalar. """
    from ._ops import finite_std
    return finite_std(self, dim=self.shape).native()

Standard deviation of all finite values in this `Tensor` as a native scalar.
prop finite_sum-
Expand source code
@property
def finite_sum(self):
    """ Sum of all finite values of this `Tensor` as a native scalar. """
    from ._ops import finite_sum
    return finite_sum(self, dim=self.shape).native()

Sum of all finite values of this `Tensor` as a native scalar.
prop imag : Tensor-
Expand source code
@property
def imag(self) -> 'Tensor':
    """
    Returns the imaginary part of this tensor.
    If this tensor does not store complex numbers, returns a zero tensor with the same shape and dtype as this tensor.

    See Also:
        `phiml.math.imag()`
    """
    from ._ops import imag
    return imag(self)

Returns the imaginary part of this tensor. If this tensor does not store complex numbers, returns a zero tensor with the same shape and dtype as this tensor.
See Also:
`phiml.math.imag()`
prop max-
Expand source code
@property
def max(self):
    """ Maximum value of this `Tensor` as a native scalar. """
    from ._ops import max_
    return max_(self, dim=self.shape).native()

Maximum value of this `Tensor` as a native scalar.
prop mean-
Expand source code
@property
def mean(self):
    """ Mean value of this `Tensor` as a native scalar. """
    from ._ops import mean
    return mean(self, dim=self.shape).native()

Mean value of this `Tensor` as a native scalar.
prop min-
Expand source code
@property
def min(self):
    """ Minimum value of this `Tensor` as a native scalar. """
    from ._ops import min_
    return min_(self, dim=self.shape).native()

Minimum value of this `Tensor` as a native scalar.
prop rank : int-
Expand source code
@property
def rank(self) -> int:
    """
    Number of explicit dimensions of this `Tensor`. Equal to `tensor.shape.rank`.
    This replaces [`numpy.ndarray.ndim`](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.ndim.html) /
    [`torch.Tensor.dim`](https://pytorch.org/docs/master/generated/torch.Tensor.dim.html) /
    [`tf.rank()`](https://www.tensorflow.org/api_docs/python/tf/rank) /
    [`jax.numpy.ndim()`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.ndim.html).
    """
    return self.shape.rank

Number of explicit dimensions of this `Tensor`. Equal to `tensor.shape.rank`. This replaces `numpy.ndarray.ndim` / `torch.Tensor.dim` / `tf.rank()` / `jax.numpy.ndim()`.
prop real : Tensor-
Expand source code
@property
def real(self) -> 'Tensor':
    """
    Returns the real part of this tensor.

    See Also:
        `phiml.math.real()`
    """
    from ._ops import real
    return real(self)

Returns the real part of this tensor.
See Also:
`phiml.math.real()`
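A short sketch for a complex-valued tensor (values illustrative):

>>> from phiml import math
>>> z = math.wrap(3 + 4j)
>>> z.real  # -> 3.0 as a Tensor
>>> z.imag  # -> 4.0 as a Tensor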
prop shape : phiml.math._shape.Shape-
Expand source code
@property
def shape(self) -> Shape:
    """ The `Shape` lists the dimensions with their sizes, names and types. """
    raise NotImplementedError(self.__class__)

The `Shape` lists the dimensions with their sizes, names and types.
prop std-
Expand source code
@property
def std(self):
    """ Standard deviation of this `Tensor` as a native scalar. """
    from ._ops import std
    return std(self, dim=self.shape).native()

Standard deviation of this `Tensor` as a native scalar.
prop sum-
Expand source code
@property
def sum(self):
    """ Sum of all values of this `Tensor` as a native scalar. """
    from ._ops import sum_
    return sum_(self, dim=self.shape).native()

Sum of all values of this `Tensor` as a native scalar.
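Unlike the correspondingly named `phiml.math` functions, these properties always reduce over all dimensions and return native scalars. A sketch (values illustrative):

>>> from phiml import math
>>> t = math.wrap([[1., 2.], [3., float('nan')]], math.spatial('y,x'))
>>> t.finite_sum   # -> 6.0, non-finite values are ignored
>>> t.finite_mean  # -> 2.0
>>> math.wrap([1., 2., 3.]).mean  # -> 2.0 as a native scalar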
Methods
def dimension(self, name: str | phiml.math._shape.Shape) ‑> phiml.math._tensors.TensorDim-
Expand source code
def dimension(self, name: Union[str, Shape]) -> 'TensorDim':
    """
    Returns a reference to a specific dimension of this tensor.
    This is equivalent to the syntax `tensor.<name>`.

    The dimension need not be part of the `Tensor.shape` in which case its size is 1.

    Args:
        name: dimension name

    Returns:
        `TensorDim` corresponding to a dimension of this tensor
    """
    if isinstance(name, str):
        return TensorDim(self, name)
    elif isinstance(name, SHAPE_TYPES):
        return TensorDim(self, name.name)
    else:
        raise ValueError(name)

Returns a reference to a specific dimension of this tensor. This is equivalent to the syntax `tensor.<name>`. The dimension need not be part of the `Tensor.shape`, in which case its size is 1.
Args
name- dimension name
Returns
`TensorDim` corresponding to a dimension of this tensor
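As a sketch, both lines below refer to the same dimension (`size` on the returned `TensorDim` is assumed here for illustration):

>>> from phiml import math
>>> t = math.wrap([[1, 2, 3], [4, 5, 6]], math.spatial('y,x'))
>>> t.dimension('x').size  # -> 3
>>> t.x.size               # equivalent attribute-style shorthand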
def item(self) ‑> T-
Expand source code
def item(self) -> T:
    assert self.shape.volume == 1, f"Tensor.item() is only available for single-element Tensors but got {self.shape}"
    return next(iter(self))

def map(self,
function: Callable,
dims=<function shape>,
range=builtins.range,
unwrap_scalars=True,
**kwargs)-
Expand source code
def map(self, function: Callable, dims=shape_, range=range, unwrap_scalars=True, **kwargs):
    from ._functional import map_
    return map_(function, self, dims=dims, range=range, unwrap_scalars=unwrap_scalars, **kwargs)
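A sketch assuming the defaults `dims=shape` and `unwrap_scalars=True`, so the Python function receives plain scalars (the function is called once per element, which is slow for large tensors):

>>> from phiml import math
>>> t = math.wrap([1, 2, 3], math.instance('points'))
>>> t.map(lambda v: v ** 2)  # -> Tensor with values 1, 4, 9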
def native(self,
order: phiml.math._shape.Shape | tuple | list | str = None,
force_expand=True)-
Expand source code
def native(self, order: Union[str, tuple, list, Shape] = None, force_expand=True):
    """
    Returns a native tensor object with the dimensions ordered according to `order`.

    Transposes the underlying tensor to match the name order and adds singleton dimensions for new dimension names.
    If a dimension of the tensor is not listed in `order`, a `ValueError` is raised.

    Additionally, groups of dims can be specified for `order` to pack dims.
    To do this, pass a `tuple` or `list` of dims to be packed into one native axis. Each entry must be one of the following:

    * `str`: the name of one dimension that is present on `value`.
    * `Shape`: Dimensions to be packed. If `force_expand`, missing dimensions are first added, otherwise they are ignored.
    * Filter function: Packs all dimensions of this type that are present on `value`.
    * Ellipsis `...`: Packs all remaining dimensions into this slot. Can only be passed once.
    * `None` or `()`: Adds a singleton dimension.

    Collections of dims or comma-separated dims may also be used, but only if all dims are present on `value`.

    Args:
        order: (Optional) Order of dimension names as comma-separated string, list or `Shape`.
        force_expand: If `True`, repeats the tensor along missing dimensions; if `False`, dimensions along which values are guaranteed to be constant are not expanded to their true size but returned as singleton dimensions.
            If a sequence of dimensions is provided, only forces the expansion for groups containing those dimensions.

    Returns:
        Native tensor representation, such as PyTorch tensor or NumPy array.

    Raises:
        `ValueError` if the tensor cannot be transposed to match target_shape
    """
    raise NotImplementedError

Returns a native tensor object with the dimensions ordered according to `order`.

Transposes the underlying tensor to match the name order and adds singleton dimensions for new dimension names. If a dimension of the tensor is not listed in `order`, a `ValueError` is raised.

Additionally, groups of dims can be specified for `order` to pack dims. To do this, pass a `tuple` or `list` of dims to be packed into one native axis. Each entry must be one of the following:

- `str`: the name of one dimension that is present on `value`.
- `Shape`: Dimensions to be packed. If `force_expand`, missing dimensions are first added, otherwise they are ignored.
- Filter function: Packs all dimensions of this type that are present on `value`.
- Ellipsis `...`: Packs all remaining dimensions into this slot. Can only be passed once.
- `None` or `()`: Adds a singleton dimension.

Collections of dims or comma-separated dims may also be used, but only if all dims are present on `value`.
Args
order- (Optional) Order of dimension names as comma-separated string, list or `Shape`.
force_expand- If `True`, repeats the tensor along missing dimensions; if `False`, dimensions along which values are guaranteed to be constant are not expanded to their true size but returned as singleton dimensions. If a sequence of dimensions is provided, only forces the expansion for groups containing those dimensions.
Returns
Native tensor representation, such as PyTorch tensor or NumPy array.
Raises
`ValueError` if the tensor cannot be transposed to match target_shape
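A sketch of plain and packed orderings (dim names illustrative; the packing follows the entry rules listed above):

>>> from phiml import math
>>> t = math.wrap([[1, 2, 3], [4, 5, 6]], math.spatial('y,x'))
>>> t.native('y,x')             # native array of shape (2, 3)
>>> t.native(['x', 'y', None])  # shape (3, 2, 1) with a trailing singleton axis
>>> t.native([math.spatial])    # all spatial dims packed into one axis of size 6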
def numpy(self,
order: phiml.math._shape.Shape | tuple | list | str = None,
force_expand=True) ‑> numpy.ndarray-
Expand source code
def numpy(self, order: Union[str, tuple, list, Shape] = None, force_expand=True) -> np.ndarray:
    """
    Converts this tensor to a `numpy.ndarray` with dimensions ordered according to `order`.

    *Note*: Using this function breaks the autograd chain. The returned tensor is not differentiable.
    To get a differentiable tensor, use `Tensor.native()` instead.

    Transposes the underlying tensor to match the name order and adds singleton dimensions for new dimension names.
    If a dimension of the tensor is not listed in `order`, a `ValueError` is raised.

    If this `Tensor` is backed by a NumPy array, a reference to this array may be returned.

    See Also:
        `phiml.math.numpy()`

    Args:
        order: (Optional) Order of dimension names as comma-separated string, list or `Shape`.
        force_expand: If `False`, dimensions along which values are guaranteed to be constant will not be expanded to their true size but returned as singleton dimensions.

    Returns:
        NumPy representation

    Raises:
        ValueError if the tensor cannot be transposed to match target_shape
    """
    return self.backend.numpy(self.native(order, force_expand))

Converts this tensor to a `numpy.ndarray` with dimensions ordered according to `order`.

Note: Using this function breaks the autograd chain. The returned tensor is not differentiable. To get a differentiable tensor, use `Tensor.native()` instead.

Transposes the underlying tensor to match the name order and adds singleton dimensions for new dimension names. If a dimension of the tensor is not listed in `order`, a `ValueError` is raised.

If this `Tensor` is backed by a NumPy array, a reference to this array may be returned.
See Also:
`phiml.math.numpy()`
Args
order- (Optional) Order of dimension names as comma-separated string, list or `Shape`.
force_expand- If `False`, dimensions along which values are guaranteed to be constant will not be expanded to their true size but returned as singleton dimensions.
Returns
NumPy representation
Raises
`ValueError` if the tensor cannot be transposed to match target_shape
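A short sketch (the returned array is detached from any autograd graph):

>>> from phiml import math
>>> t = math.wrap([[1., 2., 3.], [4., 5., 6.]], math.spatial('y,x'))
>>> t.numpy('y,x').shape  # -> (2, 3)
>>> t.numpy('x,y').shape  # -> (3, 2)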
def pack(self, dims, packed_dim) ‑> phiml.math._tensors.Tensor[~T]-
Expand source code
def pack(self, dims, packed_dim) -> 'Tensor[T]':
    """ See `pack_dims()` """
    from ._ops import pack_dims
    return pack_dims(self, dims, packed_dim)

See `pack_dims()`.
def print(self,
layout='full',
float_format=None,
threshold=8,
include_shape=None,
include_dtype=None)-
Expand source code
def print(self, layout='full', float_format=None, threshold=8, include_shape=None, include_dtype=None):
    print(format_tensor(self, PrintOptions(layout=layout, float_format=float_format, threshold=threshold, colors=DEFAULT_COLORS, include_shape=include_shape, include_dtype=include_dtype)))

def unpack(self, dim, unpacked_dims) ‑> phiml.math._tensors.Tensor[~T]-
Expand source code
def unpack(self, dim, unpacked_dims) -> 'Tensor[T]':
    """ See `unpack_dim()` """
    from ._ops import unpack_dim
    return unpack_dim(self, dim, unpacked_dims)

See `unpack_dim()`.
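A round-trip sketch using both methods (dim names illustrative):

>>> from phiml import math
>>> t = math.wrap([[1, 2], [3, 4]], math.spatial('y,x'))
>>> flat = t.pack(math.spatial, math.instance('points'))  # one instance dim of size 4
>>> flat.unpack('points', math.spatial(y=2, x=2))         # restores y and x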