Module phiml.math

Vectorized operations, tensors with named dimensions.

This package provides a common interface for tensor operations. It internally uses NumPy, TensorFlow or PyTorch.

Main classes: Tensor, Shape, DType, Extrapolation.

The provided operations are not implemented directly. Instead, they delegate the actual computation to either NumPy, TensorFlow or PyTorch, depending on the configuration. This allows the user to write simulation code once and have it run with various computation backends.

See the documentation at https://tum-pbs.github.io/PhiML/
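
For illustration, a tensor with named dimensions can be created and reduced like this (a minimal sketch using the default NumPy backend; dimension names and values are arbitrary):

>>> from phiml import math
>>> from phiml.math import wrap, spatial, channel
>>> data = wrap([[0., 1.], [2., 3.]], spatial('y'), channel('vector'))
>>> math.sum(data, 'y')
# reduces the spatial dimension 'y', keeping 'vector' with values 2 and 4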

Expand source code
"""
Vectorized operations, tensors with named dimensions.

This package provides a common interface for tensor operations.
It internally uses NumPy, TensorFlow or PyTorch.

Main classes: `Tensor`, `Shape`, `DType`, `Extrapolation`.

The provided operations are not implemented directly.
Instead, they delegate the actual computation to either NumPy, TensorFlow or PyTorch, depending on the configuration.
This allows the user to write simulation code once and have it run with various computation backends.

See the documentation at https://tum-pbs.github.io/PhiML/
"""

from ..backend._dtype import DType
from ..backend import NUMPY, precision, set_global_precision, get_precision, set_global_default_backend as use

from ._shape import (
    shape, Shape, EMPTY_SHAPE, DimFilter,
    spatial, channel, batch, instance, dual,
    non_batch, non_spatial, non_instance, non_channel, non_dual, non_primal, primal,
    merge_shapes, concat_shapes, IncompatibleShapes,
    enable_debug_checks,
)

from ._magic_ops import (
    slice_ as slice, unstack,
    stack, concat, expand,
    rename_dims, rename_dims as replace_dims, pack_dims, unpack_dim, flatten,
    b2i, c2b, c2d, i2b, s2b, si2d,
    copy_with, replace, find_differences
)

from ._tensors import wrap, tensor, layout, Tensor, Dict, to_dict, from_dict, is_scalar, BROADCAST_FORMATTER as f

from ._sparse import dense, get_sparsity, get_format, to_format, is_sparse, sparse_tensor, stored_indices, stored_values, tensor_like

from .extrapolation import Extrapolation, as_extrapolation

from ._ops import (
    choose_backend_t as choose_backend, all_available, convert, seed, to_device,
    native, numpy, reshaped_native, reshaped_tensor, reshaped_numpy, copy, native_call,
    print_ as print,
    slice_off,
    zeros, ones, fftfreq, random_normal, random_uniform, meshgrid, linspace, arange as range, range_tensor,  # creation operators (use default backend)
    zeros_like, ones_like,
    pad,
    transpose,  # reshape operations
    sort,
    safe_div,
    where, nonzero,
    sum_ as sum, finite_sum, mean, finite_mean, std, prod, max_ as max, finite_max, min_ as min, finite_min, any_ as any, all_ as all, quantile, median,  # reduce
    at_max, at_min, argmax, argmin,
    dot,
    abs_ as abs, sign,
    round_ as round, ceil, floor,
    maximum, minimum, clip,
    sqrt, exp, erf, log, log2, log10, sigmoid, soft_plus,
    sin, cos, tan, sinh, cosh, tanh, arcsin, arccos, arctan, arcsinh, arccosh, arctanh, log_gamma, factorial, incomplete_gamma,
    to_float, to_int32, to_int64, to_complex, imag, real, conjugate, angle,
    radians_to_degrees, degrees_to_radians,
    boolean_mask,
    is_finite, is_nan, is_inf,
    closest_grid_values, grid_sample, scatter, gather,
    histogram,
    fft, ifft, convolve, cumulative_sum,
    dtype, cast,
    close, always_close, assert_close, equal,
    stop_gradient,
    pairwise_differences, pairwise_differences as pairwise_distances, map_pairs,
)

from ._nd import (
    shift, index_shift,
    vec, const_vec, vec_length, vec_squared, vec_normalize, cross_product, rotate_vector, rotation_matrix, rotation_angles, dim_mask,
    normalize_to,
    l1_loss, l2_loss, frequency_loss,
    spatial_gradient, laplace,
    neighbor_reduce, neighbor_mean, neighbor_sum, neighbor_max, neighbor_min, at_min_neighbor, at_max_neighbor,
    fourier_laplace, fourier_poisson, abs_square,
    downsample2x, upsample2x, sample_subgrid,
    masked_fill, finite_fill,
    find_closest,
)

from ._trace import matrix_from_function

from ._functional import (
    LinearFunction, jit_compile_linear, jit_compile,
    jacobian, gradient, custom_gradient, print_gradient,
    map_types, map_s2b, map_i2b, map_c2b,
    broadcast,
    iterate,
    identity,
    trace_check,
    map_ as map,
    when_available,
    perf_counter,
)

from ._optimize import solve_linear, solve_nonlinear, minimize, Solve, SolveInfo, ConvergenceException, NotConverged, Diverged, SolveTape, factor_ilu

import sys as _sys
math = _sys.modules[__name__]
"""Convenience alias for the module `phiml.math`.
This way, you can import the module and contained items in one line.
```
from phiml.math import math, Tensor, wrap, extrapolation, l2_loss
```"""

PI = 3.14159265358979323846
"""Value of π to double precision """
pi = PI  # intentionally undocumented, use PI instead. Exists only as an analog to numpy.pi

INF = float("inf")
""" Floating-point representation of positive infinity. """
inf = INF  # intentionally undocumented, use INF instead. Exists only as an analog to numpy.inf


NAN = float("nan")
""" Floating-point representation of NaN (not a number). """
nan = NAN  # intentionally undocumented, use NAN instead. Exists only as an analog to numpy.nan

NUMPY = NUMPY  # to show up in pdoc
"""Default backend for NumPy arrays and SciPy objects."""

f = f
"""
Automatic mapper for broadcast string formatting of tensors, resulting in tensors of strings.
Used with the special `-f-` syntax.

Examples:
    >>> from phiml.math import f
    >>> -f-f'String containing {tensor1} and {tensor2:.1f}'
    # Result is a str tensor containing all dims of tensor1 and tensor2
"""

__all__ = [key for key in globals().keys() if not key.startswith('_')]

__pdoc__ = {
    'Extrapolation': False,
    'Shape.__init__': False,
    'SolveInfo.__init__': False,
    'TensorDim.__init__': False,
    'ConvergenceException.__init__': False,
    'Diverged.__init__': False,
    'NotConverged.__init__': False,
    'LinearFunction.__init__': False,
}

Sub-modules

phiml.math.extrapolation

Extrapolations are used for padding tensors and sampling coordinates lying outside the tensor bounds. Standard extrapolations are listed as global …

phiml.math.magic

Magic methods allow custom classes to be compatible with various functions defined in phiml.math, analogous to how implementing __hash__ allows …

Global variables

var INF

Floating-point representation of positive infinity.

var NAN

Floating-point representation of NaN (not a number).

var NUMPY

Default backend for NumPy arrays and SciPy objects.

var PI

Value of π to double precision

var f

Automatic mapper for broadcast string formatting of tensors, resulting in tensors of strings. Used with the special -f- syntax.

Examples

>>> from phiml.math import f
>>> -f-f'String containing {tensor1} and {tensor2:.1f}'
# Result is a str tensor containing all dims of tensor1 and tensor2
var math

Convenience alias for the module phiml.math. This way, you can import the module and contained items in one line.

from phiml.math import math, Tensor, wrap, extrapolation, l2_loss

Functions

def abs(x: ~TensorOrTree) ‑> ~TensorOrTree

Computes ||x||₁. For complex x, the result is a float of matching precision.

Note: The gradient of this operation is undefined for x=0. TensorFlow and PyTorch return 0 while Jax returns 1.

Args

x
Tensor or PhiTreeNode

Returns

Absolute value of x of same type as x.
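
Example (a small sketch; input values are arbitrary):

>>> from phiml.math import math, wrap, channel
>>> math.abs(wrap([-2., 0., 3.], channel('vector')))
# element-wise absolute values: 2, 0, 3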

Expand source code
def abs_(x: TensorOrTree) -> TensorOrTree:
    """
    Computes *||x||<sub>1</sub>*.
    Complex `x` result in matching precision float values.

    *Note*: The gradient of this operation is undefined for *x=0*.
    TensorFlow and PyTorch return 0 while Jax returns 1.

    Args:
        x: `Tensor` or `phiml.math.magic.PhiTreeNode`

    Returns:
        Absolute value of `x` of same type as `x`.
    """
    return _backend_op1(x, Backend.abs)
def abs_square(complex_values: Union[phiml.math._tensors.Tensor, complex]) ‑> phiml.math._tensors.Tensor

Squared magnitude of complex values.

Args

complex_values
complex Tensor

Returns

Tensor
real valued magnitude squared
Expand source code
def abs_square(complex_values: Union[Tensor, complex]) -> Tensor:
    """
    Squared magnitude of complex values.

    Args:
      complex_values: complex `Tensor`

    Returns:
        Tensor: real valued magnitude squared

    """
    return math.imag(complex_values) ** 2 + math.real(complex_values) ** 2
def all(boolean_tensor: Union[phiml.math._tensors.Tensor, list, tuple, numbers.Number, bool], dim: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function non_batch>) ‑> phiml.math._tensors.Tensor

Tests whether all entries of boolean_tensor are True along the specified dimensions.

Args

boolean_tensor
Tensor or list / tuple of Tensors.
dim

Dimension or dimensions to be reduced. One of

  • None to reduce all non-batch dimensions
  • str containing single dimension or comma-separated list of dimensions
  • Tuple[str] or List[str]
  • Shape
  • batch(), instance(), spatial(), channel() to select dimensions by type
  • '0' when isinstance(value, (tuple, list)) to add up the sequence of Tensors

Returns

Tensor without the reduced dimensions.
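
Example (illustrative; dimension names are arbitrary):

>>> from phiml.math import math, wrap, spatial
>>> mask = wrap([[True, False], [True, True]], spatial('y,x'))
>>> math.all(mask, 'x')
# reduces over 'x', leaving one bool per 'y': False, True
>>> math.all(mask)
# reduces all non-batch dimensions: False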

Expand source code
def all_(boolean_tensor: Union[Tensor, list, tuple, Number, bool], dim: DimFilter = non_batch) -> Tensor:
    """
    Tests whether all entries of `boolean_tensor` are `True` along the specified dimensions.

    Args:
        boolean_tensor: `Tensor` or `list` / `tuple` of Tensors.
        dim: Dimension or dimensions to be reduced. One of

            * `None` to reduce all non-batch dimensions
            * `str` containing single dimension or comma-separated list of dimensions
            * `Tuple[str]` or `List[str]`
            * `Shape`
            * `batch`, `instance`, `spatial`, `channel` to select dimensions by type
            * `'0'` when `isinstance(value, (tuple, list))` to add up the sequence of Tensors

    Returns:
        `Tensor` without the reduced dimensions.
    """
    return reduce_(_all, boolean_tensor, dim, required_kind=bool)
def all_available(*values) ‑> bool

Tests if all tensors contained in the given values are currently known and can be read. Placeholder tensors used to trace functions for just-in-time compilation or matrix construction are considered not available, even if they hold example values, as with PyTorch's JIT.

Tensors are not available during jit_compile(), jit_compile_linear() or while using TensorFlow's legacy graph mode.

Tensors are typically available when the backend operates in eager mode and is not currently tracing a function.

This can be used instead of the native checks

  • PyTorch: torch._C._get_tracing_state()
  • TensorFlow: tf.executing_eagerly()
  • Jax: isinstance(x, jax.core.Tracer)

Args

values
Tensors to check.

Returns

True if no value is a placeholder or being traced, False otherwise.

Expand source code
def all_available(*values) -> bool:
    """
    Tests if all tensors contained in the given `values` are currently known and can be read.
    Placeholder tensors used to trace functions for just-in-time compilation or matrix construction are considered not available, even when they hold example values like with PyTorch's JIT.

    Tensors are not available during `jit_compile()`, `jit_compile_linear()` or while using TensorFlow's legacy graph mode.
    
    Tensors are typically available when the backend operates in eager mode and is not currently tracing a function.

    This can be used instead of the native checks

    * PyTorch: `torch._C._get_tracing_state()`
    * TensorFlow: `tf.executing_eagerly()`
    * Jax: `isinstance(x, jax.core.Tracer)`

    Args:
        values: Tensors to check.

    Returns:
        `True` if no value is a placeholder or being traced, `False` otherwise.
    """
    _, tensors = disassemble_tree(values, cache=False)
    return all([t.available for t in tensors])
def always_close(t1: Union[numbers.Number, phiml.math._tensors.Tensor, bool], t2: Union[numbers.Number, phiml.math._tensors.Tensor, bool], rel_tolerance=1e-05, abs_tolerance=0, equal_nan=False) ‑> bool

Checks whether two tensors are guaranteed to be close() in all values. Unlike close(), this function can be used with JIT compilation and with tensors of incompatible shapes. Incompatible tensors are never close.

If one of the given tensors is being traced, the tensors are only equal if they reference the same native tensor. Otherwise, an element-wise equality check is performed.

See Also: close().

Args

t1
First tensor or number to compare.
t2
Second tensor or number to compare.
rel_tolerance
Relative tolerance, only used if neither tensor is traced.
abs_tolerance
Absolute tolerance, only used if neither tensor is traced.
equal_nan
If True, tensors are considered close if they are NaN in the same places.

Returns

bool

Expand source code
def always_close(t1: Union[Number, Tensor, bool], t2: Union[Number, Tensor, bool], rel_tolerance=1e-5, abs_tolerance=0, equal_nan=False) -> bool:
    """
    Checks whether two tensors are guaranteed to be `close` in all values.
    Unlike `close()`, this function can be used with JIT compilation and with tensors of incompatible shapes.
    Incompatible tensors are never close.

    If one of the given tensors is being traced, the tensors are only equal if they reference the same native tensor.
    Otherwise, an element-wise equality check is performed.

    See Also:
        `close()`.

    Args:
        t1: First tensor or number to compare.
        t2: Second tensor or number to compare.
        rel_tolerance: Relative tolerance, only used if neither tensor is traced.
        abs_tolerance: Absolute tolerance, only used if neither tensor is traced.
        equal_nan: If `True`, tensors are considered close if they are NaN in the same places.

    Returns:
        `bool`
    """
    if t1 is None or t2 is None:
        return t1 is None and t2 is None
    t1 = wrap(t1)
    t2 = wrap(t2)
    if t1.available != t2.available:
        return False
    if t1.available and t2.available:
        try:
            return close(t1, t2, rel_tolerance=rel_tolerance, abs_tolerance=abs_tolerance, equal_nan=equal_nan)
        except IncompatibleShapes:
            return False
    elif isinstance(t1, NativeTensor) and isinstance(t2, NativeTensor):
        return t1._native is t2._native
    else:
        return t1 is t2
def angle(x: ~TensorOrTree) ‑> ~TensorOrTree

Compute the angle of a complex number. This is equal to atan(Im/Re) for most values.

Args

x
Tensor or PhiTreeNode

Returns

Angle of complex number in radians.
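
Example (illustrative):

>>> from phiml.math import math, wrap
>>> math.angle(wrap(1j))
# ≈ π/2 for a purely imaginary number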

Expand source code
def angle(x: TensorOrTree) -> TensorOrTree:
    """
    Compute the angle of a complex number.
    This is equal to *atan(Im/Re)* for most values.

    Args:
        x: `Tensor` or `phiml.math.magic.PhiTreeNode`

    Returns:
        Angle of complex number in radians.
    """
    return arctan(imag(x), divide_by=real(x))
def any(boolean_tensor: Union[phiml.math._tensors.Tensor, list, tuple], dim: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function non_batch>) ‑> phiml.math._tensors.Tensor

Tests whether any entry of boolean_tensor is True along the specified dimensions.

Args

boolean_tensor
Tensor or list / tuple of Tensors.
dim

Dimension or dimensions to be reduced. One of

  • None to reduce all non-batch dimensions
  • str containing single dimension or comma-separated list of dimensions
  • Tuple[str] or List[str]
  • Shape
  • batch(), instance(), spatial(), channel() to select dimensions by type
  • '0' when isinstance(value, (tuple, list)) to add up the sequence of Tensors

Returns

Tensor without the reduced dimensions.

Expand source code
def any_(boolean_tensor: Union[Tensor, list, tuple], dim: DimFilter = non_batch) -> Tensor:
    """
    Tests whether any entry of `boolean_tensor` is `True` along the specified dimensions.

    Args:
        boolean_tensor: `Tensor` or `list` / `tuple` of Tensors.
        dim: Dimension or dimensions to be reduced. One of

            * `None` to reduce all non-batch dimensions
            * `str` containing single dimension or comma-separated list of dimensions
            * `Tuple[str]` or `List[str]`
            * `Shape`
            * `batch`, `instance`, `spatial`, `channel` to select dimensions by type
            * `'0'` when `isinstance(value, (tuple, list))` to add up the sequence of Tensors

    Returns:
        `Tensor` without the reduced dimensions.
    """
    return reduce_(_any, boolean_tensor, dim, required_kind=bool)
def arccos(x: ~TensorOrTree) ‑> ~TensorOrTree

Computes the inverse of cos(x) of the Tensor or PhiTreeNode x. For real arguments, the result lies in the range [0, π].

Expand source code
def arccos(x: TensorOrTree) -> TensorOrTree:
    """ Computes the inverse of *cos(x)* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`.
    For real arguments, the result lies in the range [0, π].
    """
    return _backend_op1(x, Backend.arccos)
def arccosh(x: ~TensorOrTree) ‑> ~TensorOrTree

Computes the inverse of cosh(x) of the Tensor or PhiTreeNode x.

Expand source code
def arccosh(x: TensorOrTree) -> TensorOrTree:
    """ Computes the inverse of *cosh(x)* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.arccosh)
def arcsin(x: ~TensorOrTree) ‑> ~TensorOrTree

Computes the inverse of sin(x) of the Tensor or PhiTreeNode x. For real arguments, the result lies in the range [-π/2, π/2].

Expand source code
def arcsin(x: TensorOrTree) -> TensorOrTree:
    """ Computes the inverse of *sin(x)* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`.
    For real arguments, the result lies in the range [-π/2, π/2].
    """
    return _backend_op1(x, Backend.arcsin)
def arcsinh(x: ~TensorOrTree) ‑> ~TensorOrTree

Computes the inverse of sinh(x) of the Tensor or PhiTreeNode x.

Expand source code
def arcsinh(x: TensorOrTree) -> TensorOrTree:
    """ Computes the inverse of *sinh(x)* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.arcsinh)
def arctan(x: ~TensorOrTree, divide_by=None) ‑> ~TensorOrTree

Computes the inverse of tan(x) of the Tensor or PhiTreeNode x.

Args

x
Input. The single-argument arctan() function cannot output π/2 or -π/2 since tan(π/2) is infinite.
divide_by
If specified, computes arctan(x/divide_by) so that it can return π/2 and -π/2. This is equivalent to the common arctan2 function.
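
Example (illustrative):

>>> from phiml.math import math, wrap
>>> math.arctan(wrap(1.), divide_by=0.)
# ≈ π/2, matching the two-argument arctan2 convention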
Expand source code
def arctan(x: TensorOrTree, divide_by=None) -> TensorOrTree:
    """
    Computes the inverse of *tan(x)* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`.

    Args:
        x: Input. The single-argument `arctan` function cannot output π/2 or -π/2 since tan(π/2) is infinite.
        divide_by: If specified, computes `arctan(x/divide_by)` so that it can return π/2 and -π/2.
            This is equivalent to the common `arctan2` function.
    """
    if divide_by is None:
        return _backend_op1(x, Backend.arctan)
    else:
        divide_by = to_float(divide_by)
        return custom_op2(x, divide_by, arctan, lambda a, b: choose_backend(a, b).arctan2(a, b), 'arctan')
def arctanh(x: ~TensorOrTree) ‑> ~TensorOrTree

Computes the inverse of tanh(x) of the Tensor or PhiTreeNode x.

Expand source code
def arctanh(x: TensorOrTree) -> TensorOrTree:
    """ Computes the inverse of *tanh(x)* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.arctanh)
def argmax(x: phiml.math._tensors.Tensor, dim: Union[str, tuple, list, set, ForwardRef('Shape'), Callable], index_dim=(indexᶜ=None))

Finds the maximum value along one or multiple dimensions and returns the corresponding index.

See Also: argmin(), at_max().

Args

x
Tensor
dim
Dimensions along which the maximum should be determined. These are reduced in the operation.
index_dim
Dimension listing the index components for multidimensional argmax.

Returns

Index tensor idx, such that x[idx] = max(x).
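
Example (illustrative; values are arbitrary):

>>> from phiml.math import math, wrap, spatial
>>> x = wrap([[1., 5.], [3., 2.]], spatial('y,x'))
>>> idx = math.argmax(x, 'x')
>>> x[idx]
# per the contract above, equals math.max(x, 'x'), i.e. 5 and 3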

Expand source code
def argmax(x: Tensor, dim: DimFilter, index_dim=channel('index')):
    """
    Finds the maximum value along one or multiple dimensions and returns the corresponding index.

    See Also:
        `argmin`, `at_max`.

    Args:
        x: `Tensor`
        dim: Dimensions along which the maximum should be determined. These are reduced in the operation.
        index_dim: Dimension listing the index components for multidimensional argmax.

    Returns:
        Index tensor `idx`, such that `x[idx] = max(x)`.
    """
    dims = x.shape.only(dim)
    keep = x.shape.without(dims)
    assert dim, f"No dimensions {dim} present on key {x.shape}"
    if isinstance(x, (SparseCoordinateTensor, CompressedSparseMatrix)):
        if dims in sparse_dims(x):
            max_val = max_(x, dim)
            is_max = x == max_val
            is_max_idx = nonzero(is_max, list_dim=instance('true_values'))
            scatter_val = is_max_idx[dims.only(sparse_dims(x)).name_list]
            scatter_idx = is_max_idx[sparse_dims(x).without(dims).name_list]
            result_shape = max_val.shape & channel(scatter_val)
            result = scatter(result_shape, scatter_idx, scatter_val, mode='update', default=-1)
            return rename_dims(result, channel(scatter_val), index_dim)
        else:
            raise NotImplementedError
    v_native = reshaped_native(x, [keep, dims])
    idx_native = x.default_backend.argmax(v_native, 1, keepdims=True)
    multi_idx_native = choose_backend(idx_native).unravel_index(idx_native[:, 0], dims.sizes)
    return reshaped_tensor(multi_idx_native, [keep, index_dim.with_size(dims)])
def argmin(x: phiml.math._tensors.Tensor, dim: Union[str, tuple, list, set, ForwardRef('Shape'), Callable], index_dim=(indexᶜ=None))

Finds the minimum value along one or multiple dimensions and returns the corresponding index.

See Also: argmax(), at_min().

Args

x
Tensor
dim
Dimensions along which the minimum should be determined. These are reduced in the operation.
index_dim
Dimension listing the index components for multidimensional argmin.

Returns

Index tensor idx, such that x[idx] = min(x).

Expand source code
def argmin(x: Tensor, dim: DimFilter, index_dim=channel('index')):
    """
    Finds the minimum value along one or multiple dimensions and returns the corresponding index.

    See Also:
        `argmax`, `at_min`.

    Args:
        x: `Tensor`
        dim: Dimensions along which the minimum should be determined. These are reduced in the operation.
        index_dim: Dimension listing the index components for multidimensional argmin.

    Returns:
        Index tensor `idx`, such that `x[idx] = min(x)`.
    """
    dims = x.shape.only(dim)
    keep = x.shape.without(dims)
    assert dim, f"No dimensions {dim} present on key {x.shape}"
    v_native = reshaped_native(x, [keep, dims])
    idx_native = x.default_backend.argmin(v_native, 1, keepdims=True)
    multi_idx_native = choose_backend(idx_native).unravel_index(idx_native[:, 0], dims.sizes)
    return reshaped_tensor(multi_idx_native, [keep, index_dim.with_size(dims)])
def as_extrapolation(obj, two_sided=True) ‑> Extrapolation

Creates an Extrapolation from a descriptor object.

Args

obj

Extrapolation specification, one of the following:

  • Extrapolation
  • Primitive name as str: periodic, zero, one, zero-gradient, symmetric, symmetric-gradient, antisymmetric, reflect, antireflect
  • dict containing exactly the keys 'normal' and 'tangential'
  • dict mapping spatial dimension names to extrapolations
two_sided
If True, will add two entries (name, False), (name, True) for dimension names in dict objects.

Returns

Extrapolation
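
Example (illustrative):

>>> from phiml.math import as_extrapolation
>>> as_extrapolation('periodic')
# yields the periodic extrapolation object
>>> as_extrapolation({'x': 'periodic', 'y': 'zero-gradient'})
# per-dimension specification via a dict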

Expand source code
def as_extrapolation(obj, two_sided=True) -> Extrapolation:
    """
    Creates an `Extrapolation` from a descriptor object.

    Args:
        obj: Extrapolation specification, one of the following:

            * `Extrapolation`
            * Primitive name as `str`: periodic, zero, one, zero-gradient, symmetric, symmetric-gradient, antisymmetric, reflect, antireflect
            * `dict` containing exactly the keys `'normal'` and `'tangential'`
            * `dict` mapping spatial dimension names to extrapolations

        two_sided: If `True`, will add two entries `(name, False), (name, True)` for dimension names in `dict` objects.

    Returns:
        `Extrapolation`
    """
    if isinstance(obj, Extrapolation):
        return obj
    if obj is None:
        return NONE
    if isinstance(obj, str):
        assert obj in _PRIMITIVES, f"Unrecognized extrapolation type: '{obj}'"
        return _PRIMITIVES[obj]
    if isinstance(obj, dict):
        if 'normal' in obj or 'tangential' in obj:
            assert 'normal' in obj and 'tangential' in obj, f"Normal/tangential dict requires both entries 'normal' and 'tangential' but got {obj}"
            assert len(obj) == 2, f"Normal/tangential dict must only contain entries 'normal' and 'tangential' but got {obj}"
            normal = as_extrapolation(obj['normal'])
            tangential = as_extrapolation(obj['tangential'])
            return combine_by_direction(normal=normal, tangential=tangential)
        else:
            ext = {dim: (as_extrapolation(spec[0]), as_extrapolation(spec[1])) if isinstance(spec, tuple) else as_extrapolation(spec) for dim, spec in obj.items()}
            return combine_sides(**ext) if two_sided else combine_sides(ext)
    return ConstantExtrapolation(obj)
def assert_close(*values, rel_tolerance: float = 1e-05, abs_tolerance: float = 0, msg: str = '', verbose: bool = True)

Checks that all given tensors have equal values within the specified tolerance. Raises an AssertionError if the values of this tensor are not within tolerance of any of the other tensors.

Does not check that the shapes match as long as they can be broadcast to a common shape.

Args

values
Tensors or native tensors or numbers or sequences of numbers.
rel_tolerance
Relative tolerance.
abs_tolerance
Absolute tolerance.
msg
Optional error message.
verbose
Whether to print conflicting values.
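
Example (illustrative):

>>> from phiml.math import math, wrap, spatial
>>> math.assert_close(wrap([0., 1.], spatial('x')), wrap([0., 1. + 1e-7], spatial('x')))
# passes; the values agree within the default relative tolerance of 1e-5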
Expand source code
def assert_close(*values,
                 rel_tolerance: float = 1e-5,
                 abs_tolerance: float = 0,
                 msg: str = "",
                 verbose: bool = True):
    """
    Checks that all given tensors have equal values within the specified tolerance.
    Raises an AssertionError if the values of this tensor are not within tolerance of any of the other tensors.
    
    Does not check that the shapes match as long as they can be broadcast to a common shape.

    Args:
      values: Tensors or native tensors or numbers or sequences of numbers.
      rel_tolerance: Relative tolerance.
      abs_tolerance: Absolute tolerance.
      msg: Optional error message.
      verbose: Whether to print conflicting values.
    """
    if not values:
        return
    ml_tensors = [t for t in values if isinstance(t, Tensor)]
    if ml_tensors:
        values = [compatible_tensor(t, ml_tensors[0].shape)._simplify() for t in values]  # use Tensor to infer dimensions
        for other in values[1:]:
            _assert_close(values[0], other, rel_tolerance, abs_tolerance, msg, verbose)
    elif all(isinstance(v, PhiTreeNode) for v in values):
        tree0, tensors0 = disassemble_tree(values[0], cache=False, attr_type=value_attributes)
        for value in values[1:]:
            tree, tensors_ = disassemble_tree(value, cache=False, attr_type=value_attributes)
            assert tree0 == tree, f"Tree structures do not match: {tree0} and {tree}"
            for t0, t in zip(tensors0, tensors_):
                _assert_close(t0, t, rel_tolerance, abs_tolerance, msg, verbose)
    else:
        np_values = [choose_backend(t).numpy(t) for t in values]
        for other in np_values[1:]:
            np.testing.assert_allclose(np_values[0], other, rel_tolerance, abs_tolerance, err_msg=msg, verbose=verbose)
def at_max(value, key: phiml.math._tensors.Tensor, dim: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function non_batch>)

Looks up the values of value at the positions where the maximum values in key are located along dim.

See Also: at_min(), max_().

Args

value
Tensors or trees from which to look up and return values. These tensors are indexed at the maximum index of key.
key
Tensor containing at least one dimension of dim. The maximum index of key is determined.
dim
Dimensions along which to compute the maximum of key.

Returns

The values of value at the positions where the maximum values in key are located along dim.
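
Example (illustrative; values are arbitrary):

>>> from phiml.math import math, wrap, spatial
>>> key = wrap([3., 1., 2.], spatial('x'))
>>> value = wrap([10., 20., 30.], spatial('x'))
>>> math.at_max(value, key, 'x')
# 10., the entry of value where key attains its maximum along 'x'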

Expand source code
def at_max(value, key: Tensor, dim: DimFilter = non_batch):
    """
    Looks up the values of `value` at the positions where the maximum values in `key` are located along `dim`.

    See Also:
        `at_min`, `phiml.math.max`.

    Args:
        value: Tensors or trees from which to lookup and return values. These tensors are indexed at the maximum index in `key´.
        key: `Tensor` containing at least one dimension of `dim`. The maximum index of `key` is determined.
        dim: Dimensions along which to compute the maximum of `key`.

    Returns:
        The values of `other_tensors` at the positions where the maximum values in `value` are located along `dim`.
    """
    return lookup_where(lambda v: choose_backend(v).argmax(v, 1, keepdims=True), value, key, dim)
def at_max_neighbor(values, key_grid: phiml.math._tensors.Tensor, dims: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function spatial>, padding: Union[Extrapolation, float, phiml.math._tensors.Tensor, str, None] = None) ‑> phiml.math._tensors.Tensor

at_neighbor_where with reduce_fun set to at_max().

Expand source code
def at_max_neighbor(values, key_grid: Tensor, dims: DimFilter = spatial, padding: Union[Extrapolation, float, Tensor, str, None] = None) -> Tensor:
    """`at_neighbor_where` with `reduce_fun` set to `phiml.math.at_max`."""
    return at_neighbor_where(math.at_max, values, key_grid, dims, padding=padding)
def at_min(value, key: phiml.math._tensors.Tensor, dim: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function non_batch>)

Looks up the values of value at the positions where the minimum values in key are located along dim.

See Also: at_max(), min_().

Args

value
Tensors or trees from which to look up and return values. These tensors are indexed at the minimum index of key.
key
Tensor containing at least one dimension of dim. The minimum index of key is determined.
dim
Dimensions along which to compute the minimum of key.

Returns

The values of value at the positions where the minimum values in key are located along dim.

Expand source code
def at_min(value, key: Tensor, dim: DimFilter = non_batch):
    """
    Looks up the values of `value` at the positions where the minimum values in `key` are located along `dim`.

    See Also:
        `at_max`, `phiml.math.min`.

    Args:
        value: Tensors or trees from which to lookup and return values. These tensors are indexed at the minimum index in `key´.
        key: `Tensor` containing at least one dimension of `dim`. The minimum index of `key` is determined.
        dim: Dimensions along which to compute the minimum of `key`.

    Returns:
        The values of `other_tensors` at the positions where the minimum values in `value` are located along `dim`.
    """
    return lookup_where(lambda v: choose_backend(v).argmin(v, 1, keepdims=True), value, key, dim)
def at_min_neighbor(values, key_grid: phiml.math._tensors.Tensor, dims: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function spatial>, padding: Union[Extrapolation, float, phiml.math._tensors.Tensor, str, None] = None) ‑> phiml.math._tensors.Tensor

at_neighbor_where with reduce_fun set to at_min().

Expand source code
def at_min_neighbor(values, key_grid: Tensor, dims: DimFilter = spatial, padding: Union[Extrapolation, float, Tensor, str, None] = None) -> Tensor:
    """`at_neighbor_where` with `reduce_fun` set to `phiml.math.at_min`."""
    return at_neighbor_where(math.at_min, values, key_grid, dims, padding=padding)
def b2i(value)

Change the type of all batch dimensions of value to instance dimensions. See rename_dims().

Expand source code
def b2i(value):
    """ Change the type of all *batch* dimensions of `value` to *instance* dimensions. See `rename_dims`. """
    return rename_dims(value, batch, instance)
def batch(*args, **dims: Union[int, str, tuple, list, phiml.math._shape.Shape, ForwardRef('Tensor')]) ‑> phiml.math._shape.Shape

Returns the batch dimensions of an existing Shape or creates a new Shape with only batch dimensions.

Usage for filtering batch dimensions:

>>> batch_dims = batch(shape)
>>> batch_dims = batch(tensor)

Usage for creating a Shape with only batch dimensions:

>>> batch_shape = batch('undef', batch=2)
(batch=2, undef=None)

Here, the dimension undef is created with an undefined size of None. Undefined sizes are automatically filled in by tensor(), wrap(), stack() and concat().

To create a shape with multiple types, use merge_shapes(), concat_shapes() or the syntax shape1 & shape2.

See Also: channel(), spatial(), instance()

Args

*args

Either

  • Shape or Tensor to filter or
  • Names of dimensions with undefined sizes as str.
**dims
Dimension sizes and names. Must be empty when used as a filter operation.

Returns

Shape containing only dimensions of type batch.

Expand source code
def batch(*args, **dims: Union[int, str, tuple, list, Shape, 'Tensor']) -> Shape:
    """
    Returns the batch dimensions of an existing `Shape` or creates a new `Shape` with only batch dimensions.

    Usage for filtering batch dimensions:
    >>> batch_dims = batch(shape)
    >>> batch_dims = batch(tensor)

    Usage for creating a `Shape` with only batch dimensions:
    >>> batch_shape = batch('undef', batch=2)
    (batch=2, undef=None)

    Here, the dimension `undef` is created with an undefined size of `None`.
    Undefined sizes are automatically filled in by `tensor`, `wrap`, `stack` and `concat`.

    To create a shape with multiple types, use `merge_shapes()`, `concat_shapes()` or the syntax `shape1 & shape2`.

    See Also:
        `channel`, `spatial`, `instance`

    Args:
        *args: Either

            * `Shape` or `Tensor` to filter or
            * Names of dimensions with undefined sizes as `str`.

        **dims: Dimension sizes and names. Must be empty when used as a filter operation.

    Returns:
        `Shape` containing only dimensions of type batch.
    """
    from .magic import Shaped
    if all(isinstance(arg, str) for arg in args) or dims:
        return _construct_shape(BATCH_DIM, *args, **dims)
    elif len(args) == 1 and isinstance(args[0], Shape):
        return args[0].batch
    elif len(args) == 1 and isinstance(args[0], Shaped):
        return shape(args[0]).batch
    else:
        raise AssertionError(f"batch() must be called either as a selector batch(Shape) or batch(Tensor) or as a constructor batch(*names, **dims). Got *args={args}, **dims={dims}")
def boolean_mask(x, dim: Union[str, tuple, list, set, ForwardRef('Shape'), Callable], mask: phiml.math._tensors.Tensor)

Discards values x.dim[i] where mask.dim[i]=False. All dimensions of mask that are not dim are treated as batch dimensions.

Alternative syntax: x.dim[mask].

Implementations:

  • NumPy: Slicing
  • PyTorch: masked_select
  • TensorFlow: tf.boolean_mask
  • Jax: Slicing

Args

x
Tensor or Sliceable.
dim
Dimension of x along which to discard slices.
mask
Boolean Tensor marking which values to keep. Must have the dimension dim matching x.

Returns

Selected values of x as Tensor with dimensions from x and mask.
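
Example (illustrative):

>>> from phiml.math import math, wrap, spatial
>>> x = wrap([1., 2., 3., 4.], spatial('x'))
>>> mask = wrap([True, False, True, False], spatial('x'))
>>> math.boolean_mask(x, 'x', mask)
# keeps the entries 1 and 3; equivalent to x.x[mask]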

Expand source code
def boolean_mask(x, dim: DimFilter, mask: Tensor):
    """
    Discards values `x.dim[i]` where `mask.dim[i]=False`.
    All dimensions of `mask` that are not `dim` are treated as batch dimensions.

    Alternative syntax: `x.dim[mask]`.

    Implementations:

    * NumPy: Slicing
    * PyTorch: [`masked_select`](https://pytorch.org/docs/stable/generated/torch.masked_select.html)
    * TensorFlow: [`tf.boolean_mask`](https://www.tensorflow.org/api_docs/python/tf/boolean_mask)
    * Jax: Slicing

    Args:
        x: `Tensor` or `phiml.math.magic.Sliceable`.
        dim: Dimension of `x` to along which to discard slices.
        mask: Boolean `Tensor` marking which values to keep. Must have the dimension `dim` matching `x´.

    Returns:
        Selected values of `x` as `Tensor` with dimensions from `x` and `mask`.
    """
    dim, original_dim = shape(mask).only(dim), dim
    assert dim, f"mask dimension '{original_dim}' must be present on the mask {mask.shape}"
    assert dim.rank == 1, f"boolean mask only supports 1D selection"
    if not isinstance(x, Tensor) or is_sparse(x):
        keep_slices = nonzero_slices(mask)
        x_slices = [x[s] for s in keep_slices]
        return concat(x_slices, dim.name)
    
    def uniform_boolean_mask(x: Tensor, mask_1d: Tensor):
        if dim in x.shape:
            x_native = x.native(x.shape.names)  # order does not matter
            mask_native = mask_1d.native()  # only has 1 dim
            backend = choose_backend(x_native, mask_native)
            result_native = backend.boolean_mask(x_native, mask_native, axis=x.shape.index(dim))
            new_shape = x.shape.with_sizes(backend.staticshape(result_native))
            return NativeTensor(result_native, new_shape)
        else:
            total = int(sum_(to_int64(mask_1d), mask_1d.shape))
            new_shape = mask_1d.shape.with_sizes([total])
            return expand(x, new_shape)

    return broadcast_op(uniform_boolean_mask, [x, mask], iter_dims=mask.shape.without(dim))
def broadcast(function=None, dims=<function shape>, range=builtins.range, unwrap_scalars=True)

Function decorator for non-vectorized functions. When passing Tensor arguments to a broadcast function, the function is called once for each slice of the tensor. How tensors are sliced is determined by dims. Decorating a function with broadcast() is equivalent to passing the function to phiml.math.map().

See Also: map_()

Args

function
Function to broadcast.
dims
Dimensions which should be sliced. function is called once for each element in dims, i.e. dims.volume times. If dims is not specified, all dimensions from the Sliceable values in args and kwargs will be mapped.
range
Optional range function. Can be used to generate tqdm output by passing trange.
unwrap_scalars
If True, passes the contents of scalar Tensors instead of the tensor objects.

Returns

Broadcast function
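
Example (a small sketch; the decorated function and values are arbitrary):

>>> from phiml.math import math, wrap, spatial
>>> @math.broadcast
... def sqrt_or_zero(x):
...     return x ** 0.5 if x > 0 else 0.  # plain Python, called once per element
>>> sqrt_or_zero(wrap([4., -1., 9.], spatial('x')))
# element-wise results 2, 0, 3, stacked back into a tensor along 'x'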

Expand source code
def broadcast(function=None, dims=shape, range=range, unwrap_scalars=True):
    """
    Function decorator for non-vectorized functions.
    When passing `Tensor` arguments to a broadcast function, the function is called once for each slice of the tensor.
    How tensors are sliced is determined by `dims`.
    Decorating a function with `broadcast` is equal to passing the function to `phi.math.map()`.

    See Also:
        `phiml.math.map`

    Args:
        function: Function to broadcast.
        dims: Dimensions which should be sliced.
            `function` is called once for each element in `dims`, i.e. `dims.volume` times.
            If `dims` is not specified, all dimensions from the `phiml.math.magic.Sliceable` values in `args` and `kwargs` will be mapped.
        range: Optional range function. Can be used to generate `tqdm` output by passing `trange`.
        unwrap_scalars: If `True`, passes the contents of scalar `Tensor`s instead of the tensor objects.

    Returns:
        Broadcast function
    """
    if function is None:
        kwargs = {k: v for k, v in locals().items() if v is not None}
        return partial(broadcast, **kwargs)
    @wraps(function)
    def broadcast_(*args, **kwargs):
        return map_(function, *args, dims=dims, range=range, unwrap_scalars=unwrap_scalars, **kwargs)
    return broadcast_
def c2b(value)

Change the type of all channel dimensions of value to batch dimensions. See rename_dims().

Expand source code
def c2b(value):
    """ Change the type of all *channel* dimensions of `value` to *batch* dimensions. See `rename_dims`. """
    return rename_dims(value, channel, batch)
def c2d(value)

Change the type of all channel dimensions of value to dual dimensions. See rename_dims().

Expand source code
def c2d(value):
    """ Change the type of all *channel* dimensions of `value` to *dual* dimensions. See `rename_dims`. """
    return rename_dims(value, channel, dual)
def cast(x: ~MagicType, dtype: Union[phiml.backend._dtype.DType, type]) ‑> ~OtherMagicType

Casts x to a different data type.

Implementations:

  • NumPy: x.astype()
  • PyTorch: x.to()
  • TensorFlow: tf.cast
  • Jax: jax.numpy.array

See Also: to_float(), to_int32(), to_int64(), to_complex().

Args

x
Tensor
dtype
New data type as DType, e.g. DType(int, 16).

Returns

Tensor with data type dtype()
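
Example (illustrative):

>>> from phiml.math import math, wrap, channel
>>> math.cast(wrap([1.6, 2.3], channel('vector')), int)
# int32 tensor with truncated values 1 and 2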

Expand source code
def cast(x: MagicType, dtype: Union[DType, type]) -> OtherMagicType:
    """
    Casts `x` to a different data type.

    Implementations:

    * NumPy: [`x.astype()`](numpy.ndarray.astype)
    * PyTorch: [`x.to()`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.to)
    * TensorFlow: [`tf.cast`](https://www.tensorflow.org/api_docs/python/tf/cast)
    * Jax: [`jax.numpy.array`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.array.html)

    See Also:
        `to_float`, `to_int32`, `to_int64`, `to_complex`.

    Args:
        x: `Tensor`
        dtype: New data type as `phiml.math.DType`, e.g. `DType(int, 16)`.

    Returns:
        `Tensor` with data type `dtype`
    """
    if not isinstance(dtype, DType):
        dtype = DType.as_dtype(dtype)
    if hasattr(x, '__cast__'):
        return x.__cast__(dtype)
    elif isinstance(x, (Number, bool)):
        return dtype.kind(x)
    elif isinstance(x, PhiTreeNode):
        attrs = {key: getattr(x, key) for key in value_attributes(x)}
        new_attrs = {k: cast(v, dtype) for k, v in attrs.items()}
        return copy_with(x, **new_attrs)
    try:
        backend = choose_backend(x)
        return backend.cast(x, dtype)
    except NoBackendFound:
        if dtype.kind == bool:
            return bool(x)
        raise ValueError(f"Cannot cast object of type '{type(x).__name__}'")
def ceil(x: ~TensorOrTree) ‑> ~TensorOrTree

Computes ⌈x⌉ of the Tensor or PhiTreeNode x.

Expand source code
def ceil(x: TensorOrTree) -> TensorOrTree:
    """ Computes *⌈x⌉* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.ceil)
def channel(*args, **dims: Union[int, str, tuple, list, phiml.math._shape.Shape, ForwardRef('Tensor')]) ‑> phiml.math._shape.Shape

Returns the channel dimensions of an existing Shape or creates a new Shape with only channel dimensions.

Usage for filtering channel dimensions:

>>> channel_dims = channel(shape)
>>> channel_dims = channel(tensor)

Usage for creating a Shape with only channel dimensions:

>>> channel_shape = channel('undef', vector=2)
(vector=2, undef=None)

Here, the dimension undef is created with an undefined size of None. Undefined sizes are automatically filled in by tensor(), wrap(), stack() and concat().

To create a shape with multiple types, use merge_shapes(), concat_shapes() or the syntax shape1 & shape2.

See Also: spatial(), batch(), instance()

Args

*args

Either

  • Shape or Tensor to filter or
  • Names of dimensions with undefined sizes as str.
**dims
Dimension sizes and names. Must be empty when used as a filter operation.

Returns

Shape containing only dimensions of type channel.

Expand source code
def channel(*args, **dims: Union[int, str, tuple, list, Shape, 'Tensor']) -> Shape:
    """
    Returns the channel dimensions of an existing `Shape` or creates a new `Shape` with only channel dimensions.

    Usage for filtering channel dimensions:
    >>> channel_dims = channel(shape)
    >>> channel_dims = channel(tensor)

    Usage for creating a `Shape` with only channel dimensions:
    >>> channel_shape = channel('undef', vector=2)
    (vector=2, undef=None)

    Here, the dimension `undef` is created with an undefined size of `None`.
    Undefined sizes are automatically filled in by `tensor`, `wrap`, `stack` and `concat`.

    To create a shape with multiple types, use `merge_shapes()`, `concat_shapes()` or the syntax `shape1 & shape2`.

    See Also:
        `spatial`, `batch`, `instance`

    Args:
        *args: Either

            * `Shape` or `Tensor` to filter or
            * Names of dimensions with undefined sizes as `str`.

        **dims: Dimension sizes and names. Must be empty when used as a filter operation.

    Returns:
        `Shape` containing only dimensions of type channel.
    """
    from .magic import Shaped
    if all(isinstance(arg, str) for arg in args) or dims:
        return _construct_shape(CHANNEL_DIM, *args, **dims)
    elif len(args) == 1 and isinstance(args[0], Shape):
        return args[0].channel
    elif len(args) == 1 and isinstance(args[0], Shaped):
        return shape(args[0]).channel
    else:
        raise AssertionError(f"channel() must be called either as a selector channel(Shape) or channel(Tensor) or as a constructor channel(*names, **dims). Got *args={args}, **dims={dims}")
def choose_backend(*values, prefer_default=False) ‑> phiml.backend._backend.Backend

Choose backend for given Tensor or native tensor values. Backends need to be registered to be available, e.g. via init() or set_global_default_backend().

Args

*values
Sequence of Tensors, native tensors or constants.
prefer_default
Whether to always select the default backend if it can work with values, see default_backend().

Returns

The selected phiml.math.backend.Backend

Expand source code
def choose_backend_t(*values, prefer_default=False) -> Backend:
    """
    Choose backend for given `Tensor` or native tensor values.
    Backends need to be registered to be available, e.g. via `init()` or `use()`.

    Args:
        *values: Sequence of `Tensor`s, native tensors or constants.
        prefer_default: Whether to always select the default backend if it can work with `values`, see `default_backend()`.

    Returns:
        The selected `phiml.math.backend.Backend`
    """
    natives = sum([v._natives() if isinstance(v, Tensor) else (v,) for v in values], ())
    return choose_backend(*natives, prefer_default=prefer_default)
def clip(x: phiml.math._tensors.Tensor, lower_limit: Union[float, phiml.math._tensors.Tensor], upper_limit: Union[float, phiml.math._tensors.Tensor])

Limits the values of the Tensor x to lie between lower_limit and upper_limit (inclusive).
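
Example (illustrative):

>>> from phiml.math import math, wrap, channel
>>> math.clip(wrap([-1., 0.5, 2.], channel('vector')), 0., 1.)
# limits the values to the range [0, 1]: 0, 0.5, 1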

Expand source code
def clip(x: Tensor, lower_limit: Union[float, Tensor], upper_limit: Union[float, Tensor]):
    """ Limits the values of the `Tensor` `x` to lie between `lower_limit` and `upper_limit` (inclusive). """
    if isinstance(lower_limit, Number) and isinstance(upper_limit, Number):

        def clip_(x):
            return x._op1(lambda native: choose_backend(native).clip(native, lower_limit, upper_limit))

        return broadcast_op(clip_, [x])
    else:
        return maximum(lower_limit, minimum(x, upper_limit))
def close(*tensors, rel_tolerance=1e-05, abs_tolerance=0, equal_nan=False) ‑> bool

Checks whether all tensors have equal values within the specified tolerance.

Does not check that the shapes exactly match. Unlike with always_close(), all shapes must be compatible and tensors with different shapes are reshaped before comparing.

See Also: always_close().

Args

*tensors
At least two Tensor or tensor-like objects or None. The shapes of all tensors must be compatible but not all tensors must have all dimensions. If any argument is None, returns True only if all are None.
rel_tolerance
Relative tolerance
abs_tolerance
Absolute tolerance
equal_nan
If True, tensors are considered close if they are NaN in the same places.

Returns

bool, whether all given tensors are equal to the first tensor within the specified tolerance.
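
Example (illustrative):

>>> from phiml.math import math, wrap, spatial
>>> math.close(wrap([1., 2.], spatial('x')), wrap([1., 2.000001], spatial('x')))
# True; the values agree within the default relative tolerance of 1e-5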

Expand source code
def close(*tensors, rel_tolerance=1e-5, abs_tolerance=0, equal_nan=False) -> bool:
    """
    Checks whether all tensors have equal values within the specified tolerance.
    
    Does not check that the shapes exactly match.
    Unlike with `always_close()`, all shapes must be compatible and tensors with different shapes are reshaped before comparing.

    See Also:
        `always_close()`.

    Args:
        *tensors: At least two  `Tensor` or tensor-like objects or `None`.
            The shapes of all tensors must be compatible but not all tensors must have all dimensions.
            If any argument is `None`, returns `True` only if all are `None`.
        rel_tolerance: Relative tolerance
        abs_tolerance: Absolute tolerance
        equal_nan: If `True`, tensors are considered close if they are NaN in the same places.

    Returns:
        `bool`, whether all given tensors are equal to the first tensor within the specified tolerance.
    """
    if tensors[0] is None:
        return all(o is None for o in tensors)
    if any(o is None for o in tensors):
        return False
    if all(t is tensors[0] for t in tensors):
        return True
    tensors = [wrap(t) for t in tensors]
    for other in tensors[1:]:
        if not _close(tensors[0], other, rel_tolerance=rel_tolerance, abs_tolerance=abs_tolerance, equal_nan=equal_nan):
            return False
    return True
def closest_grid_values(grid: phiml.math._tensors.Tensor, coordinates: phiml.math._tensors.Tensor, extrap: e_.Extrapolation, stack_dim_prefix='closest_', **kwargs)

Finds the neighboring grid points in all directions and returns their values. The result will have 2^d values for each vector in coordinates in d dimensions.

If coordinates does not have a channel dimension with item names, the spatial dims of grid will be used.

Args

grid
grid data. The grid is spanned by the spatial dimensions of the tensor
coordinates
tensor with 1 channel dimension holding vectors pointing to locations in grid index space
extrap
grid extrapolation
stack_dim_prefix
For each spatial dimension dim, stacks lower and upper closest values along dimension stack_dim_prefix+dim.
kwargs
Additional information for the extrapolation.

Returns

Tensor of shape (batch, coord_spatial, grid_spatial=(2, 2,…), grid_channel)

Expand source code
def closest_grid_values(grid: Tensor,
                        coordinates: Tensor,
                        extrap: 'e_.Extrapolation',
                        stack_dim_prefix='closest_',
                        **kwargs):
    """
    Finds the neighboring grid points in all directions and returns their values.
    The result will have 2^d values for each vector in coordinates in d dimensions.

    If `coordinates` does not have a channel dimension with item names, the spatial dims of `grid` will be used.

    Args:
        grid: grid data. The grid is spanned by the spatial dimensions of the tensor
        coordinates: tensor with 1 channel dimension holding vectors pointing to locations in grid index space
        extrap: grid extrapolation
        stack_dim_prefix: For each spatial dimension `dim`, stacks lower and upper closest values along dimension `stack_dim_prefix+dim`.
        kwargs: Additional information for the extrapolation.

    Returns:
        `Tensor` of shape (batch, coord_spatial, grid_spatial=(2, 2,...), grid_channel)
    """
    return broadcast_op(functools.partial(_closest_grid_values, extrap=extrap, stack_dim_prefix=stack_dim_prefix, pad_kwargs=kwargs), [grid, coordinates])
def concat(values: Union[tuple, list], dim: Union[str, phiml.math._shape.Shape], expand_values=False, **kwargs)

Concatenates a sequence of Shapable objects, e.g. Tensor, along one dimension. All values must have the same spatial, instance and channel dimensions and their sizes must be equal, except for dim. Batch dimensions will be added as needed.

Args

values
Tuple or list of Shapable, such as Tensor
dim
Concatenation dimension, must be present in all values. The size along dim is determined from values and can be set to undefined (None).
expand_values
If True, will first add missing dimensions to all values, not just batch dimensions. This allows tensors with different dimensions to be concatenated. The resulting tensor will have all dimensions that are present in values.
**kwargs
Additional keyword arguments required by specific implementations. Adding spatial dimensions to fields requires the bounds: Box argument specifying the physical extent of the new dimensions. Adding batch dimensions must always work without keyword arguments.

Returns

Concatenated Tensor

Examples

>>> concat([math.zeros(batch(b=10)), math.ones(batch(b=10))], 'b')
(bᵇ=20) 0.500 ± 0.500 (0e+00...1e+00)
>>> concat([vec(x=1, y=0), vec(z=2.)], 'vector')
(x=1.000, y=0.000, z=2.000) float64
Expand source code
def concat(values: Union[tuple, list], dim: Union[str, Shape], expand_values=False, **kwargs):
    """
    Concatenates a sequence of `phiml.math.magic.Shapable` objects, e.g. `Tensor`, along one dimension.
    All values must have the same spatial, instance and channel dimensions and their sizes must be equal, except for `dim`.
    Batch dimensions will be added as needed.

    Args:
        values: Tuple or list of `phiml.math.magic.Shapable`, such as `phiml.math.Tensor`
        dim: Concatenation dimension, must be present in all `values`.
            The size along `dim` is determined from `values` and can be set to undefined (`None`).
        expand_values: If `True`, will first add missing dimensions to all values, not just batch dimensions.
            This allows tensors with different dimensions to be concatenated.
            The resulting tensor will have all dimensions that are present in `values`.
        **kwargs: Additional keyword arguments required by specific implementations.
            Adding spatial dimensions to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions.
            Adding batch dimensions must always work without keyword arguments.

    Returns:
        Concatenated `Tensor`

    Examples:
        >>> concat([math.zeros(batch(b=10)), math.ones(batch(b=10))], 'b')
        (bᵇ=20) 0.500 ± 0.500 (0e+00...1e+00)

        >>> concat([vec(x=1, y=0), vec(z=2.)], 'vector')
        (x=1.000, y=0.000, z=2.000) float64
    """
    assert len(values) > 0, f"concat() got empty sequence {values}"
    if isinstance(dim, Shape):
        dim = dim.name
    assert isinstance(dim, str), f"dim must be a str or Shape but got '{dim}' of type {type(dim)}"
    dim = auto(dim, channel).name
    # Add missing dimensions
    if expand_values:
        all_dims = merge_shapes(*values, allow_varying_sizes=True)
        all_dims = all_dims.with_dim_size(dim, 1, keep_item_names=False)
        values = [expand(v, all_dims.without(shape(v))) for v in values]
    else:
        for v in values:
            assert dim in shape(v), f"concat dim '{dim}' must be present in the shapes of all values but got value {type(v).__name__} with shape {shape(v)}"
        for v in values[1:]:
            assert set(non_batch(v).names) == set(non_batch(values[0]).names), f"Concatenated values must have the same non-batch dimensions but got {non_batch(values[0])} and {non_batch(v)}"
        all_batch_dims = merge_shapes(*[shape(v).batch for v in values])
        values = [expand(v, all_batch_dims) for v in values]
    # --- First try __concat__ ---
    for v in values:
        if isinstance(v, Shapable):
            if hasattr(v, '__concat__'):
                result = v.__concat__(values, dim, **kwargs)
                if result is not NotImplemented:
                    assert isinstance(result, Shapable), f"__concat__ must return a Shapable object but got {type(result).__name__} from {type(v).__name__} {v}"
                    return result
    # --- Next: try concat attributes for tree nodes ---
    if all(isinstance(v, PhiTreeNode) for v in values):
        attributes = all_attributes(values[0])
        if attributes and all(all_attributes(v) == attributes for v in values):
            new_attrs = {}
            for a in attributes:
                common_shape = merge_shapes(*[shape(getattr(v, a)).without(dim) for v in values])
                a_values = [expand(getattr(v, a), common_shape & shape(v).only(dim)) for v in values]  # expand by dim if missing, and dims of others
                new_attrs[a] = concat(a_values, dim, **kwargs)
            return copy_with(values[0], **new_attrs)
        else:
            warnings.warn(f"Failed to concat values using value attributes because attributes differ among values {values}")
    # --- Fallback: slice and stack ---
    try:
        unstacked = sum([unstack(v, dim) for v in values], ())
    except MagicNotImplemented:
        raise MagicNotImplemented(f"concat: No value implemented __concat__ and not all values were Sliceable along {dim}. values = {[type(v) for v in values]}")
    if len(unstacked) > 8:
        warnings.warn(f"concat() default implementation is slow on large dimensions ({dim}={len(unstacked)}). Please implement __concat__()", RuntimeWarning, stacklevel=2)
    dim = shape(values[0])[dim].with_size(None)
    try:
        return stack(unstacked, dim, **kwargs)
    except MagicNotImplemented:
        raise MagicNotImplemented(f"concat: No value implemented __concat__ and slices could not be stacked. values = {[type(v) for v in values]}")
def concat_shapes(*shapes: Union[phiml.math._shape.Shape, Any]) ‑> phiml.math._shape.Shape

Creates a Shape listing the dimensions of all shapes in the given order.

See Also: merge_shapes().

Args

*shapes
Shapes to concatenate. No two shapes must contain a dimension with the same name.

Returns

Combined Shape.

Expand source code
def concat_shapes(*shapes: Union[Shape, Any]) -> Shape:
    """
    Creates a `Shape` listing the dimensions of all `shapes` in the given order.

    See Also:
        `merge_shapes()`.

    Args:
        *shapes: Shapes to concatenate. No two shapes must contain a dimension with the same name.

    Returns:
        Combined `Shape`.
    """
    shapes = [obj if isinstance(obj, Shape) else shape(obj) for obj in shapes]
    names = sum([s.names for s in shapes], ())
    if len(set(names)) != len(names):
        raise IncompatibleShapes(f"Cannot concatenate shapes {list(shapes)}. Duplicate dimension names are not allowed.")
    sizes = sum([s.sizes for s in shapes], ())
    types = sum([s.types for s in shapes], ())
    item_names = sum([s.item_names for s in shapes], ())
    return Shape(sizes, names, types, item_names)
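
For illustration, a hedged sketch (the printed Shape format is indicative):

>>> concat_shapes(batch(b=2), spatial(x=4, y=3))
(bᵇ=2, xˢ=4, yˢ=3)
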
def conjugate(x: ~TensorOrTree) ‑> ~TensorOrTree

See Also: imag(), real().

Args

x
Real or complex Tensor or PhiTreeNode or native tensor.

Returns

Complex conjugate of x if x is complex, else x.

Expand source code
def conjugate(x: TensorOrTree) -> TensorOrTree:
    """
    See Also:
        `imag()`, `real()`.

    Args:
        x: Real or complex `Tensor` or `phiml.math.magic.PhiTreeNode` or native tensor.

    Returns:
        Complex conjugate of `x` if `x` is complex, else `x`.
    """
    return _backend_op1(x, Backend.conj)
def const_vec(value: Union[float, phiml.math._tensors.Tensor], dim: Union[phiml.math._shape.Shape, tuple, list, str])

Creates a single-dimension tensor with all values equal to value. value is not converted to the default backend, even when it is a Python primitive.

Args

value
Value for filling the vector.
dim
Either single-dimension non-spatial Shape or Shape consisting of any number of spatial dimensions. In the latter case, a new channel dimension named 'vector' will be created from the spatial shape.

Returns

Tensor

Expand source code
def const_vec(value: Union[float, Tensor], dim: Union[Shape, tuple, list, str]):
    """
    Creates a single-dimension tensor with all values equal to `value`.
    `value` is not converted to the default backend, even when it is a Python primitive.

    Args:
        value: Value for filling the vector.
        dim: Either single-dimension non-spatial Shape or `Shape` consisting of any number of spatial dimensions.
            In the latter case, a new channel dimension named `'vector'` will be created from the spatial shape.

    Returns:
        `Tensor`
    """
    if isinstance(dim, Shape):
        if dim.spatial:
            assert not dim.non_spatial, f"When creating a vector given spatial dimensions, the shape may only contain spatial dimensions but got {dim}"
            shape = channel(vector=dim.names)
        else:
            assert dim.rank == 1, f"Cannot create vector from {dim}"
            shape = dim
    else:
        dims = parse_dim_order(dim)
        shape = channel(vector=dims)
    return wrap([value] * shape.size, shape)
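
For illustration, a minimal sketch: passing spatial dims yields a channel dimension named 'vector' with matching item names.

>>> const_vec(1.0, spatial('x', 'y'))   # vector with entries x=1.0, y=1.0
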
def convert(x, backend: phiml.backend._backend.Backend = None, use_dlpack=True)

Convert the native representation of a Tensor or PhiTreeNode to the native format of backend.

Warning: This operation breaks the automatic differentiation chain.

See Also: phiml.math.backend.convert().

Args

x
Tensor to convert. If x is a PhiTreeNode, its variable attributes are converted.
backend
Target backend. If None, uses the current default backend, see phiml.math.backend.default_backend().

Returns

Tensor with native representation belonging to backend.

Expand source code
def convert(x, backend: Backend = None, use_dlpack=True):
    """
    Convert the native representation of a `Tensor` or `phiml.math.magic.PhiTreeNode` to the native format of `backend`.

    *Warning*: This operation breaks the automatic differentiation chain.

    See Also:
        `phiml.math.backend.convert()`.

    Args:
        x: `Tensor` to convert. If `x` is a `phiml.math.magic.PhiTreeNode`, its variable attributes are converted.
        backend: Target backend. If `None`, uses the current default backend, see `phiml.math.backend.default_backend()`.

    Returns:
        `Tensor` with native representation belonging to `backend`.
    """
    if isinstance(x, Tensor):
        return x._op1(lambda native: b_convert(native, backend, use_dlpack=use_dlpack))
    elif isinstance(x, PhiTreeNode):
        return tree_map(convert, x, backend=backend, use_dlpack=use_dlpack)
    else:
        return b_convert(x, backend, use_dlpack=use_dlpack)
def convolve(value: phiml.math._tensors.Tensor, kernel: phiml.math._tensors.Tensor, extrapolation: e_.Extrapolation = None) ‑> phiml.math._tensors.Tensor

Computes the convolution of value and kernel along the spatial axes of kernel.

The channel dimensions of value are reduced against the equally named dimensions of kernel. The result will have the non-reduced channel dimensions of kernel.

Args

value
Tensor whose shape includes all spatial dimensions of kernel.
kernel
Tensor used as convolutional filter.
extrapolation
If not None, pads value so that the result has the same shape as value.

Returns

Tensor

Expand source code
def convolve(value: Tensor,
             kernel: Tensor,
             extrapolation: 'e_.Extrapolation' = None) -> Tensor:
    """
    Computes the convolution of `value` and `kernel` along the spatial axes of `kernel`.

    The channel dimensions of `value` are reduced against the equally named dimensions of `kernel`.
    The result will have the non-reduced channel dimensions of `kernel`.

    Args:
        value: `Tensor` whose shape includes all spatial dimensions of `kernel`.
        kernel: `Tensor` used as convolutional filter.
        extrapolation: If not None, pads `value` so that the result has the same shape as `value`.

    Returns:
        `Tensor`
    """
    assert all(dim in value.shape for dim in kernel.shape.spatial.names), f"Value must have all spatial dimensions of kernel but got value {value} kernel {kernel}"
    conv_shape = kernel.shape.spatial
    in_channels = value.shape.channel
    out_channels = kernel.shape.channel.without(in_channels)
    batch = value.shape.batch & kernel.shape.batch
    if extrapolation is not None and extrapolation != e_.ZERO:
        value = pad(value, {dim: (kernel.shape.get_size(dim) // 2, (kernel.shape.get_size(dim) - 1) // 2) for dim in conv_shape.names}, extrapolation)
    native_kernel = reshaped_native(kernel, (batch, out_channels, in_channels, *conv_shape.names), force_expand=in_channels)
    native_value = reshaped_native(value, (batch, in_channels, *conv_shape.names), force_expand=batch)
    backend = choose_backend(native_value, native_kernel)
    native_result = backend.conv(native_value, native_kernel, zero_padding=extrapolation == e_.ZERO)
    result = reshaped_tensor(native_result, (batch, out_channels, *conv_shape))
    return result
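
For illustration, a hedged 1D sketch (assumes extrapolation refers to phiml.math.extrapolation; values indicated in the comments):

>>> data = wrap([0., 1., 2., 3.], spatial('x'))
>>> kernel = wrap([1., 1., 1.], spatial('x'))
>>> convolve(data, kernel, extrapolation.ZERO)   # same size as data, values (1, 3, 6, 5)
>>> convolve(data, kernel)                       # no padding, shorter along x: (3, 6)
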
def copy(value: phiml.math._tensors.Tensor)

Copies the data buffer and encapsulating Tensor object.

Args

value
Tensor to be copied.

Returns

Copy of value.

Expand source code
def copy(value: Tensor):
    """
    Copies the data buffer and encapsulating `Tensor` object.

    Args:
        value: `Tensor` to be copied.

    Returns:
        Copy of `value`.
    """
    if value._is_tracer:
        warnings.warn("Tracing tensors cannot be copied.", RuntimeWarning)
        return value
    return value._op1(lambda native: choose_backend(native).copy(native))
def copy_with(obj: ~PhiTreeNodeType, **updates) ‑> ~PhiTreeNodeType

Creates a copy of the given PhiTreeNode with updated values as specified in updates.

If obj overrides __with_attrs__, the copy will be created via that specific implementation. Otherwise, the copy module and setattr will be used.

Args

obj
PhiTreeNode
**updates
Values to be replaced.

Returns

Copy of obj with updated values.

Expand source code
def replace(obj: PhiTreeNodeType, **updates) -> PhiTreeNodeType:
    """
    Creates a copy of the given `phiml.math.magic.PhiTreeNode` with updated values as specified in `updates`.

    If `obj` overrides `__with_attrs__`, the copy will be created via that specific implementation.
    Otherwise, the `copy` module and `setattr` will be used.

    Args:
        obj: `phiml.math.magic.PhiTreeNode`
        **updates: Values to be replaced.

    Returns:
        Copy of `obj` with updated values.
    """
    if hasattr(obj, '__with_attrs__'):
        result = obj.__with_attrs__(**updates)
        if result is not NotImplemented:
            return result
    elif isinstance(obj, (Number, bool)):
        return obj
    if dataclasses.is_dataclass(obj):
        return dataclasses.replace(obj, **updates)
    else:
        cpy = copy.copy(obj)
        for attr, value in updates.items():
            setattr(cpy, attr, value)
        return cpy
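
For illustration, a sketch using the dataclass code path shown above (the class is illustrative):

>>> from dataclasses import dataclass
>>> @dataclass
... class Config:
...     lr: float
...     steps: int
>>> copy_with(Config(lr=1e-3, steps=100), steps=200)
Config(lr=0.001, steps=200)
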
def cos(x: ~TensorOrTree) ‑> ~TensorOrTree

Computes cos(x) of the Tensor or PhiTreeNode x.

Expand source code
def cos(x: TensorOrTree) -> TensorOrTree:
    """ Computes *cos(x)* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.cos)
def cosh(x: ~TensorOrTree) ‑> ~TensorOrTree

Computes cosh(x) of the Tensor or PhiTreeNode x.

Expand source code
def cosh(x: TensorOrTree) -> TensorOrTree:
    """ Computes *cosh(x)* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.cosh)
def cross_product(vec1: phiml.math._tensors.Tensor, vec2: phiml.math._tensors.Tensor) ‑> phiml.math._tensors.Tensor

Computes the cross product of two vectors in 2D or 3D.

Args

vec1
Tensor with a single channel dimension called 'vector'
vec2
Tensor with a single channel dimension called 'vector'

Returns

Tensor

Expand source code
def cross_product(vec1: Tensor, vec2: Tensor) -> Tensor:
    """
    Computes the cross product of two vectors in 2D or 3D.

    Args:
        vec1: `Tensor` with a single channel dimension called `'vector'`
        vec2: `Tensor` with a single channel dimension called `'vector'`

    Returns:
        `Tensor`
    """
    vec1 = math.tensor(vec1)
    vec2 = math.tensor(vec2)
    spatial_rank = vec1.vector.size if 'vector' in vec1.shape else vec2.vector.size
    if spatial_rank == 2:  # Curl in 2D
        assert vec2.vector.exists
        if vec1.vector.exists:
            v1_x, v1_y = vec1.vector
            v2_x, v2_y = vec2.vector
            return v1_x * v2_y - v1_y * v2_x
        else:
            v2_x, v2_y = vec2.vector
            return vec1 * math.stack_tensors([-v2_y, v2_x], channel(vec2))
    elif spatial_rank == 3:  # Curl in 3D
        assert vec1.vector.exists and vec2.vector.exists, f"Both vectors must have a 'vector' dimension but got shapes {vec1.shape}, {vec2.shape}"
        v1_x, v1_y, v1_z = vec1.vector
        v2_x, v2_y, v2_z = vec2.vector
        return math.stack([
            v1_y * v2_z - v1_z * v2_y,
            v1_z * v2_x - v1_x * v2_z,
            v1_x * v2_y - v1_y * v2_x,
        ], vec1.shape['vector'])
    else:
        raise AssertionError(f'dims = {spatial_rank}. Vector product not available in > 3 dimensions')
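
For illustration, a 2D and a 3D sketch (results indicated in the comments):

>>> cross_product(vec(x=1., y=0.), vec(x=0., y=1.))               # scalar 1.0
>>> cross_product(vec(x=1., y=0., z=0.), vec(x=0., y=1., z=0.))   # vector (0, 0, 1)
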
def cumulative_sum(x: phiml.math._tensors.Tensor, dim: Union[str, tuple, list, set, ForwardRef('Shape'), Callable])

Performs a cumulative sum of x along dim.

Implementations:

  • NumPy: cumsum
  • PyTorch: cumsum
  • TensorFlow: cumsum
  • Jax: cumsum

Args

x
Tensor
dim
Dimension along which to sum, as str or Shape.

Returns

Tensor with the same shape as x.

Expand source code
def cumulative_sum(x: Tensor, dim: DimFilter):
    """
    Performs a cumulative sum of `x` along `dim`.

    Implementations:

    * NumPy: [`cumsum`](https://numpy.org/doc/stable/reference/generated/numpy.cumsum.html)
    * PyTorch: [`cumsum`](https://pytorch.org/docs/stable/generated/torch.cumsum.html)
    * TensorFlow: [`cumsum`](https://www.tensorflow.org/api_docs/python/tf/math/cumsum)
    * Jax: [`cumsum`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.cumsum.html)

    Args:
        x: `Tensor`
        dim: Dimension along which to sum, as `str` or `Shape`.

    Returns:
        `Tensor` with the same shape as `x`.
    """
    dim = x.shape.only(dim)
    assert len(dim) == 1, f"dim must be a single dimension but got {dim}"
    native_x = x.native(x.shape)
    native_result = choose_backend(native_x).cumsum(native_x, x.shape.index(dim))
    return NativeTensor(native_result, x.shape)
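
For illustration, a minimal sketch:

>>> cumulative_sum(wrap([1, 2, 3, 4], spatial('x')), 'x')   # values (1, 3, 6, 10) along x
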
def custom_gradient(f: Callable, gradient: Callable, auxiliary_args: str = '')

Creates a function based on f that uses a custom gradient for the backpropagation pass.

Warning This method can lead to memory leaks if the gradient function is not called. Make sure to pass tensors without gradients if the gradient is not required, see stop_gradient().

Args

f
Forward function mapping Tensor arguments x to a single Tensor output or sequence of tensors y.
gradient
Function to compute the vector-Jacobian product for backpropagation. Will be called as gradient(input_dict, *y, *dy) -> output_dict where input_dict contains all named arguments passed to the forward function and output_dict contains only those parameters for which a gradient is defined.
auxiliary_args
Comma-separated parameter names of arguments that are not relevant to backpropagation.

Returns

Function with similar signature and return values as f. However, the returned function does not support keyword arguments.

Expand source code
def custom_gradient(f: Callable, gradient: Callable, auxiliary_args: str = ''):
    """
    Creates a function based on `f` that uses a custom gradient for the backpropagation pass.

    *Warning* This method can lead to memory leaks if the gradient function is not called.
    Make sure to pass tensors without gradients if the gradient is not required, see `stop_gradient()`.

    Args:
        f: Forward function mapping `Tensor` arguments `x` to a single `Tensor` output or sequence of tensors `y`.
        gradient: Function to compute the vector-Jacobian product for backpropagation.
            Will be called as `gradient(input_dict, *y, *dy) -> output_dict` where `input_dict` contains all named arguments passed to the forward function
            and `output_dict` contains only those parameters for which a gradient is defined.
        auxiliary_args: Comma-separated parameter names of arguments that are not relevant to backpropagation.

    Returns:
        Function with similar signature and return values as `f`. However, the returned function does not support keyword arguments.
    """
    auxiliary_args = set(s.strip() for s in auxiliary_args.split(',') if s.strip())
    return CustomGradientFunction(f, gradient, auxiliary_args)
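
For illustration, a hedged sketch of attaching a manually specified backward pass (the function names and gradient rule are illustrative):

>>> def f(x):
...     return x ** 2
>>> def f_grad(input_dict, y, dy):
...     return {'x': 2 * input_dict['x'] * dy}   # vector-Jacobian product for f
>>> f_with_custom_grad = custom_gradient(f, f_grad)
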
def degrees_to_radians(deg: ~TensorOrTree) ‑> ~TensorOrTree

Convert degrees to radians.

Expand source code
def degrees_to_radians(deg: TensorOrTree) -> TensorOrTree:
    """ Convert degrees to radians. """
    return tree_map(lambda x: x * (3.14159265358979323846 / 180), deg)
def dense(x: phiml.math._tensors.Tensor) ‑> phiml.math._tensors.Tensor

Convert a sparse tensor representation to an equivalent dense one in which all values are explicitly stored contiguously in memory.

Args

x
Any Tensor. Python primitives like float, int or bool will be converted to Tensors in the process.

Returns

Dense tensor.

Expand source code
def dense(x: Tensor) -> Tensor:
    """
    Convert a sparse tensor representation to an equivalent dense one in which all values are explicitly stored contiguously in memory.

    Args:
        x: Any `Tensor`.
            Python primitives like `float`, `int` or `bool` will be converted to `Tensors` in the process.

    Returns:
        Dense tensor.
    """
    from . import reshaped_tensor
    if isinstance(x, SparseCoordinateTensor):
        from ._ops import scatter
        return scatter(x.shape, x._indices, x._values, mode='add', outside_handling='undefined')
    elif isinstance(x, CompressedSparseMatrix):
        ind_batch, channels, native_indices, native_pointers, native_values, native_shape = x._native_csr_components()
        native_dense = x.default_backend.csr_to_dense(native_indices, native_pointers, native_values, native_shape)
        return reshaped_tensor(native_dense, [ind_batch, x._compressed_dims, x._uncompressed_dims, channels])
    elif isinstance(x, NativeTensor):
        return x
    elif isinstance(x, Tensor):
        return cached(x)
    elif isinstance(x, (Number, bool)):
        return wrap(x)
def dim_mask(all_dims: Union[phiml.math._shape.Shape, tuple, list], dims: Union[str, tuple, list, set, ForwardRef('Shape'), Callable], mask_dim=(vectorᶜ=None)) ‑> phiml.math._tensors.Tensor

Creates a mask vector with entries equal to 1 for dims and 0 for all other dimensions in all_dims.

Args

all_dims
All dimensions for which the vector should have an entry.
dims
Dimensions marked as 1.
mask_dim
Dimension of the masked vector. Item names are assigned automatically.

Returns

Tensor

Expand source code
def dim_mask(all_dims: Union[Shape, tuple, list], dims: DimFilter, mask_dim=channel('vector')) -> Tensor:
    """
    Creates a mask vector with entries equal to 1 for `dims` and 0 for all other dimensions in `all_dims`.

    Args:
        all_dims: All dimensions for which the vector should have an entry.
        dims: Dimensions marked as 1.
        mask_dim: Dimension of the masked vector. Item names are assigned automatically.

    Returns:
        `Tensor`
    """
    assert isinstance(all_dims, (Shape, tuple, list)), f"all_dims must be a tuple or Shape but got {type(all_dims)}"
    assert isinstance(mask_dim, Shape) and mask_dim.rank == 1, f"mask_dim must be a single-dimension Shape but got {mask_dim}"
    if isinstance(all_dims, (tuple, list)):
        all_dims = spatial(*all_dims)
    dims = all_dims.only(dims)
    mask = [1 if dim in dims else 0 for dim in all_dims]
    mask_dim = mask_dim.with_size(all_dims.names)
    return wrap(mask, mask_dim)
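
For illustration, a minimal sketch (result indicated in the comment):

>>> dim_mask(spatial(x=4, y=3, z=2), 'y')   # vector with entries x=0, y=1, z=0
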
def dot(x: phiml.math._tensors.Tensor, x_dims: Union[str, tuple, list, set, ForwardRef('Shape'), Callable], y: phiml.math._tensors.Tensor, y_dims: Union[str, tuple, list, set, ForwardRef('Shape'), Callable]) ‑> phiml.math._tensors.Tensor

Computes the dot product along the specified dimensions. Contracts x_dims with y_dims by first multiplying the elements and then summing them up.

For one dimension, this is equal to matrix-matrix or matrix-vector multiplication.

The function replaces the traditional dot() / tensordot / matmul / einsum functions.

Args

x
First Tensor
x_dims
Dimensions of x to reduce against y
y
Second Tensor
y_dims
Dimensions of y to reduce against x.

Returns

Dot product as Tensor.

Expand source code
def dot(x: Tensor,
        x_dims: DimFilter,
        y: Tensor,
        y_dims: DimFilter) -> Tensor:
    """
    Computes the dot product along the specified dimensions.
    Contracts `x_dims` with `y_dims` by first multiplying the elements and then summing them up.

    For one dimension, this is equal to matrix-matrix or matrix-vector multiplication.

    The function replaces the traditional `dot` / `tensordot` / `matmul` / `einsum` functions.

    * NumPy: [`numpy.tensordot`](https://numpy.org/doc/stable/reference/generated/numpy.tensordot.html), [`numpy.einsum`](https://numpy.org/doc/stable/reference/generated/numpy.einsum.html)
    * PyTorch: [`torch.tensordot`](https://pytorch.org/docs/stable/generated/torch.tensordot.html#torch.tensordot), [`torch.einsum`](https://pytorch.org/docs/stable/generated/torch.einsum.html)
    * TensorFlow: [`tf.tensordot`](https://www.tensorflow.org/api_docs/python/tf/tensordot), [`tf.einsum`](https://www.tensorflow.org/api_docs/python/tf/einsum)
    * Jax: [`jax.numpy.tensordot`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.tensordot.html), [`jax.numpy.einsum`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.einsum.html)

    Args:
        x: First `Tensor`
        x_dims: Dimensions of `x` to reduce against `y`
        y: Second `Tensor`
        y_dims: Dimensions of `y` to reduce against `x`.

    Returns:
        Dot product as `Tensor`.
    """
    x_dims = x.shape.only(x_dims)
    y_dims = y.shape.only(y_dims)
    if not x_dims:
        return x * sum_(y, y_dims)
    if not y_dims:
        return sum_(x, x_dims) * y

    def tensor_dot(x, y):
        if is_sparse(x) or is_sparse(y):
            if x_dims.isdisjoint(sparse_dims(x)) and y_dims.isdisjoint(sparse_dims(y)):
                if is_sparse(x):
                    return x._op2(y, lambda vx, vy: dot(vx, x_dims, vy, y_dims), None, 'dot', '@')
                else:
                    return y._op2(x, lambda vy, vx: dot(vx, x_dims, vy, y_dims), None, 'dot', '@')
            else:
                return sparse_dot(x, x_dims, y, y_dims)
        if x._is_tracer:
            return x._matmul(x_dims, y, y_dims)
        if y._is_tracer:
            return y._matmul(y_dims, x, x_dims)
        x_native = x.native(x.shape)
        y_native = y.native(y.shape)
        backend = choose_backend(x_native, y_native)
        remaining_shape_x = x.shape.without(x_dims)
        remaining_shape_y = y.shape.without(y_dims)
        assert x_dims.volume == y_dims.volume, f"Failed to reduce {x_dims} against {y_dims} in dot product of {x.shape} and {y.shape}. Sizes do not match."
        if remaining_shape_y.isdisjoint(remaining_shape_x):  # no shared batch dimensions -> tensordot
            result_native = backend.tensordot(x_native, x.shape.indices(x_dims), y_native, y.shape.indices(y_dims))
            result_shape = concat_shapes(remaining_shape_x, remaining_shape_y)
        else:  # shared batch dimensions -> einsum
            result_shape = merge_shapes(x.shape.without(x_dims), y.shape.without(y_dims))
            REDUCE_LETTERS = list('ijklmn')
            KEEP_LETTERS = list('abcdefgh')
            x_letters = [(REDUCE_LETTERS if dim in x_dims else KEEP_LETTERS).pop(0) for dim in x.shape.names]
            letter_map = {dim: letter for dim, letter in zip(x.shape.names, x_letters)}
            REDUCE_LETTERS = list('ijklmn')
            y_letters = []
            for dim in y.shape.names:
                if dim in y_dims:
                    y_letters.append(REDUCE_LETTERS.pop(0))
                else:
                    if dim in x.shape and dim not in x_dims:
                        y_letters.append(letter_map[dim])
                    else:
                        next_letter = KEEP_LETTERS.pop(0)
                        letter_map[dim] = next_letter
                        y_letters.append(next_letter)
            keep_letters = [letter_map[dim] for dim in result_shape.names]
            subscripts = f'{"".join(x_letters)},{"".join(y_letters)}->{"".join(keep_letters)}'
            result_native = backend.einsum(subscripts, x_native, y_native)
        return NativeTensor(result_native, result_shape)

    return broadcast_op(tensor_dot, [x, y])
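
For illustration, a minimal sketch reducing a shared channel dimension:

>>> a = wrap([1., 2., 3.], channel('c'))
>>> b = wrap([4., 5., 6.], channel('c'))
>>> dot(a, 'c', b, 'c')   # scalar 32.0
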
def downsample2x(grid: phiml.math._tensors.Tensor, padding: Extrapolation = zero-gradient, dims: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function spatial>) ‑> phiml.math._tensors.Tensor

Resamples a regular grid to half the number of spatial sample points per dimension. The grid values at the new points are determined via mean (linear interpolation).

Args

grid
full size grid
padding
grid extrapolation. Used to insert an additional value for odd spatial dims. (Default value = extrapolation.BOUNDARY)
dims
dims along which down-sampling is applied. If None, down-sample along all spatial dims.

Returns

half-size grid

Expand source code
def downsample2x(grid: Tensor,
                 padding: Extrapolation = extrapolation.BOUNDARY,
                 dims: DimFilter = spatial) -> Tensor:
    """
    Resamples a regular grid to half the number of spatial sample points per dimension.
    The grid values at the new points are determined via mean (linear interpolation).

    Args:
      grid: full size grid
      padding: grid extrapolation. Used to insert an additional value for odd spatial dims. (Default value = extrapolation.BOUNDARY)
      dims: dims along which down-sampling is applied. If None, down-sample along all spatial dims.

    Returns:
      half-size grid

    """
    dims = grid.shape.only(dims).names
    odd_dimensions = [dim for dim in dims if grid.shape.get_size(dim) % 2 != 0]
    grid = math.pad(grid, {dim: (0, 1) for dim in odd_dimensions}, padding)
    for dim in dims:
        grid = (grid[{dim: slice(1, None, 2)}] + grid[{dim: slice(0, None, 2)}]) / 2
    return grid
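
For illustration, a 1D sketch: neighboring pairs are averaged.

>>> downsample2x(wrap([1., 2., 3., 4.], spatial('x')))   # values (1.5, 3.5) along x
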
def dtype(x) ‑> phiml.backend._dtype.DType

Returns the data type of x.

Args

x
Tensor or native tensor.

Returns

DType

Expand source code
def dtype(x) -> DType:
    """
    Returns the data type of `x`.

    Args:
        x: `Tensor` or native tensor.

    Returns:
        `DType`
    """
    if isinstance(x, Tensor):
        return x.dtype
    else:
        return choose_backend(x).dtype(x)
def dual(*args, **dims: Union[int, str, tuple, list, phiml.math._shape.Shape, ForwardRef('Tensor')]) ‑> phiml.math._shape.Shape

Returns the dual dimensions of an existing Shape or creates a new Shape with only dual dimensions.

Dual dimensions are assigned the prefix ~ to distinguish them from regular dimensions. This way, a regular and dual dimension of the same name can exist in one Shape.

Dual dimensions represent the input space and are typically only present on matrices or higher-order matrices. Dual dimensions behave like batch dimensions in regular operations, if supported. During matrix multiplication, they are matched against their regular counterparts by name (ignoring the ~ prefix).

Usage for filtering dual dimensions:

>>> dual_dims = dual(shape)
>>> dual_dims = dual(tensor)

Usage for creating a Shape with only dual dimensions:

>>> dual('undef', points=2)
(~undefᵈ=None, ~pointsᵈ=2)

Here, the dimension undef is created with an undefined size of None. Undefined sizes are automatically filled in by tensor(), wrap(), stack() and concat().

To create a shape with multiple types, use merge_shapes(), concat_shapes() or the syntax shape1 & shape2.

See Also: channel(), batch(), spatial()

Args

*args

Either

  • Shape or Tensor to filter or
  • Names of dimensions with undefined sizes as str.
**dims
Dimension sizes and names. Must be empty when used as a filter operation.

Returns

Shape containing only dimensions of type dual.

Expand source code
def dual(*args, **dims: Union[int, str, tuple, list, Shape, 'Tensor']) -> Shape:
    """
    Returns the dual dimensions of an existing `Shape` or creates a new `Shape` with only dual dimensions.

    Dual dimensions are assigned the prefix `~` to distinguish them from regular dimensions.
    This way, a regular and dual dimension of the same name can exist in one `Shape`.

    Dual dimensions represent the input space and are typically only present on matrices or higher-order matrices.
    Dual dimensions behave like batch dimensions in regular operations, if supported.
    During matrix multiplication, they are matched against their regular counterparts by name (ignoring the `~` prefix).

    Usage for filtering dual dimensions:

    >>> dual_dims = dual(shape)
    >>> dual_dims = dual(tensor)

    Usage for creating a `Shape` with only dual dimensions:

    >>> dual('undef', points=2)
    (~undefᵈ=None, ~pointsᵈ=2)

    Here, the dimension `undef` is created with an undefined size of `None`.
    Undefined sizes are automatically filled in by `tensor`, `wrap`, `stack` and `concat`.

    To create a shape with multiple types, use `merge_shapes()`, `concat_shapes()` or the syntax `shape1 & shape2`.

    See Also:
        `channel`, `batch`, `spatial`

    Args:
        *args: Either

            * `Shape` or `Tensor` to filter or
            * Names of dimensions with undefined sizes as `str`.

        **dims: Dimension sizes and names. Must be empty when used as a filter operation.

    Returns:
        `Shape` containing only dimensions of type dual.
    """
    from .magic import Shaped
    if all(isinstance(arg, str) for arg in args) or dims:
        return _construct_shape(DUAL_DIM, *args, **dims)
    elif len(args) == 1 and isinstance(args[0], Shape):
        return args[0].dual
    elif len(args) == 1 and isinstance(args[0], Shaped):
        return shape(args[0]).dual
    else:
        raise AssertionError(f"dual() must be called either as a selector dual(Shape) or dual(Tensor) or as a constructor dual(*names, **dims). Got *args={args}, **dims={dims}")
def enable_debug_checks()

Once called, additional type checks are enabled. This may result in a noticeable drop in performance.

Expand source code
def enable_debug_checks():
    """
    Once called, additional type checks are enabled.
    This may result in a noticeable drop in performance.
    """
    DEBUG_CHECKS.append(True)
def equal(*objects, equal_nan=False) ‑> bool

Checks whether all objects are equal.

See Also: close(), always_close().

Args

*objects
Objects to compare. Can be tensors or other objects or None
equal_nan
If all objects are tensor-like, whether to count NaN values as equal.

Returns

bool, whether all given objects are equal to the first one.

Expand source code
def equal(*objects, equal_nan=False) -> bool:
    """
    Checks whether all objects are equal.

    See Also:
        `close()`, `always_close()`.

    Args:
        *objects: Objects to compare. Can be tensors or other objects or `None`
        equal_nan: If all objects are tensor-like, whether to count `NaN` values as equal.

    Returns:
        `bool`, whether all given objects are equal to the first one.
    """
    if objects[0] is None:
        return all(o is None for o in objects)
    if any(o is None for o in objects):
        return False
    if all(o is objects[0] for o in objects):
        return True
    try:
        tensors = [wrap(o) for o in objects]
        if any(t.dtype.kind == object for t in tensors):
            raise ValueError
    except ValueError:  # not all are tensor-like
        return all(o == objects[0] for o in objects)
    return close(*tensors, rel_tolerance=0, abs_tolerance=0, equal_nan=equal_nan)
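
For illustration, a minimal sketch:

>>> t = wrap([1, 2, 3], spatial('x'))
>>> equal(t, t + 0)   # exact element-wise comparison
True
>>> equal('same', 'same', 'same')   # non-tensor objects are compared with ==
True
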
def erf(x: ~TensorOrTree) ‑> ~TensorOrTree

Computes the error function erf(x) of the Tensor or PhiTreeNode x.

Expand source code
def erf(x: TensorOrTree) -> TensorOrTree:
    """ Computes the error function *erf(x)* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.erf)
def exp(x: ~TensorOrTree) ‑> ~TensorOrTree

Computes exp(x) of the Tensor or PhiTreeNode x.

Expand source code
def exp(x: TensorOrTree) -> TensorOrTree:
    """ Computes *exp(x)* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.exp)
def expand(value, *dims: phiml.math._shape.Shape, **kwargs)

Adds dimensions to a Tensor or tensor-like object by implicitly repeating the tensor values along the new dimensions. If value already contains any of the new dimensions, a size and type check is performed for these instead.

If any of dims varies along a dimension that is present neither in value nor on dims, it will also be added to value.

This function replaces the usual tile / repeat functions of NumPy, PyTorch, TensorFlow and Jax.

Additionally, it replaces the traditional unsqueeze / expand_dims functions.

Args

value
Shapable, such as Tensor. For tree nodes, expands all value attributes by dims or the first variable attribute if no value attributes are set.
*dims
Dimensions to be added as Shape
**kwargs
Additional keyword arguments required by specific implementations. Adding spatial dimensions to fields requires the bounds: Box argument specifying the physical extent of the new dimensions. Adding batch dimensions must always work without keyword arguments.

Returns

Same type as value.

Expand source code
def expand(value, *dims: Shape, **kwargs):
    """
    Adds dimensions to a `Tensor` or tensor-like object by implicitly repeating the tensor values along the new dimensions.
    If `value` already contains any of the new dimensions, a size and type check is performed for these instead.

    If any of `dims` varies along a dimension that is present neither in `value` nor on `dims`, it will also be added to `value`.

    This function replaces the usual `tile` / `repeat` functions of
    [NumPy](https://numpy.org/doc/stable/reference/generated/numpy.tile.html),
    [PyTorch](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.repeat),
    [TensorFlow](https://www.tensorflow.org/api_docs/python/tf/tile) and
    [Jax](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.tile.html).

    Additionally, it replaces the traditional `unsqueeze` / `expand_dims` functions.

    Args:
        value: `phiml.math.magic.Shapable`, such as `phiml.math.Tensor`
            For tree nodes, expands all value attributes by `dims` or the first variable attribute if no value attributes are set.
        *dims: Dimensions to be added as `Shape`
        **kwargs: Additional keyword arguments required by specific implementations.
            Adding spatial dimensions to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions.
            Adding batch dimensions must always work without keyword arguments.

    Returns:
        Same type as `value`.
    """
    dims = concat_shapes(*dims)
    combined = merge_shapes(value, dims)  # check that existing sizes match
    if not dims.without(shape(value)):  # no new dims to add
        if set(dims) == set(shape(value).only(dims)):  # sizes and item names might differ, though
            return value
    dims &= combined.shape.without('dims')  # add missing non-uniform dims
    # --- First try __expand__
    if hasattr(value, '__expand__'):
        result = value.__expand__(dims, **kwargs)
        if result is not NotImplemented:
            return result
    # --- Next try Tree Node ---
    if isinstance(value, PhiTreeNode):
        attributes = value_attributes(value) if hasattr(value, '__value_attrs__') else [variable_attributes(value)[0]]
        new_attributes = {a: expand(getattr(value, a), dims, **kwargs) for a in attributes}
        return copy_with(value, **new_attributes)
    # --- Fallback: stack ---
    if hasattr(value, '__stack__'):
        if dims.volume > 8:
            warnings.warn(f"expand() default implementation is slow on large shapes {dims}. Please implement __expand__() for {type(value).__name__} as defined in phiml.math.magic", RuntimeWarning, stacklevel=2)
        for dim in reversed(dims):
            value = stack((value,) * dim.size, dim, **kwargs)
            assert value is not NotImplemented, "Value must implement either __expand__ or __stack__"
        return value
    try:  # value may be a native scalar
        from ._tensors import expand_tensor, wrap
        value = wrap(value)
    except ValueError:
        raise AssertionError(f"Cannot expand non-shapable object {type(value)}")
    return expand_tensor(value, dims)
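
For illustration, a minimal sketch:

>>> t = wrap([1, 2, 3], spatial('x'))
>>> expand(t, batch(b=2)).shape   # now contains both b and x
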
def factor_ilu(matrix: phiml.math._tensors.Tensor, iterations: int, safe=False)

Incomplete LU factorization for dense or sparse matrices.

For sparse matrices, keeps the sparsity pattern of matrix. L and U will be trimmed to the respective areas, i.e. stored upper elements in L will be dropped, unless this would lead to varying numbers of stored elements along a batch dimension.

Args

matrix
Dense or sparse matrix to factor. Currently, compressed sparse matrices are decompressed before running the ILU algorithm.
iterations
(Optional) Number of fixed-point iterations to perform. If not given, will be automatically determined from matrix size and sparsity.
safe
If False (default), only matrices with a rank deficiency of up to 1 can be factored as all values of L and U are uniquely determined. For matrices with higher rank deficiencies, the result includes NaN values. If True, the algorithm runs slightly slower but can factor highly rank-deficient matrices as well. However, then L is underdetermined and unused values of L are set to 0. Rank deficiencies of 1 occur frequently in periodic settings but higher ones are rare.

Returns

L
Lower-triangular matrix as Tensor with all diagonal elements equal to 1.
U
Upper-triangular matrix as Tensor.

Examples

>>> matrix = wrap([[-2, 1, 0],
>>>                [1, -2, 1],
>>>                [0, 1, -2]], channel('row'), dual('col'))
>>> L, U = math.factor_ilu(matrix)
>>> math.print(L)
row=0      1.          0.          0.         along ~col
row=1     -0.5         1.          0.         along ~col
row=2      0.         -0.6666667   1.         along ~col
>>> math.print(L @ U, "L @ U")
            L @ U
row=0     -2.   1.   0.  along ~col
row=1      1.  -2.   1.  along ~col
row=2      0.   1.  -2.  along ~col
Expand source code
def factor_ilu(matrix: Tensor, iterations: int, safe=False):
    """
    Incomplete LU factorization for dense or sparse matrices.

    For sparse matrices, keeps the sparsity pattern of `matrix`.
    L and U will be trimmed to the respective areas, i.e. stored upper elements in L will be dropped,
     unless this would lead to varying numbers of stored elements along a batch dimension.

    Args:
        matrix: Dense or sparse matrix to factor.
            Currently, compressed sparse matrices are decompressed before running the ILU algorithm.
        iterations: (Optional) Number of fixed-point iterations to perform.
            If not given, will be automatically determined from matrix size and sparsity.
        safe: If `False` (default), only matrices with a rank deficiency of up to 1 can be factored as all values of L and U are uniquely determined.
            For matrices with higher rank deficiencies, the result includes `NaN` values.
            If `True`, the algorithm runs slightly slower but can factor highly rank-deficient matrices as well.
            However, then L is underdetermined and unused values of L are set to 0.
            Rank deficiencies of 1 occur frequently in periodic settings but higher ones are rare.

    Returns:
        L: Lower-triangular matrix as `Tensor` with all diagonal elements equal to 1.
        U: Upper-triangular matrix as `Tensor`.

    Examples:
        >>> matrix = wrap([[-2, 1, 0],
        >>>                [1, -2, 1],
        >>>                [0, 1, -2]], channel('row'), dual('col'))
        >>> L, U = math.factor_ilu(matrix)
        >>> math.print(L)
        row=0      1.          0.          0.         along ~col
        row=1     -0.5         1.          0.         along ~col
        row=2      0.         -0.6666667   1.         along ~col
        >>> math.print(L @ U, "L @ U")
                    L @ U
        row=0     -2.   1.   0.  along ~col
        row=1      1.  -2.   1.  along ~col
        row=2      0.   1.  -2.  along ~col
    """
    if isinstance(matrix, CompressedSparseMatrix):
        matrix = matrix.decompress()
    if isinstance(matrix, SparseCoordinateTensor):
        ind_batch, channels, indices, values, shape = matrix._native_coo_components(dual, matrix=True)
        (l_idx_nat, l_val_nat), (u_idx_nat, u_val_nat) = incomplete_lu_coo(indices, values, shape, iterations, safe)
        col_dims = matrix._shape.only(dual)
        row_dims = matrix._dense_shape.without(col_dims)
        l_indices = matrix._unpack_indices(l_idx_nat[..., 0], l_idx_nat[..., 1], row_dims, col_dims, ind_batch)
        u_indices = matrix._unpack_indices(u_idx_nat[..., 0], u_idx_nat[..., 1], row_dims, col_dims, ind_batch)
        l_values = reshaped_tensor(l_val_nat, [ind_batch, instance(matrix._values), channels], convert=False)
        u_values = reshaped_tensor(u_val_nat, [ind_batch, instance(matrix._values), channels], convert=False)
        lower = SparseCoordinateTensor(l_indices, l_values, matrix._dense_shape, matrix._can_contain_double_entries, matrix._indices_sorted, matrix._indices_constant)
        upper = SparseCoordinateTensor(u_indices, u_values, matrix._dense_shape, matrix._can_contain_double_entries, matrix._indices_sorted, matrix._indices_constant)
    else:  # dense matrix
        native_matrix = reshaped_native(matrix, [batch, non_batch(matrix).non_dual, dual, EMPTY_SHAPE])
        l_native, u_native = incomplete_lu_dense(native_matrix, iterations, safe)
        lower = reshaped_tensor(l_native, [batch(matrix), non_batch(matrix).non_dual, dual(matrix), EMPTY_SHAPE])
        upper = reshaped_tensor(u_native, [batch(matrix), non_batch(matrix).non_dual, dual(matrix), EMPTY_SHAPE])
    return lower, upper
def factorial(x: ~TensorOrTree) ‑> ~TensorOrTree

Computes factorial(x) of the Tensor or PhiTreeNode x. For floating-point numbers computes the continuous factorial using the gamma function. For integer numbers computes the exact factorial and returns the same integer type. However, this results in integer overflow for inputs larger than 12 (int32) or 19 (int64).

Expand source code
def factorial(x: TensorOrTree) -> TensorOrTree:
    """
    Computes *factorial(x)* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`.
    For floating-point numbers computes the continuous factorial using the gamma function.
    For integer numbers computes the exact factorial and returns the same integer type.
    However, this results in integer overflow for inputs larger than 12 (int32) or 19 (int64).
    """
    return _backend_op1(x, Backend.factorial)
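
For illustration, a minimal sketch:

>>> factorial(wrap(5))    # integer input: exact result 120
>>> factorial(wrap(4.))   # float input: via the gamma function, 24.0
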
def fft(x: phiml.math._tensors.Tensor, dims: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function spatial>) ‑> phiml.math._tensors.Tensor

Performs a fast Fourier transform (FFT) on all spatial dimensions of x.

The inverse operation is ifft().

Implementations:

  • NumPy: np.fft.fft, numpy.fft.fft2, numpy.fft.fftn
  • PyTorch: torch.fft.fft
  • TensorFlow: tf.signal.fft, tf.signal.fft2d, tf.signal.fft3d
  • Jax: jax.numpy.fft.fft, jax.numpy.fft.fft2, jax.numpy.fft.fftn

Args

x
Uniform complex or float Tensor with at least one spatial dimension.
dims
Dimensions along which to perform the FFT. If None, performs the FFT along all spatial dimensions of x.

Returns

Ƒ(x) as complex Tensor

Expand source code
def fft(x: Tensor, dims: DimFilter = spatial) -> Tensor:
    """
    Performs a fast Fourier transform (FFT) on all spatial dimensions of x.
    
    The inverse operation is `ifft()`.

    Implementations:

    * NumPy: [`np.fft.fft`](https://numpy.org/doc/stable/reference/generated/numpy.fft.fft.html),
      [`numpy.fft.fft2`](https://numpy.org/doc/stable/reference/generated/numpy.fft.fft2.html),
      [`numpy.fft.fftn`](https://numpy.org/doc/stable/reference/generated/numpy.fft.fftn.html)
    * PyTorch: [`torch.fft.fft`](https://pytorch.org/docs/stable/fft.html)
    * TensorFlow: [`tf.signal.fft`](https://www.tensorflow.org/api_docs/python/tf/signal/fft),
      [`tf.signal.fft2d`](https://www.tensorflow.org/api_docs/python/tf/signal/fft2d),
      [`tf.signal.fft3d`](https://www.tensorflow.org/api_docs/python/tf/signal/fft3d)
    * Jax: [`jax.numpy.fft.fft`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.fft.fft.html),
      [`jax.numpy.fft.fft2`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.fft.fft2.html),
      [`jax.numpy.fft.fftn`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.fft.fftn.html)

    Args:
        x: Uniform complex or float `Tensor` with at least one spatial dimension.
        dims: Dimensions along which to perform the FFT.
            If `None`, performs the FFT along all spatial dimensions of `x`.

    Returns:
        *Ƒ(x)* as complex `Tensor`
    """
    dims = x.shape.only(dims)
    x_native = x.native(x.shape)
    result_native = choose_backend(x_native).fft(x_native, x.shape.indices(dims))
    return NativeTensor(result_native, x.shape)
def fftfreq(resolution: phiml.math._shape.Shape, dx: Union[float, phiml.math._tensors.Tensor] = 1, dtype: phiml.backend._dtype.DType = None)

Returns the discrete Fourier transform sample frequencies. These are the frequencies corresponding to the components of the result of math.fft on a tensor of shape resolution.

Args

resolution
Grid resolution measured in cells
dx
Distance between sampling points in real space.
dtype
Data type of the returned tensor (Default value = None)

Returns

Tensor holding the frequencies of the corresponding values computed by math.fft

Expand source code
def fftfreq(resolution: Shape, dx: Union[Tensor, float] = 1, dtype: DType = None):
    """
    Returns the discrete Fourier transform sample frequencies.
    These are the frequencies corresponding to the components of the result of `math.fft` on a tensor of shape `resolution`.

    Args:
        resolution: Grid resolution measured in cells
        dx: Distance between sampling points in real space.
        dtype: Data type of the returned tensor (Default value = None)

    Returns:
        `Tensor` holding the frequencies of the corresponding values computed by math.fft
    """
    assert resolution.spatial, f"resolution must contain at least one spatial dimension"
    k = meshgrid(**{dim: np.fft.fftfreq(int(n)) for dim, n in resolution.spatial._named_sizes})
    k /= dx
    return to_float(k) if dtype is None else cast(k, dtype)
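
For illustration, a 1D sketch (the frequencies follow numpy.fft.fftfreq):

>>> fftfreq(spatial(x=4))   # frequencies (0, 0.25, -0.5, -0.25) along x
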
def find_closest(vectors: phiml.math._tensors.Tensor, query: phiml.math._tensors.Tensor, method='kd', index_dim=(indexᶜ=None))

Finds the closest vector to query from vectors. This is implemented using a k-d tree built from vectors.

Args

vectors
Points to find.
query
Target locations.
method

One of the following:

  • 'dense': compute the pair-wise distances between all vectors and query points, then return the index of the smallest distance for each query point.
  • 'kd' (default): Build a k-d tree from vectors and use it to query all points in query. The tree will be cached if this call is jit-compiled and vectors is constant.
index_dim
Dimension along which components should be listed.

Returns

Index tensor idx so that the closest points to query are vectors[idx].

Expand source code
def find_closest(vectors: Tensor, query: Tensor, method='kd', index_dim=channel('index')):
    """
    Finds the closest vector to `query` from `vectors`.
    This is implemented using a k-d tree built from `vectors`.


    Args:
        vectors: Points to find.
        query: Target locations.
        method: One of the following:

            * `'dense'`: compute the pair-wise distances between all vectors and query points, then return the index of the smallest distance for each query point.
            * `'kd'` (default): Build a k-d tree from `vectors` and use it to query all points in `query`. The tree will be cached if this call is jit-compiled and `vectors` is constant.
        index_dim: Dimension along which components should be listed.

    Returns:
        Index tensor `idx` so that the closest points to `query` are `vectors[idx]`.
    """
    assert not dual(vectors), f"vectors cannot have dual dims"
    if method == 'dense':
        dist = vec_squared(query - vectors)
        idx = math.argmin(dist, non_batch(vectors).non_channel)
        return rename_dims(idx, '_index', index_dim.with_size(non_batch(vectors).non_channel.names))
    # --- k-d tree ---
    from scipy.spatial import KDTree
    native_query = reshaped_native(query, [batch(vectors), query.shape.without(batch(vectors)).non_channel, channel(query)])
    if vectors.available:
        kd_trees = [KDTree(reshaped_numpy(vectors[b], [vectors.shape.non_channel.non_batch, channel])) for b in batch(vectors).meshgrid()]
        def perform_query(np_query):
            return np.stack([kd_tree.query(np_query[i])[1] for i, kd_tree in enumerate(kd_trees)])
        native_idx = query.default_backend.numpy_call(perform_query, (batch(vectors).volume, query.shape.without(batch(vectors)).non_channel.volume), DType(int, 64), native_query)
    else:
        b = choose_backend_t(vectors, query)
        native_vectors = reshaped_native(vectors, [batch, ..., channel])
        def perform_query(np_vectors, np_query):
            return np.stack([KDTree(np_vectors[i]).query(np_query[i])[1] for i in range(batch(vectors).volume)])
        native_idx = b.numpy_call(perform_query, (batch(vectors).volume, query.shape.without(batch(vectors)).non_channel.volume), DType(int, 64), native_vectors, native_query)
    native_multi_idx = choose_backend(native_idx).unravel_index(native_idx, vectors.shape.non_batch.non_channel.sizes)
    return reshaped_tensor(native_multi_idx, [batch(vectors), query.shape.without(batch(vectors)).non_channel, index_dim.with_size(non_batch(vectors).non_channel.names)])
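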
def find_differences(tree1, tree2, compare_tensors_by_id=False) ‑> Sequence[Tuple[str, str, Any, Any]]

Compares tree1 and tree2 and returns all differences in the form (difference_description: str, variable_identifier: str, value1, value2).

Args

tree1
Nested tree or leaf
tree2
Nested tree or leaf
compare_tensors_by_id
Whether Tensor objects should be compared by identity or values.

Returns

List of differences, each represented as a tuple.

Expand source code
def find_differences(tree1, tree2, compare_tensors_by_id=False) -> Sequence[Tuple[str, str, Any, Any]]:
    """
    Compares `tree1` and `tree2` and returns all differences in the form `(difference_description: str, variable_identifier: str, value1, value2)`.

    Args:
        tree1: Nested tree or leaf
        tree2: Nested tree or leaf
        compare_tensors_by_id: Whether `phiml.math.Tensor` objects should be compared by identity or values.

    Returns:
        List of differences, each represented as a `tuple`.
    """
    from ._ops import equal, always_close
    result = []
    _recursive_diff(tree1, tree2, '', result, compare_tensors_by_id)
    return result
def finite_fill(values: phiml.math._tensors.Tensor, dims: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function spatial>, distance: int = 1, diagonal: bool = True, padding=zero-gradient) ‑> Tuple[phiml.math._tensors.Tensor, phiml.math._tensors.Tensor]

Fills non-finite (NaN, inf, -inf) values from nearby finite values. Extrapolates the finite values of values for distance steps along dims. Where multiple finite values could fill an invalid value, the average is computed.

Args

values
Floating-point Tensor. All non-numeric values (NaN, inf, -inf) are interpreted as invalid.
dims
Dimensions along which to fill invalid values from finite ones.
distance
Number of extrapolation steps, each extrapolating one cell out.
diagonal
Whether to extrapolate values to their diagonal neighbors per step.
padding
Extrapolation of values. Determines whether to extrapolate from the edges as well.

Returns

Tensor of same shape as values.

Expand source code
def finite_fill(values: Tensor, dims: DimFilter = spatial, distance: int = 1, diagonal: bool = True, padding=extrapolation.BOUNDARY) -> Tuple[Tensor, Tensor]:
    """
    Fills non-finite (NaN, inf, -inf) values from nearby finite values.
    Extrapolates the finite values of `values` for `distance` steps along `dims`.
    Where multiple finite values could fill an invalid value, the average is computed.

    Args:
        values: Floating-point `Tensor`. All non-numeric values (`NaN`, `inf`, `-inf`) are interpreted as invalid.
        dims: Dimensions along which to fill invalid values from finite ones.
        distance: Number of extrapolation steps, each extrapolating one cell out.
        diagonal: Whether to extrapolate values to their diagonal neighbors per step.
        padding: Extrapolation of `values`. Determines whether to extrapolate from the edges as well.

    Returns:
        `Tensor` of same shape as `values`.
    """
    if diagonal:
        distance = min(distance, max(values.shape.sizes))
        dims = values.shape.only(dims)
        for _ in range(distance):
            valid = math.is_finite(values)
            valid_values = math.where(valid, values, 0)
            overlap = valid
            for dim in dims:
                values_l, values_r = shift(valid_values, (-1, 1), dims=dim, padding=padding)
                valid_values = math.sum_(values_l + values_r + valid_values, dim='shift')
                mask_l, mask_r = shift(overlap, (-1, 1), dims=dim, padding=padding)
                overlap = math.sum_(mask_l + mask_r + overlap, dim='shift')
            values = math.where(valid, values, valid_values / overlap)
    else:
        distance = min(distance, sum(values.shape.sizes))
        for _ in range(distance):
            neighbors = concat(shift(values, (-1, 1), dims, padding=padding, stack_dim=channel('neighbors')), 'neighbors')
            finite = math.is_finite(neighbors)
            avg_neighbors = math.sum_(math.where(finite, neighbors, 0), 'neighbors') / math.sum_(finite, 'neighbors')
            values = math.where(math.is_finite(values), values, avg_neighbors)
    return values
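
A minimal usage sketch (assuming the default NumPy backend):

from phiml import math

data = math.wrap([1., float('nan'), 3.], math.spatial('x'))
filled = math.finite_fill(data)  # the NaN is replaced by the average of its finite neighbors, here 2.0
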
def finite_max(value, dim: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function non_batch>, default: Union[complex, float] = nan)

Finds the maximum along dim ignoring all non-finite values.

Args

value
Tensor or list / tuple of Tensors.
dim

Dimension or dimensions to be reduced. One of

  • None to reduce all non-batch dimensions
  • str containing single dimension or comma-separated list of dimensions
  • Tuple[str] or List[str]
  • Shape
  • batch(), instance(), spatial(), channel() to select dimensions by type
  • '0' when isinstance(value, (tuple, list)) to reduce the sequence of Tensors
default
Value to use where no finite value was encountered.

Returns

Tensor without the reduced dimensions.

Expand source code
def finite_max(value, dim: DimFilter = non_batch, default: Union[complex, float] = float('NaN')):
    """
    Finds the maximum along `dim` ignoring all non-finite values.

    Args:
        value: `Tensor` or `list` / `tuple` of Tensors.
        dim: Dimension or dimensions to be reduced. One of

            * `None` to reduce all non-batch dimensions
            * `str` containing single dimension or comma-separated list of dimensions
            * `Tuple[str]` or `List[str]`
            * `Shape`
            * `batch`, `instance`, `spatial`, `channel` to select dimensions by type
            * `'0'` when `isinstance(value, (tuple, list))` to add up the sequence of Tensors

        default: Value to use where no finite value was encountered.

    Returns:
        `Tensor` without the reduced dimensions.
    """
    value_inf = where(is_finite(value), value, float('-inf'))
    result_inf = max_(value_inf, dim)
    return where(is_finite(result_inf), result_inf, default)
def finite_mean(value, dim: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function non_batch>, default: Union[complex, float] = nan)

Computes the mean value of all finite values in value along dim.

Args

value
Tensor or list / tuple of Tensors.
dim

Dimension or dimensions to be reduced. One of

  • None to reduce all non-batch dimensions
  • str containing single dimension or comma-separated list of dimensions
  • Tuple[str] or List[str]
  • Shape
  • batch(), instance(), spatial(), channel() to select dimensions by type
  • '0' when isinstance(value, (tuple, list)) to reduce the sequence of Tensors
default
Value to use where no finite value was encountered.

Returns

Tensor without the reduced dimensions.

Expand source code
def finite_mean(value, dim: DimFilter = non_batch, default: Union[complex, float] = float('NaN')):
    """
    Computes the mean value of all finite values in `value` along `dim`.

    Args:
        value: `Tensor` or `list` / `tuple` of Tensors.
        dim: Dimension or dimensions to be reduced. One of

            * `None` to reduce all non-batch dimensions
            * `str` containing single dimension or comma-separated list of dimensions
            * `Tuple[str]` or `List[str]`
            * `Shape`
            * `batch`, `instance`, `spatial`, `channel` to select dimensions by type
            * `'0'` when `isinstance(value, (tuple, list))` to add up the sequence of Tensors

        default: Value to use where no finite value was encountered.

    Returns:
        `Tensor` without the reduced dimensions.
    """
    finite = is_finite(value)
    summed = sum_(where(finite, value, 0), dim)
    count = sum_(finite, dim)
    mean_nan = summed / count
    return where(is_finite(mean_nan), mean_nan, default)
def finite_min(value, dim: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function non_batch>, default: Union[complex, float] = nan)

Finds the minimum along dim ignoring all non-finite values.

Args

value
Tensor or list / tuple of Tensors.
dim

Dimension or dimensions to be reduced. One of

  • None to reduce all non-batch dimensions
  • str containing single dimension or comma-separated list of dimensions
  • Tuple[str] or List[str]
  • Shape
  • batch(), instance(), spatial(), channel() to select dimensions by type
  • '0' when isinstance(value, (tuple, list)) to reduce the sequence of Tensors
default
Value to use where no finite value was encountered.

Returns

Tensor without the reduced dimensions.

Expand source code
def finite_min(value, dim: DimFilter = non_batch, default: Union[complex, float] = float('NaN')):
    """
    Finds the minimum along `dim` ignoring all non-finite values.

    Args:
        value: `Tensor` or `list` / `tuple` of Tensors.
        dim: Dimension or dimensions to be reduced. One of

            * `None` to reduce all non-batch dimensions
            * `str` containing single dimension or comma-separated list of dimensions
            * `Tuple[str]` or `List[str]`
            * `Shape`
            * `batch`, `instance`, `spatial`, `channel` to select dimensions by type
            * `'0'` when `isinstance(value, (tuple, list))` to add up the sequence of Tensors

        default: Value to use where no finite value was encountered.

    Returns:
        `Tensor` without the reduced dimensions.
    """
    value_inf = where(is_finite(value), value, float('inf'))
    result_inf = min_(value_inf, dim)
    return where(is_finite(result_inf), result_inf, default)
def finite_sum(value, dim: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function non_batch>, default: Union[complex, float] = nan)

Sums all finite values in value along dim.

Args

value
Tensor or list / tuple of Tensors.
dim

Dimension or dimensions to be reduced. One of

  • None to reduce all non-batch dimensions
  • str containing single dimension or comma-separated list of dimensions
  • Tuple[str] or List[str]
  • Shape
  • batch(), instance(), spatial(), channel() to select dimensions by type
  • '0' when isinstance(value, (tuple, list)) to add up the sequence of Tensors
default
Value to use where no finite value was encountered.

Returns

Tensor without the reduced dimensions.

Expand source code
def finite_sum(value, dim: DimFilter = non_batch, default: Union[complex, float] = float('NaN')):
    """
    Sums all finite values in `value` along `dim`.

    Args:
        value: `Tensor` or `list` / `tuple` of Tensors.
        dim: Dimension or dimensions to be reduced. One of

            * `None` to reduce all non-batch dimensions
            * `str` containing single dimension or comma-separated list of dimensions
            * `Tuple[str]` or `List[str]`
            * `Shape`
            * `batch`, `instance`, `spatial`, `channel` to select dimensions by type
            * `'0'` when `isinstance(value, (tuple, list))` to add up the sequence of Tensors

        default: Value to use where no finite value was encountered.

    Returns:
        `Tensor` without the reduced dimensions.
    """
    finite = is_finite(value)
    summed = sum_(where(finite, value, 0), dim)
    return where(any_(finite, dim), summed, default)
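
For illustration, a small sketch of the finite reductions applied to a tensor containing a NaN (assuming the default NumPy backend):

from phiml import math

data = math.wrap([1., float('nan'), 3.], math.spatial('x'))
math.finite_sum(data, 'x')   # 4.0
math.finite_mean(data, 'x')  # 2.0
math.finite_max(data, 'x')   # 3.0
math.finite_min(data, 'x')   # 1.0
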
def flatten(value, flat_dim: phiml.math._shape.Shape = (flatⁱ=None), flatten_batch=False, **kwargs)

Returns a Tensor with the same values as value but only a single dimension flat_dim. The order of the values in memory is not changed.

Args

value
Shapable, such as Tensor. If a non-Shaped object or one with an empty Shape is passed, it is returned without alteration.
flat_dim
Dimension name and type as Shape object. The size is ignored.
flatten_batch
Whether to flatten batch dimensions as well. If False, batch dimensions are kept, only non-batch dimensions are flattened.
**kwargs
Additional keyword arguments required by specific implementations. Adding spatial dimensions to fields requires the bounds: Box argument specifying the physical extent of the new dimensions. Adding batch dimensions must always work without keyword arguments.

Returns

Same type as value.

Examples

>>> flatten(math.zeros(spatial(x=4, y=3)))
(flatⁱ=12) const 0.0
Expand source code
def flatten(value, flat_dim: Shape = instance('flat'), flatten_batch=False, **kwargs):
    """
    Returns a `Tensor` with the same values as `value` but only a single dimension `flat_dim`.
    The order of the values in memory is not changed.

    Args:
        value: `phiml.math.magic.Shapable`, such as `Tensor`.
            If a non-`phiml.math.magic.Shaped` object or one with an empty `Shape` is passed, it is returned without alteration.
        flat_dim: Dimension name and type as `Shape` object. The size is ignored.
        flatten_batch: Whether to flatten batch dimensions as well.
            If `False`, batch dimensions are kept, only non-batch dimensions are flattened.
        **kwargs: Additional keyword arguments required by specific implementations.
            Adding spatial dimensions to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions.
            Adding batch dimensions must always work without keyword arguments.

    Returns:
        Same type as `value`.

    Examples:
        >>> flatten(math.zeros(spatial(x=4, y=3)))
        (flatⁱ=12) const 0.0
    """
    assert isinstance(flat_dim, Shape) and flat_dim.rank == 1, flat_dim
    if not isinstance(value, Shaped):
        return value
    if shape(value).is_empty:
        return value
    assert isinstance(value, Shapable) and isinstance(value, Shaped), f"value must be Shapable but got {type(value)}"
    # --- First try __flatten__ ---
    if hasattr(value, '__flatten__'):
        result = value.__flatten__(flat_dim, flatten_batch, **kwargs)
        if result is not NotImplemented:
            return result
    # There is no tree node implementation for flatten because pack_dims is just as fast
    # --- Fallback: pack_dims ---
    return pack_dims(value, shape(value) if flatten_batch else non_batch(value), flat_dim, **kwargs)
def floor(x: ~TensorOrTree) ‑> ~TensorOrTree

Computes ⌊x⌋ of the Tensor or PhiTreeNode x.

Expand source code
def floor(x: TensorOrTree) -> TensorOrTree:
    """ Computes *⌊x⌋* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.floor)
def fourier_laplace(grid: phiml.math._tensors.Tensor, dx: Union[phiml.math._tensors.Tensor, phiml.math._shape.Shape, float, list, tuple], times: int = 1)

Applies the spatial laplace operator to the given tensor with periodic boundary conditions.

Note: The results of fourier_laplace() and laplace() are close but not identical.

This implementation computes the laplace operator in Fourier space. The result for periodic fields is exact, i.e. no numerical instabilities can occur, even for higher-order derivatives.

Args

grid
tensor, assumed to have periodic boundary conditions
dx
distance between grid points, tensor-like, scalar or vector
times
number of times the laplace operator is applied. The computational cost is independent of this parameter.

Returns

Tensor of same shape as grid.

Expand source code
def fourier_laplace(grid: Tensor,
                    dx: Union[Tensor, Shape, float, list, tuple],
                    times: int = 1):
    """
    Applies the spatial laplace operator to the given tensor with periodic boundary conditions.
    
    *Note:* The results of `fourier_laplace` and `laplace` are close but not identical.
    
    This implementation computes the laplace operator in Fourier space.
    The result for periodic fields is exact, i.e. no numerical instabilities can occur, even for higher-order derivatives.

    Args:
      grid: tensor, assumed to have periodic boundary conditions
      dx: distance between grid points, tensor-like, scalar or vector
      times: number of times the laplace operator is applied. The computational cost is independent of this parameter.
      grid: Tensor: 
      dx: Tensor or Shape or float or list or tuple: 
      times: int:  (Default value = 1)

    Returns:
      tensor of same shape as `tensor`

    """
    frequencies = math.fft(math.to_complex(grid))
    k_squared = math.sum_(math.fftfreq(grid.shape) ** 2, 'vector')
    fft_laplace = -(2 * np.pi) ** 2 * k_squared
    result = math.real(math.ifft(frequencies * fft_laplace ** times))
    return math.cast(result / wrap(dx) ** 2, grid.dtype)
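
A minimal usage sketch (assuming a periodic 1D field and the default backend):

from phiml import math

grid = math.random_normal(math.spatial(x=64))      # field assumed periodic
lap = math.fourier_laplace(grid, dx=1.)            # spectral Laplacian, same shape as grid
lap2 = math.fourier_laplace(grid, dx=1., times=2)  # bi-Laplacian at the same computational cost
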
def fourier_poisson(grid: phiml.math._tensors.Tensor, dx: Union[phiml.math._tensors.Tensor, phiml.math._shape.Shape, float, list, tuple], times: int = 1)

Inverse operation to fourier_laplace().

Args

grid
Tensor, assumed to have periodic boundary conditions
dx
Distance between grid points, tensor-like, scalar or vector
times
Number of times the inverse operator is applied. The computational cost is independent of this parameter.

Returns

Tensor of same shape as grid.

Expand source code
def fourier_poisson(grid: Tensor,
                    dx: Union[Tensor, Shape, float, list, tuple],
                    times: int = 1):
    """
    Inverse operation to `fourier_laplace`.

    Args:
      grid: Tensor: 
      dx: Tensor or Shape or float or list or tuple: 
      times: int:  (Default value = 1)

    Returns:

    """
    frequencies = math.fft(math.to_complex(grid))
    k_squared = math.sum_(math.fftfreq(grid.shape) ** 2, 'vector')
    fft_laplace = -(2 * np.pi) ** 2 * k_squared
    # fft_laplace.tensor[(0,) * math.ndims(k_squared)] = math.inf  # assume NumPy array to edit
    result = math.real(math.ifft(math.safe_div(frequencies, math.to_complex(fft_laplace ** times))))
    return math.cast(result * wrap(dx) ** 2, grid.dtype)
def frequency_loss(x, frequency_falloff: float = 100, threshold=1e-05, ignore_mean=False, n=2) ‑> phiml.math._tensors.Tensor

Penalizes the squared values of x in frequency (Fourier) space. Lower frequencies are weighted more strongly than higher frequencies, depending on frequency_falloff.

Args

x
Tensor or PhiTreeNode Values to penalize, typically actual - target.
frequency_falloff
Large values put more emphasis on lower frequencies, 1.0 weights all frequencies equally. Note: The total loss is not normalized. Varying the value will result in losses of different magnitudes.
threshold
Frequency amplitudes below this value are ignored. Setting this to zero may cause infinities or NaN values during backpropagation.
ignore_mean
If True, does not penalize the mean value (frequency=0 component).
n
Norm applied to the weighted spectrum: 1 for an L1 loss, 2 (default) for an L2 loss.

Returns

Scalar loss value

Expand source code
def frequency_loss(x,
                   frequency_falloff: float = 100,
                   threshold=1e-5,
                   ignore_mean=False,
                   n=2) -> Tensor:
    """
    Penalizes the squared `values` in frequency (Fourier) space.
    Lower frequencies are weighted more strongly than higher frequencies, depending on `frequency_falloff`.

    Args:
        x: `Tensor` or `phiml.math.magic.PhiTreeNode` Values to penalize, typically `actual - target`.
        frequency_falloff: Large values put more emphasis on lower frequencies, 1.0 weights all frequencies equally.
            *Note*: The total loss is not normalized. Varying the value will result in losses of different magnitudes.
        threshold: Frequency amplitudes below this value are ignored.
            Setting this to zero may cause infinities or NaN values during backpropagation.
        ignore_mean: If `True`, does not penalize the mean value (frequency=0 component).

    Returns:
      Scalar loss value
    """
    assert n in (1, 2)
    if isinstance(x, Tensor):
        if ignore_mean:
            x -= math.mean(x, x.shape.non_batch)
        k_squared = vec_squared(math.fftfreq(x.shape.spatial))
        weights = math.exp(-0.5 * k_squared * frequency_falloff ** 2)

        diff_fft = abs_square(math.fft(x) * weights)
        diff_fft = math.sqrt(math.maximum(diff_fft, threshold))
        return l2_loss(diff_fft) if n == 2 else l1_loss(diff_fft)
    elif isinstance(x, PhiTreeNode):
        losses = [frequency_loss(getattr(x, a), frequency_falloff, threshold, ignore_mean, n) for a in variable_values(x)]
        return sum(losses)
    else:
        raise ValueError(x)
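
For illustration, a small sketch (the field names are illustrative):

from phiml import math

actual = math.random_normal(math.spatial(x=32))
target = math.zeros(math.spatial(x=32))
loss = math.frequency_loss(actual - target, frequency_falloff=50)  # scalar, emphasizes low-frequency errors
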
def from_dict(dict_: dict, convert=False)

Loads a Tensor or Shape from a serialized form.

See Also: to_dict().

Args

dict_
Serialized tensor properties.
convert
Whether to convert the data to the current backend format or keep it as a Numpy array.

Returns

Tensor or Shape.

Expand source code
def from_dict(dict_: dict, convert=False):
    """
    Loads a `Tensor` or `Shape` from a serialized form.

    See Also:
        `to_dict()`.

    Args:
        dict_: Serialized tensor properties.
        convert: Whether to convert the data to the current backend format or keep it as a Numpy array.

    Returns:
        `Tensor` or `Shape`.
    """
    shape = Shape._from_dict(dict_)
    if 'data' in dict_:
        return tensor(dict_['data'], shape, convert=convert)
    else:
        return shape
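
A minimal round-trip sketch using to_dict():

from phiml import math

t = math.wrap([1., 2., 3.], math.channel('vector'))
data = math.to_dict(t)           # plain dict describing shape and values
restored = math.from_dict(data)  # Tensor with the same shape and values as t
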
def gather(values, indices: phiml.math._tensors.Tensor, dims: Union[str, tuple, list, set, ForwardRef('Shape'), Callable, None] = None)

Gathers the entries of values at positions described by indices. All non-channel dimensions of indices that are part of values but not indexed are treated as batch dimensions.

See Also: scatter().

Args

values
Tensor or phiml.math.magic.PhiTreeNode containing values to gather.
indices
int Tensor. Multidimensional position references in values. Must contain a single channel dimension for the index vector matching the number of dimensions to index. This channel dimension should list the dimension names to index as item names unless explicitly specified as dims.
dims
(Optional) Dimensions indexed by indices. Alternatively, the dimensions can be specified as the item names of the channel dimension of indices. If None and no index item names are specified, will default to all spatial dimensions or all instance dimensions, depending on which ones are present (but not both).

Returns

Tensor with combined batch dimensions, channel dimensions of values and spatial/instance dimensions of indices.

Expand source code
def gather(values, indices: Tensor, dims: Union[DimFilter, None] = None):
    """
    Gathers the entries of `values` at positions described by `indices`.
    All non-channel dimensions of `indices` that are part of `values` but not indexed are treated as batch dimensions.

    See Also:
        `scatter()`.

    Args:
        values: `Tensor` or `phiml.math.magic.PhiTreeNode` containing values to gather.
        indices: `int` `Tensor`. Multidimensional position references in `values`.
            Must contain a single channel dimension for the index vector matching the number of dimensions to index.
            This channel dimension should list the dimension names to index as item names unless explicitly specified as `dims`.
        dims: (Optional) Dimensions indexed by `indices`.
            Alternatively, the dimensions can be specified as the item names of the channel dimension of `indices`.
            If `None` and no index item names are specified, will default to all spatial dimensions or all instance dimensions, depending on which ones are present (but not both).

    Returns:
        `Tensor` with combined batch dimensions, channel dimensions of `values` and spatial/instance dimensions of `indices`.
    """
    if not isinstance(values, Tensor):
        return tree_map(lambda v: gather(v, indices, dims), values)
    assert channel(indices).rank < 2, f"indices can at most have one channel dimension but got {indices.shape}"
    if dims is None:
        if channel(indices) and channel(indices).item_names[0]:
            dims = channel(indices).item_names[0]
        else:  # Fallback to spatial / instance
            warnings.warn(f"Indexing without item names is not recommended. Got indices {indices.shape}", SyntaxWarning, stacklevel=2)
            assert values.shape.instance.is_empty or values.shape.spatial.is_empty, f"Specify gather dimensions for values with both instance and spatial dimensions. Got {values.shape}"
            dims = values.shape.instance if values.shape.spatial.is_empty else values.shape.spatial
            assert dims, f"Specify gather dimensions for values with neither instance nor spatial dimensions. Got {values.shape}"
    dims = parse_dim_order(dims)
    assert dims, f"No indexing dimensions for tensor {values.shape} given indices {indices.shape}"
    if dims not in values.shape:
        return expand(values, non_channel(indices))
    if len(dims) > 1:
        assert channel(indices).rank > 0, f"indices must have a channel dimension listing the indexed dims {dims} but got {indices.shape}. You can create it via vec({', '.join([d+'=...' for d in dims])}) or channel(index='{','.join(dims)}'). If you have raveled indices, use unpack_dim(indices, channel, values.shape['{','.join(dims)}'])."
        assert channel(indices).rank == 1, f"indices must have a single channel dimension listing the indexed dims {dims} but got {indices.shape}."
    assert channel(indices).volume == len(dims), f"channel dim of indices must have size equal to the number of indexed dims {dims} but got {channel(indices)} which has {channel(indices).volume} entries"
    if indices.dtype.kind == bool:
        indices = to_int32(indices)
    if values._is_tracer or is_sparse(values):
        if not channel(indices):
            indices = expand(indices, channel(gather=dims))
        if not channel(indices).item_names[0]:
            indices = indices._with_shape_replaced(indices.shape.with_dim_size(channel(indices), dims))
        if values._is_tracer:
            return values._gather(indices)
        else:
            return sparse_gather(values, indices)
    broadcast = broadcast_dims(values, indices)
    treat_as_batch = non_channel(indices).non_instance.only(values.shape).without(dims)
    batch_ = ((values.shape.batch & indices.shape.batch).without(dims) & treat_as_batch).without(broadcast)
    channel_ = values.shape.without(dims).without(batch_).without(broadcast)

    def uniform_gather(values, indices):
        index_list_dims = indices.shape.non_channel.without(batch_)
        squeeze_index_list = False
        if not index_list_dims:
            index_list_dims = instance(_single_index=1)
            squeeze_index_list = True
        native_values = reshaped_native(values, [batch_, *dims, channel_])
        native_indices = reshaped_native(indices, [batch_, *index_list_dims, channel(indices)])
        backend = choose_backend(native_values, native_indices)
        native_result = backend.batched_gather_nd(native_values, native_indices)
        result = reshaped_tensor(native_result, [batch_, *index_list_dims, channel_], convert=False)
        if squeeze_index_list:
            result = result[{'_single_index': 0}]
        return result

    return broadcast_op(uniform_gather, [values, indices], )
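
For illustration, a small sketch indexing a 2D grid at two points (assuming the default backend; the dimension names are illustrative):

from phiml import math

values = math.wrap([[1, 2, 3], [4, 5, 6]], math.spatial('y,x'))
indices = math.wrap([(0, 0), (1, 2)], math.instance('points'), math.channel(vector='y,x'))
math.gather(values, indices)  # values at (y=0, x=0) and (y=1, x=2), i.e. 1 and 6 along 'points'
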
def get_format(x: phiml.math._tensors.Tensor) ‑> str

Returns the sparse storage format of a tensor.

Args

x
Tensor

Returns

One of 'coo', 'csr', 'csc', 'dense'. Stacks of tensors with different formats return 'mixed'.

Expand source code
def get_format(x: Tensor) -> str:
    """
    Returns the sparse storage format of a tensor.

    Args:
        x: `Tensor`

    Returns:
        One of `'coo'`, `'csr'`, `'csc'`, `'dense'`.
    """
    if isinstance(x, SparseCoordinateTensor):
        return 'coo'
    elif isinstance(x, CompressedSparseMatrix):
        if dual(x._uncompressed_dims):
            return 'csr'
        else:
            assert not dual(x._uncompressed_dims), f"Compressed matrix {x.shape} does not match 'csr' or 'csc' because dual dimensions are present in rows and columns."
            return 'csc'
    elif isinstance(x, TensorStack):
        formats = [get_format(t) for t in x._tensors]
        if all(f == formats[0] for f in formats):
            return formats[0]
        return 'mixed'
    elif isinstance(x, Tensor):
        return 'dense'
    b = choose_backend(x)
    if not b.is_sparse(x):
        return 'dense'
    return b.get_sparse_format(x)
def get_precision() ‑> int

Gets the current target floating point precision in bits. The precision can be set globally using set_global_precision() or locally using with precision(p):.

Any Backend method may convert floating point values to this precision, even if the input had a different precision.

Returns

16 for half, 32 for single, 64 for double

Expand source code
def get_precision() -> int:
    """
    Gets the current target floating point precision in bits.
    The precision can be set globally using `set_global_precision()` or locally using `with precision(p):`.

    Any Backend method may convert floating point values to this precision, even if the input had a different precision.

    Returns:
        16 for half, 32 for single, 64 for double
    """
    return _PRECISION[-1]
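
For illustration, how this relates to the precision() context manager and set_global_precision():

from phiml import math

math.get_precision()      # 32 by default
with math.precision(64):
    x = math.zeros()      # created with 64-bit floats under this precision setting
    math.get_precision()  # 64 inside the context
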
def get_sparsity(x: phiml.math._tensors.Tensor)

Fraction of values currently stored on disk for the given Tensor x. For sparse tensors, this is nnz / shape.

This is a lower limit on the number of values that will need to be processed for operations involving x. The actual number is often higher since many operations require data be laid out in a certain format. In these cases, missing values, such as zeros, are filled in before the operation.

The following operations may return tensors whose values are only partially stored:

  • phiml.math.expand()
  • phiml.math.pairwise_distance() with max_distance set.
  • Tracers used in phiml.math.jit_compile_linear()
  • Stacking any of the above.

Args

x
Tensor

Returns

The fraction of values that are actually stored, relative to the total number of entries in x. This does not include additional information, such as position information / indices. For sparse matrices, the number of stored values equals the number of nonzero entries.

Expand source code
def get_sparsity(x: Tensor):
    """
    Fraction of values currently stored on disk for the given `Tensor` `x`.
    For sparse tensors, this is `nnz / shape`.

    This is a lower limit on the number of values that will need to be processed for operations involving `x`.
    The actual number is often higher since many operations require data be laid out in a certain format.
    In these cases, missing values, such as zeros, are filled in before the operation.

    The following operations may return tensors whose values are only partially stored:

    * `phiml.math.expand()`
    * `phiml.math.pairwise_distance()` with `max_distance` set.
    * Tracers used in `phiml.math.jit_compile_linear()`
    * Stacking any of the above.

    Args:
        x: `Tensor`

    Returns:
        The number of values that are actually stored on disk.
        This does not include additional information, such as position information / indices.
        For sparse matrices, this is equal to the number of nonzero values.
    """
    return stored_values(x, invalid='keep').shape.volume / x.shape.volume
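
A small sketch (the exact ratio for expanded tensors depends on how the values are stored internally):

from phiml import math

dense = math.random_normal(math.spatial(x=10, y=10))
math.get_sparsity(dense)  # 1.0: every value is stored

lazy = math.expand(math.wrap(0.), math.spatial(x=10, y=10))
math.get_sparsity(lazy)   # less than 1 if the expanded values are not materialized
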
def gradient(f: Callable, wrt: str = None, get_output=True) ‑> Callable

Creates a function which computes the gradient of f.

Example:

def loss_function(x, y):
    prediction = f(x)
    loss = math.l2_loss(prediction - y)
    return loss, prediction

dx = gradient(loss_function, 'x', get_output=False)(x, y)

(loss, prediction), (dx, dy) = gradient(loss_function,
                                        'x,y', get_output=True)(x, y)

Functional gradients are implemented for the following backends:

  • PyTorch: torch.autograd.grad / torch.autograd.backward
  • TensorFlow: tf.GradientTape
  • Jax: jax.grad

When the gradient function is invoked, f is called with tensors that track the gradient. For PyTorch, arg.requires_grad = True for all positional arguments of f.

Args

f
Function to be differentiated. f must return a floating point Tensor with rank zero. It can return additional tensors which are treated as auxiliary data and will be returned by the gradient function if return_values=True. All arguments for which the gradient is computed must be of dtype float or complex.
get_output
Whether the gradient function should also return the return values of f.
wrt
Comma-separated parameter names of f with respect to which the gradient should be computed. If not specified, the gradient will be computed w.r.t. the first positional argument (highly discouraged).

Returns

Function with the same arguments as f that returns the value of f, auxiliary data and gradient of f if get_output=True, else just the gradient of f.

Expand source code
def gradient(f: Callable, wrt: str = None, get_output=True) -> Callable:
    """
    Creates a function which computes the gradient of `f`.

    Example:
    ```python
    def loss_function(x, y):
        prediction = f(x)
        loss = math.l2_loss(prediction - y)
        return loss, prediction

    dx = gradient(loss_function, 'x', get_output=False)(x, y)

    (loss, prediction), (dx, dy) = gradient(loss_function,
                                            'x,y', get_output=True)(x, y)
    ```

    Functional gradients are implemented for the following backends:

    * PyTorch: [`torch.autograd.grad`](https://pytorch.org/docs/stable/autograd.html#torch.autograd.grad) / [`torch.autograd.backward`](https://pytorch.org/docs/stable/autograd.html#torch.autograd.backward)
    * TensorFlow: [`tf.GradientTape`](https://www.tensorflow.org/api_docs/python/tf/GradientTape)
    * Jax: [`jax.grad`](https://jax.readthedocs.io/en/latest/jax.html#jax.grad)

    When the gradient function is invoked, `f` is called with tensors that track the gradient.
    For PyTorch, `arg.requires_grad = True` for all positional arguments of `f`.

    Args:
        f: Function to be differentiated.
            `f` must return a floating point `Tensor` with rank zero.
            It can return additional tensors which are treated as auxiliary data and will be returned by the gradient function if `return_values=True`.
            All arguments for which the gradient is computed must be of dtype float or complex.
        get_output: Whether the gradient function should also return the return values of `f`.
        wrt: Comma-separated parameter names of `f` with respect to which the gradient should be computed.
            If not specified, the gradient will be computed w.r.t. the first positional argument (highly discouraged).

    Returns:
        Function with the same arguments as `f` that returns the value of `f`, auxiliary data and gradient of `f` if `get_output=True`, else just the gradient of `f`.
    """
    f_params, wrt = simplify_wrt(f, wrt)
    return GradientFunction(f, f_params, wrt, get_output, is_f_scalar=True)
def grid_sample(grid: phiml.math._tensors.Tensor, coordinates: phiml.math._tensors.Tensor, extrap: Union[ForwardRef('e_.Extrapolation'), float, str], **kwargs)

Samples values of grid at the locations referenced by coordinates. Values lying in between sample points are determined via linear interpolation.

If coordinates has a channel dimension, its item names are used to determine the grid dimensions of grid. Otherwise, the spatial dims of grid will be used.

For values outside the valid bounds of grid (coord < 0 or coord > grid.shape - 1), extrap is used to determine the neighboring grid values. If the extrapolation does not support resampling, the grid is padded by one cell layer before resampling. In that case, values lying further outside will not be sampled according to the extrapolation.

Args

grid
Grid with at least one spatial dimension and no instance dimensions.
coordinates
Coordinates with a single channel dimension called 'vector'. The size of the vector dimension must match the number of spatial dimensions of grid.
extrap
Extrapolation used to determine the values of grid outside its valid bounds.
kwargs
Additional information for the extrapolation.

Returns

Tensor with channel dimensions of grid, spatial and instance dimensions of coordinates and combined batch dimensions.

Expand source code
def grid_sample(grid: Tensor, coordinates: Tensor, extrap: Union['e_.Extrapolation', float, str], **kwargs):
    """
    Samples values of `grid` at the locations referenced by `coordinates`.
    Values lying in between sample points are determined via linear interpolation.

    If `coordinates` has a channel dimension, its item names are used to determine the grid dimensions of `grid`.
    Otherwise, the spatial dims of `grid` will be used.

    For values outside the valid bounds of `grid` (`coord < 0 or coord > grid.shape - 1`), `extrap` is used to determine the neighboring grid values.
    If the extrapolation does not support resampling, the grid is padded by one cell layer before resampling.
    In that case, values lying further outside will not be sampled according to the extrapolation.

    Args:
        grid: Grid with at least one spatial dimension and no instance dimensions.
        coordinates: Coordinates with a single channel dimension called `'vector'`.
            The size of the `vector` dimension must match the number of spatial dimensions of `grid`.
        extrap: Extrapolation used to determine the values of `grid` outside its valid bounds.
        kwargs: Additional information for the extrapolation.

    Returns:
        `Tensor` with channel dimensions of `grid`, spatial and instance dimensions of `coordinates` and combined batch dimensions.
    """
    extrap = e_.as_extrapolation(extrap) if extrap is not None else None
    if not channel(coordinates):
        assert spatial(grid).rank == 1, f"grid must have 1 spatial dimension if coordinates does not have a channel dimension"
        coordinates = expand(coordinates, channel(vector=spatial(grid)))
    assert channel(coordinates).rank == 1, f"coordinates must have at most one channel dimension but got {channel(coordinates)}"
    coordinates = rename_dims(coordinates, channel, 'vector')
    result = broadcast_op(functools.partial(_grid_sample, extrap=extrap, pad_kwargs=kwargs), [grid, coordinates])
    return result
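
For illustration, a 1D linear-interpolation sketch (the constant extrapolation 0 is never used here since both coordinates lie inside the grid):

from phiml import math

grid = math.wrap([0., 10., 20., 30.], math.spatial('x'))
coords = math.wrap([(0.5,), (2.25,)], math.instance('points'), math.channel(vector='x'))
math.grid_sample(grid, coords, extrap=0.)  # linear interpolation: 5.0 and 22.5 along 'points'
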
def histogram(values: phiml.math._tensors.Tensor, bins: phiml.math._shape.Shape = (binsˢ=30), weights=1, same_bins: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = None)

Compute a histogram of a distribution of values.

Important Note: In its current implementation, values outside the range of bins may or may not be added to the outermost bins.

Args

values
Tensor listing the values to be binned along spatial or instance dimensions. values may not contain channel or dual dimensions.
bins
Either Shape specifying the number of equally-spaced bins to use or bin edge positions as Tensor with a spatial or instance dimension.
weights
Tensor assigning a weight to every value in values that will be added to the bin, default 1.
same_bins
Only used if bins is given as a Shape. Use the same bin sizes and positions across these batch dimensions. By default, bins will be chosen independently for each example.

Returns

hist
Tensor containing all batch dimensions and the bins dimension with dtype matching weights.
bin_edges
Tensor
bin_center
Tensor
Expand source code
def histogram(values: Tensor, bins: Shape or Tensor = spatial(bins=30), weights=1, same_bins: DimFilter = None):
    """
    Compute a histogram of a distribution of values.

    *Important Note:* In its current implementation, values outside the range of bins may or may not be added to the outermost bins.

    Args:
        values: `Tensor` listing the values to be binned along spatial or instance dimensions.
            `values` may not contain channel or dual dimensions.
        bins: Either `Shape` specifying the number of equally-spaced bins to use or bin edge positions as `Tensor` with a spatial or instance dimension.
        weights: `Tensor` assigning a weight to every value in `values` that will be added to the bin, default 1.
        same_bins: Only used if `bins` is given as a `Shape`.
            Use the same bin sizes and positions across these batch dimensions.
            By default, bins will be chosen independently for each example.

    Returns:
        hist: `Tensor` containing all batch dimensions and the `bins` dimension with dtype matching `weights`.
        bin_edges: `Tensor`
        bin_center: `Tensor`
    """
    assert isinstance(values, Tensor), f"values must be a Tensor but got {type(values)}"
    assert channel(values).is_empty, f"Only 1D histograms supported but values have a channel dimension: {values.shape}"
    assert dual(values).is_empty, f"values cannot contain dual dimensions but got shape {values.shape}"
    weights = wrap(weights)
    if isinstance(bins, Shape):
        def equal_bins(v):
            return linspace(finite_min(v, shape), finite_max(v, shape), bins.with_size(bins.size + 1))
        bins = broadcast_op(equal_bins, [values], iter_dims=(batch(values) & batch(weights)).without(same_bins))
    assert isinstance(bins, Tensor), f"bins must be a Tensor but got {type(bins)}"
    assert non_batch(bins).rank == 1, f"bins must contain exactly one spatial or instance dimension listing the bin edges but got shape {bins.shape}"
    assert channel(bins).rank == dual(bins).rank == 0, f"bins cannot have any channel or dual dimensions but got shape {bins.shape}"
    tensors = [values, bins] if weights is None else [values, weights, bins]
    backend = choose_backend_t(*tensors)

    def histogram_uniform(values: Tensor, bin_edges: Tensor, weights):
        batch_dims = batch(values) & batch(bin_edges) & batch(weights)
        value_dims = non_batch(values) & non_batch(weights)
        values_native = reshaped_native(values, [batch_dims, value_dims])
        weights_native = reshaped_native(weights, [batch_dims, value_dims])
        bin_edges_native = reshaped_native(bin_edges, [batch_dims, non_batch(bin_edges)])
        hist_native = backend.histogram1d(values_native, weights_native, bin_edges_native)
        hist = reshaped_tensor(hist_native, [batch_dims, non_batch(bin_edges).with_size(non_batch(bin_edges).size - 1)])
        return hist
        # return stack_tensors([bin_edges, hist], channel(vector=[bin_edges.shape.name, 'hist']))

    bin_center = (bins[{non_batch(bins).name: slice(1, None)}] + bins[{non_batch(bins).name: slice(0, -1)}]) / 2
    bin_center = expand(bin_center, channel(vector=non_batch(bins).names))
    bin_edges = stack_tensors([bins], channel(values)) if channel(values) else bins
    return broadcast_op(histogram_uniform, [values, bins, weights]), bin_edges, bin_center
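
A minimal usage sketch:

from phiml import math

data = math.random_uniform(math.instance(samples=1000))
hist, bin_edges, bin_centers = math.histogram(data, bins=math.spatial(bins=10))
# hist has a 'bins' dim of size 10, bin_edges lists the 11 edge positions
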
def i2b(value)

Change the type of all instance dimensions of value to batch dimensions. See rename_dims().

Expand source code
def i2b(value):
    """ Change the type of all *instance* dimensions of `value` to *batch* dimensions. See `rename_dims`. """
    return rename_dims(value, instance, batch)
def identity(x)

Identity function for one argument. Vararg functions cannot be transformed as the argument names are unknown.

Args

x
Positional argument.

Returns

x

Expand source code
def identity(x):
    """
    Identity function for one argument.
    Vararg functions cannot be transformed as the argument names are unknown.

    Args:
        x: Positional argument.

    Returns:
        `x`
    """
    return x
def ifft(k: phiml.math._tensors.Tensor, dims: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function spatial>)

Inverse of fft().

Args

k
Complex or float Tensor with at least one spatial dimension.
dims
Dimensions along which to perform the inverse FFT. If None, performs the inverse FFT along all spatial dimensions of k.

Returns

Ƒ⁻¹(k) as complex Tensor

Expand source code
def ifft(k: Tensor, dims: DimFilter = spatial):
    """
    Inverse of `fft()`.

    Args:
        k: Complex or float `Tensor` with at least one spatial dimension.
        dims: Dimensions along which to perform the inverse FFT.
            If `None`, performs the inverse FFT along all spatial dimensions of `k`.

    Returns:
        *Ƒ<sup>-1</sup>(k)* as complex `Tensor`
    """
    dims = k.shape.only(dims)
    k_native = k.native(k.shape)
    result_native = choose_backend(k_native).ifft(k_native, k.shape.indices(dims))
    return NativeTensor(result_native, k.shape)
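
For illustration, an FFT round trip:

from phiml import math

x = math.random_normal(math.spatial(x=32))
k = math.fft(x)                   # complex spectrum
x_back = math.real(math.ifft(k))  # recovers x up to floating-point error
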
def imag(x: ~TensorOrTree) ‑> ~TensorOrTree

Returns the imaginary part of x. If x does not store complex numbers, returns a zero tensor with the same shape and dtype as this tensor.

See Also: real(), conjugate().

Args

x
Tensor or PhiTreeNode or native tensor.

Returns

Imaginary component of x if x is complex, zeros otherwise.

Expand source code
def imag(x: TensorOrTree) -> TensorOrTree:
    """
    Returns the imaginary part of `x`.
    If `x` does not store complex numbers, returns a zero tensor with the same shape and dtype as this tensor.

    See Also:
        `real()`, `conjugate()`.

    Args:
        x: `Tensor` or `phiml.math.magic.PhiTreeNode` or native tensor.

    Returns:
        Imaginary component of `x` if `x` is complex, zeros otherwise.
    """
    return _backend_op1(x, Backend.imag)
def incomplete_gamma(a: ~TensorOrTree, x: ~TensorOrTree, upper=False, regularized=True) ‑> ~TensorOrTree

Computes the incomplete gamma function.

Args

a
Positive parameter, Tensor or tree.
x
Non-negative argument, Tensor or tree.
upper
Whether to compute the upper integral (x to infinity) or the lower integral (0 to x).
regularized
Whether the integral is divided by Γ(a).
Expand source code
def incomplete_gamma(a: TensorOrTree, x: TensorOrTree, upper=False, regularized=True) -> TensorOrTree:
    """
    Computes the incomplete gamma function.

    Args:
        a: Positive parameter, `Tensor` or tree.
        x: Non-negative argument, `Tensor` or tree.
        upper: Whether to complete the upper integral (x to infinity) or the lower integral (0 to x).
        regularized: Whether the integral is divided by Γ(a).
    """
    call = lambda a, x: incomplete_gamma(a, x, upper=upper, regularized=regularized)
    if upper:
        reg = custom_op2(a, x, call, lambda a, x: choose_backend(a, x).gamma_inc_u(a, x), 'gamma_inc_u')
    else:
        reg = custom_op2(a, x, call, lambda a, x: choose_backend(a, x).gamma_inc_l(a, x), 'gamma_inc_l')
    return reg if regularized else reg * exp(log_gamma(a))
def index_shift(x: phiml.math._tensors.Tensor, offsets: Sequence[Union[int, phiml.math._tensors.Tensor]], padding: Union[Extrapolation, float, phiml.math._tensors.Tensor, str, None] = None) ‑> List[phiml.math._tensors.Tensor]

Returns shifted versions of x according to offsets where each offset is an int vector indexing some dimensions of x.

See Also: shift(), neighbor_reduce().

Args

x
Input grid-like Tensor.
offsets
Sequence of offset vectors. Each offset is an int vector indexing some dimensions of x. Offsets can have different subsets of the dimensions of x. Missing dimensions count as 0. The value 0 can also be passed as a zero-shift.
padding
Padding to be performed at the boundary so that the shifted versions have the same size as x. Must be one of the following: Extrapolation, Tensor or number for constant extrapolation, name of extrapolation as str. Can be set to None to disable padding. Then the result tensors will be smaller than x.

Returns

list of shifted tensors. The number of return tensors is equal to the number of offsets.

Expand source code
def index_shift(x: Tensor, offsets: Sequence[Union[int, Tensor]], padding: Union[Extrapolation, float, Tensor, str, None] = None) -> List[Tensor]:
    """
    Returns shifted versions of `x` according to `offsets` where each offset is an `int` vector indexing some dimensions of `x`.

    See Also:
        `shift`, `neighbor_reduce`.

    Args:
        x: Input grid-like `Tensor`.
        offsets: Sequence of offset vectors. Each offset is an `int` vector indexing some dimensions of `x`.
            Offsets can have different subsets of the dimensions of `x`. Missing dimensions count as 0.
            The value `0` can also be passed as a zero-shift.
        padding: Padding to be performed at the boundary so that the shifted versions have the same size as `x`.
            Must be one of the following: `Extrapolation`, `Tensor` or number for constant extrapolation, name of extrapolation as `str`.
            Can be set to `None` to disable padding. Then the result tensors will be smaller than `x`.

    Returns:
        `list` of shifted tensors. The number of return tensors is equal to the number of `offsets`.
    """
    _, widths_list, min_by_dim, max_by_dim = join_index_offsets(offsets, negate=True)
    if padding is not None:
        pad_lower = {d: max(0, -m) for d, m in min_by_dim.items()}
        pad_upper = {d: max(0, m) for d, m in max_by_dim.items()}
        widths = {d: (pad_lower[d], pad_upper[d]) for d in pad_lower.keys()}
        x = math.pad(x, widths, mode=padding)
    return [math.pad(x, w, extrapolation.NONE) for w in widths_list]
def instance(*args, **dims: Union[int, str, tuple, list, phiml.math._shape.Shape, ForwardRef('Tensor')]) ‑> phiml.math._shape.Shape

Returns the instance dimensions of an existing Shape or creates a new Shape with only instance dimensions.

Usage for filtering instance dimensions:

>>> instance_dims = instance(shape)
>>> instance_dims = instance(tensor)

Usage for creating a Shape with only instance dimensions:

>>> instance_shape = instance('undef', points=2)
(points=2, undef=None)

Here, the dimension undef is created with an undefined size of None. Undefined sizes are automatically filled in by tensor(), wrap(), stack() and concat().

To create a shape with multiple types, use merge_shapes(), concat_shapes() or the syntax shape1 & shape2.

See Also: channel(), batch(), spatial()

Args

*args

Either

  • Shape or Tensor to filter or
  • Names of dimensions with undefined sizes as str.
**dims
Dimension sizes and names. Must be empty when used as a filter operation.

Returns

Shape containing only dimensions of type instance.

Expand source code
def instance(*args, **dims: Union[int, str, tuple, list, Shape, 'Tensor']) -> Shape:
    """
    Returns the instance dimensions of an existing `Shape` or creates a new `Shape` with only instance dimensions.

    Usage for filtering instance dimensions:
    >>> instance_dims = instance(shape)
    >>> instance_dims = instance(tensor)

    Usage for creating a `Shape` with only instance dimensions:
    >>> instance_shape = instance('undef', points=2)
    (points=2, undef=None)

    Here, the dimension `undef` is created with an undefined size of `None`.
    Undefined sizes are automatically filled in by `tensor`, `wrap`, `stack` and `concat`.

    To create a shape with multiple types, use `merge_shapes()`, `concat_shapes()` or the syntax `shape1 & shape2`.

    See Also:
        `channel`, `batch`, `spatial`

    Args:
        *args: Either

            * `Shape` or `Tensor` to filter or
            * Names of dimensions with undefined sizes as `str`.

        **dims: Dimension sizes and names. Must be empty when used as a filter operation.

    Returns:
        `Shape` containing only dimensions of type instance.
    """
    from .magic import Shaped
    if all(isinstance(arg, str) for arg in args) or dims:
        return _construct_shape(INSTANCE_DIM, *args, **dims)
    elif len(args) == 1 and isinstance(args[0], Shape):
        return args[0].instance
    elif len(args) == 1 and isinstance(args[0], Shaped):
        return shape(args[0]).instance
    else:
        raise AssertionError(f"instance() must be called either as a selector instance(Shape) or instance(Tensor) or as a constructor instance(*names, **dims). Got *args={args}, **dims={dims}")
def is_finite(x: ~TensorOrTree) ‑> ~TensorOrTree

Returns a Tensor or PhiTreeNode matching x with values True where x has a finite value and False otherwise.

Expand source code
def is_finite(x: TensorOrTree) -> TensorOrTree:
    """ Returns a `Tensor` or `phiml.math.magic.PhiTreeNode` matching `x` with values `True` where `x` has a finite value and `False` otherwise. """
    return _backend_op1(x, Backend.isfinite)
def is_inf(x: ~TensorOrTree) ‑> ~TensorOrTree

Returns a Tensor or PhiTreeNode matching x with values True where x is +inf or -inf and False otherwise.

Expand source code
def is_inf(x: TensorOrTree) -> TensorOrTree:
    """ Returns a `Tensor` or `phiml.math.magic.PhiTreeNode` matching `x` with values `True` where `x` is `+inf` or `-inf` and `False` otherwise. """
    return _backend_op1(x, Backend.isinf)
def is_nan(x: ~TensorOrTree) ‑> ~TensorOrTree

Returns a Tensor or PhiTreeNode matching x with values True where x is NaN and False otherwise.

Expand source code
def is_nan(x: TensorOrTree) -> TensorOrTree:
    """ Returns a `Tensor` or `phiml.math.magic.PhiTreeNode` matching `x` with values `True` where `x` is `NaN` and `False` otherwise. """
    return _backend_op1(x, Backend.isnan)
def is_scalar(value) ‑> bool

Checks whether value has no dimensions.

Args

value
Tensor or Python primitive or native tensor.

Returns

bool

Expand source code
def is_scalar(value) -> bool:
    """
    Checks whether `value` has no dimensions.

    Args:
        value: `Tensor` or Python primitive or native tensor.

    Returns:
        `bool`
    """
    if isinstance(value, Tensor):
        return value.shape.rank == 0
    elif isinstance(value, numbers.Number):
        return True
    else:
        return len(choose_backend(value).staticshape(value)) == 0
def is_sparse(x: phiml.math._tensors.Tensor)

Checks whether a tensor is represented in COO, CSR or CSC format. If the tensor is neither sparse nor dense, this function raises an error.

Args

x
Tensor to test.

Returns

True if x is sparse, False if x is dense.

Raises

AssertionError if x is neither sparse nor fully dense.

Expand source code
def is_sparse(x: Tensor):
    """
    Checks whether a tensor is represented in COO, CSR or CSC format.
    If the tensor is neither sparse nor dense, this function raises an error.

    Args:
        x: `Tensor` to test.

    Returns:
        `True` if `x` is sparse, `False` if `x` is dense.

    Raises:
        `AssertionError` if `x` is neither sparse nor fully dense.
    """
    f = get_format(x)
    if f == 'dense':
        return False
    if f in ['csr', 'csc', 'coo']:
        return True
    raise AssertionError(f"Tensor {x} is neither sparse nor dense")
def iterate(f: Callable, iterations: Union[int, phiml.math._shape.Shape], *x0, f_kwargs: dict = None, range: Callable = builtins.range, measure: Callable = None, **f_kwargs_)

Repeatedly call f, passing the previous output as the next input.

If the function outputs more values than the number of arguments in x0, only the first len(x0) ones are passed to f. However, all outputs will be returned by iterate().

Args

f
Function to call. Must be callable as f(x0, **f_kwargs) and f(f(x0, **f_kwargs), **f_kwargs).
iterations
Number of iterations as int or single-dimension Shape. If int, returns the final output of f. If Shape, returns the trajectory (x0 and all outputs of f), stacking the values along this dimension.
x0
Initial positional arguments for f. Values that are initially None are not stacked with the other values if iterations is a Shape.
range
Range function. Can be used to generate tqdm output by passing trange.
measure
Function without arguments to call at the start and end of (and, if isinstance(iterations, Shape), in between) the calls to f. The measure of each call to f is measure() after minus measure() before the call.
f_kwargs
Additional keyword arguments to be passed to f. These arguments can be of any type.
f_kwargs_
More keyword arguments.

Returns

trajectory
Trajectory of final output of f, depending on iterations.
measured
Only if measure was specified, returns the measured value or trajectory tensor.
Expand source code
def iterate(f: Callable,
            iterations: Union[int, Shape],
            *x0,
            f_kwargs: dict = None,
            range: Callable = range,
            measure: Callable = None,
            **f_kwargs_):
    """
    Repeatedly call `f`, passing the previous output as the next input.

    If the function outputs more values than the number of arguments in `x0`, only the first `len(x0)` ones are passed to `f`.
    However, all outputs will be returned by `iterate`.

    Args:
        f: Function to call. Must be callable as `f(x0, **f_kwargs)` and `f(f(x0, **f_kwargs), **f_kwargs)`.
        iterations: Number of iterations as `int` or single-dimension `Shape`.
            If `int`, returns the final output of `f`.
            If `Shape`, returns the trajectory (`x0` and all outputs of `f`), stacking the values along this dimension.
        x0: Initial positional arguments for `f`.
            Values that are initially `None` are not stacked with the other values if `iterations` is a `Shape`.
        range: Range function. Can be used to generate tqdm output by passing `trange`.
        measure: Function without arguments to call at the start and end (and in between if `isinstance(iterations, Shape)`) calls to `f`.
            The measure of each call to `f` is `measure()` after minus `measure()` before the call.
        f_kwargs: Additional keyword arguments to be passed to `f`.
            These arguments can be of any type.
        f_kwargs_: More keyword arguments.

    Returns:
        trajectory: Trajectory of final output of `f`, depending on `iterations`.
        measured: Only if `measure` was specified, returns the measured value or trajectory tensor.
    """
    if f_kwargs is None:
        f_kwargs = {}
    f_kwargs.update(f_kwargs_)
    x = x0
    if isinstance(iterations, int):
        start_time = measure() if measure else None
        for _ in range(iterations):
            x = f(*x[:len(x0)], **f_kwargs)
            x = x if isinstance(x, tuple) else (x,)
            if len(x) < len(x0):
                raise AssertionError(f"Function to iterate must return at least {len(x0)} outputs to match input but got {x}")
        result = x[0] if len(x) == 1 else x
        return (result, measure() - start_time) if measure else result
    elif isinstance(iterations, Shape):
        xs = [x0]
        ts = [measure()] if measure else None
        for _ in range(iterations.size):
            x = f(*x[:len(x0)], **f_kwargs)
            x = x if isinstance(x, tuple) else (x,)
            if len(x) < len(x0):
                raise AssertionError(f"Function to iterate must return at least {len(x0)} outputs to match input but got {x}")
            elif len(x) > len(x0):
                xs[0] = xs[0] + (None,) * (len(x) - len(x0))
            xs.append(x)
            if measure:
                ts.append(measure())
        xs = [stack(item[1:] if item[0] is None else item, iterations.with_size(None)) for item in zip(*xs)]
        result = xs[0] if len(xs) == 1 else xs
        ts = np.asarray(ts)
        return (result, wrap(ts[1:] - ts[:-1], iterations.with_size(None))) if measure else result
    else:
        raise ValueError(f"iterations must be an int or Shape but got {type(iterations)}")
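
For illustration, a small sketch iterating a decay map (assuming the default backend; the dimension name is illustrative):

from phiml import math

step = lambda x: 0.5 * x
final = math.iterate(step, 10, math.wrap(1.))                   # final value after 10 steps
traj = math.iterate(step, math.batch(steps=10), math.wrap(1.))  # x0 plus all 10 outputs, stacked along 'steps'
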
def jacobian(f: Callable, wrt: str = None, get_output=True) ‑> Callable

Creates a function which computes the Jacobian matrix of f. For scalar functions, consider using gradient() instead.

Example:

def loss_function(x, y):
    prediction = model(x)  # `model` is a placeholder for the differentiable computation
    loss = math.l2_loss(prediction - y)
    return loss, prediction

dx = jacobian(loss_function, wrt='x', get_output=False)(x, y)

(loss, prediction), (dx, dy) = jacobian(loss_function,
                                    wrt='x,y', get_output=True)(x, y)

Functional gradients are implemented for the following backends:

  • PyTorch: torch.autograd.grad / torch.autograd.backward
  • TensorFlow: tf.GradientTape
  • Jax: jax.grad

When the gradient function is invoked, f is called with tensors that track the gradient. For PyTorch, arg.requires_grad = True for all positional arguments of f.

Args

f
Function to be differentiated. f must return a floating point Tensor with rank zero. It can return additional tensors which are treated as auxiliary data and will be returned by the gradient function if return_values=True. All arguments for which the gradient is computed must be of dtype float or complex.
get_output
Whether the gradient function should also return the return values of f.
wrt
Comma-separated parameter names of f with respect to which the gradient should be computed. If not specified, the gradient will be computed w.r.t. the first positional argument (highly discouraged).

Returns

Function with the same arguments as f that returns the value of f, auxiliary data and Jacobian of f if get_output=True, else just the Jacobian of f.

Expand source code
def jacobian(f: Callable, wrt: str = None, get_output=True) -> Callable:
    """
    Creates a function which computes the Jacobian matrix of `f`.
    For scalar functions, consider using `gradient()` instead.

    Example:
    ```python
    def loss_function(x, y):
        prediction = model(x)  # `model` is a placeholder for the differentiable computation
        loss = math.l2_loss(prediction - y)
        return loss, prediction

    dx = jacobian(loss_function, wrt='x', get_output=False)(x, y)

    (loss, prediction), (dx, dy) = jacobian(loss_function,
                                        wrt='x,y', get_output=True)(x, y)
    ```

    Functional gradients are implemented for the following backends:

    * PyTorch: [`torch.autograd.grad`](https://pytorch.org/docs/stable/autograd.html#torch.autograd.grad) / [`torch.autograd.backward`](https://pytorch.org/docs/stable/autograd.html#torch.autograd.backward)
    * TensorFlow: [`tf.GradientTape`](https://www.tensorflow.org/api_docs/python/tf/GradientTape)
    * Jax: [`jax.grad`](https://jax.readthedocs.io/en/latest/jax.html#jax.grad)

    When the gradient function is invoked, `f` is called with tensors that track the gradient.
    For PyTorch, `arg.requires_grad = True` for all positional arguments of `f`.

    Args:
        f: Function to be differentiated.
            `f` must return a floating point `Tensor` with rank zero.
            It can return additional tensors which are treated as auxiliary data and will be returned by the gradient function if `return_values=True`.
            All arguments for which the gradient is computed must be of dtype float or complex.
        get_output: Whether the gradient function should also return the return values of `f`.
        wrt: Comma-separated parameter names of `f` with respect to which the gradient should be computed.
            If not specified, the gradient will be computed w.r.t. the first positional argument (highly discouraged).

    Returns:
        Function with the same arguments as `f` that returns the value of `f`, auxiliary data and Jacobian of `f` if `get_output=True`, else just the Jacobian of `f`.
    """
    f_params, wrt = simplify_wrt(f, wrt)
    return GradientFunction(f, f_params, wrt, get_output, is_f_scalar=False)
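A runnable sketch of the pattern above, assuming a differentiable backend such as PyTorch is installed and selected; the tensor values and dimension names are only illustrative:

```python
from phiml import math

math.use('torch')  # jacobian() requires PyTorch, TensorFlow or Jax

def loss(x, y):
    return math.l2_loss(x - y)  # scalar output: reduces all non-batch dims

x = math.tensor([1., 2., 3.], math.channel(vector='a,b,c'))
y = math.zeros(math.channel(vector='a,b,c'))
dx = math.jacobian(loss, wrt='x', get_output=False)(x, y)  # gradient w.r.t. x, same shape as x
```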
def jit_compile(f: Callable = None, auxiliary_args: str = '', forget_traces: bool = None) ‑> Callable

Compiles a graph based on the function f. The graph compilation is performed just-in-time (jit), e.g. when the returned function is called for the first time.

The traced function will compute the same result as f but may run much faster. Some checks may be disabled in the compiled function.

Can be used as a decorator:

@math.jit_compile
def my_function(x: math.Tensor) -> math.Tensor:

Invoking the returned function may invoke re-tracing / re-compiling f after the first call if either

  • it is called with a different number of arguments,
  • the tensor arguments have different dimension names or types (the dimension order also counts),
  • any Tensor arguments require a different backend than previous invocations,
  • PhiTreeNode positional arguments do not match in non-variable properties.

Compilation is implemented for the following backends:

  • PyTorch: torch.jit.trace
  • TensorFlow: tf.function
  • Jax: jax.jit

Jit-compilations cannot be nested, i.e. you cannot call jit_compile() while another function is being compiled. An exception to this is jit_compile_linear() which can be called from within a jit-compiled function.

See Also: jit_compile_linear()

Args

f
Function to be traced. All positional arguments must be of type Tensor or PhiTreeNode returning a single Tensor or PhiTreeNode.
auxiliary_args
Comma-separated parameter names of arguments that are not relevant to backpropagation.
forget_traces
If True, only remembers the most recent compiled instance of this function. Upon tracing a new instance (due to changed shapes or auxiliary args), the previous traces are deleted.

Returns

Function with similar signature and return values as f.

Expand source code
def jit_compile(f: Callable = None, auxiliary_args: str = '', forget_traces: bool = None) -> Callable:
    """
    Compiles a graph based on the function `f`.
    The graph compilation is performed just-in-time (jit), e.g. when the returned function is called for the first time.

    The traced function will compute the same result as `f` but may run much faster.
    Some checks may be disabled in the compiled function.

    Can be used as a decorator:
    ```python
    @math.jit_compile
    def my_function(x: math.Tensor) -> math.Tensor:
    ```

    Invoking the returned function may invoke re-tracing / re-compiling `f` after the first call if either

    * it is called with a different number of arguments,
    * the tensor arguments have different dimension names or types (the dimension order also counts),
    * any `Tensor` arguments require a different backend than previous invocations,
    * `phiml.math.magic.PhiTreeNode` positional arguments do not match in non-variable properties.

    Compilation is implemented for the following backends:

    * PyTorch: [`torch.jit.trace`](https://pytorch.org/docs/stable/jit.html)
    * TensorFlow: [`tf.function`](https://www.tensorflow.org/guide/function)
    * Jax: [`jax.jit`](https://jax.readthedocs.io/en/latest/notebooks/quickstart.html#using-jit-to-speed-up-functions)

    Jit-compilations cannot be nested, i.e. you cannot call `jit_compile()` while another function is being compiled.
    An exception to this is `jit_compile_linear()` which can be called from within a jit-compiled function.

    See Also:
        `jit_compile_linear()`

    Args:
        f: Function to be traced.
            All positional arguments must be of type `Tensor` or `phiml.math.magic.PhiTreeNode` returning a single `Tensor` or `phiml.math.magic.PhiTreeNode`.
        auxiliary_args: Comma-separated parameter names of arguments that are not relevant to backpropagation.
        forget_traces: If `True`, only remembers the most recent compiled instance of this function.
            Upon tracing a new instance (due to changed shapes or auxiliary args), the previous traces are deleted.

    Returns:
        Function with similar signature and return values as `f`.
    """
    if f is None:
        kwargs = {k: v for k, v in locals().items() if v is not None}
        return partial(jit_compile, **kwargs)
    auxiliary_args = set(s.strip() for s in auxiliary_args.split(',') if s.strip())
    return f if isinstance(f, (JitFunction, LinearFunction)) and f.auxiliary_args == auxiliary_args else JitFunction(f, auxiliary_args, forget_traces or False)
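A short usage sketch; with the NumPy backend the decorated function simply runs eagerly, while PyTorch, TensorFlow and Jax trace and compile it on the first call:

```python
from phiml import math

@math.jit_compile
def step(x: math.Tensor) -> math.Tensor:
    return x + 0.1 * math.sin(x)

x = math.random_normal(math.spatial(x=8))
y = step(x)  # traced on the first call (on compiling backends); later calls with matching shapes reuse the trace
```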
def jit_compile_linear(f: Callable[[~X], ~Y] = None, auxiliary_args: str = None, forget_traces: bool = None) ‑> phiml.math._functional.LinearFunction[~X, ~Y]

Compile an optimized representation of the linear function f. For backends that support sparse tensors, a sparse matrix will be constructed for f.

Can be used as a decorator:

@math.jit_compile_linear
def my_linear_function(x: math.Tensor) -> math.Tensor:

Unlike jit_compile(), jit_compile_linear() can be called during a regular jit compilation.

See Also: jit_compile()

Args

f
Function that is linear in its positional arguments. All positional arguments must be of type Tensor and f must return a Tensor.
auxiliary_args
Which parameters f is not linear in. These arguments are treated as conditioning arguments and will cause re-tracing on change.
forget_traces
If True, only remembers the most recent compiled instance of this function. Upon tracing a new instance (due to changed shapes or auxiliary args), the previous traces are deleted.

Returns

LinearFunction with similar signature and return values as f.

Expand source code
def jit_compile_linear(f: Callable[[X], Y] = None, auxiliary_args: str = None, forget_traces: bool = None) -> 'LinearFunction[X, Y]':
    """
    Compile an optimized representation of the linear function `f`.
    For backends that support sparse tensors, a sparse matrix will be constructed for `f`.

    Can be used as a decorator:
    ```python
    @math.jit_compile_linear
    def my_linear_function(x: math.Tensor) -> math.Tensor:
    ```

    Unlike `jit_compile()`, `jit_compile_linear()` can be called during a regular jit compilation.

    See Also:
        `jit_compile()`

    Args:
        f: Function that is linear in its positional arguments.
            All positional arguments must be of type `Tensor` and `f` must return a `Tensor`.
        auxiliary_args: Which parameters `f` is not linear in. These arguments are treated as conditioning arguments and will cause re-tracing on change.
        forget_traces: If `True`, only remembers the most recent compiled instance of this function.
            Upon tracing a new instance (due to changed shapes or auxiliary args), the previous traces are deleted.

    Returns:
        `LinearFunction` with similar signature and return values as `f`.
    """
    if f is None:
        kwargs = {k: v for k, v in locals().items() if v is not None}
        return partial(jit_compile_linear, **kwargs)
    if isinstance(f, JitFunction):
        f = f.f  # cannot trace linear function from jitted version
    if isinstance(auxiliary_args, str):
        auxiliary_args = set(s.strip() for s in auxiliary_args.split(',') if s.strip())
    else:
        assert auxiliary_args is None
        f_params = function_parameters(f)
        auxiliary_args = f_params[1:]
    return f if isinstance(f, LinearFunction) and f.auxiliary_args == auxiliary_args else LinearFunction(f, auxiliary_args, forget_traces or False)
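A sketch of tracing a linear operator, assuming the traced expression uses only linear operations (here a scaled Laplacian):

```python
from phiml import math

@math.jit_compile_linear
def op(x):
    return x - 0.1 * math.laplace(x)  # linear in x

x = math.random_uniform(math.spatial(x=16))
y = op(x)  # a matrix representation of op is built when the function is first traced
```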
def l1_loss(x, reduce: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function non_batch>) ‑> phiml.math._tensors.Tensor

Computes ∑ᵢ ‖xᵢ‖₁, summing over all non-batch dimensions.

Args

x
Tensor or PhiTreeNode or 0D or 1D native tensor. For PhiTreeNode objects, only the sum over all value attributes is computed.
reduce
Dimensions to reduce as DimFilter.

Returns

loss
Tensor
Expand source code
def l1_loss(x, reduce: DimFilter = math.non_batch) -> Tensor:
    """
    Computes *∑<sub>i</sub> ||x<sub>i</sub>||<sub>1</sub>*, summing over all non-batch dimensions.

    Args:
        x: `Tensor` or `phiml.math.magic.PhiTreeNode` or 0D or 1D native tensor.
            For `phiml.math.magic.PhiTreeNode` objects, only the sum over all value attributes is computed.
        reduce: Dimensions to reduce as `DimFilter`.

    Returns:
        loss: `Tensor`
    """
    if isinstance(x, Tensor):
        return math.sum_(abs(x), reduce)
    elif isinstance(x, PhiTreeNode):
        return sum([l1_loss(getattr(x, a), reduce) for a in variable_values(x)])
    else:
        try:
            backend = math.choose_backend(x)
            shape = backend.staticshape(x)
            if len(shape) == 0:
                return abs(x)
            elif len(shape) == 1:
                return backend.sum(abs(x))
            else:
                raise ValueError("l1_loss is only defined for 0D and 1D native tensors. For higher-dimensional data, use Φ-ML tensors.")
        except math.NoBackendFound:
            raise ValueError(x)
def l2_loss(x, reduce: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function non_batch>) ‑> phiml.math._tensors.Tensor

Computes ∑ᵢ ‖xᵢ‖₂² / 2, summing over all non-batch dimensions.

Args

x
Tensor or PhiTreeNode or 0D or 1D native tensor. For PhiTreeNode objects, only the sum over all value attributes is computed.
reduce
Dimensions to reduce as DimFilter.

Returns

loss
Tensor
Expand source code
def l2_loss(x, reduce: DimFilter = math.non_batch) -> Tensor:
    """
    Computes *∑<sub>i</sub> ||x<sub>i</sub>||<sub>2</sub><sup>2</sup> / 2*, summing over all non-batch dimensions.

    Args:
        x: `Tensor` or `phiml.math.magic.PhiTreeNode` or 0D or 1D native tensor.
            For `phiml.math.magic.PhiTreeNode` objects, only the sum over all value attributes is computed.
        reduce: Dimensions to reduce as `DimFilter`.

    Returns:
        loss: `Tensor`
    """
    if isinstance(x, Tensor):
        if x.dtype.kind == complex:
            x = abs(x)
        return math.sum_(x ** 2, reduce) * 0.5
    elif isinstance(x, PhiTreeNode):
        return sum([l2_loss(getattr(x, a), reduce) for a in variable_values(x)])
    else:
        try:
            backend = math.choose_backend(x)
            shape = backend.staticshape(x)
            if len(shape) == 0:
                return x ** 2 * 0.5
            elif len(shape) == 1:
                return backend.sum(x ** 2) * 0.5
            else:
                raise ValueError("l2_loss is only defined for 0D and 1D native tensors. For higher-dimensional data, use Φ-ML tensors.")
        except math.NoBackendFound:
            raise ValueError(x)
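A small example of both losses, which reduce all non-batch dimensions by default; the dimension names are illustrative:

```python
from phiml import math

x = math.tensor([[1., -2.], [3., 0.]], math.batch(b=2), math.channel(vector='u,v'))
l1 = math.l1_loss(x)  # per-batch sum of |x|:         (3.0, 3.0) along b
l2 = math.l2_loss(x)  # per-batch 0.5 * sum of x**2:  (2.5, 4.5) along b
```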
def laplace(x: phiml.math._tensors.Tensor, dx: Union[float, phiml.math._tensors.Tensor] = 1, padding: Union[Extrapolation, float, phiml.math._tensors.Tensor, str, None] = zero-gradient, dims: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function spatial>, weights: phiml.math._tensors.Tensor = None)

Spatial Laplace operator as defined for scalar fields. If a vector field is passed, the laplace is computed component-wise.

Args

x
n-dimensional field of shape (batch, spatial dimensions…, components)
dx
scalar or 1d tensor
padding
Padding mode. Must be one of the following: Extrapolation, Tensor or number for constant extrapolation, name of extrapolation as str.
dims
The second derivative along these dimensions is summed over
weights
(Optional) Multiply the axis terms by these factors before summation. Must be a Tensor with a single channel dimension that lists all laplace dims by name.

Returns

Tensor of same shape as x

Expand source code
def laplace(x: Tensor,
            dx: Union[Tensor, float] = 1,
            padding: Union[Extrapolation, float, Tensor, str, None] = extrapolation.BOUNDARY,
            dims: DimFilter = spatial,
            weights: Tensor = None):
    """
    Spatial Laplace operator as defined for scalar fields.
    If a vector field is passed, the laplace is computed component-wise.

    Args:
        x: n-dimensional field of shape (batch, spatial dimensions..., components)
        dx: scalar or 1d tensor
        padding: Padding mode.
            Must be one of the following: `Extrapolation`, `Tensor` or number for constant extrapolation, name of extrapolation as `str`.
        dims: The second derivative along these dimensions is summed over
        weights: (Optional) Multiply the axis terms by these factors before summation.
            Must be a Tensor with a single channel dimension that lists all laplace dims by name.

    Returns:
        `phiml.math.Tensor` of same shape as `x`
    """
    if isinstance(dx, (tuple, list)):
        dx = wrap(dx, batch('_laplace'))
    elif isinstance(dx, Tensor) and dx.vector.exists:
        dx = rename_dims(dx, 'vector', batch('_laplace'))
    if isinstance(x, Extrapolation):
        return x.spatial_gradient()
    left, center, right = shift(wrap(x), (-1, 0, 1), dims, padding, stack_dim=batch('_laplace'))
    result = (left + right - 2 * center) / (dx ** 2)
    if weights is not None:
        dim_names = x.shape.only(dims).names
        assert channel(weights).rank == 1 and channel(weights).item_names is not None, f"weights must have one channel dimension listing the laplace dims but got {shape(weights)}"
        assert set(channel(weights).item_names[0]) >= set(dim_names), f"the channel dim of weights must contain all laplace dims {dim_names} but only has {channel(weights).item_names}"
        result *= rename_dims(weights, channel, batch('_laplace'))
    result = math.sum_(result, '_laplace')
    return result
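A quick numerical sketch on a 1D parabola, whose exact Laplacian is 2 everywhere; boundary entries depend on the chosen padding:

```python
from phiml import math

x = math.tensor([0., 1., 4., 9., 16.], math.spatial(x=5))  # samples of f(x) = x**2 on a unit grid
lap = math.laplace(x, dx=1.)  # interior entries are 2; first and last entry reflect the zero-gradient padding
```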
def layout(objects, *shape: phiml.math._shape.Shape) ‑> phiml.math._tensors.Tensor

Wraps a Python tree in a Tensor, allowing elements to be accessed via dimensions. A python tree is a structure of nested tuple, list, dict and leaf objects where leaves can be any Python object.

All keys of dict containers must be of type str. The keys are automatically assigned as item names along that dimension unless conflicting with other elements.

Strings may also be used as containers.

Example:

>>> t = layout({'a': 'text', 'b': [0, 1]}, channel('dict,inner'))
>>> t.inner[1].dict['a'].native()
'e'

See Also: tensor(), wrap().

Args

objects
PyTree of list or tuple.
*shape
Tensor dimensions

Returns

Tensor. Calling Tensor.native() on the returned tensor will return objects.

Expand source code
def layout(objects, *shape: Shape) -> Tensor:
    """
    Wraps a Python tree in a `Tensor`, allowing elements to be accessed via dimensions.
    A python tree is a structure of nested `tuple`, `list`, `dict` and *leaf* objects where leaves can be any Python object.

    All keys of `dict` containers must be of type `str`.
    The keys are automatically assigned as item names along that dimension unless conflicting with other elements.

    Strings may also be used as containers.

    Example:
    >>> t = layout({'a': 'text', 'b': [0, 1]}, channel('dict,inner'))
    >>> t.inner[1].dict['a'].native()
    'e'

    See Also:
        `tensor()`, `wrap()`.

    Args:
        objects: PyTree of `list` or `tuple`.
        *shape: Tensor dimensions

    Returns:
        `Tensor`.
        Calling `Tensor.native()` on the returned tensor will return `objects`.
    """
    assert all(isinstance(s, Shape) for s in shape), f"shape needs to be one or multiple Shape instances but got {shape}"
    shape = EMPTY_SHAPE if len(shape) == 0 else concat_shapes(*shape)
    if isinstance(objects, Layout):
        assert objects.shape == shape
        return objects

    if not shape.well_defined:

        def recursive_determine_shape(native, shape: Shape):
            if not shape:
                return shape
            if isinstance(native, dict):
                assert all([isinstance(k, str) for k in native.keys()]), f"All dict keys in PyTrees must be str but got {tuple(native.keys())}"
                shape = shape.replace(shape[0], shape[0].with_size(tuple(native.keys())))
            if shape.rank == 1:
                return shape.with_sizes((len(native),))
            inner_shape = shape[1:]
            if isinstance(native, (tuple, list)):
                inner_shapes = [recursive_determine_shape(n, inner_shape) for n in native]
            elif isinstance(native, dict):
                inner_shapes = [recursive_determine_shape(n, inner_shape) for n in native.values()]
            else:
                raise ValueError(native)
            return shape_stack(shape[0], *inner_shapes)

        shape = recursive_determine_shape(objects, shape)

    return Layout(objects, shape)
    # if shape.volume == 1:
    #     objects = np.asarray(objects, dtype=object)
    #
    # if isinstance(objects, (tuple, list)):
    #     objects = np.asarray(objects, dtype=object)
    # if isinstance(objects, np.ndarray) and objects.dtype == object:
    #     return Layout(objects, shape)
    # else:
    #     assert shape.volume == 1, f"Cannot layout object of type {objects} along {shape}, a tuple, list or object array is required."
def linspace(start: Union[float, phiml.math._tensors.Tensor, tuple, list], stop: Union[float, phiml.math._tensors.Tensor, tuple, list], dim: phiml.math._shape.Shape) ‑> phiml.math._tensors.Tensor

Returns dim.size evenly spaced numbers between start and stop along dim.

If dim contains multiple dimensions, evenly spaces values along each dimension, then stacks the result along a new channel dimension called vector.

See Also: arange(), meshgrid().

Args

start
First value, int or Tensor.
stop
Last value, int or Tensor.
dim
Linspace dimension of integer size. The size determines how many values to linearly space between start and stop. The values will be laid out along dim.

Returns

Tensor

Examples

>>> math.linspace(0, 1, spatial(x=5))
(0.000, 0.250, 0.500, 0.750, 1.000) along xˢ
>>> math.linspace(0, (-1, 1), spatial(x=3))
(0.000, 0.000); (-0.500, 0.500); (-1.000, 1.000) (xˢ=3, vectorᶜ=2)
Expand source code
def linspace(start: Union[float, Tensor, tuple, list], stop: Union[float, Tensor, tuple, list], dim: Shape) -> Tensor:
    """
    Returns `dim.size` evenly spaced numbers between `start` and `stop` along `dim`.

    If `dim` contains multiple dimensions, evenly spaces values along each dimension, then stacks the result along a new channel dimension called `vector`.

    See Also:
        `arange()`, `meshgrid()`.

    Args:
        start: First value, `int` or `Tensor`.
        stop: Last value, `int` or `Tensor`.
        dim: Linspace dimension of integer size.
            The size determines how many values to linearly space between `start` and `stop`.
            The values will be laid out along `dim`.

    Returns:
        `Tensor`

    Examples:
        >>> math.linspace(0, 1, spatial(x=5))
        (0.000, 0.250, 0.500, 0.750, 1.000) along xˢ

        >>> math.linspace(0, (-1, 1), spatial(x=3))
        (0.000, 0.000); (-0.500, 0.500); (-1.000, 1.000) (xˢ=3, vectorᶜ=2)
    """
    assert isinstance(dim, Shape), f"dim must be a Shape but got {dim}"
    assert dim.is_uniform, f"dim must be uniform but got {dim}"
    start = wrap(start)
    stop = wrap(stop)
    if dim.rank > 1:
        return meshgrid(dim) / (dim - 1) * (stop - start) + start
    if is_scalar(start) and is_scalar(stop):
        start = start.native()
        stop = stop.native()
        native_linspace = choose_backend(start, stop, prefer_default=True).linspace(start, stop, dim.size)
        return NativeTensor(native_linspace, dim)
    else:
        from ._functional import map_
        return map_(linspace, start, stop, dim=dim)
def log(x: ~TensorOrTree) ‑> ~TensorOrTree

Computes the natural logarithm of the Tensor or PhiTreeNode x.

Expand source code
def log(x: TensorOrTree) -> TensorOrTree:
    """ Computes the natural logarithm of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.log)
def log10(x: ~TensorOrTree) ‑> ~TensorOrTree

Computes log(x) of the Tensor or PhiTreeNode x with base 10.

Expand source code
def log10(x: TensorOrTree) -> TensorOrTree:
    """ Computes *log(x)* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x` with base 10. """
    return _backend_op1(x, Backend.log10)
def log2(x: ~TensorOrTree) ‑> ~TensorOrTree

Computes log(x) of the Tensor or PhiTreeNode x with base 2.

Expand source code
def log2(x: TensorOrTree) -> TensorOrTree:
    """ Computes *log(x)* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x` with base 2. """
    return _backend_op1(x, Backend.log2)
def log_gamma(x: ~TensorOrTree) ‑> ~TensorOrTree

Computes log(gamma(x)) of the Tensor or PhiTreeNode x.

Expand source code
def log_gamma(x: TensorOrTree) -> TensorOrTree:
    """ Computes *log(gamma(x))* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.log_gamma)
def map(function, *args, dims: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function shape>, range=builtins.range, unwrap_scalars=True, **kwargs) ‑> Union[None, phiml.math._tensors.Tensor, Tuple[Optional[phiml.math._tensors.Tensor]]]

Calls function on slices of the arguments and returns the stacked result.

Args

function
Function to be called on slices of args and kwargs. Must return one or multiple values that can be stacked. None may be returned but if any return value is None, all calls to function must return None in that position.
*args
Positional arguments for function. Values that are Sliceable will be sliced along dims.
**kwargs
Keyword arguments for function. Values that are Sliceable will be sliced along dims.
dims
Dimensions which should be sliced. function is called once for each element in dims, i.e. dims.volume times. If dims is not specified, all dimensions from the Sliceable values in args and kwargs will be mapped. Pass object to map only objects, not tensors of primitives (dtype.kind == object). This will select only layout()-type dimensions.
range
Optional range function. Can be used to generate tqdm output by passing trange.
unwrap_scalars
If True, passes the contents of scalar Tensors instead of the tensor objects.

Returns

Stacked results of function as a Tensor, or a tuple of Tensors if function returns multiple values.

Expand source code
def map_(function, *args, dims: DimFilter = shape, range=range, unwrap_scalars=True, **kwargs) -> Union[None, Tensor, Tuple[Optional[Tensor]]]:
    """
    Calls `function` on slices of the arguments and returns the stacked result.

    Args:
        function: Function to be called on slices of `args` and `kwargs`.
            Must return one or multiple values that can be stacked.
            `None` may be returned but if any return value is `None`, all calls to `function` must return `None` in that position.
        *args: Positional arguments for `function`.
            Values that are `phiml.math.magic.Sliceable` will be sliced along `dims`.
        **kwargs: Keyword arguments for `function`.
            Values that are `phiml.math.magic.Sliceable` will be sliced along `dims`.
        dims: Dimensions which should be sliced.
            `function` is called once for each element in `dims`, i.e. `dims.volume` times.
            If `dims` is not specified, all dimensions from the `phiml.math.magic.Sliceable` values in `args` and `kwargs` will be mapped.
            Pass `object` to map only objects, not tensors of primitives (`dtype.kind == object`). This will select only `layout`-type dimensions.
        range: Optional range function. Can be used to generate `tqdm` output by passing `trange`.
        unwrap_scalars: If `True`, passes the contents of scalar `Tensor`s instead of the tensor objects.

    Returns:
        Stacked results of `function` as a `Tensor`, or a `tuple` of `Tensor`s if `function` returns multiple values.
    """
    sliceable_args = [v for v in args if isinstance(v, Shapable)]
    sliceable_kwargs = {k: v for k, v in kwargs.items() if isinstance(v, Shapable)}
    extra_args = [v for v in args if not isinstance(v, Shapable)]
    extra_kwargs = {k: v for k, v in kwargs.items() if not isinstance(v, Shapable)}
    if dims is object:
        dims_ = merge_shapes(*[object_dims(a) for a in sliceable_args], *sliceable_kwargs.values(), allow_varying_sizes=True)
    else:
        dims_ = merge_shapes(*[shape(a) for a in sliceable_args], *sliceable_kwargs.values(), allow_varying_sizes=True).only(dims)
    assert dims_.well_defined, f"All arguments must have consistent sizes for all mapped dimensions. Trying to map along {dims} but some have varying sizes (marked as None)."
    assert dims_.volume > 0, f"map dims must have volume > 0 but got {dims_}"
    results = []
    for _, idx in zip(range(dims_.volume), dims_.meshgrid()):
        idx_args = [slice_(v, idx) for v in sliceable_args]
        idx_kwargs = {k: slice_(v, idx) for k, v in sliceable_kwargs.items()}
        if unwrap_scalars:
            idx_args = [v.native() if isinstance(v, Tensor) and v.rank == 0 else v for v in idx_args]
            idx_kwargs = {k: v.native() if isinstance(v, Tensor) and v.rank == 0 else v for k, v in idx_kwargs.items()}
        idx_extra_args = list(extra_args)
        idx_all_args = [idx_args.pop(0) if isinstance(v, Shapable) else idx_extra_args.pop(0) for v in args]
        f_output = function(*idx_all_args, **idx_kwargs, **extra_kwargs)
        results.append(f_output)
    if isinstance(results[0], tuple):
        stacked: List[Optional[Tensor]] = []
        for i in range(len(results[0])):
            if any(r[i] is None for r in results):
                assert all(r[i] is None for r in results), f"map function returned None for some elements, {results}"
                stacked.append(None)
            else:
                stacked.append(math.stack([r[i] for r in results], dims_, expand_values=True))
        return tuple(stacked)
    else:
        if any(r is None for r in results):
            assert all(r is None for r in results), f"map function returned None for some elements, {results}"
            return None
        return stack(results, dims_, expand_values=True)
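A minimal sketch: map slices its arguments along the mapped dimensions, calls the function once per slice, and stacks the results again:

```python
from phiml import math

x = math.tensor([1., 2., 3.], math.batch(b=3))
y = math.map(lambda v: v ** 2, x, dims='b')  # (1.0, 4.0, 9.0) along b
```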
def map_c2b(f: Callable) ‑> Callable

Map channel dimensions to batch dimensions. Short for map_types(f, channel, batch).

Expand source code
def map_c2b(f: Callable) -> Callable:
    """ Map channel dimensions to batch dimensions. Short for `map_types(f, instance, batch)`. """
    return map_types(f, channel, batch)
def map_i2b(f: Callable) ‑> Callable

Map instance dimensions to batch dimensions. Short for map_types(f, instance, batch).

Expand source code
def map_i2b(f: Callable) -> Callable:
    """ Map instance dimensions to batch dimensions. Short for `map_types(f, instance, batch)`. """
    return map_types(f, instance, batch)
def map_pairs(map_function: Callable, values: phiml.math._tensors.Tensor, connections: phiml.math._tensors.Tensor)

Evaluates map_function on all pairs of elements present in the sparsity pattern of connections.

Args

map_function
Function with signature (Tensor, Tensor) -> Tensor.
values
Values to evaluate map_function on. Needs to have a spatial or instance dimension but must not have a dual dimension.
connections
Sparse tensor.

Returns

Tensor with the sparse dimensions of connections and all non-instance dimensions returned by map_function.

Expand source code
def map_pairs(map_function: Callable, values: Tensor, connections: Tensor):
    """
    Evaluates `map_function` on all pairs of elements present in the sparsity pattern of `connections`.

    Args:
        map_function: Function with signature `(Tensor, Tensor) -> Tensor`.
        values: Values to evaluate `map_function` on.
            Needs to have a spatial or instance dimension but must not have a dual dimension.
        connections: Sparse tensor.

    Returns:
        `Tensor` with the sparse dimensions of `connections` and all non-instance dimensions returned by `map_function`.
    """
    assert dual(values).is_empty, f"values must not have a dual dimension but got {values.shape}"
    indices = stored_indices(connections, invalid='clamp')
    origin_dim, neighbors_dim = channel(indices).item_names[0]
    if origin_dim not in values.shape:
        origin_dim, neighbors_dim = neighbors_dim, origin_dim
    assert origin_dim in values.shape, f"No dimension of connections {connections.shape} is present in values {values.shape}"
    origin = values[{origin_dim: indices[origin_dim]}]
    target = values[{origin_dim: indices[neighbors_dim]}]
    result = map_function(origin, target)
    return tensor_like(connections, result, value_order='as existing')
def map_s2b(f: Callable) ‑> Callable

Map spatial dimensions to batch dimensions. Short for map_types(f, spatial, batch).

Expand source code
def map_s2b(f: Callable) -> Callable:
    """ Map spatial dimensions to batch dimensions. Short for `map_types(f, spatial, batch)`. """
    return map_types(f, spatial, batch)
def map_types(f: Callable, dims: Union[phiml.math._shape.Shape, tuple, list, str, Callable], dim_type: Union[str, Callable]) ‑> Callable

Wraps a function to change the dimension types of its Tensor and PhiTreeNode arguments.

Args

f
Function to wrap.
dims
Concrete dimensions or dimension type, such as spatial() or batch(). These dimensions will be mapped to dim_type for all positional function arguments.
dim_type
Dimension type, such as spatial() or batch(). f will be called with dimensions remapped to this type.

Returns

Function with signature matching f.

Expand source code
def map_types(f: Callable, dims: Union[Shape, tuple, list, str, Callable], dim_type: Union[Callable, str]) -> Callable:
    """
    Wraps a function to change the dimension types of its `Tensor` and `phiml.math.magic.PhiTreeNode` arguments.

    Args:
        f: Function to wrap.
        dims: Concrete dimensions or dimension type, such as `spatial` or `batch`.
            These dimensions will be mapped to `dim_type` for all positional function arguments.
        dim_type: Dimension type, such as `spatial` or `batch`.
            `f` will be called with dimensions remapped to this type.

    Returns:
        Function with signature matching `f`.
    """

    def forward_retype(obj, input_types: Shape):
        tree, tensors = disassemble_tree(obj, cache=False)
        retyped = []
        for t in tensors:
            for dim in t.shape.only(dims):
                t = t.dimension(dim).as_type(dim_type)
                input_types = math.merge_shapes(input_types, dim.with_size(None))
            retyped.append(t)
        return assemble_tree(tree, retyped), input_types

    def reverse_retype(obj, input_types: Shape):
        tree, tensors = disassemble_tree(obj, cache=False)
        retyped = []
        for t in tensors:
            for dim in t.shape.only(input_types.names):
                t = t.dimension(dim).as_type(input_types.get_type(dim))
            retyped.append(t)
        return assemble_tree(tree, retyped)

    @wraps(f)
    def retyped_f(*args, **kwargs):
        input_types = EMPTY_SHAPE
        retyped_args = []
        for arg in args:
            retyped_arg, input_types = forward_retype(arg, input_types)
            retyped_args.append(retyped_arg)
        output = f(*retyped_args, **kwargs)
        restored_output = reverse_retype(output, input_types)
        return restored_output

    return retyped_f
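A sketch of what such a wrapper enables, using map_c2b (which is built on map_types): relabelling channel dims as batch keeps them from being reduced by functions that operate on non-batch dims. The dimension names are illustrative:

```python
from phiml import math

x = math.random_normal(math.spatial(x=8), math.channel(vector='u,v'))
mean_per_component = math.map_c2b(math.mean)  # 'vector' counts as a batch dim inside the call, so it is not reduced
m = mean_per_component(x)                     # one mean per vector component; 'vector' is a channel dim again
```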
def masked_fill(values: phiml.math._tensors.Tensor, valid: phiml.math._tensors.Tensor, distance: int = 1) ‑> Tuple[phiml.math._tensors.Tensor, phiml.math._tensors.Tensor]

Extrapolates the values of values which are marked by the nonzero values of valid for distance steps in all spatial directions. Overlapping extrapolated values get averaged. Extrapolation also includes diagonals.

Args

values
Tensor which holds the values for extrapolation
valid
Tensor of the same size as values, with nonzero entries marking the valid values to extrapolate from
distance
Number of extrapolation steps

Returns

values
Extrapolation result
valid
mask marking all valid values after extrapolation
Expand source code
def masked_fill(values: Tensor, valid: Tensor, distance: int = 1) -> Tuple[Tensor, Tensor]:
    """
    Extrapolates the values of `values` which are marked by the nonzero values of `valid` for `distance` steps in all spatial directions.
    Overlapping extrapolated values get averaged. Extrapolation also includes diagonals.

    Args:
        values: Tensor which holds the values for extrapolation
        valid: Tensor of the same size as `values`, with nonzero entries marking the valid values to extrapolate from
        distance: Number of extrapolation steps

    Returns:
        values: Extrapolation result
        valid: mask marking all valid values after extrapolation
    """
    def binarize(x):
        return math.safe_div(x, x)
    distance = min(distance, max(values.shape.sizes))
    for _ in range(distance):
        valid = binarize(valid)
        valid_values = valid * values
        overlap = valid  # count how many values we are adding
        for dim in values.shape.spatial.names:
            values_l, values_r = shift(valid_values, (-1, 1), dims=dim, padding=extrapolation.ZERO)
            valid_values = math.sum_(values_l + values_r + valid_values, dim='shift')
            mask_l, mask_r = shift(overlap, (-1, 1), dims=dim, padding=extrapolation.ZERO)
            overlap = math.sum_(mask_l + mask_r + overlap, dim='shift')
        extp = math.safe_div(valid_values, overlap)  # take mean where extrapolated values overlap
        values = math.where(valid, values, math.where(binarize(overlap), extp, values))
        valid = overlap
    return values, binarize(valid)
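A 1D sketch, assuming masked_fill is accessible from phiml.math; the values are illustrative:

```python
from phiml import math

values = math.tensor([0., 10., 0., 30.], math.spatial(x=4))
valid = math.tensor([0., 1., 0., 1.], math.spatial(x=4))
filled, new_valid = math.masked_fill(values, valid, distance=1)
# Invalid entries adjacent to valid ones are replaced by the mean of their valid neighbors.
```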
def matrix_from_function(f: Callable, *args, auxiliary_args=None, auto_compress=False, sparsify_batch=None, separate_independent=False, **kwargs) ‑> Tuple[phiml.math._tensors.Tensor, phiml.math._tensors.Tensor]

Trace a linear function and construct a matrix. Depending on the functional form of f, the returned matrix may be dense or sparse.

Args

f
Function to trace.
*args
Arguments for f.
auxiliary_args
Arguments in which the function is not linear. These parameters are not traced but passed on as given in args and kwargs.
auto_compress
If True, returns a compressed matrix if supported by the backend.
sparsify_batch
If False, the matrix will be batched. If True, will create dual dimensions for the involved batch dimensions. This will result in one large matrix instead of a batch of matrices.
**kwargs
Keyword arguments for f.

Returns

matrix
Matrix representing the linear dependency of the output of f on the input of f. Input dimensions will be dual() dimensions of the matrix while output dimensions will be regular.
bias
Bias for affine functions or zero-vector if the function is purely linear.
Expand source code
def matrix_from_function(f: Callable,
                         *args,
                         auxiliary_args=None,
                         auto_compress=False,
                         sparsify_batch=None,
                         separate_independent=False,  # not fully implemented, requires auto_compress=False
                         _return_raw_output=False,
                         **kwargs) -> Tuple[Tensor, Tensor]:
    """
    Trace a linear function and construct a matrix.
    Depending on the functional form of `f`, the returned matrix may be dense or sparse.

    Args:
        f: Function to trace.
        *args: Arguments for `f`.
        auxiliary_args: Arguments in which the function is not linear.
            These parameters are not traced but passed on as given in `args` and `kwargs`.
        auto_compress: If `True`, returns a compressed matrix if supported by the backend.
        sparsify_batch: If `False`, the matrix will be batched.
            If `True`, will create dual dimensions for the involved batch dimensions.
            This will result in one large matrix instead of a batch of matrices.
        **kwargs: Keyword arguments for `f`.

    Returns:
        matrix: Matrix representing the linear dependency of the output of `f` on the input of `f`.
            Input dimensions will be `dual` dimensions of the matrix while output dimensions will be regular.
        bias: Bias for affine functions or zero-vector if the function is purely linear.
    """
    assert isinstance(auxiliary_args, str) or auxiliary_args is None, f"auxiliary_args must be a comma-separated str but got {auxiliary_args}"
    from ._functional import function_parameters, f_name
    f_params = function_parameters(f)
    aux = set(s.strip() for s in auxiliary_args.split(',') if s.strip()) if isinstance(auxiliary_args, str) else f_params[1:]
    all_args = {**kwargs, **{f_params[i]: v for i, v in enumerate(args)}}
    aux_args = {k: v for k, v in all_args.items() if k in aux}
    trace_args = {k: v for k, v in all_args.items() if k not in aux}
    tree, tensors = disassemble_tree(trace_args, cache=False, attr_type=value_attributes)
    assert len(tensors) == 1, f"Only one input tensor can be traced but got {tensors}"
    target_backend = choose_backend_t(*tensors)
    # --- Trace function ---
    with NUMPY:
        src = TracerSource(tensors[0].shape, tensors[0].dtype, tuple(trace_args.keys())[0], 0)
        tracer = ShiftLinTracer(src, {EMPTY_SHAPE: math.ones()}, tensors[0].shape, bias=math.zeros(dtype=tensors[0].dtype), renamed={d: d for d in tensors[0].shape.names})
        x_kwargs = assemble_tree(tree, [tracer] + tensors[1:], attr_type=value_attributes)
        result = f(**x_kwargs, **aux_args)
    out_tree, result_tensors = disassemble_tree(result, cache=False, attr_type=value_attributes)
    assert len(result_tensors) == 1, f"Linear function output must be or contain a single Tensor but got {result}"
    # for t in result_tensors[1:]:
    #     assert not t._is_tracer, f"Linear function must only return a single tracer at position 0 but got {result_tensors}"
    tracer = result_tensors[0]._simplify()
    assert tracer._is_tracer, f"Tracing linear function '{f_name(f)}' failed. Make sure only linear operations are used. Output: {tracer.shape}"
    # --- Convert to COO ---
    if sparsify_batch is None:
        if auto_compress:
            sparsify_batch = not target_backend.supports(Backend.csr_matrix_batched)
        else:
            sparsify_batch = not target_backend.supports(Backend.sparse_coo_tensor_batched)
    if isinstance(tracer, SparseLinTracer):
        matrix, bias = tracer._get_matrix(sparsify_batch), tracer._bias
    elif isinstance(tracer, GatherLinTracer):
        matrix, bias = to_sparse_tracer(tracer, None)._get_matrix(sparsify_batch), tracer._bias
    else:
        matrix, bias = tracer_to_coo(tracer, sparsify_batch, separate_independent)
    # --- Compress ---
    if auto_compress and matrix.default_backend.supports(Backend.mul_csr_dense) and target_backend.supports(Backend.mul_csr_dense) and isinstance(matrix, SparseCoordinateTensor):
        matrix = matrix.compress_rows()
    # elif backend.supports(Backend.mul_csc_dense):
    #     return matrix.compress_cols(), tracer._bias
    return (matrix, bias, (out_tree, result_tensors)) if _return_raw_output else (matrix, bias)
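A sketch of extracting an explicit matrix from a linear operator, here the finite-difference Laplacian documented above:

```python
from phiml import math

def op(x):
    return math.laplace(x)

x0 = math.zeros(math.spatial(x=4))                # defines the input shape and dtype for tracing
matrix, bias = math.matrix_from_function(op, x0)  # input dims appear as dual dims of the matrix, output dims as regular dims
```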
def max(value: Union[phiml.math._tensors.Tensor, list, tuple, numbers.Number, bool], dim: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function non_batch>) ‑> phiml.math._tensors.Tensor

Determines the maximum value of values along the specified dimensions.

Args

value
Tensor or list / tuple of Tensors.
dim

Dimension or dimensions to be reduced. One of

  • None to reduce all non-batch dimensions
  • str containing single dimension or comma-separated list of dimensions
  • Tuple[str] or List[str]
  • Shape
  • batch(), instance(), spatial(), channel() to select dimensions by type
  • '0' when isinstance(value, (tuple, list)) to reduce the sequence of Tensors

Returns

Tensor without the reduced dimensions.

Expand source code
def max_(value: Union[Tensor, list, tuple, Number, bool], dim: DimFilter = non_batch) -> Tensor:
    """
    Determines the maximum value of `values` along the specified dimensions.

    Args:
        value: `Tensor` or `list` / `tuple` of Tensors.
        dim: Dimension or dimensions to be reduced. One of

            * `None` to reduce all non-batch dimensions
            * `str` containing single dimension or comma-separated list of dimensions
            * `Tuple[str]` or `List[str]`
            * `Shape`
            * `batch`, `instance`, `spatial`, `channel` to select dimensions by type
            * `'0'` when `isinstance(value, (tuple, list))` to reduce the sequence of Tensors

    Returns:
        `Tensor` without the reduced dimensions.
    """
    return reduce_(_max, value, dim)
def maximum(x: Union[float, phiml.math._tensors.Tensor], y: Union[float, phiml.math._tensors.Tensor])

Computes the element-wise maximum of x and y.

Expand source code
def maximum(x: Union[Tensor, float], y: Union[Tensor, float]):
    """ Computes the element-wise maximum of `x` and `y`. """
    return custom_op2(x, y, maximum, lambda x_, y_: choose_backend(x_, y_).maximum(x_, y_), op_name='maximum')
def mean(value: Union[phiml.math._tensors.Tensor, list, tuple, numbers.Number, bool], dim: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function non_batch>) ‑> phiml.math._tensors.Tensor

Computes the mean over values along the specified dimensions.

Args

value
Tensor or list / tuple of Tensors.
dim

Dimension or dimensions to be reduced. One of

  • None to reduce all non-batch dimensions
  • str containing single dimension or comma-separated list of dimensions
  • Tuple[str] or List[str]
  • Shape
  • batch(), instance(), spatial(), channel() to select dimensions by type
  • '0' when isinstance(value, (tuple, list)) to reduce the sequence of Tensors

Returns

Tensor without the reduced dimensions.

Expand source code
def mean(value: Union[Tensor, list, tuple, Number, bool], dim: DimFilter = non_batch) -> Tensor:
    """
    Computes the mean over `values` along the specified dimensions.

    Args:
        value: `Tensor` or `list` / `tuple` of Tensors.
        dim: Dimension or dimensions to be reduced. One of

            * `None` to reduce all non-batch dimensions
            * `str` containing single dimension or comma-separated list of dimensions
            * `Tuple[str]` or `List[str]`
            * `Shape`
            * `batch`, `instance`, `spatial`, `channel` to select dimensions by type
            * `'0'` when `isinstance(value, (tuple, list))` to reduce the sequence of Tensors

    Returns:
        `Tensor` without the reduced dimensions.
    """
    return reduce_(_mean, value, dim)
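The dim argument works the same way for all reductions (sum, mean, max, min, median, …). A small sketch:

```python
from phiml import math

x = math.tensor([[1., 2., 3.], [4., 5., 6.]], math.batch(b=2), math.spatial(x=3))
math.mean(x)         # default: reduce all non-batch dims -> (2.0, 5.0) along b
math.mean(x, 'x')    # same here, since 'x' is the only non-batch dim
math.mean(x, 'b,x')  # reduce everything -> 3.5
```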
def median(value, dim: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function non_batch>)

Reduces dim of value by picking the median value. For even dimension sizes (ambiguous choice), the linear average of the two central values is computed.

Currently implemented via quantile().

Args

value
Tensor
dim

Dimension or dimensions to be reduced. One of

  • None to reduce all non-batch dimensions
  • str containing single dimension or comma-separated list of dimensions
  • Tuple[str] or List[str]
  • Shape
  • batch(), instance(), spatial(), channel() to select dimensions by type
  • '0' when isinstance(value, (tuple, list)) to reduce the sequence of Tensors

Returns

Tensor

Expand source code
def median(value, dim: DimFilter = non_batch):
    """
    Reduces `dim` of `value` by picking the median value.
    For even dimension sizes (ambiguous choice), the linear average of the two central values is computed.

    Currently implemented via `quantile()`.

    Args:
        value: `Tensor`
        dim: Dimension or dimensions to be reduced. One of

            * `None` to reduce all non-batch dimensions
            * `str` containing single dimension or comma-separated list of dimensions
            * `Tuple[str]` or `List[str]`
            * `Shape`
            * `batch`, `instance`, `spatial`, `channel` to select dimensions by type
            * `'0'` when `isinstance(value, (tuple, list))` to reduce the sequence of Tensors

    Returns:
        `Tensor`
    """
    return reduce_(_median, value, dim)
def merge_shapes(*objs: Union[phiml.math._shape.Shape, Any], order=(<function batch>, <function dual>, <function instance>, <function spatial>, <function channel>), allow_varying_sizes=False)

Combines shapes into a single Shape, grouping dimensions by type. If dimensions with equal names are present in multiple shapes, their types and sizes must match.

The shorthand shape1 & shape2 merges shapes with check_exact=[spatial].

See Also: concat_shapes().

Args

*objs
Shape or Shaped objects to combine.
order
Dimension type order as tuple of type filters (channel(), batch(), spatial() or instance()). Dimensions are grouped by type while merging.

Returns

Merged Shape

Raises

IncompatibleShapes if the shapes are not compatible

Expand source code
def merge_shapes(*objs: Union[Shape, Any], order=(batch, dual, instance, spatial, channel), allow_varying_sizes=False):
    """
    Combines `shapes` into a single `Shape`, grouping dimensions by type.
    If dimensions with equal names are present in multiple shapes, their types and sizes must match.

    The shorthand `shape1 & shape2` merges shapes with `check_exact=[spatial]`.

    See Also:
        `concat_shapes()`.

    Args:
        *objs: `Shape` or `Shaped` objects to combine.
        order: Dimension type order as `tuple` of type filters (`channel`, `batch`, `spatial` or `instance`). Dimensions are grouped by type while merging.

    Returns:
        Merged `Shape`

    Raises:
        IncompatibleShapes if the shapes are not compatible
    """
    if not objs:
        return EMPTY_SHAPE
    shapes = [obj if isinstance(obj, Shape) else shape(obj) for obj in objs]
    merged = []
    for dim_type in order:
        type_group = dim_type(shapes[0])
        for sh in shapes[1:]:
            sh = dim_type(sh)
            for dim in sh:
                if dim not in type_group:
                    type_group = type_group._expand(dim, pos=-1)
                else:  # check size match
                    sizes_match = _size_equal(dim.size, type_group.get_size(dim.name))
                    if allow_varying_sizes:
                        if not sizes_match:
                            type_group = type_group.with_dim_size(dim, None)
                    else:
                        if not sizes_match:
                            raise IncompatibleShapes(f"Cannot merge shapes {shapes} because dimension '{dim.name}' exists with different sizes.", *shapes)
                        names1 = type_group.get_item_names(dim)
                        names2 = sh.get_item_names(dim)
                        if names1 is not None and names2 is not None and len(names1) > 1:
                            if names1 != names2:
                                if set(names1) == set(names2):
                                    raise IncompatibleShapes(f"Inconsistent component order on {dim.name}: '{','.join(names1)}' vs '{','.join(names2)}' in dimension '{dim.name}'. Failed to merge shapes {shapes}", *shapes)
                                else:
                                    raise IncompatibleShapes(f"Cannot merge shapes {shapes} because dimension '{dim.name}' exists with different item names.", *shapes)
                        elif names1 is None and names2 is not None:
                            type_group = type_group._with_item_name(dim, tuple(names2))
        merged.append(type_group)
    return concat_shapes(*merged)
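A small sketch of merging shapes; dimensions with matching names must agree in size, and the result is grouped by dimension type:

```python
from phiml import math

a = math.batch(b=2) & math.spatial(x=8)
b = math.spatial(x=8) & math.channel(vector='u,v')
merged = math.merge_shapes(a, b)  # (bᵇ=2, xˢ=8, vectorᶜ=u,v)
```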
def meshgrid(dims: Union[Callable, phiml.math._shape.Shape] = <function spatial>, stack_dim=(vectorᶜ=None), **dimensions: Union[int, phiml.math._tensors.Tensor, tuple, list, Any]) ‑> phiml.math._tensors.Tensor

Generate a mesh-grid Tensor from keyword dimensions.

Args

**dimensions
Mesh-grid dimensions, mapping names to values. Values may be int, 1D Tensor or 1D native tensor.
dims
Dimension type of mesh-grid dimensions, one of spatial(), channel(), batch(), instance().
stack_dim
Channel dim along which grids are stacked. This is optional for 1D mesh-grids. In that case returns a Tensor without a stack dim if None or an empty Shape is passed.

Returns

Mesh-grid Tensor with the dimensions of dims / dimensions and stack_dim.

Examples

>>> math.meshgrid(x=2, y=2)
(xˢ=2, yˢ=2, vectorᶜ=x,y) 0.500 ± 0.500 (0e+00...1e+00)
>>> math.meshgrid(x=2, y=(-1, 1))
(xˢ=2, yˢ=2, vectorᶜ=x,y) 0.250 ± 0.829 (-1e+00...1e+00)
>>> math.meshgrid(x=2, stack_dim=None)
(0, 1) along xˢ
Expand source code
def meshgrid(dims: Union[Callable, Shape] = spatial, stack_dim=channel('vector'), **dimensions: Union[int, Tensor, tuple, list, Any]) -> Tensor:
    """
    Generate a mesh-grid `Tensor` from keyword dimensions.

    Args:
        **dimensions: Mesh-grid dimensions, mapping names to values.
            Values may be `int`, 1D `Tensor` or 1D native tensor.
        dims: Dimension type of mesh-grid dimensions, one of `spatial`, `channel`, `batch`, `instance`.
        stack_dim: Channel dim along which grids are stacked.
            This is optional for 1D mesh-grids. In that case returns a `Tensor` without a stack dim if `None` or an empty `Shape` is passed.

    Returns:
        Mesh-grid `Tensor` with the dimensions of `dims` / `dimensions` and `stack_dim`.

    Examples:
        >>> math.meshgrid(x=2, y=2)
        (xˢ=2, yˢ=2, vectorᶜ=x,y) 0.500 ± 0.500 (0e+00...1e+00)

        >>> math.meshgrid(x=2, y=(-1, 1))
        (xˢ=2, yˢ=2, vectorᶜ=x,y) 0.250 ± 0.829 (-1e+00...1e+00)

        >>> math.meshgrid(x=2, stack_dim=None)
        (0, 1) along xˢ
    """
    assert 'dim_type' not in dimensions, f"dim_type has been renamed to dims"
    assert not stack_dim or stack_dim.name not in dimensions
    if isinstance(dims, Shape):
        assert not dimensions, f"When passing a Shape to meshgrid(), no kwargs are allowed"
        dimensions = {d: s for d, s in zip(dims.names, dims.sizes)}
        grid_shape = dims
        dim_values = [tuple(range(s)) for s in dims.sizes]
    else:
        dim_type = dims
        assert callable(dim_type), f"dims must be a Shape or dimension type but got {dims}"
        dim_values = []
        dim_sizes = []
        for dim, spec in dimensions.items():
            if isinstance(spec, int) or (isinstance(spec, Tensor) and spec.rank == 0 and spec.dtype.kind == int):
                dim_values.append(tuple(range(int(spec))))
                dim_sizes.append(spec)
            elif isinstance(spec, Tensor):
                assert spec.rank == 1, f"Only 1D sequences allowed, got {spec} for dimension '{dim}'."
                dim_values.append(spec.native())
                dim_sizes.append(spec.shape.volume)
            else:
                backend = choose_backend(spec)
                shape = backend.staticshape(spec)
                assert len(shape) == 1, f"Only 1D sequences allowed, got {spec} for dimension '{dim}'."
                dim_values.append(spec)
                dim_sizes.append(shape[0])
        grid_shape = dim_type(**{dim: size for dim, size in zip(dimensions.keys(), dim_sizes)})
    backend = choose_backend(*dim_values, prefer_default=True)
    indices_list = backend.meshgrid(*dim_values)
    channels = [NativeTensor(t, grid_shape) for t in indices_list]
    if not stack_dim:
        assert len(channels) == 1, f"meshgrid with multiple dimensions requires a valid stack_dim but got {stack_dim}"
        return channels[0]
    if stack_dim.item_names[0] is None:
        stack_dim = stack_dim.with_size(tuple(dimensions.keys()))
    return stack_tensors(channels, stack_dim)
def min(value: Union[phiml.math._tensors.Tensor, list, tuple, numbers.Number, bool], dim: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function non_batch>) ‑> phiml.math._tensors.Tensor

Determines the minimum value of values along the specified dimensions.

Args

value
Tensor or list / tuple of Tensors.
dim

Dimension or dimensions to be reduced. One of

  • None to reduce all non-batch dimensions
  • str containing single dimension or comma-separated list of dimensions
  • Tuple[str] or List[str]
  • Shape
  • batch(), instance(), spatial(), channel() to select dimensions by type
  • '0' when isinstance(value, (tuple, list)) to reduce the sequence of Tensors

Returns

Tensor without the reduced dimensions.

Expand source code
def min_(value: Union[Tensor, list, tuple, Number, bool], dim: DimFilter = non_batch) -> Tensor:
    """
    Determines the minimum value of `values` along the specified dimensions.

    Args:
        value: `Tensor` or `list` / `tuple` of Tensors.
        dim: Dimension or dimensions to be reduced. One of

            * `None` to reduce all non-batch dimensions
            * `str` containing single dimension or comma-separated list of dimensions
            * `Tuple[str]` or `List[str]`
            * `Shape`
            * `batch`, `instance`, `spatial`, `channel` to select dimensions by type
            * `'0'` when `isinstance(value, (tuple, list))` to reduce the sequence of Tensors

    Returns:
        `Tensor` without the reduced dimensions.
    """
    return reduce_(_min, value, dim)
def minimize(f: Callable[[~X], ~Y], solve: phiml.math._optimize.Solve[~X, ~Y]) ‑> ~X

Finds a minimum of the scalar function f(x). The method argument of solve determines which optimizer is used. All optimizers supported by scipy.optimize.minimize are supported, see https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html . Additionally a gradient descent solver with adaptive step size can be used with method='GD'.

math.minimize() is limited to backends that support jacobian(), i.e. PyTorch, TensorFlow and Jax.

To obtain additional information about the performed solve, use a SolveTape.

See Also: solve_nonlinear().

Args

f
Function whose output is subject to minimization. All positional arguments of f are optimized and must be Tensor or PhiTreeNode. If solve.x0 is a tuple or list, it will be passed to f as varargs, f(*x0). To minimize a subset of the positional arguments, define a new (lambda) function depending only on those. The first return value of f must be a scalar float Tensor or PhiTreeNode.
solve
Solve object to specify method type, parameters and initial guess for x.

Returns

x
solution, the minimum point x.

Raises

NotConverged
If the desired accuracy was not reached within the maximum number of iterations.
Diverged
If the optimization failed prematurely.
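
For illustration, a minimal sketch of an unconstrained minimization. It assumes that Solve and l2_loss are exported from phiml.math (l2_loss is referenced in the source below) and that Solve accepts method, relative tolerance, absolute tolerance and x0 in that order; rel_tol must be 0 for minimize().

>>> from phiml import math
>>> from phiml.math import Solve, wrap
>>> def loss(x):
...     return math.l2_loss(x - 2.0)   # scalar objective, minimum at x = 2
>>> x_min = math.minimize(loss, Solve('L-BFGS-B', 0, 1e-5, x0=wrap(0.0)))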
Expand source code
def minimize(f: Callable[[X], Y], solve: Solve[X, Y]) -> X:
    """
    Finds a minimum of the scalar function *f(x)*.
    The `method` argument of `solve` determines which optimizer is used.
    All optimizers supported by `scipy.optimize.minimize` are supported,
    see https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html .
    Additionally a gradient descent solver with adaptive step size can be used with `method='GD'`.

    `math.minimize()` is limited to backends that support `jacobian()`, i.e. PyTorch, TensorFlow and Jax.

    To obtain additional information about the performed solve, use a `SolveTape`.

    See Also:
        `solve_nonlinear()`.

    Args:
        f: Function whose output is subject to minimization.
            All positional arguments of `f` are optimized and must be `Tensor` or `phiml.math.magic.PhiTreeNode`.
            If `solve.x0` is a `tuple` or `list`, it will be passed to *f* as varargs, `f(*x0)`.
            To minimize a subset of the positional arguments, define a new (lambda) function depending only on those.
            The first return value of `f` must be a scalar float `Tensor` or `phiml.math.magic.PhiTreeNode`.
        solve: `Solve` object to specify method type, parameters and initial guess for `x`.

    Returns:
        x: solution, the minimum point `x`.

    Raises:
        NotConverged: If the desired accuracy was not reached within the maximum number of iterations.
        Diverged: If the optimization failed prematurely.
    """
    solve = solve.with_defaults('optimization')
    assert (solve.rel_tol == 0).all, f"rel_tol must be zero for minimize() but got {solve.rel_tol}"
    assert solve.preprocess_y is None, "minimize() does not allow preprocess_y"
    x0_nest, x0_tensors = disassemble_tree(solve.x0, cache=True, attr_type=value_attributes)
    x0_tensors = [to_float(t) for t in x0_tensors]
    backend = choose_backend_t(*x0_tensors, prefer_default=True)
    batch_dims = merge_shapes(*[t.shape for t in x0_tensors]).batch
    x0_natives = []
    x0_native_shapes = []
    for t in x0_tensors:
        t = cached(t)
        if t.shape.is_uniform:
            x0_natives.append(reshaped_native(t, [batch_dims, t.shape.non_batch]))
            x0_native_shapes.append(t.shape.non_batch)
        else:
            for ut in unstack(t, t.shape.shape.without('dims')):
                x0_natives.append(reshaped_native(ut, [batch_dims, ut.shape.non_batch]))
                x0_native_shapes.append(ut.shape.non_batch)
    x0_flat = backend.concat(x0_natives, -1)

    def unflatten_assemble(x_flat, additional_dims: Shape = EMPTY_SHAPE, convert=True):
        partial_tensors = []
        i = 0
        for x0_native, t_shape in zip(x0_natives, x0_native_shapes):
            vol = backend.staticshape(x0_native)[-1]
            flat_native = x_flat[..., i:i + vol]
            partial_tensor = reshaped_tensor(flat_native, [*additional_dims, batch_dims, t_shape], convert=convert)
            partial_tensors.append(partial_tensor)
            i += vol
        # --- assemble non-uniform tensors ---
        x_tensors = []
        for t in x0_tensors:
            if t.shape.is_uniform:
                x_tensors.append(partial_tensors.pop(0))
            else:
                stack_dims = t.shape.shape.without('dims')
                x_tensors.append(stack(partial_tensors[:stack_dims.volume], stack_dims))
                partial_tensors = partial_tensors[stack_dims.volume:]
        x = assemble_tree(x0_nest, x_tensors, attr_type=value_attributes)
        return x

    def native_function(x_flat):
        x = unflatten_assemble(x_flat)
        if isinstance(x, (tuple, list)):
            y = f(*x)
        else:
            y = f(x)
        _, y_tensors = disassemble_tree(y, cache=False)
        loss_tensor = y_tensors[0]
        assert not non_batch(loss_tensor), f"Failed to minimize '{f.__name__}' because it returned a non-scalar output {shape(loss_tensor)}. Reduce all non-batch dimensions, e.g. using math.l2_loss()"
        extra_batch = loss_tensor.shape.without(batch_dims)
        if extra_batch:  # output added more batch dims. We should expand the initial guess
            if extra_batch.volume > 1:
                raise NewBatchDims(loss_tensor.shape, extra_batch)
            else:
                loss_tensor = loss_tensor[next(iter(extra_batch.meshgrid()))]
        loss_native = reshaped_native(loss_tensor, [batch_dims], force_expand=False)
        return loss_tensor.sum, (loss_native,)

    atol = backend.to_float(reshaped_native(solve.abs_tol, [batch_dims]))
    maxi = reshaped_numpy(solve.max_iterations, [batch_dims])
    trj = _SOLVE_TAPES and any(t.should_record_trajectory_for(solve) for t in _SOLVE_TAPES)
    t = time.perf_counter()
    try:
        ret = backend.minimize(solve.method, native_function, x0_flat, atol, maxi, trj)
    except NewBatchDims as new_dims:  # try again with expanded initial guess
        warnings.warn(f"Function returned objective value with dims {new_dims.output_shape} but initial guess was missing {new_dims.missing}. Trying again with expanded initial guess.", RuntimeWarning, stacklevel=2)
        x0 = expand(solve.x0, new_dims.missing)
        solve = copy_with(solve, x0=x0)
        return minimize(f, solve)
    t = time.perf_counter() - t
    if not trj:
        assert isinstance(ret, SolveResult)
        converged = reshaped_tensor(ret.converged, [batch_dims])
        diverged = reshaped_tensor(ret.diverged, [batch_dims])
        x = unflatten_assemble(ret.x)
        iterations = reshaped_tensor(ret.iterations, [batch_dims])
        function_evaluations = reshaped_tensor(ret.function_evaluations, [batch_dims])
        residual = reshaped_tensor(ret.residual, [batch_dims])
        result = SolveInfo(solve, x, residual, iterations, function_evaluations, converged, diverged, ret.method, ret.message, t)
    else:  # trajectory
        assert isinstance(ret, (tuple, list)) and all(isinstance(r, SolveResult) for r in ret)
        converged = reshaped_tensor(ret[-1].converged, [batch_dims])
        diverged = reshaped_tensor(ret[-1].diverged, [batch_dims])
        x = unflatten_assemble(ret[-1].x)
        x_ = unflatten_assemble(numpy.stack([r.x for r in ret]), additional_dims=batch('trajectory'), convert=False)
        residual = stack([reshaped_tensor(r.residual, [batch_dims]) for r in ret], batch('trajectory'))
        iterations = reshaped_tensor(ret[-1].iterations, [batch_dims])
        function_evaluations = stack([reshaped_tensor(r.function_evaluations, [batch_dims]) for r in ret], batch('trajectory'))
        result = SolveInfo(solve, x_, residual, iterations, function_evaluations, converged, diverged, ret[-1].method, ret[-1].message, t)
    for tape in _SOLVE_TAPES:
        tape._add(solve, trj, result)
    result.convergence_check(False)  # raises ConvergenceException
    return x
def minimum(x: Union[float, phiml.math._tensors.Tensor], y: Union[float, phiml.math._tensors.Tensor])

Computes the element-wise minimum of x and y.
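
A short sketch (scalar arguments broadcast against Tensor arguments):

>>> from phiml import math
>>> from phiml.math import wrap, spatial
>>> math.minimum(wrap([1., 4., 2.], spatial('x')), 3.)   # element-wise min with the scalar 3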

Expand source code
def minimum(x: Union[Tensor, float], y: Union[Tensor, float]):
    """ Computes the element-wise minimum of `x` and `y`. """
    return custom_op2(x, y, minimum, lambda x_, y_: choose_backend(x_, y_).minimum(x_, y_), op_name='minimum')
def native(value: Union[phiml.math._tensors.Tensor, numbers.Number, tuple, list, Any])

Returns the native tensor representation of value. If value is a Tensor, this is equal to calling Tensor.native(). Otherwise, checks that value is a valid tensor object and returns it.

Args

value
Tensor or native tensor or tensor-like.

Returns

Native tensor representation

Raises

ValueError if the tensor cannot be transposed to match target_shape
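
A short sketch (assuming the default NumPy backend, under which the returned native tensor is a numpy.ndarray):

>>> from phiml import math
>>> from phiml.math import wrap, channel
>>> t = wrap([1., 2., 3.], channel('vector'))
>>> math.native(t)   # the backend tensor stored inside t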

Expand source code
def native(value: Union[Tensor, Number, tuple, list, Any]):
    """
    Returns the native tensor representation of `value`.
    If `value` is a `phiml.math.Tensor`, this is equal to calling `phiml.math.Tensor.native()`.
    Otherwise, checks that `value` is a valid tensor object and returns it.

    Args:
        value: `Tensor` or native tensor or tensor-like.

    Returns:
        Native tensor representation

    Raises:
        ValueError if the tensor cannot be transposed to match target_shape
    """
    if isinstance(value, Tensor):
        return value.native()
    else:
        choose_backend(value)  # check that value is a native tensor
        return value
def native_call(f: Callable, *inputs: phiml.math._tensors.Tensor, channels_last=None, channel_dim='vector', spatial_dim=None)

Calls f with the native representations of the inputs tensors in standard layout and returns the result as a Tensor.

All inputs are converted to native tensors (including precision cast) depending on channels_last:

  • channels_last=True: Dimension layout (total_batch_size, spatial_dims…, total_channel_size)
  • channels_last=False: Dimension layout (total_batch_size, total_channel_size, spatial_dims…)

All batch dimensions are compressed into a single dimension with total_batch_size = input.shape.batch.volume. The same is done for all channel dimensions.

Additionally, missing batch and spatial dimensions are added so that all inputs have the same batch and spatial shape.

Args

f
Function to be called on native tensors of inputs. The function output must have the same dimension layout as the inputs, unless overridden by spatial_dim, and the batch size must be identical.
*inputs
Uniform Tensor arguments
channels_last
(Optional) Whether to put channels as the last dimension of the native representation. If None, the channels are put in the default position associated with the current backend, see phiml.math.backend.Backend.prefers_channels_last().
channel_dim
Name of the channel dimension of the result.
spatial_dim
Name of the spatial dimension of the result.

Returns

Tensor with batch and spatial dimensions of inputs, unless overridden by spatial_dim, and single channel dimension channel_dim.
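
For illustration, a minimal sketch calling a function that operates on native arrays (the dimension names used here are arbitrary):

>>> from phiml import math
>>> from phiml.math import batch, spatial, channel
>>> data = math.random_uniform(batch(examples=2), spatial(x=8), channel(c=3))
>>> def double(native):   # receives a native array shaped (2, 8, 3) or (2, 3, 8), depending on the backend
...     return native * 2
>>> result = math.native_call(double, data)   # wrapped back into a Tensor with channel dim 'vector'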

Expand source code
def native_call(f: Callable, *inputs: Tensor, channels_last=None, channel_dim='vector', spatial_dim=None):
    """
    Calls `f` with the native representations of the `inputs` tensors in standard layout and returns the result as a `Tensor`.

    All inputs are converted to native tensors (including precision cast) depending on `channels_last`:

    * `channels_last=True`: Dimension layout `(total_batch_size, spatial_dims..., total_channel_size)`
    * `channels_last=False`: Dimension layout `(total_batch_size, total_channel_size, spatial_dims...)`

    All batch dimensions are compressed into a single dimension with `total_batch_size = input.shape.batch.volume`.
    The same is done for all channel dimensions.

    Additionally, missing batch and spatial dimensions are added so that all `inputs` have the same batch and spatial shape.

    Args:
        f: Function to be called on native tensors of `inputs`.
            The function output must have the same dimension layout as the inputs, unless overridden by `spatial_dim`,
            and the batch size must be identical.
        *inputs: Uniform `Tensor` arguments
        channels_last: (Optional) Whether to put channels as the last dimension of the native representation.
            If `None`, the channels are put in the default position associated with the current backend,
            see `phiml.math.backend.Backend.prefers_channels_last()`.
        channel_dim: Name of the channel dimension of the result.
        spatial_dim: Name of the spatial dimension of the result.

    Returns:
        `Tensor` with batch and spatial dimensions of `inputs`, unless overridden by `spatial_dim`,
        and single channel dimension `channel_dim`.
    """
    if channels_last is None:
        try:
            backend = choose_backend(f)
        except NoBackendFound:
            backend = choose_backend_t(*inputs, prefer_default=True)
        channels_last = backend.prefers_channels_last()
    batch = merge_shapes(*[i.shape.batch for i in inputs])
    spatial = merge_shapes(*[i.shape.spatial for i in inputs])
    natives = []
    for i in inputs:
        groups = (batch, *i.shape.spatial.names, i.shape.channel) if channels_last else (batch, i.shape.channel, *i.shape.spatial.names)
        natives.append(reshaped_native(i, groups, force_expand=False))
    output = f(*natives)
    if isinstance(channel_dim, str):
        channel_dim = channel(channel_dim)
    assert isinstance(channel_dim, Shape), "channel_dim must be a Shape or str"
    if isinstance(output, (tuple, list)):
        raise NotImplementedError()
    else:
        if spatial_dim is None:
            groups = (batch, *spatial, channel_dim) if channels_last else (batch, channel_dim, *spatial)
        else:
            if isinstance(spatial_dim, str):
                spatial_dim = spatial(spatial_dim)
            assert isinstance(spatial_dim, Shape), "spatial_dim must be a Shape or str"
            groups = (batch, *spatial_dim, channel_dim) if channels_last else (batch, channel_dim, *spatial_dim)
        result = reshaped_tensor(output, groups, convert=False)
        if result.shape.get_size(channel_dim.name) == 1 and not channel_dim.item_names[0]:
            result = result.dimension(channel_dim.name)[0]  # remove vector dim if not required
        return result
def neighbor_max(grid: phiml.math._tensors.Tensor, dims: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function spatial>, padding: Union[Extrapolation, float, phiml.math._tensors.Tensor, str, None] = None) ‑> phiml.math._tensors.Tensor

neighbor_reduce() with reduce_fun set to max_().

Expand source code
def neighbor_max(grid: Tensor, dims: DimFilter = spatial, padding: Union[Extrapolation, float, Tensor, str, None] = None) -> Tensor:
    """`neighbor_reduce` with `reduce_fun` set to `phiml.math.max`."""
    return neighbor_reduce(math.max_, grid, dims, padding)
def neighbor_mean(grid: phiml.math._tensors.Tensor, dims: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function spatial>, padding: Union[Extrapolation, float, phiml.math._tensors.Tensor, str, None] = None) ‑> phiml.math._tensors.Tensor

neighbor_reduce() with reduce_fun set to mean().

Expand source code
def neighbor_mean(grid: Tensor, dims: DimFilter = spatial, padding: Union[Extrapolation, float, Tensor, str, None] = None) -> Tensor:
    """`neighbor_reduce` with `reduce_fun` set to `phiml.math.mean`."""
    return neighbor_reduce(math.mean, grid, dims, padding)
def neighbor_min(grid: phiml.math._tensors.Tensor, dims: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function spatial>, padding: Union[Extrapolation, float, phiml.math._tensors.Tensor, str, None] = None) ‑> phiml.math._tensors.Tensor

neighbor_reduce() with reduce_fun set to min_().

Expand source code
def neighbor_min(grid: Tensor, dims: DimFilter = spatial, padding: Union[Extrapolation, float, Tensor, str, None] = None) -> Tensor:
    """`neighbor_reduce` with `reduce_fun` set to `phiml.math.min`."""
    return neighbor_reduce(math.min_, grid, dims, padding)
def neighbor_reduce(reduce_fun: Callable, grid: phiml.math._tensors.Tensor, dims: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function spatial>, padding: Union[Extrapolation, float, phiml.math._tensors.Tensor, str, None] = None) ‑> phiml.math._tensors.Tensor

Computes the sum/mean/min/max/prod/etc. of two neighboring values along each dimension in dims. The result tensor has one entry less than grid in each reduced dimension unless padding is specified.

With two dims, each output value is reduced from 4 neighboring values; in 3D, from 8.

Args

reduce_fun
Reduction function, such as sum_(), mean(), max_(), min_(), prod().
grid
Values to reduce.
dims
Dimensions along which neighbors should be reduced.
padding
Padding at the upper edges of grid along dims. If not None, the result tensor will have the same shape as grid.

Returns

Tensor
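
For illustration, a minimal sketch summing adjacent values along one dimension (here math.sum is passed as reduce_fun):

>>> from phiml import math
>>> from phiml.math import wrap, spatial
>>> grid = wrap([0., 1., 4., 9.], spatial('x'))
>>> math.neighbor_reduce(math.sum, grid, 'x')   # pairs (0+1, 1+4, 4+9), one entry less along x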

Expand source code
def neighbor_reduce(reduce_fun: Callable, grid: Tensor, dims: DimFilter = spatial, padding: Union[Extrapolation, float, Tensor, str, None] = None) -> Tensor:
    """
    Computes the sum/mean/min/max/prod/etc. of two neighboring values along each dimension in `dims`.
    The result tensor has one entry less than `grid` in each averaged dimension unless `padding` is specified.

    With two `dims`, each output value is reduced from 4 neighboring values; in 3D, from 8.

    Args:
        reduce_fun: Reduction function, such as `sum`, `mean`, `max`, `min`, `prod`.
        grid: Values to reduce.
        dims: Dimensions along which neighbors should be reduced.
        padding: Padding at the upper edges of `grid` along `dims`. If not `None`, the result tensor will have the same shape as `grid`.

    Returns:
        `Tensor`
    """
    result = grid
    dims = grid.shape.only(dims)
    for dim in dims:
        l, r = shift(result, (0, 1), dim, padding, None)
        lr = stack([l, r], batch('_reduce'))
        result = reduce_fun(lr, '_reduce')
    return result
def neighbor_sum(grid: phiml.math._tensors.Tensor, dims: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function spatial>, padding: Union[Extrapolation, float, phiml.math._tensors.Tensor, str, None] = None) ‑> phiml.math._tensors.Tensor

neighbor_reduce() with reduce_fun set to sum_().

Expand source code
def neighbor_sum(grid: Tensor, dims: DimFilter = spatial, padding: Union[Extrapolation, float, Tensor, str, None] = None) -> Tensor:
    """`neighbor_reduce` with `reduce_fun` set to `phiml.math.sum`."""
    return neighbor_reduce(math.sum_, grid, dims, padding)
def non_batch(obj) ‑> phiml.math._shape.Shape

Returns the non-batch dimensions of an object.

Args

obj
Shape or object with a valid shape() property.

Returns

Shape
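
A short sketch (Shapes can be combined with &; the same call also works on Tensors and other shaped objects):

>>> from phiml.math import batch, spatial, non_batch, zeros
>>> s = batch(b=10) & spatial(x=4, y=3)
>>> non_batch(s)          # the spatial dims only
>>> non_batch(zeros(s))   # works on anything exposing a shape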

Expand source code
def non_batch(obj) -> Shape:
    """
    Returns the non-batch dimensions of an object.

    Args:
        obj: `Shape` or object with a valid `shape` property.

    Returns:
        `Shape`
    """
    from .magic import Shaped
    if isinstance(obj, Shape):
        return obj.non_batch
    elif isinstance(obj, Shaped):
        return shape(obj).non_batch
    else:
        raise AssertionError(f"non_batch() must be called either on a Shape or an object with a 'shape' property but got {obj}")
def non_channel(obj) ‑> phiml.math._shape.Shape

Returns the non-channel dimensions of an object.

Args

obj
Shape or object with a valid shape() property.

Returns

Shape

Expand source code
def non_channel(obj) -> Shape:
    """
    Returns the non-channel dimensions of an object.

    Args:
        obj: `Shape` or object with a valid `shape` property.

    Returns:
        `Shape`
    """
    from .magic import Shaped
    if isinstance(obj, Shape):
        return obj.non_channel
    elif isinstance(obj, Shaped):
        return shape(obj).non_channel
    else:
        raise AssertionError(f"non_channel() must be called either on a Shape or an object with a 'shape' property but got {obj}")
def non_dual(obj) ‑> phiml.math._shape.Shape

Returns the non-dual dimensions of an object.

Args

obj
Shape or object with a valid shape() property.

Returns

Shape

Expand source code
def non_dual(obj) -> Shape:
    """
    Returns the non-dual dimensions of an object.

    Args:
        obj: `Shape` or object with a valid `shape` property.

    Returns:
        `Shape`
    """
    from .magic import Shaped
    if isinstance(obj, Shape):
        return obj.non_dual
    elif isinstance(obj, Shaped):
        return shape(obj).non_dual
    else:
        raise AssertionError(f"non_dual() must be called either on a Shape or an object with a 'shape' property but got {obj}")
def non_instance(obj) ‑> phiml.math._shape.Shape

Returns the non-instance dimensions of an object.

Args

obj
Shape or object with a valid shape() property.

Returns

Shape

Expand source code
def non_instance(obj) -> Shape:
    """
    Returns the non-instance dimensions of an object.

    Args:
        obj: `Shape` or object with a valid `shape` property.

    Returns:
        `Shape`
    """
    from .magic import Shaped
    if isinstance(obj, Shape):
        return obj.non_instance
    elif isinstance(obj, Shaped):
        return shape(obj).non_instance
    else:
        raise AssertionError(f"non_instance() must be called either on a Shape or an object with a 'shape' property but got {obj}")
def non_primal(obj) ‑> phiml.math._shape.Shape

Returns the batch and dual dimensions of an object.

Args

obj
Shape or object with a valid shape() property.

Returns

Shape

Expand source code
def non_primal(obj) -> Shape:
    """
    Returns the batch and dual dimensions of an object.

    Args:
        obj: `Shape` or object with a valid `shape` property.

    Returns:
        `Shape`
    """
    from .magic import Shaped
    if isinstance(obj, Shape):
        return obj.non_primal
    elif isinstance(obj, Shaped):
        return shape(obj).non_primal
    else:
        raise AssertionError(f"non_dual() must be called either on a Shape or an object with a 'shape' property but got {obj}")
def non_spatial(obj) ‑> phiml.math._shape.Shape

Returns the non-spatial dimensions of an object.

Args

obj
Shape or object with a valid shape() property.

Returns

Shape

Expand source code
def non_spatial(obj) -> Shape:
    """
    Returns the non-spatial dimensions of an object.

    Args:
        obj: `Shape` or object with a valid `shape` property.

    Returns:
        `Shape`
    """
    from .magic import Shaped
    if isinstance(obj, Shape):
        return obj.non_spatial
    elif isinstance(obj, Shaped):
        return shape(obj).non_spatial
    else:
        raise AssertionError(f"non_spatial() must be called either on a Shape or an object with a 'shape' property but got {obj}")
def nonzero(value: phiml.math._tensors.Tensor, list_dim: Union[str, phiml.math._shape.Shape] = (nonzeroⁱ=None), index_dim: phiml.math._shape.Shape = (vectorᶜ=None))

Get spatial indices of non-zero / True values.

Batch dimensions are preserved by this operation. If channel dimensions are present, this method returns the indices where any component is nonzero.

Implementations:

  • NumPy: numpy.argwhere (https://numpy.org/doc/stable/reference/generated/numpy.argwhere.html)
  • PyTorch: torch.nonzero (https://pytorch.org/docs/stable/generated/torch.nonzero.html)
  • TensorFlow: tf.where(tf.not_equal(values, 0)) (https://www.tensorflow.org/api_docs/python/tf/where)
  • Jax: jax.numpy.nonzero (https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.nonzero.html)

Args

value
spatial tensor to find non-zero / True values in.
list_dim
Dimension listing non-zero values.
index_dim
Index dimension.

Returns

Tensor of shape (batch dims…, list_dim=#non-zero, index_dim=value.shape.spatial_rank)
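
For illustration, a minimal sketch (the default dimension names nonzero and vector are used for the result):

>>> from phiml import math
>>> from phiml.math import wrap, spatial
>>> values = wrap([[0, 1], [2, 0]], spatial(x=2, y=2))
>>> idx = math.nonzero(values)   # instance dim 'nonzero' lists hits, channel dim 'vector' holds the (x, y) indices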

Expand source code
def nonzero(value: Tensor, list_dim: Union[Shape, str] = instance('nonzero'), index_dim: Shape = channel('vector')):
    """
    Get spatial indices of non-zero / True values.
    
    Batch dimensions are preserved by this operation.
    If channel dimensions are present, this method returns the indices where any component is nonzero.

    Implementations:

    * NumPy: [`numpy.argwhere`](https://numpy.org/doc/stable/reference/generated/numpy.argwhere.html)
    * PyTorch: [`torch.nonzero`](https://pytorch.org/docs/stable/generated/torch.nonzero.html)
    * TensorFlow: [`tf.where(tf.not_equal(values, 0))`](https://www.tensorflow.org/api_docs/python/tf/where)
    * Jax: [`jax.numpy.nonzero`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.nonzero.html)

    Args:
        value: spatial tensor to find non-zero / True values in.
        list_dim: Dimension listing non-zero values.
        index_dim: Index dimension.

    Returns:
        `Tensor` of shape (batch dims..., `list_dim`=#non-zero, `index_dim`=value.shape.spatial_rank)

    """
    if value.shape.channel_rank > 0:
        value = sum_(abs(value), value.shape.channel)
    if isinstance(list_dim, str):
        list_dim = instance(list_dim)
    def unbatched_nonzero(value: Tensor):
        if isinstance(value, CompressedSparseMatrix):
            value = value.decompress()
        if isinstance(value, SparseCoordinateTensor):
            nonzero_values = nonzero(value._values)
            nonzero_indices = value._indices[nonzero_values]
            index_dim_ = index_dim.with_size(channel(value._indices).item_names[0])
            return rename_dims(rename_dims(nonzero_indices, instance, list_dim), channel, index_dim_)
        else:
            dims = value.shape.non_channel
            native = reshaped_native(value, [*dims])
            backend = choose_backend(native)
            indices = backend.nonzero(native)
            indices_shape = Shape(backend.staticshape(indices), (list_dim.name, index_dim.name), (list_dim.type, index_dim.type), (None, dims.names))
            return NativeTensor(indices, indices_shape)
    return broadcast_op(unbatched_nonzero, [value], iter_dims=value.shape.batch.names)
def normalize_to(target: phiml.math._tensors.Tensor, source: Union[float, phiml.math._tensors.Tensor], epsilon=1e-05)

Multiplies the target so that its sum matches the source.

Args

target
Tensor
source
Tensor or constant
epsilon
Small number to prevent division by zero.

Returns

Normalized tensor of the same shape as target
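
A short sketch (the sum of target is rescaled to match source):

>>> from phiml import math
>>> from phiml.math import wrap, spatial
>>> t = wrap([1., 3.], spatial('x'))
>>> math.normalize_to(t, 1.)   # sums to 1, i.e. 0.25 and 0.75 along x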

Expand source code
def normalize_to(target: Tensor, source: Union[float, Tensor], epsilon=1e-5):
    """
    Multiplies the target so that its sum matches the source.

    Args:
        target: `Tensor`
        source: `Tensor` or constant
        epsilon: Small number to prevent division by zero.

    Returns:
        Normalized tensor of the same shape as target
    """
    target_total = math.sum_(target)
    denominator = math.maximum(target_total, epsilon) if epsilon is not None else target_total
    source_total = math.sum_(source)
    return target * (source_total / denominator)
def numpy(value: Union[phiml.math._tensors.Tensor, numbers.Number, tuple, list, Any])

Converts value to a numpy.ndarray where value must be a Tensor, backend tensor or tensor-like. If value is a Tensor, this is equal to calling Tensor.numpy().

Note: Using this function breaks the autograd chain. The returned tensor is not differentiable. To get a differentiable tensor, use Tensor.native() instead.

Transposes the underlying tensor to match the name order and adds singleton dimensions for new dimension names. If a dimension of the tensor is not listed in order, a ValueError is raised.

If value is a NumPy array, it may be returned directly.

Returns

NumPy representation of value

Raises

ValueError if the tensor cannot be transposed to match target_shape

Expand source code
def numpy(value: Union[Tensor, Number, tuple, list, Any]):
    """
    Converts `value` to a `numpy.ndarray` where value must be a `Tensor`, backend tensor or tensor-like.
    If `value` is a `phiml.math.Tensor`, this is equal to calling `phiml.math.Tensor.numpy()`.

    *Note*: Using this function breaks the autograd chain. The returned tensor is not differentiable.
    To get a differentiable tensor, use `Tensor.native()` instead.

    Transposes the underlying tensor to match the name order and adds singleton dimensions for new dimension names.
    If a dimension of the tensor is not listed in `order`, a `ValueError` is raised.

    If `value` is a NumPy array, it may be returned directly.

    Returns:
        NumPy representation of `value`

    Raises:
        ValueError if the tensor cannot be transposed to match target_shape
    """
    if isinstance(value, Tensor):
        return value.numpy()
    else:
        backend = choose_backend(value)
        return backend.numpy(value)
def ones(*shape: phiml.math._shape.Shape, dtype: Union[phiml.backend._dtype.DType, tuple, type] = None) ‑> phiml.math._tensors.Tensor

Define a tensor of the specified shape with value 1.0 / 1 / True everywhere.

This method may not immediately allocate the memory to store the values.

See Also: ones_like(), zeros().

Args

*shape
This (possibly empty) sequence of Shapes is concatenated, preserving the order.
dtype
Data type as DType object. Defaults to float matching the current precision setting.

Returns

Tensor
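
A short sketch (shapes passed as positional arguments are concatenated in order):

>>> from phiml.math import ones, batch, spatial
>>> ones(spatial(x=4, y=3))           # float ones of shape (x=4, y=3)
>>> ones(batch(b=2), spatial(x=4))    # combined batch and spatial shape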

Expand source code
def ones(*shape: Shape, dtype: Union[DType, tuple, type] = None) -> Tensor:
    """
    Define a tensor with specified shape with value `1.0`/ `1` / `True` everywhere.
    
    This method may not immediately allocate the memory to store the values.

    See Also:
        `ones_like()`, `zeros()`.

    Args:
        *shape: This (possibly empty) sequence of `Shape`s is concatenated, preserving the order.
        dtype: Data type as `DType` object. Defaults to `float` matching the current precision setting.

    Returns:
        `Tensor`
    """
    return _initialize(lambda shape: expand_tensor(NativeTensor(default_backend().ones((), dtype=DType.as_dtype(dtype)), EMPTY_SHAPE), shape), shape)
def ones_like(value: phiml.math._tensors.Tensor) ‑> phiml.math._tensors.Tensor

Create a Tensor containing only 1.0 / 1 / True with the same shape and dtype as value.

Expand source code
def ones_like(value: Tensor) -> Tensor:
    """ Create a `Tensor` containing only `1.0` / `1` / `True` with the same shape and dtype as `obj`. """
    return zeros_like(value) + 1
def pack_dims(value, dims: Union[str, tuple, list, set, ForwardRef('Shape'), Callable], packed_dim: phiml.math._shape.Shape, pos: Optional[int] = None, **kwargs)

Compresses multiple dimensions into a single dimension by concatenating the elements. Elements along the new dimensions are laid out according to the order of dims. If the order of dims differs from the current dimension order, the tensor is transposed accordingly. This function replaces the traditional reshape for these cases.

The type of the new dimension will be equal to the types of dims. If dims have varying types, the new dimension will be a batch dimension.

If none of dims exist on value, packed_dim will be added only if it is given with a definite size and value is not a primitive type.

See Also: unpack_dim()

Args

value
Shapable, such as Tensor.
dims
Dimensions to be compressed in the specified order.
packed_dim
Single-dimension Shape.
pos
Index of new dimension. None for automatic, -1 for last, 0 for first.
**kwargs
Additional keyword arguments required by specific implementations. Adding spatial dimensions to fields requires the bounds: Box argument specifying the physical extent of the new dimensions. Adding batch dimensions must always work without keyword arguments.

Returns

Same type as value.

Examples

>>> pack_dims(math.zeros(spatial(x=4, y=3)), spatial, instance('points'))
(pointsⁱ=12) const 0.0
Expand source code
def pack_dims(value, dims: DimFilter, packed_dim: Shape, pos: Optional[int] = None, **kwargs):
    """
    Compresses multiple dimensions into a single dimension by concatenating the elements.
    Elements along the new dimensions are laid out according to the order of `dims`.
    If the order of `dims` differs from the current dimension order, the tensor is transposed accordingly.
    This function replaces the traditional `reshape` for these cases.

    The type of the new dimension will be equal to the types of `dims`.
    If `dims` have varying types, the new dimension will be a batch dimension.

    If none of `dims` exist on `value`, `packed_dim` will be added only if it is given with a definite size and `value` is not a primitive type.

    See Also:
        `unpack_dim()`

    Args:
        value: `phiml.math.magic.Shapable`, such as `phiml.math.Tensor`.
        dims: Dimensions to be compressed in the specified order.
        packed_dim: Single-dimension `Shape`.
        pos: Index of new dimension. `None` for automatic, `-1` for last, `0` for first.
        **kwargs: Additional keyword arguments required by specific implementations.
            Adding spatial dimensions to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions.
            Adding batch dimensions must always work without keyword arguments.

    Returns:
        Same type as `value`.

    Examples:
        >>> pack_dims(math.zeros(spatial(x=4, y=3)), spatial, instance('points'))
        (pointsⁱ=12) const 0.0
    """
    if isinstance(value, (Number, bool)):
        return value
    assert isinstance(value, Shapable) and isinstance(value, Sliceable) and isinstance(value, Shaped), f"value must be Shapable but got {type(value)}"
    dims = shape(value).only(dims, reorder=True)
    if packed_dim in shape(value):
        assert packed_dim in dims, f"Cannot pack dims into new dimension {packed_dim} because it already exists on value {value} and is not packed."
    if len(dims) == 0 or all(dim not in shape(value) for dim in dims):
        return value if packed_dim.size is None else expand(value, packed_dim, **kwargs)  # Inserting size=1 can cause shape errors
    elif len(dims) == 1:
        return rename_dims(value, dims, packed_dim, **kwargs)
    # --- First try __pack_dims__ ---
    if hasattr(value, '__pack_dims__'):
        result = value.__pack_dims__(dims.names, packed_dim, pos, **kwargs)
        if result is not NotImplemented:
            return result
    # --- Next try Tree Node ---
    if isinstance(value, PhiTreeNode):
        new_attributes = {a: pack_dims(getattr(value, a), dims, packed_dim, pos=pos, **kwargs) for a in all_attributes(value)}
        return copy_with(value, **new_attributes)
    # --- Fallback: unstack and stack ---
    if shape(value).only(dims).volume > 8:
        warnings.warn(f"pack_dims() default implementation is slow on large dimensions ({shape(value).only(dims)}). Please implement __pack_dims__() for {type(value).__name__} as defined in phiml.math.magic", RuntimeWarning, stacklevel=2)
    return stack(unstack(value, dims), packed_dim, **kwargs)
def pad(value: phiml.math._tensors.Tensor, widths: Union[dict, tuple, list], mode: Union[ForwardRef('e_.Extrapolation'), phiml.math._tensors.Tensor, numbers.Number, str, dict] = 0, **kwargs) ‑> phiml.math._tensors.Tensor

Pads a tensor along the specified dimensions, determining the added values using the given extrapolation. Unlike Extrapolation.pad(), this function can handle negative widths which slice off outer values.

Args

value
Tensor to be padded
widths

Number of values to add at the edge of value. Negative values can be used to slice off edge values. Must be one of the following:

  • tuple containing (lower: int, upper: int). This will pad all non-batch dimensions by lower and upper at the lower and upper edge, respectively.
  • dict mapping dim: str -> (lower: int, upper: int)
  • Sequence of slicing dicts. This will add all values specified by the slicing dicts and is the inverse operation to slice_off(). Exactly one value in each slicing dict must be a slice object.
mode
Padding mode used to determine values added from positive widths. Must be one of the following: Extrapolation, Tensor or number for constant extrapolation, name of extrapolation as str.
kwargs
Additional padding arguments. These are ignored by the standard extrapolations defined in phiml.math.extrapolation but can be used to pass additional contextual information to custom extrapolations.

Returns

Padded Tensor

Examples

>>> math.pad(math.ones(spatial(x=10, y=10)), {'x': (1, 1), 'y': (2, 1)}, 0)
(xˢ=12, yˢ=13) 0.641 ± 0.480 (0e+00...1e+00)
>>> math.pad(math.ones(spatial(x=10, y=10)), {'x': (1, -1)}, 0)
(xˢ=10, yˢ=10) 0.900 ± 0.300 (0e+00...1e+00)
Expand source code
def pad(value: Tensor, widths: Union[dict, tuple, list], mode: Union['e_.Extrapolation', Tensor, Number, str, dict] = 0, **kwargs) -> Tensor:
    """
    Pads a tensor along the specified dimensions, determining the added values using the given extrapolation.
    Unlike `Extrapolation.pad()`, this function can handle negative widths which slice off outer values.

    Args:
        value: `Tensor` to be padded
        widths: Number of values to add at the edge of `value`. Negative values can be used to slice off edge values. Must be one of the following:

            * `tuple` containing `(lower: int, upper: int)`. This will pad all non-batch dimensions by `lower` and `upper` at the lower and upper edge, respectively.
            * `dict` mapping `dim: str -> (lower: int, upper: int)`
            * Sequence of slicing `dict`s. This will add all values specified by the slicing dicts and is the inverse operation to `slice_off`. Exactly one value in each slicing dict must be a `slice` object.

        mode: Padding mode used to determine values added from positive `widths`.
            Must be one of the following: `Extrapolation`, `Tensor` or number for constant extrapolation, name of extrapolation as `str`.
        kwargs: Additional padding arguments.
            These are ignored by the standard extrapolations defined in `phiml.math.extrapolation` but can be used to pass additional contextual information to custom extrapolations.

    Returns:
        Padded `Tensor`

    Examples:
        >>> math.pad(math.ones(spatial(x=10, y=10)), {'x': (1, 1), 'y': (2, 1)}, 0)
        (xˢ=12, yˢ=13) 0.641 ± 0.480 (0e+00...1e+00)

        >>> math.pad(math.ones(spatial(x=10, y=10)), {'x': (1, -1)}, 0)
        (xˢ=10, yˢ=10) 0.900 ± 0.300 (0e+00...1e+00)
    """
    mode = e_.as_extrapolation(mode)
    if isinstance(widths, (tuple, list)):
        if len(widths) == 0 or isinstance(widths[0], dict):  # add sliced-off slices
            return _pad_slices(value, widths, mode, **kwargs)
        if len(widths) == 2 and isinstance(widths[0], int) and isinstance(widths[1], int):  # (lower, upper)
            assert non_batch(value).rank == 1, f"Can only pad 1D tensors (excluding batch dims) when widths=(lower, upper) but got {shape(value)} and widths={widths}"
            widths = {non_batch(value).name: widths}
        else:  # ((lo0, up0), (lo1, up1), ...)
            assert len(widths) == non_batch(value), f"Cannot pad tensor with non-batch dims {non_batch(value)} by widths {widths}. Sizes must match."
            warnings.warn("Padding by sequence of (lower, upper) is not recommended. Please use a dict instead.", SyntaxWarning, stacklevel=2)
            widths = {dim: w for dim, w in zip(non_batch(value).names, widths)}
    has_negative_widths = any(w0 < 0 or w1 < 0 for w0, w1 in widths.values())
    has_positive_widths = any(w0 > 0 or w1 > 0 for w0, w1 in widths.values())
    slices = None
    if has_negative_widths:
        slices = {dim: slice(max(0, -w[0]), min(0, w[1]) or None) for dim, w in widths.items()}
        widths = {dim: (max(0, w[0]), max(0, w[1])) for dim, w in widths.items()}
    result_padded = mode.pad(value, widths, **kwargs) if has_positive_widths else value
    result_sliced = result_padded[slices] if has_negative_widths else result_padded
    return result_sliced
def pairwise_differences(positions: phiml.math._tensors.Tensor, max_distance: Union[float, phiml.math._tensors.Tensor] = None, format: Union[str, phiml.math._tensors.Tensor] = 'dense', default: Optional[float] = None, method: str = 'sparse') ‑> phiml.math._tensors.Tensor

Computes the distance matrix containing the pairwise position differences between each pair of points. Points that are further apart than max_distance (if specified) are assigned a distance value of 0. The diagonal of the matrix (self-distance) also consists purely of zero-vectors and may or may not be stored explicitly.

Args

positions
Tensor. Channel dimensions are interpreted as position components. Instance and spatial dimensions list nodes.
max_distance
Scalar or Tensor specifying a max_radius for each point separately. Can contain additional batch dimensions but spatial/instance dimensions must match positions if present. If not specified, uses an infinite cutoff radius, i.e. all points will be considered neighbors.
format
Matrix format as str or concrete sparsity pattern as Tensor. Allowed strings are 'dense', 'csr', 'coo', 'csc'. When a Tensor is passed, it needs to have all instance and spatial dims as positions as well as corresponding dual dimensions. The distances will be evaluated at all stored entries of the format tensor.
default
Value the sparse tensor returns for non-stored values. Must be 0 or None.

Returns

Distance matrix as sparse or dense Tensor, depending on format. For each spatial/instance dimension in positions, the matrix also contains a dual dimension of the same name and size. The matrix also contains all batch dimensions of positions and one channel dimension called vector.

Examples

>>> pos = vec(x=0, y=tensor([0, 1, 2.5], instance('particles')))
>>> dx = pairwise_distances(pos, format='dense', max_distance=2)
>>> dx.particles[0]
(x=0.000, y=0.000); (x=0.000, y=1.000); (x=0.000, y=0.000) (~particlesᵈ=3, vectorᶜ=x,y)
Expand source code
def pairwise_differences(positions: Tensor,
                       max_distance: Union[float, Tensor] = None,
                       format: Union[str, Tensor] = 'dense',
                       default: Optional[float] = None,
                       method: str = 'sparse') -> Tensor:
    """
    Computes the distance matrix containing the pairwise position differences between each pair of points.
    Points that are further apart than `max_distance` (if specified) are assigned a distance value of `0`.
    The diagonal of the matrix (self-distance) also consists purely of zero-vectors and may or may not be stored explicitly.

    Args:
        positions: `Tensor`.
            Channel dimensions are interpreted as position components.
            Instance and spatial dimensions list nodes.
        max_distance: Scalar or `Tensor` specifying a max_radius for each point separately.
            Can contain additional batch dimensions but spatial/instance dimensions must match `positions` if present.
            If not specified, uses an infinite cutoff radius, i.e. all points will be considered neighbors.
        format: Matrix format as `str` or concrete sparsity pattern as `Tensor`.
            Allowed strings are `'dense'`, `'csr'`, `'coo'`, `'csc'`.
            When a `Tensor` is passed, it needs to have all instance and spatial dims as `positions` as well as corresponding dual dimensions.
            The distances will be evaluated at all stored entries of the `format` tensor.
        default: Value the sparse tensor returns for non-stored values. Must be `0` or `None`.

    Returns:
        Distance matrix as sparse or dense `Tensor`, depending on `format`.
        For each spatial/instance dimension in `positions`, the matrix also contains a dual dimension of the same name and size.
        The matrix also contains all batch dimensions of `positions` and one channel dimension called `vector`.

    Examples:
        >>> pos = vec(x=0, y=tensor([0, 1, 2.5], instance('particles')))
        >>> dx = pairwise_distances(pos, format='dense', max_distance=2)
        >>> dx.particles[0]
        (x=0.000, y=0.000); (x=0.000, y=1.000); (x=0.000, y=0.000) (~particlesᵈ=3, vectorᶜ=x,y)
    """
    assert isinstance(positions, Tensor), f"positions must be a Tensor but got {type(positions)}"
    primal_dims = positions.shape.non_batch.non_channel.non_dual
    dual_dims = primal_dims.as_dual()
    # --- Dense ---
    if (isinstance(format, str) and format == 'dense') or (isinstance(format, Tensor) and get_format(format) == 'dense'):
        if isinstance(format, Tensor):
            dual_dims = dual(format)
        dx = unpack_dim(pack_dims(positions, non_batch(positions).non_channel.non_dual, instance('_tmp')), '_tmp', dual_dims) - positions
        if max_distance is not None:
            neighbors = sum_(dx ** 2, channel) <= max_distance ** 2
            default = float('nan') if default is None else default
            dx = where(neighbors, dx, default)
        return dx
    # --- sparse with known connectivity ---
    if isinstance(format, Tensor):  # sparse connectivity specified, no neighborhood search required
        assert max_distance is None, "max_distance not allowed when connectivity is specified (passing a Tensor for format)"
        assert is_sparse(format)
        return map_pairs(lambda p1, p2: p2 - p1, positions, format)
    # --- Sparse neighbor search ---
    assert max_distance is not None, "max_distance must be specified when computing distance in sparse format"
    max_distance = wrap(max_distance)
    index_dtype = DType(int, 32)
    backend = choose_backend_t(positions, max_distance)
    batch_shape = batch(positions) & batch(max_distance)
    if not dual_dims.well_defined:
        assert dual_dims.rank == 1, f"others_dims sizes must be specified when passing more than one dimension but got {dual_dims}"
        dual_dims = dual_dims.with_size(primal_dims.volume)
    # --- Determine mode ---
    # if method == 'sklearn':

    pair_count = None
    table_len = None
    mode = 'vectorize' if batch_shape.volume > 1 and batch_shape.is_uniform else 'loop'
    if backend.is_available(positions):
        if mode == 'vectorize':
            # ToDo determine limits from positions? build_cells+bincount would be enough
            pair_count = 7
    else:  # tracing
        if backend.requires_fixed_shapes_when_tracing():
            # ToDo use fixed limits (set by user)
            pair_count = 7
            mode = 'vectorize'
    # --- Run neighborhood search ---
    from ..backend._partition import find_neighbors_sparse, find_neighbors_semi_sparse, find_neighbors_matscipy, find_neighbors_sklearn
    if mode == 'loop':
        indices = []
        values = []
        for b in batch_shape.meshgrid():
            native_positions = reshaped_native(positions[b], [primal_dims, channel(positions)])
            native_max_dist = max_distance[b].native()
            if method == 'auto':
                method = 'sparse'  # ToDo
            if method == 'sparse':
                nat_rows, nat_cols, nat_vals = find_neighbors_sparse(native_positions, native_max_dist, None, periodic=False, default=default)
            elif method == 'semi-sparse':
                nat_rows, nat_cols, nat_vals, req_pair_count, req_max_occupancy = find_neighbors_semi_sparse(native_positions, native_max_dist, None, periodic=False, default=default)
            elif method == 'matscipy':
                assert positions.available, f"Cannot jit-compile matscipy neighborhood search"
                nat_rows, nat_cols, nat_vals = find_neighbors_matscipy(native_positions, native_max_dist, None, periodic=False)
            elif method == 'sklearn':
                assert positions.available, f"Cannot jit-compile sklearn neighborhood search"
                nat_rows, nat_cols, nat_vals = find_neighbors_sklearn(native_positions, native_max_dist)
            else:
                raise ValueError(method)
            nat_indices = backend.stack([nat_rows, nat_cols], -1)
            indices.append(reshaped_tensor(nat_indices, [instance('pairs'), channel(vector=primal_dims.names + dual_dims.names)], convert=False))
            values.append(reshaped_tensor(nat_vals, [instance('pairs'), channel(positions)]))
        indices = stack(indices, batch_shape)
        values = stack(values, batch_shape)
    elif mode == 'vectorize':
        raise NotImplementedError
        # native_positions = reshaped_native(positions, [batch_shape, primal_dims, channel(positions)])
        # native_max_dist = reshaped_native(max_distance, [batch_shape, primal_dims], force_expand=False)
        # def single_search(pos, r):
        #     return find_neighbors(pos, r, None, periodic=False, pair_count=pair_count, default=default)
        # nat_rows, nat_cols, nat_vals = backend.vectorized_call(single_search, native_positions, native_max_dist, output_dtypes=(index_dtype, index_dtype, positions.dtype))
        # nat_indices = backend.stack([nat_rows, nat_cols], -1)
        # indices = reshaped_tensor(nat_indices, [batch_shape, instance('pairs'), channel(vector=primal_dims.names + dual_dims.names)], convert=False)
        # values = reshaped_tensor(nat_vals, [batch_shape, instance('pairs'), channel(positions)])
    else:
        raise RuntimeError
    # --- Assemble sparse matrix ---
    dense_shape = primal_dims & dual_dims
    coo = SparseCoordinateTensor(indices, values, dense_shape, can_contain_double_entries=False, indices_sorted=False, indices_constant=False)
    return to_format(coo, format)
def pairwise_distances(positions: phiml.math._tensors.Tensor, max_distance: Union[float, phiml.math._tensors.Tensor] = None, format: Union[str, phiml.math._tensors.Tensor] = 'dense', default: Optional[float] = None, method: str = 'sparse') ‑> phiml.math._tensors.Tensor

Computes the distance matrix containing the pairwise position differences between each pair of points. Points that are further apart than max_distance (if specified) are assigned a distance value of 0. The diagonal of the matrix (self-distance) also consists purely of zero-vectors and may or may not be stored explicitly.

Args

positions
Tensor. Channel dimensions are interpreted as position components. Instance and spatial dimensions list nodes.
max_distance
Scalar or Tensor specifying a max_radius for each point separately. Can contain additional batch dimensions but spatial/instance dimensions must match positions if present. If not specified, uses an infinite cutoff radius, i.e. all points will be considered neighbors.
format
Matrix format as str or concrete sparsity pattern as Tensor. Allowed strings are 'dense', 'csr', 'coo', 'csc'. When a Tensor is passed, it needs to have all instance and spatial dims as positions as well as corresponding dual dimensions. The distances will be evaluated at all stored entries of the format tensor.
default
Value the sparse tensor returns for non-stored values. Must be 0 or None.

Returns

Distance matrix as sparse or dense Tensor, depending on format. For each spatial/instance dimension in positions, the matrix also contains a dual dimension of the same name and size. The matrix also contains all batch dimensions of positions and one channel dimension called vector.

Examples

>>> pos = vec(x=0, y=tensor([0, 1, 2.5], instance('particles')))
>>> dx = pairwise_distances(pos, format='dense', max_distance=2)
>>> dx.particles[0]
(x=0.000, y=0.000); (x=0.000, y=1.000); (x=0.000, y=0.000) (~particlesᵈ=3, vectorᶜ=x,y)
Expand source code
def pairwise_differences(positions: Tensor,
                       max_distance: Union[float, Tensor] = None,
                       format: Union[str, Tensor] = 'dense',
                       default: Optional[float] = None,
                       method: str = 'sparse') -> Tensor:
    """
    Computes the distance matrix containing the pairwise position differences between each pair of points.
    Points that are further apart than `max_distance` (if specified) are assigned a distance value of `0`.
    The diagonal of the matrix (self-distance) also consists purely of zero-vectors and may or may not be stored explicitly.

    Args:
        positions: `Tensor`.
            Channel dimensions are interpreted as position components.
            Instance and spatial dimensions list nodes.
        max_distance: Scalar or `Tensor` specifying a max_radius for each point separately.
            Can contain additional batch dimensions but spatial/instance dimensions must match `positions` if present.
            If not specified, uses an infinite cutoff radius, i.e. all points will be considered neighbors.
        format: Matrix format as `str` or concrete sparsity pattern as `Tensor`.
            Allowed strings are `'dense'`, `'csr'`, `'coo'`, `'csc'`.
            When a `Tensor` is passed, it needs to have all instance and spatial dims as `positions` as well as corresponding dual dimensions.
            The distances will be evaluated at all stored entries of the `format` tensor.
        default: Value the sparse tensor returns for non-stored values. Must be `0` or `None`.

    Returns:
        Distance matrix as sparse or dense `Tensor`, depending on `format`.
        For each spatial/instance dimension in `positions`, the matrix also contains a dual dimension of the same name and size.
        The matrix also contains all batch dimensions of `positions` and one channel dimension called `vector`.

    Examples:
        >>> pos = vec(x=0, y=tensor([0, 1, 2.5], instance('particles')))
        >>> dx = pairwise_distances(pos, format='dense', max_distance=2)
        >>> dx.particles[0]
        (x=0.000, y=0.000); (x=0.000, y=1.000); (x=0.000, y=0.000) (~particlesᵈ=3, vectorᶜ=x,y)
    """
    assert isinstance(positions, Tensor), f"positions must be a Tensor but got {type(positions)}"
    primal_dims = positions.shape.non_batch.non_channel.non_dual
    dual_dims = primal_dims.as_dual()
    # --- Dense ---
    if (isinstance(format, str) and format == 'dense') or (isinstance(format, Tensor) and get_format(format) == 'dense'):
        if isinstance(format, Tensor):
            dual_dims = dual(format)
        dx = unpack_dim(pack_dims(positions, non_batch(positions).non_channel.non_dual, instance('_tmp')), '_tmp', dual_dims) - positions
        if max_distance is not None:
            neighbors = sum_(dx ** 2, channel) <= max_distance ** 2
            default = float('nan') if default is None else default
            dx = where(neighbors, dx, default)
        return dx
    # --- sparse with known connectivity ---
    if isinstance(format, Tensor):  # sparse connectivity specified, no neighborhood search required
        assert max_distance is None, "max_distance not allowed when connectivity is specified (passing a Tensor for format)"
        assert is_sparse(format)
        return map_pairs(lambda p1, p2: p2 - p1, positions, format)
    # --- Sparse neighbor search ---
    assert max_distance is not None, "max_distance must be specified when computing distance in sparse format"
    max_distance = wrap(max_distance)
    index_dtype = DType(int, 32)
    backend = choose_backend_t(positions, max_distance)
    batch_shape = batch(positions) & batch(max_distance)
    if not dual_dims.well_defined:
        assert dual_dims.rank == 1, f"others_dims sizes must be specified when passing more than one dimension but got {dual_dims}"
        dual_dims = dual_dims.with_size(primal_dims.volume)
    # --- Determine mode ---
    # if method == 'sklearn':

    pair_count = None
    table_len = None
    mode = 'vectorize' if batch_shape.volume > 1 and batch_shape.is_uniform else 'loop'
    if backend.is_available(positions):
        if mode == 'vectorize':
            # ToDo determine limits from positions? build_cells+bincount would be enough
            pair_count = 7
    else:  # tracing
        if backend.requires_fixed_shapes_when_tracing():
            # ToDo use fixed limits (set by user)
            pair_count = 7
            mode = 'vectorize'
    # --- Run neighborhood search ---
    from ..backend._partition import find_neighbors_sparse, find_neighbors_semi_sparse, find_neighbors_matscipy, find_neighbors_sklearn
    if mode == 'loop':
        indices = []
        values = []
        for b in batch_shape.meshgrid():
            native_positions = reshaped_native(positions[b], [primal_dims, channel(positions)])
            native_max_dist = max_distance[b].native()
            if method == 'auto':
                method = 'sparse'  # ToDo
            if method == 'sparse':
                nat_rows, nat_cols, nat_vals = find_neighbors_sparse(native_positions, native_max_dist, None, periodic=False, default=default)
            elif method == 'semi-sparse':
                nat_rows, nat_cols, nat_vals, req_pair_count, req_max_occupancy = find_neighbors_semi_sparse(native_positions, native_max_dist, None, periodic=False, default=default)
            elif method == 'matscipy':
                assert positions.available, f"Cannot jit-compile matscipy neighborhood search"
                nat_rows, nat_cols, nat_vals = find_neighbors_matscipy(native_positions, native_max_dist, None, periodic=False)
            elif method == 'sklearn':
                assert positions.available, f"Cannot jit-compile sklearn neighborhood search"
                nat_rows, nat_cols, nat_vals = find_neighbors_sklearn(native_positions, native_max_dist)
            else:
                raise ValueError(method)
            nat_indices = backend.stack([nat_rows, nat_cols], -1)
            indices.append(reshaped_tensor(nat_indices, [instance('pairs'), channel(vector=primal_dims.names + dual_dims.names)], convert=False))
            values.append(reshaped_tensor(nat_vals, [instance('pairs'), channel(positions)]))
        indices = stack(indices, batch_shape)
        values = stack(values, batch_shape)
    elif mode == 'vectorize':
        raise NotImplementedError
        # native_positions = reshaped_native(positions, [batch_shape, primal_dims, channel(positions)])
        # native_max_dist = reshaped_native(max_distance, [batch_shape, primal_dims], force_expand=False)
        # def single_search(pos, r):
        #     return find_neighbors(pos, r, None, periodic=False, pair_count=pair_count, default=default)
        # nat_rows, nat_cols, nat_vals = backend.vectorized_call(single_search, native_positions, native_max_dist, output_dtypes=(index_dtype, index_dtype, positions.dtype))
        # nat_indices = backend.stack([nat_rows, nat_cols], -1)
        # indices = reshaped_tensor(nat_indices, [batch_shape, instance('pairs'), channel(vector=primal_dims.names + dual_dims.names)], convert=False)
        # values = reshaped_tensor(nat_vals, [batch_shape, instance('pairs'), channel(positions)])
    else:
        raise RuntimeError
    # --- Assemble sparse matrix ---
    dense_shape = primal_dims & dual_dims
    coo = SparseCoordinateTensor(indices, values, dense_shape, can_contain_double_entries=False, indices_sorted=False, indices_constant=False)
    return to_format(coo, format)
def perf_counter(wait_for_tensor, *wait_for_tensors: phiml.math._tensors.Tensor) ‑> phiml.math._tensors.Tensor

Get the time (time.perf_counter()) at which all wait_for_tensors are computed. If all tensors are already available, returns the current time.perf_counter().

Args

wait_for_tensor
Tensor that needs to be computed before the time is measured.
*wait_for_tensors
Additional tensors that need to be computed before the time is measured.

Returns

Time at which all wait_for_tensors are ready as a scalar Tensor.

Expand source code
def perf_counter(wait_for_tensor, *wait_for_tensors: Tensor) -> Tensor:
    """
    Get the time (`time.perf_counter()`) at which all `wait_for_tensors` are computed.
    If all tensors are already available, returns the current `time.perf_counter()`.

    Args:
        wait_for_tensor: `Tensor` that needs to be computed before the time is measured.
        *wait_for_tensors: Additional tensors that need to be computed before the time is measured.

    Returns:
        Time at which all `wait_for_tensors` are ready as a scalar `Tensor`.
    """
    assert not _TRACING_LINEAR, f"Cannot use perf_counter inside a function decorated with @jit_compile_linear"
    if not _TRACING_JIT:
        return wrap(time.perf_counter())
    else:  # jit
        backend = _TRACING_JIT[0]._tracing_in_key.backend
        natives, _, _ = disassemble_tensors([wait_for_tensor, *wait_for_tensors], expand=False)
        natives = [n for n in natives if backend.is_tensor(n, only_native=True)]
        assert natives, f"in jit mode, perf_counter must be given at least one traced tensor, as the current time is evaluated after all tensors are computed."
        def perf_counter(*_wait_for_natives):
            return np.asarray(time.perf_counter())
        return wrap(backend.numpy_call(perf_counter, (), DType(float, 64), *natives))
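
A minimal usage sketch (tensor and dimension names are illustrative; outside of jit, the times are taken eagerly):

from phiml import math

x = math.random_normal(math.spatial(x=1000))
t0 = math.perf_counter(x)       # time after x is available
y = math.sum(x ** 2)
t1 = math.perf_counter(y)       # time after y is available
duration = t1 - t0              # scalar Tensor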
def precision(floating_point_bits: int)

Sets the floating point precision for the local context.

Usage: with precision(p):

This overrides the global setting, see set_global_precision().

Args

floating_point_bits
16 for half, 32 for single, 64 for double
Expand source code
@contextmanager
def precision(floating_point_bits: int):
    """
    Sets the floating point precision for the local context.

    Usage: `with precision(p):`

    This overrides the global setting, see `set_global_precision()`.

    Args:
        floating_point_bits: 16 for half, 32 for single, 64 for double
    """
    _PRECISION.append(floating_point_bits)
    try:
        yield None
    finally:
        _PRECISION.pop(-1)
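
A minimal sketch; tensors created inside the context use the requested precision, tensors created outside keep the global default:

from phiml import math

with math.precision(64):
    x = math.random_normal(math.spatial(x=4))   # created as float64
y = math.random_normal(math.spatial(x=4))        # created with the default precision (32 bits unless changed)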
def primal(obj) ‑> phiml.math._shape.Shape

Returns the instance, spatial and channel dimensions of an object.

Args

obj
Shape or object with a valid shape() property.

Returns

Shape

Expand source code
def primal(obj) -> Shape:
    """
    Returns the instance, spatial and channel dimensions of an object.

    Args:
        obj: `Shape` or object with a valid `shape` property.

    Returns:
        `Shape`
    """
    from .magic import Shaped
    if isinstance(obj, Shape):
        return obj.primal
    elif isinstance(obj, Shaped):
        return shape(obj).primal
    else:
        raise AssertionError(f"primal() must be called either on a Shape or an object with a 'shape' property but got {obj}")
def print(obj: Union[phiml.math._tensors.Tensor, PhiTreeNode, numbers.Number, tuple, list, None] = None, name: str = '')

Print a tensor with no more than two spatial dimensions, slicing it along all batch and channel dimensions.

Unlike NumPy's array printing, the dimensions are sorted. Elements along the alphabetically first dimension are printed to the right, the second dimension upward. Typically, this means x right, y up.

Args

obj
tensor-like
name
name of the tensor

Returns:

Expand source code
def print_(obj: Union[Tensor, PhiTreeNode, Number, tuple, list, None] = None, name: str = ""):
    """
    Print a tensor with no more than two spatial dimensions, slicing it along all batch and channel dimensions.
    
    Unlike NumPy's array printing, the dimensions are sorted.
    Elements along the alphabetically first dimension are printed to the right, the second dimension upward.
    Typically, this means x right, y up.

    Args:
        obj: tensor-like
        name: name of the tensor

    Returns:

    """
    def variables(obj) -> dict:
        if hasattr(obj, '__variable_attrs__') or hasattr(obj, '__value_attrs__'):
            return {f".{a}": getattr(obj, a) for a in variable_attributes(obj)}
        elif isinstance(obj, (tuple, list)):
            return {f"[{i}]": item for i, item in enumerate(obj)}
        elif isinstance(obj, dict):
            return obj
        else:
            raise ValueError(f"Not PhiTreeNode: {type(obj)}")

    if name:
        print(" " * 12 + name)
    if obj is None:
        print("None")
    elif isinstance(obj, Tensor):
        print(f"{obj:full}")
    elif isinstance(obj, PhiTreeNode):
        for n, val in variables(obj).items():
            print_(val, name + n)
    else:
        print(f"{wrap(obj):full}")
def print_gradient(value: phiml.math._tensors.Tensor, name='', detailed=False) ‑> phiml.math._tensors.Tensor

Prints the gradient vector of value when computed. The gradient at value is the vector-Jacobian product of all operations between the output of this function and the loss value.

The gradient is not printed in jit mode, see jit_compile().

Example

def f(x):
    x = math.print_gradient(x, 'dx')
    return math.l1_loss(x)

math.jacobian(f)(math.ones(x=6))

Args

value
Tensor for which the gradient may be computed later.
name
(Optional) Name to print along with the gradient values
detailed
If False, prints a short summary of the gradient tensor.

Returns

identity()(value) which when differentiated, prints the gradient vector.

Expand source code
def print_gradient(value: Tensor, name="", detailed=False) -> Tensor:
    """
    Prints the gradient vector of `value` when computed.
    The gradient at `value` is the vector-Jacobian product of all operations between the output of this function and the loss value.

    The gradient is not printed in jit mode, see `jit_compile()`.

    Example:
        ```python
        def f(x):
            x = math.print_gradient(x, 'dx')
            return math.l1_loss(x)

        math.jacobian(f)(math.ones(x=6))
        ```

    Args:
        value: `Tensor` for which the gradient may be computed later.
        name: (Optional) Name to print along with the gradient values
        detailed: If `False`, prints a short summary of the gradient tensor.

    Returns:
        `identity(value)` which when differentiated, prints the gradient vector.
    """

    def print_grad(params: dict, _y, dx):
        param_name, x = next(iter(params.items()))
        if math.all_available(x, dx):
            if detailed:
                math.print_(dx, name=name)
            else:
                print(f"{name}:  \t{dx}")
        else:
            print(f"Cannot print gradient for {param_name}, data not available.")
        return {param_name: dx}

    identity = custom_gradient(lambda x: x, print_grad)
    return identity(value)
def prod(value: Union[phiml.math._tensors.Tensor, list, tuple, numbers.Number, bool], dim: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function non_batch>) ‑> phiml.math._tensors.Tensor

Multiplies values along the specified dimensions.

Args

value
Tensor or list / tuple of Tensors.
dim

Dimension or dimensions to be reduced. One of

  • None to reduce all non-batch dimensions
  • str containing single dimension or comma-separated list of dimensions
  • Tuple[str] or List[str]
  • Shape
  • batch(), instance(), spatial(), channel() to select dimensions by type
  • '0' when isinstance(value, (tuple, list)) to multiply the sequence of Tensors

Returns

Tensor without the reduced dimensions.

Expand source code
def prod(value: Union[Tensor, list, tuple, Number, bool], dim: DimFilter = non_batch) -> Tensor:
    """
    Multiplies `values` along the specified dimensions.

    Args:
        value: `Tensor` or `list` / `tuple` of Tensors.
        dim: Dimension or dimensions to be reduced. One of

            * `None` to reduce all non-batch dimensions
            * `str` containing single dimension or comma-separated list of dimensions
            * `Tuple[str]` or `List[str]`
            * `Shape`
            * `batch`, `instance`, `spatial`, `channel` to select dimensions by type
            * `'0'` when `isinstance(value, (tuple, list))` to multiply the sequence of Tensors

    Returns:
        `Tensor` without the reduced dimensions.
    """
    return reduce_(_prod, value, dim, require_all_dims_present=True)
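
A minimal, illustrative example reducing a single spatial dimension:

from phiml import math

x = math.wrap([1, 2, 3, 4], math.spatial('x'))
math.prod(x)        # 24, reduces all non-batch dims by default
math.prod(x, 'x')   # 24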
def quantile(value: phiml.math._tensors.Tensor, quantiles: Union[float, phiml.math._tensors.Tensor, tuple, list], dim: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function non_batch>)

Compute the q-th quantile of value along dim for each q in quantiles.

Implementations:

  • NumPy: quantile
  • PyTorch: quantile
  • TensorFlow: tfp.stats.percentile
  • Jax: quantile

Args

value
Tensor
quantiles
Single quantile or tensor of quantiles to compute. Must be of type float, tuple, list or Tensor.
dim

Dimension or dimensions to be reduced. One of

  • None to reduce all non-batch dimensions
  • str containing single dimension or comma-separated list of dimensions
  • Tuple[str] or List[str]
  • Shape
  • batch(), instance(), spatial(), channel() to select dimensions by type
  • '0' when isinstance(value, (tuple, list)) to reduce the sequence of Tensors

Returns

Tensor with dimensions of quantiles and non-reduced dimensions of value.

Expand source code
def quantile(value: Tensor,
             quantiles: Union[float, tuple, list, Tensor],
             dim: DimFilter = non_batch):
    """
    Compute the q-th quantile of `value` along `dim` for each q in `quantiles`.

    Implementations:

    * NumPy: [`quantile`](https://numpy.org/doc/stable/reference/generated/numpy.quantile.html)
    * PyTorch: [`quantile`](https://pytorch.org/docs/stable/generated/torch.quantile.html#torch.quantile)
    * TensorFlow: [`tfp.stats.percentile`](https://www.tensorflow.org/probability/api_docs/python/tfp/stats/percentile)
    * Jax: [`quantile`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.quantile.html)

    Args:
        value: `Tensor`
        quantiles: Single quantile or tensor of quantiles to compute.
            Must be of type `float`, `tuple`, `list` or `Tensor`.
        dim: Dimension or dimensions to be reduced. One of

            * `None` to reduce all non-batch dimensions
            * `str` containing single dimension or comma-separated list of dimensions
            * `Tuple[str]` or `List[str]`
            * `Shape`
            * `batch`, `instance`, `spatial`, `channel` to select dimensions by type
            * `'0'` when `isinstance(value, (tuple, list))` to reduce the sequence of Tensors

    Returns:
        `Tensor` with dimensions of `quantiles` and non-reduced dimensions of `value`.
    """
    dims = value.shape.only(dim)
    native_values = reshaped_native(value, [*value.shape.without(dims), value.shape.only(dims)])
    backend = choose_backend(native_values)
    q = wrap(quantiles, default_list_dim=instance('quantiles'))
    native_quantiles = reshaped_native(q, [q.shape])
    native_result = backend.quantile(native_values, native_quantiles)
    if native_result is not NotImplemented:
        return reshaped_tensor(native_result, [q.shape, *value.shape.without(dims)])
    # --- fallback: custom quantile implementation ---
    v_sorted = sort(value, dims)
    q_idx = q * (v_sorted.shape.get_size(dims) - 1)
    q_idx = expand(q_idx, channel(vector=dims))
    result = grid_sample(v_sorted, q_idx, e_.ZERO_GRADIENT)
    return result
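
A minimal, illustrative example; passing several quantiles adds a new dimension to the result:

from phiml import math

data = math.wrap([0., 1., 2., 3., 4.], math.instance('points'))
math.quantile(data, .5)           # median over 'points', here 2.0
math.quantile(data, (.25, .75))   # two quantiles along a new 'quantiles' dim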
def radians_to_degrees(rad: ~TensorOrTree) ‑> ~TensorOrTree

Convert radians to degrees.

Expand source code
def radians_to_degrees(rad: TensorOrTree) -> TensorOrTree:
    """ Convert degrees to radians. """
    return tree_map(lambda x: x * (180 / 3.14159265358979323846), rad)
def random_normal(*shape: phiml.math._shape.Shape, dtype: Union[phiml.backend._dtype.DType, tuple, type] = None) ‑> phiml.math._tensors.Tensor

Creates a Tensor with the specified shape, filled with random values sampled from a normal / Gaussian distribution.

Implementations:

  • NumPy: numpy.random.standard_normal
  • PyTorch: torch.randn
  • TensorFlow: tf.random.normal
  • Jax: jax.random.normal

Args

*shape
This (possibly empty) sequence of Shapes is concatenated, preserving the order.
dtype
(optional) floating point DType. If None, a float tensor with the current default precision is created, see get_precision().

Returns

Tensor

Expand source code
def random_normal(*shape: Shape, dtype: Union[DType, tuple, type] = None) -> Tensor:
    """
    Creates a `Tensor` with the specified shape, filled with random values sampled from a normal / Gaussian distribution.

    Implementations:

    * NumPy: [`numpy.random.standard_normal`](https://numpy.org/doc/stable/reference/random/generated/numpy.random.standard_normal.html)
    * PyTorch: [`torch.randn`](https://pytorch.org/docs/stable/generated/torch.randn.html)
    * TensorFlow: [`tf.random.normal`](https://www.tensorflow.org/api_docs/python/tf/random/normal)
    * Jax: [`jax.random.normal`](https://jax.readthedocs.io/en/latest/_autosummary/jax.random.normal.html)

    Args:
        *shape: This (possibly empty) sequence of `Shape`s is concatenated, preserving the order.
        dtype: (optional) floating point `DType`. If `None`, a float tensor with the current default precision is created, see `get_precision()`.

    Returns:
        `Tensor`
    """

    def uniform_random_normal(shape):
        native = choose_backend(*shape.sizes, prefer_default=True).random_normal(shape.sizes, DType.as_dtype(dtype))
        return NativeTensor(native, shape)

    return _initialize(uniform_random_normal, shape)
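
A minimal, illustrative example; the passed shapes are concatenated:

from phiml import math

x = math.random_normal(math.batch(b=2), math.spatial(x=3))         # shape (bᵇ=2, xˢ=3)
x64 = math.random_normal(math.spatial(x=3), dtype=(float, 64))     # dtype given as (kind, bits)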
def random_uniform(*shape: phiml.math._shape.Shape, low: Union[float, phiml.math._tensors.Tensor] = 0, high: Union[float, phiml.math._tensors.Tensor] = 1, dtype: Union[phiml.backend._dtype.DType, tuple, type] = None) ‑> phiml.math._tensors.Tensor

Creates a Tensor with the specified shape, filled with random values sampled from a uniform distribution.

Args

*shape
This (possibly empty) sequence of Shapes is concatenated, preserving the order.
dtype
(optional) DType or (kind, bits). The dtype kind must be one of float, int, complex. If not specified, a float tensor with the current default precision is created, see get_precision().
low
Minimum value, included.
high
Maximum value, excluded.

Returns

Tensor

Expand source code
def random_uniform(*shape: Shape,
                   low: Union[Tensor, float] = 0,
                   high: Union[Tensor, float] = 1,
                   dtype: Union[DType, tuple, type] = None) -> Tensor:
    """
    Creates a `Tensor` with the specified shape, filled with random values sampled from a uniform distribution.

    Args:
        *shape: This (possibly empty) sequence of `Shape`s is concatenated, preserving the order.
        dtype: (optional) `DType` or `(kind, bits)`.
            The dtype kind must be one of `float`, `int`, `complex`.
            If not specified, a `float` tensor with the current default precision is created, see `get_precision()`.
        low: Minimum value, included.
        high: Maximum value, excluded.
    Returns:
        `Tensor`
    """
    if get_shape(low).volume == 1 and get_shape(high).volume == 1:
        def uniform_random_uniform(shape):
            native = choose_backend(low, high, *shape.sizes, prefer_default=True).random_uniform(shape.sizes, low, high, DType.as_dtype(dtype))
            return NativeTensor(native, shape)
        return _initialize(uniform_random_uniform, shape)
    else:
        def uniform_random_uniform(shape):
            native = choose_backend(*shape.sizes, prefer_default=True).random_uniform(shape.sizes, 0, 1, DType.as_dtype(dtype))
            return NativeTensor(native, shape)
        return _initialize(uniform_random_uniform, shape) * (high - low) + low
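
A minimal, illustrative example, including an integer variant via dtype=(int, 32):

from phiml import math

u = math.random_uniform(math.spatial(x=4))                              # values in [0, 1)
r = math.random_uniform(math.spatial(x=4), low=-1, high=1)              # values in [-1, 1)
i = math.random_uniform(math.spatial(x=4), high=10, dtype=(int, 32))    # random integers 0..9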
def range(dim: phiml.math._shape.Shape, start_or_stop: Optional[int] = None, stop: Optional[int] = None, step=1)

Returns evenly spaced values between start and stop. If only one limit is given, 0 is used for the start.

See Also: range_tensor(), linspace(), meshgrid().

Args

dim
Dimension name and type as Shape object. The size of dim is interpreted as stop unless start_or_stop is specified.
start_or_stop
(Optional) int. Interpreted as start if stop is specified as well. Otherwise this is stop.
stop
(Optional) int. stop value.
step
Distance between values.

Returns

Tensor

Expand source code
def arange(dim: Shape, start_or_stop: Union[int, None] = None, stop: Union[int, None] = None, step=1):
    """
    Returns evenly spaced values between `start` and `stop`.
    If only one limit is given, `0` is used for the start.

    See Also:
        `range_tensor()`, `linspace()`, `meshgrid()`.

    Args:
        dim: Dimension name and type as `Shape` object.
            The `size` of `dim` is interpreted as `stop` unless `start_or_stop` is specified.
        start_or_stop: (Optional) `int`. Interpreted as `start` if `stop` is specified as well. Otherwise this is `stop`.
        stop: (Optional) `int`. `stop` value.
        step: Distance between values.

    Returns:
        `Tensor`
    """
    if start_or_stop is None:
        assert stop is None, "start_or_stop must be specified when stop is given."
        assert isinstance(dim.size, int), "When start_or_stop is not specified, dim.size must be an integer."
        start, stop = 0, dim.size
    elif stop is None:
        start, stop = 0, start_or_stop
    else:
        start = start_or_stop
    native = choose_backend(start, stop, prefer_default=True).range(start, stop, step, DType(int, 32))
    return NativeTensor(native, dim.with_sizes(len(native)))
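
A minimal, illustrative example (phiml.math.range shadows the built-in range within this module):

from phiml import math

math.range(math.spatial(x=5))             # 0, 1, 2, 3, 4 along 'x'
math.range(math.spatial('x'), 2, 8)       # 2, 3, 4, 5, 6, 7
math.range(math.spatial('x'), 0, 10, 2)   # 0, 2, 4, 6, 8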
def range_tensor(*shape: phiml.math._shape.Shape)

Returns a Tensor with given shape() containing the linear indices of each element. For 1D tensors, this is equivalent to arange() with step=1.

See Also: arange(), meshgrid().

Args

shape
Tensor shape.

Returns

Tensor

Expand source code
def range_tensor(*shape: Shape):
    """
    Returns a `Tensor` with given `shape` containing the linear indices of each element.
    For 1D tensors, this is equivalent to `arange()` with `step=1`.

    See Also:
        `arange()`, `meshgrid()`.

    Args:
        shape: Tensor shape.

    Returns:
        `Tensor`
    """
    shape = concat_shapes(*shape)
    data = arange(spatial('range'), 0, shape.volume)
    return unpack_dim(data, 'range', shape)
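
A minimal, illustrative example:

from phiml import math

idx = math.range_tensor(math.spatial(x=2, y=3))   # linear indices 0..5 laid out along x and y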
def real(x: ~TensorOrTree) ‑> ~TensorOrTree

See Also: imag(), conjugate().

Args

x
Tensor or PhiTreeNode or native tensor.

Returns

Real component of x.

Expand source code
def real(x: TensorOrTree) -> TensorOrTree:
    """
    See Also:
        `imag()`, `conjugate()`.

    Args:
        x: `Tensor` or `phiml.math.magic.PhiTreeNode` or native tensor.

    Returns:
        Real component of `x`.
    """
    return _backend_op1(x, Backend.real)
def rename_dims(value, dims: Union[str, tuple, list, set, ForwardRef('Shape'), Callable], names: Union[str, tuple, list, set, ForwardRef('Shape'), Callable], **kwargs)

Change the name and optionally the type of some dimensions of value.

Dimensions that are not present on value will be ignored. The corresponding new dimensions given by names will not be added.

Args

value
Shape or Tensor or Shapable.
dims
Existing dimensions of value as comma-separated str, tuple, list, Shape or filter function.
names

Either

  • Sequence of names matching dims as tuple, list or str. This replaces only the dimension names but leaves the types untouched.
  • Shape matching dims to replace names and types.
  • Dimension type function to replace only types.
**kwargs
Additional keyword arguments required by specific implementations. Adding spatial dimensions to fields requires the bounds: Box argument specifying the physical extent of the new dimensions. Adding batch dimensions must always work without keyword arguments.

Returns

Same type as value.

Expand source code
def rename_dims(value,
                dims: DimFilter,
                names: DimFilter,
                **kwargs):
    """
    Change the name and optionally the type of some dimensions of `value`.

    Dimensions that are not present on value will be ignored. The corresponding new dimensions given by `names` will not be added.

    Args:
        value: `Shape` or `Tensor` or `Shapable`.
        dims: Existing dimensions of `value` as comma-separated `str`, `tuple`, `list`, `Shape` or filter function.
        names: Either

            * Sequence of names matching `dims` as `tuple`, `list` or `str`. This replaces only the dimension names but leaves the types untouched.
            * `Shape` matching `dims` to replace names and types.
            * Dimension type function to replace only types.

        **kwargs: Additional keyword arguments required by specific implementations.
            Adding spatial dimensions to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions.
            Adding batch dimensions must always work without keyword arguments.

    Returns:
        Same type as `value`.
    """
    if isinstance(value, Shape):
        return value._replace_names_and_types(dims, names)
    elif isinstance(value, (Number, bool)):
        return value
    assert isinstance(value, Shapable) and isinstance(value, Shaped), f"value must be a Shape or Shapable but got {type(value).__name__}"
    dims = shape(value).only(dims).names if callable(dims) else parse_dim_order(dims)
    existing_dims = shape(value).only(dims, reorder=True)
    if isinstance(names, str):
        names = parse_dim_order(names)
    elif callable(names):
        names = names(**existing_dims.untyped_dict)
        dims = existing_dims
    assert len(dims) == len(names), f"names and dims must be of equal length but got #dims={len(dims)} and #names={len(names)}"
    if not existing_dims:
        return value
    existing_names = [n for i, n in enumerate(names) if dims[i] in existing_dims]
    existing_names = existing_dims._replace_names_and_types(existing_dims, existing_names)
    # --- First try __replace_dims__ ---
    if hasattr(value, '__replace_dims__'):
        result = value.__replace_dims__(existing_dims.names, existing_names, **kwargs)
        if result is not NotImplemented:
            return result
    # --- Next try Tree Node ---
    if isinstance(value, PhiTreeNode):
        new_attributes = {a: rename_dims(getattr(value, a), existing_dims, existing_names, **kwargs) for a in all_attributes(value)}
        return copy_with(value, **new_attributes)
    # --- Fallback: unstack and stack ---
    if shape(value).only(existing_dims).volume > 8:
        warnings.warn(f"rename_dims() default implementation is slow on large dimensions ({existing_dims}). Please implement __replace_dims__() for {type(value).__name__} as defined in phiml.math.magic", RuntimeWarning, stacklevel=2)
    for old_name, new_dim in zip(existing_dims.names, existing_names):
        value = stack(unstack(value, old_name), new_dim, **kwargs)
    return value
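
A minimal sketch showing renaming and retyping (dimension names are illustrative):

from phiml import math

x = math.random_normal(math.spatial(x=4, y=3))
math.rename_dims(x, 'x,y', 'u,v')               # rename spatial dims only
math.rename_dims(x, math.spatial, math.batch)   # turn all spatial dims into batch dims, cf. s2b()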
def replace(obj: ~PhiTreeNodeType, **updates) ‑> ~PhiTreeNodeType

Creates a copy of the given PhiTreeNode with updated values as specified in updates.

If obj overrides __with_attrs__, the copy will be created via that specific implementation. Otherwise, the copy() module and setattr will be used.

Args

obj
PhiTreeNode
**updates
Values to be replaced.

Returns

Copy of obj with updated values.

Expand source code
def replace(obj: PhiTreeNodeType, **updates) -> PhiTreeNodeType:
    """
    Creates a copy of the given `phiml.math.magic.PhiTreeNode` with updated values as specified in `updates`.

    If `obj` overrides `__with_attrs__`, the copy will be created via that specific implementation.
    Otherwise, the `copy` module and `setattr` will be used.

    Args:
        obj: `phiml.math.magic.PhiTreeNode`
        **updates: Values to be replaced.

    Returns:
        Copy of `obj` with updated values.
    """
    if hasattr(obj, '__with_attrs__'):
        result = obj.__with_attrs__(**updates)
        if result is not NotImplemented:
            return result
    elif isinstance(obj, (Number, bool)):
        return obj
    if dataclasses.is_dataclass(obj):
        return dataclasses.replace(obj, **updates)
    else:
        cpy = copy.copy(obj)
        for attr, value in updates.items():
            setattr(cpy, attr, value)
        return cpy
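
A minimal sketch using a plain dataclass (the Settings class below is purely illustrative):

import dataclasses
from phiml import math

@dataclasses.dataclass
class Settings:
    steps: int
    dt: float

math.replace(Settings(steps=10, dt=0.1), dt=0.05)   # Settings(steps=10, dt=0.05)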
def replace_dims(value, dims: Union[str, tuple, list, set, ForwardRef('Shape'), Callable], names: Union[str, tuple, list, set, ForwardRef('Shape'), Callable], **kwargs)

Change the name and optionally the type of some dimensions of value.

Dimensions that are not present on value will be ignored. The corresponding new dimensions given by names will not be added.

Args

value
Shape or Tensor or Shapable.
dims
Existing dimensions of value as comma-separated str, tuple, list, Shape or filter function.
names

Either

  • Sequence of names matching dims as tuple, list or str. This replaces only the dimension names but leaves the types untouched.
  • Shape matching dims to replace names and types.
  • Dimension type function to replace only types.
**kwargs
Additional keyword arguments required by specific implementations. Adding spatial dimensions to fields requires the bounds: Box argument specifying the physical extent of the new dimensions. Adding batch dimensions must always work without keyword arguments.

Returns

Same type as value.

Expand source code
def rename_dims(value,
                dims: DimFilter,
                names: DimFilter,
                **kwargs):
    """
    Change the name and optionally the type of some dimensions of `value`.

    Dimensions that are not present on value will be ignored. The corresponding new dimensions given by `names` will not be added.

    Args:
        value: `Shape` or `Tensor` or `Shapable`.
        dims: Existing dimensions of `value` as comma-separated `str`, `tuple`, `list`, `Shape` or filter function.
        names: Either

            * Sequence of names matching `dims` as `tuple`, `list` or `str`. This replaces only the dimension names but leaves the types untouched.
            * `Shape` matching `dims` to replace names and types.
            * Dimension type function to replace only types.

        **kwargs: Additional keyword arguments required by specific implementations.
            Adding spatial dimensions to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions.
            Adding batch dimensions must always work without keyword arguments.

    Returns:
        Same type as `value`.
    """
    if isinstance(value, Shape):
        return value._replace_names_and_types(dims, names)
    elif isinstance(value, (Number, bool)):
        return value
    assert isinstance(value, Shapable) and isinstance(value, Shaped), f"value must be a Shape or Shapable but got {type(value).__name__}"
    dims = shape(value).only(dims).names if callable(dims) else parse_dim_order(dims)
    existing_dims = shape(value).only(dims, reorder=True)
    if isinstance(names, str):
        names = parse_dim_order(names)
    elif callable(names):
        names = names(**existing_dims.untyped_dict)
        dims = existing_dims
    assert len(dims) == len(names), f"names and dims must be of equal length but got #dims={len(dims)} and #names={len(names)}"
    if not existing_dims:
        return value
    existing_names = [n for i, n in enumerate(names) if dims[i] in existing_dims]
    existing_names = existing_dims._replace_names_and_types(existing_dims, existing_names)
    # --- First try __replace_dims__ ---
    if hasattr(value, '__replace_dims__'):
        result = value.__replace_dims__(existing_dims.names, existing_names, **kwargs)
        if result is not NotImplemented:
            return result
    # --- Next try Tree Node ---
    if isinstance(value, PhiTreeNode):
        new_attributes = {a: rename_dims(getattr(value, a), existing_dims, existing_names, **kwargs) for a in all_attributes(value)}
        return copy_with(value, **new_attributes)
    # --- Fallback: unstack and stack ---
    if shape(value).only(existing_dims).volume > 8:
        warnings.warn(f"rename_dims() default implementation is slow on large dimensions ({existing_dims}). Please implement __replace_dims__() for {type(value).__name__} as defined in phiml.math.magic", RuntimeWarning, stacklevel=2)
    for old_name, new_dim in zip(existing_dims.names, existing_names):
        value = stack(unstack(value, old_name), new_dim, **kwargs)
    return value
def reshaped_native(value: phiml.math._tensors.Tensor, groups: Union[tuple, list], force_expand: Any = True, to_numpy=False)

Returns a native representation of value where dimensions are laid out according to groups.

See Also: native(), pack_dims(), reshaped_tensor(), reshaped_numpy().

Args

value
Tensor
groups

tuple or list of dimensions to be packed into one native dimension. Each entry must be one of the following:

  • str: the name of one dimension that is present on value.
  • Shape: Dimensions to be packed. If force_expand, missing dimensions are first added, otherwise they are ignored.
  • Filter function: Packs all dimensions of this type that are present on value.
  • Ellipsis : Packs all remaining dimensions into this slot. Can only be passed once.
force_expand
bool or sequence of dimensions. If True, repeats the tensor along missing dimensions. If False, puts singleton dimensions where possible. If a sequence of dimensions is provided, only forces the expansion for groups containing those dimensions.
to_numpy
If True, converts the native tensor to a numpy.ndarray.

Returns

Native tensor with dimensions matching groups.

Expand source code
def reshaped_native(value: Tensor,
                    groups: Union[tuple, list],
                    force_expand: Any = True,
                    to_numpy=False):
    """
    Returns a native representation of `value` where dimensions are laid out according to `groups`.

    See Also:
        `native()`, `pack_dims()`, `reshaped_tensor()`, `reshaped_numpy()`.

    Args:
        value: `Tensor`
        groups: `tuple` or `list` of dimensions to be packed into one native dimension. Each entry must be one of the following:

            * `str`: the name of one dimension that is present on `value`.
            * `Shape`: Dimensions to be packed. If `force_expand`, missing dimensions are first added, otherwise they are ignored.
            * Filter function: Packs all dimensions of this type that are present on `value`.
            * Ellipsis `...`: Packs all remaining dimensions into this slot. Can only be passed once.

        force_expand: `bool` or sequence of dimensions.
            If `True`, repeats the tensor along missing dimensions.
            If `False`, puts singleton dimensions where possible.
            If a sequence of dimensions is provided, only forces the expansion for groups containing those dimensions.
        to_numpy: If True, converts the native tensor to a `numpy.ndarray`.

    Returns:
        Native tensor with dimensions matching `groups`.
    """
    assert isinstance(value, Tensor), f"value must be a Tensor but got {type(value)}"
    assert not value._is_tracer, f"Failed accessing native values because tensor {value.shape} is a tracer"
    assert value.shape.is_uniform, f"Only uniform (homogenous) tensors can be converted to native but got shape {value.shape}"
    assert isinstance(groups, (tuple, list)), f"groups must be a tuple or list but got {type(groups)}"
    order = []
    if Ellipsis in groups:
        ellipsis_dims = value.shape.without([g for g in groups if g is not Ellipsis])
        groups = [ellipsis_dims if g is Ellipsis else g for g in groups]
    groups = [group(value) if callable(group) else group for group in groups]
    for i, group in enumerate(groups):
        if isinstance(group, Shape):
            present = value.shape.only(group)
            if force_expand is True or present.volume > 1 or (force_expand is not False and group.only(force_expand).volume > 1):
                value = expand(value, group)
            value = pack_dims(value, group, batch(f"group{i}"))
            order.append(f"group{i}")
        else:
            assert isinstance(group, str), f"Groups must be either single-dim str or Shape but got {group}"
            assert ',' not in group, f"When packing multiple dimensions, pass a well-defined Shape instead of a comma-separated str. Got {group}"
            order.append(group)
    return value.numpy(order) if to_numpy else value.native(order)
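
A minimal sketch packing dimension groups into native axes (assuming the default NumPy backend, so the returned native tensor is a numpy.ndarray; names are illustrative):

from phiml import math

x = math.random_normal(math.batch(b=2), math.spatial(x=4, y=3), math.channel(vector=2))
native = math.reshaped_native(x, [math.batch, math.spatial, 'vector'])   # native shape (2, 12, 2)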
def reshaped_numpy(value: phiml.math._tensors.Tensor, groups: Union[tuple, list], force_expand: Any = True)

Returns the NumPy representation of value where dimensions are laid out according to groups.

See Also: numpy(), reshaped_native(), pack_dims(), reshaped_tensor().

Args

value
Tensor
groups
Sequence of dimension names as str or groups of dimensions to be packed as Shape.
force_expand
bool or sequence of dimensions. If True, repeats the tensor along missing dimensions. If False, puts singleton dimensions where possible. If a sequence of dimensions is provided, only forces the expansion for groups containing those dimensions.

Returns

NumPy ndarray with dimensions matching groups.

Expand source code
def reshaped_numpy(value: Tensor, groups: Union[tuple, list], force_expand: Any = True):
    """
    Returns the NumPy representation of `value` where dimensions are laid out according to `groups`.

    See Also:
        `numpy()`, `reshaped_native()`, `pack_dims()`, `reshaped_tensor()`.

    Args:
        value: `Tensor`
        groups: Sequence of dimension names as `str` or groups of dimensions to be packed as `Shape`.
        force_expand: `bool` or sequence of dimensions.
            If `True`, repeats the tensor along missing dimensions.
            If `False`, puts singleton dimensions where possible.
            If a sequence of dimensions is provided, only forces the expansion for groups containing those dimensions.

    Returns:
        NumPy `ndarray` with dimensions matching `groups`.
    """
    return reshaped_native(value, groups, force_expand=force_expand, to_numpy=True)
def reshaped_tensor(value: Any, groups: Union[tuple, list], check_sizes=False, convert=True)

Creates a Tensor from a native tensor or tensor-like whereby the dimensions of value are split according to groups.

See Also: tensor(), reshaped_native(), unpack_dim().

Args

value
Native tensor or tensor-like.
groups
Sequence of dimension groups as tuple[Shape] or list[Shape] into which the native dimensions of value are unpacked.
check_sizes
If True, group sizes must match the sizes of value exactly. Otherwise, allows singleton dimensions.
convert
If True, converts the data to the native format of the current default backend. If False, wraps the data in a Tensor but keeps the given data reference if possible.

Returns

Tensor with all dimensions from groups

Expand source code
def reshaped_tensor(value: Any,
                    groups: Union[tuple, list],
                    check_sizes=False,
                    convert=True):
    """
    Creates a `Tensor` from a native tensor or tensor-like whereby the dimensions of `value` are split according to `groups`.

    See Also:
        `phiml.math.tensor()`, `reshaped_native()`, `unpack_dim()`.

    Args:
        value: Native tensor or tensor-like.
        groups: Sequence of dimension groups as `tuple[Shape]` or `list[Shape]` into which the native dimensions of `value` are unpacked.
        check_sizes: If True, group sizes must match the sizes of `value` exactly. Otherwise, allows singleton dimensions.
        convert: If True, converts the data to the native format of the current default backend.
            If False, wraps the data in a `Tensor` but keeps the given data reference if possible.

    Returns:
        `Tensor` with all dimensions from `groups`
    """
    assert all(isinstance(g, Shape) for g in groups), "groups must be a sequence of Shapes"
    dims = [batch(f'group{i}') for i, group in enumerate(groups)]
    try:
        value = tensor(value, *dims, convert=convert)
    except IncompatibleShapes:
        raise IncompatibleShapes(f"Cannot reshape native tensor {type(value)} with sizes {value.shape} given groups {groups}")
    for i, group in enumerate(groups):
        if value.shape.get_size(f'group{i}') == group.volume:
            value = unpack_dim(value, f'group{i}', group)
        elif check_sizes:
            raise AssertionError(f"Group {group} does not match dimension {i} of value {value.shape}")
        else:
            value = unpack_dim(value, f'group{i}', group)
    return value
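
A minimal sketch, roughly the inverse of the reshaped_native() call above (names are illustrative):

import numpy as np
from phiml import math

data = np.zeros((2, 12, 2))
t = math.reshaped_tensor(data, [math.batch(b=2), math.spatial(x=4, y=3), math.channel(vector='x,y')])
# t has shape (bᵇ=2, xˢ=4, yˢ=3, vectorᶜ=x,y)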
def rotate_vector(vector: phiml.math._tensors.Tensor, angle: Union[float, phiml.math._tensors.Tensor, None], invert=False) ‑> phiml.math._tensors.Tensor

Rotates vector around the origin.

Args

vector
n-dimensional vector with exactly one channel dimension
angle
Euler angle(s) or rotation matrix. None is interpreted as no rotation.
invert
Whether to apply the inverse rotation.

Returns

Rotated vector as Tensor

Expand source code
def rotate_vector(vector: math.Tensor, angle: Optional[Union[float, math.Tensor]], invert=False) -> Tensor:
    """
    Rotates `vector` around the origin.

    Args:
        vector: n-dimensional vector with exactly one channel dimension
        angle: Euler angle(s) or rotation matrix.
            `None` is interpreted as no rotation.
        invert: Whether to apply the inverse rotation.

    Returns:
        Rotated vector as `Tensor`
    """
    assert 'vector' in vector.shape, f"vector must have exactly a channel dimension named 'vector'"
    if angle is None:
        return vector
    matrix = rotation_matrix(angle, matrix_dim=channel(vector))
    if invert:
        matrix = rename_dims(matrix, '~vector,vector', math.concat_shapes(channel('vector'), dual('vector')))
    assert matrix.vector.dual.size == vector.vector.size, f"Rotation matrix from {angle.shape} is {matrix.vector.dual.size}D but vector {vector.shape} is {vector.vector.size}D."
    return matrix @ vector
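
A minimal, illustrative example rotating a 2D vector by 90°:

from phiml import math

v = math.wrap([1., 0.], math.channel(vector='x,y'))
math.rotate_vector(v, 3.14159265 / 2)   # approximately (0, 1)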
def rotation_angles(rot: phiml.math._tensors.Tensor)

Compute the scalar rotation angle in 2D or the Euler angles in 3D from a given rotation matrix. This function returns one valid solution but often, there are multiple solutions.

Args

rot
Rotation matrix as created by phi.math.rotation_matrix(). Must have exactly one channel and one dual dimension with equally-ordered elements.

Returns

Scalar angle in 2D, Euler angles in 3D

Expand source code
def rotation_angles(rot: Tensor):
    """
    Compute the scalar rotation angle in 2D or the Euler angles in 3D from a given rotation matrix.
    This function returns one valid solution but often, there are multiple solutions.

    Args:
        rot: Rotation matrix as created by `phi.math.rotation_matrix()`.
            Must have exactly one channel and one dual dimension with equally-ordered elements.

    Returns:
        Scalar angle in 2D, Euler angles in 3D
    """
    assert channel(rot).rank == 1 and dual(rot).rank == 1, f"Rotation matrix must have one channel and one dual dimension but got {rot.shape}"
    if channel(rot).size == 2:
        cos = rot[{channel: 0, dual: 0}]
        sin = rot[{channel: 1, dual: 0}]
        return math.arctan(sin, divide_by=cos)
    elif channel(rot).size == 3:
        a2 = -math.arcsin(rot[{channel: 2, dual: 0}])  # ToDo handle [2, 0] == 1 (i.e. cos_theta == 0)
        cos2 = math.cos(a2)
        a1 = math.arctan(rot[{channel: 2, dual: 1}] / cos2, divide_by=rot[{channel: 2, dual: 2}] / cos2)
        a3 = math.arctan(rot[{channel: 1, dual: 0}] / cos2, divide_by=rot[{channel: 0, dual: 0}] / cos2)
        regular_sol = stack([a1, a2, a3], channel(angle=channel(rot).item_names[0]))
        # --- pole case cos(theta) == 1 ---
        a3_pole = 0  # unconstrained
        bottom_pole = rot[{channel: 2, dual: 0}] < 0
        a2_pole = math.where(bottom_pole, 1.57079632679, -1.57079632679)
        a1_pole = math.where(bottom_pole, math.arctan(rot[{channel: 0, dual: 1}], divide_by=rot[{channel: 0, dual: 2}]), math.arctan(-rot[{channel: 0, dual: 1}], divide_by=-rot[{channel: 0, dual: 2}]))
        pole_sol = stack([a1_pole, a2_pole, a3_pole], channel(regular_sol))
        return math.where(abs(rot[{channel: 2, dual: 0}]) >= 1, pole_sol, regular_sol)
    else:
        raise ValueError(f"Rotation matrix must be 2D or 3D but got {rot.shape}")
def rotation_matrix(x: Union[float, phiml.math._tensors.Tensor], matrix_dim=(vectorᶜ=None))

Create a 2D or 3D rotation matrix from the corresponding angle(s).

Args

x:
2D: scalar angle
3D: Either vector pointing along the rotation axis with rotation angle as length or Euler angles.
Euler angles need to be laid out along an angle channel dimension with dimension names listing the spatial dimensions.
E.g. a 90° rotation about the z-axis is represented by vec('angle', x=0, y=0, z=PI/2).
If a rotation matrix is passed for x, it is returned without modification.
matrix_dim
Matrix dimension for 2D rotations. In 3D, the channel dimension of x is used.

Returns

Matrix containing matrix_dim in primal and dual form as well as all non-channel dimensions of x.

Expand source code
def rotation_matrix(x: Union[float, math.Tensor], matrix_dim=channel('vector')):
    """
    Create a 2D or 3D rotation matrix from the corresponding angle(s).

    Args:
        x:
            2D: scalar angle
            3D: Either vector pointing along the rotation axis with rotation angle as length or Euler angles.
            Euler angles need to be laid out along an `angle` channel dimension with dimension names listing the spatial dimensions.
            E.g. a 90° rotation about the z-axis is represented by `vec('angle', x=0, y=0, z=PI/2)`.
            If a rotation matrix is passed for `x`, it is returned without modification.
        matrix_dim: Matrix dimension for 2D rotations. In 3D, the channel dimension of `x` is used.

    Returns:
        Matrix containing `matrix_dim` in primal and dual form as well as all non-channel dimensions of `x`.
    """
    if isinstance(x, Tensor) and '~vector' in x.shape and 'vector' in x.shape.channel and x.shape.get_size('~vector') == x.shape.get_size('vector'):
        return x  # already a rotation matrix
    elif 'angle' in shape(x) and shape(x).get_size('angle') == 3:  # 3D Euler angles
        assert channel(x).rank == 1 and channel(x).size == 3, f"x for 3D rotations needs to be a 3-vector but got {x}"
        s1, s2, s3 = math.sin(x).angle  # x, y, z
        c1, c2, c3 = math.cos(x).angle
        matrix_dim = matrix_dim.with_size(shape(x).get_item_names('angle'))
        return wrap([[c3 * c2, c3 * s2 * s1 - s3 * c1, c3 * s2 * c1 + s3 * s1],
                     [s3 * c2, s3 * s2 * s1 + c3 * c1, s3 * s2 * c1 - c3 * s1],
                     [-s2, c2 * s1, c2 * c1]], matrix_dim, matrix_dim.as_dual())  # Rz * Ry * Rx  (1. rotate about X by first angle)
    elif 'vector' in shape(x) and shape(x).get_size('vector') == 3:  # 3D axis + x
        angle = vec_length(x)
        s, c = math.sin(angle), math.cos(angle)
        t = 1 - c
        k1, k2, k3 = vec_normalize(x, epsilon=1e-12).vector
        matrix_dim = matrix_dim.with_size(shape(x).get_item_names('vector'))
        return wrap([[c + k1**2 * t, k1 * k2 * t - k3 * s, k1 * k3 * t + k2 * s],
                     [k2 * k1 * t + k3 * s, c + k2**2 * t, k2 * k3 * t - k1 * s],
                     [k3 * k1 * t - k2 * s, k3 * k2 * t + k1 * s, c + k3**2 * t]], matrix_dim, matrix_dim.as_dual())
    else:  # 2D rotation
        sin = wrap(math.sin(x))
        cos = wrap(math.cos(x))
        return wrap([[cos, -sin], [sin, cos]], matrix_dim, matrix_dim.as_dual())
def round(x: ~TensorOrTree) ‑> ~TensorOrTree

Rounds the Tensor or PhiTreeNode x to the closest integer.

Expand source code
def round_(x: TensorOrTree) -> TensorOrTree:
    """ Rounds the `Tensor` or `phiml.math.magic.PhiTreeNode` `x` to the closest integer. """
    return _backend_op1(x, Backend.round)
def s2b(value)

Change the type of all spatial dimensions of value to batch dimensions. See rename_dims().

Expand source code
def s2b(value):
    """ Change the type of all *spatial* dimensions of `value` to *batch* dimensions. See `rename_dims`. """
    return rename_dims(value, spatial, batch)
def safe_div(x: Union[float, phiml.math._tensors.Tensor], y: Union[float, phiml.math._tensors.Tensor])

Computes x/y with the Tensors x and y but returns 0 where y=0.

Expand source code
def safe_div(x: Union[float, Tensor], y: Union[float, Tensor]):
    """ Computes *x/y* with the `Tensor`s `x` and `y` but returns 0 where *y=0*. """
    return custom_op2(x, y,
                      l_operator=safe_div,
                      l_native_function=lambda x_, y_: choose_backend(x_, y_).divide_no_nan(x_, y_),
                      r_operator=lambda y_, x_: safe_div(x_, y_),
                      r_native_function=lambda y_, x_: choose_backend(x_, y_).divide_no_nan(x_, y_),
                      op_name='divide_no_nan')
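
A minimal, illustrative example (the dimension name 'c' is arbitrary):

from phiml import math

x = math.wrap([1., 2.], math.channel('c'))
y = math.wrap([0., 4.], math.channel('c'))
math.safe_div(x, y)   # (0.0, 0.5); division by zero yields 0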
def sample_subgrid(grid: phiml.math._tensors.Tensor, start: phiml.math._tensors.Tensor, size: phiml.math._shape.Shape) ‑> phiml.math._tensors.Tensor

Samples a sub-grid from grid with equal distance between sampling points. The values at the new sample points are determined via linear interpolation.

Args

grid
Tensor to be resampled. Values are assumed to be sampled at cell centers.
start
Origin point of sub-grid within grid, measured in number of cells. Must have a single dimension called vector. Example: start=(1, 0.5) would slice off the first grid point in dim 1 and take the mean of neighbouring points in dim 2. The order of dims must be equal to size and grid.shape.spatial.
size
Resolution of the sub-grid. Must not be larger than the resolution of grid. The order of dims must be equal to start and grid.shape.spatial.

Returns

Sub-grid as Tensor

Expand source code
def sample_subgrid(grid: Tensor, start: Tensor, size: Shape) -> Tensor:
    """
    Samples a sub-grid from `grid` with equal distance between sampling points.
    The values at the new sample points are determined via linear interpolation.

    Args:
        grid: `Tensor` to be resampled. Values are assumed to be sampled at cell centers.
        start: Origin point of sub-grid within `grid`, measured in number of cells.
            Must have a single dimension called `vector`.
            Example: `start=(1, 0.5)` would slice off the first grid point in dim 1 and take the mean of neighbouring points in dim 2.
            The order of dims must be equal to `size` and `grid.shape.spatial`.
        size: Resolution of the sub-grid. Must not be larger than the resolution of `grid`.
            The order of dims must be equal to `start` and `grid.shape.spatial`.

    Returns:
      Sub-grid as `Tensor`
    """
    assert start.shape.names == ('vector',)
    assert grid.shape.spatial.names == size.names
    assert math.all_available(start), "Cannot perform sample_subgrid() during tracing, 'start' must be known."
    crop = {}
    for dim, d_start, d_size in zip(grid.shape.spatial.names, start, size.sizes):
        crop[dim] = slice(int(d_start), int(d_start) + d_size + (0 if d_start % 1 in (0, 1) else 1))
    grid = grid[crop]
    upper_weight = start % 1
    lower_weight = 1 - upper_weight
    for i, dim in enumerate(grid.shape.spatial.names):
        if upper_weight[i].native() not in (0, 1):
            lower, upper = shift(grid, (0, 1), [dim], padding=None, stack_dim=None)
            grid = upper * upper_weight[i] + lower * lower_weight[i]
    return grid
def scatter(base_grid: Union[phiml.math._tensors.Tensor, phiml.math._shape.Shape], indices: Union[phiml.math._tensors.Tensor, dict], values: Union[float, phiml.math._tensors.Tensor], mode: Union[str, Callable] = 'update', outside_handling: str = 'discard', indices_gradient=False, default=None)

Scatters values into base_grid at indices. instance dimensions of indices and/or values are reduced during scattering. Depending on mode, this method has one of the following effects:

  • mode='update': Replaces the values of base_grid at indices by values. The result is undefined if indices contains duplicates.
  • mode='add': Adds values to base_grid at indices. The values corresponding to duplicate indices are accumulated.
  • mode='mean': Replaces the values of base_grid at indices by the mean of all values with the same index.

Implementations:

  • NumPy: Slice assignment / numpy.add.at
  • PyTorch: torch.scatter, torch.scatter_add
  • TensorFlow: tf.tensor_scatter_nd_add, tf.tensor_scatter_nd_update
  • Jax: jax.lax.scatter_add, jax.lax.scatter

See Also: gather().

Args

base_grid
Tensor into which values are scattered.
indices
Tensor of n-dimensional indices at which to place values. Must have a single channel dimension with size matching the number of spatial dimensions of base_grid. This dimension is optional if the spatial rank is 1. Must also contain all scatter_dims.
values
Tensor of values to scatter at indices.
mode
Scatter mode as str or function. Supported modes are 'add', 'mean', 'update', 'max', 'min', 'prod', 'any', 'all'. The corresponding functions are the built-in sum, max and min, as well as the reduce functions in phiml.math.
outside_handling

Defines how indices lying outside the bounds of base_grid are handled.

  • 'discard': outside indices are ignored.
  • 'clamp': outside indices are projected onto the closest point inside the grid.
  • 'undefined': All points are expected to lie inside the grid. Otherwise an error may be thrown or an undefined tensor may be returned.
indices_gradient
Whether to allow the gradient of this operation to be backpropagated through indices.
default
Default value to use for bins into which no value is scattered. By default, NaN is used for the modes 'update' and 'mean', 0 for 'add', inf for 'min' and -inf for 'max'. This will upgrade the data type to float if necessary.

Returns

Copy of base_grid with updated values at indices.

Expand source code
def scatter(base_grid: Union[Tensor, Shape],
            indices: Union[Tensor, dict],
            values: Union[Tensor, float],
            mode: Union[str, Callable] = 'update',
            outside_handling: str = 'discard',
            indices_gradient=False,
            default=None):
    """
    Scatters `values` into `base_grid` at `indices`.
    instance dimensions of `indices` and/or `values` are reduced during scattering.
    Depending on `mode`, this method has one of the following effects:

    * `mode='update'`: Replaces the values of `base_grid` at `indices` by `values`. The result is undefined if `indices` contains duplicates.
    * `mode='add'`: Adds `values` to `base_grid` at `indices`. The values corresponding to duplicate indices are accumulated.
    * `mode='mean'`: Replaces the values of `base_grid` at `indices` by the mean of all `values` with the same index.

    Implementations:

    * NumPy: Slice assignment / `numpy.add.at`
    * PyTorch: [`torch.scatter`](https://pytorch.org/docs/stable/generated/torch.scatter.html), [`torch.scatter_add`](https://pytorch.org/docs/stable/generated/torch.scatter_add.html)
    * TensorFlow: [`tf.tensor_scatter_nd_add`](https://www.tensorflow.org/api_docs/python/tf/tensor_scatter_nd_add), [`tf.tensor_scatter_nd_update`](https://www.tensorflow.org/api_docs/python/tf/tensor_scatter_nd_update)
    * Jax: [`jax.lax.scatter_add`](https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.scatter_add.html), [`jax.lax.scatter`](https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.scatter.html)

    See Also:
        `gather()`.

    Args:
        base_grid: `Tensor` into which `values` are scattered.
        indices: `Tensor` of n-dimensional indices at which to place `values`.
            Must have a single channel dimension with size matching the number of spatial dimensions of `base_grid`.
            This dimension is optional if the spatial rank is 1.
            Must also contain all `scatter_dims`.
        values: `Tensor` of values to scatter at `indices`.
        mode: Scatter mode as `str` or function.
            Supported modes are 'add', 'mean', 'update', 'max', 'min', 'prod', 'any', 'all'.
            The corresponding functions are the built-in `sum`, `max`, `min`, as well as the reduce functions in `phiml.math`.
        outside_handling: Defines how indices lying outside the bounds of `base_grid` are handled.

            * `'discard'`: outside indices are ignored.
            * `'clamp'`: outside indices are projected onto the closest point inside the grid.
            * `'undefined'`: All points are expected to lie inside the grid. Otherwise an error may be thrown or an undefined tensor may be returned.
        indices_gradient: Whether to allow the gradient of this operation to be backpropagated through `indices`.
        default: Default value to use for bins into which no value is scattered.
            By default, `NaN` is used for the modes `update` and `mean`, `0` for `add`, `inf` for min and `-inf` for max.
            This will upgrade the data type to `float` if necessary.

    Returns:
        Copy of `base_grid` with updated values at `indices`.
    """
    if callable(mode):
        mode = {sum: 'add', max: 'max', min: 'min', sum_: 'add', max_: 'max', min_: 'min', mean: 'mean', prod: 'prod', any_: 'any', all_: 'all'}[mode]
    if mode == 'prod':
        log_base_grid = log(base_grid) if isinstance(base_grid, Tensor) else base_grid
        log_default = None if default is None else log(default)
        log_result = scatter(log_base_grid, indices, log(values), 'add', outside_handling, indices_gradient, log_default)
        return exp(log_result)
    elif mode == 'any':
        b_base_grid = cast(base_grid, bool) if isinstance(base_grid, Tensor) else base_grid
        b_values = cast(values, bool)
        i_result = scatter(b_base_grid, indices, b_values, 'add', outside_handling, indices_gradient, False)
        return cast(i_result, bool)
    elif mode == 'all':
        not_base_grid = ~cast(base_grid, bool) if isinstance(base_grid, Tensor) else base_grid
        not_values = ~cast(values, bool)
        i_result = scatter(not_base_grid, indices, not_values, 'add', outside_handling, indices_gradient, False)
        return ~cast(i_result, bool)
    assert mode in ('update', 'add', 'mean', 'max', 'min'), f"Invalid scatter mode: '{mode}'"
    assert outside_handling in ('discard', 'clamp', 'undefined')
    assert isinstance(indices_gradient, bool)
    if isinstance(indices, dict):  # update a slice
        if len(indices) == 1 and isinstance(next(iter(indices.values())), (str, int, slice)):  # update a range
            dim, sel = next(iter(indices.items()))
            full_dim = base_grid.shape[dim]
            if isinstance(sel, str):
                sel = full_dim.item_names[0].index(sel)
            if isinstance(sel, int):
                sel = slice(sel, sel+1)
            assert isinstance(sel, slice), f"Selection must be a str, int or slice but got {type(sel)}"
            values = expand(values, full_dim.after_gather({dim: sel}))
            parts = [
                base_grid[{dim: slice(sel.start)}],
                values,
                base_grid[{dim: slice(sel.stop, None)}]
            ]
            return concat(parts, dim)
        else:
            raise NotImplementedError("scattering into non-continuous values not yet supported by dimension")
    grid_shape = base_grid if isinstance(base_grid, Shape) else base_grid.shape
    assert channel(indices).rank < 2
    if channel(indices) and channel(indices).item_names[0]:
        indexed_dims = channel(indices).item_names[0]
        assert indexed_dims in grid_shape, f"Scatter indices {indices.shape} point to missing dimensions in grid {grid_shape}"
        if indexed_dims != grid_shape.only(indexed_dims).names:
            indices = indices[{channel: grid_shape.only(indexed_dims).names}]
        indexed_dims = grid_shape.only(indexed_dims)
    else:
        indexed_dims = grid_shape.spatial or grid_shape.instance
        assert channel(indices).rank == 1 or (grid_shape.spatial_rank + grid_shape.instance_rank == 1 and indices.shape.channel_rank == 0), f"indices must have a channel dimension listing the indexed dims {indexed_dims} but got {indices.shape}. You can create it via vec({', '.join([d+'=...' for d in indexed_dims.names])}) or channel(index='{','.join(indexed_dims.names)}'). If you have raveled indices, use unpack_dim(indices, channel, base_grid.shape['{','.join(indexed_dims.names)}'])."
        assert channel(indices).volume == indexed_dims.rank
    values = wrap(values)
    batches = values.shape.non_channel.non_instance & indices.shape.non_channel.non_instance
    channels = grid_shape.without(indexed_dims).without(batches) & values.shape.channel
    # --- Set up grid ---
    if isinstance(base_grid, Shape):
        with choose_backend_t(indices, values):
            base_grid = zeros(base_grid & batches & values.shape.channel, dtype=values.dtype)
        if default is not None:
            base_grid += default
        elif mode in ['update', 'mean']:
            base_grid += float('nan')
        elif mode == 'max':
            base_grid -= float('inf')
        elif mode == 'min':
            base_grid += float('inf')
        else:
            assert mode == 'add'  # initialize with zeros
    # --- Handle outside indices ---
    if outside_handling == 'clamp':
        indices = clip(indices, 0, tensor(indexed_dims, channel(indices)) - 1)
    elif outside_handling == 'discard':
        indices_linear = pack_dims(indices, instance, instance(_scatter_instance=1))
        indices_inside = min_((round_(indices_linear) >= 0) & (round_(indices_linear) < tensor(indexed_dims, channel(indices_linear))), channel)
        indices_linear = boolean_mask(indices_linear, '_scatter_instance', indices_inside)
        if instance(values).rank > 0:
            values_linear = pack_dims(values, instance, instance(_scatter_instance=1))
            values_linear = boolean_mask(values_linear, '_scatter_instance', indices_inside)
            values = unpack_dim(values_linear, '_scatter_instance', instance(values))
        indices = unpack_dim(indices_linear, '_scatter_instance', instance(indices))
        if indices.shape.is_non_uniform:
            raise NotImplementedError()
    lists = indices.shape.instance & values.shape.instance

    def scatter_forward(base_grid: Tensor, indices: Tensor, values: Tensor):
        if values._is_tracer:
            if indices._is_tracer or base_grid._is_tracer:
                raise NotImplementedError("scattering linear tracer into linear tracer not supported")
            if not channel(indices):
                indices = expand(indices, channel(scatter_idx=indexed_dims))
            return values._scatter(base_grid, indices)
        indices = to_int32(round_(indices))
        native_grid = reshaped_native(base_grid, [batches, *indexed_dims, channels])
        native_values = reshaped_native(values, [batches, lists, channels])
        native_indices = reshaped_native(indices, [batches, lists, channel])
        backend = choose_backend(native_indices, native_values, native_grid)
        if mode != 'mean':
            native_result = backend.scatter(native_grid, native_indices, native_values, mode=mode)
        else:  # mean
            zero_grid = backend.zeros_like(native_grid)
            summed = backend.scatter(zero_grid, native_indices, native_values, mode='add')
            count = backend.scatter(zero_grid, native_indices, backend.ones_like(native_values), mode='add')
            native_result = summed / backend.maximum(count, 1)
            native_result = backend.where(count == 0, native_grid, native_result)
        return reshaped_tensor(native_result, [batches, *indexed_dims, channels], check_sizes=True, convert=False)

    def scatter_backward(args: dict, _output, d_output):
        from ._nd import spatial_gradient
        values_grad = gather(d_output, args['indices'])
        spatial_gradient_indices = gather(spatial_gradient(d_output, dims=indexed_dims), args['indices'])
        indices_grad = mean(spatial_gradient_indices * args['values'], 'vector_')
        return None, indices_grad, values_grad

    from ._functional import custom_gradient
    scatter_function = custom_gradient(scatter_forward, scatter_backward) if indices_gradient else scatter_forward
    result = scatter_function(base_grid, indices, values)
    return result
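Usage sketch (illustrative, not from the library docs): scattering three values into a 1-D grid with mode='add'; duplicate indices are accumulated. For a grid with a single indexed dim, raveled indices without a channel dimension are accepted.

>>> grid = math.zeros(spatial(x=4))
>>> indices = wrap([1, 1, 3], instance('points'))
>>> values = wrap([1., 1., 1.], instance('points'))
>>> math.scatter(grid, indices, values, mode='add')  # expected values 0, 2, 0, 1 along x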
def seed(seed: int)

Sets the current seed of all backends and the built-in random package.

Calling this function with a fixed value at the start of an application yields reproducible results as long as the same backend is used.

Args

seed
Seed to use.
Expand source code
def seed(seed: int):
    """
    Sets the current seed of all backends and the built-in `random` package.

    Calling this function with a fixed value at the start of an application yields reproducible results
    as long as the same backend is used.

    Args:
        seed: Seed to use.
    """
    for backend in BACKENDS:
        backend.seed(seed)
    import random
    random.seed(seed)
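Usage sketch (illustrative): fixing the seed makes subsequently created random tensors reproducible.

>>> math.seed(0)
>>> a = math.random_uniform(spatial(x=3))
>>> math.seed(0)
>>> b = math.random_uniform(spatial(x=3))
>>> math.assert_close(a, b)  # identical samples after re-seeding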
def set_global_precision(floating_point_bits: int)

Sets the floating point precision of DYNAMIC_BACKEND which affects all registered backends.

If floating_point_bits is an integer, all floating point tensors created henceforth will be of the corresponding data type, float16, float32 or float64. Operations may also convert floating point values to this precision, even if the input had a different precision.

If floating_point_bits is None, new tensors will default to float32 unless specified otherwise. The output of math operations has the same precision as its inputs.

Args

floating_point_bits
one of (16, 32, 64, None)
Expand source code
def set_global_precision(floating_point_bits: int):
    """
    Sets the floating point precision of DYNAMIC_BACKEND which affects all registered backends.

    If `floating_point_bits` is an integer, all floating point tensors created henceforth will be of the corresponding data type, float16, float32 or float64.
    Operations may also convert floating point values to this precision, even if the input had a different precision.

    If `floating_point_bits` is None, new tensors will default to float32 unless specified otherwise.
    The output of math operations has the same precision as its inputs.

    Args:
      floating_point_bits: one of (16, 32, 64, None)
    """
    _PRECISION[0] = floating_point_bits
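Usage sketch (illustrative): switching to 64-bit precision affects tensors created afterwards.

>>> set_global_precision(64)
>>> get_precision()
64
>>> math.ones(spatial(x=3)).dtype  # float64 while 64-bit precision is active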
def shape(obj, allow_unshaped=False) ‑> phiml.math._shape.Shape

If obj is a Tensor or Shaped, returns its shape. If obj is a Shape, returns obj.

This function can be passed as a dim argument to an operation to specify that it should act upon all dimensions.

Args

obj
Tensor or Shape or Shaped
allow_unshaped
If True, returns an empty shape for unsupported objects, else raises a ValueError.

Returns

Shape

Expand source code
def shape(obj, allow_unshaped=False) -> Shape:
    """
    If `obj` is a `Tensor` or `phiml.math.magic.Shaped`, returns its shape.
    If `obj` is a `Shape`, returns `obj`.

    This function can be passed as a `dim` argument to an operation to specify that it should act upon all dimensions.

    Args:
        obj: `Tensor` or `Shape` or `Shaped`
        allow_unshaped: If `True`, returns an empty shape for unsupported objects, else raises a `ValueError`.

    Returns:
        `Shape`
    """
    from .magic import PhiTreeNode, Shaped
    if isinstance(obj, Shape):
        return obj
    elif hasattr(obj, '__shape__'):
        return obj.__shape__()
    elif hasattr(obj, 'shape') and isinstance(obj.shape, Shape):
        return obj.shape
    elif isinstance(obj, (int, float, complex, bool)):
        return EMPTY_SHAPE
    elif isinstance(obj, (tuple, list)) and all(isinstance(item, (int, float, complex, bool)) for item in obj):
        return channel('vector')
    elif isinstance(obj, (Number, bool)):
        return EMPTY_SHAPE
    elif obj is None:
        return EMPTY_SHAPE
    elif isinstance(obj, (tuple, list)) and all(isinstance(item, (PhiTreeNode, Shaped)) for item in obj):
        return merge_shapes(*obj, allow_varying_sizes=True)
    if isinstance(obj, dict) and all(isinstance(item, (PhiTreeNode, Shaped)) for item in obj):
        return merge_shapes(*obj.values(), allow_varying_sizes=True)
    elif isinstance(obj, PhiTreeNode):
        from ._magic_ops import all_attributes
        return merge_shapes(*[getattr(obj, a) for a in all_attributes(obj, assert_any=True)], allow_varying_sizes=True)
    else:
        from ..backend import choose_backend, NoBackendFound
        try:
            backend = choose_backend(obj)
            shape_tuple = backend.staticshape(obj)
            if len(shape_tuple) == 0:
                return EMPTY_SHAPE
            elif len(shape_tuple) == 1:
                return channel('vector')
            else:
                raise ValueError(f"Cannot auto-complete shape of {backend} tensor with shape {shape_tuple}. Only 0D and 1D tensors have a Φ-ML shape by default.")
        except NoBackendFound:
            if allow_unshaped:
                return EMPTY_SHAPE
            raise ValueError(f'shape() requires Shaped or Shape argument but got {type(obj)}')
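Usage sketch (illustrative):

>>> s = shape(math.zeros(spatial(x=4, y=3)))  # the Shape (x=4, y=3)
>>> s.volume
12
>>> shape(1.5)  # plain numbers have an empty shape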
def shift(x: phiml.math._tensors.Tensor, offsets: Sequence[int], dims: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function spatial>, padding: Union[Extrapolation, float, phiml.math._tensors.Tensor, str, None] = zero-gradient, stack_dim: Union[phiml.math._shape.Shape, str, None] = (shiftᶜ=None), extend_bounds=0) ‑> List[phiml.math._tensors.Tensor]

Shift the tensor x by a fixed offset, using padding for edge values.

This is similar to numpy.roll() but with major differences:

  • Values shifted in from the boundary are defined by padding.
  • Positive offsets represent negative shifts.
  • Support for multi-dimensional shifts

See Also: index_shift(), neighbor_reduce().

Args

x
Input grid-like Tensor.
offsets
tuple listing shifts to compute, each must be an int. One Tensor will be returned for each entry.
dims
Dimensions along which to shift, defaults to all spatial dims of x.
padding
Padding to be performed at the boundary so that the shifted versions have the same size as x. Must be one of the following: Extrapolation, Tensor or number for constant extrapolation, name of extrapolation as str. Can be set to None to disable padding. Then the result tensors will be smaller than x.
stack_dim
Dimension along which the components corresponding to each dim in dims should be stacked. This can be set to None only if dims is a single dimension.
extend_bounds
Number of cells by which to pad the tensors in addition to the number required to maintain the size of x. Can only be used with a valid padding.

Returns

list of shifted tensors. The number of return tensors is equal to the number of offsets.

Expand source code
def shift(x: Tensor,
          offsets: Sequence[int],
          dims: DimFilter = math.spatial,
          padding: Union[Extrapolation, float, Tensor, str, None] = extrapolation.BOUNDARY,
          stack_dim: Union[Shape, str, None] = channel('shift'),
          extend_bounds=0) -> List[Tensor]:
    """
    Shift the tensor `x` by a fixed offset, using `padding` for edge values.

    This is similar to `numpy.roll()` but with major differences:

    * Values shifted in from the boundary are defined by `padding`.
    * Positive offsets represent negative shifts.
    * Support for multi-dimensional shifts

    See Also:
        `index_shift`, `neighbor_reduce`.

    Args:
        x: Input grid-like `Tensor`.
        offsets: `tuple` listing shifts to compute, each must be an `int`. One `Tensor` will be returned for each entry.
        dims: Dimensions along which to shift, defaults to all *spatial* dims of `x`.
        padding: Padding to be performed at the boundary so that the shifted versions have the same size as `x`.
            Must be one of the following: `Extrapolation`, `Tensor` or number for constant extrapolation, name of extrapolation as `str`.
            Can be set to `None` to disable padding. Then the result tensors will be smaller than `x`.
        stack_dim: Dimension along which the components corresponding to each dim in `dims` should be stacked.
            This can be set to `None` only if `dims` is a single dimension.
        extend_bounds: Number of cells by which to pad the tensors in addition to the number required to maintain the size of `x`.
            Can only be used with a valid `padding`.

    Returns:
        `list` of shifted tensors. The number of return tensors is equal to the number of `offsets`.
    """
    if dims is None:
        raise ValueError("dims=None is not supported anymore.")
    dims = x.shape.only(dims).names
    if stack_dim is None:
        assert len(dims) == 1
    x = wrap(x)
    pad_lower = max(0, -min(offsets))
    pad_upper = max(0, max(offsets))
    if padding is not None:
        x = math.pad(x, {axis: (pad_lower + extend_bounds, pad_upper + extend_bounds) for axis in dims}, mode=padding)
    if extend_bounds:
        assert padding is not None
    offset_tensors = []
    for offset in offsets:
        components = {}
        for dimension in dims:
            if padding is not None:
                slices = {dim: slice(pad_lower + offset, (-pad_upper + offset) or None) if dim == dimension else slice(pad_lower, -pad_upper or None) for dim in dims}
            else:
                slices = {dim: slice(pad_lower + offset, (-pad_upper + offset) or None) if dim == dimension else slice(None, None) for dim in dims}
            components[dimension] = x[slices]
        offset_tensors.append(stack(components, stack_dim) if stack_dim is not None else next(iter(components.values())))
    return offset_tensors
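Usage sketch (illustrative): with padding=None, the shifted copies are trimmed instead of padded.

>>> x = wrap([1., 2., 3., 4.], spatial('x'))
>>> left, right = shift(x, (-1, 1), 'x', padding=None, stack_dim=None)  # left holds x[0:-2] -> (1, 2); right holds x[2:] -> (3, 4)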
def si2d(value)

Change the type of all spatial and instance dimensions of value to dual dimensions. See rename_dims().

Expand source code
def si2d(value):
    """ Change the type of all *spatial* and *instance* dimensions of `value` to *dual* dimensions. See `rename_dims`. """
    return rename_dims(value, lambda s: s.non_channel.non_dual.non_batch, dual)
def sigmoid(x: ~TensorOrTree) ‑> ~TensorOrTree

Computes the sigmoid function of the Tensor or PhiTreeNode x.

Expand source code
def sigmoid(x: TensorOrTree) -> TensorOrTree:
    """ Computes the sigmoid function of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.sigmoid)
def sign(x: ~TensorOrTree) ‑> ~TensorOrTree

The sign of positive numbers is 1 and that of negative numbers is -1. The sign of 0 is undefined.

Args

x
Tensor or PhiTreeNode

Returns

Tensor or PhiTreeNode matching x.

Expand source code
def sign(x: TensorOrTree) -> TensorOrTree:
    """
    The sign of positive numbers is 1 and that of negative numbers is -1.
    The sign of 0 is undefined.

    Args:
        x: `Tensor` or `phiml.math.magic.PhiTreeNode`

    Returns:
        `Tensor` or `phiml.math.magic.PhiTreeNode` matching `x`.
    """
    return _backend_op1(x, Backend.sign)
def sin(x: ~TensorOrTree) ‑> ~TensorOrTree

Computes sin(x) of the Tensor or PhiTreeNode x.

Expand source code
def sin(x: TensorOrTree) -> TensorOrTree:
    """ Computes *sin(x)* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.sin)
def sinh(x: ~TensorOrTree) ‑> ~TensorOrTree

Computes sinh(x) of the Tensor or PhiTreeNode x.

Expand source code
def sinh(x: TensorOrTree) -> TensorOrTree:
    """ Computes *sinh(x)* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.sinh)
def slice(value, slices: Dict[str, Union[int, slice, str, tuple, list]])

Slices a Tensor or PhiTreeNode along named dimensions.

See Also: unstack().

Args

value
Tensor or PhiTreeNode or Number or None.
slices

dict mapping dimension names to slices. A slice can be one of the following:

  • An index (int)
  • A range (slice)
  • An item name (str)
  • Multiple item names (comma-separated str)
  • Multiple indices or item names (tuple or list)

Returns

Tensor or PhiTreeNode of the same type as value.

Examples

>>> math.slice([vec(x=0, y=1), vec(x=2, y=3)], {'vector': 'y'})
[1, 3]
Expand source code
def slice_(value, slices: Dict[str, Union[int, slice, str, tuple, list]]):
    """
    Slices a `Tensor` or `phiml.math.magic.PhiTreeNode` along named dimensions.

    See Also:
        `unstack`.

    Args:
        value: `Tensor` or `phiml.math.magic.PhiTreeNode` or `Number` or `None`.
        slices: `dict` mapping dimension names to slices. A slice can be one of the following:

            * An index (`int`)
            * A range (`slice`)
            * An item name (`str`)
            * Multiple item names (comma-separated `str`)
            * Multiple indices or item names (`tuple` or `list`)

    Returns:
        `Tensor` or `phiml.math.magic.PhiTreeNode` of the same type as `value`.

    Examples:
        >>> math.slice([vec(x=0, y=1), vec(x=2, y=3)], {'vector': 'y'})
        [1, 3]
    """
    if isinstance(value, (bool, Number, str)) or value is None:
        return value
    if isinstance(value, tuple):
        return tuple([slice_(v, slices) for v in value])
    if isinstance(value, list):
        return [slice_(v, slices) for v in value]
    if isinstance(value, dict):
        return {k: slice_(v, slices) for k, v in value.items()}
    if isinstance(value, Shape):
        raise NotImplementedError
    if hasattr(value, '__getitem__'):
        return value[slices]
    if isinstance(value, PhiTreeNode):
        attrs = {key: getattr(value, key) for key in value_attributes(value)}
        new_attrs = {k: slice_(v, slices) for k, v in attrs.items()}
        return copy_with(value, **new_attrs)
    raise ValueError(f"value must be a PhiTreeNode but got {type(value)}")
def slice_off(x, *slices: Dict[str, Union[slice, int, str]])

Args

x
Any instance of Shapable

*slices
dicts mapping dimension names to indices, ranges or item names specifying which entries to remove from x.

Returns

x with the specified entries sliced off.

Expand source code
def slice_off(x, *slices: Dict[str, Union[slice, int, str]]):
    """

    Args:
        x: Any instance of `phiml.math.magic.Shapable`
        *slices:

    Returns:

    """
    if not slices:
        return x
    x_shape = shape(x)
    dims = set().union(*[s.keys() for s in slices])
    dims = x_shape.only(dims).names
    depth = max(len(s) for s in slices)
    if depth == 1:
        if len(dims) == 1:
            d = dims[0]
            if all(all(_edge_slice(x_shape, dim, s) for dim, s in s_dict.items()) for s_dict in slices):  # only edges
                edge_slices = [_edge_slice(x_shape, dim, s) for s_dict in slices for dim, s in s_dict.items()]
                if any(s.start == 0 and s.stop is None for s in edge_slices):  # everything sliced off
                    return x[{d: slice(0, 0)}]
                start_slices = [s for s in edge_slices if s.start == 0]
                end_slices = [s for s in edge_slices if s.stop is None]
                start = max(s.stop for s in start_slices) if start_slices else 0  # at this point, s.stop must be an int
                end = min(s.start for s in end_slices) if end_slices else None
                return x[{d: slice(start, end)}]
            else:
                size = x_shape.get_size(d)
                mask = np.ones(size, dtype=np.bool_)
                for s_dict in slices:
                    s = next(iter(s_dict.values()))
                    if isinstance(s, str):
                        names = x_shape.get_item_names(d)
                        s = [names.index(n.strip()) for n in s.split(',')]
                    mask[s] = 0
                return boolean_mask(x, d, wrap(mask, x_shape[d]))
    unstack_dim = x_shape.only(_preferred_unstack_dim(x, dims))
    x_slices = unstack(x, unstack_dim)
    x_slices_out = []
    for i, x_slice in enumerate(x_slices):
        slices_without_unstack_dim = [{k: v for k, v in s_dict.items() if k != unstack_dim.name} for s_dict in slices if _includes_slice(s_dict, unstack_dim, i)]
        sliced_x_slice = slice_off(x_slice, *slices_without_unstack_dim)
        x_slices_out.append(sliced_x_slice)
    assembled = stack(x_slices_out, unstack_dim)
    slices_for_unstack_dim_only = [s_dict for s_dict in slices if len(s_dict) == 1 and unstack_dim.name in s_dict]
    result = slice_off(assembled, *slices_for_unstack_dim_only)
    return result
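Usage sketch (illustrative): removing the first entry along x.

>>> x = wrap([10, 20, 30, 40], spatial('x'))
>>> slice_off(x, {'x': 0})  # expected to keep 20, 30, 40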
def soft_plus(x: ~TensorOrTree) ‑> ~TensorOrTree

Computes softplus(x) of the Tensor or PhiTreeNode x.

Expand source code
def soft_plus(x: TensorOrTree) -> TensorOrTree:
    """ Computes *softplus(x)* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.softplus)
def solve_linear(f: Union[Callable[[~X], ~Y], phiml.math._tensors.Tensor], y: ~Y, solve: phiml.math._optimize.Solve[~X, ~Y], *f_args, grad_for_f=False, f_kwargs: dict = None, **f_kwargs_) ‑> ~X

Solves the system of linear equations f(x) = y and returns x. This method will use the solver specified in solve. The following method identifiers are supported by all backends:

  • 'auto': Automatically choose a solver
  • 'CG': Conjugate gradient, only for symmetric and positive definite matrices.
  • 'CG-adaptive': Conjugate gradient with adaptive step size, only for symmetric and positive definite matrices.
  • 'biCG' or 'biCG-stab(0)': Biconjugate gradient
  • 'biCG-stab' or 'biCG-stab(1)': Biconjugate gradient stabilized, first order
  • 'biCG-stab(2)', 'biCG-stab(4)', …: Biconjugate gradient stabilized, second or higher order
  • 'scipy-direct': SciPy direct solve always run on the CPU using scipy.sparse.linalg.spsolve.
  • 'scipy-CG', 'scipy-GMres', 'scipy-biCG', 'scipy-biCG-stab', 'scipy-CGS', 'scipy-QMR', 'scipy-GCrotMK': SciPy iterative solvers always run on the CPU, both in eager execution and JIT mode.

For maximum performance, compile f using jit_compile_linear() beforehand. Then, an optimized representation of f (such as a sparse matrix) will be used to solve the linear system.

Caution: The matrix construction may potentially be performed each time solve_linear() is called if auxiliary arguments change. To prevent this, jit-compile the function that makes the call to solve_linear().

To obtain additional information about the performed solve, perform the solve within a SolveTape context. The used implementation can be obtained as SolveInfo.method.

The gradient of this operation will perform another linear solve with the parameters specified by Solve.gradient_solve.

See Also: solve_nonlinear(), jit_compile_linear().

Args

f

One of the following:

  • Linear function with Tensor or PhiTreeNode first parameter and return value. f can have additional auxiliary arguments and return auxiliary values.
  • Dense matrix (Tensor with at least one dual dimension)
  • Sparse matrix (Sparse Tensor with at least one dual dimension)
  • Native tensor (not yet supported)
y
Desired output of f(x) as Tensor or PhiTreeNode.
solve
Solve object specifying optimization method, parameters and initial guess for x.
*f_args
Positional arguments to be passed to f after solve.x0. These arguments will not be solved for. Supports vararg mode or passing all arguments as a tuple.
f_kwargs
Additional keyword arguments to be passed to f. These arguments are treated as auxiliary arguments and can be of any type.

Returns

x
solution of the linear system of equations f(x) = y as Tensor or PhiTreeNode.

Raises

NotConverged
If the desired accuracy was not reached within the maximum number of iterations.
Diverged
If the solve failed prematurely.
Expand source code
def solve_linear(f: Union[Callable[[X], Y], Tensor],
                 y: Y,
                 solve: Solve[X, Y],
                 *f_args,
                 grad_for_f=False,
                 f_kwargs: dict = None,
                 **f_kwargs_) -> X:
    """
    Solves the system of linear equations *f(x) = y* and returns *x*.
    This method will use the solver specified in `solve`.
    The following method identifiers are supported by all backends:

    * `'auto'`: Automatically choose a solver
    * `'CG'`: Conjugate gradient, only for symmetric and positive definite matrices.
    * `'CG-adaptive'`: Conjugate gradient with adaptive step size, only for symmetric and positive definite matrices.
    * `'biCG'` or `'biCG-stab(0)'`: Biconjugate gradient
    * `'biCG-stab'` or `'biCG-stab(1)'`: Biconjugate gradient stabilized, first order
    * `'biCG-stab(2)'`, `'biCG-stab(4)'`, ...: Biconjugate gradient stabilized, second or higher order
    * `'scipy-direct'`: SciPy direct solve always run on the CPU using `scipy.sparse.linalg.spsolve`.
    * `'scipy-CG'`, `'scipy-GMres'`, `'scipy-biCG'`, `'scipy-biCG-stab'`, `'scipy-CGS'`, `'scipy-QMR'`, `'scipy-GCrotMK'`: SciPy iterative solvers always run on the CPU, both in eager execution and JIT mode.

    For maximum performance, compile `f` using `jit_compile_linear()` beforehand.
    Then, an optimized representation of `f` (such as a sparse matrix) will be used to solve the linear system.

    **Caution:** The matrix construction may potentially be performed each time `solve_linear` is called if auxiliary arguments change.
    To prevent this, jit-compile the function that makes the call to `solve_linear`.

    To obtain additional information about the performed solve, perform the solve within a `SolveTape` context.
    The used implementation can be obtained as `SolveInfo.method`.

    The gradient of this operation will perform another linear solve with the parameters specified by `Solve.gradient_solve`.

    See Also:
        `solve_nonlinear()`, `jit_compile_linear()`.

    Args:
        f: One of the following:

            * Linear function with `Tensor` or `phiml.math.magic.PhiTreeNode` first parameter and return value. `f` can have additional auxiliary arguments and return auxiliary values.
            * Dense matrix (`Tensor` with at least one dual dimension)
            * Sparse matrix (Sparse `Tensor` with at least one dual dimension)
            * Native tensor (not yet supported)

        y: Desired output of `f(x)` as `Tensor` or `phiml.math.magic.PhiTreeNode`.
        solve: `Solve` object specifying optimization method, parameters and initial guess for `x`.
        *f_args: Positional arguments to be passed to `f` after `solve.x0`. These arguments will not be solved for.
            Supports vararg mode or passing all arguments as a `tuple`.
        f_kwargs: Additional keyword arguments to be passed to `f`.
            These arguments are treated as auxiliary arguments and can be of any type.

    Returns:
        x: solution of the linear system of equations `f(x) = y` as `Tensor` or `phiml.math.magic.PhiTreeNode`.

    Raises:
        NotConverged: If the desired accuracy was not reached within the maximum number of iterations.
        Diverged: If the solve failed prematurely.
    """
    # --- Handle parameters ---
    f_kwargs = f_kwargs or {}
    f_kwargs.update(f_kwargs_)
    f_args = f_args[0] if len(f_args) == 1 and isinstance(f_args[0], tuple) else f_args
    # --- Get input and output tensors ---
    y_tree, y_tensors = disassemble_tree(y, cache=False, attr_type=value_attributes)
    x0_tree, x0_tensors = disassemble_tree(solve.x0, cache=False, attr_type=value_attributes)
    assert solve.x0 is not None, "Please specify the initial guess as Solve(..., x0=initial_guess)"
    assert len(x0_tensors) == len(y_tensors) == 1, "Only single-tensor linear solves are currently supported"
    if y_tree == 'native' and x0_tree == 'native':
        if callable(f):  # assume batch + 1 dim
            rank = y_tensors[0].rank
            assert x0_tensors[0].rank == rank, f"y and x0 must have the same rank but got {y_tensors[0].shape.sizes} for y and {x0_tensors[0].shape.sizes} for x0"
            if rank == 0:
                y = wrap(y)
                x0 = wrap(solve.x0)
            else:
                y = wrap(y, *[batch(f'batch{i}') for i in range(rank - 1)], channel('vector'))
                x0 = wrap(solve.x0, *[batch(f'batch{i}') for i in range(rank - 1)], channel('vector'))
            solve = copy_with(solve, x0=x0)
            solution = solve_linear(f, y, solve, *f_args, grad_for_f=grad_for_f, f_kwargs=f_kwargs, **f_kwargs_)
            return solution.native(','.join([f'batch{i}' for i in range(rank - 1)]) + ',vector')
        else:
            b = choose_backend(y, solve.x0, f)
            f_dims = b.staticshape(f)
            y_dims = b.staticshape(y)
            x_dims = b.staticshape(solve.x0)
            rank = len(f_dims) - 2
            assert rank >= 0, f"f must be a matrix but got shape {f_dims}"
            f = wrap(f, *[batch(f'batch{i}') for i in range(rank - 1)], channel('vector'), dual('vector'))
            if len(x_dims) == len(f_dims):  # matrix solve
                assert len(x_dims) == len(f_dims)
                assert x_dims[-2] == f_dims[-1]
                assert y_dims[-2] == f_dims[-2]
                y = wrap(y, *[batch(f'batch{i}') for i in range(rank - 1)], channel('vector'), batch('extra_batch'))
                x0 = wrap(solve.x0, *[batch(f'batch{i}') for i in range(rank - 1)], channel('vector'), batch('extra_batch'))
                solve = copy_with(solve, x0=x0)
                solution = solve_linear(f, y, solve, *f_args, grad_for_f=grad_for_f, f_kwargs=f_kwargs, **f_kwargs_)
                return solution.native(','.join([f'batch{i}' for i in range(rank - 1)]) + ',vector,extra_batch')
            else:
                assert len(x_dims) == len(f_dims) - 1
                assert x_dims[-1] == f_dims[-1]
                assert y_dims[-1] == f_dims[-2]
                y = wrap(y, *[batch(f'batch{i}') for i in range(rank - 1)], channel('vector'))
                x0 = wrap(solve.x0, *[batch(f'batch{i}') for i in range(rank - 1)], channel('vector'))
                solve = copy_with(solve, x0=x0)
                solution = solve_linear(f, y, solve, *f_args, grad_for_f=grad_for_f, f_kwargs=f_kwargs, **f_kwargs_)
                return solution.native(','.join([f'batch{i}' for i in range(rank - 1)]) + ',vector')
    backend = choose_backend_t(*y_tensors, *x0_tensors)
    prefer_explicit = backend.supports(Backend.sparse_coo_tensor) or backend.supports(Backend.csr_matrix) or grad_for_f

    if isinstance(f, Tensor) or (isinstance(f, LinearFunction) and prefer_explicit):  # Matrix solve
        if isinstance(f, LinearFunction):
            x0 = math.convert(solve.x0)
            matrix, bias = f.sparse_matrix_and_bias(x0, *f_args, **f_kwargs)
        else:
            matrix = f
            bias = 0
        preconditioner = compute_preconditioner(solve.preconditioner, matrix, safe=False, target_backend=NUMPY if solve.method.startswith('scipy-') else backend, solver=solve.method) if solve.preconditioner is not None else None

        def _matrix_solve_forward(y, solve: Solve, matrix: Tensor, is_backprop=False):
            backend_matrix = native_matrix(matrix, choose_backend_t(*y_tensors, matrix))
            pattern_dims_in = dual(matrix).as_channel().names
            pattern_dims_out = non_dual(matrix).names  # batch dims can be sparse or batched matrices
            result = _linear_solve_forward(y, solve, backend_matrix, pattern_dims_in, pattern_dims_out, preconditioner, backend, is_backprop)
            return result  # must return exactly `x` so gradient isn't computed w.r.t. other quantities

        _matrix_solve = attach_gradient_solve(_matrix_solve_forward, auxiliary_args=f'is_backprop,solve{",matrix" if matrix.default_backend == NUMPY else ""}', matrix_adjoint=grad_for_f)
        return _matrix_solve(y - bias, solve, matrix)
    else:  # Matrix-free solve
        f_args = cached(f_args)
        solve = cached(solve)
        assert not grad_for_f, f"grad_for_f=True can only be used for math.jit_compile_linear functions but got '{f_name(f)}'. Please decorate the linear function with @jit_compile_linear"
        assert solve.preconditioner is None, f"Preconditioners not currently supported for matrix-free solves. Decorate '{f_name(f)}' with @math.jit_compile_linear to perform a matrix solve."

        def _function_solve_forward(y, solve: Solve, f_args: tuple, f_kwargs: dict = None, is_backprop=False):
            y_nest, (y_tensor,) = disassemble_tree(y, cache=False, attr_type=value_attributes)
            x0_nest, (x0_tensor,) = disassemble_tree(solve.x0, cache=False, attr_type=value_attributes)
            # active_dims = (y_tensor.shape & x0_tensor.shape).non_batch  # assumes batch dimensions are not active
            batches = (y_tensor.shape & x0_tensor.shape).batch

            def native_lin_f(native_x, batch_index=None):
                if batch_index is not None and batches.volume > 1:
                    native_x = backend.tile(backend.expand_dims(native_x), [batches.volume, 1])
                x = assemble_tree(x0_nest, [reshaped_tensor(native_x, [batches, non_batch(x0_tensor)] if backend.ndims(native_x) >= 2 else [non_batch(x0_tensor)], convert=False)], attr_type=value_attributes)
                y = f(x, *f_args, **f_kwargs)
                _, (y_tensor,) = disassemble_tree(y, cache=False, attr_type=value_attributes)
                y_native = reshaped_native(y_tensor, [batches, non_batch(y_tensor)] if backend.ndims(native_x) >= 2 else [non_batch(y_tensor)])
                if batch_index is not None and batches.volume > 1:
                    y_native = y_native[batch_index]
                return y_native

            result = _linear_solve_forward(y, solve, native_lin_f, pattern_dims_in=non_batch(x0_tensor).names, pattern_dims_out=non_batch(y_tensor).names, preconditioner=None, backend=backend, is_backprop=is_backprop)
            return result  # must return exactly `x` so gradient isn't computed w.r.t. other quantities

        _function_solve = attach_gradient_solve(_function_solve_forward, auxiliary_args='is_backprop,f_kwargs,solve', matrix_adjoint=grad_for_f)
        return _function_solve(y, solve, f_args, f_kwargs=f_kwargs)
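Usage sketch (illustrative; assumes Solve accepts (method, rel_tol, abs_tol, x0=...)): solving 2*x = y with conjugate gradients.

>>> y = wrap([2., 4., 6.], spatial('x'))
>>> solve = math.Solve('CG', 1e-5, 1e-5, x0=math.zeros_like(y))
>>> math.solve_linear(math.jit_compile_linear(lambda x: 2 * x), y, solve)  # expected ≈ (1, 2, 3)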
def solve_nonlinear(f: Callable, y, solve: phiml.math._optimize.Solve) ‑> phiml.math._tensors.Tensor

Solves the non-linear equation f(x) = y by minimizing the norm of the residual.

This method is limited to backends that support jacobian(), currently PyTorch, TensorFlow and Jax.

To obtain additional information about the performed solve, use a SolveTape.

See Also: minimize(), solve_linear().

Args

f
Function whose output is optimized to match y. All positional arguments of f are optimized and must be Tensor or PhiTreeNode. The output of f must match y.
y
Desired output of f(x) as Tensor or PhiTreeNode.
solve
Solve object specifying optimization method, parameters and initial guess for x.

Returns

x
Solution fulfilling f(x) = y within specified tolerance as Tensor or PhiTreeNode.

Raises

NotConverged
If the desired accuracy was not reached within the maximum number of iterations.
Diverged
If the solve failed prematurely.
Expand source code
def solve_nonlinear(f: Callable, y, solve: Solve) -> Tensor:
    """
    Solves the non-linear equation *f(x) = y* by minimizing the norm of the residual.

    This method is limited to backends that support `jacobian()`, currently PyTorch, TensorFlow and Jax.

    To obtain additional information about the performed solve, use a `SolveTape`.

    See Also:
        `minimize()`, `solve_linear()`.

    Args:
        f: Function whose output is optimized to match `y`.
            All positional arguments of `f` are optimized and must be `Tensor` or `phiml.math.magic.PhiTreeNode`.
            The output of `f` must match `y`.
        y: Desired output of `f(x)` as `Tensor` or `phiml.math.magic.PhiTreeNode`.
        solve: `Solve` object specifying optimization method, parameters and initial guess for `x`.

    Returns:
        x: Solution fulfilling `f(x) = y` within specified tolerance as `Tensor` or `phiml.math.magic.PhiTreeNode`.

    Raises:
        NotConverged: If the desired accuracy was not reached within the maximum number of iterations.
        Diverged: If the solve failed prematurely.
    """
    def min_func(x):
        diff = f(x) - y
        l2 = l2_loss(diff)
        return l2
    if solve.preprocess_y is not None:
        y = solve.preprocess_y(y)
    from ._nd import l2_loss
    solve = solve.with_defaults('solve')
    tol = math.maximum(solve.rel_tol * l2_loss(y), solve.abs_tol)
    min_solve = copy_with(solve, abs_tol=tol, rel_tol=0, preprocess_y=None)
    return minimize(min_func, min_solve)
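Usage sketch (illustrative; requires a backend with jacobian support such as PyTorch, TensorFlow or Jax, and assumes Solve accepts (method, rel_tol, abs_tol, x0=...)):

>>> y = wrap([4., 9.], spatial('x'))
>>> solve = math.Solve('auto', 1e-5, 1e-5, x0=math.ones(spatial(x=2)))
>>> math.solve_nonlinear(lambda x: x ** 2, y, solve)  # expected ≈ (2, 3)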
def sort(x: phiml.math._tensors.Tensor, dim: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function non_batch>) ‑> phiml.math._tensors.Tensor

Sort the values of x along dim. In order to sort a flattened array, use pack_dims() first.

Args

x
Tensor
dim
Dimension to sort. If not present, sorting will be skipped. Defaults to non-batch dim.

Returns

Sorted Tensor or x if x is constant along dim.

Expand source code
def sort(x: Tensor, dim: DimFilter = non_batch) -> Tensor:
    """
    Sort the values of `x` along `dim`.
    In order to sort a flattened array, use `pack_dims` first.

    Args:
        x: `Tensor`
        dim: Dimension to sort. If not present, sorting will be skipped. Defaults to non-batch dim.

    Returns:
        Sorted `Tensor` or `x` if `x` is constant along `dim`.
    """
    v_shape = variable_shape(x)
    dim = v_shape.only(dim)
    if not dim:
        return x  # nothing to do; x is constant along dim
    assert dim.rank == 1, f"Can only sort one dimension at a time. Use pack_dims() to jointly sort over multiple dimensions."
    axis = v_shape.index(dim)
    x_native = x._native if isinstance(x, NativeTensor) else x.native(x.shape)
    sorted_native = x.default_backend.sort(x_native, axis=axis)
    x_shape = x.shape
    if x.shape.get_item_names(dim):
        warnings.warn(f"sort() removes item names along sorted axis '{dim}'. Was {x.shape.get_item_names(dim)}", RuntimeWarning, stacklevel=2)
        v_shape = v_shape.with_dim_size(dim, v_shape.get_size(dim), keep_item_names=False)
        x_shape = x_shape.with_dim_size(dim, x_shape.get_size(dim), keep_item_names=False)
    return NativeTensor(sorted_native, v_shape, x_shape)
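Usage sketch (illustrative):

>>> sort(wrap([4., 1., 3., 2.], spatial('x')))  # expected (1, 2, 3, 4) along x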
def sparse_tensor(indices: phiml.math._tensors.Tensor, values: phiml.math._tensors.Tensor, dense_shape: phiml.math._shape.Shape, can_contain_double_entries=True, indices_sorted=False, format='coo', indices_constant: bool = True) ‑> phiml.math._tensors.Tensor

Construct a sparse tensor that stores values at the corresponding indices and is 0 everywhere else. In addition to the sparse dimensions indexed by indices, the tensor inherits all batch and channel dimensions from values.

Args

indices

Tensor encoding the positions of stored values. It has the following dimensions:

  • One instance dimension exactly matching the instance dimension on values. It enumerates the positions of stored entries.
  • One channel dimension. Its item names must match the dimension names of dense_shape but the order can be arbitrary.
  • Any number of batch dimensions
values

Tensor containing the stored values at positions given by indices. It has the following dimensions:

  • One instance dimension exactly matching the instance dimension on indices. It enumerates the values of stored entries.
  • Any number of channel dimensions if multiple values are stored at each index.
  • Any number of batch dimensions
dense_shape
Dimensions listed in indices. The order can differ from the item names of indices.
can_contain_double_entries
Whether some indices might occur more than once. If so, values at the same index will be summed.
indices_sorted
Whether the indices are sorted in ascending order given the dimension order of the item names of indices.
indices_constant
Whether the positions of the non-zero values are fixed. If True, JIT compilation will not create a placeholder for indices.
format
Sparse format in which to store the data, such as 'coo' or 'csr'. See get_format().

Returns

Sparse Tensor with the specified format.

Expand source code
def sparse_tensor(indices: Tensor,
                  values: Tensor,
                  dense_shape: Shape,
                  can_contain_double_entries=True,
                  indices_sorted=False,
                  format='coo',
                  indices_constant: bool = True) -> Tensor:
    """
    Construct a sparse tensor that stores `values` at the corresponding `indices` and is 0 everywhere else.
    In addition to the sparse dimensions indexed by `indices`, the tensor inherits all batch and channel dimensions from `values`.

    Args:
        indices: `Tensor` encoding the positions of stored values. It has the following dimensions:

            * One instance dimension exactly matching the instance dimension on `values`.
              It enumerates the positions of stored entries.
            * One channel dimension.
              Its item names must match the dimension names of `dense_shape` but the order can be arbitrary.
            * Any number of batch dimensions

        values: `Tensor` containing the stored values at positions given by `indices`. It has the following dimensions:

            * One instance dimension exactly matching the instance dimension on `indices`.
              It enumerates the values of stored entries.
            * Any number of channel dimensions if multiple values are stored at each index.
            * Any number of batch dimensions

        dense_shape: Dimensions listed in `indices`.
            The order can differ from the item names of `indices`.
        can_contain_double_entries: Whether some indices might occur more than once.
            If so, values at the same index will be summed.
        indices_sorted: Whether the indices are sorted in ascending order given the dimension order of the item names of `indices`.
        indices_constant: Whether the positions of the non-zero values are fixed.
            If `True`, JIT compilation will not create a placeholder for `indices`.
        format: Sparse format in which to store the data, such as `'coo'` or `'csr'`. See `phiml.math.get_format`.

    Returns:
        Sparse `Tensor` with the specified `format`.
    """
    if indices_constant is None:
        indices_constant = indices.default_backend.name == 'numpy'
    assert isinstance(indices_constant, bool)
    coo = SparseCoordinateTensor(indices, values, dense_shape, can_contain_double_entries, indices_sorted, indices_constant)
    return to_format(coo, format)
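Usage sketch (illustrative; assumes wrap accepts a nested list together with the instance and channel dims): a 2×3 COO matrix with two stored entries.

>>> idx = wrap([(0, 0), (1, 2)], instance('entries'), channel(vector='x,y'))
>>> vals = wrap([1., 2.], instance('entries'))
>>> matrix = sparse_tensor(idx, vals, spatial(x=2, y=3), can_contain_double_entries=False)
>>> dense(matrix)  # expected zeros except 1 at (x=0, y=0) and 2 at (x=1, y=2)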
def spatial(*args, **dims: Union[int, str, tuple, list, phiml.math._shape.Shape, ForwardRef('Tensor')]) ‑> phiml.math._shape.Shape

Returns the spatial dimensions of an existing Shape or creates a new Shape with only spatial dimensions.

Usage for filtering spatial dimensions:

>>> spatial_dims = spatial(shape)
>>> spatial_dims = spatial(tensor)

Usage for creating a Shape with only spatial dimensions:

>>> spatial_shape = spatial('undef', x=2, y=3)
(x=2, y=3, undef=None)

Here, the dimension undef is created with an undefined size of None. Undefined sizes are automatically filled in by tensor(), wrap(), stack() and concat().

To create a shape with multiple types, use merge_shapes(), concat_shapes() or the syntax shape1 & shape2.

See Also: channel(), batch(), instance()

Args

*args

Either

  • Shape or Tensor to filter or
  • Names of dimensions with undefined sizes as str.
**dims
Dimension sizes and names. Must be empty when used as a filter operation.

Returns

Shape containing only dimensions of type spatial.

Expand source code
def spatial(*args, **dims: Union[int, str, tuple, list, Shape, 'Tensor']) -> Shape:
    """
    Returns the spatial dimensions of an existing `Shape` or creates a new `Shape` with only spatial dimensions.

    Usage for filtering spatial dimensions:
    >>> spatial_dims = spatial(shape)
    >>> spatial_dims = spatial(tensor)

    Usage for creating a `Shape` with only spatial dimensions:
    >>> spatial_shape = spatial('undef', x=2, y=3)
    (x=2, y=3, undef=None)

    Here, the dimension `undef` is created with an undefined size of `None`.
    Undefined sizes are automatically filled in by `tensor`, `wrap`, `stack` and `concat`.

    To create a shape with multiple types, use `merge_shapes()`, `concat_shapes()` or the syntax `shape1 & shape2`.

    See Also:
        `channel`, `batch`, `instance`

    Args:
        *args: Either

            * `Shape` or `Tensor` to filter or
            * Names of dimensions with undefined sizes as `str`.

        **dims: Dimension sizes and names. Must be empty when used as a filter operation.

    Returns:
        `Shape` containing only dimensions of type spatial.
    """
    from .magic import Shaped
    if all(isinstance(arg, str) for arg in args) or dims:
        return _construct_shape(SPATIAL_DIM, *args, **dims)
    elif len(args) == 1 and isinstance(args[0], Shape):
        return args[0].spatial
    elif len(args) == 1 and isinstance(args[0], Shaped):
        return shape(args[0]).spatial
    else:
        raise AssertionError(f"spatial() must be called either as a selector spatial(Shape) or spatial(Tensor) or as a constructor spatial(*names, **dims). Got *args={args}, **dims={dims}")
def spatial_gradient(grid: phiml.math._tensors.Tensor, dx: Union[float, phiml.math._tensors.Tensor] = 1, difference: str = 'central', padding: Union[Extrapolation, float, phiml.math._tensors.Tensor, str, None] = zero-gradient, dims: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function spatial>, stack_dim: Union[phiml.math._shape.Shape, str, None] = (gradientᶜ=None), pad=0) ‑> phiml.math._tensors.Tensor

Calculates the spatial_gradient of a scalar channel from finite differences. The spatial_gradient vectors are in reverse order, lowest dimension first.

Args

grid
grid values
dims
(Optional) Dimensions along which the spatial derivative will be computed. sequence of dimension names
dx
Physical distance between grid points, float or Tensor. When passing a vector-valued Tensor, the dx values should be listed along stack_dim, matching dims.
difference
type of difference, one of ('forward', 'backward', 'central') (default 'central')
padding
Padding mode. Must be one of the following: Extrapolation, Tensor or number for constant extrapolation, name of extrapolation as str.
stack_dim
name of the new vector dimension listing the spatial_gradient w.r.t. the various axes
pad
How many cells to extend the result compared to grid. This value is added to the internal padding. For non-trivial extrapolations, this gives the correct result while manual padding before or after this operation would not respect the boundary locations.

Returns

Tensor

Expand source code
def spatial_gradient(grid: Tensor,
                     dx: Union[float, Tensor] = 1,
                     difference: str = 'central',
                     padding: Union[Extrapolation, float, Tensor, str, None] = extrapolation.BOUNDARY,
                     dims: DimFilter = spatial,
                     stack_dim: Union[Shape, str, None] = channel('gradient'),
                     pad=0) -> Tensor:
    """
    Calculates the spatial_gradient of a scalar channel from finite differences.
    The spatial_gradient vectors are in reverse order, lowest dimension first.

    Args:
        grid: grid values
        dims: (Optional) Dimensions along which the spatial derivative will be computed. sequence of dimension names
        dx: Physical distance between grid points, `float` or `Tensor`.
            When passing a vector-valued `Tensor`, the dx values should be listed along `stack_dim`, matching `dims`.
        difference: type of difference, one of ('forward', 'backward', 'central') (default 'central')
        padding: Padding mode.
            Must be one of the following: `Extrapolation`, `Tensor` or number for constant extrapolation, name of extrapolation as `str`.
        stack_dim: name of the new vector dimension listing the spatial_gradient w.r.t. the various axes
        pad: How many cells to extend the result compared to `grid`.
            This value is added to the internal padding. For non-trivial extrapolations, this gives the correct result while manual padding before or after this operation would not respect the boundary locations.

    Returns:
        `Tensor`
    """
    grid = wrap(grid)
    if stack_dim and stack_dim in grid.shape:
        assert grid.shape.only(stack_dim).size == 1, f"spatial_gradient() cannot list components along {stack_dim.name} because that dimension already exists on grid {grid}"
        grid = grid[{stack_dim.name: 0}]
    dims = grid.shape.only(dims)
    dx = wrap(dx)
    if dx.vector.exists:
        dx = dx.vector[dims]
        if dx.vector.size in (None, 1):
            dx = dx.vector[0]
    if difference.lower() == 'central':
        left, right = shift(grid, (-1, 1), dims, padding, stack_dim=stack_dim, extend_bounds=pad)
        return (right - left) / (dx * 2)
    elif difference.lower() == 'forward':
        left, right = shift(grid, (0, 1), dims, padding, stack_dim=stack_dim, extend_bounds=pad)
        return (right - left) / dx
    elif difference.lower() == 'backward':
        left, right = shift(grid, (-1, 0), dims, padding, stack_dim=stack_dim, extend_bounds=pad)
        return (right - left) / dx
    else:
        raise ValueError('Invalid difference type: {}. Can be CENTRAL, FORWARD or BACKWARD'.format(difference))
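Usage sketch (illustrative): central differences with the default zero-gradient padding.

>>> grid = wrap([0., 1., 4., 9.], spatial('x'))
>>> spatial_gradient(grid, dx=1, difference='central')  # expected ≈ (0.5, 2, 4, 2.5) along x, with a size-1 'gradient' channel dim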
def sqrt(x: ~TensorOrTree) ‑> ~TensorOrTree

Computes sqrt(x) of the Tensor or PhiTreeNode x.

Expand source code
def sqrt(x: TensorOrTree) -> TensorOrTree:
    """ Computes *sqrt(x)* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.sqrt)
def stack(values: Union[dict, tuple, list], dim: Union[str, phiml.math._shape.Shape], expand_values=False, **kwargs)

Stacks values along the new dimension dim. All values must have the same spatial, instance and channel dimensions. If the dimension sizes vary, the resulting tensor will be non-uniform. Batch dimensions will be added as needed.

Stacking tensors is performed lazily, i.e. the memory is allocated only when needed. This makes repeated stacking and slicing along the same dimension very efficient, i.e. jit-compiled functions will not perform these operations.

Args

values
Collection of Shapable, such as Tensor. If a dict, keys must be of type str and are used as item names along dim.
dim
Shape with at least one dimension. None of these dimensions can be present with any of the values. If dim is a single-dimension shape, its size is determined from len(values) and can be left undefined (None). If dim is a multi-dimension shape, its volume must be equal to len(values).
expand_values
If True, will first add missing dimensions to all values, not just batch dimensions. This allows tensors with different dimensions to be stacked. The resulting tensor will have all dimensions that are present in values.
**kwargs
Additional keyword arguments required by specific implementations. Adding spatial dimensions to fields requires the bounds: Box argument specifying the physical extent of the new dimensions. Adding batch dimensions must always work without keyword arguments.

Returns

Tensor containing values stacked along dim.

Examples

>>> stack({'x': 0, 'y': 1}, channel('vector'))
(x=0, y=1)
>>> stack([math.zeros(batch(b=2)), math.ones(batch(b=2))], channel(c='x,y'))
(x=0.000, y=1.000); (x=0.000, y=1.000) (bᵇ=2, cᶜ=x,y)
>>> stack([vec(x=1, y=0), vec(x=2, y=3.)], batch('b'))
(x=1.000, y=0.000); (x=2.000, y=3.000) (bᵇ=2, vectorᶜ=x,y)
Expand source code
def stack(values: Union[tuple, list, dict], dim: Union[Shape, str], expand_values=False, **kwargs):
    """
    Stacks `values` along the new dimension `dim`.
    All values must have the same spatial, instance and channel dimensions. If the dimension sizes vary, the resulting tensor will be non-uniform.
    Batch dimensions will be added as needed.

    Stacking tensors is performed lazily, i.e. the memory is allocated only when needed.
    This makes repeated stacking and slicing along the same dimension very efficient, i.e. jit-compiled functions will not perform these operations.

    Args:
        values: Collection of `phiml.math.magic.Shapable`, such as `phiml.math.Tensor`.
            If a `dict`, keys must be of type `str` and are used as item names along `dim`.
        dim: `Shape` with at least one dimension. None of these dimensions can be present with any of the `values`.
            If `dim` is a single-dimension shape, its size is determined from `len(values)` and can be left undefined (`None`).
            If `dim` is a multi-dimension shape, its volume must be equal to `len(values)`.
        expand_values: If `True`, will first add missing dimensions to all values, not just batch dimensions.
            This allows tensors with different dimensions to be stacked.
            The resulting tensor will have all dimensions that are present in `values`.
        **kwargs: Additional keyword arguments required by specific implementations.
            Adding spatial dimensions to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions.
            Adding batch dimensions must always work without keyword arguments.

    Returns:
        `Tensor` containing `values` stacked along `dim`.

    Examples:
        >>> stack({'x': 0, 'y': 1}, channel('vector'))
        (x=0, y=1)

        >>> stack([math.zeros(batch(b=2)), math.ones(batch(b=2))], channel(c='x,y'))
        (x=0.000, y=1.000); (x=0.000, y=1.000) (bᵇ=2, cᶜ=x,y)

        >>> stack([vec(x=1, y=0), vec(x=2, y=3.)], batch('b'))
        (x=1.000, y=0.000); (x=2.000, y=3.000) (bᵇ=2, vectorᶜ=x,y)
    """
    assert len(values) > 0, f"stack() got empty sequence {values}"
    if not dim:
        assert len(values) == 1, f"Only one element can be passed as `values` if no dim is passed but got {values}"
        from ._tensors import wrap
        return next(iter(values.values())) if isinstance(values, dict) else values[0]  # this may not be wrappable
    if not isinstance(dim, Shape):
        dim = auto(dim)
    values_ = tuple(values.values()) if isinstance(values, dict) else values
    if not expand_values:
        for v in values_[1:]:
            assert set(non_batch(v).names) == set(non_batch(values_[0]).names), f"When expand_values=False, stacked values must have the same non-batch dimensions but got {non_batch(values_[0])} and {non_batch(v)}"
    # --- Add missing dimensions ---
    if expand_values:
        all_dims = merge_shapes(*values_, allow_varying_sizes=True)
        if isinstance(values, dict):
            values = {k: expand(v, all_dims.without(shape(v))) for k, v in values.items()}
        else:
            values = [expand(v, all_dims.without(shape(v))) for v in values]
    else:
        all_batch_dims = merge_shapes(*[shape(v).batch for v in values_], allow_varying_sizes=True)
        if isinstance(values, dict):
            values = {k: expand(v, all_batch_dims.without(shape(v))) for k, v in values.items()}
        else:
            values = [expand(v, all_batch_dims.without(shape(v))) for v in values]
    if dim.rank == 1:
        assert dim.size == len(values) or dim.size is None, f"stack dim size must match len(values) or be undefined but got {dim} for {len(values)} values"
        if dim.size is None:
            dim = dim.with_size(len(values))
        if isinstance(values, dict):
            dim_item_names = tuple(values.keys())
            values = tuple(values.values())
            dim = dim.with_size(dim_item_names)
        # --- First try __stack__ ---
        for v in values:
            if hasattr(v, '__stack__'):
                result = v.__stack__(values, dim, **kwargs)
                if result is not NotImplemented:
                    assert isinstance(result, Shapable), "__stack__ must return a Shapable object"
                    return result
        # --- Next: try stacking attributes for tree nodes ---
        if all(isinstance(v, PhiTreeNode) for v in values):
            attributes = all_attributes(values[0])
            if attributes and all(all_attributes(v) == attributes for v in values):
                new_attrs = {}
                for a in attributes:
                    assert all(dim not in shape(getattr(v, a)) for v in values), f"Cannot stack attribute {a} because one of the values contains the stack dimension {dim}."
                    a_values = [getattr(v, a) for v in values]
                    if all(v is a_values[0] for v in a_values[1:]):
                        new_attrs[a] = expand(a_values[0], dim, **kwargs)
                    else:
                        new_attrs[a] = stack(a_values, dim, expand_values=expand_values, **kwargs)
                return copy_with(values[0], **new_attrs)
            else:
                warnings.warn(f"Failed to concat values using value attributes because attributes differ among values {values}")
        # --- Fallback: use expand and concat ---
        for v in values:
            if not hasattr(v, '__stack__') and hasattr(v, '__concat__') and hasattr(v, '__expand__'):
                expanded_values = tuple([expand(v, dim.with_size(1 if dim.item_names[0] is None else dim.item_names[0][i]), **kwargs) for i, v in enumerate(values)])
                if len(expanded_values) > 8:
                    warnings.warn(f"stack() default implementation is slow on large dimensions ({dim.name}={len(expanded_values)}). Please implement __stack__()", RuntimeWarning, stacklevel=2)
                result = v.__concat__(expanded_values, dim.name, **kwargs)
                if result is not NotImplemented:
                    assert isinstance(result, Shapable), "__concat__ must return a Shapable object"
                    return result
        # --- else maybe all values are native scalars ---
        from ._tensors import wrap
        try:
            values = tuple([wrap(v) for v in values])
        except ValueError:
            raise MagicNotImplemented(f"At least one item in values must be Shapable but got types {[type(v) for v in values]}")
        return values[0].__stack__(values, dim, **kwargs)
    else:  # multi-dim stack
        assert dim.volume == len(values), f"When passing multiple stack dims, their volume must equal len(values) but got {dim} for {len(values)} values"
        if isinstance(values, dict):
            warnings.warn(f"When stacking a dict along multiple dimensions, the key names are discarded. Got keys {tuple(values.keys())}", RuntimeWarning, stacklevel=2)
            values = tuple(values.values())
        # --- if any value implements Shapable, use stack and unpack_dim ---
        for v in values:
            if hasattr(v, '__stack__') and hasattr(v, '__unpack_dim__'):
                stack_dim = batch('_stack')
                stacked = v.__stack__(values, stack_dim, **kwargs)
                if stacked is not NotImplemented:
                    assert isinstance(stacked, Shapable), "__stack__ must return a Shapable object"
                    assert hasattr(stacked, '__unpack_dim__'), "If a value supports __unpack_dim__, the result of __stack__ must also support it."
                    reshaped = stacked.__unpack_dim__(stack_dim.name, dim, **kwargs)
                    if reshaped is NotImplemented:
                        warnings.warn("__unpack_dim__ is overridden but returned NotImplemented during multi-dimensional stack. This results in unnecessary stack operations.", RuntimeWarning, stacklevel=2)
                    else:
                        return reshaped
        # --- Fallback: multi-level stack ---
        for dim_ in reversed(dim):
            values = [stack(values[i:i + dim_.size], dim_, **kwargs) for i in range(0, len(values), dim_.size)]
        return values[0]
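A minimal sketch of the multi-dimensional branch above, stacking four tensors along two new dims at once (assumed usage, not from the original docs):

from phiml import math
from phiml.math import batch, spatial

parts = [math.random_uniform(batch(b=2)) for _ in range(4)]
# The volume of the stack dims (2*2 = 4) must equal len(values).
grid = math.stack(parts, spatial(u=2, v=2))
# grid is expected to have dims (b=2, u=2, v=2)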
def std(value: Union[phiml.math._tensors.Tensor, list, tuple, numbers.Number, bool], dim: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function non_batch>) ‑> phiml.math._tensors.Tensor

Computes the standard deviation over values along the specified dimensions.

Warning: The standard deviation of non-uniform tensors along the stack dimension is undefined.

Args

value
Tensor or list / tuple of Tensors.
dim

Dimension or dimensions to be reduced. One of

  • None to reduce all non-batch dimensions
  • str containing single dimension or comma-separated list of dimensions
  • Tuple[str] or List[str]
  • Shape
  • batch(), instance(), spatial(), channel() to select dimensions by type
  • '0' when isinstance(value, (tuple, list)) to add up the sequence of Tensors

Returns

Tensor without the reduced dimensions.

Expand source code
def std(value: Union[Tensor, list, tuple, Number, bool], dim: DimFilter = non_batch) -> Tensor:
    """
    Computes the standard deviation over `values` along the specified dimensions.

    *Warning*: The standard deviation of non-uniform tensors along the stack dimension is undefined.

    Args:
        value: `Tensor` or `list` / `tuple` of Tensors.
        dim: Dimension or dimensions to be reduced. One of

            * `None` to reduce all non-batch dimensions
            * `str` containing single dimension or comma-separated list of dimensions
            * `Tuple[str]` or `List[str]`
            * `Shape`
            * `batch`, `instance`, `spatial`, `channel` to select dimensions by type
            * `'0'` when `isinstance(value, (tuple, list))` to add up the sequence of Tensors

    Returns:
        `Tensor` without the reduced dimensions.
    """
    if not dim:
        warnings.warn("std along empty shape returns 0", RuntimeWarning, stacklevel=2)
        return zeros_like(value)
    if not callable(dim) and set(parse_dim_order(dim)) - set(value.shape.names):
        return zeros_like(value)  # std along constant dim is 0
    return reduce_(_std, value, dim)
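A minimal usage sketch (not from the original docs):

from phiml import math
from phiml.math import batch, spatial

data = math.random_normal(batch(b=10), spatial(x=100))
per_example = math.std(data, 'x')   # reduce the spatial dim, keep the batch dim
overall = math.std(data)            # default: reduce all non-batch dims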
def stop_gradient(x)

Disables gradients for the given tensor. This may switch off the gradients for x itself or create a copy of x with disabled gradients.

Implementations:

  • PyTorch: x.detach()
  • TensorFlow: tf.stop_gradient
  • Jax: jax.lax.stop_gradient

Args

x
Tensor or PhiTreeNode for which gradients should be disabled.

Returns

Copy of x.

Expand source code
def stop_gradient(x):
    """
    Disables gradients for the given tensor.
    This may switch off the gradients for `x` itself or create a copy of `x` with disabled gradients.

    Implementations:

    * PyTorch: [`x.detach()`](https://pytorch.org/docs/stable/autograd.html#torch.Tensor.detach)
    * TensorFlow: [`tf.stop_gradient`](https://www.tensorflow.org/api_docs/python/tf/stop_gradient)
    * Jax: [`jax.lax.stop_gradient`](https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.stop_gradient.html)

    Args:
        x: `Tensor` or `phiml.math.magic.PhiTreeNode` for which gradients should be disabled.

    Returns:
        Copy of `x`.
    """
    if isinstance(x, Shape):
        return x
    return _backend_op1(x, Backend.stop_gradient)
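A minimal usage sketch (not from the original docs):

from phiml import math
from phiml.math import spatial

x = math.random_uniform(spatial(x=4))
y = math.stop_gradient(x) * 2  # gradients of downstream results will not flow back into x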
def stored_indices(x: phiml.math._tensors.Tensor, list_dim=(entriesⁱ=None), index_dim=(indexᶜ=None), invalid='discard') ‑> phiml.math._tensors.Tensor

Returns the indices of the stored values for a given Tensor. For sparse tensors, this will return the stored indices tensor. For collapsed tensors, only the stored dimensions will be returned.

Args

x
Tensor
list_dim
Dimension along which stored indices should be laid out.
invalid
One of 'discard', 'clamp', 'keep'. Filters the result by valid indices. Internally, invalid indices may be stored for performance reasons.

Returns

Tensor representing all indices of stored values.

Expand source code
def stored_indices(x: Tensor, list_dim=instance('entries'), index_dim=channel('index'), invalid='discard') -> Tensor:
    """
    Returns the indices of the stored values for a given `Tensor`.
    For sparse tensors, this will return the stored indices tensor.
    For collapsed tensors, only the stored dimensions will be returned.

    Args:
        x: `Tensor`
        list_dim: Dimension along which stored indices should be laid out.
        invalid: One of `'discard'`, `'clamp'`, `'keep'`. Filters the result by valid indices.
            Internally, invalid indices may be stored for performance reasons.

    Returns:
        `Tensor` representing all indices of stored values.
    """
    assert invalid in ['discard', 'clamp', 'keep'], f"invalid handling must be one of 'discard', 'clamp', 'keep' but got {invalid}"
    if isinstance(x, NativeTensor):
        from ._ops import meshgrid
        if batch(x):
            raise NotImplementedError
        indices = meshgrid(x._native_shape.non_batch.non_channel, stack_dim=index_dim)
        return pack_dims(indices, non_channel, list_dim)
    if isinstance(x, TensorStack):
        if x.is_cached or not x.requires_broadcast:
            return stored_indices(cached(x), list_dim, index_dim, invalid)
        raise NotImplementedError
        return stack([stored_indices(t, list_dim) for t in x._tensors], x._stack_dim)  # ToDo add index for stack dim
    elif isinstance(x, CompressedSparseMatrix):
        return rename_dims(x._coo_indices(invalid, stack_dim=index_dim), instance, list_dim)
    if isinstance(x, SparseCoordinateTensor):
        if x._can_contain_double_entries:
            warnings.warn(f"stored_values of sparse tensor {x.shape} may contain multiple values for the same position.")
        new_index_dim = index_dim.with_size(channel(x._indices).item_names[0])
        return rename_dims(x._indices, [instance(x._indices).name, channel(x._indices).name], [list_dim, new_index_dim])
    raise ValueError(x)
def stored_values(x: phiml.math._tensors.Tensor, list_dim=(entriesⁱ=None), invalid='discard') ‑> phiml.math._tensors.Tensor

Returns the stored values for a given Tensor.

For sparse tensors, this will return only the stored entries.

Dense tensors are reshaped so that all non-batch dimensions are packed into list_dim. Batch dimensions are preserved.

Args

x
Tensor
list_dim
Dimension along which stored values should be laid out.
invalid
One of 'discard', 'clamp', 'keep'. Filters the result by valid indices. Internally, invalid indices may be stored for performance reasons.

Returns

Tensor representing all values stored to represent x.

Expand source code
def stored_values(x: Tensor, list_dim=instance('entries'), invalid='discard') -> Tensor:
    """
    Returns the stored values for a given `Tensor`.

    For sparse tensors, this will return only the stored entries.

    Dense tensors are reshaped so that all non-batch dimensions are packed into `list_dim`. Batch dimensions are preserved.

    Args:
        x: `Tensor`
        list_dim: Dimension along which stored values should be laid out.
        invalid: One of `'discard'`, `'clamp'`, `'keep'`. Filters the result by valid indices.
            Internally, invalid indices may be stored for performance reasons.

    Returns:
        `Tensor` representing all values stored to represent `x`.
    """
    assert invalid in ['discard', 'clamp', 'keep'], f"invalid handling must be one of 'discard', 'clamp', 'keep' but got {invalid}"
    if isinstance(x, NativeTensor):
        x = NativeTensor(x._native, x._native_shape, x._native_shape)
        entries_dims = x.shape.non_batch
        return pack_dims(x, entries_dims, list_dim)
    if isinstance(x, TensorStack):
        if x.is_cached:
            return stored_values(cached(x), list_dim, invalid)
        return stack([stored_values(t, list_dim) for t in x._tensors], x._stack_dim)
    elif isinstance(x, CompressedSparseMatrix):
        if invalid in ['keep', 'clamp']:
            return rename_dims(x._values, instance, list_dim)
        else:
            x = x.decompress()  # or apply slices, then return values
    if isinstance(x, SparseCoordinateTensor):
        if x._can_contain_double_entries:
            warnings.warn(f"stored_values of sparse tensor {x.shape} may contain multiple values for the same position.")
        return rename_dims(x._values, instance, list_dim)
    raise ValueError(x)
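A minimal sketch illustrating the dense-tensor behavior of stored_values() and stored_indices() (assumed usage, not from the original docs):

from phiml import math
from phiml.math import spatial

t = math.tensor([[1., 0.], [0., 2.]], spatial('x,y'))
vals = math.stored_values(t)    # non-batch dims packed into the instance dim 'entries' (4 values)
idx = math.stored_indices(t)    # index of each stored value, laid out along 'entries'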
def sum(value: Union[phiml.math._tensors.Tensor, list, tuple, numbers.Number, bool], dim: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function non_batch>) ‑> phiml.math._tensors.Tensor

Sums values along the specified dimensions.

Args

value
Tensor or list / tuple of Tensors.
dim

Dimension or dimensions to be reduced. One of

  • None to reduce all non-batch dimensions
  • str containing single dimension or comma-separated list of dimensions
  • Tuple[str] or List[str]
  • Shape
  • batch(), instance(), spatial(), channel() to select dimensions by type
  • '0' when isinstance(value, (tuple, list)) to add up the sequence of Tensors

Returns

Tensor without the reduced dimensions.

Expand source code
def sum_(value: Union[Tensor, list, tuple, Number, bool], dim: DimFilter = non_batch) -> Tensor:
    """
    Sums `values` along the specified dimensions.

    Args:
        value: `Tensor` or `list` / `tuple` of Tensors.
        dim: Dimension or dimensions to be reduced. One of

            * `None` to reduce all non-batch dimensions
            * `str` containing single dimension or comma-separated list of dimensions
            * `Tuple[str]` or `List[str]`
            * `Shape`
            * `batch`, `instance`, `spatial`, `channel` to select dimensions by type
            * `'0'` when `isinstance(value, (tuple, list))` to add up the sequence of Tensors

    Returns:
        `Tensor` without the reduced dimensions.
    """
    return reduce_(_sum, bool_to_int(value), dim, require_all_dims_present=True)
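A minimal usage sketch (not from the original docs):

from phiml import math
from phiml.math import spatial

t = math.tensor([[1, 2], [3, 4]], spatial('x,y'))
math.sum(t, 'x')       # reduce a single dim
math.sum(t)            # reduce all non-batch dims -> 10
math.sum([t, t], '0')  # '0' adds up the sequence of Tensors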
def tan(x: ~TensorOrTree) ‑> ~TensorOrTree

Computes tan(x) of the Tensor or PhiTreeNode x.

Expand source code
def tan(x: TensorOrTree) -> TensorOrTree:
    """ Computes *tan(x)* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.tan)
def tanh(x: ~TensorOrTree) ‑> ~TensorOrTree

Computes tanh(x) of the Tensor or PhiTreeNode x.

Expand source code
def tanh(x: TensorOrTree) -> TensorOrTree:
    """ Computes *tanh(x)* of the `Tensor` or `phiml.math.magic.PhiTreeNode` `x`. """
    return _backend_op1(x, Backend.tanh)
def tensor(data, *shape: phiml.math._shape.Shape, convert: bool = True, default_list_dim=(vectorᶜ=None)) ‑> phiml.math._tensors.Tensor

Create a Tensor from the specified data. If convert=True, converts data to the preferred format of the default backend.

data must be one of the following:

  • Number: returns a dimensionless Tensor.
  • Native tensor such as NumPy array, TensorFlow tensor or PyTorch tensor.
  • tuple or list of numbers: backs the Tensor with native tensor.
  • tuple or list of non-numbers: creates tensors for the items and stacks them.
  • Tensor: renames dimensions and dimension types if names is specified. Converts all internal native values of the tensor if convert=True.
  • Shape: creates a 1D tensor listing the dimension sizes.

While specifying names is optional in some cases, it is recommended to always specify them.

Dimension types are always inferred from the dimension names if specified.

Implementations:

  • NumPy: numpy.array
  • PyTorch: torch.tensor, torch.from_numpy
  • TensorFlow: tf.convert_to_tensor
  • Jax: jax.numpy.array

See Also: wrap() which uses convert=False, layout().

Args

data
native tensor, scalar, sequence, Shape or Tensor
shape
Ordered dimensions and types. If sizes are defined, they will be checked against data.
convert
If True, converts the data to the native format of the current default backend. If False, wraps the data in a Tensor but keeps the given data reference if possible.

Raises

AssertionError
if dimension names are not provided and cannot automatically be inferred
ValueError
if data is not tensor-like

Returns

Tensor containing same values as data

Examples

>>> tensor([1, 2, 3], channel(vector='x,y,z'))
(x=1, y=2, z=3)
>>> tensor([1., 2, 3], channel(vector='x,y,z'))
(x=1.000, y=2.000, z=3.000) float64
>>> tensor(numpy.zeros([10, 8, 6, 2]), batch('batch'), spatial('x,y'), channel(vector='x,y'))
(batchᵇ=10, xˢ=8, yˢ=6, vectorᶜ=x,y) float64 const 0.0
>>> tensor([(0, 1), (0, 2), (1, 3)], instance('particles'), channel(vector='x,y'))
(x=0, y=1); (x=0, y=2); (x=1, y=3) (particlesⁱ=3, vectorᶜ=x,y)
>>> tensor(numpy.random.randn(10))
(vectorᶜ=10) float64 -0.128 ± 1.197 (-2e+00...2e+00)
Expand source code
def tensor(data,
           *shape: Shape,
           convert: bool = True,
           default_list_dim=channel('vector')) -> Tensor:  # TODO assume convert_unsupported, add convert_external=False for constants
    """
    Create a Tensor from the specified `data`.
    If `convert=True`, converts `data` to the preferred format of the default backend.

    `data` must be one of the following:
    
    * Number: returns a dimensionless Tensor.
    * Native tensor such as NumPy array, TensorFlow tensor or PyTorch tensor.
    * `tuple` or `list` of numbers: backs the Tensor with native tensor.
    * `tuple` or `list` of non-numbers: creates tensors for the items and stacks them.
    * Tensor: renames dimensions and dimension types if `names` is specified. Converts all internal native values of the tensor if `convert=True`.
    * Shape: creates a 1D tensor listing the dimension sizes.
    
    While specifying `names` is optional in some cases, it is recommended to always specify them.
    
    Dimension types are always inferred from the dimension names if specified.

    Implementations:

    * NumPy: [`numpy.array`](https://numpy.org/doc/stable/reference/generated/numpy.array.html)
    * PyTorch: [`torch.tensor`](https://pytorch.org/docs/stable/generated/torch.tensor.html), [`torch.from_numpy`](https://pytorch.org/docs/stable/generated/torch.from_numpy.html)
    * TensorFlow: [`tf.convert_to_tensor`](https://www.tensorflow.org/api_docs/python/tf/convert_to_tensor)
    * Jax: [`jax.numpy.array`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.array.html)

    See Also:
        `phiml.math.wrap()` which uses `convert=False`, `layout()`.

    Args:
        data: native tensor, scalar, sequence, Shape or Tensor
        shape: Ordered dimensions and types. If sizes are defined, they will be checked against `data`.
        convert: If True, converts the data to the native format of the current default backend.
            If False, wraps the data in a `Tensor` but keeps the given data reference if possible.

    Raises:
        AssertionError: if dimension names are not provided and cannot automatically be inferred
        ValueError: if `data` is not tensor-like

    Returns:
        Tensor containing same values as data

    Examples:
        >>> tensor([1, 2, 3], channel(vector='x,y,z'))
        (x=1, y=2, z=3)

        >>> tensor([1., 2, 3], channel(vector='x,y,z'))
        (x=1.000, y=2.000, z=3.000) float64

        >>> tensor(numpy.zeros([10, 8, 6, 2]), batch('batch'), spatial('x,y'), channel(vector='x,y'))
        (batchᵇ=10, xˢ=8, yˢ=6, vectorᶜ=x,y) float64 const 0.0

        >>> tensor([(0, 1), (0, 2), (1, 3)], instance('particles'), channel(vector='x,y'))
        (x=0, y=1); (x=0, y=2); (x=1, y=3) (particlesⁱ=3, vectorᶜ=x,y)

        >>> tensor(numpy.random.randn(10))
        (vectorᶜ=10) float64 -0.128 ± 1.197 (-2e+00...2e+00)
    """
    assert all(isinstance(s, Shape) for s in shape), f"Cannot create tensor because shape needs to be one or multiple Shape instances but got {shape}"
    shape = None if len(shape) == 0 else concat_shapes(*shape)
    if isinstance(data, Tensor):
        if convert:
            backend = data.default_backend
            if backend != default_backend():
                data = data._op1(lambda n: convert_(n, use_dlpack=False))
        if shape is None:
            return data
        else:
            if None in shape.sizes:
                shape = shape.with_sizes(data.shape.sizes)
            return data._with_shape_replaced(shape)
    elif isinstance(data, Shape):
        if shape is None:
            shape = channel('dims')
            shape = shape.with_size(data.names)
            data = data.sizes
        elif not shape:
            assert data.rank == 1, f"When wrapping a Shape as a scalar tensor, it must be a rank-1 shape but got {data}"
            data = data.size
        else:
            assert shape.rank == 1, "Can only convert 1D shapes to Tensors"
            shape = shape.with_size(data.names)
            data = data.sizes
    elif isinstance(data, str) or data is None:
        return layout(data)
    elif isinstance(data, (numbers.Number, bool)):
        assert not shape, f"Trying to create a zero-dimensional Tensor from value '{data}' but shape={shape}"
        if convert:
            data = default_backend().as_tensor(data, convert_external=True)
        return NativeTensor(data, EMPTY_SHAPE)
    if isinstance(data, (tuple, list)):
        if all(isinstance(d, (bool, int, float, complex, np.generic)) for d in data):
            array = np.array(data)
            assert array.dtype != object
            data = array
        elif all(isinstance(d, str) for d in data):
            return layout(data, shape or default_list_dim)
        else:
            try:
                inner_shape = [] if shape is None else [shape[1:]]
                tensors = [d if isinstance(d, Tensor) else tensor(d, *inner_shape, convert=convert) for d in data]
                return stack(tensors, default_list_dim if shape is None else shape[0].with_sizes([len(tensors)]), expand_values=True)
            except IncompatibleShapes:
                assert not convert, f"Cannot convert {data} to tensor given shape {shape}"
                return layout(data, shape or default_list_dim)
            except ValueError:
                assert not convert, f"Cannot convert {data} to tensor"
                return layout(data, shape or default_list_dim)
    try:
        backend = choose_backend(data)
        if shape is None:
            assert backend.ndims(data) <= 1, "Specify dimension names for tensors with more than 1 dimension"
            shape = default_list_dim if backend.ndims(data) == 1 else EMPTY_SHAPE
            shape = shape.with_sizes(backend.staticshape(data))
        else:
            # fill in sizes or check them
            sizes = backend.staticshape(data)
            if len(sizes) != len(shape):
                raise IncompatibleShapes(f"Rank of given shape {shape} does not match data with sizes {sizes}")
            for size, s in zip(sizes, shape.sizes):
                if s is not None:
                    assert s == size, f"Given shape {shape} does not match data with sizes {sizes}. Consider leaving the sizes undefined."
            shape = shape.with_sizes(sizes, keep_item_names=True)
        if convert:
            data = convert_(data, use_dlpack=False)
        return NativeTensor(data, shape)
    except NoBackendFound:
        raise ValueError(f"{type(data)} is not supported. Only (Tensor, tuple, list, np.ndarray, native tensors) are allowed.\nCurrent backends: {BACKENDS}")
def tensor_like(existing_tensor: phiml.math._tensors.Tensor, values: Union[numbers.Number, phiml.math._tensors.Tensor, bool], value_order: str = None)

Creates a tensor with the same format and shape as existing_tensor.

Args

existing_tensor
Any Tensor, sparse or dense.
values
New values to replace the existing values by. If existing_tensor is sparse, values must broadcast to the instance dimension listing the stored indices.
value_order
Order of values compared to existing_tensor, only relevant if existing_tensor is sparse. If 'original', the values are ordered like the values that were used to create the first tensor with this sparsity pattern. If 'as existing', the values match the current order of existing_tensor. Note that the order of values may be changed upon creating a sparse tensor.

Returns

Tensor

Expand source code
def tensor_like(existing_tensor: Tensor, values: Union[Tensor, Number, bool], value_order: str = None):
    """
    Creates a tensor with the same format and shape as `existing_tensor`.

    Args:
        existing_tensor: Any `Tensor`, sparse or dense.
        values: New values to replace the existing values by.
            If `existing_tensor` is sparse, `values` must broadcast to the instance dimension listing the stored indices.
        value_order: Order of `values` compared to `existing_tensor`, only relevant if `existing_tensor` is sparse.
            If `'original'`, the values are ordered like the values that were used to create the first tensor with this sparsity pattern.
            If `'as existing'`, the values match the current order of `existing_tensor`.
            Note that the order of values may be changed upon creating a sparse tensor.

    Returns:
        `Tensor`
    """
    assert value_order in ['original', 'as existing', None]
    if isinstance(existing_tensor, (SparseCoordinateTensor, CompressedSparseMatrix)):
        if value_order is None:
            assert not instance(values), f"When creating a sparse tensor from a list of values, value_order must be specified."
        if instance(values):
            values = rename_dims(values, instance, instance(existing_tensor._values))
        values = expand(values, instance(existing_tensor._values))
        if value_order == 'original' and isinstance(existing_tensor, CompressedSparseMatrix) and existing_tensor._uncompressed_indices_perm is not None:
            values = values[existing_tensor._uncompressed_indices_perm]
        if isinstance(existing_tensor, CompressedSparseMatrix) and existing_tensor._uncompressed_offset is not None:
            from ._ops import where
            values = where(existing_tensor._valid_mask(), values, 0)
        return existing_tensor._with_values(values)
    if not is_sparse(existing_tensor):
        return unpack_dim(values, instance, existing_tensor.shape.non_channel.non_batch)
    raise NotImplementedError
def to_complex(x: ~TensorOrTree) ‑> ~TensorOrTree

Converts the given tensor to complex floating point format with the currently specified precision.

The precision can be set globally using math.set_global_precision() and locally using with math.precision().

See the documentation at https://tum-pbs.github.io/PhiML/Data_Types.html

See Also: cast().

Args

x
values to convert

Returns

Tensor of same shape as x

Expand source code
def to_complex(x: TensorOrTree) -> TensorOrTree:
    """
    Converts the given tensor to complex floating point format with the currently specified precision.

    The precision can be set globally using `math.set_global_precision()` and locally using `with math.precision()`.

    See the documentation at https://tum-pbs.github.io/PhiML/Data_Types.html

    See Also:
        `cast()`.

    Args:
        x: values to convert

    Returns:
        `Tensor` of same shape as `x`
    """
    return _backend_op1(x, Backend.to_complex)
def to_device(value, device: phiml.backend._backend.ComputeDevice, convert=True, use_dlpack=True)

Allocates the tensors of value on device. If the value already exists on that device, this function may either create a copy of value or return value directly.

See Also: to_cpu().

Args

value
Tensor or PhiTreeNode or native tensor.
device
Device to allocate value on. Either ComputeDevice or category str, such as 'CPU' or 'GPU'.
convert
Whether to convert tensors that do not belong to the corresponding backend to compatible native tensors. If False, this function has no effect on numpy tensors.
use_dlpack
Only if convert==True. Whether to use the DLPack library to convert from one GPU-enabled backend to another.

Returns

Same type as value.

Expand source code
def to_device(value, device: ComputeDevice or str, convert=True, use_dlpack=True):
    """
    Allocates the tensors of `value` on `device`.
    If the value already exists on that device, this function may either create a copy of `value` or return `value` directly.

    See Also:
        `to_cpu()`.

    Args:
        value: `Tensor` or `phiml.math.magic.PhiTreeNode` or native tensor.
        device: Device to allocate value on.
            Either `ComputeDevice` or category `str`, such as `'CPU'` or `'GPU'`.
        convert: Whether to convert tensors that do not belong to the corresponding backend to compatible native tensors.
            If `False`, this function has no effect on numpy tensors.
        use_dlpack: Only if `convert==True`.
            Whether to use the DLPack library to convert from one GPU-enabled backend to another.

    Returns:
        Same type as `value`.
    """
    assert isinstance(device, (ComputeDevice, str)), f"device must be a ComputeDevice or str but got {type(device)}"
    return tree_map(_to_device, value, device=device, convert_to_backend=convert, use_dlpack=use_dlpack)
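A minimal usage sketch, assuming the current backend has a GPU available (not from the original docs):

from phiml import math
from phiml.math import spatial

t = math.random_uniform(spatial(x=64))
t_gpu = math.to_device(t, 'GPU')                     # move to a device of category 'GPU'
t_cpu = math.to_device(t_gpu, 'CPU', convert=False)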
def to_dict(value: Union[phiml.math._tensors.Tensor, phiml.math._shape.Shape])

Returns a serializable form of a Tensor or Shape. The result can be written to a JSON file, for example.

See Also: from_dict().

Args

value
Tensor or Shape

Returns

Serializable Python tree of primitives

Expand source code
def to_dict(value: Union[Tensor, Shape]):
    """
    Returns a serializable form of a `Tensor` or `Shape`.
    The result can be written to a JSON file, for example.

    See Also:
        `from_dict()`.

    Args:
        value: `Tensor` or `Shape`

    Returns:
        Serializable Python tree of primitives
    """
    if isinstance(value, Shape):
        return value._to_dict(include_sizes=True)
    elif isinstance(value, Tensor):
        return value._to_dict()
    raise ValueError(f"Cannot convert {value} to a dict")
def to_float(x: ~TensorOrTree) ‑> ~TensorOrTree

Converts the given tensor to floating point format with the currently specified precision.

The precision can be set globally using math.set_global_precision() and locally using with math.precision().

See the documentation at https://tum-pbs.github.io/PhiML/Data_Types.html

See Also: cast().

Args

x
Tensor or PhiTreeNode to convert

Returns

Tensor or PhiTreeNode matching x.

Expand source code
def to_float(x: TensorOrTree) -> TensorOrTree:
    """
    Converts the given tensor to floating point format with the currently specified precision.
    
    The precision can be set globally using `math.set_global_precision()` and locally using `with math.precision()`.
    
    See the documentation at https://tum-pbs.github.io/PhiML/Data_Types.html

    See Also:
        `cast()`.

    Args:
        x: `Tensor` or `phiml.math.magic.PhiTreeNode` to convert

    Returns:
        `Tensor` or `phiml.math.magic.PhiTreeNode` matching `x`.
    """
    return _backend_op1(x, Backend.to_float)
def to_format(x: phiml.math._tensors.Tensor, format: str)

Converts a Tensor to the specified sparse format or to a dense tensor.

Args

x
Sparse or dense Tensor
format
Target format. One of 'dense', 'coo', 'csr', or 'csc'.

Returns

Tensor of the specified format.

Expand source code
def to_format(x: Tensor, format: str):
    """
    Converts a `Tensor` to the specified sparse format or to a dense tensor.

    Args:
        x: Sparse or dense `Tensor`
        format: Target format. One of `'dense'`, `'coo'`, `'csr'`, or `'csc'`.

    Returns:
        `Tensor` of the specified format.
    """
    assert format in ('coo', 'csr', 'csc', 'dense'), f"Invalid format: '{format}'. Must be one of 'coo', 'csr', 'csc', 'dense'"
    if get_format(x) == format:
        return x
    if format == 'dense':
        return dense(x)
    if isinstance(x, SparseCoordinateTensor):
        if format == 'csr':
            return x.compress_rows()
        elif format == 'csc':
            return x.compress_cols()
    elif isinstance(x, CompressedSparseMatrix):
        if format == 'coo':
            return x.decompress()
        else:
            return to_format(x.decompress(), format)
    else:  # dense to sparse
        from ._ops import nonzero
        indices = nonzero(rename_dims(x, channel, instance))
        values = x[indices]
        coo = SparseCoordinateTensor(indices, values, x.shape, can_contain_double_entries=False, indices_sorted=False, indices_constant=x.default_backend.name == 'numpy')
        return to_format(coo, format)
def to_int32(x: ~TensorOrTree) ‑> ~TensorOrTree

Converts the Tensor or PhiTreeNode x to 32-bit integer.

Expand source code
def to_int32(x: TensorOrTree) -> TensorOrTree:
    """ Converts the `Tensor` or `phiml.math.magic.PhiTreeNode` `x` to 32-bit integer. """
    return _backend_op1(x, Backend.to_int32)
def to_int64(x: ~TensorOrTree) ‑> ~TensorOrTree

Converts the Tensor or PhiTreeNode x to 64-bit integer.

Expand source code
def to_int64(x: TensorOrTree) -> TensorOrTree:
    """ Converts the `Tensor` or `phiml.math.magic.PhiTreeNode` `x` to 64-bit integer. """
    return _backend_op1(x, Backend.to_int64)
def trace_check(f, *args, **kwargs) ‑> Tuple[bool, str]

Tests if f(*args, **kwargs) has already been traced for arguments compatible with args and kwargs. If true, jit-compiled functions are very fast since the Python function is not actually called anymore.

Args

f
Transformed Function, e.g. jit-compiled or linear function.
*args
Hypothetical arguments to be passed to f
**kwargs
Hypothetical keyword arguments to be passed to f

Returns

result
True if there is an existing trace that can be used, False if f would have to be re-traced.
message
A str that, if result == False, gives hints as to why f needs to be re-traced given args and kwargs.
Expand source code
def trace_check(f, *args, **kwargs) -> Tuple[bool, str]:
    """
    Tests if `f(*args, **kwargs)` has already been traced for arguments compatible with `args` and `kwargs`.
    If true, jit-compiled functions are very fast since the Python function is not actually called anymore.

    Args:
        f: Transformed Function, e.g. jit-compiled or linear function.
        *args: Hypothetical arguments to be passed to `f`
        **kwargs: Hypothetical keyword arguments to be passed to `f`

    Returns:
        result: `True` if there is an existing trace that can be used, `False` if `f` would have to be re-traced.
        message: A `str` that, if `result == False`, gives hints as to why `f` needs to be re-traced given `args` and `kwargs`.
    """
    assert args or kwargs, f"Please pass the hypothetical function arguments to trace_check()"
    if isinstance(f, (JitFunction, GradientFunction, HessianFunction, CustomGradientFunction)):
        keys = f.traces.keys()
    elif isinstance(f, LinearFunction):
        keys = f.matrices_and_biases.keys()
    else:
        raise ValueError(f"{f_name(f)} is not a traceable function. Only supports jit_compile, jit_compile_linear, gradient, custom_gradient, jacobian, hessian")
    key, *_ = key_from_args(args, kwargs, f.f_params, aux=f.auxiliary_args)
    if not keys:
        return False, "Function has not yet been traced"
    if key in keys:
        return True, ""
    traced_key = next(iter(keys))  # ToDo compare against all
    cond_equal = key.auxiliary_kwargs == traced_key.auxiliary_kwargs
    if isinstance(cond_equal, Tensor):
        cond_equal = cond_equal.all
    if not cond_equal:
        return False, "Auxiliary arguments do not match"
    # shapes need not be compared because they are included in specs
    if traced_key.tree.keys() != key.tree.keys():
        return False, f"Different primary arguments passed: {set(traced_key.tree.keys())} vs {set(key.tree.keys())}"
    for name in traced_key.tree.keys():
        if traced_key.tree[name] != key.tree[name]:
            return False, f"Primary argument '{name}' differs in non-traced variables: {traced_key.tree[name]} vs {key.tree[name]}. Make sure the corresponding class overrides __eq__()."
    if traced_key.specs != key.specs:
        return False, "Traced variables differ in shape"
    if traced_key.backend != key.backend:
        return False, f"Function was not traced with backend {key.backend}"
    if traced_key.spatial_derivative_order != key.spatial_derivative_order:
        return False, f"Different in spatial_derivative_order. This is likely an internal problem."
    return True
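A minimal sketch, assuming jit_compile from this module (not from the original docs; the returned messages may differ):

from phiml import math
from phiml.math import spatial

@math.jit_compile
def step(x):
    return x * 0.5

step(math.ones(spatial(x=8)))                                  # first call traces the function
ok, msg = math.trace_check(step, math.zeros(spatial(x=8)))     # same shapes and backend as the traced call
ok2, msg2 = math.trace_check(step, math.zeros(spatial(x=16)))  # different shape would require re-tracing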
def transpose(x, axes)

Swap the dimension order of x. This operation is generally not necessary for Tensors because tensors will be reshaped under the hood or when getting the native/numpy representations. It can be used to transpose native tensors.

Implementations:

  • NumPy: numpy.transpose
  • PyTorch: x.permute
  • TensorFlow: tf.transpose
  • Jax: jax.numpy.transpose

Args

x
Tensor or native tensor or Shapable.
axes
tuple or list

Returns

Tensor or native tensor, depending on x.

Expand source code
def transpose(x, axes):
    """
    Swap the dimension order of `x`.
    This operation is generally not necessary for `Tensor`s because tensors will be reshaped under the hood or when getting the native/numpy representations.
    It can be used to transpose native tensors.

    Implementations:

    * NumPy: [`numpy.transpose`](https://numpy.org/doc/stable/reference/generated/numpy.transpose.html)
    * PyTorch: [`x.permute`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.permute)
    * TensorFlow: [`tf.transpose`](https://www.tensorflow.org/api_docs/python/tf/transpose)
    * Jax: [`jax.numpy.transpose`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.transpose.html)

    Args:
        x: `Tensor` or native tensor or `phiml.math.magic.Shapable`.
        axes: `tuple` or `list`

    Returns:
        `Tensor` or native tensor, depending on `x`.
    """
    if isinstance(x, Tensor):
        if x.shape[axes] == x.shape.only(axes):  # order is correct
            return x
        new_shape = x.shape[axes]
        packed = pack_dims(x, new_shape, instance('_t_flat'))
        return unpack_dim(packed, '_t_flat', new_shape)
    else:
        return choose_backend(x).transpose(x, axes)
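A minimal usage sketch with a native tensor (not from the original docs):

import numpy as np
from phiml import math

a = np.zeros([2, 3, 4])
b = math.transpose(a, (2, 0, 1))   # delegated to the backend; b.shape == (4, 2, 3)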
def unpack_dim(value, dim: Union[str, tuple, list, set, ForwardRef('Shape'), Callable], *unpacked_dims: phiml.math._shape.Shape, **kwargs)

Decompresses a dimension by unstacking the elements along it. This function replaces the traditional reshape for these cases. The compressed dimension dim is assumed to contain elements laid out according to the order of unpacked_dims.

If dim does not exist on value, this function will return value as-is. This includes primitive types.

See Also: pack_dims()

Args

value
Shapable, such as Tensor, for which one dimension should be split.
dim
Single dimension to be decompressed.
*unpacked_dims
Vararg Shape, ordered dimensions to replace dim, fulfilling unpacked_dims.volume == shape(self)[dim].size.
**kwargs
Additional keyword arguments required by specific implementations. Adding spatial dimensions to fields requires the bounds: Box argument specifying the physical extent of the new dimensions. Adding batch dimensions must always work without keyword arguments.

Returns

Same type as value.

Examples

>>> unpack_dim(math.zeros(instance(points=12)), 'points', spatial(x=4, y=3))
(xˢ=4, yˢ=3) const 0.0
Expand source code
def unpack_dim(value, dim: DimFilter, *unpacked_dims: Shape, **kwargs):
    """
    Decompresses a dimension by unstacking the elements along it.
    This function replaces the traditional `reshape` for these cases.
    The compressed dimension `dim` is assumed to contain elements laid out according to the order of `unpacked_dims`.

    If `dim` does not exist on `value`, this function will return `value` as-is. This includes primitive types.

    See Also:
        `pack_dims()`

    Args:
        value: `phiml.math.magic.Shapable`, such as `Tensor`, for which one dimension should be split.
        dim: Single dimension to be decompressed.
        *unpacked_dims: Vararg `Shape`, ordered dimensions to replace `dim`, fulfilling `unpacked_dims.volume == shape(self)[dim].size`.
        **kwargs: Additional keyword arguments required by specific implementations.
            Adding spatial dimensions to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions.
            Adding batch dimensions must always work without keyword arguments.

    Returns:
        Same type as `value`.

    Examples:
        >>> unpack_dim(math.zeros(instance(points=12)), 'points', spatial(x=4, y=3))
        (xˢ=4, yˢ=3) const 0.0
    """
    if isinstance(value, (Number, bool)):
        return value
    assert isinstance(value, Shapable) and isinstance(value, Sliceable) and isinstance(value, Shaped), f"value must be Shapable but got {type(value)}"
    dim = shape(value).only(dim)
    if dim.is_empty:
        return value  # Nothing to do, maybe expand?
    assert dim.rank == 1, f"unpack_dim requires as single dimension to be unpacked but got {dim}"
    dim = dim.name
    unpacked_dims = concat_shapes(*unpacked_dims)
    if unpacked_dims.rank == 0:
        return value[{dim: 0}]  # remove dim
    elif unpacked_dims.rank == 1:
        return rename_dims(value, dim, unpacked_dims, **kwargs)
    # --- First try __unpack_dim__
    if hasattr(value, '__unpack_dim__'):
        result = value.__unpack_dim__(dim, unpacked_dims, **kwargs)
        if result is not NotImplemented:
            return result
    # --- Next try Tree Node ---
    if isinstance(value, PhiTreeNode) and all_attributes(value):
        new_attributes = {a: unpack_dim(getattr(value, a), dim, unpacked_dims, **kwargs) for a in all_attributes(value)}
        return copy_with(value, **new_attributes)
    # --- Fallback: unstack and stack ---
    if shape(value).only(dim).volume > 8:
        warnings.warn(f"pack_dims() default implementation is slow on large dimensions ({shape(value).only(dim)}). Please implement __unpack_dim__() for {type(value).__name__} as defined in phiml.math.magic", RuntimeWarning, stacklevel=2)
    unstacked = unstack(value, dim)
    for dim in reversed(unpacked_dims):
        unstacked = [stack(unstacked[i:i+dim.size], dim, **kwargs) for i in range(0, len(unstacked), dim.size)]
    return unstacked[0]
def unstack(value, dim: Union[str, tuple, list, set, ForwardRef('Shape'), Callable]) ‑> tuple

Un-stacks a Sliceable along one or multiple dimensions.

If multiple dimensions are given, the order of elements will be according to the dimension order in dim, i.e. elements along the last dimension will be neighbors in the returned tuple. If no dimension is given or none of the given dimensions exists on value, returns a tuple containing only value.

See Also: slice_().

Args

value
Shapable, such as Tensor
dim
Dimensions as Shape or comma-separated str or dimension type, i.e. channel(), spatial(), instance(), batch().

Returns

tuple of objects matching the type of value.

Examples

>>> unstack(expand(0, spatial(x=5)), 'x')
(0.0, 0.0, 0.0, 0.0, 0.0)
Expand source code
def unstack(value, dim: DimFilter) -> tuple:
    """
    Un-stacks a `Sliceable` along one or multiple dimensions.

    If multiple dimensions are given, the order of elements will be according to the dimension order in `dim`, i.e. elements along the last dimension will be neighbors in the returned `tuple`.
    If no dimension is given or none of the given dimensions exists on `value`, returns a `tuple` containing only `value`.

    See Also:
        `phiml.math.slice`.

    Args:
        value: `phiml.math.magic.Shapable`, such as `phiml.math.Tensor`
        dim: Dimensions as `Shape` or comma-separated `str` or dimension type, i.e. `channel`, `spatial`, `instance`, `batch`.

    Returns:
        `tuple` of objects matching the type of `value`.

    Examples:
        >>> unstack(expand(0, spatial(x=5)), 'x')
        (0.0, 0.0, 0.0, 0.0, 0.0)
    """
    assert isinstance(value, Sliceable) and isinstance(value, Shaped), f"Cannot unstack {type(value).__name__}. Must be Sliceable and Shaped, see https://tum-pbs.github.io/PhiML/phiml/math/magic.html"
    dims = shape(value).only(dim)
    if dims.rank == 0:
        return value,
    if dims.rank == 1:
        if hasattr(value, '__unstack__'):
            result = value.__unstack__(dims.names)
            if result is not NotImplemented:
                assert isinstance(result, tuple), f"__unstack__ must return a tuple but got {type(result)}"
                assert all([isinstance(item, Sliceable) for item in result]), f"__unstack__ must return a tuple of Sliceable objects but not all items were sliceable in {result}"
                return result
        return tuple([slice_(value, {dims.name: i}) for i in range(dims.size)])
    else:  # multiple dimensions
        if hasattr(value, '__pack_dims__'):
            packed_dim = batch('_unstack')
            value_packed = value.__pack_dims__(dims.names, packed_dim, pos=None)
            if value_packed is not NotImplemented:
                return unstack(value_packed, packed_dim)
        unstack_dim = _any_uniform_dim(dims)
        first_unstacked = unstack(value, unstack_dim)
        inner_unstacked = [unstack(v, dims.without(unstack_dim)) for v in first_unstacked]
        return sum(inner_unstacked, ())
def upsample2x(grid: phiml.math._tensors.Tensor, padding: Extrapolation = zero-gradient, dims: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function spatial>) ‑> phiml.math._tensors.Tensor

Resamples a regular grid to double the number of spatial sample points per dimension. The grid values at the new points are determined via linear interpolation.

Args

grid
half-size grid Tensor
padding
grid extrapolation
dims
dims along which up-sampling is applied. If None, up-sample along all spatial dims.

Returns

double-size grid

Expand source code
def upsample2x(grid: Tensor,
               padding: Extrapolation = extrapolation.BOUNDARY,
               dims: DimFilter = spatial) -> Tensor:
    """
    Resamples a regular grid to double the number of spatial sample points per dimension.
    The grid values at the new points are determined via linear interpolation.

    Args:
      grid: half-size grid
      padding: grid extrapolation
      dims: dims along which up-sampling is applied. If None, up-sample along all spatial dims.

    Returns:
      double-size grid

    """
    for dim in grid.shape.only(dims):
        left, center, right = shift(grid, (-1, 0, 1), dim.names, padding, None)
        interp_left = 0.25 * left + 0.75 * center
        interp_right = 0.75 * center + 0.25 * right
        stacked = math.stack_tensors([interp_left, interp_right], channel(_interleave='left,right'))
        grid = math.pack_dims(stacked, (dim.name, '_interleave'), dim)
    return grid
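A minimal usage sketch (not from the original docs):

from phiml import math
from phiml.math import spatial

coarse = math.random_uniform(spatial(x=8, y=8))
fine = math.upsample2x(coarse)     # expected spatial sizes x=16, y=16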
def use(backend: Union[str, phiml.backend._backend.Backend]) ‑> phiml.backend._backend.Backend

Sets the given backend as default. This setting can be overridden using with backend:.

See default_backend(), choose_backend_t().

Args

backend
Backend or backend name to set as default. Possible names are 'torch', 'tensorflow', 'jax', 'numpy'.

Returns

The chosen backend as a Backend instance.

Expand source code
def set_global_default_backend(backend: Union[str, Backend]) -> Backend:
    """
    Sets the given backend as default.
    This setting can be overridden using `with backend:`.

    See `default_backend()`, `choose_backend()`.

    Args:
        backend: `Backend` or backend name to set as default.
            Possible names are `'torch'`, `'tensorflow'`, `'jax'`, `'numpy'`.

    Returns:
        The chosen backend as a `Backend` instance.
    """
    if isinstance(backend, ModuleType):
        backend = str(backend)
    if isinstance(backend, str):
        init_backend(backend)
        matches = [b for b in BACKENDS if b.name == backend.lower()]
        if not matches:
            raise ValueError(f"Illegal backend: '{backend}'")
        backend = matches[0]
    assert isinstance(backend, Backend), backend
    if _DEFAULT[0] is not backend:
        _DEFAULT[0] = backend
        ML_LOGGER.info(f"Φ-ML's default backend is now {backend}")
    return backend
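A minimal usage sketch (not from the original docs):

from phiml import math

math.use('numpy')    # newly created tensors are now backed by NumPy
# math.use('torch')  # requires PyTorch to be installed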
def vec(name: Union[str, phiml.math._shape.Shape] = 'vector', *sequence, tuple_dim=(sequenceˢ=None), list_dim=(sequenceⁱ=None), **components) ‑> phiml.math._tensors.Tensor

Lay out the given values along a channel dimension without converting them to the current backend.

Args

name
Dimension name.
*sequence
Component values that will also be used as item names. If specified, components must be empty.
**components
Values by component name. If specified, no additional positional arguments must be given.
tuple_dim
Dimension for tuple values passed as components, e.g. vec(x=(0, 1), ...)
list_dim
Dimension for list values passed as components, e.g. vec(x=[0, 1], ...)

Returns

Tensor

Examples

>>> vec(x=1, y=0, z=-1)
(x=1, y=0, z=-1)
>>> vec(x=1., z=0)
(x=1.000, z=0.000)
>>> vec(x=tensor([1, 2, 3], instance('particles')), y=0)
(x=1, y=0); (x=2, y=0); (x=3, y=0) (particlesⁱ=3, vectorᶜ=x,y)
>>> vec(x=0, y=[0, 1])
(x=0, y=0); (x=0, y=1) (vectorᶜ=x,y, sequenceⁱ=2)
>>> vec(x=0, y=(0, 1))
(x=0, y=0); (x=0, y=1) (sequenceˢ=2, vectorᶜ=x,y)
Expand source code
def vec(name: Union[str, Shape] = 'vector', *sequence, tuple_dim=spatial('sequence'), list_dim=instance('sequence'), **components) -> Tensor:
    """
    Lay out the given values along a channel dimension without converting them to the current backend.

    Args:
        name: Dimension name.
        *sequence: Component values that will also be used as item names.
            If specified, `components` must be empty.
        **components: Values by component name.
            If specified, no additional positional arguments must be given.
        tuple_dim: Dimension for `tuple` values passed as components, e.g. `vec(x=(0, 1), ...)`
        list_dim: Dimension for `list` values passed as components, e.g. `vec(x=[0, 1], ...)`

    Returns:
        `Tensor`

    Examples:
        >>> vec(x=1, y=0, z=-1)
        (x=1, y=0, z=-1)

        >>> vec(x=1., z=0)
        (x=1.000, z=0.000)

        >>> vec(x=tensor([1, 2, 3], instance('particles')), y=0)
        (x=1, y=0); (x=2, y=0); (x=3, y=0) (particlesⁱ=3, vectorᶜ=x,y)

        >>> vec(x=0, y=[0, 1])
        (x=0, y=0); (x=0, y=1) (vectorᶜ=x,y, sequenceⁱ=2)

        >>> vec(x=0, y=(0, 1))
        (x=0, y=0); (x=0, y=1) (sequenceˢ=2, vectorᶜ=x,y)
    """
    dim = auto(name, channel)
    assert isinstance(dim, Shape), f"name must be a str or Shape but got '{type(name)}'"
    if sequence:
        assert not components, "vec() must be given either positional or keyword arguments but not both"
        if len(sequence) == 1 and isinstance(sequence[0], (tuple, list)):
            sequence = sequence[0]
        dim = dim.with_size([str(v) for v in sequence])
        return wrap(sequence, dim)
    else:
        def wrap_sequence(value):
            if isinstance(value, tuple):
                return wrap(value, tuple_dim)
            elif isinstance(value, list):
                return wrap(value, list_dim)
            else:
                return value
        components = {n: wrap_sequence(v) for n, v in components.items()}
        if not components:
            return wrap([], dim)
        return stack(components, dim, expand_values=True)
def vec_length(vec: phiml.math._tensors.Tensor, vec_dim: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function channel>, eps: Union[float, phiml.math._tensors.Tensor] = None)

Computes the vector length of vec.

Args

eps
Minimum vector length. Use to avoid inf gradients for zero-length vectors.
Expand source code
def vec_length(vec: Tensor, vec_dim: DimFilter = channel, eps: Union[float, Tensor] = None):
    """
    Computes the vector length of `vec`.

    Args:
        eps: Minimum vector length. Use to avoid `inf` gradients for zero-length vectors.
    """
    if vec.dtype.kind == complex:
        vec = stack([vec.real, vec.imag], channel('_ReIm'))
    squared = vec_squared(vec, vec_dim)
    if eps is not None:
        squared = math.maximum(squared, eps)
    return math.sqrt(squared)
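A minimal usage sketch (not from the original docs):

from phiml import math
from phiml.math import vec

v = vec(x=3., y=4.)
math.vec_length(v)                             # expected 5.0
math.vec_length(vec(x=0., y=0.), eps=1e-5)     # eps avoids inf gradients for zero-length vectors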
def vec_normalize(vec: phiml.math._tensors.Tensor, vec_dim: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function channel>, epsilon=None, allow_infinite=False)

Normalizes the vectors in vec. If vec_dim is None, the combined channel dimensions of vec are interpreted as a vector.

Args

vec
Tensor to normalize.
vec_dim
Dimensions to normalize over. By default, all channel dimensions are used to compute the vector length.
epsilon
(Optional) Zero-length threshold. Vectors shorter than this length yield the unit vector (1, 0, 0, …). If not specified, the zero-vector yields NaN as it cannot be normalized.
allow_infinite
Allow infinite components in vectors. These vectors will then only point towards the infinite components.
Expand source code
def vec_normalize(vec: Tensor, vec_dim: DimFilter = channel, epsilon=None, allow_infinite=False):
    """
    Normalizes the vectors in `vec`. If `vec_dim` is None, the combined channel dimensions of `vec` are interpreted as a vector.

    Args:
        vec: `Tensor` to normalize.
        vec_dim: Dimensions to normalize over. By default, all channel dimensions are used to compute the vector length.
        epsilon: (Optional) Zero-length threshold. Vectors shorter than this length yield the unit vector (1, 0, 0, ...).
            If not specified, the zero-vector yields `NaN` as it cannot be normalized.
        allow_infinite: Allow infinite components in vectors. These vectors will then only point towards the infinite components.
    """
    if allow_infinite:  # replace inf by 1, finite by 0
        is_infinite = ~math.is_finite(vec)
        inf_mask = is_infinite & ~math.is_nan(vec)
        vec = math.where(math.any_(is_infinite, channel), inf_mask, vec)
    length = vec_length(vec, vec_dim=vec_dim)
    if epsilon is None:
        return vec / length
    unit_vec = wrap([1] + [0] * (channel(vec).volume - 1), channel(vec))
    return math.where(abs(length) <= epsilon, unit_vec, vec / length)
def vec_squared(vec: phiml.math._tensors.Tensor, vec_dim: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = <function channel>)

Computes the squared length of vec. If vec_dim is None, the combined channel dimensions of vec are interpreted as a vector.

Expand source code
def vec_squared(vec: Tensor, vec_dim: DimFilter = channel):
    """ Computes the squared length of `vec`. If `vec_dim` is None, the combined channel dimensions of `vec` are interpreted as a vector. """
    return math.sum_(vec ** 2, dim=vec_dim)
def when_available(runnable: Callable, *tensor_args: phiml.math._tensors.Tensor)

Calls runnable(*tensor_args) once the concrete values of all tensors are available. In eager mode, runnable is called immediately. When jit-compiled, runnable is called after the jit-compiled function has returned.

Args

runnable
Function to call as runnable(*tensor_args). This can be a lambda function.
*tensor_args
Tensor values to pass to runnable with concrete values.
Expand source code
def when_available(runnable: Callable, *tensor_args: Tensor):
    """
    Calls `runnable(*tensor_args)` once the concrete values of all tensors are available.
    In eager mode, `runnable` is called immediately.
    When jit-compiled, `runnable` is called after the jit-compiled function has returned.

    Args:
        runnable: Function to call as `runnable(*tensor_args)`. This can be a `lambda` function.
        *tensor_args: `Tensor` values to pass to `runnable` with concrete values.
    """
    if _TRACING_LINEAR:
        raise RuntimeError(f"when_available() cannot be called inside a function marked as @jit_compile_linear")
    if all_available(*tensor_args):  # eager or NumPy
        runnable(*tensor_args)
    else:
        assert _TRACING_JIT, f"tensors are not available but no JIT function is being traced. Maybe you are using external jit?"
        for jit_f in _TRACING_JIT:
            jit_f.extract_and_call(tensor_args, runnable)
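A minimal sketch, assuming jit_compile from this module (not from the original docs):

from phiml import math
from phiml.math import spatial

@math.jit_compile
def simulate(x):
    math.when_available(lambda value: print("x =", value), x)  # runs once concrete values exist
    return x * 2

simulate(math.random_uniform(spatial(x=4)))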
def where(condition: Union[phiml.math._tensors.Tensor, float, int], value_true: Union[phiml.math._tensors.Tensor, float, int, Any] = None, value_false: Union[phiml.math._tensors.Tensor, float, int, Any] = None)

Builds a tensor by choosing either values from value_true or value_false depending on condition. If condition is not of type boolean, non-zero values are interpreted as True.

This function requires non-None values for value_true and value_false. To get the indices of True / non-zero values, use nonzero().

Args

condition
determines where to choose values from value_true or from value_false
value_true
Values to pick where condition != 0 / True
value_false
Values to pick where condition == 0 / False

Returns

Tensor containing dimensions of all inputs.

Expand source code
def where(condition: Union[Tensor, float, int],
          value_true: Union[Tensor, float, int, Any] = None,
          value_false: Union[Tensor, float, int, Any] = None):
    """
    Builds a tensor by choosing either values from `value_true` or `value_false` depending on `condition`.
    If `condition` is not of type boolean, non-zero values are interpreted as True.
    
    This function requires non-None values for `value_true` and `value_false`.
    To get the indices of True / non-zero values, use :func:`nonzero`.

    Args:
      condition: determines where to choose values from value_true or from value_false
      value_true: Values to pick where `condition != 0 / True`
      value_false: Values to pick where `condition == 0 / False`

    Returns:
        `Tensor` containing dimensions of all inputs.
    """
    if value_true is None:
        assert value_false is None, f"where can be used either with value_true and value_false or without both but got only value_false"
        warnings.warn("Use nonzero() instead of where() to get indices of non-zero elements.", SyntaxWarning, stacklevel=2)
        return nonzero(condition)
    from .extrapolation import Extrapolation, where as ext_where
    if isinstance(value_true, Extrapolation) or isinstance(value_false, Extrapolation):
        return ext_where(condition, value_true, value_false)
    condition = wrap(condition)
    value_true = wrap(value_true)
    value_false = wrap(value_false)

    def inner_where(c: Tensor, vt: Tensor, vf: Tensor):
        if vt._is_tracer or vf._is_tracer or c._is_tracer:
            return c * vt + (1 - c) * vf  # ToDo this does not take NaN into account
        if is_sparse(vt) or is_sparse(vf):
            if same_sparsity_pattern(vt, vf, allow_const=True) and same_sparsity_pattern(c, vt, allow_const=True):
                c_values = c._values if is_sparse(c) else c
                vt_values = vt._values if is_sparse(vt) else vt
                vf_values = vf._values if is_sparse(vf) else vf
                result_values = where(c_values, vt_values, vf_values)
                return c._with_values(result_values)
            raise NotImplementedError
        shape, (c, vt, vf) = broadcastable_native_tensors(c, vt, vf)
        result = choose_backend(c, vt, vf).where(c, vt, vf)
        return NativeTensor(result, shape)

    return broadcast_op(inner_where, [condition, value_true, value_false])
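
Example — a minimal sketch of selecting values element-wise:

from phiml import math
x = math.wrap([0.1, 0.4, 0.6, 0.9], math.spatial('x'))
math.where(x > 0.5, 1., -1.)  # (-1, -1, 1, 1) along x
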
def wrap(data, *shape: phiml.math._shape.Shape, default_list_dim=(vectorᶜ=None)) ‑> phiml.math._tensors.Tensor

Short for tensor() with convert=False.

Expand source code
def wrap(data, *shape: Shape, default_list_dim=channel('vector')) -> Tensor:
    """ Short for `phiml.math.tensor()` with `convert=False`. """
    return tensor(data, *shape, convert=False, default_list_dim=default_list_dim)
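
Example — a minimal sketch; wrap references the given data instead of converting it to the default backend:

from phiml import math
math.wrap(1.)  # scalar Tensor
math.wrap([[0, 1, 2], [3, 4, 5]], math.batch('b'), math.spatial('x'))  # sizes b=2, x=3 inferred from the data
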
def zeros(*shape: phiml.math._shape.Shape, dtype: Union[phiml.backend._dtype.DType, tuple, type] = None) ‑> phiml.math._tensors.Tensor

Define a tensor with specified shape with value 0.0 / 0 / False everywhere.

This method may not immediately allocate the memory to store the values.

See Also: zeros_like(), ones().

Args

*shape
This (possibly empty) sequence of Shapes is concatenated, preserving the order.
dtype
Data type as DType object. Defaults to float matching the current precision setting.

Returns

Tensor

Expand source code
def zeros(*shape: Shape, dtype: Union[DType, tuple, type] = None) -> Tensor:
    """
    Define a tensor with specified shape with value `0.0` / `0` / `False` everywhere.
    
    This method may not immediately allocate the memory to store the values.

    See Also:
        `zeros_like()`, `ones()`.

    Args:
        *shape: This (possibly empty) sequence of `Shape`s is concatenated, preserving the order.
        dtype: Data type as `DType` object. Defaults to `float` matching the current precision setting.

    Returns:
        `Tensor`
    """
    return _initialize(lambda shape: expand_tensor(NativeTensor(default_backend().zeros((), dtype=DType.as_dtype(dtype)), EMPTY_SHAPE), shape), shape)
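
Example — a minimal sketch:

from phiml import math
math.zeros(math.spatial(x=64, y=64))  # float zeros at the current precision
math.zeros(math.batch(b=2), math.channel(vector='x,y'), dtype=(int, 32))  # int32 zeros
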
def zeros_like(obj: Union[phiml.math._tensors.Tensor, PhiTreeNode]) ‑> Union[phiml.math._tensors.Tensor, PhiTreeNode]

Create a Tensor containing only 0.0 / 0 / False with the same shape and dtype as obj.

Expand source code
def zeros_like(obj: Union[Tensor, PhiTreeNode]) -> Union[Tensor, PhiTreeNode]:
    """ Create a `Tensor` containing only `0.0` / `0` / `False` with the same shape and dtype as `obj`. """
    nest, values = disassemble_tree(obj, cache=False, attr_type=value_attributes)
    zeros_ = []
    for val in values:
        val = wrap(val)
        with val.default_backend:
            zeros_.append(zeros(val.shape, dtype=val.dtype))
    return assemble_tree(nest, zeros_, attr_type=value_attributes)
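
Example — a minimal sketch:

from phiml import math
t = math.random_uniform(math.spatial(x=3))
math.zeros_like(t)  # zeros with the same shape, dtype and backend as t
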

Classes

class ConvergenceException

Base class for exceptions raised when a solve does not converge.

See Also: Diverged, NotConverged.

Expand source code
class ConvergenceException(RuntimeError):
    """
    Base class for exceptions raised when a solve does not converge.

    See Also:
        `Diverged`, `NotConverged`.
    """

    def __init__(self, result: SolveInfo):
        RuntimeError.__init__(self, result.msg)
        self.result: SolveInfo = result
        """ `SolveInfo` holding information about the solve. """

Ancestors

  • builtins.RuntimeError
  • builtins.Exception
  • builtins.BaseException

Subclasses

  • phiml.math._optimize.Diverged
  • phiml.math._optimize.NotConverged

Instance variables

var result

SolveInfo holding information about the solve.
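
Example — a minimal sketch of catching a failed solve, assuming math.solve_linear, math.Solve and math.jit_compile_linear as documented elsewhere in this module:

from phiml import math

y = math.ones(math.spatial(x=4))
try:
    x = math.solve_linear(math.jit_compile_linear(lambda x: 2 * x), y, math.Solve('CG', 1e-5, 1e-5, max_iterations=1))
except math.ConvergenceException as exc:
    print(exc.result.msg)  # SolveInfo message describing why the solve failed
    x = exc.result.x       # last estimate
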

class DType (kind: type, bits: int = None, precision: int = None)

Instances of DType represent the kind and size of data elements. The data type of tensors can be obtained via Tensor.dtype.

The following kinds of data types are supported:

  • float with 32 / 64 bits
  • complex with 64 / 128 bits
  • int with 8 / 16 / 32 / 64 bits
  • bool with 8 bits
  • str with 8·n bits (n characters)

Unlike with many computing libraries, there are no global variables corresponding to the available types. Instead, data types can simply be instantiated as needed.

Args

kind
Python type, one of (bool, int, float, complex, str)
bits
number of bits per element, a multiple of 8.
Expand source code
class DType:
    """
    Instances of `DType` represent the kind and size of data elements.
    The data type of tensors can be obtained via `Tensor.dtype`.

    The following kinds of data types are supported:

    * `float` with 32 / 64 bits
    * `complex` with 64 / 128 bits
    * `int` with 8 / 16 / 32 / 64 bits
    * `bool` with 8 bits
    * `str` with 8*n* bits

    Unlike with many computing libraries, there are no global variables corresponding to the available types.
    Instead, data types can simply be instantiated as needed.
    """

    def __init__(self, kind: type, bits: int = None, precision: int = None):
        """
        Args:
            kind: Python type, one of `(bool, int, float, complex, str)`
            bits: number of bits per element, a multiple of 8.
        """
        assert kind in (bool, int, float, complex, str, object)
        if kind is bool:
            assert bits is None, "Bits may not be set for bool or object"
            assert precision is None, f"Precision may only be specified for float or complex but got {kind}, precision={precision}"
            bits = 8
        elif kind == object:
            assert bits is None, "bits may not be set for bool or object"
            assert precision is None, f"Precision may only be specified for float or complex but got {kind}, precision={precision}"
            bits = int(np.round(np.log2(sys.maxsize))) + 1
        elif precision is not None:
            assert bits is None, "Specify either bits or precision when creating a DType but not both."
            assert kind in [float, complex], f"Precision may only be specified for float or complex but got {kind}, precision={precision}"
            if kind == float:
                bits = precision
            else:
                bits = precision * 2
        else:
            assert isinstance(bits, int), f"bits must be an int but got {type(bits)}"
        self.kind = kind
        """ Python class corresponding to the type of data, ignoring precision. One of (bool, int, float, complex, str) """
        self.bits = bits
        """ Number of bits used to store a single value of this type. See `DType.itemsize`. """

    @property
    def precision(self):
        """ Floating point precision. Only defined if `kind in (float, complex)`. For complex values, returns half of `DType.bits`. """
        if self.kind == float:
            return self.bits
        if self.kind == complex:
            return self.bits // 2
        else:
            return None

    @property
    def itemsize(self):
        """ Number of bytes used to storea single value of this type. See `DType.bits`. """
        assert self.bits % 8 == 0
        return self.bits // 8

    def __eq__(self, other):
        return isinstance(other, DType) and self.kind == other.kind and self.bits == other.bits

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash(self.kind) + hash(self.bits)

    def __repr__(self):
        return f"{self.kind.__name__}{self.bits}"

    @staticmethod
    def as_dtype(value: Union['DType', tuple, type, None]) -> Union['DType', None]:
        if isinstance(value, DType):
            return value
        elif value is int:
            return DType(int, 32)
        elif value is float:
            from . import get_precision
            return DType(float, get_precision())
        elif value is complex:
            from . import get_precision
            return DType(complex, 2 * get_precision())
        elif value is None:
            return None
        elif isinstance(value, tuple):
            return DType(*value)
        elif value is str:
            raise ValueError("str DTypes must specify precision")
        else:
            return DType(value)  # bool, object

Static methods

def as_dtype(value: Union[ForwardRef('DType'), tuple, type, None]) ‑> Optional[phiml.backend._dtype.DType]
Expand source code
@staticmethod
def as_dtype(value: Union['DType', tuple, type, None]) -> Union['DType', None]:
    if isinstance(value, DType):
        return value
    elif value is int:
        return DType(int, 32)
    elif value is float:
        from . import get_precision
        return DType(float, get_precision())
    elif value is complex:
        from . import get_precision
        return DType(complex, 2 * get_precision())
    elif value is None:
        return None
    elif isinstance(value, tuple):
        return DType(*value)
    elif value is str:
        raise ValueError("str DTypes must specify precision")
    else:
        return DType(value)  # bool, object

Instance variables

var bits

Number of bits used to store a single value of this type. See DType.itemsize.

var itemsize

Number of bytes used to store a single value of this type. See DType.bits.

Expand source code
@property
def itemsize(self):
    """ Number of bytes used to storea single value of this type. See `DType.bits`. """
    assert self.bits % 8 == 0
    return self.bits // 8
var kind

Python class corresponding to the type of data, ignoring precision. One of (bool, int, float, complex, str)

var precision

Floating point precision. Only defined if kind in (float, complex). For complex values, returns half of DType.bits.

Expand source code
@property
def precision(self):
    """ Floating point precision. Only defined if `kind in (float, complex)`. For complex values, returns half of `DType.bits`. """
    if self.kind == float:
        return self.bits
    if self.kind == complex:
        return self.bits // 2
    else:
        return None
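
Example — a minimal sketch:

from phiml.math import DType
DType(float, 32)              # float32
DType(complex, precision=64)  # complex128, i.e. 64 bits each for the real and imaginary part
DType.as_dtype(int)           # int32
DType(float, 64).itemsize     # 8 bytes
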
class Dict (*args, **kwargs)

Dictionary of Tensor or PhiTreeNode values. Dicts are not themselves tensors and do not have a shape. Use layout() to treat dict instances like tensors.

In addition to dictionary functions, supports mathematical operators with other Dicts and lookup via .key syntax. Dict implements PhiTreeNode so instances can be passed to math operations like sin().

Expand source code
class Dict(dict):
    """
    Dictionary of `Tensor` or `phiml.math.magic.PhiTreeNode` values.
    Dicts are not themselves tensors and do not have a shape.
    Use `layout()` to treat `dict` instances like tensors.

    In addition to dictionary functions, supports mathematical operators with other `Dict`s and lookup via `.key` syntax.
    `Dict` implements `phiml.math.magic.PhiTreeNode` so instances can be passed to math operations like `sin`.
    """

    def __value_attrs__(self):
        return tuple(self.keys())
    
    # --- Dict[key] ---

    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError as k:
            raise AttributeError(k)

    def __setattr__(self, key, value):
        self[key] = value

    def __delattr__(self, key):
        try:
            del self[key]
        except KeyError as k:
            raise AttributeError(k)
        
    # --- operators ---
    
    def __neg__(self):
        return Dict({k: -v for k, v in self.items()})
    
    def __invert__(self):
        return Dict({k: ~v for k, v in self.items()})
    
    def __abs__(self):
        return Dict({k: abs(v) for k, v in self.items()})
    
    def __round__(self, n=None):
        return Dict({k: round(v) for k, v in self.items()})

    def __add__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val + other[key] for key, val in self.items()})
        else:
            return Dict({key: val + other for key, val in self.items()})

    def __radd__(self, other):
        if isinstance(other, Dict):
            return Dict({key: other[key] + val for key, val in self.items()})
        else:
            return Dict({key: other + val for key, val in self.items()})

    def __sub__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val - other[key] for key, val in self.items()})
        else:
            return Dict({key: val - other for key, val in self.items()})

    def __rsub__(self, other):
        if isinstance(other, Dict):
            return Dict({key: other[key] - val for key, val in self.items()})
        else:
            return Dict({key: other - val for key, val in self.items()})

    def __mul__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val * other[key] for key, val in self.items()})
        else:
            return Dict({key: val * other for key, val in self.items()})

    def __rmul__(self, other):
        if isinstance(other, Dict):
            return Dict({key: other[key] * val for key, val in self.items()})
        else:
            return Dict({key: other * val for key, val in self.items()})

    def __truediv__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val / other[key] for key, val in self.items()})
        else:
            return Dict({key: val / other for key, val in self.items()})

    def __rtruediv__(self, other):
        if isinstance(other, Dict):
            return Dict({key: other[key] / val for key, val in self.items()})
        else:
            return Dict({key: other / val for key, val in self.items()})

    def __floordiv__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val // other[key] for key, val in self.items()})
        else:
            return Dict({key: val // other for key, val in self.items()})

    def __rfloordiv__(self, other):
        if isinstance(other, Dict):
            return Dict({key: other[key] // val for key, val in self.items()})
        else:
            return Dict({key: other // val for key, val in self.items()})

    def __pow__(self, power, modulo=None):
        assert modulo is None
        if isinstance(power, Dict):
            return Dict({key: val ** power[key] for key, val in self.items()})
        else:
            return Dict({key: val ** power for key, val in self.items()})

    def __rpow__(self, other):
        if isinstance(other, Dict):
            return Dict({key: other[key] ** val for key, val in self.items()})
        else:
            return Dict({key: other ** val for key, val in self.items()})

    def __mod__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val % other[key] for key, val in self.items()})
        else:
            return Dict({key: val % other for key, val in self.items()})

    def __rmod__(self, other):
        if isinstance(other, Dict):
            return Dict({key: other[key] % val for key, val in self.items()})
        else:
            return Dict({key: other % val for key, val in self.items()})

    def __eq__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val == other[key] for key, val in self.items()})
        else:
            return Dict({key: val == other for key, val in self.items()})

    def __ne__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val != other[key] for key, val in self.items()})
        else:
            return Dict({key: val != other for key, val in self.items()})

    def __lt__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val < other[key] for key, val in self.items()})
        else:
            return Dict({key: val < other for key, val in self.items()})

    def __le__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val <= other[key] for key, val in self.items()})
        else:
            return Dict({key: val <= other for key, val in self.items()})

    def __gt__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val > other[key] for key, val in self.items()})
        else:
            return Dict({key: val > other for key, val in self.items()})

    def __ge__(self, other):
        if isinstance(other, Dict):
            return Dict({key: val >= other[key] for key, val in self.items()})
        else:
            return Dict({key: val >= other for key, val in self.items()})

    # --- overridden methods ---

    def copy(self):
        return Dict(self)

Ancestors

  • builtins.dict

Methods

def copy(self)

D.copy() -> a shallow copy of D

Expand source code
def copy(self):
    return Dict(self)
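
Example — a minimal sketch:

from phiml import math
d = math.Dict(a=math.wrap([1., 2., 3.], math.spatial('x')), b=math.wrap(2.))
(d * 2).a     # operators are applied per key; values are accessible via attribute syntax
math.sin(d)   # Dict is a PhiTreeNode, so math functions apply to all values
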
class Diverged

Raised if the optimization was stopped prematurely and cannot continue. This may indicate that no solution exists.

The values of the last estimate x may or may not be finite.

This exception inherits from ConvergenceException.

See Also: NotConverged.

Expand source code
class Diverged(ConvergenceException):
    """
    Raised if the optimization was stopped prematurely and cannot continue.
    This may indicate that no solution exists.

    The values of the last estimate `x` may or may not be finite.

    This exception inherits from `ConvergenceException`.

    See Also:
        `NotConverged`.
    """

    def __init__(self, result: SolveInfo):
        ConvergenceException.__init__(self, result)

Ancestors

  • phiml.math._optimize.ConvergenceException
  • builtins.RuntimeError
  • builtins.Exception
  • builtins.BaseException
class IncompatibleShapes (message, *shapes: phiml.math._shape.Shape)

Raised when the shape of a tensor does not match the other arguments.

Expand source code
class IncompatibleShapes(Exception):
    """
    Raised when the shape of a tensor does not match the other arguments.
    """
    def __init__(self, message, *shapes: Shape):
        Exception.__init__(self, message)
        self.shapes = shapes

Ancestors

  • builtins.Exception
  • builtins.BaseException
class LinearFunction

Just-in-time compiled linear function of Tensor arguments and return values.

Use jit_compile_linear() to create a linear function representation.

Expand source code
class LinearFunction(Generic[X, Y], Callable[[X], Y]):
    """
    Just-in-time compiled linear function of `Tensor` arguments and return values.

    Use `jit_compile_linear()` to create a linear function representation.
    """

    def __init__(self, f, auxiliary_args: Set[str], forget_traces: bool):
        self.f = f
        self.f_params = function_parameters(f)
        self.auxiliary_args = auxiliary_args
        self.forget_traces = forget_traces
        self.matrices_and_biases: Dict[SignatureKey, Tuple[SparseCoordinateTensor, Tensor, Tuple]] = {}
        self.nl_jit = JitFunction(f, self.auxiliary_args, forget_traces)  # for backends that do not support sparse matrices

    def _get_or_trace(self, key: SignatureKey, args: tuple, f_kwargs: dict):
        if not key.tracing and key in self.matrices_and_biases:
            return self.matrices_and_biases[key]
        else:
            if self.forget_traces:
                self.matrices_and_biases.clear()
            _TRACING_LINEAR.append(self)
            try:
                matrix, bias, raw_out = matrix_from_function(self.f, *args, **f_kwargs, auto_compress=True, _return_raw_output=True)
            finally:
                assert _TRACING_LINEAR.pop(-1) is self
            if not key.tracing:
                self.matrices_and_biases[key] = matrix, bias, raw_out
                if len(self.matrices_and_biases) >= 4:
                    warnings.warn(f"""Φ-ML-lin: The compiled linear function '{f_name(self.f)}' was traced {len(self.matrices_and_biases)} times.
Performing many traces may be slow and cause memory leaks.
Tensors in auxiliary arguments (all except the first parameter unless specified otherwise) are compared by reference, not by tensor values.
Auxiliary arguments: {key.auxiliary_kwargs}
Multiple linear traces can be avoided by jit-compiling the code that calls the linear function or setting forget_traces=True.""", RuntimeWarning, stacklevel=3)
            return matrix, bias, raw_out

    def __call__(self, *args: X, **kwargs) -> Y:
        try:
            key, tensors, natives, x = key_from_args(args, kwargs, self.f_params, cache=False, aux=self.auxiliary_args)
        except LinearTraceInProgress:
            return self.f(*args, **kwargs)
        assert tensors, "Linear function requires at least one argument"
        if any(isinstance(t, ShiftLinTracer) for t in tensors):
            # TODO: if t is identity, use cached ShiftLinTracer, otherwise multiply two ShiftLinTracers
            return self.f(*args, **kwargs)
        if not key.backend.supports(Backend.sparse_coo_tensor):  # This might be called inside a Jax linear solve
            # warnings.warn(f"Sparse matrices are not supported by {backend}. Falling back to regular jit compilation.", RuntimeWarning)
            if not math.all_available(*tensors):  # avoid nested tracing, Typical case jax.scipy.sparse.cg(LinearFunction). Nested traces cannot be reused which results in lots of traces per cg.
                ML_LOGGER.debug(f"Φ-ML-lin: Running '{f_name(self.f)}' as-is with {key.backend} because it is being traced.")
                return self.f(*args, **kwargs)
            else:
                return self.nl_jit(*args, **kwargs)
        matrix, bias, (out_tree, out_tensors) = self._get_or_trace(key, args, kwargs)
        result = matrix @ tensors[0] + bias
        out_tensors = list(out_tensors)
        out_tensors[0] = result
        return assemble_tree(out_tree, out_tensors)

    def sparse_matrix(self, *args, **kwargs):
        """
        Create an explicit representation of this linear function as a sparse matrix.

        See Also:
            `sparse_matrix_and_bias()`.

        Args:
            *args: Function arguments. This determines the size of the matrix.
            **kwargs: Additional keyword arguments for the linear function.

        Returns:
            Sparse matrix representation with `values` property and `native()` method.
        """
        key, *_ = key_from_args(args, kwargs, self.f_params, cache=False, aux=self.auxiliary_args)
        matrix, bias, *_ = self._get_or_trace(key, args, kwargs)
        assert math.close(bias, 0), "This is an affine function and cannot be represented by a single matrix. Use sparse_matrix_and_bias() instead."
        return matrix

    def sparse_matrix_and_bias(self, *args, **kwargs):
        """
        Create an explicit representation of this affine function as a sparse matrix and a bias vector.

        Args:
            *args: Positional arguments to the linear function.
                This determines the size of the matrix.
            **kwargs: Additional keyword arguments for the linear function.

        Returns:
            matrix: Sparse matrix representation with `values` property and `native()` method.
            bias: `Tensor`
        """
        key, *_ = key_from_args(args, kwargs, self.f_params, cache=False, aux=self.auxiliary_args)
        return self._get_or_trace(key, args, kwargs)[:2]

    def __repr__(self):
        return f"lin({f_name(self.f)})"

Ancestors

  • collections.abc.Callable
  • typing.Generic

Methods

def sparse_matrix(self, *args, **kwargs)

Create an explicit representation of this linear function as a sparse matrix.

See Also: sparse_matrix_and_bias().

Args

*args
Function arguments. This determines the size of the matrix.
**kwargs
Additional keyword arguments for the linear function.

Returns

Sparse matrix representation with values property and native() method.

Expand source code
def sparse_matrix(self, *args, **kwargs):
    """
    Create an explicit representation of this linear function as a sparse matrix.

    See Also:
        `sparse_matrix_and_bias()`.

    Args:
        *args: Function arguments. This determines the size of the matrix.
        **kwargs: Additional keyword arguments for the linear function.

    Returns:
        Sparse matrix representation with `values` property and `native()` method.
    """
    key, *_ = key_from_args(args, kwargs, self.f_params, cache=False, aux=self.auxiliary_args)
    matrix, bias, *_ = self._get_or_trace(key, args, kwargs)
    assert math.close(bias, 0), "This is an affine function and cannot be represented by a single matrix. Use sparse_matrix_and_bias() instead."
    return matrix
def sparse_matrix_and_bias(self, *args, **kwargs)

Create an explicit representation of this affine function as a sparse matrix and a bias vector.

Args

*args
Positional arguments to the linear function. This determines the size of the matrix.
**kwargs
Additional keyword arguments for the linear function.

Returns

matrix
Sparse matrix representation with values property and native() method.
bias
Tensor
Expand source code
def sparse_matrix_and_bias(self, *args, **kwargs):
    """
    Create an explicit representation of this affine function as a sparse matrix and a bias vector.

    Args:
        *args: Positional arguments to the linear function.
            This determines the size of the matrix.
        **kwargs: Additional keyword arguments for the linear function.

    Returns:
        matrix: Sparse matrix representation with `values` property and `native()` method.
        bias: `Tensor`
    """
    key, *_ = key_from_args(args, kwargs, self.f_params, cache=False, aux=self.auxiliary_args)
    return self._get_or_trace(key, args, kwargs)[:2]
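
Example — a minimal sketch using jit_compile_linear (referenced above) to obtain the matrix of a linear function:

from phiml import math
from phiml.math import jit_compile_linear, spatial

@jit_compile_linear
def double(x):
    return 2 * x  # any linear function of x

matrix = double.sparse_matrix(math.ones(spatial(x=4)))  # explicit sparse representation of 2·identity
y = double(math.ones(spatial(x=4)))                     # evaluated via the traced matrix
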
class NotConverged

Raised during optimization if the desired accuracy was not reached within the maximum number of iterations.

This exception inherits from ConvergenceException.

See Also: Diverged.

Expand source code
class NotConverged(ConvergenceException):
    """
    Raised during optimization if the desired accuracy was not reached within the maximum number of iterations.

    This exception inherits from `ConvergenceException`.

    See Also:
        `Diverged`.
    """

    def __init__(self, result: SolveInfo):
        ConvergenceException.__init__(self, result)

Ancestors

  • phiml.math._optimize.ConvergenceException
  • builtins.RuntimeError
  • builtins.Exception
  • builtins.BaseException
class Shape

Shapes enumerate dimensions, each consisting of a name, size and type.

There are five types of dimensions: batch(), dual(), spatial(), channel(), and instance().

To construct a Shape, use batch(), dual(), spatial(), channel() or instance(), depending on the desired dimension type. To create a shape with multiple types, use merge_shapes(), concat_shapes() or the syntax shape1 & shape2.

The __init__ constructor is for internal use only.

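Example — a minimal sketch using the constructors named above:

from phiml.math import batch, spatial, channel
s = batch(examples=8) & spatial(x=64, y=64) & channel(vector='x,y')
s.spatial        # (xˢ=64, yˢ=64)
s.get_size('x')  # 64
'vector' in s    # True
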
Expand source code
class Shape:
    """
    Shapes enumerate dimensions, each consisting of a name, size and type.

    There are five types of dimensions: `batch`, `dual`, `spatial`, `channel`, and `instance`.
    """

    def __init__(self, sizes: tuple, names: tuple, types: tuple, item_names: tuple):
        """
        To construct a `Shape`, use `batch`, `dual`, `spatial`, `channel` or `instance`, depending on the desired dimension type.
        To create a shape with multiple types, use `merge_shapes()`, `concat_shapes()` or the syntax `shape1 & shape2`.

        The `__init__` constructor is for internal use only.
        """
        if len(sizes) > 0 and any(s is not None and not isinstance(s, int) for s in sizes):
            from ._tensors import Tensor
            sizes = tuple([s if isinstance(s, Tensor) or s is None else int(s) for s in sizes])  # TODO replace this by an assert
        self.sizes: tuple = sizes
        """
        Ordered dimension sizes as `tuple`.
        The size of a dimension can be an `int` or a `Tensor` for [non-uniform shapes](https://tum-pbs.github.io/PhiML/Non_Uniform.html).
        
        See Also:
            `Shape.get_size()`, `Shape.size`, `Shape.shape`.
        """
        self.names: Tuple[str] = names
        """
        Ordered dimension names as `tuple[str]`.
        
        See Also:
            `Shape.name`.
        """
        self.types: Tuple[str] = types  # undocumented, may be private
        self.item_names: Tuple[Optional[Tuple[str, ...]]] = (None,) * len(sizes) if item_names is None else item_names  # undocumented
        if DEBUG_CHECKS:
            assert len(sizes) == len(names) == len(types) == len(item_names), f"sizes={sizes}, names={names}, types={types}, item_names={item_names}"
            assert len(set(names)) == len(names), f"Duplicate dimension names: {names}"
            assert all(isinstance(n, str) for n in names), f"All names must be of type string but got {names}"
            assert isinstance(self.item_names, tuple)
            assert all([items is None or isinstance(items, tuple) for items in self.item_names])
            assert all([items is None or all([isinstance(n, str) for n in items]) for items in self.item_names])
            from ._tensors import Tensor
            for name, size in zip(names, sizes):
                if isinstance(size, Tensor):
                    assert size.rank > 0
            for name, size, item_names in zip(self.names, self.sizes, self.item_names):
                if item_names is not None:
                    try:
                        int(size)
                    except Exception:
                        raise AssertionError(f"When item names are present, the size must be an integer type")
                    assert len(item_names) == size, f"Number of item names ({len(item_names)}) does not match size {size}"
                    for item_name in item_names:
                        assert item_name, f"Empty item name"
                    assert len(set(item_names)) == len(item_names), f"Duplicate item names in shape {self} at dim '{name}': {item_names}"
            for name, type in zip(names, types):
                if type == DUAL_DIM:
                    assert name.startswith('~'), f"Dual dimensions must start with '~' but got '{name}' in {self}"

    def _check_is_valid_tensor_shape(self):
        if DEBUG_CHECKS:
            from ._tensors import Tensor
            for name, size in zip(self.names, self.sizes):
                if size is not None and isinstance(size, Tensor):
                    assert size.rank > 0
                    for dim in size.shape.names:
                        assert dim in self.names, f"Dimension {name} varies along {dim} but {dim} is not part of the Shape {self}"

    def _to_dict(self, include_sizes=True):
        result = dict(names=self.names, types=self.types, item_names=self.item_names)
        if include_sizes:
            if not all(isinstance(s, int) for s in self.sizes):
                raise NotImplementedError()
            result['sizes'] = self.sizes
        return result

    @staticmethod
    def _from_dict(dict_: dict):
        names = tuple(dict_['names'])
        sizes = tuple(dict_['sizes']) if 'sizes' in dict_ else (None,) * len(names)
        item_names = tuple([None if n is None else tuple(n) for n in dict_['item_names']])
        return Shape(sizes, names, tuple(dict_['types']), item_names)

    @property
    def name_list(self):
        return list(self.names)

    @property
    def _named_sizes(self):
        return zip(self.names, self.sizes)

    @property
    def _dimensions(self):
        return zip(self.sizes, self.names, self.types, self.item_names)

    @property
    def untyped_dict(self):
        """
        Returns:
            `dict` containing dimension names as keys.
                The values are either the item names as `tuple` if available, otherwise the size.
        """
        return {name: self.get_item_names(i) or self.get_size(i) for i, name in enumerate(self.names)}

    def __len__(self):
        return len(self.sizes)

    def __contains__(self, item):
        if isinstance(item, (str, tuple, list)):
            dims = parse_dim_order(item)
            return all(dim in self.names for dim in dims)
        elif isinstance(item, Shape):
            return all([d in self.names for d in item.names])
        else:
            raise ValueError(item)

    def isdisjoint(self, other: Union['Shape', tuple, list, str]):
        """ Shapes are disjoint if all dimension names of one shape do not occur in the other shape. """
        other = parse_dim_order(other)
        return not any(dim in self.names for dim in other)

    def __iter__(self):
        return iter(self[i] for i in range(self.rank))

    def index(self, dim: Union[str, 'Shape', None]) -> int:
        """
        Finds the index of the dimension within this `Shape`.

        See Also:
            `Shape.indices()`.

        Args:
            dim: Dimension name or single-dimension `Shape`.

        Returns:
            Index as `int`.
        """
        if dim is None:
            return None
        elif isinstance(dim, str):
            if dim not in self.names:
                raise ValueError(f"Shape {self} has no dimension '{dim}'")
            return self.names.index(dim)
        elif isinstance(dim, Shape):
            assert dim.rank == 1, f"index() requires a single dimension as input but got {dim}. Use indices() for multiple dimensions."
            return self.names.index(dim.name)
        else:
            raise ValueError(f"index() requires a single dimension as input but got {dim}")

    def indices(self, dims: Union[tuple, list, 'Shape']) -> Tuple[int]:
        """
        Finds the indices of the given dimensions within this `Shape`.

        See Also:
            `Shape.index()`.

        Args:
            dims: Sequence of dimensions as `tuple`, `list` or `Shape`.

        Returns:
            Indices as `tuple[int]`.
        """
        if isinstance(dims, (list, tuple, set)):
            return tuple([self.index(n) for n in dims if n in self.names])
        elif isinstance(dims, Shape):
            return tuple([self.index(n) for n in dims.names if n in self.names])
        else:
            raise ValueError(f"indices() requires a sequence of dimensions but got {dims}")

    def get_size(self, dim: Union[str, 'Shape', int], default=None):
        """
        See Also:
            `Shape.get_sizes()`, `Shape.size`

        Args:
            dim: Dimension, either as name `str` or single-dimension `Shape` or index `int`.
            default: (Optional) If the dim does not exist, return this value instead of raising an error.

        Returns:
            Size associated with `dim` as `int` or `Tensor`.
        """
        if isinstance(dim, int):
            assert default is None, "Cannot use a default value when passing an int for dim"
            return self.sizes[dim]
        if isinstance(dim, Shape):
            assert dim.rank == 1, f"get_size() requires a single dimension but got {dim}. Use indices() to get multiple sizes."
            dim = dim.name
        if isinstance(dim, str):
            if dim not in self.names:
                if default is None:
                    raise KeyError(f"get_size() failed because '{dim}' is not part of Shape {self} and no default value was provided")
                else:
                    return default
            return self.sizes[self.names.index(dim)]
        else:
            raise ValueError(f"get_size() requires a single dimension but got {dim}. Use indices() to get multiple sizes.")

    def get_sizes(self, dims: Union[tuple, list, 'Shape']) -> tuple:
        """
        See Also:
            `Shape.get_size()`

        Args:
            dims: Dimensions as `tuple`, `list` or `Shape`.

        Returns:
            `tuple`
        """
        assert isinstance(dims, (tuple, list, Shape)), f"get_sizes() requires a sequence of dimensions but got {dims}"
        return tuple([self.get_size(dim) for dim in dims])

    def get_type(self, dim: Union[str, 'Shape']) -> str:
        # undocumented, use get_dim_type() instead.
        if isinstance(dim, str):
            return self.types[self.names.index(dim)]
        elif isinstance(dim, Shape):
            assert dim.rank == 1, f"Shape.get_type() only accepts single-dimension Shapes but got {dim}"
            return self.types[self.names.index(dim.name)]
        else:
            raise ValueError(dim)

    def get_dim_type(self, dim: Union[str, 'Shape']) -> Callable:
        """
        Args:
            dim: Dimension, either as name `str` or single-dimension `Shape`.

        Returns:
            Dimension type, one of `batch`, `spatial`, `instance`, `channel`.
        """
        return DIM_FUNCTIONS[self.get_type(dim)]

    def get_types(self, dims: Union[tuple, list, 'Shape']) -> tuple:
        # undocumented, do not use
        if isinstance(dims, (tuple, list)):
            return tuple(self.get_type(n) for n in dims)
        elif isinstance(dims, Shape):
            return tuple(self.get_type(n) for n in dims.names)
        else:
            raise ValueError(dims)

    def get_item_names(self, dim: Union[str, 'Shape', int], fallback_spatial=False) -> Union[tuple, None]:
        """
        Args:
            fallback_spatial: If `True` and no item names are defined for `dim` and `dim` is a channel dimension, the spatial dimension names are interpreted as item names along `dim` in the order they are listed in this `Shape`.
            dim: Dimension, either as `int` index, `str` name or single-dimension `Shape`.

        Returns:
            Item names as `tuple` or `None` if not defined.
        """
        if isinstance(dim, int):
            result = self.item_names[dim]
        elif isinstance(dim, str):
            result = self.item_names[self.index(dim)]
        elif isinstance(dim, Shape):
            assert dim.rank == 1, f"Shape.get_type() only accepts single-dimension Shapes but got {dim}"
            result = self.item_names[self.names.index(dim.name)]
        else:
            raise ValueError(dim)
        if result is not None:
            return result
        elif fallback_spatial and self.spatial_rank == self.get_size(dim) and self.get_type(dim) == CHANNEL_DIM:
            return self.spatial.names
        else:
            return None

    def flipped(self, dims: Union[List[str], Tuple[str]]):
        item_names = list(self.item_names)
        for dim in dims:
            if dim in self.names:
                dim_i_n = self.get_item_names(dim)
                if dim_i_n is not None:
                    item_names[self.index(dim)] = tuple(reversed(dim_i_n))
        return Shape(self.sizes, self.names, self.types, tuple(item_names))

    def __getitem__(self, selection):
        if isinstance(selection, int):
            return Shape((self.sizes[selection],), (self.names[selection],), (self.types[selection],), (self.item_names[selection],))
        elif isinstance(selection, slice):
            return Shape(self.sizes[selection], self.names[selection], self.types[selection], self.item_names[selection])
        elif isinstance(selection, str):
            if ',' in selection:
                selection = [self.index(s.strip()) for s in selection.split(',')]
            else:
                selection = self.index(selection)
            return self[selection]
        elif isinstance(selection, Shape):
            selection = selection.names
        if isinstance(selection, (tuple, list)):
            selection = [self.index(s) if isinstance(s, str) else s for s in selection]
            return Shape(tuple([self.sizes[i] for i in selection]), tuple([self.names[i] for i in selection]), tuple([self.types[i] for i in selection]), tuple([self.item_names[i] for i in selection]))
        raise AssertionError("Can only access shape elements as shape[int], shape[str], shape[slice], shape[Sequence] or shape[Shape]")

    @property
    def reversed(self):
        return Shape(tuple(reversed(self.sizes)), tuple(reversed(self.names)), tuple(reversed(self.types)), tuple(reversed(self.item_names)))

    @property
    def batch(self) -> 'Shape':
        """
        Filters this shape, returning only the batch dimensions as a new `Shape` object.

        See also:
            `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.

        Returns:
            New `Shape` object
        """
        return self[[i for i, t in enumerate(self.types) if t == BATCH_DIM]]

    @property
    def non_batch(self) -> 'Shape':
        """
        Filters this shape, returning only the non-batch dimensions as a new `Shape` object.

        See also:
            `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.

        Returns:
            New `Shape` object
        """
        return self[[i for i, t in enumerate(self.types) if t != BATCH_DIM]]

    @property
    def spatial(self) -> 'Shape':
        """
        Filters this shape, returning only the spatial dimensions as a new `Shape` object.

        See also:
            `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.

        Returns:
            New `Shape` object
        """
        return self[[i for i, t in enumerate(self.types) if t == SPATIAL_DIM]]

    @property
    def non_spatial(self) -> 'Shape':
        """
        Filters this shape, returning only the non-spatial dimensions as a new `Shape` object.

        See also:
            `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.

        Returns:
            New `Shape` object
        """
        return self[[i for i, t in enumerate(self.types) if t != SPATIAL_DIM]]

    @property
    def instance(self) -> 'Shape':
        """
        Filters this shape, returning only the instance dimensions as a new `Shape` object.

        See also:
            `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.

        Returns:
            New `Shape` object
        """
        return self[[i for i, t in enumerate(self.types) if t == INSTANCE_DIM]]

    @property
    def non_instance(self) -> 'Shape':
        """
        Filters this shape, returning only the non-instance dimensions as a new `Shape` object.

        See also:
            `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.

        Returns:
            New `Shape` object
        """
        return self[[i for i, t in enumerate(self.types) if t != INSTANCE_DIM]]

    @property
    def channel(self) -> 'Shape':
        """
        Filters this shape, returning only the channel dimensions as a new `Shape` object.

        See also:
            `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.

        Returns:
            New `Shape` object
        """
        return self[[i for i, t in enumerate(self.types) if t == CHANNEL_DIM]]

    @property
    def non_channel(self) -> 'Shape':
        """
        Filters this shape, returning only the non-channel dimensions as a new `Shape` object.

        See also:
            `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.

        Returns:
            New `Shape` object
        """
        return self[[i for i, t in enumerate(self.types) if t != CHANNEL_DIM]]

    @property
    def dual(self) -> 'Shape':
        """
        Filters this shape, returning only the dual dimensions as a new `Shape` object.

        See also:
            `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.

        Returns:
            New `Shape` object
        """
        return self[[i for i, t in enumerate(self.types) if t == DUAL_DIM]]

    @property
    def non_dual(self) -> 'Shape':
        """
        Filters this shape, returning only the non-dual dimensions as a new `Shape` object.

        See also:
            `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.

        Returns:
            New `Shape` object
        """
        return self[[i for i, t in enumerate(self.types) if t != DUAL_DIM]]

    @property
    def primal(self) -> 'Shape':
        """
        Filters this shape, returning only the primal dimensions (all except batch and dual) as a new `Shape` object.

        See also:
            `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.

        Returns:
            New `Shape` object
        """
        return self[[i for i, t in enumerate(self.types) if t not in [DUAL_DIM, BATCH_DIM]]]

    @property
    def non_primal(self) -> 'Shape':
        """
        Filters this shape, returning only batch and dual dimensions as a new `Shape` object.

        See also:
            `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.

        Returns:
            New `Shape` object
        """
        return self[[i for i, t in enumerate(self.types) if t in [DUAL_DIM, BATCH_DIM]]]

    @property
    def non_singleton(self) -> 'Shape':
        """
        Filters this shape, returning only non-singleton dimensions as a new `Shape` object.
        Dimensions are singleton if their size is exactly `1`.

        Returns:
            New `Shape` object
        """
        return self[[i for i, s in enumerate(self.sizes) if not _size_equal(s, 1)]]

    @property
    def singleton(self) -> 'Shape':
        """
        Filters this shape, returning only singleton dimensions as a new `Shape` object.
        Dimensions are singleton if their size is exactly `1`.

        Returns:
            New `Shape` object
        """
        return self[[i for i, s in enumerate(self.sizes) if _size_equal(s, 1)]]

    def assert_all_sizes_defined(self):
        """
        Filters this shape, returning only singleton dimensions as a new `Shape` object.
        Dimensions are singleton if their size is exactly `1`.

        Returns:
            New `Shape` object
        """
        for n, s in zip(self.names, self.sizes):
            assert s is not None, f"All sizes must be defined but dim '{n}' is undefined in shape {self}"

    def as_channel(self):
        """Returns a copy of this `Shape` with all dimensions of type *channel*."""
        return channel(**self.untyped_dict)

    def as_batch(self):
        """Returns a copy of this `Shape` with all dimensions of type *batch*."""
        return batch(**self.untyped_dict)

    def as_spatial(self):
        """Returns a copy of this `Shape` with all dimensions of type *spatial*."""
        return spatial(**self.untyped_dict)

    def as_instance(self):
        """Returns a copy of this `Shape` with all dimensions of type *instance*."""
        return instance(**self.untyped_dict)

    def as_dual(self):
        """Returns a copy of this `Shape` with all dimensions of type *dual*."""
        return dual(**self.untyped_dict)

    def _more_dual(self):
        return Shape(self.sizes, tuple('~' + n for n in self.names), (DUAL_DIM,) * len(self.names), self.item_names)

    def _less_dual(self, default_type='unknown_primal'):
        names = tuple(n[1:] if n.startswith('~') else n for n in self.names)
        types = [t if t != DUAL_DIM else (DUAL_DIM if n.startswith('~~') else default_type) for n, t in zip(self.names, self.types)]
        return Shape(self.sizes, names, tuple(types), self.item_names)

    def unstack(self, dim='dims') -> Tuple['Shape']:
        """
        Slices this `Shape` along a dimension.
        The dimension listing the sizes of the shape is referred to as `'dims'`.

        Non-uniform tensor shapes may be unstacked along other dimensions as well, see
        https://tum-pbs.github.io/PhiML/Non_Uniform.html

        Args:
            dim: dimension to unstack

        Returns:
            slices of this shape
        """
        if dim == 'dims':
            return tuple(Shape((self.sizes[i],), (self.names[i],), (self.types[i],), (self.item_names[i],)) for i in range(self.rank))
        if dim not in self and self.is_uniform:
            return tuple([self])
        from ._tensors import Tensor
        if dim in self:
            inner = self.without(dim)
            dim_size = self.get_size(dim)
        else:
            inner = self
            dim_size = self.shape.get_size(dim)
        sizes = []
        for size in inner.sizes:
            if isinstance(size, Tensor) and dim in size.shape:
                sizes.append(size._unstack(dim))
                dim_size = size.shape.get_size(dim)
            else:
                sizes.append(size)
        assert isinstance(dim_size, int)
        shapes = tuple(Shape(tuple([int(size[i]) if isinstance(size, tuple) else size for size in sizes]), inner.names, inner.types, inner.item_names) for i in range(dim_size))
        return shapes

    @property
    def name(self) -> str:
        """
        Only for Shapes containing exactly one single dimension.
        Returns the name of the dimension.

        See Also:
            `Shape.names`.
        """
        assert self.rank == 1, f"Shape.name is only defined for shapes of rank 1. shape={self}"
        return self.names[0]

    @property
    def size(self) -> int:
        """
        Only for Shapes containing exactly one single dimension.
        Returns the size of the dimension.

        See Also:
            `Shape.sizes`, `Shape.get_size()`.
        """
        assert self.rank == 1, f"Shape.size is only defined for shapes of rank 1 but has dims {self}"
        return self.sizes[0]

    @property
    def type(self) -> str:
        """
        Only for Shapes containing exactly one single dimension.
        Returns the type of the dimension.

        See Also:
            `Shape.get_type()`.
        """
        assert self.rank == 1, "Shape.type is only defined for shapes of rank 1."
        return self.types[0]

    @property
    def dim_type(self):
        types = set(self.types)
        assert len(types) == 1, f"Shape contains multiple types: {self}"
        return DIM_FUNCTIONS[next(iter(types))]

    def __int__(self):
        assert self.rank == 1, "int(Shape) is only defined for shapes of rank 1."
        return self.sizes[0]

    def mask(self, names: Union[tuple, list, set, 'Shape']):
        """
        Returns a binary sequence corresponding to the names of this Shape.
        A value of 1 means that a dimension of this Shape is contained in `names`.

        Args:
          names: instance of dimension
          names: tuple or list or set: 

        Returns:
          binary sequence

        """
        if isinstance(names, str):
            names = [names]
        elif isinstance(names, Shape):
            names = names.names
        mask = [1 if name in names else 0 for name in self.names]
        return tuple(mask)

    def __repr__(self):
        def size_repr(size, items):
            if items is not None:
                items_str = ",".join(items)
                return items_str if len(items_str) <= 20 else f"{size}:{items[0]}..{items[-1]}"
            return size

        strings = [f"{name}{SUPERSCRIPT.get(dim_type, '?')}={size_repr(size, items)}" for size, name, dim_type, items in self._dimensions]
        return '(' + ', '.join(strings) + ')'

    def __eq__(self, other):
        if not isinstance(other, Shape):
            return False
        if self.names != other.names or self.types != other.types:
            return False
        for size1, size2 in zip(self.sizes, other.sizes):
            equal = size1 == size2
            assert isinstance(equal, (bool, math.Tensor))
            if isinstance(equal, math.Tensor):
                equal = equal.all
            if not equal:
                return False
        for names1, names2 in zip(self.item_names, other.item_names):
            if names1 != names2:
                return False
        return True

    def __ne__(self, other):
        return not self == other

    def __bool__(self):
        return self.rank > 0

    def _reorder(self, names: Union[tuple, list, 'Shape']) -> 'Shape':
        assert len(names) == self.rank
        if isinstance(names, Shape):
            names = names.names
        order = [self.index(n) for n in names]
        return self[order]

    def _order_group(self, names: Union[tuple, list, 'Shape']) -> list:
        """ Reorders the dimensions of this `Shape` so that `names` are clustered together and occur in the specified order. """
        if isinstance(names, Shape):
            names = names.names
        result = []
        for dim in self.names:
            if dim not in result:
                if dim in names:
                    result.extend(names)
                else:
                    result.append(dim)
        return result

    def __and__(self, other):
        return merge_shapes(self, other)

    def _expand(self, dim: 'Shape', pos=None) -> 'Shape':
        """**Deprecated.** Use `phiml.math.merge_shapes()` or `phiml.math.concat_shapes()` instead. """
        warnings.warn("Shape.expand() is deprecated. Use merge_shapes() or concat_shapes() instead.", DeprecationWarning)
        if not dim:
            return self
        assert dim.name not in self, f"Cannot expand shape {self} by {dim} because dimension already exists."
        assert isinstance(dim, Shape) and dim.rank == 1, f"Shape.expand() requires a single dimension as a Shape but got {dim}"
        if pos is None:
            same_type_dims = self[[i for i, t in enumerate(self.types) if t == dim.type]]
            if len(same_type_dims) > 0:
                pos = self.index(same_type_dims.names[0])
            else:
                pos = {BATCH_DIM: 0, INSTANCE_DIM: self.batch_rank, SPATIAL_DIM: self.batch.rank + self.instance_rank, CHANNEL_DIM: self.rank + 1}[dim.type]
        elif pos < 0:
            pos += self.rank + 1
        sizes = list(self.sizes)
        names = list(self.names)
        types = list(self.types)
        item_names = list(self.item_names)
        sizes.insert(pos, dim.size)
        names.insert(pos, dim.name)
        types.insert(pos, dim.type)
        item_names.insert(pos, dim.item_names[0])
        return Shape(tuple(sizes), tuple(names), tuple(types), tuple(item_names))

    def without(self, dims: 'DimFilter') -> 'Shape':
        """
        Builds a new shape from this one that is missing all given dimensions.
        Dimensions in `dims` that are not part of this Shape are ignored.
        
        The complementary operation is `Shape.only()`.

        Args:
          dims: Single dimension (str) or instance of dimensions (tuple, list, Shape)
          dims: Dimensions to exclude as `str` or `tuple` or `list` or `Shape`. Dimensions that are not included in this shape are ignored.

        Returns:
          Shape without specified dimensions
        """
        if dims is None:  # subtract none
            return self
        elif callable(dims):
            dims = dims(self)
        if isinstance(dims, str):
            return self[[i for i in range(self.rank) if self.names[i] not in parse_dim_order(dims)]]
        elif isinstance(dims, Shape):
            return self[[i for i in range(self.rank) if self.names[i] not in dims.names]]
        if isinstance(dims, (tuple, list, set)) and all([isinstance(d, str) for d in dims]):
            return self[[i for i in range(self.rank) if self.names[i] not in dims]]
        elif isinstance(dims, (tuple, list, set)):
            result = self
            for wo in dims:
                result = result.without(wo)
            return result
        else:
            raise ValueError(dims)

    def only(self, dims: 'DimFilter', reorder=False):
        """
        Builds a new shape from this one that only contains the given dimensions.
        Dimensions in `dims` that are not part of this Shape are ignored.
        
        The complementary operation is `Shape.without()`.

        Args:
          dims: comma-separated dimension names (str) or instance of dimensions (tuple, list, Shape) or filter function.
          reorder: If `False`, keeps the dimension order as defined in this shape.
            If `True`, reorders the dimensions of this shape to match the order of `dims`.

        Returns:
          Shape containing only specified dimensions

        """
        if dims is None:  # keep none
            return EMPTY_SHAPE
        if callable(dims):
            dims = dims(self)
        if isinstance(dims, str):
            dims = parse_dim_order(dims)
        if isinstance(dims, Shape):
            dims = dims.names
        if isinstance(dims, (tuple, list, set)):
            if all(isinstance(d, int) for d in dims):
                if not reorder:
                    dims = tuple(sorted(dims))
                return self[dims]
            dim_names = []
            for d in dims:
                if callable(d):
                    d = d(self)
                if isinstance(d, str):
                    dim_names.append(d)
                elif isinstance(d, Shape):
                    dim_names.extend(d.names)
                else:
                    raise ValueError(f"Format not understood for Shape.only(): {dims}")
            if reorder:
                dim_names = [d.name if isinstance(d, Shape) else d for d in dim_names]
                assert all(isinstance(d, str) for d in dim_names)
                return self[[self.names.index(d) for d in dim_names if d in self.names]]
            else:
                dim_names = [d.name if isinstance(d, Shape) else d for d in dim_names]
                assert all(isinstance(d, str) for d in dim_names)
                return self[[i for i in range(self.rank) if self.names[i] in dim_names]]
        raise ValueError(dims)

    @property
    def rank(self) -> int:
        """
        Returns the number of dimensions.
        Equal to `len(shape)`.

        See `Shape.is_empty`, `Shape.batch_rank`, `Shape.spatial_rank`, `Shape.channel_rank`.
        """
        return len(self.sizes)

    @property
    def batch_rank(self) -> int:
        """ Number of batch dimensions """
        return sum([1 for ty in self.types if ty == BATCH_DIM])

    @property
    def instance_rank(self) -> int:
        return sum([1 for ty in self.types if ty == INSTANCE_DIM])

    @property
    def spatial_rank(self) -> int:
        """ Number of spatial dimensions """
        return sum([1 for ty in self.types if ty == SPATIAL_DIM])

    @property
    def dual_rank(self) -> int:
        """ Number of spatial dimensions """
        return sum([1 for ty in self.types if ty == DUAL_DIM])

    @property
    def channel_rank(self) -> int:
        """ Number of channel dimensions """
        return sum([1 for ty in self.types if ty == CHANNEL_DIM])

    @property
    def well_defined(self):
        """
        Returns `True` if no dimension size is `None`.

        Shapes with undefined sizes may be used in `phiml.math.tensor()`, `phiml.math.wrap()`, `phiml.math.stack()` or `phiml.math.concat()`.

        To create an undefined size, call a constructor function (`batch()`, `spatial()`, `channel()`, `instance()`)
        with positional `str` arguments, e.g. `spatial('x')`.
        """
        for size in self.sizes:
            if size is None:
                return False
        return True

    @property
    def shape(self) -> 'Shape':
        """
        Higher-order `Shape`.
        The returned shape will always contain the channel dimension `dims` with a size equal to the `Shape.rank` of this shape.

        For uniform shapes, `Shape.shape` will only contain the dimension `dims` but the shapes of [non-uniform shapes](https://tum-pbs.github.io/PhiML/Non_Uniform.html)
        may contain additional dimensions.

        See Also:
            `Shape.is_uniform`.

        Returns:
            `Shape`.
        """
        from . import Tensor
        shape = Shape((self.rank,), ('dims',), (CHANNEL_DIM,), (self.names,))
        for size in self.sizes:
            if isinstance(size, Tensor):
                shape = shape & size.shape
        return shape

    @property
    def is_uniform(self) -> bool:
        """
        A shape is uniform if all its sizes have a single integer value.

        See Also:
            `Shape.is_non_uniform`, `Shape.shape`.
        """
        return all(isinstance(s, int) for s in self.sizes)

    @property
    def is_non_uniform(self) -> bool:
        """
        A shape is non-uniform if the size of any dimension varies along another dimension.

        See Also:
            `Shape.is_uniform`, `Shape.shape`.
        """
        return not self.is_uniform

    @property
    def non_uniform(self) -> 'Shape':
        """
        Returns only the non-uniform dimensions of this shape, i.e. the dimensions whose size varies along another dimension.
        """
        from . import Tensor
        indices = [i for i, size in enumerate(self.sizes) if isinstance(size, Tensor) and size.rank > 0]
        return self[indices]

    def with_size(self, size: Union[int, Tuple[str, ...]]):
        """
        Only for single-dimension shapes.
        Returns a `Shape` representing this dimension but with a different size.

        See Also:
            `Shape.with_sizes()`.

        Args:
            size: Replacement size for this dimension.

        Returns:
            `Shape`
        """
        assert self.rank == 1, "Shape.with_size() is only defined for shapes of rank 1."
        return self.with_sizes([size])

    def with_sizes(self, sizes: Union[Sequence[int], Sequence[Tuple[str, ...]], 'Shape', int], keep_item_names=True):
        """
        Returns a new `Shape` matching the dimension names and types of `self` but with different sizes.

        See Also:
            `Shape.with_size()`.

        Args:
            sizes: One of

                * `tuple` / `list` of same length as `self` containing replacement sizes or replacement item names.
                * `Shape` of any rank. Replaces sizes for dimensions shared by `sizes` and `self`.
                * `int`: new size for all dimensions

            keep_item_names: If `False`, forgets all item names.
                If `True`, keeps item names where the size does not change.

        Returns:
            `Shape` with same names and types as `self`.
        """
        if isinstance(sizes, int):
            sizes = [sizes] * len(self.sizes)
        if isinstance(sizes, Shape):
            item_names = [sizes.get_item_names(dim) if dim in sizes else self.get_item_names(dim) for dim in self.names]
            sizes = [sizes.get_size(dim) if dim in sizes else s for dim, s in self._named_sizes]
            return Shape(tuple(sizes), self.names, self.types, tuple(item_names))
        else:
            assert len(sizes) == len(self.sizes), f"Cannot create shape from {self} with sizes {sizes}"
            sizes_ = []
            item_names = []
            for i, obj in enumerate(sizes):
                new_size, new_item_names = Shape._size_and_item_names_from_obj(obj, self.sizes[i], self.item_names[i], keep_item_names)
                sizes_.append(new_size)
                item_names.append(new_item_names)
            return Shape(tuple(sizes_), self.names, self.types, tuple(item_names))

    @staticmethod
    def _size_and_item_names_from_obj(obj, prev_size, prev_item_names, keep_item_names=True):
        if isinstance(obj, str):
            obj = [s.strip() for s in obj.split(',')]
        if isinstance(obj, (tuple, list)):
            return len(obj), tuple(obj)
        elif isinstance(obj, Number):
            return obj, prev_item_names if keep_item_names and (prev_size is None or _size_equal(obj, prev_size)) else None
        elif isinstance(obj, math.Tensor) or obj is None:
            return obj, None
        elif isinstance(obj, Shape):
            return obj.rank, obj.names
        else:
            raise ValueError(f"sizes can only contain int, str or Tensor but got {type(obj)}")

    def without_sizes(self):
        """
        Returns:
            `Shape` with all sizes undefined (`None`)
        """
        return Shape((None,) * self.rank, self.names, self.types, (None,) * self.rank)

    def _replace_single_size(self, dim: str, size: int, keep_item_names: bool = False):
        new_sizes = list(self.sizes)
        new_sizes[self.index(dim)] = size
        return self.with_sizes(new_sizes, keep_item_names=keep_item_names)

    def with_dim_size(self, dim: Union[str, 'Shape'], size: Union[int, 'math.Tensor', str, tuple, list], keep_item_names=True):
        """
        Returns a new `Shape` that has a different size for `dim`.

        Args:
            dim: Dimension for which to replace the size, `Shape` or `str`.
            size: New size, `int` or `Tensor`

        Returns:
            `Shape` with same names and types as `self`.
        """
        if isinstance(dim, Shape):
            dim = dim.name
        assert isinstance(dim, str)
        new_size, new_item_names = Shape._size_and_item_names_from_obj(size, self.get_size(dim), self.get_item_names(dim), keep_item_names)
        return self.replace(dim, Shape((new_size,), (dim,), (self.get_type(dim),), (new_item_names,)), keep_item_names=keep_item_names)

    def _with_names(self, names: Union[str, tuple, list]):
        if isinstance(names, str):
            names = parse_dim_names(names, self.rank)
            names = [n if n is not None else o for n, o in zip(names, self.names)]
        return Shape(self.sizes, tuple(names), self.types, self.item_names)

    def _replace_names_and_types(self,
                                 dims: Union['Shape', str, tuple, list],
                                 new: Union['Shape', str, tuple, list]) -> 'Shape':
        """
        Returns a copy of `self` with `dims` replaced by `new`.
        Dimensions that are not present in `self` are ignored.

        The dimension order is preserved.

        Args:
            dims: Dimensions to replace.
            new: New dimensions, must have same length as `dims`.
                If a `Shape` is given, replaces the dimension types and item names as well.

        Returns:
            `Shape` with same rank and dimension order as `self`.
        """
        dims = parse_dim_order(dims)
        sizes = [math.rename_dims(s, dims, new) if isinstance(s, math.Tensor) else s for s in self.sizes]
        new = parse_dim_order(new) if isinstance(new, str) else new
        names = list(self.names)
        types = list(self.types)
        item_names = list(self.item_names)
        for old_name, new_dim in zip(dims, new):
            if old_name in self:
                if isinstance(new_dim, Shape):
                    names[self.index(old_name)] = new_dim.name
                    types[self.index(old_name)] = new_dim.type
                    item_names[self.index(old_name)] = new_dim.item_names[0]
                else:
                    names[self.index(old_name)] = _apply_prefix(new_dim, types[self.index(old_name)])
        return Shape(tuple(sizes), tuple(names), tuple(types), tuple(item_names))

    def replace(self, dims: Union['Shape', str, tuple, list], new: 'Shape', keep_item_names=True, replace_item_names: DimFilter = None) -> 'Shape':
        """
        Returns a copy of `self` with `dims` replaced by `new`.
        Dimensions that are not present in `self` are ignored.

        The dimension order is preserved.

        Args:
            dims: Dimensions to replace.
            new: New dimensions, must have same length as `dims`.
                If a `Shape` is given, replaces the dimension types and item names as well.
            keep_item_names: Keeps existing item names for dimensions where `new` does not specify item names if the new dimension has the same size.
            replace_item_names: For which dims the item names should be replaced as well.

        Returns:
            `Shape` with same rank and dimension order as `self`.
        """
        dims = parse_dim_order(dims)
        assert isinstance(new, Shape), f"new must be a Shape but got {new}"
        names = list(self.names)
        sizes = list(self.sizes)
        types = list(self.types)
        item_names = list(self.item_names)
        for i in self.indices(self.only(replace_item_names)):
            if item_names[i]:
                if len(new) > len(dims):
                    raise NotImplementedError
                else:
                    name_map = {d: n for d, n in zip(dims, new.names)}
                    item_names[i] = tuple([name_map.get(n, n) for n in item_names[i]])
        if len(new) > len(dims):  # Put all in one spot
            assert len(dims) == 1, "Cannot replace 2+ dims by more replacements"
            index = self.index(dims[0])
            return concat_shapes(self[:index], new, self[index+1:])
        for old_name, new_dim in zip(dims, new):
            if old_name in self:
                names[self.index(old_name)] = new_dim.name
                types[self.index(old_name)] = new_dim.type
                if new_dim.item_names[0]:
                    item_names[self.index(old_name)] = new_dim.item_names[0]
                elif not _size_equal(new_dim.size, self.get_size(old_name)) or not keep_item_names:
                    item_names[self.index(old_name)] = None  # forget previous item names
                sizes[self.index(old_name)] = new_dim.size
        replaced = Shape(tuple(sizes), tuple(names), tuple(types), tuple(item_names))
        if len(new) == len(dims):
            return replaced
        to_remove = dims[-(len(dims) - len(new)):]
        return replaced.without(to_remove)

    def _with_types(self, types: Union['Shape', str]):
        """
        Only for internal use.
        Note: This method does not rename dimensions to comply with type requirements (e.g. ~ for dual dims).
        """
        if isinstance(types, Shape):
            return Shape(self.sizes, self.names, tuple([types.get_type(name) if name in types else self_type for name, self_type in zip(self.names, self.types)]), self.item_names)
        elif isinstance(types, str):
            return Shape(self.sizes, self.names, (types,) * self.rank, self.item_names)
        else:
            raise ValueError(types)

    def _with_item_names(self, item_names: tuple):
        return Shape(self.sizes, self.names, self.types, item_names)

    def _with_item_name(self, dim: str, item_name: tuple):
        if dim not in self:
            return self
        item_names = list(self.item_names)
        item_names[self.index(dim)] = item_name
        return Shape(self.sizes, self.names, self.types, tuple(item_names))

    def _perm(self, names: Tuple[str]) -> List[int]:
        assert len(set(names)) == len(names), f"No duplicates allowed but got {names}"
        assert len(names) >= len(self.names), f"Cannot find permutation for {self} given {names} because names {set(self.names) - set(names)} are missing"
        assert len(names) <= len(self.names), f"Cannot find permutation for {self} given {names} because too many names were passed: {names}"
        perm = [self.names.index(name) for name in names]
        return perm

    @property
    def volume(self) -> Union[int, None]:
        """
        Returns the total number of values contained in a tensor of this shape.
        This is the product of all dimension sizes.

        Returns:
            volume as `int` or `Tensor` or `None` if the shape is not `Shape.well_defined`
        """
        from . import Tensor
        for dim, size in self._named_sizes:
            if isinstance(size, Tensor) and size.rank > 0:
                non_uniform_dim = size.shape.names[0]
                shapes = self.unstack(non_uniform_dim)
                return sum(s.volume for s in shapes)
        result = 1
        for size in self.sizes:
            if size is None:
                return None
            result *= size
        return int(result)

    @property
    def is_empty(self) -> bool:
        """ True if this shape has no dimensions. Equivalent to `Shape.rank` `== 0`. """
        return len(self.sizes) == 0

    def after_pad(self, widths: dict) -> 'Shape':
        sizes = list(self.sizes)
        item_names = list(self.item_names)
        for dim, (lo, up) in widths.items():
            if dim in self.names:
                sizes[self.index(dim)] += lo + up
                item_names[self.index(dim)] = None
        return Shape(tuple(sizes), self.names, self.types, tuple(item_names))

    def prepare_gather(self, dim: str, selection: Union[slice, int, 'Shape', str, tuple, list]):
        """
        Parse a slice object for a specific dimension.

        Args:
            dim: Name of dimension to slice.
            selection: Slice object.

        Returns:
            The `selection` with any item names replaced by their `int` indices, ready for gathering along `dim`.
        """
        if isinstance(selection, Shape):
            selection = selection.name if selection.rank == 1 else selection.names
        if isinstance(selection, str) and ',' in selection:
            selection = parse_dim_order(selection)
        if isinstance(selection, str):  # single item name
            item_names = self.get_item_names(dim, fallback_spatial=True)
            assert item_names is not None, f"No item names defined for dim '{dim}' in tensor {self.shape} and dimension size does not match spatial rank."
            assert selection in item_names, f"Accessing tensor.{dim}['{selection}'] failed. Item names are {item_names}."
            selection = item_names.index(selection)
        if isinstance(selection, (tuple, list)):
            selection = list(selection)
            if any([isinstance(s, str) for s in selection]):
                item_names = self.get_item_names(dim, fallback_spatial=True)
                for i, s in enumerate(selection):
                    if isinstance(s, str):
                        assert item_names is not None, f"Accessing tensor.{dim}['{s}'] failed because no item names are present on tensor {self.shape}"
                        assert s in item_names, f"Accessing tensor.{dim}['{s}'] failed. Item names are {item_names}."
                        selection[i] = item_names.index(s)
            if not selection:  # empty
                selection = slice(0, 0)
        return selection

    def resolve_index(self, index: Dict[str, Union[slice, int, 'Shape', str, tuple, list]]) -> Dict[str, Union[slice, int, tuple, list]]:
        """
        Replaces item names by the corresponding indices.

        Args:
            index: n-dimensional index or slice.

        Returns:
            Same index but without any reference to item names.
        """
        return {dim: self.prepare_gather(dim, s) for dim, s in index.items()}

    def after_gather(self, selection: dict) -> 'Shape':
        result = self
        for sel_dim, selection in selection.items():
            if sel_dim not in self.names:
                continue
            selection = self.prepare_gather(sel_dim, selection)
            if isinstance(selection, int):
                if result.is_uniform:
                    result = result.without(sel_dim)
                else:
                    from . import Tensor
                    gathered_sizes = [(s[{sel_dim: selection}] if isinstance(s, Tensor) else s) for s in result.sizes]
                    gathered_sizes = [(int(s) if isinstance(s, Tensor) and s.rank == 0 else s) for s in gathered_sizes]
                    result = result.with_sizes(gathered_sizes, keep_item_names=True).without(sel_dim)
            elif isinstance(selection, slice):
                step = int(selection.step) if selection.step is not None else 1
                start = int(selection.start) if selection.start is not None else (0 if step > 0 else self.get_size(sel_dim)-1)
                stop = int(selection.stop) if selection.stop is not None else (self.get_size(sel_dim) if step > 0 else -1)
                if stop < 0 and step > 0:
                    stop += self.get_size(sel_dim)
                    assert stop >= 0
                if start < 0 and step > 0:
                    start += self.get_size(sel_dim)
                    assert start >= 0
                stop = min(stop, self.get_size(sel_dim))
                new_size = math.to_int64(math.ceil(math.wrap((stop - start) / step)))
                if new_size.rank == 0:
                    new_size = int(new_size)  # NumPy array not allowed because not hashable
                result = result._replace_single_size(sel_dim, new_size, keep_item_names=True)
                if step < 0:
                    result = result.flipped([sel_dim])
                if self.get_item_names(sel_dim) is not None:
                    result = result._with_item_name(sel_dim, tuple(self.get_item_names(sel_dim)[selection]))
            elif isinstance(selection, (tuple, list)):
                result = result._replace_single_size(sel_dim, len(selection))
                if self.get_item_names(sel_dim) is not None:
                    result = result._with_item_name(sel_dim, tuple([self.get_item_names(sel_dim)[i] for i in selection]))
            else:
                raise NotImplementedError(f"{type(selection)} not supported. Only (int, slice) allowed.")
        return result

    def meshgrid(self, names=False):
        """
        Builds a sequence containing all multi-indices within a tensor of this shape.
        All indices are returned as `dict` mapping dimension names to `int` indices.

        The corresponding values can be retrieved from Tensors and other Sliceables using `tensor[index]`.

        This function currently only supports uniform tensors.

        Args:
            names: If `True`, replace indices by their item names if available.

        Returns:
            `dict` iterator.
        """
        assert self.is_uniform, f"Shape.meshgrid() is currently not supported for non-uniform tensors, {self}"
        indices = [0] * self.rank
        while True:
            if names:
                yield {dim: (names[index] if names is not None else index) for dim, index, names in zip(self.names, indices, self.item_names)}
            else:
                yield {dim: index for dim, index in zip(self.names, indices)}
            for i in range(self.rank-1, -1, -1):
                indices[i] = (indices[i] + 1) % self.sizes[i]
                if indices[i] != 0:
                    break
            else:
                return

    def first_index(self, names=False):
        return next(iter(self.meshgrid(names=names)))

    def are_adjacent(self, dims: Union[str, tuple, list, set, 'Shape']):
        indices = self.indices(dims)
        return (max(indices) - min(indices)) == len(dims) - 1

    def __add__(self, other):
        return self._op2(other, lambda s, o: s + o, 0)

    def __radd__(self, other):
        return self._op2(other, lambda s, o: o + s, 0)

    def __sub__(self, other):
        return self._op2(other, lambda s, o: s - o, 0)

    def __rsub__(self, other):
        return self._op2(other, lambda s, o: o - s, 0)

    def __mul__(self, other):
        return self._op2(other, lambda s, o: s * o, 1)

    def __rmul__(self, other):
        return self._op2(other, lambda s, o: o * s, 1)

    def _op2(self, other, fun, default: int):
        if isinstance(other, int):
            return Shape(tuple([fun(s, other) for s in self.sizes]), self.names, self.types, (None,) * self.rank)
        elif isinstance(other, Shape):
            merged = self.without_sizes() & other.without_sizes()
            sizes = ()
            for dim in merged.names:
                self_val = self.get_size(dim) if dim in self else default
                other_val = other.get_size(dim) if dim in other else default
                sizes += (fun(self_val, other_val),)
            return merged.with_sizes(sizes)
        else:
            return NotImplemented

    def __hash__(self):
        return hash(self.names)
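
A minimal usage sketch of the slicing and arithmetic methods above, using the `batch`, `spatial` and `channel` constructors from this module (the dimension names and sizes are purely illustrative):

from phiml.math import batch, spatial, channel

s = batch(examples=4) & spatial(x=10, y=8) & channel(vector='x,y')  # __and__ merges shapes
s.without('x')   # drop the spatial dim 'x'
s.only('x,y')    # keep only the listed dims
s.with_sizes(1)  # same names and types, every size set to 1
s + 2            # adds 2 to every size; item names are forgotten
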

Instance variables

var batch : phiml.math._shape.Shape

Filters this shape, returning only the batch dimensions as a new Shape object.

See also: Shape.batch, Shape.spatial, Shape.instance, Shape.channel, Shape.dual, Shape.non_batch, Shape.non_spatial, Shape.non_instance, Shape.non_channel, Shape.non_dual.

Returns

New Shape object

Expand source code
@property
def batch(self) -> 'Shape':
    """
    Filters this shape, returning only the batch dimensions as a new `Shape` object.

    See also:
        `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.

    Returns:
        New `Shape` object
    """
    return self[[i for i, t in enumerate(self.types) if t == BATCH_DIM]]
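
For example, the type filters select dimensions by their kind (a small sketch with illustrative dims):

from phiml.math import batch, spatial, channel

s = batch(examples=4) & spatial(x=10, y=8) & channel(vector=2)
s.batch      # only the batch dim 'examples'
s.spatial    # only 'x' and 'y'
s.non_batch  # 'x', 'y' and 'vector'
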
var batch_rank : int

Number of batch dimensions

Expand source code
@property
def batch_rank(self) -> int:
    """ Number of batch dimensions """
    return sum([1 for ty in self.types if ty == BATCH_DIM])
var channel : phiml.math._shape.Shape

Filters this shape, returning only the channel dimensions as a new Shape object.

See also: Shape.batch, Shape.spatial, Shape.instance, Shape.channel, Shape.dual, Shape.non_batch, Shape.non_spatial, Shape.non_instance, Shape.non_channel, Shape.non_dual.

Returns

New Shape object

Expand source code
@property
def channel(self) -> 'Shape':
    """
    Filters this shape, returning only the channel dimensions as a new `Shape` object.

    See also:
        `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.

    Returns:
        New `Shape` object
    """
    return self[[i for i, t in enumerate(self.types) if t == CHANNEL_DIM]]
var channel_rank : int

Number of channel dimensions

Expand source code
@property
def channel_rank(self) -> int:
    """ Number of channel dimensions """
    return sum([1 for ty in self.types if ty == CHANNEL_DIM])
var dim_type
Expand source code
@property
def dim_type(self):
    types = set(self.types)
    assert len(types) == 1, f"Shape contains multiple types: {self}"
    return DIM_FUNCTIONS[next(iter(types))]
var dual : phiml.math._shape.Shape

Filters this shape, returning only the dual dimensions as a new Shape object.

See also: Shape.batch, Shape.spatial, Shape.instance, Shape.channel, Shape.dual, Shape.non_batch, Shape.non_spatial, Shape.non_instance, Shape.non_channel, Shape.non_dual.

Returns

New Shape object

Expand source code
@property
def dual(self) -> 'Shape':
    """
    Filters this shape, returning only the dual dimensions as a new `Shape` object.

    See also:
        `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.

    Returns:
        New `Shape` object
    """
    return self[[i for i, t in enumerate(self.types) if t == DUAL_DIM]]
var dual_rank : int

Number of dual dimensions

Expand source code
@property
def dual_rank(self) -> int:
    """ Number of spatial dimensions """
    return sum([1 for ty in self.types if ty == DUAL_DIM])
var instance : phiml.math._shape.Shape

Filters this shape, returning only the instance dimensions as a new Shape object.

See also: Shape.batch, Shape.spatial, Shape.instance, Shape.channel, Shape.dual, Shape.non_batch, Shape.non_spatial, Shape.non_instance, Shape.non_channel, Shape.non_dual.

Returns

New Shape object

Expand source code
@property
def instance(self) -> 'Shape':
    """
    Filters this shape, returning only the instance dimensions as a new `Shape` object.

    See also:
        `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.

    Returns:
        New `Shape` object
    """
    return self[[i for i, t in enumerate(self.types) if t == INSTANCE_DIM]]
var instance_rank : int
Expand source code
@property
def instance_rank(self) -> int:
    return sum([1 for ty in self.types if ty == INSTANCE_DIM])
var is_empty : bool

True if this shape has no dimensions. Equivalent to Shape.rank == 0.

Expand source code
@property
def is_empty(self) -> bool:
    """ True if this shape has no dimensions. Equivalent to `Shape.rank` `== 0`. """
    return len(self.sizes) == 0
var is_non_uniform : bool

A shape is non-uniform if the size of any dimension varies along another dimension.

See Also: Shape.is_uniform, Shape.shape.

Expand source code
@property
def is_non_uniform(self) -> bool:
    """
    A shape is non-uniform if the size of any dimension varies along another dimension.

    See Also:
        `Shape.is_uniform`, `Shape.shape`.
    """
    return not self.is_uniform
var is_uniform : bool

A shape is uniform if all its sizes have a single integer value.

See Also: Shape.is_non_uniform, Shape.shape.

Expand source code
@property
def is_uniform(self) -> bool:
    """
    A shape is uniform if all its sizes have a single integer value.

    See Also:
        `Shape.is_non_uniform`, `Shape.shape`.
    """
    return all(isinstance(s, int) for s in self.sizes)
var name : str

Only for Shapes containing exactly one single dimension. Returns the name of the dimension.

See Also: Shape.names.

Expand source code
@property
def name(self) -> str:
    """
    Only for Shapes containing exactly one single dimension.
    Returns the name of the dimension.

    See Also:
        `Shape.names`.
    """
    assert self.rank == 1, f"Shape.name is only defined for shapes of rank 1. shape={self}"
    return self.names[0]
var name_list
Expand source code
@property
def name_list(self):
    return list(self.names)
var names

Ordered dimension names as tuple[str].

See Also: Shape.name.

var non_batch : phiml.math._shape.Shape

Filters this shape, returning only the non-batch dimensions as a new Shape object.

See also: Shape.batch, Shape.spatial, Shape.instance, Shape.channel, Shape.dual, Shape.non_batch, Shape.non_spatial, Shape.non_instance, Shape.non_channel, Shape.non_dual.

Returns

New Shape object

Expand source code
@property
def non_batch(self) -> 'Shape':
    """
    Filters this shape, returning only the non-batch dimensions as a new `Shape` object.

    See also:
        `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.

    Returns:
        New `Shape` object
    """
    return self[[i for i, t in enumerate(self.types) if t != BATCH_DIM]]
var non_channel : phiml.math._shape.Shape

Filters this shape, returning only the non-channel dimensions as a new Shape object.

See also: Shape.batch, Shape.spatial, Shape.instance, Shape.channel, Shape.dual, Shape.non_batch, Shape.non_spatial, Shape.non_instance, Shape.non_channel, Shape.non_dual.

Returns

New Shape object

Expand source code
@property
def non_channel(self) -> 'Shape':
    """
    Filters this shape, returning only the non-channel dimensions as a new `Shape` object.

    See also:
        `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.

    Returns:
        New `Shape` object
    """
    return self[[i for i, t in enumerate(self.types) if t != CHANNEL_DIM]]
var non_dual : phiml.math._shape.Shape

Filters this shape, returning only the non-dual dimensions as a new Shape object.

See also: Shape.batch, Shape.spatial, Shape.instance, Shape.channel, Shape.dual, Shape.non_batch, Shape.non_spatial, Shape.non_instance, Shape.non_channel, Shape.non_dual.

Returns

New Shape object

Expand source code
@property
def non_dual(self) -> 'Shape':
    """
    Filters this shape, returning only the non-dual dimensions as a new `Shape` object.

    See also:
        `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.

    Returns:
        New `Shape` object
    """
    return self[[i for i, t in enumerate(self.types) if t != DUAL_DIM]]
var non_instance : phiml.math._shape.Shape

Filters this shape, returning only the non-instance dimensions as a new Shape object.

See also: Shape.batch, Shape.spatial, Shape.instance, Shape.channel, Shape.dual, Shape.non_batch, Shape.non_spatial, Shape.non_instance, Shape.non_channel, Shape.non_dual.

Returns

New Shape object

Expand source code
@property
def non_instance(self) -> 'Shape':
    """
    Filters this shape, returning only the non-instance dimensions as a new `Shape` object.

    See also:
        `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.

    Returns:
        New `Shape` object
    """
    return self[[i for i, t in enumerate(self.types) if t != INSTANCE_DIM]]
var non_primal : phiml.math._shape.Shape

Filters this shape, returning only batch and dual dimensions as a new Shape object.

See also: Shape.batch, Shape.spatial, Shape.instance, Shape.channel, Shape.dual, Shape.non_batch, Shape.non_spatial, Shape.non_instance, Shape.non_channel, Shape.non_dual.

Returns

New Shape object

Expand source code
@property
def non_primal(self) -> 'Shape':
    """
    Filters this shape, returning only batch and dual dimensions as a new `Shape` object.

    See also:
        `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.

    Returns:
        New `Shape` object
    """
    return self[[i for i, t in enumerate(self.types) if t in [DUAL_DIM, BATCH_DIM]]]
var non_singleton : phiml.math._shape.Shape

Filters this shape, returning only non-singleton dimensions as a new Shape object. Dimensions are singleton if their size is exactly 1.

Returns

New Shape object

Expand source code
@property
def non_singleton(self) -> 'Shape':
    """
    Filters this shape, returning only non-singleton dimensions as a new `Shape` object.
    Dimensions are singleton if their size is exactly `1`.

    Returns:
        New `Shape` object
    """
    return self[[i for i, s in enumerate(self.sizes) if not _size_equal(s, 1)]]
var non_spatial : phiml.math._shape.Shape

Filters this shape, returning only the non-spatial dimensions as a new Shape object.

See also: Shape.batch, Shape.spatial, Shape.instance, Shape.channel, Shape.dual, Shape.non_batch, Shape.non_spatial, Shape.non_instance, Shape.non_channel, Shape.non_dual.

Returns

New Shape object

Expand source code
@property
def non_spatial(self) -> 'Shape':
    """
    Filters this shape, returning only the non-spatial dimensions as a new `Shape` object.

    See also:
        `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.

    Returns:
        New `Shape` object
    """
    return self[[i for i, t in enumerate(self.types) if t != SPATIAL_DIM]]
var non_uniform : phiml.math._shape.Shape

Returns only the non-uniform dimensions of this shape, i.e. the dimensions whose size varies along another dimension.

Expand source code
@property
def non_uniform(self) -> 'Shape':
    """
    Returns only the non-uniform dimensions of this shape, i.e. the dimensions whose size varies along another dimension.
    """
    from . import Tensor
    indices = [i for i, size in enumerate(self.sizes) if isinstance(size, Tensor) and size.rank > 0]
    return self[indices]
var primal : phiml.math._shape.Shape

Filters this shape, returning only the primal (non-batch, non-dual) dimensions as a new Shape object.

See also: Shape.batch, Shape.spatial, Shape.instance, Shape.channel, Shape.dual, Shape.non_batch, Shape.non_spatial, Shape.non_instance, Shape.non_channel, Shape.non_dual.

Returns

New Shape object

Expand source code
@property
def primal(self) -> 'Shape':
    """
    Filters this shape, returning only the primal (non-batch, non-dual) dimensions as a new `Shape` object.

    See also:
        `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.

    Returns:
        New `Shape` object
    """
    return self[[i for i, t in enumerate(self.types) if t not in [DUAL_DIM, BATCH_DIM]]]
var rank : int

Returns the number of dimensions. Equal to len(shape).

See Shape.is_empty, Shape.batch_rank, Shape.spatial_rank, Shape.channel_rank.

Expand source code
@property
def rank(self) -> int:
    """
    Returns the number of dimensions.
    Equal to `len(shape)`.

    See `Shape.is_empty`, `Shape.batch_rank`, `Shape.spatial_rank`, `Shape.channel_rank`.
    """
    return len(self.sizes)
var reversed
Expand source code
@property
def reversed(self):
    return Shape(tuple(reversed(self.sizes)), tuple(reversed(self.names)), tuple(reversed(self.types)), tuple(reversed(self.item_names)))
var shape : phiml.math._shape.Shape

Higher-order Shape. The returned shape will always contain the channel dimension dims with a size equal to the Shape.rank of this shape.

For uniform shapes, Shape.shape will only contain the dimension dims but the shapes of non-uniform shapes may contain additional dimensions.

See Also: Shape.is_uniform.

Returns

Shape.

Expand source code
@property
def shape(self) -> 'Shape':
    """
    Higher-order `Shape`.
    The returned shape will always contain the channel dimension `dims` with a size equal to the `Shape.rank` of this shape.

    For uniform shapes, `Shape.shape` will only contain the dimension `dims` but the shapes of [non-uniform shapes](https://tum-pbs.github.io/PhiML/Non_Uniform.html)
    may contain additional dimensions.

    See Also:
        `Shape.is_uniform`.

    Returns:
        `Shape`.
    """
    from . import Tensor
    shape = Shape((self.rank,), ('dims',), (CHANNEL_DIM,), (self.names,))
    for size in self.sizes:
        if isinstance(size, Tensor):
            shape = shape & size.shape
    return shape
var singleton : phiml.math._shape.Shape

Filters this shape, returning only singleton dimensions as a new Shape object. Dimensions are singleton if their size is exactly 1.

Returns

New Shape object

Expand source code
@property
def singleton(self) -> 'Shape':
    """
    Filters this shape, returning only singleton dimensions as a new `Shape` object.
    Dimensions are singleton if their size is exactly `1`.

    Returns:
        New `Shape` object
    """
    return self[[i for i, s in enumerate(self.sizes) if _size_equal(s, 1)]]
var size : int

Only for Shapes containing exactly one single dimension. Returns the size of the dimension.

See Also: Shape.sizes, Shape.get_size().

Expand source code
@property
def size(self) -> int:
    """
    Only for Shapes containing exactly one single dimension.
    Returns the size of the dimension.

    See Also:
        `Shape.sizes`, `Shape.get_size()`.
    """
    assert self.rank == 1, f"Shape.size is only defined for shapes of rank 1 but has dims {self}"
    return self.sizes[0]
var sizes

Ordered dimension sizes as tuple. The size of a dimension can be an int or a Tensor for non-uniform shapes.

See Also: Shape.get_size(), Shape.size, Shape.shape.

var spatial : phiml.math._shape.Shape

Filters this shape, returning only the spatial dimensions as a new Shape object.

See also: Shape.batch, Shape.spatial, Shape.instance, Shape.channel, Shape.dual, Shape.non_batch, Shape.non_spatial, Shape.non_instance, Shape.non_channel, Shape.non_dual.

Returns

New Shape object

Expand source code
@property
def spatial(self) -> 'Shape':
    """
    Filters this shape, returning only the spatial dimensions as a new `Shape` object.

    See also:
        `Shape.batch`, `Shape.spatial`, `Shape.instance`, `Shape.channel`, `Shape.dual`, `Shape.non_batch`, `Shape.non_spatial`, `Shape.non_instance`, `Shape.non_channel`, `Shape.non_dual`.

    Returns:
        New `Shape` object
    """
    return self[[i for i, t in enumerate(self.types) if t == SPATIAL_DIM]]
var spatial_rank : int

Number of spatial dimensions

Expand source code
@property
def spatial_rank(self) -> int:
    """ Number of spatial dimensions """
    return sum([1 for ty in self.types if ty == SPATIAL_DIM])
var type : str

Only for Shapes containing exactly one single dimension. Returns the type of the dimension.

See Also: Shape.get_type().

Expand source code
@property
def type(self) -> str:
    """
    Only for Shapes containing exactly one single dimension.
    Returns the type of the dimension.

    See Also:
        `Shape.get_type()`.
    """
    assert self.rank == 1, "Shape.type is only defined for shapes of rank 1."
    return self.types[0]
var untyped_dict

Returns

dict containing dimension names as keys. The values are either the item names as tuple if available, otherwise the size.

Expand source code
@property
def untyped_dict(self):
    """
    Returns:
        `dict` containing dimension names as keys.
            The values are either the item names as `tuple` if available, otherwise the size.
    """
    return {name: self.get_item_names(i) or self.get_size(i) for i, name in enumerate(self.names)}
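
For example (illustrative dims), item names take precedence over sizes in the returned dict:

from phiml.math import spatial, channel

s = spatial(x=10) & channel(vector='x,y')
s.untyped_dict   # {'x': 10, 'vector': ('x', 'y')}
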
var volume : Optional[int]

Returns the total number of values contained in a tensor of this shape. This is the product of all dimension sizes.

Returns

volume as int or Tensor or None if the shape is not Shape.well_defined

Expand source code
@property
def volume(self) -> Union[int, None]:
    """
    Returns the total number of values contained in a tensor of this shape.
    This is the product of all dimension sizes.

    Returns:
        volume as `int` or `Tensor` or `None` if the shape is not `Shape.well_defined`
    """
    from . import Tensor
    for dim, size in self._named_sizes:
        if isinstance(size, Tensor) and size.rank > 0:
            non_uniform_dim = size.shape.names[0]
            shapes = self.unstack(non_uniform_dim)
            return sum(s.volume for s in shapes)
    result = 1
    for size in self.sizes:
        if size is None:
            return None
        result *= size
    return int(result)
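
For example (illustrative dims), the volume is the product of all sizes and becomes None once any size is undefined:

from phiml.math import spatial, channel

spatial(x=10, y=8).volume                    # 80
(spatial(x=10) & channel('vector')).volume   # None, size of 'vector' is undefined
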
var well_defined

Returns True if no dimension size is None.

Shapes with undefined sizes may be used in tensor(), wrap(), stack() or concat().

To create an undefined size, call a constructor function (batch(), spatial(), channel(), instance()) with positional str arguments, e.g. spatial('x').

Expand source code
@property
def well_defined(self):
    """
    Returns `True` if no dimension size is `None`.

    Shapes with undefined sizes may be used in `phiml.math.tensor()`, `phiml.math.wrap()`, `phiml.math.stack()` or `phiml.math.concat()`.

    To create an undefined size, call a constructor function (`batch()`, `spatial()`, `channel()`, `instance()`)
    with positional `str` arguments, e.g. `spatial('x')`.
    """
    for size in self.sizes:
        if size is None:
            return False
    return True
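
For example:

from phiml.math import spatial

spatial(x=10).well_defined   # True
spatial('x').well_defined    # False, the size of 'x' has not been specified
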

Methods

def after_gather(self, selection: dict) ‑> phiml.math._shape.Shape
Expand source code
def after_gather(self, selection: dict) -> 'Shape':
    result = self
    for sel_dim, selection in selection.items():
        if sel_dim not in self.names:
            continue
        selection = self.prepare_gather(sel_dim, selection)
        if isinstance(selection, int):
            if result.is_uniform:
                result = result.without(sel_dim)
            else:
                from . import Tensor
                gathered_sizes = [(s[{sel_dim: selection}] if isinstance(s, Tensor) else s) for s in result.sizes]
                gathered_sizes = [(int(s) if isinstance(s, Tensor) and s.rank == 0 else s) for s in gathered_sizes]
                result = result.with_sizes(gathered_sizes, keep_item_names=True).without(sel_dim)
        elif isinstance(selection, slice):
            step = int(selection.step) if selection.step is not None else 1
            start = int(selection.start) if selection.start is not None else (0 if step > 0 else self.get_size(sel_dim)-1)
            stop = int(selection.stop) if selection.stop is not None else (self.get_size(sel_dim) if step > 0 else -1)
            if stop < 0 and step > 0:
                stop += self.get_size(sel_dim)
                assert stop >= 0
            if start < 0 and step > 0:
                start += self.get_size(sel_dim)
                assert start >= 0
            stop = min(stop, self.get_size(sel_dim))
            new_size = math.to_int64(math.ceil(math.wrap((stop - start) / step)))
            if new_size.rank == 0:
                new_size = int(new_size)  # NumPy array not allowed because not hashable
            result = result._replace_single_size(sel_dim, new_size, keep_item_names=True)
            if step < 0:
                result = result.flipped([sel_dim])
            if self.get_item_names(sel_dim) is not None:
                result = result._with_item_name(sel_dim, tuple(self.get_item_names(sel_dim)[selection]))
        elif isinstance(selection, (tuple, list)):
            result = result._replace_single_size(sel_dim, len(selection))
            if self.get_item_names(sel_dim) is not None:
                result = result._with_item_name(sel_dim, tuple([self.get_item_names(sel_dim)[i] for i in selection]))
        else:
            raise NotImplementedError(f"{type(selection)} not supported. Only (int, slice) allowed.")
    return result
def after_pad(self, widths: dict) ‑> phiml.math._shape.Shape
Expand source code
def after_pad(self, widths: dict) -> 'Shape':
    sizes = list(self.sizes)
    item_names = list(self.item_names)
    for dim, (lo, up) in widths.items():
        if dim in self.names:
            sizes[self.index(dim)] += lo + up
            item_names[self.index(dim)] = None
    return Shape(tuple(sizes), self.names, self.types, tuple(item_names))
def are_adjacent(self, dims: Union[str, tuple, list, set, ForwardRef('Shape')])
Expand source code
def are_adjacent(self, dims: Union[str, tuple, list, set, 'Shape']):
    indices = self.indices(dims)
    return (max(indices) - min(indices)) == len(dims) - 1
def as_batch(self)

Returns a copy of this Shape with all dimensions of type batch.

Expand source code
def as_batch(self):
    """Returns a copy of this `Shape` with all dimensions of type *batch*."""
    return batch(**self.untyped_dict)
def as_channel(self)

Returns a copy of this Shape with all dimensions of type channel.

Expand source code
def as_channel(self):
    """Returns a copy of this `Shape` with all dimensions of type *channel*."""
    return channel(**self.untyped_dict)
def as_dual(self)

Returns a copy of this Shape with all dimensions of type dual.

Expand source code
def as_dual(self):
    """Returns a copy of this `Shape` with all dimensions of type *dual*."""
    return dual(**self.untyped_dict)
def as_instance(self)

Returns a copy of this Shape with all dimensions of type instance.

Expand source code
def as_instance(self):
    """Returns a copy of this `Shape` with all dimensions of type *instance*."""
    return instance(**self.untyped_dict)
def as_spatial(self)

Returns a copy of this Shape with all dimensions of type spatial.

Expand source code
def as_spatial(self):
    """Returns a copy of this `Shape` with all dimensions of type *spatial*."""
    return spatial(**self.untyped_dict)
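
For example, the as_* conversions keep dimension names and sizes but change the dimension type; item names, where present, are passed on through untyped_dict:

from phiml.math import spatial

spatial(x=10, y=8).as_batch()   # batch dims with the same names and sizes
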
def assert_all_sizes_defined(self)

Asserts that every dimension size of this shape is defined, i.e. not None, raising an AssertionError naming the first undefined dimension otherwise.

Expand source code
def assert_all_sizes_defined(self):
    """
    Filters this shape, returning only singleton dimensions as a new `Shape` object.
    Dimensions are singleton if their size is exactly `1`.

    Returns:
        New `Shape` object
    """
    for n, s in zip(self.names, self.sizes):
        assert s is not None, f"All sizes must be defined but dim '{n}' is undefined in shape {self}"
def first_index(self, names=False)
Expand source code
def first_index(self, names=False):
    return next(iter(self.meshgrid(names=names)))
def flipped(self, dims: Union[List[str], Tuple[str]])
Expand source code
def flipped(self, dims: Union[List[str], Tuple[str]]):
    item_names = list(self.item_names)
    for dim in dims:
        if dim in self.names:
            dim_i_n = self.get_item_names(dim)
            if dim_i_n is not None:
                item_names[self.index(dim)] = tuple(reversed(dim_i_n))
    return Shape(self.sizes, self.names, self.types, tuple(item_names))
def get_dim_type(self, dim: Union[str, ForwardRef('Shape')]) ‑> Callable

Args

dim
Dimension, either as name str or single-dimension Shape.

Returns

Dimension type, one of batch(), dual(), instance(), spatial(), channel().

Expand source code
def get_dim_type(self, dim: Union[str, 'Shape']) -> Callable:
    """
    Args:
        dim: Dimension, either as name `str` or single-dimension `Shape`.

    Returns:
        Dimension type, one of `batch`, `dual`, `instance`, `spatial`, `channel`.
    """
    return DIM_FUNCTIONS[self.get_type(dim)]
def get_item_names(self, dim: Union[str, ForwardRef('Shape'), int], fallback_spatial=False) ‑> Optional[tuple]

Args

fallback_spatial
If True and no item names are defined for dim and dim is a channel dimension, the spatial dimension names are interpreted as item names along dim in the order they are listed in this Shape.
dim
Dimension, either as int index, str name or single-dimension Shape.

Returns

Item names as tuple or None if not defined.

Expand source code
def get_item_names(self, dim: Union[str, 'Shape', int], fallback_spatial=False) -> Union[tuple, None]:
    """
    Args:
        fallback_spatial: If `True` and no item names are defined for `dim` and `dim` is a channel dimension, the spatial dimension names are interpreted as item names along `dim` in the order they are listed in this `Shape`.
        dim: Dimension, either as `int` index, `str` name or single-dimension `Shape`.

    Returns:
        Item names as `tuple` or `None` if not defined.
    """
    if isinstance(dim, int):
        result = self.item_names[dim]
    elif isinstance(dim, str):
        result = self.item_names[self.index(dim)]
    elif isinstance(dim, Shape):
        assert dim.rank == 1, f"Shape.get_type() only accepts single-dimension Shapes but got {dim}"
        result = self.item_names[self.names.index(dim.name)]
    else:
        raise ValueError(dim)
    if result is not None:
        return result
    elif fallback_spatial and self.spatial_rank == self.get_size(dim) and self.get_type(dim) == CHANNEL_DIM:
        return self.spatial.names
    else:
        return None
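
For example (illustrative dims), a channel dim without stored item names can fall back to the spatial dim names when the sizes match:

from phiml.math import spatial, channel

s = spatial(x=10, y=8) & channel(vector=2)
s.get_item_names('vector')                          # None
s.get_item_names('vector', fallback_spatial=True)   # ('x', 'y')
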
def get_size(self, dim: Union[str, ForwardRef('Shape'), int], default=None)

See Also: Shape.get_sizes(), Shape.size

Args

dim
Dimension, either as name str or single-dimension Shape or index int.
default
(Optional) If the dim does not exist, return this value instead of raising an error.

Returns

Size associated with dim as int or Tensor.

Expand source code
def get_size(self, dim: Union[str, 'Shape', int], default=None):
    """
    See Also:
        `Shape.get_sizes()`, `Shape.size`

    Args:
        dim: Dimension, either as name `str` or single-dimension `Shape` or index `int`.
        default: (Optional) If the dim does not exist, return this value instead of raising an error.

    Returns:
        Size associated with `dim` as `int` or `Tensor`.
    """
    if isinstance(dim, int):
        assert default is None, "Cannot use a default value when passing an int for dim"
        return self.sizes[dim]
    if isinstance(dim, Shape):
        assert dim.rank == 1, f"get_size() requires a single dimension but got {dim}. Use indices() to get multiple sizes."
        dim = dim.name
    if isinstance(dim, str):
        if dim not in self.names:
            if default is None:
                raise KeyError(f"get_size() failed because '{dim}' is not part of Shape {self} and no default value was provided")
            else:
                return default
        return self.sizes[self.names.index(dim)]
    else:
        raise ValueError(f"get_size() requires a single dimension but got {dim}. Use indices() to get multiple sizes.")
def get_sizes(self, dims: Union[tuple, list, ForwardRef('Shape')]) ‑> tuple

See Also: Shape.get_size()

Args

dims
Dimensions as tuple, list or Shape.

Returns

tuple

Expand source code
def get_sizes(self, dims: Union[tuple, list, 'Shape']) -> tuple:
    """
    See Also:
        `Shape.get_size()`

    Args:
        dims: Dimensions as `tuple`, `list` or `Shape`.

    Returns:
        `tuple`
    """
    assert isinstance(dims, (tuple, list, Shape)), f"get_sizes() requires a sequence of dimensions but got {dims}"
    return tuple([self.get_size(dim) for dim in dims])
def get_type(self, dim: Union[str, ForwardRef('Shape')]) ‑> str
Expand source code
def get_type(self, dim: Union[str, 'Shape']) -> str:
    # undocumented, use get_dim_type() instead.
    if isinstance(dim, str):
        return self.types[self.names.index(dim)]
    elif isinstance(dim, Shape):
        assert dim.rank == 1, f"Shape.get_type() only accepts single-dimension Shapes but got {dim}"
        return self.types[self.names.index(dim.name)]
    else:
        raise ValueError(dim)
def get_types(self, dims: Union[tuple, list, ForwardRef('Shape')]) ‑> tuple
Expand source code
def get_types(self, dims: Union[tuple, list, 'Shape']) -> tuple:
    # undocumented, do not use
    if isinstance(dims, (tuple, list)):
        return tuple(self.get_type(n) for n in dims)
    elif isinstance(dims, Shape):
        return tuple(self.get_type(n) for n in dims.names)
    else:
        raise ValueError(dims)
def index(self, dim: Union[str, ForwardRef('Shape'), None]) ‑> int

Finds the index of the dimension within this Shape.

See Also: Shape.indices().

Args

dim
Dimension name or single-dimension Shape.

Returns

Index as int.

Expand source code
def index(self, dim: Union[str, 'Shape', None]) -> int:
    """
    Finds the index of the dimension within this `Shape`.

    See Also:
        `Shape.indices()`.

    Args:
        dim: Dimension name or single-dimension `Shape`.

    Returns:
        Index as `int`.
    """
    if dim is None:
        return None
    elif isinstance(dim, str):
        if dim not in self.names:
            raise ValueError(f"Shape {self} has no dimension '{dim}'")
        return self.names.index(dim)
    elif isinstance(dim, Shape):
        assert dim.rank == 1, f"index() requires a single dimension as input but got {dim}. Use indices() for multiple dimensions."
        return self.names.index(dim.name)
    else:
        raise ValueError(f"index() requires a single dimension as input but got {dim}")
def indices(self, dims: Union[tuple, list, ForwardRef('Shape')]) ‑> Tuple[int]

Finds the indices of the given dimensions within this Shape.

See Also: Shape.index().

Args

dims
Sequence of dimensions as tuple, list or Shape.

Returns

Indices as tuple[int].

Expand source code
def indices(self, dims: Union[tuple, list, 'Shape']) -> Tuple[int]:
    """
    Finds the indices of the given dimensions within this `Shape`.

    See Also:
        `Shape.index()`.

    Args:
        dims: Sequence of dimensions as `tuple`, `list` or `Shape`.

    Returns:
        Indices as `tuple[int]`.
    """
    if isinstance(dims, (list, tuple, set)):
        return tuple([self.index(n) for n in dims if n in self.names])
    elif isinstance(dims, Shape):
        return tuple([self.index(n) for n in dims.names if n in self.names])
    else:
        raise ValueError(f"indices() requires a sequence of dimensions but got {dims}")
def isdisjoint(self, other: Union[ForwardRef('Shape'), tuple, list, str])

Shapes are disjoint if all dimension names of one shape do not occur in the other shape.

Expand source code
def isdisjoint(self, other: Union['Shape', tuple, list, str]):
    """ Shapes are disjoint if all dimension names of one shape do not occur in the other shape. """
    other = parse_dim_order(other)
    return not any(dim in self.names for dim in other)
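For illustration (arbitrary example shapes built with spatial() and channel()):

>>> spatial(x=10, y=8).isdisjoint(channel(vector='x,y'))
True
>>> spatial(x=10, y=8).isdisjoint('y,z')
False
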
def mask(self, names: Union[tuple, list, set, ForwardRef('Shape')])

Returns a binary sequence corresponding to the names of this Shape. A value of 1 means that a dimension of this Shape is contained in names.

Args

names
Dimension names as tuple, list, set or Shape.

Returns

binary sequence

Expand source code
def mask(self, names: Union[tuple, list, set, 'Shape']):
    """
    Returns a binary sequence corresponding to the names of this Shape.
    A value of 1 means that a dimension of this Shape is contained in `names`.

    Args:
      names: Dimension names as `tuple`, `list`, `set` or `Shape`.

    Returns:
      binary sequence

    """
    if isinstance(names, str):
        names = [names]
    elif isinstance(names, Shape):
        names = names.names
    mask = [1 if name in names else 0 for name in self.names]
    return tuple(mask)
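A minimal sketch (arbitrary dimension names):

>>> spatial(x=10, y=8, z=6).mask('y')
(0, 1, 0)
>>> spatial(x=10, y=8, z=6).mask(['x', 'z'])
(1, 0, 1)
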
def meshgrid(self, names=False)

Builds a sequence containing all multi-indices within a tensor of this shape. All indices are returned as dict mapping dimension names to int indices.

The corresponding values can be retrieved from Tensors and other Sliceables using tensor[index].

This function currently only supports uniform tensors.

Args

names
If True, replace indices by their item names if available.

Returns

dict iterator.

Expand source code
def meshgrid(self, names=False):
    """
    Builds a sequence containing all multi-indices within a tensor of this shape.
    All indices are returned as `dict` mapping dimension names to `int` indices.

    The corresponding values can be retrieved from Tensors and other Sliceables using `tensor[index]`.

    This function currently only supports uniform tensors.

    Args:
        names: If `True`, replace indices by their item names if available.

    Returns:
        `dict` iterator.
    """
    assert self.is_uniform, f"Shape.meshgrid() is currently not supported for non-uniform tensors, {self}"
    indices = [0] * self.rank
    while True:
        if names:
            yield {dim: (names[index] if names is not None else index) for dim, index, names in zip(self.names, indices, self.item_names)}
        else:
            yield {dim: index for dim, index in zip(self.names, indices)}
        for i in range(self.rank-1, -1, -1):
            indices[i] = (indices[i] + 1) % self.sizes[i]
            if indices[i] != 0:
                break
        else:
            return
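For illustration, iterating over all multi-indices of a small, arbitrary shape:

>>> list(spatial(x=2, y=2).meshgrid())
[{'x': 0, 'y': 0}, {'x': 0, 'y': 1}, {'x': 1, 'y': 0}, {'x': 1, 'y': 1}]
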
def only(self, dims: DimFilter, reorder=False)

Builds a new shape from this one that only contains the given dimensions. Dimensions in dims that are not part of this Shape are ignored.

The complementary operation is Shape.without().

Args

dims
comma-separated dimension names (str) or instance of dimensions (tuple, list, Shape) or filter function.
reorder
If False, keeps the dimension order as defined in this shape. If True, reorders the dimensions of this shape to match the order of dims.

Returns

Shape containing only specified dimensions

Expand source code
def only(self, dims: 'DimFilter', reorder=False):
    """
    Builds a new shape from this one that only contains the given dimensions.
    Dimensions in `dims` that are not part of this Shape are ignored.
    
    The complementary operation is :func:`Shape.without`.

    Args:
      dims: comma-separated dimension names (str) or instance of dimensions (tuple, list, Shape) or filter function.
      reorder: If `False`, keeps the dimension order as defined in this shape.
        If `True`, reorders the dimensions of this shape to match the order of `dims`.

    Returns:
      Shape containing only specified dimensions

    """
    if dims is None:  # keep none
        return EMPTY_SHAPE
    if callable(dims):
        dims = dims(self)
    if isinstance(dims, str):
        dims = parse_dim_order(dims)
    if isinstance(dims, Shape):
        dims = dims.names
    if isinstance(dims, (tuple, list, set)):
        if all(isinstance(d, int) for d in dims):
            if not reorder:
                dims = tuple(sorted(dims))
            return self[dims]
        dim_names = []
        for d in dims:
            if callable(d):
                d = d(self)
            if isinstance(d, str):
                dim_names.append(d)
            elif isinstance(d, Shape):
                dim_names.extend(d.names)
            else:
                raise ValueError(f"Format not understood for Shape.only(): {dims}")
        if reorder:
            dim_names = [d.name if isinstance(d, Shape) else d for d in dim_names]
            assert all(isinstance(d, str) for d in dim_names)
            return self[[self.names.index(d) for d in dim_names if d in self.names]]
        else:
            dim_names = [d.name if isinstance(d, Shape) else d for d in dim_names]
            assert all(isinstance(d, str) for d in dim_names)
            return self[[i for i in range(self.rank) if self.names[i] in dim_names]]
    raise ValueError(dims)
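A sketch of only() with a type filter and with reordering; the example shape is arbitrary and only the resulting names are shown to stay independent of the Shape repr:

>>> s = batch(examples=32) & spatial(x=10, y=8) & channel(vector=2)
>>> s.only(spatial).names
('x', 'y')
>>> s.only('y,x', reorder=True).names
('y', 'x')
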
def prepare_gather(self, dim: str, selection: Union[slice, int, ForwardRef('Shape'), str, tuple, list])

Parse a slice object for a specific dimension.

Args

dim
Name of dimension to slice.
selection
Slice object.

Returns

Selection with item names resolved to int indices.

Expand source code
def prepare_gather(self, dim: str, selection: Union[slice, int, 'Shape', str, tuple, list]):
    """
    Parse a slice object for a specific dimension.

    Args:
        dim: Name of dimension to slice.
        selection: Slice object.

    Returns:
        Selection with item names resolved to int indices.
    """
    if isinstance(selection, Shape):
        selection = selection.name if selection.rank == 1 else selection.names
    if isinstance(selection, str) and ',' in selection:
        selection = parse_dim_order(selection)
    if isinstance(selection, str):  # single item name
        item_names = self.get_item_names(dim, fallback_spatial=True)
        assert item_names is not None, f"No item names defined for dim '{dim}' in tensor {self.shape} and dimension size does not match spatial rank."
        assert selection in item_names, f"Accessing tensor.{dim}['{selection}'] failed. Item names are {item_names}."
        selection = item_names.index(selection)
    if isinstance(selection, (tuple, list)):
        selection = list(selection)
        if any([isinstance(s, str) for s in selection]):
            item_names = self.get_item_names(dim, fallback_spatial=True)
            for i, s in enumerate(selection):
                if isinstance(s, str):
                    assert item_names is not None, f"Accessing tensor.{dim}['{s}'] failed because no item names are present on tensor {self.shape}"
                    assert s in item_names, f"Accessing tensor.{dim}['{s}'] failed. Item names are {item_names}."
                    selection[i] = item_names.index(s)
        if not selection:  # empty
            selection = slice(0, 0)
    return selection
def replace(self, dims: Union[ForwardRef('Shape'), tuple, list, str], new: Shape, keep_item_names=True, replace_item_names: Union[str, tuple, list, set, ForwardRef('Shape'), Callable] = None) ‑> phiml.math._shape.Shape

Returns a copy of self with dims replaced by new. Dimensions that are not present in self are ignored.

The dimension order is preserved.

Args

dims
Dimensions to replace.
new
New dimensions, must have same length as dims. If a Shape is given, replaces the dimension types and item names as well.
keep_item_names
Keeps existing item names for dimensions where new does not specify item names if the new dimension has the same size.
replace_item_names
For which dims the item names should be replaced as well.

Returns

Shape with same rank and dimension order as self.

Expand source code
def replace(self, dims: Union['Shape', str, tuple, list], new: 'Shape', keep_item_names=True, replace_item_names: DimFilter = None) -> 'Shape':
    """
    Returns a copy of `self` with `dims` replaced by `new`.
    Dimensions that are not present in `self` are ignored.

    The dimension order is preserved.

    Args:
        dims: Dimensions to replace.
        new: New dimensions, must have same length as `dims`.
            If a `Shape` is given, replaces the dimension types and item names as well.
        keep_item_names: Keeps existing item names for dimensions where `new` does not specify item names if the new dimension has the same size.
        replace_item_names: For which dims the item names should be replaced as well.

    Returns:
        `Shape` with same rank and dimension order as `self`.
    """
    dims = parse_dim_order(dims)
    assert isinstance(new, Shape), f"new must be a Shape but got {new}"
    names = list(self.names)
    sizes = list(self.sizes)
    types = list(self.types)
    item_names = list(self.item_names)
    for i in self.indices(self.only(replace_item_names)):
        if item_names[i]:
            if len(new) > len(dims):
                raise NotImplementedError
            else:
                name_map = {d: n for d, n in zip(dims, new.names)}
                item_names[i] = tuple([name_map.get(n, n) for n in item_names[i]])
    if len(new) > len(dims):  # Put all in one spot
        assert len(dims) == 1, "Cannot replace 2+ dims by more replacements"
        index = self.index(dims[0])
        return concat_shapes(self[:index], new, self[index+1:])
    for old_name, new_dim in zip(dims, new):
        if old_name in self:
            names[self.index(old_name)] = new_dim.name
            types[self.index(old_name)] = new_dim.type
            if new_dim.item_names[0]:
                item_names[self.index(old_name)] = new_dim.item_names[0]
            elif not _size_equal(new_dim.size, self.get_size(old_name)) or not keep_item_names:
                item_names[self.index(old_name)] = None  # forget previous item names
            sizes[self.index(old_name)] = new_dim.size
    replaced = Shape(tuple(sizes), tuple(names), tuple(types), tuple(item_names))
    if len(new) == len(dims):
        return replaced
    to_remove = dims[-(len(dims) - len(new)):]
    return replaced.without(to_remove)
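A minimal sketch (arbitrary dimensions; attributes are shown instead of the full repr):

>>> r = spatial(x=10, y=8).replace('x', channel(c=3))
>>> r.names, r.sizes
(('c', 'y'), (3, 8))
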
def resolve_index(self, index: Dict[str, Union[slice, int, ForwardRef('Shape'), str, tuple, list]]) ‑> Dict[str, Union[slice, int, tuple, list]]

Replaces item names by the corresponding indices.

Args

index
n-dimensional index or slice.

Returns

Same index but without any reference to item names.

Expand source code
def resolve_index(self, index: Dict[str, Union[slice, int, 'Shape', str, tuple, list]]) -> Dict[str, Union[slice, int, tuple, list]]:
    """
    Replaces item names by the corresponding indices.

    Args:
        index: n-dimensional index or slice.

    Returns:
        Same index but without any reference to item names.
    """
    return {dim: self.prepare_gather(dim, s) for dim, s in index.items()}
def unstack(self, dim='dims') ‑> Tuple[phiml.math._shape.Shape]

Slices this Shape along a dimension. The dimension listing the sizes of the shape is referred to as 'dims'.

Non-uniform tensor shapes may be unstacked along other dimensions as well, see https://tum-pbs.github.io/PhiML/Non_Uniform.html

Args

dim
dimension to unstack

Returns

slices of this shape

Expand source code
def unstack(self, dim='dims') -> Tuple['Shape']:
    """
    Slices this `Shape` along a dimension.
    The dimension listing the sizes of the shape is referred to as `'dims'`.

    Non-uniform tensor shapes may be unstacked along other dimensions as well, see
    https://tum-pbs.github.io/PhiML/Non_Uniform.html

    Args:
        dim: dimension to unstack

    Returns:
        slices of this shape
    """
    if dim == 'dims':
        return tuple(Shape((self.sizes[i],), (self.names[i],), (self.types[i],), (self.item_names[i],)) for i in range(self.rank))
    if dim not in self and self.is_uniform:
        return tuple([self])
    from ._tensors import Tensor
    if dim in self:
        inner = self.without(dim)
        dim_size = self.get_size(dim)
    else:
        inner = self
        dim_size = self.shape.get_size(dim)
    sizes = []
    for size in inner.sizes:
        if isinstance(size, Tensor) and dim in size.shape:
            sizes.append(size._unstack(dim))
            dim_size = size.shape.get_size(dim)
        else:
            sizes.append(size)
    assert isinstance(dim_size, int)
    shapes = tuple(Shape(tuple([int(size[i]) if isinstance(size, tuple) else size for size in sizes]), inner.names, inner.types, inner.item_names) for i in range(dim_size))
    return shapes
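For illustration, unstacking along the default 'dims' dimension yields one single-dimension Shape per entry:

>>> [part.names for part in spatial(x=10, y=8).unstack()]
[('x',), ('y',)]
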
def with_dim_size(self, dim: Union[str, ForwardRef('Shape')], size: Union[int, ForwardRef('math.Tensor'), str, tuple, list], keep_item_names=True)

Returns a new Shape that has a different size for dim.

Args

dim
Dimension for which to replace the size, Shape or str.
size
New size, int or Tensor.
keep_item_names
If False, discards existing item names for dim. If True, keeps item names where the size does not change.

Returns

Shape with same names and types as self.

Expand source code
def with_dim_size(self, dim: Union[str, 'Shape'], size: Union[int, 'math.Tensor', str, tuple, list], keep_item_names=True):
    """
    Returns a new `Shape` that has a different size for `dim`.

    Args:
        dim: Dimension for which to replace the size, `Shape` or `str`.
        size: New size, `int` or `Tensor`.
        keep_item_names: If `False`, discards existing item names for `dim`.
            If `True`, keeps item names where the size does not change.

    Returns:
        `Shape` with same names and types as `self`.
    """
    if isinstance(dim, Shape):
        dim = dim.name
    assert isinstance(dim, str)
    new_size, new_item_names = Shape._size_and_item_names_from_obj(size, self.get_size(dim), self.get_item_names(dim), keep_item_names)
    return self.replace(dim, Shape((new_size,), (dim,), (self.get_type(dim),), (new_item_names,)), keep_item_names=keep_item_names)
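A minimal sketch (arbitrary sizes):

>>> spatial(x=10, y=8).with_dim_size('x', 32).sizes
(32, 8)
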
def with_size(self, size: Union[int, Tuple[str, ...]])

Only for single-dimension shapes. Returns a Shape representing this dimension but with a different size.

See Also: Shape.with_sizes().

Args

size
Replacement size for this dimension.

Returns

Shape

Expand source code
def with_size(self, size: Union[int, Tuple[str, ...]]):
    """
    Only for single-dimension shapes.
    Returns a `Shape` representing this dimension but with a different size.

    See Also:
        `Shape.with_sizes()`.

    Args:
        size: Replacement size for this dimension.

    Returns:
        `Shape`
    """
    assert self.rank == 1, "Shape.with_size() is only defined for shapes of rank 1."
    return self.with_sizes([size])
def with_sizes(self, sizes: Union[Sequence[int], Sequence[Tuple[str, ...]], ForwardRef('Shape'), int], keep_item_names=True)

Returns a new Shape matching the dimension names and types of self but with different sizes.

See Also: Shape.with_size().

Args

sizes

One of

  • tuple / list of same length as self containing replacement sizes or replacement item names.
  • Shape of any rank. Replaces sizes for dimensions shared by sizes and self.
  • int: new size for all dimensions
keep_item_names
If False, forgets all item names. If True, keeps item names where the size does not change.

Returns

Shape with same names and types as self.

Expand source code
def with_sizes(self, sizes: Union[Sequence[int], Sequence[Tuple[str, ...]], 'Shape', int], keep_item_names=True):
    """
    Returns a new `Shape` matching the dimension names and types of `self` but with different sizes.

    See Also:
        `Shape.with_size()`.

    Args:
        sizes: One of

            * `tuple` / `list` of same length as `self` containing replacement sizes or replacement item names.
            * `Shape` of any rank. Replaces sizes for dimensions shared by `sizes` and `self`.
            * `int`: new size for all dimensions

        keep_item_names: If `False`, forgets all item names.
            If `True`, keeps item names where the size does not change.

    Returns:
        `Shape` with same names and types as `self`.
    """
    if isinstance(sizes, int):
        sizes = [sizes] * len(self.sizes)
    if isinstance(sizes, Shape):
        item_names = [sizes.get_item_names(dim) if dim in sizes else self.get_item_names(dim) for dim in self.names]
        sizes = [sizes.get_size(dim) if dim in sizes else s for dim, s in self._named_sizes]
        return Shape(tuple(sizes), self.names, self.types, tuple(item_names))
    else:
        assert len(sizes) == len(self.sizes), f"Cannot create shape from {self} with sizes {sizes}"
        sizes_ = []
        item_names = []
        for i, obj in enumerate(sizes):
            new_size, new_item_names = Shape._size_and_item_names_from_obj(obj, self.sizes[i], self.item_names[i], keep_item_names)
            sizes_.append(new_size)
            item_names.append(new_item_names)
        return Shape(tuple(sizes_), self.names, self.types, tuple(item_names))
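For illustration (arbitrary shape), passing a single int or a sequence of replacement sizes:

>>> spatial(x=10, y=8).with_sizes(64).sizes
(64, 64)
>>> spatial(x=10, y=8).with_sizes([4, 3]).sizes
(4, 3)
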
def without(self, dims: DimFilter) ‑> phiml.math._shape.Shape

Builds a new shape from this one that is missing all given dimensions. Dimensions in dims that are not part of this Shape are ignored.

The complementary operation is Shape.only().

Args

dims
Dimensions to exclude as str, tuple, list or Shape. Dimensions that are not part of this shape are ignored.

Returns

Shape without specified dimensions

Expand source code
def without(self, dims: 'DimFilter') -> 'Shape':
    """
    Builds a new shape from this one that is missing all given dimensions.
    Dimensions in `dims` that are not part of this Shape are ignored.
    
    The complementary operation is `Shape.only()`.

    Args:
      dims: Dimensions to exclude as `str`, `tuple`, `list` or `Shape`.
        Dimensions that are not part of this shape are ignored.

    Returns:
      Shape without specified dimensions
    """
    if dims is None:  # subtract none
        return self
    elif callable(dims):
        dims = dims(self)
    if isinstance(dims, str):
        return self[[i for i in range(self.rank) if self.names[i] not in parse_dim_order(dims)]]
    elif isinstance(dims, Shape):
        return self[[i for i in range(self.rank) if self.names[i] not in dims.names]]
    if isinstance(dims, (tuple, list, set)) and all([isinstance(d, str) for d in dims]):
        return self[[i for i in range(self.rank) if self.names[i] not in dims]]
    elif isinstance(dims, (tuple, list, set)):
        result = self
        for wo in dims:
            result = result.without(wo)
        return result
    else:
        raise ValueError(dims)
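A sketch using both a dimension name and a type filter (arbitrary shape):

>>> s = batch(examples=32) & spatial(x=10, y=8)
>>> s.without('x').names
('examples', 'y')
>>> s.without(spatial).names
('examples',)
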
def without_sizes(self)

Returns

Shape with all sizes undefined (None)

Expand source code
def without_sizes(self):
    """
    Returns:
        `Shape` with all sizes undefined (`None`)
    """
    return Shape((None,) * self.rank, self.names, self.types, (None,) * self.rank)
class Solve (method: Optional[str] = 'auto', rel_tol: Union[float, phiml.math._tensors.Tensor] = None, abs_tol: Union[float, phiml.math._tensors.Tensor] = None, x0: Union[~X, Any] = None, max_iterations: Union[int, phiml.math._tensors.Tensor] = 1000, suppress: Union[tuple, list] = (), preprocess_y: Callable = None, preprocess_y_args: tuple = (), preconditioner: Optional[str] = None, gradient_solve: Optional[ForwardRef('Solve[Y, X]')] = None)

Specifies parameters and stopping criteria for solving a minimization problem or system of equations.

Expand source code
class Solve(Generic[X, Y]):
    """
    Specifies parameters and stopping criteria for solving a minimization problem or system of equations.
    """

    def __init__(self,
                 method: Union[str, None] = 'auto',
                 rel_tol: Union[float, Tensor] = None,
                 abs_tol: Union[float, Tensor] = None,
                 x0: Union[X, Any] = None,
                 max_iterations: Union[int, Tensor] = 1000,
                 suppress: Union[tuple, list] = (),
                 preprocess_y: Callable = None,
                 preprocess_y_args: tuple = (),
                 preconditioner: Optional[str] = None,
                 gradient_solve: Union['Solve[Y, X]', None] = None):
        method = method or 'auto'
        assert isinstance(method, str)
        self.method: str = method
        """ Optimization method to use. Available solvers depend on the solve function that is used to perform the solve. """
        self.rel_tol: Tensor = math.to_float(wrap(rel_tol)) if rel_tol is not None else None
        """Relative tolerance for linear solves only, defaults to 1e-5 for singe precision solves and 1e-12 for double precision solves.
        This must be unset or `0` for minimization problems.
        For systems of equations *f(x)=y*, the final tolerance is `max(rel_tol * norm(y), abs_tol)`. """
        self.abs_tol: Tensor = math.to_float(wrap(abs_tol)) if abs_tol is not None else None
        """ Absolut tolerance for optimization problems and linear solves.
        Defaults to 1e-5 for singe precision solves and 1e-12 for double precision solves.
        For systems of equations *f(x)=y*, the final tolerance is `max(rel_tol * norm(y), abs_tol)`. """
        self.max_iterations: Tensor = math.to_int32(wrap(max_iterations))
        """ Maximum number of iterations to perform before raising a `NotConverged` error is raised. """
        self.x0 = x0
        """ Initial guess for the method, of same type and dimensionality as the solve result.
         This property must be set to a value compatible with the solution `x` before running a method. """
        self.preprocess_y: Callable = preprocess_y
        """ Function to be applied to the right-hand-side vector of an equation system before solving the system.
        This property is propagated to gradient solves by default. """
        self.preprocess_y_args: tuple = preprocess_y_args
        assert all(issubclass(err, ConvergenceException) for err in suppress)
        self.suppress: tuple = tuple(suppress)
        """ Error types to suppress; `tuple` of `ConvergenceException` types. For these errors, the solve function will instead return the partial result without raising the error. """
        self.preconditioner = preconditioner
        self._gradient_solve: Solve[Y, X] = gradient_solve
        self.id = str(uuid.uuid4())  # not altered by copy_with(), so that the lookup SolveTape[Solve] works after solve has been copied

    @property
    def gradient_solve(self) -> 'Solve[Y, X]':
        """
        Parameters to use for the gradient pass when an implicit gradient is computed.
        If `None`, a duplicate of this `Solve` is created for the gradient solve.

        In any case, the gradient solve information will be stored in `gradient_solve.result`.
        """
        if self._gradient_solve is None:
            self._gradient_solve = Solve(self.method, self.rel_tol, self.abs_tol, None, self.max_iterations, self.suppress, self.preprocess_y, self.preprocess_y_args)
        return self._gradient_solve

    def __repr__(self):
        return f"{self.method} with tolerance {self.rel_tol} (rel), {self.abs_tol} (abs), max_iterations={self.max_iterations}" + (" including preprocessing" if self.preprocess_y else "")

    def __eq__(self, other):
        if not isinstance(other, Solve):
            return False
        if self.method != other.method \
                or not math.equal(self.abs_tol, other.abs_tol) \
                or not math.equal(self.rel_tol, other.rel_tol) \
                or (self.max_iterations != other.max_iterations).any \
                or self.preprocess_y is not other.preprocess_y \
                or self.suppress != other.suppress:
            return False
        return self.x0 == other.x0

    def __variable_attrs__(self):
        return 'x0', 'rel_tol', 'abs_tol', 'max_iterations'

    def __value_attrs__(self):
        return self.__variable_attrs__()

    def with_defaults(self, mode: str):
        assert mode in ('solve', 'optimization')
        result = self
        if result.rel_tol is None:
            result = copy_with(result, rel_tol=_default_tolerance() if mode == 'solve' else wrap(0.))
        if result.abs_tol is None:
            result = copy_with(result, abs_tol=_default_tolerance())
        return result

    def with_preprocessing(self, preprocess_y: Callable, *args) -> 'Solve':
        """
        Adds preprocessing to this `Solve` and all corresponding gradient solves.

        Args:
            preprocess_y: Preprocessing function.
            *args: Arguments for the preprocessing function.

        Returns:
            Copy of this `Solve` with given preprocessing.
        """
        assert self.preprocess_y is None, f"preprocessing for linear solve '{self}' already set"
        gradient_solve = self._gradient_solve.with_preprocessing(preprocess_y, *args) if self._gradient_solve is not None else None
        return copy_with(self, preprocess_y=preprocess_y, preprocess_y_args=args, _gradient_solve=gradient_solve)
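
For illustration, a hedged sketch of configuring a Solve and passing it to solve_linear(); here f stands for a linear function and y for a compatible right-hand-side tensor (both placeholders), and 'CG' selects a conjugate-gradient method where available:

>>> solve = Solve('CG', rel_tol=1e-5, x0=math.zeros_like(y))
>>> x = math.solve_linear(f, y, solve)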

Ancestors

  • typing.Generic

Instance variables

var abs_tol

Absolute tolerance for optimization problems and linear solves. Defaults to 1e-5 for single precision solves and 1e-12 for double precision solves. For systems of equations f(x)=y, the final tolerance is max(rel_tol * norm(y), abs_tol).

var gradient_solve : phiml.math._optimize.Solve[~Y, ~X]

Parameters to use for the gradient pass when an implicit gradient is computed. If None, a duplicate of this Solve is created for the gradient solve.

In any case, the gradient solve information will be stored in gradient_solve.result.

Expand source code
@property
def gradient_solve(self) -> 'Solve[Y, X]':
    """
    Parameters to use for the gradient pass when an implicit gradient is computed.
    If `None`, a duplicate of this `Solve` is created for the gradient solve.

    In any case, the gradient solve information will be stored in `gradient_solve.result`.
    """
    if self._gradient_solve is None:
        self._gradient_solve = Solve(self.method, self.rel_tol, self.abs_tol, None, self.max_iterations, self.suppress, self.preprocess_y, self.preprocess_y_args)
    return self._gradient_solve
var max_iterations

Maximum number of iterations to perform before a NotConverged error is raised.

var method

Optimization method to use. Available solvers depend on the solve function that is used to perform the solve.

var preprocess_y

Function to be applied to the right-hand-side vector of an equation system before solving the system. This property is propagated to gradient solves by default.

var rel_tol

Relative tolerance for linear solves only. Defaults to 1e-5 for single precision solves and 1e-12 for double precision solves. This must be unset or 0 for minimization problems. For systems of equations f(x)=y, the final tolerance is max(rel_tol * norm(y), abs_tol).

var suppress

Error types to suppress; tuple of ConvergenceException types. For these errors, the solve function will instead return the partial result without raising the error.

var x0

Initial guess for the method, of same type and dimensionality as the solve result. This property must be set to a value compatible with the solution x before running a method.

Methods

def with_defaults(self, mode: str)
Expand source code
def with_defaults(self, mode: str):
    assert mode in ('solve', 'optimization')
    result = self
    if result.rel_tol is None:
        result = copy_with(result, rel_tol=_default_tolerance() if mode == 'solve' else wrap(0.))
    if result.abs_tol is None:
        result = copy_with(result, abs_tol=_default_tolerance())
    return result
def with_preprocessing(self, preprocess_y: Callable, *args) ‑> phiml.math._optimize.Solve

Adds preprocessing to this Solve and all corresponding gradient solves.

Args

preprocess_y
Preprocessing function.
*args
Arguments for the preprocessing function.

Returns

Copy of this Solve with given preprocessing.

Expand source code
def with_preprocessing(self, preprocess_y: Callable, *args) -> 'Solve':
    """
    Adds preprocessing to this `Solve` and all corresponding gradient solves.

    Args:
        preprocess_y: Preprocessing function.
        *args: Arguments for the preprocessing function.

    Returns:
        Copy of this `Solve` with given preprocessing.
    """
    assert self.preprocess_y is None, f"preprocessing for linear solve '{self}' already set"
    gradient_solve = self._gradient_solve.with_preprocessing(preprocess_y, *args) if self._gradient_solve is not None else None
    return copy_with(self, preprocess_y=preprocess_y, preprocess_y_args=args, _gradient_solve=gradient_solve)
class SolveInfo

Stores information about the solution or trajectory of a solve.

When representing the full optimization trajectory, all tracked quantities will have an additional trajectory batch dimension.

Expand source code
class SolveInfo(Generic[X, Y]):
    """
    Stores information about the solution or trajectory of a solve.

    When representing the full optimization trajectory, all tracked quantities will have an additional `trajectory` batch dimension.
    """

    def __init__(self,
                 solve: Solve,
                 x: X,
                 residual: Union[Y, None],
                 iterations: Union[Tensor, None],
                 function_evaluations: Union[Tensor, None],
                 converged: Tensor,
                 diverged: Tensor,
                 method: str,
                 msg: Tensor,
                 solve_time: float):
        # tuple.__new__(SolveInfo, (x, residual, iterations, function_evaluations, converged, diverged))
        self.solve: Solve[X, Y] = solve
        """ `Solve`, Parameters specified for the solve. """
        self.x: X = x
        """ `Tensor` or `phiml.math.magic.PhiTreeNode`, solution estimate. """
        self.residual: Y = residual
        """ `Tensor` or `phiml.math.magic.PhiTreeNode`, residual vector for systems of equations or function value for minimization problems. """
        self.iterations: Tensor = iterations
        """ `Tensor`, number of performed iterations to reach this state. """
        self.function_evaluations: Tensor = function_evaluations
        """ `Tensor`, how often the function (or its gradient function) was called. """
        self.converged: Tensor = converged
        """ `Tensor`, whether the residual is within the specified tolerance. """
        self.diverged: Tensor = diverged
        """ `Tensor`, whether the solve has diverged at this point. """
        self.method = method
        """ `str`, which method and implementation that was used. """
        if all_available(diverged, converged, iterations):
            _, res_tensors = disassemble_tree(residual, cache=False)
            msg_fun = partial(_default_solve_info_msg, solve=solve)
            msg = map_(msg_fun, msg, converged.trajectory[-1], diverged.trajectory[-1], iterations.trajectory[-1], method=method, residual=res_tensors[0], dims=converged.shape.without('trajectory'))
        self.msg = msg
        """ `str`, termination message """
        self.solve_time = solve_time
        """ Time spent in Backend solve function (in seconds) """

    def __repr__(self):
        return f"{self.method}: {self.converged.trajectory[-1].sum} converged, {self.diverged.trajectory[-1].sum} diverged"

    def snapshot(self, index):
        return SolveInfo(self.solve, self.x.trajectory[index], self.residual.trajectory[index], self.iterations.trajectory[index], self.function_evaluations.trajectory[index],
                         self.converged.trajectory[index], self.diverged.trajectory[index], self.method, self.msg, self.solve_time)

    def convergence_check(self, only_warn: bool):
        if not all_available(self.diverged, self.converged):
            return
        if self.diverged.any:
            if Diverged not in self.solve.suppress:
                if only_warn:
                    warnings.warn(self.msg, ConvergenceWarning)
                else:
                    raise Diverged(self)
        if not self.converged.trajectory[-1].all:
            if NotConverged not in self.solve.suppress:
                if only_warn:
                    warnings.warn(self.msg, ConvergenceWarning)
                else:
                    raise NotConverged(self)

Ancestors

  • typing.Generic

Instance variables

var converged

Tensor, whether the residual is within the specified tolerance.

var diverged

Tensor, whether the solve has diverged at this point.

var function_evaluations

Tensor, how often the function (or its gradient function) was called.

var iterations

Tensor, number of performed iterations to reach this state.

var method

str, which method and implementation was used.

var msg

str, termination message

var residual

Tensor or PhiTreeNode, residual vector for systems of equations or function value for minimization problems.

var solve

Solve, parameters specified for the solve.

var solve_time

Time spent in Backend solve function (in seconds)

var x

Tensor or PhiTreeNode, solution estimate.

Methods

def convergence_check(self, only_warn: bool)
Expand source code
def convergence_check(self, only_warn: bool):
    if not all_available(self.diverged, self.converged):
        return
    if self.diverged.any:
        if Diverged not in self.solve.suppress:
            if only_warn:
                warnings.warn(self.msg, ConvergenceWarning)
            else:
                raise Diverged(self)
    if not self.converged.trajectory[-1].all:
        if NotConverged not in self.solve.suppress:
            if only_warn:
                warnings.warn(self.msg, ConvergenceWarning)
            else:
                raise NotConverged(self)
def snapshot(self, index)
Expand source code
def snapshot(self, index):
    return SolveInfo(self.solve, self.x.trajectory[index], self.residual.trajectory[index], self.iterations.trajectory[index], self.function_evaluations.trajectory[index],
                     self.converged.trajectory[index], self.diverged.trajectory[index], self.method, self.msg, self.solve_time)
class SolveTape (*solves: phiml.math._optimize.Solve, record_trajectories=False)

Used to record additional information about solves invoked via solve_linear(), solve_nonlinear() or minimize(). While a SolveTape is active, certain performance optimizations and algorithm implementations may be disabled.

To access a SolveInfo of a recorded solve, use

>>> solve = Solve(method, ...)
>>> with SolveTape() as solves:
>>>     x = math.solve_linear(f, y, solve)
>>> result: SolveInfo = solves[solve]  # get by Solve
>>> result: SolveInfo = solves[0]  # get by index

Args

*solves
(Optional) Select specific solves to be recorded. If none is given, records all solves that occur within the scope of this SolveTape.
record_trajectories
When enabled, the entries of SolveInfo will contain an additional batch dimension named trajectory.
Expand source code
class SolveTape:
    """
    Used to record additional information about solves invoked via `solve_linear()`, `solve_nonlinear()` or `minimize()`.
    While a `SolveTape` is active, certain performance optimizations and algorithm implementations may be disabled.

    To access a `SolveInfo` of a recorded solve, use
    >>> solve = Solve(method, ...)
    >>> with SolveTape() as solves:
    >>>     x = math.solve_linear(f, y, solve)
    >>> result: SolveInfo = solves[solve]  # get by Solve
    >>> result: SolveInfo = solves[0]  # get by index
    """

    def __init__(self, *solves: Solve, record_trajectories=False):
        """
        Args:
            *solves: (Optional) Select specific `solves` to be recorded.
                If none is given, records all solves that occur within the scope of this `SolveTape`.
            record_trajectories: When enabled, the entries of `SolveInfo` will contain an additional batch dimension named `trajectory`.
        """
        self.record_only_ids = [s.id for s in solves]
        self.record_trajectories = record_trajectories
        self.solves: List[SolveInfo] = []

    def should_record_trajectory_for(self, solve: Solve):
        if not self.record_trajectories:
            return False
        if not self.record_only_ids:
            return True
        return solve.id in self.record_only_ids

    def __enter__(self):
        _SOLVE_TAPES.append(self)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        _SOLVE_TAPES.remove(self)

    def _add(self, solve: Solve, trj: bool, result: SolveInfo):
        if any(s.solve.id == solve.id for s in self.solves):
            warnings.warn("SolveTape contains two results for the same solve settings. SolveTape[solve] will return the first solve result.", RuntimeWarning)
        if self.record_only_ids and solve.id not in self.record_only_ids:
            return  # this solve should not be recorded
        if self.record_trajectories:
            assert trj, "Solve did not record a trajectory."
            self.solves.append(result)
        elif trj:
            self.solves.append(result.snapshot(-1))
        else:
            self.solves.append(result)

    def __getitem__(self, item) -> SolveInfo:
        if isinstance(item, int):
            return self.solves[item]
        else:
            assert isinstance(item, Solve)
            solves = [s for s in self.solves if s.solve.id == item.id]
            if len(solves) == 0:
                raise KeyError(f"No solve recorded with key '{item}'.")
            assert len(solves) == 1
            return solves[0]

    def __iter__(self):
        return iter(self.solves)

    def __len__(self):
        return len(self.solves)

Methods

def should_record_trajectory_for(self, solve: phiml.math._optimize.Solve)
Expand source code
def should_record_trajectory_for(self, solve: Solve):
    if not self.record_trajectories:
        return False
    if not self.record_only_ids:
        return True
    return solve.id in self.record_only_ids
class Tensor

Abstract base class to represent structured data of one data type. This class replaces the native tensor classes numpy.ndarray, torch.Tensor, tensorflow.Tensor or jax.numpy.ndarray as the main data container in Φ-ML.

Tensor instances are different from native tensors in two important ways:

  • The dimensions of Tensors have names and types.
  • Tensors can have non-uniform shapes, meaning that the size of dimensions can vary along other dimensions.

To check whether a value is a tensor, use isinstance(value, Tensor).

To construct a Tensor, use tensor(), wrap() or one of the basic tensor creation functions, see https://tum-pbs.github.io/PhiML/Tensors.html .

Tensors are not editable. When backed by an editable native tensor, e.g. a numpy.ndarray, do not edit the underlying data structure.

Expand source code
class Tensor:
    """
    Abstract base class to represent structured data of one data type.
    This class replaces the native tensor classes `numpy.ndarray`, `torch.Tensor`, `tensorflow.Tensor` or `jax.numpy.ndarray` as the main data container in Φ-ML.

    `Tensor` instances are different from native tensors in two important ways:

    * The dimensions of Tensors have *names* and *types*.
    * Tensors can have non-uniform shapes, meaning that the size of dimensions can vary along other dimensions.

    To check whether a value is a tensor, use `isinstance(value, Tensor)`.

    To construct a Tensor, use `phiml.math.tensor()`, `phiml.math.wrap()` or one of the basic tensor creation functions,
    see https://tum-pbs.github.io/PhiML/Tensors.html .

    Tensors are not editable.
    When backed by an editable native tensor, e.g. a `numpy.ndarray`, do not edit the underlying data structure.
    """

    def __init__(self):
        if DEBUG_CHECKS:
            self._init_stack = traceback.extract_stack()

    def native(self, order: Union[str, tuple, list, Shape] = None, singleton_for_const=False):
        """
        Returns a native tensor object with the dimensions ordered according to `order`.
        
        Transposes the underlying tensor to match the name order and adds singleton dimensions for new dimension names.
        If a dimension of the tensor is not listed in `order`, a `ValueError` is raised.

        Args:
            order: (Optional) Order of dimension names as comma-separated string, list or `Shape`.
            singleton_for_const: If `True`, dimensions along which values are guaranteed to be constant will not be expanded to their true size but returned as singleton dimensions.

        Returns:
            Native tensor representation, such as PyTorch tensor or NumPy array.

        Raises:
            ValueError if the tensor cannot be transposed to match `order`
        """
        raise NotImplementedError(self.__class__)

    def numpy(self, order: Union[str, tuple, list, Shape] = None) -> np.ndarray:
        """
        Converts this tensor to a `numpy.ndarray` with dimensions ordered according to `order`.
        
        *Note*: Using this function breaks the autograd chain. The returned tensor is not differentiable.
        To get a differentiable tensor, use `Tensor.native()` instead.
        
        Transposes the underlying tensor to match the name order and adds singleton dimensions for new dimension names.
        If a dimension of the tensor is not listed in `order`, a `ValueError` is raised.

        If this `Tensor` is backed by a NumPy array, a reference to this array may be returned.

        See Also:
            `phiml.math.numpy()`

        Args:
            order: (Optional) Order of dimension names as comma-separated string, list or `Shape`.

        Returns:
            NumPy representation

        Raises:
            ValueError if the tensor cannot be transposed to match `order`
        """
        native = self.native(order=order)
        return choose_backend(native).numpy(native)

    def __array__(self, dtype=None):  # NumPy conversion
        if self.rank > 1:
            warnings.warn("Automatic conversion of Φ-ML tensors to NumPy can cause problems because the dimension order is not guaranteed.", SyntaxWarning, stacklevel=3)
        return self.numpy(self._shape)

    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):  # NumPy interface
        if len(inputs) != 2:
            return NotImplemented
        if ufunc.__name__ == 'multiply':
            if inputs[0] is self:
                return self._op2(inputs[1], lambda x, y: x * y, lambda x, y: choose_backend(x, y).mul(x, y), 'mul', '*')
            else:
                return self._op2(inputs[0], lambda x, y: y * x, lambda x, y: choose_backend(x, y).mul(y, x), 'rmul', '*')
        if ufunc.__name__ == 'add':
            if inputs[0] is self:
                return self._op2(inputs[1], lambda x, y: x + y, lambda x, y: choose_backend(x, y).add(x, y), 'add', '+')
            else:
                return self._op2(inputs[0], lambda x, y: y + x, lambda x, y: choose_backend(x, y).add(y, x), 'radd', '+')
        if ufunc.__name__ == 'subtract':
            if inputs[0] is self:
                return self._op2(inputs[1], lambda x, y: x - y, lambda x, y: choose_backend(x, y).sub(x, y), 'add', '-')
            else:
                return self._op2(inputs[0], lambda x, y: y - x, lambda x, y: choose_backend(x, y).sub(y, x), 'rsub', '-')
        if ufunc.__name__ in ['divide', 'true_divide']:
            if inputs[0] is self:
                return self._op2(inputs[1], lambda x, y: x / y, lambda x, y: choose_backend(x, y).div(x, y), 'true_divide', '/')
            else:
                return self._op2(inputs[0], lambda x, y: y / x, lambda x, y: choose_backend(x, y).div(y, x), 'r_true_divide', '/')
        if ufunc.__name__ == 'floor_divide':
            if inputs[0] is self:
                return self._op2(inputs[1], lambda x, y: x // y, lambda x, y: choose_backend(x, y).floordiv(x, y), 'floor_divide', '//')
            else:
                return self._op2(inputs[0], lambda x, y: y // x, lambda x, y: choose_backend(x, y).floordiv(y, x), 'r_floor_divide', '//')
        if ufunc.__name__ == 'remainder':
            if inputs[0] is self:
                return self._op2(inputs[1], lambda x, y: x % y, lambda x, y: choose_backend(x, y).mod(x, y), 'remainder', '%')
            else:
                return self._op2(inputs[0], lambda x, y: y % x, lambda x, y: choose_backend(x, y).mod(y, x), 'r_remainder', '%')
        if ufunc.__name__ == 'power':
            if inputs[0] is self:
                return self._op2(inputs[1], lambda x, y: x ** y, lambda x, y: choose_backend(x, y).pow(x, y), 'power', '**')
            else:
                return self._op2(inputs[0], lambda x, y: y ** x, lambda x, y: choose_backend(x, y).pow(y, x), 'r_power', '**')
        if ufunc.__name__ == 'equal':
            if _EQUALITY_REDUCE[-1] == 'ref':
                return wrap(inputs[0] is inputs[1])
            elif _EQUALITY_REDUCE[-1] == 'shape_and_value':
                if set(inputs[0].shape) != set(inputs[1].shape):
                    return wrap(False)
                from ._ops import close
                return wrap(close(inputs[0], inputs[1], rel_tolerance=0, abs_tolerance=0))
            if inputs[0] is self:
                return self._op2(inputs[1], lambda x, y: x == y, lambda x, y: choose_backend(x, y).equal(x, y), 'equal', '==')
            else:
                return self._op2(inputs[0], lambda x, y: y == x, lambda x, y: choose_backend(x, y).equal(y, x), 'r_equal', '==')
        if ufunc.__name__ == 'not_equal':
            if _EQUALITY_REDUCE[-1] == 'ref':
                return wrap(inputs[0] is not inputs[1])
            elif _EQUALITY_REDUCE[-1] == 'shape_and_value':
                if set(inputs[0].shape) != set(inputs[1].shape):
                    return wrap(True)
                from ._ops import close
                return wrap(not close(inputs[0], inputs[1], rel_tolerance=0, abs_tolerance=0))
            if inputs[0] is self:
                return self._op2(inputs[1], lambda x, y: x != y, lambda x, y: choose_backend(x, y).not_equal(x, y), 'equal', '!=')
            else:
                return self._op2(inputs[0], lambda x, y: y != x, lambda x, y: choose_backend(x, y).not_equal(y, x), 'r_equal', '!=')
        if ufunc.__name__ == 'greater':
            if inputs[0] is self:
                return self._op2(inputs[1], lambda x, y: x > y, lambda x, y: choose_backend(x, y).greater_than(x, y), 'greater', '>')
            else:
                return self._op2(inputs[0], lambda x, y: y > x, lambda x, y: choose_backend(x, y).greater_than(y, x), 'r_greater', '>')
        if ufunc.__name__ == 'greater_equal':
            if inputs[0] is self:
                return self._op2(inputs[1], lambda x, y: x >= y, lambda x, y: choose_backend(x, y).greater_or_equal(x, y), 'greater_equal', '>=')
            else:
                return self._op2(inputs[0], lambda x, y: y >= x, lambda x, y: choose_backend(x, y).greater_or_equal(y, x), 'r_greater_equal', '>=')
        if ufunc.__name__ == 'less':
            if inputs[0] is self:
                return self._op2(inputs[1], lambda x, y: x < y, lambda x, y: choose_backend(x, y).greater_than(y, x), 'less', '<')
            else:
                return self._op2(inputs[0], lambda x, y: y < x, lambda x, y: choose_backend(x, y).greater_than(x, y), 'r_less', '<')
        if ufunc.__name__ == 'less_equal':
            if inputs[0] is self:
                return self._op2(inputs[1], lambda x, y: x <= y, lambda x, y: choose_backend(x, y).greater_or_equal(y, x), 'less_equal', '<=')
            else:
                return self._op2(inputs[0], lambda x, y: y <= x, lambda x, y: choose_backend(x, y).greater_or_equal(x, y), 'r_less_equal', '<=')
        if ufunc.__name__ == 'left_shift':
            if inputs[0] is self:
                return self._op2(inputs[1], lambda x, y: x << y, lambda x, y: choose_backend(x, y).shift_bits_left(x, y), 'left_shift', '<<')
            else:
                return self._op2(inputs[0], lambda x, y: y << x, lambda x, y: choose_backend(x, y).shift_bits_left(y, x), 'r_left_shift', '<<')
        if ufunc.__name__ == 'right_shift':
            if inputs[0] is self:
                return self._op2(inputs[1], lambda x, y: x >> y, lambda x, y: choose_backend(x, y).shift_bits_right(x, y), 'right_shift', '>>')
            else:
                return self._op2(inputs[0], lambda x, y: y >> x, lambda x, y: choose_backend(x, y).shift_bits_right(y, x), 'r_right_shift', '>>')
        raise NotImplementedError(f"NumPy function '{ufunc.__name__}' is not compatible with Φ-ML tensors.")

    @property
    def dtype(self) -> DType:
        """ Data type of the elements of this `Tensor`. """
        raise NotImplementedError(self.__class__)

    @property
    def shape(self) -> Shape:
        """ The `Shape` lists the dimensions with their sizes, names and types. """
        raise NotImplementedError(self.__class__)

    @property
    def default_backend(self) -> Backend:
        from ._ops import choose_backend_t
        return choose_backend_t(self)

    def _with_shape_replaced(self, new_shape: Shape):
        raise NotImplementedError(self.__class__)

    def _with_natives_replaced(self, natives: list):
        """ Replaces all n _natives() of this Tensor with the first n elements of the list and removes them from the list. """
        raise NotImplementedError(self.__class__)

    @property
    def rank(self) -> int:
        """
        Number of explicit dimensions of this `Tensor`. Equal to `tensor.shape.rank`.
        This replaces [`numpy.ndarray.ndim`](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.ndim.html) /
        [`torch.Tensor.dim`](https://pytorch.org/docs/master/generated/torch.Tensor.dim.html) /
        [`tf.rank()`](https://www.tensorflow.org/api_docs/python/tf/rank) /
        [`jax.numpy.ndim()`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.ndim.html).
        """
        return self.shape.rank

    @property
    def _is_tracer(self) -> bool:
        """
        Tracers store additional internal information.
        They should not be converted to `native()` in intermediate operations.
        
        TensorStack prevents performing the actual stack operation if one of its component tensors is special.
        """
        raise NotImplementedError(self.__class__)

    def _to_dict(self):
        return cached(self)._to_dict()

    def __len__(self):
        return self.shape.volume if self.rank == 1 else NotImplemented

    def __bool__(self):
        assert self.rank == 0, f"Cannot convert tensor with non-empty shape {self.shape} to bool. Use tensor.any or tensor.all instead."
        from ._ops import all_
        if not self.default_backend.supports(Backend.jit_compile):  # NumPy
            return bool(self.native()) if self.rank == 0 else bool(all_(self).native())
        else:
            # __bool__ does not work with TensorFlow tracing.
            # TensorFlow needs to see a tf.Tensor in loop conditions but won't allow bool() invocations.
            # However, this function must always return a Python bool.
            raise AssertionError("To evaluate the boolean value of a Tensor, use 'Tensor.all'.")

    @property
    def all(self):
        """ Whether all values of this `Tensor` are `True` as a native bool. """
        from ._ops import all_, cast
        if self.rank == 0:
            return cast(self, DType(bool)).native()
        else:
            return all_(self, dim=self.shape).native()

    @property
    def any(self):
        """ Whether this `Tensor` contains a `True` value as a native bool. """
        from ._ops import any_, cast
        if self.rank == 0:
            return cast(self, DType(bool)).native()
        else:
            return any_(self, dim=self.shape).native()

    @property
    def mean(self):
        """ Mean value of this `Tensor` as a native scalar. """
        from ._ops import mean
        return mean(self, dim=self.shape).native()

    @property
    def finite_mean(self):
        """ Mean value of all finite values in this `Tensor` as a native scalar. """
        from ._ops import finite_mean
        return finite_mean(self, dim=self.shape).native()

    @property
    def std(self):
        """ Standard deviation of this `Tensor` as a native scalar. """
        from ._ops import std
        return std(self, dim=self.shape).native()

    @property
    def sum(self):
        """ Sum of all values of this `Tensor` as a native scalar. """
        from ._ops import sum_
        return sum_(self, dim=self.shape).native()

    @property
    def finite_sum(self):
        """ Sum of all finite values of this `Tensor` as a native scalar. """
        from ._ops import finite_sum
        return finite_sum(self, dim=self.shape).native()

    @property
    def min(self):
        """ Minimum value of this `Tensor` as a native scalar. """
        from ._ops import min_
        return min_(self, dim=self.shape).native()

    @property
    def finite_min(self):
        """ Minimum finite value of this `Tensor` as a native scalar. """
        from ._ops import finite_min
        return finite_min(self, dim=self.shape).native()

    @property
    def max(self):
        """ Maximum value of this `Tensor` as a native scalar. """
        from ._ops import max_
        return max_(self, dim=self.shape).native()

    @property
    def finite_max(self):
        """ Maximum finite value of this `Tensor` as a native scalar. """
        from ._ops import finite_max
        return finite_max(self, dim=self.shape).native()

    @property
    def real(self) -> 'Tensor':
        """
        Returns the real part of this tensor.

        See Also:
            `phiml.math.real()`
        """
        from ._ops import real
        return real(self)

    @property
    def imag(self) -> 'Tensor':
        """
        Returns the imaginary part of this tensor.
        If this tensor does not store complex numbers, returns a zero tensor with the same shape and dtype as this tensor.

        See Also:
            `phiml.math.imag()`
        """
        from ._ops import imag
        return imag(self)

    @property
    def available(self) -> bool:
        """
        A tensor is available if it stores concrete values and these can currently be read.

        Tracers used inside jit compilation are typically not available.

        See Also:
            `phiml.math.jit_compile()`.
        """
        if self._is_tracer:
            return False
        natives = self._natives()
        natives_available = [choose_backend(native).is_available(native) for native in natives]
        return all(natives_available)

    @property
    def device(self) -> Union[ComputeDevice, None]:
        """
        Returns the `ComputeDevice` that this tensor is allocated on.
        The device belongs to this tensor's `default_backend`.

        See Also:
            `Tensor.default_backend`.
        """
        natives = self._natives()
        if not natives:
            return None
        return self.default_backend.get_device(natives[0])

    def __int__(self):
        return int(self.native()) if self.shape.volume == 1 else NotImplemented

    def __float__(self):
        return float(self.native()) if self.shape.volume == 1 else NotImplemented

    def __complex__(self):
        return complex(self.native()) if self.shape.volume == 1 else NotImplemented

    def __index__(self):
        assert self.shape.volume == 1, f"Only scalar tensors can be converted to index but has shape {self.shape}"
        assert self.dtype.kind == int, f"Only int tensors can be converted to index but dtype is {self.dtype}"
        return int(self.native())

    def __repr__(self):
        return format_tensor(self, PrintOptions())

    def _repr_pretty_(self, printer, cycle):
        printer.text(format_tensor(self, PrintOptions(colors=DEFAULT_COLORS)))

    def __format__(self, format_spec: str):
        if BROADCAST_FORMATTER.values is not None:
            return BROADCAST_FORMATTER.register_formatted(self, format_spec)
        specs = format_spec.split(':')
        layout_ = 'auto'
        for possible_layout in ['summary', 'full', 'row', 'numpy']:
            if possible_layout in specs:
                assert layout_ == 'auto', f"Two layout identifiers encountered in '{format_spec}'"
                layout_ = possible_layout
        include_shape = 'shape' in specs or (False if 'no-shape' in specs else None)
        include_dtype = 'dtype' in specs or (False if 'no-dtype' in specs else None)
        color = 'color' in specs or (False if 'no-color' in specs else None)
        threshold = 8
        float_format = None
        for spec in specs:
            if spec.startswith('threshold='):
                threshold = int(spec[len('threshold='):])
            elif '.' in spec:
                float_format = spec
        result = format_tensor(self, PrintOptions(layout_, float_format, threshold, color, include_shape, include_dtype))
        return result

    def __getitem__(self, item) -> 'Tensor':
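        # Dispatch on index type: a bool Tensor selects entries via boolean_mask, an int
        # Tensor selects entries via gather, and any other index is converted by
        # slicing_dict() into a {dim_name: selection} dict and sliced dimension by dimension.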
        if isinstance(item, Tensor):
            if item.dtype.kind == bool:
                from ._ops import boolean_mask
                return boolean_mask(self, item.shape.non_batch or item.shape, item)
            elif item.dtype.kind == int:
                from ._ops import gather
                return gather(self, item)
            else:
                raise AssertionError(f"Index tensor must be of dtype int (gather) or bool (boolean_mask) but got {item}")
        item = slicing_dict(self, item)
        selections = {}
        sliced = self
        for dim, selection in item.items():
            if dim not in self.shape:
                continue
            selection = self.shape.prepare_gather(dim, selection)
            # Either handle slicing directly or add it to the dict
            if isinstance(selection, (tuple, list)):
                result = [sliced[{dim: i}] for i in selection]
                stack_dim = sliced.shape[dim].after_gather({dim: selection})
                sliced = stack(result, stack_dim)
            elif isinstance(selection, Tensor) and selection.dtype.kind == bool:
                from ._ops import boolean_mask
                sliced = boolean_mask(sliced, dim, selection)
            elif isinstance(selection, Tensor) and selection.dtype.kind == int:
                from ._ops import gather
                sliced = gather(sliced, selection, dims=dim)
            else:
                selections[dim] = selection
        return sliced._getitem(selections) if selections else sliced

    def _getitem(self, selection: dict) -> 'Tensor':
        """
        Slice the tensor along specified dimensions.

        Args:
          selection: dim_name: str -> Union[int, slice]
          selection: dict: 

        Returns:

        """
        raise NotImplementedError()

    def __setitem__(self, key, value):
        raise SyntaxError("Tensors are not editable to preserve the autodiff chain. This feature might be added in the future. To update part of a tensor, use math.where() or math.scatter()")

    def __unstack__(self, dims: Tuple[str, ...]) -> Tuple['Tensor', ...]:  # from phiml.math.magic.Sliceable
        if len(dims) == 1:
            return self._unstack(dims[0])
        else:
            return NotImplemented

    def _unstack(self, dim: str):
        """
        Splits this tensor along the specified dimension.
        The returned tensors have the same dimensions as this tensor save the unstacked dimension.

        Raises an error if the dimension is not part of the `Shape` of this `Tensor`.

        See Also:
            `TensorDim.unstack()`

        Args:
            dim: name of dimension to unstack

        Returns:
            tuple of tensors

        """
        raise NotImplementedError()

    @staticmethod
    def __stack__(values: tuple, dim: Shape, **_kwargs) -> 'Tensor':
        if any(isinstance(v, Layout) for v in values):
            layout_ = [v for v in values if isinstance(v, Layout)][0]
            return layout_.__stack__(values, dim, **_kwargs)
        from ._ops import stack_tensors
        return stack_tensors(values, dim)

    def __expand__(self, dims: Shape, **kwargs) -> 'Tensor':
        return expand_tensor(self, dims)

    @staticmethod
    def __concat__(values: tuple, dim: str, **kwargs) -> 'Tensor':
        from ._ops import concat_tensor
        return concat_tensor(values, dim)

    def __replace_dims__(self, dims: Tuple[str, ...], new_dims: Shape, **kwargs) -> 'Tensor':
        return self._with_shape_replaced(rename_dims(self.shape, dims, new_dims))

    def __unpack_dim__(self, dim: str, unpacked_dims: Shape, **kwargs) -> 'Tensor':
        if self.shape.is_uniform:
            native = self.native(self.shape.names)
            new_shape = self.shape.without(dim)
            i = self.shape.index(dim)
            for d in unpacked_dims:
                new_shape = new_shape._expand(d, pos=i)
                i += 1
            native_reshaped = choose_backend(native).reshape(native, new_shape.sizes)
            return NativeTensor(native_reshaped, new_shape)
        else:
            tensors = self._tensors
            if dim == self._stack_dim.name:
                for udim in unpacked_dims:
                    tensors = [TensorStack(tensors[o::len(tensors)//udim.size], udim) for o in range(len(tensors)//udim.size)]
                assert len(tensors) == 1
                return tensors[0]
            raise NotImplementedError

    def __pack_dims__(self, dims: Tuple[str, ...], packed_dim: Shape, pos: Union[int, None], **kwargs) -> 'Tensor':
        order = self.shape._order_group(dims)
        if self.shape.is_uniform:
            native = self.native(order)
            if pos is None:
                pos = min(self.shape.indices(dims))
            new_shape = self.shape.without(dims)._expand(packed_dim.with_sizes([self.shape.only(dims).volume]), pos)
            native = choose_backend(native).reshape(native, new_shape.sizes)
            return NativeTensor(native, new_shape)
        else:
            from ._ops import concat_tensor
            value = cached(self)
            assert isinstance(value, TensorStack)
            inner_packed = [pack_dims(t, dims, packed_dim) for t in value._tensors]
            return concat_tensor(inner_packed, packed_dim.name)

    def __cast__(self, dtype: DType):
        return self._op1(lambda native: choose_backend(native).cast(native, dtype=dtype))

    def dimension(self, name: Union[str, Shape]) -> 'TensorDim':
        """
        Returns a reference to a specific dimension of this tensor.
        This is equivalent to the syntax `tensor.<name>`.

        The dimension need not be part of the `Tensor.shape`, in which case its size is 1.

        Args:
            name: dimension name

        Returns:
            `TensorDim` corresponding to a dimension of this tensor
        """
        if isinstance(name, str):
            return TensorDim(self, name)
        elif isinstance(name, Shape):
            return TensorDim(self, name.name)
        else:
            raise ValueError(name)

    def pack(self, dims, packed_dim):
        """ See `pack_dims()` """
        from ._ops import pack_dims
        return pack_dims(self, dims, packed_dim)

    def unpack(self, dim, unpacked_dims):
        """ See `unpack_dim()` """
        from ._ops import unpack_dim
        return unpack_dim(self, dim, unpacked_dims)

    def __getattr__(self, name):
        if name.startswith('__'):  # called by hasattr in magic ops
            raise AttributeError
        if name.startswith('_'):
            raise AttributeError(f"'{type(self)}' object has no attribute '{name}'")
        if name == 'is_tensor_like':  # TensorFlow replaces abs() while tracing and checks for this attribute
            raise AttributeError(f"'{type(self)}' object has no attribute '{name}'")
        assert name not in ('shape', '_shape', 'tensor'), name
        return TensorDim(self, name)

    def __add__(self, other):
        return self._op2(other, lambda x, y: x + y, lambda x, y: choose_backend(x, y).add(x, y), 'add', '+')

    def __radd__(self, other):
        return self._op2(other, lambda x, y: y + x, lambda x, y: choose_backend(x, y).add(y, x), 'radd', '+')

    def __sub__(self, other):
        return self._op2(other, lambda x, y: x - y, lambda x, y: choose_backend(x, y).sub(x, y), 'sub', '-')

    def __rsub__(self, other):
        return self._op2(other, lambda x, y: y - x, lambda x, y: choose_backend(x, y).sub(y, x), 'rsub', '-')

    def __and__(self, other):
        return self._op2(other, lambda x, y: x & y, lambda x, y: choose_backend(x, y).and_(x, y), 'and', '&')

    def __rand__(self, other):
        return self._op2(other, lambda x, y: y & x, lambda x, y: choose_backend(x, y).and_(y, x), 'rand', '&')

    def __or__(self, other):
        return self._op2(other, lambda x, y: x | y, lambda x, y: choose_backend(x, y).or_(x, y), 'or', '|')

    def __ror__(self, other):
        return self._op2(other, lambda x, y: y | x, lambda x, y: choose_backend(x, y).or_(y, x), 'ror', '|')

    def __xor__(self, other):
        return self._op2(other, lambda x, y: x ^ y, lambda x, y: choose_backend(x, y).xor(x, y), 'xor', '^')

    def __rxor__(self, other):
        return self._op2(other, lambda x, y: y ^ x, lambda x, y: choose_backend(x, y).xor(y, x), 'rxor', '^')

    def __mul__(self, other):
        return self._op2(other, lambda x, y: x * y, lambda x, y: choose_backend(x, y).mul(x, y), 'mul', '*')

    def __rmul__(self, other):
        return self._op2(other, lambda x, y: y * x, lambda x, y: choose_backend(x, y).mul(y, x), 'rmul', '*')

    def __truediv__(self, other):
        return self._op2(other, lambda x, y: x / y, lambda x, y: choose_backend(x, y).div(x, y), 'truediv', '/')

    def __rtruediv__(self, other):
        return self._op2(other, lambda x, y: y / x, lambda x, y: choose_backend(x, y).div(y, x), 'rtruediv', '/')

    def __divmod__(self, other):
        return self._op2(other, lambda x, y: divmod(x, y), lambda x, y: divmod(x, y), 'divmod', 'divmod')

    def __rdivmod__(self, other):
        return self._op2(other, lambda x, y: divmod(y, x), lambda x, y: divmod(y, x), 'rdivmod', 'divmod')

    def __floordiv__(self, other):
        return self._op2(other, lambda x, y: x // y, lambda x, y: choose_backend(x, y).floordiv(x, y), 'floordiv', '//')

    def __rfloordiv__(self, other):
        return self._op2(other, lambda x, y: y // x, lambda x, y: choose_backend(x, y).floordiv(y, x), 'rfloordiv', '//')

    def __pow__(self, power, modulo=None):
        assert modulo is None
        return self._op2(power, lambda x, y: x ** y, lambda x, y: choose_backend(x, y).pow(x, y), 'pow', '**')

    def __rpow__(self, other):
        return self._op2(other, lambda x, y: y ** x, lambda x, y: choose_backend(x, y).pow(y, x), 'rpow', '**')

    def __mod__(self, other):
        return self._op2(other, lambda x, y: x % y, lambda x, y: choose_backend(x, y).mod(x, y), 'mod', '%')

    def __rmod__(self, other):
        return self._op2(other, lambda x, y: y % x, lambda x, y: choose_backend(x, y).mod(y, x), 'rmod', '%')

    def __eq__(self, other):
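        # Behavior depends on the active _EQUALITY_REDUCE mode: 'ref' compares object
        # identity, 'shape_and_value' reduces to a single bool, and any other mode
        # falls through to the element-wise comparison below (None is compared as NaN).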
        if _EQUALITY_REDUCE[-1] == 'ref':
            return wrap(self is other)
        elif _EQUALITY_REDUCE[-1] == 'shape_and_value':
            if set(self.shape) != set(other.shape):
                return wrap(False)
            from ._ops import close
            return wrap(close(self, other, rel_tolerance=0, abs_tolerance=0))
        if other is None:
            other = float('nan')
        return self._op2(other, lambda x, y: x == y, lambda x, y: choose_backend(x, y).equal(x, y), 'eq', '==')

    def __ne__(self, other):
        if _EQUALITY_REDUCE[-1] == 'ref':
            return wrap(self is not other)
        elif _EQUALITY_REDUCE[-1] == 'shape_and_value':
            if set(self.shape) != set(other.shape):
                return wrap(True)
            from ._ops import close
            return wrap(not close(self, other, rel_tolerance=0, abs_tolerance=0))
        if other is None:
            other = float('nan')
        return self._op2(other, lambda x, y: x != y, lambda x, y: choose_backend(x, y).not_equal(x, y), 'ne', '!=')

    def __lt__(self, other):
        return self._op2(other, lambda x, y: x < y, lambda x, y: choose_backend(x, y).greater_than(y, x), 'lt', '<')

    def __le__(self, other):
        return self._op2(other, lambda x, y: x <= y, lambda x, y: choose_backend(x, y).greater_or_equal(y, x), 'le', '<=')

    def __gt__(self, other):
        return self._op2(other, lambda x, y: x > y, lambda x, y: choose_backend(x, y).greater_than(x, y), 'gt', '>')

    def __ge__(self, other):
        return self._op2(other, lambda x, y: x >= y, lambda x, y: choose_backend(x, y).greater_or_equal(x, y), 'ge', '>=')

    def __lshift__(self, other):
        return self._op2(other, lambda x, y: x << y, lambda x, y: choose_backend(x, y).shift_bits_left(x, y), 'lshift', '<<')

    def __rlshift__(self, other):
        return self._op2(other, lambda y, x: x << y, lambda y, x: choose_backend(x, y).shift_bits_left(x, y), 'lshift', '<<')

    def __rshift__(self, other):
        return self._op2(other, lambda x, y: x >> y, lambda x, y: choose_backend(x, y).shift_bits_right(x, y), 'rshift', '>>')

    def __rrshift__(self, other):
        return self._op2(other, lambda y, x: x >> y, lambda y, x: choose_backend(x, y).shift_bits_right(x, y), 'rshift', '>>')

    def __abs__(self):
        return self._op1(lambda t: choose_backend(t).abs(t))

    def __round__(self, n=None):
        return self._op1(lambda t: choose_backend(t).round(t))

    def __copy__(self):
        return self._op1(lambda t: choose_backend(t).copy(t, only_mutable=True))

    def __deepcopy__(self, memodict={}):
        return self._op1(lambda t: choose_backend(t).copy(t, only_mutable=False))

    def __neg__(self) -> 'Tensor':
        return self._op1(lambda t: -t)

    def __invert__(self) -> 'Tensor':
        return self._op1(lambda t: choose_backend(t).invert(t))

    def __reversed__(self):
        assert self.shape.channel.rank == 1
        return self[::-1]

    def __iter__(self):
        if self.rank == 1:
            return iter(self.native())
        elif self.rank == 0:
            return iter([self.native()])
        else:
            from ._ops import reshaped_native
            native = reshaped_native(self, [self.shape])
            return iter(native)

    def __matmul__(self, other):
        assert isinstance(other, Tensor), f"Matmul '@' requires two Tensor arguments but got {type(other)}"
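        # Contracts the dual dims of this tensor against the matching primal dims of `other`
        # (or its remaining non-batch, non-dual dim): both sides are packed into a
        # temporary '_reduce' dim and contracted via dot() below.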
        match_names = self.shape.dual.as_batch().names
        if not match_names:  # this is not a matrix
            assert self.shape.primal.only(other.shape).is_empty, f"Cannot compute matmul {self.shape} @ {other.shape}. First argument is not a matrix; it has no dual dimensions."
            return self * other
        match_primal = other.shape.only(match_names, reorder=True)
        if not match_primal:
            assert non_batch(other).non_dual.rank == 1, f"Cannot multiply {self.shape} @ {other.shape} because arg2 does not have appropriate non-dual dimensions"
            match_primal = non_batch(other).non_dual
        match_dual = self.shape.dual.only(match_primal.as_dual(), reorder=True)
        left_arg = pack_dims(self, match_dual, dual('_reduce'))
        right_arg = pack_dims(other, match_primal, channel('_reduce'))
        from ._ops import dot
        return dot(left_arg, '~_reduce', right_arg, '_reduce')

    # def __rmatmul__(self, other):

    def _tensor(self, other) -> 'Tensor':
        if isinstance(other, Tensor):
            return other
        elif isinstance(other, (tuple, list)) and any(isinstance(v, Tensor) for v in other):
            if 'vector' in self.shape:
                outer_dim = self.shape['vector']
            elif self.shape.channel_rank == 1:
                outer_dim = self.shape.channel
            else:
                raise ValueError(f"Cannot combine tensor of shape {self.shape} with tuple {tuple([type(v).__name__ for v in other])}")
            remaining_shape = self.shape.without(outer_dim)
            other_items = [v if isinstance(v, Tensor) else compatible_tensor(v, compat_shape=remaining_shape, compat_natives=self._natives(), convert=False) for v in other]
            other_stacked = stack(other_items, outer_dim, expand_values=True)
            return other_stacked
        else:
            return compatible_tensor(other, compat_shape=self.shape, compat_natives=self._natives(), convert=False)

    def _op1(self, native_function) -> 'Tensor':
        """
        Transform the values of this tensor given a function that can be applied to any native tensor.

        Args:
          native_function:

        Returns:

        """
        raise NotImplementedError(self.__class__)

    def _op2(self, other, operator: Callable, native_function: Callable, op_name: str = 'unknown', op_symbol: str = '?') -> 'Tensor':
        """
        Apply a broadcast operation on two tensors.

        Args:
            other: second argument
            operator: function (Tensor, Tensor) -> Tensor, used to propagate the operation to children tensors to have Python choose the callee
            native_function: function (native tensor, native tensor) -> native tensor
            op_name: Name of the python function without leading and trailing `__`.
                Examples: 'add', 'radd', 'sub', 'mul', 'and', 'eq', 'ge'.
            op_symbol: Operation symbol, such as '+', '-', '&', '%', '>='

        Returns:
            `Tensor`
        """
        raise NotImplementedError(self.__class__)

    def _natives(self) -> tuple:
        raise NotImplementedError(self.__class__)

    def _spec_dict(self) -> dict:
        raise NotImplementedError(self.__class__)

    @classmethod
    def _from_spec_and_natives(cls, spec: dict, natives: list):
        raise NotImplementedError(cls)

    def _simplify(self):
        """ Does not cache this value but if it is already cached, returns the cached version. """
        return self

Subclasses

  • phiml.math._sparse.CompressedSparseMatrix
  • phiml.math._sparse.SparseCoordinateTensor
  • phiml.math._tensors.Layout
  • phiml.math._tensors.NativeTensor
  • phiml.math._tensors.TensorStack
  • phiml.math._trace.GatherLinTracer
  • phiml.math._trace.ShiftLinTracer
  • phiml.math._trace.SparseLinTracer

Instance variables

var all

Whether all values of this Tensor are True as a native bool.

Expand source code
@property
def all(self):
    """ Whether all values of this `Tensor` are `True` as a native bool. """
    from ._ops import all_, cast
    if self.rank == 0:
        return cast(self, DType(bool)).native()
    else:
        return all_(self, dim=self.shape).native()
var any

Whether this Tensor contains a True value as a native bool.

Expand source code
@property
def any(self):
    """ Whether this `Tensor` contains a `True` value as a native bool. """
    from ._ops import any_, cast
    if self.rank == 0:
        return cast(self, DType(bool)).native()
    else:
        return any_(self, dim=self.shape).native()
var available : bool

A tensor is available if it stores concrete values and these can currently be read.

Tracers used inside jit compilation are typically not available.

See Also: jit_compile().

Expand source code
@property
def available(self) -> bool:
    """
    A tensor is available if it stores concrete values and these can currently be read.

    Tracers used inside jit compilation are typically not available.

    See Also:
        `phiml.math.jit_compile()`.
    """
    if self._is_tracer:
        return False
    natives = self._natives()
    natives_available = [choose_backend(native).is_available(native) for native in natives]
    return all(natives_available)
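
A short illustrative check (assuming the default NumPy backend, where concrete values can always be read):

from phiml import math

t = math.wrap(1.)
assert t.available  # concrete values are readable
# Inside functions traced by phiml.math.jit_compile() with TensorFlow, PyTorch or JAX,
# tensors are typically tracers and report available == False.
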
var default_backend : phiml.backend._backend.Backend
Expand source code
@property
def default_backend(self) -> Backend:
    from ._ops import choose_backend_t
    return choose_backend_t(self)
var device : Optional[phiml.backend._backend.ComputeDevice]

Returns the ComputeDevice that this tensor is allocated on. The device belongs to this tensor's default_backend.

See Also: Tensor.default_backend.

Expand source code
@property
def device(self) -> Union[ComputeDevice, None]:
    """
    Returns the `ComputeDevice` that this tensor is allocated on.
    The device belongs to this tensor's `default_backend`.

    See Also:
        `Tensor.default_backend`.
    """
    natives = self._natives()
    if not natives:
        return None
    return self.default_backend.get_device(natives[0])
var dtype : phiml.backend._dtype.DType

Data type of the elements of this Tensor.

Expand source code
@property
def dtype(self) -> DType:
    """ Data type of the elements of this `Tensor`. """
    raise NotImplementedError(self.__class__)
var finite_max

Maximum finite value of this Tensor as a native scalar.

Expand source code
@property
def finite_max(self):
    """ Maximum finite value of this `Tensor` as a native scalar. """
    from ._ops import finite_max
    return finite_max(self, dim=self.shape).native()
var finite_mean

Mean value of all finite values in this Tensor as a native scalar.

Expand source code
@property
def finite_mean(self):
    """ Mean value of all finite values in this `Tensor` as a native scalar. """
    from ._ops import finite_mean
    return finite_mean(self, dim=self.shape).native()
var finite_min

Minimum finite value of this Tensor as a native scalar.

Expand source code
@property
def finite_min(self):
    """ Minimum finite value of this `Tensor` as a native scalar. """
    from ._ops import finite_min
    return finite_min(self, dim=self.shape).native()
var finite_sum

Sum of all finite values of this Tensor as a native scalar.

Expand source code
@property
def finite_sum(self):
    """ Sum of all finite values of this `Tensor` as a native scalar. """
    from ._ops import finite_sum
    return finite_sum(self, dim=self.shape).native()
var imag : phiml.math._tensors.Tensor

Returns the imaginary part of this tensor. If this tensor does not store complex numbers, returns a zero tensor with the same shape and dtype as this tensor.

See Also: imag()

Expand source code
@property
def imag(self) -> 'Tensor':
    """
    Returns the imaginary part of this tensor.
    If this tensor does not store complex numbers, returns a zero tensor with the same shape and dtype as this tensor.

    See Also:
        `phiml.math.imag()`
    """
    from ._ops import imag
    return imag(self)
var max

Maximum value of this Tensor as a native scalar.

Expand source code
@property
def max(self):
    """ Maximum value of this `Tensor` as a native scalar. """
    from ._ops import max_
    return max_(self, dim=self.shape).native()
var mean

Mean value of this Tensor as a native scalar.

Expand source code
@property
def mean(self):
    """ Mean value of this `Tensor` as a native scalar. """
    from ._ops import mean
    return mean(self, dim=self.shape).native()
var min

Minimum value of this Tensor as a native scalar.

Expand source code
@property
def min(self):
    """ Minimum value of this `Tensor` as a native scalar. """
    from ._ops import min_
    return min_(self, dim=self.shape).native()
var rank : int

Number of explicit dimensions of this Tensor. Equal to tensor.shape.rank. This replaces numpy.ndarray.ndim / torch.Tensor.dim / tf.rank() / jax.numpy.ndim().

Expand source code
@property
def rank(self) -> int:
    """
    Number of explicit dimensions of this `Tensor`. Equal to `tensor.shape.rank`.
    This replaces [`numpy.ndarray.ndim`](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.ndim.html) /
    [`torch.Tensor.dim`](https://pytorch.org/docs/master/generated/torch.Tensor.dim.html) /
    [`tf.rank()`](https://www.tensorflow.org/api_docs/python/tf/rank) /
    [`jax.numpy.ndim()`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.ndim.html).
    """
    return self.shape.rank
var real : phiml.math._tensors.Tensor

Returns the real part of this tensor.

See Also: real()

Expand source code
@property
def real(self) -> 'Tensor':
    """
    Returns the real part of this tensor.

    See Also:
        `phiml.math.real()`
    """
    from ._ops import real
    return real(self)
var shape : phiml.math._shape.Shape

The Shape lists the dimensions with their sizes, names and types.

Expand source code
@property
def shape(self) -> Shape:
    """ The `Shape` lists the dimensions with their sizes, names and types. """
    raise NotImplementedError(self.__class__)
var std

Standard deviation of this Tensor as a native scalar.

Expand source code
@property
def std(self):
    """ Standard deviation of this `Tensor` as a native scalar. """
    from ._ops import std
    return std(self, dim=self.shape).native()
var sum

Sum of all values of this Tensor as a native scalar.

Expand source code
@property
def sum(self):
    """ Sum of all values of this `Tensor` as a native scalar. """
    from ._ops import sum_
    return sum_(self, dim=self.shape).native()
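
A short illustrative example of the scalar reduction properties above (values chosen for demonstration):

from phiml.math import wrap, spatial

t = wrap([[1., 2.], [3., float('nan')]], spatial('x,y'))
t.sum         # NaN, because one entry is not finite
t.finite_sum  # 6.0, ignoring the non-finite entry
t.mean, t.min, t.max  # likewise reduce over all dimensions to native scalars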

Methods

def dimension(self, name: Union[str, phiml.math._shape.Shape]) ‑> phiml.math._tensors.TensorDim

Returns a reference to a specific dimension of this tensor. This is equivalent to the syntax tensor.<name>.

The dimension need not be part of the Tensor.shape, in which case its size is 1.

Args

name
dimension name

Returns

TensorDim corresponding to a dimension of this tensor

Expand source code
def dimension(self, name: Union[str, Shape]) -> 'TensorDim':
    """
    Returns a reference to a specific dimension of this tensor.
    This is equivalent to the syntax `tensor.<name>`.

    The dimension need not be part of the `Tensor.shape`, in which case its size is 1.

    Args:
        name: dimension name

    Returns:
        `TensorDim` corresponding to a dimension of this tensor
    """
    if isinstance(name, str):
        return TensorDim(self, name)
    elif isinstance(name, Shape):
        return TensorDim(self, name.name)
    else:
        raise ValueError(name)
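
For example (illustrative), tensor.dimension('x') returns the same TensorDim as the attribute shorthand tensor.x:

from phiml.math import wrap, spatial

t = wrap([1., 2., 3.], spatial('x'))
assert t.dimension('x').size == t.x.size == 3
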
def native(self, order: Union[phiml.math._shape.Shape, tuple, list, str] = None, singleton_for_const=False)

Returns a native tensor object with the dimensions ordered according to order.

Transposes the underlying tensor to match the name order and adds singleton dimensions for new dimension names. If a dimension of the tensor is not listed in order, a ValueError is raised.

Args

order
(Optional) Order of dimension names as comma-separated string, list or Shape.
singleton_for_const
If True, dimensions along which values are guaranteed to be constant will not be expanded to their true size but returned as singleton dimensions.

Returns

Native tensor representation, such as PyTorch tensor or NumPy array.

Raises

ValueError if the tensor cannot be transposed to match order

Expand source code
def native(self, order: Union[str, tuple, list, Shape] = None, singleton_for_const=False):
    """
    Returns a native tensor object with the dimensions ordered according to `order`.
    
    Transposes the underlying tensor to match the name order and adds singleton dimensions for new dimension names.
    If a dimension of the tensor is not listed in `order`, a `ValueError` is raised.

    Args:
        order: (Optional) Order of dimension names as comma-separated string, list or `Shape`.
        singleton_for_const: If `True`, dimensions along which values are guaranteed to be constant will not be expanded to their true size but returned as singleton dimensions.

    Returns:
        Native tensor representation, such as PyTorch tensor or NumPy array.

    Raises:
        ValueError if the tensor cannot be transposed to match `order`
    """
    raise NotImplementedError(self.__class__)
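
A minimal usage sketch (dimension names are illustrative):

from phiml.math import wrap, batch, spatial

t = wrap([[1, 2, 3]], batch('b'), spatial('x'))
arr = t.native('b,x')  # backend array with axis order (b, x), here of shape (1, 3)
# Per the docstring above, names listed in `order` but absent from the tensor
# would be added as singleton axes.
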
def numpy(self, order: Union[phiml.math._shape.Shape, tuple, list, str] = None) ‑> numpy.ndarray

Converts this tensor to a numpy.ndarray with dimensions ordered according to order.

Note: Using this function breaks the autograd chain. The returned tensor is not differentiable. To get a differentiable tensor, use Tensor.native() instead.

Transposes the underlying tensor to match the name order and adds singleton dimensions for new dimension names. If a dimension of the tensor is not listed in order, a ValueError is raised.

If this Tensor is backed by a NumPy array, a reference to this array may be returned.

See Also: numpy()

Args

order
(Optional) Order of dimension names as comma-separated string, list or Shape.

Returns

NumPy representation

Raises

ValueError if the tensor cannot be transposed to match order

Expand source code
def numpy(self, order: Union[str, tuple, list, Shape] = None) -> np.ndarray:
    """
    Converts this tensor to a `numpy.ndarray` with dimensions ordered according to `order`.
    
    *Note*: Using this function breaks the autograd chain. The returned tensor is not differentiable.
    To get a differentiable tensor, use `Tensor.native()` instead.
    
    Transposes the underlying tensor to match the name order and adds singleton dimensions for new dimension names.
    If a dimension of the tensor is not listed in `order`, a `ValueError` is raised.

    If this `Tensor` is backed by a NumPy array, a reference to this array may be returned.

    See Also:
        `phiml.math.numpy()`

    Args:
        order: (Optional) Order of dimension names as comma-separated string, list or `Shape`.

    Returns:
        NumPy representation

    Raises:
        ValueError if the tensor cannot be transposed to match `order`
    """
    native = self.native(order=order)
    return choose_backend(native).numpy(native)
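
For example (illustrative), the result is a plain NumPy array and gradients do not flow through it, unlike Tensor.native():

from phiml.math import wrap, batch, spatial

t = wrap([[1., 2., 3.]], batch('b'), spatial('x'))
a = t.numpy('b,x')  # numpy.ndarray of shape (1, 3)
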
def pack(self, dims, packed_dim)
Expand source code
def pack(self, dims, packed_dim):
    """ See `pack_dims()` """
    from ._ops import pack_dims
    return pack_dims(self, dims, packed_dim)
def unpack(self, dim, unpacked_dims)
Expand source code
def unpack(self, dim, unpacked_dims):
    """ See `unpack_dim()` """
    from ._ops import unpack_dim
    return unpack_dim(self, dim, unpacked_dims)
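
A short illustrative round trip through pack and unpack (dimension names chosen for demonstration):

from phiml.math import wrap, spatial, instance

t = wrap([[0., 1., 2.], [3., 4., 5.]], spatial('x,y'))
flat = t.pack(spatial, instance('points'))       # x,y packed into a 'points' dim of size 6
back = flat.unpack('points', spatial(x=2, y=3))  # restores the original x,y layout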