Module phi.field

The fields module provides a number of data structures and functions to represent continuous, spatially varying data.

All fields are subclasses of Field which provides abstract functions for sampling field values at physical locations.

The most important field types are:

  • CenteredGrid embeds a tensor in the physical space. Uses linear interpolation between grid points.
  • StaggeredGrid samples the vector components at face centers instead of at cell centers.
  • Noise is a function that produces a procedurally generated noise field

Use grid() to create a Grid from data or by sampling another Field or Geometry. Alternatively, the phi.physics.Domain class provides convenience methods for grid creation.

All fields can be sampled at physical locations or volumes using sample() or reduce_sample().

See the phi.field module documentation at https://tum-pbs.github.io/PhiFlow/Fields.html

Expand source code
"""
The fields module provides a number of data structures and functions to represent continuous, spatially varying data.

All fields are subclasses of `Field` which provides abstract functions for sampling field values at physical locations.

The most important field types are:

* `CenteredGrid` embeds a tensor in the physical space. Uses linear interpolation between grid points.
* `StaggeredGrid` samples the vector components at face centers instead of at cell centers.
* `Noise` is a function that produces a procedurally generated noise field

Use `grid()` to create a `Grid` from data or by sampling another `Field` or `phi.geom.Geometry`.
Alternatively, the `phi.physics.Domain` class provides convenience methods for grid creation.

All fields can be sampled at physical locations or volumes using `sample()` or `reduce_sample()`.

See the `phi.field` module documentation at https://tum-pbs.github.io/PhiFlow/Fields.html
"""

from ._field import Field, SampledField, unstack, sample, reduce_sample
from ._constant import ConstantField
from ._mask import HardGeometryMask, SoftGeometryMask as GeometryMask, SoftGeometryMask
from ._grid import Grid, CenteredGrid, StaggeredGrid
from ._point_cloud import PointCloud
from ._noise import Noise
from ._angular_velocity import AngularVelocity
from phi.math import (
    abs, sign, round, ceil, floor, sqrt, exp, isfinite, real, imag, sin, cos, cast, to_float, to_int32, to_int64, convert,
    stop_gradient,
    jit_compile, jit_compile_linear, functional_gradient,
    solve_linear, solve_nonlinear, minimize,
    l2_loss, l1_loss, frequency_loss,
)
from ._field_math import (
    assert_close,
    bake_extrapolation,
    laplace, spatial_gradient, divergence, stagger, curl,  # spatial operators
    fourier_poisson, fourier_laplace,
    mean, pad, shift, normalize, center_of_mass,
    concat, stack,
    where,
    vec_squared, vec_abs,
    downsample2x, upsample2x,
    extrapolate_valid,
    native_call,
    integrate,
)
from ._field_io import write, read
from ._scene import Scene

# Export every public (non-underscore) name defined or imported above.
__all__ = [key for key in globals().keys() if not key.startswith('_')]

# pdoc configuration: hide constructors that are not part of the public API docs.
__pdoc__ = {
    'Grid.__init__': False,
    'Scene.__init__': False,
}

Functions

def abs(x) ‑> phi.math._tensors.Tensor

Computes the absolute value *‖x‖₁*. Complex x results in matching-precision float values.

Note: The gradient of this operation is undefined for x=0. TensorFlow and PyTorch return 0 while Jax returns 1.

Args

x
Tensor or TensorLike

Returns

Absolute value of x of same type as x.

Expand source code
def abs_(x) -> Tensor:
    """
    Computes the element-wise absolute value *|x|*.
    For complex `x`, the result is a float tensor of matching precision.

    *Note*: The gradient of this operation is undefined for *x=0*.
    TensorFlow and PyTorch return 0 while Jax returns 1.

    Args:
        x: `Tensor` or `TensorLike`

    Returns:
        Absolute value of `x` of same type as `x`.
    """
    backend_function = Backend.abs
    return _backend_op1(x, backend_function)
def assert_close(*fields: phi.field._field.SampledField, rel_tolerance: float = 1e-05, abs_tolerance: float = 0, msg: str = '', verbose: bool = True)

Raises an AssertionError if the values of the given fields are not close. See assert_close().

Expand source code
def assert_close(*fields: SampledField or math.Tensor or Number,
                 rel_tolerance: float = 1e-5,
                 abs_tolerance: float = 0,
                 msg: str = "",
                 verbose: bool = True):
    """ Raises an AssertionError if the `values` of the given fields are not close. See `phi.math.assert_close()`. """
    # Use the first SampledField as the common sample-point reference for all others.
    reference = next(f for f in fields if isinstance(f, SampledField))
    tensors = []
    for f in fields:
        if isinstance(f, SampledField):
            tensors.append((f @ reference).values)  # resample onto reference points
        else:
            tensors.append(math.wrap(f))
    math.assert_close(*tensors, rel_tolerance=rel_tolerance, abs_tolerance=abs_tolerance, msg=msg, verbose=verbose)
def bake_extrapolation(grid: ~GridType) ‑> ~GridType

Pads grid with its current extrapolation. For StaggeredGrids, the resulting grid will have a consistent shape, independent of the original extrapolation.

Args

grid
CenteredGrid or StaggeredGrid.

Returns

Padded grid with extrapolation NONE.

Expand source code
def bake_extrapolation(grid: GridType) -> GridType:
    """
    Pads `grid` with its current extrapolation.
    For `StaggeredGrid`s, the resulting grid will have a consistent shape, independent of the original extrapolation.

    Args:
        grid: `CenteredGrid` or `StaggeredGrid`.

    Returns:
        Padded grid with extrapolation `phi.math.extrapolation.NONE`.
    """
    if grid.extrapolation == math.extrapolation.NONE:
        return grid  # already baked, nothing to pad
    if isinstance(grid, StaggeredGrid):
        values = grid.values.unstack('vector')
        padded = []
        for dim, value in zip(grid.shape.spatial.names, values):
            # Pad only the outer faces that are NOT stored under the current extrapolation,
            # so each component ends up with a consistent (resolution+1) layout along its own dim.
            lower, upper = grid.extrapolation.valid_outer_faces(dim)
            padded.append(math.pad(value, {dim: (0 if lower else 1, 0 if upper else 1)}, grid.extrapolation))
        return StaggeredGrid(math.stack(padded, channel('vector')), bounds=grid.bounds, extrapolation=math.extrapolation.NONE)
    elif isinstance(grid, CenteredGrid):
        # One ghost cell on every side holds the evaluated extrapolation values.
        return pad(grid, 1).with_extrapolation(math.extrapolation.NONE)
    else:
        raise ValueError(f"Not a valid grid: {grid}")
def cast(x: phi.math._tensors.Tensor, dtype: phi.math.backend._dtype.DType) ‑> phi.math._tensors.Tensor

Casts x to a different data type.

Implementations:

See Also: to_float(), to_int32(), to_int64(), to_complex.

Args

x
Tensor
dtype
New data type as DType, e.g. DType(int, 16).

Returns

Tensor with data type dtype

Expand source code
def cast(x: Tensor, dtype: DType) -> Tensor:
    """
    Casts `x` to a different data type.

    Implementations:

    * NumPy: [`x.astype()`](numpy.ndarray.astype)
    * PyTorch: [`x.to()`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.to)
    * TensorFlow: [`tf.cast`](https://www.tensorflow.org/api_docs/python/tf/cast)
    * Jax: [`jax.numpy.array`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.array.html)

    See Also:
        `to_float`, `to_int32`, `to_int64`, `to_complex`.

    Args:
        x: `Tensor`
        dtype: New data type as `phi.math.DType`, e.g. `DType(int, 16)`.

    Returns:
        `Tensor` with data type `dtype`
    """
    def cast_native(native):
        # Delegate the actual cast to whichever backend owns the native tensor.
        return choose_backend(native).cast(native, dtype=dtype)
    return x._op1(cast_native)
def ceil(x) ‑> phi.math._tensors.Tensor

Computes ⌈x⌉ of the Tensor or TensorLike x.

Expand source code
def ceil(x) -> Tensor:
    """ Rounds the `Tensor` or `TensorLike` `x` up to the nearest integer, computing *⌈x⌉*. """
    backend_function = Backend.ceil
    return _backend_op1(x, backend_function)
def center_of_mass(density: phi.field._field.SampledField)

Compute the center of mass of a density field.

Args

density
Scalar SampledField

Returns

Tensor holding only batch dimensions.

Expand source code
def center_of_mass(density: SampledField):
    """
    Compute the center of mass of a density field.

    Args:
        density: Scalar `SampledField`

    Returns:
        `Tensor` holding only batch dimensions.
    """
    assert 'vector' not in density.shape  # must be a scalar field
    # Density-weighted mean position, normalized by the mean density.
    weighted_positions = density.points * density
    return mean(weighted_positions) / mean(density)
def concat(fields: List[~SampledFieldType], dim: phi.math._shape.Shape) ‑> ~SampledFieldType

Concatenates the given SampledFields along dim.

See Also: stack().

Args

fields
List of matching SampledField instances.
dim
Concatenation dimension as Shape. Size is ignored.

Returns

SampledField matching concatenated fields.

Expand source code
def concat(fields: List[SampledFieldType], dim: Shape) -> SampledFieldType:
    """
    Concatenates the given `SampledField`s along `dim`.

    See Also:
        `stack()`.

    Args:
        fields: List of matching `SampledField` instances.
        dim: Concatenation dimension as `Shape`. Size is ignored.

    Returns:
        `SampledField` matching concatenated fields.
    """
    assert all(isinstance(f, SampledField) for f in fields)
    assert all(isinstance(f, type(fields[0])) for f in fields)  # all fields must share one concrete type
    if any(f.extrapolation != fields[0].extrapolation for f in fields):
        raise NotImplementedError("Concatenating extrapolations not supported")
    if isinstance(fields[0], Grid):
        values = math.concat([f.values for f in fields], dim)
        return fields[0].with_values(values)
    elif isinstance(fields[0], PointCloud):
        # Fields may hold different point counts along dim, so pass explicit sizes to the geometry concat.
        elements = geom.concat([f.elements for f in fields], dim, sizes=[f.shape.get_size(dim) for f in fields])
        # expand() ensures values/colors carry `dim` before concatenation.
        values = math.concat([math.expand(f.values, f.shape.only(dim)) for f in fields], dim)
        colors = math.concat([math.expand(f.color, f.shape.only(dim)) for f in fields], dim)
        return PointCloud(elements=elements, values=values, color=colors, extrapolation=fields[0].extrapolation, add_overlapping=fields[0]._add_overlapping, bounds=fields[0].bounds)
    raise NotImplementedError(type(fields[0]))
def convert(x, backend: phi.math.backend._backend.Backend = None, use_dlpack=True)

Convert the native representation of a Tensor or TensorLike to the native format of backend.

Warning: This operation breaks the automatic differentiation chain.

See Also: convert().

Args

x
Tensor to convert. If x is a TensorLike, its variable attributes are converted.
backend
Target backend. If None, uses the current default backend, see default_backend().

Returns

Tensor with native representation belonging to backend.

Expand source code
def convert(x, backend: Backend = None, use_dlpack=True):
    """
    Convert the native representation of a `Tensor` or `TensorLike` to the native format of `backend`.

    *Warning*: This operation breaks the automatic differentiation chain.

    See Also:
        `phi.math.backend.convert()`.

    Args:
        x: `Tensor` to convert. If `x` is a `TensorLike`, its variable attributes are converted.
        backend: Target backend. If `None`, uses the current default backend, see `phi.math.backend.default_backend()`.

    Returns:
        `Tensor` with native representation belonging to `backend`.
    """
    if isinstance(x, Tensor):
        def to_target(native):
            return b_convert(native, backend, use_dlpack=use_dlpack)
        return x._op1(to_target)
    if isinstance(x, TensorLike):
        # Recursively convert each variable attribute and rebuild the structure.
        converted = {a: convert(getattr(x, a), backend, use_dlpack=use_dlpack) for a in variable_attributes(x)}
        return copy_with(x, **converted)
    return choose_backend(x).as_tensor(x)
def cos(x) ‑> phi.math._tensors.Tensor

Computes cos(x) of the Tensor or TensorLike x.

Expand source code
def cos(x) -> Tensor:
    """ Element-wise cosine, *cos(x)*, of the `Tensor` or `TensorLike` `x`. """
    backend_function = Backend.cos
    return _backend_op1(x, backend_function)
def curl(field: phi.field._grid.Grid, type: type = phi.field._grid.CenteredGrid)

Computes the finite-difference curl of the given 2D field, producing a StaggeredGrid.

Expand source code
def curl(field: Grid, type: type = CenteredGrid):
    """
    Computes the finite-difference curl of the given `Grid`.

    Note: Currently only the 2D case producing a `StaggeredGrid` from a scalar
    `CenteredGrid` is implemented; all other combinations raise `NotImplementedError`.
    """
    assert field.spatial_rank in (2, 3), "curl is only defined in 2 and 3 spatial dimensions."
    if field.spatial_rank == 2 and type == StaggeredGrid:
        assert isinstance(field, CenteredGrid) and 'vector' not in field.shape, f"2D curl requires scalar field but got {field}"
        grad = math.spatial_gradient(field.values, dx=field.dx, difference='forward', padding=None, stack_dim=channel('vector'))
        result = grad.vector.flip() * (1, -1)  # (d/dy, -d/dx)
        bounds = Box(field.bounds.lower + 0.5 * field.dx, field.bounds.upper - 0.5 * field.dx)  # lose 1 cell per dimension
        return StaggeredGrid(result, bounds=bounds, extrapolation=field.extrapolation.spatial_gradient())
    raise NotImplementedError()
def divergence(field: phi.field._grid.Grid) ‑> phi.field._grid.CenteredGrid

Computes the divergence of a grid using finite differences.

This function can operate in two modes depending on the type of field:

  • CenteredGrid approximates the divergence at cell centers using central differences
  • StaggeredGrid exactly computes the divergence at cell centers

Args

field
vector field as CenteredGrid or StaggeredGrid

Returns

Divergence field as CenteredGrid

Expand source code
def divergence(field: Grid) -> CenteredGrid:
    """
    Computes the divergence of a grid using finite differences.

    This function can operate in two modes depending on the type of `field`:

    * `CenteredGrid` approximates the divergence at cell centers using central differences
    * `StaggeredGrid` exactly computes the divergence at cell centers

    Args:
        field: vector field as `CenteredGrid` or `StaggeredGrid`

    Returns:
        Divergence field as `CenteredGrid`
    """
    if isinstance(field, StaggeredGrid):
        field = bake_extrapolation(field)  # materialize boundary faces so forward differences are valid everywhere
        components = []
        for i, dim in enumerate(field.shape.spatial.names):
            # Forward difference of the i-th component along its own dimension (exact on staggered samples).
            div_dim = math.spatial_gradient(field.values.vector[i], dx=field.dx[i], difference='forward', padding=None, dims=[dim]).gradient[0]
            components.append(div_dim)
        data = math.sum(components, dim='0')
        return CenteredGrid(data, bounds=field.bounds, extrapolation=field.extrapolation.spatial_gradient())
    elif isinstance(field, CenteredGrid):
        left, right = shift(field, (-1, 1), stack_dim=batch('div_'))
        grad = (right - left) / (field.dx * 2)  # central difference
        components = [grad.vector[i].div_[i] for i in range(grad.div_.size)]  # pick d(u_i)/d(x_i)
        result = sum(components)
        return result
    else:
        # BUG FIX: previous message claimed only StaggeredGrid was allowed, but CenteredGrid is also supported.
        raise NotImplementedError(f"{type(field)} not supported. Only CenteredGrid and StaggeredGrid are allowed.")
def downsample2x(grid: phi.field._grid.Grid) ‑> ~GridType

Reduces the number of sample points by a factor of 2 in each spatial dimension. The new values are determined via linear interpolation.

See Also: upsample2x().

Args

grid
CenteredGrid or StaggeredGrid.

Returns

Grid of same type as grid.

Expand source code
def downsample2x(grid: Grid) -> GridType:
    """
    Reduces the number of sample points by a factor of 2 in each spatial dimension.
    The new values are determined via linear interpolation.

    See Also:
        `upsample2x()`.

    Args:
        grid: `CenteredGrid` or `StaggeredGrid`.

    Returns:
        `Grid` of same type as `grid`.
    """
    if isinstance(grid, CenteredGrid):
        values = math.downsample2x(grid.values, grid.extrapolation)
        return CenteredGrid(values, bounds=grid.bounds, extrapolation=grid.extrapolation)
    elif isinstance(grid, StaggeredGrid):
        values = []
        for dim, centered_grid in zip(grid.shape.spatial.names, unstack(grid, 'vector')):
            # Along its own dimension, a face-sampled component is coarsened by keeping every other face...
            odd_discarded = centered_grid.values[{dim: slice(None, None, 2)}]
            # ...while the remaining dimensions are averaged via linear interpolation.
            others_interpolated = math.downsample2x(odd_discarded, grid.extrapolation, dims=grid.shape.spatial.without(dim))
            values.append(others_interpolated)
        return StaggeredGrid(math.stack(values, channel('vector')), bounds=grid.bounds, extrapolation=grid.extrapolation)
    else:
        raise ValueError(type(grid))
def exp(x) ‑> phi.math._tensors.Tensor

Computes exp(x) of the Tensor or TensorLike x.

Expand source code
def exp(x) -> Tensor:
    """ Element-wise exponential, *exp(x)*, of the `Tensor` or `TensorLike` `x`. """
    backend_function = Backend.exp
    return _backend_op1(x, backend_function)
def extrapolate_valid(grid: ~GridType, valid: ~GridType, distance_cells=1) ‑> tuple

Extrapolates values of grid which are marked by nonzero values in valid using phi.math.extrapolate_valid_values(). If grid is a StaggeredGrid, its components get extrapolated independently.

Args

grid
Grid holding the values for extrapolation
valid
Grid (same type as values) marking the positions for extrapolation with nonzero values
distance_cells
Number of extrapolation steps

Returns

grid
Grid with extrapolated values.
valid
binary Grid marking all valid values after extrapolation.
Expand source code
def extrapolate_valid(grid: GridType, valid: GridType, distance_cells=1) -> tuple:
    """
    Extrapolates values of `grid` which are marked by nonzero values in `valid` using `phi.math.extrapolate_valid_values()`.
    If `grid` is a StaggeredGrid, its components get extrapolated independently.

    Args:
        grid: Grid holding the values for extrapolation
        valid: Grid (same type as `grid`) marking the positions for extrapolation with nonzero values
        distance_cells: Number of extrapolation steps

    Returns:
        grid: Grid with extrapolated values.
        valid: binary Grid marking all valid values after extrapolation.
    """
    assert isinstance(valid, type(grid)), 'Type of valid Grid must match type of grid.'
    if isinstance(grid, CenteredGrid):
        new_values, new_valid = extrapolate_valid_values(grid.values, valid.values, distance_cells)
        return grid.with_values(new_values), valid.with_values(new_valid)
    elif isinstance(grid, StaggeredGrid):
        new_values = []
        new_valid = []
        # Recurse on each vector component as an independent centered grid, then re-stack.
        for cgrid, cvalid in zip(unstack(grid, 'vector'), unstack(valid, 'vector')):
            new_tensor, new_mask = extrapolate_valid(cgrid, valid=cvalid, distance_cells=distance_cells)
            new_values.append(new_tensor.values)
            new_valid.append(new_mask.values)
        return grid.with_values(math.stack(new_values, channel('vector'))), valid.with_values(math.stack(new_valid, channel('vector')))
    else:
        raise NotImplementedError()
def floor(x) ‑> phi.math._tensors.Tensor

Computes ⌊x⌋ of the Tensor or TensorLike x.

Expand source code
def floor(x) -> Tensor:
    """ Rounds the `Tensor` or `TensorLike` `x` down to the nearest integer, computing *⌊x⌋*. """
    backend_function = Backend.floor
    return _backend_op1(x, backend_function)
def fourier_laplace(grid: ~GridType, times=1) ‑> ~GridType
Expand source code
def fourier_laplace(grid: GridType, times=1) -> GridType:
    """ Applies the spectral Laplace operator `times` times; see `phi.math.fourier_laplace()`. Requires a periodic gradient extrapolation. """
    assert grid.extrapolation.spatial_gradient() == math.extrapolation.PERIODIC
    result_values = math.fourier_laplace(grid.values, dx=grid.dx, times=times)
    grid_type = type(grid)
    return grid_type(values=result_values, bounds=grid.bounds, extrapolation=grid.extrapolation)
def fourier_poisson(grid: ~GridType, times=1) ‑> ~GridType
Expand source code
def fourier_poisson(grid: GridType, times=1) -> GridType:
    """ Solves the Poisson equation in frequency space `times` times; see `phi.math.fourier_poisson()`. Requires a periodic gradient extrapolation. """
    assert grid.extrapolation.spatial_gradient() == math.extrapolation.PERIODIC
    result_values = math.fourier_poisson(grid.values, dx=grid.dx, times=times)
    grid_type = type(grid)
    return grid_type(values=result_values, bounds=grid.bounds, extrapolation=grid.extrapolation)
def frequency_loss(x, frequency_falloff: float = 100, threshold=1e-05, ignore_mean=False) ‑> phi.math._tensors.Tensor

Penalizes the squared values in frequency (Fourier) space. Lower frequencies are weighted more strongly than higher frequencies, depending on frequency_falloff.

Args

x
Tensor or TensorLike Values to penalize, typically actual - target.
frequency_falloff
Large values put more emphasis on lower frequencies, 1.0 weights all frequencies equally. Note: The total loss is not normalized. Varying the value will result in losses of different magnitudes.
threshold
Frequency amplitudes below this value are ignored. Setting this to zero may cause infinities or NaN values during backpropagation.
ignore_mean
If True, does not penalize the mean value (frequency=0 component).

Returns

Scalar loss value

Expand source code
def frequency_loss(x,
                   frequency_falloff: float = 100,
                   threshold=1e-5,
                   ignore_mean=False) -> Tensor:
    """
    Penalizes the squared values of `x` in frequency (Fourier) space.
    Lower frequencies are weighted more strongly than higher frequencies, depending on `frequency_falloff`.

    Args:
        x: `Tensor` or `TensorLike` Values to penalize, typically `actual - target`.
        frequency_falloff: Large values put more emphasis on lower frequencies, 1.0 weights all frequencies equally.
            *Note*: The total loss is not normalized. Varying the value will result in losses of different magnitudes.
        threshold: Frequency amplitudes below this value are ignored.
            Setting this to zero may cause infinities or NaN values during backpropagation.
        ignore_mean: If `True`, does not penalize the mean value (frequency=0 component).

    Returns:
      Scalar loss value
    """
    if isinstance(x, Tensor):
        if ignore_mean:
            x -= math.mean(x, x.shape.non_batch)  # removing the mean zeroes the frequency=0 component
        k_squared = vec_squared(math.fftfreq(x.shape.spatial))
        weights = math.exp(-0.5 * k_squared * frequency_falloff ** 2)  # Gaussian falloff over frequency magnitude
        diff_fft = abs_square(math.fft(x) * weights)
        diff_fft = math.sqrt(math.maximum(diff_fft, threshold))  # threshold avoids NaN gradient of sqrt at 0
        return l2_loss(diff_fft)
    elif isinstance(x, TensorLike):
        # Sum the loss over all variable value attributes of the structure.
        return sum([frequency_loss(getattr(x, a), frequency_falloff, threshold, ignore_mean) for a in variable_values(x)])
    else:
        raise ValueError(x)
def functional_gradient(f: Callable, wrt: tuple = (0,), get_output=True) ‑> Callable

Creates a function which computes the gradient of f.

Example:

def loss_function(x, y):
    prediction = f(x)
    loss = math.l2_loss(prediction - y)
    return loss, prediction

dx, = functional_gradient(loss_function, get_output=False)(x, y)

(loss, prediction), (dx, dy) = functional_gradient(loss_function,
                                    wrt=(0, 1), get_output=True)(x, y)

Functional gradients are implemented for the following backends:

When the gradient function is invoked, f is called with tensors that track the gradient. For PyTorch, arg.requires_grad = True for all positional arguments of f.

Args

f
Function to be differentiated. f must return a floating point Tensor with rank zero. It can return additional tensors which are treated as auxiliary data and will be returned by the gradient function if return_values=True. All arguments for which the gradient is computed must be of dtype float or complex.
get_output
Whether the gradient function should also return the return values of f.
wrt
Arguments of f with respect to which the gradient should be computed. Example: wrt_indices=[0] computes the gradient with respect to the first argument of f.

Returns

Function with the same arguments as f that returns the value of f, auxiliary data and gradient of f if get_output=True, else just the gradient of f.

Expand source code
def functional_gradient(f: Callable, wrt: tuple or list = (0,), get_output=True) -> Callable:
    """
    Creates a function which computes the gradient of `f`.

    Example:
    ```python
    def loss_function(x, y):
        prediction = f(x)
        loss = math.l2_loss(prediction - y)
        return loss, prediction

    dx, = functional_gradient(loss_function, get_output=False)(x, y)

    (loss, prediction), (dx, dy) = functional_gradient(loss_function,
                                        wrt=(0, 1), get_output=True)(x, y)
    ```

    Functional gradients are implemented for the following backends:

    * PyTorch: [`torch.autograd.grad`](https://pytorch.org/docs/stable/autograd.html#torch.autograd.grad) / [`torch.autograd.backward`](https://pytorch.org/docs/stable/autograd.html#torch.autograd.backward)
    * TensorFlow: [`tf.GradientTape`](https://www.tensorflow.org/api_docs/python/tf/GradientTape)
    * Jax: [`jax.grad`](https://jax.readthedocs.io/en/latest/jax.html#jax.grad)

    When the gradient function is invoked, `f` is called with tensors that track the gradient.
    For PyTorch, `arg.requires_grad = True` for all positional arguments of `f`.

    Args:
        f: Function to be differentiated.
            `f` must return a floating point `Tensor` with rank zero.
            It can return additional tensors which are treated as auxiliary data and will be returned by the gradient function if `return_values=True`.
            All arguments for which the gradient is computed must be of dtype float or complex.
        get_output: Whether the gradient function should also return the return values of `f`.
        wrt: Arguments of `f` with respect to which the gradient should be computed.
            Example: `wrt_indices=[0]` computes the gradient with respect to the first argument of `f`.

    Returns:
        Function with the same arguments as `f` that returns the value of `f`, auxiliary data and gradient of `f` if `get_output=True`, else just the gradient of `f`.
    """
    # All tracing/backend dispatch happens lazily inside GradientFunction.
    gradient_function = GradientFunction(f, wrt, get_output)
    return gradient_function
def imag(x) ‑> phi.math._tensors.Tensor

See Also: real(), conjugate().

Args

x
Tensor or TensorLike or native tensor.

Returns

Imaginary component of x if x is complex, zeros otherwise.

Expand source code
def imag(x) -> Tensor:
    """
    Extracts the imaginary part of `x`.

    See Also:
        `real()`, `conjugate()`.

    Args:
        x: `Tensor` or `TensorLike` or native tensor.

    Returns:
        Imaginary component of `x` if `x` is complex, zeros otherwise.
    """
    backend_function = Backend.imag
    return _backend_op1(x, backend_function)
def integrate(field: phi.field._field.Field, region: phi.geom._geom.Geometry) ‑> phi.math._tensors.Tensor

Computes *∫_R f(x) dxᵈ*, where f denotes the Field, R the region and d the number of spatial dimensions (d=field.shape.spatial_rank). Depending on the sample() implementation for field, the integral may be a rough approximation.

This method is currently only implemented for CenteredGrid.

Args

field
Field to integrate.
region
Region to integrate over.

Returns

Integral as Tensor

Expand source code
def integrate(field: Field, region: Geometry) -> math.Tensor:
    """
    Computes *∫<sub>R</sub> f(x) dx<sup>d</sup>* , where *f* denotes the `Field`, *R* the `region` and *d* the number of spatial dimensions (`d=field.shape.spatial_rank`).
    Depending on the `sample` implementation for `field`, the integral may be a rough approximation.

    This method is currently only implemented for `CenteredGrid`.

    Args:
        field: `Field` to integrate.
        region: Region to integrate over.

    Returns:
        Integral as `phi.math.Tensor`
    """
    if isinstance(field, CenteredGrid):
        # Mean field value over the region, scaled by the region's volume.
        return field._sample(region) * region.volume
    raise NotImplementedError()
def isfinite(x) ‑> phi.math._tensors.Tensor

Returns a Tensor or TensorLike matching x with values True where x has a finite value and False otherwise.

Expand source code
def isfinite(x) -> Tensor:
    """ Returns a boolean `Tensor` or `TensorLike` matching `x`, `True` where the entry of `x` is finite and `False` otherwise. """
    backend_function = Backend.isfinite
    return _backend_op1(x, backend_function)
def jit_compile(f: Callable) ‑> Callable

Compiles a graph based on the function f. The graph compilation is performed just-in-time (jit), e.g. when the returned function is called for the first time.

The traced function will compute the same result as f but may run much faster. Some checks may be disabled in the compiled function.

Can be used as a decorator:

@math.jit_compile
def my_function(x: math.Tensor) -> math.Tensor:

Invoking the returned function may invoke re-tracing / re-compiling f after the first call if either

  • it is called with a different number of arguments,
  • the keyword arguments differ from previous invocations,
  • the positional tensor arguments have different dimension names or types (the dimension order also counts),
  • any positional Tensor arguments require a different backend than previous invocations,
  • TensorLike positional arguments do not match in non-variable properties.

Compilation is implemented for the following backends:

Jit-compilations cannot be nested, i.e. you cannot call jit_compile() while another function is being compiled. An exception to this is jit_compile_linear() which can be called from within a jit-compiled function.

See Also: jit_compile_linear()

Args

f
Function to be traced. All positional arguments must be of type Tensor or TensorLike returning a single Tensor or TensorLike.

Returns

Function with similar signature and return values as f.

Expand source code
def jit_compile(f: Callable) -> Callable:
    """
    Compiles a graph based on the function `f`.
    The graph compilation is performed just-in-time (jit), e.g. when the returned function is called for the first time.

    The traced function will compute the same result as `f` but may run much faster.
    Some checks may be disabled in the compiled function.

    Can be used as a decorator:
    ```python
    @math.jit_compile
    def my_function(x: math.Tensor) -> math.Tensor:
    ```

    Invoking the returned function may invoke re-tracing / re-compiling `f` after the first call if either

    * it is called with a different number of arguments,
    * the keyword arguments differ from previous invocations,
    * the positional tensor arguments have different dimension names or types (the dimension order also counts),
    * any positional `Tensor` arguments require a different backend than previous invocations,
    * `TensorLike` positional arguments do not match in non-variable properties.

    Compilation is implemented for the following backends:

    * PyTorch: [`torch.jit.trace`](https://pytorch.org/docs/stable/jit.html)
    * TensorFlow: [`tf.function`](https://www.tensorflow.org/guide/function)
    * Jax: [`jax.jit`](https://jax.readthedocs.io/en/latest/notebooks/quickstart.html#using-jit-to-speed-up-functions)

    Jit-compilations cannot be nested, i.e. you cannot call `jit_compile()` while another function is being compiled.
    An exception to this is `jit_compile_linear()` which can be called from within a jit-compiled function.

    See Also:
        `jit_compile_linear()`

    Args:
        f: Function to be traced.
            All positional arguments must be of type `Tensor` or `TensorLike` returning a single `Tensor` or `TensorLike`.

    Returns:
        Function with similar signature and return values as `f`.
    """
    if isinstance(f, (JitFunction, LinearFunction)):
        return f  # already wrapped for compilation; wrapping again would be redundant
    return JitFunction(f)
def jit_compile_linear(f: Callable[[~X], ~Y]) ‑> phi.math._functional.LinearFunction[~X, ~Y]

Compile an optimized representation of the linear function f. For backends that support sparse tensors, a sparse matrix will be constructed for f.

Can be used as a decorator:

@math.jit_compile_linear
def my_linear_function(x: math.Tensor) -> math.Tensor:

Unlike jit_compile(), jit_compile_linear() can be called during a regular jit compilation.

See Also: jit_compile()

Args

f
Function that is linear in its positional arguments. All positional arguments must be of type Tensor and f must return a Tensor. f may be conditioned on keyword arguments. However, passing different values for these will cause f to be re-traced unless the conditioning arguments are also being traced.

Returns

LinearFunction with similar signature and return values as f.

Expand source code
def jit_compile_linear(f: Callable[[X], Y]) -> 'LinearFunction[X, Y]':  # TODO add cache control method, e.g. max_traces
    """
    Compile an optimized representation of the linear function `f`.
    For backends that support sparse tensors, a sparse matrix will be constructed for `f`.

    Can be used as a decorator:
    ```python
    @math.jit_compile_linear
    def my_linear_function(x: math.Tensor) -> math.Tensor:
    ```

    Unlike `jit_compile()`, `jit_compile_linear()` can be called during a regular jit compilation.

    See Also:
        `jit_compile()`

    Args:
        f: Function that is linear in its positional arguments.
            All positional arguments must be of type `Tensor` and `f` must return a `Tensor`.
            `f` may be conditioned on keyword arguments.
            However, passing different values for these will cause `f` to be re-traced unless the conditioning arguments are also being traced.

    Returns:
        `LinearFunction` with similar signature and return values as `f`.
    """
    # A jit-compiled wrapper cannot be linearly traced; unwrap to the underlying function first.
    source = f.f if isinstance(f, JitFunction) else f
    if isinstance(source, LinearFunction):
        return source
    return LinearFunction(source)
def l1_loss(x) ‑> phi.math._tensors.Tensor

Computes *∑ᵢ ‖xᵢ‖₁*, summing over all non-batch dimensions.

Args

x
Tensor or TensorLike. For TensorLike objects, only the sum over all value attributes is computed.

Returns

loss
Tensor
Expand source code
def l1_loss(x) -> Tensor:
    """
    Computes *∑<sub>i</sub> ||x<sub>i</sub>||<sub>1</sub>*, summing over all non-batch dimensions.

    Args:
        x: `Tensor` or `TensorLike`.
            For `TensorLike` objects, only the sum over all value attributes is computed.

    Returns:
        loss: `Tensor`
    """
    if isinstance(x, Tensor):
        # Reduce |x| over every dimension except batch dimensions.
        return math.sum_(abs(x), x.shape.non_batch)
    if isinstance(x, TensorLike):
        # Recurse into the structure's variable value attributes and accumulate.
        return sum(l1_loss(getattr(x, attr)) for attr in variable_values(x))
    raise ValueError(x)
def l2_loss(x) ‑> phi.math._tensors.Tensor

Computes *∑ᵢ ‖xᵢ‖₂² / 2*, summing over all non-batch dimensions.

Args

x
Tensor or TensorLike. For TensorLike objects, only the sum over all value attributes is computed.

Returns

loss
Tensor
Expand source code
def l2_loss(x) -> Tensor:
    """
    Computes *∑<sub>i</sub> ||x<sub>i</sub>||<sub>2</sub><sup>2</sup> / 2*, summing over all non-batch dimensions.

    Args:
        x: `Tensor` or `TensorLike`.
            For `TensorLike` objects, only the sum over all value attributes is computed.

    Returns:
        loss: `Tensor`
    """
    if isinstance(x, Tensor):
        # Complex values contribute their magnitude; real values are squared directly.
        magnitudes = abs(x) if x.dtype.kind == complex else x
        return math.sum_(magnitudes ** 2, x.shape.non_batch) * 0.5
    if isinstance(x, TensorLike):
        # Recurse into the structure's variable value attributes and accumulate.
        return sum(l2_loss(getattr(x, attr)) for attr in variable_values(x))
    raise ValueError(x)
def laplace(field: ~GridType, axes=None) ‑> ~GridType

Finite-difference laplace operator for Grids. See laplace().

Expand source code
def laplace(field: GridType, axes=None) -> GridType:
    """ Finite-difference laplace operator for Grids. See `phi.math.laplace()`. """
    # Delegate to the math-level laplace, using the grid spacing and extrapolation for padding.
    return field._op1(lambda values: math.laplace(values, dx=field.dx, padding=field.extrapolation, dims=axes))
def mean(field: phi.field._field.SampledField) ‑> phi.math._tensors.Tensor

Computes the mean value by reducing all spatial / instance dimensions.

Args

field
SampledField

Returns

Tensor

Expand source code
def mean(field: SampledField) -> math.Tensor:
    """
    Computes the mean value by reducing all spatial / instance dimensions.

    Args:
        field: `SampledField`

    Returns:
        `phi.math.Tensor`
    """
    # Keep batch and channel dimensions; average over everything else.
    reduce_dims = field.shape.non_channel.non_batch
    return math.mean(field.values, reduce_dims)
def minimize(f: Callable[[~X], ~Y], solve: phi.math._functional.Solve[~X, ~Y]) ‑> ~X

Finds a minimum of the scalar function f(x). The method argument of solve determines which method is used. All methods supported by scipy.optimize.minimize are supported, see https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html .

This method is limited to backends that support functional_gradient(), currently PyTorch, TensorFlow and Jax.

To obtain additional information about the performed solve, use a SolveTape.

See Also: solve_nonlinear().

Args

f
Function whose output is subject to minimization. All positional arguments of f are optimized and must be Tensor or TensorLike. The first return value of f must be a scalar float Tensor or TensorLike.
solve
Solve object to specify method type, parameters and initial guess for x.

Returns

x
solution, the minimum point x.

Raises

NotConverged
If the desired accuracy was not reached within the maximum number of iterations.
Diverged
If the optimization failed prematurely.
Expand source code
def minimize(f: Callable[[X], Y], solve: Solve[X, Y]) -> X:
    """
    Finds a minimum of the scalar function *f(x)*.
    The `method` argument of `solve` determines which method is used.
    All methods supported by `scipy.optimize.minimize` are supported,
    see https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html .

    This method is limited to backends that support `functional_gradient()`, currently PyTorch, TensorFlow and Jax.

    To obtain additional information about the performed solve, use a `SolveTape`.

    See Also:
        `solve_nonlinear()`.

    Args:
        f: Function whose output is subject to minimization.
            All positional arguments of `f` are optimized and must be `Tensor` or `TensorLike`.
            The first return value of `f` must be a scalar float `Tensor` or `TensorLike`.
        solve: `Solve` object to specify method type, parameters and initial guess for `x`.

    Returns:
        x: solution, the minimum point `x`.

    Raises:
        NotConverged: If the desired accuracy was not reached within the maximum number of iterations.
        Diverged: If the optimization failed prematurely.
    """
    # minimize() works with absolute tolerances only; a relative tolerance has no reference scale here.
    assert (solve.relative_tolerance == 0).all, f"relative_tolerance must be zero for minimize() but got {solve.relative_tolerance}"
    # Split the (possibly nested) initial guess into its tensor leaves, remembering the structure for reassembly.
    x0_nest, x0_tensors = disassemble_tree(solve.x0)
    x0_tensors = [to_float(t) for t in x0_tensors]
    backend = choose_backend_t(*x0_tensors, prefer_default=True)
    batch_dims = merge_shapes(*[t.shape for t in x0_tensors]).batch
    # Flatten every tensor to a (batch, flat) native array so the backend optimizer sees one vector per batch entry.
    x0_natives = []
    for t in x0_tensors:
        t._expand()
        assert t.shape.is_uniform
        x0_natives.append(reshaped_native(t, [batch_dims, t.shape.non_batch], force_expand=True))
    x0_flat = backend.concat(x0_natives, -1)

    def unflatten_assemble(x_flat, additional_dims: Shape = EMPTY_SHAPE):
        # Inverse of the flattening above: slice the flat vector back into tensors and rebuild the nest.
        i = 0
        x_tensors = []
        for x0_native, x0_tensor in zip(x0_natives, x0_tensors):
            vol = backend.shape(x0_native)[-1]
            flat_native = x_flat[..., i:i + vol]
            x_tensors.append(reshaped_tensor(flat_native, [*additional_dims, batch_dims, x0_tensor.shape.non_batch]))
            i += vol
        x = assemble_tree(x0_nest, x_tensors)
        return x

    def native_function(x_flat):
        # Bridge between the backend's flat-vector interface and the user function f.
        x = unflatten_assemble(x_flat)
        if isinstance(x, (tuple, list)):
            y = f(*x)
        else:
            y = f(x)
        _, y_tensors = disassemble_tree(y)
        # The optimizer minimizes the total loss; the per-batch residual is reported alongside it.
        return y_tensors[0].sum, reshaped_native(y_tensors[0], [batch_dims])

    atol = backend.to_float(reshaped_native(solve.absolute_tolerance, [batch_dims], force_expand=True))
    maxi = backend.to_int32(reshaped_native(solve.max_iterations, [batch_dims], force_expand=True))
    # Record full trajectories only if some active SolveTape requests them.
    trj = _SOLVE_TAPES and any(t.record_trajectories for t in _SOLVE_TAPES)
    t = time.perf_counter()
    ret = backend.minimize(solve.method, native_function, x0_flat, atol, maxi, trj)
    t = time.perf_counter() - t
    if not trj:
        assert isinstance(ret, SolveResult)
        converged = reshaped_tensor(ret.converged, [batch_dims])
        diverged = reshaped_tensor(ret.diverged, [batch_dims])
        x = unflatten_assemble(ret.x)
        iterations = reshaped_tensor(ret.iterations, [batch_dims])
        function_evaluations = reshaped_tensor(ret.function_evaluations, [batch_dims])
        residual = reshaped_tensor(ret.residual, [batch_dims])
        result = SolveInfo(solve, x, residual, iterations, function_evaluations, converged, diverged, ret.method, ret.message, t)
    else:  # trajectory
        # One SolveResult per recorded step; stack them along a new 'trajectory' batch dimension.
        assert isinstance(ret, (tuple, list)) and all(isinstance(r, SolveResult) for r in ret)
        converged = reshaped_tensor(ret[-1].converged, [batch_dims])
        diverged = reshaped_tensor(ret[-1].diverged, [batch_dims])
        x = unflatten_assemble(ret[-1].x)
        x_ = unflatten_assemble(backend.stack([r.x for r in ret]), additional_dims=batch('trajectory'))
        residual = stack([reshaped_tensor(r.residual, [batch_dims]) for r in ret], batch('trajectory'))
        iterations = reshaped_tensor(ret[-1].iterations, [batch_dims])
        function_evaluations = stack([reshaped_tensor(r.function_evaluations, [batch_dims]) for r in ret], batch('trajectory'))
        result = SolveInfo(solve, x_, residual, iterations, function_evaluations, converged, diverged, ret[-1].method, ret[-1].message, t)
    for tape in _SOLVE_TAPES:
        tape._add(solve, trj, result)
    result.convergence_check(False)  # raises ConvergenceException
    return x
def native_call(f, *inputs, channels_last=None, channel_dim='vector', extrapolation=None) ‑> phi.field._field.SampledField

Similar to native_call().

Args

f
Function to be called on native tensors of inputs.values. The function output must have the same dimension layout as the inputs and the batch size must be identical.
*inputs
SampledField or Tensor instances.
extrapolation
(Optional) Extrapolation of the output field. If None, uses the extrapolation of the first input field.

Returns

SampledField matching the first SampledField in inputs.

Expand source code
def native_call(f, *inputs, channels_last=None, channel_dim='vector', extrapolation=None) -> SampledField or math.Tensor:
    """
    Similar to `phi.math.native_call()`.

    Args:
        f: Function to be called on native tensors of `inputs.values`.
            The function output must have the same dimension layout as the inputs and the batch size must be identical.
        *inputs: `SampledField` or `phi.math.Tensor` instances.
        extrapolation: (Optional) Extrapolation of the output field. If `None`, uses the extrapolation of the first input field.

    Returns:
        `SampledField` matching the first `SampledField` in `inputs`.
    """
    # Extract the raw tensors: fields contribute their values, everything else is wrapped as a Tensor.
    tensors = [inp.values if isinstance(inp, SampledField) else math.tensor(inp) for inp in inputs]
    values = math.native_call(f, *tensors, channels_last=channels_last, channel_dim=channel_dim)
    # Re-attach the result to the first SampledField encountered.
    for inp in inputs:
        if isinstance(inp, SampledField):
            result = inp.with_values(values=values)
            return result if extrapolation is None else result.with_extrapolation(extrapolation)
    raise AssertionError("At least one input must be a SampledField.")
def normalize(field: phi.field._field.SampledField, norm: phi.field._field.SampledField, epsilon=1e-05)

Multiplies the values of field so that its sum matches that of norm.

Expand source code
def normalize(field: SampledField, norm: SampledField, epsilon=1e-5):
    """ Multiplies the values of `field` so that its sum matches that of `norm`. """
    return field.with_values(math.normalize_to(field.values, norm.values, epsilon))
def pad(grid: ~GridType, widths: int) ‑> ~GridType

Pads a Grid using its extrapolation.

Unlike pad(), this function also affects the bounds of the grid, changing its size and origin depending on widths.

Args

grid
CenteredGrid or StaggeredGrid
widths
Either int or (lower, upper) to pad the same number of cells in all spatial dimensions or dict mapping dimension names to (lower, upper).

Returns

Grid of the same type as grid

Expand source code
def pad(grid: GridType, widths: int or tuple or list or dict) -> GridType:
    """
    Pads a `Grid` using its extrapolation.

    Unlike `phi.math.pad()`, this function also affects the `bounds` of the grid, changing its size and origin depending on `widths`.

    Args:
        grid: `CenteredGrid` or `StaggeredGrid`
        widths: Either `int` or `(lower, upper)` to pad the same number of cells in all spatial dimensions
            or `dict` mapping dimension names to `(lower, upper)`.

    Returns:
        `Grid` of the same type as `grid`
    """
    spatial_names = grid.shape.spatial.names
    # Normalize `widths` to a dict: dimension name -> (lower, upper).
    if isinstance(widths, int):
        widths = {dim: (widths, widths) for dim in spatial_names}
    elif isinstance(widths, (tuple, list)):
        widths = {dim: (w if isinstance(w, (tuple, list)) else (w, w)) for dim, w in zip(spatial_names, widths)}
    else:
        assert isinstance(widths, dict)
    widths_by_dim = [widths[dim] for dim in spatial_names]
    if isinstance(grid, Grid):
        data = math.pad(grid.values, widths, grid.extrapolation)
        lower_pad = math.wrap([lo for lo, _ in widths_by_dim])
        upper_pad = math.wrap([hi for _, hi in widths_by_dim])
        # Grow the physical bounds by the padded cell counts times the cell size.
        bounds = Box(grid.box.lower - lower_pad * grid.dx, grid.box.upper + upper_pad * grid.dx)
        return type(grid)(values=data, resolution=data.shape.spatial, bounds=bounds, extrapolation=grid.extrapolation)
    raise NotImplementedError(f"{type(grid)} not supported. Only Grid instances allowed.")
def read(file: str, convert_to_backend=True) ‑> phi.field._field.SampledField

Loads a previously saved SampledField from disc.

See Also: write().

Args

file
Single file as str or Tensor of string type. If file is a tensor, all contained files are loaded and stacked according to the dimensions of file.
convert_to_backend
Whether to convert the read data to the data format of the default backend, e.g. TensorFlow tensors.

Returns

Loaded SampledField.

Expand source code
def read(file: str or math.Tensor, convert_to_backend=True) -> SampledField:
    """
    Loads a previously saved `SampledField` from disc.

    See Also:
        `write()`.

    Args:
        file: Single file as `str` or `Tensor` of string type.
            If `file` is a tensor, all contained files are loaded and stacked according to the dimensions of `file`.
        convert_to_backend: Whether to convert the read data to the data format of the default backend, e.g. TensorFlow tensors.

    Returns:
        Loaded `SampledField`.
    """
    if isinstance(file, str):
        return read_single_field(file, convert_to_backend=convert_to_backend)
    if isinstance(file, math.Tensor):
        if file.rank == 0:
            # Scalar string tensor: unwrap to a plain path.
            return read_single_field(file.native(), convert_to_backend=convert_to_backend)
        # Recurse along the first dimension and stack the loaded fields back together.
        dim = file.shape[0]
        fields = [read(entry, convert_to_backend=convert_to_backend) for entry in file.unstack(dim.name)]
        return stack(fields, dim)
    raise ValueError(file)
def real(x) ‑> phi.math._tensors.Tensor

See Also: imag(), conjugate().

Args

x
Tensor or TensorLike or native tensor.

Returns

Real component of x.

Expand source code
def real(x) -> Tensor:
    """
    Extracts the real component of `x`.

    See Also:
        `imag()`, `conjugate()`.

    Args:
        x: `Tensor` or `TensorLike` or native tensor.

    Returns:
        Real component of `x`.
    """
    result = _backend_op1(x, Backend.real)
    return result
def reduce_sample(field: phi.field._field.Field, geometry: phi.geom._geom.Geometry, dim=(vectorᶜ=None)) ‑> phi.math._tensors.Tensor

Similar to sample(), but matches channel dimensions of geometry with channel dimensions of this field. Currently, geometry may have at most one channel dimension.

See Also: sample(), Field.at(), Resampling overview.

Args

field
Source Field to sample.
geometry
Single or batched Geometry.
dim
Dimension of result, resulting from reduction of channel dimensions.

Returns

Sampled values as a Tensor

Expand source code
def reduce_sample(field: Field, geometry: Geometry, dim=channel('vector')) -> math.Tensor:
    """
    Similar to `sample()`, but matches channel dimensions of `geometry` with channel dimensions of this field.
    Currently, `geometry` may have at most one channel dimension.

    See Also:
        `sample()`, `Field.at()`, [Resampling overview](https://tum-pbs.github.io/PhiFlow/Fields.html#resampling-fields).

    Args:
        field: Source `Field` to sample.
        geometry: Single or batched `phi.geom.Geometry`.
        dim: Dimension of result, resulting from reduction of channel dimensions.

    Returns:
        Sampled values as a `phi.math.Tensor`
    """
    # Shortcut: sampling a field at its own elements returns the stored values.
    if isinstance(field, SampledField) and field.elements.shallow_equals(geometry):
        return field.values
    if not geometry.shape.channel:  # nothing to reduce
        return field._sample(geometry)
    assert geometry.shape.channel.rank == 1, "Only single-dimension reduction supported."
    geometries = geometry.unstack(geometry.shape.channel.name)
    if field.shape.channel:
        # Pair up field components with geometry components one-to-one.
        assert field.shape.channel.volume == geometry.shape.channel.volume, f"Cannot sample field with channels {field.shape.channel} at elements with channels {geometry.shape.channel}."
        components = unstack(field, field.shape.channel.name)
        sampled = [component._sample(geo) for component, geo in zip(components, geometries)]
    else:
        sampled = [field._sample(geo) for geo in geometries]
    return math.stack(sampled, dim)
def round(x) ‑> phi.math._tensors.Tensor

Rounds the Tensor or TensorLike x to the closest integer.

Expand source code
def round_(x) -> Tensor:
    """ Rounds the `Tensor` or `TensorLike` `x` to the closest integer. """
    result = _backend_op1(x, Backend.round)
    return result
def sample(field: phi.field._field.Field, geometry: phi.geom._geom.Geometry) ‑> phi.math._tensors.Tensor

Computes the field value inside the volume of the (batched) geometry.

The field value may be determined by integrating over the volume, sampling the central value or any other way.

The batch dimensions of geometry are matched with this field. The geometry must not share any channel dimensions with this field. Spatial dimensions of geometry can be used to sample a grid of geometries.

See Also: reduce_sample(), Field.at(), Resampling overview.

Args

field
Source Field to sample.
geometry
Single or batched Geometry.

Returns

Sampled values as a Tensor

Expand source code
def sample(field: Field, geometry: Geometry) -> math.Tensor:
    """
    Computes the field value inside the volume of the (batched) `geometry`.

    The field value may be determined by integrating over the volume, sampling the central value or any other way.

    The batch dimensions of `geometry` are matched with this field.
    The `geometry` must not share any channel dimensions with this field.
    Spatial dimensions of `geometry` can be used to sample a grid of geometries.

    See Also:
        `reduce_sample()`, `Field.at()`, [Resampling overview](https://tum-pbs.github.io/PhiFlow/Fields.html#resampling-fields).

    Args:
        field: Source `Field` to sample.
        geometry: Single or batched `phi.geom.Geometry`.

    Returns:
        Sampled values as a `phi.math.Tensor`
    """
    # Channel dimensions must be disjoint between field and geometry.
    assert all(dim not in field.shape for dim in geometry.shape.channel)
    # Shortcut: sampling a field at its own elements returns the stored values.
    if isinstance(field, SampledField) and field.elements.shallow_equals(geometry) and not geometry.shape.channel:
        return field.values
    if not geometry.shape.channel:
        return field._sample(geometry)
    # Sample each geometry component separately, then stack along the geometry's channel dimension.
    parts = [field._sample(geo) for geo in geometry.unstack(geometry.shape.channel.name)]
    return math.stack(parts, geometry.shape.channel)
def shift(grid: phi.field._grid.CenteredGrid, offsets: tuple, stack_dim: phi.math._shape.Shape = (shiftᶜ=None))

Wraps :func:math.shift for CenteredGrid.

Args

grid
CenteredGrid:
offsets
tuple:
stack_dim
(Default value = 'shift')

Returns:

Expand source code
def shift(grid: CenteredGrid, offsets: tuple, stack_dim: Shape = channel('shift')):
    """
    Wraps `phi.math.shift` for `CenteredGrid`.

    Args:
        grid: `CenteredGrid` whose values are shifted.
        offsets: `tuple` of shift offsets, passed through to `phi.math.shift`.
            # presumably integer cell offsets — confirm against phi.math.shift
        stack_dim: Channel dimension along which shifted components are stacked
            inside each resulting tensor (Default value = `channel('shift')`).

    Returns:
        `list` of `CenteredGrid`, one per entry in `offsets`, each sharing the
        bounds and extrapolation of `grid`.
    """
    data = math.shift(grid.values, offsets, padding=grid.extrapolation, stack_dim=stack_dim)
    return [CenteredGrid(data[i], bounds=grid.bounds, extrapolation=grid.extrapolation) for i in range(len(offsets))]
def sign(x)

The sign of positive numbers is 1 and -1 for negative numbers. The sign of 0 is undefined.

Args

x
Tensor or TensorLike

Returns

Tensor or TensorLike matching x.

Expand source code
def sign(x):
    """
    The sign of positive numbers is 1 and -1 for negative numbers.
    The sign of 0 is undefined.

    Args:
        x: `Tensor` or `TensorLike`

    Returns:
        `Tensor` or `TensorLike` matching `x`.
    """
    result = _backend_op1(x, Backend.sign)
    return result
def sin(x) ‑> phi.math._tensors.Tensor

Computes sin(x) of the Tensor or TensorLike x.

Expand source code
def sin(x) -> Tensor:
    """ Computes *sin(x)* of the `Tensor` or `TensorLike` `x`. """
    result = _backend_op1(x, Backend.sin)
    return result
def solve_linear(f: Callable[[~X], ~Y], y: ~Y, solve: phi.math._functional.Solve[~X, ~Y], f_args: tuple = (), f_kwargs: dict = None) ‑> ~X

Solves the system of linear equations f(x) = y and returns x. For maximum performance, compile f using jit_compile_linear() beforehand. Then, an optimized representation of f (such as a sparse matrix) will be used to solve the linear system.

To obtain additional information about the performed solve, use a SolveTape.

The gradient of this operation will perform another linear solve with the parameters specified by Solve.gradient_solve.

See Also: solve_nonlinear(), jit_compile_linear().

Args

f
Linear function with Tensor or TensorLike first parameter and return value. f can have additional arguments.
y
Desired output of f(x) as Tensor or TensorLike.
solve
Solve object specifying optimization method, parameters and initial guess for x.
f_args
Additional Tensor or TensorLike arguments to be passed to f. f need not be linear in these arguments. Use this instead of lambda function since a lambda will not be recognized as calling a jit-compiled function.
f_kwargs
Additional keyword arguments to be passed to f. These arguments can be of any type.

Returns

x
solution of the linear system of equations f(x) = y as Tensor or TensorLike.

Raises

NotConverged
If the desired accuracy was not reached within the maximum number of iterations.
Diverged
If the solve failed prematurely.
Expand source code
def solve_linear(f: Callable[[X], Y],
                 y: Y, solve: Solve[X, Y],
                 f_args: tuple or list = (),
                 f_kwargs: dict = None) -> X:
    """
    Solves the system of linear equations *f(x) = y* and returns *x*.
    For maximum performance, compile `f` using `jit_compile_linear()` beforehand.
    Then, an optimized representation of `f` (such as a sparse matrix) will be used to solve the linear system.

    To obtain additional information about the performed solve, use a `SolveTape`.

    The gradient of this operation will perform another linear solve with the parameters specified by `Solve.gradient_solve`.

    See Also:
        `solve_nonlinear()`, `jit_compile_linear()`.

    Args:
        f: Linear function with `Tensor` or `TensorLike` first parameter and return value.
            `f` can have additional arguments.
        y: Desired output of `f(x)` as `Tensor` or `TensorLike`.
        solve: `Solve` object specifying optimization method, parameters and initial guess for `x`.
        f_args: Additional `Tensor` or `TensorLike` arguments to be passed to `f`.
            `f` need not be linear in these arguments.
            Use this instead of lambda function since a lambda will not be recognized as calling a jit-compiled function.
        f_kwargs: Additional keyword arguments to be passed to `f`.
            These arguments can be of any type.

    Returns:
        x: solution of the linear system of equations `f(x) = y` as `Tensor` or `TensorLike`.

    Raises:
        NotConverged: If the desired accuracy was not reached within the maximum number of iterations.
        Diverged: If the solve failed prematurely.
    """
    y_tree, y_tensors = disassemble_tree(y)
    x0_tree, x0_tensors = disassemble_tree(solve.x0)
    assert len(x0_tensors) == len(y_tensors) == 1, "Only single-tensor linear solves are currently supported"
    backend = choose_backend_t(*y_tensors, *x0_tensors)

    # Inside a jit trace, tensor values are not available; force-compile f so a matrix/trace can be used.
    if not all_available(*y_tensors, *x0_tensors):  # jit mode
        f = jit_compile_linear(f) if backend.supports(Backend.sparse_coo_tensor) else jit_compile(f)

    # Fast path: build an explicit sparse matrix and solve `A x = y - bias` when the backend supports it.
    if isinstance(f, LinearFunction) and (backend.supports(Backend.sparse_coo_tensor) or backend.supports(Backend.csr_matrix)):
        matrix, bias = f.sparse_matrix_and_bias(solve.x0, *f_args, **(f_kwargs or {}))
        return _matrix_solve(y - bias, solve, matrix, backend=backend)  # custom_gradient
    else:
        # Fallback: matrix-free solve that evaluates f on each iteration.
        # arg_tree, arg_tensors = disassemble_tree(f_args)
        # arg_tensors = cached(arg_tensors)
        # f_args = assemble_tree(arg_tree, arg_tensors)
        f_args = cached(f_args)
        # x0_tensors = cached(x0_tensors)
        # solve = copy_with(solve, x0=assemble_tree(x0_tree, x0_tensors))
        solve = cached(solve)
        return _function_solve(y, solve, f_args, f_kwargs=f_kwargs or {}, f=f, backend=backend)  # custom_gradient
def solve_nonlinear(f: Callable, y, solve: phi.math._functional.Solve) ‑> phi.math._tensors.Tensor

Solves the non-linear equation f(x) = y by minimizing the norm of the residual.

This method is limited to backends that support functional_gradient(), currently PyTorch, TensorFlow and Jax.

To obtain additional information about the performed solve, use a SolveTape.

See Also: minimize(), solve_linear().

Args

f
Function whose output is optimized to match y. All positional arguments of f are optimized and must be Tensor or TensorLike. The output of f must match y.
y
Desired output of f(x) as Tensor or TensorLike.
solve
Solve object specifying optimization method, parameters and initial guess for x.

Returns

x
Solution fulfilling f(x) = y within specified tolerance as Tensor or TensorLike.

Raises

NotConverged
If the desired accuracy was not reached within the maximum number of iterations.
Diverged
If the solve failed prematurely.
Expand source code
def solve_nonlinear(f: Callable, y, solve: Solve) -> Tensor:
    """
    Solves the non-linear equation *f(x) = y* by minimizing the norm of the residual.

    This method is limited to backends that support `functional_gradient()`, currently PyTorch, TensorFlow and Jax.

    To obtain additional information about the performed solve, use a `SolveTape`.

    See Also:
        `minimize()`, `solve_linear()`.

    Args:
        f: Function whose output is optimized to match `y`.
            All positional arguments of `f` are optimized and must be `Tensor` or `TensorLike`.
            The output of `f` must match `y`.
        y: Desired output of `f(x)` as `Tensor` or `TensorLike`.
        solve: `Solve` object specifying optimization method, parameters and initial guess for `x`.

    Returns:
        x: Solution fulfilling `f(x) = y` within specified tolerance as `Tensor` or `TensorLike`.

    Raises:
        NotConverged: If the desired accuracy was not reached within the maximum number of iterations.
        Diverged: If the solve failed prematurely.
    """
    from ._nd import l2_loss

    def residual_norm(x):
        # Objective: squared L2 norm of the residual f(x) - y.
        return l2_loss(f(x) - y)

    # minimize() requires absolute tolerances, so convert the relative tolerance
    # using the norm of y as the reference scale.
    solve.absolute_tolerance = solve.relative_tolerance * l2_loss(y, batch_norm=True)
    solve.relative_tolerance = 0
    return minimize(residual_norm, solve)
def spatial_gradient(field: phi.field._grid.CenteredGrid, extrapolation: Extrapolation = None, type: type = phi.field._grid.CenteredGrid, stack_dim: phi.math._shape.Shape = (vectorᶜ=None))

Finite difference spatial_gradient.

This function can operate in two modes:

  • type=CenteredGrid approximates the spatial_gradient at cell centers using central differences
  • type=StaggeredGrid computes the spatial_gradient at face centers of neighbouring cells

Args

field
centered grid of any number of dimensions (scalar field, vector field, tensor field)
type
either CenteredGrid or StaggeredGrid
stack_dim
Dimension to be added. This dimension lists the spatial_gradient w.r.t. the spatial dimensions. The field must not have a dimension of the same name.

Returns

spatial_gradient field of type type.

Expand source code
def spatial_gradient(field: CenteredGrid,
                     extrapolation: math.Extrapolation = None,
                     type: type = CenteredGrid,
                     stack_dim: Shape = channel('vector')):
    """
    Finite difference spatial_gradient.

    This function can operate in two modes:

    * `type=CenteredGrid` approximates the spatial_gradient at cell centers using central differences
    * `type=StaggeredGrid` computes the spatial_gradient at face centers of neighbouring cells

    Args:
        field: centered grid of any number of dimensions (scalar field, vector field, tensor field)
        extrapolation: Extrapolation of the result grid. If `None`, the spatial gradient of `field.extrapolation` is used.
        type: either `CenteredGrid` or `StaggeredGrid`
        stack_dim: Dimension to be added. This dimension lists the spatial_gradient w.r.t. the spatial dimensions.
            The `field` must not have a dimension of the same name.

    Returns:
        spatial_gradient field of type `type`.

    Raises:
        NotImplementedError: If `type` is neither `CenteredGrid` nor `StaggeredGrid`.
    """
    assert isinstance(field, Grid)
    if extrapolation is None:
        extrapolation = field.extrapolation.spatial_gradient()
    if type == CenteredGrid:
        values = math.spatial_gradient(field.values, field.dx.vector.as_channel(name=stack_dim.name), difference='central', padding=field.extrapolation, stack_dim=stack_dim)
        return CenteredGrid(values, bounds=field.bounds, extrapolation=extrapolation)
    elif type == StaggeredGrid:
        assert stack_dim.name == 'vector'
        return stagger(field, lambda lower, upper: (upper - lower) / field.dx, extrapolation)
    # BUG FIX: the parameter `type` shadows the builtin, so the previous message
    # `f"{type(field)}"` *called* the user-supplied class on `field` instead of
    # reporting the unsupported value. Report the offending `type` argument directly.
    raise NotImplementedError(f"{type} not supported. Only CenteredGrid and StaggeredGrid allowed.")
def sqrt(x) ‑> phi.math._tensors.Tensor

Computes sqrt(x) of the Tensor or TensorLike x.

Expand source code
def sqrt(x) -> Tensor:
    """ Computes *sqrt(x)* of the `Tensor` or `TensorLike` `x`. """
    result = _backend_op1(x, Backend.sqrt)
    return result
def stack(fields, dim: phi.math._shape.Shape)

Stacks the given SampledFields along dim.

See Also: concat().

Args

fields
List of matching SampledField instances.
dim
Stack dimension as Shape. Size is ignored.

Returns

SampledField matching stacked fields.

Expand source code
def stack(fields, dim: Shape):
    """
    Stacks the given `SampledField`s along `dim`.

    See Also:
        `concat()`.

    Args:
        fields: List of matching `SampledField` instances.
        dim: Stack dimension as `Shape`. Size is ignored.

    Returns:
        `SampledField` matching stacked fields.

    Raises:
        NotImplementedError: If the fields have differing extrapolations or are of an unsupported type.
    """
    # FIX: the two assertion messages were identical copy-paste; each now describes its own check.
    assert all(isinstance(f, SampledField) for f in fields), f"All fields must be SampledFields but got {fields}"
    assert all(isinstance(f, type(fields[0])) for f in fields), f"All fields must be of the same type but got {fields}"
    if any(f.extrapolation != fields[0].extrapolation for f in fields):
        # FIX: message said "Concatenating" but this is stack(); report the actual limitation.
        raise NotImplementedError("Stacking fields with different extrapolations is not supported")
    if isinstance(fields[0], Grid):
        values = math.stack([f.values for f in fields], dim)
        return fields[0].with_values(values)
    elif isinstance(fields[0], PointCloud):
        # Point clouds carry per-point geometry and colors that must be stacked alongside the values.
        elements = geom.stack(*[f.elements for f in fields], dim=dim)
        values = math.stack([f.values for f in fields], dim=dim)
        colors = math.stack([f.color for f in fields], dim=dim)
        return PointCloud(elements=elements, values=values, color=colors, extrapolation=fields[0].extrapolation, add_overlapping=fields[0]._add_overlapping, bounds=fields[0].bounds)
    raise NotImplementedError(type(fields[0]))
def stagger(field: phi.field._grid.CenteredGrid, face_function: Callable, extrapolation: Extrapolation, type: type = phi.field._grid.StaggeredGrid)

Creates a new grid by evaluating face_function given two neighbouring cells. One layer of missing cells is inferred from the extrapolation.

This method returns a Field of type type which must be either StaggeredGrid or CenteredGrid. When returning a StaggeredGrid, the new values are sampled at the faces of neighbouring cells. When returning a CenteredGrid, the new grid has the same resolution as field.

Args

field
centered grid
face_function
function mapping (value1: Tensor, value2: Tensor) -> center_value: Tensor
extrapolation
extrapolation mode of the returned grid. Has no effect on the values.
type
one of (StaggeredGrid, CenteredGrid) (Default value = StaggeredGrid)

Returns

grid of type matching the type argument

Expand source code
def stagger(field: CenteredGrid,
            face_function: Callable,
            extrapolation: math.extrapolation.Extrapolation,
            type: type = StaggeredGrid):
    """
    Creates a new grid by evaluating `face_function` given two neighbouring cells.
    One layer of missing cells is inferred from the extrapolation.

    This method returns a Field of type `type` which must be either StaggeredGrid or CenteredGrid.
    When returning a StaggeredGrid, the new values are sampled at the faces of neighbouring cells.
    When returning a CenteredGrid, the new grid has the same resolution as `field`.

    Args:
        field: centered grid
        face_function: function mapping (value1: Tensor, value2: Tensor) -> center_value: Tensor
        extrapolation: extrapolation mode of the returned grid. Has no effect on the values.
        type: one of (StaggeredGrid, CenteredGrid). Default: StaggeredGrid.

    Returns:
        grid of type matching the `type` argument

    Raises:
        ValueError: if `type` is neither StaggeredGrid nor CenteredGrid.
    """
    all_lower = []
    all_upper = []
    if type == StaggeredGrid:
        for dim in field.shape.spatial.names:
            # Which outer faces carry values depends on the target extrapolation;
            # pad the cell values accordingly so the shifted copies align at the faces.
            lo_valid, up_valid = extrapolation.valid_outer_faces(dim)
            width_lower = {dim: (int(lo_valid), int(up_valid) - 1)}
            width_upper = {dim: (int(lo_valid) - 1, int(lo_valid and up_valid))}
            all_lower.append(math.pad(field.values, width_lower, field.extrapolation))
            all_upper.append(math.pad(field.values, width_upper, field.extrapolation))
        all_upper = math.stack(all_upper, channel('vector'))
        all_lower = math.stack(all_lower, channel('vector'))
        values = face_function(all_lower, all_upper)
        result = StaggeredGrid(values, bounds=field.bounds, extrapolation=extrapolation)
        assert result.shape.spatial == field.shape.spatial
        return result
    elif type == CenteredGrid:
        # Shift by one cell in each direction and evaluate the face function at the cell centers.
        left, right = math.shift(field.values, (-1, 1), padding=field.extrapolation, stack_dim=channel('vector'))
        values = face_function(left, right)
        return CenteredGrid(values, bounds=field.bounds, extrapolation=extrapolation)
    else:
        raise ValueError(type)
def stop_gradient(x)

Disables gradients for the given tensor. This may switch off the gradients for x itself or create a copy of x with disabled gradients.

Implementations: PyTorch x.detach(), TensorFlow tf.stop_gradient, Jax jax.lax.stop_gradient.

Args

x
Tensor or TensorLike for which gradients should be disabled.

Returns

Copy of x.

Expand source code
def stop_gradient(x):
    """
    Disables gradients for the given tensor.
    Depending on the backend, this either switches off gradients for `x` itself
    or returns a copy of `x` with gradients disabled.

    Implementations:

    * PyTorch: [`x.detach()`](https://pytorch.org/docs/stable/autograd.html#torch.Tensor.detach)
    * TensorFlow: [`tf.stop_gradient`](https://www.tensorflow.org/api_docs/python/tf/stop_gradient)
    * Jax: [`jax.lax.stop_gradient`](https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.stop_gradient.html)

    Args:
        x: `Tensor` or `TensorLike` for which gradients should be disabled.

    Returns:
        Copy of `x`.
    """
    if isinstance(x, Tensor):
        return x._op1(lambda native: choose_backend(native).stop_gradient(native))
    if isinstance(x, TensorLike):
        # Recursively detach all tensors in the structure, then rebuild it.
        tree, leaves = disassemble_tree(x)
        detached = [stop_gradient(leaf) for leaf in leaves]
        return assemble_tree(tree, detached)
    return wrap(choose_backend(x).stop_gradient(x))
def to_float(x) ‑> phi.math._tensors.Tensor

Converts the given tensor to floating point format with the currently specified precision.

The precision can be set globally using math.set_global_precision() and locally using with math.precision().

See the phi.math module documentation at https://tum-pbs.github.io/PhiFlow/Math.html

See Also: cast().

Args

x
Tensor or TensorLike to convert

Returns

Tensor or TensorLike matching x.

Expand source code
def to_float(x) -> Tensor:
    """
    Converts the given tensor to floating point format, using the currently set precision.

    The precision can be set globally via `math.set_global_precision()`
    and locally via `with math.precision()`.

    See the `phi.math` module documentation at https://tum-pbs.github.io/PhiFlow/Math.html

    See Also:
        `cast()`.

    Args:
        x: `Tensor` or `TensorLike` to convert

    Returns:
        `Tensor` or `TensorLike` matching `x`.
    """
    return _backend_op1(x, Backend.to_float)
def to_int32(x)

Converts the Tensor or TensorLike x to 32-bit integer.

Expand source code
def to_int32(x):
    """ Converts the `Tensor` or `TensorLike` `x` to 32-bit integer format. """
    return _backend_op1(x, Backend.to_int32)
def to_int64(x) ‑> phi.math._tensors.Tensor

Converts the Tensor or TensorLike x to 64-bit integer.

Expand source code
def to_int64(x) -> Tensor:
    """ Converts the `Tensor` or `TensorLike` `x` to 64-bit integer format. """
    return _backend_op1(x, Backend.to_int64)
def unstack(field: phi.field._field.Field, dim: str) ‑> tuple

Unstack field along one of its dimensions. The dimension can be batch, spatial or channel.

Args

field
Field to unstack.
dim
name of the dimension to unstack, must be part of self.shape

Returns

tuple of Fields. The returned fields may be of different types than field.

Expand source code
def unstack(field: Field, dim: str) -> tuple:
    """
    Unstack `field` along one of its dimensions.
    The dimension can be batch, spatial or channel.

    Args:
        field: `Field` to unstack.
        dim: name of the dimension to unstack; must be part of `field.shape`.

    Returns:
        `tuple` of `Fields`. The returned fields may be of different types than `field`.
    """
    dim_size = field.shape.get_size(dim)
    if isinstance(dim_size, Tensor):
        # StaggeredGrid sizes can vary per component along spatial dims; use the smallest.
        dim_size = math.min(dim_size)
    return tuple(field[{dim: index}] for index in range(dim_size))
def upsample2x(grid: ~GridType) ‑> ~GridType

Increases the number of sample points by a factor of 2 in each spatial dimension. The new values are determined via linear interpolation.

See Also: downsample2x().

Args

grid
CenteredGrid or StaggeredGrid.

Returns

Grid of same type as grid.

Expand source code
def upsample2x(grid: GridType) -> GridType:
    """
    Increases the number of sample points by a factor of 2 in each spatial dimension.
    New values are determined via linear interpolation.

    See Also:
        `downsample2x()`.

    Args:
        grid: `CenteredGrid` or `StaggeredGrid`.

    Returns:
        `Grid` of same type as `grid`.
    """
    if isinstance(grid, StaggeredGrid):
        raise NotImplementedError()
    if isinstance(grid, CenteredGrid):
        fine_values = math.upsample2x(grid.values, grid.extrapolation)
        return CenteredGrid(fine_values, bounds=grid.bounds, extrapolation=grid.extrapolation)
    raise ValueError(type(grid))
def vec_abs(field: phi.field._field.SampledField)
Expand source code
def vec_abs(field: SampledField):
    """ See `phi.math.vec_abs()` """
    # Staggered components live at different locations; resample to centers first.
    centered = field.at_centers() if isinstance(field, StaggeredGrid) else field
    return centered.with_values(math.vec_abs(centered.values))
def vec_squared(field: phi.field._field.SampledField)
Expand source code
def vec_squared(field: SampledField):
    """ See `phi.math.vec_squared()` """
    # Staggered components live at different locations; resample to centers first.
    centered = field.at_centers() if isinstance(field, StaggeredGrid) else field
    return centered.with_values(math.vec_squared(centered.values))
def where(mask: phi.field._field.Field, field_true: phi.field._field.Field, field_false: phi.field._field.Field) ‑> phi.field._field.SampledField

Element-wise where operation. Picks the value of field_true where mask=1 / True and the value of field_false where mask=0 / False.

The fields are automatically resampled if necessary, preferring the sample points of mask. At least one of the arguments must be a SampledField.

Args

mask
Field or Geometry object.
field_true
Field
field_false
Field

Returns

SampledField

Expand source code
def where(mask: Field or Geometry, field_true: Field, field_false: Field) -> SampledField:
    """
    Element-wise where operation.
    Picks the value of `field_true` where `mask=1 / True` and the value of `field_false` where `mask=0 / False`.

    The fields are automatically resampled if necessary, preferring the sample points of `mask`.
    At least one of the arguments must be a `SampledField`.

    Args:
        mask: `Field` or `Geometry` object.
        field_true: `Field`
        field_false: `Field`

    Returns:
        `SampledField`

    Raises:
        NotImplementedError: If none of the arguments is a `SampledField`.
    """
    if isinstance(mask, Geometry):
        # Convert first, then continue below to resample everything to common points.
        # Previously the Geometry branch skipped resampling, so `mask.values` failed.
        mask = HardGeometryMask(mask)
    if isinstance(mask, SampledField):
        field_true = field_true.at(mask)
        field_false = field_false.at(mask)
    elif isinstance(field_true, SampledField):
        mask = mask.at(field_true)
        field_false = field_false.at(field_true)
    elif isinstance(field_false, SampledField):
        # Bug fix: this branch previously resampled at `field_true`, which is
        # provably not a SampledField here (the previous elif failed).
        mask = mask.at(field_false)
        field_true = field_true.at(field_false)
    else:
        raise NotImplementedError('At least one argument must be a SampledField')
    # Linear blend by the mask values (works for soft masks as well as binary ones).
    values = mask.values * field_true.values + (1 - mask.values) * field_false.values
    # values = math.where(mask.values, field_true.values, field_false.values)
    return field_true.with_values(values)
def write(field: phi.field._field.SampledField, file: str)

Writes a field to disc using a NumPy file format. Depending on file, the data may be split up into multiple files.

All characteristics of the field are serialized so that it can be fully restored using read().

See Also: read()

Args

field
Field to be saved.
file
Single file as str or Tensor of string type. If file is a tensor, the dimensions of field are matched to the dimensions of file. Dimensions of file that are missing in field result in data duplication. Dimensions of field that are missing in file result in larger files.
Expand source code
def write(field: SampledField, file: str or math.Tensor):
    """
    Writes a field to disc using a NumPy file format.
    Depending on `file`, the data may be split up into multiple files.

    All characteristics of the field are serialized so that it can be fully restored using `read()`.

    See Also:
        `read()`

    Args:
        field: Field to be saved.
        file: Single file as `str` or `Tensor` of string type.
            If `file` is a tensor, the dimensions of `field` are matched to the dimensions of `file`.
            Dimensions of `file` that are missing in `field` result in data duplication.
            Dimensions of `field` that are missing in `file` result in larger files.
    """
    if isinstance(file, str):
        write_single_field(field, file)
        return
    if not isinstance(file, math.Tensor):
        raise ValueError(file)
    if file.rank == 0:
        # Scalar string tensor: unwrap and write a single file.
        write_single_field(field, file.native())
        return
    # Recursively split field and file names along the first file dimension.
    dim = file.shape.names[0]
    sub_files = file.unstack(dim)
    sub_fields = field.dimension(dim).unstack(file.shape.get_size(dim))
    for sub_field, sub_file in zip(sub_fields, sub_files):
        write(sub_field, sub_file)

Classes

class AngularVelocity (location: phi.math._tensors.Tensor, strength: phi.math._tensors.Tensor = 1.0, falloff: collections.abc.Callable = None, component: str = None)

Model of a single vortex or set of vortices. The falloff of the velocity magnitude can be controlled.

Without a specified falloff, the velocity increases linearly with the distance from the vortex center. This is the case with rotating rigid bodies, for example.

Expand source code
class AngularVelocity(Field):
    """
    Velocity field of a single vortex or a set of vortices.
    The falloff of the velocity magnitude can be controlled.

    If no falloff is specified, the velocity grows linearly with the distance
    from the vortex center, as for a rotating rigid body.
    """

    def __init__(self,
                 location: math.Tensor or tuple or list or Number,
                 strength: math.Tensor or Number = 1.0,
                 falloff: Callable = None,
                 component: str = None):
        location = math.wrap(location)
        strength = math.wrap(strength)
        assert location.shape.channel.names == ('vector',), "location must have a single channel dimension called 'vector'"
        assert location.shape.spatial.is_empty, "location tensor cannot have any spatial dimensions"
        self.location = location
        self.strength = strength
        self.falloff = falloff
        self.component = component
        # Derive spatial dimension names from the global axis order, one per vector component.
        dim_count = location.vector.size
        names = [GLOBAL_AXIS_ORDER.axis_name(i, dim_count) for i in range(dim_count)]
        self._shape = location.shape & spatial(**{name: 1 for name in names})

    def _sample(self, geometry: Geometry) -> math.Tensor:
        centers = geometry.center
        offsets = centers - self.location
        if self.falloff is None:
            strength = self.strength
        else:
            strength = self.strength * self.falloff(offsets)
        velocity = math.cross_product(strength, offsets)
        # Sum contributions of multiple vortices (batch dims of location not present in the points).
        velocity = math.sum(velocity, self.location.shape.batch.without(centers.shape))
        if self.component:
            velocity = velocity.vector[self.component]
        return velocity

    @property
    def shape(self) -> Shape:
        return self._shape

    def __getitem__(self, item: dict):
        assert all(dim == 'vector' for dim in item), f"Cannot slice AngularVelocity with {item}"
        if 'vector' not in item:
            return self
        assert item['vector'] == 0 or self.component is None
        component = self.shape.spatial.names[item['vector']]
        return AngularVelocity(self.location, self.strength, self.falloff, component)

Ancestors

  • phi.field._field.Field

Instance variables

var shape : phi.math._shape.Shape

Returns a shape with the following properties

  • The spatial dimension names match the dimensions of this Field
  • The batch dimensions match the batch dimensions of this Field
  • The channel dimensions match the channels of this Field
Expand source code
@property
def shape(self) -> Shape:
    return self._shape
class CenteredGrid (values: Any, extrapolation: Any = 0.0, bounds: phi.geom._box.Box = None, resolution: phi.math._shape.Shape = None, **resolution_: int)

N-dimensional grid with values sampled at the cell centers. A centered grid is defined through its CenteredGrid.values Tensor, its CenteredGrid.bounds Box describing the physical size, and its CenteredGrid.extrapolation (Extrapolation).

Centered grids support batch, spatial and channel dimensions.

See Also: StaggeredGrid, Grid, SampledField, Field, module documentation at https://tum-pbs.github.io/PhiFlow/Fields.html

Args

values

Values to use for the grid. Has to be one of the following:

  • Geometry: sets inside values to 1, outside to 0
  • Field: resamples the Field to the staggered sample points
  • Number: uses the value for all sample points
  • tuple or list: interprets the sequence as vector, used for all sample points
  • Tensor compatible with grid dims: uses tensor values as grid values
  • Function values(x) where x is a Tensor representing the physical location.
extrapolation
The grid extrapolation determines the value outside the values tensor. Allowed types: float, Tensor, Extrapolation.
bounds
Physical size and location of the grid as Box.
resolution
Grid resolution as purely spatial Shape.
**resolution_
Spatial dimensions as keyword arguments. Typically either resolution or the spatial dimension keywords are specified.
Expand source code
class CenteredGrid(Grid):
    """
    N-dimensional grid with values sampled at the cell centers.
    A centered grid is defined through its `CenteredGrid.values` `phi.math.Tensor`, its `CenteredGrid.bounds` `phi.geom.Box` describing the physical size, and its `CenteredGrid.extrapolation` (`phi.math.Extrapolation`).
    
    Centered grids support batch, spatial and channel dimensions.

    See Also:
        `StaggeredGrid`,
        `Grid`,
        `SampledField`,
        `Field`,
        module documentation at https://tum-pbs.github.io/PhiFlow/Fields.html
    """

    def __init__(self,
                 values: Any,
                 extrapolation: Any = 0.,
                 bounds: Box = None,
                 resolution: Shape = None,
                 **resolution_: int or Tensor):
        """
        Args:
            values: Values to use for the grid.
                Has to be one of the following:

                * `phi.geom.Geometry`: sets inside values to 1, outside to 0
                * `Field`: resamples the Field to the staggered sample points
                * `Number`: uses the value for all sample points
                * `tuple` or `list`: interprets the sequence as vector, used for all sample points
                * `phi.math.Tensor` compatible with grid dims: uses tensor values as grid values
                * Function `values(x)` where `x` is a `phi.math.Tensor` representing the physical location.

            extrapolation: The grid extrapolation determines the value outside the `values` tensor.
                Allowed types: `float`, `phi.math.Tensor`, `phi.math.extrapolation.Extrapolation`.
            bounds: Physical size and location of the grid as `phi.geom.Box`.
            resolution: Grid resolution as purely spatial `phi.math.Shape`.
            **resolution_: Spatial dimensions as keyword arguments. Typically either `resolution` or `resolution_` is specified.
        """
        if not isinstance(extrapolation, math.Extrapolation):
            # Wrap numbers / tensors as a constant extrapolation.
            extrapolation = math.extrapolation.ConstantExtrapolation(extrapolation)
        if resolution is None and not resolution_:
            # No resolution given: infer it from the spatial dims of the values tensor.
            assert isinstance(values, math.Tensor), "Grid resolution must be specified when 'values' is not a Tensor."
            resolution = values.shape.spatial
            bounds = bounds or Box(0, math.wrap(resolution, channel('vector')))
            elements = GridCell(resolution, bounds)
        else:
            resolution = (resolution or math.EMPTY_SHAPE) & spatial(**resolution_)
            bounds = bounds or Box(0, math.wrap(resolution, channel('vector')))
            elements = GridCell(resolution, bounds)
            # Convert `values` to a Tensor matching the grid resolution, depending on its kind.
            if isinstance(values, math.Tensor):
                values = math.expand(values, resolution)
            elif isinstance(values, Geometry):
                values = reduce_sample(HardGeometryMask(values), elements)
            elif isinstance(values, Field):
                values = reduce_sample(values, elements)
            elif callable(values):
                values = values(elements.center)
                assert isinstance(values, math.Tensor), f"values function must return a Tensor but returned {type(values)}"
            else:
                values = math.expand(math.tensor(values), resolution)
        if values.dtype.kind not in (float, complex):
            # Grids store floating-point (or complex) data; convert e.g. int / bool values.
            values = math.to_float(values)
        assert resolution.spatial_rank == bounds.spatial_rank, f"Resolution {resolution} does not match bounds {bounds}"
        Grid.__init__(self, elements, values, extrapolation, values.shape.spatial, bounds)

    def __getitem__(self, item: dict):
        # Integer selections along spatial dims are kept as size-1 slices so the result remains a grid.
        values = self._values[{dim: slice(sel, sel + 1) if isinstance(sel, int) and dim in self.shape.spatial else sel for dim, sel in item.items()}]
        extrapolation = self._extrapolation[item]
        bounds = self.elements[item].bounds
        return CenteredGrid(values, bounds=bounds, extrapolation=extrapolation)

    def _sample(self, geometry: Geometry) -> Tensor:
        """ Samples this grid at `geometry`, using fast paths for matching or shifted grids. """
        if geometry == self.bounds:
            # Sampling over the full bounds yields the mean over all cells.
            return math.mean(self._values, self._resolution)
        if isinstance(geometry, GeometryStack):
            sampled = [self.sample(g) for g in geometry.geometries]
            return math.stack(sampled, geometry.stack_dim)
        if isinstance(geometry, GridCell):
            if self.elements == geometry:
                return self.values
            elif math.close(self.dx, geometry.size):
                # Same cell size, possibly shifted/cropped: try the cheaper subgrid resampling.
                fast_resampled = self._shift_resample(geometry.resolution, geometry.bounds)
                if fast_resampled is not NotImplemented:
                    return fast_resampled
        # General case: linear interpolation at the query points.
        points = geometry.center
        local_points = self.box.global_to_local(points) * self.resolution - 0.5
        return math.grid_sample(self.values, local_points, self.extrapolation)

    def _shift_resample(self, resolution: Shape, bounds: Box, threshold=1e-5, max_padding=20):
        """ Resamples to a same-cell-size grid by padding and extracting a subgrid. Returns `NotImplemented` if too much padding would be required. """
        assert math.all_available(bounds.lower, bounds.upper), "Shift resampling requires 'bounds' to be available."
        # Number of cells to pad on each side so that `bounds` lies inside the (padded) grid.
        lower = math.to_int32(math.ceil(math.maximum(0, self.box.lower - bounds.lower) / self.dx - threshold))
        upper = math.to_int32(math.ceil(math.maximum(0, bounds.upper - self.box.upper) / self.dx - threshold))
        total_padding = (math.sum(lower) + math.sum(upper)).numpy()
        if total_padding > max_padding:
            return NotImplemented
        elif total_padding > 0:
            from phi.field import pad
            padded = pad(self, {dim: (int(lower[i]), int(upper[i])) for i, dim in enumerate(self.shape.spatial.names)})
            grid_box, grid_resolution, grid_values = padded.box, padded.resolution, padded.values
        else:
            grid_box, grid_resolution, grid_values = self.box, self.resolution, self.values
        origin_in_local = grid_box.global_to_local(bounds.lower) * grid_resolution
        data = math.sample_subgrid(grid_values, origin_in_local, resolution)
        return data

    def closest_values(self, points: Geometry):
        """
        Sample the closest grid point values of this field at the world-space locations given by `points`.

        Args:
            points: world-space locations

        Returns:
            Closest grid point values as a `Tensor`, stacked along dimensions `closest_<dim>` for each spatial dimension `<dim>`.
        """
        assert 'vector' not in points.shape
        local_points = self.box.global_to_local(points.center) * self.resolution - 0.5
        return math.closest_grid_values(self.values, local_points, self.extrapolation)

Ancestors

  • phi.field._grid.Grid
  • phi.field._field.SampledField
  • phi.field._field.Field

Methods

def closest_values(self, points: phi.geom._geom.Geometry)

Sample the closest grid point values of this field at the world-space locations (in physical units) given by points. Points must have a single channel dimension named vector. It may additionally contain any number of batch and spatial dimensions, all treated as batch dimensions.

Args

points
world-space locations

Returns

Closest grid point values as a Tensor. For each dimension, the grid points immediately left and right of the sample points are evaluated. For each point in points, a 2^d cube of points is determined where d is the number of spatial dimensions of this field. These values are stacked along the new dimensions 'closest_<dim>' where <dim> refers to the name of a spatial dimension.

Expand source code
def closest_values(self, points: Geometry):
    assert 'vector' not in points.shape
    local_points = self.box.global_to_local(points.center) * self.resolution - 0.5
    return math.closest_grid_values(self.values, local_points, self.extrapolation)
class ConstantField (value=1.0)

Deprecated.

Expand source code
class ConstantField(Field):
    """
    Deprecated. Represents a spatially constant value; use numbers or tuples instead.
    """

    def __init__(self, value=1.0):
        warnings.warn("ConstantField is deprecated. Use numbers or tuples instead.", DeprecationWarning)
        self.value = math.wrap(value)

    @property
    def shape(self) -> Shape:
        return self.value.shape

    def _sample(self, geometry: Geometry) -> math.Tensor:
        # The value is the same everywhere, independent of the sample geometry.
        return self.value

    def _op1(self, operator) -> Field:
        return ConstantField(operator(self.value))

    def _op2(self, other, operator) -> Field:
        return ConstantField(operator(self.value, other))

    def __getitem__(self, item):
        return ConstantField(self.value[item])

    def unstack(self, dimension: str):
        warnings.warn("ConstantField.unstack() is deprecated. Use field.unstack(ConstantField) instead.", DeprecationWarning)
        components = self.value.unstack(dimension)
        return tuple(ConstantField(component) for component in components)

    def __repr__(self):
        return repr(self.value)

Ancestors

  • phi.field._field.Field

Instance variables

var shape : phi.math._shape.Shape

Returns a shape with the following properties

  • The spatial dimension names match the dimensions of this Field
  • The batch dimensions match the batch dimensions of this Field
  • The channel dimensions match the channels of this Field
Expand source code
@property
def shape(self) -> Shape:
    return self.value.shape

Methods

def unstack(self, dimension: str)
Expand source code
def unstack(self, dimension: str):
    warnings.warn("ConstantField.unstack() is deprecated. Use field.unstack(ConstantField) instead.", DeprecationWarning)
    return tuple(ConstantField(v) for v in self.value.unstack(dimension))
class Field

Base class for all fields.

Important implementations:

  • CenteredGrid
  • StaggeredGrid
  • PointCloud
  • Noise

See the phi.field module documentation at https://tum-pbs.github.io/PhiFlow/Fields.html

Expand source code
class Field:
    """
    Base class for all fields.
    
    Important implementations:
    
    * CenteredGrid
    * StaggeredGrid
    * PointCloud
    * Noise
    
    See the `phi.field` module documentation at https://tum-pbs.github.io/PhiFlow/Fields.html
    """

    @property
    def shape(self) -> Shape:
        """
        Returns a shape with the following properties
        
        * The spatial dimension names match the dimensions of this Field
        * The batch dimensions match the batch dimensions of this Field
        * The channel dimensions match the channels of this Field
        """
        raise NotImplementedError()

    @property
    def spatial_rank(self) -> int:
        """
        Spatial rank of the field (1 for 1D, 2 for 2D, 3 for 3D).
        This is equal to the spatial rank of the `data`.
        """
        return self.shape.spatial.rank

    def _sample(self, geometry: Geometry) -> math.Tensor:
        """ For internal use only. Use `sample()` instead. """
        raise NotImplementedError(self)

    def at(self, representation: 'SampledField', keep_extrapolation=False) -> 'SampledField':
        """
        Samples this field at the sample points of `representation`.
        The result will approximate the values of this field on the data structure of `representation`.
        
        Unlike `Field.sample()`, this method returns a `Field` object, not a `Tensor`.

        Operator alias:
            `self @ representation`.

        See Also:
            `sample()`, `reduce_sample()`, [Resampling overview](https://tum-pbs.github.io/PhiFlow/Fields.html#resampling-fields).

        Args:
            representation: Field object defining the sample points. The values of `representation` are ignored.
            keep_extrapolation: Only available if `self` is a `SampledField`.
                If True, the resampled field will inherit the extrapolation from `self` instead of `representation`.
                This can result in non-compatible value tensors for staggered grids where the tensor size depends on the extrapolation type.

        Returns:
            Field object of same type as `representation`
        """
        resampled = reduce_sample(self, representation.elements)
        extrap = self.extrapolation if isinstance(self, SampledField) and keep_extrapolation else representation.extrapolation
        # _op1 maps over the components of `representation`: extrapolation objects
        # are replaced by `extrap`, all other components by the resampled values.
        return representation._op1(lambda old: extrap if isinstance(old, math.extrapolation.Extrapolation) else resampled)

    def __matmul__(self, other: 'SampledField'):  # values @ representation
        """
        Resampling operator with change of extrapolation.

        Args:
            other: instance of SampledField

        Returns:
            Copy of other with values and extrapolation from this Field.
        """
        return self.at(other, keep_extrapolation=False)

    def __rmatmul__(self, other):  # values @ representation
        # Only meaningful when `self` can act as the representation (i.e. is sampled).
        if not isinstance(self, SampledField):
            return NotImplemented
        if isinstance(other, Geometry):
            return self.with_values(other)
        return NotImplemented

    def __getitem__(self, item: dict) -> 'Field':
        """
        Access a slice of the Field.
        The returned `Field` may be of a different type than `self`.

        Args:
            item: `dict` mapping dimensions (`str`) to selections (`int` or `slice`)

        Returns:
            Sliced `Field`.
        """
        raise NotImplementedError(self)

    def dimension(self, name: str):
        """
        Returns a reference to one of the dimensions of this field.

        The dimension reference can be used the same way as a `Tensor` dimension reference.
        Notable properties and methods of a dimension reference are:
        indexing using `[index]`, `unstack()`, `size`, `exists`, `is_batch`, `is_spatial`, `is_channel`.

        A shortcut to calling this function is the syntax `field.<dim_name>` which calls `field.dimension(<dim_name>)`.

        Args:
            name: dimension name

        Returns:
            dimension reference

        """
        return _FieldDim(self, name)

    def __getattr__(self, name: str) -> '_FieldDim':
        # Called only for attributes not found through normal lookup;
        # unknown names are interpreted as dimension references (see `dimension()`).
        if name.startswith('_'):
            raise AttributeError(f"'{type(self)}' object has no attribute '{name}'")
        if hasattr(self.__class__, name):
            # NOTE(review): a class-level attribute exists but its lookup still failed,
            # presumably because a property raised — surface this as an error, not a dim.
            raise RuntimeError(f"Failed to get attribute '{name}' of {self.__class__}")
        return _FieldDim(self, name)

    def __repr__(self):
        return f"{self.__class__.__name__} {self.shape}"

Subclasses

  • phi.field._angular_velocity.AngularVelocity
  • phi.field._constant.ConstantField
  • phi.field._field.SampledField
  • phi.field._mask.HardGeometryMask
  • phi.field._noise.Noise

Instance variables

var shape : phi.math._shape.Shape

Returns a shape with the following properties

  • The spatial dimension names match the dimensions of this Field
  • The batch dimensions match the batch dimensions of this Field
  • The channel dimensions match the channels of this Field
Expand source code
@property
def shape(self) -> Shape:
    """
    Returns a shape with the following properties
    
    * The spatial dimension names match the dimensions of this Field
    * The batch dimensions match the batch dimensions of this Field
    * The channel dimensions match the channels of this Field
    """
    raise NotImplementedError()
var spatial_rank : int

Spatial rank of the field (1 for 1D, 2 for 2D, 3 for 3D). This is equal to the spatial rank of the data.

Expand source code
@property
def spatial_rank(self) -> int:
    """
    Spatial rank of the field (1 for 1D, 2 for 2D, 3 for 3D).
    This is equal to the spatial rank of the `data`.
    """
    return self.shape.spatial.rank

Methods

def at(self, representation: SampledField, keep_extrapolation=False) ‑> phi.field._field.SampledField

Samples this field at the sample points of representation. The result will approximate the values of this field on the data structure of representation.

Unlike Field.sample(), this method returns a Field object, not a Tensor.

Operator alias: self @ representation.

See Also: sample(), reduce_sample(), Resampling overview.

Args

representation
Field object defining the sample points. The values of representation are ignored.
keep_extrapolation
Only available if self is a SampledField. If True, the resampled field will inherit the extrapolation from self instead of representation. This can result in non-compatible value tensors for staggered grids where the tensor size depends on the extrapolation type.

Returns

Field object of same type as representation

Expand source code
def at(self, representation: 'SampledField', keep_extrapolation=False) -> 'SampledField':
    """
    Samples this field at the sample points of `representation`.
    The result will approximate the values of this field on the data structure of `representation`.
    
    Unlike `Field.sample()`, this method returns a `Field` object, not a `Tensor`.

    Operator alias:
        `self @ representation`.

    See Also:
        `sample()`, `reduce_sample()`, [Resampling overview](https://tum-pbs.github.io/PhiFlow/Fields.html#resampling-fields).

    Args:
        representation: Field object defining the sample points. The values of `representation` are ignored.
        keep_extrapolation: Only available if `self` is a `SampledField`.
            If True, the resampled field will inherit the extrapolation from `self` instead of `representation`.
            This can result in non-compatible value tensors for staggered grids where the tensor size depends on the extrapolation type.

    Returns:
        Field object of same type as `representation`
    """
    resampled = reduce_sample(self, representation.elements)
    extrap = self.extrapolation if isinstance(self, SampledField) and keep_extrapolation else representation.extrapolation
    return representation._op1(lambda old: extrap if isinstance(old, math.extrapolation.Extrapolation) else resampled)
def dimension(self, name: str)

Returns a reference to one of the dimensions of this field.

The dimension reference can be used the same way as a Tensor dimension reference. Notable properties and methods of a dimension reference are: indexing using [index], unstack(), size, exists, is_batch, is_spatial, is_channel.

A shortcut to calling this function is the syntax field.<dim_name> which calls field.dimension(<dim_name>).

Args

name
dimension name

Returns

dimension reference

Expand source code
def dimension(self, name: str):
    """
    Returns a reference to one of the dimensions of this field.

    The dimension reference can be used the same way as a `Tensor` dimension reference.
    Notable properties and methods of a dimension reference are:
    indexing using `[index]`, `unstack()`, `size`, `exists`, `is_batch`, `is_spatial`, `is_channel`.

    A shortcut to calling this function is the syntax `field.<dim_name>` which calls `field.dimension(<dim_name>)`.

    Args:
        name: dimension name

    Returns:
        dimension reference

    """
    return _FieldDim(self, name)
class Grid

Base class for CenteredGrid and StaggeredGrid.

Args

elements
Geometry object specifying the sample points and sizes
values
values corresponding to elements
extrapolation
values outside elements
Expand source code
class Grid(SampledField):
    """
    Base class for `CenteredGrid` and `StaggeredGrid`.

    A grid embeds a `values` tensor in physical space:
    `bounds` specifies the physical extent and `resolution` the number of
    sample points along each spatial dimension.
    """

    def __init__(self, elements: Geometry, values: Tensor, extrapolation: math.Extrapolation, resolution: Shape, bounds: Box):
        """
        Args:
            elements: Geometry object specifying the sample points and sizes
            values: values corresponding to elements
            extrapolation: values outside elements
            resolution: spatial `Shape` specifying the number of sample points per dimension
            bounds: physical extent of the grid as a `Box`
        """
        SampledField.__init__(self, elements, values, extrapolation)
        assert values.shape.spatial_rank == elements.spatial_rank, f"Spatial dimensions of values ({values.shape}) do not match elements {elements}"
        # Fixed assertion message: this check compares against bounds, but the message previously referred to elements.
        assert values.shape.spatial_rank == bounds.spatial_rank, f"Spatial dimensions of values ({values.shape}) do not match bounds {bounds}"
        assert values.shape.instance_rank == 0, f"Instance dimensions not supported for grids. Got values with shape {values.shape}"
        self._bounds = bounds
        self._resolution = resolution

    def closest_values(self, points: Geometry):
        """
        Sample the closest grid point values of this field at the world-space locations (in physical units) given by `points`.
        Points must have a single channel dimension named `vector`.
        It may additionally contain any number of batch and spatial dimensions, all treated as batch dimensions.

        Args:
            points: world-space locations

        Returns:
            Closest grid point values as a `Tensor`.
            For each dimension, the grid points immediately left and right of the sample points are evaluated.
            For each point in `points`, a *2^d* cube of points is determined where *d* is the number of spatial dimensions of this field.
            These values are stacked along the new dimensions `'closest_<dim>'` where `<dim>` refers to the name of a spatial dimension.
        """
        raise NotImplementedError(self)

    def _sample(self, geometry: Geometry) -> math.Tensor:
        # Implemented by CenteredGrid / StaggeredGrid.
        raise NotImplementedError(self)

    def with_values(self, values):
        """ Returns a copy of this grid with `values` replaced. """
        if isinstance(values, math.Tensor):
            return type(self)(values, extrapolation=self.extrapolation, bounds=self.bounds)
        else:
            # Non-tensor values (e.g. constants or functions) cannot provide a shape,
            # so the resolution must be passed along explicitly.
            return type(self)(values, extrapolation=self.extrapolation, bounds=self.bounds, resolution=self._resolution)

    def with_extrapolation(self, extrapolation: math.Extrapolation):
        """ Returns a copy of this grid with `extrapolation` replaced. """
        return type(self)(self.values, extrapolation=extrapolation, bounds=self.bounds)

    def with_bounds(self, bounds: Box):
        """ Returns a copy of this grid with `bounds` replaced. """
        return type(self)(self.values, extrapolation=self.extrapolation, bounds=bounds)

    def __value_attrs__(self):
        return '_values', '_extrapolation'

    def __variable_attrs__(self):
        return '_values',

    def __eq__(self, other):
        if not type(self) == type(other):
            return False
        # Metadata must match exactly: bounds, resolution and extrapolation.
        if not (self._bounds == other._bounds and self._resolution == other._resolution and self._extrapolation == other._extrapolation):
            return False
        if self.values is None:
            return other.values is None
        if other.values is None:
            return False
        if not math.all_available(self.values) or not math.all_available(other.values):  # tracers involved
            if math.all_available(self.values) != math.all_available(other.values):
                return False
            else:  # both tracers
                # Tracer values cannot be compared element-wise; fall back to shape equality.
                return self.values.shape == other.values.shape
        return bool((self.values == other.values).all)

    def __getitem__(self, item: dict) -> 'Grid':
        raise NotImplementedError(self)

    @property
    def shape(self):
        # Spatial shape comes from the resolution; batch/channel dims from the values tensor.
        return self._resolution & self._values.shape.non_spatial

    @property
    def bounds(self) -> Box:
        """ Physical extent of the grid. """
        return self._bounds

    @property
    def box(self) -> Box:
        """ Alias for `bounds`. """
        return self._bounds

    @property
    def resolution(self) -> Shape:
        """ Number of sample points per spatial dimension. """
        return self._resolution

    @property
    def dx(self) -> Tensor:
        """ Physical size of a single grid cell, `bounds.size / resolution`. """
        return self.box.size / self.resolution

    def __repr__(self):
        if self._values is not None:
            return f"{self.__class__.__name__}[{self.shape.non_spatial & self.resolution}, size={self.box.size}, extrapolation={self._extrapolation}]"
        else:
            return f"{self.__class__.__name__}[{self.resolution}, size={self.box.size}, extrapolation={self._extrapolation}]"

Ancestors

  • phi.field._field.SampledField
  • phi.field._field.Field

Subclasses

  • phi.field._grid.CenteredGrid
  • phi.field._grid.StaggeredGrid

Instance variables

var bounds : phi.geom._box.Box
Expand source code
@property
def bounds(self) -> Box:
    return self._bounds
var box : phi.geom._box.Box
Expand source code
@property
def box(self) -> Box:
    return self._bounds
var dx : phi.math._tensors.Tensor
Expand source code
@property
def dx(self) -> Tensor:
    return self.box.size / self.resolution
var resolution : phi.math._shape.Shape
Expand source code
@property
def resolution(self) -> Shape:
    return self._resolution
var shape

Returns a shape with the following properties

  • The spatial dimension names match the dimensions of this Field
  • The batch dimensions match the batch dimensions of this Field
  • The channel dimensions match the channels of this Field
Expand source code
@property
def shape(self):
    return self._resolution & self._values.shape.non_spatial

Methods

def closest_values(self, points: phi.geom._geom.Geometry)

Sample the closest grid point values of this field at the world-space locations (in physical units) given by points. Points must have a single channel dimension named vector. It may additionally contain any number of batch and spatial dimensions, all treated as batch dimensions.

Args

points
world-space locations

Returns

Closest grid point values as a Tensor. For each dimension, the grid points immediately left and right of the sample points are evaluated. For each point in points, a 2^d cube of points is determined where d is the number of spatial dimensions of this field. These values are stacked along the new dimensions 'closest_<dim>' where <dim> refers to the name of a spatial dimension.

Expand source code
def closest_values(self, points: Geometry):
    """
    Sample the closest grid point values of this field at the world-space locations (in physical units) given by `points`.
    Points must have a single channel dimension named `vector`.
    It may additionally contain any number of batch and spatial dimensions, all treated as batch dimensions.

    Args:
        points: world-space locations

    Returns:
        Closest grid point values as a `Tensor`.
        For each dimension, the grid points immediately left and right of the sample points are evaluated.
        For each point in `points`, a *2^d* cube of points is determined where *d* is the number of spatial dimensions of this field.
        These values are stacked along the new dimensions `'closest_<dim>'` where `<dim>` refers to the name of a spatial dimension.
    """
    raise NotImplementedError(self)
def with_bounds(self, bounds: phi.geom._box.Box)
Expand source code
def with_bounds(self, bounds: Box):
    return type(self)(self.values, extrapolation=self.extrapolation, bounds=bounds)
def with_extrapolation(self, extrapolation: Extrapolation)

Returns a copy of this field with the extrapolation replaced.

Expand source code
def with_extrapolation(self, extrapolation: math.Extrapolation):
    return type(self)(self.values, extrapolation=extrapolation, bounds=self.bounds)
def with_values(self, values)

Returns a copy of this field with values replaced.

Expand source code
def with_values(self, values):
    if isinstance(values, math.Tensor):
        return type(self)(values, extrapolation=self.extrapolation, bounds=self.bounds)
    else:
        return type(self)(values, extrapolation=self.extrapolation, bounds=self.bounds, resolution=self._resolution)
class HardGeometryMask (geometry: phi.geom._geom.Geometry)

Field that takes the value 1 inside a Geometry object and 0 outside. For volume sampling, performs sampling at the center points.

Expand source code
class HardGeometryMask(Field):
    """
    Binary indicator field of a `Geometry`: evaluates to 1 at points that lie
    inside the geometry and to 0 everywhere else.
    Volumes are sampled at their center points rather than integrated.
    """

    def __init__(self, geometry: Geometry):
        assert isinstance(geometry, Geometry)
        self.geometry = geometry

    @property
    def shape(self):
        # Channel dimensions of the geometry (e.g. 'vector') do not carry over to the mask.
        return self.geometry.shape.non_channel

    def _sample(self, geometry: Geometry) -> Tensor:
        # Test the center of each sample volume against the geometry, then cast bool -> float.
        inside = self.geometry.lies_inside(geometry.center)
        return math.to_float(inside)

    def __getitem__(self, item: dict):
        # Slicing the mask slices the underlying geometry.
        sliced = self.geometry[item]
        return HardGeometryMask(sliced)

Ancestors

  • phi.field._field.Field

Subclasses

  • phi.field._mask.SoftGeometryMask

Instance variables

var shape

Returns a shape with the following properties

  • The spatial dimension names match the dimensions of this Field
  • The batch dimensions match the batch dimensions of this Field
  • The channel dimensions match the channels of this Field
Expand source code
@property
def shape(self):
    return self.geometry.shape.non_channel
class Noise (*shape: phi.math._shape.Shape, scale=10, smoothness=1.0, **channel_dims)

Generates random noise fluctuations which can be configured in physical size and smoothness. Each time values are sampled from a Noise field, a new noise field is generated.

Noise is typically used as an initializer for CenteredGrids or StaggeredGrids.

Args

shape
Batch and channel dimensions. Spatial dimensions will be added automatically once sampled on a grid.
scale
Size of noise fluctuations in physical units.
smoothness
Determines how quickly high frequencies die out.
**dims
Additional dimensions, added to shape.
Expand source code
class Noise(Field):
    """
    Generates random noise fluctuations which can be configured in physical size and smoothness.
    Each time values are sampled from a Noise field, a new noise field is generated.

    Noise is typically used as an initializer for CenteredGrids or StaggeredGrids.
    """

    def __init__(self, *shape: math.Shape, scale=10, smoothness=1.0, **channel_dims):
        """
        Args:
          shape: Batch and channel dimensions. Spatial dimensions will be added automatically once sampled on a grid.
          scale: Size of noise fluctuations in physical units.
          smoothness: Determines how quickly high frequencies die out.
          **dims: Additional dimensions, added to `shape`.
        """
        self.scale = scale
        self.smoothness = smoothness
        # Keyword arguments become additional channel dimensions of the noise.
        self._shape = math.concat_shapes(*shape, channel(**channel_dims))

    @property
    def shape(self):
        # Batch/channel shape only; spatial dimensions are added when sampling on a grid.
        return self._shape

    def _sample(self, geometry: Geometry) -> Tensor:
        # Noise can only be sampled on regular grids, where resolution and physical
        # size determine the frequency spectrum of the generated fluctuations.
        if isinstance(geometry, GridCell):
            return self.grid_sample(geometry.resolution, geometry.grid_size)
        raise NotImplementedError(f"{type(geometry)} not supported. Only GridCell allowed.")

    def grid_sample(self, resolution: math.Shape, size, shape: math.Shape = None):
        """
        Generates one random noise sample on a regular grid.

        The noise is constructed in frequency space: complex Gaussian random values
        are weighted by `1/k**smoothness` (with `k` the squared frequency magnitude),
        frequencies below a fixed cutoff are masked out, and the result is
        transformed back via inverse FFT. The output is normalized to zero mean and
        unit standard deviation over all non-batch dimensions.

        Args:
            resolution: spatial grid resolution
            size: physical size of the grid  # presumably in the same units as `scale` -- TODO confirm
            shape: (optional) batch/channel shape; defaults to this field's shape

        Returns:
            Noise values as a float `Tensor` with dimensions `shape & resolution`.
        """
        shape = (self._shape if shape is None else shape) & resolution
        rndj = math.to_complex(random_normal(shape)) + 1j * math.to_complex(random_normal(shape))  # Note: there is no complex32
        with math.NUMPY:  # frequencies are data-independent, so compute them with NumPy
            k = math.fftfreq(resolution) * resolution / size * self.scale  # in physical units
            k = math.vec_squared(k)
        lowest_frequency = 0.1
        weight_mask = math.to_float(k > lowest_frequency)
        # --- Compute 1/k ---
        # NOTE(review): mutates the private `_native` array in place to avoid division by
        # zero at the zero-frequency (DC) entry; that entry is explicitly zeroed below.
        k._native[(0,) * len(k.shape)] = np.inf
        inv_k = 1 / k
        inv_k._native[(0,) * len(k.shape)] = 0
        # --- Compute result ---
        fft = rndj * inv_k ** self.smoothness * weight_mask
        array = math.real(math.ifft(fft))
        # Normalize to unit standard deviation and zero mean over non-batch dimensions.
        array /= math.std(array, dim=array.shape.non_batch)
        array -= math.mean(array, dim=array.shape.non_batch)
        array = math.to_float(array)
        return array

    def __getitem__(self, item: dict):
        """ Slices the batch/channel shape; `scale` and `smoothness` are kept. """
        new_shape = self.shape.after_gather(item)
        return Noise(new_shape, scale=self.scale, smoothness=self.smoothness)

    def __repr__(self):
        return f"{self._shape}, scale={self.scale}, smoothness={self.smoothness}"

Ancestors

  • phi.field._field.Field

Instance variables

var shape

Returns a shape with the following properties

  • The spatial dimension names match the dimensions of this Field
  • The batch dimensions match the batch dimensions of this Field
  • The channel dimensions match the channels of this Field
Expand source code
@property
def shape(self):
    return self._shape

Methods

def grid_sample(self, resolution: phi.math._shape.Shape, size, shape: phi.math._shape.Shape = None)
Expand source code
def grid_sample(self, resolution: math.Shape, size, shape: math.Shape = None):
    shape = (self._shape if shape is None else shape) & resolution
    rndj = math.to_complex(random_normal(shape)) + 1j * math.to_complex(random_normal(shape))  # Note: there is no complex32
    with math.NUMPY:
        k = math.fftfreq(resolution) * resolution / size * self.scale  # in physical units
        k = math.vec_squared(k)
    lowest_frequency = 0.1
    weight_mask = math.to_float(k > lowest_frequency)
    # --- Compute 1/k ---
    k._native[(0,) * len(k.shape)] = np.inf
    inv_k = 1 / k
    inv_k._native[(0,) * len(k.shape)] = 0
    # --- Compute result ---
    fft = rndj * inv_k ** self.smoothness * weight_mask
    array = math.real(math.ifft(fft))
    array /= math.std(array, dim=array.shape.non_batch)
    array -= math.mean(array, dim=array.shape.non_batch)
    array = math.to_float(array)
    return array
class PointCloud (elements: phi.geom._geom.Geometry, values: Any = 1, extrapolation=0, add_overlapping=False, bounds: phi.geom._box.Box = None, color: str = None)

A point cloud consists of elements at arbitrary locations. A value or vector is associated with each element.

Outside of elements, the value of the field is determined by the extrapolation.

All points belonging to one example must be listed in the 'points' dimension.

Unlike with GeometryMask, the elements of a PointCloud are assumed to be small. When sampling this field on a grid, scatter functions may be used.

See the phi.field module documentation at https://tum-pbs.github.io/PhiFlow/Fields.html

Args

elements
Geometry object specifying the sample points and sizes
values
values corresponding to elements
extrapolation
values outside elements
add_overlapping
True: values of overlapping geometries are summed. False: values between overlapping geometries are interpolated
bounds
(optional) size of the fixed domain in which the points should get visualized. None results in max and min coordinates of points.
color
(optional) hex code for color or tensor of colors (same length as elements) in which points should get plotted.
Expand source code
class PointCloud(SampledField):
    """
    A point cloud consists of elements at arbitrary locations.
    A value or vector is associated with each element.

    Outside of elements, the value of the field is determined by the extrapolation.

    All points belonging to one example must be listed in the 'points' dimension.

    Unlike with GeometryMask, the elements of a PointCloud are assumed to be small.
    When sampling this field on a grid, scatter functions may be used.

    See the `phi.field` module documentation at https://tum-pbs.github.io/PhiFlow/Fields.html
    """

    def __init__(self,
                 elements: Geometry,
                 values: Any = 1,
                 extrapolation=math.extrapolation.ZERO,
                 add_overlapping=False,
                 bounds: Box = None,
                 color: str or Tensor or tuple or list or None = None):
        """
        Args:
          elements: Geometry object specifying the sample points and sizes
          values: values corresponding to elements
          extrapolation: values outside elements
          add_overlapping: True: values of overlapping geometries are summed. False: values between overlapping geometries are interpolated
          bounds: (optional) size of the fixed domain in which the points should get visualized. None results in max and min coordinates of points.
          color: (optional) hex code for color or tensor of colors (same length as elements) in which points should get plotted.
        """
        SampledField.__init__(self, elements, math.wrap(values), extrapolation)
        self._add_overlapping = add_overlapping
        assert bounds is None or isinstance(bounds, Box), 'Invalid bounds.'
        self._bounds = bounds
        # Default color is a single blue; tuples/lists are wrapped along the instance 'points' dimension.
        color = '#0060ff' if color is None else color
        self._color = math.wrap(color, instance('points')) if isinstance(color, (tuple, list)) else math.wrap(color)

    @property
    def shape(self):
        # Element shape (including 'points') combined with batch/channel dims of the values.
        return self._elements.shape & self._values.shape.non_spatial

    def __getitem__(self, item: dict):
        """ Slices elements, values, color and extrapolation alike; bounds and overlap mode are kept. """
        elements = self.elements[item]
        values = self._values[item]
        color = self._color[item]
        extrapolation = self._extrapolation[item]
        return PointCloud(elements, values, extrapolation, self._add_overlapping, self._bounds, color)

    def with_elements(self, elements: Geometry):
        """ Returns a copy of this point cloud with `elements` replaced. """
        return PointCloud(elements=elements, values=self.values, extrapolation=self.extrapolation, add_overlapping=self._add_overlapping, bounds=self._bounds, color=self._color)

    def with_values(self, values):
        """ Returns a copy of this point cloud with `values` replaced. """
        return PointCloud(elements=self.elements, values=values, extrapolation=self.extrapolation, add_overlapping=self._add_overlapping, bounds=self._bounds, color=self._color)

    def with_extrapolation(self, extrapolation: math.Extrapolation):
        """ Returns a copy of this point cloud with `extrapolation` replaced. """
        return PointCloud(elements=self.elements, values=self.values, extrapolation=extrapolation, add_overlapping=self._add_overlapping, bounds=self._bounds, color=self._color)

    def with_color(self, color: str or Tensor or tuple or list):
        """ Returns a copy of this point cloud with `color` replaced. """
        return PointCloud(elements=self.elements, values=self.values, extrapolation=self.extrapolation, add_overlapping=self._add_overlapping, bounds=self._bounds, color=color)

    def with_bounds(self, bounds: Box):
        """ Returns a copy of this point cloud with `bounds` replaced. """
        return PointCloud(elements=self.elements, values=self.values, extrapolation=self.extrapolation, add_overlapping=self._add_overlapping, bounds=bounds, color=self._color)

    def __value_attrs__(self):
        return '_values', '_extrapolation'

    def __variable_attrs__(self):
        return '_values', '_elements'

    @property
    def bounds(self) -> Box:
        # May be None if no fixed visualization domain was specified.
        return self._bounds

    @property
    def color(self) -> Tensor:
        return self._color

    def _sample(self, geometry: Geometry) -> Tensor:
        # Sampling at the exact element locations returns the stored values directly.
        if geometry == self.elements:
            return self.values
        elif isinstance(geometry, GridCell):
            # Rasterize the point values onto the regular grid via scatter.
            return self._grid_scatter(geometry.bounds, geometry.resolution)
        elif isinstance(geometry, GeometryStack):
            # Sample each stacked geometry separately, then stack the results.
            sampled = [self._sample(g) for g in geometry.geometries]
            return math.stack(sampled, geometry.stack_dim)
        else:
            raise NotImplementedError()

    def _grid_scatter(self, box: Box, resolution: math.Shape):
        """
        Approximately samples this field on a regular grid using math.scatter().

        Args:
          box: physical dimensions of the grid
          resolution: grid resolution

        Returns:
          Scattered values as a `Tensor` with spatial dimensions `resolution`.
        """
        # Convert world-space point positions to (fractional) grid index coordinates.
        closest_index = box.global_to_local(self.points) * resolution - 0.5
        # 'add' sums values of points falling into the same cell, 'mean' averages them.
        mode = 'add' if self._add_overlapping else 'mean'
        base = math.zeros(resolution)
        if isinstance(self.extrapolation, math.extrapolation.ConstantExtrapolation):
            # Start from the constant background value so that empty cells match the extrapolation.
            base += self.extrapolation.value
        scattered = math.scatter(base, closest_index, self.values, mode=mode, outside_handling='discard')
        return scattered

    def __repr__(self):
        return "PointCloud[%s]" % (self.shape,)

    def __and__(self, other):
        """ Concatenates two point clouds along the instance 'points' dimension. """
        assert isinstance(other, PointCloud)
        from ._field_math import concat
        return concat([self, other], instance('points'))

Ancestors

  • phi.field._field.SampledField
  • phi.field._field.Field

Instance variables

var bounds : phi.geom._box.Box
Expand source code
@property
def bounds(self) -> Box:
    return self._bounds
var color : phi.math._tensors.Tensor
Expand source code
@property
def color(self) -> Tensor:
    return self._color
var shape

Returns a shape with the following properties

  • The spatial dimension names match the dimensions of this Field
  • The batch dimensions match the batch dimensions of this Field
  • The channel dimensions match the channels of this Field
Expand source code
@property
def shape(self):
    return self._elements.shape & self._values.shape.non_spatial

Methods

def with_bounds(self, bounds: phi.geom._box.Box)
Expand source code
def with_bounds(self, bounds: Box):
    return PointCloud(elements=self.elements, values=self.values, extrapolation=self.extrapolation, add_overlapping=self._add_overlapping, bounds=bounds, color=self._color)
def with_color(self, color: str)
Expand source code
def with_color(self, color: str or Tensor or tuple or list):
    return PointCloud(elements=self.elements, values=self.values, extrapolation=self.extrapolation, add_overlapping=self._add_overlapping, bounds=self._bounds, color=color)
def with_elements(self, elements: phi.geom._geom.Geometry)
Expand source code
def with_elements(self, elements: Geometry):
    return PointCloud(elements=elements, values=self.values, extrapolation=self.extrapolation, add_overlapping=self._add_overlapping, bounds=self._bounds, color=self._color)
def with_extrapolation(self, extrapolation: Extrapolation)

Returns a copy of this field with the extrapolation replaced.

Expand source code
def with_extrapolation(self, extrapolation: math.Extrapolation):
    return PointCloud(elements=self.elements, values=self.values, extrapolation=extrapolation, add_overlapping=self._add_overlapping, bounds=self._bounds, color=self._color)
def with_values(self, values)

Returns a copy of this field with values replaced.

Expand source code
def with_values(self, values):
    return PointCloud(elements=self.elements, values=values, extrapolation=self.extrapolation, add_overlapping=self._add_overlapping, bounds=self._bounds, color=self._color)
class SampledField (elements: phi.geom._geom.Geometry, values: phi.math._tensors.Tensor, extrapolation: Extrapolation)

Base class for fields that are sampled at specific locations such as grids or point clouds.

Args

elements
Geometry object specifying the sample points and sizes
values
values corresponding to elements
extrapolation
values outside elements
Expand source code
class SampledField(Field):
    """
    Base class for fields that are sampled at specific locations such as grids or point clouds.
    """

    def __init__(self, elements: Geometry, values: Tensor, extrapolation: math.Extrapolation):
        """
        Args:
          elements: Geometry object specifying the sample points and sizes
          values: values corresponding to elements
          extrapolation: values outside elements
        """
        assert isinstance(extrapolation, Extrapolation), f"Not a valid extrapolation: {extrapolation}"
        assert isinstance(elements, Geometry), elements
        assert isinstance(values, Tensor), f"Values must be a Tensor but got {values}."
        self._elements = elements
        self._values = values
        self._extrapolation = extrapolation

    def with_values(self, values):
        """ Returns a copy of this field with `values` replaced. """
        raise NotImplementedError(self)

    def with_extrapolation(self, extrapolation: math.Extrapolation):
        """ Returns a copy of this field with `extrapolation` replaced. """
        raise NotImplementedError(self)

    @property
    def shape(self):
        raise NotImplementedError()

    def __getitem__(self, item: dict) -> 'Field':
        raise NotImplementedError(self)

    @property
    def elements(self) -> Geometry:
        """
        Returns a geometrical representation of the discretized volume elements.
        The result is a tuple of Geometry objects, each of which can have additional spatial (but not batch) dimensions.
        
        For grids, the geometries are boxes while particle fields may be represented as spheres.
        
        If this Field has no discrete points, this method returns an empty geometry.
        """
        return self._elements

    @property
    def points(self) -> Tensor:
        """ Center points of all sample volumes, `elements.center`. """
        return self.elements.center

    @property
    def values(self) -> Tensor:
        """ The sampled values as a `Tensor`. """
        return self._values

    data = values  # alias for `values`

    @property
    def extrapolation(self) -> Extrapolation:
        """ Determines field values outside of the sampled `elements`. """
        return self._extrapolation

    # --- Arithmetic and comparison operators.
    # Binary operators resample `other` onto this field's elements via `_op2`;
    # unary operators apply to values and extrapolation via `_op1`. ---

    def __mul__(self, other):
        return self._op2(other, lambda d1, d2: d1 * d2)

    __rmul__ = __mul__

    def __truediv__(self, other):
        return self._op2(other, lambda d1, d2: d1 / d2)

    def __rtruediv__(self, other):
        return self._op2(other, lambda d1, d2: d2 / d1)

    def __sub__(self, other):
        return self._op2(other, lambda d1, d2: d1 - d2)

    def __rsub__(self, other):
        return self._op2(other, lambda d1, d2: d2 - d1)

    def __add__(self, other):
        return self._op2(other, lambda d1, d2: d1 + d2)

    __radd__ = __add__

    def __pow__(self, power, modulo=None):
        return self._op2(power, lambda f, p: f ** p)

    def __neg__(self):
        return self._op1(lambda x: -x)

    def __gt__(self, other):
        return self._op2(other, lambda x, y: x > y)

    def __ge__(self, other):
        return self._op2(other, lambda x, y: x >= y)

    def __lt__(self, other):
        return self._op2(other, lambda x, y: x < y)

    def __le__(self, other):
        return self._op2(other, lambda x, y: x <= y)

    def __abs__(self):
        return self._op1(lambda x: abs(x))

    def _op1(self: 'SampledFieldType', operator: Callable) -> 'SampledFieldType':
        """
        Perform a unary operation on this field.

        The operator is applied to both the values tensor and the extrapolation.

        Args:
          operator: function that accepts tensors and extrapolations and returns objects of the same type and dimensions

        Returns:
          Field of same type
        """
        values = operator(self.values)
        extrapolation_ = operator(self._extrapolation)
        return self.with_values(values).with_extrapolation(extrapolation_)

    def _op2(self, other, operator) -> 'SampledField':
        """
        Perform a binary operation between this field and `other`.

        Fields are resampled onto this field's elements first; non-field operands
        are wrapped as tensors and combined with the values only (extrapolation unchanged).
        """
        if isinstance(other, Field):
            other_values = reduce_sample(other, self._elements)
            values = operator(self._values, other_values)
            extrapolation_ = operator(self._extrapolation, other.extrapolation)
            return self.with_values(values).with_extrapolation(extrapolation_)
        else:
            other = math.tensor(other)
            values = operator(self._values, other)
            return self.with_values(values)

Ancestors

  • phi.field._field.Field

Subclasses

  • phi.field._grid.Grid
  • phi.field._point_cloud.PointCloud

Instance variables

var data : phi.math._tensors.Tensor
Expand source code
@property
def values(self) -> Tensor:
    return self._values
var elements : phi.geom._geom.Geometry

Returns a geometrical representation of the discretized volume elements. The result is a tuple of Geometry objects, each of which can have additional spatial (but not batch) dimensions.

For grids, the geometries are boxes while particle fields may be represented as spheres.

If this Field has no discrete points, this method returns an empty geometry.

Expand source code
@property
def elements(self) -> Geometry:
    """
    Returns a geometrical representation of the discretized volume elements.
    The result is a tuple of Geometry objects, each of which can have additional spatial (but not batch) dimensions.
    
    For grids, the geometries are boxes while particle fields may be represented as spheres.
    
    If this Field has no discrete points, this method returns an empty geometry.
    """
    return self._elements
var extrapolation : Extrapolation
Expand source code
@property
def extrapolation(self) -> Extrapolation:
    return self._extrapolation
var points : phi.math._tensors.Tensor
Expand source code
@property
def points(self) -> Tensor:
    return self.elements.center
var shape

Returns a shape with the following properties

  • The spatial dimension names match the dimensions of this Field
  • The batch dimensions match the batch dimensions of this Field
  • The channel dimensions match the channels of this Field
Expand source code
@property
def shape(self):
    raise NotImplementedError()
var values : phi.math._tensors.Tensor
Expand source code
@property
def values(self) -> Tensor:
    return self._values

Methods

def with_extrapolation(self, extrapolation: Extrapolation)

Returns a copy of this field with extrapolation replaced.

Expand source code
def with_extrapolation(self, extrapolation: math.Extrapolation):
    """ Returns a copy of this field with `values` replaced. """
    raise NotImplementedError(self)
def with_values(self, values)

Returns a copy of this field with values replaced.

Expand source code
def with_values(self, values):
    """ Returns a copy of this field with `values` replaced. """
    raise NotImplementedError(self)
class Scene

Provides methods for reading and writing simulation data.

See the format documentation at https://tum-pbs.github.io/PhiFlow/Scene_Format_Specification.html .

All data of a Scene is located inside a single directory with name sim_xxxxxx where xxxxxx is the id. The data of the scene is organized into NumPy files by name and frame.

To create a new scene, use Scene.create(). To reference an existing scene, use Scene.at(). To list all scenes within a directory, use Scene.list().

Expand source code
class Scene(object):
    """
    Provides methods for reading and writing simulation data.

    See the format documentation at https://tum-pbs.github.io/PhiFlow/Scene_Format_Specification.html .

    All data of a `Scene` is located inside a single directory with name `sim_xxxxxx` where `xxxxxx` is the `id`.
    The data of the scene is organized into NumPy files by *name* and *frame*.

    To create a new scene, use `Scene.create()`.
    To reference an existing scene, use `Scene.at()`.
    To list all scenes within a directory, use `Scene.list()`.
    """

    def __init__(self, paths: str or math.Tensor):
        self._paths = math.wrap(paths)
        self._properties: dict or None = None

    @property
    def shape(self):
        return self._paths.shape

    @property
    def is_batch(self):
        return self._paths.rank > 0

    @property
    def path(self) -> str:
        """
        Relative path of the scene directory.
        This property only exists for single scenes, not scene batches.
        """
        assert not self.is_batch, "Scene.path is not defined for scene batches."
        return self._paths.native()

    @property
    def paths(self) -> math.Tensor:
        return self._paths

    @staticmethod
    def stack(*scenes: 'Scene', dim: Shape = batch('batch')) -> 'Scene':
        return Scene(math.stack([s._paths for s in scenes], dim))

    @staticmethod
    def create(parent_directory: str,
               shape: math.Shape = math.EMPTY_SHAPE,
               copy_calling_script=True,
               **dimensions) -> 'Scene':
        """
        Creates a new `Scene` or a batch of new scenes inside `parent_directory`.

        See Also:
            `Scene.at()`, `Scene.list()`.

        Args:
            parent_directory: Directory to hold the new `Scene`. If it doesn't exist, it will be created.
            shape: Determines number of scenes to create. Multiple scenes will be represented by a `Scene` with `is_batch=True`.
            copy_calling_script: Whether to copy the Python file that invoked this method into the `src` folder of all created scenes.
                See `Scene.copy_calling_script()`.
            dimensions: Additional batch dimensions

        Returns:
            Single `Scene` object representing the new scene(s).
        """
        shape = shape & math.batch(**dimensions)
        parent_directory = expanduser(parent_directory)
        abs_dir = abspath(parent_directory)
        if not isdir(abs_dir):
            os.makedirs(abs_dir)
            next_id = 0
        else:
            indices = [int(name[4:]) for name in os.listdir(abs_dir) if name.startswith("sim_")]
            next_id = max([-1] + indices) + 1
        ids = math.wrap(tuple(range(next_id, next_id + shape.volume))).vector.split(shape)
        paths = math.map(lambda id_: join(parent_directory, f"sim_{id_:06d}"), ids)
        scene = Scene(paths)
        scene.mkdir()
        if copy_calling_script:
            try:
                scene.copy_calling_script()
            except IOError as err:
                warnings.warn(f"Failed to copy calling script to scene during Scene.create(): {err}")
        return scene

    @staticmethod
    def list(parent_directory: str,
             include_other: bool = False,
             dim: Shape or None = None) -> 'Scene' or tuple:
        """
        Lists all scenes inside the given directory.

        See Also:
            `Scene.at()`, `Scene.create()`.

        Args:
            parent_directory: Directory that contains scene folders.
            include_other: Whether folders that do not match the scene format should also be treated as scenes.
            dim: Stack dimension. If None, returns tuple of `Scene` objects. Otherwise, returns a scene batch with this dimension.

        Returns:
            `tuple` of scenes.
        """
        parent_directory = expanduser(parent_directory)
        abs_dir = abspath(parent_directory)
        if not isdir(abs_dir):
            return ()
        names = [sim for sim in os.listdir(abs_dir) if sim.startswith("sim_") or (include_other and isdir(join(abs_dir, sim)))]
        if dim is None:
            return tuple(Scene(join(parent_directory, name)) for name in names)
        else:
            paths = math.wrap([join(parent_directory, name) for name in names], dim)
            return Scene(paths)

    @staticmethod
    def at(directory: str or tuple or list or math.Tensor or 'Scene', id: int or math.Tensor or None = None) -> 'Scene':
        """
        Creates a `Scene` for an existing directory.

        See Also:
            `Scene.create()`, `Scene.list()`.

        Args:
            directory: Either directory containing scene folder if `id` is given, or scene path if `id=None`.
            id: (Optional) Scene `id`, will be determined from `directory` if not specified.

        Returns:
            `Scene` object for existing scene.
        """
        if isinstance(directory, Scene):
            assert id is None, f"Got id={id} but directory is already a Scene."
            return directory
        if isinstance(directory, (tuple, list)):
            directory = math.wrap(directory, batch('scenes'))
        directory = math.map(lambda d: expanduser(d), math.wrap(directory))
        if id is None:
            paths = directory
        else:
            id = math.wrap(id)
            paths = math.map(lambda d, i: join(d, f"sim_{i:06d}"), directory, id)
        # test all exist
        for path in math.flatten(paths):
            if not isdir(path):
                raise IOError(f"There is no scene at '{path}'")
        return Scene(paths)

    def subpath(self, name: str, create: bool = False) -> str or tuple:
        """
        Resolves the relative path `name` with this `Scene` as the root folder.

        Args:
            name: Relative path with this `Scene` as the root folder.
            create: Whether to create a directory of that name.

        Returns:
            Relative path including the path to this `Scene`.
            In batch mode, returns a `tuple`, else a `str`.
        """
        def single_subpath(path):
            path = join(path, name)
            if create and not isdir(path):
                os.mkdir(path)
            return path

        result = math.map(single_subpath, self._paths)
        if result.rank == 0:
            return result.native()
        else:
            return result

    def _init_properties(self):
        if self._properties is not None:
            return
        json_file = join(next(iter(math.flatten(self._paths))), "description.json")
        if isfile(json_file):
            with open(json_file) as stream:
                self._properties = json.load(stream)
        else:
            self._properties = {}

    def exist_properties(self):
        """
        Checks whether the file `description.json` exists or has existed.
        """
        if self._properties is not None:
            return True  # must have been written or read
        else:
            json_file = join(next(iter(math.flatten(self._paths))), "description.json")
            return isfile(json_file)

    def exists_config(self):
        """ Tests if the configuration file *description.json* exists. In batch mode, tests if any configuration exists. """
        if isinstance(self.path, str):
            return isfile(join(self.path, "description.json"))
        else:
            return any(isfile(join(p, "description.json")) for p in self.path)

    @property
    def properties(self):
        self._init_properties()
        return self._properties

    @properties.setter
    def properties(self, dict):
        self._properties = dict
        with open(join(self.path, "description.json"), "w") as out:
            json.dump(self._properties, out, indent=2)

    def put_property(self, key, value):
        """ See `Scene.put_properties()`. """
        self._init_properties()
        self._properties[key] = value
        self._write_properties()

    def put_properties(self, update: dict = None, **kw_updates):
        """
        Updates the properties dictionary and stores it in `description.json` of all scene folders.

        Args:
            update: new values, must be JSON serializable.
            kw_updates: additional update as keyword arguments. This overrides `update`.
        """
        self._init_properties()
        if update:
            self._properties.update(update)
        self._properties.update(kw_updates)
        self._write_properties()

    def _write_properties(self):
        for path in math.flatten(self.paths):
            with open(join(path, "description.json"), "w") as out:
                json.dump(self._properties, out, indent=2)

    def write_sim_frame(self, arrays, fieldnames, frame):
        write_sim_frame(self._paths, arrays, names=fieldnames, frame=frame)

    def write(self, data: dict = None, frame=0, **kw_data):
        """
        Writes fields to this scene.
        One NumPy file will be created for each `phi.field.Field`

        See Also:
            `Scene.read()`.

        Args:
            data: `dict` mapping field names to `Field` objects that can be written using `phi.field.write()`.
            kw_data: Additional data, overrides elements in `data`.
            frame: Frame number.
        """
        data = dict(data) if data else {}
        data.update(kw_data)
        write_sim_frame(self._paths, data, names=None, frame=frame)

    def read_array(self, field_name, frame):
        return read_sim_frame(self._paths, field_name, frame=frame)

    # def read_sim_frames(self, fieldnames=None, frames=None):
    #     return read_sim_frames(self.path, fieldnames=fieldnames, frames=frames, batch_dim=self.batch_dim)

    def read(self, *names: str, frame=0, convert_to_backend=True):
        """
        Reads one or multiple fields from disc.

        See Also:
            `Scene.write()`.

        Args:
            names: Single field name or sequence of field names.
            frame: Frame number.
            convert_to_backend: Whether to convert the read data to the data format of the default backend, e.g. TensorFlow tensors.

        Returns:
            Single `phi.field.Field` or sequence of fields, depending on the type of `names`.
        """
        result = read_sim_frame(self._paths, names, frame=frame, convert_to_backend=convert_to_backend)
        return result[0] if len(names) == 1 else result

    @property
    def fieldnames(self) -> tuple:
        """ Determines all field names present in this `Scene`, independent of frame. """
        return get_fieldnames(self.path)

    @property
    def frames(self):
        """ Determines all frame numbers present in this `Scene`, independent of field names. See `Scene.complete_frames`. """
        return get_frames(self.path, mode=set.union)

    @property
    def complete_frames(self):
        """
        Determines all frame number for which all existing fields are available.
        If there are multiple fields stored within this scene, a frame is considered complete only if an entry exists for all fields.

        See Also:
            `Scene.frames`
        """
        return get_frames(self.path, mode=set.intersection)

    def __repr__(self):
        return repr(self.paths)

    def __eq__(self, other):
        return isinstance(other, Scene) and (other._paths == self._paths).all

    def copy_calling_script(self, full_trace=False, include_context_information=True):
        """
        Copies the Python file that called this method into the `src` folder of this `Scene`.

        In batch mode, the script is copied to all scenes.

        Args:
            full_trace: Whether to include scripts that indirectly called this method.
            include_context_information: If True, writes the phiflow version and `sys.argv` into `context.json`.
        """
        script_paths = [frame.filename for frame in inspect.stack()]
        script_paths = list(filter(lambda path: not _is_phi_file(path), script_paths))
        script_paths = set(script_paths) if full_trace else [script_paths[0]]
        self.subpath('src', create=True)
        for script_path in script_paths:
            if script_path.endswith('.py'):
                self.copy_src(script_path, only_external=False)
            elif 'ipython' in script_path:
                from IPython import get_ipython
                cells = get_ipython().user_ns['In']
                blocks = [f"#%% In[{i}]\n{cell}" for i, cell in enumerate(cells)]
                text = "\n\n".join(blocks)
                self.copy_src_text('ipython.py', text)
        if include_context_information:
            for path in math.flatten(self._paths):
                with open(join(path, 'src', 'context.json'), 'w') as context_file:
                    json.dump({
                        'phi_version': phi_version,
                        'argv': sys.argv
                    }, context_file)

    def copy_src(self, script_path, only_external=True):
        for path in math.flatten(self._paths):
            if not only_external or not _is_phi_file(script_path):
                shutil.copy(script_path, join(path, 'src', basename(script_path)))

    def copy_src_text(self, filename, text):
        for path in math.flatten(self._paths):
            target = join(path, 'src', filename)
            with open(target, "w") as file:
                file.writelines(text)

    def mkdir(self):
        for path in math.flatten(self._paths):
            isdir(path) or os.mkdir(path)

    def remove(self):
        """ Deletes the scene directory and all contained files. """
        for p in math.flatten(self._paths):
            p = abspath(p)
            if isdir(p):
                shutil.rmtree(p)

Static methods

def at(directory: str, id: int = None) ‑> phi.field._scene.Scene

Creates a Scene for an existing directory.

See Also: Scene.create(), Scene.list().

Args

directory
Either directory containing scene folder if id is given, or scene path if id=None.
id
(Optional) Scene id, will be determined from directory if not specified.

Returns

Scene object for existing scene.

Expand source code
@staticmethod
def at(directory: str or tuple or list or math.Tensor or 'Scene', id: int or math.Tensor or None = None) -> 'Scene':
    """
    Creates a `Scene` for an existing directory.

    See Also:
        `Scene.create()`, `Scene.list()`.

    Args:
        directory: Either directory containing scene folder if `id` is given, or scene path if `id=None`.
        id: (Optional) Scene `id`, will be determined from `directory` if not specified.

    Returns:
        `Scene` object for existing scene.
    """
    if isinstance(directory, Scene):
        assert id is None, f"Got id={id} but directory is already a Scene."
        return directory
    if isinstance(directory, (tuple, list)):
        directory = math.wrap(directory, batch('scenes'))
    directory = math.map(lambda d: expanduser(d), math.wrap(directory))
    if id is None:
        paths = directory
    else:
        id = math.wrap(id)
        paths = math.map(lambda d, i: join(d, f"sim_{i:06d}"), directory, id)
    # test all exist
    for path in math.flatten(paths):
        if not isdir(path):
            raise IOError(f"There is no scene at '{path}'")
    return Scene(paths)
def create(parent_directory: str, shape: phi.math._shape.Shape = (), copy_calling_script=True, **dimensions) ‑> phi.field._scene.Scene

Creates a new Scene or a batch of new scenes inside parent_directory.

See Also: Scene.at(), Scene.list().

Args

parent_directory
Directory to hold the new Scene. If it doesn't exist, it will be created.
shape
Determines number of scenes to create. Multiple scenes will be represented by a Scene with is_batch=True.
copy_calling_script
Whether to copy the Python file that invoked this method into the src folder of all created scenes. See Scene.copy_calling_script().
dimensions
Additional batch dimensions

Returns

Single Scene object representing the new scene(s).

Expand source code
@staticmethod
def create(parent_directory: str,
           shape: math.Shape = math.EMPTY_SHAPE,
           copy_calling_script=True,
           **dimensions) -> 'Scene':
    """
    Creates a new `Scene` or a batch of new scenes inside `parent_directory`.

    See Also:
        `Scene.at()`, `Scene.list()`.

    Args:
        parent_directory: Directory to hold the new `Scene`. If it doesn't exist, it will be created.
        shape: Determines number of scenes to create. Multiple scenes will be represented by a `Scene` with `is_batch=True`.
        copy_calling_script: Whether to copy the Python file that invoked this method into the `src` folder of all created scenes.
            See `Scene.copy_calling_script()`.
        dimensions: Additional batch dimensions

    Returns:
        Single `Scene` object representing the new scene(s).
    """
    shape = shape & math.batch(**dimensions)
    parent_directory = expanduser(parent_directory)
    abs_dir = abspath(parent_directory)
    if not isdir(abs_dir):
        os.makedirs(abs_dir)
        next_id = 0
    else:
        indices = [int(name[4:]) for name in os.listdir(abs_dir) if name.startswith("sim_")]
        next_id = max([-1] + indices) + 1
    ids = math.wrap(tuple(range(next_id, next_id + shape.volume))).vector.split(shape)
    paths = math.map(lambda id_: join(parent_directory, f"sim_{id_:06d}"), ids)
    scene = Scene(paths)
    scene.mkdir()
    if copy_calling_script:
        try:
            scene.copy_calling_script()
        except IOError as err:
            warnings.warn(f"Failed to copy calling script to scene during Scene.create(): {err}")
    return scene
def list(parent_directory: str, include_other: bool = False, dim: phi.math._shape.Shape = None) ‑> phi.field._scene.Scene

Lists all scenes inside the given directory.

See Also: Scene.at(), Scene.create().

Args

parent_directory
Directory that contains scene folders.
include_other
Whether folders that do not match the scene format should also be treated as scenes.
dim
Stack dimension. If None, returns tuple of Scene objects. Otherwise, returns a scene batch with this dimension.

Returns

tuple of scenes.

Expand source code
@staticmethod
def list(parent_directory: str,
         include_other: bool = False,
         dim: Shape or None = None) -> 'Scene' or tuple:
    """
    Lists all scenes inside the given directory.

    See Also:
        `Scene.at()`, `Scene.create()`.

    Args:
        parent_directory: Directory that contains scene folders.
        include_other: Whether folders that do not match the scene format should also be treated as scenes.
        dim: Stack dimension. If None, returns tuple of `Scene` objects. Otherwise, returns a scene batch with this dimension.

    Returns:
        `tuple` of scenes.
    """
    parent_directory = expanduser(parent_directory)
    abs_dir = abspath(parent_directory)
    if not isdir(abs_dir):
        return ()
    names = [sim for sim in os.listdir(abs_dir) if sim.startswith("sim_") or (include_other and isdir(join(abs_dir, sim)))]
    if dim is None:
        return tuple(Scene(join(parent_directory, name)) for name in names)
    else:
        paths = math.wrap([join(parent_directory, name) for name in names], dim)
        return Scene(paths)
def stack(*scenes: Scene, dim: phi.math._shape.Shape = (batchᵇ=None)) ‑> phi.field._scene.Scene
Expand source code
@staticmethod
def stack(*scenes: 'Scene', dim: Shape = batch('batch')) -> 'Scene':
    return Scene(math.stack([s._paths for s in scenes], dim))

Instance variables

var complete_frames

Determines all frame numbers for which all existing fields are available. If there are multiple fields stored within this scene, a frame is considered complete only if an entry exists for all fields.

See Also: Scene.frames

Expand source code
@property
def complete_frames(self):
    """
    Determines all frame number for which all existing fields are available.
    If there are multiple fields stored within this scene, a frame is considered complete only if an entry exists for all fields.

    See Also:
        `Scene.frames`
    """
    return get_frames(self.path, mode=set.intersection)
var fieldnames : tuple

Determines all field names present in this Scene, independent of frame.

Expand source code
@property
def fieldnames(self) -> tuple:
    """ Determines all field names present in this `Scene`, independent of frame. """
    return get_fieldnames(self.path)
var frames

Determines all frame numbers present in this Scene, independent of field names. See Scene.complete_frames.

Expand source code
@property
def frames(self):
    """ Determines all frame numbers present in this `Scene`, independent of field names. See `Scene.complete_frames`. """
    return get_frames(self.path, mode=set.union)
var is_batch
Expand source code
@property
def is_batch(self):
    return self._paths.rank > 0
var path : str

Relative path of the scene directory. This property only exists for single scenes, not scene batches.

Expand source code
@property
def path(self) -> str:
    """
    Relative path of the scene directory.
    This property only exists for single scenes, not scene batches.
    """
    assert not self.is_batch, "Scene.path is not defined for scene batches."
    return self._paths.native()
var paths : phi.math._tensors.Tensor
Expand source code
@property
def paths(self) -> math.Tensor:
    return self._paths
var properties
Expand source code
@property
def properties(self):
    self._init_properties()
    return self._properties
var shape
Expand source code
@property
def shape(self):
    return self._paths.shape

Methods

def copy_calling_script(self, full_trace=False, include_context_information=True)

Copies the Python file that called this method into the src folder of this Scene.

In batch mode, the script is copied to all scenes.

Args

full_trace
Whether to include scripts that indirectly called this method.
include_context_information
If True, writes the phiflow version and sys.argv into context.json.
Expand source code
def copy_calling_script(self, full_trace=False, include_context_information=True):
    """
    Copies the Python file that called this method into the `src` folder of this `Scene`.

    In batch mode, the script is copied to all scenes.

    Args:
        full_trace: Whether to include scripts that indirectly called this method.
        include_context_information: If True, writes the phiflow version and `sys.argv` into `context.json`.
    """
    script_paths = [frame.filename for frame in inspect.stack()]
    script_paths = list(filter(lambda path: not _is_phi_file(path), script_paths))
    script_paths = set(script_paths) if full_trace else [script_paths[0]]
    self.subpath('src', create=True)
    for script_path in script_paths:
        if script_path.endswith('.py'):
            self.copy_src(script_path, only_external=False)
        elif 'ipython' in script_path:
            from IPython import get_ipython
            cells = get_ipython().user_ns['In']
            blocks = [f"#%% In[{i}]\n{cell}" for i, cell in enumerate(cells)]
            text = "\n\n".join(blocks)
            self.copy_src_text('ipython.py', text)
    if include_context_information:
        for path in math.flatten(self._paths):
            with open(join(path, 'src', 'context.json'), 'w') as context_file:
                json.dump({
                    'phi_version': phi_version,
                    'argv': sys.argv
                }, context_file)
def copy_src(self, script_path, only_external=True)
Expand source code
def copy_src(self, script_path, only_external=True):
    for path in math.flatten(self._paths):
        if not only_external or not _is_phi_file(script_path):
            shutil.copy(script_path, join(path, 'src', basename(script_path)))
def copy_src_text(self, filename, text)
Expand source code
def copy_src_text(self, filename, text):
    for path in math.flatten(self._paths):
        target = join(path, 'src', filename)
        with open(target, "w") as file:
            file.writelines(text)
def exist_properties(self)

Checks whether the file description.json exists or has existed.

Expand source code
def exist_properties(self):
    """
    Checks whether the file `description.json` exists or has existed.
    """
    if self._properties is not None:
        return True  # must have been written or read
    else:
        json_file = join(next(iter(math.flatten(self._paths))), "description.json")
        return isfile(json_file)
def exists_config(self)

Tests if the configuration file description.json exists. In batch mode, tests if any configuration exists.

Expand source code
def exists_config(self):
    """ Tests if the configuration file *description.json* exists. In batch mode, tests if any configuration exists. """
    if isinstance(self.path, str):
        return isfile(join(self.path, "description.json"))
    else:
        return any(isfile(join(p, "description.json")) for p in self.path)
def mkdir(self)
Expand source code
def mkdir(self):
    for path in math.flatten(self._paths):
        isdir(path) or os.mkdir(path)
def put_properties(self, update: dict = None, **kw_updates)

Updates the properties dictionary and stores it in description.json of all scene folders.

Args

update
new values, must be JSON serializable.
kw_updates
additional update as keyword arguments. This overrides update.
Expand source code
def put_properties(self, update: dict = None, **kw_updates):
    """
    Updates the properties dictionary and stores it in `description.json` of all scene folders.

    Args:
        update: new values, must be JSON serializable.
        kw_updates: additional update as keyword arguments. This overrides `update`.
    """
    self._init_properties()
    if update:
        self._properties.update(update)
    self._properties.update(kw_updates)
    self._write_properties()
def put_property(self, key, value)
Expand source code
def put_property(self, key, value):
    """ See `Scene.put_properties()`. """
    self._init_properties()
    self._properties[key] = value
    self._write_properties()
def read(self, *names: str, frame=0, convert_to_backend=True)

Reads one or multiple fields from disk.

See Also: Scene.write().

Args

names
Single field name or sequence of field names.
frame
Frame number.
convert_to_backend
Whether to convert the read data to the data format of the default backend, e.g. TensorFlow tensors.

Returns

Single Field or sequence of fields, depending on the type of names.

Expand source code
def read(self, *names: str, frame=0, convert_to_backend=True):
    """
    Reads one or multiple fields from disc.

    See Also:
        `Scene.write()`.

    Args:
        names: Single field name or sequence of field names.
        frame: Frame number.
        convert_to_backend: Whether to convert the read data to the data format of the default backend, e.g. TensorFlow tensors.

    Returns:
        Single `phi.field.Field` or sequence of fields, depending on the type of `names`.
    """
    result = read_sim_frame(self._paths, names, frame=frame, convert_to_backend=convert_to_backend)
    return result[0] if len(names) == 1 else result
def read_array(self, field_name, frame)
Expand source code
def read_array(self, field_name, frame):
    return read_sim_frame(self._paths, field_name, frame=frame)
def remove(self)

Deletes the scene directory and all contained files.

Expand source code
def remove(self):
    """ Deletes the scene directory and all contained files. """
    for p in math.flatten(self._paths):
        p = abspath(p)
        if isdir(p):
            shutil.rmtree(p)
def subpath(self, name: str, create: bool = False) ‑> str

Resolves the relative path name with this Scene as the root folder.

Args

name
Relative path with this Scene as the root folder.
create
Whether to create a directory of that name.

Returns

Relative path including the path to this Scene. In batch mode, returns a tuple, else a str.

Expand source code
def subpath(self, name: str, create: bool = False) -> str or tuple:
    """
    Resolves the relative path `name` with this `Scene` as the root folder.

    Args:
        name: Relative path with this `Scene` as the root folder.
        create: Whether to create a directory of that name.

    Returns:
        Relative path including the path to this `Scene`.
        In batch mode, returns a `tuple`, else a `str`.
    """
    def single_subpath(path):
        path = join(path, name)
        if create and not isdir(path):
            os.mkdir(path)
        return path

    result = math.map(single_subpath, self._paths)
    if result.rank == 0:
        return result.native()
    else:
        return result
def write(self, data: dict = None, frame=0, **kw_data)

Writes fields to this scene. One NumPy file will be created for each Field

See Also: Scene.read().

Args

data
dict mapping field names to Field objects that can be written using write().
kw_data
Additional data, overrides elements in data.
frame
Frame number.
Expand source code
def write(self, data: dict = None, frame=0, **kw_data):
    """
    Writes fields to this scene.
    One NumPy file will be created for each `phi.field.Field`

    See Also:
        `Scene.read()`.

    Args:
        data: `dict` mapping field names to `Field` objects that can be written using `phi.field.write()`.
        kw_data: Additional data, overrides elements in `data`.
        frame: Frame number.
    """
    data = dict(data) if data else {}
    data.update(kw_data)
    write_sim_frame(self._paths, data, names=None, frame=frame)
def write_sim_frame(self, arrays, fieldnames, frame)
Expand source code
def write_sim_frame(self, arrays, fieldnames, frame):
    write_sim_frame(self._paths, arrays, names=fieldnames, frame=frame)
class GeometryMask (geometry: phi.geom._geom.Geometry, balance: phi.math._tensors.Tensor = 0.5)

When sampled given another geometry, the approximate overlap between the geometries is computed, allowing for fractional values between 0 and 1.

Expand source code
class SoftGeometryMask(HardGeometryMask):
    """
    When sampled given another geometry, the approximate overlap between the geometries is computed, allowing for fractional values between 0 and 1.
    """
    def __init__(self, geometry: Geometry, balance: Tensor or float = 0.5):
        super().__init__(geometry)
        self.balance = balance

    def _sample(self, geometry: Geometry) -> Tensor:
        return self.geometry.approximate_fraction_inside(geometry, self.balance)

    def __getitem__(self, item: dict):
        return SoftGeometryMask(self.geometry[item], self.balance)

Ancestors

  • phi.field._mask.HardGeometryMask
  • phi.field._field.Field
class SoftGeometryMask (geometry: phi.geom._geom.Geometry, balance: phi.math._tensors.Tensor = 0.5)

When sampled given another geometry, the approximate overlap between the geometries is computed, allowing for fractional values between 0 and 1.

Expand source code
class SoftGeometryMask(HardGeometryMask):
    """
    When sampled given another geometry, the approximate overlap between the geometries is computed, allowing for fractional values between 0 and 1.
    """
    def __init__(self, geometry: Geometry, balance: Tensor or float = 0.5):
        super().__init__(geometry)
        # Passed through to approximate_fraction_inside(); defaults to 0.5.
        self.balance = balance

    def _sample(self, geometry: Geometry) -> Tensor:
        # Fraction of `geometry` covered by self.geometry, between 0 and 1.
        return self.geometry.approximate_fraction_inside(geometry, self.balance)

    def __getitem__(self, item: dict):
        # Slicing the mask slices the underlying geometry; `balance` is kept.
        return SoftGeometryMask(self.geometry[item], self.balance)

Ancestors

  • phi.field._mask.HardGeometryMask
  • phi.field._field.Field
class StaggeredGrid (values: Any, extrapolation: Any = 0.0, bounds: phi.geom._box.Box = None, resolution: phi.math._shape.Shape = None, **resolution_: int)

N-dimensional grid whose vector components are sampled at the respective face centers. A staggered grid is defined through its values tensor, its bounds describing the physical size, and its extrapolation.

Staggered grids support batch and spatial dimensions but only one channel dimension for the staggered vector components.

See Also: CenteredGrid, Grid, SampledField, Field, module documentation at https://tum-pbs.github.io/PhiFlow/Fields.html

Args

values

Values to use for the grid. Has to be one of the following:

  • Geometry: sets inside values to 1, outside to 0
  • Field: resamples the Field to the staggered sample points
  • Number: uses the value for all sample points
  • tuple or list: interprets the sequence as vector, used for all sample points
  • Tensor with staggered shape: uses tensor values as grid values. Must contain a vector dimension with each slice consisting of one more element along the dimension they describe. Use stack() to manually create this non-uniform tensor.
  • Function values(x) where x is a Tensor representing the physical location.
extrapolation
The grid extrapolation determines the value outside the values tensor. Allowed types: float, Tensor, Extrapolation.
bounds
Physical size and location of the grid.
resolution
Grid resolution as purely spatial Shape.
**resolution_
Spatial dimensions as keyword arguments. Typically either resolution or **resolution_ is specified.
Expand source code
class StaggeredGrid(Grid):
    """
    N-dimensional grid whose vector components are sampled at the respective face centers.
    A staggered grid is defined through its values tensor, its bounds describing the physical size, and its extrapolation.

    Staggered grids support batch and spatial dimensions but only one channel dimension for the staggered vector components.


    See Also:
        `CenteredGrid`,
        `Grid`,
        `SampledField`,
        `Field`,
        module documentation at https://tum-pbs.github.io/PhiFlow/Fields.html
    """

    def __init__(self,
                 values: Any,
                 extrapolation: Any = 0.,
                 bounds: Box = None,
                 resolution: Shape = None,
                 **resolution_: int or Tensor):
        """
        Args:
            values: Values to use for the grid.
                Has to be one of the following:

                * `phi.geom.Geometry`: sets inside values to 1, outside to 0
                * `Field`: resamples the Field to the staggered sample points
                * `Number`: uses the value for all sample points
                * `tuple` or `list`: interprets the sequence as vector, used for all sample points
                * `phi.math.Tensor` with staggered shape: uses tensor values as grid values.
                  Must contain a `vector` dimension with each slice consisting of one more element along the dimension they describe.
                  Use `phi.math.stack()` to manually create this non-uniform tensor.
                * Function `values(x)` where `x` is a `phi.math.Tensor` representing the physical location.

            extrapolation: The grid extrapolation determines the value outside the `values` tensor.
                Allowed types: `float`, `phi.math.Tensor`, `phi.math.extrapolation.Extrapolation`.
            bounds: Physical size and location of the grid.
            resolution: Grid resolution as purely spatial `phi.math.Shape`.
            **resolution_: Spatial dimensions as keyword arguments. Typically either `resolution` or `**resolution_` is specified.
        """
        # Normalize `extrapolation`: plain numbers / tensors become a constant extrapolation.
        if not isinstance(extrapolation, math.Extrapolation):
            extrapolation = math.extrapolation.ConstantExtrapolation(extrapolation)
        if resolution is None and not resolution_:
            # No resolution given: infer it from the staggered `values` tensor.
            assert isinstance(values, Tensor), "Grid resolution must be specified when 'values' is not a Tensor."
            any_dim = values.shape.spatial.names[0]
            x = values.vector[any_dim]
            ext_lower, ext_upper = extrapolation.valid_outer_faces(any_dim)
            # A component stores cells + (included lower face) + (included upper face) - 1
            # values along its own dimension, so subtracting `delta` recovers the cell count.
            delta = int(ext_lower) + int(ext_upper) - 1
            resolution = x.shape.spatial._replace_single_size(any_dim, x.shape.get_size(any_dim) - delta)
            bounds = bounds or Box(0, math.wrap(resolution, channel('vector')))
            elements = staggered_elements(resolution, bounds, extrapolation)
        else:
            # Resolution given explicitly; convert `values` of any supported type
            # (tensor, geometry, field, function, number/sequence) to staggered values.
            resolution = (resolution or math.EMPTY_SHAPE) & spatial(**resolution_)
            bounds = bounds or Box(0, math.wrap(resolution, channel('vector')))
            elements = staggered_elements(resolution, bounds, extrapolation)
            if isinstance(values, math.Tensor):
                values = expand_staggered(values, resolution, extrapolation)
            elif isinstance(values, Geometry):
                # Geometry: 1 inside, 0 outside, sampled at the face centers.
                values = reduce_sample(HardGeometryMask(values), elements)
            elif isinstance(values, Field):
                values = reduce_sample(values, elements)
            elif callable(values):
                # Function of position: evaluate at the face centers, then rearrange
                # the returned stack into a channel 'vector' dimension.
                values = values(elements.center)
                assert isinstance(values, TensorStack), f"values function must return a staggered Tensor but returned {type(values)}"
                assert 'staggered_direction' in values.shape
                if 'vector' in values.shape:
                    # Keep only the component matching each staggered direction.
                    values = math.stack([values.staggered_direction[i].vector[i] for i in range(resolution.rank)], channel('vector'))
                else:
                    values = values.staggered_direction.as_channel('vector')
            else:
                values = expand_staggered(math.tensor(values), resolution, extrapolation)
        # Grid values are stored as float or complex; other dtypes are converted.
        if values.dtype.kind not in (float, complex):
            values = math.to_float(values)
        assert resolution.spatial_rank == bounds.spatial_rank, f"Resolution {resolution} does not match bounds {bounds}"
        Grid.__init__(self, elements, values, extrapolation, resolution, bounds)

    @property
    def cells(self):
        # Cell geometry of this grid derived from resolution and bounds.
        return GridCell(self.resolution, self.bounds)

    def with_extrapolation(self, extrapolation: math.Extrapolation):
        """
        Returns a copy of this grid with `extrapolation` replaced,
        padding or trimming the component tensors where the set of
        stored outer faces changes.
        """
        if all(extrapolation.valid_outer_faces(dim) == self.extrapolation.valid_outer_faces(dim) for dim in self.resolution.names):
            # Same outer faces are stored -> values tensor can be reused as-is.
            return StaggeredGrid(self.values, extrapolation=extrapolation, bounds=self.bounds)
        else:
            # The new extrapolation stores a different set of outer faces:
            # pad (width 1) or trim (width -1) each component accordingly.
            values = []
            for dim, component in zip(self.shape.spatial.names, self.values.unstack('vector')):
                old_lo, old_hi = [int(v) for v in self.extrapolation.valid_outer_faces(dim)]
                new_lo, new_hi = [int(v) for v in extrapolation.valid_outer_faces(dim)]
                widths = (new_lo - old_lo, new_hi - old_hi)
                values.append(math.pad(component, {dim: widths}, self.extrapolation))
            values = math.stack(values, channel('vector'))
            return StaggeredGrid(values, extrapolation=extrapolation, bounds=self.bounds)

    def _sample(self, geometry: Geometry) -> Tensor:
        # Sample each component grid at the same geometry, then stack to a vector.
        channels = [sample(component, geometry) for component in self.vector.unstack()]
        return math.stack(channels, channel('vector'))

    def closest_values(self, points: Geometry):
        assert 'vector' not in points.shape
        if 'staggered_direction' in points.shape:
            # Separate query points per staggered direction: pair each component
            # grid with its own point set.
            points = points.unstack('staggered_direction')
            channels = [component.closest_values(p) for p, component in zip(points, self.vector.unstack())]
        else:
            channels = [component.closest_values(points) for component in self.vector.unstack()]
        return math.stack(channels, channel('vector'))

    def at_centers(self) -> CenteredGrid:
        """
        Interpolates the staggered values to the cell centers.

        Returns:
            `CenteredGrid` sampled at cell centers.
        """
        return CenteredGrid(self, resolution=self.resolution, bounds=self.bounds, extrapolation=self.extrapolation)

    def __getitem__(self, item: dict):
        # Apply non-spatial selections directly to the (possibly non-uniform) values tensor.
        values = self._values[{dim: sel for dim, sel in item.items() if dim not in self.shape.spatial}]
        for dim, sel in item.items():
            if dim in self.shape.spatial:
                sel = slice(sel, sel + 1) if isinstance(sel, int) else sel
                # NOTE(review): assumes `sel` has explicit start/stop; a slice with
                # stop=None would fail at `sel.stop + 1` below -- confirm callers.
                values = []
                for vdim, val in zip(self.shape.spatial.names, self.values.unstack('vector')):
                    if vdim == dim:
                        # The component along the sliced dimension has one extra face.
                        values.append(val[{dim: slice(sel.start, sel.stop + 1)}])
                    else:
                        values.append(val[{dim: sel}])
                values = math.stack(values, channel('vector'))
        extrapolation = self._extrapolation[item]
        bounds = GridCell(self._resolution, self._bounds)[item].bounds
        if 'vector' in item:
            if isinstance(item['vector'], int):
                # Selecting a single component yields a CenteredGrid located at
                # that component's face centers.
                dim = self.shape.spatial.names[item['vector']]
                comp_cells = GridCell(self.resolution, bounds).stagger(dim, *self.extrapolation.valid_outer_faces(dim))
                return CenteredGrid(values, bounds=comp_cells.bounds, extrapolation=extrapolation)
            else:
                assert isinstance(item['vector'], slice) and not item['vector'].start and not item['vector'].stop
        return StaggeredGrid(values, bounds=bounds, extrapolation=extrapolation)

    def staggered_tensor(self) -> Tensor:
        """
        Stacks all component grids into a single uniform `phi.math.Tensor`.
        The individual components are padded to a common (larger) shape before being stacked.
        The shape of the returned tensor is exactly one cell larger than the grid `resolution` in every spatial dimension.

        Returns:
            Uniform `phi.math.Tensor`.
        """
        padded = []
        for dim, component in zip(self.resolution.names, math.unstack(self.values, 'vector')):
            # Pad every spatial dim by one at the upper end, except that a face
            # already stored for `dim` itself (valid outer face) needs no padding.
            widths = {d: (0, 1) for d in self.resolution.names}
            lo_valid, up_valid = self.extrapolation.valid_outer_faces(dim)
            widths[dim] = (int(not lo_valid), int(not up_valid))
            padded.append(math.pad(component, widths, mode=self.extrapolation))
        result = math.stack(padded, channel('vector'))
        assert result.shape.is_uniform
        return result

    def _op2(self, other, operator):
        # Fast path: same bounds and spatial shape allow element-wise tensor ops.
        if isinstance(other, StaggeredGrid) and self.bounds == other.bounds and self.shape.spatial == other.shape.spatial:
            values = operator(self._values, other.values)
            extrapolation_ = operator(self._extrapolation, other.extrapolation)
            return StaggeredGrid(values=values, extrapolation=extrapolation_, bounds=self.bounds)
        else:
            # Fall back to the generic SampledField implementation (resampling etc.).
            return SampledField._op2(self, other, operator)

Ancestors

  • phi.field._grid.Grid
  • phi.field._field.SampledField
  • phi.field._field.Field

Instance variables

var cells
Expand source code
@property
def cells(self):
    """Cell geometry of this grid, built from its resolution and bounds."""
    res = self.resolution
    box = self.bounds
    return GridCell(res, box)

Methods

def at_centers(self) ‑> phi.field._grid.CenteredGrid

Interpolates the staggered values to the cell centers.

Returns

CenteredGrid sampled at cell centers.

Expand source code
def at_centers(self) -> CenteredGrid:
    """
    Resamples the staggered component values at the cell centers.

    Returns:
        `CenteredGrid` holding the interpolated center values.
    """
    grid_spec = dict(resolution=self.resolution, bounds=self.bounds, extrapolation=self.extrapolation)
    return CenteredGrid(self, **grid_spec)
def closest_values(self, points: phi.geom._geom.Geometry)

Sample the closest grid point values of this field at the world-space locations (in physical units) given by points. Points must have a single channel dimension named vector. It may additionally contain any number of batch and spatial dimensions, all treated as batch dimensions.

Args

points
world-space locations

Returns

Closest grid point values as a Tensor. For each dimension, the grid points immediately left and right of the sample points are evaluated. For each point in points, a 2^d cube of points is determined where d is the number of spatial dimensions of this field. These values are stacked along the new dimensions 'closest_<dim>' where <dim> refers to the name of a spatial dimension.

Expand source code
def closest_values(self, points: Geometry):
    """
    Sample the closest grid point values of each component at the world-space
    locations given by `points` and stack them along a channel 'vector' dimension.
    `points` must not carry a 'vector' dimension; a 'staggered_direction'
    dimension, if present, pairs each component with its own point set.
    """
    assert 'vector' not in points.shape
    components = self.vector.unstack()
    if 'staggered_direction' in points.shape:
        per_direction = points.unstack('staggered_direction')
        channels = [comp.closest_values(pts) for comp, pts in zip(components, per_direction)]
    else:
        channels = [comp.closest_values(points) for comp in components]
    return math.stack(channels, channel('vector'))
def staggered_tensor(self) ‑> phi.math._tensors.Tensor

Stacks all component grids into a single uniform Tensor. The individual components are padded to a common (larger) shape before being stacked. The shape of the returned tensor is exactly one cell larger than the grid resolution in every spatial dimension.

Returns

Uniform Tensor.

Expand source code
def staggered_tensor(self) -> Tensor:
    """
    Combines all component grids into one uniform `phi.math.Tensor` by padding
    each component to a common shape exactly one cell larger than `resolution`
    in every spatial dimension.

    Returns:
        Uniform `phi.math.Tensor`.
    """
    components = []
    for dim, component_values in zip(self.resolution.names, math.unstack(self.values, 'vector')):
        lower_valid, upper_valid = self.extrapolation.valid_outer_faces(dim)
        # Each spatial dim grows by one; a face already stored for `dim`
        # itself needs no padding on that side.
        pad_widths = {d: (0, 1) for d in self.resolution.names}
        pad_widths[dim] = (0 if lower_valid else 1, 0 if upper_valid else 1)
        components.append(math.pad(component_values, pad_widths, mode=self.extrapolation))
    stacked = math.stack(components, channel('vector'))
    assert stacked.shape.is_uniform
    return stacked
def with_extrapolation(self, extrapolation: Extrapolation)

Returns a copy of this field with its extrapolation replaced.

Expand source code
def with_extrapolation(self, extrapolation: math.Extrapolation):
    """
    Returns a copy of this grid with `extrapolation` replaced, padding or
    trimming the component tensors where the set of stored outer faces changes.
    """
    same_faces = all(extrapolation.valid_outer_faces(dim) == self.extrapolation.valid_outer_faces(dim)
                     for dim in self.resolution.names)
    if same_faces:
        # Identical face layout: the values tensor can be reused unchanged.
        return StaggeredGrid(self.values, extrapolation=extrapolation, bounds=self.bounds)
    # Pad (width 1) or trim (width -1) each component to match the new face layout.
    adjusted = []
    for dim, component in zip(self.shape.spatial.names, self.values.unstack('vector')):
        old_lo, old_hi = (int(v) for v in self.extrapolation.valid_outer_faces(dim))
        new_lo, new_hi = (int(v) for v in extrapolation.valid_outer_faces(dim))
        adjusted.append(math.pad(component, {dim: (new_lo - old_lo, new_hi - old_hi)}, self.extrapolation))
    values = math.stack(adjusted, channel('vector'))
    return StaggeredGrid(values, extrapolation=extrapolation, bounds=self.bounds)