This notebook lists useful code snippets.
from phi.flow import *
from phi.tf.flow import *
from phi.jax.stax.flow import *
from phi.torch.flow import *
backend.default_backend().list_devices('GPU')
[]
backend.default_backend().list_devices('CPU')
[torch device 'CPU' (CPU 'cpu') | 15995 MB | 4 processors | ]
assert backend.default_backend().set_default_device('CPU')
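To run on a GPU when one is available, the same API can select it. A minimal sketch (this notebook found no GPU above, so the branch is skipped here):
if backend.default_backend().list_devices('GPU'):
    backend.default_backend().set_default_device('GPU')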
math.set_global_precision(32)  # single precision is the default
x32 = math.random_normal(batch(b=4))
with math.precision(64):  # operations within this context will use 64-bit floats
    x64 = math.to_float(x32)
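To verify the effect, inspect the data types of both tensors (a quick check, assuming the cells above were run):
print(x32.dtype)  # float32, created outside the context
print(x64.dtype)  # float64, converted inside the 64-bit context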
data = math.random_normal(batch(examples=10)) * .1  # batch of scalar values
data = math.random_uniform(batch(examples=10), channel(vector='x,y'))  # batch of vectors
data
(examplesᵇ=10, vectorᶜ=x,y) 0.477 ± 0.243 (4e-03...1e+00)
data.examples[0]
(x=0.746, y=0.783)
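Dimensions can also be indexed by name and component label. A small sketch using the tensor from above:
data.vector['x']    # x-components of all vectors, shape (examplesᵇ=10)
data.examples[1:3]  # slice along the batch dimension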
Printing a Tensor
print(data)
print(f"{data:full:shape:dtype:color:.1f}")
(examplesᵇ=10, vectorᶜ=x,y) 0.477 ± 0.243 (4e-03...1e+00)
(examplesᵇ=10, vectorᶜ=x,y) [[0.7, 0.8], [0.7, 0.4], [0.4, 0.6], [0.5, 0.5], [0.0, 1.0], [0.1, 0.5], [0.7, 0.4], [0.5, 0.1], [0.3, 0.4], [0.6, 0.3]]
Plotting a Tensor
data = math.random_uniform(spatial(x=8, y=6))
vis.plot(data)  # or vis.show(data)
Tensor to NumPy
data.numpy(order='x,y')
array([[0.44715345, 0.11466235, 0.80700165, 0.42683053, 0.9830442 ,
        0.96569663],
       [0.4751532 , 0.27429855, 0.13322192, 0.11635357, 0.25475442,
        0.44499677],
       [0.05167419, 0.75811654, 0.5447222 , 0.06704581, 0.22153193,
        0.72694325],
       [0.9613559 , 0.76944786, 0.7440348 , 0.21292871, 0.45958424,
        0.7653247 ],
       [0.45025527, 0.70046294, 0.69470763, 0.619114  , 0.17853403,
        0.5036263 ],
       [0.3342715 , 0.35268652, 0.41007197, 0.62838537, 0.8982941 ,
        0.09558487],
       [0.94572204, 0.45102024, 0.30300617, 0.8596781 , 0.7521077 ,
        0.81394476],
       [0.32756013, 0.140594  , 0.5850747 , 0.0150463 , 0.15520924,
        0.781707  ]], dtype=float32)
math.reshaped_native(data, ['extra', data.shape], to_numpy=True)  # deprecated in newer phiml versions; use Tensor.native() instead
array([[0.44715345, 0.11466235, 0.80700165, 0.42683053, 0.9830442 ,
        0.96569663, 0.4751532 , 0.27429855, 0.13322192, 0.11635357,
        0.25475442, 0.44499677, 0.05167419, 0.75811654, 0.5447222 ,
        0.06704581, 0.22153193, 0.72694325, 0.9613559 , 0.76944786,
        0.7440348 , 0.21292871, 0.45958424, 0.7653247 , 0.45025527,
        0.70046294, 0.69470763, 0.619114  , 0.17853403, 0.5036263 ,
        0.3342715 , 0.35268652, 0.41007197, 0.62838537, 0.8982941 ,
        0.09558487, 0.94572204, 0.45102024, 0.30300617, 0.8596781 ,
        0.7521077 , 0.81394476, 0.32756013, 0.140594  , 0.5850747 ,
        0.0150463 , 0.15520924, 0.781707  ]], dtype=float32)
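The conversion also works in reverse: a NumPy array can be wrapped as a tensor with named dimensions. A minimal sketch using the array from above:
np_data = data.numpy(order='x,y')
restored = math.wrap(np_data, spatial('x,y'))  # re-attaches dimension names; math.tensor() would convert/copy instead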
points = math.tensor([(0, 0), (0, 1), (1, 0)], instance('points'), channel('vector'))
distances = points - math.rename_dims(points, 'points', 'others')
math.print(math.vec_length(distances))  # vec_length is deprecated in newer phiml versions in favor of math.norm
[[0. , 1. , 1. ], [1. , 0. , 1.4142135], [1. , 1.4142135, 0. ]]
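The distance matrix can be reduced like any other tensor, e.g. to find each point's largest distance to the others. A minimal sketch:
dist = math.vec_length(distances)
math.print(math.max(dist, 'others'))  # per-point maximum over the 'others' dimension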
CenteredGrid
zero_grid = CenteredGrid(0, 0, x=32, y=32, bounds=Box(x=1, y=1))
y_grid = CenteredGrid((0, 1), extrapolation.BOUNDARY, x=32, y=32)
noise_grid = CenteredGrid(Noise(), extrapolation.PERIODIC, x=32, y=32)
sin_curve = CenteredGrid(lambda x: math.sin(x), extrapolation.PERIODIC, x=100, bounds=Box(x=2 * PI))
vis.plot(zero_grid, y_grid, noise_grid, sin_curve, size=(12, 3))
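Grids support element-wise arithmetic, and the @ operator resamples one field onto the sample points of another. A minimal sketch using the grids from above:
scaled = 2 * noise_grid + 1         # arithmetic yields a new CenteredGrid
resampled = noise_grid @ zero_grid  # resample onto zero_grid's 32x32 sample points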
StaggeredGrid
zero_grid = StaggeredGrid(0, 0, x=32, y=32, bounds=Box(x=1, y=1))
y_grid = StaggeredGrid((0, 1), extrapolation.BOUNDARY, x=32, y=32)
noise_grid = StaggeredGrid(Noise(), extrapolation.PERIODIC, x=32, y=32)
sin_curve = StaggeredGrid(lambda x: math.sin(x), extrapolation.PERIODIC, x=100, bounds=Box(x=2 * PI))
vis.plot(zero_grid, y_grid, noise_grid, sin_curve, size=(12, 3))
StaggeredGrid from NumPy Arrays
Given matching arrays vx and vy, we can construct a StaggeredGrid.
Note that the shapes of the arrays must match the extrapolation! With extrapolation.BOUNDARY, each component stores both outer faces (33 entries along its own axis); with extrapolation.PERIODIC, the wrapped face is not duplicated (32); with extrapolation 0, the outermost faces are implicitly zero and not stored (31).
vx = math.tensor(np.zeros([33, 32]), spatial('x,y'))
vy = math.tensor(np.zeros([32, 33]), spatial('x,y'))
StaggeredGrid(math.stack([vx, vy], dual(vector='x,y')), extrapolation.BOUNDARY)
vx = math.tensor(np.zeros([32, 32]), spatial('x,y'))
vy = math.tensor(np.zeros([32, 32]), spatial('x,y'))
StaggeredGrid(math.stack([vx, vy], dual(vector='x,y')), extrapolation.PERIODIC)
vx = math.tensor(np.zeros([31, 32]), spatial('x,y'))
vy = math.tensor(np.zeros([32, 31]), spatial('x,y'))
StaggeredGrid(math.stack([vx, vy], dual(vector='x,y')), 0)
Field[(xˢ=32, yˢ=32, vectorᶜ=x,y)]
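Individual components can be selected by name, and the whole field can be interpolated to the cell centers. A sketch, assigning the last grid to a variable first (at_centers is assumed to be available, as in recent PhiFlow versions):
v = StaggeredGrid(math.stack([vx, vy], dual(vector='x,y')), 0)
vx_component = v.vector['x']  # x-component as its own grid
centered = v.at_centers()     # interpolate all components to the cell centers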
def loss_function(x):
    return math.l2_loss(math.cos(x))
initial_guess = math.tensor([1, -1], math.batch('batch'))
math.minimize(loss_function, Solve('L-BFGS-B', 0, 1e-3, x0=initial_guess))
(1.574, -1.574) along batchᵇ
def f(x):
    return 2 * x
math.solve_linear(f, 84, Solve('CG', 1e-5, x0=0))
tensor([42.])
from functools import partial
periodic_laplace = partial(math.laplace, padding=extrapolation.PERIODIC)
example_input = math.ones(spatial(x=3))
matrix, bias = math.matrix_from_function(periodic_laplace, example_input)
math.print(matrix)
x=0   -2.   1.   1.  along ~x
x=1    1.  -2.   1.  along ~x
x=2    1.   1.  -2.  along ~x
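The returned matrix and bias satisfy periodic_laplace(x) == matrix @ x + bias, so the matrix can be applied with the @ operator. A quick consistency check, as a sketch:
x = math.random_uniform(spatial(x=3))
math.print(periodic_laplace(x) - (matrix @ x + bias))  # approximately zero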
def f(x):
    return math.l2_loss(math.sin(x))
f_grid = CenteredGrid(f, x=100, y=100, bounds=Box(x=2*PI, y=2*PI))
vis.plot(f_grid)
def minimize(x0):
    with math.SolveTape(record_trajectories=True) as solves:
        math.minimize(f, Solve('BFGS', 0, 1e-5, x0=x0))
    return solves[0].x  # shape (trajectory, x, y, vector)
trajectories = CenteredGrid(minimize, x=8, y=8, bounds=Box(x=2*PI, y=2*PI)).values
segments = []
for start, end in zip(trajectories.trajectory[:-1].trajectory, trajectories.trajectory[1:].trajectory):
    segments.append(PointCloud(start, end - start))  # the 'bounds' argument is deprecated since 2.5 and ignored
anim_segments = field.stack(segments, batch('time'))
vis.plot(f_grid, anim_segments, overlay='args', animate='time', color='#FFFFFF', frame_time=500)
net = dense_net(1, 1, layers=[8, 8], activation='ReLU')  # Implemented for PyTorch, TensorFlow, Jax-Stax
optimizer = adam(net, 1e-3)
BATCH = batch(batch=100)
def loss_function(data: Tensor):
    prediction = math.native_call(net, data)
    label = math.sin(data)
    return math.l2_loss(prediction - label), data, label
print(f"Initial loss: {loss_function(math.random_normal(BATCH))[0]}")
for i in range(100):
    loss, _data, _label = update_weights(net, optimizer, loss_function, data=math.random_normal(BATCH))
print(f"Final loss: {loss}")
Initial loss: (batchᵇ=100) 0.221 ± 0.183 (1e-05...5e-01)
Final loss: (batchᵇ=100) 0.087 ± 0.071 (1e-05...2e-01)
parameter_count(net)
97
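After training, the network is evaluated on new inputs the same way as inside the loss function. A minimal sketch:
test_x = math.random_normal(batch(batch=4))
prediction = math.native_call(net, test_x)  # should now approximate sin(test_x)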