This notebook lists useful ΦFlow code snippets.
from phi.flow import *           # NumPy backend
from phi.tf.flow import *        # TensorFlow backend
from phi.jax.stax.flow import *  # Jax + Stax backend
from phi.torch.flow import *     # PyTorch backend; usually only one of these is imported, and the last import determines the default backend
backend.default_backend().list_devices('GPU')
[]
backend.default_backend().list_devices('CPU')
[torch device 'CPU' (CPU 'cpu') | 15981 MB | 4 processors | ]
assert backend.default_backend().set_default_device('CPU')
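If a GPU shows up in the list above, it can be made the default device the same way; a minimal sketch (the gpus variable is illustrative):
gpus = backend.default_backend().list_devices('GPU')
if gpus:
    backend.default_backend().set_default_device(gpus[0])  # or simply 'GPU'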
math.set_global_precision(32) # single precision is the default
x32 = math.random_normal(batch(b=4))
with math.precision(64):  # operations within this context use 64-bit floats
    x64 = math.to_float(x32)
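The effect can be checked on the tensors' data types; a quick sanity check, assuming the cells above ran as-is:
assert x32.dtype.precision == 32  # created under the default 32-bit policy
assert x64.dtype.precision == 64  # converted inside the 64-bit context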
data = math.random_normal(batch(examples=10)) * .1 # batch of scalar values
data = math.random_uniform(batch(examples=10), channel(vector='x,y')) # batch of vectors
data
(examplesᵇ=10, vectorᶜ=x,y) 0.575 ± 0.269 (4e-04...1e+00)
data.examples[0]
(x=0.626, y=0.659)
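Components can likewise be selected by item name along the channel dimension:
data.vector['x']  # x-components of all 10 vectors, leaving shape (examplesᵇ=10)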
Print a Tensor
print(data)
print(f"{data:full:shape:dtype:color:.1f}")
(examplesᵇ=10, vectorᶜ=x,y) 0.575 ± 0.269 (4e-04...1e+00)
(examplesᵇ=10, vectorᶜ=x,y) [[0.6, 0.7], [0.3, 0.3], [0.5, 0.6], [0.3, 0.7], [0.4, 0.8], [0.6, 0.0], [1.0, 1.0], [0.6, 0.6], [0.6, 1.0], [0.1, 0.9]]
Plot a Tensor
data = math.random_uniform(spatial(x=8, y=6))
vis.plot(data) # or vis.show(data)
Tensor to NumPy
data.numpy(order='x,y')
array([[0.32074964, 0.45673442, 0.805186  , 0.9279012 , 0.1884247 , 0.11589837],
       [0.7258245 , 0.8662647 , 0.18583113, 0.40409058, 0.3491332 , 0.80648303],
       [0.62259406, 0.8686672 , 0.7167539 , 0.7337983 , 0.38596487, 0.12700123],
       [0.71185523, 0.7233456 , 0.8122215 , 0.2178359 , 0.5504832 , 0.69600594],
       [0.12341881, 0.5526725 , 0.9551072 , 0.17882258, 0.894248  , 0.40559953],
       [0.6761497 , 0.22656739, 0.47586387, 0.536201  , 0.73837894, 0.0853458 ],
       [0.5066474 , 0.13730311, 0.526967  , 0.30572063, 0.92925215, 0.1812197 ],
       [0.10421163, 0.7134018 , 0.3264681 , 0.5559566 , 0.3229518 , 0.62900114]],
      dtype=float32)
math.reshaped_native(data, ['extra', data.shape], to_numpy=True)
array([[0.32074964, 0.45673442, 0.805186 , 0.9279012 , 0.1884247 , 0.11589837, 0.7258245 , 0.8662647 , 0.18583113, 0.40409058, 0.3491332 , 0.80648303, 0.62259406, 0.8686672 , 0.7167539 , 0.7337983 , 0.38596487, 0.12700123, 0.71185523, 0.7233456 , 0.8122215 , 0.2178359 , 0.5504832 , 0.69600594, 0.12341881, 0.5526725 , 0.9551072 , 0.17882258, 0.894248 , 0.40559953, 0.6761497 , 0.22656739, 0.47586387, 0.536201 , 0.73837894, 0.0853458 , 0.5066474 , 0.13730311, 0.526967 , 0.30572063, 0.92925215, 0.1812197 , 0.10421163, 0.7134018 , 0.3264681 , 0.5559566 , 0.3229518 , 0.62900114]], dtype=float32)
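Going the other way, a NumPy array can be wrapped back into a tensor by naming its dimensions; a round-trip sketch using the data from above:
restored = math.tensor(data.numpy(order='x,y'), spatial('x,y'))
math.assert_close(restored, data)  # the round trip preserves all values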
points = math.tensor([(0, 0), (0, 1), (1, 0)], instance('points'), channel('vector'))
distances = points - math.rename_dims(points, 'points', 'others')
math.print(math.vec_length(distances))
[[0.       , 1.       , 1.       ],
 [1.       , 0.       , 1.4142135],
 [1.       , 1.4142135, 0.       ]]
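The distance matrix can then be reduced along either dimension, e.g. to get each point's distance to the farthest other point; a sketch (dist is illustrative):
dist = math.vec_length(distances)
math.max(dist, 'others')  # per-point maximum over all other points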
CenteredGrid
zero_grid = CenteredGrid(0, 0, x=32, y=32, bounds=Box(x=1, y=1))
y_grid = CenteredGrid((0, 1), extrapolation.BOUNDARY, x=32, y=32)
noise_grid = CenteredGrid(Noise(), extrapolation.PERIODIC, x=32, y=32)
sin_curve = CenteredGrid(lambda x: math.sin(x), extrapolation.PERIODIC, x=100, bounds=Box(x=2 * PI))
vis.plot(zero_grid, y_grid, noise_grid, sin_curve, size=(12, 3))
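Grids are fields and can be resampled onto other grids with the @ operator; a sketch that interpolates sin_curve onto a coarser grid (coarse is illustrative):
coarse = CenteredGrid(0, extrapolation.PERIODIC, x=25, bounds=Box(x=2 * PI))
sin_coarse = sin_curve @ coarse  # interpolates sin_curve at the new sample points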
StaggeredGrid
zero_grid = StaggeredGrid(0, 0, x=32, y=32, bounds=Box(x=1, y=1))
y_grid = StaggeredGrid((0, 1), extrapolation.BOUNDARY, x=32, y=32)
noise_grid = StaggeredGrid(Noise(), extrapolation.PERIODIC, x=32, y=32)
sin_curve = StaggeredGrid(lambda x: math.sin(x), extrapolation.PERIODIC, x=100, bounds=Box(x=2 * PI))
vis.plot(zero_grid, y_grid, noise_grid, sin_curve, size=(12, 3))
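Staggered samples live on the cell faces; to get one vector per cell center, e.g. for export, the grid can be interpolated (a sketch):
centered = noise_grid.at_centers()  # CenteredGrid holding a 2D vector per cell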
StaggeredGrid from NumPy Arrays
Given matching arrays vx and vy, we can construct a StaggeredGrid. Note that the shapes of the arrays must match the extrapolation!
vx = math.tensor(np.zeros([33, 32]), spatial('x,y'))  # BOUNDARY: one extra sample along the component's own axis
vy = math.tensor(np.zeros([32, 33]), spatial('x,y'))
StaggeredGrid(math.stack([vx, vy], dual(vector='x,y')), extrapolation.BOUNDARY)
vx = math.tensor(np.zeros([32, 32]), spatial('x,y'))  # PERIODIC: same size as the grid resolution
vy = math.tensor(np.zeros([32, 32]), spatial('x,y'))
StaggeredGrid(math.stack([vx, vy], dual(vector='x,y')), extrapolation.PERIODIC)
vx = math.tensor(np.zeros([31, 32]), spatial('x,y'))  # 0 (zero extrapolation): one sample fewer along the component's own axis
vy = math.tensor(np.zeros([32, 31]), spatial('x,y'))
StaggeredGrid(math.stack([vx, vy], dual(vector='x,y')), 0)
Grid faces[(~vectorᵈ=x,y, xˢ=~(x=31, y=32) int64, yˢ=~(x=32, y=31) int64) const 0.0, ext=0]
def loss_function(x):
    return math.l2_loss(math.cos(x))
initial_guess = math.tensor([1, -1], math.batch('batch'))
math.minimize(loss_function, Solve('L-BFGS-B', 0, 1e-3, x0=initial_guess))
(1.574, -1.574) along batchᵇ
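Convergence details, such as the number of iterations, can be recorded by wrapping the call in a SolveTape; a sketch reusing the problem from above:
with math.SolveTape() as solves:
    math.minimize(loss_function, Solve('L-BFGS-B', 0, 1e-3, x0=initial_guess))
print(solves[0].iterations)  # iterations until the tolerance was met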
def f(x):
    return 2 * x
math.solve_linear(f, 84, Solve('CG', 1e-5, x0=0))
tensor([42.])
from functools import partial
periodic_laplace = partial(math.laplace, padding=extrapolation.PERIODIC)
example_input = math.ones(spatial(x=3))
matrix, bias = math.matrix_from_function(periodic_laplace, example_input)
math.print(matrix)
x=0    -2.   1.   1.   along ~x
x=1     1.  -2.   1.   along ~x
x=2     1.   1.  -2.   along ~x
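The returned matrix is a sparse tensor, so applying it should reproduce the traced function up to the constant bias; a sketch:
math.assert_close(matrix @ example_input + bias, periodic_laplace(example_input))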
def f(x):
    return math.l2_loss(math.sin(x))
f_grid = CenteredGrid(f, x=100, y=100, bounds=Box(x=2*PI, y=2*PI))
vis.plot(f_grid)
def minimize(x0):
    with math.SolveTape(record_trajectories=True) as solves:
        math.minimize(f, Solve('BFGS', 0, 1e-5, x0=x0))
    return solves[0].x  # shape (trajectory, x, y, vector)
trajectories = CenteredGrid(minimize, x=8, y=8, bounds=Box(x=2*PI, y=2*PI)).values
segments = []
for start, end in zip(trajectories.trajectory[:-1].trajectory, trajectories.trajectory[1:].trajectory):
    segments.append(PointCloud(start, end - start))  # the bounds argument is deprecated since 2.5
anim_segments = field.stack(segments, batch('time'))
vis.plot(f_grid, anim_segments, overlay='args', animate='time', color='#FFFFFF', frame_time=500)
net = dense_net(1, 1, layers=[8, 8], activation='ReLU') # Implemented for PyTorch, TensorFlow, Jax-Stax
optimizer = adam(net, 1e-3)
BATCH = batch(batch=100)
def loss_function(data: Tensor):
    prediction = math.native_call(net, data)
    label = math.sin(data)
    return math.l2_loss(prediction - label), data, label
print(f"Initial loss: {loss_function(math.random_normal(BATCH))[0]}")
for i in range(100):
    loss, _data, _label = update_weights(net, optimizer, loss_function, data=math.random_normal(BATCH))
print(f"Final loss: {loss}")
Initial loss: (batchᵇ=100) 0.228 ± 0.182 (4e-06...5e-01)
Final loss: (batchᵇ=100) 0.105 ± 0.082 (1e-09...2e-01)
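The trained network can then be evaluated on new inputs the same way it was called inside the loss function; a sketch (test_x is illustrative):
test_x = math.random_normal(batch(examples=4))
math.native_call(net, test_x)  # should approximate sin(test_x)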
parameter_count(net)
97