This notebook lists useful ΦFlow code snippets.
from phi.flow import *
from phi.tf.flow import *
from phi.jax.stax.flow import *
from phi.torch.flow import *
backend.default_backend().list_devices('GPU')
[]
backend.default_backend().list_devices('CPU')
[torch device 'CPU' (CPU 'cpu') | 15981 MB | 4 processors | ]
assert backend.default_backend().set_default_device('CPU')
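If a GPU shows up in the list, it can be made the default device the same way; a minimal sketch using only the backend API shown above:
if backend.default_backend().list_devices('GPU'):
    backend.default_backend().set_default_device('GPU')  # prefer the GPU when one is present
else:
    backend.default_backend().set_default_device('CPU')  # otherwise stay on the CPU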
math.set_global_precision(32) # single precision is the default
x32 = math.random_normal(batch(b=4))
with math.precision(64):  # operations within this context use 64-bit floats
    x64 = math.to_float(x32)
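A quick sanity check on the resulting data types; this assumes tensors expose a dtype with a bits attribute, as in phiml:
assert x32.dtype.bits == 32  # created under the default 32-bit policy
assert x64.dtype.bits == 64  # converted inside the 64-bit context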
data = math.random_normal(batch(examples=10)) * .1 # batch of scalar values
data = math.random_uniform(batch(examples=10), channel(vector='x,y')) # batch of vectors
data
(examplesᵇ=10, vectorᶜ=x,y) 0.341 ± 0.205 (5e-03...8e-01)
data.examples[0]
(x=0.377, y=0.084)
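Tensors can also be wrapped from plain Python sequences by naming the dimensions, the same pattern used for the point cloud further below:
vecs = math.tensor([(0., 1.), (2., 3.)], batch('examples'), channel(vector='x,y'))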
Printing Tensors
print(data)
print(f"{data:full:shape:dtype:color:.1f}")
(examplesᵇ=10, vectorᶜ=x,y) 0.341 ± 0.205 (5e-03...8e-01)
(examplesᵇ=10, vectorᶜ=x,y) [[0.4, 0.1], [0.4, 0.5], [0.3, 0.3], [0.4, 0.4], [0.2, 0.0], [0.0, 0.3], [0.6, 0.7], [0.4, 0.4], [0.5, 0.8], [0.0, 0.3]]
Plotting Tensors
data = math.random_uniform(spatial(x=8, y=6))
vis.plot(data) # or vis.show(data)
Tensor to NumPy
data.numpy(order='x,y')
array([[0.02089942, 0.07113802, 0.29025614, 0.44720393, 0.25507218, 0.1220867 ],
       [0.04714096, 0.9627641 , 0.7323178 , 0.43196374, 0.65170914, 0.7439455 ],
       [0.13001513, 0.05242467, 0.8929076 , 0.47162652, 0.28926837, 0.7617123 ],
       [0.4166569 , 0.6261892 , 0.99661046, 0.04237741, 0.9552279 , 0.7568089 ],
       [0.3502682 , 0.9863457 , 0.4262132 , 0.7556447 , 0.6456703 , 0.02423155],
       [0.20684487, 0.64379096, 0.69073254, 0.94477534, 0.72589576, 0.1822154 ],
       [0.2346788 , 0.32578886, 0.14604938, 0.8885656 , 0.42375505, 0.29243845],
       [0.15849876, 0.06622547, 0.07535005, 0.7936608 , 0.2870556 , 0.6676684 ]],
      dtype=float32)
math.reshaped_native(data, ['extra', data.shape], to_numpy=True)
array([[0.02089942, 0.07113802, 0.29025614, 0.44720393, 0.25507218, 0.1220867 , 0.04714096, 0.9627641 , 0.7323178 , 0.43196374, 0.65170914, 0.7439455 , 0.13001513, 0.05242467, 0.8929076 , 0.47162652, 0.28926837, 0.7617123 , 0.4166569 , 0.6261892 , 0.99661046, 0.04237741, 0.9552279 , 0.7568089 , 0.3502682 , 0.9863457 , 0.4262132 , 0.7556447 , 0.6456703 , 0.02423155, 0.20684487, 0.64379096, 0.69073254, 0.94477534, 0.72589576, 0.1822154 , 0.2346788 , 0.32578886, 0.14604938, 0.8885656 , 0.42375505, 0.29243845, 0.15849876, 0.06622547, 0.07535005, 0.7936608 , 0.2870556 , 0.6676684 ]], dtype=float32)
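The conversion also works in reverse; a minimal round-trip sketch that wraps the NumPy array back into a tensor:
raw = data.numpy(order='x,y')
restored = math.tensor(raw, spatial('x,y'))
assert math.close(data, restored)  # the round trip preserves all values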
points = math.tensor([(0, 0), (0, 1), (1, 0)], instance('points'), channel('vector'))
distances = points - math.rename_dims(points, 'points', 'others')
math.print(math.vec_length(distances))
[[0.       , 1.       , 1.       ],
 [1.       , 0.       , 1.4142135],
 [1.       , 1.4142135, 0.       ]]
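The distance matrix can then be reduced over the others dimension, e.g. to find each point's farthest neighbor; a small sketch assuming math.max reduces over a named dimension:
dist = math.vec_length(distances)
farthest = math.max(dist, 'others')  # per-point distance to the farthest other point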
CenteredGrid
zero_grid = CenteredGrid(0, 0, x=32, y=32, bounds=Box(x=1, y=1))
y_grid = CenteredGrid((0, 1), extrapolation.BOUNDARY, x=32, y=32)
noise_grid = CenteredGrid(Noise(), extrapolation.PERIODIC, x=32, y=32)
sin_curve = CenteredGrid(lambda x: math.sin(x), extrapolation.PERIODIC, x=100, bounds=Box(x=2 * PI))
vis.plot(zero_grid, y_grid, noise_grid, sin_curve, size=(12, 3))
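Grids can be resampled at the sample points of another grid with the @ operator; a sketch, reusing the bounds of the existing grid so the two boxes line up:
coarse = CenteredGrid(0, extrapolation.PERIODIC, x=16, y=16, bounds=noise_grid.bounds)
resampled = noise_grid @ coarse  # interpolate noise_grid onto the coarser resolution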
StaggeredGrid
zero_grid = StaggeredGrid(0, 0, x=32, y=32, bounds=Box(x=1, y=1))
y_grid = StaggeredGrid((0, 1), extrapolation.BOUNDARY, x=32, y=32)
noise_grid = StaggeredGrid(Noise(), extrapolation.PERIODIC, x=32, y=32)
sin_curve = StaggeredGrid(lambda x: math.sin(x), extrapolation.PERIODIC, x=100, bounds=Box(x=2 * PI))
vis.plot(zero_grid, y_grid, noise_grid, sin_curve, size=(12, 3))
StaggeredGrid from NumPy Arrays
Given matching arrays vx and vy, we can construct a StaggeredGrid.
Note that the shapes of the arrays must match the extrapolation!
vx = math.tensor(np.zeros([33, 32]), spatial('x,y'))
vy = math.tensor(np.zeros([32, 33]), spatial('x,y'))
StaggeredGrid(math.stack([vx, vy], dual(vector='x,y')), extrapolation.BOUNDARY)
vx = math.tensor(np.zeros([32, 32]), spatial('x,y'))
vy = math.tensor(np.zeros([32, 32]), spatial('x,y'))
StaggeredGrid(math.stack([vx, vy], dual(vector='x,y')), extrapolation.PERIODIC)
vx = math.tensor(np.zeros([31, 32]), spatial('x,y'))
vy = math.tensor(np.zeros([32, 31]), spatial('x,y'))
StaggeredGrid(math.stack([vx, vy], dual(vector='x,y')), 0)
Grid faces[(~vectorᵈ=x,y, xˢ=~(x=31, y=32) int64, yˢ=~(x=32, y=31) int64) const 0.0, ext=0]
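To get one value per cell again, a staggered grid can be interpolated to the cell centers; a sketch assuming the at_centers() method:
staggered = StaggeredGrid(math.stack([vx, vy], dual(vector='x,y')), 0)
centered = staggered.at_centers()  # interpolate face values to cell centers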
def loss_function(x):
    return math.l2_loss(math.cos(x))
initial_guess = math.tensor([1, -1], math.batch('batch'))
math.minimize(loss_function, Solve('L-BFGS-B', 0, 1e-3, x0=initial_guess))
(1.574, -1.574) along batchᵇ
def f(x):
    return 2 * x
math.solve_linear(f, 84, Solve('CG', 1e-5, x0=0))
tensor([42.])
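The right-hand side may carry batch dimensions, in which case all systems are solved together; a minimal sketch (f(x) = 2x, so the expected solution is (42, 63)):
rhs = math.tensor([84., 126.], batch('b'))
math.solve_linear(f, rhs, Solve('CG', 1e-5, x0=0 * rhs))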
from functools import partial
periodic_laplace = partial(math.laplace, padding=extrapolation.PERIODIC)
example_input = math.ones(spatial(x=3))
matrix, bias = math.matrix_from_function(periodic_laplace, example_input)
math.print(matrix)
x=0   -2.   1.   1.   along ~x
x=1    1.  -2.   1.   along ~x
x=2    1.   1.  -2.   along ~x
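The returned matrix should reproduce the traced function; a quick consistency check, assuming @ performs the (sparse) matrix-vector product:
x = math.random_uniform(spatial(x=3))
assert math.close(periodic_laplace(x), matrix @ x + bias)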
def f(x):
    return math.l2_loss(math.sin(x))
f_grid = CenteredGrid(f, x=100, y=100, bounds=Box(x=2*PI, y=2*PI))
vis.plot(f_grid)
def minimize(x0):
    with math.SolveTape(record_trajectories=True) as solves:
        math.minimize(f, Solve('BFGS', 0, 1e-5, x0=x0))
    return solves[0].x  # shape (trajectory, x, y, vector)
trajectories = CenteredGrid(minimize, x=8, y=8, bounds=Box(x=2*PI, y=2*PI)).values
segments = []
for start, end in zip(trajectories.trajectory[:-1].trajectory, trajectories.trajectory[1:].trajectory):
    segments.append(PointCloud(start, end - start))  # the bounds argument is deprecated since 2.5 and ignored
anim_segments = field.stack(segments, batch('time'))
vis.plot(f_grid, anim_segments, overlay='args', animate='time', color='#FFFFFF', frame_time=500)
net = dense_net(1, 1, layers=[8, 8], activation='ReLU') # Implemented for PyTorch, TensorFlow, Jax-Stax
optimizer = adam(net, 1e-3)
BATCH = batch(batch=100)
def loss_function(data: Tensor):
    prediction = math.native_call(net, data)
    label = math.sin(data)
    return math.l2_loss(prediction - label), data, label
print(f"Initial loss: {loss_function(math.random_normal(BATCH))[0]}")
for i in range(100):
    loss, _data, _label = update_weights(net, optimizer, loss_function, data=math.random_normal(BATCH))
print(f"Final loss: {loss}")
Initial loss: (batchᵇ=100) 0.260 ± 0.205 (4e-05...7e-01)
Final loss: (batchᵇ=100) 0.094 ± 0.097 (9e-06...5e-01)
parameter_count(net)
97
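Trained weights can be written to disk and restored later; a sketch, assuming the unified network API also exposes save_state and load_state for the active backend:
save_state(net, 'sin_net')  # the file suffix depends on the backend, e.g. .pth for PyTorch
load_state(net, 'sin_net')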