This notebook lists useful code snippets.
from phi.flow import *
from phi.tf.flow import *
from phi.jax.stax.flow import *
from phi.torch.flow import *
2023-11-26 11:49:12.954898: I tensorflow/tsl/cuda/cudart_stub.cc:28] Could not find cuda drivers on your machine, GPU will not be used. 2023-11-26 11:49:12.992645: I tensorflow/tsl/cuda/cudart_stub.cc:28] Could not find cuda drivers on your machine, GPU will not be used. 2023-11-26 11:49:12.993841: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. 2023-11-26 11:49:13.736396: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT No GPU/TPU found, falling back to CPU. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)
# List the GPU devices visible to the current default backend (empty in this run — see output below).
backend.default_backend().list_devices('GPU')
[]
# List the CPU devices of the default backend (PyTorch here, per the output below).
backend.default_backend().list_devices('CPU')
[torch device 'CPU' (CPU 'cpu') | 15981 MB | 4 processors | ]
# Pin computations to the CPU.
# NOTE(review): relies on set_default_device returning a truthy value on success — confirm against the backend API.
assert backend.default_backend().set_default_device('CPU')
math.set_global_precision(32) # single precision is the default
x32 = math.random_normal(batch(b=4))  # created at the global (32-bit) precision
with math.precision(64): ## operations within this context will use 64 bit floats
    x64 = math.to_float(x32)  # to_float converts to the precision active in the enclosing context
# Random tensor creation with named dimensions; the second assignment overwrites the first.
data = math.random_normal(batch(examples=10)) * .1 # batch of scalar values
data = math.random_uniform(batch(examples=10), channel(vector='x,y')) # batch of vectors
data  # trailing expression displays the summary repr shown below
(examplesᵇ=10, vectorᶜ=x,y) 0.445 ± 0.268 (1e-02...1e+00)
# Index the 'examples' batch dimension by name to select the first vector.
data.examples[0]
(x=0.011, y=0.085)
Tensor
print(data)
print(f"{data:full:shape:dtype:color:.1f}")
(examplesᵇ=10, vectorᶜ=x,y) 0.445 ± 0.268 (1e-02...1e+00) (examplesᵇ=10, vectorᶜ=x,y) [[0.0, 0.1], [0.3, 0.5], [0.4, 0.4], [0.5, 1.0], [0.5, 0.8], [0.1, 0.6], [0.4, 0.9], [0.6, 0.7], [0.1, 0.6], [0.4, 0.0]]
Tensor
data = math.random_uniform(spatial(x=8, y=6))
# Plot a spatial tensor directly; vis.show is the displaying variant.
vis.plot(data) # or vis.show(data)
/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/phi/field/_field.py:142: FutureWarning: Instance checks on Grid are deprecated and will be removed in version 3.0. Use the methods instance.is_grid, instance.is_point_cloud, instance.is_centered and instance.is_staggered instead. return isinstance(self, Grid) /opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/phi/field/_field.py:148: FutureWarning: Instance checks on PointCloud are deprecated and will be removed in version 3.0. Use the methods instance.is_grid, instance.is_point_cloud, instance.is_centered and instance.is_staggered instead. return isinstance(self, PointCloud)
<Figure size 1200x500 with 2 Axes>
Tensor to NumPy
data.numpy(order='x,y')
array([[0.4410696 , 0.95934725, 0.06720412, 0.30094683, 0.20172751, 0.21062613], [0.4954998 , 0.44564545, 0.9559748 , 0.5037065 , 0.07400584, 0.81757927], [0.7320142 , 0.7158386 , 0.5989751 , 0.69232947, 0.57535815, 0.19882429], [0.1574719 , 0.09751654, 0.961734 , 0.99972266, 0.786199 , 0.57087886], [0.07150358, 0.61368334, 0.9968091 , 0.3749305 , 0.7449604 , 0.32483393], [0.92981213, 0.66242385, 0.3368066 , 0.29553294, 0.8711322 , 0.43148118], [0.53200275, 0.29400176, 0.99528533, 0.14394194, 0.04994398, 0.97237754], [0.55653167, 0.73855597, 0.46719742, 0.96737516, 0.5878397 , 0.2944795 ]], dtype=float32)
# Pack all of data's dims into one native axis, preceded by a new size-1 'extra' axis (shape (1, 48) below).
math.reshaped_native(data, ['extra', data.shape], to_numpy=True)
array([[0.4410696 , 0.95934725, 0.06720412, 0.30094683, 0.20172751, 0.21062613, 0.4954998 , 0.44564545, 0.9559748 , 0.5037065 , 0.07400584, 0.81757927, 0.7320142 , 0.7158386 , 0.5989751 , 0.69232947, 0.57535815, 0.19882429, 0.1574719 , 0.09751654, 0.961734 , 0.99972266, 0.786199 , 0.57087886, 0.07150358, 0.61368334, 0.9968091 , 0.3749305 , 0.7449604 , 0.32483393, 0.92981213, 0.66242385, 0.3368066 , 0.29553294, 0.8711322 , 0.43148118, 0.53200275, 0.29400176, 0.99528533, 0.14394194, 0.04994398, 0.97237754, 0.55653167, 0.73855597, 0.46719742, 0.96737516, 0.5878397 , 0.2944795 ]], dtype=float32)
# Pairwise distance matrix: renaming the instance dim makes the subtraction
# broadcast points × others instead of subtracting element-wise.
points = math.tensor([(0, 0), (0, 1), (1, 0)], instance('points'), channel('vector'))
distances = points - math.rename_dims(points, 'points', 'others')
math.print(math.vec_length(distances))  # 3×3 symmetric distance matrix, printed below
[[0. , 1. , 1. ], [1. , 0. , 1.4142135], [1. , 1.4142135, 0. ]]
CenteredGrid
zero_grid = CenteredGrid(0, 0, x=32, y=32, bounds=Box(x=1, y=1))
# CenteredGrid construction: (values, extrapolation, resolution, optional bounds).
y_grid = CenteredGrid((0, 1), extrapolation.BOUNDARY, x=32, y=32)  # constant vector field (0, 1)
noise_grid = CenteredGrid(Noise(), extrapolation.PERIODIC, x=32, y=32)  # random noise values
sin_curve = CenteredGrid(lambda x: math.sin(x), extrapolation.PERIODIC, x=100, bounds=Box(x=2 * PI))  # sampled from a function over [0, 2π]
vis.plot(zero_grid, y_grid, noise_grid, sin_curve, size=(12, 3))
/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/phi/field/_field.py:154: FutureWarning: Instance checks on StaggeredGrid are deprecated and will be removed in version 3.0. Use the methods instance.is_grid, instance.is_point_cloud, instance.is_centered and instance.is_staggered instead. return isinstance(self, StaggeredGrid)
<Figure size 1200x300 with 6 Axes>
StaggeredGrid
zero_grid = StaggeredGrid(0, 0, x=32, y=32, bounds=Box(x=1, y=1))
# StaggeredGrid supports the same constructor arguments as CenteredGrid above.
y_grid = StaggeredGrid((0, 1), extrapolation.BOUNDARY, x=32, y=32)  # constant vector field (0, 1)
noise_grid = StaggeredGrid(Noise(), extrapolation.PERIODIC, x=32, y=32)  # random noise values
sin_curve = StaggeredGrid(lambda x: math.sin(x), extrapolation.PERIODIC, x=100, bounds=Box(x=2 * PI))  # sampled from a function over [0, 2π]
vis.plot(zero_grid, y_grid, noise_grid, sin_curve, size=(12, 3))
<Figure size 1200x300 with 4 Axes>
StaggeredGrid from NumPy Arrays
Given matching arrays vx and vy, we can construct a StaggeredGrid.
Note that the shapes of the arrays must match the extrapolation!
# Component array shapes must match the extrapolation (see note above):
# BOUNDARY — each component has one extra sample along its own axis (33 vs 32).
vx = math.tensor(np.zeros([33, 32]), spatial('x,y'))
vy = math.tensor(np.zeros([32, 33]), spatial('x,y'))
StaggeredGrid(math.stack([vx, vy], channel('vector')), extrapolation.BOUNDARY)
# PERIODIC — components have exactly the grid resolution (32 × 32).
vx = math.tensor(np.zeros([32, 32]), spatial('x,y'))
vy = math.tensor(np.zeros([32, 32]), spatial('x,y'))
StaggeredGrid(math.stack([vx, vy], channel('vector')), extrapolation.PERIODIC)
# Zero extrapolation — components have one sample fewer along their own axis (31 vs 32).
vx = math.tensor(np.zeros([31, 32]), spatial('x,y'))
vy = math.tensor(np.zeros([32, 31]), spatial('x,y'))
StaggeredGrid(math.stack([vx, vy], channel('vector')), 0)
StaggeredGrid[(xˢ=32, yˢ=32, vectorᶜ=2), size=(x=32, y=32) int64, extrapolation=0]
def loss_function(x):
    """Scalar objective: L2 of cos(x); minima where cos(x) = 0, i.e. odd multiples of ±π/2."""
    return math.l2_loss(math.cos(x))
initial_guess = math.tensor([1, -1], math.batch('batch'))
# L-BFGS-B minimization with tolerances 0 and 1e-3 (see Solve docs for which is absolute/relative);
# result below is ≈ ±π/2 = ±1.574, one solution per batch entry.
math.minimize(loss_function, Solve('L-BFGS-B', 0, 1e-3, x0=initial_guess))
(1.574, -1.574) along batchᵇ
def f(x):
    """Linear function to invert: f(x) = 2 * x, so solve_linear recovers x = y / 2."""
    return 2 * x

# FIX: the original call passed plain Python numbers (y=84, x0=0), which raises an
# AssertionError in phiml (see the recorded traceback below) because rank-0 values
# cannot be auto-wrapped into a vector. Pass Tensors with at least one dimension;
# the conjugate-gradient solve then converges to x = 42 everywhere.
math.solve_linear(f, math.ones(spatial(x=3)) * 84, Solve('CG', 1e-5, x0=math.zeros(spatial(x=3))))
--------------------------------------------------------------------------- AssertionError Traceback (most recent call last) Cell In[17], line 4 1 def f(x): 2 return 2 * x ----> 4 math.solve_linear(f, 84, Solve('CG', 1e-5, x0=0)) File /opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/phiml/math/_optimize.py:538, in solve_linear(f, y, solve, grad_for_f, f_kwargs, *f_args, **f_kwargs_) 536 rank = y_tensors[0].rank 537 assert x0_tensors[0].rank == rank, f"y and x0 must have the same rank but got {y_tensors[0].shape.sizes} for y and {x0_tensors[0].shape.sizes} for x0" --> 538 y = wrap(y, *[batch(f'batch{i}') for i in range(rank - 1)], channel('vector')) 539 x0 = wrap(solve.x0, *[batch(f'batch{i}') for i in range(rank - 1)], channel('vector')) 540 solve = copy_with(solve, x0=x0) File /opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/phiml/math/_tensors.py:1655, in wrap(data, *shape) 1652 def wrap(data, 1653 *shape: Shape) -> Tensor: 1654 """ Short for `phiml.math.tensor()` with `convert=False`. """ -> 1655 return tensor(data, *shape, convert=False) File /opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/phiml/math/_tensors.py:1608, in tensor(data, convert, default_list_dim, *shape) 1606 return layout(data) 1607 elif isinstance(data, (numbers.Number, bool)): -> 1608 assert not shape, f"Trying to create a zero-dimensional Tensor from value '{data}' but shape={shape}" 1609 if convert: 1610 data = default_backend().as_tensor(data, convert_external=True) AssertionError: Trying to create a zero-dimensional Tensor from value '84' but shape=(vectorᶜ=None)
# Build the explicit matrix representation of a linear function by tracing it.
from functools import partial
periodic_laplace = partial(math.laplace, padding=extrapolation.PERIODIC)  # fix the padding keyword
example_input = math.ones(spatial(x=3))  # tracing needs a representative input shape
matrix, bias = math.matrix_from_function(periodic_laplace, example_input)
math.print(matrix)  # 3×3 periodic Laplace stencil (rows of 1, -2, 1), printed below
x=0 -2. 1. 1. along ~x x=1 1. -2. 1. along ~x x=2 1. 1. -2. along ~x
def f(x):
    """Loss landscape to visualize: L2 of sin(x). Note: redefines the `f` used earlier."""
    return math.l2_loss(math.sin(x))
# Sample f at every cell center of a 100×100 grid over [0, 2π]² and plot it.
f_grid = CenteredGrid(f, x=100, y=100, bounds=Box(x=2*PI, y=2*PI))
vis.plot(f_grid)
<Figure size 1200x500 with 2 Axes>
def minimize(x0):
    """Run a BFGS minimization of `f` from `x0` and return the recorded solver trajectory."""
    with math.SolveTape(record_trajectories=True) as solves:
        math.minimize(f, Solve('BFGS', 0, 1e-5, x0=x0))
    return solves[0].x # shape (trajectory, x, y, vector)
# Launch one optimization from every cell center of a coarse 8×8 grid.
trajectories = CenteredGrid(minimize, x=8, y=8, bounds=Box(x=2*PI, y=2*PI)).values
# Turn consecutive trajectory points into line segments: each PointCloud holds
# start positions with (end - start) direction vectors for one optimization step.
segments = []
for start, end in zip(trajectories.trajectory[:-1].trajectory, trajectories.trajectory[1:].trajectory):
    segments.append(PointCloud(start, end - start, bounds=Box(x=2*PI, y=2*PI)))
anim_segments = field.stack(segments, batch('time'))
# Animate the segments on top of the loss landscape, one frame per step (500 ms each).
vis.plot(f_grid, anim_segments, overlay='args', animate='time', color='#FFFFFF', frame_time=500)
/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/phi/field/_field.py:142: FutureWarning: Instance checks on Grid are deprecated and will be removed in version 3.0. Use the methods instance.is_grid, instance.is_point_cloud, instance.is_centered and instance.is_staggered instead. return isinstance(self, Grid) /opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/phi/field/_field.py:148: FutureWarning: Instance checks on PointCloud are deprecated and will be removed in version 3.0. Use the methods instance.is_grid, instance.is_point_cloud, instance.is_centered and instance.is_staggered instead. return isinstance(self, PointCloud)
# Train a small fully-connected network to approximate sin(x).
net = dense_net(1, 1, layers=[8, 8], activation='ReLU') # Implemented for PyTorch, TensorFlow, Jax-Stax
optimizer = adam(net, 1e-3)  # Adam with learning rate 1e-3
BATCH = batch(batch=100)
def loss_function(data: Tensor):
    """Per-sample L2 loss of the network prediction against the label sin(data)."""
    prediction = math.native_call(net, data)  # call the native network on tensor data
    label = math.sin(data)
    return math.l2_loss(prediction - label), data, label
print(f"Initial loss: {loss_function(math.random_normal(BATCH))[0]}")
# 100 optimizer steps; update_weights computes gradients of the first returned value.
for i in range(100):
    loss, _data, _label = update_weights(net, optimizer, loss_function, data=math.random_normal(BATCH))
print(f"Final loss: {loss}")
Initial loss: (batchᵇ=100) 0.230 ± 0.181 (1e-04...5e-01) Final loss: (batchᵇ=100) 0.095 ± 0.080 (5e-05...2e-01)
# Trainable parameter total: (1·8+8) + (8·8+8) + (8·1+1) = 97, matching the output below.
parameter_count(net)
97