This notebook lists useful code snippets.
from phi.flow import *
from phi.tf.flow import *
from phi.jax.stax.flow import *
from phi.torch.flow import *
WARNING: All log messages before absl::InitializeLog() is called are written to STDERR I0000 00:00:1778412457.714064 2717 cudart_stub.cc:31] Could not find cuda drivers on your machine, GPU will not be used. I0000 00:00:1778412457.759785 2717 cpu_feature_guard.cc:227] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. WARNING: All log messages before absl::InitializeLog() is called are written to STDERR I0000 00:00:1778412459.366341 2717 cudart_stub.cc:31] Could not find cuda drivers on your machine, GPU will not be used. E0000 00:00:1778412459.967418 2717 cuda_platform.cc:52] failed call to cuInit: INTERNAL: CUDA error: Failed call to cuInit: UNKNOWN ERROR (303) /opt/hostedtoolcache/Python/3.12.13/x64/lib/python3.12/site-packages/torch/cuda/__init__.py:1113: UserWarning: CUDA initialization: Unexpected error from cudaGetDeviceCount(). Did you run some cuda functions before calling NumCudaDevices() that might have already set an error? Error 302: Error loading CUDA libraries. GPU will not be used. (Triggered internally at /pytorch/c10/cuda/CUDAFunctions.cpp:119.) r = torch._C._cuda_getDeviceCount() if nvml_count < 0 else nvml_count
# List GPUs visible to the default backend (empty here: no CUDA drivers available).
backend.default_backend().list_devices('GPU')
[]
# List CPU devices visible to the default backend.
backend.default_backend().list_devices('CPU')
[torch device 'CPU' (CPU 'cpu') | 15990 MB | 4 processors | ]
# Select the CPU as the default compute device.
# NOTE: the original used `assert` on the return value; assertions are stripped
# under `python -O`, so a failed device switch would pass silently. Raise instead.
if not backend.default_backend().set_default_device('CPU'):
    raise RuntimeError("Failed to set default device to 'CPU'")
math.set_global_precision(32) # single precision is the default
x32 = math.random_normal(batch(b=4))
with math.precision(64): ## operations within this context will use 64 bit floats
    x64 = math.to_float(x32)  # converts the float32 tensor to float64
data = math.random_normal(batch(examples=10)) * .1 # batch of scalar values
data = math.random_uniform(batch(examples=10), channel(vector='x,y')) # batch of vectors (overwrites the scalars above)
data  # notebook echo: prints shape and value statistics
(examplesᵇ=10, vectorᶜ=x,y) 0.459 ± 0.288 (2e-03...9e-01)
data.examples[0]  # slice the batch dimension 'examples' by name; yields one (x, y) vector
(x=0.358, y=0.359)
# --- Printing Tensors ---
print(data)
# Format spec fields: all values, include shape and dtype, colored output, 1 decimal place.
print(f"{data:full:shape:dtype:color:.1f}")
(examplesᵇ=10, vectorᶜ=x,y) 0.459 ± 0.288 (2e-03...9e-01) (examplesᵇ=10, vectorᶜ=x,y) [[0.4, 0.4], [0.7, 0.9], [0.3, 0.9], [0.2, 0.3], [0.5, 0.3], [0.6, 0.1], [0.6, 0.9], [0.0, 0.5], [0.3, 0.0], [0.2, 0.9]]
# --- Plotting a Tensor ---
data = math.random_uniform(spatial(x=8, y=6))
vis.plot(data) # or vis.show(data)
/opt/hostedtoolcache/Python/3.12.13/x64/lib/python3.12/site-packages/phi/vis/_matplotlib/_matplotlib_plots.py:167: UserWarning: This figure includes Axes that are not compatible with tight_layout, so results might be incorrect. plt.tight_layout() # because subplot titles can be added after figure creation
# --- Tensor to NumPy ---
data.numpy(order='x,y')  # NumPy array with dimensions transposed to the requested order
array([[0.9495491 , 0.946657 , 0.24188793, 0.975975 , 0.3593011 ,
0.5775759 ],
[0.6287643 , 0.04376984, 0.22935694, 0.85324293, 0.69515866,
0.7738955 ],
[0.14730418, 0.9191379 , 0.7120109 , 0.21208698, 0.73663014,
0.5370546 ],
[0.4324667 , 0.29122525, 0.70229554, 0.01253647, 0.12457764,
0.54018855],
[0.30596375, 0.44692242, 0.33118993, 0.38342917, 0.5953857 ,
0.23059392],
[0.48649228, 0.59734184, 0.8796958 , 0.7074785 , 0.28984743,
0.34160197],
[0.5205149 , 0.04510987, 0.48762155, 0.94453794, 0.5022286 ,
0.6261617 ],
[0.28649974, 0.03869402, 0.23791057, 0.11448985, 0.00993162,
0.18660599]], dtype=float32)
# Pack all dims of `data` behind a new singleton 'extra' dim and convert to NumPy.
# NOTE(review): phiml.math.reshaped_native() is deprecated (see warning below);
# migrate to Tensor.native()/Tensor.numpy() — confirm the equivalent call signature.
math.reshaped_native(data, ['extra', data.shape], to_numpy=True)
/tmp/ipykernel_2717/2990683702.py:1: DeprecationWarning: phiml.math.reshaped_native() is deprecated. Use Tensor.native() instead. math.reshaped_native(data, ['extra', data.shape], to_numpy=True)
array([[0.9495491 , 0.946657 , 0.24188793, 0.975975 , 0.3593011 ,
0.5775759 , 0.6287643 , 0.04376984, 0.22935694, 0.85324293,
0.69515866, 0.7738955 , 0.14730418, 0.9191379 , 0.7120109 ,
0.21208698, 0.73663014, 0.5370546 , 0.4324667 , 0.29122525,
0.70229554, 0.01253647, 0.12457764, 0.54018855, 0.30596375,
0.44692242, 0.33118993, 0.38342917, 0.5953857 , 0.23059392,
0.48649228, 0.59734184, 0.8796958 , 0.7074785 , 0.28984743,
0.34160197, 0.5205149 , 0.04510987, 0.48762155, 0.94453794,
0.5022286 , 0.6261617 , 0.28649974, 0.03869402, 0.23791057,
0.11448985, 0.00993162, 0.18660599]], dtype=float32)
# Pairwise distance matrix: subtracting a copy with 'points' renamed to 'others'
# broadcasts the two instance dims against each other, giving (points, others, vector).
points = math.tensor([(0, 0), (0, 1), (1, 0)], instance('points'), channel('vector'))
distances = points - math.rename_dims(points, 'points', 'others')
# NOTE(review): math.vec_length is deprecated in favor of math.norm (see warning below).
math.print(math.vec_length(distances))
[[0. , 1. , 1. ], [1. , 0. , 1.4142135], [1. , 1.4142135, 0. ]]
/tmp/ipykernel_2717/2195475714.py:3: DeprecationWarning: phiml.math.length is deprecated in favor of phiml.math.norm math.print(math.vec_length(distances))
# --- CenteredGrid: one value stored per cell center ---
zero_grid = CenteredGrid(0, 0, x=32, y=32, bounds=Box(x=1, y=1))  # constant 0, zero boundary
y_grid = CenteredGrid((0, 1), extrapolation.BOUNDARY, x=32, y=32)  # constant vector field (0, 1)
noise_grid = CenteredGrid(Noise(), extrapolation.PERIODIC, x=32, y=32)  # random noise values
sin_curve = CenteredGrid(lambda x: math.sin(x), extrapolation.PERIODIC, x=100, bounds=Box(x=2 * PI))  # function sampled at cell centers
vis.plot(zero_grid, y_grid, noise_grid, sin_curve, size=(12, 3))
/opt/hostedtoolcache/Python/3.12.13/x64/lib/python3.12/site-packages/phiml/math/_tensors.py:1229: RuntimeWarning: invalid value encountered in scalar power result = op(n1, n2)
# --- StaggeredGrid: vector components stored on the corresponding cell faces ---
zero_grid = StaggeredGrid(0, 0, x=32, y=32, bounds=Box(x=1, y=1))  # constant 0, zero boundary
y_grid = StaggeredGrid((0, 1), extrapolation.BOUNDARY, x=32, y=32)  # constant vector field (0, 1)
noise_grid = StaggeredGrid(Noise(), extrapolation.PERIODIC, x=32, y=32)  # random noise values
sin_curve = StaggeredGrid(lambda x: math.sin(x), extrapolation.PERIODIC, x=100, bounds=Box(x=2 * PI))  # function sampled at faces
vis.plot(zero_grid, y_grid, noise_grid, sin_curve, size=(12, 3))
/opt/hostedtoolcache/Python/3.12.13/x64/lib/python3.12/site-packages/phiml/math/_shape.py:2859: RuntimeWarning: Stacking shapes with incompatible labels will result in labels being lost. For vector Got ('x', 'y') and ('x',)
warnings.warn(f"Stacking shapes with incompatible labels will result in labels being lost. For {dim.name} Got {prev.slice_names} and {dim.slice_names}", RuntimeWarning)
StaggeredGrid from NumPy Arrays — Given matching arrays vx and vy, we can construct a StaggeredGrid.
Note that the shapes of the arrays must match the extrapolation!
# BOUNDARY extrapolation: each component carries one extra sample along its own
# axis (33 x-faces for 32 cells), covering both outer faces.
vx = math.tensor(np.zeros([33, 32]), spatial('x,y'))
vy = math.tensor(np.zeros([32, 33]), spatial('x,y'))
StaggeredGrid(math.stack([vx, vy], dual(vector='x,y')), extrapolation.BOUNDARY)
# PERIODIC extrapolation: component shapes equal the resolution (32 x 32).
vx = math.tensor(np.zeros([32, 32]), spatial('x,y'))
vy = math.tensor(np.zeros([32, 32]), spatial('x,y'))
StaggeredGrid(math.stack([vx, vy], dual(vector='x,y')), extrapolation.PERIODIC)
# Constant extrapolation 0: each component has one sample fewer along its own
# axis (31 interior x-faces); the outer face values are implied by the BC.
vx = math.tensor(np.zeros([31, 32]), spatial('x,y'))
vy = math.tensor(np.zeros([32, 31]), spatial('x,y'))
StaggeredGrid(math.stack([vx, vy], dual(vector='x,y')), 0)
(~vectorᵈ=x,y, xˢ=~(x=31, y=32) int64, yˢ=~(x=32, y=31) int64) const 0.0 @ face of UniformGrid[(xˢ=32, yˢ=32, vectorᶜ=x,y)], BC=0
def loss_function(x):
    """Scalar objective for the optimizer: L2 loss of cos(x)."""
    cos_x = math.cos(x)
    return math.l2_loss(cos_x)
# Minimize from two starting points in parallel via the batch dimension 'batch'.
initial_guess = math.tensor([1, -1], math.batch('batch'))
# L-BFGS-B with absolute tolerance 0 and relative tolerance 1e-3.
math.minimize(loss_function, Solve('L-BFGS-B', 0, 1e-3, x0=initial_guess))
(1.574, -1.574) along batchᵇ
def f(x):
    """Linear map used as the system matrix for the linear solve: f(x) = 2*x."""
    return x * 2
# Solve the linear system f(x) = 84 with conjugate gradients (expected x = 42).
math.solve_linear(f, 84, Solve('CG', 1e-5, x0=0))
tensor([42.])
from functools import partial
# Bind the padding argument so the function takes a single tensor argument.
periodic_laplace = partial(math.laplace, padding=extrapolation.PERIODIC)
example_input = math.ones(spatial(x=3))
# Trace the linear function to recover its explicit matrix and bias.
matrix, bias = math.matrix_from_function(periodic_laplace, example_input)
math.print(matrix)
x=0 -2. 1. 1. along ~x x=1 1. -2. 1. along ~x x=2 1. 1. -2. along ~x
def f(x):
    """Objective landscape to minimize: L2 loss of sin(x)."""
    sin_x = math.sin(x)
    return math.l2_loss(sin_x)
# Sample f on a 100x100 grid over [0, 2*PI]^2 and plot the loss landscape.
f_grid = CenteredGrid(f, x=100, y=100, bounds=Box(x=2*PI, y=2*PI))
vis.plot(f_grid)
def minimize(x0):
    # Record the full optimization trajectory for each starting point x0.
    with math.SolveTape(record_trajectories=True) as solves:
        math.minimize(f, Solve('BFGS', 0, 1e-5, x0=x0))
    return solves[0].x # shape (trajectory, x, y, vector)

# NOTE(review): this cell currently fails with NotImplementedError inside
# SolveInfo (see traceback below) — the recorded trajectories cannot be
# stacked when minimize runs per grid sample point; needs an upstream fix
# or a different recording strategy before the animation below can render.
trajectories = CenteredGrid(minimize, x=8, y=8, bounds=Box(x=2*PI, y=2*PI)).values
segments = []
# Consecutive trajectory steps become arrow segments (start point + direction).
for start, end in zip(trajectories.trajectory[:-1].trajectory, trajectories.trajectory[1:].trajectory):
    segments.append(PointCloud(start, end - start, bounds=Box(x=2*PI, y=2*PI)))
anim_segments = field.stack(segments, batch('time'))
vis.plot(f_grid, anim_segments, overlay='args', animate='time', color='#FFFFFF', frame_time=500)
--------------------------------------------------------------------------- NotImplementedError Traceback (most recent call last) Cell In[20], line 6 2 with math.SolveTape(record_trajectories=True) as solves: 3 math.minimize(f, Solve('BFGS', 0, 1e-5, x0=x0)) 4 return solves[0].x # shape (trajectory, x, y, vector) 5 ----> 6 trajectories = CenteredGrid(minimize, x=8, y=8, bounds=Box(x=2*PI, y=2*PI)).values 7 segments = [] 8 for start, end in zip(trajectories.trajectory[:-1].trajectory, trajectories.trajectory[1:].trajectory): 9 segments.append(PointCloud(start, end - start, bounds=Box(x=2*PI, y=2*PI))) File /opt/hostedtoolcache/Python/3.12.13/x64/lib/python3.12/site-packages/phi/field/_grid.py:75, in CenteredGrid(values, boundary, bounds, resolution, extrapolation, convert, **resolution_) 73 values = sample(values, elements) 74 elif callable(values): ---> 75 values = sample_function(values, elements, 'center', extrapolation) 76 else: 77 if isinstance(values, (tuple, list)) and len(values) == resolution.rank: File /opt/hostedtoolcache/Python/3.12.13/x64/lib/python3.12/site-packages/phi/geom/_geom.py:859, in sample_function(f, elements, at, extrapolation) 857 values = math.map_s2b(f)(*pos.vector) 858 else: --> 859 values = math.map_s2b(f)(pos) 860 assert isinstance(values, math.Tensor), f"values function must return a Tensor but returned {type(values)}" 861 return values File /opt/hostedtoolcache/Python/3.12.13/x64/lib/python3.12/site-packages/phiml/math/_functional.py:1229, in map_types.<locals>.retyped_f(*args, **kwargs) 1227 retyped_kwarg, input_types = forward_retype(v, input_types) 1228 retyped_kwargs[k] = retyped_kwarg -> 1229 output = f(*retyped_args, **retyped_kwargs) 1230 restored_output = reverse_retype(output, input_types) 1231 return restored_output Cell In[20], line 3, in minimize(x0) 1 def minimize(x0): 2 with math.SolveTape(record_trajectories=True) as solves: ----> 3 math.minimize(f, Solve('BFGS', 0, 1e-5, x0=x0)) 4 return solves[0].x # shape 
(trajectory, x, y, vector) File /opt/hostedtoolcache/Python/3.12.13/x64/lib/python3.12/site-packages/phiml/math/_optimize.py:466, in minimize(f, solve) 464 iterations = reshaped_tensor(ret[-1].iterations, [batch_dims]) 465 function_evaluations = stack([reshaped_tensor(r.function_evaluations, [batch_dims]) for r in ret], batch('trajectory')) --> 466 result = SolveInfo(solve, x_, residual, iterations, function_evaluations, converged, diverged, ret[-1].method, ret[-1].message, t) 467 for tape in _SOLVE_TAPES: 468 tape._add(solve, trj, result) File /opt/hostedtoolcache/Python/3.12.13/x64/lib/python3.12/site-packages/phiml/math/_optimize.py:182, in SolveInfo.__init__(self, solve, x, residual, iterations, function_evaluations, converged, diverged, method, msg, solve_time) 180 _, res_tensors = disassemble_tree(residual, cache=False) 181 msg_fun = partial(_default_solve_info_msg, solve=solve) --> 182 msg = map_(msg_fun, msg, converged.trajectory[-1], diverged.trajectory[-1], iterations.trajectory[-1], method=method, residual=res_tensors[0], dims=converged.shape.without('trajectory')) 183 self.msg = msg 184 """ `str`, termination message """ File /opt/hostedtoolcache/Python/3.12.13/x64/lib/python3.12/site-packages/phiml/math/_functional.py:1430, in map_(function, dims, range, unwrap_scalars, expand_results, simplify, map_name, *args, **kwargs) 1428 assert all(r is None for r in results), f"map function returned None for some elements, {results}" 1429 return None -> 1430 return stack(results, dims_, expand_values=expand_results, simplify=simplify, layout_non_matching=True) File /opt/hostedtoolcache/Python/3.12.13/x64/lib/python3.12/site-packages/phiml/math/_magic_ops.py:261, in stack(values, dim, expand_values, simplify, layout_non_matching, allow_varying_labels, **kwargs) 259 # --- Fallback: multi-level stack --- 260 for dim_ in reversed(dim): --> 261 values = [stack(values[i:i + dim_.size], dim_, **kwargs) for i in range(0, len(values), dim_.size)] 262 return values[0] 
File /opt/hostedtoolcache/Python/3.12.13/x64/lib/python3.12/site-packages/phiml/math/_magic_ops.py:201, in stack(values, dim, expand_values, simplify, layout_non_matching, allow_varying_labels, **kwargs) 199 if any(isinstance(v, (tuple, list, dict)) for v in values_): 200 from ._tensors import wrap, layout --> 201 if _is_data_array(values_): 202 tensors = [wrap(v) for v in values_] 203 return stack(tensors, dim) File /opt/hostedtoolcache/Python/3.12.13/x64/lib/python3.12/site-packages/phiml/math/_magic_ops.py:868, in _is_data_array(sequence) 866 def _is_data_array(sequence): 867 try: --> 868 all([np.asarray(v).dtype != object for v in sequence]) 869 except ValueError: # e.g. inhomogeneous 870 return False File /opt/hostedtoolcache/Python/3.12.13/x64/lib/python3.12/site-packages/phiml/math/_tensors.py:123, in Tensor.__array__(self, dtype) 121 if self.rank > 1: 122 warnings.warn("Automatic conversion of Φ-ML tensors to NumPy can cause problems because the dimension order is not guaranteed.", SyntaxWarning, stacklevel=3) --> 123 return self.numpy(self._shape) File /opt/hostedtoolcache/Python/3.12.13/x64/lib/python3.12/site-packages/phiml/math/_tensors.py:118, in Tensor.numpy(self, order, force_expand) 93 def numpy(self, order: Union[str, tuple, list, Shape] = None, force_expand=True) -> np.ndarray: 94 """ 95 Converts this tensor to a `numpy.ndarray` with dimensions ordered according to `order`. 96 (...) 116 ValueError if the tensor cannot be transposed to match target_shape 117 """ --> 118 return self.backend.numpy(self.native(order, force_expand)) File /opt/hostedtoolcache/Python/3.12.13/x64/lib/python3.12/site-packages/phiml/math/_tensors.py:178, in Tensor.backend(self) 176 @property 177 def backend(self) -> Backend: --> 178 raise NotImplementedError(self.__class__) NotImplementedError: <class 'phiml.math._tree.Layout'>
# Fully-connected network: 1 input -> hidden layers [8, 8] with ReLU -> 1 output.
net = dense_net(1, 1, layers=[8, 8], activation='ReLU') # Implemented for PyTorch, TensorFlow, Jax-Stax
optimizer = adam(net, 1e-3)
BATCH = batch(batch=100)

def loss_function(data: Tensor):
    # Run the backend-native network on a phiml Tensor.
    prediction = math.native_call(net, data)
    label = math.sin(data)  # supervised target: learn sin(x)
    # Extra return values (data, label) are passed through by update_weights.
    return math.l2_loss(prediction - label), data, label

print(f"Initial loss: {loss_function(math.random_normal(BATCH))[0]}")
# 100 optimizer steps, each on a freshly sampled random batch.
for i in range(100):
    loss, _data, _label = update_weights(net, optimizer, loss_function, data=math.random_normal(BATCH))
print(f"Final loss: {loss}")
Initial loss: (batchᵇ=100) 0.203 ± 0.178 (9e-07...6e-01) Final loss: (batchᵇ=100) 0.078 ± 0.085 (5e-06...3e-01)
parameter_count(net)
97