Module phiml.backend.tensorflow

TensorFlow integration.

Expand source code
"""
TensorFlow integration.
"""
import platform as _platform

import os
import tensorflow as _tf

from .. import ML_LOGGER as _LOGGER

if _tf.__version__.startswith('1.'):
    raise ImportError(f"Φ-ML requires TensorFlow 2 but found TensorFlow {_tf.__version__}")

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # only errors
if _platform.system().lower() == 'windows':  # prevent Blas GEMM launch failed on Windows
    for i, device in enumerate(_tf.config.list_physical_devices('GPU')):
        _tf.config.experimental.set_memory_growth(device, True)
        _LOGGER.info(f"phiml.backend.tf: Setting memory_growth on GPU {i} to True to prevent Blas errors")

from ._compile_cuda import compile_cuda_ops

from ._tf_backend import TFBackend as _TFBackend

TENSORFLOW = _TFBackend()
"""Backend for TensorFlow operations."""

__all__ = [key for key in globals().keys() if not key.startswith('_')]

Sub-modules

phiml.backend.tensorflow.nets

TensorFlow implementation of the unified machine learning API. Equivalent functions also exist for the other frameworks supported by Φ-ML.

Global variables

var TENSORFLOW

Backend for TensorFlow operations.

Functions

def compile_cuda_ops(gcc: str = None, nvcc: str = None, cuda_lib: str = None)
Expand source code
def compile_cuda_ops(gcc: str = None,
                     nvcc: str = None,
                     cuda_lib: str = None):
    """
    Compile the custom CUDA kernels (resample / resample_gradient) that ship
    with this package, writing build artifacts to ``<package>/cuda/build`` and
    a compiler log to ``<package>/cuda/log.txt``.

    Args:
        gcc: Path to the GCC executable. Defaults to the compiler TensorFlow
            was built with if it exists on this machine, else ``'gcc'``.
        nvcc: Path to the NVCC executable. Defaults to
            ``/usr/local/cuda/bin/nvcc`` if present, else ``'nvcc'``.
        cuda_lib: Path to the CUDA library directory.
            Defaults to ``/usr/local/cuda/lib64/``.

    Raises:
        BaseException: Re-raises whatever the compiler invocation raised;
            details are written to the log file first.
    """
    tf_gcc = check_tf_cuda_compatibility()
    if gcc is None:
        gcc = tf_gcc if isfile(tf_gcc) else 'gcc'
    if nvcc is None:
        nvcc = '/usr/local/cuda/bin/nvcc' if isfile('/usr/local/cuda/bin/nvcc') else 'nvcc'
    if cuda_lib is None:
        cuda_lib = '/usr/local/cuda/lib64/'

    uml_tf_path = abspath(dirname(__file__))
    src_path = join(uml_tf_path, 'cuda', 'src')
    build_path = join(uml_tf_path, 'cuda', 'build')
    logfile_path = join(uml_tf_path, 'cuda', 'log.txt')
    print("Source Path:\t" + src_path)
    print("Build Path:\t" + build_path)
    print("GCC:\t\t" + gcc)
    print("NVCC:\t\t" + nvcc)
    print("CUDA lib:\t" + cuda_lib)
    print("----------------------------")
    # Remove old build files so stale objects cannot leak into the new build.
    if isdir(build_path):
        print(f'Removing old build files from {build_path}')
        for file in os.listdir(build_path):
            os.remove(join(build_path, file))
    else:
        print(f'Creating build directory at {build_path}')
        # makedirs(..., exist_ok=True) also creates missing parents and is
        # safe against a concurrent creation between the isdir check and here.
        os.makedirs(build_path, exist_ok=True)
    print('Compiling CUDA code...')
    with open(logfile_path, "w") as logfile:
        try:
            compile_cuda('resample', nvcc, src_path, build_path, logfile=logfile)
            compile_gcc('resample', gcc, src_path, build_path, cuda_lib, logfile=logfile)
            compile_cuda('resample_gradient', nvcc, src_path, build_path, logfile=logfile)
            compile_gcc('resample_gradient', gcc, src_path, build_path, cuda_lib, logfile=logfile)
            # compile_cuda('bicgstab_ilu_linear_solve_op', self.nvcc, src_path, build_path, logfile=logfile)
            # compile_gcc('bicgstab_ilu_linear_solve_op', self.gcc, src_path, build_path, self.cuda_lib, logfile=logfile)
        except BaseException:
            print(f"Compilation failed. See {logfile_path} for details.")
            # Bare `raise` re-raises the active exception with its original
            # traceback intact (unlike `raise err`).
            raise
    print(f"Compilation complete. See {logfile_path} for details.")