OpenNN
Open-source neural networks library
Loading...
Searching...
No Matches
neural_network.h File Reference

Declares the NeuralNetwork class. More...

#include "layer.h"
#include "tensor_utilities.h"
#include "variable.h"
#include "forward_propagation.h"

Go to the source code of this file.

Classes

struct  opennn::__nv_bfloat16
 
struct  opennn::__half
 
struct  opennn::cudnnTensorStruct
 
class  opennn::opennn::Json
 
class  opennn::opennn::JsonDocument
 
class  opennn::opennn::JsonWriter
 
struct  opennn::opennn::TypeInfo< Type::FP32 >
 
struct  opennn::opennn::TypeInfo< Type::BF16 >
 
struct  opennn::opennn::TypeInfo< Type::INT8 >
 
class  opennn::opennn::Configuration
 
struct  opennn::opennn::Configuration::Resolved
 
struct  opennn::opennn::Shape
 
struct  opennn::opennn::Buffer
 
struct  opennn::opennn::TensorView
 
class  opennn::opennn::Backend
 
struct  opennn::opennn::ForwardPropagation
 
class  opennn::NeuralNetwork
 Stack of Layers forming a trainable model. More...
 

Namespaces

namespace  opennn
 
namespace  opennn::opennn
 

Macros

#define EIGEN_MAX_ALIGN_BYTES   64
 
#define EIGEN_NO_DEBUG
 
#define NOMINMAX
 
#define _SILENCE_CXX17_ITERATOR_BASE_CLASS_DEPRECATION_WARNING
 
#define _CRT_SECURE_NO_WARNINGS
 
#define EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS
 
#define _SILENCE_EXPERIMENTAL_FILESYSTEM_DEPRECATION_WARNING
 

Typedefs

using opennn::cudaStream_t = void*
 
using opennn::cudaEvent_t = void*
 
using opennn::cublasHandle_t = void*
 
using opennn::cublasLtHandle_t = void*
 
using opennn::cudnnHandle_t = void*
 
using opennn::cudnnTensorDescriptor_t = cudnnTensorStruct*
 
using opennn::cudnnFilterDescriptor_t = void*
 
using opennn::cudnnConvolutionDescriptor_t = void*
 
using opennn::cudnnPoolingDescriptor_t = void*
 
using opennn::cudnnActivationDescriptor_t = void*
 
using opennn::cudnnDropoutDescriptor_t = void*
 
using opennn::cudnnOpTensorDescriptor_t = void*
 
using opennn::MatrixR = Matrix<float, Dynamic, Dynamic, Layout>
 
using opennn::MatrixI = Matrix<Index, Dynamic, Dynamic, Layout>
 
using opennn::MatrixB = Matrix<bool, Dynamic, Dynamic, Layout>
 
using opennn::VectorR = Matrix<float, Dynamic, 1>
 
using opennn::VectorI = Matrix<Index, Dynamic, 1>
 
using opennn::VectorB = Matrix<bool, Dynamic, 1>
 
using opennn::VectorMap = Map<VectorR, AlignedMax>
 
using opennn::MatrixMap = Map<MatrixR, Layout | AlignedMax>
 
using opennn::Tensor0 = Tensor<float, 0, Layout | AlignedMax>
 
using opennn::Tensor2 = Tensor<float, 2, Layout | AlignedMax>
 
using opennn::Tensor3 = Tensor<float, 3, Layout | AlignedMax>
 
using opennn::Tensor4 = Tensor<float, 4, Layout | AlignedMax>
 
template<int Rank>
using opennn::TensorR = Tensor<float, Rank, Layout | AlignedMax>
 
using opennn::TensorMap2 = TensorMap<Tensor<float, 2, Layout | AlignedMax>, AlignedMax>
 
using opennn::TensorMap3 = TensorMap<Tensor<float, 3, Layout | AlignedMax>, AlignedMax>
 
using opennn::TensorMap4 = TensorMap<Tensor<float, 4, Layout | AlignedMax>, AlignedMax>
 
template<int Rank>
using opennn::TensorMapR = TensorMap<Tensor<float, Rank, Layout | AlignedMax>, AlignedMax>
 
template<typename T, size_t N>
using opennn::opennn::array = Eigen::array<T, N>
 

Enumerations

enum  opennn::cudaDataType_t {
  opennn::CUDA_R_32F = 0 , opennn::CUDA_R_16F = 2 , opennn::CUDA_R_8I = 3 , opennn::CUDA_R_32I = 10 ,
  opennn::CUDA_R_16BF = 14
}
 
enum  opennn::cublasComputeType_t { opennn::CUBLAS_COMPUTE_32F = 0 , opennn::CUBLAS_COMPUTE_32F_FAST_16BF = 65 , opennn::CUBLAS_COMPUTE_32F_FAST_TF32 = 68 }
 
enum  opennn::cublasOperation_t { opennn::CUBLAS_OP_N = 0 , opennn::CUBLAS_OP_T = 1 }
 
enum  opennn::cublasLtEpilogue_t { opennn::CUBLASLT_EPILOGUE_DEFAULT = 1 , opennn::CUBLASLT_EPILOGUE_BIAS = 4 , opennn::CUBLASLT_EPILOGUE_RELU_BIAS = 132 }
 
enum  opennn::cudnnDataType_t {
  opennn::CUDNN_DATA_FLOAT = 0 , opennn::CUDNN_DATA_HALF = 2 , opennn::CUDNN_DATA_INT8 = 3 , opennn::CUDNN_DATA_INT32 = 4 ,
  opennn::CUDNN_DATA_BFLOAT16 = 14
}
 
enum  opennn::cudnnActivationMode_t {
  opennn::CUDNN_ACTIVATION_IDENTITY = 0 , opennn::CUDNN_ACTIVATION_SIGMOID = 1 , opennn::CUDNN_ACTIVATION_RELU = 2 , opennn::CUDNN_ACTIVATION_TANH = 3 ,
  opennn::CUDNN_ACTIVATION_ELU = 4
}
 
enum  opennn::cudnnPoolingMode_t { opennn::CUDNN_POOLING_MAX = 0 }
 
enum  opennn::cudnnBatchNormMode_t { opennn::CUDNN_BATCHNORM_PER_ACTIVATION = 0 }
 
enum  opennn::cudnnConvolutionFwdAlgo_t { opennn::CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM = 0 }
 
enum  opennn::cudnnConvolutionBwdDataAlgo_t { opennn::CUDNN_CONVOLUTION_BWD_DATA_ALGO_0 = 0 }
 
enum  opennn::cudnnConvolutionBwdFilterAlgo_t { opennn::CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0 = 0 }
 
enum class  opennn::opennn::Device { opennn::opennn::Auto , opennn::opennn::CPU , opennn::opennn::CUDA }
 
enum class  opennn::opennn::Type { opennn::opennn::Auto , opennn::opennn::FP32 , opennn::opennn::BF16 , opennn::opennn::INT8 }
 

Functions

template<typename T>
ostream & opennn::opennn::operator<< (ostream &os, const vector< T > &vec)
 
void opennn::opennn::add_json_field (JsonWriter &writer, const std::string &name, const std::string &value)
 
void opennn::opennn::write_json (JsonWriter &writer, std::initializer_list< std::pair< const char *, std::string > > props)
 
float opennn::opennn::read_json_type (const Json *root, const std::string &field)
 
long opennn::opennn::read_json_index (const Json *root, const std::string &field)
 
bool opennn::opennn::read_json_bool (const Json *root, const std::string &field)
 
std::string opennn::opennn::read_json_string (const Json *root, const std::string &field)
 
std::string opennn::opennn::read_json_string_fallback (const Json *root, std::initializer_list< std::string > names)
 
const Json * opennn::opennn::require_json_field (const Json *root, const std::string &field)
 
template<typename Func>
void opennn::opennn::for_json_items (const Json *parent, const char *tag, long count, Func func)
 
JsonDocument opennn::opennn::load_json_file (const std::filesystem::path &file_name)
 
const Json * opennn::opennn::get_json_root (const JsonDocument &document, const std::string &tag)
 
template<Type... Supported, typename F>
void opennn::opennn::visit_type (Type t, F &&f)
 
template<Type... Supported, typename F>
void opennn::opennn::visit_type_pair (Type t_in, Type t_out, F &&f)
 
cudnnDataType_t opennn::opennn::to_cudnn (Type type) noexcept
 
cudaDataType_t opennn::opennn::to_cuda (Type type) noexcept
 
Index opennn::opennn::type_bytes (Type type) noexcept
 
int opennn::opennn::to_int (Index value)
 
float opennn::opennn::to_type (Index value)
 
Index opennn::opennn::align_up (Index value, Index alignment)
 
Index opennn::opennn::get_aligned_size (Index size)
 
Index opennn::opennn::get_aligned_bytes (Index n_bytes)
 
template<typename Container>
Index opennn::opennn::ssize (const Container &container) noexcept
 
bool opennn::opennn::is_aligned (const void *ptr)
 
Index opennn::opennn::aligned_total_elements (const vector< Shape > &shapes)
 
Index opennn::opennn::aligned_total_elements (const vector< vector< Shape > > &nested)
 
Index opennn::opennn::aligned_total_bytes (const vector< Shape > &shapes, const vector< Type > &dtypes)
 
Index opennn::opennn::aligned_total_bytes (const vector< vector< Shape > > &nested, const vector< vector< Type > > &dtypes)
 
Index opennn::opennn::aligned_total_bytes (const vector< Shape > &shapes, Type dtype)
 
string opennn::opennn::shape_to_string (const Shape &, const string &=" ")
 
Shape opennn::opennn::string_to_shape (const string &, const string &=" ")
 
template<typename... Vs>
size_t opennn::opennn::hash_combine (const Vs &... values)
 
ThreadPoolDevice & opennn::opennn::get_device ()
 

Variables

constexpr float opennn::opennn::EPSILON = numeric_limits<float>::epsilon()
 
constexpr float opennn::opennn::MAX = numeric_limits<float>::max()
 
constexpr float opennn::opennn::NEG_INFINITY = -numeric_limits<float>::infinity()
 
constexpr float opennn::opennn::QUIET_NAN = numeric_limits<float>::quiet_NaN()
 
constexpr float opennn::opennn::SOFTMAX_MASK_VALUE = float(-1e9f)
 
constexpr int opennn::Layout = Eigen::RowMajor
 
static constexpr Index opennn::opennn::ALIGN_BYTES = EIGEN_MAX_ALIGN_BYTES
 
static constexpr Index opennn::opennn::ALIGN_ELEMENTS = ALIGN_BYTES / sizeof(float)
 
constexpr cudaDataType_t opennn::opennn::CUDA_REDUCTION_DTYPE = CUDA_R_32F
 
constexpr cublasComputeType_t opennn::opennn::CUBLAS_COMPUTE_DTYPE = CUBLAS_COMPUTE_32F_FAST_TF32
 

Detailed Description

Declares the NeuralNetwork class.

NeuralNetwork is the base class for every model in OpenNN. It owns an ordered stack of Layer instances, the parameter buffer they share, and the device/precision configuration used for forward and backward passes.

Macro Definition Documentation

◆ _CRT_SECURE_NO_WARNINGS

#define _CRT_SECURE_NO_WARNINGS

◆ _SILENCE_CXX17_ITERATOR_BASE_CLASS_DEPRECATION_WARNING

#define _SILENCE_CXX17_ITERATOR_BASE_CLASS_DEPRECATION_WARNING

◆ _SILENCE_EXPERIMENTAL_FILESYSTEM_DEPRECATION_WARNING

#define _SILENCE_EXPERIMENTAL_FILESYSTEM_DEPRECATION_WARNING

◆ EIGEN_MAX_ALIGN_BYTES

#define EIGEN_MAX_ALIGN_BYTES   64

◆ EIGEN_NO_DEBUG

#define EIGEN_NO_DEBUG

◆ EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS

#define EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS

◆ NOMINMAX

#define NOMINMAX