C++ API Reference for Intel® Data Analytics Acceleration Library 2018 Update 3

neural_networks_training_model.h
1 /* file: neural_networks_training_model.h */
2 /*******************************************************************************
3 * Copyright 2014-2018 Intel Corporation.
4 *
5 * This software and the related documents are Intel copyrighted materials, and
6 * your use of them is governed by the express license under which they were
7 * provided to you (License). Unless the License provides otherwise, you may not
8 * use, modify, copy, publish, distribute, disclose or transmit this software or
9 * the related documents without Intel's prior written permission.
10 *
11 * This software and the related documents are provided as is, with no express
12 * or implied warranties, other than those that are expressly stated in the
13 * License.
14 *******************************************************************************/
15 
16 /*
17 //++
18 // Implementation of neural network.
19 //--
20 */
21 
22 #ifndef __NEURAL_NETWORK_TRAINING_MODEL_H__
23 #define __NEURAL_NETWORK_TRAINING_MODEL_H__
24 
25 #include "services/daal_defines.h"
26 #include "data_management/data/tensor.h"
27 #include "data_management/data/numeric_table.h"
28 #include "services/daal_memory.h"
29 #include "algorithms/neural_networks/layers/layer.h"
30 #include "algorithms/neural_networks/layers/layer_types.h"
31 #include "algorithms/neural_networks/layers/loss/loss_layer_forward.h"
32 #include "algorithms/neural_networks/layers/split/split_layer_forward.h"
33 #include "algorithms/neural_networks/neural_networks_prediction_model.h"
34 #include "algorithms/neural_networks/neural_networks_training_topology.h"
35 
36 #include "algorithms/optimization_solver/iterative_solver/iterative_solver_batch.h"
37 
38 namespace daal
39 {
40 namespace algorithms
41 {
45 namespace neural_networks
46 {
47 namespace training
48 {
49 namespace interface1
50 {
59 class Parameter : public daal::algorithms::Parameter
60 {
61 public:
67  Parameter(const services::SharedPtr<optimization_solver::iterative_solver::Batch > &optimizationSolver_ = services::SharedPtr<optimization_solver::iterative_solver::Batch>(),
68  engines::EnginePtr engine_ = engines::mt19937::Batch<DAAL_ALGORITHM_FP_TYPE>::create()) :
69  optimizationSolver(optimizationSolver_),
70  engine(engine_) {}
71 
72  services::SharedPtr<optimization_solver::iterative_solver::Batch> optimizationSolver;
73  engines::EnginePtr engine;
74 };
75 
80 class DAAL_EXPORT Model : public neural_networks::ModelImpl
81 {
82 public:
83  DECLARE_SERIALIZABLE_CAST(Model);
84 
85  using neural_networks::ModelImpl::getWeightsAndBiases;
86  using neural_networks::ModelImpl::setWeightsAndBiases;
87 
89  Model();
90 
91  static services::SharedPtr<Model> create(services::Status *stat = NULL);
92 
94  Model(const Model &model) :
95  ModelImpl(model),
96  _backwardLayers(model.getBackwardLayers()),
97  _storeWeightDerivativesInTable(model._storeWeightDerivativesInTable)
98  {}
99 
101  virtual ~Model() {}
102 
111  template<typename modelFPType>
112  services::Status initialize(const services::Collection<size_t> &sampleSize, const Topology &topology,
113  const Parameter &parameter = Parameter())
114  {
115  using namespace layers;
116  using namespace services;
117 
118  size_t nLayers = topology.size();
119  Status st;
120  _backwardNextLayers = SharedPtr<Collection<NextLayers> >(new Collection<NextLayers>(nLayers));
121  if (!_backwardNextLayers)
122  {
123  st.add(services::ErrorMemoryAllocationFailed);
124  return st;
125  }
126 
127  for(size_t i = 0; i < nLayers; i++)
128  {
129  insertLayer(topology[i]);
130  }
131 
132  for(int i = (int)nLayers - 1; i >= 0; i--)
133  {
134  size_t layerId = topology[i].index();
135  const NextLayers &next = topology[i].nextLayers();
136  for (size_t j = 0; j < next.size(); j++)
137  {
138  (*_backwardNextLayers)[next[j]].push_back(layerId);
139  }
140  }
141 
142  for(int i = (int)nLayers - 1; i >= 0; i--)
143  {
144  layers::forward::LayerIfacePtr layer = getForwardLayer(i);
145  SharedPtr<split::forward::Batch<float> > splitLayerFloat = dynamicPointerCast<split::forward::Batch<float>, forward::LayerIface>(layer);
146  SharedPtr<split::forward::Batch<double> > splitLayerDouble = dynamicPointerCast<split::forward::Batch<double>, forward::LayerIface>(layer);
147  if(splitLayerFloat.get() || splitLayerDouble.get())
148  {
149  const NextLayers &next = topology[i].nextLayers();
150  for (size_t j = 0; j < next.size(); j++)
151  {
152  layers::forward::LayerIfacePtr nextLayer = getForwardLayer(next[j]);
153  nextLayer->getLayerParameter()->allowInplaceComputation = false;
154  }
155  }
156  }
157 
158  allocate<modelFPType>(sampleSize, parameter);
159 
160  for(size_t i = 0; i < nLayers; i++)
161  {
162  getForwardLayer(i)->enableResetOnCompute(false);
163  getBackwardLayer(i)->enableResetOnCompute(false);
164  }
165  return st;
166  }
167 
172  const ForwardLayersPtr getForwardLayers() const
173  {
174  return _forwardLayers;
175  }
176 
182  const layers::forward::LayerIfacePtr getForwardLayer(size_t index) const
183  {
184  return _forwardLayers->get(index);
185  }
186 
191  const BackwardLayersPtr getBackwardLayers() const
192  {
193  return _backwardLayers;
194  }
195 
201  const layers::backward::LayerIfacePtr getBackwardLayer(size_t index) const
202  {
203  return _backwardLayers->get(index);
204  }
205 
210  template<typename modelFPType>
211  const prediction::ModelPtr getPredictionModel()
212  {
213  using namespace services;
214  using namespace data_management;
215  using namespace layers;
216 
217  size_t nLayers = _forwardLayers->size();
218 
219  /* Copy forward layers */
220  ForwardLayersPtr _predictionForwardLayers(new ForwardLayers(nLayers));
221  SharedPtr<Collection<NextLayers> > _predictionNextLayers(new Collection<NextLayers>(nLayers));
222  for (size_t i = 0; i < nLayers; i++)
223  {
224  (*_predictionNextLayers)[i] = _nextLayers->get(i);
225  (*_predictionForwardLayers)[i] = ((*_forwardLayers)[i])->getLayerForPrediction();
226  (*_predictionForwardLayers)[i]->getLayerParameter()->predictionStage = true;
227  }
228 
229  bool storeWeightsInTable = true;
230  prediction::ModelPtr predictionModel(new prediction::Model(
231  _predictionForwardLayers, _predictionNextLayers, (modelFPType)0.0, storeWeightsInTable));
232 
233  predictionModel->setWeightsAndBiases(getWeightsAndBiases());
234  return predictionModel;
235  }
236 
242  bool getWeightsAndBiasesStorageStatus() const
243  {
244  return _storeWeightsInTable;
245  }
246 
254  services::Status setWeightsAndBiases(size_t idx, const data_management::NumericTablePtr &table);
255 
261  data_management::NumericTablePtr getWeightsAndBiases(size_t idx) const;
262 
267  data_management::NumericTablePtr getWeightsAndBiasesDerivatives() const;
268 
274  data_management::NumericTablePtr getWeightsAndBiasesDerivatives(size_t idx) const;
275 
283  DAAL_DEPRECATED services::Status setErrors(services::ErrorCollection &errors)
284  {
285  return services::Status();
286  }
287 
293  DAAL_DEPRECATED const services::ErrorCollection &getErrors() const { return _errors; }
294 
302  template<typename modelFPType>
303  services::Status allocate(const services::Collection<size_t> &sampleSize, const Parameter &parameter = Parameter())
304  {
305  using namespace services;
306  using namespace data_management;
307  using namespace layers;
308 
309  services::Status s;
310 
311  if (_sampleSize.size() > 0) { _sampleSize.clear(); }
312  _sampleSize = sampleSize;
313 
314  _forwardLayers->get(0)->getLayerInput()->set(forward::data,
315  TensorPtr(new HomogenTensor<modelFPType>(_sampleSize, Tensor::doAllocate)));
316 
317  size_t nLayers = _forwardLayers->size();
318 
319  for (size_t i = 0; i < nLayers; i++)
320  {
321  layers::Parameter *lParameter = _forwardLayers->get(i)->getLayerParameter();
322  initializers::Parameter *wParameter = lParameter->weightsInitializer->getParameter();
323  initializers::Parameter *bParameter = lParameter->biasesInitializer->getParameter();
324 
325  s |= connectForwardLayers(i);
326 
327  if(!wParameter->engine)
328  {
329  wParameter->engine = parameter.engine;
330  }
331  if(!bParameter->engine)
332  {
333  bParameter->engine = parameter.engine;
334  }
335  }
336 
337  bool checkWeightsAndBiasesAlloc = true;
338  s |= createWeightsAndBiases<modelFPType>(checkWeightsAndBiasesAlloc);
339  s |= enableConditionalGradientPropagation();
340  if(!s) return s;
341 
342  for (size_t i = 0; i < nLayers; i++)
343  {
344  forward::LayerIfacePtr forwardLayer = _forwardLayers->get(i);
345  forward::Input *forwardInput = forwardLayer->getLayerInput();
346 
347  forwardLayer->getLayerResult()->setResultForBackward(forwardInput);
348  }
349 
350  /* Check weights and biases derivatives allocation status before allocating the results of backward layers */
351  s |= checkWeightsAndBiasesDerivativesAllocation();
352 
353  for (int i = (int)nLayers - 1; i >= 0; i--)
354  {
355  s |= connectBackwardLayers(i);
356  }
357 
358  s |= createWeightsAndBiasesDerivatives<modelFPType>();
359  if(_solverOptionalArgumentCollection.size() == 0)
360  {
361  if(_storeWeightsInTable) _solverOptionalArgumentCollection = DataCollection(1);
362  else _solverOptionalArgumentCollection = DataCollection(nLayers);
363  }
364  return s;
365  }
366 
367 protected:
369  Model(services::Status &st);
370 
372  template<typename Archive, bool onDeserialize>
373  services::Status serialImpl(Archive *arch)
374  {
375  return services::Status();
376  }
377 
378  void insertLayer(const layers::LayerDescriptor &layerDescriptor)
379  {
380  _forwardLayers->insert(layerDescriptor.index(), layerDescriptor.layer()->forwardLayer->clone());
381  _backwardLayers->insert(layerDescriptor.index(), layerDescriptor.layer()->backwardLayer->clone());
382  _nextLayers->insert(layerDescriptor.index(), layerDescriptor.nextLayers());
383  }
384 
385  services::Status enableConditionalGradientPropagation()
386  {
387  using namespace services;
388  using namespace layers;
389 
390  services::Status s;
391 
392  size_t nLayers = _forwardLayers->size();
393 
394  /* Array of flags for the neural network layers */
395  bool *flags = (bool *)daal_malloc(nLayers * sizeof(bool));
396 
397  /* Perform depth search to disable gradient propagation in starting forward layers with weights
398  and all the previous layers */
399  s |= disableGradientPropagationInStartingLayers(nLayers, flags);
400 
401  /* Perform depth search to enable gradient propagation in the layers
402  that follow forward layers with weights */
403  s |= enableGradientPropagation(nLayers, flags);
404 
405  daal_free(flags);
406  return s;
407  }
408 
409  services::Status disableGradientPropagationInStartingLayers(size_t nLayers, bool *visited)
410  {
411  using namespace services;
412  using namespace layers;
413 
414  for (size_t i = 0; i < nLayers; i++)
415  {
416  visited[i] = false;
417  }
418 
419  Collection<size_t> stack;
420  stack.push_back(0 /* ID of the first forward layer */);
421  while (stack.size() > 0)
422  {
423  size_t layerId = stack[stack.size() - 1];
424  stack.erase(stack.size() - 1);
425  if (!visited[layerId])
426  {
427  visited[layerId] = true;
428 
429  forward::LayerIfacePtr forwardLayer = _forwardLayers->get(layerId);
430  forward::Input *forwardInput = forwardLayer->getLayerInput();
431  layers::Parameter *forwardParameter = forwardLayer->getLayerParameter();
432  layers::Parameter *backwardParameter = _backwardLayers->get(layerId)->getLayerParameter();
433 
434  backwardParameter->propagateGradient = false;
435 
436  if (forwardInput->getWeightsSizes(forwardParameter).size() +
437  forwardInput->getBiasesSizes(forwardParameter) .size() == 0)
438  {
439  /* Continue depth search for layers that do not have weights and biases */
440  const NextLayers &next = _nextLayers->get(layerId);
441  for (size_t i = 0; i < next.size(); i++)
442  {
443  stack.push_back(next[i]);
444  }
445  }
446  }
447  }
448  return services::Status();
449  }
450 
451  services::Status enableGradientPropagationInSubsequentLayers(size_t startLayerId, size_t nLayers, bool *enabledPropagation)
452  {
453  using namespace services;
454  using namespace layers;
455  Collection<size_t> stack;
456  const NextLayers &next = _nextLayers->get(startLayerId);
457  for (size_t i = 0; i < next.size(); i++)
458  {
459  stack.push_back(next[i]);
460  }
461  while (stack.size() > 0)
462  {
463  size_t layerId = stack[stack.size() - 1];
464  stack.erase(stack.size() - 1);
465  if (!enabledPropagation[layerId])
466  {
467  enabledPropagation[layerId] = true;
468  backward::LayerIfacePtr backwardLayer = _backwardLayers->get(layerId);
469  backwardLayer->getLayerParameter()->propagateGradient = true;
470  const NextLayers &next = _nextLayers->get(layerId);
471  for (size_t i = 0; i < next.size(); i++)
472  {
473  stack.push_back(next[i]);
474  }
475  }
476  }
477  return services::Status();
478  }
479 
480  services::Status enableGradientPropagation(size_t nLayers, bool *enabledPropagation)
481  {
482  using namespace services;
483  using namespace layers;
484  Collection<size_t> stack;
485  stack.push_back(0 /* ID of the first forward layer */);
486 
487  for (size_t i = 0; i < nLayers; i++)
488  {
489  enabledPropagation[i] = false;
490  }
491 
492  while (stack.size() > 0)
493  {
494  size_t layerId = stack[stack.size() - 1];
495  stack.erase(stack.size() - 1);
496  if (!enabledPropagation[layerId])
497  {
498  forward::LayerIfacePtr forwardLayer = _forwardLayers->get(layerId);
499  forward::Input *forwardInput = forwardLayer->getLayerInput();
500  layers::Parameter *forwardParameter = forwardLayer->getLayerParameter();
501  layers::Parameter *backwardParameter = _backwardLayers->get(layerId)->getLayerParameter();
502 
503  if (backwardParameter->propagateGradient == false &&
504  (forwardInput->getWeightsSizes(forwardParameter).size() +
505  forwardInput->getBiasesSizes(forwardParameter) .size()) > 0)
506  {
507  enableGradientPropagationInSubsequentLayers(layerId, nLayers, enabledPropagation);
508  }
509  else
510  {
511  const NextLayers &next = _nextLayers->get(layerId);
512  for (size_t i = 0; i < next.size(); i++)
513  {
514  stack.push_back(next[i]);
515  }
516  }
517  }
518  }
519  return services::Status();
520  }
521 
522  services::Status checkWeightsAndBiasesDerivativesAllocation()
523  {
524  using namespace services;
525  using namespace layers;
526 
527  _storeWeightDerivativesInTable = true;
528  size_t nLayers = _backwardLayers->size();
529  for (size_t i = 0; i < nLayers; i++)
530  {
531  backward::LayerIfacePtr &backwardLayer = _backwardLayers->get(i);
532  if (!backwardLayer) { continue; }
533  backward::ResultPtr backwardResult = backwardLayer->getLayerResult();
534  /* Check if weight and bias derivatives are allocated by user */
535  if (backwardResult->get(backward::weightDerivatives) || backwardResult->get(backward::biasDerivatives))
536  {
537  _storeWeightDerivativesInTable = false;
538  break;
539  }
540  }
541  return services::Status();
542  }
543 
544  services::Status connectBackwardLayers(size_t layerId)
545  {
546  using namespace services;
547  using namespace data_management;
548  using namespace layers;
549 
550  forward::LayerIfacePtr &forwardLayer = _forwardLayers->get(layerId);
551  backward::LayerIfacePtr &backwardLayer = _backwardLayers->get(layerId);
552 
553  if (!forwardLayer || !backwardLayer) { return services::Status(); }
554 
555  backward::Input *backwardInput = backwardLayer->getLayerInput();
556  forward::ResultPtr forwardResult = forwardLayer->getLayerResult();
557 
558  backwardInput->setInputFromForward(forwardResult);
559  backwardLayer->allocateResult();
560 
561  /* Don't connect backward layer to next backward layers
562  if the layer does not propagate gradient */
563  if (!backwardLayer->getLayerParameter()->propagateGradient) { return services::Status(); }
564 
565  backward::ResultPtr backwardResult = backwardLayer->getLayerResult();
566 
567  const NextLayers &next = _backwardNextLayers->get(layerId);
568  const size_t nextLayersSize = next.size();
569  for(size_t j = 0; j < nextLayersSize; j++)
570  {
571  size_t inputIndex = nextLayersSize - j - 1;
572  _backwardLayers->get(next[j])->addInput(backwardResult, inputIndex, 0 /* index in input object of next[j] backward layer */);
573  }
574  return services::Status();
575  }
576 
577  template<typename modelFPType>
578  DAAL_EXPORT services::Status createWeightsAndBiasesDerivatives();
579 
580 public:
586  algorithms::OptionalArgumentPtr getSolverOptionalArgument(size_t index)
587  {
588  return services::dynamicPointerCast<algorithms::OptionalArgument, data_management::SerializationIface>(_solverOptionalArgumentCollection[index]);
589  }
590 
598  services::Status setSolverOptionalArgument(const algorithms::OptionalArgumentPtr& solverOptionalArgument, size_t index)
599  {
600  _solverOptionalArgumentCollection[index] = solverOptionalArgument;
601  return services::Status();
602  }
603 
608  data_management::DataCollection getSolverOptionalArgumentCollection()
609  {
610  return _solverOptionalArgumentCollection;
611  }
612 
619  services::Status setSolverOptionalArgumentCollection(const data_management::DataCollection &solverOptionalArgumentCollection)
620  {
621  _solverOptionalArgumentCollection = solverOptionalArgumentCollection;
622  return services::Status();
623  }
624 
625 private:
626  data_management::DataCollection _solverOptionalArgumentCollection;
627  services::Collection<size_t> _sampleSize;
628  BackwardLayersPtr _backwardLayers;
629  services::SharedPtr<services::Collection<layers::NextLayers> > _backwardNextLayers;
630  mutable services::ErrorCollection _errors;
632  bool _storeWeightDerivativesInTable;
633  LearnableParametersIfacePtr _weightsAndBiasesDerivatives;
634 };
635 
636 typedef services::SharedPtr<Model> ModelPtr;
639 } // namespace interface1
640 using interface1::Parameter;
641 using interface1::Model;
642 using interface1::ModelPtr;
643 
644 } // namespace training
645 } // namespace neural_networks
646 } // namespace algorithms
647 } // namespace daal
648 #endif
daal::algorithms::neural_networks::training::interface1::Model::setSolverOptionalArgumentCollection
services::Status setSolverOptionalArgumentCollection(const data_management::DataCollection &solverOptionalArgumentCollection)
Definition: neural_networks_training_model.h:619
daal::algorithms::neural_networks::training::interface1::Model
Class representing the model of neural network.
Definition: neural_networks_training_model.h:80
daal::algorithms::neural_networks::training::interface1::Model::setSolverOptionalArgument
services::Status setSolverOptionalArgument(const algorithms::OptionalArgumentPtr &solverOptionalArgument, size_t index)
Definition: neural_networks_training_model.h:598
daal::algorithms::neural_networks::training::interface1::Topology
Class defining a neural network topology - a set of layers and the connections between them - on the training stage.
Definition: neural_networks_training_topology.h:40
daal::algorithms::neural_networks::layers::backward::weightDerivatives
Definition: layer_backward_types.h:85
daal::algorithms::neural_networks::training::interface1::Topology::push_back
size_t push_back(const layers::LayerIfacePtr &layer)
Definition: neural_networks_training_topology.h:69
daal
Definition: algorithm_base_common.h:31
daal::algorithms::neural_networks::training::interface1::Model::initialize
services::Status initialize(const services::Collection< size_t > &sampleSize, const Topology &topology, const Parameter &parameter=Parameter())
Definition: neural_networks_training_model.h:112
daal::algorithms::neural_networks::training::interface1::Model::allocate
services::Status allocate(const services::Collection< size_t > &sampleSize, const Parameter &parameter=Parameter())
Definition: neural_networks_training_model.h:303
daal::services::ErrorMemoryAllocationFailed
Definition: error_indexes.h:146
daal::algorithms::neural_networks::training::interface1::Model::setErrors
DAAL_DEPRECATED services::Status setErrors(services::ErrorCollection &errors)
Definition: neural_networks_training_model.h:283
daal::algorithms::neural_networks::training::interface1::Model::getForwardLayers
const ForwardLayersPtr getForwardLayers() const
Definition: neural_networks_training_model.h:172
daal::algorithms::neural_networks::training::interface1::Parameter::optimizationSolver
services::SharedPtr< optimization_solver::iterative_solver::Batch > optimizationSolver
Definition: neural_networks_training_model.h:72
daal::algorithms::neural_networks::layers::backward::biasDerivatives
Definition: layer_backward_types.h:86
daal_defines.h
daal::algorithms::neural_networks::training::interface1::Model::getBackwardLayers
const BackwardLayersPtr getBackwardLayers() const
Definition: neural_networks_training_model.h:191
daal::algorithms::neural_networks::training::interface1::Model::getBackwardLayer
const layers::backward::LayerIfacePtr getBackwardLayer(size_t index) const
Definition: neural_networks_training_model.h:201
daal::algorithms::neural_networks::training::interface1::Model::getErrors
DAAL_DEPRECATED const services::ErrorCollection & getErrors() const
Definition: neural_networks_training_model.h:293
daal::algorithms::neural_networks::training::interface1::Parameter::engine
engines::EnginePtr engine
Definition: neural_networks_training_model.h:73
daal::algorithms::interface1::Parameter
Base class to represent computation parameters. Algorithm-specific parameters are represented as deri...
Definition: algorithm_types.h:60
daal::algorithms::neural_networks::training::interface1::Model::getSolverOptionalArgument
algorithms::OptionalArgumentPtr getSolverOptionalArgument(size_t index)
Definition: neural_networks_training_model.h:586
daal::algorithms::neural_networks::training::interface1::Parameter
Class representing the parameters of neural network.
Definition: neural_networks_training_model.h:59
daal::services::daal_malloc
DAAL_EXPORT void * daal_malloc(size_t size, size_t alignment=DAAL_MALLOC_DEFAULT_ALIGNMENT)
daal::algorithms::association_rules::data
Definition: apriori_types.h:81
daal::algorithms::neural_networks::training::interface1::Parameter::Parameter
Parameter(const services::SharedPtr< optimization_solver::iterative_solver::Batch > &optimizationSolver_=services::SharedPtr< optimization_solver::iterative_solver::Batch >(), engines::EnginePtr engine_=engines::mt19937::Batch< DAAL_ALGORITHM_FP_TYPE >::create())
Definition: neural_networks_training_model.h:67
daal::algorithms::neural_networks::training::interface1::Model::getForwardLayer
const layers::forward::LayerIfacePtr getForwardLayer(size_t index) const
Definition: neural_networks_training_model.h:182
daal::algorithms::neural_networks::training::interface1::Model::~Model
virtual ~Model()
Destructor.
Definition: neural_networks_training_model.h:101
daal::services::daal_free
DAAL_EXPORT void daal_free(void *ptr)
daal::algorithms::neural_networks::training::interface1::Model::getSolverOptionalArgumentCollection
data_management::DataCollection getSolverOptionalArgumentCollection()
Definition: neural_networks_training_model.h:608
daal::algorithms::neural_networks::training::interface1::Model::Model
Model(const Model &model)
Copy constructor.
Definition: neural_networks_training_model.h:94
daal::algorithms::neural_networks::training::interface1::Model::getPredictionModel
const prediction::ModelPtr getPredictionModel()
Definition: neural_networks_training_model.h:211
daal::algorithms::neural_networks::training::interface1::Topology::size
size_t size() const
Definition: neural_networks_training_topology.h:62
daal::algorithms::neural_networks::training::model
Definition: neural_networks_training_result.h:52
daal::algorithms::neural_networks::training::interface1::Model::getWeightsAndBiasesStorageStatus
bool getWeightsAndBiasesStorageStatus() const
Definition: neural_networks_training_model.h:242
daal::algorithms::interface1::Model
The base class for the classes that represent the models, such as linear_regression::Model or svm::Model.
Definition: model.h:52

For more complete information about compiler optimizations, see our Optimization Notice.