C++ API Reference for Intel® Data Analytics Acceleration Library 2019 Update 5

relu_layer_backward_types.h
/* file: relu_layer_backward_types.h */
/*******************************************************************************
* Copyright 2014-2019 Intel Corporation.
*
* This software and the related documents are Intel copyrighted materials, and
* your use of them is governed by the express license under which they were
* provided to you (License). Unless the License provides otherwise, you may not
* use, modify, copy, publish, distribute, disclose or transmit this software or
* the related documents without Intel's prior written permission.
*
* This software and the related documents are provided as is, with no express
* or implied warranties, other than those that are expressly stated in the
* License.
*******************************************************************************/

/*
//++
// Implementation of the backward rectifier linear unit (relu) layer.
//--
*/

#ifndef __RELU_LAYER_BACKWARD_TYPES_H__
#define __RELU_LAYER_BACKWARD_TYPES_H__

#include "algorithms/algorithm.h"
#include "data_management/data/tensor.h"
#include "data_management/data/homogen_tensor.h"
#include "services/daal_defines.h"
#include "algorithms/neural_networks/layers/layer_backward_types.h"
#include "algorithms/neural_networks/layers/relu/relu_layer_types.h"

namespace daal
{
namespace algorithms
{
namespace neural_networks
{
namespace layers
{
namespace relu
{
namespace backward
{
namespace interface1
{
/** Input objects for the backward relu layer */
class DAAL_EXPORT Input : public layers::backward::Input
{
public:
    typedef layers::backward::Input super;

    /** Default constructor */
    Input();

    /** Copy constructor */
    Input(const Input& other);

    virtual ~Input() {}

    using layers::backward::Input::get;

    using layers::backward::Input::set;

    /** Returns an input tensor of the backward relu layer */
    data_management::TensorPtr get(LayerDataId id) const;

    /** Sets an input tensor of the backward relu layer */
    void set(LayerDataId id, const data_management::TensorPtr &value);

    /** Checks an input object of the backward relu layer */
    services::Status check(const daal::algorithms::Parameter *par, int method) const DAAL_C11_OVERRIDE;
};

/** Provides methods to access the result obtained with the compute() method of the backward relu layer */
class DAAL_EXPORT Result : public layers::backward::Result
{
public:
    DECLARE_SERIALIZABLE_CAST(Result);

    /** Default constructor */
    Result();

    virtual ~Result() {};

    using layers::backward::Result::get;

    using layers::backward::Result::set;

    /** Checks the result of the backward relu layer */
    services::Status check(const daal::algorithms::Input *input, const daal::algorithms::Parameter *par, int method) const DAAL_C11_OVERRIDE;

    /** Allocates memory to store the result of the backward relu layer */
    template <typename algorithmFPType>
    DAAL_EXPORT services::Status allocate(const daal::algorithms::Input *input, const daal::algorithms::Parameter *parameter, const int method);

protected:
    /** Serializes or deserializes the object */
    template<typename Archive, bool onDeserialize>
    services::Status serialImpl(Archive *arch)
    {
        return daal::algorithms::Result::serialImpl<Archive, onDeserialize>(arch);
    }
};
typedef services::SharedPtr<Result> ResultPtr;

} // namespace interface1
using interface1::Input;
using interface1::Result;
using interface1::ResultPtr;
} // namespace backward

} // namespace relu
} // namespace layers
} // namespace neural_networks
} // namespace algorithms
} // namespace daal
#endif
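
Usage sketch (not part of the header above): the fragment below illustrates how the Input and Result classes declared in this file are typically exercised in standalone batch mode. It is a minimal sketch, assuming the forward and backward batch algorithm classes relu::forward::Batch<> and relu::backward::Batch<> and the identifiers layers::forward::data, layers::forward::resultForBackward, layers::backward::inputGradient, layers::backward::inputFromForward, and layers::backward::gradient declared in the companion layer headers, as well as the relu::auxData identifier defined in relu_layer_types.h. The tensors dataTensor and gradientTensor are hypothetical placeholders for data prepared elsewhere.

#include "daal.h"

using namespace daal::algorithms::neural_networks;
using namespace daal::data_management;

/* dataTensor and gradientTensor are hypothetical tensors prepared elsewhere
   (for example, read from a data source or produced by the next layer). */
void reluForwardBackward(const TensorPtr &dataTensor, const TensorPtr &gradientTensor)
{
    /* Forward relu layer: retains its input data for the backward step */
    layers::relu::forward::Batch<> reluForward;
    reluForward.input.set(layers::forward::data, dataTensor);
    reluForward.compute();
    layers::forward::ResultPtr forwardResult = reluForward.getResult();

    /* Backward relu layer: uses the Input class declared in this header */
    layers::relu::backward::Batch<> reluBackward;
    reluBackward.input.set(layers::backward::inputGradient, gradientTensor);
    reluBackward.input.set(layers::backward::inputFromForward,
                           forwardResult->get(layers::forward::resultForBackward));
    reluBackward.compute();

    /* The relu-specific getter declared above reads the data stored by the forward layer */
    TensorPtr auxData = reluBackward.input.get(layers::relu::auxData);

    /* Gradient with respect to the layer input, accessed through the Result class */
    layers::backward::ResultPtr backwardResult = reluBackward.getResult();
    TensorPtr gradient = backwardResult->get(layers::backward::gradient);
}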