Python* API Reference for Intel® Data Analytics Acceleration Library 2019

lrn_layer_dense_batch.py

# file: lrn_layer_dense_batch.py
#===============================================================================
# Copyright 2014-2018 Intel Corporation.
#
# This software and the related documents are Intel copyrighted materials, and
# your use of them is governed by the express license under which they were
# provided to you (License). Unless the License provides otherwise, you may not
# use, modify, copy, publish, distribute, disclose or transmit this software or
# the related documents without Intel's prior written permission.
#
# This software and the related documents are provided as is, with no express
# or implied warranties, other than those that are expressly stated in the
# License.
#===============================================================================

#
# !  Content:
# !    Python example of forward and backward local response normalization (LRN) layer usage
# !
# !*****************************************************************************

#
## <a name="DAAL-EXAMPLE-PY-LRN_LAYER_BATCH"></a>
## \example lrn_layer_dense_batch.py
#

import os
import sys

from daal.algorithms.neural_networks import layers
from daal.algorithms.neural_networks.layers import lrn
from daal.data_management import HomogenTensor, TensorIface

utils_folder = os.path.realpath(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
if utils_folder not in sys.path:
    sys.path.insert(0, utils_folder)
from utils import printTensor, readTensorFromCSV

# Input data set parameters
datasetName = os.path.join("..", "data", "batch", "layer.csv")

if __name__ == "__main__":

    # Read the data set from the CSV file and create a tensor to store the input data
    tensorData = readTensorFromCSV(datasetName)

    # Create an algorithm to compute forward local response normalization layer results using the default method
    forwardLRNlayer = lrn.forward.Batch()

    # Set input objects for the forward local response normalization layer
    forwardLRNlayer.input.setInput(layers.forward.data, tensorData)

    # Compute forward local response normalization layer results
    forwardResult = forwardLRNlayer.compute()

    # Print the results of the forward local response normalization layer
    printTensor(tensorData, "LRN layer input (first 5 rows):", 5)
    printTensor(forwardResult.getResult(layers.forward.value), "LRN layer result (first 5 rows):", 5)
    printTensor(forwardResult.getLayerData(layers.lrn.auxSmBeta), "LRN layer auxSmBeta (first 5 rows):", 5)

    # Get the size of the forward local response normalization layer output
    gDims = forwardResult.getResult(layers.forward.value).getDimensions()
    # Create a tensor of that size, filled with the constant 0.01, to serve as the input gradient
    tensorDataBack = HomogenTensor(gDims, TensorIface.doAllocate, 0.01)

    # Create an algorithm to compute backward local response normalization layer results using the default method
    backwardLRNlayer = lrn.backward.Batch()

    # Set input objects for the backward local response normalization layer
    backwardLRNlayer.input.setInput(layers.backward.inputGradient, tensorDataBack)
    backwardLRNlayer.input.setInputLayerData(layers.backward.inputFromForward,
                                             forwardResult.getResultLayerData(layers.forward.resultForBackward))

    # Compute backward local response normalization layer results
    backwardResult = backwardLRNlayer.compute()

    # Print the results of the backward local response normalization layer
    printTensor(backwardResult.getResult(layers.backward.gradient), "LRN layer backpropagation result (first 5 rows):", 5)
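The forward and backward layers in this example are constructed with the library's default LRN settings. Non-default settings can be supplied through the layer's parameter object before calling compute(). The sketch below is not part of the example above; it assumes the Python binding exposes the same fields as the C++ lrn::Parameter class (kappa, alpha, beta, nAdjust), so verify the attribute names against the lrn module in your installation.

from daal.algorithms.neural_networks.layers import lrn

# Minimal sketch, assuming the Python binding mirrors the C++ lrn::Parameter fields;
# the attribute names kappa, alpha, beta, and nAdjust should be verified for your build.
forwardLRNlayer = lrn.forward.Batch()
forwardLRNlayer.parameter.kappa = 1.0     # additive constant in the normalization denominator
forwardLRNlayer.parameter.alpha = 1.0e-4  # scale coefficient
forwardLRNlayer.parameter.beta = 0.75     # exponent
forwardLRNlayer.parameter.nAdjust = 5     # number of adjacent values in the normalization window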

For more complete information about compiler optimizations, see our Optimization Notice.