Python* API Reference for Intel® Data Analytics Acceleration Library 2018 Update 1

lrn_layer_dense_batch.py

# file: lrn_layer_dense_batch.py
#===============================================================================
# Copyright 2014-2017 Intel Corporation
# All Rights Reserved.
#
# If this software was obtained under the Intel Simplified Software License,
# the following terms apply:
#
# The source code, information and material ("Material") contained herein is
# owned by Intel Corporation or its suppliers or licensors, and title to such
# Material remains with Intel Corporation or its suppliers or licensors. The
# Material contains proprietary information of Intel or its suppliers and
# licensors. The Material is protected by worldwide copyright laws and treaty
# provisions. No part of the Material may be used, copied, reproduced,
# modified, published, uploaded, posted, transmitted, distributed or disclosed
# in any way without Intel's prior express written permission. No license under
# any patent, copyright or other intellectual property rights in the Material
# is granted to or conferred upon you, either expressly, by implication,
# inducement, estoppel or otherwise. Any license under such intellectual
# property rights must be express and approved by Intel in writing.
#
# Unless otherwise agreed by Intel in writing, you may not remove or alter this
# notice or any other notice embedded in Materials by Intel or Intel's
# suppliers or licensors in any way.
#
#
# If this software was obtained under the Apache License, Version 2.0 (the
# "License"), the following terms apply:
#
# You may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================

#
# ! Content:
# !     Python example of forward and backward local response normalization (lrn) layer usage
# !
# !*****************************************************************************

import os
import sys

from daal.algorithms.neural_networks import layers
from daal.algorithms.neural_networks.layers import lrn
from daal.data_management import HomogenTensor, TensorIface

utils_folder = os.path.realpath(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
if utils_folder not in sys.path:
    sys.path.insert(0, utils_folder)
from utils import printTensor, readTensorFromCSV

# Input data set parameters
datasetName = os.path.join("..", "data", "batch", "layer.csv")

if __name__ == "__main__":

    # Read the data set from a file and create a tensor to store the input data
    tensorData = readTensorFromCSV(datasetName)

    # Create an algorithm to compute forward local response normalization layer results using the default method
    forwardLRNlayer = lrn.forward.Batch()

    # Set input objects for the forward local response normalization layer
    forwardLRNlayer.input.setInput(layers.forward.data, tensorData)

    # Compute forward local response normalization layer results
    forwardResult = forwardLRNlayer.compute()

    # Print the results of the forward local response normalization layer
    printTensor(tensorData, "LRN layer input (first 5 rows):", 5)
    printTensor(forwardResult.getResult(layers.forward.value), "LRN layer result (first 5 rows):", 5)
    printTensor(forwardResult.getLayerData(lrn.auxSmBeta), "LRN layer auxSmBeta (first 5 rows):", 5)

    # Get the size of the forward local response normalization layer output
    gDims = forwardResult.getResult(layers.forward.value).getDimensions()
    tensorDataBack = HomogenTensor(gDims, TensorIface.doAllocate, 0.01)
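    # Note: in a full network, tensorDataBack would hold the gradient propagated
    # back from the next layer; here a tensor of the same size is simply filled
    # with the constant value 0.01 to demonstrate the backward step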

    # Create an algorithm to compute backward local response normalization layer results using the default method
    backwardLRNlayer = lrn.backward.Batch()

    # Set input objects for the backward local response normalization layer
    backwardLRNlayer.input.setInput(layers.backward.inputGradient, tensorDataBack)
    backwardLRNlayer.input.setInputLayerData(layers.backward.inputFromForward,
                                             forwardResult.getResultLayerData(layers.forward.resultForBackward))

    # Compute backward local response normalization layer results
    backwardResult = backwardLRNlayer.compute()

    # Print the results of the backward local response normalization layer
    printTensor(backwardResult.getResult(layers.backward.gradient), "LRN layer backpropagation result (first 5 rows):", 5)
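
The example relies on the layer's default hyper-parameters. If other values are needed, they can be set on the algorithm's parameter object before the inputs are set and compute() is called. The sketch below is a non-authoritative illustration: the field names kappa, alpha, beta, and nAdjust are taken from the C++ lrn::Parameter structure and are assumed to be exposed the same way by the Python binding. Local response normalization scales each input value as y_i = x_i * (kappa + alpha * sum of x_j^2 over a window of nAdjust adjacent elements)^(-beta), so the forward and backward layers should be configured with the same values.

    # Hypothetical parameter tuning sketch: field names are assumed from the C++
    # lrn::Parameter; verify them against your pydaal version before relying on them
    from daal.algorithms.neural_networks.layers import lrn

    forwardLRNlayer = lrn.forward.Batch()
    forwardLRNlayer.parameter.kappa = 2.0        # additive constant in the normalization term
    forwardLRNlayer.parameter.alpha = 1.0e-04    # scale applied to the local sum of squares
    forwardLRNlayer.parameter.beta = 0.75        # exponent of the normalization term
    forwardLRNlayer.parameter.nAdjust = 5        # number of adjacent elements in the local window

    # The backward layer must use the same values as the forward layer
    backwardLRNlayer = lrn.backward.Batch()
    backwardLRNlayer.parameter.kappa = 2.0
    backwardLRNlayer.parameter.alpha = 1.0e-04
    backwardLRNlayer.parameter.beta = 0.75
    backwardLRNlayer.parameter.nAdjust = 5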
