Python* API Reference for Intel® Data Analytics Acceleration Library 2019 Update 4

neural_net_dense_batch.py

Deprecation Notice: With the introduction of daal4py, a package that supersedes PyDAAL, Intel is deprecating PyDAAL and will discontinue support starting with Intel® DAAL 2021 and Intel® Distribution for Python 2021. Until then Intel will continue to provide compatible pyDAAL pip and conda packages for newer releases of Intel DAAL and make it available in open source. However, Intel will not add the new features of Intel DAAL to pyDAAL. Intel recommends developers switch to and use daal4py.

Note: To find daal4py examples, refer to the daal4py documentation or browse its GitHub repository.

1 # file: neural_net_dense_batch.py
2 #===============================================================================
3 # Copyright 2014-2019 Intel Corporation.
4 #
5 # This software and the related documents are Intel copyrighted materials, and
6 # your use of them is governed by the express license under which they were
7 # provided to you (License). Unless the License provides otherwise, you may not
8 # use, modify, copy, publish, distribute, disclose or transmit this software or
9 # the related documents without Intel's prior written permission.
10 #
11 # This software and the related documents are provided as is, with no express
12 # or implied warranties, other than those that are expressly stated in the
13 # License.
14 #===============================================================================
15 
16 #
17 # ! Content:
18 # ! Python example of neural network training and scoring
19 # !*****************************************************************************
20 
21 #
22 ## <a name="DAAL-EXAMPLE-PY-NEURAL_NET_DENSE_BATCH"></a>
23 ## \example neural_net_dense_batch.py
24 #
25 
26 import os
27 import sys
28 
29 import numpy as np
30 
31 from daal.algorithms.neural_networks import initializers
32 from daal.algorithms.neural_networks import layers
33 from daal.algorithms import optimization_solver
34 from daal.algorithms.neural_networks import training, prediction
35 from daal.data_management import NumericTable, HomogenNumericTable
36 
37 utils_folder = os.path.realpath(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
38 if utils_folder not in sys.path:
39  sys.path.insert(0, utils_folder)
40 from utils import printTensors, readTensorFromCSV
41 
# Input data set parameters: all CSV inputs live in the same batch data folder
_DATA_DIR = os.path.join("..", "data", "batch")
trainDatasetFile = os.path.join(_DATA_DIR, "neural_network_train.csv")
trainGroundTruthFile = os.path.join(_DATA_DIR, "neural_network_train_ground_truth.csv")
testDatasetFile = os.path.join(_DATA_DIR, "neural_network_test.csv")
testGroundTruthFile = os.path.join(_DATA_DIR, "neural_network_test_ground_truth.csv")

# Positions of the layers inside the network topology
fc1 = 0
fc2 = 1
sm1 = 2

# Number of samples processed per optimization-solver step
batchSize = 10
53 
def configureNet():
    """Build and return the network topology: two fully-connected layers
    followed by a softmax cross-entropy loss layer, chained fc1 -> fc2 -> sm1.
    """
    # Hidden fully-connected layer with 5 outputs and uniform initializers
    hidden_fc = layers.fullyconnected.Batch(5)
    hidden_fc.parameter.weightsInitializer = initializers.uniform.Batch(-0.001, 0.001)
    hidden_fc.parameter.biasesInitializer = initializers.uniform.Batch(0, 0.5)

    # Output fully-connected layer with 2 outputs (one per class)
    output_fc = layers.fullyconnected.Batch(2)
    output_fc.parameter.weightsInitializer = initializers.uniform.Batch(0.5, 1)
    output_fc.parameter.biasesInitializer = initializers.uniform.Batch(0.5, 1)

    # Loss layer combining softmax activation with cross-entropy
    loss_layer = layers.loss.softmax_cross.Batch()

    # Register the layers in order, then wire the forward connections
    net_topology = training.Topology()
    for layer in (hidden_fc, output_fc, loss_layer):
        net_topology.push_back(layer)
    net_topology.get(fc1).addNext(fc2)
    net_topology.get(fc2).addNext(sm1)
    return net_topology
79 
80 
def trainModel():
    """Train the neural network on the training data set with an SGD solver
    and return the trained model converted for float32 prediction.
    """
    # Load training samples and their ground-truth labels from CSV
    trainingData = readTensorFromCSV(trainDatasetFile)
    trainingGroundTruth = readTensorFromCSV(trainGroundTruthFile, True)

    # Configure the SGD optimization solver: constant learning rate of 0.001,
    # one pass over the data split into mini-batches of batchSize samples
    solver = optimization_solver.sgd.Batch(fptype=np.float32)
    solver.parameter.learningRateSequence = HomogenNumericTable(
        1, 1, NumericTable.doAllocate, 0.001)
    solver.parameter.batchSize = batchSize
    solver.parameter.nIterations = int(
        trainingData.getDimensionSize(0) / batchSize)

    # Training algorithm driven by the solver
    net = training.Batch(solver)

    # The network is initialized for mini-batch-sized inputs
    sampleSize = trainingData.getDimensions()
    sampleSize[0] = batchSize
    net.initialize(sampleSize, configureNet())

    # Attach the data set and dependent values, then run training
    net.input.setInput(training.data, trainingData)
    net.input.setInput(training.groundTruth, trainingGroundTruth)
    trainedModel = net.compute().get(training.model)

    # Return the model in its prediction form
    return trainedModel.getPredictionModel_Float32()
113 
114 
def testModel(predictionModel):
    """Score the test data set with the trained model and return the raw
    prediction result object.
    """
    # Load test samples from CSV
    predictionData = readTensorFromCSV(testDatasetFile)

    # Prediction algorithm: process the whole test set as a single batch
    net = prediction.Batch()
    net.parameter.batchSize = predictionData.getDimensionSize(0)

    # Wire up the model and the input tensor
    net.input.setModelInput(prediction.model, predictionModel)
    net.input.setTensorInput(prediction.data, predictionData)

    # Run prediction and hand back its result
    return net.compute()
131 
132 
def printResults(predictionResult):
    """Print the first 20 test observations: ground truth next to the
    per-class probabilities the network predicted.
    """
    # Load the expected labels for the test set from CSV
    expected = readTensorFromCSV(testGroundTruthFile)
    predicted = predictionResult.getResult(prediction.prediction)
    printTensors(
        expected, predicted,
        "Ground truth", "Neural network predictions: each class probability",
        "Neural network classification results (first 20 observations):", 20)
140 
141 
# NOTE(review): removed the dead module-level `topology = ""` placeholder —
# nothing in this file read it (configureNet and trainModel use local names).
if __name__ == "__main__":
    # Train, score, and report: the full batch example pipeline
    predictionModel = trainModel()
    predictionResult = testModel(predictionModel)
    printResults(predictionResult)

For more complete information about compiler optimizations, see our Optimization Notice.