Python* API Reference for Intel® Data Analytics Acceleration Library 2018 Update 1

df_cls_dense_batch.py

1 #===============================================================================
2 # Copyright 2014-2017 Intel Corporation
3 # All Rights Reserved.
4 #
5 # If this software was obtained under the Intel Simplified Software License,
6 # the following terms apply:
7 #
8 # The source code, information and material ("Material") contained herein is
9 # owned by Intel Corporation or its suppliers or licensors, and title to such
10 # Material remains with Intel Corporation or its suppliers or licensors. The
11 # Material contains proprietary information of Intel or its suppliers and
12 # licensors. The Material is protected by worldwide copyright laws and treaty
13 # provisions. No part of the Material may be used, copied, reproduced,
14 # modified, published, uploaded, posted, transmitted, distributed or disclosed
15 # in any way without Intel's prior express written permission. No license under
16 # any patent, copyright or other intellectual property rights in the Material
17 # is granted to or conferred upon you, either expressly, by implication,
18 # inducement, estoppel or otherwise. Any license under such intellectual
19 # property rights must be express and approved by Intel in writing.
20 #
21 # Unless otherwise agreed by Intel in writing, you may not remove or alter this
22 # notice or any other notice embedded in Materials by Intel or Intel's
23 # suppliers or licensors in any way.
24 #
25 #
26 # If this software was obtained under the Apache License, Version 2.0 (the
27 # "License"), the following terms apply:
28 #
29 # You may not use this file except in compliance with the License. You may
30 # obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
31 #
32 #
33 # Unless required by applicable law or agreed to in writing, software
34 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
35 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
36 #
37 # See the License for the specific language governing permissions and
38 # limitations under the License.
39 #===============================================================================
40 
41 ## <a name="DAAL-EXAMPLE-PY-DF_CLS_DENSE_BATCH"></a>
42 ## \example df_cls_dense_batch.py
43 
import os
import sys

from daal.algorithms import decision_forest
from daal.algorithms.decision_forest.classification import prediction, training
from daal.algorithms import classifier
from daal.data_management import (
    FileDataSource, DataSourceIface, NumericTableIface, HomogenNumericTable,
    MergedNumericTable, data_feature_utils
)

# Make the shared example 'utils' module (one directory up) importable
# regardless of the current working directory.
utils_folder = os.path.realpath(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
if utils_folder not in sys.path:
    sys.path.insert(0, utils_folder)
from utils import printNumericTable, printNumericTables

DAAL_PREFIX = os.path.join('..', 'data')

# Input data set parameters
trainDatasetFileName = os.path.join(DAAL_PREFIX, 'batch', 'df_classification_train.csv')
testDatasetFileName = os.path.join(DAAL_PREFIX, 'batch', 'df_classification_test.csv')

nFeatures = 3  # number of feature columns in each data set
nClasses = 5   # number of distinct class labels

# Decision forest parameters
nTrees = 10
minObservationsInLeafNode = 8

# Shared state: populated by trainModel()/testModel(), read by printResults()
model = None
predictionResult = None
testGroundTruth = None
78 
def trainModel():
    """Train the decision forest classification model.

    Reads the training data set from ``trainDatasetFileName``, marks the
    feature types in the data dictionary, trains the model, stores it in the
    module-level ``model`` global for use by testModel(), and prints the
    variable-importance and out-of-bag-error tables.
    """
    global model

    # Initialize FileDataSource<CSVFeatureManager> to retrieve the input data
    # from a .csv file
    trainDataSource = FileDataSource(
        trainDatasetFileName,
        DataSourceIface.notAllocateNumericTable,
        DataSourceIface.doDictionaryFromContext
    )

    # Create Numeric Tables for training data and labels
    trainData = HomogenNumericTable(nFeatures, 0, NumericTableIface.notAllocate)
    trainGroundTruth = HomogenNumericTable(1, 0, NumericTableIface.notAllocate)
    mergedData = MergedNumericTable(trainData, trainGroundTruth)

    # Retrieve the data from the input file
    trainDataSource.loadDataBlock(mergedData)

    # Get the dictionary and update it with additional information about data.
    # (Renamed from 'dict' to avoid shadowing the builtin.)
    featureDict = trainData.getDictionary()

    # Add feature types to the dictionary: the first two features are
    # continuous, the third is categorical
    featureDict[0].featureType = data_feature_utils.DAAL_CONTINUOUS
    featureDict[1].featureType = data_feature_utils.DAAL_CONTINUOUS
    featureDict[2].featureType = data_feature_utils.DAAL_CATEGORICAL

    # Create an algorithm object to train the decision forest classification model
    algorithm = training.Batch(nClasses)
    algorithm.parameter.nTrees = nTrees
    algorithm.parameter.minObservationsInLeafNode = minObservationsInLeafNode
    algorithm.parameter.featuresPerNode = nFeatures
    algorithm.parameter.varImportance = decision_forest.training.MDI
    algorithm.parameter.resultsToCompute = decision_forest.training.computeOutOfBagError

    # Pass the training data set and dependent values to the algorithm
    algorithm.input.set(classifier.training.data, trainData)
    algorithm.input.set(classifier.training.labels, trainGroundTruth)

    # Train the model and retrieve the results of the training algorithm
    trainingResult = algorithm.compute()
    model = trainingResult.get(classifier.training.model)
    printNumericTable(trainingResult.getTable(training.variableImportance), "Variable importance results: ")
    printNumericTable(trainingResult.getTable(training.outOfBagError), "OOB error: ")
122 
def testModel():
    """Run prediction with the trained model on the test data set.

    Reads the test data set from ``testDatasetFileName``, marks the feature
    types in the data dictionary, and stores the ground-truth table and the
    prediction result in module-level globals for printResults().
    """
    global testGroundTruth, predictionResult

    # Initialize FileDataSource<CSVFeatureManager> to retrieve the test data
    # from a .csv file
    testDataSource = FileDataSource(
        testDatasetFileName,
        DataSourceIface.notAllocateNumericTable,
        DataSourceIface.doDictionaryFromContext
    )

    # Create Numeric Tables for testing data and labels
    testData = HomogenNumericTable(nFeatures, 0, NumericTableIface.notAllocate)
    testGroundTruth = HomogenNumericTable(1, 0, NumericTableIface.notAllocate)
    mergedData = MergedNumericTable(testData, testGroundTruth)

    # Retrieve the data from the input file
    testDataSource.loadDataBlock(mergedData)

    # Get the dictionary and update it with additional information about data.
    # (Renamed from 'dict' to avoid shadowing the builtin.)
    featureDict = testData.getDictionary()

    # Add feature types to the dictionary: the first two features are
    # continuous, the third is categorical (must match the training layout)
    featureDict[0].featureType = data_feature_utils.DAAL_CONTINUOUS
    featureDict[1].featureType = data_feature_utils.DAAL_CONTINUOUS
    featureDict[2].featureType = data_feature_utils.DAAL_CATEGORICAL

    # Create an algorithm object for decision forest classification prediction
    # with the default method
    algorithm = prediction.Batch(nClasses)

    # Pass the testing data set and trained model to the algorithm
    algorithm.input.setTable(classifier.prediction.data, testData)
    algorithm.input.setModel(classifier.prediction.model, model)

    # Compute prediction results and retrieve the algorithm results
    # (Result class from classifier.prediction)
    predictionResult = algorithm.compute()
159 
160 
def printResults():
    """Print the ground-truth labels beside the predicted labels.

    Shows the first 20 observations; fixes the duplicated word
    ("classification classification") in the original header string.
    """
    printNumericTables(
        testGroundTruth,
        predictionResult.get(classifier.prediction.prediction),
        "Ground truth", "Classification results",
        "decision forest classification results (first 20 observations):", 20
    )
169 
def main():
    """Run the full example: train the model, predict, and print results."""
    trainModel()
    testModel()
    printResults()


if __name__ == "__main__":
    main()

For more complete information about compiler optimizations, see our Optimization Notice.