Python* API Reference for Intel® Data Analytics Acceleration Library 2018 Update 3

df_cls_dense_batch.py

# file: df_cls_dense_batch.py
#===============================================================================
# Copyright 2014-2018 Intel Corporation.
#
# This software and the related documents are Intel copyrighted materials, and
# your use of them is governed by the express license under which they were
# provided to you (License). Unless the License provides otherwise, you may not
# use, modify, copy, publish, distribute, disclose or transmit this software or
# the related documents without Intel's prior written permission.
#
# This software and the related documents are provided as is, with no express
# or implied warranties, other than those that are expressly stated in the
# License.
#===============================================================================

import os
import sys

from daal.algorithms import decision_forest
from daal.algorithms.decision_forest.classification import prediction, training
from daal.algorithms import classifier
from daal.data_management import (
    FileDataSource, DataSourceIface, NumericTableIface, HomogenNumericTable,
    MergedNumericTable, data_feature_utils
)

utils_folder = os.path.realpath(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
if utils_folder not in sys.path:
    sys.path.insert(0, utils_folder)
from utils import printNumericTable

DAAL_PREFIX = os.path.join('..', 'data')

# Input data set parameters
trainDatasetFileName = os.path.join(DAAL_PREFIX, 'batch', 'df_classification_train.csv')
testDatasetFileName = os.path.join(DAAL_PREFIX, 'batch', 'df_classification_test.csv')

nFeatures = 3
nClasses = 5

# Decision forest parameters
nTrees = 10
minObservationsInLeafNode = 8

# Model object for the decision forest classification algorithm
model = None
predictionResult = None
testGroundTruth = None


def trainModel():
    global model

    # Initialize FileDataSource<CSVFeatureManager> to retrieve the input data from a .csv file
    trainDataSource = FileDataSource(
        trainDatasetFileName,
        DataSourceIface.notAllocateNumericTable,
        DataSourceIface.doDictionaryFromContext
    )

    # Create Numeric Tables for training data and labels
    trainData = HomogenNumericTable(nFeatures, 0, NumericTableIface.notAllocate)
    trainGroundTruth = HomogenNumericTable(1, 0, NumericTableIface.notAllocate)
    mergedData = MergedNumericTable(trainData, trainGroundTruth)

    # Retrieve the data from the input file
    trainDataSource.loadDataBlock(mergedData)
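    # loadDataBlock() fills the merged table column-wise: the first nFeatures
    # columns of each CSV row land in trainData, and the final column (the
    # class label) lands in trainGroundTruth.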

    # Get the dictionary and update it with additional information about the data
    dictionary = trainData.getDictionary()

    # Add a feature type to the dictionary
    dictionary[0].featureType = data_feature_utils.DAAL_CONTINUOUS
    dictionary[1].featureType = data_feature_utils.DAAL_CONTINUOUS
    dictionary[2].featureType = data_feature_utils.DAAL_CATEGORICAL

    # Create an algorithm object to train the decision forest classification model
    algorithm = training.Batch(nClasses)
    algorithm.parameter.nTrees = nTrees
    algorithm.parameter.minObservationsInLeafNode = minObservationsInLeafNode
    algorithm.parameter.featuresPerNode = nFeatures
    algorithm.parameter.varImportance = decision_forest.training.MDI
    algorithm.parameter.resultsToCompute = decision_forest.training.computeOutOfBagError
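    # MDI (mean decrease of impurity) ranks features by how much their splits
    # reduce impurity across the forest; computeOutOfBagError requests an error
    # estimate over the samples each tree did not see during bootstrap sampling.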

    # Pass the training data set and dependent values to the algorithm
    algorithm.input.set(classifier.training.data, trainData)
    algorithm.input.set(classifier.training.labels, trainGroundTruth)

    # Train the decision forest classification model and retrieve the results of the training algorithm
    trainingResult = algorithm.compute()
    model = trainingResult.get(classifier.training.model)
    printNumericTable(trainingResult.getTable(training.variableImportance), "Variable importance results: ")
    printNumericTable(trainingResult.getTable(training.outOfBagError), "OOB error: ")


def testModel():
    global testGroundTruth, predictionResult

    # Initialize FileDataSource<CSVFeatureManager> to retrieve the test data from a .csv file
    testDataSource = FileDataSource(
        testDatasetFileName,
        DataSourceIface.notAllocateNumericTable,
        DataSourceIface.doDictionaryFromContext
    )

    # Create Numeric Tables for testing data and labels
    testData = HomogenNumericTable(nFeatures, 0, NumericTableIface.notAllocate)
    testGroundTruth = HomogenNumericTable(1, 0, NumericTableIface.notAllocate)
    mergedData = MergedNumericTable(testData, testGroundTruth)

    # Retrieve the data from the input file
    testDataSource.loadDataBlock(mergedData)

    # Get the dictionary and update it with additional information about the data
    dictionary = testData.getDictionary()

    # Add a feature type to the dictionary
    dictionary[0].featureType = data_feature_utils.DAAL_CONTINUOUS
    dictionary[1].featureType = data_feature_utils.DAAL_CONTINUOUS
    dictionary[2].featureType = data_feature_utils.DAAL_CATEGORICAL

    # Create an algorithm object for decision forest classification prediction with the default method
    algorithm = prediction.Batch(nClasses)

    # Pass the testing data set and trained model to the algorithm
    algorithm.input.setTable(classifier.prediction.data, testData)
    algorithm.input.setModel(classifier.prediction.model, model)

    # Compute prediction results and retrieve algorithm results
    # (Result class from classifier.prediction)
    predictionResult = algorithm.compute()


def printResults():
    printNumericTable(predictionResult.get(classifier.prediction.prediction), "Decision forest prediction results (first 10 rows):", 10)
    printNumericTable(testGroundTruth, "Ground truth (first 10 rows):", 10)


if __name__ == "__main__":
    trainModel()
    testModel()
    printResults()
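
The printed tables can also be pulled into NumPy for further processing. Below is a minimal sketch, assuming the BlockDescriptor/readOnly API from daal.data_management (the same mechanism the bundled utils helpers are built on); the helper name table_to_array is hypothetical:

import numpy as np
from daal.data_management import BlockDescriptor, readOnly

def table_to_array(table):
    # Acquire a read-only block that spans every row of the numeric table
    block = BlockDescriptor()
    table.getBlockOfRows(0, table.getNumberOfRows(), readOnly, block)
    array = np.array(block.getArray())  # copy out before releasing the block
    table.releaseBlockOfRows(block)
    return array

# For example, the predicted labels as a flat vector:
# labels = table_to_array(predictionResult.get(classifier.prediction.prediction)).ravel()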
