Python* API Reference for Intel® Data Analytics Acceleration Library 2019 Update 5

adagrad_opt_res_dense_batch.py

Deprecation Notice: With the introduction of daal4py, a package that supersedes PyDAAL, Intel is deprecating PyDAAL and will discontinue support starting with Intel® DAAL 2021 and Intel® Distribution for Python 2021. Until then, Intel will continue to provide compatible PyDAAL pip and conda packages for newer releases of Intel DAAL and will make them available in open source. However, Intel will not add new Intel DAAL features to PyDAAL. Intel recommends that developers switch to daal4py.

Note: To find daal4py examples, refer to the daal4py documentation or browse the daal4py GitHub repository.
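For comparison, here is a minimal daal4py sketch of the same single-run workflow. It is modeled on the optimization-solver examples shipped with daal4py; the names optimization_solver_mse, optimization_solver_adagrad, setup, and the result fields minimum and nIterations are taken from those examples and should be verified against the daal4py version you use.

import numpy as np
import daal4py as d4p

# Load the same mse.csv data set: 3 feature columns plus 1 dependent-variable column
raw = np.loadtxt('../data/batch/mse.csv', delimiter=',', ndmin=2)
data, dep_data = raw[:, :3], raw[:, 3:4]

# Configure the MSE objective over all rows of the data set
mse_algo = d4p.optimization_solver_mse(data.shape[0])
mse_algo.setup(data, dep_data)

# Configure the AdaGrad solver; learningRate is passed as a 1x1 table
adagrad_algo = d4p.optimization_solver_adagrad(mse_algo,
                                               learningRate=np.array([[1.0]], dtype=np.float64),
                                               nIterations=1000,
                                               accuracyThreshold=1e-7,
                                               batchSize=1)

# Run the solver from the same start point as the PyDAAL example below
start_point = np.array([[8], [2], [1], [4]], dtype=np.float64)
res = adagrad_algo.compute(start_point)
print(res.minimum)       # the computed minimum
print(res.nIterations)   # number of iterations actually performed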

# file: adagrad_opt_res_dense_batch.py
#===============================================================================
# Copyright 2014-2019 Intel Corporation.
#
# This software and the related documents are Intel copyrighted materials, and
# your use of them is governed by the express license under which they were
# provided to you (License). Unless the License provides otherwise, you may not
# use, modify, copy, publish, distribute, disclose or transmit this software or
# the related documents without Intel's prior written permission.
#
# This software and the related documents are provided as is, with no express
# or implied warranties, other than those that are expressly stated in the
# License.
#===============================================================================

#
# ! Content:
# ! Python example of the Adagrad algorithm
# !*****************************************************************************

#
## <a name="DAAL-EXAMPLE-PY-ADAGRAD_OPT_RES_DENSE_BATCH"></a>
## \example adagrad_opt_res_dense_batch.py
#

import os
import sys

import numpy as np

import daal.algorithms.optimization_solver as optimization_solver
import daal.algorithms.optimization_solver.mse
import daal.algorithms.optimization_solver.adagrad
import daal.algorithms.optimization_solver.iterative_solver
from daal.data_management import (
    DataSourceIface, FileDataSource, HomogenNumericTable, MergedNumericTable, NumericTableIface
)

utils_folder = os.path.realpath(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
if utils_folder not in sys.path:
    sys.path.insert(0, utils_folder)
from utils import printNumericTable

datasetFileName = os.path.join('..', 'data', 'batch', 'mse.csv')

nFeatures = 3
accuracyThreshold = 0.0000001
halfNIterations = 500
nIterations = halfNIterations * 2
batchSize = 1
learningRate = 1.0

startPoint = np.array([[8], [2], [1], [4]], dtype=np.float64)

if __name__ == "__main__":

    # Initialize FileDataSource<CSVFeatureManager> to retrieve the input data from a .csv file
    dataSource = FileDataSource(datasetFileName,
                                DataSourceIface.notAllocateNumericTable,
                                DataSourceIface.doDictionaryFromContext)

    # Create numeric tables for the data and the dependent variable values
    data = HomogenNumericTable(nFeatures, 0, NumericTableIface.doNotAllocate)
    dependentVariables = HomogenNumericTable(1, 0, NumericTableIface.doNotAllocate)
    mergedData = MergedNumericTable(data, dependentVariables)

    # Retrieve the data from the input file
    dataSource.loadDataBlock(mergedData)

    nVectors = data.getNumberOfRows()

    # Create the MSE objective function to be minimized
    mseObjectiveFunction = optimization_solver.mse.Batch(nVectors)
    mseObjectiveFunction.input.set(optimization_solver.mse.data, data)
    mseObjectiveFunction.input.set(optimization_solver.mse.dependentVariables, dependentVariables)

    # Create objects to compute the Adagrad result using the default method
    adagradAlgorithm = optimization_solver.adagrad.Batch(mseObjectiveFunction)

    # Set input objects and parameters for the Adagrad algorithm
    adagradAlgorithm.input.setInput(optimization_solver.iterative_solver.inputArgument, HomogenNumericTable(startPoint))
    adagradAlgorithm.parameter.learningRate = HomogenNumericTable(1, 1, NumericTableIface.doAllocate, learningRate)
    adagradAlgorithm.parameter.nIterations = halfNIterations
    adagradAlgorithm.parameter.accuracyThreshold = accuracyThreshold
    adagradAlgorithm.parameter.batchSize = batchSize
    # Request the optional result so the solver state can be fed back in later
    adagradAlgorithm.parameter.optionalResultRequired = True

    # Compute the Adagrad result (first halfNIterations iterations)
    # Result class from daal.algorithms.optimization_solver.iterative_solver
    res = adagradAlgorithm.compute()

    # Print the computed Adagrad result
    printNumericTable(res.getResult(optimization_solver.iterative_solver.minimum), "Minimum after first compute():")
    printNumericTable(res.getResult(optimization_solver.iterative_solver.nIterations), "Number of iterations performed:")
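
    # Feed the previous minimum back in as the new start point, together with the
    # optional result (for AdaGrad, the solver's accumulated internal state), so
    # that the second compute() resumes the search rather than restarting it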
    adagradAlgorithm.input.setInput(optimization_solver.iterative_solver.inputArgument, res.getResult(optimization_solver.iterative_solver.minimum))
    adagradAlgorithm.input.setInput(optimization_solver.iterative_solver.optionalArgument, res.getResult(optimization_solver.iterative_solver.optionalResult))

    res = adagradAlgorithm.compute()

    printNumericTable(res.getResult(optimization_solver.iterative_solver.minimum), "Minimum after second compute():")
    printNumericTable(res.getResult(optimization_solver.iterative_solver.nIterations), "Number of iterations performed:")
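
The iteration budget is deliberately split into two halves of halfNIterations each: the state saved by the first compute() lets the second one continue the search where the first stopped, so the two-phase run is intended to be equivalent to a single run of nIterations iterations.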
