C++ API Reference for Intel® Data Analytics Acceleration Library 2018 Update 1

neural_net_predict_dense_batch.cpp

/* file: neural_net_predict_dense_batch.cpp */
/*******************************************************************************
* Copyright 2014-2017 Intel Corporation
* All Rights Reserved.
*
* If this software was obtained under the Intel Simplified Software License,
* the following terms apply:
*
* The source code, information and material ("Material") contained herein is
* owned by Intel Corporation or its suppliers or licensors, and title to such
* Material remains with Intel Corporation or its suppliers or licensors. The
* Material contains proprietary information of Intel or its suppliers and
* licensors. The Material is protected by worldwide copyright laws and treaty
* provisions. No part of the Material may be used, copied, reproduced,
* modified, published, uploaded, posted, transmitted, distributed or disclosed
* in any way without Intel's prior express written permission. No license under
* any patent, copyright or other intellectual property rights in the Material
* is granted to or conferred upon you, either expressly, by implication,
* inducement, estoppel or otherwise. Any license under such intellectual
* property rights must be express and approved by Intel in writing.
*
* Unless otherwise agreed by Intel in writing, you may not remove or alter this
* notice or any other notice embedded in Materials by Intel or Intel's
* suppliers or licensors in any way.
*
*
* If this software was obtained under the Apache License, Version 2.0 (the
* "License"), the following terms apply:
*
* You may not use this file except in compliance with the License. You may
* obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
*
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
/*
! Content:
! C++ example of neural network scoring
!******************************************************************************/
#include "daal.h"
#include "service.h"
#include "neural_net_predict_dense_batch.h"
using namespace std;
using namespace daal;
using namespace daal::algorithms;
using namespace daal::algorithms::neural_networks;
using namespace daal::services;
/* Input data set parameters */
string testDatasetFile = "../data/batch/neural_network_test.csv";
string testGroundTruthFile = "../data/batch/neural_network_test_ground_truth.csv";
/* Weights and biases obtained on the training stage */
string fc1WeightsFile = "../data/batch/fc1_weights.csv";
string fc1BiasesFile = "../data/batch/fc1_biases.csv";
string fc2WeightsFile = "../data/batch/fc2_weights.csv";
string fc2BiasesFile = "../data/batch/fc2_biases.csv";
TensorPtr predictionData;
prediction::ModelPtr predictionModel;
prediction::ResultPtr predictionResult;
void createModel();
void testModel();
void printResults();
int main()
{
    createModel();

    testModel();

    printResults();

    return 0;
}
void createModel()
{
    /* Read the testing data set from a .csv file and create a tensor to store the input data */
    predictionData = readTensorFromCSV(testDatasetFile);

    /* Configure the neural network */
    LayerIds ids;
    prediction::TopologyPtr topology = configureNet(&ids);

    /* Create the prediction model of the neural network */
    predictionModel = prediction::Model::create(*topology);
    checkPtr(predictionModel.get());

    /* Read the 1st fully-connected layer weights and biases from CSV files */
    /* 1st fully-connected layer weights are a 2D tensor of size 5 x 20 */
    TensorPtr fc1Weights = readTensorFromCSV(fc1WeightsFile);

    /* 1st fully-connected layer biases are a 1D tensor of size 5 */
    TensorPtr fc1Biases = readTensorFromCSV(fc1BiasesFile);

    /* Set the weights and biases of the 1st fully-connected layer */
    forward::Input *fc1Input = predictionModel->getLayer(ids.fc1)->getLayerInput();
    fc1Input->set(forward::weights, fc1Weights);
    fc1Input->set(forward::biases, fc1Biases);

    /* Set the flag that specifies that the weights and biases of the 1st fully-connected layer are initialized */
    predictionModel->getLayer(ids.fc1)->getLayerParameter()->weightsAndBiasesInitialized = true;

    /* Read the 2nd fully-connected layer weights and biases from CSV files */
    /* 2nd fully-connected layer weights are a 2D tensor of size 2 x 5 */
    TensorPtr fc2Weights = readTensorFromCSV(fc2WeightsFile);

    /* 2nd fully-connected layer biases are a 1D tensor of size 2 */
    TensorPtr fc2Biases = readTensorFromCSV(fc2BiasesFile);

    /* Set the weights and biases of the 2nd fully-connected layer */
    forward::Input *fc2Input = predictionModel->getLayer(ids.fc2)->getLayerInput();
    fc2Input->set(forward::weights, fc2Weights);
    fc2Input->set(forward::biases, fc2Biases);

    /* Set the flag that specifies that the weights and biases of the 2nd fully-connected layer are initialized */
    predictionModel->getLayer(ids.fc2)->getLayerParameter()->weightsAndBiasesInitialized = true;
}
void testModel()
{
    /* Create an algorithm to compute the neural network predictions */
    prediction::Batch<> net;

    /* Set parameters for the prediction neural network */
    net.parameter.batchSize = predictionData->getDimensionSize(0);

    /* Set input objects for the prediction neural network */
    net.input.set(prediction::model, predictionModel);
    net.input.set(prediction::data, predictionData);

    /* Run the neural network prediction */
    net.compute();

    /* Get the results of the neural network prediction */
    predictionResult = net.getResult();
}
void printResults()
{
    /* Read the testing ground truth from a .csv file and create a tensor to store the data */
    TensorPtr predictionGroundTruth = readTensorFromCSV(testGroundTruthFile);

    printTensors<int, float>(predictionGroundTruth, predictionResult->get(prediction::prediction),
                             "Ground truth", "Neural network predictions: each class probability",
                             "Neural network classification results (first 20 observations):", 20);
}
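
The network topology comes from configureNet(), defined in the companion header neural_net_predict_dense_batch.h (included above but not reproduced on this page). Below is a minimal sketch of what such a header could contain, reconstructed from this listing alone: the comments above imply a 5-output and a 2-output fully-connected layer, and a softmax stage is a natural way to produce the class probabilities that printResults() reports. The LayerIds members fc1 and fc2 are taken from the code above; everything else here is an assumption, not the shipped header.

/* Hypothetical sketch of neural_net_predict_dense_batch.h (not the shipped header):
   two forward fully-connected layers followed by softmax, matching the sizes
   referenced in createModel() above */
#include "daal.h"

using namespace daal;
using namespace daal::algorithms::neural_networks;
using namespace daal::algorithms::neural_networks::layers;
using namespace daal::services;

/* Indices of the layers inside the topology; fc1 and fc2 are used by createModel() */
struct LayerIds
{
    size_t fc1;
    size_t fc2;
    size_t sm;
};

prediction::TopologyPtr configureNet(LayerIds *ids = NULL)
{
    /* Forward layers only: this topology is used for scoring, not training */
    SharedPtr<forward::LayerIface> fullyConnectedLayer1(new fullyconnected::forward::Batch<>(5));
    SharedPtr<forward::LayerIface> fullyConnectedLayer2(new fullyconnected::forward::Batch<>(2));
    SharedPtr<forward::LayerIface> softmaxLayer(new softmax::forward::Batch<>());

    /* Chain the layers: fc1 -> fc2 -> softmax */
    prediction::TopologyPtr topology(new prediction::Topology());
    const size_t fc1 = topology->add(fullyConnectedLayer1);
    const size_t fc2 = topology->add(fullyConnectedLayer2);
    const size_t sm  = topology->add(softmaxLayer);
    topology->get(fc1).addNext(fc2);
    topology->get(fc2).addNext(sm);

    /* Report the layer indices so the caller can inject pre-trained weights and biases */
    if (ids)
    {
        ids->fc1 = fc1;
        ids->fc2 = fc2;
        ids->sm  = sm;
    }
    return topology;
}

Only the fc1 and fc2 indices matter to this example, since those are the layers whose pre-trained weights and biases createModel() injects before scoring.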

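printTensors() is a convenience helper from service.h. If you would rather consume the probabilities programmatically, the result tensor can be read directly through DAAL's subtensor interface. The following is a hypothetical snippet, not part of the shipped example; the tensor shape (observations by classes) and the 20-row limit follow the listing above, while the helper name is illustrative.

/* Hypothetical helper (not part of the shipped example): reads the raw class
   probabilities out of the prediction result tensor */
#include <iostream>

void dumpProbabilities(const TensorPtr &probabilities, size_t maxRows)
{
    const size_t nRows    = probabilities->getDimensionSize(0);
    const size_t nClasses = probabilities->getDimensionSize(1);

    /* Lock a read-only view over all rows of the 2D result tensor */
    data_management::SubtensorDescriptor<float> block;
    probabilities->getSubtensor(0, 0, 0, nRows, data_management::readOnly, block);
    const float *p = block.getPtr();

    for (size_t i = 0; i < nRows && i < maxRows; i++)
    {
        std::cout << "observation " << i << ":";
        for (size_t j = 0; j < nClasses; j++)
        {
            /* Row-major layout: nClasses probabilities per observation */
            std::cout << " " << p[i * nClasses + j];
        }
        std::cout << std::endl;
    }
    probabilities->releaseSubtensor(block);
}

/* Usage, e.g. at the end of printResults():
   dumpProbabilities(predictionResult->get(prediction::prediction), 20); */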