From cc5288ee5c99dcae91c381a793d2d64a5261449c Mon Sep 17 00:00:00 2001
From: Simon Pfreundschuh
Date: Tue, 18 Oct 2016 11:05:16 +0200
Subject: [PATCH] Made Real_t default floating point type for CPU
 implementation and removed template parameter from TrainCpu(...).

---
 tmva/tmva/inc/TMVA/DNN/Architectures/Cpu.h |  2 +-
 tmva/tmva/inc/TMVA/MethodDNN.h             |  1 -
 tmva/tmva/src/MethodDNN.cxx                | 21 ++++++++++-----------
 3 files changed, 11 insertions(+), 13 deletions(-)

diff --git a/tmva/tmva/inc/TMVA/DNN/Architectures/Cpu.h b/tmva/tmva/inc/TMVA/DNN/Architectures/Cpu.h
index 6cce3b68f6668..096cece022a6d 100644
--- a/tmva/tmva/inc/TMVA/DNN/Architectures/Cpu.h
+++ b/tmva/tmva/inc/TMVA/DNN/Architectures/Cpu.h
@@ -33,7 +33,7 @@ namespace DNN
  * for this architecture as well as the remaining functions in the low-level
  * interface in the form of static members.
  */
-template<typename AReal>
+template<typename AReal = Real_t>
 class TCpu
 {
 public:
diff --git a/tmva/tmva/inc/TMVA/MethodDNN.h b/tmva/tmva/inc/TMVA/MethodDNN.h
index 970fe203be38f..a0240d811b2b5 100644
--- a/tmva/tmva/inc/TMVA/MethodDNN.h
+++ b/tmva/tmva/inc/TMVA/MethodDNN.h
@@ -153,7 +153,6 @@ class MethodDNN : public MethodBase
                            TString tokenDelim);
    void Train();
    void TrainGpu();
-   template <typename AFloat>
    void TrainCpu();
 
    virtual Double_t GetMvaValue( Double_t* err=0, Double_t* errUpper=0 );
diff --git a/tmva/tmva/src/MethodDNN.cxx b/tmva/tmva/src/MethodDNN.cxx
index e23a3a269f6d1..0bf27ffc0703a 100644
--- a/tmva/tmva/src/MethodDNN.cxx
+++ b/tmva/tmva/src/MethodDNN.cxx
@@ -525,7 +525,7 @@ void TMVA::MethodDNN::Train()
       Log() << kFATAL << "OpenCL backend not yes supported." << Endl;
       return;
    } else if (fArchitectureString == "CPU") {
-      TrainCpu<Double_t>();
+      TrainCpu();
       if (!fExitFromTraining) fIPyMaxIter = fIPyCurrentIter;
       ExitFromTraining();
       return;
@@ -895,7 +895,6 @@ void TMVA::MethodDNN::TrainGpu()
 }
 
 //______________________________________________________________________________
-template <typename AFloat>
 void TMVA::MethodDNN::TrainCpu()
 {
 
@@ -919,7 +918,7 @@ void TMVA::MethodDNN::TrainCpu()
             << fTrainingSettings.size() << ":" << Endl;
       trainingPhase++;
 
-      TNet<TCpu<AFloat>> net(settings.batchSize, fNet);
+      TNet<TCpu<>> net(settings.batchSize, fNet);
       net.SetWeightDecay(settings.weightDecay);
       net.SetRegularization(settings.regularization);
       // Need to convert dropoutprobabilities to conventions used
@@ -933,7 +932,7 @@ void TMVA::MethodDNN::TrainCpu()
      net.InitializeGradients();
      auto testNet = net.CreateClone(settings.batchSize);
 
-      using DataLoader_t = TDataLoader<TMVAInput_t, TCpu<AFloat>>;
+      using DataLoader_t = TDataLoader<TMVAInput_t, TCpu<>>;
 
      size_t nThreads = 1;
      DataLoader_t trainingData(GetEventCollection(Types::kTraining),
@@ -946,12 +945,12 @@ void TMVA::MethodDNN::TrainCpu()
                               testNet.GetBatchSize(),
                               net.GetInputWidth(),
                               net.GetOutputWidth(), nThreads);
-      DNN::TGradientDescent<TCpu<AFloat>> minimizer(settings.learningRate,
+      DNN::TGradientDescent<TCpu<>> minimizer(settings.learningRate,
                                               settings.convergenceSteps,
                                               settings.testInterval);
 
-      std::vector<TNet<TCpu<AFloat>>> nets{};
-      std::vector<TBatch<TCpu<AFloat>>> batches{};
+      std::vector<TNet<TCpu<>>> nets{};
+      std::vector<TBatch<TCpu<>>> batches{};
      nets.reserve(nThreads);
      for (size_t i = 0; i < nThreads; i++) {
          nets.push_back(net);
@@ -959,9 +958,9 @@ void TMVA::MethodDNN::TrainCpu()
          {
             auto &masterLayer = net.GetLayer(j);
             auto &layer = nets.back().GetLayer(j);
-            TCpu<AFloat>::Copy(layer.GetWeights(),
+            TCpu<>::Copy(layer.GetWeights(),
                                masterLayer.GetWeights());
-            TCpu<AFloat>::Copy(layer.GetBiases(),
+            TCpu<>::Copy(layer.GetBiases(),
                                masterLayer.GetBiases());
          }
      }
@@ -1004,7 +1003,7 @@ void TMVA::MethodDNN::TrainCpu()
         if ((stepCount % minimizer.GetTestInterval()) == 0) {
 
            // Compute test error.
-           AFloat testError = 0.0;
+           Double_t testError = 0.0;
            for (auto batch : testData) {
               auto inputMatrix = batch.GetInput();
               auto outputMatrix = batch.GetOutput();
@@ -1015,7 +1014,7 @@ void TMVA::MethodDNN::TrainCpu()
            end = std::chrono::system_clock::now();
 
            // Compute training error.
-           AFloat trainingError = 0.0;
+           Double_t trainingError = 0.0;
            for (auto batch : trainingData) {
               auto inputMatrix = batch.GetInput();
               auto outputMatrix = batch.GetOutput();
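
Note (not part of the patch): with the default template argument on TCpu,
call sites no longer spell out the scalar type; TCpu<>, TNet<TCpu<>>,
TDataLoader<TMVAInput_t, TCpu<>> and so on all pick up Real_t. A minimal
standalone sketch of the mechanism follows; TCpu below is a stand-in for
TMVA::DNN::TCpu, and Real_t mimics the float typedef from ROOT's
RtypesCore.h. None of this is the actual TMVA code.

    #include <cstdio>
    #include <type_traits>

    typedef float Real_t;   // stand-in for ROOT's Real_t (RtypesCore.h)

    // Stand-in for TMVA::DNN::TCpu; the default argument added by this
    // patch is what makes the empty-bracket form TCpu<> well-formed.
    template <typename AReal = Real_t>
    struct TCpu {
       using Scalar_t = AReal;
    };

    int main()
    {
       // Before the patch, a call site had to fix the precision itself,
       // as the removed TrainCpu<Double_t>() instantiation did:
       TCpu<double>::Scalar_t before = 0.0;
       // After the patch, empty angle brackets select Real_t automatically:
       TCpu<>::Scalar_t after = 0.0f;
       static_assert(std::is_same<TCpu<>::Scalar_t, Real_t>::value,
                     "TCpu<> defaults to Real_t");
       std::printf("before: %zu bytes, after: %zu bytes\n",
                   sizeof(before), sizeof(after));
       return 0;
    }

Note also that the error accumulators in TrainCpu() move from the
template-dependent AFloat to a fixed Double_t, so test and training errors
are summed in double precision even when the backend scalar is a float.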