This repository has been archived by the owner on May 24, 2018. It is now read-only.

Commit fb03a11

Merge branch 'master' into HEAD

winstywang committed May 15, 2015
2 parents ed0573d + 0cb6ce1
Showing 11 changed files with 50 additions and 58 deletions.
36 changes: 0 additions & 36 deletions doc/debug_perf.md

This file was deleted.

4 changes: 2 additions & 2 deletions example/ImageNet/kaiming.conf
@@ -11,8 +11,8 @@ iter = imginst
 rand_crop=1
 rand_mirror=1
 min_crop_size=192
-max_crop_size=224
-max_aspect_ratio=0.3
+max_crop_size=256
+max_random_contrast=0.5
 iter = threadbuffer
 iter = end

4 changes: 2 additions & 2 deletions example/MNIST/MNIST_CONV.conf
@@ -23,7 +23,7 @@ layer[0->1] = conv:cv1
 nchannel = 32
 random_type = xavier
 no_bias=0
-layer[1->2] = max_pooling
+layer[1->2] = max_pooling:pool1
 kernel_size = 3
 stride = 2
 layer[2->3] = flatten
@@ -45,7 +45,7 @@ batch_size = 100
 
 ## global parameters
 dev = gpu
-save_model = 15
+save_model = 1
 max_round = 15
 num_round = 15
 train_eval = 1
1 change: 1 addition & 0 deletions src/cxxnet_main.cpp
@@ -514,6 +514,7 @@ class CXXNetLearnTask {
     CHECK(fi->Read(&net_type, sizeof(int)) != 0) << " invalid model file";
     net_trainer = this->CreateNet();
     net_trainer->CopyModelFrom(*fi);
+    start_counter = 1;
     delete fi;
   }
  private:
9 changes: 5 additions & 4 deletions src/layer/cudnn_pooling_layer-inl.hpp
@@ -8,17 +8,18 @@
 namespace cxxnet {
 namespace layer {
 
-template<typename Reducer, int mode, typename xpu>
+template<typename Reducer, int mode, typename xpu, bool is_saved_param>
 class CuDNNPoolingLayer : public PoolingLayer<Reducer, mode, xpu> {
  public:
   CuDNNPoolingLayer(){}
 };
 
 #ifdef __CUDACC__
-template<typename Reducer, int mode>
-class CuDNNPoolingLayer<Reducer, mode, gpu> : public PoolingLayer<Reducer, mode, gpu> {
+template<typename Reducer, int mode, bool is_saved_param>
+class CuDNNPoolingLayer<Reducer, mode, gpu, is_saved_param>
+    : public PoolingLayer<Reducer, mode, gpu, is_saved_param> {
  private:
-  typedef PoolingLayer<Reducer, mode, gpu> Parent;
+  typedef PoolingLayer<Reducer, mode, gpu, is_saved_param> Parent;
  public:
   CuDNNPoolingLayer(){}
 #if CXXNET_USE_CUDNN == 1
12 changes: 9 additions & 3 deletions src/layer/layer.h
@@ -312,6 +312,9 @@ const int kChConcat = 28;
 const int kPRelu = 29;
 const int kBatchNorm = 30;
 const int kFixConnect = 31;
+const int kNewMaxPooling = 32;
+const int kNewSumPooling = 33;
+const int kNewAvgPooling = 34;
 /*! \brief gap used to encode pairtest layer */
 const int kPairTestGap = 1024;
 /*! \brief use integer to encode layer types */
@@ -334,9 +337,12 @@ inline LayerType GetLayerType(const char *type) {
   if (!strcmp(type, "dropout")) return kDropout;
   if (!strcmp(type, "conv")) return kConv;
   if (!strcmp(type, "relu_max_pooling")) return kReluMaxPooling;
-  if (!strcmp(type, "max_pooling")) return kMaxPooling;
-  if (!strcmp(type, "sum_pooling")) return kSumPooling;
-  if (!strcmp(type, "avg_pooling")) return kAvgPooling;
+  if (!strcmp(type, "max_pooling")) return kNewMaxPooling;
+  if (!strcmp(type, "sum_pooling")) return kNewSumPooling;
+  if (!strcmp(type, "avg_pooling")) return kNewAvgPooling;
+  if (!strcmp(type, "max_pooling_old")) return kMaxPooling;
+  if (!strcmp(type, "sum_pooling_old")) return kSumPooling;
+  if (!strcmp(type, "avg_pooling_old")) return kAvgPooling;
   if (!strcmp(type, "lrn")) return kLRN;
   if (!strcmp(type, "concat")) return kConcat;
   if (!strcmp(type, "xelu")) return kXelu;
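
The net effect of this rename is that the plain pooling names in a config now resolve to the new type codes, while the *_old names keep the original codes for models serialized before this change. A minimal standalone sketch of that dispatch (hypothetical mirror code, not part of the commit; the legacy values 11-13 are inferred from the range check added in src/nnet/nnet_config.h below):

#include <cassert>
#include <cstring>

// Hypothetical mirror of the pooling cases in GetLayerType after this commit.
const int kMaxPooling = 11, kSumPooling = 12, kAvgPooling = 13;        // legacy codes (inferred)
const int kNewMaxPooling = 32, kNewSumPooling = 33, kNewAvgPooling = 34;

inline int GetPoolingType(const char *type) {
  if (!strcmp(type, "max_pooling")) return kNewMaxPooling;     // new default mapping
  if (!strcmp(type, "sum_pooling")) return kNewSumPooling;
  if (!strcmp(type, "avg_pooling")) return kNewAvgPooling;
  if (!strcmp(type, "max_pooling_old")) return kMaxPooling;    // legacy models keep old codes
  if (!strcmp(type, "sum_pooling_old")) return kSumPooling;
  if (!strcmp(type, "avg_pooling_old")) return kAvgPooling;
  return -1;  // unknown type
}

int main() {
  assert(GetPoolingType("max_pooling") == kNewMaxPooling);
  assert(GetPoolingType("max_pooling_old") == kMaxPooling);
  return 0;
}
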
11 changes: 7 additions & 4 deletions src/layer/layer_impl-inl.hpp
@@ -53,10 +53,13 @@ ILayer<xpu>* CreateLayer_(LayerType type,
   case kLRN: return new LRNLayer<xpu>();
   case kFlatten: return new FlattenLayer<xpu>();
   case kReluMaxPooling: return
-    new PoolingLayer<mshadow::red::maximum, false, xpu, false, op::relu, op::relu_grad>();
-  case kMaxPooling: return new CuDNNPoolingLayer<mshadow::red::maximum, kMaxPooling, xpu>();
-  case kSumPooling: return new PoolingLayer<mshadow::red::sum, kSumPooling, xpu>();
-  case kAvgPooling: return new CuDNNPoolingLayer<mshadow::red::sum, kAvgPooling, xpu>();
+    new PoolingLayer<mshadow::red::maximum, false, xpu, false, false, op::relu, op::relu_grad>();
+  case kMaxPooling: return new CuDNNPoolingLayer<mshadow::red::maximum, kMaxPooling, xpu, false>();
+  case kSumPooling: return new PoolingLayer<mshadow::red::sum, kSumPooling, xpu, false>();
+  case kAvgPooling: return new CuDNNPoolingLayer<mshadow::red::sum, kAvgPooling, xpu, false>();
+  case kNewMaxPooling: return new CuDNNPoolingLayer<mshadow::red::maximum, kMaxPooling, xpu, true>();
+  case kNewSumPooling: return new PoolingLayer<mshadow::red::sum, kSumPooling, xpu, true>();
+  case kNewAvgPooling: return new CuDNNPoolingLayer<mshadow::red::sum, kAvgPooling, xpu, true>();
   case kSoftmax: return new SoftmaxLayer<xpu>(label_info);
   case kConcat: return new ConcatLayer<xpu, 3>();
   case kChConcat: return new ConcatLayer<xpu, 1>();
2 changes: 1 addition & 1 deletion src/layer/param.h
@@ -70,7 +70,7 @@ struct LayerParam {
     num_input_channel = 0;
     num_input_node = 0;
     // 64 MB
-    temp_col_max = 64<<18;
+    temp_col_max = 64 << 18;
     memset(reserved, 0, sizeof(reserved));
   }
   /*!
12 changes: 12 additions & 0 deletions src/layer/pooling_layer-inl.hpp
@@ -11,6 +11,7 @@ namespace layer {
 template<typename Reducer,
          int mode,
          typename xpu,
+         bool is_saved_param = true,
          bool is_identity = true,
          typename ForwardOp = op::identity,
          typename BackOp = op::identity_grad>
@@ -20,6 +21,17 @@ class PoolingLayer : public ILayer<xpu> {
   virtual void SetParam(const char *name, const char* val) {
     param_.SetParam(name, val);
   }
+  virtual void SaveModel(utils::IStream &fo) const {
+    if (is_saved_param) {
+      fo.Write(&param_, sizeof(LayerParam));
+    }
+  }
+  virtual void LoadModel(utils::IStream &fi) {
+    if (is_saved_param) {
+      utils::Check(fi.Read(&param_, sizeof(LayerParam)) != 0,
+                   "PoolingLayer:LoadModel invalid model file");
+    }
+  }
   virtual void InitConnection(const std::vector<Node<xpu>*> &nodes_in,
                               const std::vector<Node<xpu>*> &nodes_out,
                               ConnectState<xpu> *p_cstate) {
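
The is_saved_param flag decides at compile time whether a pooling layer serializes its LayerParam block, which is what separates the new pooling type codes from the legacy ones. A minimal self-contained sketch of the same switch, assuming a byte vector as a stand-in for cxxnet's utils::IStream (hypothetical names throughout):

#include <cstdio>
#include <vector>

struct LayerParam { int kernel_height = 3, kernel_width = 3, stride = 2; };

template<bool is_saved_param>
struct PoolingSketch {
  LayerParam param_;
  void SaveModel(std::vector<char> *fo) const {
    if (is_saved_param) {  // compile-time constant, so the branch folds away
      const char *p = reinterpret_cast<const char*>(&param_);
      fo->insert(fo->end(), p, p + sizeof(LayerParam));
    }
  }
};

int main() {
  std::vector<char> old_fmt, new_fmt;
  PoolingSketch<false>().SaveModel(&old_fmt);  // legacy layout: writes 0 bytes
  PoolingSketch<true>().SaveModel(&new_fmt);   // new layout: sizeof(LayerParam) bytes
  std::printf("old=%zu bytes, new=%zu bytes\n", old_fmt.size(), new_fmt.size());
  return 0;
}
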
14 changes: 10 additions & 4 deletions src/nnet/nnet_config.h
@@ -261,10 +261,16 @@ struct NetConfig {
       layers.push_back(info);
       layercfg.resize(layers.size());
     } else {
-      utils::Check(cfg_layer_index < static_cast<int>(layers.size()),
-                   "config layer index exceed bound");
-      utils::Check(info == layers[cfg_layer_index],
-                   "config setting does not match existing network structure");
+      CHECK(cfg_layer_index < static_cast<int>(layers.size())) <<
+          "config layer index exceeds bound";
+      bool is_same_layer = (info == layers[cfg_layer_index]);
+      if (!is_same_layer && info.type >= 32 && info.type <= 34 &&
+          layers[cfg_layer_index].type >= 11 && layers[cfg_layer_index].type <= 13) {
+        LOG(ERROR) << "Are you using a deprecated model that contains a pooling layer? "
+                   << "If yes, please check https://github.com/dmlc/cxxnet/issues/147 for details.";
+      }
+      CHECK(is_same_layer) <<
+          "config setting does not match existing network structure in layer " <<
+          info.name;
     }
     if (info.nindex_out.size() == 1) {
       cfg_top_node = info.nindex_out[0];
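
For reference, the range checks above compare layer type codes: 32-34 are the kNewMaxPooling/kNewSumPooling/kNewAvgPooling constants added in src/layer/layer.h, and 11-13 are presumably the legacy kMaxPooling/kSumPooling/kAvgPooling codes, so the error fires exactly when a current config's pooling layer is matched against a model saved with the old pooling types.
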
3 changes: 1 addition & 2 deletions src/nnet/nnet_impl-inl.hpp
@@ -122,8 +122,7 @@ class CXXNetThreadTrainer : public INetTrainer {
     NetConfig old_cfg;
     old_cfg.LoadNet(fi);
     fi.Read(&epoch_counter, sizeof(epoch_counter));
-    epoch_counter = 0;
-    NeuralNet<cpu> old_net(old_cfg, 0, 0, NULL);
+    NeuralNet<cpu> old_net(old_cfg, 1, 0, NULL);
     std::string old_model;
     fi.Read(&old_model);
     utils::MemoryBufferStream os(&old_model);
