-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathexperiment.py
190 lines (152 loc) · 8.46 KB
/
experiment.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
from __future__ import print_function
import argparse
import torch
import torch.optim as optim
from utils.optimizer import AdamNormGrad
import os
import numpy as np
import datetime
from utils.load_data import load_dataset
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# # # # # # # # # # #
# START EXPERIMENTS # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # #
# Training settings: every experiment knob is exposed as a command-line flag.
parser = argparse.ArgumentParser(description='VAE+VampPrior')
# arguments for optimization
parser.add_argument('--batch_size', type=int, default=200, metavar='BStrain',
                    help='input batch size for training (default: 200)')
parser.add_argument('--test_batch_size', type=int, default=1000, metavar='BStest',
                    help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=400, metavar='E',
                    help='number of epochs to train (default: 400)')
parser.add_argument('--lr', type=float, default=0.0005, metavar='LR',
                    help='learning rate (default: 0.0005)')
parser.add_argument('--early_stopping_epochs', type=int, default=50, metavar='ES',
                    help='number of epochs for early stopping')
parser.add_argument('--warmup', type=int, default=100, metavar='WU',
                    help='number of epochs for warm-up')
parser.add_argument('--max_beta', type=float, default=1., metavar='B',
                    help='maximum value of beta for training')
# cuda
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')  # fixed: flag turns CUDA OFF, help previously said "enables"
# random seed
parser.add_argument('--seed', type=int, default=14, metavar='S',
                    help='random seed (default: 14)')
# model: latent size, input_size, and so on
parser.add_argument('--num_layers', type=int, default=1, metavar='NL',
                    help='number of layers')
parser.add_argument('--z1_size', type=int, default=200, metavar='M1',
                    help='latent size')
parser.add_argument('--z2_size', type=int, default=200, metavar='M2',
                    help='latent size')
parser.add_argument('--hidden_size', type=int, default=600, metavar='H',
                    help='the width of hidden layers')
parser.add_argument('--input_size', type=int, default=[1, 28, 28], metavar='D',
                    help='input size')
parser.add_argument('--activation', type=str, default=None, metavar='ACT',
                    help='activation function')
parser.add_argument('--number_components', type=int, default=1000, metavar='NC',
                    help='number of pseudo-inputs')
parser.add_argument('--pseudoinputs_mean', type=float, default=0.05, metavar='PM',
                    help='mean for init pseudo-inputs')
parser.add_argument('--pseudoinputs_std', type=float, default=0.01, metavar='PS',
                    help='std for init pseudo-inputs')
parser.add_argument('--use_training_data_init', action='store_true', default=False,
                    help='initialize pseudo-inputs with randomly chosen training data')
# model: model name, prior
parser.add_argument('--model_name', type=str, default='baseline', metavar='MN',
                    help='model name: baseline, vamp, hvamp, hvamp1')
parser.add_argument('--input_type', type=str, default='binary', metavar='IT',
                    help='type of the input: binary, gray, continuous, multinomial')
parser.add_argument('--gated', action='store_true', default=False,
                    help='use gating mechanism')
# experiment
parser.add_argument('--S', type=int, default=5000, metavar='SLL',
                    help='number of samples used for approximating log-likelihood')
parser.add_argument('--MB', type=int, default=100, metavar='MBLL',
                    help='size of a mini-batch used for approximating log-likelihood')
# dataset
parser.add_argument('--dataset_name', type=str, default='ml20m', metavar='DN',
                    help='name of the dataset: ml20m, netflix, pinterest, hadden')
parser.add_argument('--dynamic_binarization', action='store_true', default=False,
                    help='allow dynamic binarization')
# note
parser.add_argument('--note', type=str, default="none", metavar='NT',
                    help='additional note on the experiment')
parser.add_argument('--no_log', action='store_true', default=False,
                    help='print log to log_dir')
args = parser.parse_args()
# Use CUDA only when it is both requested (no --no-cuda) and available.
args.cuda = not args.no_cuda and torch.cuda.is_available()
# Seed every RNG source for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)
kwargs = {'num_workers': 0, 'pin_memory': True} if args.cuda else {}  # NOTE: num_workers changed 1 -> 0 because of an error
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Dataset sub-directory groups; run() maps dataset_idx 0-4 to the first entry and 5-9 to the second.
dataset_dir_index = ['module1', 'module12']
def run(dataset_idx, args, kwargs):
    """Train and evaluate one VAE variant on one dataset slice.

    Args:
        dataset_idx: integer in [0, 10). The slice maps to group
            ``dataset_dir_index[dataset_idx // 5]`` and within-group index
            ``dataset_idx % 5``.
        args: parsed argparse namespace. Mutated in place: ``model_signature``
            is set to today's date and ``note`` is overwritten with the
            group/index tag.
        kwargs: dict of DataLoader keyword arguments (e.g. num_workers,
            pin_memory) forwarded to load_dataset.
    """
    # Date stamp (YYYY-MM-DD) used to namespace the snapshot directory.
    args.model_signature = str(datetime.datetime.now())[0:10]

    # Build the model name used for logging and snapshots.
    model_name = (f"{args.dataset_name}_{args.model_name}"
                  f"_(K_{args.number_components})"
                  f"_{args.input_type}_beta({args.max_beta})"
                  f"_layers({args.num_layers})_hidden({args.hidden_size})"
                  f"_z1({args.z1_size})_z2({args.z2_size})")

    # dataset_idx // 5 picks the group, dataset_idx % 5 the slice within it.
    # (Was `(int)(dataset_idx/5)` — a C-style cast spelling of floor division.)
    group = dataset_dir_index[dataset_idx // 5]
    slice_tag = str(dataset_idx % 5)
    if args.note is not None:
        print(args.note)
        model_name = model_name + args.note + group + slice_tag
    print(model_name)
    args.note = group + '_' + slice_tag

    # DIRECTORY FOR SAVING: log files and model checkpoints.
    snapshots_path = 'snapshots/'
    out_dir = snapshots_path + args.model_signature + '_' + model_name + '/'
    # exist_ok avoids the exists()/makedirs() race; renamed from `dir` (shadowed builtin).
    os.makedirs(out_dir, exist_ok=True)

    # LOAD DATA=========================================================================================================
    print('load data')
    train_loader, val_loader, test_loader, args = load_dataset(dataset_idx, args, **kwargs)

    # CREATE MODEL======================================================================================================
    print('create model')
    # Import the VAE class matching the requested model variant.
    if args.model_name == 'baseline':
        from models.Baseline import VAE
    elif args.model_name == 'baseline2':
        from models.Baseline2 import VAE
    elif args.model_name == 'vamp':
        from models.Vamp import VAE
    elif args.model_name == 'hvamp':
        from models.HVamp import VAE
    elif args.model_name == 'hvamp1':
        from models.HVamp_1layer import VAE
    else:
        raise Exception('Wrong name of the model!')

    # Initialize the model (and move to GPU when CUDA is enabled).
    model = VAE(args)
    if args.cuda:
        model.cuda()
    optimizer = AdamNormGrad(model.parameters(), lr=args.lr)
    # ======================================================================================================================
    print(args)

    # Per-machine log file; created up front so later appends always succeed.
    log_dir = "vae_experiment_log_" + str(os.getenv("COMPUTERNAME")) + ".txt"
    open(log_dir, 'a').close()
    # ======================================================================================================================
    print('perform experiment')
    from utils.perform_experiment import experiment_vae
    experiment_vae(args, train_loader, val_loader, test_loader, model, optimizer, out_dir, log_dir, model_name=args.model_name)
    # ======================================================================================================================
# ======================================================================================================================
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
if __name__ == "__main__":
    # Sweep every dataset slice (two groups of five) sequentially.
    for dataset_idx in range(10):
        run(dataset_idx, args, kwargs)
# # # # # # # # # # #
# END EXPERIMENTS # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # #