# -*- coding: utf-8 -*-
import numpy as np
from graph import Graph
from optimizers import *
from functions import *


class Preset():
    # Quadratic function with unequal axis scaling
    def preset_0(self):
        # hyper parameters
        learning_rate = 0.1
        momentum = 0.9
        # Graph and range; generally this should match the graph's search range.
        x0 = np.arange(-20.0, 20.0, 0.25)
        x1 = np.arange(-20.0, 20.0, 0.25)
        f = SimpleFunction1()
        G = Graph(x0, x1, f())
        # initial position
        #init_pos = 10*np.random.random(2)-5
        init_pos = np.array([-8.0, -0.0001])
        # optimizers
        gd_opt = GDOptimizer(G.f, init_pos, learning_rate, "GD", "red")
        #sgd_opt = SGDOptimizer_(G.f, init_pos, learning_rate, name="SGD", color="firebrick")
        mom_opt = MomentumOptimizer(G.f, init_pos, learning_rate, momentum=momentum, name="Mom", color="green")
        nag_opt = NAGOptimizer(G.f, init_pos, learning_rate, momentum, "NAG", "lime")
        ada_grad_opt = AdaGradOptimizer(G.f, init_pos, learning_rate, eps=1e-4, name="AdaGrad", color="yellow")
        rmsp_opt = RMSpropOptimizer(G.f, init_pos, learning_rate, alpha=0.9, name="RMSprop", color="blue")
        rmsp_mom_opt = RMSpropMomentumOptimizer(G.f, init_pos, learning_rate, alpha=0.2, momentum=momentum, name="RMSprop+Momentum", color="cyan")
        ada_del_opt = AdaDeltaOptimizer(G.f, init_pos, gamma=0.9, eps=1e-3, name="AdaDelta", color="purple")
        adam_opt = AdamOptimizer(G.f, init_pos, alpha=0.2, beta_1=0.8, beta_2=0.9, eps=1e-7, name="Adam", color="deeppink")
        return [gd_opt, mom_opt, nag_opt, ada_grad_opt, rmsp_opt, rmsp_mom_opt, ada_del_opt, adam_opt], G

    # Quadratic function with unequal axis scaling (default Graph function)
    def preset_1(self):
        # hyper parameters
        learning_rate = 0.1
        momentum = 0.9
        # Graph and range; generally this should match the graph's search range.
        x0 = np.arange(-10.0, 10.0, 0.25)
        x1 = np.arange(-10.0, 10.0, 0.25)
        G = Graph(x0, x1)
        # initial position
        #init_pos = 10*np.random.random(2)-5
        init_pos = np.array([-8.0, -4.0])
        # optimizers
        gd_opt = GDOptimizer(G.f, init_pos, learning_rate, "GD", "red")
        #sgd_opt = SGDOptimizer_(G.f, init_pos, learning_rate, name="SGD", color="firebrick")
        mom_opt = MomentumOptimizer(G.f, init_pos, learning_rate, momentum=momentum, name="Mom", color="green")
        nag_opt = NAGOptimizer(G.f, init_pos, learning_rate, momentum, "NAG", "lime")
        ada_grad_opt = AdaGradOptimizer(G.f, init_pos, learning_rate, eps=1e-7, name="AdaGrad", color="yellow")
        rmsp_opt = RMSpropOptimizer(G.f, init_pos, learning_rate, alpha=0.9, name="RMSprop", color="blue")
        rmsp_mom_opt = RMSpropMomentumOptimizer(G.f, init_pos, learning_rate, alpha=0.5, momentum=momentum, name="RMSprop+Momentum", color="cyan")
        ada_del_opt = AdaDeltaOptimizer(G.f, init_pos, gamma=0.9, eps=1e-2, name="AdaDelta", color="purple")
        adam_opt = AdamOptimizer(G.f, init_pos, alpha=0.2, beta_1=0.8, beta_2=0.9, eps=1e-7, name="Adam", color="deeppink")
        return [gd_opt, mom_opt, nag_opt, ada_grad_opt, rmsp_opt, rmsp_mom_opt, ada_del_opt, adam_opt], G

    # Five-well potential
    def preset_2(self):
        learning_rate = 2.0  # works well for the five-well potential
        momentum = 0.7
        x0 = np.arange(-20.0, 20.0, 0.25)
        x1 = np.arange(-20.0, 20.0, 0.25)
        f = FiveWellPotentialFunction_mod()
        G = Graph(x0, x1, f())
        # initial position
        init_pos = np.array([0.0, -1.0])
        # optimizers
        gd_opt = GDOptimizer(G.f, init_pos, learning_rate, "GD", "red")
        #sgd_opt = SGDOptimizer_(G.f, init_pos, learning_rate,
        #                        noize_vec_mul=10.0, noize_vec_negbias=0.5, noize_const_mul=2.0, noize_const_negbias=1.0, name="SGD", color="firebrick")
        mom_opt = MomentumOptimizer(G.f, init_pos, learning_rate, momentum=momentum, name="Mom", color="green")
        nag_opt = NAGOptimizer(G.f, init_pos, learning_rate, momentum, "NAG", "lime")
        ada_grad_opt = AdaGradOptimizer(G.f, init_pos, learning_rate, eps=1e-7, name="AdaGrad", color="yellow")
        rmsp_opt = RMSpropOptimizer(G.f, init_pos, learning_rate, alpha=0.8, name="RMSprop", color="blue")
        rmsp_mom_opt = RMSpropMomentumOptimizer(G.f, init_pos, learning_rate, alpha=0.2, momentum=momentum, name="RMSprop+Momentum", color="cyan")
        ada_del_opt = AdaDeltaOptimizer(G.f, init_pos, gamma=0.9, eps=1e-2, name="AdaDelta", color="purple")
        adam_opt = AdamOptimizer(G.f, init_pos, alpha=0.2, beta_1=0.8, beta_2=0.9, eps=1e-7, name="Adam", color="deeppink")
        return [gd_opt, mom_opt, nag_opt, ada_grad_opt, rmsp_opt, rmsp_mom_opt, ada_del_opt, adam_opt], G

    # Various optimizers on the Booth function
    def preset_3(self):
        learning_rate = 0.05
        momentum = 0.7
        x0 = np.arange(-5.0, 5.0, 0.25)
        x1 = np.arange(-5.0, 5.0, 0.25)
        f = BoothFunction()
        G = Graph(x0, x1, f())
        # initial position
        init_pos = np.array([-2.0, -4.0])
        # optimizers
        gd_opt = GDOptimizer(G.f, init_pos, learning_rate, "GD", "red")
        mom_opt = MomentumOptimizer(G.f, init_pos, learning_rate, momentum=momentum, name="Mom", color="green")
        nag_opt = NAGOptimizer(G.f, init_pos, learning_rate, momentum, "NAG", "lime")
        ada_grad_opt = AdaGradOptimizer(G.f, init_pos, learning_rate, eps=1e-7, name="AdaGrad", color="yellow")
        rmsp_opt = RMSpropOptimizer(G.f, init_pos, learning_rate, alpha=0.9, name="RMSprop", color="blue")
        rmsp_mom_opt = RMSpropMomentumOptimizer(G.f, init_pos, learning_rate, alpha=0.2, momentum=momentum, name="RMSprop+Momentum", color="cyan")
        ada_del_opt = AdaDeltaOptimizer(G.f, init_pos, gamma=0.9, eps=1e-2, name="AdaDelta", color="purple")
        adam_opt = AdamOptimizer(G.f, init_pos, alpha=0.2, beta_1=0.8, beta_2=0.9, eps=1e-7, name="Adam", color="deeppink")
        return [gd_opt, mom_opt, nag_opt, ada_grad_opt, rmsp_opt, rmsp_mom_opt, ada_del_opt, adam_opt], G

    # Comparison with a "stochastic" gradient method (implemented here simply by
    # adding random numbers labelled as noise to the gradient; see the sketch
    # after the class).
    def preset_4(self):
        learning_rate = 3.0
        momentum = 0.7
        np.random.seed(7)
        x0 = np.arange(-20.0, 20.0, 0.25)
        x1 = np.arange(-20.0, 20.0, 0.25)
        f = FiveWellPotentialFunction_mod()
        G = Graph(x0, x1, f())
        # initial position
        init_pos = np.array([-4.0, -4.0])
        # optimizers
        gd_opt = GDOptimizer(G.f, init_pos, learning_rate, "GD", "red")
        mom_opt = MomentumOptimizer(G.f, init_pos, learning_rate, momentum=momentum, name="Mom", color="green")
        sgd_opt = SGDOptimizer(G.f, init_pos, learning_rate, noize_vec_mul=10.0, noize_vec_negbias=5.0, name="SGD", color="blue")
        """
        sgd_opt = SGDOptimizer_(G.f, init_pos, learning_rate,
                                noize_vec_mul=10.0, noize_vec_negbias=0.5, noize_const_mul=2.0, noize_const_negbias=1.0, name="SGD", color="blue")
        """
        return [gd_opt, sgd_opt, mom_opt], G

    # For comparing Momentum and NAG.
    def preset_5(self):
        learning_rate = 0.02
        momentum = 0.9
        x0 = np.arange(-10.0, 10.0, 0.25)
        x1 = np.arange(-10.0, 10.0, 0.25)
        G = Graph(x0, x1)
        # initial position
        init_pos = np.array([-9.0, -7.0])
        # optimizers
        mom_opt = MomentumOptimizer(G.f, init_pos, learning_rate, momentum=momentum, name="Mom", color="green")
        nag_opt = NAGOptimizer(G.f, init_pos, learning_rate, momentum, name="NAG", color="lime")
        return [mom_opt, nag_opt], G

    # RMSprop vs. RMSprop+Momentum
    def preset_6(self):
        learning_rate = 0.02
        momentum = 0.9
        x0 = np.arange(-10.0, 10.0, 0.25)
        x1 = np.arange(-10.0, 10.0, 0.25)
        G = Graph(x0, x1)
        # initial position
        init_pos = np.array([-9.0, -7.0])
        # optimizers
        rmsp_opt = RMSpropOptimizer(G.f, init_pos, learning_rate, name="RMSprop", color="blue")
        rmsp_mom_opt = RMSpropMomentumOptimizer(G.f, init_pos, learning_rate, momentum=momentum, name="RMSprop+Momentum", color="cyan")
        return [rmsp_opt, rmsp_mom_opt], G

    # Ada-family optimizers
    def preset_7(self):
        learning_rate = 0.001
        momentum = 0.9
        x0 = np.arange(-10.0, 10.0, 0.25)
        x1 = np.arange(-10.0, 10.0, 0.25)
        G = Graph(x0, x1)
        # initial position
        init_pos = np.array([-9.0, -7.0])
        # optimizers
        ada_grad_opt = AdaGradOptimizer(G.f, init_pos, learning_rate, eps=1e-7, name="AdaGrad", color="yellow")
        ada_del_opt = AdaDeltaOptimizer(G.f, init_pos, gamma=0.9, eps=1e-2, name="AdaDelta", color="purple")
        adam_opt = AdamOptimizer(G.f, init_pos, alpha=0.2, beta_1=0.8, beta_2=0.9, eps=1e-7, name="Adam", color="deeppink")
        smorms_opt = SMORMS3Optimizer(G.f, init_pos, learning_rate, name="SMORMS3", color="orange")
        return [ada_grad_opt, ada_del_opt, adam_opt, smorms_opt], G

    # Ada-family optimizers 2 (Booth function)
    def preset_8(self):
        learning_rate = 0.001
        momentum = 0.9
        x0 = np.arange(-5.0, 5.0, 0.25)
        x1 = np.arange(-5.0, 5.0, 0.25)
        f = BoothFunction()
        G = Graph(x0, x1, f())
        # initial position
        init_pos = np.array([-2.0, -4.0])
        # optimizers
        ada_grad_opt = AdaGradOptimizer(G.f, init_pos, learning_rate, eps=1e-7, name="AdaGrad", color="yellow")
        ada_del_opt = AdaDeltaOptimizer(G.f, init_pos, gamma=0.9, eps=1e-2, name="AdaDelta", color="purple")
        adam_opt = AdamOptimizer(G.f, init_pos, alpha=0.2, beta_1=0.8, beta_2=0.9, eps=1e-7, name="Adam", color="deeppink")
        smorms_opt = SMORMS3Optimizer(G.f, init_pos, learning_rate, name="SMORMS3", color="orange")
        return [ada_grad_opt, ada_del_opt, adam_opt, smorms_opt], G

    # Ada-family optimizers 3 (five-well potential)
    def preset_9(self):
        learning_rate = 0.001
        momentum = 0.9
        x0 = np.arange(-20.0, 20.0, 0.25)
        x1 = np.arange(-20.0, 20.0, 0.25)
        f = FiveWellPotentialFunction_mod()
        G = Graph(x0, x1, f())
        # initial position
        init_pos = np.array([0.0, -1.0])
        # optimizers
        ada_grad_opt = AdaGradOptimizer(G.f, init_pos, learning_rate, eps=1e-7, name="AdaGrad", color="yellow")
        ada_del_opt = AdaDeltaOptimizer(G.f, init_pos, gamma=0.9, eps=1e-2, name="AdaDelta", color="purple")
        adam_opt = AdamOptimizer(G.f, init_pos, alpha=0.2, beta_1=0.8, beta_2=0.9, eps=1e-7, name="Adam", color="deeppink")
        smorms_opt = SMORMS3Optimizer(G.f, init_pos, learning_rate, name="SMORMS3", color="orange")
        return [ada_grad_opt, ada_del_opt, adam_opt, smorms_opt], G
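

# ---------------------------------------------------------------------------
# Illustration for preset_4: the "stochastic" behaviour there is simulated
# simply by adding random noise to the gradient. This helper is only a
# hypothetical sketch of that idea, not the actual SGDOptimizer
# implementation from optimizers.py, and the interpretation of the
# noize_vec_mul / noize_vec_negbias parameters is an assumption.
# ---------------------------------------------------------------------------
def noisy_gradient_sketch(grad, noize_vec_mul=10.0, noize_vec_negbias=5.0):
    """Return `grad` perturbed by uniform noise drawn from
    [-noize_vec_negbias, noize_vec_mul - noize_vec_negbias)."""
    noise = noize_vec_mul * np.random.random(np.shape(grad)) - noize_vec_negbias
    return np.asarray(grad) + noise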
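

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original presets).
# Assumption: a separate driver/animation script normally consumes the
# (optimizers, Graph) pair returned by a preset and steps each optimizer per
# frame. The `update()` method name below is hypothetical and may differ from
# the real interface defined in optimizers.py, so it is guarded with hasattr().
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    optimizers, G = Preset().preset_0()
    for opt in optimizers:
        # Each optimizer was constructed with (G.f, init_pos, ...), so it keeps
        # its own position state; a real driver would step it repeatedly and
        # draw the trajectory on G.
        if hasattr(opt, "update"):  # guard: the method name is an assumption
            for _ in range(100):
                opt.update()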