# one_dim_poisson.py
# Solve the 1D Poisson equation -u''(x) = f(x) on [0, 1] with a neural
# network trial solution. Forked from Schoyen/fys-stk-lecture.

import tensorflow as tf
import matplotlib.pyplot as plt

tf.keras.backend.set_floatx("float64")  # Keras defaults to float32

# Define the analytic solution g(x) = x(1 - x)exp(x)
def g_analytic(x):
    return x * (1 - x) * tf.exp(x)

# Define the grid on [0, 1]
num_points = 11
start = tf.constant(0, dtype=tf.float64)
stop = tf.constant(1, dtype=tf.float64)

x = tf.reshape(tf.linspace(start, stop, num_points), (-1, 1))

# Define the model: a small fully-connected network
class DNModel(tf.keras.Model):
    def __init__(self):
        super().__init__()

        self.dense_1 = tf.keras.layers.Dense(20, activation=tf.nn.sigmoid)
        self.dense_2 = tf.keras.layers.Dense(10, activation=tf.nn.sigmoid)
        self.out = tf.keras.layers.Dense(1)

    def call(self, inputs):
        x = self.dense_1(inputs)
        x = self.dense_2(x)

        return self.out(x)

# Define the right-hand side f(x) of the Poisson equation -u''(x) = f(x)
@tf.function
def rhs(x):
    return (3 * x + x ** 2) * tf.exp(x)

# Define the trial solution u(x) = x(1 - x)N(x); the factor x(1 - x)
# enforces the boundary conditions u(0) = u(1) = 0 for any network N
@tf.function
def trial_solution(model, x):
    return x * (1 - x) * model(x)

# Define the loss as the mean squared residual of the equation,
# -u''(x) - f(x) = 0, using nested tapes for the second derivative
@tf.function
def loss(model, x):
    with tf.GradientTape() as tape:
        tape.watch(x)  # x is a constant, not a variable, so it must be watched

        with tf.GradientTape() as tape_2:
            tape_2.watch(x)

            trial = trial_solution(model, x)

        d_trial = tape_2.gradient(trial, x)
    d2_trial = tape.gradient(d_trial, x)

    return tf.losses.MSE(tf.zeros_like(d2_trial), -d2_trial - rhs(x))

# Define the gradient of the loss with respect to the trainable parameters
@tf.function
def grad(model, x):
    with tf.GradientTape() as tape:
        loss_value = loss(model, x)

    return loss_value, tape.gradient(loss_value, model.trainable_variables)

# Initialize model and optimizer
model = DNModel()
optimizer = tf.keras.optimizers.Adam(0.01)

num_epochs = 2000

for epoch in range(num_epochs):
    # Compute loss and gradients, then apply the gradients in the optimizer
    cost, gradients = grad(model, x)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))

    # Output loss improvement
    print(
        f"Step: {optimizer.iterations.numpy()}, "
        + f"Loss: {tf.reduce_mean(cost).numpy()}"
    )

# Plot the trained trial solution against the analytic solution on a larger grid
x = tf.reshape(tf.linspace(start, stop, 1001), (-1, 1))

plt.plot(x, trial_solution(model, x), label="Neural")
plt.plot(x, g_analytic(x), label="Analytic")
plt.legend(loc="best")
plt.show()
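
# A quick accuracy check, added here as a small sketch (not part of the
# original script): report the largest pointwise deviation of the trained
# trial solution from the analytic solution on the fine grid defined above.
max_error = tf.reduce_max(tf.abs(trial_solution(model, x) - g_analytic(x)))
print(f"Max absolute error on the fine grid: {max_error.numpy()}")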