From db103d5b8bda9dd7794396964f6e0636692fef63 Mon Sep 17 00:00:00 2001
From: Hyeongmin Moon
Date: Fri, 11 Feb 2022 11:36:13 +0900
Subject: [PATCH] fix per-epoch loss logging

I found that total_loss was accumulated across the whole run (all 100
epochs), so the logged value kept growing. Fixed this so that the log
prints the correct loss for each epoch.
---
 08-AutoEncoder/conv_autoencoder.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/08-AutoEncoder/conv_autoencoder.py b/08-AutoEncoder/conv_autoencoder.py
index 005f9c0..144f3be 100644
--- a/08-AutoEncoder/conv_autoencoder.py
+++ b/08-AutoEncoder/conv_autoencoder.py
@@ -64,8 +64,9 @@ def forward(self, x):
 criterion = nn.MSELoss()
 optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate,
                              weight_decay=1e-5)
-total_loss = 0
+
 for epoch in range(num_epochs):
+    total_loss = 0
     for data in dataloader:
         img, _ = data
         img = Variable(img).cuda()
@@ -76,8 +77,8 @@ def forward(self, x):
         optimizer.zero_grad()
         loss.backward()
         optimizer.step()
+        total_loss += loss.data
     # ===================log========================
-        total_loss += loss.data
     print('epoch [{}/{}], loss:{:.4f}'
           .format(epoch+1, num_epochs, total_loss))
     if epoch % 10 == 0:
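
For anyone who wants to try the change in isolation, below is a minimal,
self-contained sketch of the logging pattern this patch restores. The toy
model and random dataset are hypothetical stand-ins for the repo's
convolutional autoencoder and MNIST loader, and loss.item() is used in
place of the file's legacy loss.data; only the reset-then-accumulate
structure mirrors the patch.

    import torch
    import torch.nn as nn
    from torch.utils.data import DataLoader, TensorDataset

    num_epochs = 3
    # Toy stand-in for the repo's conv autoencoder (hypothetical model/data).
    model = nn.Sequential(nn.Linear(8, 3), nn.ReLU(), nn.Linear(3, 8))
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3,
                                 weight_decay=1e-5)
    dataloader = DataLoader(TensorDataset(torch.randn(64, 8)), batch_size=16)

    for epoch in range(num_epochs):
        total_loss = 0.0  # reset inside the epoch loop, as in the patch
        for (img,) in dataloader:
            output = model(img)
            loss = criterion(output, img)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            total_loss += loss.item()  # accumulate within this epoch only
        print('epoch [{}/{}], loss:{:.4f}'
              .format(epoch + 1, num_epochs, total_loss))

Resetting total_loss at the top of the epoch loop keeps the printed number
comparable across epochs; with the accumulator initialized once before the
loop, as in the old code, the sum grows monotonically even when the
per-epoch loss is actually falling.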