demo.py
""" An example script for using the robusta package.
"""
import torch
import torchvision
from torchvision import transforms
import robusta
# To make this demo run with 0 dependencies and run on any device
# (to show the library features) the demo doesn't use Cuda and the dataset has
# only 1 image.
# After you run it once, feel free to enable Cuda and change the dataset folder
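# A minimal sketch of the CUDA variant hinted at above (an assumption, not
# part of the original demo): move the model and each batch to the GPU.
#
#     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#     model = model.to(device)
#     # ...and inside the evaluation loop:
#     logits = model(images.to(device))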


def main():
    model = torchvision.models.resnet50(pretrained=True)

    # The dummy ImageNet-C dataset has only one image.
    # Change these settings to evaluate on a full dataset.
    batch_size = 1
    dataset_folder = "test/dummy_datasets/ImageNet-C"
    num_epochs = 1

    # We provide implementations for ImageNet-val, ImageNet-C, ImageNet-R,
    # ImageNet-A, and ImageNet-D:
    val_dataset = robusta.datasets.imagenetc.ImageNetC(
        root=dataset_folder,
        corruption="gaussian_blur",
        severity=1,
        transform=transforms.Compose([transforms.ToTensor()]),
    )
    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=batch_size, shuffle=True
    )
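
    # Keep in mind that batch-wise adaptation (below) estimates normalization
    # statistics from each test batch, so batch_size=1 is only sensible for
    # this dummy dataset; larger batches give more stable estimates.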

    # We offer different options for batch norm adaptation;
    # alternatives are "ema", "batch_wise_prior", ...
    robusta.batchnorm.adapt(model, adapt_type="batch_wise")
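    # Roughly, "batch_wise" recomputes the batch norm statistics on each
    # incoming test batch instead of using the training-time running
    # estimates; the exact semantics of each adapt_type are defined by the
    # library.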

    # The accuracy metric can be specific to the dataset:
    # for example, ImageNet-R requires remapping into 200 classes.
    # accuracy_metric = val_dataset.accuracy

    # You can also easily use self-learning in your model.
    # Self-learning adaptation can be combined with batch norm adaptation:
    parameters = robusta.selflearning.adapt(model, adapt_type="affine")
    optimizer = torch.optim.SGD(parameters, lr=1e-3)

    # You can choose from a set of adaptation losses (GCE, Entropy, ...):
    rpl_loss = robusta.selflearning.GeneralizedCrossEntropy(q=0.8)
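    # For reference, generalized cross entropy (Zhang & Sabuncu, 2018) on a
    # predicted label y is L_q(x, y) = (1 - p_y(x)^q) / q, interpolating
    # between cross entropy (q -> 0) and the MAE loss (q = 1); q=0.8 matches
    # the robust pseudo-labeling (RPL) setting. This is the standard GCE
    # definition, which the class above is assumed to implement.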

    acc1_sum, acc5_sum, num_samples = 0.0, 0.0, 0.0
    for epoch in range(num_epochs):
        for images, targets in val_loader:
            logits = model(images)
            predictions = logits.argmax(dim=1)

            # Predictions are optional. If you do not specify them,
            # they will be computed within the loss function.
            loss = rpl_loss(logits, predictions)

            # When using self-learning, you need to add an additional
            # optimizer step to your evaluation loop.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # acc1, acc5 = accuracy_metric(predictions, targets, topk=(1, 5))
            # acc1_sum += acc1
            # acc5_sum += acc5
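            # A plain-torch top-1/top-5 sketch (a hypothetical stand-in for
            # the dataset-specific accuracy_metric above; assumes plain
            # 1000-class integer targets):
            #
            #     topk = logits.topk(5, dim=1).indices
            #     correct = topk.eq(targets.view(-1, 1))
            #     acc1_sum += correct[:, 0].float().sum().item()
            #     acc5_sum += correct.any(dim=1).float().sum().item()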
            num_samples += len(targets)

    print(f"Top-1: {acc1_sum / num_samples}, Top-5: {acc5_sum / num_samples}")


if __name__ == "__main__":
    main()