Update torchao to 0.4.0 and fix GPU quantization tutorial
svekars committed Sep 30, 2024
1 parent 01d2270 commit 0899f34
Showing 2 changed files with 6 additions and 6 deletions.
2 changes: 1 addition & 1 deletion .ci/docker/requirements.txt
@@ -68,5 +68,5 @@ iopath
 pygame==2.6.0
 pycocotools
 semilearn==0.3.2
-torchao==0.0.3
+torchao==0.4.0
 segment_anything==1.0
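To confirm the new pin took effect in a local environment, a quick standard-library check can help; this snippet is illustrative and not part of the commit:

from importlib.metadata import version

# Should print 0.4.0 once the updated requirements are installed.
print(version("torchao"))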
10 changes: 5 additions & 5 deletions prototype_source/gpu_quantization_torchao_tutorial.py
@@ -44,7 +44,7 @@
 #

 import torch
-from torchao.quantization import change_linear_weights_to_int8_dqtensors
+from torchao.quantization.quant_api import quantize_, int8_dynamic_activation_int8_weight
 from segment_anything import sam_model_registry
 from torch.utils.benchmark import Timer
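This import change reflects the torchao 0.4.0 API: the one-shot ``change_linear_weights_to_*`` helpers are gone, and quantization is now applied through a single ``quantize_`` entry point that takes a configuration callable. A minimal sketch of the new import in use; the toy ``nn.Linear`` model here is an illustrative stand-in for the SAM block, not part of the tutorial:

import torch
from torchao.quantization.quant_api import quantize_, int8_dynamic_activation_int8_weight

# Illustrative stand-in for the SAM block benchmarked in the tutorial.
model = torch.nn.Sequential(torch.nn.Linear(1024, 1024)).cuda().to(torch.bfloat16)

# quantize_ modifies the model in place, swapping each Linear weight
# for an int8 dynamically-quantized tensor subclass.
quantize_(model, int8_dynamic_activation_int8_weight())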
@@ -156,9 +156,9 @@ def get_sam_model(only_one_block=False, batchsize=1):
 # in memory bound situations where the benefit comes from loading less
 # weight data, rather than doing less computation. The torchao APIs:
 #
-# ``change_linear_weights_to_int8_dqtensors``,
-# ``change_linear_weights_to_int8_woqtensors`` or
-# ``change_linear_weights_to_int4_woqtensors``
+# ``int8_dynamic_activation_int8_weight()``,
+# ``int8_dynamic_activation_int8_semi_sparse_weight`` or
+# ``int8_dynamic_activation_int4_weight``
 #
 # can be used to easily apply the desired quantization technique and then
 # once the model is compiled with ``torch.compile`` with ``max-autotune``, quantization is
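All three techniques named above go through the same ``quantize_`` call. A hedged sketch, assuming the semi-sparse and int4 variants follow the same calling convention as the int8 variant this commit actually exercises:

from torchao.quantization.quant_api import (
    quantize_,
    int8_dynamic_activation_int8_weight,
    int8_dynamic_activation_int8_semi_sparse_weight,
    int8_dynamic_activation_int4_weight,
)

# Apply exactly one technique per model; each call mutates it in place.
quantize_(model, int8_dynamic_activation_int8_weight())
# quantize_(model, int8_dynamic_activation_int8_semi_sparse_weight())
# quantize_(model, int8_dynamic_activation_int4_weight())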
@@ -185,7 +185,7 @@ def get_sam_model(only_one_block=False, batchsize=1):
 model, image = get_sam_model(only_one_block, batchsize)
 model = model.to(torch.bfloat16)
 image = image.to(torch.bfloat16)
-change_linear_weights_to_int8_dqtensors(model)
+quantize_(model, int8_dynamic_activation_int8_weight())
 model_c = torch.compile(model, mode='max-autotune')
 quant_res = benchmark(model_c, image)
 print(f"bf16 compiled runtime of the quantized block is {quant_res['time']:0.2f}ms and peak memory {quant_res['memory']: 0.2f}GB")
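The ``benchmark`` helper used here is defined earlier in the tutorial and is untouched by this diff. A rough sketch of what such a helper might look like, built on the imported ``Timer`` plus CUDA memory stats; the tutorial's actual implementation may differ:

def benchmark(f, *args, **kwargs):
    # Warm-up run so compile/autotune time is excluded from the measurement.
    f(*args, **kwargs)
    torch.cuda.synchronize()
    torch.cuda.reset_peak_memory_stats()
    t = Timer(stmt="f(*args, **kwargs)",
              globals={"f": f, "args": args, "kwargs": kwargs})
    time_ms = t.timeit(number=10).mean * 1000
    return {"time": time_ms, "memory": torch.cuda.max_memory_allocated() / 1e9}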
