Commit 8469479

AlexanderDokuchaev committed Apr 18, 2024
1 parent 678a914 commit 8469479
Showing 17 changed files with 66 additions and 33 deletions.
9 changes: 7 additions & 2 deletions .github/workflows/precommit.yml
@@ -95,7 +95,7 @@ jobs:
     defaults:
       run:
         shell: bash
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-20.04-4-cores
     steps:
       - uses: actions/checkout@v3
         with:
@@ -125,6 +125,8 @@ jobs:
       - name: Runner info
        continue-on-error: true
        run: |
+          export PATH=/usr/local/cuda-12.1/bin${PATH:+:${PATH}}
+          export LD_LIBRARY_PATH=/usr/local/cuda-12.1/lib64${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}
           nvidia-smi
           cat /proc/cpuinfo
           nvcc --version
@@ -140,4 +142,7 @@ jobs:
         run: |
           python -c "import torch; print(torch.cuda.is_available())"
       - name: Run PyTorch precommit test scope
-        run: make test-torch-cuda
+        run: |
+          export PATH=/usr/local/cuda-12.1/bin${PATH:+:${PATH}}
+          export LD_LIBRARY_PATH=/usr/local/cuda-12.1/lib64${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}
+          make test-torch-cuda
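Note: the CUDA 12.1 exports are repeated in both steps because each run: block executes in a fresh shell, so environment changes made in one step do not persist into later steps; appending the values once to the GITHUB_PATH and GITHUB_ENV files in an earlier step would be an equivalent one-time alternative.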
6 changes: 3 additions & 3 deletions tests/torch/pruning/test_tensor_processor.py
@@ -16,10 +16,10 @@
 from nncf.torch.tensor import PTNNCFTensor
 
 
-@pytest.mark.parametrize("device", (torch.device("cpu"), torch.device("cuda")))
-def test_ones(device):
-    if not torch.cuda.is_available() and device == torch.device("cuda"):
+def test_ones(use_cuda):
+    if use_cuda and not torch.cuda.is_available():
         pytest.skip("There are no available CUDA devices")
+    device = torch.device("cuda" if use_cuda else "cpu")
     shape = [1, 3, 10, 100]
     tensor = PTNNCFPruningTensorProcessor.ones(shape, device)
     assert torch.is_tensor(tensor.tensor)
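The test above now takes a use_cuda argument instead of parametrizing over torch.device objects. The fixture supplying use_cuda is not shown in this diff; a minimal conftest.py sketch of what it might look like, assuming the CUDA case should carry the new cuda marker (the names and placement are assumptions, not code from this commit):

# Hypothetical conftest.py sketch -- the actual fixture in the repository may differ.
import pytest


@pytest.fixture(params=[False, pytest.param(True, marks=pytest.mark.cuda)], ids=["cpu", "cuda"])
def use_cuda(request):
    # Each test requesting this fixture runs twice: a CPU case and a CUDA case,
    # with the CUDA case tagged by the `cuda` marker.
    return request.param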
1 change: 1 addition & 0 deletions tests/torch/ptq/test_fast_bias_correction.py
@@ -62,6 +62,7 @@ def check_bias(model: NNCFNetwork, ref_bias: list):
     raise ValueError("Not found node with bias")
 
 
+@pytest.mark.cuda
 @pytest.mark.skipif(not torch.cuda.is_available(), reason="Skipping for CPU-only setups")
 class TestTorchCudaFBCAlgorithm(TestTorchFBCAlgorithm):
     @staticmethod
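The cuda marker used throughout this commit has to be registered with pytest, or each use triggers PytestUnknownMarkWarning (an error under --strict-markers). The registration is not part of this diff; one sketch of how it could be done in a conftest.py, assuming no existing ini-file entry:

# Hypothetical registration sketch; the repository may instead declare the
# marker in pytest.ini, setup.cfg, or pyproject.toml.
def pytest_configure(config):
    config.addinivalue_line("markers", "cuda: test requires a CUDA-capable GPU")

Once registered, GPU-only cases can be excluded on CPU hosts with pytest -m "not cuda" or run alone with pytest -m cuda.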
13 changes: 7 additions & 6 deletions tests/torch/ptq/test_reducers_and_aggregators.py
@@ -87,6 +87,7 @@ def all_close(self, val: torch.Tensor, ref) -> bool:
         return super().all_close(val, ref)
 
 
+@pytest.mark.cuda
 @pytest.mark.skipif(not torch.cuda.is_available(), reason="Cuda is not available in current environment")
 class TestCudaReducersAggregators(BaseTestReducersAggregators):
     def get_nncf_tensor(self, x: np.array, dtype: Optional[Dtype] = None):
@@ -97,11 +98,11 @@ def all_close(self, val: torch.Tensor, ref) -> bool:
         return super().all_close(val, ref)
 
 
-@pytest.mark.parametrize("device", ["cuda", "cpu"])
 @pytest.mark.parametrize("size,ref", [(16_000_000, 1_600_000.8750), (17_000_000, 1_700_000.7500)])
-def test_quantile_percentile_function(device, size, ref):
-    if not torch.cuda.is_available() and device == "cuda":
+def test_quantile_percentile_function(use_cuda, size, ref):
+    if use_cuda and not torch.cuda.is_available():
         pytest.skip("Cuda is not available in current environment")
+    device = "cuda" if use_cuda else "cpu"
     tensor = PTNNCFTensor(torch.arange(1, size, 1).float().to(device))
     res_quantile = PTNNCFCollectorTensorProcessor.quantile(tensor, [0.1], axis=0)
     res_percentile = PTNNCFCollectorTensorProcessor.percentile(tensor, [10], axis=0)
@@ -111,11 +112,11 @@ def test_quantile_percentile_function(device, size, ref):
     assert tensor.is_cuda == (device == "cuda")
 
 
-@pytest.mark.parametrize("device", ["cuda", "cpu"])
 @pytest.mark.parametrize("size,ref", [(16_000_000, 8_000_000), (17_000_000, 8_500_000)])
-def test_median_function(device, size, ref):
-    if not torch.cuda.is_available() and device == "cuda":
+def test_median_function(use_cuda, size, ref):
+    if use_cuda and not torch.cuda.is_available():
         pytest.skip("Cuda is not available in current environment")
+    device = "cuda" if use_cuda else "cpu"
     tensor = PTNNCFTensor(torch.arange(1, size, 1).float().to(device))
     res = PTNNCFCollectorTensorProcessor.median(tensor, axis=0)
     assert res.tensor == ref
7 changes: 3 additions & 4 deletions tests/torch/ptq/test_weights_compression.py
@@ -243,12 +243,11 @@ def test_get_dtype_attribute_of_parameter():
     assert compressed_model.weight.dtype == torch.uint8
 
 
-@pytest.mark.parametrize("device", ("cpu", "cuda"))
 @pytest.mark.parametrize("dtype", ("float16", "float32"))
-def test_model_devices_and_precisions(device, dtype):
-    if device == "cuda" and not torch.cuda.is_available():
+def test_model_devices_and_precisions(use_cuda, dtype):
+    if use_cuda and not torch.cuda.is_available():
         pytest.skip("Skipping for CPU-only setups")
-    device = torch.device(device)
+    device = torch.device("cuda" if use_cuda else "cpu")
     dtype = torch.float16 if dtype == "float16" else torch.float32
 
     model = MatMulModel().to(device)
10 changes: 6 additions & 4 deletions tests/torch/quantization/test_algo_quantization.py
@@ -232,6 +232,7 @@ def activation_quantizers_dumping_worker(current_gpu, config, tmp_path):
         f.writelines("%s\n" % str(aq_id))
 
 
+@pytest.mark.cuda
 def test_activation_quantizers_order_is_the_same__for_resnet50(tmp_path, runs_subprocess_in_precommit):
     if not torch.cuda.is_available():
         pytest.skip("Skipping CUDA test cases for CPU only setups")
@@ -803,7 +804,8 @@ def test_internal_autocast_model(self, initializing_config: NNCFConfig):
         compressed_model(inputs)
 
     @pytest.mark.parametrize(
-        "device", [pytest.param("cuda"), pytest.param("cpu", marks=pytest.mark.skip(reason="CVS-86697"))]
+        "device",
+        [pytest.param("cuda", marks=pytest.mark.cuda), pytest.param("cpu", marks=pytest.mark.skip(reason="CVS-86697"))],
     )
     def test_manual_partial_half_precision_model(self, initializing_config: NNCFConfig, device: str):
         model = TestHalfPrecisionModels.ModelWithManualPartialHalfPrecision()
@@ -821,11 +823,10 @@ def test_manual_partial_half_precision_model(self, initializing_config: NNCFConf
         # Should complete successfully, including init.
         compressed_model(inputs)
 
-    @pytest.mark.parametrize("device", ["cpu", "cuda"])
-    def test_external_autocast(self, initializing_config: NNCFConfig, device: str):
+    def test_external_autocast(self, initializing_config: NNCFConfig, use_cuda):
         model = TestHalfPrecisionModels.RegularModel()
         inputs = torch.ones([1, 1, 1, 1])
-        if device == "cuda":
+        if use_cuda:
             if not torch.cuda.is_available():
                 pytest.skip("CUDA not available")
             inputs = inputs.cuda()
@@ -941,6 +942,7 @@ def test_can_quantize_user_module_with_addmm():
     create_compressed_model_and_algo_for_test(ModelWithUserModule(), nncf_config)
 
 
+@pytest.mark.cuda
 def test_works_when_wrapped_with_dataparallel():
     if not torch.cuda.is_available():
         pytest.xfail("The executing host must have > 1 CUDA GPU in order for this test to be relevant.")
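For reference, attaching a mark to individual parameter values, as done for the "cuda" params above, is standard pytest usage. A self-contained illustration, not code from this commit:

# Standalone example of per-parameter marks; not part of this repository.
import pytest


@pytest.mark.parametrize("device", ["cpu", pytest.param("cuda", marks=pytest.mark.cuda)])
def test_device_pattern(device):
    # Only the "cuda" case carries the marker, so `pytest -m "not cuda"`
    # still collects and runs the "cpu" case of this test.
    assert device in ("cpu", "cuda")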
1 change: 1 addition & 0 deletions tests/torch/quantization/test_autoq_precision_init.py
@@ -137,6 +137,7 @@ def __str__(self):
 )
 
 
+@pytest.mark.cuda
 @pytest.mark.parametrize("params", AUTOQ_TEST_PARAMS, ids=[str(p) for p in AUTOQ_TEST_PARAMS])
 def test_autoq_precision_init(_seed, dataset_dir, tmp_path, mocker, params):
     config = params.config_builder.build()
6 changes: 3 additions & 3 deletions tests/torch/quantization/test_functions.py
@@ -614,12 +614,12 @@ class TestParametrizedLong(BaseParametrized):
     pass
 
 
-@pytest.mark.parametrize("device", ["cuda", "cpu"])
-def test_mapping_to_zero(quantization_mode, device):
+def test_mapping_to_zero(use_cuda, quantization_mode):
     torch.manual_seed(42)
 
-    if not torch.cuda.is_available() and device == "cuda":
+    if use_cuda and not torch.cuda.is_available():
         pytest.skip("Skipping CUDA test cases for CPU only setups")
+    device = "cuda" if use_cuda else "cpu"
     x_zero = torch.zeros([1]).to(torch.device(device))
     levels = 256
     eps = 1e-6
1 change: 1 addition & 0 deletions tests/torch/quantization/test_hawq_precision_init.py
@@ -614,6 +614,7 @@ def precision_init_dumping_worker(gpu, ngpus_per_node, config, tmp_path):
     torch.save(act_bitwidth_per_scope, str(out_file_path))
 
 
+@pytest.mark.cuda
 def test_can_broadcast_initialized_precisions_in_distributed_mode(tmp_path, runs_subprocess_in_precommit):
     if not torch.cuda.is_available():
         pytest.skip("Skipping CUDA test cases for CPU only setups")
4 changes: 3 additions & 1 deletion tests/torch/sparsity/const/test_algo.py
@@ -76,7 +76,9 @@ def test_can_restore_binary_mask_on_magnitude_algo_resume():
     PTTensorListComparator.check_equal(ref_mask_2, op.operand.binary_mask)
 
 
-@pytest.mark.parametrize("use_data_parallel", [True, False], ids=["dataparallel", "regular"])
+@pytest.mark.parametrize(
+    "use_data_parallel", [pytest.param(True, marks=pytest.mark.cuda), False], ids=["dataparallel", "regular"]
+)
 def test_can_restore_binary_mask_on_magnitude_quant_algo_resume(tmp_path, use_data_parallel):
     config = get_empty_config()
     config["compression"] = [
2 changes: 1 addition & 1 deletion tests/torch/sparsity/movement/test_components.py
@@ -323,7 +323,6 @@ class TestFunctions:
         ],
     )
     @pytest.mark.parametrize("requires_grad", [True, False])
-    @pytest.mark.parametrize("use_cuda", [True, False])
     def test_binary_mask_by_threshold(
         self,
         input_tensor: torch.Tensor,
@@ -385,6 +384,7 @@ def test_importance_loss_forward(self, desc, requires_grad: bool, use_cuda: bool
         assert output.requires_grad is requires_grad
         assert torch.allclose(output, torch.tensor(desc["ref_output"]))
 
+    @pytest.mark.gpu
     def test_importance_loss_adapts_to_device_change(self):
         if not torch.cuda.is_available():
             pytest.skip("requires GPU")
1 change: 1 addition & 0 deletions tests/torch/test_algo_common.py
@@ -379,6 +379,7 @@ def get_basic_rb_sparsity_int8_config():
     ]
 
 
+@pytest.mark.cuda
 @pytest.mark.parametrize(
     "config",
     comp_loss_configs,
5 changes: 4 additions & 1 deletion tests/torch/test_api_behavior.py
@@ -122,7 +122,10 @@ def forward(self, x):
         return self.model.forward(x)
 
 
-@pytest.mark.parametrize("original_device", ["cpu", "cuda", "cuda:0"])
+@pytest.mark.parametrize(
+    "original_device",
+    ["cpu", pytest.param("cuda", marks=pytest.mark.cuda), pytest.param("cuda:0", marks=pytest.mark.cuda)],
+)
 def test_model_is_inited_with_own_device_by_default(nncf_config_with_default_init_args, original_device):
     if not torch.cuda.is_available() and "cuda" in original_device:
         pytest.skip("Skipping for CPU-only setups")
6 changes: 3 additions & 3 deletions tests/torch/test_graph_building.py
@@ -374,10 +374,10 @@ def test_filler_input_info_arg_generation(filler_gen_test_struct: FillerInputInf
     ],
     ids=["filler", "example", "loader"],
 )
-@pytest.mark.parametrize("device", ["cuda", "cpu"])
-def test_input_infos_respect_device_setting(input_info: ModelInputInfo, device: str):
-    if device == "cuda" and not torch.cuda.is_available():
+def test_input_infos_respect_device_setting(input_info: ModelInputInfo, use_cuda: bool):
+    if use_cuda and not torch.cuda.is_available():
         pytest.skip("Skipped checking CUDA device test cases on CPU-only hosts")
+    device = "cuda" if use_cuda else "cpu"
     forward_inputs = input_info.get_forward_inputs(device)
 
     def assert_on_device(x: torch.Tensor):
20 changes: 18 additions & 2 deletions tests/torch/test_knowledge_distillation.py
@@ -62,7 +62,15 @@ def get_sparsity_config_with_sparsity_init(config: NNCFConfig, sparsity_init=0.5
     return config
 
 
-@pytest.mark.parametrize("inference_type", ["cpu", "single_GPU", "DP", "DDP"])
+@pytest.mark.parametrize(
+    "inference_type",
+    [
+        "cpu",
+        pytest.param("single_GPU", marks=pytest.mark.cuda),
+        pytest.param("DP", marks=pytest.mark.cuda),
+        pytest.param("DDP", marks=pytest.mark.cuda),
+    ],
+)
 def test_knowledge_distillation_training_process(inference_type: str):
     if not torch.cuda.is_available() and inference_type != "cpu":
         pytest.skip("Skipping CUDA test cases for CPU only setups")
@@ -311,7 +319,15 @@ def test_kd_sparsity_statistics(algo: str):
 
 
 @pytest.mark.parametrize("device_placing", ["before", "after"])
-@pytest.mark.parametrize("inference_type", ["cpu", "single_GPU", "DP", "DDP"])
+@pytest.mark.parametrize(
+    "inference_type",
+    [
+        "cpu",
+        pytest.param("single_GPU", marks=pytest.mark.cuda),
+        pytest.param("DP", marks=pytest.mark.cuda),
+        pytest.param("DDP", marks=pytest.mark.cuda),
+    ],
+)
 def test_model_device_before_create_compressed_model(device_placing, inference_type):
     if not torch.cuda.is_available() and inference_type != "cpu":
         pytest.skip("Skipping CUDA test cases for CPU only setups")
6 changes: 3 additions & 3 deletions tests/torch/test_model_transformer.py
@@ -201,7 +201,7 @@ def to(self, device):
         self.to_device = device
 
     @pytest.mark.parametrize("target_point", available_points)
-    @pytest.mark.parametrize("multidevice", (False, True))
+    @pytest.mark.parametrize("multidevice", (False, pytest.param(True, marks=pytest.mark.cuda)))
     @pytest.mark.parametrize("hook", (lambda x: x, BaseOpWithParam(lambda x: x).cpu()))
     def test_pt_insertion_command(self, target_point: PTTargetPoint, multidevice: bool, hook):
         model = wrap_model(InsertionPointTestModel(), torch.ones([1, 1, 10, 10]))
@@ -696,7 +696,7 @@ def test_create_shared_quantizer_insertion_command():
     "priority", [TransformationPriority.FP32_TENSOR_STATISTICS_OBSERVATION, TransformationPriority.DEFAULT_PRIORITY]
 )
 @pytest.mark.parametrize("compression_module_registered", [False, True])
-@pytest.mark.parametrize("multidevice_model", (False, True))
+@pytest.mark.parametrize("multidevice_model", (False, pytest.param(True, marks=pytest.mark.cuda)))
 def test_shared_fn_insertion_point(
     priority, compression_module_registered, compression_module_type, multidevice_model, mocker
 ):
@@ -786,7 +786,7 @@ def _insert_external_op_mocked():
     "priority", [TransformationPriority.FP32_TENSOR_STATISTICS_OBSERVATION, TransformationPriority.DEFAULT_PRIORITY]
 )
 @pytest.mark.parametrize("compression_module_registered", [False, True])
-@pytest.mark.parametrize("multidevice_model", (False, True))
+@pytest.mark.parametrize("multidevice_model", (False, pytest.param(True, marks=pytest.mark.cuda)))
 def test_shared_fn_insertion_command_several_module_types(
     priority, compression_module_registered, multidevice_model, mocker
 ):
1 change: 1 addition & 0 deletions tests/torch/test_nncf_network.py
@@ -835,6 +835,7 @@ def forward(self, x, y):
         return res
 
 
+@pytest.mark.cuda
 def test_multidevice_model():
     if not torch.cuda.is_available():
         pytest.skip("GPU required")
