Skip to content

Commit

Permalink
Convert FBCODE to use the Ruff Formatter
Browse files Browse the repository at this point in the history
Summary:
Converts the specified directory to use the Ruff formatter. This is the last big diff needed to convert all of fbcode to Ruff.

pomsky_fix_bugs

drop-conflicts
bypass-github-export-checks
allow-large-files

Reviewed By: amyreese

Differential Revision: D66886610

fbshipit-source-id: 8276a7f6164efec189ca0b87e535543ed5bc3615
  • Loading branch information
Thomas Polasek authored and facebook-github-bot committed Dec 6, 2024
1 parent b731879 commit 0a23047
Show file tree
Hide file tree
Showing 7 changed files with 18 additions and 14 deletions.
2 changes: 1 addition & 1 deletion tests/test_manifest.py
Original file line number Diff line number Diff line change
Expand Up @@ -725,7 +725,7 @@ def test_replicated_entries_only_on_rank_0(rank: int) -> None:


def _update_local_manifest_with_merged_entries(
local_manifest: Dict[str, Entry]
local_manifest: Dict[str, Entry],
) -> None:
"""
Update the expected local manifest with manually merged ShardedTensorEntries
Expand Down
4 changes: 3 additions & 1 deletion torchsnapshot/asyncio_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,9 @@ def _run_once(self):
timeout = (
0
if ready or self._stopping
else min(max(scheduled[0]._when - now, 0), 86400) if scheduled else None
else min(max(scheduled[0]._when - now, 0), 86400)
if scheduled
else None
)
event_list = self._selector.select(timeout)
self._process_events(event_list)
Expand Down
16 changes: 10 additions & 6 deletions torchsnapshot/io_preparers/tensor.py
Original file line number Diff line number Diff line change
Expand Up @@ -199,7 +199,7 @@ def can_load_inplace(

@staticmethod
def empty_tensor_from_entry(
entry: Union[TensorEntry, ChunkedTensorEntry]
entry: Union[TensorEntry, ChunkedTensorEntry],
) -> torch.Tensor:
if entry.dtype in SUPPORTED_QUANTIZED_DTYPES:
# TODO: we can't allocate empty quantized tensors because we don't
Expand Down Expand Up @@ -394,11 +394,15 @@ def tensor_copy(dst: torch.Tensor, src: torch.Tensor) -> None:
# a region of the larger tensor's storage contain data that does not match
# the larger tensor's qscheme.

if src.is_quantized and (
not dst.is_quantized # Copying from quantized Tensor to non-quantized Tensor is not allowed
or dst.qscheme() != src.qscheme() # Quantized copy only works with same qscheme
or dst.dtype != src.dtype # Quantized copy requires matching dtypes
or (dst._is_view() and not _q_params_equal(dst, src)) # See the top comment
if (
src.is_quantized
and (
not dst.is_quantized # Copying from quantized Tensor to non-quantized Tensor is not allowed
or dst.qscheme()
!= src.qscheme() # Quantized copy only works with same qscheme
or dst.dtype != src.dtype # Quantized copy requires matching dtypes
or (dst._is_view() and not _q_params_equal(dst, src)) # See the top comment
)
):
# TODO: tile the dequantize -> copy to reduce memory footprint
src = _tensor_dequantize(src)
Expand Down
4 changes: 2 additions & 2 deletions torchsnapshot/manifest_ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -109,7 +109,7 @@ def _get_rank_to_manifest(metadata: SnapshotMetadata) -> List[Dict[str, Entry]]:


def _get_merged_sharded_tensor_entries(
rank_to_manifest: List[Dict[str, Entry]]
rank_to_manifest: List[Dict[str, Entry]],
) -> Dict[str, Entry]:
groups = defaultdict(list)
for manifest in rank_to_manifest:
Expand All @@ -130,7 +130,7 @@ def _get_merged_sharded_tensor_entries(


def _get_merged_dtensor_entries(
rank_to_manifest: List[Dict[str, Entry]]
rank_to_manifest: List[Dict[str, Entry]],
) -> Dict[str, Entry]:
"""
Merge all DTensor entries across ranks if sharded
Expand Down
2 changes: 1 addition & 1 deletion torchsnapshot/partitioner.py
Original file line number Diff line number Diff line change
Expand Up @@ -283,7 +283,7 @@ def partition_write_reqs(


def _consolidate_replicated_chunked_tensor_entries(
rank_to_entries: List[Dict[str, Entry]]
rank_to_entries: List[Dict[str, Entry]],
) -> List[Dict[str, Entry]]:
groups: Dict[str, List[ChunkedTensorEntry]] = defaultdict(list)

Expand Down
3 changes: 1 addition & 2 deletions torchsnapshot/serialization.py
Original file line number Diff line number Diff line change
Expand Up @@ -245,8 +245,7 @@ def contiguous_view_as_untyped_storage(tensor: torch.Tensor) -> UntypedStorage:
else:
untyped_storage = tensor.storage().untyped()
return untyped_storage[
tensor.storage_offset()
* tensor.element_size() : tensor.storage_offset()
tensor.storage_offset() * tensor.element_size() : tensor.storage_offset()
* tensor.element_size()
+ tensor.nelement() * tensor.element_size()
]
Expand Down
1 change: 0 additions & 1 deletion torchsnapshot/snapshot.py
Original file line number Diff line number Diff line change
Expand Up @@ -863,7 +863,6 @@ def _coalesce_path_and_replicated(
app_state: AppState,
replicated: List[str],
) -> Tuple[str, Set[str]]:

rank = pg_wrapper.get_rank()

# coalesce path
Expand Down

0 comments on commit 0a23047

Please sign in to comment.