diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 656fb790..381cb30d 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -6,25 +6,9 @@ repos:
- id: check-yaml
- id: end-of-file-fixer
- id: trailing-whitespace
- - repo: 'https://github.com/asottile/pyupgrade'
- rev: v3.15.0
+ - repo: https://github.com/astral-sh/ruff-pre-commit
+ rev: v0.4.8
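+    # A single ruff repo replaces the pyupgrade, isort, black, and flake8
+    # hooks removed above: `ruff` lints (with autofix), `ruff-format` formats.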
hooks:
- - id: pyupgrade
- args:
- - '--py37-plus'
- - repo: 'https://github.com/PyCQA/isort'
- rev: 5.12.0
- hooks:
- - id: isort
- - repo: 'https://github.com/psf/black'
- rev: 23.11.0
- hooks:
- - id: black
- - repo: 'https://github.com/pycqa/flake8'
- rev: 6.1.0
- hooks:
- - id: flake8
- - repo: 'https://github.com/codespell-project/codespell'
- rev: v2.1.0
- hooks:
- - id: codespell
+ - id: ruff
+ args: [ --fix ]
+ - id: ruff-format
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 9dc027cd..34c911c1 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -35,10 +35,11 @@ ideal report includes:
Codestyle
---------
-This project uses flake8 to enforce codstyle requirements. We've codified this
-process using a tool called `pre-commit <https://pre-commit.com/>`__. pre-commit
-allows us to specify a config file with all tools required for code linting,
-and surfaces either a git commit hook, or single command, for enforcing these.
+This project uses `ruff <https://docs.astral.sh/ruff/>`__ to enforce
+codestyle requirements. We've codified this process using a tool called
+`pre-commit <https://pre-commit.com/>`__. pre-commit allows us to specify a
+config file with all tools required for code linting, and surfaces either a
+git commit hook or a single command for enforcing these.
To validate your PR prior to publishing, you can use the following
`installation guide <https://pre-commit.com/#install>`__ to setup pre-commit.
@@ -51,10 +52,7 @@ to automatically perform the codestyle validation:
$ pre-commit run
This will automatically perform simple updates (such as white space clean up)
-and provide a list of any failing flake8 checks. After these are addressed,
+and provide a list of any failing checks. After these are addressed,
you can commit the changes prior to publishing the PR.
-These checks are also included in our CI setup under the "Lint" workflow which will provide output on Github for anything missed locally.
-
-See the `flake8` section of the
-`setup.cfg <https://github.com/boto/s3transfer/blob/develop/setup.cfg>`__ for the
-currently enforced rules.
+These checks are also included in our CI setup under the "Lint" workflow,
+which will provide output on GitHub for anything missed locally.
diff --git a/pyproject.toml b/pyproject.toml
index 0c1026fe..35a5af32 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -3,12 +3,63 @@ markers = [
"slow: marks tests as slow",
]
-[tool.isort]
-profile = "black"
-line_length = 79
-honor_noqa = true
-src_paths = ["s3transfer", "tests"]
+[tool.ruff]
+exclude = [
+ ".bzr",
+ ".direnv",
+ ".eggs",
+ ".git",
+ ".git-rewrite",
+ ".hg",
+ ".ipynb_checkpoints",
+ ".mypy_cache",
+ ".nox",
+ ".pants.d",
+ ".pyenv",
+ ".pytest_cache",
+ ".pytype",
+ ".ruff_cache",
+ ".svn",
+ ".tox",
+ ".venv",
+ ".vscode",
+ "__pypackages__",
+ "_build",
+ "buck-out",
+ "build",
+ "dist",
+ "node_modules",
+ "site-packages",
+ "venv",
+]
-[tool.black]
+# Format the same as Black.
line-length = 79
-skip_string_normalization = true
+indent-width = 4
+
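+# Assumed minimum supported Python; the `UP` (pyupgrade) rewrites target this.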
+target-version = "py38"
+
+[tool.ruff.lint]
+# Enable Pyflakes (`F`), a subset of the pycodestyle (`E`) codes, import
+# sorting (`I`), and pyupgrade (`UP`). Unlike Flake8, Ruff doesn't enable
+# pycodestyle warnings (`W`) or McCabe complexity (`C901`) by default.
+select = ["E4", "E7", "E9", "F", "I", "UP"]
+ignore = []
+
+# Allow fixes for all enabled rules (when `--fix` is provided).
+fixable = ["ALL"]
+unfixable = []
+
+# Allow unused variables when underscore-prefixed.
+dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"
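+# e.g. `_`, `__`, and `_unused` all match and are exempt from
+# unused-variable checks.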
+
+[tool.ruff.format]
+# Like Black, use spaces for indents and respect magic trailing commas, but
+# preserve the existing quote style instead of normalizing to double quotes.
+quote-style = "preserve"
+indent-style = "space"
+skip-magic-trailing-comma = false
+line-ending = "auto"
+
+docstring-code-format = false
+docstring-code-line-length = "dynamic"
diff --git a/s3transfer/__init__.py b/s3transfer/__init__.py
index 4ada4a89..36626c12 100644
--- a/s3transfer/__init__.py
+++ b/s3transfer/__init__.py
@@ -123,6 +123,7 @@ def __call__(self, bytes_amount):
"""
+
import concurrent.futures
import functools
import logging
@@ -813,8 +814,8 @@ def _validate_all_known_args(self, actual, allowed):
for kwarg in actual:
if kwarg not in allowed:
raise ValueError(
- "Invalid extra_args key '%s', "
- "must be one of: %s" % (kwarg, ', '.join(allowed))
+ f"Invalid extra_args key '{kwarg}', "
+ f"must be one of: {', '.join(allowed)}"
)
def _ranged_download(
diff --git a/s3transfer/bandwidth.py b/s3transfer/bandwidth.py
index 9bac5885..9301c8e3 100644
--- a/s3transfer/bandwidth.py
+++ b/s3transfer/bandwidth.py
@@ -30,9 +30,7 @@ def __init__(self, requested_amt, retry_time):
"""
self.requested_amt = requested_amt
self.retry_time = retry_time
- msg = 'Request amount {} exceeded the amount available. Retry in {}'.format(
- requested_amt, retry_time
- )
+ msg = f'Request amount {requested_amt} exceeded the amount available. Retry in {retry_time}'
super().__init__(msg)
diff --git a/s3transfer/constants.py b/s3transfer/constants.py
index 570aa2ea..b07b1d47 100644
--- a/s3transfer/constants.py
+++ b/s3transfer/constants.py
@@ -26,5 +26,5 @@
'ExpectedBucketOwner',
]
-USER_AGENT = 's3transfer/%s' % s3transfer.__version__
-PROCESS_USER_AGENT = '%s processpool' % USER_AGENT
+USER_AGENT = f's3transfer/{s3transfer.__version__}'
+PROCESS_USER_AGENT = f'{USER_AGENT} processpool'
diff --git a/s3transfer/copies.py b/s3transfer/copies.py
index 77deca62..c2ae9ce0 100644
--- a/s3transfer/copies.py
+++ b/s3transfer/copies.py
@@ -280,7 +280,7 @@ def _get_head_object_request_from_copy_source(self, copy_source):
raise TypeError(
'Expecting dictionary formatted: '
'{"Bucket": bucket_name, "Key": key} '
- 'but got %s or type %s.' % (copy_source, type(copy_source))
+ f'but got {copy_source} or type {type(copy_source)}.'
)
def _extra_upload_part_args(self, extra_args):
diff --git a/s3transfer/download.py b/s3transfer/download.py
index dc8980d4..e1a9cf0e 100644
--- a/s3transfer/download.py
+++ b/s3transfer/download.py
@@ -307,9 +307,7 @@ def _get_download_output_manager_cls(self, transfer_future, osutil):
if download_manager_cls.is_compatible(fileobj, osutil):
return download_manager_cls
raise RuntimeError(
- 'Output {} of type: {} is not supported.'.format(
- fileobj, type(fileobj)
- )
+ f'Output {fileobj} of type: {type(fileobj)} is not supported.'
)
def _submit(
diff --git a/s3transfer/futures.py b/s3transfer/futures.py
index e8282916..68775d04 100644
--- a/s3transfer/futures.py
+++ b/s3transfer/futures.py
@@ -175,9 +175,7 @@ def __init__(self, transfer_id=None):
self._failure_cleanups_lock = threading.Lock()
def __repr__(self):
- return '{}(transfer_id={})'.format(
- self.__class__.__name__, self.transfer_id
- )
+ return f'{self.__class__.__name__}(transfer_id={self.transfer_id})'
@property
def exception(self):
@@ -295,8 +293,8 @@ def _transition_to_non_done_state(self, desired_state):
with self._lock:
if self.done():
raise RuntimeError(
- 'Unable to transition from done state %s to non-done '
- 'state %s.' % (self.status, desired_state)
+ f'Unable to transition from done state {self.status} to non-done '
+ f'state {desired_state}.'
)
self._status = desired_state
@@ -316,9 +314,7 @@ def submit(self, executor, task, tag=None):
:returns: A future representing the submitted task
"""
logger.debug(
- "Submitting task {} to executor {} for transfer request: {}.".format(
- task, executor, self.transfer_id
- )
+ f"Submitting task {task} to executor {executor} for transfer request: {self.transfer_id}."
)
future = executor.submit(task, tag=tag)
# Add this created future to the list of associated future just
@@ -400,7 +396,7 @@ def _run_callback(self, callback):
# We do not want a callback interrupting the process, especially
# in the failure cleanups. So log and catch, the exception.
except Exception:
- logger.debug("Exception raised in %s." % callback, exc_info=True)
+ logger.debug(f"Exception raised in {callback}.", exc_info=True)
class BoundedExecutor:
diff --git a/s3transfer/manager.py b/s3transfer/manager.py
index ab9a210f..8db9a411 100644
--- a/s3transfer/manager.py
+++ b/s3transfer/manager.py
@@ -149,8 +149,8 @@ def _validate_attrs_are_nonzero(self):
for attr, attr_val in self.__dict__.items():
if attr_val is not None and attr_val <= 0:
raise ValueError(
- 'Provided parameter %s of value %s must be greater than '
- '0.' % (attr, attr_val)
+ f'Provided parameter {attr} of value {attr_val} must '
+ 'be greater than 0.'
)
@@ -492,16 +492,16 @@ def _validate_if_bucket_supported(self, bucket):
match = pattern.match(bucket)
if match:
raise ValueError(
- 'TransferManager methods do not support %s '
- 'resource. Use direct client calls instead.' % resource
+ f'TransferManager methods do not support {resource} '
+ 'resource. Use direct client calls instead.'
)
def _validate_all_known_args(self, actual, allowed):
for kwarg in actual:
if kwarg not in allowed:
raise ValueError(
- "Invalid extra_args key '%s', "
- "must be one of: %s" % (kwarg, ', '.join(allowed))
+                    f"Invalid extra_args key '{kwarg}', "
+                    f"must be one of: {', '.join(allowed)}"
)
def _add_operation_defaults(self, bucket, extra_args):
diff --git a/s3transfer/processpool.py b/s3transfer/processpool.py
index 017eeb44..318f1e7f 100644
--- a/s3transfer/processpool.py
+++ b/s3transfer/processpool.py
@@ -192,6 +192,7 @@
are using ``us-west-2`` as their region.
"""
+
import collections
import contextlib
import logging
diff --git a/s3transfer/subscribers.py b/s3transfer/subscribers.py
index 473d5d94..fe773233 100644
--- a/s3transfer/subscribers.py
+++ b/s3transfer/subscribers.py
@@ -30,20 +30,19 @@ def __new__(cls, *args, **kwargs):
return super().__new__(cls)
@classmethod
- @lru_cache()
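+    # Bare @lru_cache (no call parentheses) is valid on Python 3.8+ (UP011).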
+ @lru_cache
def _validate_subscriber_methods(cls):
for subscriber_type in cls.VALID_SUBSCRIBER_TYPES:
subscriber_method = getattr(cls, 'on_' + subscriber_type)
if not callable(subscriber_method):
raise InvalidSubscriberMethodError(
- 'Subscriber method %s must be callable.'
- % subscriber_method
+ f'Subscriber method {subscriber_method} must be callable.'
)
if not accepts_kwargs(subscriber_method):
raise InvalidSubscriberMethodError(
- 'Subscriber method %s must accept keyword '
- 'arguments (**kwargs)' % subscriber_method
+ f'Subscriber method {subscriber_method} must accept keyword '
+ 'arguments (**kwargs)'
)
def on_queued(self, future, **kwargs):
diff --git a/s3transfer/tasks.py b/s3transfer/tasks.py
index 1bad9812..4183715a 100644
--- a/s3transfer/tasks.py
+++ b/s3transfer/tasks.py
@@ -96,11 +96,7 @@ def __repr__(self):
main_kwargs_to_display = self._get_kwargs_with_params_to_include(
self._main_kwargs, params_to_display
)
- return '{}(transfer_id={}, {})'.format(
- self.__class__.__name__,
- self._transfer_coordinator.transfer_id,
- main_kwargs_to_display,
- )
+ return f'{self.__class__.__name__}(transfer_id={self._transfer_coordinator.transfer_id}, {main_kwargs_to_display})'
@property
def transfer_id(self):
diff --git a/s3transfer/upload.py b/s3transfer/upload.py
index c834c52a..0347857b 100644
--- a/s3transfer/upload.py
+++ b/s3transfer/upload.py
@@ -550,9 +550,7 @@ def _get_upload_input_manager_cls(self, transfer_future):
if upload_manager_cls.is_compatible(fileobj):
return upload_manager_cls
raise RuntimeError(
- 'Input {} of type: {} is not supported.'.format(
- fileobj, type(fileobj)
- )
+ f'Input {fileobj} of type: {type(fileobj)} is not supported.'
)
def _submit(
diff --git a/s3transfer/utils.py b/s3transfer/utils.py
index ef171f54..98742236 100644
--- a/s3transfer/utils.py
+++ b/s3transfer/utils.py
@@ -191,9 +191,7 @@ def __init__(self, func, *args, **kwargs):
self._kwargs = kwargs
def __repr__(self):
- return 'Function: {} with args {} and kwargs {}'.format(
- self._func, self._args, self._kwargs
- )
+ return f'Function: {self._func} with args {self._args} and kwargs {self._kwargs}'
def __call__(self):
return self._func(*self._args, **self._kwargs)
@@ -636,7 +634,7 @@ def acquire(self, tag, blocking=True):
"""
logger.debug("Acquiring %s", tag)
if not self._semaphore.acquire(blocking):
- raise NoResourcesAvailable("Cannot acquire tag '%s'" % tag)
+ raise NoResourcesAvailable(f"Cannot acquire tag '{tag}'")
def release(self, tag, acquire_token):
"""Release the semaphore
@@ -694,7 +692,7 @@ def acquire(self, tag, blocking=True):
try:
if self._count == 0:
if not blocking:
- raise NoResourcesAvailable("Cannot acquire tag '%s'" % tag)
+ raise NoResourcesAvailable(f"Cannot acquire tag '{tag}'")
else:
while self._count == 0:
self._condition.wait()
@@ -716,7 +714,7 @@ def release(self, tag, acquire_token):
self._condition.acquire()
try:
if tag not in self._tag_sequences:
- raise ValueError("Attempted to release unknown tag: %s" % tag)
+ raise ValueError(f"Attempted to release unknown tag: {tag}")
max_sequence = self._tag_sequences[tag]
if self._lowest_sequence[tag] == sequence_number:
# We can immediately process this request and free up
@@ -743,7 +741,7 @@ def release(self, tag, acquire_token):
else:
raise ValueError(
"Attempted to release unknown sequence number "
- "%s for tag: %s" % (sequence_number, tag)
+ f"{sequence_number} for tag: {tag}"
)
finally:
self._condition.release()
@@ -781,13 +779,13 @@ def _adjust_for_chunksize_limits(self, current_chunksize):
if current_chunksize > self.max_size:
logger.debug(
"Chunksize greater than maximum chunksize. "
- "Setting to %s from %s." % (self.max_size, current_chunksize)
+ f"Setting to {self.max_size} from {current_chunksize}."
)
return self.max_size
elif current_chunksize < self.min_size:
logger.debug(
"Chunksize less than minimum chunksize. "
- "Setting to %s from %s." % (self.min_size, current_chunksize)
+ f"Setting to {self.min_size} from {current_chunksize}."
)
return self.min_size
else:
@@ -804,8 +802,7 @@ def _adjust_for_max_parts(self, current_chunksize, file_size):
if chunksize != current_chunksize:
logger.debug(
"Chunksize would result in the number of parts exceeding the "
- "maximum. Setting to %s from %s."
- % (chunksize, current_chunksize)
+ f"maximum. Setting to {chunksize} from {current_chunksize}."
)
return chunksize
diff --git a/scripts/ci/install b/scripts/ci/install
index 2f3a8f97..8b4e8b0f 100755
--- a/scripts/ci/install
+++ b/scripts/ci/install
@@ -44,4 +44,4 @@ if __name__ == "__main__":
package = os.path.join('dist', wheel_dist)
if args.extras:
package = f"'{package}[{args.extras}]'"
- run('pip install %s' % package)
+ run(f'pip install {package}')
diff --git a/scripts/new-change b/scripts/new-change
index 78c4fa9f..bdf898fa 100755
--- a/scripts/new-change
+++ b/scripts/new-change
@@ -36,6 +36,7 @@ You can then use the ``scripts/gen-changelog`` to generate the
CHANGELOG.rst file.
"""
+
import argparse
import json
import os
@@ -132,9 +133,7 @@ def replace_issue_references(parsed, repo_name):
def linkify(match):
number = match.group()[1:]
-        return '`{} <https://github.com/{}/issues/{}>`__'.format(
-            match.group(), repo_name, number
-        )
+        return f'`{match.group()} <https://github.com/{repo_name}/issues/{number}>`__'
new_description = re.sub(r'#\d+', linkify, description)
parsed['description'] = new_description
diff --git a/scripts/performance/benchmark b/scripts/performance/benchmark
index 30152ade..d1aa8d7a 100755
--- a/scripts/performance/benchmark
+++ b/scripts/performance/benchmark
@@ -18,6 +18,7 @@ To use the script, run::
If no ``--output-file`` was provided, the data will be saved to
``performance.csv``
"""
+
import argparse
import os
import subprocess
@@ -34,7 +35,7 @@ elif sys.platform == 'darwin':
else:
# TODO: Add support for windows. This would require figuring out what
# interface to use on windows.
- raise RuntimeError('Script cannot be run on %s' % sys.platform)
+ raise RuntimeError(f'Script cannot be run on {sys.platform}')
def benchmark(args):
diff --git a/scripts/performance/benchmark-download b/scripts/performance/benchmark-download
index 932fd51f..14bab5be 100755
--- a/scripts/performance/benchmark-download
+++ b/scripts/performance/benchmark-download
@@ -19,6 +19,7 @@ To benchmark with your own s3 key:
--s3-bucket mybucket
"""
+
import argparse
import os
import shutil
@@ -62,7 +63,7 @@ def human_readable_to_bytes(value):
try:
return int(value)
except ValueError:
- raise ValueError("Invalid size value: %s" % value)
+ raise ValueError(f"Invalid size value: {value}")
else:
multiplier = SIZE_SUFFIX[suffix]
return int(value[: -len(suffix)]) * multiplier
@@ -96,8 +97,8 @@ def benchmark_download(args):
upload_file(client, temp_file, args.s3_bucket)
download_file_script = (
- './download-file --file-name %s --file-type %s --s3-bucket %s '
- '--s3-key %s' % (temp_file, args.file_type, args.s3_bucket, s3_key)
+ f'./download-file --file-name {temp_file} --file-type {args.file_type} --s3-bucket {args.s3_bucket} '
+ f'--s3-key {s3_key}'
)
benchmark_args = ['./benchmark', download_file_script]
if args.output_file:
diff --git a/scripts/performance/benchmark-upload b/scripts/performance/benchmark-upload
index bbaf6dfa..bc67e1f0 100755
--- a/scripts/performance/benchmark-upload
+++ b/scripts/performance/benchmark-upload
@@ -19,6 +19,7 @@ To benchmark with your own local file::
--s3-bucket mybucket
"""
+
import argparse
import os
import shutil
@@ -60,7 +61,7 @@ def human_readable_to_bytes(value):
try:
return int(value)
except ValueError:
- raise ValueError("Invalid size value: %s" % value)
+ raise ValueError(f"Invalid size value: {value}")
else:
multiplier = SIZE_SUFFIX[suffix]
return int(value[: -len(suffix)]) * multiplier
@@ -86,9 +87,8 @@ def benchmark_upload(args):
create_file(source_file, args.file_size)
upload_file_script = (
- './upload-file --file-name %s --file-type %s --s3-bucket %s '
- '--s3-key %s'
- % (source_file, args.file_type, args.s3_bucket, TEMP_KEY)
+ f'./upload-file --file-name {source_file} --file-type {args.file_type} --s3-bucket {args.s3_bucket} '
+ f'--s3-key {TEMP_KEY}'
)
benchmark_args = ['./benchmark', upload_file_script]
if args.output_file:
diff --git a/scripts/performance/download-file b/scripts/performance/download-file
index 4a7d86fe..64975d09 100755
--- a/scripts/performance/download-file
+++ b/scripts/performance/download-file
@@ -14,6 +14,7 @@ To download a file::
--s3-bucket mybucket --s3-key mykey
"""
+
import argparse
from botocore.session import get_session
diff --git a/scripts/performance/processpool-download b/scripts/performance/processpool-download
index 1b175b2f..28295276 100755
--- a/scripts/performance/processpool-download
+++ b/scripts/performance/processpool-download
@@ -16,6 +16,7 @@ To download a prefix recursively to a directory::
./processpool-download -d mydirname -b mybucket -p myprefix/
"""
+
import argparse
import os
diff --git a/scripts/performance/summarize b/scripts/performance/summarize
index dafbca2e..dd3abc33 100755
--- a/scripts/performance/summarize
+++ b/scripts/performance/summarize
@@ -63,6 +63,7 @@ summary as JSON instead of a pretty printed table::
}
"""
+
import argparse
import csv
import json
@@ -167,13 +168,13 @@ class Summarizer:
table = [
[
'Total Time (seconds)',
- '%.3f' % self.total_time,
+ f'{self.total_time:.3f}',
self.std_dev_total_time,
],
['Maximum Memory', h(self.max_memory), h(self.std_dev_max_memory)],
[
'Maximum CPU (percent)',
- '%.1f' % self.max_cpu,
+ f'{self.max_cpu:.1f}',
self.std_dev_max_cpu,
],
[
@@ -183,14 +184,14 @@ class Summarizer:
],
[
'Average CPU (percent)',
- '%.1f' % self.average_cpu,
+ f'{self.average_cpu:.1f}',
self.std_dev_average_cpu,
],
]
return tabulate(
table,
headers=[
- 'Metric over %s run(s)' % (self.total_files),
+ f'Metric over {self.total_files} run(s)',
'Mean',
'Standard Deviation',
],
@@ -237,8 +238,8 @@ class Summarizer:
def _validate_row(self, row, filename):
if not row:
raise RuntimeError(
- 'Row: %s could not be processed. The CSV file (%s) may be '
- 'empty.' % (row, filename)
+ f'Row: {row} could not be processed. The CSV file ({filename}) may be '
+ 'empty.'
)
def process_data_row(self, row):
diff --git a/scripts/performance/upload-file b/scripts/performance/upload-file
index 3b0abc8a..d61a48de 100755
--- a/scripts/performance/upload-file
+++ b/scripts/performance/upload-file
@@ -14,6 +14,7 @@ To upload a file::
--s3-bucket mybucket --s3-key mykey
"""
+
import argparse
from botocore.session import get_session
diff --git a/scripts/stress/timeout b/scripts/stress/timeout
index 3323b4aa..71001824 100755
--- a/scripts/stress/timeout
+++ b/scripts/stress/timeout
@@ -13,6 +13,7 @@ To use the script, run::
./timeout "./my-script-to-run" --timeout-after 5
"""
+
import argparse
import os
import subprocess
@@ -24,7 +25,7 @@ import psutil
class TimeoutException(Exception):
def __init__(self, timeout_len):
- msg = 'Script failed to complete within %s seconds' % timeout_len
+ msg = f'Script failed to complete within {timeout_len} seconds'
Exception.__init__(self, msg)
diff --git a/setup.cfg b/setup.cfg
index 3e3c0567..b997161e 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -7,6 +7,3 @@ requires_dist =
[options.extras_require]
crt = botocore[crt]>=1.33.2,<2.0a0
-
-[flake8]
-ignore = E203,E226,E501,W503,W504
diff --git a/tests/__init__.py b/tests/__init__.py
index 03590fef..b5c50327 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -72,9 +72,7 @@ def assert_files_equal(first, second):
second_md5 = md5_checksum(second)
if first_md5 != second_md5:
raise AssertionError(
- "Files are not equal: {}(md5={}) != {}(md5={})".format(
- first, first_md5, second, second_md5
- )
+ f"Files are not equal: {first}(md5={first_md5}) != {second}(md5={second_md5})"
)
diff --git a/tests/functional/test_copy.py b/tests/functional/test_copy.py
index 9638cab2..4120377a 100644
--- a/tests/functional/test_copy.py
+++ b/tests/functional/test_copy.py
@@ -96,9 +96,9 @@ def add_successful_copy_responses(
# Add the expected create multipart upload params.
if expected_create_mpu_params:
- stubbed_responses[0][
- 'expected_params'
- ] = expected_create_mpu_params
+ stubbed_responses[0]['expected_params'] = (
+ expected_create_mpu_params
+ )
# Add any expected copy parameters.
if expected_copy_params:
@@ -110,9 +110,9 @@ def add_successful_copy_responses(
# Add the expected complete multipart upload params.
if expected_complete_mpu_params:
- stubbed_responses[-1][
- 'expected_params'
- ] = expected_complete_mpu_params
+ stubbed_responses[-1]['expected_params'] = (
+ expected_complete_mpu_params
+ )
# Add the responses to the stubber.
for stubbed_response in stubbed_responses:
@@ -396,7 +396,7 @@ def add_upload_part_copy_responses_with_default_expected_params(
if extra_expected_params:
if 'ChecksumAlgorithm' in extra_expected_params:
name = extra_expected_params['ChecksumAlgorithm']
- checksum_member = 'Checksum%s' % name.upper()
+ checksum_member = f'Checksum{name.upper()}'
response = upload_part_response['service_response']
response['CopyPartResult'][checksum_member] = 'sum%s==' % (
i + 1
diff --git a/tests/functional/test_crt.py b/tests/functional/test_crt.py
index 352e5854..ff1b413f 100644
--- a/tests/functional/test_crt.py
+++ b/tests/functional/test_crt.py
@@ -77,7 +77,7 @@ def setUp(self):
'myfile', self.expected_content, mode='wb'
)
self.expected_path = "/" + self.bucket + "/" + self.key
- self.expected_host = "s3.%s.amazonaws.com" % (self.region)
+ self.expected_host = f"s3.{self.region}.amazonaws.com"
self.expected_s3express_host = f'{self.s3express_bucket}.s3express-usw2-az5.us-west-2.amazonaws.com'
self.expected_s3express_path = f'/{self.key}'
self.s3_request = mock.Mock(awscrt.s3.S3Request)
diff --git a/tests/functional/test_download.py b/tests/functional/test_download.py
index f458721d..ccbac406 100644
--- a/tests/functional/test_download.py
+++ b/tests/functional/test_download.py
@@ -116,9 +116,9 @@ def add_successful_get_object_responses(
expected_params
)
if expected_ranges:
- stubbed_response['expected_params'][
- 'Range'
- ] = expected_ranges[i]
+ stubbed_response['expected_params']['Range'] = (
+ expected_ranges[i]
+ )
self.stubber.add_response(**stubbed_response)
def add_n_retryable_get_object_responses(self, n, num_reads=0):
@@ -141,7 +141,7 @@ def test_download_temporary_file_does_not_exist(self):
# Make sure the file exists
self.assertTrue(os.path.exists(self.filename))
# Make sure the random temporary file does not exist
- possible_matches = glob.glob('%s*' % self.filename + os.extsep)
+ possible_matches = glob.glob(f'{self.filename}*' + os.extsep)
self.assertEqual(possible_matches, [])
def test_download_for_fileobj(self):
@@ -201,7 +201,7 @@ def test_download_cleanup_on_failure(self):
future.result()
# Make sure the actual file and the temporary do not exist
# by globbing for the file and any of its extensions
- possible_matches = glob.glob('%s*' % self.filename)
+ possible_matches = glob.glob(f'{self.filename}*')
self.assertEqual(possible_matches, [])
def test_download_with_nonexistent_directory(self):
diff --git a/tests/functional/test_processpool.py b/tests/functional/test_processpool.py
index 7b45d1f3..baf0232d 100644
--- a/tests/functional/test_processpool.py
+++ b/tests/functional/test_processpool.py
@@ -212,7 +212,7 @@ def test_cleans_up_tempfile_on_failure(self):
)
self.assertFalse(os.path.exists(self.filename))
# Any tempfile should have been erased as well
- possible_matches = glob.glob('%s*' % self.filename + os.extsep)
+ possible_matches = glob.glob(f'{self.filename}*' + os.extsep)
self.assertEqual(possible_matches, [])
def test_validates_extra_args(self):
diff --git a/tests/functional/test_upload.py b/tests/functional/test_upload.py
index 021b409b..e3b54899 100644
--- a/tests/functional/test_upload.py
+++ b/tests/functional/test_upload.py
@@ -92,7 +92,7 @@ def collect_body(self, params, model, **kwargs):
data=params['Body'],
)
self.client.meta.events.emit(
- 'request-created.s3.%s' % model.name,
+ f'request-created.s3.{model.name}',
request=request,
operation_name=model.name,
)
@@ -398,7 +398,7 @@ def add_upload_part_responses_with_default_expected_params(
# If ChecksumAlgorithm is present stub the response checksums
if 'ChecksumAlgorithm' in extra_expected_params:
name = extra_expected_params['ChecksumAlgorithm']
- checksum_member = 'Checksum%s' % name.upper()
+ checksum_member = f'Checksum{name.upper()}'
response = upload_part_response['service_response']
response[checksum_member] = 'sum%s==' % (i + 1)
diff --git a/tests/integration/test_crt.py b/tests/integration/test_crt.py
index 7f16d85e..a13ff17b 100644
--- a/tests/integration/test_crt.py
+++ b/tests/integration/test_crt.py
@@ -508,6 +508,6 @@ def test_download_cancel(self):
future.result()
self.assertEqual(err.name, 'AWS_ERROR_S3_CANCELED')
- possible_matches = glob.glob('%s*' % download_path)
+ possible_matches = glob.glob(f'{download_path}*')
self.assertEqual(possible_matches, [])
self._assert_subscribers_called()
diff --git a/tests/integration/test_download.py b/tests/integration/test_download.py
index 6a07f933..da08514e 100644
--- a/tests/integration/test_download.py
+++ b/tests/integration/test_download.py
@@ -98,7 +98,7 @@ def test_large_download_exits_quicky_on_exception(self):
future.cancel()
raise RuntimeError(
"Download transfer did not start after waiting for "
- "%s seconds." % timeout
+ f"{timeout} seconds."
)
# Raise an exception which should cause the preceding
# download to cancel and exit quickly
@@ -115,9 +115,7 @@ def test_large_download_exits_quicky_on_exception(self):
self.assertLess(
actual_time_to_exit,
max_allowed_exit_time,
- "Failed to exit under {}. Instead exited in {}.".format(
- max_allowed_exit_time, actual_time_to_exit
- ),
+ f"Failed to exit under {max_allowed_exit_time}. Instead exited in {actual_time_to_exit}.",
)
# Make sure the future was cancelled because of the KeyboardInterrupt
@@ -126,7 +124,7 @@ def test_large_download_exits_quicky_on_exception(self):
# Make sure the actual file and the temporary do not exist
# by globbing for the file and any of its extensions
- possible_matches = glob.glob('%s*' % download_path)
+ possible_matches = glob.glob(f'{download_path}*')
self.assertEqual(possible_matches, [])
@skip_if_using_serial_implementation(
@@ -174,9 +172,7 @@ def test_many_files_exits_quicky_on_exception(self):
self.assertLess(
end_time - start_time,
max_allowed_exit_time,
- "Failed to exit under {}. Instead exited in {}.".format(
- max_allowed_exit_time, end_time - start_time
- ),
+ f"Failed to exit under {max_allowed_exit_time}. Instead exited in {end_time - start_time}.",
)
# Make sure at least one of the futures got cancelled
@@ -186,7 +182,7 @@ def test_many_files_exits_quicky_on_exception(self):
# For the transfer that did get cancelled, make sure the temporary
# file got removed.
- possible_matches = glob.glob('%s*' % future.meta.call_args.fileobj)
+ possible_matches = glob.glob(f'{future.meta.call_args.fileobj}*')
self.assertEqual(possible_matches, [])
def test_progress_subscribers_on_download(self):
@@ -284,5 +280,5 @@ def test_download_to_special_file(self):
except Exception as e:
self.fail(
'Should have been able to download to /dev/null but received '
- 'following exception %s' % e
+ f'following exception {e}'
)
diff --git a/tests/integration/test_processpool.py b/tests/integration/test_processpool.py
index d0899b1b..86bbc62f 100644
--- a/tests/integration/test_processpool.py
+++ b/tests/integration/test_processpool.py
@@ -96,14 +96,12 @@ def test_large_download_exits_quickly_on_exception(self):
self.assertLess(
end_time - start_time,
max_allowed_exit_time,
- "Failed to exit under {}. Instead exited in {}.".format(
- max_allowed_exit_time, end_time - start_time
- ),
+ f"Failed to exit under {max_allowed_exit_time}. Instead exited in {end_time - start_time}.",
)
# Make sure the actual file and the temporary do not exist
# by globbing for the file and any of its extensions
- possible_matches = glob.glob('%s*' % download_path)
+ possible_matches = glob.glob(f'{download_path}*')
self.assertEqual(possible_matches, [])
def test_many_files_exits_quickly_on_exception(self):
@@ -138,12 +136,10 @@ def test_many_files_exits_quickly_on_exception(self):
self.assertLess(
end_time - start_time,
max_allowed_exit_time,
- "Failed to exit under {}. Instead exited in {}.".format(
- max_allowed_exit_time, end_time - start_time
- ),
+ f"Failed to exit under {max_allowed_exit_time}. Instead exited in {end_time - start_time}.",
)
# For the transfer that did get cancelled, make sure the temporary
# file got removed.
- possible_matches = glob.glob('%s*' % base_filename)
+ possible_matches = glob.glob(f'{base_filename}*')
self.assertEqual(possible_matches, [])
diff --git a/tests/integration/test_s3transfer.py b/tests/integration/test_s3transfer.py
index 95decb4d..f710dd45 100644
--- a/tests/integration/test_s3transfer.py
+++ b/tests/integration/test_s3transfer.py
@@ -31,9 +31,7 @@ def assert_files_equal(first, second):
second_md5 = md5_checksum(second)
if first_md5 != second_md5:
raise AssertionError(
- "Files are not equal: {}(md5={}) != {}(md5={})".format(
- first, first_md5, second, second_md5
- )
+ f"Files are not equal: {first}(md5={first_md5}) != {second}(md5={second_md5})"
)
diff --git a/tests/integration/test_upload.py b/tests/integration/test_upload.py
index ef366de5..a12d1818 100644
--- a/tests/integration/test_upload.py
+++ b/tests/integration/test_upload.py
@@ -84,7 +84,7 @@ def test_large_upload_exits_quicky_on_exception(self):
future.cancel()
raise RuntimeError(
"Download transfer did not start after waiting for "
- "%s seconds." % timeout
+ f"{timeout} seconds."
)
# Raise an exception which should cause the preceding
# download to cancel and exit quickly
@@ -101,9 +101,7 @@ def test_large_upload_exits_quicky_on_exception(self):
self.assertLess(
actual_time_to_exit,
max_allowed_exit_time,
- "Failed to exit under {}. Instead exited in {}.".format(
- max_allowed_exit_time, actual_time_to_exit
- ),
+ f"Failed to exit under {max_allowed_exit_time}. Instead exited in {actual_time_to_exit}.",
)
try:
@@ -163,9 +161,7 @@ def test_many_files_exits_quicky_on_exception(self):
self.assertLess(
end_time - start_time,
max_allowed_exit_time,
- "Failed to exit under {}. Instead exited in {}.".format(
- max_allowed_exit_time, end_time - start_time
- ),
+ f"Failed to exit under {max_allowed_exit_time}. Instead exited in {end_time - start_time}.",
)
# Make sure at least one of the futures got cancelled
diff --git a/tests/unit/test_compat.py b/tests/unit/test_compat.py
index f40a602a..20f07c74 100644
--- a/tests/unit/test_compat.py
+++ b/tests/unit/test_compat.py
@@ -57,7 +57,7 @@ def test_non_file_like_obj(self):
def test_non_seekable_ioerror(self):
# Should return False if IOError is thrown.
with open(self.filename, 'w') as f:
- self.assertFalse(seekable(ErrorRaisingSeekWrapper(f, IOError())))
+ self.assertFalse(seekable(ErrorRaisingSeekWrapper(f, OSError())))
def test_non_seekable_oserror(self):
# Should return False if OSError is thrown.
diff --git a/tests/unit/test_crt.py b/tests/unit/test_crt.py
index dc5b745c..477c91df 100644
--- a/tests/unit/test_crt.py
+++ b/tests/unit/test_crt.py
@@ -103,7 +103,7 @@ def setUp(self):
self.files = FileCreator()
self.filename = self.files.create_file('myfile', 'my content')
self.expected_path = "/" + self.bucket + "/" + self.key
- self.expected_host = "s3.%s.amazonaws.com" % (self.region)
+ self.expected_host = f"s3.{self.region}.amazonaws.com"
def tearDown(self):
self.files.remove_all()
diff --git a/tests/unit/test_futures.py b/tests/unit/test_futures.py
index ed196a3a..f2a94f23 100644
--- a/tests/unit/test_futures.py
+++ b/tests/unit/test_futures.py
@@ -489,9 +489,7 @@ def assert_submit_would_not_block(self, task, tag=None, **kwargs):
self.executor.submit(task, tag=tag, block=False)
except NoResourcesAvailable:
self.fail(
- 'Task {} should not have been blocked. Caused by:\n{}'.format(
- task, traceback.format_exc()
- )
+ f'Task {task} should not have been blocked. Caused by:\n{traceback.format_exc()}'
)
def add_done_callback_to_future(self, future, fn, *args, **kwargs):
diff --git a/tests/unit/test_manager.py b/tests/unit/test_manager.py
index b377b200..78c3b8d7 100644
--- a/tests/unit/test_manager.py
+++ b/tests/unit/test_manager.py
@@ -132,7 +132,7 @@ def test_wait_does_not_propogate_exceptions_from_result(self):
try:
self.coordinator_controller.wait()
except FutureResultException as e:
- self.fail('%s should not have been raised.' % e)
+ self.fail(f'{e} should not have been raised.')
def test_wait_can_be_interrupted(self):
inject_interrupt_coordinator = TransferCoordinatorWithInterrupt()
diff --git a/tests/unit/test_s3transfer.py b/tests/unit/test_s3transfer.py
index d9255bf3..65972ec1 100644
--- a/tests/unit/test_s3transfer.py
+++ b/tests/unit/test_s3transfer.py
@@ -12,7 +12,6 @@
# language governing permissions and limitations under the License.
import os
import shutil
-import socket
import tempfile
from concurrent import futures
from contextlib import closing
@@ -434,7 +433,7 @@ def test_retry_on_failures_from_stream_reads(self):
response_body = b'foobarbaz'
stream_with_errors = mock.Mock()
stream_with_errors.read.side_effect = [
- socket.error("fake error"),
+ OSError("fake error"),
response_body,
]
client.get_object.return_value = {'Body': stream_with_errors}
@@ -469,7 +468,7 @@ def test_exception_raised_on_exceeded_retries(self):
client = mock.Mock()
response_body = b'foobarbaz'
stream_with_errors = mock.Mock()
- stream_with_errors.read.side_effect = socket.error("fake error")
+ stream_with_errors.read.side_effect = OSError("fake error")
client.get_object.return_value = {'Body': stream_with_errors}
config = TransferConfig(multipart_threshold=4, multipart_chunksize=4)
@@ -678,7 +677,7 @@ def test_get_object_stream_is_retried_and_succeeds(self):
}
self.client.get_object.side_effect = [
# First request fails.
- socket.error("fake error"),
+ OSError("fake error"),
# Second succeeds.
{'Body': BytesIO(b'foobar')},
]
@@ -696,7 +695,7 @@ def test_get_object_stream_uses_all_retries_and_errors_out(self):
# Here we're raising an exception every single time, which
# will exhaust our retry count and propagate a
# RetriesExceededError.
- self.client.get_object.side_effect = socket.error("fake error")
+ self.client.get_object.side_effect = OSError("fake error")
with self.assertRaises(RetriesExceededError):
transfer.download_file('bucket', 'key', 'smallfile')
diff --git a/tests/unit/test_subscribers.py b/tests/unit/test_subscribers.py
index a3d5a435..e66461ab 100644
--- a/tests/unit/test_subscribers.py
+++ b/tests/unit/test_subscribers.py
@@ -54,7 +54,7 @@ def test_can_call_base_subscriber_method(self):
except Exception as e:
self.fail(
'Should be able to call base class subscriber method. '
- 'instead got: %s' % e
+ f'instead got: {e}'
)
def test_subclass_can_have_and_call_additional_methods(self):
diff --git a/tests/unit/test_upload.py b/tests/unit/test_upload.py
index 4c523011..84b1e67d 100644
--- a/tests/unit/test_upload.py
+++ b/tests/unit/test_upload.py
@@ -50,7 +50,7 @@ class InterruptionError(Exception):
class OSUtilsExceptionOnFileSize(OSUtils):
def get_file_size(self, filename):
raise AssertionError(
- "The file %s should not have been stated" % filename
+ f"The file {filename} should not have been stated"
)
diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py
index ea7edf8a..59996696 100644
--- a/tests/unit/test_utils.py
+++ b/tests/unit/test_utils.py
@@ -282,7 +282,7 @@ def test_remove_file_ignores_errors(self):
try:
OSUtils().remove_file(non_existent_file)
except OSError as e:
- self.fail('OSError should have been caught: %s' % e)
+ self.fail(f'OSError should have been caught: {e}')
def test_remove_file_proxies_remove_file(self):
OSUtils().remove_file(self.filename)
@@ -306,7 +306,7 @@ def test_get_temp_filename(self):
filename = 'myfile'
self.assertIsNotNone(
re.match(
- r'%s\.[0-9A-Fa-f]{8}$' % filename,
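+                # Doubled braces ({{8}}) yield a literal {8} quantifier
+                # inside the f-string pattern.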
+ rf'{filename}\.[0-9A-Fa-f]{{8}}$',
OSUtils().get_temp_filename(filename),
)
)
@@ -329,7 +329,7 @@ def test_allocate(self):
@mock.patch('s3transfer.utils.fallocate')
def test_allocate_with_io_error(self, mock_fallocate):
- mock_fallocate.side_effect = IOError()
+ mock_fallocate.side_effect = OSError()
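+        # IOError has been an alias of OSError since Python 3.3, so the
+        # assertRaises(IOError) below still matches.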
with self.assertRaises(IOError):
OSUtils().allocate(self.filename, 1)
self.assertFalse(os.path.exists(self.filename))