diff --git a/nibabel/pkg_info.py b/nibabel/pkg_info.py index 9b88bc764b..b4ff68e91d 100644 --- a/nibabel/pkg_info.py +++ b/nibabel/pkg_info.py @@ -22,14 +22,19 @@ def _cmp(a, b): def cmp_pkg_version(version_str, pkg_version_str=__version__): - """ Compare `version_str` to current package version + """ Compare ``version_str`` to current package version To be valid, a version must have a numerical major version followed by a dot, followed by a numerical minor version. It may optionally be followed by a dot and a numerical micro version, and / or by an "extra" string. - *Any* extra string labels the version as pre-release, so `1.2.0somestring` - compares as prior to (pre-release for) `1.2.0`, where `somestring` can be - any string. + The extra string may further contain a "+". Any value to the left of a "+" + labels the version as pre-release, while values to the right indicate a + post-release relative to the values to the left. That is, + ``1.2.0+1`` is post-release for ``1.2.0``, while ``1.2.0rc1+1`` is + post-release for ``1.2.0rc1`` and pre-release for ``1.2.0``. + + This is an approximation of `PEP-440`_, and future versions will fully + implement PEP-440. Parameters ---------- @@ -50,15 +55,38 @@ def cmp_pkg_version(version_str, pkg_version_str=__version__): 1 >>> cmp_pkg_version('1.2.0dev', '1.2.0') -1 + >>> cmp_pkg_version('1.2.0dev', '1.2.0rc1') + -1 + >>> cmp_pkg_version('1.2.0rc1', '1.2.0') + -1 + >>> cmp_pkg_version('1.2.0rc1+1', '1.2.0rc1') + 1 + >>> cmp_pkg_version('1.2.0rc1+1', '1.2.0') + -1 + + .. 
_`PEP-440`: https://www.python.org/dev/peps/pep-0440/ """ version, extra = _parse_version(version_str) pkg_version, pkg_extra = _parse_version(pkg_version_str) - if version != pkg_version: - return _cmp(StrictVersion(version), StrictVersion(pkg_version)) - return (0 if extra == pkg_extra - else 1 if extra == '' - else -1 if pkg_extra == '' - else _cmp(extra, pkg_extra)) + + # Normalize versions + quick_check = _cmp(StrictVersion(version), StrictVersion(pkg_version)) + # Nothing further to check + if quick_check != 0 or extra == pkg_extra == '': + return quick_check + + # Before + is pre-release, after + is additional increment + pre, _, post = extra.partition('+') + pkg_pre, _, pkg_post = pkg_extra.partition('+') + quick_check = _cmp(pre, pkg_pre) + if quick_check != 0: # Excludes case where pre and pkg_pre == '' + # Pre-releases are ordered but strictly less than non-pre + return (1 if pre == '' + else -1 if pkg_pre == '' + else quick_check) + + # All else being equal, compare additional information lexically + return _cmp(post, pkg_post) def pkg_commit_hash(pkg_path=None): diff --git a/nibabel/tests/test_arrayproxy.py b/nibabel/tests/test_arrayproxy.py index 15e69a1829..527f9b8f91 100644 --- a/nibabel/tests/test_arrayproxy.py +++ b/nibabel/tests/test_arrayproxy.py @@ -112,9 +112,6 @@ def test_tuplespec(): ): assert_array_equal(getattr(ap_header, method)(*args), getattr(ap_tuple, method)(*args)) - # Tuple-defined ArrayProxies have no header to store - with warnings.catch_warnings(): - assert_true(ap_tuple.header is None) # Partial tuples of length 2-4 are also valid for n in range(2, 5): ArrayProxy(bio, tuple_spec[:n]) @@ -141,10 +138,6 @@ def test_nifti1_init(): ap = ArrayProxy(bio, hdr) assert_true(ap.file_like == bio) assert_equal(ap.shape, shape) - # Check there has been a copy of the header - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - assert_false(ap.header is hdr) # Get the data assert_array_equal(np.asarray(ap), arr * 2.0 + 10) with 
InTemporaryDirectory(): diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py index 10b61628c7..f91b61af9e 100644 --- a/nibabel/tests/test_image_api.py +++ b/nibabel/tests/test_image_api.py @@ -47,6 +47,7 @@ from numpy.testing import assert_almost_equal, assert_array_equal, assert_warns, assert_allclose from ..testing import clear_and_catch_warnings from ..tmpdirs import InTemporaryDirectory +from ..deprecator import ExpiredDeprecationError from .test_api_validators import ValidateAPI from .test_helpers import (bytesio_round_trip, bytesio_filemap, @@ -422,10 +423,8 @@ def validate_ndim(self, imaker, params): def validate_shape_deprecated(self, imaker, params): # Check deprecated get_shape API img = imaker() - with clear_and_catch_warnings() as w: - warnings.simplefilter('always', DeprecationWarning) - assert_equal(img.get_shape(), params['shape']) - assert_equal(len(w), 1) + with assert_raises(ExpiredDeprecationError): + img.get_shape() def validate_mmap_parameter(self, imaker, params): img = imaker() diff --git a/nibabel/tests/test_minc1.py b/nibabel/tests/test_minc1.py index c5d92119ae..eb9ff15cab 100644 --- a/nibabel/tests/test_minc1.py +++ b/nibabel/tests/test_minc1.py @@ -26,6 +26,7 @@ from ..tmpdirs import InTemporaryDirectory from ..testing import (assert_true, assert_equal, assert_false, assert_raises, assert_warns, assert_array_equal, data_path, clear_and_catch_warnings) +from ..deprecator import ExpiredDeprecationError from . import test_spatialimages as tsi from .test_fileslice import slicer_samples @@ -105,25 +106,16 @@ def test_old_namespace(): arr = np.arange(24).reshape((2, 3, 4)) aff = np.diag([2, 3, 4, 1]) - with clear_and_catch_warnings() as warns: - from .. 
import Minc1Image, MincImage - assert_equal(warns, []) - # But the old named import, imported from new, is not the same - assert_false(Minc1Image is MincImage) - assert_equal(warns, []) - # Create object using old name - mimg = MincImage(arr, aff) - # Call to create object created warning - assert_equal(warns.pop(0).category, FutureWarning) - assert_array_equal(mimg.get_fdata(), arr) - # Another old name - from ..minc1 import MincFile, Minc1File - assert_false(MincFile is Minc1File) + from .. import Minc1Image, MincImage + assert_false(Minc1Image is MincImage) + with assert_raises(ExpiredDeprecationError): + MincImage(arr, aff) assert_equal(warns, []) + # Another old name + from ..minc1 import MincFile, Minc1File + assert_false(MincFile is Minc1File) + with assert_raises(ExpiredDeprecationError): mf = MincFile(netcdf_file(EG_FNAME)) - # Call to create object created warning - assert_equal(warns.pop(0).category, FutureWarning) - assert_equal(mf.get_data_shape(), (10, 20, 20)) class _TestMincFile(object): diff --git a/nibabel/tests/test_pkg_info.py b/nibabel/tests/test_pkg_info.py index 1a5775d33f..1cbf5f9096 100644 --- a/nibabel/tests/test_pkg_info.py +++ b/nibabel/tests/test_pkg_info.py @@ -63,6 +63,14 @@ def test_cmp_pkg_version(): ('1.2.1rc', '1.2.1rc1', -1), ('1.2.1b', '1.2.1a', 1), ('1.2.1a', '1.2.1b', -1), + ('1.2.0+1', '1.2', 1), + ('1.2', '1.2.0+1', -1), + ('1.2.1+1', '1.2.1', 1), + ('1.2.1', '1.2.1+1', -1), + ('1.2.1rc1+1', '1.2.1', -1), + ('1.2.1', '1.2.1rc1+1', 1), + ('1.2.1rc1+1', '1.2.1+1', -1), + ('1.2.1+1', '1.2.1rc1+1', 1), ): assert_equal(cmp_pkg_version(test_ver, pkg_ver), exp_out) assert_raises(ValueError, cmp_pkg_version, 'foo.2') diff --git a/nibabel/tests/test_proxy_api.py b/nibabel/tests/test_proxy_api.py index 48a024795d..d7ca111f22 100644 --- a/nibabel/tests/test_proxy_api.py +++ b/nibabel/tests/test_proxy_api.py @@ -58,6 +58,7 @@ from numpy.testing import assert_almost_equal, assert_array_equal, assert_allclose from ..testing import 
data_path as DATA_PATH, assert_dt_equal, clear_and_catch_warnings +from ..deprecator import ExpiredDeprecationError from ..tmpdirs import InTemporaryDirectory @@ -324,12 +325,8 @@ def validate_slope_inter_offset(self, pmaker, params): def validate_deprecated_header(self, pmaker, params): prox, fio, hdr = pmaker() - with warnings.catch_warnings(record=True) as warns: - warnings.simplefilter("always") - # Header is a copy of original - assert_false(prox.header is hdr) - assert_equal(prox.header, hdr) - assert_equal(warns.pop(0).category, DeprecationWarning) + with assert_raises(ExpiredDeprecationError): + prox.header class TestSpm99AnalyzeProxyAPI(TestAnalyzeProxyAPI): diff --git a/nibabel/tests/test_scaling.py b/nibabel/tests/test_scaling.py index 019cc58d1c..cd66bbfe3a 100644 --- a/nibabel/tests/test_scaling.py +++ b/nibabel/tests/test_scaling.py @@ -11,11 +11,12 @@ import numpy as np from io import BytesIO -from ..volumeutils import (calculate_scale, scale_min_max, finite_range, - apply_read_scaling, array_to_file, array_from_file) +from ..volumeutils import finite_range, apply_read_scaling, array_to_file, array_from_file from ..casting import type_info from ..testing import suppress_warnings +from .test_volumeutils import _calculate_scale + from numpy.testing import (assert_array_almost_equal, assert_array_equal) from nose.tools import (assert_true, assert_equal, assert_raises, @@ -26,56 +27,6 @@ DEBUG = True -def test_scale_min_max(): - mx_dt = np.maximum_sctype(np.float) - for tp in np.sctypes['uint'] + np.sctypes['int']: - info = np.iinfo(tp) - # Need to pump up to max fp type to contain python longs - imin = np.array(info.min, dtype=mx_dt) - imax = np.array(info.max, dtype=mx_dt) - value_pairs = ( - (0, imax), - (imin, 0), - (imin, imax), - (1, 10), - (-1, -1), - (1, 1), - (-10, -1), - (-100, 10)) - for mn, mx in value_pairs: - # with intercept - scale, inter = scale_min_max(mn, mx, tp, True) - if mx - mn: - assert_array_almost_equal, (mx - inter) / scale, 
imax - assert_array_almost_equal, (mn - inter) / scale, imin - else: - assert_equal, (scale, inter), (1.0, mn) - # without intercept - if imin == 0 and mn < 0 and mx > 0: - (assert_raises, ValueError, - scale_min_max, mn, mx, tp, False) - continue - scale, inter = scale_min_max(mn, mx, tp, False) - assert_equal, inter, 0.0 - if mn == 0 and mx == 0: - assert_equal, scale, 1.0 - continue - sc_mn = mn / scale - sc_mx = mx / scale - assert_true, sc_mn >= imin - assert_true, sc_mx <= imax - if imin == 0: - if mx > 0: # numbers all +ve - assert_array_almost_equal, mx / scale, imax - else: # numbers all -ve - assert_array_almost_equal, mn / scale, imax - continue - if abs(mx) >= abs(mn): - assert_array_almost_equal, mx / scale, imax - else: - assert_array_almost_equal, mn / scale, imin - - def test_finite_range(): # Finite range utility function for in_arr, res in ( @@ -122,26 +73,6 @@ def test_finite_range(): assert_raises(TypeError, finite_range, a) -def test_calculate_scale(): - # Test for special cases in scale calculation - npa = np.array - # Here the offset handles it - res = calculate_scale(npa([-2, -1], dtype=np.int8), np.uint8, True) - assert_equal(res, (1.0, -2.0, None, None)) - # Not having offset not a problem obviously - res = calculate_scale(npa([-2, -1], dtype=np.int8), np.uint8, 0) - assert_equal(res, (-1.0, 0.0, None, None)) - # Case where offset handles scaling - res = calculate_scale(npa([-1, 1], dtype=np.int8), np.uint8, 1) - assert_equal(res, (1.0, -1.0, None, None)) - # Can't work for no offset case - assert_raises(ValueError, - calculate_scale, npa([-1, 1], dtype=np.int8), np.uint8, 0) - # Offset trick can't work when max is out of range - res = calculate_scale(npa([-1, 255], dtype=np.int16), np.uint8, 1) - assert_not_equal(res, (1.0, -1.0, None, None)) - - def test_a2f_mn_mx(): # Test array to file mn, mx handling str_io = BytesIO() @@ -213,9 +144,9 @@ def test_array_file_scales(): info = type_info(in_type) arr[0], arr[1] = info['min'], info['max'] 
if not err is None: - assert_raises(err, calculate_scale, arr, out_dtype, True) + assert_raises(err, _calculate_scale, arr, out_dtype, True) continue - slope, inter, mn, mx = calculate_scale(arr, out_dtype, True) + slope, inter, mn, mx = _calculate_scale(arr, out_dtype, True) array_to_file(arr, bio, out_type, 0, inter, slope, mn, mx) bio.seek(0) arr2 = array_from_file(arr.shape, out_dtype, bio) @@ -266,7 +197,7 @@ def check_int_a2f(in_type, out_type): data[1] = this_max + 0j str_io = BytesIO() try: - scale, inter, mn, mx = calculate_scale(data, out_type, True) + scale, inter, mn, mx = _calculate_scale(data, out_type, True) except ValueError as e: if DEBUG: print(in_type, out_type, e) diff --git a/nibabel/tests/test_spatialimages.py b/nibabel/tests/test_spatialimages.py index 7d275e3366..8b11e5cc51 100644 --- a/nibabel/tests/test_spatialimages.py +++ b/nibabel/tests/test_spatialimages.py @@ -28,6 +28,7 @@ from ..testing import (clear_and_catch_warnings, suppress_warnings, memmap_after_ufunc) from ..tmpdirs import InTemporaryDirectory +from ..deprecator import ExpiredDeprecationError from .. 
import load as top_load @@ -284,8 +285,8 @@ def test_data_shape(self): img = img_klass(arr, np.eye(4)) # Shape may be promoted to higher dimension, but may not reorder or # change size - assert_equal(img.get_shape()[:1], (4,)) - assert_equal(np.prod(img.get_shape()), 4) + assert_equal(img.shape[:1], (4,)) + assert_equal(np.prod(img.shape), 4) img = img_klass(np.zeros((2, 3, 4), dtype=np.float32), np.eye(4)) assert_equal(img.shape, (2, 3, 4)) @@ -305,19 +306,13 @@ def test_str(self): assert_true(len(str(img)) > 0) def test_get_shape(self): - # Check there is a get_shape method - # (it is deprecated) + # Check that get_shape raises an ExpiredDeprecationError img_klass = self.image_class # Assumes all possible images support int16 # See https://github.com/nipy/nibabel/issues/58 img = img_klass(np.arange(1, dtype=np.int16), np.eye(4)) - with suppress_warnings(): - # Shape may be promoted to higher dimension, but may not reorder or - # change size - assert_equal(img.get_shape()[:1], (1,)) - assert_equal(np.prod(img.get_shape()), 1) - img = img_klass(np.zeros((2, 3, 4), np.int16), np.eye(4)) - assert_equal(img.get_shape(), (2, 3, 4)) + with assert_raises(ExpiredDeprecationError): + img.get_shape() def test_get_fdata(self): # Test array image and proxy image interface for floating point data @@ -568,18 +563,14 @@ def from_file_map(self, file_map=None): bio = BytesIO() file_map = FakeImage.make_file_map({'image': bio}) - with clear_and_catch_warnings() as w: - warnings.simplefilter('always', DeprecationWarning) + with assert_raises(ExpiredDeprecationError): img.to_files(file_map) - assert_equal(len(w), 1) + with assert_raises(ExpiredDeprecationError): img.to_filespec('an_image') - assert_equal(len(w), 2) - img = FakeImage.from_files(file_map) - assert_equal(len(w), 3) - file_map = FakeImage.filespec_to_files('an_image') - assert_equal(list(file_map), ['image']) - assert_equal(file_map['image'].filename, 'an_image.foo') - assert_equal(len(w), 4) + with 
assert_raises(ExpiredDeprecationError): + FakeImage.from_files(file_map) + with assert_raises(ExpiredDeprecationError): + FakeImage.filespec_to_files('an_image') class MmapImageMixin(object): diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py index 4072f85131..d391abf359 100644 --- a/nibabel/tests/test_volumeutils.py +++ b/nibabel/tests/test_volumeutils.py @@ -32,8 +32,9 @@ array_to_file, allopen, # for backwards compatibility fname_ext_ul_case, - calculate_scale, - can_cast, + calculate_scale, # Deprecated + can_cast, # Deprecated + scale_min_max, # Deprecated write_zeros, seek_tell, apply_read_scaling, @@ -52,8 +53,11 @@ from ..openers import Opener, BZ2File from ..casting import (floor_log2, type_info, OK_FLOATS, shared_range) +from ..deprecator import ExpiredDeprecationError + from numpy.testing import (assert_array_almost_equal, assert_array_equal) +from nose.tools import assert_raises from ..testing_pytest import (assert_dt_equal, assert_allclose_safely, suppress_warnings, clear_and_catch_warnings) @@ -67,6 +71,15 @@ NUMERIC_TYPES = CFLOAT_TYPES + IUINT_TYPES +def test_deprecated_functions(): + with assert_raises(ExpiredDeprecationError): + scale_min_max(0, 1, np.uint8, True) + with assert_raises(ExpiredDeprecationError): + calculate_scale(np.array([-2, -1], dtype=np.int8), np.uint8, True) + with assert_raises(ExpiredDeprecationError): + can_cast(np.float32, np.float32) + + def test__is_compressed_fobj(): # _is_compressed helper function with InTemporaryDirectory(): @@ -294,9 +307,7 @@ def test_array_to_file(): for code in '<>': ndt = dt.newbyteorder(code) for allow_intercept in (True, False): - with suppress_warnings(): # deprecated - scale, intercept, mn, mx = \ - calculate_scale(arr, ndt, allow_intercept) + scale, intercept, mn, mx = _calculate_scale(arr, ndt, allow_intercept) data_back = write_return(arr, str_io, ndt, 0, intercept, scale) assert_array_almost_equal(arr, data_back) @@ -875,28 +886,6 @@ def 
test_best_write_scale_ftype(): assert best_write_scale_ftype(arr, lower_t(0.5), 0) == lower_t -def test_can_cast(): - tests = ((np.float32, np.float32, True, True, True), - (np.float64, np.float32, True, True, True), - (np.complex128, np.float32, False, False, False), - (np.float32, np.complex128, True, True, True), - (np.float32, np.uint8, False, True, True), - (np.uint32, np.complex128, True, True, True), - (np.int64, np.float32, True, True, True), - (np.complex128, np.int16, False, False, False), - (np.float32, np.int16, False, True, True), - (np.uint8, np.int16, True, True, True), - (np.uint16, np.int16, False, True, True), - (np.int16, np.uint16, False, False, True), - (np.int8, np.uint16, False, False, True), - (np.uint16, np.uint8, False, True, True), - ) - for intype, outtype, def_res, scale_res, all_res in tests: - assert def_res == can_cast(intype, outtype) - assert scale_res == can_cast(intype, outtype, False, True) - assert all_res == can_cast(intype, outtype, True, True) - - def test_write_zeros(): bio = BytesIO() write_zeros(bio, 10000) @@ -1291,3 +1280,49 @@ def run(self): if err: raise err[0] + + +def _calculate_scale(data, out_dtype, allow_intercept): + ''' Calculate scaling and optional intercept for data + + Copy of the deprecated volumeutils.calculate_scale, to preserve tests + + Parameters + ---------- + data : array + out_dtype : dtype + output data type in some form understood by ``np.dtype`` + allow_intercept : bool + If True allow non-zero intercept + + Returns + ------- + scaling : None or float + scalefactor to divide into data. None if no valid data + intercept : None or float + intercept to subtract from data. 
None if no valid data + mn : None or float + minimum of finite value in data or None if this will not + be used to threshold data + mx : None or float + maximum of finite value in data, or None if this will not + be used to threshold data + ''' + # Code here is a compatibility shell around arraywriters refactor + in_dtype = data.dtype + out_dtype = np.dtype(out_dtype) + if np.can_cast(in_dtype, out_dtype): + return 1.0, 0.0, None, None + from ..arraywriters import make_array_writer, WriterError, get_slope_inter + try: + writer = make_array_writer(data, out_dtype, True, allow_intercept) + except WriterError as e: + raise ValueError(str(e)) + if out_dtype.kind in 'fc': + return (1.0, 0.0, None, None) + mn, mx = writer.finite_range() + if (mn, mx) == (np.inf, -np.inf): # No valid data + return (None, None, None, None) + if in_dtype.kind not in 'fc': + mn, mx = (None, None) + return get_slope_inter(writer) + (mn, mx) diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py index 2cc083ecb6..41d248a671 100644 --- a/nibabel/volumeutils.py +++ b/nibabel/volumeutils.py @@ -400,17 +400,17 @@ def can_cast(in_type, out_type, has_intercept=False, has_slope=False): Examples -------- - >>> can_cast(np.float64, np.float32) + >>> can_cast(np.float64, np.float32) # doctest: +SKIP True - >>> can_cast(np.complex128, np.float32) + >>> can_cast(np.complex128, np.float32) # doctest: +SKIP False - >>> can_cast(np.int64, np.float32) + >>> can_cast(np.int64, np.float32) # doctest: +SKIP True - >>> can_cast(np.float32, np.int16) + >>> can_cast(np.float32, np.int16) # doctest: +SKIP False - >>> can_cast(np.float32, np.int16, False, True) + >>> can_cast(np.float32, np.int16, False, True) # doctest: +SKIP True - >>> can_cast(np.int16, np.uint8) + >>> can_cast(np.int16, np.uint8) # doctest: +SKIP False Whether we can actually cast int to uint when we don't have an intercept @@ -420,9 +420,9 @@ def can_cast(in_type, out_type, has_intercept=False, has_slope=False): Here we need an intercept
to scale the full range of an int to a uint - >>> can_cast(np.int16, np.uint8, False, True) + >>> can_cast(np.int16, np.uint8, False, True) # doctest: +SKIP False - >>> can_cast(np.int16, np.uint8, True, True) + >>> can_cast(np.int16, np.uint8, True, True) # doctest: +SKIP True ''' in_dtype = np.dtype(in_type) @@ -1094,26 +1094,26 @@ def scale_min_max(mn, mx, out_type, allow_intercept): Examples -------- - >>> scale_min_max(0, 255, np.uint8, False) + >>> scale_min_max(0, 255, np.uint8, False) # doctest: +SKIP (1.0, 0.0) - >>> scale_min_max(-128, 127, np.int8, False) + >>> scale_min_max(-128, 127, np.int8, False) # doctest: +SKIP (1.0, 0.0) - >>> scale_min_max(0, 127, np.int8, False) + >>> scale_min_max(0, 127, np.int8, False) # doctest: +SKIP (1.0, 0.0) - >>> scaling, intercept = scale_min_max(0, 127, np.int8, True) - >>> np.allclose((0 - intercept) / scaling, -128) + >>> scaling, intercept = scale_min_max(0, 127, np.int8, True) # doctest: +SKIP + >>> np.allclose((0 - intercept) / scaling, -128) # doctest: +SKIP True - >>> np.allclose((127 - intercept) / scaling, 127) + >>> np.allclose((127 - intercept) / scaling, 127) # doctest: +SKIP True - >>> scaling, intercept = scale_min_max(-10, -1, np.int8, True) - >>> np.allclose((-10 - intercept) / scaling, -128) + >>> scaling, intercept = scale_min_max(-10, -1, np.int8, True) # doctest: +SKIP + >>> np.allclose((-10 - intercept) / scaling, -128) # doctest: +SKIP True - >>> np.allclose((-1 - intercept) / scaling, 127) + >>> np.allclose((-1 - intercept) / scaling, 127) # doctest: +SKIP True - >>> scaling, intercept = scale_min_max(1, 10, np.int8, True) - >>> np.allclose((1 - intercept) / scaling, -128) + >>> scaling, intercept = scale_min_max(1, 10, np.int8, True) # doctest: +SKIP + >>> np.allclose((1 - intercept) / scaling, -128) # doctest: +SKIP True - >>> np.allclose((10 - intercept) / scaling, 127) + >>> np.allclose((10 - intercept) / scaling, 127) # doctest: +SKIP True Notes