From e64857f992da1d77dd7f3fbfaa6d0faec1479b55 Mon Sep 17 00:00:00 2001 From: Michael Schellenberger Costa Date: Wed, 29 May 2024 11:45:01 +0200 Subject: [PATCH 1/3] Modernize `mdspan` from upstream --- .../cuda/std/__algorithm/comp_ref_type.h | 4 +- libcudacxx/include/cuda/std/__fwd/mdspan.h | 73 ++ .../cuda/std/__mdspan/compressed_pair.h | 256 ---- .../include/cuda/std/__mdspan/concepts.h | 103 ++ libcudacxx/include/cuda/std/__mdspan/config.h | 276 ----- .../cuda/std/__mdspan/default_accessor.h | 80 +- .../cuda/std/__mdspan/dynamic_extent.h | 92 -- .../include/cuda/std/__mdspan/extents.h | 1057 ++++++++++------- .../include/cuda/std/__mdspan/full_extent_t.h | 74 -- .../include/cuda/std/__mdspan/layout_left.h | 368 +++--- .../include/cuda/std/__mdspan/layout_right.h | 366 +++--- .../include/cuda/std/__mdspan/layout_stride.h | 951 ++++++++------- libcudacxx/include/cuda/std/__mdspan/macros.h | 646 ---------- .../cuda/std/__mdspan/maybe_static_value.h | 165 --- libcudacxx/include/cuda/std/__mdspan/mdspan.h | 654 +++++----- .../cuda/std/__mdspan/no_unique_address.h | 149 --- .../__mdspan/standard_layout_static_array.h | 700 ----------- .../include/cuda/std/__mdspan/static_array.h | 296 ----- .../include/cuda/std/__mdspan/submdspan.h | 530 --------- .../cuda/std/__tuple_dir/sfinae_helpers.h | 6 +- .../include/cuda/std/__tuple_dir/tuple_like.h | 32 +- .../include/cuda/std/__type_traits/fold.h | 88 ++ .../cuda/std/detail/libcxx/include/tuple | 4 +- libcudacxx/include/cuda/std/mdspan | 11 +- .../containers/compressed_pair.pass.cpp | 45 - .../libcxx/iterators/unwrap_iter.pass.cpp | 4 + .../containers/views/mdspan/CommonHelpers.h | 100 ++ .../views/mdspan/ConvertibleToIntegral.h | 99 ++ .../views/mdspan/CustomTestLayouts.h | 569 +++++++++ .../views/mdspan/MinimalElementType.h | 116 ++ .../mdspan/default_accessor/access.pass.cpp | 59 + .../default_accessor/ctor.conversion.pass.cpp | 82 ++ .../default_accessor/ctor.default.pass.cpp | 49 + 
.../default_accessor/element_type.verify.cpp | 47 + .../mdspan/default_accessor/offset.pass.cpp | 60 + .../mdspan/default_accessor/types.pass.cpp | 56 + .../mdspan/extents/CtorTestCombinations.h | 130 ++ .../views/mdspan/extents/comparison.pass.cpp | 104 ++ .../views/mdspan/extents/conversion.pass.cpp | 154 +++ .../views/mdspan/extents/ctad.pass.cpp | 51 + .../mdspan/extents/ctor_default.pass.cpp | 70 ++ .../mdspan/extents/ctor_from_array.pass.cpp | 132 ++ .../extents/ctor_from_integral.pass.cpp | 79 ++ .../mdspan/extents/ctor_from_span.pass.cpp | 136 +++ .../views/mdspan/extents/dextents.pass.cpp | 43 + .../views/mdspan/extents/obs_static.pass.cpp | 113 ++ .../views/mdspan/extents/types.pass.cpp | 103 ++ .../views/mdspan/foo_customizations.hpp | 252 ---- .../mdspan/layout_left/comparison.pass.cpp | 144 +++ .../mdspan/layout_left/ctor.default.pass.cpp | 74 ++ .../mdspan/layout_left/ctor.extents.pass.cpp | 75 ++ .../layout_left/ctor.layout_right.pass.cpp | 149 +++ .../layout_left/ctor.layout_stride.pass.cpp | 144 +++ .../mdspan/layout_left/ctor.mapping.pass.cpp | 156 +++ .../mdspan/layout_left/extents.verify.cpp | 48 + .../layout_left/index_operator.pass.cpp | 141 +++ .../mdspan/layout_left/properties.pass.cpp | 70 ++ .../layout_left/required_span_size.pass.cpp | 54 + .../layout_left/static_requirements.pass.cpp | 150 +++ .../views/mdspan/layout_left/stride.pass.cpp | 57 + .../mdspan/layout_right/comparison.pass.cpp | 144 +++ .../mdspan/layout_right/ctor.default.pass.cpp | 74 ++ .../mdspan/layout_right/ctor.extents.pass.cpp | 75 ++ .../layout_right/ctor.layout_left.pass.cpp | 149 +++ .../layout_right/ctor.layout_stride.pass.cpp | 141 +++ .../mdspan/layout_right/ctor.mapping.pass.cpp | 156 +++ .../mdspan/layout_right/extents.verify.cpp | 48 + .../layout_right/index_operator.pass.cpp | 143 +++ .../mdspan/layout_right/properties.pass.cpp | 70 ++ .../layout_right/required_span_size.pass.cpp | 54 + .../layout_right/static_requirements.pass.cpp | 150 +++ 
.../views/mdspan/layout_right/stride.pass.cpp | 57 + .../mdspan/layout_stride/comparison.pass.cpp | 303 +++++ .../layout_stride/ctor.default.pass.cpp | 98 ++ .../layout_stride/ctor.extents_array.pass.cpp | 167 +++ .../layout_stride/ctor.extents_span.pass.cpp | 170 +++ .../ctor.strided_mapping.pass.cpp | 219 ++++ .../mdspan/layout_stride/deduction.pass.cpp | 68 ++ .../mdspan/layout_stride/extents.verify.cpp | 47 + .../layout_stride/index_operator.pass.cpp | 154 +++ .../is_exhaustive_corner_case.pass.cpp | 59 + .../mdspan/layout_stride/properties.pass.cpp | 141 +++ .../layout_stride/required_span_size.pass.cpp | 63 + .../static_requirements.pass.cpp | 143 +++ .../mdspan/layout_stride/stride.pass.cpp | 66 + .../access.pass.cpp | 28 - .../copy.pass.cpp | 31 - .../offset.pass.cpp | 28 - .../mdspan.extents.cmp/compare.pass.cpp | 58 - .../mdspan/mdspan.extents.cons/array.pass.cpp | 73 -- .../convertible_to_size_t.pass.cpp | 46 - .../mdspan/mdspan.extents.cons/copy.pass.cpp | 61 - .../mdspan.extents.cons/default.pass.cpp | 45 - .../mdspan.extents.cons/param_pack.pass.cpp | 91 -- .../mdspan/mdspan.extents.cons/span.pass.cpp | 74 -- .../mdspan/mdspan.extents.obs/extent.pass.cpp | 66 - .../mdspan/mdspan.extents.obs/rank.pass.cpp | 65 - .../mdspan.extents.obs/static_extent.pass.cpp | 64 - .../extents_element.fail.cpp | 45 - .../index_type.fail.cpp | 44 - .../mdspan.extents.util/extents_util.hpp | 43 - .../mdspan.layout.left.cons/copy.pass.cpp | 49 - .../layout_right_init.pass.cpp | 55 - .../layout_stride_init.pass.cpp | 51 - .../list_init.pass.cpp | 70 -- .../mdspan.layout.left.obs/compare.fail.cpp | 34 - .../mdspan.layout.left.obs/compare.pass.cpp | 59 - .../mdspan.layout.left.obs/extents.pass.cpp | 49 - .../is_exhaustive.pass.cpp | 38 - .../is_strided.pass.cpp | 29 - .../mdspan.layout.left.obs/is_unique.pass.cpp | 38 - .../mdspan.layout.left.obs/paren_op.pass.cpp | 77 -- .../required_span_size.pass.cpp | 45 - .../mdspan.layout.left.obs/stride.pass.cpp | 56 - 
.../mdspan.layout.right.cons/copy.pass.cpp | 49 - .../layout_left_init.pass.cpp | 55 - .../layout_stride_init.pass.cpp | 51 - .../list_init.pass.cpp | 70 -- .../mdspan.layout.right.obs/compare.fail.cpp | 34 - .../mdspan.layout.right.obs/compare.pass.cpp | 59 - .../mdspan.layout.right.obs/extents.pass.cpp | 49 - .../is_exhaustive.pass.cpp | 38 - .../is_strided.pass.cpp | 29 - .../is_unique.pass.cpp | 38 - .../mdspan.layout.right.obs/paren_op.pass.cpp | 77 -- .../required_span_size.pass.cpp | 45 - .../mdspan.layout.right.obs/stride.pass.cpp | 56 - .../list_init.pass.cpp | 61 - .../mdspan.layout.stride.obs/compare.fail.cpp | 37 - .../mdspan.layout.stride.obs/compare.pass.cpp | 84 -- .../mdspan.layout.stride.obs/extents.pass.cpp | 32 - .../is_exhaustive.pass.cpp | 77 -- .../is_strided.pass.cpp | 29 - .../is_unique.pass.cpp | 41 - .../paren_op.pass.cpp | 87 -- .../required_span_size.pass.cpp | 64 - .../mdspan.layout.stride.obs/stride.pass.cpp | 69 -- .../mdspan.layout.stride.obs/strides.pass.cpp | 53 - .../mdspan/mdspan.layout.util/layout_util.hpp | 166 --- .../array_init_extents.pass.cpp | 107 -- .../mdspan/mdspan.mdspan.cons/copy.pass.cpp | 78 -- .../mdspan.mdspan.cons/ctad_c_array.pass.cpp | 51 - .../ctad_const_c_array.pass.cpp | 40 - .../mdspan.mdspan.cons/ctad_copy.pass.cpp | 37 - .../mdspan.mdspan.cons/ctad_extents.pass.cpp | 76 -- .../ctad_extents_pack.pass.cpp | 38 - .../mdspan.mdspan.cons/ctad_layouts.pass.cpp | 59 - .../mdspan.mdspan.cons/ctad_mapping.pass.cpp | 53 - .../mdspan.mdspan.cons/ctad_pointer.pass.cpp | 57 - .../custom_accessor.pass.cpp | 43 - .../mdspan.mdspan.cons/custom_layout.pass.cpp | 36 - .../mdspan.mdspan.cons/data_c_array.pass.cpp | 40 - .../mdspan.mdspan.cons/default.pass.cpp | 39 - .../mdspan.mdspan.cons/extents.pass.cpp | 80 -- .../mdspan.mdspan.cons/extents_pack.pass.cpp | 111 -- .../list_init_layout_left.pass.cpp | 40 - .../list_init_layout_right.pass.cpp | 40 - .../list_init_layout_stride.pass.cpp | 38 - 
.../mdspan.mdspan.cons/mapping.pass.cpp | 76 -- .../span_init_extents.pass.cpp | 97 -- .../mdspan.mdspan.members/accessor.pass.cpp | 34 - .../brackets_op.pass.cpp | 167 --- .../data_handle.pass.cpp | 70 -- .../mdspan.mdspan.members/empty.pass.cpp | 51 - .../mdspan.mdspan.members/extent.pass.cpp | 64 - .../mdspan.mdspan.members/extents.pass.cpp | 29 - .../is_exhaustive.pass.cpp | 48 - .../mdspan.mdspan.members/is_strided.pass.cpp | 48 - .../mdspan.mdspan.members/is_unique.pass.cpp | 45 - .../mdspan.mdspan.members/mapping.pass.cpp | 46 - .../mdspan.mdspan.members/rank.pass.cpp | 54 - .../mdspan.mdspan.members/size.pass.cpp | 48 - .../mdspan.mdspan.members/stride.pass.cpp | 57 - .../mdspan.mdspan.members/swap.pass.cpp | 89 -- .../mdspan/mdspan.mdspan.util/mdspan_util.hpp | 16 - .../dim_reduction.pass.cpp | 74 -- .../pair_init.pass.cpp | 35 - .../return_type.pass.cpp | 385 ------ .../tuple_init.pass.cpp | 35 - .../views/mdspan/mdspan/CustomTestAccessors.h | 421 +++++++ .../views/mdspan/mdspan/CustomTestLayouts.h | 568 +++++++++ .../views/mdspan/mdspan/assign.pass.cpp | 158 +++ .../views/mdspan/mdspan/conversion.pass.cpp | 383 ++++++ .../views/mdspan/mdspan/conversion.verify.cpp | 74 ++ .../views/mdspan/mdspan/ctor.copy.pass.cpp | 135 +++ .../views/mdspan/mdspan/ctor.default.pass.cpp | 168 +++ .../mdspan/mdspan/ctor.dh_array.pass.cpp | 259 ++++ .../mdspan/mdspan/ctor.dh_extents.pass.cpp | 183 +++ .../mdspan/mdspan/ctor.dh_integers.pass.cpp | 208 ++++ .../views/mdspan/mdspan/ctor.dh_map.pass.cpp | 165 +++ .../mdspan/mdspan/ctor.dh_map_acc.pass.cpp | 171 +++ .../views/mdspan/mdspan/ctor.dh_span.pass.cpp | 259 ++++ .../views/mdspan/mdspan/ctor.move.pass.cpp | 141 +++ .../views/mdspan/mdspan/deduction.pass.cpp | 224 ++++ .../mdspan/mdspan/element_type.verify.cpp | 57 + .../views/mdspan/mdspan/extents.verify.cpp | 34 + .../mdspan/mdspan/index_operator.pass.cpp | 342 ++++++ .../views/mdspan/mdspan/mapping.verify.cpp | 34 + .../views/mdspan/mdspan/move.pass.cpp | 138 +++ 
.../views/mdspan/mdspan/properties.pass.cpp | 260 ++++ .../views/mdspan/mdspan/swap.pass.cpp | 72 ++ .../views/mdspan/mdspan/types.pass.cpp | 227 ++++ .../containers/views/mdspan/my_accessor.hpp | 43 - .../std/containers/views/mdspan/my_int.hpp | 65 - pyproject.toml | 2 +- 205 files changed, 13482 insertions(+), 10757 deletions(-) create mode 100644 libcudacxx/include/cuda/std/__fwd/mdspan.h delete mode 100644 libcudacxx/include/cuda/std/__mdspan/compressed_pair.h create mode 100644 libcudacxx/include/cuda/std/__mdspan/concepts.h delete mode 100644 libcudacxx/include/cuda/std/__mdspan/config.h delete mode 100644 libcudacxx/include/cuda/std/__mdspan/dynamic_extent.h delete mode 100644 libcudacxx/include/cuda/std/__mdspan/full_extent_t.h delete mode 100644 libcudacxx/include/cuda/std/__mdspan/macros.h delete mode 100644 libcudacxx/include/cuda/std/__mdspan/maybe_static_value.h delete mode 100644 libcudacxx/include/cuda/std/__mdspan/no_unique_address.h delete mode 100644 libcudacxx/include/cuda/std/__mdspan/standard_layout_static_array.h delete mode 100644 libcudacxx/include/cuda/std/__mdspan/static_array.h delete mode 100644 libcudacxx/include/cuda/std/__mdspan/submdspan.h delete mode 100644 libcudacxx/test/libcudacxx/libcxx/containers/compressed_pair.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/CommonHelpers.h create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/ConvertibleToIntegral.h create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/CustomTestLayouts.h create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/MinimalElementType.h create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/default_accessor/access.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/default_accessor/ctor.conversion.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/default_accessor/ctor.default.pass.cpp create 
mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/default_accessor/element_type.verify.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/default_accessor/offset.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/default_accessor/types.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/CtorTestCombinations.h create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/comparison.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/conversion.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/ctad.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/ctor_default.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/ctor_from_array.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/ctor_from_integral.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/ctor_from_span.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/dextents.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/obs_static.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/types.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/foo_customizations.hpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/comparison.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/ctor.default.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/ctor.extents.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/ctor.layout_right.pass.cpp create mode 
100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/ctor.layout_stride.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/ctor.mapping.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/extents.verify.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/index_operator.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/properties.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/required_span_size.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/static_requirements.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/stride.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/comparison.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/ctor.default.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/ctor.extents.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/ctor.layout_left.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/ctor.layout_stride.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/ctor.mapping.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/extents.verify.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/index_operator.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/properties.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/required_span_size.pass.cpp create mode 100644 
libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/static_requirements.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/stride.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/comparison.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/ctor.default.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/ctor.extents_array.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/ctor.extents_span.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/ctor.strided_mapping.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/deduction.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/extents.verify.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/index_operator.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/is_exhaustive_corner_case.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/properties.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/required_span_size.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/static_requirements.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/stride.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.accessor.default.members/access.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.accessor.default.members/copy.pass.cpp delete mode 100644 
libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.accessor.default.members/offset.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.cmp/compare.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.cons/array.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.cons/convertible_to_size_t.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.cons/copy.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.cons/default.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.cons/param_pack.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.cons/span.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.obs/extent.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.obs/rank.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.obs/static_extent.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.overview/extents_element.fail.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.overview/index_type.fail.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.util/extents_util.hpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.cons/copy.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.cons/layout_right_init.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.cons/layout_stride_init.pass.cpp delete mode 100644 
libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.cons/list_init.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/compare.fail.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/compare.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/extents.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/is_exhaustive.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/is_strided.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/is_unique.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/paren_op.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/required_span_size.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/stride.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.cons/copy.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.cons/layout_left_init.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.cons/layout_stride_init.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.cons/list_init.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/compare.fail.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/compare.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/extents.pass.cpp delete mode 100644 
libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/is_exhaustive.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/is_strided.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/is_unique.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/paren_op.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/required_span_size.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/stride.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.cons/list_init.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/compare.fail.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/compare.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/extents.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/is_exhaustive.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/is_strided.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/is_unique.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/paren_op.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/required_span_size.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/stride.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/strides.pass.cpp 
delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.util/layout_util.hpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/array_init_extents.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/copy.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/ctad_c_array.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/ctad_const_c_array.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/ctad_copy.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/ctad_extents.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/ctad_extents_pack.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/ctad_layouts.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/ctad_mapping.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/ctad_pointer.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/custom_accessor.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/custom_layout.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/data_c_array.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/default.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/extents.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/extents_pack.pass.cpp delete mode 100644 
libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/list_init_layout_left.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/list_init_layout_right.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/list_init_layout_stride.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/mapping.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/span_init_extents.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/accessor.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/brackets_op.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/data_handle.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/empty.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/extent.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/extents.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/is_exhaustive.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/is_strided.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/is_unique.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/mapping.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/rank.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/size.pass.cpp delete mode 100644 
libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/stride.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/swap.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.util/mdspan_util.hpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.submdspan.submdspan/dim_reduction.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.submdspan.submdspan/pair_init.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.submdspan.submdspan/return_type.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.submdspan.submdspan/tuple_init.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/CustomTestAccessors.h create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/CustomTestLayouts.h create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/assign.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/conversion.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/conversion.verify.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.copy.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.default.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.dh_array.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.dh_extents.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.dh_integers.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.dh_map.pass.cpp create mode 100644 
libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.dh_map_acc.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.dh_span.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.move.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/deduction.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/element_type.verify.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/extents.verify.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/index_operator.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/mapping.verify.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/move.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/properties.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/swap.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/types.pass.cpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/my_accessor.hpp delete mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/my_int.hpp diff --git a/libcudacxx/include/cuda/std/__algorithm/comp_ref_type.h b/libcudacxx/include/cuda/std/__algorithm/comp_ref_type.h index bc760f288e9..abe7be8f34e 100644 --- a/libcudacxx/include/cuda/std/__algorithm/comp_ref_type.h +++ b/libcudacxx/include/cuda/std/__algorithm/comp_ref_type.h @@ -74,10 +74,10 @@ struct __debug_less #ifdef _CCCL_ENABLE_DEBUG_MODE template using __comp_ref_type = __debug_less<_Comp>; -#else +#else // ^^^ _LIBCUDACXX_ENABLE_DEBUG_MODE ^^^ / vvv !_LIBCUDACXX_ENABLE_DEBUG_MODE vvv template using __comp_ref_type = _Comp&; -#endif +#endif // !_LIBCUDACXX_ENABLE_DEBUG_MODE _LIBCUDACXX_END_NAMESPACE_STD diff 
--git a/libcudacxx/include/cuda/std/__fwd/mdspan.h b/libcudacxx/include/cuda/std/__fwd/mdspan.h new file mode 100644 index 00000000000..0a22db9b7a1 --- /dev/null +++ b/libcudacxx/include/cuda/std/__fwd/mdspan.h @@ -0,0 +1,73 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. +// +// Kokkos v. 4.0 +// Copyright (2022) National Technology & Engineering +// Solutions of Sandia, LLC (NTESS). +// +// Under the terms of Contract DE-NA0003525 with NTESS, +// the U.S. Government retains certain rights in this software. +// +//===---------------------------------------------------------------------===// + +#ifndef _LIBCUDACXX___FWD_MDSPAN_H +#define _LIBCUDACXX___FWD_MDSPAN_H + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include + +#if _CCCL_STD_VER >= 2014 + +_LIBCUDACXX_BEGIN_NAMESPACE_STD + +// Layout policy with a mapping which corresponds to FORTRAN-style array layouts +struct layout_left +{ + template + class mapping; +}; + +// Layout policy with a mapping which corresponds to C-style array layouts +struct layout_right +{ + template + class mapping; +}; + +// Layout policy with a unique mapping where strides are arbitrary +struct layout_stride +{ + template + class mapping; +}; + +// [mdspan.layout.policy.reqmts] +namespace __mdspan_detail +{ +template +_CCCL_INLINE_VAR constexpr bool __is_valid_layout_mapping = false; + +template +_CCCL_INLINE_VAR constexpr bool + __is_valid_layout_mapping<_Layout, _Extents, 
void_t>> = true; +} // namespace __mdspan_detail + +_LIBCUDACXX_END_NAMESPACE_STD + +#endif // _CCCL_STD_VER >= 2014 + +#endif // _LIBCUDACXX___FWD_MDSPAN_H diff --git a/libcudacxx/include/cuda/std/__mdspan/compressed_pair.h b/libcudacxx/include/cuda/std/__mdspan/compressed_pair.h deleted file mode 100644 index 3a1be7e0b8c..00000000000 --- a/libcudacxx/include/cuda/std/__mdspan/compressed_pair.h +++ /dev/null @@ -1,256 +0,0 @@ -/* -//@HEADER -// ************************************************************************ -// -// Kokkos v. 2.0 -// Copyright (2019) Sandia Corporation -// -// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, -// the U.S. Government retains certain rights in this software. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// 1. Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// 3. Neither the name of the Corporation nor the names of the -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL SANDIA CORPORATION OR THE -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -// Questions? Contact Christian R. Trott (crtrott@sandia.gov) -// -// ************************************************************************ -//@HEADER -*/ - -#ifndef _LIBCUDACXX___MDSPAN_COMPRESSED_PAIR_HPP -#define _LIBCUDACXX___MDSPAN_COMPRESSED_PAIR_HPP - -#include - -#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) -# pragma GCC system_header -#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) -# pragma clang system_header -#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) -# pragma system_header -#endif // no system header - -#include -#ifdef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS -# include -#endif // _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS -#include -#include - -_LIBCUDACXX_BEGIN_NAMESPACE_STD - -#if _CCCL_STD_VER > 2011 - -namespace __detail -{ - -// For no unique address emulation, this is the case taken when neither are empty. -// For real `[[no_unique_address]]`, this case is always taken. 
-template -struct __compressed_pair -{ - _CCCL_NO_UNIQUE_ADDRESS _Tp __t_val; - _CCCL_NO_UNIQUE_ADDRESS _Up __u_val; - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp& __first() noexcept - { - return __t_val; - } - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp const& __first() const noexcept - { - return __t_val; - } - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Up& __second() noexcept - { - return __u_val; - } - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Up const& __second() const noexcept - { - return __u_val; - } - - _CCCL_HIDE_FROM_ABI constexpr __compressed_pair() noexcept = default; - _CCCL_HIDE_FROM_ABI constexpr __compressed_pair(__compressed_pair const&) noexcept = default; - _CCCL_HIDE_FROM_ABI constexpr __compressed_pair(__compressed_pair&&) noexcept = default; - _CCCL_HIDE_FROM_ABI __MDSPAN_CONSTEXPR_14_DEFAULTED __compressed_pair& - operator=(__compressed_pair const&) noexcept = default; - _CCCL_HIDE_FROM_ABI __MDSPAN_CONSTEXPR_14_DEFAULTED __compressed_pair& - operator=(__compressed_pair&&) noexcept = default; - _CCCL_HIDE_FROM_ABI ~__compressed_pair() noexcept = default; - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr __compressed_pair(_TLike&& __t, _ULike&& __u) - : __t_val((_TLike&&) __t) - , __u_val((_ULike&&) __u) - {} -}; - -# ifdef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - -// First empty. 
-template -struct __compressed_pair< - _Tp, - _Up, - _CUDA_VSTD::enable_if_t<_CCCL_TRAIT(_CUDA_VSTD::is_empty, _Tp) && !_CCCL_TRAIT(_CUDA_VSTD::is_empty, _Up)>> - : private _Tp -{ - _Up __u_val; - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp& __first() noexcept - { - return *static_cast<_Tp*>(this); - } - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp const& __first() const noexcept - { - return *static_cast<_Tp const*>(this); - } - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Up& __second() noexcept - { - return __u_val; - } - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Up const& __second() const noexcept - { - return __u_val; - } - - _CCCL_HIDE_FROM_ABI constexpr __compressed_pair() noexcept = default; - _CCCL_HIDE_FROM_ABI constexpr __compressed_pair(__compressed_pair const&) noexcept = default; - _CCCL_HIDE_FROM_ABI constexpr __compressed_pair(__compressed_pair&&) noexcept = default; - _CCCL_HIDE_FROM_ABI __MDSPAN_CONSTEXPR_14_DEFAULTED __compressed_pair& - operator=(__compressed_pair const&) noexcept = default; - _CCCL_HIDE_FROM_ABI __MDSPAN_CONSTEXPR_14_DEFAULTED __compressed_pair& - operator=(__compressed_pair&&) noexcept = default; - _CCCL_HIDE_FROM_ABI ~__compressed_pair() noexcept = default; - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr __compressed_pair(_TLike&& __t, _ULike&& __u) - : _Tp((_TLike&&) __t) - , __u_val((_ULike&&) __u) - {} -}; - -// Second empty. 
-template -struct __compressed_pair< - _Tp, - _Up, - _CUDA_VSTD::enable_if_t> - : private _Up -{ - _Tp __t_val; - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp& __first() noexcept - { - return __t_val; - } - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp const& __first() const noexcept - { - return __t_val; - } - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Up& __second() noexcept - { - return *static_cast<_Up*>(this); - } - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Up const& __second() const noexcept - { - return *static_cast<_Up const*>(this); - } - - _CCCL_HIDE_FROM_ABI constexpr __compressed_pair() noexcept = default; - _CCCL_HIDE_FROM_ABI constexpr __compressed_pair(__compressed_pair const&) noexcept = default; - _CCCL_HIDE_FROM_ABI constexpr __compressed_pair(__compressed_pair&&) noexcept = default; - _CCCL_HIDE_FROM_ABI __MDSPAN_CONSTEXPR_14_DEFAULTED __compressed_pair& - operator=(__compressed_pair const&) noexcept = default; - _CCCL_HIDE_FROM_ABI __MDSPAN_CONSTEXPR_14_DEFAULTED __compressed_pair& - operator=(__compressed_pair&&) noexcept = default; - _CCCL_HIDE_FROM_ABI ~__compressed_pair() noexcept = default; - - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr __compressed_pair(_TLike&& __t, _ULike&& __u) - : _Up((_ULike&&) __u) - , __t_val((_TLike&&) __t) - {} -}; - -// Both empty. -template -struct __compressed_pair< - _Tp, - _Up, - _CUDA_VSTD::enable_if_t<_CCCL_TRAIT(_CUDA_VSTD::is_empty, _Tp) && _CCCL_TRAIT(_CUDA_VSTD::is_empty, _Up)>> -// We need to use the __no_unique_address_emulation wrapper here to avoid -// base class ambiguities. -# ifdef __MDSPAN_COMPILER_MSVC - // MSVC doesn't allow you to access public static member functions of a type - // when you *happen* to privately inherit from that type. 
- : protected __no_unique_address_emulation<_Tp, 0> - , protected __no_unique_address_emulation<_Up, 1> -# else - : private __no_unique_address_emulation<_Tp, 0> - , private __no_unique_address_emulation<_Up, 1> -# endif -{ - using __first_base_t = __no_unique_address_emulation<_Tp, 0>; - using __second_base_t = __no_unique_address_emulation<_Up, 1>; - - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp& __first() noexcept - { - return this->__first_base_t::__ref(); - } - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp const& __first() const noexcept - { - return this->__first_base_t::__ref(); - } - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Up& __second() noexcept - { - return this->__second_base_t::__ref(); - } - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Up const& __second() const noexcept - { - return this->__second_base_t::__ref(); - } - - _CCCL_HIDE_FROM_ABI constexpr __compressed_pair() noexcept = default; - _CCCL_HIDE_FROM_ABI constexpr __compressed_pair(__compressed_pair const&) noexcept = default; - _CCCL_HIDE_FROM_ABI constexpr __compressed_pair(__compressed_pair&&) noexcept = default; - _CCCL_HIDE_FROM_ABI __MDSPAN_CONSTEXPR_14_DEFAULTED __compressed_pair& - operator=(__compressed_pair const&) noexcept = default; - _CCCL_HIDE_FROM_ABI __MDSPAN_CONSTEXPR_14_DEFAULTED __compressed_pair& - operator=(__compressed_pair&&) noexcept = default; - _CCCL_HIDE_FROM_ABI ~__compressed_pair() noexcept = default; - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr __compressed_pair(_TLike&& __t, _ULike&& __u) noexcept - : __first_base_t(_Tp((_TLike&&) __t)) - , __second_base_t(_Up((_ULike&&) __u)) - {} -}; - -# endif // !_CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - -} // end namespace __detail - -#endif // _CCCL_STD_VER > 2011 - -_LIBCUDACXX_END_NAMESPACE_STD - -#endif // _LIBCUDACXX___MDSPAN_COMPRESSED_PAIR_HPP diff --git a/libcudacxx/include/cuda/std/__mdspan/concepts.h b/libcudacxx/include/cuda/std/__mdspan/concepts.h new file mode 100644 index 00000000000..d53b4bc563d --- 
/dev/null +++ b/libcudacxx/include/cuda/std/__mdspan/concepts.h @@ -0,0 +1,103 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. +// +// Kokkos v. 4.0 +// Copyright (2022) National Technology & Engineering +// Solutions of Sandia, LLC (NTESS). +// +// Under the terms of Contract DE-NA0003525 with NTESS, +// the U.S. Government retains certain rights in this software. +// +//===---------------------------------------------------------------------===// + +#ifndef _LIBCUDACXX___MDSPAN_CONCEPTS_H +#define _LIBCUDACXX___MDSPAN_CONCEPTS_H + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include + +#if _CCCL_STD_VER >= 2014 + +_LIBCUDACXX_BEGIN_NAMESPACE_STD + +namespace __mdspan_detail +{ + +// [mdspan.layout.stride.expo]/3 +template +struct __is_extents : false_type +{}; + +template +_CCCL_INLINE_VAR constexpr bool __is_extents_v = __is_extents<_Tp>::value; + +// [mdspan.layout.general]/2 +template +_CCCL_INLINE_VAR constexpr bool __is_mapping_of = + _CCCL_TRAIT(is_same, typename _Layout::template mapping, _Mapping); + +// [mdspan.layout.stride.expo]/4 +# if _CCCL_STD_VER >= 2020 +template +concept __layout_mapping_alike = requires { + requires __is_mapping_of; + requires __is_extents_v; + { _Mapping::is_always_strided() } -> same_as; + { _Mapping::is_always_exhaustive() } -> same_as; + { _Mapping::is_always_unique() } -> same_as; + bool_constant<_Mapping::is_always_strided()>::value; 
+ bool_constant<_Mapping::is_always_exhaustive()>::value; + bool_constant<_Mapping::is_always_unique()>::value; +}; +# else // ^^^ _CCCL_STD_VER >= 2020 ^^^ / vvv _CCCL_STD_VER <= 2017 vvv +// NOTE: integral_constant::value only checks that this is a constant expression +template +_CCCL_CONCEPT_FRAGMENT( + __layout_mapping_alike_prop_, + requires()( // + requires(same_as), + requires(same_as), + requires(same_as), + (integral_constant::value), + (integral_constant::value), + (integral_constant::value))); + +template +_CCCL_CONCEPT __layout_mapping_alike_prop = _CCCL_FRAGMENT(__layout_mapping_alike_prop_, _Mapping); + +template +_CCCL_CONCEPT_FRAGMENT( + __layout_mapping_alike_, + requires()( // + requires(__is_mapping_of), + requires(_CCCL_TRAIT(__is_extents, typename _Mapping::extents_type)), + requires(__layout_mapping_alike_prop<_Mapping>))); + +template +_CCCL_CONCEPT __layout_mapping_alike = _CCCL_FRAGMENT(__layout_mapping_alike_, _Mapping); +# endif // _CCCL_STD_VER <= 2017 + +} // namespace __mdspan_detail + +_LIBCUDACXX_END_NAMESPACE_STD + +#endif // _CCCL_STD_VER >= 2014 + +#endif // _LIBCUDACXX___MDSPAN_CONCEPTS_H diff --git a/libcudacxx/include/cuda/std/__mdspan/config.h b/libcudacxx/include/cuda/std/__mdspan/config.h deleted file mode 100644 index 9f1c9898dd2..00000000000 --- a/libcudacxx/include/cuda/std/__mdspan/config.h +++ /dev/null @@ -1,276 +0,0 @@ -/* -//@HEADER -// ************************************************************************ -// -// Kokkos v. 2.0 -// Copyright (2019) Sandia Corporation -// -// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, -// the U.S. Government retains certain rights in this software. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// 1. Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// -// 2. 
Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// 3. Neither the name of the Corporation nor the names of the -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -// Questions? Contact Christian R. 
Trott (crtrott@sandia.gov) -// -// ************************************************************************ -//@HEADER -*/ - -#ifndef _LIBCUDACXX___MDSPAN_CONFIG_HPP -#define _LIBCUDACXX___MDSPAN_CONFIG_HPP - -#include - -#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) -# pragma GCC system_header -#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) -# pragma clang system_header -#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) -# pragma system_header -#endif // no system header - -#if _CCCL_STD_VER > 2011 - -# ifdef _MSVC_LANG -# define __MDSPAN_CPLUSPLUS _MSVC_LANG -# else -# define __MDSPAN_CPLUSPLUS __cplusplus -# endif - -# define __MDSPAN_CXX_STD_14 201402L -# define __MDSPAN_CXX_STD_17 201703L -# define __MDSPAN_CXX_STD_20 202002L - -# define __MDSPAN_HAS_CXX_14 (__MDSPAN_CPLUSPLUS >= __MDSPAN_CXX_STD_14) -# define __MDSPAN_HAS_CXX_17 (__MDSPAN_CPLUSPLUS >= __MDSPAN_CXX_STD_17) -# define __MDSPAN_HAS_CXX_20 (__MDSPAN_CPLUSPLUS >= __MDSPAN_CXX_STD_20) - -static_assert(__MDSPAN_CPLUSPLUS >= __MDSPAN_CXX_STD_14, "mdspan requires C++14 or later."); - -# ifndef __MDSPAN_COMPILER_CLANG -# if defined(__clang__) -# define __MDSPAN_COMPILER_CLANG __clang__ -# endif -# endif - -# if !defined(__MDSPAN_COMPILER_MSVC) && !defined(__MDSPAN_COMPILER_MSVC_CLANG) -# if defined(_MSC_VER) -# if !defined(__MDSPAN_COMPILER_CLANG) -# define __MDSPAN_COMPILER_MSVC _MSC_VER -# else -# define __MDSPAN_COMPILER_MSVC_CLANG _MSC_VER -# endif -# endif -# endif - -# ifndef __MDSPAN_COMPILER_INTEL -# ifdef __INTEL_COMPILER -# define __MDSPAN_COMPILER_INTEL __INTEL_COMPILER -# endif -# endif - -# ifndef __MDSPAN_COMPILER_APPLECLANG -# ifdef __apple_build_version__ -# define __MDSPAN_COMPILER_APPLECLANG __apple_build_version__ -# endif -# endif - -# ifndef __MDSPAN_HAS_CUDA -# if defined(__CUDACC__) -# define __MDSPAN_HAS_CUDA __CUDACC__ -# endif -# endif - -# ifndef __MDSPAN_HAS_HIP -# if defined(__HIPCC__) -# define __MDSPAN_HAS_HIP __HIPCC__ -# endif -# endif - -# ifndef 
__MDSPAN_PRESERVE_STANDARD_LAYOUT -// Preserve standard layout by default, but we're not removing the old version -// that turns this off until we're sure this doesn't have an unreasonable cost -// to the compiler or optimizer. -# define __MDSPAN_PRESERVE_STANDARD_LAYOUT 1 -# endif - -# ifndef __MDSPAN_USE_CONCEPTS -// Looks like concepts doesn't work in CUDA 12 -# if defined(__cpp_concepts) && __cpp_concepts >= 201507L && !defined __cuda_std__ -# define __MDSPAN_USE_CONCEPTS 1 -# endif -# endif - -# ifndef __MDSPAN_USE_FOLD_EXPRESSIONS -# if (defined(__cpp_fold_expressions) && __cpp_fold_expressions >= 201603L) \ - || (!defined(__cpp_fold_expressions) && __MDSPAN_HAS_CXX_17) -# define __MDSPAN_USE_FOLD_EXPRESSIONS 1 -# endif -# endif - -# ifndef __MDSPAN_NEEDS_TRAIT_VARIABLE_TEMPLATE_BACKPORTS -# if (!(defined(__cpp_lib_type_trait_variable_templates) && __cpp_lib_type_trait_variable_templates >= 201510L) \ - || !__MDSPAN_HAS_CXX_17) -# if !(defined(__MDSPAN_COMPILER_APPLECLANG) && __MDSPAN_HAS_CXX_17) -# define __MDSPAN_NEEDS_TRAIT_VARIABLE_TEMPLATE_BACKPORTS 1 -# endif -# endif -# endif - -# ifndef __MDSPAN_USE_VARIABLE_TEMPLATES -# if (defined(__cpp_variable_templates) && __cpp_variable_templates >= 201304 && __MDSPAN_HAS_CXX_17) \ - || (!defined(__cpp_variable_templates) && __MDSPAN_HAS_CXX_17) -# define __MDSPAN_USE_VARIABLE_TEMPLATES 1 -# endif -# endif // __MDSPAN_USE_VARIABLE_TEMPLATES - -# ifndef __MDSPAN_USE_CONSTEXPR_14 -# if (defined(__cpp_constexpr) && __cpp_constexpr >= 201304) \ - || (!defined(__cpp_constexpr) && __MDSPAN_HAS_CXX_14) \ - && (!(defined(__INTEL_COMPILER) && __INTEL_COMPILER <= 1700)) -# define __MDSPAN_USE_CONSTEXPR_14 1 -# endif -# endif - -# ifndef __MDSPAN_USE_INTEGER_SEQUENCE -# if defined(__MDSPAN_COMPILER_MSVC) -# if (defined(__cpp_lib_integer_sequence) && __cpp_lib_integer_sequence >= 201304) -# define __MDSPAN_USE_INTEGER_SEQUENCE 1 -# endif -# endif -# endif -# ifndef __MDSPAN_USE_INTEGER_SEQUENCE -# if 
(defined(__cpp_lib_integer_sequence) && __cpp_lib_integer_sequence >= 201304) \ - || (!defined(__cpp_lib_integer_sequence) && __MDSPAN_HAS_CXX_14) /* as far as I can tell, libc++ seems to think \ - this is a C++11 feature... */ \ - || (defined(__GLIBCXX__) && __GLIBCXX__ > 20150422 && __GNUC__ < 5 && !defined(__INTEL_CXX11_MODE__)) -// several compilers lie about integer_sequence working properly unless the C++14 standard is used -# define __MDSPAN_USE_INTEGER_SEQUENCE 1 -# elif defined(__MDSPAN_COMPILER_APPLECLANG) && __MDSPAN_HAS_CXX_14 -// appleclang seems to be missing the __cpp_lib_... macros, but doesn't seem to lie about C++14 making -// integer_sequence work -# define __MDSPAN_USE_INTEGER_SEQUENCE 1 -# endif -# endif - -# ifndef __MDSPAN_USE_RETURN_TYPE_DEDUCTION -# if (defined(__cpp_return_type_deduction) && __cpp_return_type_deduction >= 201304) \ - || (!defined(__cpp_return_type_deduction) && __MDSPAN_HAS_CXX_14) -# define __MDSPAN_USE_RETURN_TYPE_DEDUCTION 1 -# endif -# endif - -# ifndef __MDSPAN_USE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION -// GCC 10 is known not to work with CTAD for this case. -# if (defined(__MDSPAN_COMPILER_CLANG) || !_CCCL_COMPILER(GCC) || _CCCL_COMPILER(GCC, >=, 11)) \ - && ((defined(__cpp_deduction_guides) && __cpp_deduction_guides >= 201703) \ - || (!defined(__cpp_deduction_guides) && __MDSPAN_HAS_CXX_17)) -# define __MDSPAN_USE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION 1 -# endif -# endif - -# ifndef __MDSPAN_USE_ALIAS_TEMPLATE_ARGUMENT_DEDUCTION -// GCC 10 is known not to work with CTAD for this case. 
-# if (defined(__MDSPAN_COMPILER_CLANG) || !_CCCL_COMPILER(GCC) || _CCCL_COMPILER(GCC, >=, 11)) \ - && ((defined(__cpp_deduction_guides) && __cpp_deduction_guides >= 201907) \ - || (!defined(__cpp_deduction_guides) && __MDSPAN_HAS_CXX_20)) -# define __MDSPAN_USE_ALIAS_TEMPLATE_ARGUMENT_DEDUCTION 1 -# endif -# endif - -# ifndef __MDSPAN_USE_STANDARD_TRAIT_ALIASES -# if (defined(__cpp_lib_transformation_trait_aliases) && __cpp_lib_transformation_trait_aliases >= 201304) \ - || (!defined(__cpp_lib_transformation_trait_aliases) && __MDSPAN_HAS_CXX_14) -# define __MDSPAN_USE_STANDARD_TRAIT_ALIASES 1 -# elif defined(__MDSPAN_COMPILER_APPLECLANG) && __MDSPAN_HAS_CXX_14 -// appleclang seems to be missing the __cpp_lib_... macros, but doesn't seem to lie about C++14 -# define __MDSPAN_USE_STANDARD_TRAIT_ALIASES 1 -# endif -# endif - -# ifndef __MDSPAN_DEFAULTED_CONSTRUCTORS_INHERITANCE_WORKAROUND -# ifdef __GNUC__ -# if __GNUC__ < 9 -# define __MDSPAN_DEFAULTED_CONSTRUCTORS_INHERITANCE_WORKAROUND 1 -# endif -# endif -# endif - -# ifndef __MDSPAN_CONDITIONAL_EXPLICIT -# if __MDSPAN_HAS_CXX_20 && !defined(__MDSPAN_COMPILER_MSVC) -# define __MDSPAN_CONDITIONAL_EXPLICIT(COND) explicit(COND) -# else -# define __MDSPAN_CONDITIONAL_EXPLICIT(COND) -# endif -# endif - -# ifndef __MDSPAN_USE_BRACKET_OPERATOR -# if defined(__cpp_multidimensional_subscript) -# define __MDSPAN_USE_BRACKET_OPERATOR 1 -# else -# define __MDSPAN_USE_BRACKET_OPERATOR 0 -# endif -# endif - -# ifndef __MDSPAN_USE_PAREN_OPERATOR -# if !__MDSPAN_USE_BRACKET_OPERATOR -# define __MDSPAN_USE_PAREN_OPERATOR 1 -# else -# define __MDSPAN_USE_PAREN_OPERATOR 0 -# endif -# endif - -# if __MDSPAN_USE_BRACKET_OPERATOR -# define __MDSPAN_OP(mds, ...) mds[__VA_ARGS__] -// Corentins demo compiler for subscript chokes on empty [] call, -// though I believe the proposal supports it? 
-# ifdef __MDSPAN_NO_EMPTY_BRACKET_OPERATOR -# define __MDSPAN_OP0(mds) mds.accessor().access(mds.data_handle(), 0) -# else -# define __MDSPAN_OP0(mds) mds[] -# endif -# define __MDSPAN_OP1(mds, a) mds[a] -# define __MDSPAN_OP2(mds, a, b) mds[a, b] -# define __MDSPAN_OP3(mds, a, b, c) mds[a, b, c] -# define __MDSPAN_OP4(mds, a, b, c, d) mds[a, b, c, d] -# define __MDSPAN_OP5(mds, a, b, c, d, e) mds[a, b, c, d, e] -# define __MDSPAN_OP6(mds, a, b, c, d, e, f) mds[a, b, c, d, e, f] -# else -# define __MDSPAN_OP(mds, ...) mds(__VA_ARGS__) -# define __MDSPAN_OP0(mds) mds() -# define __MDSPAN_OP1(mds, a) mds(a) -# define __MDSPAN_OP2(mds, a, b) mds(a, b) -# define __MDSPAN_OP3(mds, a, b, c) mds(a, b, c) -# define __MDSPAN_OP4(mds, a, b, c, d) mds(a, b, c, d) -# define __MDSPAN_OP5(mds, a, b, c, d, e) mds(a, b, c, d, e) -# define __MDSPAN_OP6(mds, a, b, c, d, e, f) mds(a, b, c, d, e, f) -# endif - -#endif // _CCCL_STD_VER > 2011 - -#endif // _LIBCUDACXX___MDSPAN_CONFIG_HPP diff --git a/libcudacxx/include/cuda/std/__mdspan/default_accessor.h b/libcudacxx/include/cuda/std/__mdspan/default_accessor.h index ccba021aa99..a5cc8ecd6b3 100644 --- a/libcudacxx/include/cuda/std/__mdspan/default_accessor.h +++ b/libcudacxx/include/cuda/std/__mdspan/default_accessor.h @@ -1,45 +1,19 @@ -/* -//@HEADER -// ************************************************************************ +// -*- C++ -*- +//===----------------------------------------------------------------------===// // -// Kokkos v. 2.0 -// Copyright (2019) Sandia Corporation +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. // -// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, -// the U.S. Government retains certain rights in this software. 
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// 1. Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// 3. Neither the name of the Corporation nor the names of the -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. +// Kokkos v. 4.0 +// Copyright (2022) National Technology & Engineering +// Solutions of Sandia, LLC (NTESS). // -// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -// Questions? Contact Christian R. Trott (crtrott@sandia.gov) +// Under the terms of Contract DE-NA0003525 with NTESS, +// the U.S. Government retains certain rights in this software. 
// -// ************************************************************************ -//@HEADER -*/ +//===---------------------------------------------------------------------===// #ifndef _LIBCUDACXX___MDSPAN_DEFAULT_ACCESSOR_HPP #define _LIBCUDACXX___MDSPAN_DEFAULT_ACCESSOR_HPP @@ -54,42 +28,46 @@ # pragma system_header #endif // no system header -#include +#include +#include +#include #include #include -_LIBCUDACXX_BEGIN_NAMESPACE_STD +#if _CCCL_STD_VER >= 2014 -#if _CCCL_STD_VER > 2011 +_LIBCUDACXX_BEGIN_NAMESPACE_STD template struct default_accessor { + static_assert(!_CCCL_TRAIT(is_array, _ElementType), "default_accessor: template argument may not be an array type"); + static_assert(!_CCCL_TRAIT(is_abstract, _ElementType), + "default_accessor: template argument may not be an abstract class"); + using offset_policy = default_accessor; using element_type = _ElementType; using reference = _ElementType&; using data_handle_type = _ElementType*; - _CCCL_HIDE_FROM_ABI constexpr default_accessor() noexcept = default; + constexpr default_accessor() noexcept = default; _CCCL_TEMPLATE(class _OtherElementType) _CCCL_REQUIRES(_CCCL_TRAIT(is_convertible, _OtherElementType (*)[], element_type (*)[])) _LIBCUDACXX_HIDE_FROM_ABI constexpr default_accessor(default_accessor<_OtherElementType>) noexcept {} - _LIBCUDACXX_HIDE_FROM_ABI constexpr data_handle_type offset(data_handle_type __p, size_t __i) const noexcept + _LIBCUDACXX_HIDE_FROM_ABI constexpr reference access(data_handle_type __p, size_t __i) const noexcept { - return __p + __i; + return __p[__i]; } - - __MDSPAN_FORCE_INLINE_FUNCTION - constexpr reference access(data_handle_type __p, size_t __i) const noexcept + _LIBCUDACXX_HIDE_FROM_ABI constexpr data_handle_type offset(data_handle_type __p, size_t __i) const noexcept { - return __p[__i]; + return __p + __i; } }; -#endif // _CCCL_STD_VER > 2011 - _LIBCUDACXX_END_NAMESPACE_STD -#endif // _LIBCUDACXX___MDSPAN_DEFAULT_ACCESSOR_HPP +#endif // _CCCL_STD_VER >= 2014 + +#endif // 
_LIBCUDACXX___MDSPAN_DEFAULT_ACCESSOR_H diff --git a/libcudacxx/include/cuda/std/__mdspan/dynamic_extent.h b/libcudacxx/include/cuda/std/__mdspan/dynamic_extent.h deleted file mode 100644 index ba6d64417a2..00000000000 --- a/libcudacxx/include/cuda/std/__mdspan/dynamic_extent.h +++ /dev/null @@ -1,92 +0,0 @@ -/* -//@HEADER -// ************************************************************************ -// -// Kokkos v. 2.0 -// Copyright (2019) Sandia Corporation -// -// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, -// the U.S. Government retains certain rights in this software. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// 1. Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// 3. Neither the name of the Corporation nor the names of the -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL SANDIA CORPORATION OR THE -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -// Questions? Contact Christian R. Trott (crtrott@sandia.gov) -// -// ************************************************************************ -//@HEADER -*/ - -#ifndef _LIBCUDACXX___MDSPAN_DYNAMIC_EXTENT_HPP -#define _LIBCUDACXX___MDSPAN_DYNAMIC_EXTENT_HPP - -#include - -#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) -# pragma GCC system_header -#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) -# pragma clang system_header -#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) -# pragma system_header -#endif // no system header - -#include // dynamic_extent -#include -#include -#include // numeric_limits - -#ifdef _LIBCUDACXX_IMPLICIT_SYSTEM_HEADER -#endif - -_LIBCUDACXX_BEGIN_NAMESPACE_STD - -#if _CCCL_STD_VER > 2011 - -namespace __detail -{ - -template -_CCCL_HOST_DEVICE constexpr auto __make_dynamic_extent() -{ - return dynamic_extent; -} - -template -_CCCL_HOST_DEVICE constexpr auto __make_dynamic_extent_integral() -{ - return dynamic_extent; -} - -} // end namespace __detail - -#endif // _CCCL_STD_VER > 2011 - -_LIBCUDACXX_END_NAMESPACE_STD - -//============================================================================================================== - -#endif // _LIBCUDACXX___MDSPAN_DYNAMIC_EXTENT_HPP diff --git a/libcudacxx/include/cuda/std/__mdspan/extents.h b/libcudacxx/include/cuda/std/__mdspan/extents.h index b6ce539a901..e1fca649bdb 100644 --- a/libcudacxx/include/cuda/std/__mdspan/extents.h +++ 
b/libcudacxx/include/cuda/std/__mdspan/extents.h @@ -1,45 +1,19 @@ -/* -//@HEADER -// ************************************************************************ +// -*- C++ -*- +//===----------------------------------------------------------------------===// // -// Kokkos v. 2.0 -// Copyright (2019) Sandia Corporation +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. // -// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, -// the U.S. Government retains certain rights in this software. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// 1. Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// 3. Neither the name of the Corporation nor the names of the -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL SANDIA CORPORATION OR THE -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Kokkos v. 4.0 +// Copyright (2022) National Technology & Engineering +// Solutions of Sandia, LLC (NTESS). // -// Questions? Contact Christian R. Trott (crtrott@sandia.gov) +// Under the terms of Contract DE-NA0003525 with NTESS, +// the U.S. Government retains certain rights in this software. // -// ************************************************************************ -//@HEADER -*/ +//===---------------------------------------------------------------------===// #ifndef _LIBCUDACXX___MDSPAN_EXTENTS_HPP #define _LIBCUDACXX___MDSPAN_EXTENTS_HPP @@ -54,523 +28,748 @@ # pragma system_header #endif // no system header -#include -#ifdef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS -# include -#endif // _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS -#include -#include -#include +#include +#include +#include #include #include #include #include +#include #include #include +#include #include +#include #include +#include #include _CCCL_PUSH_MACROS +#if _CCCL_STD_VER >= 2014 + _LIBCUDACXX_BEGIN_NAMESPACE_STD -#if _CCCL_STD_VER > 2011 +namespace __mdspan_detail +{ -namespace __detail +// ------------------------------------------------------------------ +// ------------ __static_array -------------------------------------- +// ------------------------------------------------------------------ +// array like class which provides an array of static values with get +template +struct __static_array { +public: + _LIBCUDACXX_HIDE_FROM_ABI static 
constexpr size_t __size() + { + return sizeof...(_Values); + } -template -struct __count_dynamic_extents; + _LIBCUDACXX_HIDE_FROM_ABI static constexpr _Tp __get(size_t __index) noexcept + { + constexpr array<_Tp, sizeof...(_Values)> __array = {_Values...}; + return __array[__index]; + } -template -struct __count_dynamic_extents<_Ep, _Extents...> -{ - static constexpr size_t val = (_Ep == dynamic_extent ? 1 : 0) + __count_dynamic_extents<_Extents...>::val; + template + _LIBCUDACXX_HIDE_FROM_ABI static constexpr _Tp __get() + { + return __get(_Index); + } }; -template <> -struct __count_dynamic_extents<> +// ------------------------------------------------------------------ +// ------------ __possibly_empty_array ----------------------------- +// ------------------------------------------------------------------ + +// array like class which provides get function and operator [], and +// has a specialization for the size 0 case. +// This is needed to make the __maybe_static_array be truly empty, for +// all static values. + +template +struct __possibly_empty_array { - static constexpr size_t val = 0; + _Tp __vals_[_Size]; + _LIBCUDACXX_HIDE_FROM_ABI constexpr _Tp& operator[](size_t __index) + { + return __vals_[__index]; + } + _LIBCUDACXX_HIDE_FROM_ABI constexpr const _Tp& operator[](size_t __index) const + { + return __vals_[__index]; + } }; -template -_CCCL_HOST_DEVICE static constexpr false_type __check_compatible_extents( - false_type, - _CUDA_VSTD::integer_sequence, - _CUDA_VSTD::integer_sequence) noexcept +template +struct __possibly_empty_array<_Tp, 0> { - return {}; -} + _LIBCUDACXX_HIDE_FROM_ABI constexpr _Tp& operator[](size_t) + { + _CCCL_UNREACHABLE(); + } + _LIBCUDACXX_HIDE_FROM_ABI constexpr const _Tp& operator[](size_t) const + { + _CCCL_UNREACHABLE(); + } +}; -// This helper prevents ICE's on MSVC. 
-template -struct __compare_extent_compatible - : integral_constant -{}; +// ------------------------------------------------------------------ +// ------------ static_partial_sums --------------------------------- +// ------------------------------------------------------------------ + +// Provides a compile time partial sum one can index into -template -static integral_constant::value)...>> _CCCL_HOST_DEVICE -__check_compatible_extents(true_type, - _CUDA_VSTD::integer_sequence, - _CUDA_VSTD::integer_sequence) noexcept +template +struct __static_partial_sums { - return {}; -} + _LIBCUDACXX_HIDE_FROM_ABI static constexpr array __static_partial_sums_impl() + { + array __values{_Values...}; + array __partial_sums{{}}; + size_t __running_sum = 0; + for (int __i = 0; __i != sizeof...(_Values); ++__i) + { + __partial_sums[__i] = __running_sum; + __running_sum += __values[__i]; + } + return __partial_sums; + } -struct __extents_tag -{}; + _LIBCUDACXX_HIDE_FROM_ABI static constexpr size_t __get(size_t __index) + { + constexpr array __result = __static_partial_sums_impl(); + return __result[__index]; + } +}; -} // end namespace __detail +// ------------------------------------------------------------------ +// ------------ __maybe_static_array -------------------------------- +// ------------------------------------------------------------------ -template -class extents -# ifdef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - : private __detail::__no_unique_address_emulation< - __detail::__partially_static_sizes_tagged<__detail::__extents_tag, _ThisIndexType, size_t, _Extents...>> -# endif +// array like class which has a mix of static and runtime values but +// only stores the runtime values. +// The type of the static and the runtime values can be different. +// The position of a dynamic value is indicated through a tag value. 
+template +struct __maybe_static_array { -public: - using rank_type = size_t; - using index_type = _ThisIndexType; - using size_type = make_unsigned_t; + static_assert(is_convertible<_TStatic, _TDynamic>::value, + "__maybe_static_array: _TStatic must be convertible to _TDynamic"); + static_assert(is_convertible<_TDynamic, _TStatic>::value, + "__maybe_static_array: _TDynamic must be convertible to _TStatic"); - // internal typedefs which for technical reasons are public - using __storage_t = - __detail::__partially_static_sizes_tagged<__detail::__extents_tag, index_type, size_t, _Extents...>; - using __indices_t = _CUDA_VSTD::integer_sequence; +private: + // Static values member + static constexpr size_t __size_ = sizeof...(_Values); + static constexpr size_t __size_dynamic_ = _CCCL_FOLD_PLUS(size_t(0), static_cast(_Values == _DynTag)); + using _StaticValues = __static_array<_TStatic, _Values...>; + using _DynamicValues = __possibly_empty_array<_TDynamic, __size_dynamic_>; -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - _CCCL_NO_UNIQUE_ADDRESS __storage_t __storage_; -# else - using __base_t = __detail::__no_unique_address_emulation<__storage_t>; -# endif + // Dynamic values member + _CCCL_NO_UNIQUE_ADDRESS _DynamicValues __dyn_vals_; - // private members dealing with the way we internally store dynamic extents + // static mapping of indices to the position in the dynamic values array + using _DynamicIdxMap = __static_partial_sums(_Values == _DynTag)...>; -private: - __MDSPAN_FORCE_INLINE_FUNCTION constexpr __storage_t& __storage() noexcept + template + _LIBCUDACXX_HIDE_FROM_ABI static constexpr _DynamicValues __zeros(index_sequence) noexcept + { + return _DynamicValues{((void) Indices, 0)...}; + } + +public: + _LIBCUDACXX_HIDE_FROM_ABI constexpr __maybe_static_array() noexcept + : __dyn_vals_{__zeros(make_index_sequence<__size_dynamic_>())} + {} + + template + _LIBCUDACXX_HIDE_FROM_ABI constexpr __maybe_static_array(span<_Tp, _Size> __vals) noexcept +# if 
_CCCL_STD_VER <= 2017 // NVCC complains that this constructor would not be constexpr without it + : __dyn_vals_{} +# endif // _CCCL_STD_VER <= 2017 { -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - return __storage_; -# else - return this->__base_t::__ref(); -# endif + if constexpr (_Size == __size_dynamic_) + { + for (size_t __i = 0; __i != _Size; __i++) + { + __dyn_vals_[__i] = static_cast<_TDynamic>(__vals[__i]); + } + } + else + { + for (size_t __i = 0; __i != __size_; __i++) + { + _TStatic __static_val = _StaticValues::__get(__i); + if (__static_val == _DynTag) + { + __dyn_vals_[_DynamicIdxMap::__get(__i)] = static_cast<_TDynamic>(__vals[__i]); + } + else + { + // Not catching this could lead to out of bounds errors later + // e.g. using my_mdspan_t = mdspan>; my_mdspan_t = m(new int[N], span(&N)); + // Right-hand-side construction looks ok with allocation and size matching, + // but since (potentially elsewhere defined) my_mdspan_t has static size m now thinks its range is 10 not N + _CCCL_ASSERT(static_cast<_TDynamic>(__vals[__i]) == static_cast<_TDynamic>(__static_val), + "extents construction: mismatch of provided arguments with static extents."); + } + } + } } - __MDSPAN_FORCE_INLINE_FUNCTION - constexpr __storage_t const& __storage() const noexcept + + // constructors from dynamic values only -- this covers the case for rank() == 0 + _CCCL_TEMPLATE(class... _DynVals) + _CCCL_REQUIRES((sizeof...(_DynVals) == __size_dynamic_) && (!__all<__is_std_span<_DynVals>...>::value)) + _LIBCUDACXX_HIDE_FROM_ABI constexpr __maybe_static_array(_DynVals... __vals) noexcept + : __dyn_vals_{static_cast<_TDynamic>(__vals)...} + {} + + // constructors from all values -- here rank will be greater than 0 + _CCCL_TEMPLATE(class... _DynVals) + _CCCL_REQUIRES((sizeof...(_DynVals) != __size_dynamic_) && (!__all<__is_std_span<_DynVals>...>::value)) + _LIBCUDACXX_HIDE_FROM_ABI constexpr __maybe_static_array(_DynVals... 
__vals) { -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - return __storage_; -# else - return this->__base_t::__ref(); -# endif + static_assert((sizeof...(_DynVals) == __size_), "Invalid number of values."); + _TDynamic __values[__size_] = {static_cast<_TDynamic>(__vals)...}; + for (size_t __i = 0; __i < __size_; __i++) + { + _TStatic __static_val = _StaticValues::__get(__i); + if (__static_val == _DynTag) + { + __dyn_vals_[_DynamicIdxMap::__get(__i)] = __values[__i]; + } + else + { + // Not catching this could lead to out of bounds errors later + // e.g. using my_mdspan_t = mdspan>; my_mdspan_t = m(new int[5], 5); + // Right-hand-side construction looks ok with allocation and size matching, + // but since (potentially elsewhere defined) my_mdspan_t has static size m now thinks its range is 10 not 5 + _CCCL_ASSERT(__values[__i] == static_cast<_TDynamic>(__static_val), + "extents construction: mismatch of provided arguments with static extents."); + } + } } - template - __MDSPAN_FORCE_INLINE_FUNCTION static constexpr size_t - _static_extent_impl(size_t __n, _CUDA_VSTD::integer_sequence) noexcept + // access functions + _LIBCUDACXX_HIDE_FROM_ABI static constexpr _TStatic __static_value(size_t __i) noexcept { - return __MDSPAN_FOLD_PLUS_RIGHT(((_Idxs == __n) ? _Extents : 0), /* + ... + */ 0); + if constexpr (__size_ > 0) + { + _CCCL_ASSERT(__i < __size_, "extents access: index must be less than rank"); + } + return _StaticValues::__get(__i); } - template - friend class extents; + _LIBCUDACXX_HIDE_FROM_ABI constexpr _TDynamic __value(size_t __i) const + { + if constexpr (__size_ > 0) + { + _CCCL_ASSERT(__i < __size_, "extents access: index must be less than rank"); + } + _TStatic __static_val = _StaticValues::__get(__i); + return __static_val == _DynTag ? 
__dyn_vals_[_DynamicIdxMap::__get(__i)] : static_cast<_TDynamic>(__static_val); + } + _LIBCUDACXX_HIDE_FROM_ABI constexpr _TDynamic operator[](size_t __i) const + { + if constexpr (__size_ > 0) + { + _CCCL_ASSERT(__i < __size_, "extents access: index must be less than rank"); + } + return __value(__i); + } - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr bool - _eq_impl(_CUDA_VSTD::extents<_OtherIndexType, _OtherExtents...>, - false_type, - _CUDA_VSTD::index_sequence<_Idxs...>) const noexcept + // observers + _LIBCUDACXX_HIDE_FROM_ABI static constexpr size_t __size() { - return false; + return __size_; } - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr bool - _eq_impl(_CUDA_VSTD::extents<_OtherIndexType, _OtherExtents...> __other, - true_type, - _CUDA_VSTD::index_sequence<_Idxs...>) const noexcept + _LIBCUDACXX_HIDE_FROM_ABI static constexpr size_t __size_dynamic() { - return __MDSPAN_FOLD_AND( - (__storage().template __get_n<_Idxs>() == __other.__storage().template __get_n<_Idxs>()) /* && ... 
*/ - ); + return __size_dynamic_; } +}; + +template +static constexpr bool __potentially_narrowing = + static_cast>((numeric_limits<_To>::max)()) + < static_cast>((numeric_limits<_From>::max)()); + +// Function to check whether a value is representable as another type +// value must be a positive integer otherwise returns false +// if _From is not an integral, we just check positivity +_CCCL_TEMPLATE(class _To, class _From) +_CCCL_REQUIRES(integral<_To> _CCCL_AND integral<_From> _CCCL_AND _CCCL_TRAIT(is_signed, _From) + _CCCL_AND(!__potentially_narrowing<_To, _From>)) +_LIBCUDACXX_HIDE_FROM_ABI constexpr bool __is_representable_as(_From __value) +{ + return __value >= 0; +} - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr bool - _not_eq_impl(_CUDA_VSTD::extents<_OtherIndexType, _OtherExtents...>, - false_type, - _CUDA_VSTD::index_sequence<_Idxs...>) const noexcept +_CCCL_TEMPLATE(class _To, class _From) +_CCCL_REQUIRES(integral<_To> _CCCL_AND integral<_From> _CCCL_AND _CCCL_TRAIT(is_signed, _From) + _CCCL_AND __potentially_narrowing<_To, _From>) +_LIBCUDACXX_HIDE_FROM_ABI constexpr bool __is_representable_as(_From __value) +{ + using _To_u = make_unsigned_t<_To>; + using _From_u = make_unsigned_t<_From>; + if (__value < 0) { - return true; + return false; } - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr bool - _not_eq_impl(_CUDA_VSTD::extents<_OtherIndexType, _OtherExtents...> __other, - true_type, - _CUDA_VSTD::index_sequence<_Idxs...>) const noexcept + return static_cast<_To_u>((numeric_limits<_To>::max)()) >= static_cast<_From_u>(__value); +} + +_CCCL_TEMPLATE(class _To, class _From) +_CCCL_REQUIRES(integral<_To> _CCCL_AND integral<_From> _CCCL_AND(!_CCCL_TRAIT(is_signed, _From)) + _CCCL_AND(!__potentially_narrowing<_To, _From>)) +_LIBCUDACXX_HIDE_FROM_ABI constexpr bool __is_representable_as(_From __value) +{ + return true; +} + +_CCCL_TEMPLATE(class _To, class _From) +_CCCL_REQUIRES(integral<_To> _CCCL_AND integral<_From> _CCCL_AND(!_CCCL_TRAIT(is_signed, 
_From)) + _CCCL_AND __potentially_narrowing<_To, _From>) +_LIBCUDACXX_HIDE_FROM_ABI constexpr bool __is_representable_as(_From __value) +{ + using _To_u = make_unsigned_t<_To>; + using _From_u = make_unsigned_t<_From>; + return static_cast<_To_u>((numeric_limits<_To>::max)()) >= static_cast<_From_u>(__value); +} + +_CCCL_TEMPLATE(class _To, class _From) +_CCCL_REQUIRES(integral<_To> _CCCL_AND(!integral<_From>) _CCCL_AND _CCCL_TRAIT(is_signed, _To)) +_LIBCUDACXX_HIDE_FROM_ABI constexpr bool __is_representable_as(_From __value) +{ + return static_cast<_To>(__value) >= 0; +} + +_CCCL_TEMPLATE(class _To, class _From) +_CCCL_REQUIRES(integral<_To> _CCCL_AND(!integral<_From>) _CCCL_AND(!_CCCL_TRAIT(is_signed, _To))) +_LIBCUDACXX_HIDE_FROM_ABI constexpr bool __is_representable_as(_From __value) +{ + return true; +} + +_CCCL_TEMPLATE(class _To, class... _From) +_CCCL_REQUIRES(integral<_To>) +_LIBCUDACXX_HIDE_FROM_ABI constexpr bool __are_representable_as(_From... __values) +{ + return _CCCL_FOLD_AND(__mdspan_detail::__is_representable_as<_To>(__values)); +} + +_CCCL_TEMPLATE(class _To, class _From, size_t _Size) +_CCCL_REQUIRES(integral<_To>) +_LIBCUDACXX_HIDE_FROM_ABI constexpr bool __are_representable_as(span<_From, _Size> __values) +{ + for (size_t __i = 0; __i != _Size; __i++) { - return __MDSPAN_FOLD_OR( - (__storage().template __get_n<_Idxs>() != __other.__storage().template __get_n<_Idxs>()) /* || ... 
*/ - ); + if (!__mdspan_detail::__is_representable_as<_To>(__values[__i])) + { + return false; + } } + return true; +} -# ifdef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - _LIBCUDACXX_HIDE_FROM_ABI constexpr explicit extents(__base_t&& __b) noexcept - : __base_t(_CUDA_VSTD::move(__b)) - {} -# endif +} // namespace __mdspan_detail - // public interface: +// ------------------------------------------------------------------ +// ------------ extents --------------------------------------------- +// ------------------------------------------------------------------ +// Class to delegate between the different (non-)explicit constructors +struct __extent_delegate_tag +{}; + +// Class to describe the extents of a multi dimensional array. +// Used by mdspan, mdarray and layout mappings. +// See ISO C++ standard [mdspan.extents] +template +class extents +{ public: - /* Defined above for use in the private code - using rank_type = size_t; - using index_type = _ThisIndexType; - */ + // typedefs for integral types used + using index_type = _IndexType; + using size_type = make_unsigned_t; + using rank_type = size_t; + static_assert(is_integral::value && !is_same::value, + "extents::index_type must be a signed or unsigned integer type"); + static_assert( + __all<(__mdspan_detail::__is_representable_as(_Extents) || (_Extents == dynamic_extent))...>::value, + "extents ctor: arguments must be representable as index_type and nonnegative"); + +private: + static constexpr rank_type __rank_ = sizeof...(_Extents); + static constexpr rank_type __rank_dynamic_ = + _CCCL_FOLD_PLUS(rank_type(0), (static_cast(_Extents == dynamic_extent))); + + // internal storage type using __maybe_static_array + using _Values = __mdspan_detail::__maybe_static_array<_IndexType, size_t, dynamic_extent, _Extents...>; + _CCCL_NO_UNIQUE_ADDRESS _Values __vals_; + +public: + // [mdspan.extents.obs], observers of multidimensional index space _LIBCUDACXX_HIDE_FROM_ABI static constexpr rank_type rank() noexcept { - 
return sizeof...(_Extents); + return __rank_; } _LIBCUDACXX_HIDE_FROM_ABI static constexpr rank_type rank_dynamic() noexcept { - return __MDSPAN_FOLD_PLUS_RIGHT((rank_type(_Extents == dynamic_extent)), /* + ... + */ 0); + return __rank_dynamic_; } - //-------------------------------------------------------------------------------- - // Constructors, Destructors, and Assignment + _LIBCUDACXX_HIDE_FROM_ABI constexpr index_type extent(rank_type __r) const noexcept + { + return __vals_.__value(__r); + } + _LIBCUDACXX_HIDE_FROM_ABI static constexpr size_t static_extent(rank_type __r) noexcept + { + return _Values::__static_value(__r); + } - // Default constructor - _CCCL_HIDE_FROM_ABI constexpr extents() noexcept = default; + // [mdspan.extents.cons], constructors + constexpr extents() noexcept = default; + + template + static constexpr bool __all_convertible_to_index_type = + _CCCL_FOLD_AND(_CCCL_TRAIT(is_convertible, _OtherIndexTypes, index_type)) + && _CCCL_FOLD_AND(_CCCL_TRAIT(is_nothrow_constructible, index_type, _OtherIndexTypes)); + + // Construction from just dynamic or all values. + // Precondition check is deferred to __maybe_static_array constructor + _CCCL_TEMPLATE(class... _OtherIndexTypes) + _CCCL_REQUIRES((sizeof...(_OtherIndexTypes) == __rank_ || sizeof...(_OtherIndexTypes) == __rank_dynamic_) + _CCCL_AND __all_convertible_to_index_type<_OtherIndexTypes...>) + _LIBCUDACXX_HIDE_FROM_ABI constexpr explicit extents(_OtherIndexTypes... __dynvals) noexcept + : __vals_(static_cast(__dynvals)...) + { + // Not catching this could lead to out of bounds errors later + // e.g. mdspan m(ptr, dextents(200u)); leads to an extent of -56 on m + _CCCL_ASSERT(__mdspan_detail::__are_representable_as(__dynvals...), + "extents ctor: arguments must be representable as index_type and nonnegative"); + } - // Converting constructor - _CCCL_TEMPLATE(class _OtherIndexType, size_t... 
_OtherExtents) - _CCCL_REQUIRES( - /* multi-stage check to protect from invalid pack expansion when sizes don't match? */ - (decltype(__detail::__check_compatible_extents( - integral_constant < bool, - sizeof...(_Extents) == sizeof...(_OtherExtents) > {}, - __indices_t{}, // _CUDA_VSTD::integer_sequence{} - _CUDA_VSTD::integer_sequence{}))::value)) - _LIBCUDACXX_HIDE_FROM_ABI __MDSPAN_CONDITIONAL_EXPLICIT( - (((_Extents != dynamic_extent) && (_OtherExtents == dynamic_extent)) || ...) - || (_CUDA_VSTD::numeric_limits::max() < _CUDA_VSTD::numeric_limits< - _OtherIndexType>::max())) constexpr extents(const extents<_OtherIndexType, _OtherExtents...>& __other) noexcept -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - : __storage_{ -# else - : __base_t(__base_t { - __storage_t - { -# endif - __other.__storage().__enable_psa_conversion() -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - } -# else - } - }) -# endif - { - /* TODO: precondition check - * __other.extent(r) equals Er for each r for which Er is a static extent, and - * either - * - sizeof...(_OtherExtents) is zero, or - * - __other.extent(r) is a representable value of type index_type for all rank index r of __other - */ - } - -# ifdef __NVCC__ - _CCCL_TEMPLATE(class... _Integral) - _CCCL_REQUIRES( - // TODO: check whether the other version works with newest NVCC, doesn't with 11.4 - // NVCC seems to pick up rank_dynamic from the wrong extents type??? - __fold_and_v<_CCCL_TRAIT(_CUDA_VSTD::is_convertible, _Integral, index_type)...> _CCCL_AND - __fold_and_v<_CCCL_TRAIT(_CUDA_VSTD::is_nothrow_constructible, index_type, _Integral)...> _CCCL_AND - // NVCC chokes on the fold thingy here so wrote the workaround - ((sizeof...(_Integral) == __detail::__count_dynamic_extents<_Extents...>::val) - || (sizeof...(_Integral) == sizeof...(_Extents)))) -# else - _CCCL_TEMPLATE(class... 
_Integral) - _CCCL_REQUIRES(__fold_and_v<_CCCL_TRAIT(_CUDA_VSTD::is_convertible, _Integral, index_type)...> _CCCL_AND - __fold_and_v<_CCCL_TRAIT(_CUDA_VSTD::is_nothrow_constructible, index_type, _Integral)...> _CCCL_AND( - (sizeof...(_Integral) == rank_dynamic()) || (sizeof...(_Integral) == rank()))) -# endif - _LIBCUDACXX_HIDE_FROM_ABI explicit constexpr extents(_Integral... __exts) noexcept -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - : __storage_{ -# else - : __base_t(__base_t { - typename __base_t::__stored_type - { -# endif - _CUDA_VSTD::conditional_t(), - static_cast(__exts)... -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - } -# else - } - }) -# endif - { - /* TODO: precondition check - * If sizeof...(_IndexTypes) != rank_dynamic() is true, exts_arr[r] equals Er for each r for which Er is a - * static extent, and either - * - sizeof...(__exts) == 0 is true, or - * - each element of __exts is nonnegative and is a representable value of type index_type. - */ - } - - // TODO: check whether this works with newest NVCC, doesn't with 11.4 -# ifdef __NVCC__ - // NVCC seems to pick up rank_dynamic from the wrong extents type??? 
- // NVCC chokes on the fold thingy here so wrote the workaround - _CCCL_TEMPLATE(class _IndexType, size_t _Np) - _CCCL_REQUIRES( - _CCCL_TRAIT(_CUDA_VSTD::is_convertible, _IndexType, index_type) - _CCCL_AND _CCCL_TRAIT(_CUDA_VSTD::is_nothrow_constructible, index_type, _IndexType) - _CCCL_AND((_Np == __detail::__count_dynamic_extents<_Extents...>::val) || (_Np == sizeof...(_Extents)))) -# else - _CCCL_TEMPLATE(class _IndexType, size_t _Np) - _CCCL_REQUIRES(_CCCL_TRAIT(_CUDA_VSTD::is_convertible, _IndexType, index_type) - _CCCL_AND _CCCL_TRAIT(_CUDA_VSTD::is_nothrow_constructible, index_type, _IndexType) - _CCCL_AND(_Np == rank() || _Np == rank_dynamic())) -# endif - __MDSPAN_CONDITIONAL_EXPLICIT(_Np != rank_dynamic()) - _LIBCUDACXX_HIDE_FROM_ABI constexpr extents(_CUDA_VSTD::array<_IndexType, _Np> const& __exts) noexcept -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - : __storage_{ -# else - : __base_t(__base_t { - typename __base_t::__stored_type - { -# endif - _CUDA_VSTD::conditional_t<_Np == rank_dynamic(), - __detail::__construct_psa_from_dynamic_exts_array_tag_t<0>, - __detail::__construct_psa_from_all_exts_array_tag_t>(), - _CUDA_VSTD::array<_IndexType, _Np>{__exts} -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - } -# else - } - }) -# endif - { - /* TODO: precondition check - * If _Np != rank_dynamic() is true, __exts[r] equals Er for each r for which Er is a static extent, and - * either - * - _Np is zero, or - * - __exts[r] is nonnegative and is a representable value of type index_type for all rank index r - */ - } - - // TODO: check whether the below works with newest NVCC, doesn't with 11.4 -# ifdef __NVCC__ - // NVCC seems to pick up rank_dynamic from the wrong extents type??? 
- // NVCC chokes on the fold thingy here so wrote the workaround - _CCCL_TEMPLATE(class _IndexType, size_t _Np) - _CCCL_REQUIRES( - _CCCL_TRAIT(_CUDA_VSTD::is_convertible, _IndexType, index_type) - _CCCL_AND _CCCL_TRAIT(_CUDA_VSTD::is_nothrow_constructible, index_type, _IndexType) - _CCCL_AND((_Np == __detail::__count_dynamic_extents<_Extents...>::val) || (_Np == sizeof...(_Extents)))) -# else - _CCCL_TEMPLATE(class _IndexType, size_t _Np) - _CCCL_REQUIRES(_CCCL_TRAIT(_CUDA_VSTD::is_convertible, _IndexType, index_type) - _CCCL_AND _CCCL_TRAIT(_CUDA_VSTD::is_nothrow_constructible, index_type, _IndexType) - _CCCL_AND(_Np == rank() || _Np == rank_dynamic())) -# endif - __MDSPAN_CONDITIONAL_EXPLICIT(_Np != rank_dynamic()) - _LIBCUDACXX_HIDE_FROM_ABI constexpr extents(_CUDA_VSTD::span<_IndexType, _Np> __exts) noexcept -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - : __storage_{ -# else - : __base_t(__base_t { - typename __base_t::__stored_type - { -# endif - _CUDA_VSTD::conditional_t<_Np == rank_dynamic(), - __detail::__construct_psa_from_dynamic_exts_array_tag_t<0>, - __detail::__construct_psa_from_all_exts_array_tag_t>(), - __exts -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - } -# else - } - }) -# endif - { - /* TODO: precondition check - * If _Np != rank_dynamic() is true, __exts[r] equals Er for each r for which Er is a static extent, and - * either - * - _Np is zero, or - * - __exts[r] is nonnegative and is a representable value of type index_type for all rank index r - */ - } - - // Need this constructor for some submdspan implementation stuff - // for the layout_stride case where I use an extents object for strides - _LIBCUDACXX_HIDE_FROM_ABI constexpr explicit extents(__storage_t const& __sto) noexcept -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - : __storage_{ -# else - : __base_t(__base_t { -# endif - __sto -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - } -# else - }) -# endif + template + static constexpr bool 
__is_convertible_to_index_type = + _CCCL_TRAIT(is_convertible, const _OtherIndexType&, index_type) + && _CCCL_TRAIT(is_nothrow_constructible, index_type, const _OtherIndexType&); + + _CCCL_TEMPLATE(class _OtherIndexType, size_t _Size) + _CCCL_REQUIRES((_Size == __rank_dynamic_) _CCCL_AND __is_convertible_to_index_type<_OtherIndexType>) + _LIBCUDACXX_HIDE_FROM_ABI constexpr extents(const array<_OtherIndexType, _Size>& __exts) noexcept + : extents(span(__exts)) {} - //-------------------------------------------------------------------------------- + _CCCL_TEMPLATE(class _OtherIndexType, size_t _Size) + _CCCL_REQUIRES((_Size == __rank_) _CCCL_AND(_Size != __rank_dynamic_) + _CCCL_AND __is_convertible_to_index_type<_OtherIndexType>) + _LIBCUDACXX_HIDE_FROM_ABI explicit constexpr extents(const array<_OtherIndexType, _Size>& __exts) noexcept + : extents(span(__exts)) + {} - _LIBCUDACXX_HIDE_FROM_ABI static constexpr size_t static_extent(size_t __n) noexcept + _CCCL_TEMPLATE(class _OtherIndexType, size_t _Size) + _CCCL_REQUIRES((_Size == __rank_dynamic_) _CCCL_AND __is_convertible_to_index_type<_OtherIndexType>) + _LIBCUDACXX_HIDE_FROM_ABI constexpr extents(span<_OtherIndexType, _Size> __exts) noexcept + : __vals_(__exts) { - // Can't do assert here since that breaks true constexpr ness - // assert(__n{}); + // Not catching this could lead to out of bounds errors later + // e.g. 
array a{200u}; mdspan> m(ptr, extents(span(a))); leads to an extent of -56 + // on m + _CCCL_ASSERT(__mdspan_detail::__are_representable_as(__exts), + "extents ctor: arguments must be representable as index_type and nonnegative"); } - _LIBCUDACXX_HIDE_FROM_ABI constexpr index_type extent(size_t __n) const noexcept + _CCCL_TEMPLATE(class _OtherIndexType, size_t _Size) + _CCCL_REQUIRES((_Size != __rank_dynamic_) _CCCL_AND(_Size == __rank_) + _CCCL_AND __is_convertible_to_index_type<_OtherIndexType>) + _LIBCUDACXX_HIDE_FROM_ABI explicit constexpr extents(span<_OtherIndexType, _Size> __exts) noexcept + : __vals_(__exts) { - // Can't do assert here since that breaks true constexpr ness - // assert(__n> m(ptr, extents(span(a))); leads to an extent of -56 + // on m + _CCCL_ASSERT(__mdspan_detail::__are_representable_as(__exts), + "extents ctor: arguments must be representable as index_type and nonnegative"); } - //-------------------------------------------------------------------------------- +private: + // Function to construct extents storage from other extents. + _CCCL_TEMPLATE(size_t _DynCount, size_t _Idx, class _OtherExtents, class... _DynamicValues) + _CCCL_REQUIRES((_Idx < __rank_) _CCCL_AND(static_extent(_Idx) == dynamic_extent)) + _LIBCUDACXX_HIDE_FROM_ABI constexpr _Values __construct_vals_from_extents( + integral_constant, + integral_constant, + const _OtherExtents& __exts, + _DynamicValues... __dynamic_values) noexcept + { + return __construct_vals_from_extents( + integral_constant(), + integral_constant(), + __exts, + __dynamic_values..., + __exts.extent(_Idx)); + } - template - _LIBCUDACXX_HIDE_FROM_ABI friend constexpr bool - operator==(extents const& lhs, extents<_OtherIndexType, _RHS...> const& __rhs) noexcept + // Function to construct extents storage from other extents. + _CCCL_TEMPLATE(size_t _DynCount, size_t _Idx, class _OtherExtents, class... 
_DynamicValues) + _CCCL_REQUIRES((_Idx < __rank_) _CCCL_AND(static_extent(_Idx) != dynamic_extent)) + _LIBCUDACXX_HIDE_FROM_ABI constexpr _Values __construct_vals_from_extents( + integral_constant, + integral_constant, + const _OtherExtents& __exts, + _DynamicValues... __dynamic_values) noexcept { - return lhs._eq_impl(__rhs, - integral_constant{}, - _CUDA_VSTD::make_index_sequence{}); + return __construct_vals_from_extents( + integral_constant(), integral_constant(), __exts, __dynamic_values...); } -# if !(__MDSPAN_HAS_CXX_20) - template - _LIBCUDACXX_HIDE_FROM_ABI friend constexpr bool - operator!=(extents const& lhs, extents<_OtherIndexType, _RHS...> const& __rhs) noexcept + _CCCL_TEMPLATE(size_t _DynCount, size_t _Idx, class _OtherExtents, class... _DynamicValues) + _CCCL_REQUIRES((_Idx == __rank_) && (_DynCount == __rank_dynamic_)) + _LIBCUDACXX_HIDE_FROM_ABI constexpr _Values __construct_vals_from_extents( + integral_constant, + integral_constant, + const _OtherExtents&, + _DynamicValues... __dynamic_values) noexcept { - return lhs._not_eq_impl(__rhs, - integral_constant{}, - _CUDA_VSTD::make_index_sequence{}); + return _Values{static_cast(__dynamic_values)...}; } -# endif - // End of public interface + template + static constexpr bool __potentially_narrowing = __mdspan_detail::__potentially_narrowing; -public: // (but not really) - _LIBCUDACXX_HIDE_FROM_ABI static constexpr extents - __make_extents_impl(__detail::__partially_static_sizes&& __bs) noexcept + _CCCL_TEMPLATE(class _OtherIndexType, size_t... 
_OtherExtents) + _CCCL_REQUIRES((rank() > 0) _CCCL_AND __potentially_narrowing<_OtherIndexType>) + _LIBCUDACXX_HIDE_FROM_ABI constexpr extents(__extent_delegate_tag, + const extents<_OtherIndexType, _OtherExtents...>& __other) noexcept + : __vals_(__construct_vals_from_extents(integral_constant(), integral_constant(), __other)) { - // This effectively amounts to a sideways cast that can be done in a constexpr - // context, but we have to do it to handle the case where the extents and the - // strides could accidentally end up with the same types in their hierarchies - // somehow (which would cause layout_stride::mapping to not be standard_layout) - return extents( -# ifdef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - __base_t{ -# endif - _CUDA_VSTD::move(__bs.template __with_tag<__detail::__extents_tag>()) -# ifdef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - } -# endif - ); + for (size_t __r = 0; __r < rank(); __r++) + { + // Not catching this could lead to out of bounds errors later + // e.g. dextents> e(dextents(200)) leads to an extent of -56 on e + _CCCL_ASSERT(__mdspan_detail::__is_representable_as(__other.extent(__r)), + "extents ctor: arguments must be representable as index_type and nonnegative"); + // Not catching this could lead to out of bounds errors later + // e.g. mdspan> m = mdspan>(new int[5], 5); + // Right-hand-side construction was ok, but m now thinks its range is 10 not 5 + _CCCL_ASSERT( + (_Values::__static_value(__r) == dynamic_extent) + || (static_cast(__other.extent(__r)) == static_cast(_Values::__static_value(__r))), + "extents construction: mismatch of provided arguments with static extents."); + } + } + + _CCCL_TEMPLATE(class _OtherIndexType, size_t... 
_OtherExtents) + _CCCL_REQUIRES((rank() > 0) _CCCL_AND(!__potentially_narrowing<_OtherIndexType>)) + _LIBCUDACXX_HIDE_FROM_ABI constexpr extents(__extent_delegate_tag, + const extents<_OtherIndexType, _OtherExtents...>& __other) noexcept + : __vals_(__construct_vals_from_extents(integral_constant(), integral_constant(), __other)) + { + for (size_t __r = 0; __r < rank(); __r++) + { + // Not catching this could lead to out of bounds errors later + // e.g. mdspan> m = mdspan>(new int[5], 5); + // Right-hand-side construction was ok, but m now thinks its range is 10 not 5 + _CCCL_ASSERT( + (_Values::__static_value(__r) == dynamic_extent) + || (static_cast(__other.extent(__r)) == static_cast(_Values::__static_value(__r))), + "extents construction: mismatch of provided arguments with static extents."); + } + } + + _CCCL_TEMPLATE(class _OtherIndexType, size_t... _OtherExtents) + _CCCL_REQUIRES((rank() == 0)) + _LIBCUDACXX_HIDE_FROM_ABI constexpr extents(__extent_delegate_tag, + const extents<_OtherIndexType, _OtherExtents...>& __other) noexcept + {} + +public: + // Converting constructor from other extents specializations + template + static constexpr bool __is_explicit_conversion = + _CCCL_FOLD_OR(((_Extents != dynamic_extent) && (_OtherExtents == dynamic_extent))) + || __potentially_narrowing<_OtherIndexType>; + + _CCCL_TEMPLATE(class _OtherIndexType, size_t... _OtherExtents) + _CCCL_REQUIRES((sizeof...(_OtherExtents) == sizeof...(_Extents)) _CCCL_AND _CCCL_FOLD_AND( + (_OtherExtents == dynamic_extent || _Extents == dynamic_extent || _OtherExtents == _Extents)) + _CCCL_AND(!__is_explicit_conversion<_OtherIndexType, _OtherExtents...>)) + _LIBCUDACXX_HIDE_FROM_ABI constexpr extents(const extents<_OtherIndexType, _OtherExtents...>& __other) noexcept + : extents(__extent_delegate_tag{}, __other) + {} + + _CCCL_TEMPLATE(class _OtherIndexType, size_t... 
_OtherExtents) + _CCCL_REQUIRES((sizeof...(_OtherExtents) == sizeof...(_Extents)) _CCCL_AND _CCCL_FOLD_AND( + (_OtherExtents == dynamic_extent || _Extents == dynamic_extent || _OtherExtents == _Extents)) + _CCCL_AND __is_explicit_conversion<_OtherIndexType, _OtherExtents...>) + _LIBCUDACXX_HIDE_FROM_ABI explicit constexpr extents( + const extents<_OtherIndexType, _OtherExtents...>& __other) noexcept + : extents(__extent_delegate_tag{}, __other) + {} + + // Comparison operator + template + _CCCL_NODISCARD_FRIEND _LIBCUDACXX_HIDE_FROM_ABI constexpr auto + operator==(const extents& __lhs, const extents<_OtherIndexType, _OtherExtents...>& __rhs) noexcept + _CCCL_TRAILING_REQUIRES(bool)((rank() != sizeof...(_OtherExtents))) + { + return false; + } + + template + _CCCL_NODISCARD_FRIEND _LIBCUDACXX_HIDE_FROM_ABI constexpr auto + operator==(const extents& __lhs, const extents<_OtherIndexType, _OtherExtents...>& __rhs) noexcept + _CCCL_TRAILING_REQUIRES(bool)((rank() == sizeof...(_OtherExtents) && rank() == 0)) + { + return true; } - template - __MDSPAN_FORCE_INLINE_FUNCTION constexpr index_type __extent() const noexcept + template + _CCCL_NODISCARD_FRIEND _LIBCUDACXX_HIDE_FROM_ABI constexpr auto + operator==(const extents& __lhs, const extents<_OtherIndexType, _OtherExtents...>& __rhs) noexcept + _CCCL_TRAILING_REQUIRES(bool)((rank() == sizeof...(_OtherExtents) && rank() != 0)) { - return __storage().template __get_n<_Np>(); + for (rank_type __r = 0; __r != __rank_; __r++) + { + // avoid warning when comparing signed and unsigner integers and pick the wider of two types + using _CommonType = common_type_t; + if (static_cast<_CommonType>(__lhs.extent(__r)) != static_cast<_CommonType>(__rhs.extent(__r))) + { + return false; + } + } + return true; } - template - _LIBCUDACXX_HIDE_FROM_ABI static constexpr index_type __static_extent() noexcept +# if _CCCL_STD_VER <= 2017 + template + _CCCL_NODISCARD_FRIEND _LIBCUDACXX_HIDE_FROM_ABI constexpr bool + operator!=(const extents& 
__lhs, const extents<_OtherIndexType, _OtherExtents...>& __rhs) noexcept { - return __storage_t::template __get_static_n<_Np, _Default>(); + return !(__lhs == __rhs); } +# endif // _CCCL_STD_VER <= 2017 }; -namespace __detail +// Recursive helper classes to implement dextents alias for extents +namespace __mdspan_detail { -template > +template > struct __make_dextents; +template > +using __make_dextents_t = typename __make_dextents<_IndexType, _Rank, _Extents>::type; + template -struct __make_dextents<_IndexType, _Rank, _CUDA_VSTD::extents<_IndexType, _ExtentsPack...>> +struct __make_dextents<_IndexType, _Rank, extents<_IndexType, _ExtentsPack...>> { - using type = - typename __make_dextents<_IndexType, - _Rank - 1, - _CUDA_VSTD::extents<_IndexType, _CUDA_VSTD::dynamic_extent, _ExtentsPack...>>::type; + using type = __make_dextents_t<_IndexType, _Rank - 1, extents<_IndexType, dynamic_extent, _ExtentsPack...>>; }; template -struct __make_dextents<_IndexType, 0, _CUDA_VSTD::extents<_IndexType, _ExtentsPack...>> +struct __make_dextents<_IndexType, 0, extents<_IndexType, _ExtentsPack...>> { - using type = _CUDA_VSTD::extents<_IndexType, _ExtentsPack...>; + using type = extents<_IndexType, _ExtentsPack...>; }; -} // end namespace __detail +} // end namespace __mdspan_detail +// [mdspan.extents.dextents], alias template template -using dextents = typename __detail::__make_dextents<_IndexType, _Rank>::type; +using dextents = __mdspan_detail::__make_dextents_t<_IndexType, _Rank>; template using dims = dextents<_IndexType, _Rank>; -# if defined(__MDSPAN_USE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION) -template -_CCCL_HOST_DEVICE extents(_IndexTypes...) 
- // Workaround for nvcc - //-> extents()...>; - // Adding "(void)" so that clang doesn't complain this is unused - -> extents; -# endif - -namespace __detail +# if _CCCL_STD_VER >= 2017 +// nvcc cannot handle type conversions without this workaround +struct __to_dynamic_extent { + template + static constexpr size_t value = dynamic_extent; +}; -template -struct __is_extents : false_type -{}; +// Deduction guide for extents +template +_CCCL_HOST_DEVICE extents(_IndexTypes...) -> extents...>; +# endif // _CCCL_STD_VER >= 2017 + +namespace __mdspan_detail +{ template -struct __is_extents<_CUDA_VSTD::extents<_IndexType, _ExtentsPack...>> : true_type +struct __is_extents> : true_type {}; -template -static constexpr bool __is_extents_v = __is_extents<_Tp>::value; +// Function to check whether a set of indices are a multidimensional +// index into extents. This is a word of power in the C++ standard +// requiring that the indices are larger than 0 and smaller than +// the respective extents. -template -struct __extents_to_partially_static_sizes; +_CCCL_TEMPLATE(class _IndexType, class _From) +_CCCL_REQUIRES(integral<_IndexType> _CCCL_AND integral<_From> _CCCL_AND _CCCL_TRAIT(is_signed, _From)) +_LIBCUDACXX_HIDE_FROM_ABI constexpr bool __is_index_in_extent(_IndexType __extent, _From __value) +{ + if (__value < 0) + { + return false; + } + using _Tp = common_type_t<_IndexType, _From>; + return static_cast<_Tp>(__value) < static_cast<_Tp>(__extent); +} -template -struct __extents_to_partially_static_sizes<_CUDA_VSTD::extents<_IndexType, _ExtentsPack...>> +_CCCL_TEMPLATE(class _IndexType, class _From) +_CCCL_REQUIRES(integral<_IndexType> _CCCL_AND integral<_From> _CCCL_AND(!_CCCL_TRAIT(is_signed, _From))) +_LIBCUDACXX_HIDE_FROM_ABI constexpr bool __is_index_in_extent(_IndexType __extent, _From __value) { - using type = __detail::__partially_static_sizes::index_type, - size_t, - _ExtentsPack...>; -}; + using _Tp = common_type_t<_IndexType, _From>; + return 
static_cast<_Tp>(__value) < static_cast<_Tp>(__extent); +} -template -using __extents_to_partially_static_sizes_t = typename __extents_to_partially_static_sizes<_Extents>::type; +_CCCL_TEMPLATE(class _IndexType, class _From) +_CCCL_REQUIRES(integral<_IndexType> _CCCL_AND(!integral<_From>) _CCCL_AND _CCCL_TRAIT(is_signed, _From)) +_LIBCUDACXX_HIDE_FROM_ABI constexpr bool __is_index_in_extent(_IndexType __extent, _From __value) +{ + if (static_cast<_IndexType>(__value) < 0) + { + return false; + } + return static_cast<_IndexType>(__value) < __extent; +} -} // end namespace __detail +_CCCL_TEMPLATE(class _IndexType, class _From) +_CCCL_REQUIRES(integral<_IndexType> _CCCL_AND(!integral<_From>) _CCCL_AND(!_CCCL_TRAIT(is_signed, _From))) +_LIBCUDACXX_HIDE_FROM_ABI constexpr bool __is_index_in_extent(_IndexType __extent, _From __value) +{ + return static_cast<_IndexType>(__value) < __extent; +} -#endif // _CCCL_STD_VER > 2011 +template +_LIBCUDACXX_HIDE_FROM_ABI constexpr bool +__is_multidimensional_index_in_impl(index_sequence<_Idxs...>, const _Extents& __ext, _From... __values) +{ + return _CCCL_FOLD_AND(__mdspan_detail::__is_index_in_extent(__ext.extent(_Idxs), __values)); +} + +template +_LIBCUDACXX_HIDE_FROM_ABI constexpr bool __is_multidimensional_index_in(const _Extents& __ext, _From... 
__values) +{ + return __mdspan_detail::__is_multidimensional_index_in_impl( + make_index_sequence<_Extents::rank()>(), __ext, __values...); +} + +} // namespace __mdspan_detail _LIBCUDACXX_END_NAMESPACE_STD +#endif // _CCCL_STD_VER >= 2014 + _CCCL_POP_MACROS -#endif // _LIBCUDACXX___MDSPAN_EXTENTS_HPP +#endif // _LIBCUDACXX___MDSPAN_EXTENTS_H diff --git a/libcudacxx/include/cuda/std/__mdspan/full_extent_t.h b/libcudacxx/include/cuda/std/__mdspan/full_extent_t.h deleted file mode 100644 index c8d8c67733a..00000000000 --- a/libcudacxx/include/cuda/std/__mdspan/full_extent_t.h +++ /dev/null @@ -1,74 +0,0 @@ -/* -//@HEADER -// ************************************************************************ -// -// Kokkos v. 2.0 -// Copyright (2019) Sandia Corporation -// -// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, -// the U.S. Government retains certain rights in this software. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// 1. Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// 3. Neither the name of the Corporation nor the names of the -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL SANDIA CORPORATION OR THE -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -// Questions? Contact Christian R. Trott (crtrott@sandia.gov) -// -// ************************************************************************ -//@HEADER -*/ - -#ifndef _LIBCUDACXX___MDSPAN_FULL_EXTENT_T_HPP -#define _LIBCUDACXX___MDSPAN_FULL_EXTENT_T_HPP - -#include - -#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) -# pragma GCC system_header -#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) -# pragma clang system_header -#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) -# pragma system_header -#endif // no system header - -#include - -_LIBCUDACXX_BEGIN_NAMESPACE_STD - -#if _CCCL_STD_VER > 2011 - -struct full_extent_t -{ - _CCCL_HIDE_FROM_ABI explicit full_extent_t() = default; -}; - -_CCCL_INLINE_VAR constexpr auto full_extent = full_extent_t{}; - -#endif // _CCCL_STD_VER > 2011 - -_LIBCUDACXX_END_NAMESPACE_STD - -#endif // _LIBCUDACXX___MDSPAN_FULL_EXTENT_T_HPP diff --git a/libcudacxx/include/cuda/std/__mdspan/layout_left.h b/libcudacxx/include/cuda/std/__mdspan/layout_left.h index 2f420b37a52..64ace949cac 100644 --- a/libcudacxx/include/cuda/std/__mdspan/layout_left.h +++ b/libcudacxx/include/cuda/std/__mdspan/layout_left.h @@ -1,48 +1,22 @@ -/* -//@HEADER -// ************************************************************************ +// -*- C++ -*- +//===----------------------------------------------------------------------===// // -// Kokkos v. 
2.0 -// Copyright (2019) Sandia Corporation +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. // -// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, -// the U.S. Government retains certain rights in this software. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// 1. Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. +// Kokkos v. 4.0 +// Copyright (2022) National Technology & Engineering +// Solutions of Sandia, LLC (NTESS). // -// 3. Neither the name of the Corporation nor the names of the -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL SANDIA CORPORATION OR THE -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -// Questions? Contact Christian R. Trott (crtrott@sandia.gov) +// Under the terms of Contract DE-NA0003525 with NTESS, +// the U.S. Government retains certain rights in this software. // -// ************************************************************************ -//@HEADER -*/ +//===---------------------------------------------------------------------===// -#ifndef _LIBCUDACXX___MDSPAN_LAYOUT_LEFT_HPP -#define _LIBCUDACXX___MDSPAN_LAYOUT_LEFT_HPP +#ifndef _LIBCUDACXX___MDSPAN_LAYOUT_LEFT_H +#define _LIBCUDACXX___MDSPAN_LAYOUT_LEFT_H #include @@ -54,26 +28,30 @@ # pragma system_header #endif // no system header +#include +#include +#include #include -#include -#include -#include #include #include #include #include +#include #include +#include -_LIBCUDACXX_BEGIN_NAMESPACE_STD +#if _CCCL_STD_VER >= 2014 -#if _CCCL_STD_VER > 2011 - -//============================================================================== +_LIBCUDACXX_BEGIN_NAMESPACE_STD +// Helper for lightweight test checking that one did pass a layout policy as LayoutPolicy template argument template class layout_left::mapping { public: + static_assert(__mdspan_detail::__is_extents<_Extents>::value, + "layout_left::mapping template argument must be a specialization of extents."); + using extents_type = _Extents; using index_type = typename extents_type::index_type; using size_type = typename extents_type::size_type; @@ -81,120 +59,205 @@ class layout_left::mapping 
using layout_type = layout_left; private: - static_assert(__detail::__is_extents_v, - "layout_left::mapping must be instantiated with a specialization of _CUDA_VSTD::extents."); - - template - friend class mapping; - - // i0+(i1 + E(1)*(i2 + E(2)*i3)) - template - struct __rank_count - {}; - - template - _CCCL_HOST_DEVICE constexpr index_type - __compute_offset(__rank_count<_r, _Rank>, const _Ip& __i, _Indices... __idx) const + _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool __mul_overflow(index_type x, index_type y, index_type* res) noexcept { - return __compute_offset(__rank_count<_r + 1, _Rank>(), __idx...) * __extents.template __extent<_r>() + __i; + *res = x * y; + return x && ((*res / x) != y); } - template - _CCCL_HOST_DEVICE constexpr index_type - __compute_offset(__rank_count, const _Ip& __i) const + template = 0> + _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool __required_span_size_is_representable(const extents_type& __ext) { - return __i; + index_type __prod = __ext.extent(0); + for (rank_type __r = 1; __r < extents_type::rank(); __r++) + { + bool __overflowed = __mul_overflow(__prod, __ext.extent(__r), &__prod); + if (__overflowed) + { + return false; + } + } + return true; } - _CCCL_HOST_DEVICE constexpr index_type __compute_offset(__rank_count<0, 0>) const + template = 0> + _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool __required_span_size_is_representable(const extents_type& __ext) { - return 0; + return true; } + static_assert((extents_type::rank_dynamic() > 0) || __required_span_size_is_representable(extents_type()), + "layout_left::mapping product of static extents must be representable as index_type."); + public: - //-------------------------------------------------------------------------------- + // [mdspan.layout.left.cons], constructors + constexpr mapping() noexcept = default; + constexpr mapping(const mapping&) noexcept = default; + _LIBCUDACXX_HIDE_FROM_ABI constexpr mapping(const extents_type& __ext) noexcept + : __extents_(__ext) + { 
+ // not catching this could lead to out-of-bounds access later when used inside mdspan + // mapping> map(dextents(40,40)); map(10, 3) == -126 + _CCCL_ASSERT(__required_span_size_is_representable(__ext), + "layout_left::mapping extents ctor: product of extents must be representable as index_type."); + } - _CCCL_HIDE_FROM_ABI constexpr mapping() noexcept = default; - _CCCL_HIDE_FROM_ABI constexpr mapping(mapping const&) noexcept = default; + _CCCL_TEMPLATE(class _OtherExtents) + _CCCL_REQUIRES(_CCCL_TRAIT(is_constructible, extents_type, _OtherExtents) + _CCCL_AND _CCCL_TRAIT(is_convertible, _OtherExtents, extents_type)) + _LIBCUDACXX_HIDE_FROM_ABI constexpr mapping(const mapping<_OtherExtents>& __other) noexcept + : __extents_(__other.extents()) + { + // not catching this could lead to out-of-bounds access later when used inside mdspan + // mapping> map(mapping>(dextents(40,40))); map(10, 3) == -126 + _CCCL_ASSERT(__mdspan_detail::__is_representable_as(__other.required_span_size()), + "layout_left::mapping converting ctor: other.required_span_size() must be representable as " + "index_type."); + } - _CCCL_HOST_DEVICE constexpr mapping(extents_type const& __exts) noexcept - : __extents(__exts) - {} + _CCCL_TEMPLATE(class _OtherExtents) + _CCCL_REQUIRES(_CCCL_TRAIT(is_constructible, extents_type, _OtherExtents) + _CCCL_AND(!_CCCL_TRAIT(is_convertible, _OtherExtents, extents_type))) + _LIBCUDACXX_HIDE_FROM_ABI explicit constexpr mapping(const mapping<_OtherExtents>& __other) noexcept + : __extents_(__other.extents()) + { + // not catching this could lead to out-of-bounds access later when used inside mdspan + // mapping> map(mapping>(dextents(40,40))); map(10, 3) == -126 + _CCCL_ASSERT(__mdspan_detail::__is_representable_as(__other.required_span_size()), + "layout_left::mapping converting ctor: other.required_span_size() must be representable as " + "index_type."); + } _CCCL_TEMPLATE(class _OtherExtents) - _CCCL_REQUIRES(_CCCL_TRAIT(_CUDA_VSTD::is_constructible, 
extents_type, _OtherExtents)) - __MDSPAN_CONDITIONAL_EXPLICIT((!_CUDA_VSTD::is_convertible<_OtherExtents, extents_type>::value)) // needs two () due - // to comma - _LIBCUDACXX_HIDE_FROM_ABI constexpr mapping( - mapping<_OtherExtents> const& __other) noexcept // NOLINT(google-explicit-constructor) - : __extents(__other.extents()) + _CCCL_REQUIRES((_OtherExtents::rank() <= 1) _CCCL_AND _CCCL_TRAIT(is_constructible, extents_type, _OtherExtents) + _CCCL_AND _CCCL_TRAIT(is_convertible, _OtherExtents, extents_type)) + _LIBCUDACXX_HIDE_FROM_ABI constexpr mapping(const layout_right::mapping<_OtherExtents>& __other) noexcept + : __extents_(__other.extents()) { - /* - * TODO: check precondition - * __other.required_span_size() is a representable value of type index_type - */ + // not catching this could lead to out-of-bounds access later when used inside mdspan + // Note: since this is constraint to rank 1, extents itself would catch the invalid conversion first + // and thus this assertion should never be triggered, but keeping it here for consistency + // layout_left::mapping> map( + // layout_right::mapping>(dextents(200))); map.extents().extent(0) == + // -56 + _CCCL_ASSERT(__mdspan_detail::__is_representable_as(__other.required_span_size()), + "layout_left::mapping converting ctor: other.required_span_size() must be representable as " + "index_type."); } _CCCL_TEMPLATE(class _OtherExtents) - _CCCL_REQUIRES(_CCCL_TRAIT(_CUDA_VSTD::is_constructible, extents_type, _OtherExtents) - _CCCL_AND(extents_type::rank() <= 1)) - __MDSPAN_CONDITIONAL_EXPLICIT((!_CUDA_VSTD::is_convertible<_OtherExtents, extents_type>::value)) // needs two () due - // to comma - _LIBCUDACXX_HIDE_FROM_ABI constexpr mapping( - layout_right::mapping<_OtherExtents> const& __other) noexcept // NOLINT(google-explicit-constructor) - : __extents(__other.extents()) + _CCCL_REQUIRES((_OtherExtents::rank() <= 1) _CCCL_AND _CCCL_TRAIT(is_constructible, extents_type, _OtherExtents) + 
_CCCL_AND(!_CCCL_TRAIT(is_convertible, _OtherExtents, extents_type))) + _LIBCUDACXX_HIDE_FROM_ABI explicit constexpr mapping(const layout_right::mapping<_OtherExtents>& __other) noexcept + : __extents_(__other.extents()) { - /* - * TODO: check precondition - * __other.required_span_size() is a representable value of type index_type - */ + // not catching this could lead to out-of-bounds access later when used inside mdspan + // Note: since this is constraint to rank 1, extents itself would catch the invalid conversion first + // and thus this assertion should never be triggered, but keeping it here for consistency + // layout_left::mapping> map( + // layout_right::mapping>(dextents(200))); map.extents().extent(0) == + // -56 + _CCCL_ASSERT(__mdspan_detail::__is_representable_as(__other.required_span_size()), + "layout_left::mapping converting ctor: other.required_span_size() must be representable as " + "index_type."); + } + + template + _LIBCUDACXX_HIDE_FROM_ABI constexpr bool __check_strides(const _OtherMappping& __other) const noexcept + { + // avoid warning when comparing signed and unsigner integers and pick the wider of two types + using _CommonType = common_type_t; + for (rank_type __r = 0; __r != extents_type::rank(); __r++) + { + if (static_cast<_CommonType>(stride(__r)) != static_cast<_CommonType>(__other.stride(__r))) + { + return false; + } + } + return true; } _CCCL_TEMPLATE(class _OtherExtents) - _CCCL_REQUIRES(_CCCL_TRAIT(_CUDA_VSTD::is_constructible, extents_type, _OtherExtents)) - __MDSPAN_CONDITIONAL_EXPLICIT((extents_type::rank() > 0)) - _LIBCUDACXX_HIDE_FROM_ABI constexpr mapping( - layout_stride::mapping<_OtherExtents> const& __other) // NOLINT(google-explicit-constructor) - : __extents(__other.extents()) + _CCCL_REQUIRES(_CCCL_TRAIT(is_constructible, extents_type, _OtherExtents) _CCCL_AND(extents_type::rank() > 0)) + _LIBCUDACXX_HIDE_FROM_ABI explicit constexpr mapping(const layout_stride::mapping<_OtherExtents>& __other) noexcept + : 
__extents_(__other.extents()) { - /* - * TODO: check precondition - * __other.required_span_size() is a representable value of type index_type - */ - NV_IF_TARGET(NV_IS_HOST, (size_t __stride = 1; for (rank_type __r = 0; __r < __extents.rank(); __r++) { - _LIBCUDACXX_THROW_RUNTIME_ERROR(__stride == static_cast(__other.stride(__r)), - "Assigning layout_stride to layout_left with invalid strides."); - __stride *= __extents.extent(__r); - })) + _CCCL_ASSERT(__check_strides(__other), + "layout_left::mapping from layout_stride ctor: strides are not compatible with layout_left."); + _CCCL_ASSERT(__mdspan_detail::__is_representable_as(__other.required_span_size()), + "layout_left::mapping from layout_stride ctor: other.required_span_size() must be representable as " + "index_type."); } - _CCCL_HIDE_FROM_ABI __MDSPAN_CONSTEXPR_14_DEFAULTED mapping& operator=(mapping const&) noexcept = default; + _CCCL_TEMPLATE(class _OtherExtents) + _CCCL_REQUIRES(_CCCL_TRAIT(is_constructible, extents_type, _OtherExtents) _CCCL_AND(extents_type::rank() == 0)) + _LIBCUDACXX_HIDE_FROM_ABI constexpr mapping(const layout_stride::mapping<_OtherExtents>& __other) noexcept + : __extents_(__other.extents()) + {} + + constexpr mapping& operator=(const mapping&) noexcept = default; + // [mdspan.layout.left.obs], observers _LIBCUDACXX_HIDE_FROM_ABI constexpr const extents_type& extents() const noexcept { - return __extents; + return __extents_; } + template = 0> _LIBCUDACXX_HIDE_FROM_ABI constexpr index_type required_span_size() const noexcept { - index_type __value = 1; - for (rank_type __r = 0; __r != extents_type::rank(); __r++) + index_type __size = 1; + for (size_t __r = 0; __r != extents_type::rank(); __r++) { - __value *= __extents.extent(__r); + __size *= __extents_.extent(__r); } - return __value; + return __size; } - //-------------------------------------------------------------------------------- + template = 0> + _LIBCUDACXX_HIDE_FROM_ABI constexpr index_type required_span_size() const 
noexcept + { + return 1; + } + + template + _LIBCUDACXX_HIDE_FROM_ABI constexpr index_type + __op_index(const array& __idx_a, index_sequence<_Pos...>) const noexcept + { + index_type __res = 0; +# if _CCCL_STD_VER >= 2017 + ((__res = __idx_a[extents_type::rank() - 1 - _Pos] + __extents_.extent(extents_type::rank() - 1 - _Pos) * __res), + ...); +# else // ^^^ _CCCL_STD_VER >= 2017 ^^^ / vvv _CCCL_STD_VER <= 2014 vvv + constexpr size_t __pos[sizeof...(_Pos)] = {(extents_type::rank() - 1 - _Pos)...}; + for (size_t __i = 0; __i < sizeof...(_Pos); ++__i) + { + __res = (__idx_a[__pos[__i]] + __extents_.extent(__pos[__i]) * __res); + } +# endif // _CCCL_STD_VER <= 2014 + return __res; + } + _LIBCUDACXX_HIDE_FROM_ABI constexpr index_type + __op_index(const array&, index_sequence<>) const noexcept + { + return 0; + } _CCCL_TEMPLATE(class... _Indices) _CCCL_REQUIRES((sizeof...(_Indices) == extents_type::rank()) - _CCCL_AND __fold_and_v<_CCCL_TRAIT(_CUDA_VSTD::is_convertible, _Indices, index_type)...> // - _CCCL_AND __fold_and_v<_CCCL_TRAIT(_CUDA_VSTD::is_nothrow_constructible, index_type, _Indices)...>) - _CCCL_HOST_DEVICE constexpr index_type operator()(_Indices... __idxs) const noexcept + _CCCL_AND _CCCL_FOLD_AND(_CCCL_TRAIT(is_convertible, _Indices, index_type)) + _CCCL_AND _CCCL_FOLD_AND(_CCCL_TRAIT(is_nothrow_constructible, index_type, _Indices))) + _LIBCUDACXX_HIDE_FROM_ABI constexpr index_type operator()(_Indices... 
__idx) const noexcept { - // Immediately cast incoming indices to `index_type` - return __compute_offset(__rank_count<0, extents_type::rank()>(), static_cast(__idxs)...); + // Mappings are generally meant to be used for accessing allocations and are meant to guarantee to never + // return a value exceeding required_span_size(), which is used to know how large an allocation one needs + // Thus, this is a canonical point in multi-dimensional data structures to make invalid element access checks + // However, mdspan does check this on its own, so for now we avoid double checking in hardened mode + _CCCL_ASSERT(__mdspan_detail::__is_multidimensional_index_in(__extents_, __idx...), + "layout_left::mapping: out of bounds indexing"); + + const array __idx_a{static_cast(__idx)...}; + return __op_index(__idx_a, make_index_sequence()); } _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool is_always_unique() noexcept @@ -210,67 +273,58 @@ class layout_left::mapping return true; } - _LIBCUDACXX_HIDE_FROM_ABI constexpr bool is_unique() const noexcept + _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool is_unique() noexcept { return true; } - _LIBCUDACXX_HIDE_FROM_ABI constexpr bool is_exhaustive() const noexcept + _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool is_exhaustive() noexcept { return true; } - _LIBCUDACXX_HIDE_FROM_ABI constexpr bool is_strided() const noexcept + _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool is_strided() noexcept { return true; } - _CCCL_TEMPLATE(class _Ext = _Extents) - _CCCL_REQUIRES((_Ext::rank() > 0)) - _LIBCUDACXX_HIDE_FROM_ABI constexpr index_type stride(rank_type __i) const noexcept + _CCCL_TEMPLATE(class _Extents2 = _Extents) + _CCCL_REQUIRES((_Extents2::rank() > 0)) + _LIBCUDACXX_HIDE_FROM_ABI constexpr index_type stride(rank_type __r) const noexcept { - index_type __value = 1; - for (rank_type __r = 0; __r < __i; __r++) + // While it would be caught by extents itself too, using a too large __r + // is functionally an out of bounds access on the 
stored information needed to compute strides + _CCCL_ASSERT(__r < extents_type::rank(), "layout_left::mapping::stride(): invalid rank index"); + index_type __s = 1; + for (rank_type __i = 0; __i < __r; __i++) { - __value *= __extents.extent(__r); + __s *= __extents_.extent(__i); } - return __value; + return __s; } - template - _LIBCUDACXX_HIDE_FROM_ABI friend constexpr bool - operator==(mapping const& __lhs, mapping<_OtherExtents> const& __rhs) noexcept + template + _LIBCUDACXX_HIDE_FROM_ABI friend constexpr auto + operator==(const mapping& __lhs, const mapping<_OtherExtents>& __rhs) noexcept + _CCCL_TRAILING_REQUIRES(bool)((_OtherExtents::rank() == _Extents2::rank())) { return __lhs.extents() == __rhs.extents(); } - // In C++ 20 the not equal exists if equal is found -# if !(__MDSPAN_HAS_CXX_20) - template - _LIBCUDACXX_HIDE_FROM_ABI friend constexpr bool - operator!=(mapping const& __lhs, mapping<_OtherExtents> const& __rhs) noexcept +# if _CCCL_STD_VER <= 2017 + template + _LIBCUDACXX_HIDE_FROM_ABI friend constexpr auto + operator!=(const mapping& __lhs, const mapping<_OtherExtents>& __rhs) noexcept + _CCCL_TRAILING_REQUIRES(bool)((_OtherExtents::rank() == _Extents2::rank())) { return __lhs.extents() != __rhs.extents(); } -# endif - - // Not really public, but currently needed to implement fully constexpr usable submdspan: - template - _CCCL_HOST_DEVICE constexpr index_type - __get_stride(_CUDA_VSTD::extents<_SizeType, _Ep...>, _CUDA_VSTD::integer_sequence) const - { - return __MDSPAN_FOLD_TIMES_RIGHT((_Idx < _Np ? 
__extents.template __extent<_Idx>() : 1), 1); - } - template - _CCCL_HOST_DEVICE constexpr index_type stride() const noexcept - { - return __get_stride<_Np>(__extents, _CUDA_VSTD::make_index_sequence()); - } +# endif // _CCCL_STD_VER <= 2017 private: - _CCCL_NO_UNIQUE_ADDRESS extents_type __extents{}; + _CCCL_NO_UNIQUE_ADDRESS extents_type __extents_{}; }; -#endif // _CCCL_STD_VER > 2011 - _LIBCUDACXX_END_NAMESPACE_STD -#endif // _LIBCUDACXX___MDSPAN_LAYOUT_LEFT_HPP +#endif // _CCCL_STD_VER >= 2014 + +#endif // _LIBCUDACXX___MDSPAN_LAYOUT_LEFT_H diff --git a/libcudacxx/include/cuda/std/__mdspan/layout_right.h b/libcudacxx/include/cuda/std/__mdspan/layout_right.h index 96c4bc60b1e..788584a0d75 100644 --- a/libcudacxx/include/cuda/std/__mdspan/layout_right.h +++ b/libcudacxx/include/cuda/std/__mdspan/layout_right.h @@ -1,48 +1,22 @@ -/* -//@HEADER -// ************************************************************************ +// -*- C++ -*- +//===----------------------------------------------------------------------===// // -// Kokkos v. 2.0 -// Copyright (2019) Sandia Corporation +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. // -// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, -// the U.S. Government retains certain rights in this software. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// 1. Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// -// 2. 
Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. +// Kokkos v. 4.0 +// Copyright (2022) National Technology & Engineering +// Solutions of Sandia, LLC (NTESS). // -// 3. Neither the name of the Corporation nor the names of the -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -// Questions? Contact Christian R. Trott (crtrott@sandia.gov) +// Under the terms of Contract DE-NA0003525 with NTESS, +// the U.S. Government retains certain rights in this software. 
// -// ************************************************************************ -//@HEADER -*/ +//===---------------------------------------------------------------------===// -#ifndef _LIBCUDACXX___MDSPAN_LAYOUT_RIGHT_HPP -#define _LIBCUDACXX___MDSPAN_LAYOUT_RIGHT_HPP +#ifndef _LIBCUDACXX___MDSPAN_LAYOUT_RIGHT_H +#define _LIBCUDACXX___MDSPAN_LAYOUT_RIGHT_H #include @@ -54,25 +28,30 @@ # pragma system_header #endif // no system header +#include +#include +#include #include -#include -#include #include #include #include #include #include +#include #include +#include -_LIBCUDACXX_BEGIN_NAMESPACE_STD +#if _CCCL_STD_VER >= 2014 -#if _CCCL_STD_VER > 2011 +_LIBCUDACXX_BEGIN_NAMESPACE_STD -//============================================================================== template class layout_right::mapping { public: + static_assert(__mdspan_detail::__is_extents<_Extents>::value, + "layout_right::mapping template argument must be a specialization of extents."); + using extents_type = _Extents; using index_type = typename extents_type::index_type; using size_type = typename extents_type::size_type; @@ -80,125 +59,202 @@ class layout_right::mapping using layout_type = layout_right; private: - static_assert(__detail::__is_extents_v, - "layout_right::mapping must be instantiated with a specialization of _CUDA_VSTD::extents."); - - template - friend class mapping; - - // i0+(i1 + E(1)*(i2 + E(2)*i3)) - template - struct __rank_count - {}; - - template - _CCCL_HOST_DEVICE constexpr index_type - __compute_offset(index_type __offset, __rank_count<_r, _Rank>, const _Ip& __i, _Indices... 
__idx) const + _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool + __mul_overflow(index_type __x, index_type __y, index_type* __res) noexcept { - return __compute_offset(__offset * __extents.template __extent<_r>() + __i, __rank_count<_r + 1, _Rank>(), __idx...); + *__res = __x * __y; + return __x && ((*__res / __x) != __y); } - template - _CCCL_HOST_DEVICE constexpr index_type - __compute_offset(__rank_count<0, extents_type::rank()>, const _Ip& __i, _Indices... __idx) const + template = 0> + _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool __required_span_size_is_representable(const extents_type& __ext) { - return __compute_offset(__i, __rank_count<1, extents_type::rank()>(), __idx...); + index_type __prod = __ext.extent(0); + for (rank_type __r = 1; __r < extents_type::rank(); __r++) + { + bool __overflowed = __mul_overflow(__prod, __ext.extent(__r), &__prod); + if (__overflowed) + { + return false; + } + } + return true; } - _CCCL_HOST_DEVICE constexpr index_type - __compute_offset(size_t __offset, __rank_count) const + template = 0> + _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool __required_span_size_is_representable(const extents_type& __ext) { - return static_cast(__offset); + return true; } - _CCCL_HOST_DEVICE constexpr index_type __compute_offset(__rank_count<0, 0>) const - { - return 0; - } + static_assert((extents_type::rank_dynamic() > 0) || __required_span_size_is_representable(extents_type()), + "layout_right::mapping product of static extents must be representable as index_type."); public: - //-------------------------------------------------------------------------------- - + // [mdspan.layout.right.cons], constructors _CCCL_HIDE_FROM_ABI constexpr mapping() noexcept = default; - _CCCL_HIDE_FROM_ABI constexpr mapping(mapping const&) noexcept = default; + _CCCL_HIDE_FROM_ABI constexpr mapping(const mapping&) noexcept = default; + _LIBCUDACXX_HIDE_FROM_ABI constexpr mapping(const extents_type& __ext) noexcept + : __extents_(__ext) + { + // not catching 
this could lead to out-of-bounds access later when used inside mdspan + // mapping> map(dextents(40,40)); map(3, 10) == -126 + _CCCL_ASSERT(__required_span_size_is_representable(__ext), + "layout_right::mapping extents ctor: product of extents must be representable as index_type."); + } - _CCCL_HOST_DEVICE constexpr mapping(extents_type const& __exts) noexcept - : __extents(__exts) - {} + _CCCL_TEMPLATE(class _OtherExtents) + _CCCL_REQUIRES(_CCCL_TRAIT(is_constructible, extents_type, _OtherExtents) + _CCCL_AND _CCCL_TRAIT(is_convertible, _OtherExtents, extents_type)) + _LIBCUDACXX_HIDE_FROM_ABI constexpr mapping(const mapping<_OtherExtents>& __other) noexcept + : __extents_(__other.extents()) + { + // not catching this could lead to out-of-bounds access later when used inside mdspan + // mapping> map(mapping>(dextents(40,40))); map(3, 10) == -126 + _CCCL_ASSERT(__mdspan_detail::__is_representable_as(__other.required_span_size()), + "layout_right::mapping converting ctor: other.required_span_size() must be representable as " + "index_type."); + } _CCCL_TEMPLATE(class _OtherExtents) - _CCCL_REQUIRES(_CCCL_TRAIT(_CUDA_VSTD::is_constructible, extents_type, _OtherExtents)) - __MDSPAN_CONDITIONAL_EXPLICIT((!_CUDA_VSTD::is_convertible<_OtherExtents, extents_type>::value)) // needs two () due - // to comma - _LIBCUDACXX_HIDE_FROM_ABI constexpr mapping( - mapping<_OtherExtents> const& __other) noexcept // NOLINT(google-explicit-constructor) - : __extents(__other.extents()) + _CCCL_REQUIRES(_CCCL_TRAIT(is_constructible, extents_type, _OtherExtents) + _CCCL_AND(!_CCCL_TRAIT(is_convertible, _OtherExtents, extents_type))) + _LIBCUDACXX_HIDE_FROM_ABI explicit constexpr mapping(const mapping<_OtherExtents>& __other) noexcept + : __extents_(__other.extents()) { - /* - * TODO: check precondition - * __other.required_span_size() is a representable value of type index_type - */ + // not catching this could lead to out-of-bounds access later when used inside mdspan + // mapping> 
map(mapping>(dextents(40,40))); map(3, 10) == -126 + _CCCL_ASSERT(__mdspan_detail::__is_representable_as(__other.required_span_size()), + "layout_right::mapping converting ctor: other.required_span_size() must be representable as " + "index_type."); } _CCCL_TEMPLATE(class _OtherExtents) - _CCCL_REQUIRES(_CCCL_TRAIT(_CUDA_VSTD::is_constructible, extents_type, _OtherExtents) - _CCCL_AND(extents_type::rank() <= 1)) - __MDSPAN_CONDITIONAL_EXPLICIT((!_CUDA_VSTD::is_convertible<_OtherExtents, extents_type>::value)) // needs two () due - // to comma - _LIBCUDACXX_HIDE_FROM_ABI constexpr mapping( - layout_left::mapping<_OtherExtents> const& __other) noexcept // NOLINT(google-explicit-constructor) - : __extents(__other.extents()) + _CCCL_REQUIRES((_OtherExtents::rank() <= 1) _CCCL_AND _CCCL_TRAIT(is_constructible, extents_type, _OtherExtents) + _CCCL_AND _CCCL_TRAIT(is_convertible, _OtherExtents, extents_type)) + _LIBCUDACXX_HIDE_FROM_ABI constexpr mapping(const layout_left::mapping<_OtherExtents>& __other) noexcept + : __extents_(__other.extents()) { - /* - * TODO: check precondition - * __other.required_span_size() is a representable value of type index_type - */ + // not catching this could lead to out-of-bounds access later when used inside mdspan + // Note: since this is constraint to rank 1, extents itself would catch the invalid conversion first + // and thus this assertion should never be triggered, but keeping it here for consistency + // layout_right::mapping> map( + // layout_left::mapping>(dextents(200))); map.extents().extent(0) == + // -56 + _CCCL_ASSERT(__mdspan_detail::__is_representable_as(__other.required_span_size()), + "layout_right::mapping converting ctor: other.required_span_size() must be representable as " + "index_type."); } _CCCL_TEMPLATE(class _OtherExtents) - _CCCL_REQUIRES(_CCCL_TRAIT(_CUDA_VSTD::is_constructible, extents_type, _OtherExtents)) - __MDSPAN_CONDITIONAL_EXPLICIT((extents_type::rank() > 0)) - _LIBCUDACXX_HIDE_FROM_ABI constexpr 
mapping( - layout_stride::mapping<_OtherExtents> const& __other) // NOLINT(google-explicit-constructor) - : __extents(__other.extents()) + _CCCL_REQUIRES((_OtherExtents::rank() <= 1) _CCCL_AND _CCCL_TRAIT(is_constructible, extents_type, _OtherExtents) + _CCCL_AND(!_CCCL_TRAIT(is_convertible, _OtherExtents, extents_type))) + _LIBCUDACXX_HIDE_FROM_ABI explicit constexpr mapping(const layout_left::mapping<_OtherExtents>& __other) noexcept + : __extents_(__other.extents()) { - /* - * TODO: check precondition - * __other.required_span_size() is a representable value of type index_type - */ - NV_IF_TARGET(NV_IS_HOST, (size_t __stride = 1; for (rank_type __r = __extents.rank(); __r > 0; __r--) { - _LIBCUDACXX_THROW_RUNTIME_ERROR(__stride == static_cast(__other.stride(__r - 1)), - "Assigning layout_stride to layout_right with invalid strides."); - __stride *= __extents.extent(__r - 1); - })) + // not catching this could lead to out-of-bounds access later when used inside mdspan + // Note: since this is constraint to rank 1, extents itself would catch the invalid conversion first + // and thus this assertion should never be triggered, but keeping it here for consistency + // layout_right::mapping> map( + // layout_left::mapping>(dextents(200))); map.extents().extent(0) == + // -56 + _CCCL_ASSERT(__mdspan_detail::__is_representable_as(__other.required_span_size()), + "layout_right::mapping converting ctor: other.required_span_size() must be representable as " + "index_type."); } - _CCCL_HIDE_FROM_ABI __MDSPAN_CONSTEXPR_14_DEFAULTED mapping& operator=(mapping const&) noexcept = default; + template + _LIBCUDACXX_HIDE_FROM_ABI constexpr bool __check_strides(const _OtherMappping& __other) const noexcept + { + // avoid warning when comparing signed and unsigner integers and pick the wider of two types + using _CommonType = common_type_t; + for (rank_type __r = 0; __r != extents_type::rank(); __r++) + { + if (static_cast<_CommonType>(stride(__r)) != 
static_cast<_CommonType>(__other.stride(__r))) + { + return false; + } + } + return true; + } + + _CCCL_TEMPLATE(class _OtherExtents) + _CCCL_REQUIRES(_CCCL_TRAIT(is_constructible, extents_type, _OtherExtents) _CCCL_AND(extents_type::rank() > 0)) + _LIBCUDACXX_HIDE_FROM_ABI explicit constexpr mapping(const layout_stride::mapping<_OtherExtents>& __other) noexcept + : __extents_(__other.extents()) + { + _CCCL_ASSERT(__check_strides(__other), + "layout_right::mapping from layout_stride ctor: strides are not compatible with layout_left."); + _CCCL_ASSERT(__mdspan_detail::__is_representable_as(__other.required_span_size()), + "layout_right::mapping from layout_stride ctor: other.required_span_size() must be representable as " + "index_type."); + } + _CCCL_TEMPLATE(class _OtherExtents) + _CCCL_REQUIRES(_CCCL_TRAIT(is_constructible, extents_type, _OtherExtents) _CCCL_AND(extents_type::rank() == 0)) + _LIBCUDACXX_HIDE_FROM_ABI constexpr mapping(const layout_stride::mapping<_OtherExtents>& __other) noexcept + : __extents_(__other.extents()) + {} + + _CCCL_HIDE_FROM_ABI constexpr mapping& operator=(const mapping&) noexcept = default; + + // [mdspan.layout.right.obs], observers _LIBCUDACXX_HIDE_FROM_ABI constexpr const extents_type& extents() const noexcept { - return __extents; + return __extents_; } + template = 0> _LIBCUDACXX_HIDE_FROM_ABI constexpr index_type required_span_size() const noexcept { - index_type __value = 1; - for (rank_type __r = 0; __r != extents_type::rank(); ++__r) + index_type __size = 1; + for (size_t __r = 0; __r != extents_type::rank(); __r++) { - __value *= __extents.extent(__r); + __size *= __extents_.extent(__r); } - return __value; + return __size; } - //-------------------------------------------------------------------------------- + template = 0> + _LIBCUDACXX_HIDE_FROM_ABI constexpr index_type required_span_size() const noexcept + { + return 1; + } + + template + _LIBCUDACXX_HIDE_FROM_ABI constexpr index_type 
__op_index(index_sequence<_Pos...>, _Indices... __idx) const noexcept + { + index_type __res = 0; +# if _CCCL_STD_VER >= 2017 + ((__res = static_cast(__idx) + __extents_.extent(_Pos) * __res), ...); +# else // ^^^ _CCCL_STD_VER >= 2017 ^^^ / vvv _CCCL_STD_VER <= 2014 vvv + const index_type __arr_pos[sizeof...(_Pos)] = {__extents_.extent(_Pos)...}; + const index_type __arr_idx[sizeof...(_Pos)] = {static_cast(__idx)...}; + for (size_t __i = 0; __i < sizeof...(_Pos); ++__i) + { + __res = __arr_idx[__i] + __arr_pos[__i] * __res; + } +# endif // _CCCL_STD_VER <= 2014 + return __res; + } + _LIBCUDACXX_HIDE_FROM_ABI constexpr index_type __op_index(index_sequence<>) const noexcept + { + return 0; + } _CCCL_TEMPLATE(class... _Indices) _CCCL_REQUIRES((sizeof...(_Indices) == extents_type::rank()) - _CCCL_AND __fold_and_v<_CCCL_TRAIT(_CUDA_VSTD::is_convertible, _Indices, index_type)...> // - _CCCL_AND __fold_and_v<_CCCL_TRAIT(_CUDA_VSTD::is_nothrow_constructible, index_type, _Indices)...>) - _CCCL_HOST_DEVICE constexpr index_type operator()(_Indices... __idxs) const noexcept + _CCCL_AND _CCCL_FOLD_AND(_CCCL_TRAIT(is_convertible, _Indices, index_type)) + _CCCL_AND _CCCL_FOLD_AND(_CCCL_TRAIT(is_nothrow_constructible, index_type, _Indices))) + _LIBCUDACXX_HIDE_FROM_ABI constexpr index_type operator()(_Indices... 
__idx) const noexcept { - return __compute_offset(__rank_count<0, extents_type::rank()>(), static_cast(__idxs)...); + // Mappings are generally meant to be used for accessing allocations and are meant to guarantee to never + // return a value exceeding required_span_size(), which is used to know how large an allocation one needs + // Thus, this is a canonical point in multi-dimensional data structures to make invalid element access checks + // However, mdspan does check this on its own, so for now we avoid double checking in hardened mode + _CCCL_ASSERT(__mdspan_detail::__is_multidimensional_index_in(__extents_, __idx...), + "layout_right::mapping: out of bounds indexing"); + return __op_index(make_index_sequence(), __idx...); } _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool is_always_unique() noexcept @@ -213,67 +269,59 @@ class layout_right::mapping { return true; } - _LIBCUDACXX_HIDE_FROM_ABI constexpr bool is_unique() const noexcept + + _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool is_unique() noexcept { return true; } - _LIBCUDACXX_HIDE_FROM_ABI constexpr bool is_exhaustive() const noexcept + _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool is_exhaustive() noexcept { return true; } - _LIBCUDACXX_HIDE_FROM_ABI constexpr bool is_strided() const noexcept + _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool is_strided() noexcept { return true; } - _CCCL_TEMPLATE(class _Ext = _Extents) - _CCCL_REQUIRES((_Ext::rank() > 0)) - _LIBCUDACXX_HIDE_FROM_ABI constexpr index_type stride(rank_type __i) const noexcept + _CCCL_TEMPLATE(class _Extents2 = _Extents) + _CCCL_REQUIRES((_Extents2::rank() > 0)) + _LIBCUDACXX_HIDE_FROM_ABI constexpr index_type stride(rank_type __r) const noexcept { - index_type __value = 1; - for (rank_type __r = extents_type::rank() - 1; __r > __i; __r--) + // While it would be caught by extents itself too, using a too large __r + // is functionally an out of bounds access on the stored information needed to compute strides + _CCCL_ASSERT(__r < 
extents_type::rank(), "layout_right::mapping::stride(): invalid rank index"); + index_type __s = 1; + for (rank_type __i = extents_type::rank() - 1; __i > __r; __i--) { - __value *= __extents.extent(__r); + __s *= __extents_.extent(__i); } - return __value; + return __s; } - template - _LIBCUDACXX_HIDE_FROM_ABI friend constexpr bool - operator==(mapping const& __lhs, mapping<_OtherExtents> const& __rhs) noexcept + template + _LIBCUDACXX_HIDE_FROM_ABI friend constexpr auto + operator==(const mapping& __lhs, const mapping<_OtherExtents>& __rhs) noexcept + _CCCL_TRAILING_REQUIRES(bool)((_OtherExtents::rank() == _Extents2::rank())) { return __lhs.extents() == __rhs.extents(); } - // In C++ 20 the not equal exists if equal is found -# if !(__MDSPAN_HAS_CXX_20) - template - _LIBCUDACXX_HIDE_FROM_ABI friend constexpr bool - operator!=(mapping const& __lhs, mapping<_OtherExtents> const& __rhs) noexcept +# if _CCCL_STD_VER <= 2017 + template + _LIBCUDACXX_HIDE_FROM_ABI friend constexpr auto + operator!=(const mapping& __lhs, const mapping<_OtherExtents>& __rhs) noexcept + _CCCL_TRAILING_REQUIRES(bool)((_OtherExtents::rank() == _Extents2::rank())) { return __lhs.extents() != __rhs.extents(); } -# endif - - // Not really public, but currently needed to implement fully constexpr usable submdspan: - template - _CCCL_HOST_DEVICE constexpr index_type - __get_stride(_CUDA_VSTD::extents<_SizeType, _Ep...>, _CUDA_VSTD::integer_sequence) const - { - return __MDSPAN_FOLD_TIMES_RIGHT((_Idx > _Np ? 
__extents.template __extent<_Idx>() : 1), 1); - } - template - _CCCL_HOST_DEVICE constexpr index_type __stride() const noexcept - { - return __get_stride<_Np>(__extents, _CUDA_VSTD::make_index_sequence()); - } +# endif // _CCCL_STD_VER <= 2017 private: - _CCCL_NO_UNIQUE_ADDRESS extents_type __extents{}; + _CCCL_NO_UNIQUE_ADDRESS extents_type __extents_{}; }; -#endif // _CCCL_STD_VER > 2011 - _LIBCUDACXX_END_NAMESPACE_STD -#endif // _LIBCUDACXX___MDSPAN_LAYOUT_RIGHT_HPP +#endif // _CCCL_STD_VER >= 2014 + +#endif // _LIBCUDACXX___MDSPAN_LAYOUT_RIGHT_H diff --git a/libcudacxx/include/cuda/std/__mdspan/layout_stride.h b/libcudacxx/include/cuda/std/__mdspan/layout_stride.h index fcc56d8815b..a7e6004df26 100644 --- a/libcudacxx/include/cuda/std/__mdspan/layout_stride.h +++ b/libcudacxx/include/cuda/std/__mdspan/layout_stride.h @@ -1,48 +1,22 @@ -/* -//@HEADER -// ************************************************************************ +// -*- C++ -*- +//===----------------------------------------------------------------------===// // -// Kokkos v. 2.0 -// Copyright (2019) Sandia Corporation +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. // -// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, -// the U.S. Government retains certain rights in this software. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// 1. Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// -// 2. 
Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// 3. Neither the name of the Corporation nor the names of the -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. +// Kokkos v. 4.0 +// Copyright (2022) National Technology & Engineering +// Solutions of Sandia, LLC (NTESS). // -// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -// Questions? Contact Christian R. Trott (crtrott@sandia.gov) +// Under the terms of Contract DE-NA0003525 with NTESS, +// the U.S. Government retains certain rights in this software. 
// -// ************************************************************************ -//@HEADER -*/ +//===---------------------------------------------------------------------===// -#ifndef _LIBCUDACXX___MDSPAN_LAYOUT_STRIDE_HPP -#define _LIBCUDACXX___MDSPAN_LAYOUT_STRIDE_HPP +#ifndef _LIBCUDACXX___MDSPAN_LAYOUT_STRIDE_H +#define _LIBCUDACXX___MDSPAN_LAYOUT_STRIDE_H #include @@ -54,478 +28,597 @@ # pragma system_header #endif // no system header -#include +#include +#include #include -#include -#ifdef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS -# include -#endif // _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS -#include -#include +#include #include #include #include #include -#include +#include #include -#include +#include #include -#include -#if __MDSPAN_USE_CONCEPTS && __MDSPAN_HAS_CXX_20 -# include -#endif // __MDSPAN_USE_CONCEPTS && __MDSPAN_HAS_CXX_20 -#include +#include +#include -_LIBCUDACXX_BEGIN_NAMESPACE_STD +_CCCL_PUSH_MACROS -#if _CCCL_STD_VER > 2011 +#if _CCCL_STD_VER >= 2014 -struct layout_left -{ - template - class mapping; -}; -struct layout_right -{ - template - class mapping; -}; +_LIBCUDACXX_BEGIN_NAMESPACE_STD -namespace __detail +namespace __layout_stride_detail { -template -_CCCL_INLINE_VAR constexpr bool __is_mapping_of = - is_same, _Mapping>::value; - -# if __MDSPAN_USE_CONCEPTS && __MDSPAN_HAS_CXX_20 -template -concept __layout_mapping_alike = requires { - requires __is_extents::value; - { _Mp::is_always_strided() } -> same_as; - { _Mp::is_always_exhaustive() } -> same_as; - { _Mp::is_always_unique() } -> same_as; - bool_constant<_Mp::is_always_strided()>::value; - bool_constant<_Mp::is_always_exhaustive()>::value; - bool_constant<_Mp::is_always_unique()>::value; -}; -# endif -} // namespace __detail -struct layout_stride +template +_CCCL_CONCEPT __can_convert = _CCCL_REQUIRES_EXPR((_StridedLayoutMapping, _Extents))( + requires(__mdspan_detail::__layout_mapping_alike<_StridedLayoutMapping>), + requires(_StridedLayoutMapping::is_always_unique()), + 
requires(_StridedLayoutMapping::is_always_strided()), + requires(_CCCL_TRAIT(is_constructible, _Extents, typename _StridedLayoutMapping::extents_type))); + +struct __constraints { - template - class mapping -# ifdef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - : private __detail::__no_unique_address_emulation< - __detail::__compressed_pair<_Extents, _CUDA_VSTD::array>> -# endif - { - public: - using extents_type = _Extents; - using index_type = typename extents_type::index_type; - using size_type = typename extents_type::size_type; - using rank_type = typename extents_type::rank_type; - using layout_type = layout_stride; - - // This could be a `requires`, but I think it's better and clearer as a `static_assert`. - static_assert(__detail::__is_extents_v<_Extents>, - "layout_stride::mapping must be instantiated with a specialization of _CUDA_VSTD::extents."); - - private: - //---------------------------------------------------------------------------- - - using __strides_storage_t = _CUDA_VSTD::array; //_CUDA_VSTD::dextents; - using __member_pair_t = __detail::__compressed_pair; - -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - _CCCL_NO_UNIQUE_ADDRESS __member_pair_t __members; -# else - using __base_t = __detail::__no_unique_address_emulation<__member_pair_t>; -# endif - - __MDSPAN_FORCE_INLINE_FUNCTION constexpr __strides_storage_t const& __strides_storage() const noexcept - { -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - return __members.__second(); -# else - return this->__base_t::__ref().__second(); -# endif - } - __MDSPAN_FORCE_INLINE_FUNCTION constexpr __strides_storage_t& __strides_storage() noexcept - { -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - return __members.__second(); -# else - return this->__base_t::__ref().__second(); -# endif - } + template + static constexpr bool __converts_implicit = + _CCCL_TRAIT(is_convertible, typename _StridedLayoutMapping::extents_type, _Extents) + && (__mdspan_detail::__is_mapping_of + || 
__mdspan_detail::__is_mapping_of + || __mdspan_detail::__is_mapping_of); +}; - template - _CCCL_HOST_DEVICE constexpr index_type - __get_size(_CUDA_VSTD::extents<_SizeType, _Ep...>, _CUDA_VSTD::integer_sequence) const - { - return __MDSPAN_FOLD_TIMES_RIGHT(static_cast(extents().extent(_Idx)), 1); - } +} // namespace __layout_stride_detail - //---------------------------------------------------------------------------- +template +class layout_stride::mapping +{ +public: + static_assert(__mdspan_detail::__is_extents<_Extents>::value, + "layout_stride::mapping template argument must be a specialization of extents."); - template - friend class mapping; + using extents_type = _Extents; + using index_type = typename extents_type::index_type; + using size_type = typename extents_type::size_type; + using rank_type = typename extents_type::rank_type; + using layout_type = layout_stride; - //---------------------------------------------------------------------------- +private: + static constexpr rank_type __rank_ = extents_type::rank(); - // Workaround for non-deducibility of the index sequence template parameter if it's given at the top level - template - struct __deduction_workaround; + using __stride_array = __mdspan_detail::__possibly_empty_array; - template - struct __deduction_workaround<_CUDA_VSTD::index_sequence<_Idxs...>> + // Used for default construction check and mandates + _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool + __mul_overflow(index_type __x, index_type __y, index_type* __res) noexcept + { + *__res = __x * __y; + return __x && ((*__res / __x) != __y); + } + _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool + __add_overflow(index_type __x, index_type __y, index_type* __res) noexcept + { + *__res = __x + __y; + return *__res < __y; + } + + _CCCL_TEMPLATE(size_t _Rank = _Extents::rank()) + _CCCL_REQUIRES((_Rank != 0)) + _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool + __required_span_size_is_representable(const extents_type& __ext) noexcept + { + 
index_type __prod = __ext.extent(0); + for (rank_type __r = 1; __r < __rank_; __r++) { - template - _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool - _eq_impl(mapping const& __self, mapping<_OtherExtents> const& __other) noexcept + bool __overflowed = __mul_overflow(__prod, __ext.extent(__r), &__prod); + if (__overflowed) { - return __MDSPAN_FOLD_AND((__self.stride(_Idxs) == __other.stride(_Idxs)) /* && ... */) - && __MDSPAN_FOLD_AND((__self.extents().extent(_Idxs) == __other.extents().extent(_Idxs)) /* || ... */); - } - template - _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool - _not_eq_impl(mapping const& __self, mapping<_OtherExtents> const& __other) noexcept - { - return __MDSPAN_FOLD_OR((__self.stride(_Idxs) != __other.stride(_Idxs)) /* || ... */) - || __MDSPAN_FOLD_OR((__self.extents().extent(_Idxs) != __other.extents().extent(_Idxs)) /* || ... */); - } - - template - __MDSPAN_FORCE_INLINE_FUNCTION static constexpr size_t - _call_op_impl(mapping const& __self, _Integral... __idxs) noexcept - { - return __MDSPAN_FOLD_PLUS_RIGHT((__idxs * __self.stride(_Idxs)), /* + ... + */ 0); + return false; } + } + return true; + } + _CCCL_TEMPLATE(size_t _Rank = _Extents::rank()) + _CCCL_REQUIRES((_Rank == 0)) + _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool + __required_span_size_is_representable(const extents_type& __ext) noexcept + { + return true; + } - _LIBCUDACXX_HIDE_FROM_ABI static constexpr size_t _req_span_size_impl(mapping const& __self) noexcept - { - // assumes no negative strides; not sure if I'm allowed to assume that or not - return __impl::_call_op_impl(__self, (__self.extents().template __extent<_Idxs>() - 1)...) 
+ 1; - } + _CCCL_TEMPLATE(class _OtherIndexType) + _CCCL_REQUIRES(_CCCL_TRAIT(is_integral, _OtherIndexType)) + _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool __conversion_may_overflow(_OtherIndexType __stride) noexcept + { + using _CommonType = common_type_t; + return static_cast<_CommonType>(__stride) > static_cast<_CommonType>((numeric_limits::max)()); + } + _CCCL_TEMPLATE(class _OtherIndexType) + _CCCL_REQUIRES((!_CCCL_TRAIT(is_integral, _OtherIndexType))) + _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool __conversion_may_overflow(_OtherIndexType) noexcept + { + return false; + } - template - _LIBCUDACXX_HIDE_FROM_ABI static constexpr const __strides_storage_t fill_strides(const _OtherMapping& __map) + _CCCL_TEMPLATE(class _OtherIndexType) + _CCCL_REQUIRES((__rank_ != 0)) + _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool __required_span_size_is_representable( + const extents_type& __ext, span<_OtherIndexType, extents_type::rank()> __strides) + { + index_type __size = 1; + for (rank_type __r = 0; __r != __rank_; __r++) + { + // We can only check correct conversion of _OtherIndexType if it is an integral + if (__conversion_may_overflow(__strides[__r])) { - return __strides_storage_t{static_cast(__map.stride(_Idxs))...}; + return false; } - - _LIBCUDACXX_HIDE_FROM_ABI static constexpr const __strides_storage_t& fill_strides(const __strides_storage_t& __s) + if (__ext.extent(__r) == static_cast(0)) { - return __s; + return true; } - template - _LIBCUDACXX_HIDE_FROM_ABI static constexpr const __strides_storage_t - fill_strides(const _CUDA_VSTD::array<_IntegralType, extents_type::rank()>& __s) + index_type __prod = (__ext.extent(__r) - 1); + if (__mul_overflow(__prod, static_cast(__strides[__r]), &__prod)) { - return __strides_storage_t{static_cast(__s[_Idxs])...}; + return false; } - - template - _LIBCUDACXX_HIDE_FROM_ABI static constexpr const __strides_storage_t - fill_strides(const _CUDA_VSTD::span<_IntegralType, extents_type::rank()>& __s) + if 
(__add_overflow(__size, __prod, &__size)) { - return __strides_storage_t{static_cast(__s[_Idxs])...}; + return false; } + } + return true; + } - _LIBCUDACXX_HIDE_FROM_ABI static constexpr const __strides_storage_t fill_strides( - __detail::__extents_to_partially_static_sizes_t<_CUDA_VSTD::dextents>&& __s) - { - return __strides_storage_t{static_cast(__s.template __get_n<_Idxs>())...}; - } + _CCCL_TEMPLATE(class _OtherIndexType) + _CCCL_REQUIRES((__rank_ == 0)) + _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool __required_span_size_is_representable( + const extents_type& __ext, span<_OtherIndexType, extents_type::rank()> __strides) + { + return true; + } - template - _LIBCUDACXX_HIDE_FROM_ABI static constexpr size_t __return_zero() - { - return 0; - } + // compute offset of a strided layout mapping + template + _LIBCUDACXX_HIDE_FROM_ABI static constexpr auto + __offset(const _StridedMapping& __mapping, index_sequence<_Pos...>) noexcept + { + using index_type = typename _StridedMapping::index_type; + return static_cast(__mapping((_Pos ? 0 : 0)...)); + } - template - _LIBCUDACXX_HIDE_FROM_ABI static constexpr typename _Mapping::index_type __OFFSET(const _Mapping& m) - { - return m(__return_zero<_Idxs>()...); - } - }; - - // Can't use defaulted parameter in the __deduction_workaround template because of a bug in MSVC warning C4348. 
- using __impl = __deduction_workaround<_CUDA_VSTD::make_index_sequence<_Extents::rank()>>; - - //---------------------------------------------------------------------------- - -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - _LIBCUDACXX_HIDE_FROM_ABI constexpr explicit mapping(__member_pair_t&& __m) - : __members(_CUDA_VSTD::move(__m)) - {} -# else - _LIBCUDACXX_HIDE_FROM_ABI constexpr explicit mapping(__base_t&& __b) - : __base_t(_CUDA_VSTD::move(__b)) - {} -# endif - - public: // but not really - _LIBCUDACXX_HIDE_FROM_ABI static constexpr mapping __make_mapping( - __detail::__extents_to_partially_static_sizes_t<_Extents>&& __exts, - __detail::__extents_to_partially_static_sizes_t<_CUDA_VSTD::dextents>&& - __strs) noexcept - { - // call the private constructor we created for this purpose - return mapping( -# ifdef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - __base_t{ -# endif - __member_pair_t(extents_type::__make_extents_impl(_CUDA_VSTD::move(__exts)), - __strides_storage_t{__impl::fill_strides(_CUDA_VSTD::move(__strs))}) -# ifdef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - } -# endif - ); - } - //---------------------------------------------------------------------------- - - public: - //-------------------------------------------------------------------------------- - - _CCCL_HIDE_FROM_ABI constexpr mapping() noexcept = default; - _CCCL_HIDE_FROM_ABI constexpr mapping(mapping const&) noexcept = default; - - // nvcc cannot deduce this constructor when using _CCCL_REQUIRES - template < - class _IntegralTypes, - enable_if_t<_CCCL_TRAIT(is_convertible, const remove_const_t<_IntegralTypes>&, index_type), int> = 0, - enable_if_t<_CCCL_TRAIT(is_nothrow_constructible, index_type, const remove_const_t<_IntegralTypes>&), int> = 0> - _LIBCUDACXX_HIDE_FROM_ABI constexpr mapping( - extents_type const& __e, _CUDA_VSTD::array<_IntegralTypes, extents_type::rank()> const& __s) noexcept -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - : __members{ -# else - : 
__base_t(__base_t{__member_pair_t( -# endif - __e, __strides_storage_t(__impl::fill_strides(__s)) -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - } -# else - )}) -# endif + _CCCL_TEMPLATE(class _StridedMapping) + _CCCL_REQUIRES((_StridedMapping::extents_type::rank() != 0)) + _LIBCUDACXX_HIDE_FROM_ABI static constexpr index_type __offset(const _StridedMapping& __mapping) + { + if (__mapping.required_span_size() == static_cast(0)) { - /* - * TODO: check preconditions - * - __s[i] > 0 is true for all i in the range [0, rank_ ). - * - REQUIRED-SPAN-SIZE(__e, __s) is a representable value of type index_type ([basic.fundamental]). - * - If rank_ is greater than 0, then there exists a permutation P of the integers in the - * range [0, rank_), such that __s[ pi ] >= __s[ pi − 1 ] * __e.extent( pi − 1 ) is true for - * all i in the range [1, rank_ ), where pi is the ith element of P. - */ + return static_cast(0); } - - // nvcc cannot deduce this constructor when using _CCCL_REQUIRES - template < - class _IntegralTypes, - enable_if_t<_CCCL_TRAIT(is_convertible, const remove_const_t<_IntegralTypes>&, index_type), int> = 0, - enable_if_t<_CCCL_TRAIT(is_nothrow_constructible, index_type, const remove_const_t<_IntegralTypes>&), int> = 0> - _LIBCUDACXX_HIDE_FROM_ABI constexpr mapping( - extents_type const& __e, _CUDA_VSTD::span<_IntegralTypes, extents_type::rank()> const& __s) noexcept -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - : __members{ -# else - : __base_t(__base_t{__member_pair_t( -# endif - __e, __strides_storage_t(__impl::fill_strides(__s)) -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - } -# else - )}) -# endif + else { - /* - * TODO: check preconditions - * - __s[i] > 0 is true for all i in the range [0, rank_ ). - * - REQUIRED-SPAN-SIZE(__e, __s) is a representable value of type index_type ([basic.fundamental]). 
- * - If rank_ is greater than 0, then there exists a permutation P of the integers in the - * range [0, rank_), such that __s[ pi ] >= __s[ pi − 1 ] * __e.extent( pi − 1 ) is true for - * all i in the range [1, rank_ ), where pi is the ith element of P. - */ + return __offset(__mapping, _CUDA_VSTD::make_index_sequence<__rank_>()); } - -# if !(__MDSPAN_USE_CONCEPTS && __MDSPAN_HAS_CXX_20) - _CCCL_TEMPLATE(class _StridedLayoutMapping) - _CCCL_REQUIRES( - _CCCL_TRAIT(_CUDA_VSTD::is_constructible, extents_type, typename _StridedLayoutMapping::extents_type) - _CCCL_AND __detail::__is_mapping_of - _CCCL_AND(_StridedLayoutMapping::is_always_unique()) _CCCL_AND(_StridedLayoutMapping::is_always_strided())) -# else - template - requires(__detail::__layout_mapping_alike<_StridedLayoutMapping> - && _CCCL_TRAIT(is_constructible, extents_type, typename _StridedLayoutMapping::extents_type) - && _StridedLayoutMapping::is_always_unique() && _StridedLayoutMapping::is_always_strided()) -# endif - __MDSPAN_CONDITIONAL_EXPLICIT( - (!is_convertible::value) - && (__detail::__is_mapping_of - || __detail::__is_mapping_of - || __detail::__is_mapping_of) ) // needs two () due to comma - _LIBCUDACXX_HIDE_FROM_ABI constexpr mapping( - _StridedLayoutMapping const& __other) noexcept // NOLINT(google-explicit-constructor) -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - : __members{ -# else - : __base_t(__base_t{__member_pair_t( -# endif - __other.extents(), __strides_storage_t(__impl::fill_strides(__other)) -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - } -# else - )}) -# endif + } + _CCCL_TEMPLATE(class _StridedMapping) + _CCCL_REQUIRES((_StridedMapping::extents_type::rank() == 0)) + _LIBCUDACXX_HIDE_FROM_ABI static constexpr index_type __offset(const _StridedMapping& __mapping) + { + return static_cast(__mapping()); + } + + static_assert((extents_type::rank_dynamic() > 0) || __required_span_size_is_representable(extents_type()), + "layout_stride::mapping product of static extents 
must be representable as index_type."); + +public: + // [mdspan.layout.stride.cons], constructors + _CCCL_TEMPLATE(size_t _Rank = _Extents::rank()) + _CCCL_REQUIRES((_Rank == 0)) + _LIBCUDACXX_HIDE_FROM_ABI constexpr mapping() noexcept + : __extents_(extents_type()) + {} + + _CCCL_TEMPLATE(size_t _Rank = _Extents::rank()) + _CCCL_REQUIRES((_Rank > 0)) + _LIBCUDACXX_HIDE_FROM_ABI constexpr mapping() noexcept + : __extents_(extents_type()) + { + index_type __stride = 1; + for (rank_type __r = __rank_ - 1; __r > static_cast(0); __r--) { - /* - * TODO: check preconditions - * - __other.stride(i) > 0 is true for all i in the range [0, rank_ ). - * - __other.required_span_size() is a representable value of type index_type ([basic.fundamental]). - * - OFFSET(__other) == 0 - */ + __strides_[__r] = __stride; + __stride *= __extents_.extent(__r); } + __strides_[0] = __stride; + } - //-------------------------------------------------------------------------------- + _CCCL_HIDE_FROM_ABI constexpr mapping(const mapping&) noexcept = default; - _CCCL_HIDE_FROM_ABI __MDSPAN_CONSTEXPR_14_DEFAULTED mapping& operator=(mapping const&) noexcept = default; + template + _LIBCUDACXX_HIDE_FROM_ABI static constexpr auto + __to_strides_array(span<_OtherIndexType, extents_type::rank()> __strides, index_sequence<_Pos...>) noexcept + { + (void) __strides; // nvcc believes strides is unused here + return __stride_array{static_cast(_CUDA_VSTD::as_const(__strides[_Pos]))...}; + } - _LIBCUDACXX_HIDE_FROM_ABI constexpr const extents_type& extents() const noexcept + template + _LIBCUDACXX_HIDE_FROM_ABI static constexpr auto + __check_strides(span<_OtherIndexType, extents_type::rank()> __strides, index_sequence<_Pos...>) noexcept + { + if constexpr (_CCCL_TRAIT(is_integral, _OtherIndexType)) { -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - return __members.__first(); -# else - return this->__base_t::__ref().__first(); -# endif - }; - - _LIBCUDACXX_HIDE_FROM_ABI constexpr _CUDA_VSTD::array 
strides() const noexcept + return _CCCL_FOLD_AND((__strides[_Pos] > static_cast<_OtherIndexType>(0))); + } + else { - return __strides_storage(); + return _CCCL_FOLD_AND((static_cast(__strides[_Pos]) > static_cast(0))); } + _CCCL_UNREACHABLE(); + } - _LIBCUDACXX_HIDE_FROM_ABI constexpr index_type required_span_size() const noexcept + // compute the permutation for sorting the stride array + // we never actually sort the stride array + _LIBCUDACXX_HIDE_FROM_ABI constexpr void + __bubble_sort_by_strides(array& __permute) const noexcept + { + for (rank_type __i = __rank_ - 1; __i > 0; __i--) { - index_type __span_size = 1; - for (rank_type __r = 0; __r != extents_type::rank(); __r++) + for (rank_type __r = 0; __r < __i; __r++) { - // Return early if any of the extents are zero - if (extents().extent(__r) == 0) + if (__strides_[__permute[__r]] > __strides_[__permute[__r + 1]]) { - return 0; + swap(__permute[__r], __permute[__r + 1]); + } + else + { + // if two strides are the same then one of the associated extents must be 1 or 0 + // both could be, but you can't have one larger than 1 come first + if ((__strides_[__permute[__r]] == __strides_[__permute[__r + 1]]) + && (__extents_.extent(__permute[__r]) > static_cast(1))) + { + swap(__permute[__r], __permute[__r + 1]); + } } - __span_size += (static_cast(extents().extent(__r) - 1) * __strides_storage()[__r]); } - return __span_size; } + } - _CCCL_TEMPLATE(class... _Indices) - _CCCL_REQUIRES((sizeof...(_Indices) == _Extents::rank()) - _CCCL_AND __fold_and_v<_CCCL_TRAIT(is_convertible, _Indices, index_type)...> // - _CCCL_AND __fold_and_v<_CCCL_TRAIT(is_nothrow_constructible, index_type, _Indices)...>) - __MDSPAN_FORCE_INLINE_FUNCTION - constexpr index_type operator()(_Indices... __idxs) const noexcept - { - // Should the op_impl operate in terms of `index_type` rather than `size_t`? - // Casting `size_t` to `index_type` here. 
- return static_cast(__impl::_call_op_impl(*this, static_cast(__idxs)...)); - } + template + _LIBCUDACXX_HIDE_FROM_ABI constexpr bool __check_unique_mapping(index_sequence<_Pos...>) const noexcept + { + // basically sort the dimensions based on strides and extents, sorting is represented in permute array + array __permute{_Pos...}; + __bubble_sort_by_strides(__permute); - _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool is_always_unique() noexcept + // check that this permutations represents a growing set + for (rank_type __i = 1; __i < __rank_; __i++) { - return true; + if (static_cast(__strides_[__permute[__i]]) + < static_cast(__strides_[__permute[__i - 1]]) * __extents_.extent(__permute[__i - 1])) + { + return false; + } } - _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool is_always_exhaustive() noexcept + return true; + } + _LIBCUDACXX_HIDE_FROM_ABI constexpr bool __check_unique_mapping(index_sequence<>) const noexcept + { + return true; + } + + // nvcc cannot deduce this constructor when using _CCCL_REQUIRES + template = 0, + enable_if_t<_CCCL_TRAIT(is_convertible, const _OtherIndexType&, index_type), int> = 0> + _LIBCUDACXX_HIDE_FROM_ABI constexpr mapping(const extents_type& __ext, + span<_OtherIndexType, extents_type::rank()> __strides) noexcept + : __extents_(__ext) + , __strides_(__to_strides_array(__strides, _CUDA_VSTD::make_index_sequence())) + { + _CCCL_ASSERT(__check_strides(__strides, _CUDA_VSTD::make_index_sequence()), + "layout_stride::mapping ctor: all strides must be greater than 0"); + _CCCL_ASSERT(__required_span_size_is_representable(__ext, __strides), + "layout_stride::mapping ctor: required span size is not representable as index_type."); + _CCCL_ASSERT(__check_unique_mapping(_CUDA_VSTD::make_index_sequence<__rank_>()), + "layout_stride::mapping ctor: the provided extents and strides lead to a non-unique mapping"); + } + + // nvcc cannot deduce this constructor when using _CCCL_REQUIRES + template = 0, + enable_if_t<_CCCL_TRAIT(is_convertible, 
const _OtherIndexType&, index_type), int> = 0> + _LIBCUDACXX_HIDE_FROM_ABI constexpr mapping(const extents_type& __ext, + const array<_OtherIndexType, extents_type::rank()> __strides) noexcept + : mapping(__ext, span(__strides)) + {} + + template + _LIBCUDACXX_HIDE_FROM_ABI static constexpr auto + __to_strides_array(const _StridedLayoutMapping& __other, index_sequence<_Pos...>) noexcept + { + return __stride_array{static_cast(__other.stride(_Pos))...}; + } + + // stride() only compiles for rank > 0 + template + _LIBCUDACXX_HIDE_FROM_ABI static constexpr auto + __check_mapped_strides(const _StridedLayoutMapping& __other, index_sequence<_Pos...>) noexcept + { + return _CCCL_FOLD_AND((static_cast(__other.stride(_Pos)) > static_cast(0))); + } + template + _LIBCUDACXX_HIDE_FROM_ABI static constexpr auto + __check_mapped_strides(const _StridedLayoutMapping&, index_sequence<>) noexcept + { + return true; + } + + _CCCL_TEMPLATE(class _StridedLayoutMapping) + _CCCL_REQUIRES(__layout_stride_detail::__can_convert<_StridedLayoutMapping, _Extents> _CCCL_AND + __layout_stride_detail::__constraints::__converts_implicit<_StridedLayoutMapping, _Extents>) + _LIBCUDACXX_HIDE_FROM_ABI constexpr mapping(const _StridedLayoutMapping& __other) noexcept + : __extents_(__other.extents()) + , __strides_(__to_strides_array(__other, _CUDA_VSTD::make_index_sequence<__rank_>())) + { + _CCCL_ASSERT(__check_mapped_strides(__other, _CUDA_VSTD::make_index_sequence<__rank_>()), + "layout_stride::mapping converting ctor: all strides must be greater than 0"); + _CCCL_ASSERT(__mdspan_detail::__is_representable_as(__other.required_span_size()), + "layout_stride::mapping converting ctor: other.required_span_size() must be representable as " + "index_type."); + _CCCL_ASSERT(static_cast(0) == __offset(__other), + "layout_stride::mapping converting ctor: base offset of mapping must be zero."); + } + _CCCL_TEMPLATE(class _StridedLayoutMapping) + 
_CCCL_REQUIRES(__layout_stride_detail::__can_convert<_StridedLayoutMapping, _Extents> _CCCL_AND( + !__layout_stride_detail::__constraints::__converts_implicit<_StridedLayoutMapping, _Extents>)) + _LIBCUDACXX_HIDE_FROM_ABI explicit constexpr mapping(const _StridedLayoutMapping& __other) noexcept + : __extents_(__other.extents()) + , __strides_(__to_strides_array(__other, _CUDA_VSTD::make_index_sequence<__rank_>())) + { + _CCCL_ASSERT(__check_mapped_strides(__other, _CUDA_VSTD::make_index_sequence<__rank_>()), + "layout_stride::mapping converting ctor: all strides must be greater than 0"); + _CCCL_ASSERT(__mdspan_detail::__is_representable_as(__other.required_span_size()), + "layout_stride::mapping converting ctor: other.required_span_size() must be representable as " + "index_type."); + _CCCL_ASSERT(static_cast(0) == __offset(__other), + "layout_stride::mapping converting ctor: base offset of mapping must be zero."); + } + + _CCCL_HIDE_FROM_ABI constexpr mapping& operator=(const mapping&) noexcept = default; + + // [mdspan.layout.stride.obs], observers + _LIBCUDACXX_HIDE_FROM_ABI constexpr const extents_type& extents() const noexcept + { + return __extents_; + } + + template + _LIBCUDACXX_HIDE_FROM_ABI constexpr array + __to_strides(index_sequence<_Pos...>) const noexcept + { + return array{__strides_[_Pos]...}; + } + + _LIBCUDACXX_HIDE_FROM_ABI constexpr array strides() const noexcept + { + return __to_strides(_CUDA_VSTD::make_index_sequence<__rank_>()); + } + + template + _LIBCUDACXX_HIDE_FROM_ABI constexpr index_type __required_span_size(index_sequence<_Pos...>) const noexcept + { + const index_type __product = _CCCL_FOLD_TIMES(index_type{1}, __extents_.extent(_Pos)); + if (__product == index_type{0}) { - return false; + return index_type{0}; } - _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool is_always_strided() noexcept + else { - return true; + return _CCCL_FOLD_PLUS(index_type{1}, ((__extents_.extent(_Pos) - static_cast(1)) * __strides_[_Pos])); } + } - 
_LIBCUDACXX_HIDE_FROM_ABI static constexpr bool is_unique() noexcept - { - return true; - } - _LIBCUDACXX_HIDE_FROM_ABI constexpr bool is_exhaustive() const noexcept + _LIBCUDACXX_HIDE_FROM_ABI constexpr index_type required_span_size() const noexcept + { + if constexpr (extents_type::rank() == 0) { - return required_span_size() == __get_size(extents(), _CUDA_VSTD::make_index_sequence()); + return static_cast(1); } - _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool is_strided() noexcept + else { - return true; + return __required_span_size(_CUDA_VSTD::make_index_sequence()); } + } - _CCCL_TEMPLATE(class _Ext = _Extents) - _CCCL_REQUIRES((_Ext::rank() > 0)) - _LIBCUDACXX_HIDE_FROM_ABI constexpr index_type stride(rank_type __r) const noexcept - { - return __strides_storage()[__r]; - } + template + _LIBCUDACXX_HIDE_FROM_ABI static constexpr index_type + __op_index(const __stride_array& __strides, index_sequence<_Pos...>, _Indices... __idx) noexcept + { + return _CCCL_FOLD_PLUS(index_type{0}, (static_cast(__idx) * __strides[_Pos])); + } + + _CCCL_TEMPLATE(class... _Indices) + _CCCL_REQUIRES((sizeof...(_Indices) == __rank_) + _CCCL_AND _CCCL_FOLD_AND(_CCCL_TRAIT(is_convertible, _Indices, index_type)) + _CCCL_AND _CCCL_FOLD_AND(_CCCL_TRAIT(is_nothrow_constructible, index_type, _Indices))) + _LIBCUDACXX_HIDE_FROM_ABI constexpr index_type operator()(_Indices... 
__idx) const noexcept + { + // Mappings are generally meant to be used for accessing allocations and are meant to guarantee to never + // return a value exceeding required_span_size(), which is used to know how large an allocation one needs + // Thus, this is a canonical point in multi-dimensional data structures to make invalid element access checks + // However, mdspan does check this on its own, so for now we avoid double checking in hardened mode + //_CCCL_ASSERT(__mdspan_detail::__is_multidimensional_index_in(__extents_, __idx...), + // "layout_stride::mapping: out of bounds indexing"); + return __op_index(__strides_, _CUDA_VSTD::make_index_sequence(), __idx...); + } + + _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool is_always_unique() noexcept + { + return true; + } + _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool is_always_exhaustive() noexcept + { + return false; + } + _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool is_always_strided() noexcept + { + return true; + } -# if !(__MDSPAN_USE_CONCEPTS && __MDSPAN_HAS_CXX_20) - _CCCL_TEMPLATE(class _StridedLayoutMapping) - _CCCL_REQUIRES(__detail::__is_mapping_of - _CCCL_AND(extents_type::rank() == _StridedLayoutMapping::extents_type::rank()) - _CCCL_AND(_StridedLayoutMapping::is_always_strided())) -# else - template - requires(__detail::__layout_mapping_alike<_StridedLayoutMapping> - && (extents_type::rank() == _StridedLayoutMapping::extents_type::rank()) - && _StridedLayoutMapping::is_always_strided()) -# endif - _LIBCUDACXX_HIDE_FROM_ABI friend constexpr bool - operator==(const mapping& __x, const _StridedLayoutMapping& __y) noexcept + _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool is_unique() noexcept + { + return true; + } + + // The answer of this function is fairly complex in the case where one or more + // extents are zero. 
+ // Technically it is meaningless to query is_exhaustive() in that case, but unfortunately + // the way the standard defines this function, we can't give a simple true or false then. + template + _LIBCUDACXX_HIDE_FROM_ABI constexpr index_type __to_total_size(index_sequence<_Pos...>) const noexcept + { + return _CCCL_FOLD_TIMES(index_type{1}, (__extents_.extent(_Pos))); + } + + _CCCL_TEMPLATE(size_t _Rank = _Extents::rank()) + _CCCL_REQUIRES((_Rank > 1)) + _LIBCUDACXX_HIDE_FROM_ABI constexpr bool is_exhaustive() const noexcept + { + const index_type __span_size = required_span_size(); + if (__span_size == static_cast(0)) { - bool __strides_match = true; - for (rank_type __r = 0; __r != extents_type::rank(); __r++) + rank_type __r_largest = 0; + for (rank_type __r = 1; __r < __rank_; __r++) { - __strides_match = __strides_match && (__x.stride(__r) == __y.stride(__r)); + if (__strides_[__r] > __strides_[__r_largest]) + { + __r_largest = __r; + } + } + for (rank_type __r = 0; __r != __rank_; __r++) + { + if (__extents_.extent(__r) == 0 && __r != __r_largest) + { + return false; + } } - return (__x.extents() == __y.extents()) - && (__impl::__OFFSET(__y) == static_cast(0)) && __strides_match; + return true; } - - // This one is not technically part of the proposal. 
Just here to make implementation a bit more optimal hopefully - _CCCL_TEMPLATE(class _OtherExtents) - _CCCL_REQUIRES((extents_type::rank() == _OtherExtents::rank())) - _LIBCUDACXX_HIDE_FROM_ABI friend constexpr bool - operator==(mapping const& __lhs, mapping<_OtherExtents> const& __rhs) noexcept + else { - return __impl::_eq_impl(__lhs, __rhs); + const index_type __total_size = __to_total_size(_CUDA_VSTD::make_index_sequence<__rank_>()); + return __span_size == __total_size; } - -# if !__MDSPAN_HAS_CXX_20 - _CCCL_TEMPLATE(class _StridedLayoutMapping) - _CCCL_REQUIRES(__detail::__is_mapping_of - _CCCL_AND(extents_type::rank() == _StridedLayoutMapping::extents_type::rank()) - _CCCL_AND(_StridedLayoutMapping::is_always_strided())) - _LIBCUDACXX_HIDE_FROM_ABI friend constexpr bool - operator!=(const mapping& __x, const _StridedLayoutMapping& __y) noexcept + } + _CCCL_TEMPLATE(size_t _Rank = _Extents::rank()) + _CCCL_REQUIRES((_Rank == 1)) + _LIBCUDACXX_HIDE_FROM_ABI constexpr bool is_exhaustive() const noexcept + { + const index_type __span_size = required_span_size(); + if (__span_size == static_cast(0)) { - return not(__x == __y); + return __strides_[0] == 1; } + else + { + const index_type __total_size = __to_total_size(_CUDA_VSTD::make_index_sequence<__rank_>()); + return __span_size == __total_size; + } + } + _CCCL_TEMPLATE(size_t _Rank = _Extents::rank()) + _CCCL_REQUIRES((_Rank == 0)) + _LIBCUDACXX_HIDE_FROM_ABI constexpr bool is_exhaustive() const noexcept + { + return true; + } + _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool is_strided() noexcept + { + return true; + } - _CCCL_TEMPLATE(class _OtherExtents) - _CCCL_REQUIRES((extents_type::rank() == _OtherExtents::rank())) - _LIBCUDACXX_HIDE_FROM_ABI friend constexpr bool - operator!=(mapping const& __lhs, mapping<_OtherExtents> const& __rhs) noexcept + // according to the standard layout_stride does not have a constraint on stride(r) for rank>0 + // it still has the precondition though + 
_LIBCUDACXX_HIDE_FROM_ABI constexpr index_type stride(rank_type __r) const noexcept + { + _CCCL_ASSERT(__r < __rank_, "layout_stride::mapping::stride(): invalid rank index"); + return __strides_[__r]; + } + + template + _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool + __op_eq(const mapping& __lhs, const _OtherMapping& __rhs, index_sequence<_Pos...>) noexcept + { + // avoid warning when comparing signed and unsigner integers and pick the wider of two types + using _CommonType = common_type_t; + return _CCCL_FOLD_AND( + (static_cast<_CommonType>(__lhs.stride(_Pos)) == static_cast<_CommonType>(__rhs.stride(_Pos)))); + } + + _CCCL_TEMPLATE(class _OtherMapping, class _Extents2 = _Extents) + _CCCL_REQUIRES((_Extents2::rank() > 0)) + _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool __op_eq(const mapping& __lhs, const _OtherMapping& __rhs) noexcept + { + if (__offset(__rhs)) { - return __impl::_not_eq_impl(__lhs, __rhs); + return false; } -# endif - }; -}; + return __lhs.extents() == __rhs.extents() && __op_eq(__lhs, __rhs, _CUDA_VSTD::make_index_sequence<__rank_>()); + } + + _CCCL_TEMPLATE(class _OtherMapping, class _Extents2 = _Extents) + _CCCL_REQUIRES((_Extents2::rank() == 0)) + _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool __op_eq(const mapping& __lhs, const _OtherMapping& __rhs) noexcept + { + return (!__offset(__rhs)); + } -#endif // _CCCL_STD_VER > 2011 + template + static constexpr bool __can_compare = + __mdspan_detail::__layout_mapping_alike<_OtherMapping> && (_OtherMapping::extents_type::rank() == _Extents::rank()) + && _OtherMapping::is_always_strided(); + + template + _LIBCUDACXX_HIDE_FROM_ABI friend constexpr auto operator==(const mapping& __lhs, const _OtherMapping& __rhs) noexcept + _CCCL_TRAILING_REQUIRES(bool)(__can_compare<_OtherMapping>) + { + return __op_eq(__lhs, __rhs); + } + +# if _CCCL_STD_VER <= 2017 + template + _LIBCUDACXX_HIDE_FROM_ABI friend constexpr auto operator==(const _OtherMapping& __lhs, const mapping& __rhs) noexcept + 
_CCCL_TRAILING_REQUIRES(bool)((!__mdspan_detail::__is_mapping_of) + && __can_compare<_OtherMapping>) + { + return __op_eq(__rhs, __lhs); + } + template + _LIBCUDACXX_HIDE_FROM_ABI friend constexpr auto operator!=(const mapping& __lhs, const _OtherMapping& __rhs) noexcept + _CCCL_TRAILING_REQUIRES(bool)(__can_compare<_OtherMapping>) + { + return !__op_eq(__lhs, __rhs); + } + template + _LIBCUDACXX_HIDE_FROM_ABI friend constexpr auto operator!=(const _OtherMapping& __lhs, const mapping& __rhs) noexcept + _CCCL_TRAILING_REQUIRES(bool)((!__mdspan_detail::__is_mapping_of) + && __can_compare<_OtherMapping>) + { + return __op_eq(__rhs, __lhs); + } +# endif // _CCCL_STD_VER <= 2017 + +private: + _CCCL_NO_UNIQUE_ADDRESS extents_type __extents_{}; + _CCCL_NO_UNIQUE_ADDRESS __stride_array __strides_{}; +}; _LIBCUDACXX_END_NAMESPACE_STD -#endif // _LIBCUDACXX___MDSPAN_LAYOUT_STRIDE_HPP +#endif // _CCCL_STD_VER >= 2014 + +_CCCL_POP_MACROS + +#endif // _LIBCUDACXX___MDSPAN_LAYOUT_STRIDE_H diff --git a/libcudacxx/include/cuda/std/__mdspan/macros.h b/libcudacxx/include/cuda/std/__mdspan/macros.h deleted file mode 100644 index 36895751bb1..00000000000 --- a/libcudacxx/include/cuda/std/__mdspan/macros.h +++ /dev/null @@ -1,646 +0,0 @@ -/* -//@HEADER -// ************************************************************************ -// -// Kokkos v. 2.0 -// Copyright (2019) Sandia Corporation -// -// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, -// the U.S. Government retains certain rights in this software. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// 1. Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// -// 2. 
Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// 3. Neither the name of the Corporation nor the names of the -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -// Questions? Contact Christian R. 
Trott (crtrott@sandia.gov) -// -// ************************************************************************ -//@HEADER -*/ - -#ifndef _LIBCUDACXX___MDSPAN_MACROS_HPP -#define _LIBCUDACXX___MDSPAN_MACROS_HPP - -#include - -#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) -# pragma GCC system_header -#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) -# pragma clang system_header -#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) -# pragma system_header -#endif // no system header - -#include -#include -#include -#include -#include -#include - -#if _CCCL_STD_VER > 2011 - -# ifndef __MDSPAN_FORCE_INLINE_FUNCTION -# ifdef __MDSPAN_COMPILER_MSVC // Microsoft compilers -# define __MDSPAN_FORCE_INLINE_FUNCTION __forceinline _CCCL_HOST_DEVICE -# else -# define __MDSPAN_FORCE_INLINE_FUNCTION __attribute__((always_inline)) _CCCL_HOST_DEVICE -# endif -# endif - -//============================================================================== -// {{{1 - -# define __MDSPAN_PP_COUNT(...) \ - __MDSPAN_PP_INTERNAL_EXPAND_ARGS_PRIVATE(__MDSPAN_PP_INTERNAL_ARGS_AUGMENTER(__VA_ARGS__)) - -# define __MDSPAN_PP_INTERNAL_ARGS_AUGMENTER(...) unused, __VA_ARGS__ -# define __MDSPAN_PP_INTERNAL_EXPAND(x) x -# define __MDSPAN_PP_INTERNAL_EXPAND_ARGS_PRIVATE(...) 
\ - __MDSPAN_PP_INTERNAL_EXPAND(__MDSPAN_PP_INTERNAL_COUNT_PRIVATE( \ - __VA_ARGS__, \ - 69, \ - 68, \ - 67, \ - 66, \ - 65, \ - 64, \ - 63, \ - 62, \ - 61, \ - 60, \ - 59, \ - 58, \ - 57, \ - 56, \ - 55, \ - 54, \ - 53, \ - 52, \ - 51, \ - 50, \ - 49, \ - 48, \ - 47, \ - 46, \ - 45, \ - 44, \ - 43, \ - 42, \ - 41, \ - 40, \ - 39, \ - 38, \ - 37, \ - 36, \ - 35, \ - 34, \ - 33, \ - 32, \ - 31, \ - 30, \ - 29, \ - 28, \ - 27, \ - 26, \ - 25, \ - 24, \ - 23, \ - 22, \ - 21, \ - 20, \ - 19, \ - 18, \ - 17, \ - 16, \ - 15, \ - 14, \ - 13, \ - 12, \ - 11, \ - 10, \ - 9, \ - 8, \ - 7, \ - 6, \ - 5, \ - 4, \ - 3, \ - 2, \ - 1, \ - 0)) -# define __MDSPAN_PP_INTERNAL_COUNT_PRIVATE( \ - _1_, \ - _2_, \ - _3_, \ - _4_, \ - _5_, \ - _6_, \ - _7_, \ - _8_, \ - _9_, \ - _10, \ - _11, \ - _12, \ - _13, \ - _14, \ - _15, \ - _16, \ - _17, \ - _18, \ - _19, \ - _20, \ - _21, \ - _22, \ - _23, \ - _24, \ - _25, \ - _26, \ - _27, \ - _28, \ - _29, \ - _30, \ - _31, \ - _32, \ - _33, \ - _34, \ - _35, \ - _36, \ - _37, \ - _38, \ - _39, \ - _40, \ - _41, \ - _42, \ - _43, \ - _44, \ - _45, \ - _46, \ - _47, \ - _48, \ - _49, \ - _50, \ - _51, \ - _52, \ - _53, \ - _54, \ - _55, \ - _56, \ - _57, \ - _58, \ - _59, \ - _60, \ - _61, \ - _62, \ - _63, \ - _64, \ - _65, \ - _66, \ - _67, \ - _68, \ - _69, \ - _70, \ - count, \ - ...) \ - count /**/ - -# define __MDSPAN_PP_STRINGIFY_IMPL(x) #x -# define __MDSPAN_PP_STRINGIFY(x) __MDSPAN_PP_STRINGIFY_IMPL(x) - -# define __MDSPAN_PP_CAT_IMPL(x, y) x##y -# define __MDSPAN_PP_CAT(x, y) __MDSPAN_PP_CAT_IMPL(x, y) - -# define __MDSPAN_PP_EVAL(X, ...) X(__VA_ARGS__) - -# define __MDSPAN_PP_REMOVE_PARENS_IMPL(...) __VA_ARGS__ -# define __MDSPAN_PP_REMOVE_PARENS(...) 
__MDSPAN_PP_REMOVE_PARENS_IMPL __VA_ARGS__ - -// end Preprocessor helpers }}}1 -//============================================================================== - -//============================================================================== -// {{{1 - -# if __MDSPAN_USE_RETURN_TYPE_DEDUCTION -# define __MDSPAN_DEDUCE_RETURN_TYPE_SINGLE_LINE(SIGNATURE, BODY) \ - auto __MDSPAN_PP_REMOVE_PARENS(SIGNATURE) \ - { \ - return __MDSPAN_PP_REMOVE_PARENS(BODY); \ - } -# define __MDSPAN_DEDUCE_DECLTYPE_AUTO_RETURN_TYPE_SINGLE_LINE(SIGNATURE, BODY) \ - decltype(auto) __MDSPAN_PP_REMOVE_PARENS(SIGNATURE) \ - { \ - return __MDSPAN_PP_REMOVE_PARENS(BODY); \ - } -# else -# define __MDSPAN_DEDUCE_RETURN_TYPE_SINGLE_LINE(SIGNATURE, BODY) \ - auto __MDSPAN_PP_REMOVE_PARENS(SIGNATURE) \ - -> _CUDA_VSTD::remove_cv_t<_CUDA_VSTD::remove_reference_t> \ - { \ - return __MDSPAN_PP_REMOVE_PARENS(BODY); \ - } -# define __MDSPAN_DEDUCE_DECLTYPE_AUTO_RETURN_TYPE_SINGLE_LINE(SIGNATURE, BODY) \ - auto __MDSPAN_PP_REMOVE_PARENS(SIGNATURE) -> decltype(BODY) \ - { \ - return __MDSPAN_PP_REMOVE_PARENS(BODY); \ - } - -# endif - -// end Return type deduction }}}1 -//============================================================================== - -//============================================================================== -// {{{1 - -# ifdef __MDSPAN_USE_FOLD_EXPRESSIONS -# define __MDSPAN_FOLD_AND(...) ((__VA_ARGS__) && ...) -# define __MDSPAN_FOLD_OR(...) ((__VA_ARGS__) || ...) -# define __MDSPAN_FOLD_ASSIGN_LEFT(__INIT, ...) (__INIT = ... = (__VA_ARGS__)) -# define __MDSPAN_FOLD_TIMES_RIGHT(__PACK, ...) (__PACK * ... * (__VA_ARGS__)) -# define __MDSPAN_FOLD_PLUS_RIGHT(__PACK, ...) (__PACK + ... + (__VA_ARGS__)) -# else - -_LIBCUDACXX_BEGIN_NAMESPACE_STD - -namespace __fold_compatibility_impl -{ - -// We could probably be more clever here, but at the (small) risk of losing some compiler understanding. 
For the -// few operations we need, it's not worth generalizing over the operation - -# if __MDSPAN_USE_RETURN_TYPE_DEDUCTION - -__MDSPAN_FORCE_INLINE_FUNCTION -constexpr decltype(auto) __fold_right_and_impl() -{ - return true; -} - -template -__MDSPAN_FORCE_INLINE_FUNCTION constexpr decltype(auto) __fold_right_and_impl(_Arg&& __arg, _Args&&... __args) -{ - return ((_Arg&&) __arg) && __fold_compatibility_impl::__fold_right_and_impl((_Args&&) __args...); -} - -__MDSPAN_FORCE_INLINE_FUNCTION -constexpr decltype(auto) __fold_right_or_impl() -{ - return false; -} - -template -__MDSPAN_FORCE_INLINE_FUNCTION constexpr auto __fold_right_or_impl(_Arg&& __arg, _Args&&... __args) -{ - return ((_Arg&&) __arg) || __fold_compatibility_impl::__fold_right_or_impl((_Args&&) __args...); -} - -template -__MDSPAN_FORCE_INLINE_FUNCTION constexpr auto __fold_left_assign_impl(_Arg1&& __arg1) -{ - return (_Arg1&&) __arg1; -} - -template -__MDSPAN_FORCE_INLINE_FUNCTION constexpr auto __fold_left_assign_impl(_Arg1&& __arg1, _Arg2&& __arg2, _Args&&... __args) -{ - return __fold_compatibility_impl::__fold_left_assign_impl( - (((_Arg1&&) __arg1) = ((_Arg2&&) __arg2)), (_Args&&) __args...); -} - -template -__MDSPAN_FORCE_INLINE_FUNCTION constexpr auto __fold_right_assign_impl(_Arg1&& __arg1) -{ - return (_Arg1&&) __arg1; -} - -template -__MDSPAN_FORCE_INLINE_FUNCTION constexpr auto __fold_right_assign_impl(_Arg1&& __arg1, _Arg2&& __arg2, _Args&&... __args) -{ - return ((_Arg1&&) __arg1) = - __fold_compatibility_impl::__fold_right_assign_impl((_Arg2&&) __arg2, (_Args&&) __args...); -} - -template -__MDSPAN_FORCE_INLINE_FUNCTION constexpr auto __fold_right_plus_impl(_Arg1&& __arg1) -{ - return (_Arg1&&) __arg1; -} - -template -__MDSPAN_FORCE_INLINE_FUNCTION constexpr auto __fold_right_plus_impl(_Arg1&& __arg1, _Arg2&& __arg2, _Args&&... 
__args) -{ - return ((_Arg1&&) __arg1) + __fold_compatibility_impl::__fold_right_plus_impl((_Arg2&&) __arg2, (_Args&&) __args...); -} - -template -__MDSPAN_FORCE_INLINE_FUNCTION constexpr auto __fold_right_times_impl(_Arg1&& __arg1) -{ - return (_Arg1&&) __arg1; -} - -template -__MDSPAN_FORCE_INLINE_FUNCTION constexpr auto __fold_right_times_impl(_Arg1&& __arg1, _Arg2&& __arg2, _Args&&... __args) -{ - return ((_Arg1&&) __arg1) * __fold_compatibility_impl::__fold_right_times_impl((_Arg2&&) __arg2, (_Args&&) __args...); -} - -# else - -//------------------------------------------------------------------------------ -// {{{2 - -template -struct __fold_right_and_impl_; -template <> -struct __fold_right_and_impl_<> -{ - using __rv = bool; - __MDSPAN_FORCE_INLINE_FUNCTION - static constexpr __rv __impl() noexcept - { - return true; - } -}; -template -struct __fold_right_and_impl_<_Arg, _Args...> -{ - using __next_t = __fold_right_and_impl_<_Args...>; - using __rv = decltype(_CUDA_VSTD::declval<_Arg>() && _CUDA_VSTD::declval()); - __MDSPAN_FORCE_INLINE_FUNCTION - static constexpr __rv __impl(_Arg&& __arg, _Args&&... __args) noexcept - { - return ((_Arg&&) __arg) && __next_t::__impl((_Args&&) __args...); - } -}; - -template -__MDSPAN_FORCE_INLINE_FUNCTION constexpr typename __fold_right_and_impl_<_Args...>::__rv -__fold_right_and_impl(_Args&&... 
__args) -{ - return __fold_right_and_impl_<_Args...>::__impl((_Args&&) __args...); -} - -// end right and }}}2 -//------------------------------------------------------------------------------ - -//------------------------------------------------------------------------------ -// {{{2 - -template -struct __fold_right_or_impl_; -template <> -struct __fold_right_or_impl_<> -{ - using __rv = bool; - __MDSPAN_FORCE_INLINE_FUNCTION - static constexpr __rv __impl() noexcept - { - return false; - } -}; -template -struct __fold_right_or_impl_<_Arg, _Args...> -{ - using __next_t = __fold_right_or_impl_<_Args...>; - using __rv = decltype(_CUDA_VSTD::declval<_Arg>() || _CUDA_VSTD::declval()); - __MDSPAN_FORCE_INLINE_FUNCTION - static constexpr __rv __impl(_Arg&& __arg, _Args&&... __args) noexcept - { - return ((_Arg&&) __arg) || __next_t::__impl((_Args&&) __args...); - } -}; - -template -__MDSPAN_FORCE_INLINE_FUNCTION constexpr typename __fold_right_or_impl_<_Args...>::__rv -__fold_right_or_impl(_Args&&... __args) -{ - return __fold_right_or_impl_<_Args...>::__impl((_Args&&) __args...); -} - -// end right or }}}2 -//------------------------------------------------------------------------------ - -//------------------------------------------------------------------------------ -// {{{2 - -template -struct __fold_right_plus_impl_; -template -struct __fold_right_plus_impl_<_Arg> -{ - using __rv = _Arg&&; - __MDSPAN_FORCE_INLINE_FUNCTION - static constexpr __rv __impl(_Arg&& __arg) noexcept - { - return (_Arg&&) __arg; - } -}; -template -struct __fold_right_plus_impl_<_Arg1, _Arg2, _Args...> -{ - using __next_t = __fold_right_plus_impl_<_Arg2, _Args...>; - using __rv = decltype(_CUDA_VSTD::declval<_Arg1>() + _CUDA_VSTD::declval()); - __MDSPAN_FORCE_INLINE_FUNCTION - static constexpr __rv __impl(_Arg1&& __arg, _Arg2&& __arg2, _Args&&... 
__args) noexcept - { - return ((_Arg1&&) __arg) + __next_t::__impl((_Arg2&&) __arg2, (_Args&&) __args...); - } -}; - -template -__MDSPAN_FORCE_INLINE_FUNCTION constexpr typename __fold_right_plus_impl_<_Args...>::__rv -__fold_right_plus_impl(_Args&&... __args) -{ - return __fold_right_plus_impl_<_Args...>::__impl((_Args&&) __args...); -} - -// end right plus }}}2 -//------------------------------------------------------------------------------ - -//------------------------------------------------------------------------------ -// {{{2 - -template -struct __fold_right_times_impl_; -template -struct __fold_right_times_impl_<_Arg> -{ - using __rv = _Arg&&; - __MDSPAN_FORCE_INLINE_FUNCTION - static constexpr __rv __impl(_Arg&& __arg) noexcept - { - return (_Arg&&) __arg; - } -}; -template -struct __fold_right_times_impl_<_Arg1, _Arg2, _Args...> -{ - using __next_t = __fold_right_times_impl_<_Arg2, _Args...>; - using __rv = decltype(_CUDA_VSTD::declval<_Arg1>() * _CUDA_VSTD::declval()); - __MDSPAN_FORCE_INLINE_FUNCTION - static constexpr __rv __impl(_Arg1&& __arg, _Arg2&& __arg2, _Args&&... __args) noexcept - { - return ((_Arg1&&) __arg) * __next_t::__impl((_Arg2&&) __arg2, (_Args&&) __args...); - } -}; - -template -__MDSPAN_FORCE_INLINE_FUNCTION constexpr typename __fold_right_times_impl_<_Args...>::__rv -__fold_right_times_impl(_Args&&... 
__args) -{ - return __fold_right_times_impl_<_Args...>::__impl((_Args&&) __args...); -} - -// end right times }}}2 -//------------------------------------------------------------------------------ - -//------------------------------------------------------------------------------ -// {{{2 - -template -struct __fold_right_assign_impl_; -template -struct __fold_right_assign_impl_<_Arg> -{ - using __rv = _Arg&&; - __MDSPAN_FORCE_INLINE_FUNCTION - static constexpr __rv __impl(_Arg&& __arg) noexcept - { - return (_Arg&&) __arg; - } -}; -template -struct __fold_right_assign_impl_<_Arg1, _Arg2, _Args...> -{ - using __next_t = __fold_right_assign_impl_<_Arg2, _Args...>; - using __rv = decltype(_CUDA_VSTD::declval<_Arg1>() = _CUDA_VSTD::declval()); - __MDSPAN_FORCE_INLINE_FUNCTION - static constexpr __rv __impl(_Arg1&& __arg, _Arg2&& __arg2, _Args&&... __args) noexcept - { - return ((_Arg1&&) __arg) = __next_t::__impl((_Arg2&&) __arg2, (_Args&&) __args...); - } -}; - -template -__MDSPAN_FORCE_INLINE_FUNCTION constexpr typename __fold_right_assign_impl_<_Args...>::__rv -__fold_right_assign_impl(_Args&&... 
__args) -{ - return __fold_right_assign_impl_<_Args...>::__impl((_Args&&) __args...); -} - -// end right assign }}}2 -//------------------------------------------------------------------------------ - -//------------------------------------------------------------------------------ -// {{{2 - -template -struct __fold_left_assign_impl_; -template -struct __fold_left_assign_impl_<_Arg> -{ - using __rv = _Arg&&; - __MDSPAN_FORCE_INLINE_FUNCTION - static constexpr __rv __impl(_Arg&& __arg) noexcept - { - return (_Arg&&) __arg; - } -}; -template -struct __fold_left_assign_impl_<_Arg1, _Arg2, _Args...> -{ - using __assign_result_t = decltype(_CUDA_VSTD::declval<_Arg1>() = _CUDA_VSTD::declval<_Arg2>()); - using __next_t = __fold_left_assign_impl_<__assign_result_t, _Args...>; - using __rv = typename __next_t::__rv; - __MDSPAN_FORCE_INLINE_FUNCTION - static constexpr __rv __impl(_Arg1&& __arg, _Arg2&& __arg2, _Args&&... __args) noexcept - { - return __next_t::__impl(((_Arg1&&) __arg) = (_Arg2&&) __arg2, (_Args&&) __args...); - } -}; - -template -__MDSPAN_FORCE_INLINE_FUNCTION constexpr typename __fold_left_assign_impl_<_Args...>::__rv -__fold_left_assign_impl(_Args&&... __args) -{ - return __fold_left_assign_impl_<_Args...>::__impl((_Args&&) __args...); -} - -// end left assign }}}2 -//------------------------------------------------------------------------------ - -# endif - -template -struct __bools; - -} // namespace __fold_compatibility_impl - -_LIBCUDACXX_END_NAMESPACE_STD - -# define __MDSPAN_FOLD_AND(...) _CUDA_VSTD::__fold_compatibility_impl::__fold_right_and_impl((__VA_ARGS__)...) -# define __MDSPAN_FOLD_OR(...) _CUDA_VSTD::__fold_compatibility_impl::__fold_right_or_impl((__VA_ARGS__)...) -# define __MDSPAN_FOLD_ASSIGN_LEFT(__INIT, ...) \ - _CUDA_VSTD::__fold_compatibility_impl::__fold_left_assign_impl(__INIT, (__VA_ARGS__)...) -# define __MDSPAN_FOLD_TIMES_RIGHT(__PACK, ...) 
\ - _CUDA_VSTD::__fold_compatibility_impl::__fold_right_times_impl((__PACK)..., __VA_ARGS__) -# define __MDSPAN_FOLD_PLUS_RIGHT(__PACK, ...) \ - _CUDA_VSTD::__fold_compatibility_impl::__fold_right_plus_impl((__PACK)..., __VA_ARGS__) - -# endif - -// end Variable template compatibility }}}1 -//============================================================================== - -//============================================================================== -// {{{1 - -# if __MDSPAN_USE_CONSTEXPR_14 -// Workaround for a bug (I think?) in EDG frontends -# ifdef __EDG__ -# define __MDSPAN_CONSTEXPR_14_DEFAULTED -# else -# define __MDSPAN_CONSTEXPR_14_DEFAULTED constexpr -# endif -# else -# define __MDSPAN_CONSTEXPR_14_DEFAULTED -# endif - -// end Pre-C++14 constexpr }}}1 -//============================================================================== - -#endif // _CCCL_STD_VER > 2011 - -#ifndef _CCCL_NO_EXCEPTIONS -# define _LIBCUDACXX_THROW_RUNTIME_ERROR(_COND, _MESSAGE) \ - if (!(_COND)) \ - __throw_runtime_error(_MESSAGE) -#else // ^^^ !_CCCL_NO_EXCEPTIONS ^^^ / vvv _CCCL_NO_EXCEPTIONS vvv -# define _LIBCUDACXX_THROW_RUNTIME_ERROR(_COND, _MESSAGE) _CCCL_ASSERT(_COND, _MESSAGE) -#endif // _CCCL_NO_EXCEPTIONS - -#endif // _LIBCUDACXX___MDSPAN_MACROS_HPP diff --git a/libcudacxx/include/cuda/std/__mdspan/maybe_static_value.h b/libcudacxx/include/cuda/std/__mdspan/maybe_static_value.h deleted file mode 100644 index fd978c6c3f8..00000000000 --- a/libcudacxx/include/cuda/std/__mdspan/maybe_static_value.h +++ /dev/null @@ -1,165 +0,0 @@ -/* -//@HEADER -// ************************************************************************ -// -// Kokkos v. 2.0 -// Copyright (2019) Sandia Corporation -// -// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, -// the U.S. Government retains certain rights in this software. 
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// 1. Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// 3. Neither the name of the Corporation nor the names of the -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -// Questions? Contact Christian R. 
Trott (crtrott@sandia.gov) -// -// ************************************************************************ -//@HEADER -*/ - -#ifndef _LIBCUDACXX___MDSPAN_MAYBE_STATIC_VALUE_HPP -#define _LIBCUDACXX___MDSPAN_MAYBE_STATIC_VALUE_HPP - -#include - -#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) -# pragma GCC system_header -#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) -# pragma clang system_header -#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) -# pragma system_header -#endif // no system header - -#include -#include -#ifdef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS -# include -#endif // _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - -// This is only needed for the non-standard-layout version of partially -// static array. -// Needs to be after the includes above to work with the single header generator -#if !__MDSPAN_PRESERVE_STANDARD_LAYOUT - -_LIBCUDACXX_BEGIN_NAMESPACE_STD - -# if _CCCL_STD_VER > 2011 - -//============================================================================== - -namespace __detail -{ - -// static case -template -struct __maybe_static_value -{ - static constexpr _static_t __static_value = __v; - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _dynamic_t __value() const noexcept - { - return static_cast<_dynamic_t>(__v); - } - template - __MDSPAN_FORCE_INLINE_FUNCTION constexpr void __set_value(_Up&& /*__rhs*/) noexcept - { - // Should we assert that the value matches the static value here? 
- } - - //-------------------------------------------------------------------------- - - _CCCL_HIDE_FROM_ABI constexpr __maybe_static_value() noexcept = default; - _CCCL_HIDE_FROM_ABI constexpr __maybe_static_value(__maybe_static_value const&) noexcept = default; - _CCCL_HIDE_FROM_ABI constexpr __maybe_static_value(__maybe_static_value&&) noexcept = default; - _CCCL_HIDE_FROM_ABI __MDSPAN_CONSTEXPR_14_DEFAULTED __maybe_static_value& - operator=(__maybe_static_value const&) noexcept = default; - _CCCL_HIDE_FROM_ABI __MDSPAN_CONSTEXPR_14_DEFAULTED __maybe_static_value& - operator=(__maybe_static_value&&) noexcept = default; - _CCCL_HIDE_FROM_ABI ~__maybe_static_value() noexcept = default; - - _LIBCUDACXX_HIDE_FROM_ABI constexpr explicit __maybe_static_value(_dynamic_t const&) noexcept - { - // Should we assert that the value matches the static value here? - } - - //-------------------------------------------------------------------------- -}; - -// dynamic case -template -struct __maybe_static_value<_dynamic_t, _static_t, __is_dynamic_sentinal, __is_dynamic_sentinal, __array_entry_index> -# ifdef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - : __no_unique_address_emulation<_Tp> -# endif -{ - static constexpr _static_t __static_value = __is_dynamic_sentinal; -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - _CCCL_NO_UNIQUE_ADDRESS _dynamic_t __v = {}; - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _dynamic_t __value() const noexcept - { - return __v; - } - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _dynamic_t& __ref() noexcept - { - return __v; - } - template - __MDSPAN_FORCE_INLINE_FUNCTION constexpr void __set_value(_Up&& __rhs) noexcept - { - __v = (_Up&&) rhs; - } -# else - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _dynamic_t __value() const noexcept - { - return this->__no_unique_address_emulation<_dynamic_t>::__ref(); - } - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _dynamic_t& __ref() noexcept - { - return this->__no_unique_address_emulation<_dynamic_t>::__ref(); - } - 
template - __MDSPAN_FORCE_INLINE_FUNCTION constexpr void __set_value(_Up&& __rhs) noexcept - { - this->__no_unique_address_emulation<_dynamic_t>::__ref() = (_Up&&) __rhs; - } -# endif -}; - -} // namespace __detail - -//============================================================================== - -# endif // _CCCL_STD_VER > 2011 - -_LIBCUDACXX_END_NAMESPACE_STD - -#endif // !__MDSPAN_PRESERVE_STANDARD_LAYOUT - -#endif // _LIBCUDACXX___MDSPAN_MAYBE_STATIC_VALUE_HPP diff --git a/libcudacxx/include/cuda/std/__mdspan/mdspan.h b/libcudacxx/include/cuda/std/__mdspan/mdspan.h index 07918917125..a7429a3b94d 100644 --- a/libcudacxx/include/cuda/std/__mdspan/mdspan.h +++ b/libcudacxx/include/cuda/std/__mdspan/mdspan.h @@ -1,45 +1,19 @@ -/* -//@HEADER -// ************************************************************************ +// -*- C++ -*- +//===----------------------------------------------------------------------===// // -// Kokkos v. 2.0 -// Copyright (2019) Sandia Corporation +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. // -// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, -// the U.S. Government retains certain rights in this software. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// 1. Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. +// Kokkos v. 
4.0 +// Copyright (2022) National Technology & Engineering +// Solutions of Sandia, LLC (NTESS). // -// 3. Neither the name of the Corporation nor the names of the -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -// Questions? Contact Christian R. Trott (crtrott@sandia.gov) +// Under the terms of Contract DE-NA0003525 with NTESS, +// the U.S. Government retains certain rights in this software. 
// -// ************************************************************************ -//@HEADER -*/ +//===---------------------------------------------------------------------===// #ifndef _LIBCUDACXX___MDSPAN_MDSPAN_HPP #define _LIBCUDACXX___MDSPAN_MDSPAN_HPP @@ -54,19 +28,21 @@ # pragma system_header #endif // no system header -#include -#include -#include +#include +#include +#include #include #include #include #include -#include +#include +#include #include #include #include #include -#include +#include +#include #include #include #include @@ -76,13 +52,13 @@ #include #include #include +#include +#include #include -_LIBCUDACXX_BEGIN_NAMESPACE_STD - #if _CCCL_STD_VER >= 2014 -_CCCL_NV_DIAG_SUPPRESS(186) // pointless comparison of unsigned integer with zero +_LIBCUDACXX_BEGIN_NAMESPACE_STD template , - "mdspan's Extents template parameter must be a specialization of _CUDA_VSTD::extents."); - - // Workaround for non-deducibility of the index sequence template parameter if it's given at the top level - template - struct __deduction_workaround; - - template - struct __deduction_workaround<_CUDA_VSTD::index_sequence<_Idxs...>> - { - using index_type = typename _Extents::index_type; - - _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI static constexpr size_t __size(mdspan const& __self) noexcept - { - return __MDSPAN_FOLD_TIMES_RIGHT( - (__self.__mapping_ref().extents().template __extent<_Idxs>()), /* * ... * */ size_t(1)); - } - _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool __empty(mdspan const& __self) noexcept - { - return (__self.rank() > 0) - && __MDSPAN_FOLD_OR((__self.__mapping_ref().extents().template __extent<_Idxs>() == index_type(0))); - } - - template - _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool - __check_index(_Extents const& exts, _SizeTypes... __indices) - { -# if _CCCL_STD_VER >= 2017 - return (((is_unsigned_v ? 
true : static_cast(__indices) >= 0) - && static_cast(__indices) < exts.extent(_Idxs)) - && ...); -# else - return true; -# endif // _CCCL_STD_VER >= 2017 - } - - template - _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI static constexpr index_type - __index(mdspan const& __self, _SizeTypes... __indices) noexcept - { - _CCCL_ASSERT(__check_index(__self.__mapping_ref().extents(), __indices...), - "cuda::std::mdspan subscript out of range!"); - const index_type __res = __self.__mapping_ref()(index_type(__indices)...); - return __res; - } - template - _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI static constexpr index_type - __index(mdspan const& __self, const _CUDA_VSTD::array<_SizeType, _Np>& __indices) noexcept - { - _CCCL_ASSERT(__check_index(__self.__mapping_ref().extents(), __indices[_Idxs]...), - "cuda::std::mdspan subscript out of range!"); - const index_type __res = __self.__mapping_ref()(__indices[_Idxs]...); - return __res; - } - template - _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI static constexpr index_type - __index(mdspan const& __self, const _CUDA_VSTD::span<_SizeType, _Np>& __indices) noexcept - { - _CCCL_ASSERT(__check_index(__self.__mapping_ref().extents(), __indices[_Idxs]...), - "cuda::std::mdspan subscript out of range!"); - const index_type __res = __self.__mapping_ref()(__indices[_Idxs]...); - return __res; - } - }; + static_assert(__mdspan_detail::__is_extents_v<_Extents>, + "mdspan: Extents template parameter must be a specialization of extents."); + static_assert(!_CCCL_TRAIT(is_array, _ElementType), + "mdspan: ElementType template parameter may not be an array type"); + static_assert(!_CCCL_TRAIT(is_abstract, _ElementType), + "mdspan: ElementType template parameter may not be an abstract class"); + static_assert(_CCCL_TRAIT(is_same, _ElementType, typename _AccessorPolicy::element_type), + "mdspan: ElementType template parameter must match AccessorPolicy::element_type"); + static_assert(__mdspan_detail::__is_valid_layout_mapping<_LayoutPolicy, 
_Extents>, + "mdspan: LayoutPolicy template parameter is invalid. A common mistake is to pass a layout mapping " + "instead of a layout policy"); public: - //-------------------------------------------------------------------------------- - // Domain and codomain types - using extents_type = _Extents; using layout_type = _LayoutPolicy; using accessor_type = _AccessorPolicy; using mapping_type = typename layout_type::template mapping; using element_type = _ElementType; - using value_type = _CUDA_VSTD::remove_cv_t; + using value_type = remove_cv_t; using index_type = typename extents_type::index_type; using size_type = typename extents_type::size_type; using rank_type = typename extents_type::rank_type; using data_handle_type = typename accessor_type::data_handle_type; using reference = typename accessor_type::reference; - _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI static constexpr size_t rank() noexcept + _LIBCUDACXX_HIDE_FROM_ABI static constexpr rank_type rank() noexcept { return extents_type::rank(); } - _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI static constexpr size_t rank_dynamic() noexcept + _LIBCUDACXX_HIDE_FROM_ABI static constexpr rank_type rank_dynamic() noexcept { return extents_type::rank_dynamic(); } - _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI static constexpr size_t static_extent(size_t __r) noexcept + _LIBCUDACXX_HIDE_FROM_ABI static constexpr size_t static_extent(rank_type __r) noexcept { return extents_type::static_extent(__r); } - _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr index_type extent(size_t __r) const noexcept + _LIBCUDACXX_HIDE_FROM_ABI constexpr index_type extent(rank_type __r) const noexcept { - return __mapping_ref().extents().extent(__r); + return __map_.extents().extent(__r); }; -private: - // Can't use defaulted parameter in the __deduction_workaround template because of a bug in MSVC warning C4348. 
- using __impl = __deduction_workaround<_CUDA_VSTD::make_index_sequence>; - - using __map_acc_pair_t = __detail::__compressed_pair; - public: //-------------------------------------------------------------------------------- - // [mdspan.basic.cons], mdspan constructors, assignment, and destructor - -# if _CCCL_STD_VER <= 2020 - _CCCL_HIDE_FROM_ABI constexpr mdspan() = default; -# else // ^^^ C++17 ^^^ / vvv C++20 vvv - _CCCL_HIDE_FROM_ABI constexpr mdspan() - requires( - // Directly using rank_dynamic()>0 here doesn't work for nvcc - (extents_type::rank_dynamic() > 0) && _CCCL_TRAIT(is_default_constructible, data_handle_type) - && _CCCL_TRAIT(is_default_constructible, mapping_type) - && _CCCL_TRAIT(is_default_constructible, accessor_type)) - = default; -# endif // _CCCL_STD_VER >= 2020 - _CCCL_HIDE_FROM_ABI constexpr mdspan(const mdspan&) = default; - _CCCL_HIDE_FROM_ABI constexpr mdspan(mdspan&&) = default; - - _CCCL_TEMPLATE(class... _SizeTypes) - _CCCL_REQUIRES(__fold_and_v<_CCCL_TRAIT(is_convertible, _SizeTypes, index_type)...> _CCCL_AND - __fold_and_v<_CCCL_TRAIT(is_nothrow_constructible, index_type, _SizeTypes)...> _CCCL_AND( - (sizeof...(_SizeTypes) == rank()) || (sizeof...(_SizeTypes) == rank_dynamic())) - _CCCL_AND _CCCL_TRAIT(is_constructible, mapping_type, extents_type) - _CCCL_AND _CCCL_TRAIT(is_default_constructible, accessor_type)) - _LIBCUDACXX_HIDE_FROM_ABI explicit constexpr mdspan(data_handle_type __p, _SizeTypes... __dynamic_extents) - // TODO @proposal-bug shouldn't I be allowed to do `move(__p)` here? 
- : __members( - _CUDA_VSTD::move(__p), - __map_acc_pair_t(mapping_type(extents_type(static_cast(_CUDA_VSTD::move(__dynamic_extents))...)), - accessor_type())) - {} + // [mdspan.mdspan.cons], mdspan constructors, assignment, and destructor - _CCCL_TEMPLATE(class _SizeType, size_t _Np) - _CCCL_REQUIRES(_CCCL_TRAIT(is_convertible, _SizeType, index_type) - _CCCL_AND _CCCL_TRAIT(is_nothrow_constructible, index_type, _SizeType) - _CCCL_AND((_Np == rank()) || (_Np == rank_dynamic())) +# if _CCCL_STD_VER >= 2020 + constexpr mdspan() + requires((extents_type::rank_dynamic() > 0) && is_default_constructible_v + && is_default_constructible_v && is_default_constructible_v) + = default; +# else // ^^^ _CCCL_STD_VER >= 2020 ^^^ / vvv _CCCL_STD_VER <= 2017 vvv + _CCCL_TEMPLATE(class _Extents2 = _Extents) + _CCCL_REQUIRES((_Extents2::rank_dynamic() > 0) // + _CCCL_AND _CCCL_TRAIT(is_default_constructible, data_handle_type) + _CCCL_AND _CCCL_TRAIT(is_default_constructible, mapping_type) + _CCCL_AND _CCCL_TRAIT(is_default_constructible, accessor_type)) + _LIBCUDACXX_HIDE_FROM_ABI constexpr mdspan() noexcept {} +# endif // _CCCL_STD_VER <= 2017 + + constexpr mdspan(const mdspan&) = default; + constexpr mdspan(mdspan&&) = default; + + _CCCL_TEMPLATE(class... 
_OtherIndexTypes) + _CCCL_REQUIRES(((sizeof...(_OtherIndexTypes) == rank()) || (sizeof...(_OtherIndexTypes) == rank_dynamic())) + _CCCL_AND _CCCL_FOLD_AND(_CCCL_TRAIT(is_convertible, _OtherIndexTypes, index_type)) + _CCCL_AND _CCCL_FOLD_AND(_CCCL_TRAIT(is_nothrow_constructible, index_type, _OtherIndexTypes)) _CCCL_AND _CCCL_TRAIT(is_constructible, mapping_type, extents_type) _CCCL_AND _CCCL_TRAIT(is_default_constructible, accessor_type)) - __MDSPAN_CONDITIONAL_EXPLICIT(_Np != rank_dynamic()) - _LIBCUDACXX_HIDE_FROM_ABI constexpr mdspan(data_handle_type __p, - const _CUDA_VSTD::array<_SizeType, _Np>& __dynamic_extents) - : __members(_CUDA_VSTD::move(__p), - __map_acc_pair_t(mapping_type(extents_type(__dynamic_extents)), accessor_type())) + _LIBCUDACXX_HIDE_FROM_ABI explicit constexpr mdspan(data_handle_type __p, _OtherIndexTypes... __exts) + : __ptr_(_CUDA_VSTD::move(__p)) + , __map_(extents_type(static_cast(_CUDA_VSTD::move(__exts))...)) + , __acc_{} {} - _CCCL_TEMPLATE(class _SizeType, size_t _Np) - _CCCL_REQUIRES(_CCCL_TRAIT(is_convertible, _SizeType, index_type) - _CCCL_AND _CCCL_TRAIT(is_nothrow_constructible, index_type, _SizeType) - _CCCL_AND((_Np == rank()) || (_Np == rank_dynamic())) - _CCCL_AND _CCCL_TRAIT(is_constructible, mapping_type, extents_type) - _CCCL_AND _CCCL_TRAIT(is_default_constructible, accessor_type)) - __MDSPAN_CONDITIONAL_EXPLICIT(_Np != rank_dynamic()) - _LIBCUDACXX_HIDE_FROM_ABI constexpr mdspan(data_handle_type __p, _CUDA_VSTD::span<_SizeType, _Np> __dynamic_extents) - : __members(_CUDA_VSTD::move(__p), - __map_acc_pair_t(mapping_type(extents_type(_CUDA_VSTD::as_const(__dynamic_extents))), accessor_type())) + template + static constexpr bool __is_constructible_from = + _CCCL_TRAIT(is_convertible, const _OtherIndexType&, index_type) + && _CCCL_TRAIT(is_nothrow_constructible, index_type, const _OtherIndexType&) + && _CCCL_TRAIT(is_constructible, mapping_type, extents_type) + && _CCCL_TRAIT(is_default_constructible, accessor_type); + + 
_CCCL_TEMPLATE(class _OtherIndexType, size_t _Size) + _CCCL_REQUIRES((_Size == rank_dynamic()) _CCCL_AND __is_constructible_from<_OtherIndexType>) + _LIBCUDACXX_HIDE_FROM_ABI constexpr mdspan(data_handle_type __p, const array<_OtherIndexType, _Size>& __exts) + : __ptr_(_CUDA_VSTD::move(__p)) + , __map_(extents_type{__exts}) + , __acc_{} + {} + + _CCCL_TEMPLATE(class _OtherIndexType, size_t _Size) + _CCCL_REQUIRES((_Size == rank()) _CCCL_AND(_Size != rank_dynamic()) + _CCCL_AND __is_constructible_from<_OtherIndexType>) + _LIBCUDACXX_HIDE_FROM_ABI explicit constexpr mdspan(data_handle_type __p, const array<_OtherIndexType, _Size>& __exts) + : __ptr_(_CUDA_VSTD::move(__p)) + , __map_(extents_type{__exts}) + , __acc_{} + {} + + _CCCL_TEMPLATE(class _OtherIndexType, size_t _Size) + _CCCL_REQUIRES((_Size == rank_dynamic()) _CCCL_AND __is_constructible_from<_OtherIndexType>) + _LIBCUDACXX_HIDE_FROM_ABI constexpr mdspan(data_handle_type __p, span<_OtherIndexType, _Size> __exts) + : __ptr_(_CUDA_VSTD::move(__p)) + , __map_(extents_type{__exts}) + , __acc_{} + {} + + _CCCL_TEMPLATE(class _OtherIndexType, size_t _Size) + _CCCL_REQUIRES((_Size == rank()) _CCCL_AND(_Size != rank_dynamic()) + _CCCL_AND __is_constructible_from<_OtherIndexType>) + _LIBCUDACXX_HIDE_FROM_ABI explicit constexpr mdspan(data_handle_type __p, span<_OtherIndexType, _Size> __exts) + : __ptr_(_CUDA_VSTD::move(__p)) + , __map_(extents_type{__exts}) + , __acc_{} {} - _CCCL_TEMPLATE(bool _Is_default_constructible = _CCCL_TRAIT(is_default_constructible, accessor_type)) - _CCCL_REQUIRES(_Is_default_constructible _CCCL_AND _CCCL_TRAIT(is_constructible, mapping_type, extents_type)) + _CCCL_TEMPLATE(class _AccessorPolicy2 = _AccessorPolicy) + _CCCL_REQUIRES(_CCCL_TRAIT(is_default_constructible, _AccessorPolicy2) + _CCCL_AND _CCCL_TRAIT(is_constructible, mapping_type, const extents_type&)) _LIBCUDACXX_HIDE_FROM_ABI constexpr mdspan(data_handle_type __p, const extents_type& __exts) - : 
__members(_CUDA_VSTD::move(__p), __map_acc_pair_t(mapping_type(__exts), accessor_type())) + : __ptr_(_CUDA_VSTD::move(__p)) + , __map_(__exts) + , __acc_{} {} - _CCCL_TEMPLATE(bool _Is_default_constructible = _CCCL_TRAIT(is_default_constructible, accessor_type)) - _CCCL_REQUIRES(_Is_default_constructible) + _CCCL_TEMPLATE(class _AccessorPolicy2 = _AccessorPolicy) + _CCCL_REQUIRES(_CCCL_TRAIT(is_default_constructible, _AccessorPolicy2)) _LIBCUDACXX_HIDE_FROM_ABI constexpr mdspan(data_handle_type __p, const mapping_type& __m) - : __members(_CUDA_VSTD::move(__p), __map_acc_pair_t(__m, accessor_type())) + : __ptr_(_CUDA_VSTD::move(__p)) + , __map_(__m) + , __acc_{} {} _LIBCUDACXX_HIDE_FROM_ABI constexpr mdspan(data_handle_type __p, const mapping_type& __m, const accessor_type& __a) - : __members(_CUDA_VSTD::move(__p), __map_acc_pair_t(__m, __a)) + : __ptr_(_CUDA_VSTD::move(__p)) + , __map_(__m) + , __acc_(__a) {} + template + static constexpr bool __is_convertible_from = + _CCCL_TRAIT(is_constructible, mapping_type, const typename _OtherLayoutPolicy::template mapping<_OtherExtents>&) + && _CCCL_TRAIT(is_constructible, accessor_type, const _OtherAccessor&); + + template + static constexpr bool __is_implicit_convertible_from = + _CCCL_TRAIT(is_convertible, const typename _OtherLayoutPolicy::template mapping<_OtherExtents>&, mapping_type) + && _CCCL_TRAIT(is_convertible, const _OtherAccessor&, accessor_type); + _CCCL_TEMPLATE(class _OtherElementType, class _OtherExtents, class _OtherLayoutPolicy, class _OtherAccessor) - _CCCL_REQUIRES( - _CCCL_TRAIT(is_constructible, mapping_type, typename _OtherLayoutPolicy::template mapping<_OtherExtents>) - _CCCL_AND _CCCL_TRAIT(is_constructible, accessor_type, _OtherAccessor)) + _CCCL_REQUIRES((rank() > 0) // + _CCCL_AND __is_convertible_from<_OtherExtents, _OtherLayoutPolicy, _OtherAccessor> // + _CCCL_AND __is_implicit_convertible_from<_OtherExtents, _OtherLayoutPolicy, _OtherAccessor>) _LIBCUDACXX_HIDE_FROM_ABI constexpr mdspan( 
const mdspan<_OtherElementType, _OtherExtents, _OtherLayoutPolicy, _OtherAccessor>& __other) - : __members(__other.__ptr_ref(), __map_acc_pair_t(__other.__mapping_ref(), __other.__accessor_ref())) + : __ptr_(__other.__ptr_) + , __map_(__other.__map_) + , __acc_(__other.__acc_) { - static_assert(_CCCL_TRAIT(is_constructible, data_handle_type, typename _OtherAccessor::data_handle_type), - "Incompatible data_handle_type for mdspan construction"); + static_assert(_CCCL_TRAIT(is_constructible, data_handle_type, const typename _OtherAccessor::data_handle_type&), + "mdspan: incompatible data_handle_type for mdspan construction"); static_assert(_CCCL_TRAIT(is_constructible, extents_type, _OtherExtents), - "Incompatible extents for mdspan construction"); - /* - * TODO: Check precondition - * For each rank index __r of extents_type, static_extent(__r) == dynamic_extent || static_extent(__r) == - * __other.extent(__r) is true. - */ + "mdspan: incompatible extents for mdspan construction"); + + // The following precondition is part of the standard, but is unlikely to be triggered. + // The extents constructor checks this and the mapping must be storing the extents, since + // its extents() function returns a const reference to extents_type. + // The only way this can be triggered is if the mapping conversion constructor would for example + // always construct its extents() only from the dynamic extents, instead of from the other extents. + for (size_t __r = 0; __r < rank(); __r++) + { + // Not catching this could lead to out of bounds errors later + // e.g. mdspan, non_checking_layout> m = + // mdspan, non_checking_layout>(ptr, 200); leads to an extent of -56 on m + _CCCL_ASSERT((static_extent(__r) == dynamic_extent) + || (static_cast(__other.extent(__r)) == static_cast(static_extent(__r))), + "mdspan: conversion mismatch of source dynamic extents with static extents"); + } } - /* Might need this on NVIDIA? 
- _CCCL_HIDE_FROM_ABI - _CCCL_HIDE_FROM_ABI ~mdspan() = default; - */ + _CCCL_TEMPLATE(class _OtherElementType, class _OtherExtents, class _OtherLayoutPolicy, class _OtherAccessor) + _CCCL_REQUIRES((rank() == 0) // + _CCCL_AND __is_convertible_from<_OtherExtents, _OtherLayoutPolicy, _OtherAccessor> // + _CCCL_AND __is_implicit_convertible_from<_OtherExtents, _OtherLayoutPolicy, _OtherAccessor>) + _LIBCUDACXX_HIDE_FROM_ABI constexpr mdspan( + const mdspan<_OtherElementType, _OtherExtents, _OtherLayoutPolicy, _OtherAccessor>& __other) + : __ptr_(__other.__ptr_) + , __map_(__other.__map_) + , __acc_(__other.__acc_) + { + static_assert(_CCCL_TRAIT(is_constructible, data_handle_type, const typename _OtherAccessor::data_handle_type&), + "mdspan: incompatible data_handle_type for mdspan construction"); + static_assert(_CCCL_TRAIT(is_constructible, extents_type, _OtherExtents), + "mdspan: incompatible extents for mdspan construction"); + } + _CCCL_TEMPLATE(class _OtherElementType, class _OtherExtents, class _OtherLayoutPolicy, class _OtherAccessor) + _CCCL_REQUIRES((rank() > 0) // + _CCCL_AND __is_convertible_from<_OtherExtents, _OtherLayoutPolicy, _OtherAccessor> // + _CCCL_AND(!__is_implicit_convertible_from<_OtherExtents, _OtherLayoutPolicy, _OtherAccessor>)) + _LIBCUDACXX_HIDE_FROM_ABI explicit constexpr mdspan( + const mdspan<_OtherElementType, _OtherExtents, _OtherLayoutPolicy, _OtherAccessor>& __other) + : __ptr_(__other.__ptr_) + , __map_(__other.__map_) + , __acc_(__other.__acc_) + { + static_assert(_CCCL_TRAIT(is_constructible, data_handle_type, const typename _OtherAccessor::data_handle_type&), + "mdspan: incompatible data_handle_type for mdspan construction"); + static_assert(_CCCL_TRAIT(is_constructible, extents_type, _OtherExtents), + "mdspan: incompatible extents for mdspan construction"); + + // The following precondition is part of the standard, but is unlikely to be triggered. 
+ // The extents constructor checks this and the mapping must be storing the extents, since + // its extents() function returns a const reference to extents_type. + // The only way this can be triggered is if the mapping conversion constructor would for example + // always construct its extents() only from the dynamic extents, instead of from the other extents. + for (size_t __r = 0; __r < rank(); __r++) + { + // Not catching this could lead to out of bounds errors later + // e.g. mdspan, non_checking_layout> m = + // mdspan, non_checking_layout>(ptr, 200); leads to an extent of -56 on m + _CCCL_ASSERT((static_extent(__r) == dynamic_extent) + || (static_cast(__other.extent(__r)) == static_cast(static_extent(__r))), + "mdspan: conversion mismatch of source dynamic extents with static extents"); + } + } - _CCCL_HIDE_FROM_ABI __MDSPAN_CONSTEXPR_14_DEFAULTED mdspan& operator=(const mdspan&) = default; - _CCCL_HIDE_FROM_ABI __MDSPAN_CONSTEXPR_14_DEFAULTED mdspan& operator=(mdspan&&) = default; + _CCCL_TEMPLATE(class _OtherElementType, class _OtherExtents, class _OtherLayoutPolicy, class _OtherAccessor) + _CCCL_REQUIRES((rank() == 0) // + _CCCL_AND __is_convertible_from<_OtherExtents, _OtherLayoutPolicy, _OtherAccessor> // + _CCCL_AND(!__is_implicit_convertible_from<_OtherExtents, _OtherLayoutPolicy, _OtherAccessor>)) + _LIBCUDACXX_HIDE_FROM_ABI explicit constexpr mdspan( + const mdspan<_OtherElementType, _OtherExtents, _OtherLayoutPolicy, _OtherAccessor>& __other) + : __ptr_(__other.__ptr_) + , __map_(__other.__map_) + , __acc_(__other.__acc_) + { + static_assert(_CCCL_TRAIT(is_constructible, data_handle_type, const typename _OtherAccessor::data_handle_type&), + "mdspan: incompatible data_handle_type for mdspan construction"); + static_assert(_CCCL_TRAIT(is_constructible, extents_type, _OtherExtents), + "mdspan: incompatible extents for mdspan construction"); + } + + constexpr mdspan& operator=(const mdspan&) = default; + constexpr mdspan& operator=(mdspan&&) = default; 
//-------------------------------------------------------------------------------- - // [mdspan.basic.mapping], mdspan mapping domain multidimensional index to access codomain element + // [mdspan.mdspan.members], members + +# if defined(_LIBCUDACXX_HAS_MULTIARG_OPERATOR_BRACKETS) + _CCCL_TEMPLATE(class... _OtherIndexTypes) + _CCCL_REQUIRES((sizeof...(_OtherIndexTypes) == rank()) + _CCCL_AND _CCCL_FOLD_AND(_CCCL_TRAIT(is_convertible, _OtherIndexTypes, index_type)) + _CCCL_AND _CCCL_FOLD_AND(_CCCL_TRAIT(is_nothrow_constructible, index_type, _OtherIndexTypes))) + _LIBCUDACXX_HIDE_FROM_ABI constexpr reference operator[](_OtherIndexTypes... __indices) const + { + // Note the standard layouts would also check this, but user provided ones may not, so we + // check the precondition here + _CCCL_ASSERT(__mdspan_detail::__is_multidimensional_index_in(extents(), __indices...), + "mdspan: operator[] out of bounds access"); + return __acc_.access(__ptr_, __map_(static_cast(_CUDA_VSTD::move(__indices))...)); + } +# else + _CCCL_TEMPLATE(class _OtherIndexType) + _CCCL_REQUIRES((rank() == 1) _CCCL_AND _CCCL_TRAIT(is_convertible, _OtherIndexType, index_type) + _CCCL_AND _CCCL_TRAIT(is_nothrow_constructible, index_type, _OtherIndexType)) + _LIBCUDACXX_HIDE_FROM_ABI constexpr reference operator[](_OtherIndexType __index) const + { + return __acc_.access(__ptr_, __map_(static_cast(_CUDA_VSTD::move(__index)))); + } +# endif // _LIBCUDACXX_HAS_MULTIARG_OPERATOR_BRACKETS -# if __MDSPAN_USE_BRACKET_OPERATOR - _CCCL_TEMPLATE(class... _SizeTypes) - _CCCL_REQUIRES(__fold_and_v<_CCCL_TRAIT(is_convertible, _SizeTypes, index_type)...> _CCCL_AND - __fold_and_v<_CCCL_TRAIT(is_nothrow_constructible, index_type, _SizeTypes)...> _CCCL_AND( - rank() == sizeof...(_SizeTypes))) - _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr reference operator[](_SizeTypes... 
__indices) const + template + _LIBCUDACXX_HIDE_FROM_ABI constexpr decltype(auto) + __op_bracket(const array<_OtherIndexType, _Extents::rank()>& __indices, index_sequence<_Idxs...>) const noexcept { - return __accessor_ref().access(__ptr_ref(), __impl::__index(*this, __indices...)); + return __map_(__indices[_Idxs]...); } -# endif // __MDSPAN_USE_BRACKET_OPERATOR - _CCCL_TEMPLATE(class _SizeType) - _CCCL_REQUIRES(_CCCL_TRAIT(is_convertible, _SizeType, index_type) - _CCCL_AND _CCCL_TRAIT(is_nothrow_constructible, index_type, _SizeType)) - _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr reference - operator[](const _CUDA_VSTD::array<_SizeType, rank()>& __indices) const + template + _LIBCUDACXX_HIDE_FROM_ABI constexpr decltype(auto) + __op_bracket(span<_OtherIndexType, _Extents::rank()> __indices, index_sequence<_Idxs...>) const noexcept { - return __accessor_ref().access(__ptr_ref(), __impl::__index(*this, __indices)); + return __map_(__indices[_Idxs]...); } - _CCCL_TEMPLATE(class _SizeType) - _CCCL_REQUIRES(_CCCL_TRAIT(is_convertible, _SizeType, index_type) - _CCCL_AND _CCCL_TRAIT(is_nothrow_constructible, index_type, _SizeType)) - _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr reference - operator[](_CUDA_VSTD::span<_SizeType, rank()> __indices) const + _CCCL_TEMPLATE(class _OtherIndexType) + _CCCL_REQUIRES(_CCCL_TRAIT(is_convertible, const _OtherIndexType&, index_type) + _CCCL_AND _CCCL_TRAIT(is_nothrow_constructible, index_type, const _OtherIndexType&)) + _LIBCUDACXX_HIDE_FROM_ABI constexpr reference operator[](const array<_OtherIndexType, rank()>& __indices) const { - return __accessor_ref().access(__ptr_ref(), __impl::__index(*this, __indices)); + return __acc_.access(__ptr_, __op_bracket(__indices, make_index_sequence())); } -# if !__MDSPAN_USE_BRACKET_OPERATOR - _CCCL_TEMPLATE(class _Index) - _CCCL_REQUIRES(_CCCL_TRAIT(is_convertible, _Index, index_type) - _CCCL_AND _CCCL_TRAIT(is_nothrow_constructible, index_type, _Index) - 
_CCCL_AND(extents_type::rank() == 1)) - _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr reference operator[](_Index __idx) const + _CCCL_TEMPLATE(class _OtherIndexType) + _CCCL_REQUIRES(_CCCL_TRAIT(is_convertible, const _OtherIndexType&, index_type) + _CCCL_AND _CCCL_TRAIT(is_nothrow_constructible, index_type, const _OtherIndexType&)) + _LIBCUDACXX_HIDE_FROM_ABI constexpr reference operator[](span<_OtherIndexType, rank()> __indices) const { - return __accessor_ref().access(__ptr_ref(), __impl::__index(*this, __idx)); + return __acc_.access(__ptr_, __op_bracket(__indices, make_index_sequence())); } -# endif // !__MDSPAN_USE_BRACKET_OPERATOR -# if __MDSPAN_USE_PAREN_OPERATOR - _CCCL_TEMPLATE(class... _SizeTypes) - _CCCL_REQUIRES(__fold_and_v<_CCCL_TRAIT(is_convertible, _SizeTypes, index_type)...> _CCCL_AND - __fold_and_v<_CCCL_TRAIT(is_nothrow_constructible, index_type, _SizeTypes)...> _CCCL_AND( - extents_type::rank() == sizeof...(_SizeTypes))) - _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr reference operator()(_SizeTypes... 
__indices) const + _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool __mul_overflow(size_t x, size_t y, size_t* res) noexcept { - return __accessor_ref().access(__ptr_ref(), __impl::__index(*this, __indices...)); + *res = x * y; + return x && ((*res / x) != y); } - _CCCL_TEMPLATE(class _SizeType) - _CCCL_REQUIRES(_CCCL_TRAIT(is_convertible, _SizeType, index_type) - _CCCL_AND _CCCL_TRAIT(is_nothrow_constructible, index_type, _SizeType)) - _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr reference - operator()(const _CUDA_VSTD::array<_SizeType, rank()>& __indices) const + template + _LIBCUDACXX_HIDE_FROM_ABI constexpr bool __check_size(index_sequence<_Idxs...>) const noexcept { - return __accessor_ref().access(__ptr_ref(), __impl::__index(*this, __indices)); + size_t __prod = 1; + return !_CCCL_FOLD_OR((__mul_overflow(__prod, __map_.extents().extent(_Idxs), &__prod))); } - _CCCL_TEMPLATE(class _SizeType) - _CCCL_REQUIRES(_CCCL_TRAIT(is_convertible, _SizeType, index_type) - _CCCL_AND _CCCL_TRAIT(is_nothrow_constructible, index_type, _SizeType)) - _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr reference - operator()(_CUDA_VSTD::span<_SizeType, rank()> __indices) const + template + _LIBCUDACXX_HIDE_FROM_ABI constexpr size_type __op_size(index_sequence<_Idxs...>) const noexcept { - return __accessor_ref().access(__ptr_ref(), __impl::__index(*this, __indices)); + return _CCCL_FOLD_TIMES(size_type{1}, static_cast(__map_.extents().extent(_Idxs))); } -# endif // __MDSPAN_USE_PAREN_OPERATOR - _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr size_t size() const noexcept + _LIBCUDACXX_HIDE_FROM_ABI constexpr size_type size() const noexcept { - return __impl::__size(*this); - }; + // Could leave this as only checked in debug mode: semantically size() is never + // guaranteed to be related to any accessible range + _CCCL_ASSERT(__check_size(make_index_sequence()), "mdspan: size() is not representable as size_type"); + return __op_size(make_index_sequence()); + } + + 
template + _LIBCUDACXX_HIDE_FROM_ABI constexpr bool __op_empty(index_sequence<_Idxs...>) const noexcept + { + return _CCCL_FOLD_OR((__map_.extents().extent(_Idxs) == index_type{0})); + } _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr bool empty() const noexcept { - return __impl::__empty(*this); - }; + return __op_empty(make_index_sequence()); + } _LIBCUDACXX_HIDE_FROM_ABI friend constexpr void swap(mdspan& __x, mdspan& __y) noexcept { - swap(__x.__ptr_ref(), __y.__ptr_ref()); - swap(__x.__mapping_ref(), __y.__mapping_ref()); - swap(__x.__accessor_ref(), __y.__accessor_ref()); + swap(__x.__ptr_, __y.__ptr_); + swap(__x.__map_, __y.__map_); + swap(__x.__acc_, __y.__acc_); } - //-------------------------------------------------------------------------------- - // [mdspan.basic.domobs], mdspan observers of the domain multidimensional index space - - _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr const extents_type& extents() const noexcept + _LIBCUDACXX_HIDE_FROM_ABI constexpr const extents_type& extents() const noexcept { - return __mapping_ref().extents(); + return __map_.extents(); }; _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr const data_handle_type& data_handle() const noexcept { - return __ptr_ref(); + return __ptr_; }; _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr const mapping_type& mapping() const noexcept { - return __mapping_ref(); + return __map_; }; _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr const accessor_type& accessor() const noexcept { - return __accessor_ref(); + return __acc_; }; - //-------------------------------------------------------------------------------- - // [mdspan.basic.obs], mdspan observers of the mapping - - _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool is_always_unique() noexcept + _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool is_always_unique() { return mapping_type::is_always_unique(); }; - _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool is_always_exhaustive() 
noexcept + _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool is_always_exhaustive() { return mapping_type::is_always_exhaustive(); }; - _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool is_always_strided() noexcept + _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool is_always_strided() { return mapping_type::is_always_strided(); }; - _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr bool is_unique() const noexcept + _LIBCUDACXX_HIDE_FROM_ABI constexpr bool is_unique() const { - return __mapping_ref().is_unique(); + return __map_.is_unique(); }; - _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr bool is_exhaustive() const noexcept + _LIBCUDACXX_HIDE_FROM_ABI constexpr bool is_exhaustive() const { - return __mapping_ref().is_exhaustive(); + return __map_.is_exhaustive(); }; - _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr bool is_strided() const noexcept + _LIBCUDACXX_HIDE_FROM_ABI constexpr bool is_strided() const { - return __mapping_ref().is_strided(); + return __map_.is_strided(); }; - _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr index_type stride(size_t __r) const + _LIBCUDACXX_HIDE_FROM_ABI constexpr index_type stride(rank_type __r) const { - return __mapping_ref().stride(__r); + return __map_.stride(__r); }; private: - __detail::__compressed_pair __members{}; - - _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr data_handle_type& __ptr_ref() noexcept - { - return __members.__first(); - } - _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr data_handle_type const& __ptr_ref() const noexcept - { - return __members.__first(); - } - _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr mapping_type& __mapping_ref() noexcept - { - return __members.__second().__first(); - } - _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr mapping_type const& __mapping_ref() const noexcept - { - return __members.__second().__first(); - } - _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr accessor_type& __accessor_ref() noexcept - { - return 
__members.__second().__second(); - } - _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr accessor_type const& __accessor_ref() const noexcept - { - return __members.__second().__second(); - } + _CCCL_NO_UNIQUE_ADDRESS data_handle_type __ptr_{}; + _CCCL_NO_UNIQUE_ADDRESS mapping_type __map_{}; + _CCCL_NO_UNIQUE_ADDRESS accessor_type __acc_{}; template friend class mdspan; }; -# if defined(__MDSPAN_USE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION) -_CCCL_TEMPLATE(class _ElementType, class... _SizeTypes) -_CCCL_REQUIRES(__fold_and_v<_CCCL_TRAIT(is_integral, _SizeTypes)...> _CCCL_AND(sizeof...(_SizeTypes) > 0)) -_CCCL_HOST_DEVICE explicit mdspan(_ElementType*, _SizeTypes...) - -> mdspan<_ElementType, dextents>; +# if _CCCL_STD_VER >= 2017 +_CCCL_TEMPLATE(class _ElementType, class... _OtherIndexTypes) +_CCCL_REQUIRES((sizeof...(_OtherIndexTypes) > 0) + _CCCL_AND _CCCL_FOLD_AND(_CCCL_TRAIT(is_convertible, _OtherIndexTypes, size_t))) +explicit mdspan(_ElementType*, _OtherIndexTypes...) + -> mdspan<_ElementType, extents...>>; _CCCL_TEMPLATE(class _Pointer) -_CCCL_REQUIRES(_CCCL_TRAIT(is_pointer, _CUDA_VSTD::remove_reference_t<_Pointer>)) -_CCCL_HOST_DEVICE mdspan(_Pointer&&) - -> mdspan<_CUDA_VSTD::remove_pointer_t<_CUDA_VSTD::remove_reference_t<_Pointer>>, extents>; +_CCCL_REQUIRES(_CCCL_TRAIT(is_pointer, remove_reference_t<_Pointer>)) +mdspan(_Pointer&&) -> mdspan>, extents>; + _CCCL_TEMPLATE(class _CArray) _CCCL_REQUIRES(_CCCL_TRAIT(is_array, _CArray) _CCCL_AND(rank_v<_CArray> == 1)) -_CCCL_HOST_DEVICE mdspan(_CArray&) - -> mdspan<_CUDA_VSTD::remove_all_extents_t<_CArray>, extents>>; +mdspan(_CArray&) -> mdspan, extents>>; -template -_CCCL_HOST_DEVICE mdspan(_ElementType*, const _CUDA_VSTD::array<_SizeType, _Np>&) - -> mdspan<_ElementType, dextents>; +template +mdspan(_ElementType*, const array<_OtherIndexType, _Size>&) -> mdspan<_ElementType, dextents>; -template -_CCCL_HOST_DEVICE mdspan(_ElementType*, _CUDA_VSTD::span<_SizeType, _Np>) - -> mdspan<_ElementType, dextents>; 
+template +mdspan(_ElementType*, span<_OtherIndexType, _Size>) -> mdspan<_ElementType, dextents>; // This one is necessary because all the constructors take `data_handle_type`s, not // `_ElementType*`s, and `data_handle_type` is taken from `accessor_type::data_handle_type`, which // seems to throw off automatic deduction guides. -template -_CCCL_HOST_DEVICE mdspan(_ElementType*, const extents<_SizeType, _ExtentsPack...>&) - -> mdspan<_ElementType, extents<_SizeType, _ExtentsPack...>>; +template +mdspan(_ElementType*, const extents<_OtherIndexType, _ExtentsPack...>&) + -> mdspan<_ElementType, extents<_OtherIndexType, _ExtentsPack...>>; template -_CCCL_HOST_DEVICE mdspan(_ElementType*, const _MappingType&) +mdspan(_ElementType*, const _MappingType&) -> mdspan<_ElementType, typename _MappingType::extents_type, typename _MappingType::layout_type>; template -_CCCL_HOST_DEVICE mdspan(const typename _AccessorType::data_handle_type, const _MappingType&, const _AccessorType&) +mdspan(const typename _AccessorType::data_handle_type, const _MappingType&, const _AccessorType&) -> mdspan; -# endif // __MDSPAN_USE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION +# endif // _CCCL_STD_VER >= 2017 -_CCCL_NV_DIAG_DEFAULT(186) +_LIBCUDACXX_END_NAMESPACE_STD #endif // _CCCL_STD_VER >= 2014 -_LIBCUDACXX_END_NAMESPACE_STD - -#endif // _LIBCUDACXX___MDSPAN_MDSPAN_HPP +#endif // _LIBCUDACXX___MDSPAN_MDSPAN_H diff --git a/libcudacxx/include/cuda/std/__mdspan/no_unique_address.h b/libcudacxx/include/cuda/std/__mdspan/no_unique_address.h deleted file mode 100644 index 8d0d315b123..00000000000 --- a/libcudacxx/include/cuda/std/__mdspan/no_unique_address.h +++ /dev/null @@ -1,149 +0,0 @@ -/* -//@HEADER -// ************************************************************************ -// -// Kokkos v. 2.0 -// Copyright (2019) Sandia Corporation -// -// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, -// the U.S. Government retains certain rights in this software. 
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// 1. Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// 3. Neither the name of the Corporation nor the names of the -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -// Questions? Contact Christian R. 
Trott (crtrott@sandia.gov) -// -// ************************************************************************ -//@HEADER -*/ - -#ifndef _LIBCUDACXX__LIBCUDACXX_NO_UNIQUE_ADDRESS_HPP -#define _LIBCUDACXX__LIBCUDACXX_NO_UNIQUE_ADDRESS_HPP - -#include - -#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) -# pragma GCC system_header -#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) -# pragma clang system_header -#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) -# pragma system_header -#endif // no system header - -#include -#include -#include -#include -#include - -_LIBCUDACXX_BEGIN_NAMESPACE_STD - -#if _CCCL_STD_VER > 2011 - -namespace __detail -{ - -//============================================================================== - -template -struct __no_unique_address_emulation -{ - using __stored_type = _Tp; - _Tp __v; - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp const& __ref() const noexcept - { - return __v; - } - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp& __ref() noexcept - { - return __v; - } -}; - -// Empty case -// This doesn't work if _Tp is final, of course, but we're not using anything -// like that currently. That kind of thing could be added pretty easily though -template -struct __no_unique_address_emulation<_Tp, - _Disambiguator, - _CUDA_VSTD::enable_if_t<_CCCL_TRAIT(_CUDA_VSTD::is_empty, _Tp) && - // If the type isn't trivially destructible, its destructor - // won't be called at the right time, so don't use this - // specialization - _CCCL_TRAIT(_CUDA_VSTD::is_trivially_destructible, _Tp)>> - : -# ifdef __MDSPAN_COMPILER_MSVC - // MSVC doesn't allow you to access public static member functions of a type - // when you *happen* to privately inherit from that type. 
- protected -# else - // But we still want this to be private if possible so that we don't accidentally - // access members of _Tp directly rather than calling __ref() first, which wouldn't - // work if _Tp happens to be stateful and thus we're using the unspecialized definition - // of __no_unique_address_emulation above. - private -# endif - _Tp -{ - using __stored_type = _Tp; - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp const& __ref() const noexcept - { - return *static_cast<_Tp const*>(this); - } - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp& __ref() noexcept - { - return *static_cast<_Tp*>(this); - } - - _CCCL_HIDE_FROM_ABI constexpr __no_unique_address_emulation() noexcept = default; - _CCCL_HIDE_FROM_ABI constexpr __no_unique_address_emulation(__no_unique_address_emulation const&) noexcept = default; - _CCCL_HIDE_FROM_ABI constexpr __no_unique_address_emulation(__no_unique_address_emulation&&) noexcept = default; - _CCCL_HIDE_FROM_ABI __MDSPAN_CONSTEXPR_14_DEFAULTED __no_unique_address_emulation& - operator=(__no_unique_address_emulation const&) noexcept = default; - _CCCL_HIDE_FROM_ABI __MDSPAN_CONSTEXPR_14_DEFAULTED __no_unique_address_emulation& - operator=(__no_unique_address_emulation&&) noexcept = default; - _CCCL_HIDE_FROM_ABI ~__no_unique_address_emulation() noexcept = default; - - // Explicitly make this not a reference so that the copy or move - // constructor still gets called. 
- _LIBCUDACXX_HIDE_FROM_ABI explicit constexpr __no_unique_address_emulation(_Tp const& __v) noexcept - : _Tp(__v) - {} - _LIBCUDACXX_HIDE_FROM_ABI explicit constexpr __no_unique_address_emulation(_Tp&& __v) noexcept - : _Tp(_CUDA_VSTD::move(__v)) - {} -}; - -//============================================================================== - -} // end namespace __detail - -#endif // _CCCL_STD_VER > 2011 - -_LIBCUDACXX_END_NAMESPACE_STD - -#endif // _LIBCUDACXX__LIBCUDACXX_NO_UNIQUE_ADDRESS_HPP diff --git a/libcudacxx/include/cuda/std/__mdspan/standard_layout_static_array.h b/libcudacxx/include/cuda/std/__mdspan/standard_layout_static_array.h deleted file mode 100644 index 04469e7d98d..00000000000 --- a/libcudacxx/include/cuda/std/__mdspan/standard_layout_static_array.h +++ /dev/null @@ -1,700 +0,0 @@ -/* -//@HEADER -// ************************************************************************ -// -// Kokkos v. 2.0 -// Copyright (2019) Sandia Corporation -// -// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, -// the U.S. Government retains certain rights in this software. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// 1. Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// 3. Neither the name of the Corporation nor the names of the -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -// Questions? Contact Christian R. Trott (crtrott@sandia.gov) -// -// ************************************************************************ -//@HEADER -*/ - -#ifndef _LIBCUDACXX___MDSPAN_STANDARD_LAYOUT_STATIC_ARRAY_HPP -#define _LIBCUDACXX___MDSPAN_STANDARD_LAYOUT_STATIC_ARRAY_HPP - -#include - -#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) -# pragma GCC system_header -#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) -# pragma clang system_header -#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) -# pragma system_header -#endif // no system header - -#include -#include -#include -#ifdef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS -# include -#endif // _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS -#include -#include -#include -#include -#include - -_LIBCUDACXX_BEGIN_NAMESPACE_STD - -#if _CCCL_STD_VER > 2011 - -namespace __detail -{ - -//============================================================================== - -struct __construct_psa_from_dynamic_exts_values_tag_t -{}; -_CCCL_GLOBAL_CONSTANT __construct_psa_from_dynamic_exts_values_tag_t __construct_psa_from_dynamic_exts_values_tag; - -struct __construct_psa_from_all_exts_values_tag_t -{}; -_CCCL_GLOBAL_CONSTANT 
__construct_psa_from_all_exts_values_tag_t __construct_psa_from_all_exts_values_tag; - -struct __construct_psa_from_all_exts_array_tag_t -{}; -template -struct __construct_psa_from_dynamic_exts_array_tag_t -{}; - -//============================================================================== - -template -using __repeated_with_idxs = _Tp; - -//============================================================================== - -# if __MDSPAN_PRESERVE_STANDARD_LAYOUT - -/** - * PSA = "partially static array" - * - * @tparam _Tp - * @tparam _ValsSeq - * @tparam __sentinal - */ -template (dynamic_extent), - class _IdxsSeq = _CUDA_VSTD::make_index_sequence<_ValsSeq::size()>> -struct __standard_layout_psa; - -//============================================================================== -// Static case -template -struct __standard_layout_psa<_Tag, - _Tp, - _static_t, - _CUDA_VSTD::integer_sequence<_static_t, __value, __values_or_sentinals...>, - __sentinal, - _CUDA_VSTD::integer_sequence> -# ifdef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - : private __no_unique_address_emulation< - __standard_layout_psa<_Tag, - _Tp, - _static_t, - _CUDA_VSTD::integer_sequence<_static_t, __values_or_sentinals...>, - __sentinal, - _CUDA_VSTD::integer_sequence>> -# endif -{ - //-------------------------------------------------------------------------- - - using __next_t = - __standard_layout_psa<_Tag, - _Tp, - _static_t, - _CUDA_VSTD::integer_sequence<_static_t, __values_or_sentinals...>, - __sentinal, - _CUDA_VSTD::integer_sequence>; - -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - _CCCL_NO_UNIQUE_ADDRESS __next_t __next_; -# else - using __base_t = __no_unique_address_emulation<__next_t>; -# endif - - __MDSPAN_FORCE_INLINE_FUNCTION constexpr __next_t& __next() noexcept - { -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - return __next_; -# else - return this->__base_t::__ref(); -# endif - } - __MDSPAN_FORCE_INLINE_FUNCTION constexpr __next_t const& __next() const noexcept - { -# 
ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - return __next_; -# else - return this->__base_t::__ref(); -# endif - } - - static constexpr auto __size = sizeof...(_Idxs) + 1; - static constexpr auto __size_dynamic = __next_t::__size_dynamic; - - //-------------------------------------------------------------------------- - - _CCCL_HIDE_FROM_ABI constexpr __standard_layout_psa() noexcept = default; - _CCCL_HIDE_FROM_ABI constexpr __standard_layout_psa(__standard_layout_psa const&) noexcept = default; - _CCCL_HIDE_FROM_ABI constexpr __standard_layout_psa(__standard_layout_psa&&) noexcept = default; - _CCCL_HIDE_FROM_ABI __MDSPAN_CONSTEXPR_14_DEFAULTED __standard_layout_psa& - operator=(__standard_layout_psa const&) noexcept = default; - _CCCL_HIDE_FROM_ABI __MDSPAN_CONSTEXPR_14_DEFAULTED __standard_layout_psa& - operator=(__standard_layout_psa&&) noexcept = default; - _CCCL_HIDE_FROM_ABI ~__standard_layout_psa() noexcept = default; - - //-------------------------------------------------------------------------- - - _LIBCUDACXX_HIDE_FROM_ABI constexpr __standard_layout_psa( - __construct_psa_from_all_exts_values_tag_t, - _Tp const& /*__val*/, - __repeated_with_idxs<_Idxs, _Tp> const&... __vals) noexcept -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - : __next_{ -# else - : __base_t(__base_t{__next_t( -# endif - __construct_psa_from_all_exts_values_tag, __vals... -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - } -# else - )}) -# endif - {} - - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr __standard_layout_psa( - __construct_psa_from_dynamic_exts_values_tag_t, _Ts const&... __vals) noexcept -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - : __next_{ -# else - : __base_t(__base_t{__next_t( -# endif - __construct_psa_from_dynamic_exts_values_tag, __vals... 
-# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - } -# else - )}) -# endif - {} - - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr explicit __standard_layout_psa(array<_Up, _Np> const& __vals) noexcept -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - : __next_{ -# else - : __base_t(__base_t{__next_t( -# endif - __vals -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - } -# else - )}) -# endif - {} - - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr explicit __standard_layout_psa( - __construct_psa_from_all_exts_array_tag_t const& __tag, array<_Up, _NStatic> const& __vals) noexcept -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - : __next_{ -# else - : __base_t(__base_t{__next_t( -# endif - __tag, __vals -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - } -# else - )}) -# endif - {} - - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr explicit __standard_layout_psa( - __construct_psa_from_dynamic_exts_array_tag_t<_IDynamic> __tag, array<_Up, _NDynamic> const& __vals) noexcept -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - : __next_{ -# else - : __base_t(__base_t{__next_t( -# endif - __tag, __vals -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - } -# else - )}) -# endif - {} - - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr explicit __standard_layout_psa(_CUDA_VSTD::span<_Up, _Np> const& __vals) noexcept -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - : __next_{ -# else - : __base_t(__base_t{__next_t( -# endif - __vals -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - } -# else - )}) -# endif - {} - - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr explicit __standard_layout_psa( - __construct_psa_from_all_exts_array_tag_t const& __tag, _CUDA_VSTD::span<_Up, _NStatic> const& __vals) noexcept -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - : __next_{ -# else - : __base_t(__base_t{__next_t( -# endif - __tag, __vals -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - } -# else - )}) -# endif - {} - - template - 
_LIBCUDACXX_HIDE_FROM_ABI constexpr explicit __standard_layout_psa( - __construct_psa_from_dynamic_exts_array_tag_t<_IDynamic> __tag, - _CUDA_VSTD::span<_Up, _NDynamic> const& __vals) noexcept -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - : __next_{ -# else - : __base_t(__base_t{__next_t( -# endif - __tag, __vals -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - } -# else - )}) -# endif - {} - - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr __standard_layout_psa( - __standard_layout_psa<_UTag, _Up, _static_U, _UValsSeq, __u_sentinal, _IdxsSeq> const& __rhs) noexcept -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - : __next_{ -# else - : __base_t(__base_t{__next_t( -# endif - __rhs.__next() -# ifndef _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS - } -# else - )}) -# endif - {} - - //-------------------------------------------------------------------------- - - // See https://godbolt.org/z/_KSDNX for a summary-by-example of why this is - // necessary. We're using inheritance here instead of an alias template - // because we have to deduce __values_or_sentinals in several places, and - // alias templates don't permit that in this context. - __MDSPAN_FORCE_INLINE_FUNCTION - constexpr __standard_layout_psa const& __enable_psa_conversion() const noexcept - { - return *this; - } - - template = 0> - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp __get_n() const noexcept - { - return __next().template __get_n<_Ip>(); - } - template = 1> - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp __get_n() const noexcept - { - return __value; - } - template = 0> - __MDSPAN_FORCE_INLINE_FUNCTION constexpr void __set_n(_Tp const& __rhs) noexcept - { - __next().__set_value(__rhs); - } - template = 1> - __MDSPAN_FORCE_INLINE_FUNCTION constexpr void __set_n(_Tp const&) noexcept - { - // Don't assert here because that would break constexpr. 
This better - // not change anything, though - } - template = __sentinal> - __MDSPAN_FORCE_INLINE_FUNCTION static constexpr _static_t __get_static_n() noexcept - { - return __value; - } - template __default = __sentinal> - __MDSPAN_FORCE_INLINE_FUNCTION static constexpr _static_t __get_static_n() noexcept - { - return __next_t::template __get_static_n<_Ip, __default>(); - } - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp __get(size_t __n) const noexcept - { - return __value * (_Tp(_Idx == __n)) + __next().__get(__n); - } - - //-------------------------------------------------------------------------- -}; - -//============================================================================== - -// Dynamic case, __next_t may or may not be empty -template -struct __standard_layout_psa<_Tag, - _Tp, - _static_t, - _CUDA_VSTD::integer_sequence<_static_t, __sentinal, __values_or_sentinals...>, - __sentinal, - _CUDA_VSTD::integer_sequence> -{ - //-------------------------------------------------------------------------- - - using __next_t = - __standard_layout_psa<_Tag, - _Tp, - _static_t, - _CUDA_VSTD::integer_sequence<_static_t, __values_or_sentinals...>, - __sentinal, - _CUDA_VSTD::integer_sequence>; - - using __value_pair_t = __compressed_pair<_Tp, __next_t>; - __value_pair_t __value_pair; - __MDSPAN_FORCE_INLINE_FUNCTION constexpr __next_t& __next() noexcept - { - return __value_pair.__second(); - } - __MDSPAN_FORCE_INLINE_FUNCTION constexpr __next_t const& __next() const noexcept - { - return __value_pair.__second(); - } - - static constexpr auto __size = sizeof...(_Idxs) + 1; - static constexpr auto __size_dynamic = 1 + __next_t::__size_dynamic; - - //-------------------------------------------------------------------------- - - _CCCL_HIDE_FROM_ABI constexpr __standard_layout_psa() noexcept = default; - _CCCL_HIDE_FROM_ABI constexpr __standard_layout_psa(__standard_layout_psa const&) noexcept = default; - _CCCL_HIDE_FROM_ABI constexpr 
__standard_layout_psa(__standard_layout_psa&&) noexcept = default; - _CCCL_HIDE_FROM_ABI __MDSPAN_CONSTEXPR_14_DEFAULTED __standard_layout_psa& - operator=(__standard_layout_psa const&) noexcept = default; - _CCCL_HIDE_FROM_ABI __MDSPAN_CONSTEXPR_14_DEFAULTED __standard_layout_psa& - operator=(__standard_layout_psa&&) noexcept = default; - _CCCL_HIDE_FROM_ABI ~__standard_layout_psa() noexcept = default; - - //-------------------------------------------------------------------------- - - _LIBCUDACXX_HIDE_FROM_ABI constexpr __standard_layout_psa( - __construct_psa_from_all_exts_values_tag_t, - _Tp const& __val, - __repeated_with_idxs<_Idxs, _Tp> const&... __vals) noexcept - : __value_pair(__val, __next_t(__construct_psa_from_all_exts_values_tag, __vals...)) - {} - - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr __standard_layout_psa( - __construct_psa_from_dynamic_exts_values_tag_t, _Tp const& __val, _Ts const&... __vals) noexcept - : __value_pair(__val, __next_t(__construct_psa_from_dynamic_exts_values_tag, __vals...)) - {} - - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr explicit __standard_layout_psa(array<_Up, _Np> const& __vals) noexcept - : __value_pair(_CUDA_VSTD::get<_Idx>(__vals), __vals) - {} - - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr explicit __standard_layout_psa( - __construct_psa_from_all_exts_array_tag_t __tag, array<_Up, _NStatic> const& __vals) noexcept - : __value_pair(_CUDA_VSTD::get<_Idx>(__vals), __next_t(__tag, __vals)) - {} - - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr explicit __standard_layout_psa( - __construct_psa_from_dynamic_exts_array_tag_t<_IDynamic>, array<_Up, _NDynamic> const& __vals) noexcept - : __value_pair(_CUDA_VSTD::get<_IDynamic>(__vals), - __next_t(__construct_psa_from_dynamic_exts_array_tag_t<_IDynamic + 1>{}, __vals)) - {} - - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr explicit __standard_layout_psa(_CUDA_VSTD::span<_Up, _Np> const& __vals) noexcept - : __value_pair(__vals[_Idx], __vals) - {} - - 
template - _LIBCUDACXX_HIDE_FROM_ABI constexpr explicit __standard_layout_psa( - __construct_psa_from_all_exts_array_tag_t __tag, _CUDA_VSTD::span<_Up, _NStatic> const& __vals) noexcept - : __value_pair(__vals[_Idx], __next_t(__tag, __vals)) - {} - - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr explicit __standard_layout_psa( - __construct_psa_from_dynamic_exts_array_tag_t<_IDynamic>, _CUDA_VSTD::span<_Up, _NDynamic> const& __vals) noexcept - : __value_pair(__vals[_IDynamic], - __next_t(__construct_psa_from_dynamic_exts_array_tag_t<_IDynamic + 1>{}, __vals)) - {} - - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr __standard_layout_psa( - __standard_layout_psa<_UTag, _Up, _static_U, _UValsSeq, __u_sentinal, _UIdxsSeq> const& __rhs) noexcept - : __value_pair(__rhs.template __get_n<_Idx>(), __rhs.__next()) - {} - - //-------------------------------------------------------------------------- - - // See comment in the previous partial specialization for why this is - // necessary. Or just trust me that it's messy. 
- __MDSPAN_FORCE_INLINE_FUNCTION - constexpr __standard_layout_psa const& __enable_psa_conversion() const noexcept - { - return *this; - } - - template = 0> - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp __get_n() const noexcept - { - return __next().template __get_n<_Ip>(); - } - template = 1> - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp __get_n() const noexcept - { - return __value_pair.__first(); - } - template = 0> - __MDSPAN_FORCE_INLINE_FUNCTION constexpr void __set_n(_Tp const& __rhs) noexcept - { - __next().__set_value(__rhs); - } - template = 1> - __MDSPAN_FORCE_INLINE_FUNCTION constexpr void __set_n(_Tp const& __rhs) noexcept - { - __value_pair.__first() = __rhs; - } - template __default = __sentinal> - __MDSPAN_FORCE_INLINE_FUNCTION static constexpr _static_t __get_static_n() noexcept - { - return __default; - } - template __default = __sentinal> - __MDSPAN_FORCE_INLINE_FUNCTION static constexpr _static_t __get_static_n() noexcept - { - return __next_t::template __get_static_n<_Ip, __default>(); - } - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp __get(size_t __n) const noexcept - { - return __value_pair.__first() * (_Tp(_Idx == __n)) + __next().__get(__n); - } - - //-------------------------------------------------------------------------- -}; - -// empty/terminal case -template -struct __standard_layout_psa<_Tag, - _Tp, - _static_t, - _CUDA_VSTD::integer_sequence<_static_t>, - __sentinal, - _CUDA_VSTD::integer_sequence> -{ - //-------------------------------------------------------------------------- - - static constexpr auto __size = 0; - static constexpr auto __size_dynamic = 0; - - //-------------------------------------------------------------------------- - - _CCCL_HIDE_FROM_ABI constexpr __standard_layout_psa() noexcept = default; - _CCCL_HIDE_FROM_ABI constexpr __standard_layout_psa(__standard_layout_psa const&) noexcept = default; - _CCCL_HIDE_FROM_ABI constexpr __standard_layout_psa(__standard_layout_psa&&) noexcept = default; - 
_CCCL_HIDE_FROM_ABI __MDSPAN_CONSTEXPR_14_DEFAULTED __standard_layout_psa& - operator=(__standard_layout_psa const&) noexcept = default; - _CCCL_HIDE_FROM_ABI __MDSPAN_CONSTEXPR_14_DEFAULTED __standard_layout_psa& - operator=(__standard_layout_psa&&) noexcept = default; - _CCCL_HIDE_FROM_ABI ~__standard_layout_psa() noexcept = default; - - _LIBCUDACXX_HIDE_FROM_ABI constexpr __standard_layout_psa(__construct_psa_from_all_exts_values_tag_t) noexcept {} - - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr __standard_layout_psa(__construct_psa_from_dynamic_exts_values_tag_t) noexcept - {} - - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr explicit __standard_layout_psa(array<_Up, _Np> const&) noexcept - {} - - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr explicit __standard_layout_psa( - __construct_psa_from_all_exts_array_tag_t, array<_Up, _NStatic> const&) noexcept - {} - - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr explicit __standard_layout_psa( - __construct_psa_from_dynamic_exts_array_tag_t<_IDynamic>, array<_Up, _NDynamic> const&) noexcept - {} - - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr explicit __standard_layout_psa(_CUDA_VSTD::span<_Up, _Np> const&) noexcept - {} - - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr explicit __standard_layout_psa( - __construct_psa_from_all_exts_array_tag_t, _CUDA_VSTD::span<_Up, _NStatic> const&) noexcept - {} - - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr explicit __standard_layout_psa( - __construct_psa_from_dynamic_exts_array_tag_t<_IDynamic>, _CUDA_VSTD::span<_Up, _NDynamic> const&) noexcept - {} - - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr __standard_layout_psa( - __standard_layout_psa<_UTag, _Up, _static_U, _UValsSeq, __u_sentinal, _UIdxsSeq> const&) noexcept - {} - - // See comment in the previous partial specialization for why this is - // necessary. Or just trust me that it's messy. 
- __MDSPAN_FORCE_INLINE_FUNCTION - constexpr __standard_layout_psa const& __enable_psa_conversion() const noexcept - { - return *this; - } - - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp __get(size_t /*n*/) const noexcept - { - return 0; - } -}; - -// Same thing, but with a disambiguator so that same-base issues doesn't cause -// a loss of standard-layout-ness. -template -struct __partially_static_sizes_tagged - : __standard_layout_psa<_Tag, T, _static_t, _CUDA_VSTD::integer_sequence<_static_t, __values_or_sentinals...>> -{ - using __tag_t = _Tag; - using __psa_impl_t = - __standard_layout_psa<_Tag, T, _static_t, _CUDA_VSTD::integer_sequence<_static_t, __values_or_sentinals...>>; - - _LIBCUDACXX_DELEGATE_CONSTRUCTORS( - __partially_static_sizes_tagged, - __standard_layout_psa, - _Tag, - T, - _static_t, - _CUDA_VSTD::integer_sequence<_static_t, __values_or_sentinals...>); - - _CCCL_HIDE_FROM_ABI constexpr __partially_static_sizes_tagged(__partially_static_sizes_tagged const&) noexcept = - default; - _CCCL_HIDE_FROM_ABI constexpr __partially_static_sizes_tagged(__partially_static_sizes_tagged&&) noexcept = default; - _CCCL_HIDE_FROM_ABI __MDSPAN_CONSTEXPR_14_DEFAULTED __partially_static_sizes_tagged& - operator=(__partially_static_sizes_tagged const&) noexcept = default; - _CCCL_HIDE_FROM_ABI __MDSPAN_CONSTEXPR_14_DEFAULTED __partially_static_sizes_tagged& - operator=(__partially_static_sizes_tagged&&) noexcept = default; - _CCCL_HIDE_FROM_ABI ~__partially_static_sizes_tagged() noexcept = default; - - template - __MDSPAN_FORCE_INLINE_FUNCTION constexpr explicit __partially_static_sizes_tagged( - __partially_static_sizes_tagged<_UTag, T, _static_t, __values_or_sentinals...> const& __vals) noexcept - : __psa_impl_t(__vals.__enable_psa_conversion()) - {} -}; - -struct __no_tag -{}; -template -struct __partially_static_sizes : __partially_static_sizes_tagged<__no_tag, T, _static_t, __values_or_sentinals...> -{ -private: - using __base_t = 
__partially_static_sizes_tagged<__no_tag, T, _static_t, __values_or_sentinals...>; - template - __MDSPAN_FORCE_INLINE_FUNCTION constexpr __partially_static_sizes( - __partially_static_sizes_tagged<_UTag, T, _static_t, __values_or_sentinals...>&& __vals) noexcept - : __base_t(_CUDA_VSTD::move(__vals)) - {} - -public: -# if _CCCL_COMPILER(NVRTC) || _CCCL_CUDACC_BELOW(11, 3) - template = 0> - __MDSPAN_FORCE_INLINE_FUNCTION constexpr __partially_static_sizes(_Args&&... __args) noexcept( - noexcept(__base_t(_CUDA_VSTD::declval<_Args>()...))) - : __base_t(_CUDA_VSTD::forward<_Args>(__args)...) - {} -# else // ^^^ _CCCL_COMPILER(NVRTC) || nvcc < 11.3 ^^^ / vvv !_CCCL_COMPILER(NVRTC) || nvcc >= 11.3 vvv - using __base_t::__base_t; -# endif // !_CCCL_COMPILER(NVRTC) || nvcc >= 11.3 - -# ifdef __MDSPAN_DEFAULTED_CONSTRUCTORS_INHERITANCE_WORKAROUND - _LIBCUDACXX_HIDE_FROM_ABI constexpr __partially_static_sizes() noexcept - : __base_t() - {} -# endif - template - __MDSPAN_FORCE_INLINE_FUNCTION constexpr __partially_static_sizes_tagged<_UTag, T, _static_t, __values_or_sentinals...> - __with_tag() const noexcept - { - return __partially_static_sizes_tagged<_UTag, T, _static_t, __values_or_sentinals...>(*this); - } -}; - -# endif // __MDSPAN_PRESERVE_STATIC_LAYOUT - -} // end namespace __detail - -#endif // _CCCL_STD_VER > 2011 - -_LIBCUDACXX_END_NAMESPACE_STD - -#endif // _LIBCUDACXX___MDSPAN_STANDARD_LAYOUT_STATIC_ARRAY_HPP diff --git a/libcudacxx/include/cuda/std/__mdspan/static_array.h b/libcudacxx/include/cuda/std/__mdspan/static_array.h deleted file mode 100644 index 770e40e114a..00000000000 --- a/libcudacxx/include/cuda/std/__mdspan/static_array.h +++ /dev/null @@ -1,296 +0,0 @@ -/* -//@HEADER -// ************************************************************************ -// -// Kokkos v. 2.0 -// Copyright (2019) Sandia Corporation -// -// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, -// the U.S. Government retains certain rights in this software. 
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// 1. Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// 3. Neither the name of the Corporation nor the names of the -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -// Questions? Contact Christian R. 
Trott (crtrott@sandia.gov) -// -// ************************************************************************ -//@HEADER -*/ - -#ifndef _LIBCUDACXX___MDSPAN_STATIC_ARRAY_HPP -#define _LIBCUDACXX___MDSPAN_STATIC_ARRAY_HPP - -#include - -#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) -# pragma GCC system_header -#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) -# pragma clang system_header -#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) -# pragma system_header -#endif // no system header - -#include // dynamic_extent -#include -#include -#include -#include -#include -#include -#include -#include - -#if !__MDSPAN_PRESERVE_STANDARD_LAYOUT - -_LIBCUDACXX_BEGIN_NAMESPACE_STD - -# if _CCCL_STD_VER > 2011 - -namespace __detail -{ - -//============================================================================== - -template -struct __mask_element -{}; - -template -struct __mask_sequence_assign_op -{ - template <_Tp _V> - __mask_sequence_assign_op<_Tp, _Result..., _V> operator=(__mask_element<_Tp, _V, true>&&); - template <_Tp _V> - __mask_sequence_assign_op<_Tp, _Result...> operator=(__mask_element<_Tp, _V, false>&&); - using __result = _CUDA_VSTD::integer_sequence<_Tp, _Result...>; -}; - -template -struct __mask_sequence; - -template -struct __mask_sequence<_CUDA_VSTD::integer_sequence<_Tp, _Vals...>, _CUDA_VSTD::integer_sequence> -{ - using type = typename decltype(__MDSPAN_FOLD_ASSIGN_LEFT( - __mask_sequence_assign_op<_Tp>{}, /* = ... = */ __mask_element<_Tp, _Vals, _Masks>{}))::__result; -}; - -//============================================================================== - -template -class __partially_static_array_impl; - -template -class __partially_static_array_impl< - _Tp, - _static_t, - _CUDA_VSTD::integer_sequence<_static_t, __values_or_sentinals...>, - __sentinal, - _CUDA_VSTD::integer_sequence, - _CUDA_VSTD::integer_sequence, - _CUDA_VSTD::integer_sequence> - : private __maybe_static_value<_Tp, _static_t, __values_or_sentinals, __sentinal, _Idxs>... 
-{ -private: - template - using __base_n = - __type_index_c<_Np, __maybe_static_value<_Tp, _static_t, __values_or_sentinals, __sentinal, _Idxs>...>; - -public: - static constexpr auto __size = sizeof...(_Idxs); - static constexpr auto __size_dynamic = - __MDSPAN_FOLD_PLUS_RIGHT(static_cast((__values_or_sentinals == __sentinal)), /* + ... + */ 0); - - //-------------------------------------------------------------------------- - - _CCCL_HIDE_FROM_ABI constexpr __partially_static_array_impl() = default; - _CCCL_HIDE_FROM_ABI constexpr __partially_static_array_impl(__partially_static_array_impl const&) noexcept = default; - _CCCL_HIDE_FROM_ABI constexpr __partially_static_array_impl(__partially_static_array_impl&&) noexcept = default; - _CCCL_HIDE_FROM_ABI __MDSPAN_CONSTEXPR_14_DEFAULTED __partially_static_array_impl& - operator=(__partially_static_array_impl const&) noexcept = default; - _CCCL_HIDE_FROM_ABI __MDSPAN_CONSTEXPR_14_DEFAULTED __partially_static_array_impl& - operator=(__partially_static_array_impl&&) noexcept = default; - _CCCL_HIDE_FROM_ABI ~__partially_static_array_impl() noexcept = default; - - _LIBCUDACXX_HIDE_FROM_ABI constexpr __partially_static_array_impl( - __construct_psa_from_all_exts_values_tag_t, __repeated_with_idxs<_Idxs, _Tp> const&... __vals) noexcept - : __base_n<_Idxs>(__base_n<_Idxs>{{__vals}})... - {} - - _LIBCUDACXX_HIDE_FROM_ABI constexpr __partially_static_array_impl( - __construct_psa_from_dynamic_exts_values_tag_t, - __repeated_with_idxs<_IdxsDynamicIdxs, _Tp> const&... __vals) noexcept - : __base_n<_IdxsDynamic>(__base_n<_IdxsDynamic>{{__vals}})... - {} - - _LIBCUDACXX_HIDE_FROM_ABI constexpr explicit __partially_static_array_impl( - array<_Tp, sizeof...(_Idxs)> const& __vals) noexcept - : __partially_static_array_impl(__construct_psa_from_all_exts_values_tag, _CUDA_VSTD::get<_Idxs>(__vals)...) 
- {} - - _CCCL_TEMPLATE(bool _SizeMatches = (sizeof...(_Idxs) != __size_dynamic)) - _CCCL_REQUIRES(_SizeMatches) - _LIBCUDACXX_HIDE_FROM_ABI constexpr explicit __partially_static_array_impl( - array<_Tp, __size_dynamic> const& __vals) noexcept - __partially_static_array_impl(__construct_psa_from_dynamic_exts_values_tag, - _CUDA_VSTD::get<_IdxsDynamicIdxs>(__vals)...) - {} - - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr __partially_static_array_impl( - __partially_static_array_impl<_Up, - _static_u, - _UValsSeq, - __u_sentinal, - _UIdxsSeq, - _UIdxsDynamicSeq, - _UIdxsDynamicIdxsSeq> const& __rhs) noexcept - : __partially_static_array_impl(__construct_psa_from_all_exts_values_tag, __rhs.template __get_n<_Idxs>()...) - {} - - //-------------------------------------------------------------------------- - - // See comment in the previous partial specialization for why this is - // necessary. Or just trust me that it's messy. - __MDSPAN_FORCE_INLINE_FUNCTION - constexpr __partially_static_array_impl const& __enable_psa_conversion() const noexcept - { - return *this; - } - - template - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp __get_n() const noexcept - { - return static_cast<__base_n<_Ip> const*>(this)->__value(); - } - - template - __MDSPAN_FORCE_INLINE_FUNCTION constexpr void __set_n(_Up&& __rhs) noexcept - { - static_cast<__base_n<_Ip>*>(this)->__set_value((_Up&&) __rhs); - } - - template - __MDSPAN_FORCE_INLINE_FUNCTION static constexpr _static_t __get_static_n() noexcept - { - return __base_n<_Ip>::__static_value == __sentinal ? __default : __base_n<_Ip>::__static_value; - } - - __MDSPAN_FORCE_INLINE_FUNCTION constexpr _Tp __get(size_t __n) const noexcept - { - return __MDSPAN_FOLD_PLUS_RIGHT((_Tp(_Idxs == __n) * __get_n<_Idxs>()), /* + ... 
+ */ _Tp(0)); - } -}; - -//============================================================================== - -template > -struct __partially_static_array_impl_maker; - -template -struct __partially_static_array_impl_maker<_Tp, - _static_t, - _CUDA_VSTD::integer_sequence<_static_t, _Vals...>, - __sentinal, - _CUDA_VSTD::integer_sequence> -{ - using __dynamic_idxs = typename __mask_sequence<_CUDA_VSTD::integer_sequence, - _CUDA_VSTD::integer_sequence>::type; - using __impl_base = __partially_static_array_impl< - _Tp, - _static_t, - _CUDA_VSTD::integer_sequence<_static_t, _Vals...>, - __sentinal, - _CUDA_VSTD::integer_sequence, - __dynamic_idxs, - _CUDA_VSTD::make_index_sequence<__dynamic_idxs::size()>>; -}; - -template -using __partially_static_array_impl_t = - typename __partially_static_array_impl_maker<_Tp, _static_t, _ValsSeq, __sentinal>::__impl_base; - -template -class __partially_static_array_with_sentinal - : public __partially_static_array_impl_t<_Tp, _static_t, _ValsSeq, __sentinal> -{ - _LIBCUDACXX_DELEGATE_CONSTRUCTORS( - __partially_static_array_with_sentinal, __partially_static_array_impl_t, _Tp, _static_t, _ValsSeq, __sentinal); -}; - -//============================================================================== - -template -struct __partially_static_sizes - : __partially_static_array_with_sentinal> -{ - _LIBCUDACXX_DELEGATE_CONSTRUCTORS( - __partially_static_sizes, - __partially_static_array_with_sentinal, - T, - _static_t, - _CUDA_VSTD::integer_sequence<_static_t, __values_or_sentinals...>); - - template - __MDSPAN_FORCE_INLINE_FUNCTION constexpr __partially_static_sizes - __with_tag() const noexcept - { - return *this; - } -}; - -// Tags are needed for the standard layout version, but not here -template -using __partially_static_sizes_tagged = __partially_static_sizes; - -} // end namespace __detail - -# endif // _CCCL_STD_VER > 2011 - -_LIBCUDACXX_END_NAMESPACE_STD - -#endif // !__MDSPAN_PRESERVE_STANDARD_LAYOUT - -#endif // 
_LIBCUDACXX___MDSPAN_STATIC_ARRAY_HPP diff --git a/libcudacxx/include/cuda/std/__mdspan/submdspan.h b/libcudacxx/include/cuda/std/__mdspan/submdspan.h deleted file mode 100644 index 74bf1f79943..00000000000 --- a/libcudacxx/include/cuda/std/__mdspan/submdspan.h +++ /dev/null @@ -1,530 +0,0 @@ -/* -//@HEADER -// ************************************************************************ -// -// Kokkos v. 2.0 -// Copyright (2019) Sandia Corporation -// -// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, -// the U.S. Government retains certain rights in this software. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// 1. Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// 3. Neither the name of the Corporation nor the names of the -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY -// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL SANDIA CORPORATION OR THE -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -// Questions? Contact Christian R. Trott (crtrott@sandia.gov) -// -// ************************************************************************ -//@HEADER -*/ - -#ifndef _LIBCUDACXX___MDSPAN_SUBMDSPAN_HPP -#define _LIBCUDACXX___MDSPAN_SUBMDSPAN_HPP - -#include - -#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) -# pragma GCC system_header -#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) -# pragma clang system_header -#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) -# pragma system_header -#endif // no system header - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -_LIBCUDACXX_BEGIN_NAMESPACE_STD - -#if _CCCL_STD_VER > 2011 - -namespace __detail -{ - -template -struct __slice_wrap -{ - _Tp slice; - size_t old_extent; - size_t old_stride; -}; - -//-------------------------------------------------------------------------------- - -template -_LIBCUDACXX_HIDE_FROM_ABI constexpr __slice_wrap<_OldExtent, _OldStaticStride, size_t> -__wrap_slice(size_t __val, size_t __ext, size_t __stride) -{ - return {__val, __ext, __stride}; -} - -template -_LIBCUDACXX_HIDE_FROM_ABI constexpr __slice_wrap<_OldExtent, _OldStaticStride, integral_constant<_IntegerType, _Value0>> -__wrap_slice(size_t __val, size_t __ext, integral_constant<_IntegerType, _Value0> __stride) -{ -# if __MDSPAN_HAS_CXX_17 - if constexpr 
(_CUDA_VSTD::is_signed_v<_IntegerType>) - { - static_assert(_Value0 >= _IntegerType(0), "Invalid slice specifier"); - } -# endif // __MDSPAN_HAS_CXX_17 - - return {__val, __ext, __stride}; -} - -template -_LIBCUDACXX_HIDE_FROM_ABI constexpr __slice_wrap<_OldExtent, _OldStaticStride, full_extent_t> -__wrap_slice(full_extent_t __val, size_t __ext, size_t __stride) -{ - return {__val, __ext, __stride}; -} - -// TODO generalize this to anything that works with get<0> and get<1> -template -_LIBCUDACXX_HIDE_FROM_ABI constexpr __slice_wrap<_OldExtent, _OldStaticStride, _CUDA_VSTD::tuple> -__wrap_slice(_CUDA_VSTD::tuple const& __val, size_t __ext, size_t __stride) -{ - return {__val, __ext, __stride}; -} - -template -_LIBCUDACXX_HIDE_FROM_ABI constexpr __slice_wrap< - _OldExtent, - _OldStaticStride, - _CUDA_VSTD::tuple, integral_constant<_IntegerType1, _Value1>>> -__wrap_slice( - _CUDA_VSTD::tuple, integral_constant<_IntegerType1, _Value1>> const& __val, - size_t __ext, - size_t __stride) -{ - static_assert(_Value1 >= _Value0, "Invalid slice tuple"); - return {__val, __ext, __stride}; -} - -//-------------------------------------------------------------------------------- - -// a layout right remains a layout right if it is indexed by 0 or more scalars, -// then optionally a pair and finally 0 or more all -template < - // what we encountered until now preserves the layout right - bool _Result = true, - // we only encountered 0 or more scalars, no pair or all - bool _EncounteredOnlyScalar = true> -struct preserve_layout_right_analysis : integral_constant -{ - using layout_type_if_preserved = layout_right; - using encounter_pair = preserve_layout_right_analysis< - // if we encounter a pair, the layout remains a layout right only if it was one before - // and that only scalars were encountered until now - _Result && _EncounteredOnlyScalar, - // if we encounter a pair, we didn't encounter scalars only - false>; - using encounter_all = preserve_layout_right_analysis< - // if we 
encounter a all, the layout remains a layout right if it was one before - _Result, - // if we encounter a all, we didn't encounter scalars only - false>; - using encounter_scalar = preserve_layout_right_analysis< - // if we encounter a scalar, the layout remains a layout right only if it was one before - // and that only scalars were encountered until now - _Result && _EncounteredOnlyScalar, - // if we encounter a scalar, the fact that we encountered scalars only doesn't change - _EncounteredOnlyScalar>; -}; - -// a layout left remains a layout left if it is indexed by 0 or more all, -// then optionally a pair and finally 0 or more scalars -template -struct preserve_layout_left_analysis : integral_constant -{ - using layout_type_if_preserved = layout_left; - using encounter_pair = preserve_layout_left_analysis< - // if we encounter a pair, the layout remains a layout left only if it was one before - // and that only all were encountered until now - _Result && _EncounteredOnlyAll, - // if we encounter a pair, we didn't encounter all only - false>; - using encounter_all = preserve_layout_left_analysis< - // if we encounter a all, the layout remains a layout left only if it was one before - // and that only all were encountered until now - _Result && _EncounteredOnlyAll, - // if we encounter a all, the fact that we encountered scalars all doesn't change - _EncounteredOnlyAll>; - using encounter_scalar = preserve_layout_left_analysis< - // if we encounter a scalar, the layout remains a layout left if it was one before - _Result, - // if we encounter a scalar, we didn't encounter scalars only - false>; -}; - -struct ignore_layout_preservation : integral_constant -{ - using layout_type_if_preserved = void; - using encounter_pair = ignore_layout_preservation; - using encounter_all = ignore_layout_preservation; - using encounter_scalar = ignore_layout_preservation; -}; - -template -struct preserve_layout_analysis : ignore_layout_preservation -{}; -template <> -struct 
preserve_layout_analysis : preserve_layout_right_analysis<> -{}; -template <> -struct preserve_layout_analysis : preserve_layout_left_analysis<> -{}; - -//-------------------------------------------------------------------------------- - -template , - class _ExtsArray = __partially_static_sizes<_IndexT, size_t>, - class _StridesArray = __partially_static_sizes<_IndexT, size_t>, - class = _CUDA_VSTD::make_index_sequence<_OffsetsArray::__size>, - class = _CUDA_VSTD::make_index_sequence<_ExtsArray::__size>, - class = _CUDA_VSTD::make_index_sequence<_StridesArray::__size>> -struct __assign_op_slice_handler; - -/* clang-format: off */ -template -struct __assign_op_slice_handler< - _IndexT, - _PreserveLayoutAnalysis, - __partially_static_sizes<_IndexT, size_t, _Offsets...>, - __partially_static_sizes<_IndexT, size_t, _Exts...>, - __partially_static_sizes<_IndexT, size_t, _Strides...>, - _CUDA_VSTD::integer_sequence, - _CUDA_VSTD::integer_sequence, - _CUDA_VSTD::integer_sequence> -{ - // TODO remove this for better compiler performance - static_assert(__fold_and_v<(_Strides == dynamic_extent || _Strides > 0)...>, " "); - static_assert(__fold_and_v<(_Offsets == dynamic_extent || _Offsets >= 0)...>, " "); - - using __offsets_storage_t = __partially_static_sizes<_IndexT, size_t, _Offsets...>; - using __extents_storage_t = __partially_static_sizes<_IndexT, size_t, _Exts...>; - using __strides_storage_t = __partially_static_sizes<_IndexT, size_t, _Strides...>; - __offsets_storage_t __offsets; - __extents_storage_t __exts; - __strides_storage_t __strides; - -# ifdef __INTEL_COMPILER -# if __INTEL_COMPILER <= 1800 - _LIBCUDACXX_HIDE_FROM_ABI constexpr __assign_op_slice_handler(__assign_op_slice_handler&& __other) noexcept - : __offsets(_CUDA_VSTD::move(__other.__offsets)) - , __exts(_CUDA_VSTD::move(__other.__exts)) - , __strides(_CUDA_VSTD::move(__other.__strides)) - {} - _LIBCUDACXX_HIDE_FROM_ABI constexpr __assign_op_slice_handler( - __offsets_storage_t&& __o, 
__extents_storage_t&& __e, __strides_storage_t&& __s) noexcept - : __offsets(_CUDA_VSTD::move(__o)) - , __exts(_CUDA_VSTD::move(__e)) - , __strides(_CUDA_VSTD::move(__s)) - {} -# endif -# endif - -// Don't define this unless we need it; they have a cost to compile -# ifndef __MDSPAN_USE_RETURN_TYPE_DEDUCTION - using __extents_type = _CUDA_VSTD::extents<_IndexT, _Exts...>; -# endif - - // For size_t slice, skip the extent and stride, but add an offset corresponding to the value - template - __MDSPAN_FORCE_INLINE_FUNCTION // NOLINT (misc-unconventional-assign-operator) - constexpr auto - operator=(__slice_wrap<_OldStaticExtent, _OldStaticStride, size_t>&& __slice) noexcept - -> __assign_op_slice_handler<_IndexT, - typename _PreserveLayoutAnalysis::encounter_scalar, - __partially_static_sizes<_IndexT, size_t, _Offsets..., dynamic_extent>, - __partially_static_sizes<_IndexT, size_t, _Exts...>, - __partially_static_sizes<_IndexT, size_t, _Strides...>> - { - return {__partially_static_sizes<_IndexT, size_t, _Offsets..., dynamic_extent>( - __construct_psa_from_all_exts_values_tag, __offsets.template __get_n<_OffsetIdxs>()..., __slice.slice), - _CUDA_VSTD::move(__exts), - _CUDA_VSTD::move(__strides)}; - } - - // Treat integral_constant slice like size_t slice, but with a compile-time offset. - // The result's extents_type can't take advantage of that, - // but it might help for specialized layouts. 
- template - __MDSPAN_FORCE_INLINE_FUNCTION // NOLINT (misc-unconventional-assign-operator) - constexpr auto - operator=(__slice_wrap<_OldStaticExtent, _OldStaticStride, integral_constant<_IntegerType, _Value0>>&&) noexcept - -> __assign_op_slice_handler<_IndexT, - typename _PreserveLayoutAnalysis::encounter_scalar, - __partially_static_sizes<_IndexT, size_t, _Offsets..., _Value0>, - __partially_static_sizes<_IndexT, size_t, _Exts...>, - __partially_static_sizes<_IndexT, size_t, _Strides...>> - { -# if __MDSPAN_HAS_CXX_17 - if constexpr (_CUDA_VSTD::is_signed_v<_IntegerType>) - { - static_assert(_Value0 >= _IntegerType(0), "Invalid slice specifier"); - } -# endif // __MDSPAN_HAS_CXX_17 - return {__partially_static_sizes<_IndexT, size_t, _Offsets..., _Value0>( - __construct_psa_from_all_exts_values_tag, __offsets.template __get_n<_OffsetIdxs>()..., size_t(_Value0)), - _CUDA_VSTD::move(__exts), - _CUDA_VSTD::move(__strides)}; - } - - // For a _CUDA_VSTD::full_extent, offset 0 and old extent - template - __MDSPAN_FORCE_INLINE_FUNCTION // NOLINT (misc-unconventional-assign-operator) - constexpr auto - operator=(__slice_wrap<_OldStaticExtent, _OldStaticStride, full_extent_t>&& __slice) noexcept - -> __assign_op_slice_handler<_IndexT, - typename _PreserveLayoutAnalysis::encounter_all, - __partially_static_sizes<_IndexT, size_t, _Offsets..., 0>, - __partially_static_sizes<_IndexT, size_t, _Exts..., _OldStaticExtent>, - __partially_static_sizes<_IndexT, size_t, _Strides..., _OldStaticStride>> - { - return { - __partially_static_sizes<_IndexT, size_t, _Offsets..., 0>( - __construct_psa_from_all_exts_values_tag, __offsets.template __get_n<_OffsetIdxs>()..., size_t(0)), - __partially_static_sizes<_IndexT, size_t, _Exts..., _OldStaticExtent>( - __construct_psa_from_all_exts_values_tag, __exts.template __get_n<_ExtIdxs>()..., __slice.old_extent), - __partially_static_sizes<_IndexT, size_t, _Strides..., _OldStaticStride>( - __construct_psa_from_all_exts_values_tag, 
__strides.template __get_n<_StrideIdxs>()..., __slice.old_stride)}; - } - - // For a _CUDA_VSTD::tuple, add an offset and add a new dynamic extent (strides still preserved) - template - __MDSPAN_FORCE_INLINE_FUNCTION // NOLINT (misc-unconventional-assign-operator) - constexpr auto - operator=(__slice_wrap<_OldStaticExtent, _OldStaticStride, tuple>&& __slice) noexcept - -> __assign_op_slice_handler<_IndexT, - typename _PreserveLayoutAnalysis::encounter_pair, - __partially_static_sizes<_IndexT, size_t, _Offsets..., dynamic_extent>, - __partially_static_sizes<_IndexT, size_t, _Exts..., dynamic_extent>, - __partially_static_sizes<_IndexT, size_t, _Strides..., _OldStaticStride>> - { - return { - __partially_static_sizes<_IndexT, size_t, _Offsets..., dynamic_extent>( - __construct_psa_from_all_exts_values_tag, - __offsets.template __get_n<_OffsetIdxs>()..., - _CUDA_VSTD::get<0>(__slice.slice)), - __partially_static_sizes<_IndexT, size_t, _Exts..., dynamic_extent>( - __construct_psa_from_all_exts_values_tag, - __exts.template __get_n<_ExtIdxs>()..., - _CUDA_VSTD::get<1>(__slice.slice) - _CUDA_VSTD::get<0>(__slice.slice)), - __partially_static_sizes<_IndexT, size_t, _Strides..., _OldStaticStride>( - __construct_psa_from_all_exts_values_tag, __strides.template __get_n<_StrideIdxs>()..., __slice.old_stride)}; - } - - // For a _CUDA_VSTD::tuple of two integral_constant, do something like - // we did above for a tuple of two size_t, but make sure the - // result's extents type make the values compile-time constants. 
- template - __MDSPAN_FORCE_INLINE_FUNCTION // NOLINT (misc-unconventional-assign-operator) - constexpr auto - operator=(__slice_wrap<_OldStaticExtent, - _OldStaticStride, - tuple, integral_constant<_IntegerType1, _Value1>>>&& - __slice) noexcept - -> __assign_op_slice_handler<_IndexT, - typename _PreserveLayoutAnalysis::encounter_pair, - __partially_static_sizes<_IndexT, size_t, _Offsets..., size_t(_Value0)>, - __partially_static_sizes<_IndexT, size_t, _Exts..., size_t(_Value1 - _Value0)>, - __partially_static_sizes<_IndexT, size_t, _Strides..., _OldStaticStride>> - { - static_assert(_Value1 >= _Value0, "Invalid slice specifier"); - return { - // We're still turning the template parameters _Value0 and _Value1 - // into (constexpr) run-time values here. - __partially_static_sizes<_IndexT, size_t, _Offsets..., size_t(_Value0)>( - __construct_psa_from_all_exts_values_tag, __offsets.template __get_n<_OffsetIdxs>()..., _Value0), - __partially_static_sizes<_IndexT, size_t, _Exts..., size_t(_Value1 - _Value0)>( - __construct_psa_from_all_exts_values_tag, __exts.template __get_n<_ExtIdxs>()..., _Value1 - _Value0), - __partially_static_sizes<_IndexT, size_t, _Strides..., _OldStaticStride>( - __construct_psa_from_all_exts_values_tag, __strides.template __get_n<_StrideIdxs>()..., __slice.old_stride)}; - } - - // TODO defer instantiation of this? 
- using layout_type = - conditional_t<_PreserveLayoutAnalysis::value, - typename _PreserveLayoutAnalysis::layout_type_if_preserved, - layout_stride>; - - // TODO noexcept specification - template - _LIBCUDACXX_HIDE_FROM_ABI __MDSPAN_DEDUCE_RETURN_TYPE_SINGLE_LINE( - (constexpr /* auto */ - _make_layout_mapping_impl(NewLayout) noexcept), - ( - /* not layout stride, so don't pass dynamic_strides */ - /* return */ typename NewLayout::template mapping<_CUDA_VSTD::extents<_IndexT, _Exts...>>( - extents<_IndexT, _Exts...>::__make_extents_impl(_CUDA_VSTD::move(__exts))) /* ; */ - )) - - _LIBCUDACXX_HIDE_FROM_ABI __MDSPAN_DEDUCE_RETURN_TYPE_SINGLE_LINE( - (constexpr /* auto */ - _make_layout_mapping_impl(layout_stride) noexcept), - ( - /* return */ layout_stride::template mapping<_CUDA_VSTD::extents<_IndexT, _Exts...>>::__make_mapping( - _CUDA_VSTD::move(__exts), _CUDA_VSTD::move(__strides)) /* ; */ - )) - - template // mostly for deferred instantiation, but maybe we'll use this in the future - _LIBCUDACXX_HIDE_FROM_ABI __MDSPAN_DEDUCE_RETURN_TYPE_SINGLE_LINE( - (constexpr /* auto */ - make_layout_mapping(_OldLayoutMapping const&) noexcept), - ( - /* return */ this->_make_layout_mapping_impl(layout_type{}) /* ; */ - )) -}; - -//============================================================================== - -# if __MDSPAN_USE_RETURN_TYPE_DEDUCTION -// Forking this because the C++11 version will be *completely* unreadable -template -_LIBCUDACXX_HIDE_FROM_ABI constexpr auto _submdspan_impl( - _CUDA_VSTD::integer_sequence, - mdspan<_ET, _CUDA_VSTD::extents<_ST, _Exts...>, _LP, _AP> const& __src, - _SliceSpecs&&... __slices) noexcept -{ - using __index_t = _ST; - auto __handled = __MDSPAN_FOLD_ASSIGN_LEFT( - (__detail::__assign_op_slice_handler<__index_t, __detail::preserve_layout_analysis<_LP>>{ - __partially_static_sizes<__index_t, size_t>{}, - __partially_static_sizes<__index_t, size_t>{}, - __partially_static_sizes<__index_t, size_t>{}}), - /* = ... 
= */ - __detail::__wrap_slice<_Exts, dynamic_extent>( - __slices, __src.extents().template __extent<_Idxs>(), __src.mapping().stride(_Idxs))); - - size_t __offset_size = __src.mapping()(__handled.__offsets.template __get_n<_Idxs>()...); - auto __offset_ptr = __src.accessor().offset(__src.data_handle(), __offset_size); - auto __map = __handled.make_layout_mapping(__src.mapping()); - auto __acc_pol = typename _AP::offset_policy(__src.accessor()); - return mdspan<_ET, - remove_const_t<_CUDA_VSTD::remove_reference_t>, - typename decltype(__handled)::layout_type, - remove_const_t<_CUDA_VSTD::remove_reference_t>>( - _CUDA_VSTD::move(__offset_ptr), _CUDA_VSTD::move(__map), _CUDA_VSTD::move(__acc_pol)); -} -# else - -template -auto _submdspan_impl_helper(_Src&& __src, _Handled&& __h, _CUDA_VSTD::integer_sequence) - -> mdspan<_ET, typename _Handled::__extents_type, typename _Handled::layout_type, typename _AP::offset_policy> -{ - return {__src.accessor().offset(__src.data_handle(), __src.mapping()(__h.__offsets.template __get_n<_Idxs>()...)), - __h.make_layout_mapping(__src.mapping()), - typename _AP::offset_policy(__src.accessor())}; -} - -template -_LIBCUDACXX_HIDE_FROM_ABI __MDSPAN_DEDUCE_RETURN_TYPE_SINGLE_LINE( - (constexpr /* auto */ _submdspan_impl(_CUDA_VSTD::integer_sequence __seq, - mdspan<_ET, _CUDA_VSTD::extents<_ST, _Exts...>, _LP, _AP> const& __src, - _SliceSpecs&&... __slices) noexcept), - ( - /* return */ _submdspan_impl_helper<_ET, _AP>( - __src, - __MDSPAN_FOLD_ASSIGN_LEFT( - (__detail::__assign_op_slice_handler>{ - __partially_static_sizes<_ST, size_t>{}, - __partially_static_sizes<_ST, size_t>{}, - __partially_static_sizes<_ST, size_t>{}}), - /* = ... 
= */ - __detail::__wrap_slice<_Exts, dynamic_extent>( - __slices, __src.extents().template __extent<_Idxs>(), __src.mapping().stride(_Idxs))), - __seq) /* ; */ - )) - -# endif - -template -struct _is_layout_stride : false_type -{}; -template <> -struct _is_layout_stride : true_type -{}; - -} // namespace __detail - -//============================================================================== - -_CCCL_TEMPLATE(class _ET, class _EXT, class _LP, class _AP, class... _SliceSpecs) -_CCCL_REQUIRES((_CCCL_TRAIT(_CUDA_VSTD::is_same, _LP, layout_left) - || _CCCL_TRAIT(_CUDA_VSTD::is_same, _LP, layout_right) || __detail::_is_layout_stride<_LP>::value) - _CCCL_AND __fold_and_v<(_CCCL_TRAIT(_CUDA_VSTD::is_convertible, _SliceSpecs, size_t) - || _CCCL_TRAIT(_CUDA_VSTD::is_convertible, _SliceSpecs, tuple) - || _CCCL_TRAIT(_CUDA_VSTD::is_convertible, _SliceSpecs, full_extent_t))...> - _CCCL_AND(sizeof...(_SliceSpecs) == _EXT::rank())) -_LIBCUDACXX_HIDE_FROM_ABI __MDSPAN_DEDUCE_RETURN_TYPE_SINGLE_LINE( - (constexpr submdspan(mdspan<_ET, _EXT, _LP, _AP> const& __src, _SliceSpecs... __slices) noexcept), - ( - /* return */ - __detail::_submdspan_impl(_CUDA_VSTD::make_index_sequence{}, __src, __slices...) 
/*;*/ - )) -/* clang-format: on */ - -#endif // _CCCL_STD_VER > 2011 - - _LIBCUDACXX_END_NAMESPACE_STD - -#endif // _LIBCUDACXX___MDSPAN_SUBMDSPAN_HPP diff --git a/libcudacxx/include/cuda/std/__tuple_dir/sfinae_helpers.h b/libcudacxx/include/cuda/std/__tuple_dir/sfinae_helpers.h index 83bb7d809fb..71141ae4896 100644 --- a/libcudacxx/include/cuda/std/__tuple_dir/sfinae_helpers.h +++ b/libcudacxx/include/cuda/std/__tuple_dir/sfinae_helpers.h @@ -40,12 +40,16 @@ _LIBCUDACXX_BEGIN_NAMESPACE_STD +#if _CCCL_STD_VER >= 2017 +template +using __all = integral_constant; +#else // ^^^ _CCCL_STD_VER >= 2017 ^^^ / vvv _CCCL_STD_VER <= 2014 vvv template struct __all_dummy; template using __all = is_same<__all_dummy<_Pred...>, __all_dummy<((void) _Pred, true)...>>; - +#endif // _CCCL_STD_VER <= 2014 struct __tuple_sfinae_base { template diff --git a/libcudacxx/include/cuda/std/__tuple_dir/tuple_like.h b/libcudacxx/include/cuda/std/__tuple_dir/tuple_like.h index d9f30347dde..b0e1fd43d14 100644 --- a/libcudacxx/include/cuda/std/__tuple_dir/tuple_like.h +++ b/libcudacxx/include/cuda/std/__tuple_dir/tuple_like.h @@ -20,57 +20,69 @@ # pragma system_header #endif // no system header +#include #include #include #include #include #include +#include #include #include +#include #include _LIBCUDACXX_BEGIN_NAMESPACE_STD template -struct __tuple_like : false_type +struct __tuple_like_impl : false_type {}; template -struct __tuple_like : public __tuple_like<_Tp> +struct __tuple_like_impl : public __tuple_like_impl<_Tp> {}; template -struct __tuple_like : public __tuple_like<_Tp> +struct __tuple_like_impl : public __tuple_like_impl<_Tp> {}; template -struct __tuple_like : public __tuple_like<_Tp> +struct __tuple_like_impl : public __tuple_like_impl<_Tp> {}; template -struct __tuple_like> : true_type +struct __tuple_like_impl> : true_type {}; template -struct __tuple_like> : true_type +struct __tuple_like_impl> : true_type {}; template -struct __tuple_like> : true_type +struct __tuple_like_impl> 
: true_type {}; template -struct __tuple_like> : true_type +struct __tuple_like_impl> : true_type {}; #if _CCCL_STD_VER >= 2017 && !_CCCL_COMPILER(MSVC2017) template -struct __tuple_like<_CUDA_VRANGES::subrange<_Ip, _Sp, _Kp>> : true_type +struct __tuple_like_impl<_CUDA_VRANGES::subrange<_Ip, _Sp, _Kp>> : true_type {}; #endif // _CCCL_STD_VER >= 2017 && !_CCCL_COMPILER(MSVC2017) template -struct __tuple_like<__tuple_types<_Tp...>> : true_type +struct __tuple_like_impl<__tuple_types<_Tp...>> : true_type {}; +#if _CCCL_STD_VER >= 2014 +template +_CCCL_CONCEPT __tuple_like = __tuple_like_impl>::value; + +template +_CCCL_CONCEPT __pair_like = _CCCL_REQUIRES_EXPR((_Tp)) // + (requires(__tuple_like_impl>::value), requires(tuple_size>::value == 2)); +#endif // _CCCL_STD_VER >= 2014 + _LIBCUDACXX_END_NAMESPACE_STD #endif // _LIBCUDACXX___TUPLE_TUPLE_LIKE_H diff --git a/libcudacxx/include/cuda/std/__type_traits/fold.h b/libcudacxx/include/cuda/std/__type_traits/fold.h index abb079dc076..288fdac1ddb 100644 --- a/libcudacxx/include/cuda/std/__type_traits/fold.h +++ b/libcudacxx/include/cuda/std/__type_traits/fold.h @@ -20,6 +20,7 @@ # pragma system_header #endif // no system header +#include #include #include #include @@ -64,6 +65,93 @@ _CCCL_INLINE_VAR constexpr bool __fold_or_v = __fold_or<_Preds...>::value; #endif // _CCCL_NO_FOLD_EXPRESSIONS +#if _CCCL_STD_VER >= 2017 +# define _CCCL_FOLD_AND(__pred) (__pred && ... && true) +# define _CCCL_FOLD_OR(__pred) (__pred || ...) +# define _CCCL_FOLD_PLUS(__init, __args) (__args + ... + __init) +# define _CCCL_FOLD_TIMES(__init, __args) (__args * ... * __init) + +#else // ^^^ _CCCL_STD_VER >= 2017 ^^^ / vvv _CCCL_STD_VER <= 2014 vvv + +template +_LIBCUDACXX_HIDE_FROM_ABI constexpr _Tp __variadic_sum(_Tp __init) noexcept +{ + return __init; +} + +template +_LIBCUDACXX_HIDE_FROM_ABI constexpr _Tp __variadic_sum(_Tp __init, _Args... 
__args) +{ + const _Tp __arr[sizeof...(_Args)] = {static_cast<_Tp>(__args)...}; + for (size_t __i = 0; __i < sizeof...(_Args); ++__i) + { + __init += __arr[__i]; + } + return __init; +} + +template +_LIBCUDACXX_HIDE_FROM_ABI constexpr _Tp __variadic_times(_Tp __init) noexcept +{ + return __init; +} + +template +_LIBCUDACXX_HIDE_FROM_ABI constexpr _Tp __variadic_times(_Tp __init, _Args... __args) +{ + const _Tp __arr[sizeof...(_Args)] = {static_cast<_Tp>(__args)...}; + for (size_t __i = 0; __i < sizeof...(_Args); ++__i) + { + __init *= __arr[__i]; + } + return __init; +} + +_LIBCUDACXX_HIDE_FROM_ABI constexpr bool __variadic_and() noexcept +{ + return true; +} + +template +_LIBCUDACXX_HIDE_FROM_ABI constexpr bool __variadic_and(_Args... __args) +{ + const bool __arr[sizeof...(_Args)] = {static_cast(__args)...}; + for (size_t __i = 0; __i < sizeof...(_Args); ++__i) + { + if (!__arr[__i]) + { + return false; + } + } + return true; +} + +_LIBCUDACXX_HIDE_FROM_ABI constexpr bool __variadic_or() noexcept +{ + return false; +} + +template +_LIBCUDACXX_HIDE_FROM_ABI constexpr bool __variadic_or(_Args... __args) +{ + const bool __arr[sizeof...(_Args)] = {static_cast(__args)...}; + for (size_t __i = 0; __i < sizeof...(_Args); ++__i) + { + if (__arr[__i]) + { + return true; + } + } + return false; +} + +# define _CCCL_FOLD_AND(__pred) _CUDA_VSTD::__variadic_and(__pred...) +# define _CCCL_FOLD_OR(__pred) _CUDA_VSTD::__variadic_or(__pred...) +# define _CCCL_FOLD_PLUS(__init, __args) _CUDA_VSTD::__variadic_sum(__init, __args...) +# define _CCCL_FOLD_TIMES(__init, __args) _CUDA_VSTD::__variadic_times(__init, __args...) 
+ +#endif // _CCCL_STD_VER <= 2014 + _LIBCUDACXX_END_NAMESPACE_STD #endif // _LIBCUDACXX___TYPE_TRAITS_FOLD_H diff --git a/libcudacxx/include/cuda/std/detail/libcxx/include/tuple b/libcudacxx/include/cuda/std/detail/libcxx/include/tuple index 5457b9f3d2a..17f9b1e50e5 100644 --- a/libcudacxx/include/cuda/std/detail/libcxx/include/tuple +++ b/libcudacxx/include/cuda/std/detail/libcxx/include/tuple @@ -1234,7 +1234,7 @@ template struct __tuple_cat_return_1, true, _Tuple0, _Tuple1, _Tuples...> : public __tuple_cat_return_1< typename __tuple_cat_type, __make_tuple_types_t>>::type, - __tuple_like>::value, + __tuple_like_impl>::value, _Tuple1, _Tuples...> {}; @@ -1244,7 +1244,7 @@ struct __tuple_cat_return; template struct __tuple_cat_return<_Tuple0, _Tuples...> - : public __tuple_cat_return_1, __tuple_like>::value, _Tuple0, _Tuples...> + : public __tuple_cat_return_1, __tuple_like_impl>::value, _Tuple0, _Tuples...> {}; template <> diff --git a/libcudacxx/include/cuda/std/mdspan b/libcudacxx/include/cuda/std/mdspan index 78af32335ad..15fdcf27aa1 100644 --- a/libcudacxx/include/cuda/std/mdspan +++ b/libcudacxx/include/cuda/std/mdspan @@ -21,17 +21,18 @@ # pragma system_header #endif // no system header +_CCCL_PUSH_MACROS + +#include +#include #include -#include #include -#include #include #include #include -#include #include -#include -#include #include +_CCCL_POP_MACROS + #endif // _CUDA_STD_MDSPAN diff --git a/libcudacxx/test/libcudacxx/libcxx/containers/compressed_pair.pass.cpp b/libcudacxx/test/libcudacxx/libcxx/containers/compressed_pair.pass.cpp deleted file mode 100644 index 90bdeec7d15..00000000000 --- a/libcudacxx/test/libcudacxx/libcxx/containers/compressed_pair.pass.cpp +++ /dev/null @@ -1,45 +0,0 @@ -// -*- C++ -*- -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: nvrtc -// UNSUPPORTED: msvc && c++14 -// UNSUPPORTED: msvc && c++17 - -#include -#include - -#include "test_macros.h" - -// We are experiencing data corruption on clang when passing a mdspan mapping around where on of the subtypes is empty -struct empty -{}; -struct mapping -{ - using __member_pair_t = _CUDA_VSTD::__detail::__compressed_pair; - _CCCL_NO_UNIQUE_ADDRESS __member_pair_t __members; -}; - -__global__ void kernel(mapping arg1, mapping arg2) -{ - assert(arg1.__members.__second() == arg2.__members.__second()); -} - -void test() -{ - mapping strided{{empty{}, 1}}; - kernel<<<1, 1>>>(strided, strided); - cudaDeviceSynchronize(); -} - -int main(int, char**) -{ - NV_IF_TARGET(NV_IS_HOST, test();) - return 0; -} diff --git a/libcudacxx/test/libcudacxx/libcxx/iterators/unwrap_iter.pass.cpp b/libcudacxx/test/libcudacxx/libcxx/iterators/unwrap_iter.pass.cpp index 7d70944cadf..6c0694bb799 100644 --- a/libcudacxx/test/libcudacxx/libcxx/iterators/unwrap_iter.pass.cpp +++ b/libcudacxx/test/libcudacxx/libcxx/iterators/unwrap_iter.pass.cpp @@ -34,10 +34,14 @@ template using rev_rev_iter = rev_iter>; static_assert(cuda::std::is_same, int*>::value, ""); +#ifndef _LIBCUDACXX_ENABLE_DEBUG_MODE static_assert(cuda::std::is_same>, int*>::value, ""); +#endif // _LIBCUDACXX_ENABLE_DEBUG_MODE static_assert(cuda::std::is_same>, cuda::std::reverse_iterator>::value, ""); static_assert(cuda::std::is_same>, int*>::value, ""); +#ifndef _LIBCUDACXX_ENABLE_DEBUG_MODE static_assert(cuda::std::is_same>>, int*>::value, ""); +#endif // _LIBCUDACXX_ENABLE_DEBUG_MODE static_assert(cuda::std::is_same>>>, rev_iter>>::value, ""); diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/CommonHelpers.h b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/CommonHelpers.h new file mode 100644 index 
00000000000..c51ea80abdf --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/CommonHelpers.h @@ -0,0 +1,100 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +#ifndef TEST_STD_CONTAINERS_VIEWS_MDSPAN_COMMON_HELPERS_TYPE_H +#define TEST_STD_CONTAINERS_VIEWS_MDSPAN_COMMON_HELPERS_TYPE_H + +#include +#include +#include + +#include "test_macros.h" + +template , int> = 0> +__host__ __device__ constexpr void test_equality_handle(const MDS& m, const H& handle) +{ + assert(m.data_handle() == handle); +} +template , int> = 0> +__host__ __device__ constexpr void test_equality_handle(const MDS&, const H&) +{} + +template , int> = 0> +__host__ __device__ constexpr void test_equality_mapping(const MDS& m, const M& map) +{ + assert(m.mapping() == map); +} +template , int> = 0> +__host__ __device__ constexpr void test_equality_mapping(const MDS&, const M&) +{} + +template , int> = 0> +__host__ __device__ constexpr void test_equality_accessor(const MDS& m, const A& acc) +{ + assert(m.accessor() == acc); +} +template , int> = 0> +__host__ __device__ constexpr void test_equality_accessor(const MDS&, const A&) +{} + +template , + int> = 0> +__host__ __device__ constexpr void test_equality_with_handle(const ToMDS& to_mds, const FromMDS& from_mds) +{ + assert(to_mds.data_handle() == from_mds.data_handle()); +} + +template , + int> = 0> +__host__ __device__ constexpr void test_equality_with_handle(const ToMDS&, const FromMDS&) +{} + +template , + int> = 0> +__host__ __device__ constexpr void test_equality_with_mapping(const ToMDS& to_mds, const FromMDS& 
from_mds) +{ + assert(to_mds.mapping() == from_mds.mapping()); +} + +template , + int> = 0> +__host__ __device__ constexpr void test_equality_with_mapping(const ToMDS&, const FromMDS&) +{} + +template , + int> = 0> +__host__ __device__ constexpr void test_equality_with_accessor(const ToMDS& to_mds, const FromMDS& from_mds) +{ + assert(to_mds.accessor() == from_mds.accessor()); +} + +template , + int> = 0> +__host__ __device__ constexpr void test_equality_with_accessor(const ToMDS&, const FromMDS&) +{} + +#endif // TEST_STD_CONTAINERS_VIEWS_MDSPAN_COMMON_HELPERS_TYPE_H diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/ConvertibleToIntegral.h b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/ConvertibleToIntegral.h new file mode 100644 index 00000000000..a42d265ce46 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/ConvertibleToIntegral.h @@ -0,0 +1,99 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. 
+// +//===----------------------------------------------------------------------===// + +#ifndef TEST_STD_CONTAINERS_VIEWS_MDSPAN_CONVERTIBLE_TO_INTEGRAL_H +#define TEST_STD_CONTAINERS_VIEWS_MDSPAN_CONVERTIBLE_TO_INTEGRAL_H + +#include "CommonHelpers.h" +#include "test_macros.h" + +struct IntType +{ + int val; + IntType() = default; + __host__ __device__ constexpr IntType(int v) noexcept + : val(v){}; + + __host__ __device__ constexpr bool operator==(const IntType& rhs) const + { + return val == rhs.val; + } +#if TEST_STD_VER < 2020 + __host__ __device__ constexpr bool operator!=(const IntType& rhs) const + { + return val != rhs.val; + } +#endif // TEST_STD_VER < 2020 + __host__ __device__ constexpr operator int() const noexcept + { + return val; + } + __host__ __device__ constexpr operator unsigned char() const + { + return val; + } + __host__ __device__ constexpr operator char() const noexcept + { + return val; + } +}; + +// only non-const convertible +struct IntTypeNC +{ + int val; + IntTypeNC() = default; + __host__ __device__ constexpr IntTypeNC(int v) noexcept + : val(v){}; + + __host__ __device__ constexpr bool operator==(const IntType& rhs) const + { + return val == rhs.val; + } +#if TEST_STD_VER < 2020 + __host__ __device__ constexpr bool operator!=(const IntType& rhs) const + { + return val != rhs.val; + } +#endif // TEST_STD_VER < 2020 + __host__ __device__ constexpr operator int() noexcept + { + return val; + } + __host__ __device__ constexpr operator unsigned() + { + return val; + } + __host__ __device__ constexpr operator char() noexcept + { + return val; + } +}; + +// weird configurability of convertibility to int +template +struct IntConfig +{ + int val; + __host__ __device__ constexpr explicit IntConfig(int val_) + : val(val_) + {} + template = 0> + __host__ __device__ constexpr operator int() noexcept(ctor_nt_nc) + { + return val; + } + template = 0> + __host__ __device__ constexpr operator int() const noexcept(ctor_nt_c) + { + return val; + } +}; 
+ +#endif // TEST_STD_CONTAINERS_VIEWS_MDSPAN_CONVERTIBLE_TO_INTEGRAL_H diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/CustomTestLayouts.h b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/CustomTestLayouts.h new file mode 100644 index 00000000000..3b1cd56f7d0 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/CustomTestLayouts.h @@ -0,0 +1,569 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +// Kokkos v. 4.0 +// Copyright (2022) National Technology & Engineering +// Solutions of Sandia, LLC (NTESS). +// +// Under the terms of Contract DE-NA0003525 with NTESS, +// the U.S. Government retains certain rights in this software. +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===---------------------------------------------------------------------===// + +#ifndef TEST_STD_CONTAINERS_VIEWS_MDSPAN_CUSTOM_TEST_LAYOUTS_H +#define TEST_STD_CONTAINERS_VIEWS_MDSPAN_CUSTOM_TEST_LAYOUTS_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "test_macros.h" + +// Layout that wraps indices to test some idiosyncratic behavior +// - basically it is a layout_left where indices are first wrapped i.e. 
i%Wrap +// - only accepts integers as indices +// - is_always_strided and is_always_unique are false +// - is_strided and is_unique are true if all extents are smaller than Wrap +// - not default constructible +// - not extents constructible +// - not trivially copyable +// - does not check dynamic to static extent conversion in converting ctor +// - check via side-effects that mdspan::swap calls mappings swap via ADL + +__host__ __device__ bool mul_overflow(size_t x, size_t y, size_t* res) +{ + *res = x * y; + return x && ((*res / x) != y); +} + +template +__host__ __device__ inline const T& Min(const T& __a, const T& __b) +{ + return __b < __a ? __b : __a; +} + +struct not_extents_constructible_tag +{}; + +STATIC_TEST_GLOBAL_VAR int layout_wrapping_integral_swap_counter = 0; +template +class layout_wrapping_integral +{ +public: + template + class mapping; +}; + +template +template +class layout_wrapping_integral::mapping +{ + static constexpr typename Extents::index_type Wrap = static_cast(WrapArg); + +public: + using extents_type = Extents; + using index_type = typename extents_type::index_type; + using size_type = typename extents_type::size_type; + using rank_type = typename extents_type::rank_type; + using layout_type = layout_wrapping_integral; + +private: + template = 0> + __host__ __device__ static constexpr bool required_span_size_is_representable(const extents_type& ext) + { + return true; + } + + template = 0> + __host__ __device__ static constexpr bool required_span_size_is_representable(const extents_type& ext) + { + index_type prod = ext.extent(0); + for (rank_type r = 1; r < extents_type::rank(); r++) + { + bool overflowed = mul_overflow(prod, Min(ext.extent(r), Wrap), &prod); + if (overflowed) + { + return false; + } + } + return true; + } + +public: + __host__ __device__ constexpr mapping() noexcept = delete; + __host__ __device__ constexpr mapping(const mapping& other) noexcept + : extents_(other.extents()){}; + template = 0> + __host__ 
__device__ constexpr mapping(extents_type&& ext) noexcept + : extents_(ext) + {} + __host__ __device__ constexpr mapping(const extents_type& ext, not_extents_constructible_tag) noexcept + : extents_(ext) + {} + + template = 0> + __host__ __device__ static constexpr cuda::std::array + get_dyn_extents(const mapping& other) noexcept + { + cuda::std::array dyn_extents; + rank_type count = 0; + for (rank_type r = 0; r != extents_type::rank(); r++) + { + if (extents_type::static_extent(r) == cuda::std::dynamic_extent) + { + dyn_extents[count++] = other.extents().extent(r); + } + } + return dyn_extents; + } + template = 0> + __host__ __device__ static constexpr cuda::std::array + get_dyn_extents(const mapping& other) noexcept + { + return {}; + } + + template < + class OtherExtents, + cuda::std::enable_if_t::value && (Wrap != 8), int> = 0, + cuda::std::enable_if_t::value, int> = 0> + __host__ __device__ constexpr mapping(const mapping& other) noexcept + { + extents_ = extents_type(get_dyn_extents(other)); + } + + template < + class OtherExtents, + cuda::std::enable_if_t::value && (Wrap != 8), int> = 0, + cuda::std::enable_if_t::value, int> = 0> + __host__ __device__ constexpr explicit mapping(const mapping& other) noexcept + { + extents_ = extents_type(get_dyn_extents(other)); + } + + template < + class OtherExtents, + cuda::std::enable_if_t::value && (Wrap == 8), int> = 0, + cuda::std::enable_if_t::value, int> = 0> + __host__ __device__ constexpr mapping(mapping&& other) noexcept + { + extents_ = extents_type(get_dyn_extents(other)); + } + + template < + class OtherExtents, + cuda::std::enable_if_t::value && (Wrap == 8), int> = 0, + cuda::std::enable_if_t::value, int> = 0> + __host__ __device__ constexpr explicit mapping(mapping&& other) noexcept + { + extents_ = extents_type(get_dyn_extents(other)); + } + + __host__ __device__ constexpr mapping& operator=(const mapping& other) noexcept + { + extents_ = other.extents_; + return *this; + }; + + __host__ __device__ 
constexpr const extents_type& extents() const noexcept + { + return extents_; + } + + __host__ __device__ constexpr index_type required_span_size() const noexcept + { + index_type size = 1; + for (size_t r = 0; r != extents_type::rank(); r++) + { + size *= extents_.extent(r) < Wrap ? extents_.extent(r) : Wrap; + } + return size; + } + + struct rank_accumulator + { + __host__ __device__ constexpr rank_accumulator(const extents_type& extents) noexcept + : extents_(extents) + {} + + template + __host__ __device__ constexpr index_type operator()(cuda::std::index_sequence, Indices... idx) const noexcept + { + cuda::std::array idx_a{ + static_cast(static_cast(idx) % Wrap)...}; + cuda::std::array position = {(extents_type::rank() - 1 - Pos)...}; + + index_type res = 0; + for (size_t index = 0; index < extents_type::rank(); ++index) + { + res = idx_a[index] + (extents_.extent(index) < Wrap ? extents_.extent(index) : Wrap) * res; + } + return res; + } + + const extents_type& extents_{}; + }; + + template < + class... Indices, + cuda::std::enable_if_t = 0, + cuda::std::enable_if_t...>::value, int> = 0, + cuda::std::enable_if_t::value...>::value, int> = 0, + cuda::std::enable_if_t::value...>::value, + int> = 0> + __host__ __device__ constexpr index_type operator()(Indices... 
idx) const noexcept + { + return rank_accumulator{extents_}(cuda::std::make_index_sequence(), idx...); + } + + __host__ __device__ static constexpr bool is_always_unique() noexcept + { + return false; + } + __host__ __device__ static constexpr bool is_always_exhaustive() noexcept + { + return true; + } + __host__ __device__ static constexpr bool is_always_strided() noexcept + { + return false; + } + + TEST_NV_DIAG_SUPPRESS(186) // pointless comparison of unsigned integer with zero + + __host__ __device__ constexpr bool is_unique() const noexcept + { + for (rank_type r = 0; r != extents_type::rank(); r++) + { + if (extents_.extent(r) > Wrap) + { + return false; + } + } + return true; + } + __host__ __device__ static constexpr bool is_exhaustive() noexcept + { + return true; + } + __host__ __device__ constexpr bool is_strided() const noexcept + { + for (rank_type r = 0; r != extents_type::rank(); r++) + { + if (extents_.extent(r) > Wrap) + { + return false; + } + } + return true; + } + + template 0), int> = 0> + __host__ __device__ constexpr index_type stride(rank_type r) const noexcept + { + index_type s = 1; + for (rank_type i = extents_type::rank() - 1; i > r; i--) + { + s *= extents_.extent(i); + } + return s; + } + + template = 0> + __host__ __device__ friend constexpr bool operator==(const mapping& lhs, const mapping& rhs) noexcept + { + return lhs.extents() == rhs.extents(); + } + +#if TEST_STD_VER <= 2017 + template = 0> + __host__ __device__ friend constexpr bool operator!=(const mapping& lhs, const mapping& rhs) noexcept + { + return lhs.extents() != rhs.extents(); + } +#endif // TEST_STD_VER <= 2017 + + __host__ __device__ friend constexpr void swap(mapping& x, mapping& y) noexcept + { + swap(x.extents_, y.extents_); + if (!cuda::std::__cccl_default_is_constant_evaluated()) + { + layout_wrapping_integral_swap_counter++; + } + } + + __host__ __device__ static int& swap_counter() + { + return layout_wrapping_integral_swap_counter; + } + +private: + 
extents_type extents_{}; +}; + +template < + class MDS, + cuda::std::enable_if_t>::value, int> = 0> +__host__ __device__ constexpr void test_swap_counter() +{ + if (!cuda::std::__cccl_default_is_constant_evaluated()) + { + assert(MDS::mapping_type::swap_counter() > 0); + } +} +template < + class MDS, + cuda::std::enable_if_t>::value, int> = 0> +__host__ __device__ constexpr void test_swap_counter() +{} + +template +__host__ __device__ constexpr auto construct_mapping(cuda::std::layout_left, Extents exts) +{ + return cuda::std::layout_left::mapping(exts); +} + +template +__host__ __device__ constexpr auto construct_mapping(cuda::std::layout_right, Extents exts) +{ + return cuda::std::layout_right::mapping(exts); +} + +template +__host__ __device__ constexpr auto construct_mapping(layout_wrapping_integral, Extents exts) +{ + return typename layout_wrapping_integral::template mapping(exts, not_extents_constructible_tag{}); +} + +// This layout does not check convertibility of extents for its conversion ctor +// Allows triggering mdspan's ctor static assertion on convertibility of extents +STATIC_TEST_GLOBAL_VAR int always_convertible_layout_swap_counter = 0; +class always_convertible_layout +{ +public: + template + class mapping; +}; + +template +class always_convertible_layout::mapping +{ +public: + using extents_type = Extents; + using index_type = typename extents_type::index_type; + using size_type = typename extents_type::size_type; + using rank_type = typename extents_type::rank_type; + using layout_type = always_convertible_layout; + +private: + template = 0> + __host__ __device__ static constexpr bool required_span_size_is_representable(const extents_type& ext) + { + return true; + } + template = 0> + __host__ __device__ static constexpr bool required_span_size_is_representable(const extents_type& ext) + { + index_type prod = ext.extent(0); + for (rank_type r = 1; r < extents_type::rank(); r++) + { + bool overflowed = mul_overflow(prod, ext.extent(r), &prod); 
+ if (overflowed) + { + return false; + } + } + return true; + } + +public: + __host__ __device__ constexpr mapping() noexcept = delete; + __host__ __device__ constexpr mapping(const mapping& other) noexcept + : extents_(other.extents_) + , offset_(other.offset_) + , scaling_(other.scaling_) + {} + __host__ __device__ constexpr mapping(const extents_type& ext) noexcept + : extents_(ext) + , offset_(0) + , scaling_(1){}; + __host__ __device__ constexpr mapping(const extents_type& ext, index_type offset) noexcept + : extents_(ext) + , offset_(offset) + , scaling_(1){}; + __host__ __device__ constexpr mapping(const extents_type& ext, index_type offset, index_type scaling) noexcept + : extents_(ext) + , offset_(offset) + , scaling_(scaling){}; + + template = 0> + __host__ __device__ constexpr mapping(const mapping& other) noexcept + { + cuda::std::array dyn_extents; + rank_type count = 0; + for (rank_type r = 0; r != extents_type::rank(); r++) + { + if (extents_type::static_extent(r) == cuda::std::dynamic_extent) + { + dyn_extents[count++] = other.extents().extent(r); + } + } + extents_ = extents_type(dyn_extents); + offset_ = other.offset_; + scaling_ = other.scaling_; + } + template = 0> + __host__ __device__ constexpr mapping(const mapping& other) noexcept + { + extents_ = extents_type(); + offset_ = other.offset_; + scaling_ = other.scaling_; + } + + __host__ __device__ constexpr mapping& operator=(const mapping& other) noexcept + { + extents_ = other.extents_; + offset_ = other.offset_; + scaling_ = other.scaling_; + return *this; + }; + + __host__ __device__ constexpr const extents_type& extents() const noexcept + { + return extents_; + } + + __host__ __device__ static constexpr const index_type& Max(const index_type& __a, const index_type& __b) noexcept + { + return __a > __b ? 
__a : __b; + } + + __host__ __device__ constexpr index_type required_span_size() const noexcept + { + index_type size = 1; + for (size_t r = 0; r != extents_type::rank(); r++) + { + size *= extents_.extent(r); + } + return Max(size * scaling_ + offset_, offset_); + } + + struct rank_accumulator + { + __host__ __device__ constexpr rank_accumulator(const extents_type& extents) noexcept + : extents_(extents) + {} + + template + __host__ __device__ constexpr index_type operator()(cuda::std::index_sequence, Indices... idx) const noexcept + { + cuda::std::array idx_a{ + static_cast(static_cast(idx))...}; + cuda::std::array position = {(extents_type::rank() - 1 - Pos)...}; + + index_type res = 0; + for (size_t index = 0; index < extents_type::rank(); ++index) + { + res = idx_a[index] + extents_.extent(index) * res; + } + return res; + } + + const extents_type& extents_{}; + }; + + template < + class... Indices, + cuda::std::enable_if_t = 0, + cuda::std::enable_if_t...>::value, int> = 0, + cuda::std::enable_if_t::value...>::value, int> = 0, + cuda::std::enable_if_t::value...>::value, + int> = 0> + __host__ __device__ constexpr index_type operator()(Indices... 
idx) const noexcept + { + return offset_ + + scaling_ * rank_accumulator{extents_}(cuda::std::make_index_sequence(), idx...); + } + + __host__ __device__ static constexpr bool is_always_unique() noexcept + { + return true; + } + __host__ __device__ static constexpr bool is_always_exhaustive() noexcept + { + return true; + } + __host__ __device__ static constexpr bool is_always_strided() noexcept + { + return true; + } + + __host__ __device__ static constexpr bool is_unique() noexcept + { + return true; + } + __host__ __device__ static constexpr bool is_exhaustive() noexcept + { + return true; + } + __host__ __device__ static constexpr bool is_strided() noexcept + { + return true; + } + + template 0), int> = 0> + __host__ __device__ constexpr index_type stride(rank_type r) const noexcept + { + index_type s = 1; + for (rank_type i = 0; i < r; i++) + { + s *= extents_.extent(i); + } + return s * scaling_; + } + + template + __host__ __device__ friend constexpr auto operator==(const mapping& lhs, const mapping& rhs) noexcept + -> cuda::std::enable_if_t + { + return lhs.extents() == rhs.extents() && lhs.offset_ == rhs.offset && lhs.scaling_ == rhs.scaling_; + } + +#if TEST_STD_VER < 2020 + template + __host__ __device__ friend constexpr auto operator!=(const mapping& lhs, const mapping& rhs) noexcept + -> cuda::std::enable_if_t + { + return !(lhs == rhs); + } +#endif // TEST_STD_VER < 2020 + + __host__ __device__ friend constexpr void swap(mapping& x, mapping& y) noexcept + { + swap(x.extents_, y.extents_); + if (!cuda::std::__cccl_default_is_constant_evaluated()) + { + always_convertible_layout_swap_counter++; + } + } + +private: + template + friend class mapping; + + extents_type extents_{}; + index_type offset_{}; + index_type scaling_{}; +}; +#endif // TEST_STD_CONTAINERS_VIEWS_MDSPAN_CUSTOM_TEST_LAYOUTS_H diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/MinimalElementType.h 
b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/MinimalElementType.h new file mode 100644 index 00000000000..a73c8e00ef3 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/MinimalElementType.h @@ -0,0 +1,116 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +#ifndef TEST_STD_CONTAINERS_VIEWS_MDSPAN_MINIMAL_ELEMENT_TYPE_H +#define TEST_STD_CONTAINERS_VIEWS_MDSPAN_MINIMAL_ELEMENT_TYPE_H + +#include + +#include + +#include "CommonHelpers.h" +#include "test_macros.h" + +// Idiosyncratic element type for mdspan +// Make sure we don't assume copyable, default constructible, movable etc. 
+struct MinimalElementType +{ + int val; + constexpr MinimalElementType() = delete; + constexpr MinimalElementType(const MinimalElementType&) = delete; + constexpr MinimalElementType& operator=(const MinimalElementType&) = delete; + __host__ __device__ constexpr explicit MinimalElementType(int v) noexcept + : val(v) + {} +}; + +// Helper class to create pointer to MinimalElementType +template +struct ElementPool +{ + __host__ __device__ TEST_CONSTEXPR_CXX20 ElementPool() + { +#if TEST_STD_VER >= 2020 + if (cuda::std::is_constant_evaluated()) + { + // clang-format off + NV_IF_TARGET(NV_IS_HOST, ( + std::construct_at(&constexpr_ptr_, std::allocator>{}.allocate(N)); + for (int i = 0; i != N;++i) + { + std::construct_at(constexpr_ptr_ + i, 42); + } + )) + // clang-format on + } + else +#endif // TEST_STD_VER >= 2020 + { + T* ptr = reinterpret_cast(ptr_); + for (int i = 0; i != N; ++i) + { + cuda::std::__construct_at(ptr + i, 42); + } + } + } + + __host__ __device__ constexpr T* get_ptr() + { +#if TEST_STD_VER >= 2020 + if (cuda::std::is_constant_evaluated()) + { + // clang-format off + NV_IF_ELSE_TARGET(NV_IS_HOST, ( + return constexpr_ptr_; + ),( + return nullptr; + )) + // clang-format on + } + else +#endif // TEST_STD_VER >= 2020 + { + return reinterpret_cast(ptr_); + } + } + + __host__ __device__ TEST_CONSTEXPR_CXX20 ~ElementPool() + { +#if TEST_STD_VER >= 2020 + if (cuda::std::is_constant_evaluated()) + { + // clang-format off + NV_IF_TARGET(NV_IS_HOST,( + for (int i = 0; i != N; ++i) { + std::destroy_at(constexpr_ptr_ + i); + } + std::allocator>{}.deallocate(constexpr_ptr_, N); + )) + return; + // clang-format on + } + else +#endif // TEST_STD_VER >= 2020 + { + for (int i = 0; i != N; ++i) + { + cuda::std::__destroy_at(ptr_ + i); + } + } + } + +private: + union + { + char ptr_[N * sizeof(T)] = {}; + cuda::std::remove_const_t* constexpr_ptr_; + }; +}; + +#endif // TEST_STD_CONTAINERS_VIEWS_MDSPAN_MINIMAL_ELEMENT_TYPE_H diff --git 
a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/default_accessor/access.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/default_accessor/access.pass.cpp new file mode 100644 index 00000000000..c8574691e75 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/default_accessor/access.pass.cpp @@ -0,0 +1,59 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// constexpr reference access(data_handle_type p, size_t i) const noexcept; +// +// Effects: Equivalent to: return p[i]; + +#include +#include +#include + +#include "../MinimalElementType.h" +#include "test_macros.h" + +template +__host__ __device__ constexpr void test_access() +{ + ElementPool, 10> data; + T* ptr = data.get_ptr(); + cuda::std::default_accessor acc; + for (int i = 0; i < 10; i++) + { + static_assert( + cuda::std::is_same::reference>::value, ""); + ASSERT_NOEXCEPT(acc.access(ptr, i)); + assert(&acc.access(ptr, i) == ptr + i); + } +} + +__host__ __device__ constexpr bool test() +{ + test_access(); + test_access(); + test_access(); + test_access(); + return true; +} + +int main(int, char**) +{ + test(); +#if TEST_STD_VER >= 2020 + NV_IF_TARGET(NV_IS_HOST, + ( // This fails because we cannot allocate on device at compile time + static_assert(test(), "");)) +#endif // TEST_STD_VER >= 2020 + + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/default_accessor/ctor.conversion.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/default_accessor/ctor.conversion.pass.cpp new file 
mode 100644 index 00000000000..4b94e08dc7a --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/default_accessor/ctor.conversion.pass.cpp @@ -0,0 +1,82 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// +// +// Test converting constructor: +// +// template +// constexpr default_accessor(default_accessor) noexcept {} +// +// Constraints: is_convertible_v is true. + +#include +#include +#include +#include + +#include "../MinimalElementType.h" +#include "test_macros.h" + +struct Base +{}; +struct Derived : public Base +{}; + +template +__host__ __device__ constexpr void test_conversion() +{ + cuda::std::default_accessor acc_from; + ASSERT_NOEXCEPT(cuda::std::default_accessor(acc_from)); + cuda::std::default_accessor acc_to(acc_from); + unused(acc_to); +} + +__host__ __device__ constexpr bool test() +{ + // default accessor conversion largely behaves like pointer conversion + test_conversion(); + test_conversion(); + test_conversion(); + test_conversion(); + test_conversion(); + test_conversion(); + + // char is convertible to int, but accessors are not + static_assert( + !cuda::std::is_constructible, cuda::std::default_accessor>::value, ""); + // don't allow conversion from const elements to non-const + static_assert( + !cuda::std::is_constructible, cuda::std::default_accessor>::value, ""); + // MinimalElementType is constructible from int, but accessors should not be convertible + static_assert(!cuda::std::is_constructible, + cuda::std::default_accessor>::value, + ""); + // don't allow conversion from const 
elements to non-const + static_assert(!cuda::std::is_constructible, + cuda::std::default_accessor>::value, + ""); + // don't allow conversion from Base to Derived + static_assert( + !cuda::std::is_constructible, cuda::std::default_accessor>::value, ""); + // don't allow conversion from Derived to Base + static_assert( + !cuda::std::is_constructible, cuda::std::default_accessor>::value, ""); + + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/default_accessor/ctor.default.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/default_accessor/ctor.default.pass.cpp new file mode 100644 index 00000000000..922a3565482 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/default_accessor/ctor.default.pass.cpp @@ -0,0 +1,49 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. 
+// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// Test default construction: +// +// constexpr default_accessor() noexcept = default; + +#include +#include +#include +#include + +#include "../MinimalElementType.h" +#include "test_macros.h" + +template +__host__ __device__ constexpr void test_construction() +{ + ASSERT_NOEXCEPT(cuda::std::default_accessor{}); + cuda::std::default_accessor acc; + static_assert(cuda::std::is_trivially_default_constructible>::value); + unused(acc); +} + +__host__ __device__ constexpr bool test() +{ + test_construction(); + test_construction(); + test_construction(); + test_construction(); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/default_accessor/element_type.verify.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/default_accessor/element_type.verify.cpp new file mode 100644 index 00000000000..b6941185810 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/default_accessor/element_type.verify.cpp @@ -0,0 +1,47 @@ +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 +// UNSUPPORTED: nvrtc + +// + +// template +// class default_accessor; + +// ElementType is required to be a complete object type that is neither an abstract class type nor an array type. 
+ +#include + +#include "test_macros.h" + +class AbstractClass +{ +public: + __host__ __device__ virtual void method() = 0; +}; + +__host__ __device__ void not_abstract_class() +{ + // expected-error-re@*:* {{{{(static_assert|static assertion)}} failed {{.*}}default_accessor: template argument may + // not be an abstract class}} + cuda::std::default_accessor acc; + unused(acc); +} + +__host__ __device__ void not_array_type() +{ + // expected-error-re@*:* {{{{(static_assert|static assertion)}} failed {{.*}}default_accessor: template argument may + // not be an array type}} + cuda::std::default_accessor acc; + unused(acc); +} + +int main(int, char**) +{ + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/default_accessor/offset.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/default_accessor/offset.pass.cpp new file mode 100644 index 00000000000..4b58885672c --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/default_accessor/offset.pass.cpp @@ -0,0 +1,60 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. 
+// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// constexpr data_handle_type offset(data_handle_type p, size_t i) const noexcept; +// +// Effects: Equivalent to: return p+i; + +#include +#include +#include + +#include "../MinimalElementType.h" +#include "test_macros.h" + +template +__host__ __device__ constexpr void test_offset() +{ + ElementPool, 10> data; + T* ptr = data.get_ptr(); + cuda::std::default_accessor acc; + for (int i = 0; i < 10; i++) + { + static_assert( + cuda::std::is_same::data_handle_type>::value, + ""); + ASSERT_NOEXCEPT(acc.offset(ptr, i)); + assert(acc.offset(ptr, i) == ptr + i); + } +} + +__host__ __device__ constexpr bool test() +{ + test_offset(); + test_offset(); + test_offset(); + test_offset(); + return true; +} + +int main(int, char**) +{ + test(); +#if TEST_STD_VER >= 2020 + NV_IF_TARGET(NV_IS_HOST, + ( // This fails because we cannot allocate on device at compile time + static_assert(test(), "");)) +#endif // TEST_STD_VER >= 2020 + + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/default_accessor/types.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/default_accessor/types.pass.cpp new file mode 100644 index 00000000000..0a552001920 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/default_accessor/types.pass.cpp @@ -0,0 +1,56 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. 
+// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// template +// struct default_accessor { +// using offset_policy = default_accessor; +// using element_type = ElementType; +// using reference = ElementType&; +// using data_handle_type = ElementType*; +// ... +// }; +// +// Each specialization of default_accessor is a trivially copyable type that models semiregular. + +#include +#include +#include +#include + +#include "../MinimalElementType.h" +#include "test_macros.h" + +template +__host__ __device__ void test() +{ + using A = cuda::std::default_accessor; + ASSERT_SAME_TYPE(typename A::offset_policy, A); + ASSERT_SAME_TYPE(typename A::element_type, T); + ASSERT_SAME_TYPE(typename A::reference, T&); + ASSERT_SAME_TYPE(typename A::data_handle_type, T*); + + static_assert(cuda::std::semiregular, ""); + static_assert(cuda::std::is_trivially_copyable::value, ""); + + // libcu++ extension + static_assert(cuda::std::is_empty::value, ""); +} + +int main(int, char**) +{ + test(); + test(); + test(); + test(); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/CtorTestCombinations.h b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/CtorTestCombinations.h new file mode 100644 index 00000000000..8e30379f7eb --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/CtorTestCombinations.h @@ -0,0 +1,130 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. 
+// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +#include +#include +#include +#include + +#include "../ConvertibleToIntegral.h" +#include "test_macros.h" + +// Helper file to implement combinatorial testing of extents constructor +// +// cuda::std::extents can be constructed from just indices, a cuda::std::array, or a cuda::std::span +// In each of those cases one can either provide all extents, or just the dynamic ones +// If constructed from cuda::std::span, the span needs to have a static extent +// Furthermore, the indices/array/span can have integer types other than index_type + +template = 0> +__host__ __device__ constexpr void test_runtime_observers(E ext, AllExtents expected) +{ + for (typename E::rank_type r = 0; r < ext.rank(); r++) + { + ASSERT_SAME_TYPE(decltype(ext.extent(0)), typename E::index_type); + ASSERT_NOEXCEPT(ext.extent(0)); + assert(ext.extent(r) == static_cast(expected[r])); + } +} + +template = 0> +__host__ __device__ constexpr void test_runtime_observers(E ext, AllExtents expected) +{ + // Nothing to do here +} + +template +__host__ __device__ constexpr void test_implicit_construction_call(E e, AllExtents all_ext) +{ + test_runtime_observers(e, all_ext); +} + +template = 0> +__host__ __device__ constexpr void test_construction(AllExtents all_ext) +{ + // test construction from all extents + Test::template test_construction(all_ext, all_ext, cuda::std::make_index_sequence()); + + // test construction from just dynamic extents + // create an array of just the extents corresponding to dynamic values + cuda::std::array dyn_ext{0}; + Test::template test_construction(all_ext, dyn_ext, cuda::std::make_index_sequence()); +} + +template = 0> +__host__ __device__ constexpr void test_construction(AllExtents all_ext) +{ + // test construction from all extents + Test::template test_construction(all_ext, all_ext, cuda::std::make_index_sequence()); + + // test construction from just 
dynamic extents + // create an array of just the extents corresponding to dynamic values + cuda::std::array dyn_ext{0}; + size_t dynamic_idx = 0; + for (size_t r = 0; r < E::rank(); r++) + { + if (E::static_extent(r) == cuda::std::dynamic_extent) + { + dyn_ext[dynamic_idx] = all_ext[r]; + dynamic_idx++; + } + } + Test::template test_construction(all_ext, dyn_ext, cuda::std::make_index_sequence()); +} + +template +__host__ __device__ constexpr void test() +{ + constexpr size_t D = cuda::std::dynamic_extent; + + test_construction, Test>(cuda::std::array{}); + + test_construction, Test>(cuda::std::array{3}); + test_construction, Test>(cuda::std::array{3}); + + test_construction, Test>(cuda::std::array{3, 7}); + test_construction, Test>(cuda::std::array{3, 7}); + test_construction, Test>(cuda::std::array{3, 7}); + test_construction, Test>(cuda::std::array{3, 7}); + + test_construction, Test>(cuda::std::array{3, 7, 9}); + test_construction, Test>(cuda::std::array{3, 7, 9}); + test_construction, Test>(cuda::std::array{3, 7, 9}); + test_construction, Test>(cuda::std::array{3, 7, 9}); + test_construction, Test>(cuda::std::array{3, 7, 9}); + test_construction, Test>(cuda::std::array{3, 7, 9}); + test_construction, Test>(cuda::std::array{3, 7, 9}); + test_construction, Test>(cuda::std::array{3, 7, 9}); + + test_construction, Test>( + cuda::std::array{1, 2, 3, 4, 5, 6, 7, 8, 9}); + test_construction, Test>( + cuda::std::array{1, 2, 3, 4, 5, 6, 7, 8, 9}); + test_construction, Test>( + cuda::std::array{1, 2, 3, 4, 5, 6, 7, 8, 9}); +} + +template +__host__ __device__ constexpr bool test_index_type_combo() +{ + test(); + test(); + test(); + test(); + test(); + test(); + test(); + test(); + test(); + return true; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/comparison.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/comparison.pass.cpp new file mode 100644 index 00000000000..19b2c9f56d2 --- /dev/null +++ 
b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/comparison.pass.cpp @@ -0,0 +1,104 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// +// +// template +// friend constexpr bool operator==(const extents& lhs, +// const extents& rhs) noexcept; +// +// Returns: true if lhs.rank() equals rhs.rank() and +// if lhs.extent(r) equals rhs.extent(r) for every rank index r of rhs, otherwise false. +// + +#include +#include +#include +#include + +#include "test_macros.h" + +template +__host__ __device__ constexpr void test_comparison(bool equal, To dest, From src) +{ + ASSERT_NOEXCEPT(dest == src); + assert((dest == src) == equal); + assert((dest != src) == !equal); +} + +template +__host__ __device__ constexpr void test_comparison_different_rank() +{ + constexpr size_t D = cuda::std::dynamic_extent; + + test_comparison(false, cuda::std::extents(), cuda::std::extents(1)); + test_comparison(false, cuda::std::extents(), cuda::std::extents()); + + test_comparison(false, cuda::std::extents(1), cuda::std::extents()); + test_comparison(false, cuda::std::extents(), cuda::std::extents()); + + test_comparison(false, cuda::std::extents(5), cuda::std::extents(5, 5)); + test_comparison(false, cuda::std::extents(), cuda::std::extents(5)); + test_comparison(false, cuda::std::extents(), cuda::std::extents()); + + test_comparison(false, cuda::std::extents(5, 5), cuda::std::extents(5)); + test_comparison(false, cuda::std::extents(5), cuda::std::extents(5)); + test_comparison(false, cuda::std::extents(), cuda::std::extents()); +} + +template 
+__host__ __device__ constexpr void test_comparison_same_rank() +{ + constexpr size_t D = cuda::std::dynamic_extent; + + test_comparison(true, cuda::std::extents(), cuda::std::extents()); + + test_comparison(true, cuda::std::extents(5), cuda::std::extents(5)); + test_comparison(true, cuda::std::extents(), cuda::std::extents(5)); + test_comparison(true, cuda::std::extents(5), cuda::std::extents()); + test_comparison(true, cuda::std::extents(), cuda::std::extents()); + test_comparison(false, cuda::std::extents(5), cuda::std::extents(7)); + test_comparison(false, cuda::std::extents(), cuda::std::extents(7)); + test_comparison(false, cuda::std::extents(5), cuda::std::extents()); + test_comparison(false, cuda::std::extents(), cuda::std::extents()); + + test_comparison( + true, cuda::std::extents(5, 6, 7, 8, 9), cuda::std::extents(5, 6, 7, 8, 9)); + test_comparison(true, cuda::std::extents(5, 7, 9), cuda::std::extents(6, 7)); + test_comparison(true, cuda::std::extents(5, 6, 7, 8, 9), cuda::std::extents()); + test_comparison( + false, cuda::std::extents(5, 6, 7, 8, 9), cuda::std::extents(5, 6, 3, 8, 9)); + test_comparison(false, cuda::std::extents(5, 7, 9), cuda::std::extents(6, 7)); + test_comparison(false, cuda::std::extents(5, 6, 7, 8, 9), cuda::std::extents()); +} + +template +__host__ __device__ constexpr void test_comparison() +{ + test_comparison_same_rank(); + test_comparison_different_rank(); +} + +__host__ __device__ constexpr bool test() +{ + test_comparison(); + test_comparison(); + test_comparison(); + test_comparison(); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/conversion.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/conversion.pass.cpp new file mode 100644 index 00000000000..9fd8c607579 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/conversion.pass.cpp @@ -0,0 
+1,154 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// template +// constexpr explicit(see below) extents(const extents&) noexcept; +// +// Constraints: +// * sizeof...(OtherExtents) == rank() is true. +// * ((OtherExtents == dynamic_extent || Extents == dynamic_extent || +// OtherExtents == Extents) && ...) is true. +// +// Preconditions: +// * other.extent(r) equals Er for each r for which Er is a static extent, and +// * either +// - sizeof...(OtherExtents) is zero, or +// - other.extent(r) is representable as a value of type index_type for +// every rank index r of other. +// +// Remarks: The expression inside explicit is equivalent to: +// (((Extents != dynamic_extent) && (OtherExtents == dynamic_extent)) || ... 
) || +// (numeric_limits::max() < numeric_limits::max()) + +#include +#include +#include +#include +#include + +#include "test_macros.h" + +template +__host__ __device__ constexpr void test_implicit_conversion(To dest, From src) +{ + assert(dest == src); +} + +template = 0> +__host__ __device__ constexpr void test_conversion(From src) +{ + To dest(src); + assert(dest == src); + dest = src; + assert(dest == src); + test_implicit_conversion(src, src); +} + +template = 0> +__host__ __device__ constexpr void test_conversion(From src) +{ + To dest(src); + assert(dest == src); +} + +template +__host__ __device__ constexpr void test_conversion() +{ + constexpr size_t D = cuda::std::dynamic_extent; + constexpr bool idx_convertible = static_cast(cuda::std::numeric_limits::max()) + >= static_cast(cuda::std::numeric_limits::max()); + + // clang-format off + test_conversion>(cuda::std::extents()); + test_conversion>(cuda::std::extents(5)); + test_conversion>(cuda::std::extents(5)); + test_conversion>(cuda::std::extents()); + test_conversion>(cuda::std::extents(5, 5)); + test_conversion>(cuda::std::extents(5, 5)); + test_conversion>(cuda::std::extents(5)); + test_conversion>(cuda::std::extents()); + test_conversion>(cuda::std::extents(5, 7)); + test_conversion>( + cuda::std::extents(5, 7, 8, 9, 1)); + test_conversion>(cuda::std::extents(5)); + test_conversion>(cuda::std::extents()); + // clang-format on +} + +__host__ __device__ constexpr void test_no_implicit_conversion() +{ + constexpr size_t D = cuda::std::dynamic_extent; + // Sanity check that one static to dynamic conversion works + static_assert(cuda::std::is_constructible, cuda::std::extents>::value, ""); + static_assert(cuda::std::is_convertible, cuda::std::extents>::value, ""); + + // Check that dynamic to static conversion only works explicitly only + static_assert(cuda::std::is_constructible, cuda::std::extents>::value, ""); + static_assert(!cuda::std::is_convertible, cuda::std::extents>::value, ""); + + // Sanity 
check that one static to dynamic conversion works + static_assert(cuda::std::is_constructible, cuda::std::extents>::value, ""); + static_assert(cuda::std::is_convertible, cuda::std::extents>::value, ""); + + // Check that dynamic to static conversion only works explicitly only + static_assert(cuda::std::is_constructible, cuda::std::extents>::value, ""); + static_assert(!cuda::std::is_convertible, cuda::std::extents>::value, ""); + + // Sanity check that smaller index_type to larger index_type conversion works + static_assert(cuda::std::is_constructible, cuda::std::extents>::value, ""); + static_assert(cuda::std::is_convertible, cuda::std::extents>::value, ""); + + // Check that larger index_type to smaller index_type conversion works explicitly only + static_assert(cuda::std::is_constructible, cuda::std::extents>::value, ""); + static_assert(!cuda::std::is_convertible, cuda::std::extents>::value, ""); +} + +__host__ __device__ constexpr void test_rank_mismatch() +{ + constexpr size_t D = cuda::std::dynamic_extent; + + static_assert(!cuda::std::is_constructible, cuda::std::extents>::value, ""); + static_assert(!cuda::std::is_constructible, cuda::std::extents>::value, ""); + static_assert(!cuda::std::is_constructible, cuda::std::extents>::value, ""); + static_assert(!cuda::std::is_constructible, cuda::std::extents>::value, + ""); +} + +__host__ __device__ constexpr void test_static_extent_mismatch() +{ + constexpr size_t D = cuda::std::dynamic_extent; + + static_assert(!cuda::std::is_constructible, cuda::std::extents>::value, ""); + static_assert(!cuda::std::is_constructible, cuda::std::extents>::value, ""); + static_assert(!cuda::std::is_constructible, cuda::std::extents>::value, ""); +} + +__host__ __device__ constexpr bool test() +{ + test_conversion(); + test_conversion(); + test_conversion(); + test_conversion(); + test_no_implicit_conversion(); + test_rank_mismatch(); + test_static_extent_mismatch(); + return true; +} + +int main(int, char**) +{ + test(); + 
static_assert(test(), ""); + + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/ctad.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/ctad.pass.cpp new file mode 100644 index 00000000000..2185bddc444 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/ctad.pass.cpp @@ -0,0 +1,51 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11, c++14 + +// + +// template +// explicit extents(Integrals...) -> see below; +// Constraints: (is_convertible_v && ...) is true. +// +// Remarks: The deduced type is dextents. 
+ +#include +#include + +#include "../ConvertibleToIntegral.h" +#include "test_macros.h" + +template +__host__ __device__ constexpr void test(E e, Expected expected) +{ + ASSERT_SAME_TYPE(E, Expected); + assert(e == expected); +} + +__host__ __device__ constexpr bool test() +{ + constexpr cuda::std::size_t D = cuda::std::dynamic_extent; + + test(cuda::std::extents(), cuda::std::extents()); + test(cuda::std::extents(1), cuda::std::extents(1)); + test(cuda::std::extents(1, 2u), cuda::std::extents(1, 2u)); + test(cuda::std::extents(1, 2u, 3, 4, 5, 6, 7, 8, 9), + cuda::std::extents(1, 2u, 3, 4, 5, 6, 7, 8, 9)); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/ctor_default.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/ctor_default.pass.cpp new file mode 100644 index 00000000000..88ceb27fa56 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/ctor_default.pass.cpp @@ -0,0 +1,70 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// Test default construction: +// +// constexpr extents() noexcept = default; +// +// Remarks: since the standard uses an exposition only array member, dynamic extents +// need to be zero initialized! 
+ +#include +#include +#include + +#include "../ConvertibleToIntegral.h" +#include "CtorTestCombinations.h" +#include "test_macros.h" + +struct DefaultCtorTest +{ + template = 0> + __host__ __device__ static constexpr void + test_construction(AllExtents all_ext, Extents, cuda::std::index_sequence) + { + // This function gets called twice: once with Extents being just the dynamic ones, and once with all the extents + // specified. We only test during the all extent case, since then Indices is the correct number. This allows us to + // reuse the same testing machinery used in other constructor tests. + ASSERT_NOEXCEPT(E{}); + // Need to construct new expected values, replacing dynamic values with 0 + cuda::std::array expected_exts{ + ((E::static_extent(Indices) == cuda::std::dynamic_extent) + ? typename AllExtents::value_type(0) + : all_ext[Indices])...}; + test_runtime_observers(E{}, expected_exts); + } + + template = 0> + __host__ __device__ static constexpr void + test_construction(AllExtents all_ext, Extents, cuda::std::index_sequence) + { + // nothing to do here + } +}; + +int main(int, char**) +{ + test_index_type_combo(); +#if TEST_STD_VER >= 2017 + static_assert(test_index_type_combo(), ""); +#endif // TEST_STD_VER >= 2017 + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/ctor_from_array.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/ctor_from_array.pass.cpp new file mode 100644 index 00000000000..b40b3908712 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/ctor_from_array.pass.cpp @@ -0,0 +1,132 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// Test construction from array: +// +// template +// constexpr explicit(N != rank_dynamic()) extents(const array& exts) noexcept; +// +// Constraints: +// * is_convertible_v is true, +// * is_nothrow_constructible_v is true, and +// * N == rank_dynamic() || N == rank() is true. +// +// Preconditions: +// * If N != rank_dynamic() is true, exts[r] equals Er for each r for which +// Er is a static extent, and +// * either +// - N is zero, or +// - exts[r] is nonnegative and is representable as a value of type index_type +// for every rank index r. +// + +#include +#include +#include +#include + +#include "../ConvertibleToIntegral.h" +#include "CtorTestCombinations.h" +#include "test_macros.h" + +struct ArrayCtorTest +{ + template = 0> + __host__ __device__ static constexpr void + test_construction(cuda::std::array all_ext, Extents ext, cuda::std::index_sequence) + { + ASSERT_NOEXCEPT(E(ext)); + test_implicit_construction_call(ext, all_ext); + test_runtime_observers(E(ext), all_ext); + } + + template = 0> + __host__ __device__ static constexpr void + test_construction(cuda::std::array all_ext, Extents ext, cuda::std::index_sequence) + { + ASSERT_NOEXCEPT(E(ext)); + test_runtime_observers(E(ext), all_ext); + } +}; + +template +struct implicit_construction +{ + bool value; + __host__ __device__ implicit_construction(E) + : value(true) + {} + template + __host__ __device__ implicit_construction(T) + : value(false) + {} +}; + +int main(int, char**) +{ + test_index_type_combo(); +#if TEST_STD_VER >= 2020 + static_assert(test_index_type_combo(), ""); +#endif // TEST_STD_VER >= 2020 + + constexpr size_t D = cuda::std::dynamic_extent; + using E = cuda::std::extents; + + // check can't construct from too few arguments + 
static_assert(!cuda::std::is_constructible>::value, + "extents constructible from illegal arguments"); + // check can't construct from rank_dynamic < #args < rank + static_assert(!cuda::std::is_constructible>::value, + "extents constructible from illegal arguments"); + // check can't construct from too many arguments + static_assert(!cuda::std::is_constructible>::value, + "extents constructible from illegal arguments"); + + // test implicit construction fails from span and array if all extents are given + cuda::std::array a5{3, 4, 5, 6, 7}; + // check that explicit construction works, i.e. no error + static_assert(cuda::std::is_constructible, decltype(a5)>::value, + "extents unexpectectly not constructible"); + // check that implicit construction doesn't work + assert((implicit_construction>(a5).value == false)); + + // test construction fails from types not convertible to index_type but convertible to other integer types + static_assert(cuda::std::is_convertible::value, + "Test helper IntType unexpectedly not convertible to int"); + static_assert(!cuda::std::is_constructible, cuda::std::array>::value, + "extents constructible from illegal arguments"); + + // index_type is not nothrow constructible + static_assert(cuda::std::is_convertible::value, ""); + static_assert(cuda::std::is_convertible::value, ""); + static_assert(!cuda::std::is_nothrow_constructible::value, ""); + static_assert( + !cuda::std::is_constructible, cuda::std::array>::value, ""); + + // convertible from non-const to index_type but not from const + static_assert(cuda::std::is_convertible::value, ""); + static_assert(!cuda::std::is_convertible::value, ""); + static_assert(cuda::std::is_nothrow_constructible::value, ""); + static_assert(!cuda::std::is_constructible, cuda::std::array>::value, ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/ctor_from_integral.pass.cpp 
b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/ctor_from_integral.pass.cpp new file mode 100644 index 00000000000..15f733ebdd4 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/ctor_from_integral.pass.cpp @@ -0,0 +1,79 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// Test construction from integral: +// +// template +// constexpr explicit extents(OtherIndexTypes ... exts) noexcept; +// +// Let N be sizeof...(OtherIndexTypes), and let +// exts_arr be array{static_cast(cuda::std::move(exts))...}. +// +// Constraints: +// * (is_convertible_v && ...) is true, +// * (is_nothrow_constructible_v && ...) is true, and +// * N == rank_dynamic() || N == rank() is true. +// +// Preconditions: +// * If N != rank_dynamic() is true, exts_arr[r] equals Er for each r for which +// Er is a static extent, and +// * either +// - sizeof...(exts) == 0 is true, or +// - each element of exts is nonnegative and is representable as a value of type index_type. 
+// + +#include +#include +#include + +#include "../ConvertibleToIntegral.h" +#include "CtorTestCombinations.h" +#include "test_macros.h" + +struct IntegralCtorTest +{ + template + __host__ __device__ static constexpr void + test_construction(AllExtents all_ext, Extents ext, cuda::std::index_sequence) + { + // construction from indices + ASSERT_NOEXCEPT(E(ext[Indices]...)); + test_runtime_observers(E(ext[Indices]...), all_ext); + } +}; + +int main(int, char**) +{ + test_index_type_combo(); +#if TEST_STD_VER >= 2020 + static_assert(test_index_type_combo(), ""); +#endif // TEST_STD_VER >= 2020 + + constexpr size_t D = cuda::std::dynamic_extent; + using E = cuda::std::extents; + + // check can't construct from too few arguments + static_assert(!cuda::std::is_constructible::value, "extents constructible from illegal arguments"); + // check can't construct from rank_dynamic < #args < rank + static_assert(!cuda::std::is_constructible::value, "extents constructible from illegal arguments"); + // check can't construct from too many arguments + static_assert(!cuda::std::is_constructible::value, + "extents constructible from illegal arguments"); + + // test construction fails from types not convertible to index_type but convertible to other integer types + static_assert(cuda::std::is_convertible::value, + "Test helper IntType unexpectedly not convertible to int"); + static_assert(!cuda::std::is_constructible, IntType>::value, + "extents constructible from illegal arguments"); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/ctor_from_span.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/ctor_from_span.pass.cpp new file mode 100644 index 00000000000..cd1e63d4e8c --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/ctor_from_span.pass.cpp @@ -0,0 +1,136 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the 
Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// Test construction from span: +// +// template +// constexpr explicit(N != rank_dynamic()) extents(span exts) noexcept; +// +// Constraints: +// * is_convertible_v is true, +// * is_nothrow_constructible_v is true, and +// * N == rank_dynamic() || N == rank() is true. +// +// Preconditions: +// * If N != rank_dynamic() is true, exts[r] equals Er for each r for which +// Er is a static extent, and +// * either +// - N is zero, or +// - exts[r] is nonnegative and is representable as a value of type index_type +// for every rank index r. +// + +#include +#include +#include +#include +#include + +#include "../ConvertibleToIntegral.h" +#include "CtorTestCombinations.h" +#include "test_macros.h" + +TEST_NV_DIAG_SUPPRESS(2912) // if-constexpr is a C++17 feature + +struct SpanCtorTest +{ + template = 0> + __host__ __device__ static constexpr void + test_construction(cuda::std::array all_ext, Extents ext, cuda::std::index_sequence) + { + ASSERT_NOEXCEPT(E(ext)); + test_implicit_construction_call(cuda::std::span(ext), all_ext); + test_runtime_observers(E(cuda::std::span(ext)), all_ext); + } + + template = 0> + __host__ __device__ static constexpr void + test_construction(cuda::std::array all_ext, Extents ext, cuda::std::index_sequence) + { + ASSERT_NOEXCEPT(E(ext)); + test_runtime_observers(E(cuda::std::span(ext)), all_ext); + } +}; + +template +struct implicit_construction +{ + bool value; + __host__ __device__ implicit_construction(E) + : value(true) + {} + template + __host__ __device__ implicit_construction(T) + : value(false) + {} +}; + +int main(int, char**) +{ + test_index_type_combo(); +#if TEST_STD_VER >= 
2020 + static_assert(test_index_type_combo(), ""); +#endif // TEST_STD_VER >= 2020 + + constexpr size_t D = cuda::std::dynamic_extent; + using E = cuda::std::extents; + + // check can't construct from too few arguments + static_assert(!cuda::std::is_constructible>::value, + "extents constructible from illegal arguments"); + // check can't construct from rank_dynamic < #args < rank + static_assert(!cuda::std::is_constructible>::value, + "extents constructible from illegal arguments"); + // check can't construct from too many arguments + static_assert(!cuda::std::is_constructible>::value, + "extents constructible from illegal arguments"); + + // test implicit construction fails from span and array if all extents are given + cuda::std::array a5{3, 4, 5, 6, 7}; + cuda::std::span s5(a5.data(), 5); + // check that explicit construction works, i.e. no error + static_assert(cuda::std::is_constructible, decltype(s5)>::value, + "extents unexpectectly not constructible"); + // check that implicit construction doesn't work + assert((implicit_construction>(s5).value == false)); + + // test construction fails from types not convertible to index_type but convertible to other integer types + static_assert(cuda::std::is_convertible::value, + "Test helper IntType unexpectedly not convertible to int"); + static_assert(!cuda::std::is_constructible, cuda::std::span>::value, + "extents constructible from illegal arguments"); + + // index_type is not nothrow constructible + static_assert(cuda::std::is_convertible::value, ""); + static_assert(cuda::std::is_convertible::value, ""); + static_assert(!cuda::std::is_nothrow_constructible::value, ""); + static_assert(!cuda::std::is_constructible, cuda::std::span>::value, + ""); + + // convertible from non-const to index_type but not from const + static_assert(cuda::std::is_convertible::value, ""); + static_assert(!cuda::std::is_convertible::value, ""); + static_assert(cuda::std::is_nothrow_constructible::value, ""); + 
static_assert(!cuda::std::is_constructible, cuda::std::span>::value, ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/dextents.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/dextents.pass.cpp new file mode 100644 index 00000000000..7187efca066 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/dextents.pass.cpp @@ -0,0 +1,43 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// template +// using dextents = see below; +// +// Result: A type E that is a specialization of extents such that +// E::rank() == Rank && E::rank() == E::rank_dynamic() is true, +// and E::index_type denotes IndexType. 
+ +#include +#include + +#include "test_macros.h" + +template +__host__ __device__ void test_alias_template_dextents() +{ + constexpr size_t D = cuda::std::dynamic_extent; + ASSERT_SAME_TYPE(cuda::std::dextents, cuda::std::extents); + ASSERT_SAME_TYPE(cuda::std::dextents, cuda::std::extents); + ASSERT_SAME_TYPE(cuda::std::dextents, cuda::std::extents); + ASSERT_SAME_TYPE(cuda::std::dextents, cuda::std::extents); + ASSERT_SAME_TYPE(cuda::std::dextents, cuda::std::extents); +} + +int main(int, char**) +{ + test_alias_template_dextents(); + test_alias_template_dextents(); + test_alias_template_dextents(); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/obs_static.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/obs_static.pass.cpp new file mode 100644 index 00000000000..3971d4649b1 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/obs_static.pass.cpp @@ -0,0 +1,113 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// static constexpr rank_type rank() noexcept; +// static constexpr rank_type rank_dynamic() noexcept; +// +// static constexpr size_t static_extent(rank_type i) noexcept; +// +// Preconditions: i < rank() is true. +// +// Returns: Ei. +// +// +// constexpr index_type extent(rank_type i) const noexcept; +// +// Preconditions: i < rank() is true. +// +// Returns: Di. 
+// + +#include +#include +#include + +#include "test_macros.h" + +template 0), int> = 0> +__host__ __device__ void +test_static_observers(cuda::std::index_sequence, cuda::std::index_sequence) +{ + ASSERT_NOEXCEPT(E::rank()); + static_assert(E::rank() == rank, ""); + ASSERT_NOEXCEPT(E::rank_dynamic()); + static_assert(E::rank_dynamic() == rank_dynamic, ""); + + // Let's only test this if the call isn't a precondition violation + ASSERT_NOEXCEPT(E::static_extent(0)); + ASSERT_SAME_TYPE(decltype(E::static_extent(0)), size_t); + static_assert(cuda::std::__all < E::static_extent(Indices) == StaticExts... > ::value, ""); +} + +template = 0> +__host__ __device__ void +test_static_observers(cuda::std::index_sequence, cuda::std::index_sequence) +{ + ASSERT_NOEXCEPT(E::rank()); + static_assert(E::rank() == rank, ""); + ASSERT_NOEXCEPT(E::rank_dynamic()); + static_assert(E::rank_dynamic() == rank_dynamic, ""); +} + +template +__host__ __device__ void test_static_observers() +{ + test_static_observers( + cuda::std::index_sequence(), cuda::std::make_index_sequence()); +} + +template +__host__ __device__ void test() +{ + constexpr size_t D = cuda::std::dynamic_extent; + constexpr size_t S = 5; + + test_static_observers, 0, 0>(); + + test_static_observers, 1, 0, S>(); + test_static_observers, 1, 1, D>(); + + test_static_observers, 2, 0, S, S>(); + test_static_observers, 2, 1, S, D>(); + test_static_observers, 2, 1, D, S>(); + test_static_observers, 2, 2, D, D>(); + + test_static_observers, 3, 0, S, S, S>(); + test_static_observers, 3, 1, S, S, D>(); + test_static_observers, 3, 1, S, D, S>(); + test_static_observers, 3, 1, D, S, S>(); + test_static_observers, 3, 2, S, D, D>(); + test_static_observers, 3, 2, D, S, D>(); + test_static_observers, 3, 2, D, D, S>(); + test_static_observers, 3, 3, D, D, D>(); +} + +int main(int, char**) +{ + test(); + test(); + test(); + test(); + test(); + return 0; +} diff --git 
a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/types.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/types.pass.cpp new file mode 100644 index 00000000000..99b6ca15b95 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/extents/types.pass.cpp @@ -0,0 +1,103 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// template +// class extents { +// public: +// // types +// using index_type = IndexType; +// using size_type = make_unsigned_t; +// using rank_type = size_t; +// +// static constexpr rank_type rank() noexcept { return sizeof...(Extents); } +// static constexpr rank_type rank_dynamic() noexcept { return dynamic-index(rank()); } +// ... 
+// } + +#include +#include +#include +#include + +#include "test_macros.h" + +template +__host__ __device__ constexpr size_t count_dynamic_extents() +{ + constexpr size_t arr[] = {Extents...}; + size_t res = 0; + for (size_t i = 0; i < sizeof...(Extents); ++i) + { + res += static_cast(arr[i] == cuda::std::dynamic_extent); + } + return res; +} + +template +__host__ __device__ void testExtents() +{ + ASSERT_SAME_TYPE(typename E::index_type, IndexType); + ASSERT_SAME_TYPE(typename E::size_type, cuda::std::make_unsigned_t); + ASSERT_SAME_TYPE(typename E::rank_type, size_t); + + static_assert(sizeof...(Extents) == E::rank(), ""); + static_assert(count_dynamic_extents() == E::rank_dynamic()); + + static_assert(cuda::std::regular, ""); + static_assert(cuda::std::is_trivially_copyable::value, ""); + +// Did never find a way to make this true on windows +#ifndef TEST_COMPILER_MSVC + static_assert(cuda::std::is_empty::value == (E::rank_dynamic() == 0), ""); +#endif +} + +template +__host__ __device__ void testExtents() +{ + testExtents, IndexType, Extents...>(); +} + +template +__host__ __device__ void test() +{ + constexpr size_t D = cuda::std::dynamic_extent; + testExtents(); + testExtents(); + testExtents(); + testExtents(); + testExtents(); + testExtents(); + testExtents(); + testExtents(); + testExtents(); + testExtents(); + testExtents(); + testExtents(); + testExtents(); + testExtents(); + + testExtents(); + testExtents(); + testExtents(); +} + +int main(int, char**) +{ + test(); + test(); + test(); + test(); + test(); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/foo_customizations.hpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/foo_customizations.hpp deleted file mode 100644 index e7b56b21150..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/foo_customizations.hpp +++ /dev/null @@ -1,252 +0,0 @@ -#ifndef _FOO_CUSTOMIZATIONS_HPP -#define _FOO_CUSTOMIZATIONS_HPP - -// Taken from the reference 
implementation repo - -namespace Foo -{ -template -struct foo_ptr -{ - T* data; - _CCCL_HOST_DEVICE constexpr foo_ptr(T* ptr) - : data(ptr) - {} -}; - -template -struct foo_accessor -{ - using offset_policy = foo_accessor; - using element_type = T; - using reference = T&; - using data_handle_type = foo_ptr; - - _LIBCUDACXX_HIDE_FROM_ABI constexpr foo_accessor(int* ptr = nullptr) noexcept - { - flag = ptr; - } - - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr foo_accessor(cuda::std::default_accessor) noexcept - { - flag = nullptr; - } - - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr foo_accessor(foo_accessor other) noexcept - { - flag = other.flag; - } - - _CCCL_HOST_DEVICE constexpr reference access(data_handle_type p, size_t i) const noexcept - { - return p.data[i]; - } - - _CCCL_HOST_DEVICE constexpr data_handle_type offset(data_handle_type p, size_t i) const noexcept - { - return data_handle_type(p.data + i); - } - int* flag; - - _CCCL_HOST_DEVICE friend constexpr void swap(foo_accessor& x, foo_accessor& y) - { - x.flag[0] = 99; - y.flag[0] = 77; - cuda::std::swap(x.flag, y.flag); - } -}; - -struct layout_foo -{ - template - class mapping; -}; - -template -class layout_foo::mapping -{ -public: - using extents_type = Extents; - using index_type = typename extents_type::index_type; - using size_type = typename extents_type::size_type; - using rank_type = typename extents_type::rank_type; - using layout_type = layout_foo; - -private: - static_assert(cuda::std::__detail::__is_extents_v, - "layout_foo::mapping must be instantiated with a specialization of cuda::std::extents."); - static_assert(extents_type::rank() < 3, "layout_foo only supports 0D, 1D and 2D"); - - template - friend class mapping; - -public: - //-------------------------------------------------------------------------------- - - _CCCL_HIDE_FROM_ABI constexpr mapping() noexcept = default; - _CCCL_HIDE_FROM_ABI constexpr mapping(mapping const&) noexcept = default; - - _CCCL_HOST_DEVICE constexpr 
mapping(extents_type const& __exts) noexcept - : __extents(__exts) - {} - - _CCCL_TEMPLATE(class OtherExtents) - _CCCL_REQUIRES(_CCCL_TRAIT(cuda::std::is_constructible, extents_type, OtherExtents)) - __MDSPAN_CONDITIONAL_EXPLICIT((!cuda::std::is_convertible::value)) // needs two () due to - // comma - _LIBCUDACXX_HIDE_FROM_ABI constexpr mapping( - mapping const& other) noexcept // NOLINT(google-explicit-constructor) - : __extents(other.extents()) - { - /* - * TODO: check precondition - * other.required_span_size() is a representable value of type index_type - */ - } - - _CCCL_TEMPLATE(class OtherExtents) - _CCCL_REQUIRES(_CCCL_TRAIT(cuda::std::is_constructible, extents_type, OtherExtents)) - __MDSPAN_CONDITIONAL_EXPLICIT((!cuda::std::is_convertible::value)) // needs two () due - // to comma - _LIBCUDACXX_HIDE_FROM_ABI constexpr mapping( - cuda::std::layout_right::mapping const& other) noexcept // NOLINT(google-explicit-constructor) - : __extents(other.extents()) - {} - - _CCCL_TEMPLATE(class OtherExtents) - _CCCL_REQUIRES(_CCCL_TRAIT(cuda::std::is_constructible, extents_type, OtherExtents) && (extents_type::rank() <= 1)) - __MDSPAN_CONDITIONAL_EXPLICIT((!cuda::std::is_convertible::value)) // needs two () due to - // comma - _LIBCUDACXX_HIDE_FROM_ABI constexpr mapping( - cuda::std::layout_left::mapping const& other) noexcept // NOLINT(google-explicit-constructor) - : __extents(other.extents()) - {} - - _CCCL_TEMPLATE(class OtherExtents) - _CCCL_REQUIRES(_CCCL_TRAIT(cuda::std::is_constructible, extents_type, OtherExtents)) - __MDSPAN_CONDITIONAL_EXPLICIT((extents_type::rank() > 0)) - _LIBCUDACXX_HIDE_FROM_ABI constexpr mapping( - cuda::std::layout_stride::mapping const& other) // NOLINT(google-explicit-constructor) - : __extents(other.extents()) - { - /* - * TODO: check precondition - * other.required_span_size() is a representable value of type index_type - */ - NV_IF_TARGET(NV_IS_HOST, (size_t stride = 1; for (rank_type r = __extents.rank(); r > 0; r--) { - 
assert(stride == other.stride(r - 1)); - // if(stride != other.stride(r-1)) - // throw std::runtime_error("Assigning layout_stride to layout_foo with invalid strides."); - stride *= __extents.extent(r - 1); - })) - } - - _CCCL_HIDE_FROM_ABI __MDSPAN_CONSTEXPR_14_DEFAULTED mapping& operator=(mapping const&) noexcept = default; - - _LIBCUDACXX_HIDE_FROM_ABI constexpr const extents_type& extents() const noexcept - { - return __extents; - } - - _LIBCUDACXX_HIDE_FROM_ABI constexpr index_type required_span_size() const noexcept - { - index_type value = 1; - for (rank_type r = 0; r != extents_type::rank(); ++r) - { - value *= __extents.extent(r); - } - return value; - } - - //-------------------------------------------------------------------------------- - - _LIBCUDACXX_HIDE_FROM_ABI constexpr index_type operator()() const noexcept - { - return index_type(0); - } - - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr index_type operator()(Indx0 idx0) const noexcept - { - return static_cast(idx0); - } - - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr index_type operator()(Indx0 idx0, Indx1 idx1) const noexcept - { - return static_cast(idx0 * __extents.extent(0) + idx1); - } - - _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool is_always_unique() noexcept - { - return true; - } - _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool is_always_exhaustive() noexcept - { - return true; - } - _LIBCUDACXX_HIDE_FROM_ABI static constexpr bool is_always_strided() noexcept - { - return true; - } - _LIBCUDACXX_HIDE_FROM_ABI constexpr bool is_unique() const noexcept - { - return true; - } - _LIBCUDACXX_HIDE_FROM_ABI constexpr bool is_exhaustive() const noexcept - { - return true; - } - _LIBCUDACXX_HIDE_FROM_ABI constexpr bool is_strided() const noexcept - { - return true; - } - - _LIBCUDACXX_HIDE_FROM_ABI constexpr index_type stride(rank_type i) const noexcept - { - index_type value = 1; - for (rank_type r = extents_type::rank() - 1; r > i; r--) - { - value *= __extents.extent(r); - } - return 
value; - } - - template - _LIBCUDACXX_HIDE_FROM_ABI friend constexpr bool - operator==(mapping const& lhs, mapping const& rhs) noexcept - { - return lhs.extents() == rhs.extents(); - } - - // In C++ 20 the not equal exists if equal is found -#if !(__MDSPAN_HAS_CXX_20) - template - _LIBCUDACXX_HIDE_FROM_ABI friend constexpr bool - operator!=(mapping const& lhs, mapping const& rhs) noexcept - { - return lhs.extents() != rhs.extents(); - } -#endif - - // Not really public, but currently needed to implement fully constexpr usable submdspan: - template - _CCCL_HOST_DEVICE constexpr index_type - __get_stride(cuda::std::extents, cuda::std::integer_sequence) const - { - return __MDSPAN_FOLD_TIMES_RIGHT((Idx > N ? __extents.template __extent() : 1), 1); - } - template - _CCCL_HOST_DEVICE constexpr index_type __stride() const noexcept - { - return __get_stride(__extents, std::make_index_sequence()); - } - -private: - _CCCL_NO_UNIQUE_ADDRESS extents_type __extents{}; -}; - -} // namespace Foo -#endif diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/comparison.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/comparison.pass.cpp new file mode 100644 index 00000000000..bf875e97e65 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/comparison.pass.cpp @@ -0,0 +1,144 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. 
+// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// template +// friend constexpr bool operator==(const mapping& x, const mapping& y) noexcept; +// ` +// Constraints: extents_type::rank() == OtherExtents::rank() is true. + +#include +#include +#include +#include + +#include "test_macros.h" + +template +__host__ __device__ constexpr void test_comparison(bool equal, To dest_exts, From src_exts) +{ + cuda::std::layout_left::mapping dest(dest_exts); + cuda::std::layout_left::mapping src(src_exts); + ASSERT_NOEXCEPT(dest == src); + assert((dest == src) == equal); + assert((dest != src) == !equal); +} + +template +_CCCL_CONCEPT_FRAGMENT( + can_compare_layouts_, + requires(E1 e1, E2 e2)((cuda::std::layout_left::mapping(e1) == cuda::std::layout_left::mapping(e2)))); + +template +_CCCL_CONCEPT can_compare_layouts = _CCCL_FRAGMENT(can_compare_layouts_, E1, E2); + +struct X +{ + __host__ __device__ constexpr bool does_not_match() + { + return true; + } +}; + +template , int> = 0> +__host__ __device__ constexpr X compare_layout_mappings(E1, E2) +{ + return {}; +} + +template , int> = 0> +__host__ __device__ constexpr auto compare_layout_mappings(E1, E2) +{ + return true; +} + +template +__host__ __device__ constexpr void test_comparison_different_rank() +{ + constexpr size_t D = cuda::std::dynamic_extent; + + // sanity check same rank + static_assert(compare_layout_mappings(cuda::std::extents(5), cuda::std::extents(5)), ""); + static_assert(compare_layout_mappings(cuda::std::extents(), cuda::std::extents(5)), ""); + static_assert(compare_layout_mappings(cuda::std::extents(5), cuda::std::extents()), ""); + static_assert(compare_layout_mappings(cuda::std::extents(), cuda::std::extents()), ""); + + // not equality comparable when rank is not the same + static_assert(compare_layout_mappings(cuda::std::extents(), cuda::std::extents(1)).does_not_match(), ""); + 
static_assert(compare_layout_mappings(cuda::std::extents(), cuda::std::extents()).does_not_match(), ""); + + static_assert(compare_layout_mappings(cuda::std::extents(1), cuda::std::extents()).does_not_match(), ""); + static_assert(compare_layout_mappings(cuda::std::extents(), cuda::std::extents()).does_not_match(), ""); + + static_assert( + compare_layout_mappings(cuda::std::extents(5), cuda::std::extents(5, 5)).does_not_match(), ""); + static_assert(compare_layout_mappings(cuda::std::extents(), cuda::std::extents(5)).does_not_match(), + ""); + static_assert(compare_layout_mappings(cuda::std::extents(), cuda::std::extents()).does_not_match(), + ""); + + static_assert( + compare_layout_mappings(cuda::std::extents(5, 5), cuda::std::extents(5)).does_not_match(), ""); + static_assert(compare_layout_mappings(cuda::std::extents(5), cuda::std::extents(5)).does_not_match(), + ""); + static_assert(compare_layout_mappings(cuda::std::extents(), cuda::std::extents()).does_not_match(), + ""); +} + +template +__host__ __device__ constexpr void test_comparison_same_rank() +{ + constexpr size_t D = cuda::std::dynamic_extent; + + test_comparison(true, cuda::std::extents(), cuda::std::extents()); + + test_comparison(true, cuda::std::extents(5), cuda::std::extents(5)); + test_comparison(true, cuda::std::extents(), cuda::std::extents(5)); + test_comparison(true, cuda::std::extents(5), cuda::std::extents()); + test_comparison(true, cuda::std::extents(), cuda::std::extents()); + test_comparison(false, cuda::std::extents(5), cuda::std::extents(7)); + test_comparison(false, cuda::std::extents(), cuda::std::extents(7)); + test_comparison(false, cuda::std::extents(5), cuda::std::extents()); + test_comparison(false, cuda::std::extents(), cuda::std::extents()); + + test_comparison( + true, cuda::std::extents(5, 6, 7, 8, 9), cuda::std::extents(5, 6, 7, 8, 9)); + test_comparison(true, cuda::std::extents(5, 7, 9), cuda::std::extents(6, 7)); + test_comparison(true, cuda::std::extents(5, 6, 7, 8, 
9), cuda::std::extents()); + test_comparison( + false, cuda::std::extents(5, 6, 7, 8, 9), cuda::std::extents(5, 6, 3, 8, 9)); + test_comparison(false, cuda::std::extents(5, 7, 9), cuda::std::extents(6, 7)); + test_comparison(false, cuda::std::extents(5, 6, 7, 8, 9), cuda::std::extents()); +} + +template +__host__ __device__ constexpr void test_comparison() +{ + test_comparison_same_rank(); + test_comparison_different_rank(); +} + +__host__ __device__ constexpr bool test() +{ + test_comparison(); + test_comparison(); + test_comparison(); + test_comparison(); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/ctor.default.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/ctor.default.pass.cpp new file mode 100644 index 00000000000..04825133360 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/ctor.default.pass.cpp @@ -0,0 +1,74 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. 
+// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// Test default construction: +// +// constexpr mapping() noexcept = default; + +#include +#include +#include + +#include "test_macros.h" + +template = 0> +__host__ __device__ constexpr void test_construction() +{ + using M = cuda::std::layout_left::mapping; + ASSERT_NOEXCEPT(M{}); + M m; + E e; + + // check correct extents are returned + ASSERT_NOEXCEPT(m.extents()); + assert(m.extents() == e); + + // check required_span_size() + typename E::index_type expected_size = 1; + for (typename E::rank_type r = 0; r < E::rank(); r++) + { + expected_size *= e.extent(r); + } + assert(m.required_span_size() == expected_size); +} + +template = 0> +__host__ __device__ constexpr void test_construction() +{ + using M = cuda::std::layout_left::mapping; + ASSERT_NOEXCEPT(M{}); + M m; + E e; + + // check correct extents are returned + ASSERT_NOEXCEPT(m.extents()); + assert(m.extents() == e); +} + +__host__ __device__ constexpr bool test() +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_construction>(); + test_construction>(); + test_construction>(); + test_construction>(); + test_construction>(); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/ctor.extents.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/ctor.extents.pass.cpp new file mode 100644 index 00000000000..c03399a19ac --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/ctor.extents.pass.cpp @@ -0,0 +1,75 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// constexpr mapping(const extents_type&) noexcept; +// +// Preconditions: The size of the multidimensional index space e is representable +// as a value of type index_type ([basic.fundamental]). +// +// Effects: Direct-non-list-initializes extents_ with e. + +#include +#include +#include + +#include "test_macros.h" + +template = 0> +__host__ __device__ constexpr void test_construction(E e) +{ + using M = cuda::std::layout_left::mapping; + ASSERT_NOEXCEPT(M{e}); + M m(e); + + // check correct extents are returned + ASSERT_NOEXCEPT(m.extents()); + assert(m.extents() == e); + + // check required_span_size() + typename E::index_type expected_size = 1; + for (typename E::rank_type r = 0; r < E::rank(); r++) + { + expected_size *= e.extent(r); + } + assert(m.required_span_size() == expected_size); +} + +template = 0> +__host__ __device__ constexpr void test_construction(E e) +{ + using M = cuda::std::layout_left::mapping; + ASSERT_NOEXCEPT(M{e}); + M m(e); + + // check correct extents are returned + ASSERT_NOEXCEPT(m.extents()); + assert(m.extents() == e); +} + +__host__ __device__ constexpr bool test() +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_construction(cuda::std::extents()); + test_construction(cuda::std::extents(7)); + test_construction(cuda::std::extents()); + test_construction(cuda::std::extents()); + test_construction(cuda::std::extents(7, 9, 10)); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/ctor.layout_right.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/ctor.layout_right.pass.cpp new file mode 100644 index 
00000000000..4d67678d6ad --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/ctor.layout_right.pass.cpp @@ -0,0 +1,149 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// template +// constexpr explicit(!is_convertible) +// mapping(const layout_right::mapping&) noexcept; + +// Constraints: +// - extents_type::rank() <= 1 is true, and +// - is_constructible is true. +// +// Preconditions: other.required_span_size() is representable as a value of type index_type + +#include +#include +#include +#include + +#include "test_macros.h" + +template +__host__ __device__ constexpr void test_implicit_conversion(To dest, From src) +{ + assert(dest.extents() == src.extents()); +} + +template = 0> +__host__ __device__ constexpr void test_conversion(FromExt src_exts) +{ + using To = cuda::std::layout_left::mapping; + using From = cuda::std::layout_right::mapping; + From src(src_exts); + + ASSERT_NOEXCEPT(To(src)); + To dest(src); + + assert(dest.extents() == src.extents()); + dest = src; + assert(dest.extents() == src.extents()); + test_implicit_conversion(src, src); +} + +template = 0> +__host__ __device__ constexpr void test_conversion(FromExt src_exts) +{ + using To = cuda::std::layout_left::mapping; + using From = cuda::std::layout_right::mapping; + From src(src_exts); + + ASSERT_NOEXCEPT(To(src)); + To dest(src); + + assert(dest.extents() == src.extents()); +} + +template +__host__ __device__ constexpr void test_conversion() +{ + constexpr size_t D = cuda::std::dynamic_extent; + constexpr bool 
idx_convertible = static_cast(cuda::std::numeric_limits::max()) + >= static_cast(cuda::std::numeric_limits::max()); + + // clang-format off + test_conversion>(cuda::std::extents()); + test_conversion>(cuda::std::extents(5)); + test_conversion>(cuda::std::extents(5)); + test_conversion>(cuda::std::extents()); + // clang-format on +} + +template +using lr_mapping_t = typename cuda::std::layout_right::template mapping>; +template +using ll_mapping_t = typename cuda::std::layout_left::template mapping>; + +__host__ __device__ constexpr void test_no_implicit_conversion() +{ + constexpr size_t D = cuda::std::dynamic_extent; + + // Sanity check that one static to dynamic conversion works + static_assert(cuda::std::is_constructible, lr_mapping_t>::value, ""); + static_assert(cuda::std::is_convertible, ll_mapping_t>::value, ""); + + // Check that dynamic to static conversion only works explicitly + static_assert(cuda::std::is_constructible, lr_mapping_t>::value, ""); + static_assert(!cuda::std::is_convertible, ll_mapping_t>::value, ""); + + // Sanity check that smaller index_type to larger index_type conversion works + static_assert(cuda::std::is_constructible, lr_mapping_t>::value, ""); + static_assert(cuda::std::is_convertible, ll_mapping_t>::value, ""); + + // Check that larger index_type to smaller index_type conversion works explicitly only + static_assert(cuda::std::is_constructible, lr_mapping_t>::value, ""); + static_assert(!cuda::std::is_convertible, ll_mapping_t>::value, ""); +} + +__host__ __device__ constexpr void test_rank_mismatch() +{ + constexpr size_t D = cuda::std::dynamic_extent; + + static_assert(!cuda::std::is_constructible, lr_mapping_t>::value, ""); + static_assert(!cuda::std::is_constructible, lr_mapping_t>::value, ""); + static_assert(!cuda::std::is_constructible, lr_mapping_t>::value, ""); + static_assert(!cuda::std::is_constructible, lr_mapping_t>::value, ""); +} + +__host__ __device__ constexpr void test_static_extent_mismatch() +{ + 
static_assert(!cuda::std::is_constructible, lr_mapping_t>::value, ""); +} + +__host__ __device__ constexpr void test_rank_greater_one() +{ + constexpr size_t D = cuda::std::dynamic_extent; + + static_assert(!cuda::std::is_constructible, lr_mapping_t>::value, ""); + static_assert(!cuda::std::is_constructible, lr_mapping_t>::value, ""); + static_assert(!cuda::std::is_constructible, lr_mapping_t>::value, ""); +} + +__host__ __device__ constexpr bool test() +{ + test_conversion(); + test_conversion(); + test_conversion(); + test_conversion(); + test_no_implicit_conversion(); + test_rank_mismatch(); + test_static_extent_mismatch(); + test_rank_greater_one(); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/ctor.layout_stride.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/ctor.layout_stride.pass.cpp new file mode 100644 index 00000000000..53d57d6b3d6 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/ctor.layout_stride.pass.cpp @@ -0,0 +1,144 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// template +// constexpr explicit(extents_type::rank() > 0) +// mapping(const layout_stride::mapping& other); +// +// Constraints: is_constructible_v is true. 
+// +// Preconditions: +// - If extents_type::rank() > 0 is true, then for all r in the range [0, extents_type::rank()), +// other.stride(r) equals other.extents().fwd-prod-of-extents(r), and +// - other.required_span_size() is representable as a value of type index_type ([basic.fundamental]). +// +// Effects: Direct-non-list-initializes extents_ with other.extents(). + +#include +#include +#include +#include + +#include "test_macros.h" + +template = 0> +__host__ __device__ constexpr void test_implicit_conversion(From src) +{ + To dest_implicit = src; + assert(dest_implicit == src); +} + +template = 0> +__host__ __device__ constexpr void test_implicit_conversion(From src) +{ + assert((!cuda::std::is_convertible_v) ); +} + +template 0), int> = 0> +__host__ __device__ constexpr cuda::std::array +get_strides(FromExt src_exts) +{ + cuda::std::array strides{}; + strides[0] = 1; + for (size_t r = 1; r < FromExt::rank(); r++) + { + strides[r] = src_exts.extent(r - 1) * strides[r - 1]; + } + return strides; +} + +template = 0> +__host__ __device__ constexpr cuda::std::array get_strides(FromExt) +{ + return {}; +} + +template , + class From = cuda::std::layout_stride::mapping> +__host__ __device__ constexpr void test_conversion(FromExt src_exts) +{ + const cuda::std::array strides = get_strides(src_exts); + From src(src_exts, strides); + + ASSERT_NOEXCEPT(To(src)); + To dest(src); + assert(dest == src); + test_implicit_conversion(src); +} + +template +__host__ __device__ constexpr void test_conversion() +{ + constexpr size_t D = cuda::std::dynamic_extent; + + // clang-format off + test_conversion>(cuda::std::extents()); + test_conversion>(cuda::std::extents(5)); + test_conversion>(cuda::std::extents(5)); + test_conversion>(cuda::std::extents()); + test_conversion>(cuda::std::extents(5, 5)); + test_conversion>(cuda::std::extents(5, 5)); + test_conversion>(cuda::std::extents(5)); + test_conversion>(cuda::std::extents()); + test_conversion>(cuda::std::extents(5, 7)); + 
test_conversion>( + cuda::std::extents(5, 7, 8, 9, 1)); + test_conversion>(cuda::std::extents(5)); + test_conversion>(cuda::std::extents()); + // clang-format on +} + +template +using ll_mapping_t = typename cuda::std::layout_left::template mapping>; +template +using ls_mapping_t = typename cuda::std::layout_stride::template mapping>; + +__host__ __device__ constexpr void test_rank_mismatch() +{ + constexpr size_t D = cuda::std::dynamic_extent; + + static_assert(!cuda::std::is_constructible, ls_mapping_t>::value, ""); + static_assert(!cuda::std::is_constructible, ls_mapping_t>::value, ""); + static_assert(!cuda::std::is_constructible, ls_mapping_t>::value, ""); + static_assert(!cuda::std::is_constructible, ls_mapping_t>::value, ""); +} + +__host__ __device__ constexpr void test_static_extent_mismatch() +{ + constexpr size_t D = cuda::std::dynamic_extent; + + static_assert(!cuda::std::is_constructible, ls_mapping_t>::value, ""); + static_assert(!cuda::std::is_constructible, ls_mapping_t>::value, ""); + static_assert(!cuda::std::is_constructible, ls_mapping_t>::value, ""); +} + +__host__ __device__ constexpr bool test() +{ + test_conversion(); + test_conversion(); + test_conversion(); + test_conversion(); + test_rank_mismatch(); + test_static_extent_mismatch(); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/ctor.mapping.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/ctor.mapping.pass.cpp new file mode 100644 index 00000000000..6ec67b8f71c --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/ctor.mapping.pass.cpp @@ -0,0 +1,156 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// template +// constexpr explicit(!is_convertible) +// mapping(const mapping&) noexcept; + +// Constraints: is_constructible is true. +// +// Preconditions: other.required_span_size() is representable as a value of type index_type + +#include +#include +#include +#include + +#include "test_macros.h" + +template +__host__ __device__ constexpr void test_implicit_conversion(To dest, From src) +{ + assert(dest == src); +} + +template = 0> +__host__ __device__ constexpr void test_conversion(FromExt src_exts) +{ + using To = cuda::std::layout_left::mapping; + using From = cuda::std::layout_left::mapping; + From src(src_exts); + + ASSERT_NOEXCEPT(To(src)); + To dest(src); + + assert(dest == src); + dest = src; + assert(dest == src); + test_implicit_conversion(src, src); +} + +template = 0> +__host__ __device__ constexpr void test_conversion(FromExt src_exts) +{ + using To = cuda::std::layout_left::mapping; + using From = cuda::std::layout_left::mapping; + From src(src_exts); + + ASSERT_NOEXCEPT(To(src)); + To dest(src); + + assert(dest == src); +} + +template +__host__ __device__ constexpr void test_conversion() +{ + constexpr size_t D = cuda::std::dynamic_extent; + constexpr bool idx_convertible = static_cast(cuda::std::numeric_limits::max()) + >= static_cast(cuda::std::numeric_limits::max()); + + // clang-format off + test_conversion>(cuda::std::extents()); + test_conversion>(cuda::std::extents(5)); + test_conversion>(cuda::std::extents(5)); + test_conversion>(cuda::std::extents()); + test_conversion>(cuda::std::extents(5, 5)); + test_conversion>(cuda::std::extents(5, 5)); + test_conversion>(cuda::std::extents(5)); + test_conversion>(cuda::std::extents()); + test_conversion>(cuda::std::extents(5, 7)); + 
test_conversion>( + cuda::std::extents(5, 7, 8, 9, 1)); + test_conversion>(cuda::std::extents(5)); + test_conversion>(cuda::std::extents()); + // clang-format on +} + +template +using mapping_t = typename cuda::std::layout_left::template mapping>; + +__host__ __device__ constexpr void test_no_implicit_conversion() +{ + constexpr size_t D = cuda::std::dynamic_extent; + + // Sanity check that one static to dynamic conversion works + static_assert(cuda::std::is_constructible, mapping_t>::value, ""); + static_assert(cuda::std::is_convertible, mapping_t>::value, ""); + + // Check that dynamic to static conversion only works explicitly + static_assert(cuda::std::is_constructible, mapping_t>::value, ""); + static_assert(!cuda::std::is_convertible, mapping_t>::value, ""); + + // Sanity check that one static to dynamic conversion works + static_assert(cuda::std::is_constructible, mapping_t>::value, ""); + static_assert(cuda::std::is_convertible, mapping_t>::value, ""); + + // Check that dynamic to static conversion only works explicitly + static_assert(cuda::std::is_constructible, mapping_t>::value, ""); + static_assert(!cuda::std::is_convertible, mapping_t>::value, ""); + + // Sanity check that smaller index_type to larger index_type conversion works + static_assert(cuda::std::is_constructible, mapping_t>::value, ""); + static_assert(cuda::std::is_convertible, mapping_t>::value, ""); + + // Check that larger index_type to smaller index_type conversion works explicitly only + static_assert(cuda::std::is_constructible, mapping_t>::value, ""); + static_assert(!cuda::std::is_convertible, mapping_t>::value, ""); +} + +__host__ __device__ constexpr void test_rank_mismatch() +{ + constexpr size_t D = cuda::std::dynamic_extent; + + static_assert(!cuda::std::is_constructible, mapping_t>::value, ""); + static_assert(!cuda::std::is_constructible, mapping_t>::value, ""); + static_assert(!cuda::std::is_constructible, mapping_t>::value, ""); + static_assert(!cuda::std::is_constructible, 
mapping_t>::value, ""); +} + +__host__ __device__ constexpr void test_static_extent_mismatch() +{ + constexpr size_t D = cuda::std::dynamic_extent; + + static_assert(!cuda::std::is_constructible, mapping_t>::value, ""); + static_assert(!cuda::std::is_constructible, mapping_t>::value, ""); + static_assert(!cuda::std::is_constructible, mapping_t>::value, ""); +} + +__host__ __device__ constexpr bool test() +{ + test_conversion(); + test_conversion(); + test_conversion(); + test_conversion(); + test_no_implicit_conversion(); + test_rank_mismatch(); + test_static_extent_mismatch(); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/extents.verify.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/extents.verify.cpp new file mode 100644 index 00000000000..6bd07b505d5 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/extents.verify.cpp @@ -0,0 +1,48 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 +// UNSUPPORTED: nvrtc + +// + +// template +// class layout_left::mapping; + +// If Extents is not a specialization of extents, then the program is +// ill-formed. + +// Mandates: If Extents::rank_dynamic() == 0 is true, then the size of the +// multidimensional index space Extents() is representable as a value of type +// typename Extents::index_type. 
+ +#include + +#include "test_macros.h" + +__host__ __device__ void not_extents() +{ + // expected-error-re@*:* {{{{(static_assert|static assertion)}} failed {{.*}}layout_left::mapping template argument + // must be a specialization of extents}} + cuda::std::layout_left::mapping mapping; + unused(mapping); +} + +__host__ __device__ void representable() +{ + // expected-error-re@*:* {{{{(static_assert|static assertion)}} failed {{.*}}layout_left::mapping product of static + // extents must be representable as index_type.}} + cuda::std::layout_left::mapping> mapping; + unused(mapping); +} + +int main(int, char**) +{ + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/index_operator.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/index_operator.pass.cpp new file mode 100644 index 00000000000..f5bd2a96955 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/index_operator.pass.cpp @@ -0,0 +1,141 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// Test default iteration: +// +// template +// constexpr index_type operator()(Indices...) const noexcept; +// +// Constraints: +// * sizeof...(Indices) == extents_type::rank() is true, +// * (is_convertible_v && ...) is true, and +// * (is_nothrow_constructible_v && ...) is true. +// +// Preconditions: +// * extents_type::index-cast(i) is a multidimensional index in extents_. 
+ +#include +#include +#include + +#include "../ConvertibleToIntegral.h" +#include "test_macros.h" + +template +_CCCL_CONCEPT_FRAGMENT( + operator_constraints_, + requires(Mapping m, + Indices... idxs)((cuda::std::is_same::value))); + +template +_CCCL_CONCEPT operator_constraints = _CCCL_FRAGMENT(operator_constraints_, Mapping, Indices...); + +_CCCL_TEMPLATE(class Mapping, class... Indices) +_CCCL_REQUIRES(operator_constraints) +__host__ __device__ constexpr bool check_operator_constraints(Mapping, Indices...) +{ + return true; +} + +_CCCL_TEMPLATE(class Mapping, class... Indices) +_CCCL_REQUIRES((!operator_constraints) ) +__host__ __device__ constexpr bool check_operator_constraints(Mapping, Indices...) +{ + return false; +} + +template = 0> +__host__ __device__ constexpr void iterate_left(M m, T& count, Args... args) +{ + ASSERT_NOEXCEPT(m(args...)); + assert(count == m(args...)); + count++; +} +template = 0> +__host__ __device__ constexpr void iterate_left(M m, T& count, Args... args) +{ + constexpr int r = static_cast(M::extents_type::rank()) - 1 - static_cast(sizeof...(Args)); + for (typename M::index_type i = 0; i < m.extents().extent(r); i++) + { + iterate_left(m, count, i, args...); + } +} + +template +__host__ __device__ constexpr void test_iteration(Args... 
args) +{ + using M = cuda::std::layout_left::mapping; + M m(E(args...)); + + typename E::index_type count = 0; + iterate_left(m, count); +} + +__host__ __device__ constexpr bool test() +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_iteration>(); + test_iteration>(1); + test_iteration>(7); + test_iteration>(); + test_iteration>(); + test_iteration>(1, 1, 1, 1); + + // Check operator constraint for number of arguments + static_assert(check_operator_constraints( + cuda::std::layout_left::mapping>(cuda::std::extents(1)), 0), + ""); + static_assert(!check_operator_constraints( + cuda::std::layout_left::mapping>(cuda::std::extents(1)), 0, 0), + ""); + + // Check operator constraint for convertibility of arguments to index_type + static_assert( + check_operator_constraints( + cuda::std::layout_left::mapping>(cuda::std::extents(1)), IntType(0)), + ""); + static_assert( + !check_operator_constraints( + cuda::std::layout_left::mapping>(cuda::std::extents(1)), IntType(0)), + ""); + + // Check operator constraint for no-throw-constructibility of index_type from arguments + static_assert( + !check_operator_constraints( + cuda::std::layout_left::mapping>(cuda::std::extents(1)), + IntType(0)), + ""); + + return true; +} + +__host__ __device__ constexpr bool test_large() +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_iteration>(7, 9, 10); + test_iteration>(7, 10); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + + // The large test iterates over ~10k loop indices. + // With assertions enabled this triggered the maximum default limit + // for steps in consteval expressions. Assertions roughly double the + // total number of instructions, so this was already close to the maximum. 
+ test_large(); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/properties.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/properties.pass.cpp new file mode 100644 index 00000000000..5303749d941 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/properties.pass.cpp @@ -0,0 +1,70 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// namespace std { +// template +// class layout_left::mapping { +// +// ... +// static constexpr bool is_always_unique() noexcept { return true; } +// static constexpr bool is_always_exhaustive() noexcept { return true; } +// static constexpr bool is_always_strided() noexcept { return true; } +// +// static constexpr bool is_unique() noexcept { return true; } +// static constexpr bool is_exhaustive() noexcept { return true; } +// static constexpr bool is_strided() noexcept { return true; } +// ... 
+// }; +// } + +#include +#include +#include +#include + +#include "test_macros.h" + +template +__host__ __device__ constexpr void test_layout_mapping_left() +{ + using M = cuda::std::layout_left::template mapping; + assert(M::is_unique() == true); + assert(M::is_exhaustive() == true); + assert(M::is_strided() == true); + assert(M::is_always_unique() == true); + assert(M::is_always_exhaustive() == true); + assert(M::is_always_strided() == true); + ASSERT_NOEXCEPT(cuda::std::declval().is_unique()); + ASSERT_NOEXCEPT(cuda::std::declval().is_exhaustive()); + ASSERT_NOEXCEPT(cuda::std::declval().is_strided()); + ASSERT_NOEXCEPT(M::is_always_unique()); + ASSERT_NOEXCEPT(M::is_always_exhaustive()); + ASSERT_NOEXCEPT(M::is_always_strided()); +} + +__host__ __device__ constexpr bool test() +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_layout_mapping_left>(); + test_layout_mapping_left>(); + test_layout_mapping_left>(); + test_layout_mapping_left>(); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/required_span_size.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/required_span_size.pass.cpp new file mode 100644 index 00000000000..94a19cdae83 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/required_span_size.pass.cpp @@ -0,0 +1,54 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. 
+// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// constexpr index_type required_span_size() const noexcept; +// +// Returns: extents().fwd-prod-of-extents(extents_type::rank()). + +#include +#include +#include + +#include "test_macros.h" + +template +__host__ __device__ constexpr void test_required_span_size(E e, typename E::index_type expected_size) +{ + using M = cuda::std::layout_left::mapping; + const M m(e); + + ASSERT_NOEXCEPT(m.required_span_size()); + assert(m.required_span_size() == expected_size); +} + +__host__ __device__ constexpr bool test() +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_required_span_size(cuda::std::extents(), 1); + test_required_span_size(cuda::std::extents(0), 0); + test_required_span_size(cuda::std::extents(1), 1); + test_required_span_size(cuda::std::extents(7), 7); + test_required_span_size(cuda::std::extents(), 7); + test_required_span_size(cuda::std::extents(), 56); + test_required_span_size(cuda::std::extents(7, 9, 10), 5040); + test_required_span_size(cuda::std::extents(9, 10), 720); + test_required_span_size(cuda::std::extents(9, 10), 0); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/static_requirements.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/static_requirements.pass.cpp new file mode 100644 index 00000000000..d577d50ea23 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/static_requirements.pass.cpp @@ -0,0 +1,150 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// A type M meets the layout mapping requirements if +// - M models copyable and equality_comparable, +// - is_nothrow_move_constructible is true, +// - is_nothrow_move_assignable is true, +// - is_nothrow_swappable is true, and +// +// the following types and expressions are well-formed and have the specified semantics. +// +// typename M::extents_type +// Result: A type that is a specialization of extents. +// +// typename M::index_type +// Result: typename M::extents_type::index_type. +// +// typename M::rank_type +// Result: typename M::extents_type::rank_type. +// +// typename M::layout_type +// Result: A type MP that meets the layout mapping policy requirements ([mdspan.layout.policy.reqmts]) and for which +// is-mapping-of is true. +// +// m.extents() +// Result: const typename M::extents_type& +// +// m(i...) +// Result: typename M::index_type +// Returns: A nonnegative integer less than numeric_limits::max() and less than or equal to +// numeric_limits::max(). +// +// m(i...) == m(static_cast(i)...) +// Result: bool +// Returns: true +// +// m.required_span_size() +// Result: typename M::index_type +// Returns: If the size of the multidimensional index space m.extents() is 0, then 0, else 1 plus the maximum value +// of m(i...) for all i. +// +// m.is_unique() +// Result: bool +// Returns: true only if for every i and j where (i != j || ...) is true, m(i...) != m(j...) is true. +// +// m.is_exhaustive() +// Result: bool +// Returns: true only if for all k in the range [0, m.required_span_size()) there exists an i such that m(i...) +// equals k. 
+// +// m.is_strided() +// Result: bool +// Returns: true only if for every rank index r of m.extents() there exists an integer +// sr such that, for all i where (i+dr) is a multidimensional index in m.extents() ([mdspan.overview]), +// m((i + dr)...) - m(i...) equals sr +// +// m.stride(r) +// Preconditions: m.is_strided() is true. +// Result: typename M::index_type +// Returns: sr as defined in m.is_strided() above. +// +// M::is_always_unique() +// Result: A constant expression ([expr.const]) of type bool. +// Returns: true only if m.is_unique() is true for all possible objects m of type M. +// +// M::is_always_exhaustive() +// Result: A constant expression ([expr.const]) of type bool. +// Returns: true only if m.is_exhaustive() is true for all possible objects m of type M. +// +// M::is_always_strided() +// Result: A constant expression ([expr.const]) of type bool. +// Returns: true only if m.is_strided() is true for all possible objects m of type M. + +#include +#include +#include +#include + +#include "test_macros.h" + +template 0), int> = 0> +__host__ __device__ constexpr void test_mapping_stride_requirements() +{ + static_assert(cuda::std::is_same().stride(0)), typename M::index_type>::value, ""); +} + +template = 0> +__host__ __device__ constexpr void test_mapping_stride_requirements() +{} + +// Common requirements of all layout mappings +template +__host__ __device__ void test_mapping_requirements(cuda::std::index_sequence) +{ + using E = typename M::extents_type; + static_assert(cuda::std::__mdspan_detail::__is_extents::value, ""); + static_assert(cuda::std::is_copy_constructible::value, ""); + static_assert(cuda::std::is_nothrow_move_constructible::value, ""); + static_assert(cuda::std::is_nothrow_move_assignable::value, ""); + static_assert(cuda::std::is_nothrow_swappable::value, ""); + ASSERT_SAME_TYPE(typename M::index_type, typename E::index_type); + ASSERT_SAME_TYPE(typename M::size_type, typename E::size_type); + ASSERT_SAME_TYPE(typename 
M::rank_type, typename E::rank_type); + ASSERT_SAME_TYPE(typename M::layout_type, cuda::std::layout_left); + ASSERT_SAME_TYPE(typename M::layout_type::template mapping, M); + static_assert(cuda::std::is_same().extents()), const E&>::value, ""); + static_assert(cuda::std::is_same()(Idxs...)), typename M::index_type>::value, ""); + static_assert( + cuda::std::is_same().required_span_size()), typename M::index_type>::value, ""); + static_assert(cuda::std::is_same().is_unique()), bool>::value, ""); + static_assert(cuda::std::is_same().is_exhaustive()), bool>::value, ""); + static_assert(cuda::std::is_same().is_strided()), bool>::value, ""); + test_mapping_stride_requirements(); + static_assert(cuda::std::is_same::value, ""); + static_assert(cuda::std::is_same::value, ""); + static_assert(cuda::std::is_same::value, ""); +} + +template +__host__ __device__ void test_layout_mapping_requirements() +{ + using M = typename L::template mapping; + test_mapping_requirements(cuda::std::make_index_sequence()); +} + +template +__host__ __device__ void test_layout_mapping_left() +{ + test_layout_mapping_requirements(); +} + +int main(int, char**) +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_layout_mapping_left>(); + test_layout_mapping_left>(); + test_layout_mapping_left>(); + test_layout_mapping_left>(); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/stride.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/stride.pass.cpp new file mode 100644 index 00000000000..2c03e44f1d2 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_left/stride.pass.cpp @@ -0,0 +1,57 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// constexpr index_type stride(rank_type i) const noexcept; +// +// Constraints: extents_type::rank() > 0 is true. +// +// Preconditions: i < extents_type::rank() is true. +// +// Returns: extents().rev-prod-of-extents(i). + +#include +#include +#include +#include + +#include "test_macros.h" + +template +__host__ __device__ constexpr void test_stride(cuda::std::array strides, Args... args) +{ + using M = cuda::std::layout_left::mapping; + M m(E(args...)); + + ASSERT_NOEXCEPT(m.stride(0)); + for (size_t r = 0; r < E::rank(); r++) + { + assert(strides[r] == m.stride(r)); + } +} + +__host__ __device__ constexpr bool test() +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_stride>(cuda::std::array{1}, 7); + test_stride>(cuda::std::array{1}); + test_stride>(cuda::std::array{1, 7}); + test_stride>(cuda::std::array{1, 7, 56, 504}, 7, 9, 10); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/comparison.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/comparison.pass.cpp new file mode 100644 index 00000000000..ae6d50ee96c --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/comparison.pass.cpp @@ -0,0 +1,144 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. 
+// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// template +// friend constexpr bool operator==(const mapping& x, const mapping& y) noexcept; +// ` +// Constraints: extents_type::rank() == OtherExtents::rank() is true. + +#include +#include +#include +#include + +#include "test_macros.h" + +template +__host__ __device__ constexpr void test_comparison(bool equal, To dest_exts, From src_exts) +{ + cuda::std::layout_right::mapping dest(dest_exts); + cuda::std::layout_right::mapping src(src_exts); + ASSERT_NOEXCEPT(dest == src); + assert((dest == src) == equal); + assert((dest != src) == !equal); +} + +template +_CCCL_CONCEPT_FRAGMENT( + can_compare_layouts_, + requires(E1 e1, E2 e2)((cuda::std::layout_right::mapping(e1) == cuda::std::layout_right::mapping(e2)))); + +template +_CCCL_CONCEPT can_compare_layouts = _CCCL_FRAGMENT(can_compare_layouts_, E1, E2); + +struct X +{ + __host__ __device__ constexpr bool does_not_match() + { + return true; + } +}; + +template , int> = 0> +__host__ __device__ constexpr X compare_layout_mappings(E1, E2) +{ + return {}; +} + +template , int> = 0> +__host__ __device__ constexpr auto compare_layout_mappings(E1, E2) +{ + return true; +} + +template +__host__ __device__ constexpr void test_comparison_different_rank() +{ + constexpr size_t D = cuda::std::dynamic_extent; + + // sanity check same rank + static_assert(compare_layout_mappings(cuda::std::extents(5), cuda::std::extents(5)), ""); + static_assert(compare_layout_mappings(cuda::std::extents(), cuda::std::extents(5)), ""); + static_assert(compare_layout_mappings(cuda::std::extents(5), cuda::std::extents()), ""); + static_assert(compare_layout_mappings(cuda::std::extents(), cuda::std::extents()), ""); + + // not equality comparable when rank is not the same + static_assert(compare_layout_mappings(cuda::std::extents(), cuda::std::extents(1)).does_not_match(), ""); + 
static_assert(compare_layout_mappings(cuda::std::extents(), cuda::std::extents()).does_not_match(), ""); + + static_assert(compare_layout_mappings(cuda::std::extents(1), cuda::std::extents()).does_not_match(), ""); + static_assert(compare_layout_mappings(cuda::std::extents(), cuda::std::extents()).does_not_match(), ""); + + static_assert( + compare_layout_mappings(cuda::std::extents(5), cuda::std::extents(5, 5)).does_not_match(), ""); + static_assert(compare_layout_mappings(cuda::std::extents(), cuda::std::extents(5)).does_not_match(), + ""); + static_assert(compare_layout_mappings(cuda::std::extents(), cuda::std::extents()).does_not_match(), + ""); + + static_assert( + compare_layout_mappings(cuda::std::extents(5, 5), cuda::std::extents(5)).does_not_match(), ""); + static_assert(compare_layout_mappings(cuda::std::extents(5), cuda::std::extents(5)).does_not_match(), + ""); + static_assert(compare_layout_mappings(cuda::std::extents(), cuda::std::extents()).does_not_match(), + ""); +} + +template +__host__ __device__ constexpr void test_comparison_same_rank() +{ + constexpr size_t D = cuda::std::dynamic_extent; + + test_comparison(true, cuda::std::extents(), cuda::std::extents()); + + test_comparison(true, cuda::std::extents(5), cuda::std::extents(5)); + test_comparison(true, cuda::std::extents(), cuda::std::extents(5)); + test_comparison(true, cuda::std::extents(5), cuda::std::extents()); + test_comparison(true, cuda::std::extents(), cuda::std::extents()); + test_comparison(false, cuda::std::extents(5), cuda::std::extents(7)); + test_comparison(false, cuda::std::extents(), cuda::std::extents(7)); + test_comparison(false, cuda::std::extents(5), cuda::std::extents()); + test_comparison(false, cuda::std::extents(), cuda::std::extents()); + + test_comparison( + true, cuda::std::extents(5, 6, 7, 8, 9), cuda::std::extents(5, 6, 7, 8, 9)); + test_comparison(true, cuda::std::extents(5, 7, 9), cuda::std::extents(6, 7)); + test_comparison(true, cuda::std::extents(5, 6, 7, 8, 
9), cuda::std::extents()); + test_comparison( + false, cuda::std::extents(5, 6, 7, 8, 9), cuda::std::extents(5, 6, 3, 8, 9)); + test_comparison(false, cuda::std::extents(5, 7, 9), cuda::std::extents(6, 7)); + test_comparison(false, cuda::std::extents(5, 6, 7, 8, 9), cuda::std::extents()); +} + +template +__host__ __device__ constexpr void test_comparison() +{ + test_comparison_same_rank(); + test_comparison_different_rank(); +} + +__host__ __device__ constexpr bool test() +{ + test_comparison(); + test_comparison(); + test_comparison(); + test_comparison(); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/ctor.default.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/ctor.default.pass.cpp new file mode 100644 index 00000000000..55d808d4786 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/ctor.default.pass.cpp @@ -0,0 +1,74 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. 
+// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// Test default construction: +// +// constexpr mapping() noexcept = default; + +#include +#include +#include + +#include "test_macros.h" + +template = 0> +__host__ __device__ constexpr void test_construction() +{ + using M = cuda::std::layout_right::mapping; + ASSERT_NOEXCEPT(M{}); + M m; + E e; + + // check correct extents are returned + ASSERT_NOEXCEPT(m.extents()); + assert(m.extents() == e); + + // check required_span_size() + typename E::index_type expected_size = 1; + for (typename E::rank_type r = 0; r < E::rank(); r++) + { + expected_size *= e.extent(r); + } + assert(m.required_span_size() == expected_size); +} + +template = 0> +__host__ __device__ constexpr void test_construction() +{ + using M = cuda::std::layout_right::mapping; + ASSERT_NOEXCEPT(M{}); + M m; + E e; + + // check correct extents are returned + ASSERT_NOEXCEPT(m.extents()); + assert(m.extents() == e); +} + +__host__ __device__ constexpr bool test() +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_construction>(); + test_construction>(); + test_construction>(); + test_construction>(); + test_construction>(); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/ctor.extents.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/ctor.extents.pass.cpp new file mode 100644 index 00000000000..4d078dcf083 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/ctor.extents.pass.cpp @@ -0,0 +1,75 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// constexpr mapping(const extents_type&) noexcept; +// +// Preconditions: The size of the multidimensional index space e is representable +// as a value of type index_type ([basic.fundamental]). +// +// Effects: Direct-non-list-initializes extents_ with e. + +#include +#include +#include + +#include "test_macros.h" + +template = 0> +__host__ __device__ constexpr void test_construction(E e) +{ + using M = cuda::std::layout_right::mapping; + ASSERT_NOEXCEPT(M{e}); + M m(e); + + // check correct extents are returned + ASSERT_NOEXCEPT(m.extents()); + assert(m.extents() == e); + + // check required_span_size() + typename E::index_type expected_size = 1; + for (typename E::rank_type r = 0; r < E::rank(); r++) + { + expected_size *= e.extent(r); + } + assert(m.required_span_size() == expected_size); +} + +template = 0> +__host__ __device__ constexpr void test_construction(E e) +{ + using M = cuda::std::layout_right::mapping; + ASSERT_NOEXCEPT(M{e}); + M m(e); + + // check correct extents are returned + ASSERT_NOEXCEPT(m.extents()); + assert(m.extents() == e); +} + +__host__ __device__ constexpr bool test() +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_construction(cuda::std::extents()); + test_construction(cuda::std::extents(7)); + test_construction(cuda::std::extents()); + test_construction(cuda::std::extents()); + test_construction(cuda::std::extents(7, 9, 10)); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/ctor.layout_left.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/ctor.layout_left.pass.cpp new file mode 100644 index 
00000000000..1f2120c98fa --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/ctor.layout_left.pass.cpp @@ -0,0 +1,149 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// template +// constexpr explicit(!is_convertible) +// mapping(const layout_left::mapping&) noexcept; + +// Constraints: +// - extents_type::rank() <= 1 is true, and +// - is_constructible is true. +// +// Preconditions: other.required_span_size() is representable as a value of type index_type + +#include +#include +#include +#include + +#include "test_macros.h" + +template +__host__ __device__ constexpr void test_implicit_conversion(To dest, From src) +{ + assert(dest.extents() == src.extents()); +} + +template = 0> +__host__ __device__ constexpr void test_conversion(FromExt src_exts) +{ + using To = cuda::std::layout_right::mapping; + using From = cuda::std::layout_left::mapping; + From src(src_exts); + + ASSERT_NOEXCEPT(To(src)); + To dest(src); + + assert(dest.extents() == src.extents()); + dest = src; + assert(dest.extents() == src.extents()); + test_implicit_conversion(src, src); +} + +template = 0> +__host__ __device__ constexpr void test_conversion(FromExt src_exts) +{ + using To = cuda::std::layout_right::mapping; + using From = cuda::std::layout_left::mapping; + From src(src_exts); + + ASSERT_NOEXCEPT(To(src)); + To dest(src); + + assert(dest.extents() == src.extents()); +} + +template +__host__ __device__ constexpr void test_conversion() +{ + constexpr size_t D = cuda::std::dynamic_extent; + constexpr bool 
idx_convertible = static_cast(cuda::std::numeric_limits::max()) + >= static_cast(cuda::std::numeric_limits::max()); + + // clang-format off + test_conversion>(cuda::std::extents()); + test_conversion>(cuda::std::extents(5)); + test_conversion>(cuda::std::extents(5)); + test_conversion>(cuda::std::extents()); + // clang-format on +} + +template +using lr_mapping_t = typename cuda::std::layout_right::template mapping>; +template +using ll_mapping_t = typename cuda::std::layout_left::template mapping>; + +__host__ __device__ constexpr void test_no_implicit_conversion() +{ + constexpr size_t D = cuda::std::dynamic_extent; + + // Sanity check that one static to dynamic conversion works + static_assert(cuda::std::is_constructible, ll_mapping_t>::value, ""); + static_assert(cuda::std::is_convertible, lr_mapping_t>::value, ""); + + // Check that dynamic to static conversion only works explicitly + static_assert(cuda::std::is_constructible, ll_mapping_t>::value, ""); + static_assert(!cuda::std::is_convertible, lr_mapping_t>::value, ""); + + // Sanity check that smaller index_type to larger index_type conversion works + static_assert(cuda::std::is_constructible, ll_mapping_t>::value, ""); + static_assert(cuda::std::is_convertible, lr_mapping_t>::value, ""); + + // Check that larger index_type to smaller index_type conversion works explicitly only + static_assert(cuda::std::is_constructible, ll_mapping_t>::value, ""); + static_assert(!cuda::std::is_convertible, lr_mapping_t>::value, ""); +} + +__host__ __device__ constexpr void test_rank_mismatch() +{ + constexpr size_t D = cuda::std::dynamic_extent; + + static_assert(!cuda::std::is_constructible, ll_mapping_t>::value, ""); + static_assert(!cuda::std::is_constructible, ll_mapping_t>::value, ""); + static_assert(!cuda::std::is_constructible, ll_mapping_t>::value, ""); + static_assert(!cuda::std::is_constructible, ll_mapping_t>::value, ""); +} + +__host__ __device__ constexpr void test_static_extent_mismatch() +{ + 
static_assert(!cuda::std::is_constructible, ll_mapping_t>::value, ""); +} + +__host__ __device__ constexpr void test_rank_greater_one() +{ + constexpr size_t D = cuda::std::dynamic_extent; + + static_assert(!cuda::std::is_constructible, ll_mapping_t>::value, ""); + static_assert(!cuda::std::is_constructible, ll_mapping_t>::value, ""); + static_assert(!cuda::std::is_constructible, ll_mapping_t>::value, ""); +} + +__host__ __device__ constexpr bool test() +{ + test_conversion(); + test_conversion(); + test_conversion(); + test_conversion(); + test_no_implicit_conversion(); + test_rank_mismatch(); + test_static_extent_mismatch(); + test_rank_greater_one(); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/ctor.layout_stride.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/ctor.layout_stride.pass.cpp new file mode 100644 index 00000000000..8c66a027b0d --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/ctor.layout_stride.pass.cpp @@ -0,0 +1,141 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// template +// constexpr explicit(extents_type::rank() > 0) +// mapping(const layout_stride::mapping& other); +// +// Constraints: is_constructible_v is true. 
+// +// Preconditions: +// - If extents_type::rank() > 0 is true, then for all r in the range [0, extents_type::rank()), +// other.stride(r) equals other.extents().fwd-prod-of-extents(r), and +// - other.required_span_size() is representable as a value of type index_type ([basic.fundamental]). +// +// Effects: Direct-non-list-initializes extents_ with other.extents(). + +#include +#include +#include +#include + +#include "test_macros.h" + +template = 0> +__host__ __device__ constexpr void test_implicit_conversion(From src) +{ + To dest_implicit = src; + assert(dest_implicit == src); +} + +template = 0> +__host__ __device__ constexpr void test_implicit_conversion(From src) +{ + assert((!cuda::std::is_convertible_v) ); +} + +template 0), int> = 0> +__host__ __device__ constexpr auto get_strides(FromExt src_exts) +{ + cuda::std::array strides{}; + strides[0] = 1; + for (size_t r = 1; r < FromExt::rank(); r++) + { + strides[r] = src_exts.extent(r - 1) * strides[r - 1]; + } + return strides; +} + +template = 0> +__host__ __device__ constexpr auto get_strides(FromExt) +{ + return cuda::std::array{}; +} + +template +__host__ __device__ constexpr void test_conversion(FromExt src_exts) +{ + using To = cuda::std::layout_left::mapping; + using From = cuda::std::layout_stride::mapping; + const auto strides = get_strides(src_exts); + From src(src_exts, strides); + + ASSERT_NOEXCEPT(To(src)); + To dest(src); + assert(dest == src); + test_implicit_conversion(src); +} + +template +__host__ __device__ constexpr void test_conversion() +{ + constexpr size_t D = cuda::std::dynamic_extent; + + // clang-format off + test_conversion>(cuda::std::extents()); + test_conversion>(cuda::std::extents(5)); + test_conversion>(cuda::std::extents(5)); + test_conversion>(cuda::std::extents()); + test_conversion>(cuda::std::extents(5, 5)); + test_conversion>(cuda::std::extents(5, 5)); + test_conversion>(cuda::std::extents(5)); + test_conversion>(cuda::std::extents()); + 
test_conversion>(cuda::std::extents(5, 7)); + test_conversion>( + cuda::std::extents(5, 7, 8, 9, 1)); + test_conversion>(cuda::std::extents(5)); + test_conversion>(cuda::std::extents()); + // clang-format on +} + +template +using lr_mapping_t = typename cuda::std::layout_right::template mapping>; +template +using ls_mapping_t = typename cuda::std::layout_stride::template mapping>; + +__host__ __device__ constexpr void test_rank_mismatch() +{ + constexpr size_t D = cuda::std::dynamic_extent; + + static_assert(!cuda::std::is_constructible, ls_mapping_t>::value, ""); + static_assert(!cuda::std::is_constructible, ls_mapping_t>::value, ""); + static_assert(!cuda::std::is_constructible, ls_mapping_t>::value, ""); + static_assert(!cuda::std::is_constructible, ls_mapping_t>::value, ""); +} + +__host__ __device__ constexpr void test_static_extent_mismatch() +{ + constexpr size_t D = cuda::std::dynamic_extent; + + static_assert(!cuda::std::is_constructible, ls_mapping_t>::value, ""); + static_assert(!cuda::std::is_constructible, ls_mapping_t>::value, ""); + static_assert(!cuda::std::is_constructible, ls_mapping_t>::value, ""); +} + +__host__ __device__ constexpr bool test() +{ + test_conversion(); + test_conversion(); + test_conversion(); + test_conversion(); + test_rank_mismatch(); + test_static_extent_mismatch(); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/ctor.mapping.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/ctor.mapping.pass.cpp new file mode 100644 index 00000000000..be4c42c7f69 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/ctor.mapping.pass.cpp @@ -0,0 +1,156 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
+// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// template +// constexpr explicit(!is_convertible) +// mapping(const mapping&) noexcept; + +// Constraints: is_constructible is true. +// +// Preconditions: other.required_span_size() is representable as a value of type index_type + +#include +#include +#include +#include + +#include "test_macros.h" + +template +__host__ __device__ constexpr void test_implicit_conversion(To dest, From src) +{ + assert(dest == src); +} + +template = 0> +__host__ __device__ constexpr void test_conversion(FromExt src_exts) +{ + using To = cuda::std::layout_right::mapping; + using From = cuda::std::layout_right::mapping; + From src(src_exts); + + ASSERT_NOEXCEPT(To(src)); + To dest(src); + + assert(dest == src); + dest = src; + assert(dest == src); + test_implicit_conversion(src, src); +} + +template = 0> +__host__ __device__ constexpr void test_conversion(FromExt src_exts) +{ + using To = cuda::std::layout_right::mapping; + using From = cuda::std::layout_right::mapping; + From src(src_exts); + + ASSERT_NOEXCEPT(To(src)); + To dest(src); + + assert(dest == src); +} + +template +__host__ __device__ constexpr void test_conversion() +{ + constexpr size_t D = cuda::std::dynamic_extent; + constexpr bool idx_convertible = static_cast(cuda::std::numeric_limits::max()) + >= static_cast(cuda::std::numeric_limits::max()); + + // clang-format off + test_conversion>(cuda::std::extents()); + test_conversion>(cuda::std::extents(5)); + test_conversion>(cuda::std::extents(5)); + test_conversion>(cuda::std::extents()); + test_conversion>(cuda::std::extents(5, 5)); + test_conversion>(cuda::std::extents(5, 5)); + test_conversion>(cuda::std::extents(5)); + 
test_conversion>(cuda::std::extents()); + test_conversion>(cuda::std::extents(5, 7)); + test_conversion>( + cuda::std::extents(5, 7, 8, 9, 1)); + test_conversion>(cuda::std::extents(5)); + test_conversion>(cuda::std::extents()); + // clang-format on +} + +template +using mapping_t = typename cuda::std::layout_right::template mapping>; + +__host__ __device__ constexpr void test_no_implicit_conversion() +{ + constexpr size_t D = cuda::std::dynamic_extent; + + // Sanity check that one static to dynamic conversion works + static_assert(cuda::std::is_constructible, mapping_t>::value, ""); + static_assert(cuda::std::is_convertible, mapping_t>::value, ""); + + // Check that dynamic to static conversion only works explicitly + static_assert(cuda::std::is_constructible, mapping_t>::value, ""); + static_assert(!cuda::std::is_convertible, mapping_t>::value, ""); + + // Sanity check that one static to dynamic conversion works + static_assert(cuda::std::is_constructible, mapping_t>::value, ""); + static_assert(cuda::std::is_convertible, mapping_t>::value, ""); + + // Check that dynamic to static conversion only works explicitly + static_assert(cuda::std::is_constructible, mapping_t>::value, ""); + static_assert(!cuda::std::is_convertible, mapping_t>::value, ""); + + // Sanity check that smaller index_type to larger index_type conversion works + static_assert(cuda::std::is_constructible, mapping_t>::value, ""); + static_assert(cuda::std::is_convertible, mapping_t>::value, ""); + + // Check that larger index_type to smaller index_type conversion works explicitly only + static_assert(cuda::std::is_constructible, mapping_t>::value, ""); + static_assert(!cuda::std::is_convertible, mapping_t>::value, ""); +} + +__host__ __device__ constexpr void test_rank_mismatch() +{ + constexpr size_t D = cuda::std::dynamic_extent; + + static_assert(!cuda::std::is_constructible, mapping_t>::value, ""); + static_assert(!cuda::std::is_constructible, mapping_t>::value, ""); + 
static_assert(!cuda::std::is_constructible, mapping_t>::value, ""); + static_assert(!cuda::std::is_constructible, mapping_t>::value, ""); +} + +__host__ __device__ constexpr void test_static_extent_mismatch() +{ + constexpr size_t D = cuda::std::dynamic_extent; + + static_assert(!cuda::std::is_constructible, mapping_t>::value, ""); + static_assert(!cuda::std::is_constructible, mapping_t>::value, ""); + static_assert(!cuda::std::is_constructible, mapping_t>::value, ""); +} + +__host__ __device__ constexpr bool test() +{ + test_conversion(); + test_conversion(); + test_conversion(); + test_conversion(); + test_no_implicit_conversion(); + test_rank_mismatch(); + test_static_extent_mismatch(); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/extents.verify.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/extents.verify.cpp new file mode 100644 index 00000000000..a6219485466 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/extents.verify.cpp @@ -0,0 +1,48 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 +// UNSUPPORTED: nvrtc + +// + +// template +// class layout_right::mapping; + +// If Extents is not a specialization of extents, then the program is +// ill-formed.
+ +// Mandates: If Extents::rank_dynamic() == 0 is true, then the size of the +// multidimensional index space Extents() is representable as a value of type +// typename Extents::index_type. + +#include + +#include "test_macros.h" + +__host__ __device__ void not_extents() +{ + // expected-error-re@*:* {{{{(static_assert|static assertion)}} failed {{.*}}layout_right::mapping template argument + // must be a specialization of extents}} + cuda::std::layout_right::mapping mapping; + unused(mapping); +} + +__host__ __device__ void representable() +{ + // expected-error-re@*:* {{{{(static_assert|static assertion)}} failed {{.*}}layout_right::mapping product of static + // extents must be representable as index_type.}} + cuda::std::layout_right::mapping> mapping; + unused(mapping); +} + +int main(int, char**) +{ + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/index_operator.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/index_operator.pass.cpp new file mode 100644 index 00000000000..402f17c00cd --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/index_operator.pass.cpp @@ -0,0 +1,143 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// Test default iteration: +// +// template +// constexpr index_type operator()(Indices...) const noexcept; +// +// Constraints: +// * sizeof...(Indices) == extents_type::rank() is true, +// * (is_convertible_v && ...) is true, and +// * (is_nothrow_constructible_v && ...) is true.
+// +// Preconditions: +// * extents_type::index-cast(i) is a multidimensional index in extents_. + +#include +#include +#include + +#include "../ConvertibleToIntegral.h" +#include "test_macros.h" + +template +_CCCL_CONCEPT_FRAGMENT( + operator_constraints_, + requires(Mapping m, + Indices... idxs)((cuda::std::is_same::value))); + +template +_CCCL_CONCEPT operator_constraints = _CCCL_FRAGMENT(operator_constraints_, Mapping, Indices...); + +_CCCL_TEMPLATE(class Mapping, class... Indices) +_CCCL_REQUIRES(operator_constraints) +__host__ __device__ constexpr bool check_operator_constraints(Mapping m, Indices... idxs) +{ + (void) m(idxs...); + return true; +} + +_CCCL_TEMPLATE(class Mapping, class... Indices) +_CCCL_REQUIRES((!operator_constraints) ) +__host__ __device__ constexpr bool check_operator_constraints(Mapping, Indices...) +{ + return false; +} + +template = 0> +__host__ __device__ constexpr void iterate_right(M m, T& count, Args... args) +{ + ASSERT_NOEXCEPT(m(args...)); + assert(count == m(args...)); + count++; +} + +template = 0> +__host__ __device__ constexpr void iterate_right(M m, T& count, Args... args) +{ + constexpr size_t r = sizeof...(Args); + for (typename M::index_type i = 0; i < m.extents().extent(r); i++) + { + iterate_right(m, count, args..., i); + } +} + +template +__host__ __device__ constexpr void test_iteration(Args... 
args) +{ + using M = cuda::std::layout_right::mapping; + M m(E(args...)); + + typename E::index_type count = 0; + iterate_right(m, count); +} + +__host__ __device__ constexpr bool test() +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_iteration>(); + test_iteration>(1); + test_iteration>(7); + test_iteration>(); + test_iteration>(); + test_iteration>(1, 1, 1, 1); + + // Check operator constraint for number of arguments + static_assert(check_operator_constraints( + cuda::std::layout_right::mapping>(cuda::std::extents(1)), 0), + ""); + static_assert(!check_operator_constraints( + cuda::std::layout_right::mapping>(cuda::std::extents(1)), 0, 0), + ""); + + // Check operator constraint for convertibility of arguments to index_type + static_assert( + check_operator_constraints( + cuda::std::layout_right::mapping>(cuda::std::extents(1)), IntType(0)), + ""); + static_assert(!check_operator_constraints( + cuda::std::layout_right::mapping>(cuda::std::extents(1)), + IntType(0)), + ""); + + // Check operator constraint for no-throw-constructibility of index_type from arguments + static_assert( + !check_operator_constraints( + cuda::std::layout_right::mapping>(cuda::std::extents(1)), + IntType(0)), + ""); + + return true; +} + +__host__ __device__ constexpr bool test_large() +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_iteration>(7, 9, 10); + test_iteration>(7, 10); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + + // The large test iterates over ~10k loop indices. + // With assertions enabled this triggered the maximum default limit + // for steps in consteval expressions. Assertions roughly double the + // total number of instructions, so this was already close to the maximum. 
+ test_large(); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/properties.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/properties.pass.cpp new file mode 100644 index 00000000000..fe1d7e4ae44 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/properties.pass.cpp @@ -0,0 +1,70 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// namespace std { +// template +// class layout_right::mapping { +// +// ... +// static constexpr bool is_always_unique() noexcept { return true; } +// static constexpr bool is_always_exhaustive() noexcept { return true; } +// static constexpr bool is_always_strided() noexcept { return true; } +// +// static constexpr bool is_unique() noexcept { return true; } +// static constexpr bool is_exhaustive() noexcept { return true; } +// static constexpr bool is_strided() noexcept { return true; } +// ... 
+// }; +// } + +#include +#include +#include +#include + +#include "test_macros.h" + +template +__host__ __device__ constexpr void test_layout_mapping_right() +{ + using M = cuda::std::layout_right::template mapping; + assert(M::is_unique() == true); + assert(M::is_exhaustive() == true); + assert(M::is_strided() == true); + assert(M::is_always_unique() == true); + assert(M::is_always_exhaustive() == true); + assert(M::is_always_strided() == true); + ASSERT_NOEXCEPT(cuda::std::declval().is_unique()); + ASSERT_NOEXCEPT(cuda::std::declval().is_exhaustive()); + ASSERT_NOEXCEPT(cuda::std::declval().is_strided()); + ASSERT_NOEXCEPT(M::is_always_unique()); + ASSERT_NOEXCEPT(M::is_always_exhaustive()); + ASSERT_NOEXCEPT(M::is_always_strided()); +} + +__host__ __device__ constexpr bool test() +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_layout_mapping_right>(); + test_layout_mapping_right>(); + test_layout_mapping_right>(); + test_layout_mapping_right>(); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/required_span_size.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/required_span_size.pass.cpp new file mode 100644 index 00000000000..15c484cab6f --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/required_span_size.pass.cpp @@ -0,0 +1,54 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. 
+// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// constexpr index_type required_span_size() const noexcept; +// +// Returns: extents().fwd-prod-of-extents(extents_type::rank()). + +#include +#include +#include + +#include "test_macros.h" + +template +__host__ __device__ constexpr void test_required_span_size(E e, typename E::index_type expected_size) +{ + using M = cuda::std::layout_right::mapping; + const M m(e); + + ASSERT_NOEXCEPT(m.required_span_size()); + assert(m.required_span_size() == expected_size); +} + +__host__ __device__ constexpr bool test() +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_required_span_size(cuda::std::extents(), 1); + test_required_span_size(cuda::std::extents(0), 0); + test_required_span_size(cuda::std::extents(1), 1); + test_required_span_size(cuda::std::extents(7), 7); + test_required_span_size(cuda::std::extents(), 7); + test_required_span_size(cuda::std::extents(), 56); + test_required_span_size(cuda::std::extents(7, 9, 10), 5040); + test_required_span_size(cuda::std::extents(9, 10), 720); + test_required_span_size(cuda::std::extents(9, 10), 0); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/static_requirements.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/static_requirements.pass.cpp new file mode 100644 index 00000000000..78e0d50dd16 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/static_requirements.pass.cpp @@ -0,0 +1,150 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// A type M meets the layout mapping requirements if +// - M models copyable and equality_comparable, +// - is_nothrow_move_constructible_v is true, +// - is_nothrow_move_assignable_v is true, +// - is_nothrow_swappable_v is true, and +// +// the following types and expressions are well-formed and have the specified semantics. +// +// typename M::extents_type +// Result: A type that is a specialization of extents. +// +// typename M::index_type +// Result: typename M::extents_type::index_type. +// +// typename M::rank_type +// Result: typename M::extents_type::rank_type. +// +// typename M::layout_type +// Result: A type MP that meets the layout mapping policy requirements ([mdspan.layout.policy.reqmts]) and for which +// is-mapping-of is true. +// +// m.extents() +// Result: const typename M::extents_type& +// +// m(i...) +// Result: typename M::index_type +// Returns: A nonnegative integer less than numeric_limits::max() and less than or equal to +// numeric_limits::max(). +// +// m(i...) == m(static_cast(i)...) +// Result: bool +// Returns: true +// +// m.required_span_size() +// Result: typename M::index_type +// Returns: If the size of the multidimensional index space m.extents() is 0, then 0, else 1 plus the maximum value +// of m(i...) for all i. +// +// m.is_unique() +// Result: bool +// Returns: true only if for every i and j where (i != j || ...) is true, m(i...) != m(j...) is true. +// +// m.is_exhaustive() +// Result: bool +// Returns: true only if for all k in the range [0, m.required_span_size()) there exists an i such that m(i...) +// equals k. 
+// +// m.is_strided() +// Result: bool +// Returns: true only if for every rank index r of m.extents() there exists an integer +// sr such that, for all i where (i+dr) is a multidimensional index in m.extents() ([mdspan.overview]), +// m((i + dr)...) - m(i...) equals sr +// +// m.stride(r) +// Preconditions: m.is_strided() is true. +// Result: typename M::index_type +// Returns: sr as defined in m.is_strided() above. +// +// M::is_always_unique() +// Result: A constant expression ([expr.const]) of type bool. +// Returns: true only if m.is_unique() is true for all possible objects m of type M. +// +// M::is_always_exhaustive() +// Result: A constant expression ([expr.const]) of type bool. +// Returns: true only if m.is_exhaustive() is true for all possible objects m of type M. +// +// M::is_always_strided() +// Result: A constant expression ([expr.const]) of type bool. +// Returns: true only if m.is_strided() is true for all possible objects m of type M. + +#include +#include +#include +#include + +#include "test_macros.h" + +template 0), int> = 0> +__host__ __device__ constexpr void test_mapping_stride_requirements() +{ + static_assert(cuda::std::is_same().stride(0)), typename M::index_type>::value, ""); +} + +template = 0> +__host__ __device__ constexpr void test_mapping_stride_requirements() +{} + +// Common requirements of all layout mappings +template +__host__ __device__ void test_mapping_requirements(cuda::std::index_sequence) +{ + using E = typename M::extents_type; + static_assert(cuda::std::__mdspan_detail::__is_extents::value, ""); + static_assert(cuda::std::is_copy_constructible::value, ""); + static_assert(cuda::std::is_nothrow_move_constructible::value, ""); + static_assert(cuda::std::is_nothrow_move_assignable::value, ""); + static_assert(cuda::std::is_nothrow_swappable::value, ""); + ASSERT_SAME_TYPE(typename M::index_type, typename E::index_type); + ASSERT_SAME_TYPE(typename M::size_type, typename E::size_type); + ASSERT_SAME_TYPE(typename 
M::rank_type, typename E::rank_type); + ASSERT_SAME_TYPE(typename M::layout_type, cuda::std::layout_right); + ASSERT_SAME_TYPE(typename M::layout_type::template mapping, M); + static_assert(cuda::std::is_same().extents()), const E&>::value, ""); + static_assert(cuda::std::is_same()(Idxs...)), typename M::index_type>::value, ""); + static_assert( + cuda::std::is_same().required_span_size()), typename M::index_type>::value, ""); + static_assert(cuda::std::is_same().is_unique()), bool>::value, ""); + static_assert(cuda::std::is_same().is_exhaustive()), bool>::value, ""); + static_assert(cuda::std::is_same().is_strided()), bool>::value, ""); + test_mapping_stride_requirements(); + static_assert(cuda::std::is_same::value, ""); + static_assert(cuda::std::is_same::value, ""); + static_assert(cuda::std::is_same::value, ""); +} + +template +__host__ __device__ void test_layout_mapping_requirements() +{ + using M = typename L::template mapping; + test_mapping_requirements(cuda::std::make_index_sequence()); +} + +template +__host__ __device__ void test_layout_mapping_right() +{ + test_layout_mapping_requirements(); +} + +int main(int, char**) +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_layout_mapping_right>(); + test_layout_mapping_right>(); + test_layout_mapping_right>(); + test_layout_mapping_right>(); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/stride.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/stride.pass.cpp new file mode 100644 index 00000000000..556292d532f --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_right/stride.pass.cpp @@ -0,0 +1,57 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// constexpr index_type stride(rank_type i) const noexcept; +// +// Constraints: extents_type::rank() > 0 is true. +// +// Preconditions: i < extents_type::rank() is true. +// +// Returns: extents().rev-prod-of-extents(i). + +#include +#include +#include +#include + +#include "test_macros.h" + +template +__host__ __device__ constexpr void test_stride(cuda::std::array strides, Args... args) +{ + using M = cuda::std::layout_right::mapping; + M m(E(args...)); + + ASSERT_NOEXCEPT(m.stride(0)); + for (size_t r = 0; r < E::rank(); r++) + { + assert(strides[r] == m.stride(r)); + } +} + +__host__ __device__ constexpr bool test() +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_stride>(cuda::std::array{1}, 7); + test_stride>(cuda::std::array{1}); + test_stride>(cuda::std::array{8, 1}); + test_stride>(cuda::std::array{720, 90, 10, 1}, 7, 9, 10); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/comparison.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/comparison.pass.cpp new file mode 100644 index 00000000000..6a8808972cd --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/comparison.pass.cpp @@ -0,0 +1,303 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. 
+// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// template +// friend constexpr bool operator==(const mapping& x, const OtherMapping& y) noexcept; +// +// Constraints: +// - layout-mapping-alike is satisfied. +// - rank_ == OtherMapping::extents_type::rank() is true. +// - OtherMapping::is_always_strided() is true. +// +// Preconditions: OtherMapping meets the layout mapping requirements ([mdspan.layout.policy.reqmts]). +// +// Returns: true if x.extents() == y.extents() is true, OFFSET(y) == 0 is true, and each of x.stride(r) == y.stride(r) +// is true for r in the range [0, x.extents().rank()). Otherwise, false. + +#include +#include +#include +#include + +#include "../CustomTestLayouts.h" +#include "test_macros.h" + +template +using strides = cuda::std::array; + +template , class S2 = strides> +_CCCL_CONCEPT_FRAGMENT(layout_mapping_comparable_, + requires(cuda::std::layout_stride::mapping e1, cuda::std::layout_stride::mapping e2)( // + (e1 == e2))); + +template +_CCCL_CONCEPT layout_mapping_comparable = _CCCL_FRAGMENT(layout_mapping_comparable_, E1, E2); + +template +__host__ __device__ constexpr void test_comparison_different_rank() +{ + constexpr size_t D = cuda::std::dynamic_extent; + + // sanity check same rank + static_assert(layout_mapping_comparable, cuda::std::extents>); + static_assert(layout_mapping_comparable, cuda::std::extents>); + static_assert(layout_mapping_comparable, cuda::std::extents>); + static_assert(layout_mapping_comparable, cuda::std::extents>); + + // not equality comparable when rank is not the same + static_assert(!layout_mapping_comparable, cuda::std::extents>); + static_assert(!layout_mapping_comparable, cuda::std::extents>); + static_assert(!layout_mapping_comparable, cuda::std::extents>); + static_assert(!layout_mapping_comparable, cuda::std::extents>); + static_assert(!layout_mapping_comparable, cuda::std::extents>); + 
static_assert(!layout_mapping_comparable, cuda::std::extents>); + static_assert(!layout_mapping_comparable, cuda::std::extents>); + static_assert(!layout_mapping_comparable, cuda::std::extents>); + static_assert(!layout_mapping_comparable, cuda::std::extents>); + static_assert(!layout_mapping_comparable, cuda::std::extents>); +} + +template +__host__ __device__ constexpr void test_comparison( + bool equal, + To dest_exts, + From src_exts, + cuda::std::array dest_strides, + cuda::std::array src_strides) +{ + cuda::std::layout_stride::mapping dest(dest_exts, dest_strides); + cuda::std::layout_stride::mapping src(src_exts, src_strides); + ASSERT_NOEXCEPT(dest == src); + assert((dest == src) == equal); + assert((dest != src) == !equal); +} + +template +__host__ __device__ constexpr void test_comparison_same_rank() +{ + constexpr size_t D = cuda::std::dynamic_extent; + + test_comparison( + true, cuda::std::extents(), cuda::std::extents(), cuda::std::array{}, cuda::std::array{}); + + test_comparison( + true, + cuda::std::extents(5), + cuda::std::extents(5), + cuda::std::array{1}, + cuda::std::array{1}); + test_comparison( + true, + cuda::std::extents(0), + cuda::std::extents(0), + cuda::std::array{1}, + cuda::std::array{1}); + test_comparison( + true, + cuda::std::extents(), + cuda::std::extents(5), + cuda::std::array{3}, + cuda::std::array{3}); + test_comparison( + true, + cuda::std::extents(5), + cuda::std::extents(), + cuda::std::array{1}, + cuda::std::array{1}); + test_comparison( + true, + cuda::std::extents(), + cuda::std::extents(), + cuda::std::array{1}, + cuda::std::array{1}); + test_comparison( + false, + cuda::std::extents(), + cuda::std::extents(5), + cuda::std::array{2}, + cuda::std::array{1}); + test_comparison( + false, + cuda::std::extents(5), + cuda::std::extents(5), + cuda::std::array{2}, + cuda::std::array{1}); + test_comparison( + false, + cuda::std::extents(5), + cuda::std::extents(7), + cuda::std::array{1}, + cuda::std::array{1}); + test_comparison( 
+ false, + cuda::std::extents(), + cuda::std::extents(7), + cuda::std::array{1}, + cuda::std::array{1}); + test_comparison( + false, + cuda::std::extents(5), + cuda::std::extents(), + cuda::std::array{1}, + cuda::std::array{1}); + test_comparison( + false, + cuda::std::extents(), + cuda::std::extents(), + cuda::std::array{1}, + cuda::std::array{1}); + + test_comparison( + true, + cuda::std::extents(5, 6, 7, 8, 9), + cuda::std::extents(5, 6, 7, 8, 9), + cuda::std::array{2, 20, 200, 2000, 20000}, + cuda::std::array{2, 20, 200, 2000, 20000}); + test_comparison( + true, + cuda::std::extents(5, 7, 9), + cuda::std::extents(6, 7), + cuda::std::array{2, 20, 200, 2000, 20000}, + cuda::std::array{2, 20, 200, 2000, 20000}); + test_comparison( + true, + cuda::std::extents(5, 6, 7, 8, 9), + cuda::std::extents(), + cuda::std::array{2, 20, 200, 2000, 20000}, + cuda::std::array{2, 20, 200, 2000, 20000}); + test_comparison( + false, + cuda::std::extents(5, 6, 7, 8, 9), + cuda::std::extents(), + cuda::std::array{2, 20, 200, 20000, 2000}, + cuda::std::array{2, 20, 200, 2000, 20000}); + test_comparison( + false, + cuda::std::extents(5, 6, 7, 8, 9), + cuda::std::extents(5, 6, 3, 8, 9), + cuda::std::array{2, 20, 200, 2000, 20000}, + cuda::std::array{2, 20, 200, 2000, 20000}); + test_comparison( + false, + cuda::std::extents(5, 7, 9), + cuda::std::extents(6, 7), + cuda::std::array{2, 20, 200, 2000, 20000}, + cuda::std::array{2, 20, 200, 2000, 20000}); + test_comparison( + false, + cuda::std::extents(5, 6, 7, 8, 9), + cuda::std::extents(), + cuda::std::array{2, 20, 200, 2000, 20000}, + cuda::std::array{2, 20, 200, 2000, 20000}); +} + +template +__host__ __device__ constexpr void test_comparison_with( + bool expect_equal, + E1 e1, + cuda::std::array strides, + E2 e2, + OtherArgs... 
other_args) +{ + typename cuda::std::layout_stride::template mapping map(e1, strides); + typename OtherLayout::template mapping other_map(e2, other_args...); + + assert((map == other_map) == expect_equal); +} + +template ::value, int> = 0> +__host__ __device__ constexpr void test_comparison_with_always_convertible() +{ + constexpr size_t D = cuda::std::dynamic_extent; + // test layout with strides not equal to product of extents + test_comparison_with( + true, + cuda::std::extents(5, 7), + cuda::std::array{2, 10}, + cuda::std::extents(5, 7), + 0, + 2); + // make sure that offset != 0 results in false + test_comparison_with( + false, + cuda::std::extents(5, 7), + cuda::std::array{2, 10}, + cuda::std::extents(5, 7), + 1, + 2); +} + +template ::value, int> = 0> +__host__ __device__ constexpr void test_comparison_with_always_convertible() +{} + +template +__host__ __device__ constexpr void test_comparison_with() +{ + constexpr size_t D = cuda::std::dynamic_extent; + bool is_left_based = cuda::std::is_same_v + || cuda::std::is_same_v; + test_comparison_with( + true, cuda::std::extents(), cuda::std::array{}, cuda::std::extents()); + test_comparison_with( + true, cuda::std::extents(), cuda::std::array{1}, cuda::std::extents()); + test_comparison_with( + true, cuda::std::extents(5), cuda::std::array{1}, cuda::std::extents()); + test_comparison_with( + false, cuda::std::extents(5), cuda::std::array{2}, cuda::std::extents()); + test_comparison_with( + is_left_based, + cuda::std::extents(5, 7), + cuda::std::array{1, 5}, + cuda::std::extents(5, 7)); + test_comparison_with( + !is_left_based, + cuda::std::extents(5, 7), + cuda::std::array{7, 1}, + cuda::std::extents(5, 7)); + test_comparison_with( + false, + cuda::std::extents(5, 7), + cuda::std::array{8, 1}, + cuda::std::extents(5, 7)); + + test_comparison_with_always_convertible(); +} + +template +__host__ __device__ constexpr void test_comparison_index_type() +{ + test_comparison_same_rank(); + 
test_comparison_different_rank(); + test_comparison_with(); + test_comparison_with(); + test_comparison_with(); +} + +__host__ __device__ constexpr bool test() +{ + test_comparison_index_type(); + test_comparison_index_type(); + test_comparison_index_type(); + test_comparison_index_type(); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/ctor.default.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/ctor.default.pass.cpp new file mode 100644 index 00000000000..5becc7a894e --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/ctor.default.pass.cpp @@ -0,0 +1,98 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// Test default construction: +// +// constexpr mapping() noexcept; +// +// +// Preconditions: layout_right::mapping().required_span_size() is representable as a value of type +// index_type ([basic.fundamental]). +// +// Effects: Direct-non-list-initializes extents_ with extents_type(), and for all d in the range [0, rank_), +// direct-non-list-initializes strides_[d] with layout_right::mapping().stride(d). 
+ +#include +#include +#include + +#include "test_macros.h" + +template = 0> +__host__ __device__ constexpr void test_construction() +{ + using M = cuda::std::layout_stride::mapping; + ASSERT_NOEXCEPT(M{}); + M m; + E e; + + // check correct extents are returned + ASSERT_NOEXCEPT(m.extents()); + assert(m.extents() == e); + + // check required_span_size() + typename E::index_type expected_size = 1; + for (typename E::rank_type r = 0; r < E::rank(); r++) + { + expected_size *= e.extent(r); + } + assert(m.required_span_size() == expected_size); + + // check strides: node stride function is constrained on rank>0, e.extent(r) is not + auto strides = m.strides(); + ASSERT_NOEXCEPT(m.strides()); + cuda::std::layout_right::mapping m_right; + for (typename E::rank_type r = 0; r < E::rank(); r++) + { + assert(m.stride(r) == m_right.stride(r)); + assert(strides[r] == m.stride(r)); + } +} +template = 0> +__host__ __device__ constexpr void test_construction() +{ + using M = cuda::std::layout_stride::mapping; + ASSERT_NOEXCEPT(M{}); + M m; + E e; + + // check correct extents are returned + ASSERT_NOEXCEPT(m.extents()); + assert(m.extents() == e); + + // check required_span_size() + typename E::index_type expected_size = 1; + assert(m.required_span_size() == expected_size); + + // check strides: node stride function is constrained on rank>0, e.extent(r) is not + ASSERT_NOEXCEPT(m.strides()); +} + +__host__ __device__ constexpr bool test() +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_construction>(); + test_construction>(); + test_construction>(); + test_construction>(); + test_construction>(); + test_construction>(); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/ctor.extents_array.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/ctor.extents_array.pass.cpp new file mode 100644 index 
00000000000..4cf6e0d89b7 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/ctor.extents_array.pass.cpp @@ -0,0 +1,167 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// template +// constexpr mapping(const extents_type& e, array s) noexcept; +// +// Constraints: +// - is_convertible_v is true, and +// - is_nothrow_constructible_v is true. +// +// Preconditions: +// - s[i] > 0 is true for all i in the range [0, rank_). +// - REQUIRED-SPAN-SIZE(e, s) is representable as a value of type index_type ([basic.fundamental]). +// - If rank_ is greater than 0, then there exists a permutation P of the integers in the range [0, rank_), +// such that s[pi] >= s[pi_1] * e.extent(pi_1) is true for all i in the range [1, rank_), where pi is the ith +// element of P. +// Note 1: For layout_stride, this condition is necessary and sufficient for is_unique() to be true. +// +// Effects: Direct-non-list-initializes extents_ with e, and for all d in the range [0, rank_), +// direct-non-list-initializes strides_[d] with as_const(s[d]). 
+ +#include +#include +#include + +#include "../ConvertibleToIntegral.h" +#include "test_macros.h" + +template = 0> +__host__ __device__ constexpr void test_construction(E e, S s) +{ + using M = cuda::std::layout_stride::mapping; + ASSERT_NOEXCEPT(M{e, s}); + M m(e, s); + + // check correct extents are returned + ASSERT_NOEXCEPT(m.extents()); + assert(m.extents() == e); + + // check required_span_size() + typename E::index_type expected_size = 1; + for (typename E::rank_type r = 0; r < E::rank(); r++) + { + if (e.extent(r) == 0) + { + expected_size = 0; + break; + } + expected_size += (e.extent(r) - 1) * static_cast(s[r]); + } + assert(m.required_span_size() == expected_size); + + // check strides: node stride function is constrained on rank>0, e.extent(r) is not + auto strides = m.strides(); + ASSERT_NOEXCEPT(m.strides()); + for (typename E::rank_type r = 0; r < E::rank(); r++) + { + assert(m.stride(r) == static_cast(s[r])); + assert(strides[r] == m.stride(r)); + } +} +template = 0> +__host__ __device__ constexpr void test_construction(E e, S s) +{ + using M = cuda::std::layout_stride::mapping; + ASSERT_NOEXCEPT(M{e, s}); + M m(e, s); + + // check correct extents are returned + ASSERT_NOEXCEPT(m.extents()); + assert(m.extents() == e); + + // check required_span_size() + typename E::index_type expected_size = 1; + assert(m.required_span_size() == expected_size); + + // check strides: node stride function is constrained on rank>0, e.extent(r) is not + ASSERT_NOEXCEPT(m.strides()); +} + +__host__ __device__ constexpr bool test() +{ + constexpr size_t D = cuda::std::dynamic_extent; + { + cuda::std::array s{}; + test_construction(cuda::std::extents(), s); + } + { + cuda::std::array s{1}; + test_construction(cuda::std::extents(7), s); + } + { + cuda::std::array s{1}; + test_construction(cuda::std::extents(0), s); + } + { + cuda::std::array s{2}; + test_construction(cuda::std::extents(), s); + } + { + cuda::std::array s{1}; + test_construction(cuda::std::extents(7), s); 
+ } + { + cuda::std::array s{3, 30}; + test_construction(cuda::std::extents(), s); + } + { + cuda::std::array s{20, 2, 200, 2000}; + test_construction(cuda::std::extents(7, 9, 10), s); + test_construction(cuda::std::extents(0, 9, 10), s); + test_construction(cuda::std::extents(0, 8, 0), s); + } + { + cuda::std::array s{200, 20, 20, 2000}; + test_construction(cuda::std::extents(7, 0, 8, 9), s); + test_construction(cuda::std::extents(7, 8, 0, 9), s); + test_construction(cuda::std::extents(7, 1, 8, 9), s); + test_construction(cuda::std::extents(7, 8, 1, 9), s); + test_construction(cuda::std::extents(7, 1, 1, 9), s); + test_construction(cuda::std::extents(7, 0, 0, 9), s); + test_construction(cuda::std::extents(7, 1, 1, 9), s); + test_construction(cuda::std::extents(7, 1, 0, 9), s); + test_construction(cuda::std::extents(7, 0, 1, 9), s); + } + + { + using mapping_t = cuda::std::layout_stride::mapping>; + // wrong strides size + static_assert(!cuda::std::is_constructible, cuda::std::array>::value, + ""); + static_assert(!cuda::std::is_constructible, cuda::std::array>::value, + ""); + // wrong extents rank + static_assert(!cuda::std::is_constructible, cuda::std::array>::value, + ""); + // none-convertible strides + static_assert( + !cuda::std::is_constructible, cuda::std::array>::value, ""); + } + { + // not no-throw constructible index_type from stride + using mapping_t = cuda::std::layout_stride::mapping>; + static_assert(cuda::std::is_convertible::value, ""); + static_assert( + !cuda::std::is_constructible, cuda::std::array>::value, ""); + } + + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/ctor.extents_span.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/ctor.extents_span.pass.cpp new file mode 100644 index 00000000000..1f7adbbecc0 --- /dev/null +++ 
b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/ctor.extents_span.pass.cpp @@ -0,0 +1,170 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// template +// constexpr mapping(const extents_type& e, span s) noexcept; +// +// Constraints: +// - is_convertible_v is true, and +// - is_nothrow_constructible_v is true. +// +// Preconditions: +// - s[i] > 0 is true for all i in the range [0, rank_). +// - REQUIRED-SPAN-SIZE(e, s) is representable as a value of type index_type ([basic.fundamental]). +// - If rank_ is greater than 0, then there exists a permutation P of the integers in the range [0, rank_), +// such that s[pi] >= s[pi_1] * e.extent(pi_1) is true for all i in the range [1, rank_), where pi is the ith +// element of P. +// Note 1: For layout_stride, this condition is necessary and sufficient for is_unique() to be true. +// +// Effects: Direct-non-list-initializes extents_ with e, and for all d in the range [0, rank_), +// direct-non-list-initializes strides_[d] with as_const(s[d]). 
+ +#include +#include +#include + +#include "../ConvertibleToIntegral.h" +#include "test_macros.h" + +template = 0> +__host__ __device__ constexpr void test_construction(E e, S s) +{ + using M = cuda::std::layout_stride::mapping; + ASSERT_NOEXCEPT(M{e, s}); + M m(e, s); + + // check correct extents are returned + ASSERT_NOEXCEPT(m.extents()); + assert(m.extents() == e); + + // check required_span_size() + typename E::index_type expected_size = 1; + for (typename E::rank_type r = 0; r < E::rank(); r++) + { + if (e.extent(r) == 0) + { + expected_size = 0; + break; + } + expected_size += (e.extent(r) - 1) * static_cast(s[r]); + } + assert(m.required_span_size() == expected_size); + + // check strides: node stride function is constrained on rank>0, e.extent(r) is not + auto strides = m.strides(); + ASSERT_NOEXCEPT(m.strides()); + for (typename E::rank_type r = 0; r < E::rank(); r++) + { + assert(m.stride(r) == static_cast(s[r])); + assert(strides[r] == m.stride(r)); + } +} +template = 0> +__host__ __device__ constexpr void test_construction(E e, S s) +{ + using M = cuda::std::layout_stride::mapping; + ASSERT_NOEXCEPT(M{e, s}); + M m(e, s); + + // check correct extents are returned + ASSERT_NOEXCEPT(m.extents()); + assert(m.extents() == e); + + // check required_span_size() + typename E::index_type expected_size = 1; + assert(m.required_span_size() == expected_size); + + // check strides: node stride function is constrained on rank>0, e.extent(r) is not + ASSERT_NOEXCEPT(m.strides()); +} + +__host__ __device__ constexpr bool test() +{ + constexpr size_t D = cuda::std::dynamic_extent; + { + cuda::std::array s{}; + test_construction(cuda::std::extents(), cuda::std::span(s)); + } + { + cuda::std::array s{1}; + test_construction(cuda::std::extents(7), cuda::std::span(s)); + } + { + cuda::std::array s{1}; + test_construction(cuda::std::extents(0), cuda::std::span(s)); + } + { + cuda::std::array s{2}; + test_construction(cuda::std::extents(), cuda::std::span(s)); + } + { + 
cuda::std::array s{1}; + test_construction(cuda::std::extents(7), cuda::std::span(s)); + } + { + cuda::std::array s{3, 30}; + test_construction(cuda::std::extents(), cuda::std::span(s)); + } + { + cuda::std::array s{20, 2, 200, 2000}; + test_construction(cuda::std::extents(7, 9, 10), cuda::std::span(s)); + } + { + cuda::std::array s{20, 2, 200, 2000}; + test_construction(cuda::std::extents(7, 0, 10), cuda::std::span(s)); + test_construction(cuda::std::extents(0, 9, 10), cuda::std::span(s)); + test_construction(cuda::std::extents(0, 8, 0), cuda::std::span(s)); + } + { + cuda::std::array s{200, 20, 20, 2000}; + test_construction(cuda::std::extents(7, 0, 8, 9), cuda::std::span(s)); + test_construction(cuda::std::extents(7, 8, 0, 9), cuda::std::span(s)); + test_construction(cuda::std::extents(7, 1, 8, 9), cuda::std::span(s)); + test_construction(cuda::std::extents(7, 8, 1, 9), cuda::std::span(s)); + test_construction(cuda::std::extents(7, 1, 1, 9), cuda::std::span(s)); + test_construction(cuda::std::extents(7, 0, 0, 9), cuda::std::span(s)); + test_construction(cuda::std::extents(7, 1, 1, 9), cuda::std::span(s)); + test_construction(cuda::std::extents(7, 1, 0, 9), cuda::std::span(s)); + test_construction(cuda::std::extents(7, 0, 1, 9), cuda::std::span(s)); + } + + { + using mapping_t = cuda::std::layout_stride::mapping>; + // wrong strides size + static_assert(!cuda::std::is_constructible, cuda::std::span>::value, + ""); + static_assert(!cuda::std::is_constructible, cuda::std::span>::value, + ""); + // wrong extents rank + static_assert(!cuda::std::is_constructible, cuda::std::span>::value, + ""); + // none-convertible strides + static_assert( + !cuda::std::is_constructible, cuda::std::span>::value, ""); + } + { + // not no-throw constructible index_type from stride + using mapping_t = cuda::std::layout_stride::mapping>; + static_assert(cuda::std::is_convertible::value, ""); + static_assert( + !cuda::std::is_constructible, cuda::std::span>::value, ""); + } + return 
true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/ctor.strided_mapping.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/ctor.strided_mapping.pass.cpp new file mode 100644 index 00000000000..a8cc7296cbc --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/ctor.strided_mapping.pass.cpp @@ -0,0 +1,219 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// template +// constexpr explicit(see below) +// mapping(const StridedLayoutMapping& other) noexcept; +// +// Constraints: +// - layout-mapping-alike is satisfied. +// - is_constructible_v is true. +// - StridedLayoutMapping::is_always_unique() is true. +// - StridedLayoutMapping::is_always_strided() is true. +// +// Preconditions: +// - StridedLayoutMapping meets the layout mapping requirements ([mdspan.layout.policy.reqmts]), +// - other.stride(r) > 0 is true for every rank index r of extents(), +// - other.required_span_size() is representable as a value of type index_type ([basic.fundamental]), and +// - OFFSET(other) == 0 is true. +// +// Effects: Direct-non-list-initializes extents_ with other.extents(), and for all d in the range [0, rank_), +// direct-non-list-initializes strides_[d] with other.stride(d). 
+// +// Remarks: The expression inside explicit is equivalent to: +// - !(is_convertible_v && +// (is-mapping-of || +// is-mapping-of || +// is-mapping-of)) + +#include +#include +#include +#include + +#include "../CustomTestLayouts.h" +#include "test_macros.h" + +template ::value, int> = 0> +__host__ __device__ constexpr auto get_strides(FromExt src_exts) +{ + using From = typename FromL::template mapping; + + // just construct some strides which aren't layout_left/layout_right + cuda::std::array strides{}; + size_t stride = 2; + for (size_t r = 0; r < FromExt::rank(); r++) + { + strides[r] = stride; + stride *= src_exts.extent(r); + } + return From(src_exts, strides); +} +template ::value, int> = 0> +__host__ __device__ constexpr auto get_strides(FromExt src_exts) +{ + using From = typename FromL::template mapping; + return From(src_exts); +} + +template = 0> +__host__ __device__ constexpr void test_conversion(FromExt src_exts) +{ + using To = cuda::std::layout_stride::mapping; + using From = typename FromL::template mapping; + + From src(get_strides(src_exts)); + ASSERT_NOEXCEPT(To(src)); + To dest(src); + assert(dest == src); + + To dest_implicit = src; + assert(dest_implicit == src); +} + +template = 0> +__host__ __device__ constexpr void test_conversion(FromExt src_exts) +{ + using To = cuda::std::layout_stride::mapping; + using From = typename FromL::template mapping; + + From src(get_strides(src_exts)); + ASSERT_NOEXCEPT(To(src)); + To dest(src); + assert(dest == src); + assert((!cuda::std::is_convertible_v) ); +} + +template +__host__ __device__ constexpr void test_conversion() +{ + constexpr size_t D = cuda::std::dynamic_extent; + constexpr bool idx_convertible = static_cast(cuda::std::numeric_limits::max()) + >= static_cast(cuda::std::numeric_limits::max()); + constexpr bool l_convertible = + cuda::std::is_same_v || cuda::std::is_same_v + || cuda::std::is_same_v; + constexpr bool idx_l_convertible = idx_convertible && l_convertible; + + // clang-format 
off + // adding extents convertibility expectation + test_conversion>(cuda::std::extents()); + test_conversion>(cuda::std::extents(0)); + test_conversion>(cuda::std::extents(5)); + test_conversion>(cuda::std::extents(5)); + test_conversion>(cuda::std::extents()); + test_conversion>(cuda::std::extents(5, 5)); + test_conversion>(cuda::std::extents(5, 5)); + test_conversion>(cuda::std::extents(5)); + test_conversion>(cuda::std::extents()); + test_conversion>(cuda::std::extents(5, 7)); + test_conversion>( + cuda::std::extents(5, 7, 8, 9, 1)); + test_conversion>(cuda::std::extents(5)); + test_conversion>(cuda::std::extents()); + // clang-format on +} + +template +using ToM = typename cuda::std::layout_stride::template mapping>; + +template +using FromM = typename FromL::template mapping>; + +template , int> = 0> +__host__ __device__ constexpr void test_no_implicit_conversion() +{ + constexpr size_t D = cuda::std::dynamic_extent; + + // Sanity check that one static to dynamic conversion works + static_assert(cuda::std::is_constructible, FromM>::value, ""); + static_assert(cuda::std::is_convertible, ToM>::value, ""); + + // Check that dynamic to static conversion only works explicitly + static_assert(cuda::std::is_constructible, FromM>::value, ""); + static_assert(!cuda::std::is_convertible, ToM>::value, ""); + + // Sanity check that one static to dynamic conversion works + static_assert(cuda::std::is_constructible, FromM>::value, ""); + static_assert(cuda::std::is_convertible, ToM>::value, ""); + + // Check that dynamic to static conversion only works explicitly + static_assert(cuda::std::is_constructible, FromM>::value, ""); + static_assert(!cuda::std::is_convertible, ToM>::value, ""); + + // Sanity check that smaller index_type to larger index_type conversion works + static_assert(cuda::std::is_constructible, FromM>::value, ""); + static_assert(cuda::std::is_convertible, ToM>::value, ""); + + // Check that larger index_type to smaller index_type conversion works 
explicitly only + static_assert(cuda::std::is_constructible, FromM>::value, ""); + static_assert(!cuda::std::is_convertible, ToM>::value, ""); +} + +// the implicit convertibility test doesn't apply to non cuda::std::layouts +template , int> = 0> +__host__ __device__ constexpr void test_no_implicit_conversion() +{} + +template +__host__ __device__ constexpr void test_rank_mismatch() +{ + constexpr size_t D = cuda::std::dynamic_extent; + + static_assert(!cuda::std::is_constructible, FromM>::value, ""); + static_assert(!cuda::std::is_constructible, FromM>::value, ""); + static_assert(!cuda::std::is_constructible, FromM>::value, ""); + static_assert(!cuda::std::is_constructible, FromM>::value, ""); +} + +template +__host__ __device__ constexpr void test_static_extent_mismatch() +{ + constexpr size_t D = cuda::std::dynamic_extent; + + static_assert(!cuda::std::is_constructible, FromM>::value, ""); + static_assert(!cuda::std::is_constructible, FromM>::value, ""); + static_assert(!cuda::std::is_constructible, FromM>::value, ""); +} + +template +__host__ __device__ constexpr void test_layout() +{ + test_conversion(); + test_conversion(); + test_conversion(); + test_conversion(); + test_no_implicit_conversion(); + test_rank_mismatch(); + test_static_extent_mismatch(); +} + +__host__ __device__ constexpr bool test() +{ + test_layout(); + test_layout(); + test_layout(); + test_layout(); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/deduction.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/deduction.pass.cpp new file mode 100644 index 00000000000..9cdaf116ce2 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/deduction.pass.cpp @@ -0,0 +1,68 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the 
Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11, c++14 +// ADDITIONAL_COMPILE_FLAGS: -Wno-ctad-maybe-unsupported + +// + +#include +#include +#include +#include + +#include "test_macros.h" + +// mdspan + +// layout_stride::mapping does not have explicit deduction guides, +// but implicit deduction guides for constructor taking extents and strides +// should work + +__host__ __device__ constexpr bool test() +{ + constexpr size_t D = cuda::std::dynamic_extent; + + static_assert(_CCCL_TRAIT(cuda::std::is_convertible, const unsigned&, int) + && _CCCL_TRAIT(cuda::std::is_nothrow_constructible, int, const unsigned&)); + + ASSERT_SAME_TYPE( + decltype(cuda::std::layout_stride::mapping(cuda::std::extents(), cuda::std::array())), + cuda::std::layout_stride::template mapping>); + ASSERT_SAME_TYPE( + decltype(cuda::std::layout_stride::mapping(cuda::std::extents(), cuda::std::array{1})), + cuda::std::layout_stride::template mapping>); + ASSERT_SAME_TYPE( + decltype(cuda::std::layout_stride::mapping(cuda::std::extents(), cuda::std::array{1})), + cuda::std::layout_stride::template mapping>); + ASSERT_SAME_TYPE(decltype(cuda::std::layout_stride::mapping( + cuda::std::extents(), cuda::std::array{3, 100})), + cuda::std::layout_stride::template mapping>); + + ASSERT_SAME_TYPE( + decltype(cuda::std::layout_stride::mapping(cuda::std::extents(), cuda::std::span())), + cuda::std::layout_stride::template mapping>); + ASSERT_SAME_TYPE(decltype(cuda::std::layout_stride::mapping( + cuda::std::extents(), cuda::std::declval>())), + cuda::std::layout_stride::template mapping>); + ASSERT_SAME_TYPE(decltype(cuda::std::layout_stride::mapping( + cuda::std::extents(), cuda::std::declval>())), + 
cuda::std::layout_stride::template mapping>); + ASSERT_SAME_TYPE(decltype(cuda::std::layout_stride::mapping( + cuda::std::extents(), cuda::std::declval>())), + cuda::std::layout_stride::template mapping>); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/extents.verify.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/extents.verify.cpp new file mode 100644 index 00000000000..95665a2886e --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/extents.verify.cpp @@ -0,0 +1,47 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// template +// class layout_stride::mapping; + +// If Extents is not a specialization of extents, then the program is +// ill-formed. + +// Mandates: If Extents::rank_dynamic() == 0 is true, then the size of the +// multidimensional index space Extents() is representable as a value of type +// typename Extents::index_type. 
+ +#include + +#include "test_macros.h" + +__host__ __device__ void not_extents() +{ + // expected-error-re@*:* {{static assertion failed {{.*}}layout_stride::mapping template argument must be a + // specialization of extents}} + cuda::std::layout_stride::mapping mapping; + unused(mapping); +} + +__host__ __device__ void representable() +{ + // expected-error-re@*:* {{static assertion failed {{.*}}layout_stride::mapping product of static extents must be + // representable as index_type.}} + cuda::std::layout_stride::mapping> mapping; + unused(mapping); +} + +int main(int, char**) +{ + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/index_operator.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/index_operator.pass.cpp new file mode 100644 index 00000000000..4635e6504c0 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/index_operator.pass.cpp @@ -0,0 +1,154 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// Test default iteration: +// +// template +// constexpr index_type operator()(Indices...) const noexcept; +// +// Constraints: +// * sizeof...(Indices) == extents_type::rank() is true, +// * (is_convertible_v && ...) is true, and +// * (is_nothrow_constructible_v && ...) is true. +// +// Preconditions: +// * extents_type::index-cast(i) is a multidimensional index in extents_. 
+ +#include +#include +#include + +#include "../ConvertibleToIntegral.h" +#include "test_macros.h" + +template +_CCCL_CONCEPT_FRAGMENT( + operator_constraints_, + requires(Mapping m, + Indices... idxs)((cuda::std::is_same::value))); + +template +_CCCL_CONCEPT operator_constraints = _CCCL_FRAGMENT(operator_constraints_, Mapping, Indices...); + +_CCCL_TEMPLATE(class Mapping, class... Indices) +_CCCL_REQUIRES(operator_constraints) +__host__ __device__ constexpr bool check_operator_constraints(Mapping m, Indices... idxs) +{ + (void) m(idxs...); + return true; +} + +_CCCL_TEMPLATE(class Mapping, class... Indices) +_CCCL_REQUIRES((!operator_constraints) ) +__host__ __device__ constexpr bool check_operator_constraints(Mapping, Indices...) +{ + return false; +} + +template +__host__ __device__ constexpr size_t get_strides( + const cuda::std::array& strides, cuda::std::index_sequence, Args... args) +{ + return _CCCL_FOLD_PLUS(size_t{0}, (args * strides[Pos])); +} + +template = 0> +__host__ __device__ constexpr void +iterate_stride(M m, const cuda::std::array& strides, Args... args) +{ + ASSERT_NOEXCEPT(m(args...)); + const size_t expected_val = + get_strides(strides, cuda::std::make_index_sequence(), args...); + assert(expected_val == static_cast(m(args...))); +} + +template = 0> +__host__ __device__ constexpr void +iterate_stride(M m, const cuda::std::array& strides, Args... args) +{ + constexpr size_t r = sizeof...(Args); + for (typename M::index_type i = 0; i < m.extents().extent(r); i++) + { + iterate_stride(m, strides, i, args...); + } +} + +template +__host__ __device__ constexpr void test_iteration(cuda::std::array strides, Args... 
args) +{ + using M = cuda::std::layout_stride::mapping; + M m(E(args...), strides); + + iterate_stride(m, strides); +} + +__host__ __device__ constexpr bool test() +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_iteration>(cuda::std::array{}); + test_iteration>(cuda::std::array{2}, 1); + test_iteration>(cuda::std::array{3}, 7); + test_iteration>(cuda::std::array{4}); + test_iteration>(cuda::std::array{25, 3}); + test_iteration>(cuda::std::array{1, 1, 1, 1}, 1, 1, 1, 1); + + // Check operator constraint for number of arguments + static_assert(check_operator_constraints( + cuda::std::layout_stride::mapping>( + cuda::std::extents(1), cuda::std::array{1}), + 0)); + static_assert(!check_operator_constraints( + cuda::std::layout_stride::mapping>( + cuda::std::extents(1), cuda::std::array{1}), + 0, + 0)); + + // Check operator constraint for convertibility of arguments to index_type + static_assert(check_operator_constraints( + cuda::std::layout_stride::mapping>( + cuda::std::extents(1), cuda::std::array{1}), + IntType(0))); + static_assert(!check_operator_constraints( + cuda::std::layout_stride::mapping>( + cuda::std::extents(1), cuda::std::array{1}), + IntType(0))); + + // Check operator constraint for no-throw-constructibility of index_type from arguments + static_assert(!check_operator_constraints( + cuda::std::layout_stride::mapping>( + cuda::std::extents(1), cuda::std::array{1}), + IntType(0))); + + return true; +} + +__host__ __device__ constexpr bool test_large() +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_iteration>(cuda::std::array{2000, 2, 20, 200}, 7, 9, 10); + test_iteration>(cuda::std::array{2000, 20, 20, 200}, 7, 10); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + + // The large test iterates over ~10k loop indices. + // With assertions enabled this triggered the maximum default limit + // for steps in consteval expressions. 
Assertions roughly double the + // total number of instructions, so this was already close to the maximum. + test_large(); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/is_exhaustive_corner_case.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/is_exhaustive_corner_case.pass.cpp new file mode 100644 index 00000000000..f16fb3966cf --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/is_exhaustive_corner_case.pass.cpp @@ -0,0 +1,59 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// constexpr bool is_exhaustive() const noexcept; +// +// Returns: +// - true if rank_ is 0. +// - Otherwise, true if there is a permutation P of the integers in the range [0, rank_) such that +// stride(p0) equals 1, and stride(pi) equals stride(pi_1) * extents().extent(pi_1) for i in the +// range [1, rank_), where pi is the ith element of P. +// - Otherwise, false. 
+ +#include +#include +#include +#include + +#include "test_macros.h" + +template +__host__ __device__ constexpr void +test_layout_mapping_stride(E ext, cuda::std::array strides, bool exhaustive) +{ + using M = cuda::std::layout_stride::template mapping; + M m(ext, strides); + assert(m.is_exhaustive() == exhaustive); +} + +__host__ __device__ constexpr bool test() +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_layout_mapping_stride(cuda::std::extents(), cuda::std::array{1}, true); + test_layout_mapping_stride(cuda::std::extents(0), cuda::std::array{3}, false); + test_layout_mapping_stride(cuda::std::extents(), cuda::std::array{6, 2}, true); + test_layout_mapping_stride(cuda::std::extents(3, 0), cuda::std::array{6, 2}, false); + test_layout_mapping_stride(cuda::std::extents(0, 0), cuda::std::array{6, 2}, false); + test_layout_mapping_stride( + cuda::std::extents(3, 3, 0, 3), cuda::std::array{3, 1, 27, 9}, true); + test_layout_mapping_stride( + cuda::std::extents(0, 3, 3, 3), cuda::std::array{3, 1, 27, 9}, false); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/properties.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/properties.pass.cpp new file mode 100644 index 00000000000..bc7da57a336 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/properties.pass.cpp @@ -0,0 +1,141 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. 
+// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// namespace std { +// template +// class layout_stride::mapping { +// +// ... +// static constexpr bool is_always_unique() noexcept { return true; } +// static constexpr bool is_always_exhaustive() noexcept { return false; } +// static constexpr bool is_always_strided() noexcept { return true; } +// +// static constexpr bool is_unique() noexcept { return true; } +// static constexpr bool is_exhaustive() noexcept; +// static constexpr bool is_strided() noexcept { return true; } +// ... +// }; +// } +// +// +// layout_stride::mapping is a trivially copyable type that models regular for each E. +// +// constexpr bool is_exhaustive() const noexcept; +// +// Returns: +// - true if rank_ is 0. +// - Otherwise, true if there is a permutation P of the integers in the range [0, rank_) such that +// stride(p0) equals 1, and stride(pi) equals stride(pi_1) * extents().extent(pi_1) for i in the +// range [1, rank_), where pi is the ith element of P. +// - Otherwise, false. 
+ +#include +#include +#include +#include + +#include "test_macros.h" + +template 0), int> = 0> +__host__ __device__ constexpr void +test_strides(E ext, M& m, const M& c_m, cuda::std::array strides) +{ + for (typename E::rank_type r = 0; r < E::rank(); r++) + { + assert(m.stride(r) == strides[r]); + assert(c_m.stride(r) == strides[r]); + ASSERT_NOEXCEPT(m.stride(r)); + ASSERT_NOEXCEPT(c_m.stride(r)); + } + + typename E::index_type expected_size = 1; + for (typename E::rank_type r = 0; r < E::rank(); r++) + { + if (ext.extent(r) == 0) + { + expected_size = 0; + break; + } + expected_size += (ext.extent(r) - 1) * static_cast(strides[r]); + } + assert(m.required_span_size() == expected_size); + assert(c_m.required_span_size() == expected_size); + ASSERT_NOEXCEPT(m.required_span_size()); + ASSERT_NOEXCEPT(c_m.required_span_size()); +} +template = 0> +__host__ __device__ constexpr void +test_strides(E, M& m, const M& c_m, cuda::std::array strides) +{ + typename E::index_type expected_size = 1; + assert(m.required_span_size() == expected_size); + assert(c_m.required_span_size() == expected_size); + ASSERT_NOEXCEPT(m.required_span_size()); + ASSERT_NOEXCEPT(c_m.required_span_size()); +} + +template +__host__ __device__ constexpr void +test_layout_mapping_stride(E ext, cuda::std::array strides, bool exhaustive) +{ + using M = cuda::std::layout_stride::template mapping; + M m(ext, strides); + const M c_m = m; + assert(m.strides() == strides); + assert(c_m.strides() == strides); + assert(m.extents() == ext); + assert(c_m.extents() == ext); + assert(M::is_unique() == true); + assert(m.is_exhaustive() == exhaustive); + assert(c_m.is_exhaustive() == exhaustive); + assert(M::is_strided() == true); + assert(M::is_always_unique() == true); + assert(M::is_always_exhaustive() == false); + assert(M::is_always_strided() == true); + + ASSERT_NOEXCEPT(m.strides()); + ASSERT_NOEXCEPT(c_m.strides()); + ASSERT_NOEXCEPT(m.extents()); + ASSERT_NOEXCEPT(c_m.extents()); + 
ASSERT_NOEXCEPT(M::is_unique()); + ASSERT_NOEXCEPT(m.is_exhaustive()); + ASSERT_NOEXCEPT(c_m.is_exhaustive()); + ASSERT_NOEXCEPT(M::is_strided()); + ASSERT_NOEXCEPT(M::is_always_unique()); + ASSERT_NOEXCEPT(M::is_always_exhaustive()); + ASSERT_NOEXCEPT(M::is_always_strided()); + + test_strides(ext, m, c_m, strides); + + static_assert(cuda::std::is_trivially_copyable_v); + static_assert(cuda::std::regular); +} + +__host__ __device__ constexpr bool test() +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_layout_mapping_stride(cuda::std::extents(), cuda::std::array{}, true); + test_layout_mapping_stride(cuda::std::extents(), cuda::std::array{1, 4}, true); + test_layout_mapping_stride(cuda::std::extents(), cuda::std::array{1, 5}, false); + test_layout_mapping_stride(cuda::std::extents(7), cuda::std::array{20, 2}, false); + test_layout_mapping_stride( + cuda::std::extents(3, 3, 3, 3), cuda::std::array{3, 1, 9, 27}, true); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/required_span_size.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/required_span_size.pass.cpp new file mode 100644 index 00000000000..48218c3768b --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/required_span_size.pass.cpp @@ -0,0 +1,63 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. 
+// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// Let REQUIRED-SPAN-SIZE(e, strides) be: +// - 1, if e.rank() == 0 is true, +// - otherwise 0, if the size of the multidimensional index space e is 0, +// - otherwise 1 plus the sum of products of (e.extent(r) - 1) and strides[r] for all r in the range [0, e.rank()). + +// constexpr index_type required_span_size() const noexcept; +// +// Returns: REQUIRED-SPAN-SIZE(extents(), strides_). + +#include +#include +#include + +#include "test_macros.h" + +template +__host__ __device__ constexpr void +test_required_span_size(E e, cuda::std::array strides, typename E::index_type expected_size) +{ + using M = cuda::std::layout_stride::mapping; + const M m(e, strides); + + ASSERT_NOEXCEPT(m.required_span_size()); + assert(m.required_span_size() == expected_size); +} + +__host__ __device__ constexpr bool test() +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_required_span_size(cuda::std::extents(), cuda::std::array{}, 1); + test_required_span_size(cuda::std::extents(0), cuda::std::array{5}, 0); + test_required_span_size(cuda::std::extents(1), cuda::std::array{5}, 1); + test_required_span_size(cuda::std::extents(7), cuda::std::array{5}, 31); + test_required_span_size(cuda::std::extents(), cuda::std::array{5}, 31); + test_required_span_size(cuda::std::extents(), cuda::std::array{20, 2}, 135); + test_required_span_size( + cuda::std::extents(7, 9, 10), cuda::std::array{1, 7, 7 * 8, 7 * 8 * 9}, 5040); + test_required_span_size( + cuda::std::extents(9, 10), cuda::std::array{1, 7, 7 * 8, 7 * 8 * 9}, 5034); + test_required_span_size( + cuda::std::extents(9, 10), cuda::std::array{1, 7, 7 * 8, 7 * 8 * 9}, 0); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/static_requirements.pass.cpp 
b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/static_requirements.pass.cpp new file mode 100644 index 00000000000..40f328e4c97 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/static_requirements.pass.cpp @@ -0,0 +1,143 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// A type M meets the layout mapping requirements if +// - M models copyable and equality_comparable, +// - is_nothrow_move_constructible_v is true, +// - is_nothrow_move_assignable_v is true, +// - is_nothrow_swappable_v is true, and +// +// the following types and expressions are well-formed and have the specified semantics. +// +// typename M::extents_type +// Result: A type that is a specialization of extents. +// +// typename M::index_type +// Result: typename M::extents_type::index_type. +// +// typename M::rank_type +// Result: typename M::extents_type::rank_type. +// +// typename M::layout_type +// Result: A type MP that meets the layout mapping policy requirements ([mdspan.layout.policy.reqmts]) and for which +// is-mapping-of is true. +// +// m.extents() +// Result: const typename M::extents_type& +// +// m(i...) +// Result: typename M::index_type +// Returns: A nonnegative integer less than numeric_limits::max() and less than or equal to +// numeric_limits::max(). +// +// m(i...) == m(static_cast(i)...) 
+// Result: bool +// Returns: true +// +// m.required_span_size() +// Result: typename M::index_type +// Returns: If the size of the multidimensional index space m.extents() is 0, then 0, else 1 plus the maximum value +// of m(i...) for all i. +// +// m.is_unique() +// Result: bool +// Returns: true only if for every i and j where (i != j || ...) is true, m(i...) != m(j...) is true. +// +// m.is_exhaustive() +// Result: bool +// Returns: true only if for all k in the range [0, m.required_span_size()) there exists an i such that m(i...) +// equals k. +// +// m.is_strided() +// Result: bool +// Returns: true only if for every rank index r of m.extents() there exists an integer +// sr such that, for all i where (i+dr) is a multidimensional index in m.extents() ([mdspan.overview]), +// m((i + dr)...) - m(i...) equals sr +// +// m.stride(r) +// Preconditions: m.is_strided() is true. +// Result: typename M::index_type +// Returns: sr as defined in m.is_strided() above. +// +// M::is_always_unique() +// Result: A constant expression ([expr.const]) of type bool. +// Returns: true only if m.is_unique() is true for all possible objects m of type M. +// +// M::is_always_exhaustive() +// Result: A constant expression ([expr.const]) of type bool. +// Returns: true only if m.is_exhaustive() is true for all possible objects m of type M. +// +// M::is_always_strided() +// Result: A constant expression ([expr.const]) of type bool. +// Returns: true only if m.is_strided() is true for all possible objects m of type M. 
+ +#include +#include +#include +#include + +#include "test_macros.h" + +// Common requirements of all layout mappings +template +__host__ __device__ void test_mapping_requirements(cuda::std::index_sequence) +{ + using E = typename M::extents_type; + static_assert(cuda::std::__mdspan_detail::__is_extents_v); + static_assert(cuda::std::is_copy_constructible_v); + static_assert(cuda::std::is_nothrow_move_constructible_v); + static_assert(cuda::std::is_nothrow_move_assignable_v); + static_assert(cuda::std::is_nothrow_swappable_v); + ASSERT_SAME_TYPE(typename M::index_type, typename E::index_type); + ASSERT_SAME_TYPE(typename M::size_type, typename E::size_type); + ASSERT_SAME_TYPE(typename M::rank_type, typename E::rank_type); + ASSERT_SAME_TYPE(typename M::layout_type, cuda::std::layout_stride); + ASSERT_SAME_TYPE(typename M::layout_type::template mapping, M); + static_assert(cuda::std::is_same().extents()), const E&>::value, ""); + static_assert(cuda::std::is_same().strides()), + cuda::std::array>::value, + ""); + static_assert(cuda::std::is_same()(Idxs...)), typename M::index_type>::value, ""); + static_assert( + cuda::std::is_same().required_span_size()), typename M::index_type>::value, ""); + static_assert(cuda::std::is_same().is_unique()), bool>::value, ""); + static_assert(cuda::std::is_same().is_exhaustive()), bool>::value, ""); + static_assert(cuda::std::is_same().is_strided()), bool>::value, ""); + static_assert(cuda::std::is_same().stride(0)), typename M::index_type>::value, ""); + static_assert(cuda::std::is_same::value, ""); + static_assert(cuda::std::is_same::value, ""); + static_assert(cuda::std::is_same::value, ""); +} + +template +__host__ __device__ void test_layout_mapping_requirements() +{ + using M = typename L::template mapping; + test_mapping_requirements(cuda::std::make_index_sequence()); +} + +template +__host__ __device__ void test_layout_mapping_stride() +{ + test_layout_mapping_requirements(); +} + +int main(int, char**) +{ + constexpr 
size_t D = cuda::std::dynamic_extent; + test_layout_mapping_stride>(); + test_layout_mapping_stride>(); + test_layout_mapping_stride>(); + test_layout_mapping_stride>(); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/stride.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/stride.pass.cpp new file mode 100644 index 00000000000..dda46f6103a --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/layout_stride/stride.pass.cpp @@ -0,0 +1,66 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// constexpr index_type stride(rank_type i) const noexcept; +// +// Constraints: extents_type::rank() > 0 is true. +// +// Preconditions: i < extents_type::rank() is true. +// +// Returns: extents().rev-prod-of-extents(i). + +#include +#include +#include +#include + +#include "test_macros.h" + +template +__host__ __device__ constexpr void test_stride(cuda::std::array strides, Args... 
args) +{ + using M = cuda::std::layout_stride::mapping; + M m(E(args...), strides); + + ASSERT_NOEXCEPT(m.stride(0)); + for (size_t r = 0; r < E::rank(); r++) + { + assert(strides[r] == m.stride(r)); + } + + ASSERT_NOEXCEPT(m.strides()); + auto strides_out = m.strides(); + static_assert(cuda::std::is_same>::value, + ""); + for (size_t r = 0; r < E::rank(); r++) + { + assert(strides[r] == strides_out[r]); + } +} + +__host__ __device__ constexpr bool test() +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_stride>(cuda::std::array{1}, 7); + test_stride>(cuda::std::array{1}); + test_stride>(cuda::std::array{8, 1}); + test_stride>(cuda::std::array{720, 90, 10, 1}, 7, 9, 10); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.accessor.default.members/access.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.accessor.default.members/access.pass.cpp deleted file mode 100644 index a3260812f32..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.accessor.default.members/access.pass.cpp +++ /dev/null @@ -1,28 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -int main(int, char**) -{ - { - using element_t = int; - cuda::std::array d{42, 43}; - cuda::std::default_accessor a; - - assert(a.access(d.data(), 0) == 42); - assert(a.access(d.data(), 1) == 43); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.accessor.default.members/copy.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.accessor.default.members/copy.pass.cpp deleted file mode 100644 index 0f1c58a1b74..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.accessor.default.members/copy.pass.cpp +++ /dev/null @@ -1,31 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -int main(int, char**) -{ - { - using element_t = int; - cuda::std::array d{42, 43}; - cuda::std::default_accessor a0; - cuda::std::default_accessor a(a0); - - assert(a.access(d.data(), 0) == 42); - assert(a.access(d.data(), 1) == 43); - assert(a.offset(d.data(), 0) == d.data()); - assert(a.offset(d.data(), 1) == d.data() + 1); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.accessor.default.members/offset.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.accessor.default.members/offset.pass.cpp deleted file mode 100644 index c69dfbe538e..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.accessor.default.members/offset.pass.cpp +++ /dev/null @@ -1,28 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -int main(int, char**) -{ - { - using element_t = int; - cuda::std::array d{42, 43}; - cuda::std::default_accessor a; - - assert(a.offset(d.data(), 0) == d.data()); - assert(a.offset(d.data(), 1) == d.data() + 1); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.cmp/compare.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.cmp/compare.pass.cpp deleted file mode 100644 index f08ffeea73e..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.cmp/compare.pass.cpp +++ /dev/null @@ -1,58 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - { - using index_t = size_t; - - cuda::std::extents e0; - cuda::std::extents e1; - - assert(e0 == e1); - } - - { - using index_t = size_t; - - cuda::std::extents e0; - cuda::std::extents e1{10}; - - assert(e0 == e1); - } - - { - using index_t = size_t; - - cuda::std::extents e0; - cuda::std::extents e1; - - assert(e0 != e1); - } - - { - using index0_t = size_t; - using index1_t = uint8_t; - - cuda::std::extents e0; - cuda::std::extents e1; - - assert(e0 == e1); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.cons/array.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.cons/array.pass.cpp deleted file mode 100644 index ce60dbad713..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.cons/array.pass.cpp +++ /dev/null @@ -1,73 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -#include "../mdspan.extents.util/extents_util.hpp" -#include "../my_int.hpp" - -// TYPED_TEST(TestExtents, array_ctor) -template -__host__ __device__ void test_array_con() -{ - using TestFixture = TestExtents; - TestFixture t; - - auto e = typename TestFixture::extents_type(t.dyn_sizes); - assert(e == t.exts); -} - -template -struct is_array_cons_avail : cuda::std::false_type -{}; - -template -struct is_array_cons_avail< - T, - IndexType, - N, - cuda::std::enable_if_t>()}), T>::value>> - : cuda::std::true_type -{}; - -template -constexpr bool is_array_cons_avail_v = is_array_cons_avail::value; - -int main(int, char**) -{ - test_array_con>(); - test_array_con>(); - test_array_con>(); - test_array_con>(); - test_array_con>(); - test_array_con>(); - - static_assert(is_array_cons_avail_v, int, 2> == true, ""); - - static_assert(is_array_cons_avail_v, my_int, 2> == true, ""); - -#if !defined(TEST_COMPILER_CUDACC_BELOW_11_3) - // Constraint: rank consistency - static_assert(is_array_cons_avail_v, int, 2> == false, ""); - - // Constraint: convertibility - static_assert(is_array_cons_avail_v, my_int_non_convertible, 1> == false, ""); - - // Constraint: nonthrow-constructibility -# ifndef TEST_COMPILER_BROKEN_SMF_NOEXCEPT - static_assert(is_array_cons_avail_v, my_int_non_nothrow_constructible, 1> == false, ""); -# endif // TEST_COMPILER_BROKEN_SMF_NOEXCEPT -#endif // !defined(TEST_COMPILER_CUDACC_BELOW_11_3) - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.cons/convertible_to_size_t.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.cons/convertible_to_size_t.pass.cpp deleted file mode 100644 index d2467072f36..00000000000 --- 
a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.cons/convertible_to_size_t.pass.cpp +++ /dev/null @@ -1,46 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include -#include - -__host__ __device__ void check(cuda::std::dextents e) -{ - static_assert(e.rank() == 2, ""); - static_assert(e.rank_dynamic() == 2, ""); - - assert(e.extent(0) == 2); - assert(e.extent(1) == 2); -} - -int main(int, char**) -{ - // TEST(TestExtentsCtorStdArrayConvertibleToSizeT, test_extents_ctor_std_array_convertible_to_size_t) - { - cuda::std::array i{2, 2}; - cuda::std::dextents e{i}; - - check(e); - } - - // TEST(TestExtentsCtorStdArrayConvertibleToSizeT, test_extents_ctor_std_span_convertible_to_size_t) - { - cuda::std::array i{2, 2}; - cuda::std::span s(i.data(), 2); - cuda::std::dextents e{s}; - - check(e); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.cons/copy.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.cons/copy.pass.cpp deleted file mode 100644 index 9f31e8ccc94..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.cons/copy.pass.cpp +++ /dev/null @@ -1,61 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -#include "../mdspan.extents.util/extents_util.hpp" - -// TYPED_TEST(TestExtents, copy_ctor) -template -__host__ __device__ void test_copy_con() -{ - using TestFixture = TestExtents; - TestFixture t; - - typename TestFixture::extents_type e{t.exts}; - assert(e == t.exts); -} - -template -struct is_copy_cons_avail : cuda::std::false_type -{}; - -template -struct is_copy_cons_avail()}), T1>::value>> - : cuda::std::true_type -{}; - -template -constexpr bool is_copy_cons_avail_v = is_copy_cons_avail::value; - -int main(int, char**) -{ - test_copy_con>(); - test_copy_con>(); - test_copy_con>(); - test_copy_con>(); - test_copy_con>(); - test_copy_con>(); - - static_assert(is_copy_cons_avail_v, cuda::std::extents> == true, ""); - - // Constraint: rank consistency - static_assert(is_copy_cons_avail_v, cuda::std::extents> == false, ""); - - // Constraint: extents consistency - static_assert(is_copy_cons_avail_v, cuda::std::extents> == false, ""); - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.cons/default.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.cons/default.pass.cpp deleted file mode 100644 index 22d250c2ad5..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.cons/default.pass.cpp +++ /dev/null @@ -1,45 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -#include "../mdspan.extents.util/extents_util.hpp" - -// TYPED_TEST(TestExtents, default_ctor) -template -__host__ __device__ void test_default_con() -{ - using TestFixture = TestExtents; - - auto e = typename TestFixture::extents_type(); - auto e2 = typename TestFixture::extents_type{}; - assert(e == e2); - - for (size_t r = 0; r < e.rank(); ++r) - { - bool is_dynamic = (e.static_extent(r) == cuda::std::dynamic_extent); - assert(e.extent(r) == (is_dynamic ? 0 : e.static_extent(r))); - } -} - -int main(int, char**) -{ - test_default_con>(); - test_default_con>(); - test_default_con>(); - test_default_con>(); - test_default_con>(); - test_default_con>(); - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.cons/param_pack.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.cons/param_pack.pass.cpp deleted file mode 100644 index cb1c26450f1..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.cons/param_pack.pass.cpp +++ /dev/null @@ -1,91 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -#include "../my_int.hpp" - -__host__ __device__ void check(cuda::std::dextents e) -{ - static_assert(e.rank() == 2, ""); - static_assert(e.rank_dynamic() == 2, ""); - - assert(e.extent(0) == 2); - assert(e.extent(1) == 2); -} - -template -struct is_param_pack_cons_avail : cuda::std::false_type -{}; - -template -struct is_param_pack_cons_avail< - cuda::std::enable_if_t()...}), T>::value>, - T, - IndexTypes...> : cuda::std::true_type -{}; - -template -constexpr bool is_param_pack_cons_avail_v = is_param_pack_cons_avail::value; - -int main(int, char**) -{ - { - cuda::std::dextents e{2, 2}; - - check(e); - } - - { - cuda::std::dextents e(2, 2); - - check(e); - } - -#if defined(__cpp_deduction_guides) && defined(__MDSPAN_USE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION) - { - cuda::std::extents e{2, 2}; - - check(e); - } - - { - cuda::std::extents e(2, 2); - - check(e); - } -#endif - - { - cuda::std::dextents e{2, 2}; - - check(e); - } - - static_assert(is_param_pack_cons_avail_v, int, int> == true, ""); - - static_assert(is_param_pack_cons_avail_v, my_int, my_int> == true, ""); - - // Constraint: rank consistency - static_assert(is_param_pack_cons_avail_v, int, int> == false, ""); - - // Constraint: convertibility - static_assert(is_param_pack_cons_avail_v, my_int_non_convertible> == false, ""); - - // Constraint: nonthrow-constructibility -#ifndef TEST_COMPILER_BROKEN_SMF_NOEXCEPT - static_assert(is_param_pack_cons_avail_v, my_int_non_nothrow_constructible> == false, ""); -#endif // TEST_COMPILER_BROKEN_SMF_NOEXCEPT - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.cons/span.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.cons/span.pass.cpp deleted file mode 100644 index fbb087f7184..00000000000 --- 
a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.cons/span.pass.cpp +++ /dev/null @@ -1,74 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -#include "../mdspan.extents.util/extents_util.hpp" -#include "../my_int.hpp" - -// TYPED_TEST(TestExtents, array_ctor) -template -__host__ __device__ void test_span_con() -{ - using TestFixture = TestExtents; - TestFixture t; - - auto s = cuda::std::span(t.dyn_sizes); - auto e = typename TestFixture::extents_type(s); - assert(e == t.exts); -} - -template -struct is_span_cons_avail : cuda::std::false_type -{}; - -template -struct is_span_cons_avail< - T, - IndexType, - N, - cuda::std::enable_if_t>()}), T>::value>> - : cuda::std::true_type -{}; - -template -constexpr bool is_span_cons_avail_v = is_span_cons_avail::value; - -int main(int, char**) -{ - test_span_con>(); - test_span_con>(); - test_span_con>(); - test_span_con>(); - test_span_con>(); - test_span_con>(); - - static_assert(is_span_cons_avail_v, int, 2> == true, ""); - - static_assert(is_span_cons_avail_v, my_int, 2> == true, ""); - -#if !defined(TEST_COMPILER_CUDACC_BELOW_11_3) - // Constraint: rank consistency - static_assert(is_span_cons_avail_v, int, 2> == false, ""); - - // Constraint: convertibility - static_assert(is_span_cons_avail_v, my_int_non_convertible, 1> == false, ""); - - // Constraint: nonthrow-constructibility -# ifndef TEST_COMPILER_BROKEN_SMF_NOEXCEPT - static_assert(is_span_cons_avail_v, my_int_non_nothrow_constructible, 1> 
== false, ""); -# endif // TEST_COMPILER_BROKEN_SMF_NOEXCEPT -#endif // !defined(TEST_COMPILER_CUDACC_BELOW_11_3) - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.obs/extent.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.obs/extent.pass.cpp deleted file mode 100644 index fe56d16ce52..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.obs/extent.pass.cpp +++ /dev/null @@ -1,66 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -#include "../mdspan.extents.util/extents_util.hpp" - -template -struct TestExtentsExtent; -template -struct TestExtentsExtent : public TestExtents -{ - using base = TestExtents; - using extents_type = typename TestExtents::extents_type; - - __host__ __device__ void test_extent() - { - size_t result[extents_type::rank()]; - - extents_type _exts(DynamicSizes...); - for (size_t r = 0; r < _exts.rank(); r++) - { - result[r] = _exts.extent(r); - } - - int dyn_count = 0; - for (size_t r = 0; r != extents_type::rank(); r++) - { - bool is_dynamic = base::static_sizes[r] == cuda::std::dynamic_extent; - auto expected = is_dynamic ? 
base::dyn_sizes[dyn_count++] : base::static_sizes[r]; - - assert(result[r] == expected); - } - } -}; - -// TYPED_TEST(TestExtents, extent) -template -__host__ __device__ void test_extent() -{ - TestExtentsExtent test; - - test.test_extent(); -} - -int main(int, char**) -{ - test_extent>(); - test_extent>(); - test_extent>(); - test_extent>(); - test_extent>(); - test_extent>(); - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.obs/rank.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.obs/rank.pass.cpp deleted file mode 100644 index c6618239844..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.obs/rank.pass.cpp +++ /dev/null @@ -1,65 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -#include "../mdspan.extents.util/extents_util.hpp" -#include - -template -struct TestExtentsRank; -template -struct TestExtentsRank : public TestExtents -{ - using base = TestExtents; - using extents_type = typename TestExtents::extents_type; - - __host__ __device__ void test_rank() - { - size_t result[2]; - - extents_type _exts(DynamicSizes...); - // Silencing an unused warning in nvc++ the condition will never be true - size_t dyn_val = _exts.rank() > 0 ? static_cast(_exts.extent(0)) : 1; - result[0] = dyn_val > 1e9 ? 
dyn_val : _exts.rank(); - result[1] = _exts.rank_dynamic(); - - assert(result[0] == base::static_sizes.size()); - assert(result[1] == base::dyn_sizes.size()); - - // Makes sure that `rank()` returns a constexpr - cuda::std::array a; - unused(a); - } -}; - -// TYPED_TEST(TestExtents, rank) -template -__host__ __device__ void test_rank() -{ - TestExtentsRank test; - - test.test_rank(); -} - -int main(int, char**) -{ - test_rank>(); - test_rank>(); - test_rank>(); - test_rank>(); - test_rank>(); - test_rank>(); - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.obs/static_extent.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.obs/static_extent.pass.cpp deleted file mode 100644 index da17608b7f9..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.obs/static_extent.pass.cpp +++ /dev/null @@ -1,64 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -#include "../mdspan.extents.util/extents_util.hpp" - -template -struct TestExtentsStaticExtent; -template -struct TestExtentsStaticExtent : public TestExtents -{ - using base = TestExtents; - using extents_type = typename TestExtents::extents_type; - - __host__ __device__ void test_static_extent() - { - size_t result[extents_type::rank()]; - - extents_type _exts(DynamicSizes...); - for (size_t r = 0; r < _exts.rank(); r++) - { - // Silencing an unused warning in nvc++ the condition will never be true - size_t dyn_val = static_cast(_exts.extent(r)); - result[r] = dyn_val > 1e9 ? dyn_val : _exts.static_extent(r); - } - - for (size_t r = 0; r != extents_type::rank(); r++) - { - assert(result[r] == base::static_sizes[r]); - } - } -}; - -// TYPED_TEST(TestExtents, static_extent) -template -__host__ __device__ void test_static_extent() -{ - TestExtentsStaticExtent test; - - test.test_static_extent(); -} - -int main(int, char**) -{ - test_static_extent>(); - test_static_extent>(); - test_static_extent>(); - test_static_extent>(); - test_static_extent>(); - test_static_extent>(); - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.overview/extents_element.fail.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.overview/extents_element.fail.cpp deleted file mode 100644 index dde72905b13..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.overview/extents_element.fail.cpp +++ /dev/null @@ -1,45 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -__host__ __device__ void check(cuda::std::dextents e) -{ - static_assert(e.rank() == 2, ""); - static_assert(e.rank_dynamic() == 2, ""); - - assert(e.extent(0) == 2); - assert(e.extent(1) == 2); -} - -struct dummy -{}; - -int main(int, char**) -{ - { - cuda::std::dextents e{2, 2}; - - check(e); - } - - // Mandate: each element of Extents is either equal to dynamic_extent, or is representable as a value of type - // IndexType - { - cuda::std::dextents e{dummy{}, 2}; - - check(e); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.overview/index_type.fail.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.overview/index_type.fail.cpp deleted file mode 100644 index 26dbde259f4..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.overview/index_type.fail.cpp +++ /dev/null @@ -1,44 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -__host__ __device__ void check(cuda::std::dextents e) -{ - static_assert(e.rank() == 2, ""); - static_assert(e.rank_dynamic() == 2, ""); - - assert(e.extent(0) == 2); - assert(e.extent(1) == 2); -} - -struct dummy -{}; - -int main(int, char**) -{ - { - cuda::std::dextents e{2, 2}; - - check(e); - } - - // Mandate: IndexType is a signed or unsigned integer type - { - cuda::std::dextents e{2, 2}; - - check(e); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.util/extents_util.hpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.util/extents_util.hpp deleted file mode 100644 index a77fe2714bb..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.extents.util/extents_util.hpp +++ /dev/null @@ -1,43 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -#include -#include - -#define TEST_TYPE \ - cuda::std::tuple, cuda::std::integer_sequence> - -template -struct TestExtents; -template -struct TestExtents< - cuda::std::tuple, cuda::std::integer_sequence>> -{ - using extents_type = cuda::std::extents; - // Double Braces here to make it work with GCC 5 - // Otherwise: "error: array must be initialized with a brace-enclosed initializer" - const cuda::std::array static_sizes{{Extents...}}; - const cuda::std::array dyn_sizes{{DynamicSizes...}}; - extents_type exts{DynamicSizes...}; -}; - -template -using _sizes = cuda::std::integer_sequence; -template -using _exts = cuda::std::extents; - -constexpr auto dyn = cuda::std::dynamic_extent; - -using extents_test_types = - cuda::std::tuple, _sizes<>>, - cuda::std::tuple<_exts, _sizes<10>>, - cuda::std::tuple<_exts<10, 3>, _sizes<>>, - cuda::std::tuple<_exts, _sizes<10>>, - cuda::std::tuple<_exts<10, dyn>, _sizes<3>>, - cuda::std::tuple<_exts, _sizes<10, 3>>>; diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.cons/copy.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.cons/copy.pass.cpp deleted file mode 100644 index 2f5ca9a6f56..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.cons/copy.pass.cpp +++ /dev/null @@ -1,49 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -#include "../mdspan.layout.util/layout_util.hpp" - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - using index_t = size_t; - - { - cuda::std::layout_left::mapping> m0{cuda::std::dextents{16, 32}}; - cuda::std::layout_left::mapping> m{m0}; - - static_assert(m.is_exhaustive() == true, ""); - static_assert(m.extents().rank() == 2, ""); - static_assert(m.extents().rank_dynamic() == 2, ""); - - assert(m.extents().extent(0) == 16); - assert(m.extents().extent(1) == 32); - assert(m.stride(0) == 1); - assert(m.stride(1) == 16); - } - - // Constraint: is_constructible_v is true - { - using mapping0_t = cuda::std::layout_left::mapping>; - using mapping1_t = cuda::std::layout_left::mapping>; - using mappingd_t = cuda::std::layout_left::mapping>; - - static_assert(is_cons_avail_v == true, ""); - static_assert(is_cons_avail_v == false, ""); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.cons/layout_right_init.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.cons/layout_right_init.pass.cpp deleted file mode 100644 index e16b201f4bf..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.cons/layout_right_init.pass.cpp +++ /dev/null @@ -1,55 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -#include "../mdspan.layout.util/layout_util.hpp" - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - using index_t = size_t; - - { - cuda::std::layout_right::mapping> m_right{cuda::std::dextents{16}}; - cuda::std::layout_left ::mapping> m(m_right); - - static_assert(m.is_exhaustive() == true, ""); - - assert(m.extents().rank() == 1); - assert(m.extents().rank_dynamic() == 1); - assert(m.extents().extent(0) == 16); - assert(m.stride(0) == 1); - } - - // Constraint: extents_type::rank() <= 1 is true - { - using mapping0_t = cuda::std::layout_right::mapping>; - using mapping1_t = cuda::std::layout_left ::mapping>; - - static_assert(is_cons_avail_v == false, ""); - } - - // Constraint: is_constructible_v is true - { - using mapping0_t = cuda::std::layout_right::mapping>; - using mapping1_t = cuda::std::layout_left ::mapping>; - using mappingd_t = cuda::std::layout_left ::mapping>; - - static_assert(is_cons_avail_v == true, ""); - static_assert(is_cons_avail_v == false, ""); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.cons/layout_stride_init.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.cons/layout_stride_init.pass.cpp deleted file mode 100644 index 0c620589f01..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.cons/layout_stride_init.pass.cpp +++ /dev/null @@ -1,51 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -#include "../mdspan.layout.util/layout_util.hpp" - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - using index_t = size_t; - - { - cuda::std::array a{1, 16}; - cuda::std::layout_stride::mapping> m_stride{ - cuda::std::dextents{16, 32}, a}; - cuda::std::layout_left ::mapping> m(m_stride); - - static_assert(m.is_exhaustive() == true, ""); - - assert(m.extents().rank() == 2); - assert(m.extents().rank_dynamic() == 2); - assert(m.extents().extent(0) == 16); - assert(m.extents().extent(1) == 32); - assert(m.stride(0) == 1); - assert(m.stride(1) == 16); - } - - // Constraint: is_constructible_v is true - { - using mapping0_t = cuda::std::layout_stride::mapping>; - using mapping1_t = cuda::std::layout_left ::mapping>; - using mappingd_t = cuda::std::layout_left::mapping>; - - static_assert(is_cons_avail_v == true, ""); - static_assert(is_cons_avail_v == false, ""); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.cons/list_init.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.cons/list_init.pass.cpp deleted file mode 100644 index 7ad04d122b5..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.cons/list_init.pass.cpp +++ /dev/null @@ -1,70 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include -#include -#include - -#include "../mdspan.layout.util/layout_util.hpp" - -constexpr auto dyn = cuda::std::dynamic_extent; - -template -using test_left_type = cuda::std::tuple, - cuda::std::integer_sequence>; - -__host__ __device__ void typed_test_default_ctor_left() -{ - typed_test_default_ctor>>(); - typed_test_default_ctor, 10>>(); - typed_test_default_ctor, 5>>(); - typed_test_default_ctor, 10>>(); - typed_test_default_ctor>>(); -} - -__host__ __device__ void typed_test_compatible_left() -{ - typed_test_compatible, _sizes<10>, _exts<10>, _sizes<>>>(); - typed_test_compatible, _sizes<5>, _exts<5, dyn>, _sizes<10>>>(); - typed_test_compatible, _sizes<5, 10>, _exts<5, dyn>, _sizes<10>>>(); - typed_test_compatible, _sizes<5, 10>, _exts, _sizes<5>>>(); - typed_test_compatible, _sizes<5, 10>, _exts<5, 10>, _sizes<>>>(); - typed_test_compatible, _sizes<>, _exts<5, dyn>, _sizes<10>>>(); - typed_test_compatible, _sizes<>, _exts, _sizes<5>>>(); - typed_test_compatible, _sizes<5, 10>, _exts<5, dyn, 15>, _sizes<10>>>(); - typed_test_compatible, _sizes<>, _exts<5, dyn, 15>, _sizes<10>>>(); - typed_test_compatible, _sizes<>, _exts, _sizes<5, 10, 15>>>(); -} - -int main(int, char**) -{ - typed_test_default_ctor_left(); - - typed_test_compatible_left(); - - // TEST(TestLayoutLeftListInitialization, test_layout_left_extent_initialization) - { - cuda::std::layout_left::mapping> m{cuda::std::dextents{16, 32}}; - - static_assert(m.is_exhaustive() == true, ""); - static_assert(m.extents().rank() == 2, ""); - static_assert(m.extents().rank_dynamic() == 2, ""); - - assert(m.extents().extent(0) == 16); - assert(m.extents().extent(1) == 32); - assert(m.stride(0) == 1); - 
assert(m.stride(1) == 16); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/compare.fail.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/compare.fail.cpp deleted file mode 100644 index e63e0093528..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/compare.fail.cpp +++ /dev/null @@ -1,34 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include - -int main(int, char**) -{ - using index_t = size_t; - using ext2d_t = cuda::std::extents; - using ext3d_t = cuda::std::extents; - - // Constraint: rank consistency - // This constraint is implemented in a different way in the reference implementation. 
There will be an overload - // function match but it will return false if the ranks are not consistent - { - constexpr ext2d_t e0; - constexpr ext3d_t e1; - constexpr cuda::std::layout_left::mapping m0{e0}; - constexpr cuda::std::layout_left::mapping m1{e1}; - - static_assert(m0 == m1, ""); // expected-error - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/compare.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/compare.pass.cpp deleted file mode 100644 index bc1da6482b6..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/compare.pass.cpp +++ /dev/null @@ -1,59 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -#include "../mdspan.layout.util/layout_util.hpp" - -constexpr auto dyn = cuda::std::dynamic_extent; - -__host__ __device__ void typed_test_compare_left() -{ - typed_test_compare, _sizes<10>, _exts<10>, _sizes<>>>(); - typed_test_compare, _sizes<5>, _exts<5, dyn>, _sizes<10>>>(); - typed_test_compare, _sizes<5, 10>, _exts<5, dyn>, _sizes<10>>>(); - typed_test_compare, _sizes<5, 10>, _exts, _sizes<5>>>(); - typed_test_compare, _sizes<5, 10>, _exts<5, 10>, _sizes<>>>(); - typed_test_compare, _sizes<>, _exts<5, dyn>, _sizes<10>>>(); - typed_test_compare, _sizes<>, _exts, _sizes<5>>>(); - typed_test_compare, _sizes<5, 10>, _exts<5, dyn, 15>, _sizes<10>>>(); - typed_test_compare, _sizes<>, _exts<5, dyn, 15>, _sizes<10>>>(); - typed_test_compare, _sizes<>, _exts, _sizes<5, 10, 15>>>(); -} - -int main(int, char**) -{ - typed_test_compare_left(); - - using index_t = size_t; - using ext2d_t = cuda::std::extents; - - { - ext2d_t e{64, 128}; - cuda::std::layout_left::mapping m0{e}; - cuda::std::layout_left::mapping m{m0}; - - assert(m == m0); - } - - { - ext2d_t e0{64, 128}; - ext2d_t e1{16, 32}; - cuda::std::layout_left::mapping m0{e0}; - cuda::std::layout_left::mapping m1{e1}; - - assert(m0 != m1); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/extents.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/extents.pass.cpp deleted file mode 100644 index d4c16efaa02..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/extents.pass.cpp +++ /dev/null @@ -1,49 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
-// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - using index_t = int; - using ext1d_t = cuda::std::extents; - using ext2d_t = cuda::std::extents; - - { - ext2d_t e{16, 32}; - cuda::std::layout_left::mapping m{e}; - - assert(m.extents() == e); - } - - { - ext1d_t e{16}; - cuda::std::layout_right::mapping m_right{e}; - cuda::std::layout_left ::mapping m{m_right}; - - assert(m.extents() == e); - } - - { - ext2d_t e{16, 32}; - cuda::std::array a{1, 16}; - cuda::std::layout_stride::mapping m_stride{e, a}; - cuda::std::layout_left ::mapping m{m_stride}; - - assert(m.extents() == e); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/is_exhaustive.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/is_exhaustive.pass.cpp deleted file mode 100644 index 8290a38859b..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/is_exhaustive.pass.cpp +++ /dev/null @@ -1,38 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - using index_t = size_t; - - { - cuda::std::layout_left::mapping> m; - - static_assert(m.is_always_exhaustive() == true, ""); - assert(m.is_exhaustive() == true); - } - - { - cuda::std::extents e{16, 32}; - cuda::std::layout_left::mapping> m{e}; - - static_assert(m.is_always_exhaustive() == true, ""); - assert(m.is_exhaustive() == true); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/is_strided.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/is_strided.pass.cpp deleted file mode 100644 index 9670f88558d..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/is_strided.pass.cpp +++ /dev/null @@ -1,29 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - { - cuda::std::extents e{64, 128}; - cuda::std::layout_left::mapping> m{e}; - - static_assert(m.is_always_strided() == true, ""); - assert(m.is_strided() == true); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/is_unique.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/is_unique.pass.cpp deleted file mode 100644 index 484fc45500f..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/is_unique.pass.cpp +++ /dev/null @@ -1,38 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - using index_t = size_t; - - { - cuda::std::layout_left::mapping> m; - - static_assert(m.is_always_unique() == true, ""); - assert(m.is_unique() == true); - } - - { - cuda::std::extents e{16, 32}; - cuda::std::layout_left::mapping> m{e}; - - static_assert(m.is_always_unique() == true, ""); - assert(m.is_unique() == true); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/paren_op.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/paren_op.pass.cpp deleted file mode 100644 index ab69990f3dd..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/paren_op.pass.cpp +++ /dev/null @@ -1,77 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -#include "../mdspan.layout.util/layout_util.hpp" -#include "../my_int.hpp" -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - using index_t = int; - - { - cuda::std::extents e; - cuda::std::layout_left::mapping> m{e}; - - assert(m(5) == 5); - } - - { - cuda::std::extents e{16, 32}; - cuda::std::layout_left::mapping> m{e}; - - assert(m(2, 1) == 2 * 1 + 1 * 16); - } - - { - cuda::std::extents e{16, 32, 8}; - cuda::std::layout_left::mapping> m{e}; - - assert(m(2, 1, 3) == 2 * 1 + 1 * 16 + 3 * 16 * 32); - } - - // Indices are of a type implicitly convertible to index_type - { - cuda::std::extents e{16, 32}; - cuda::std::layout_left::mapping> m{e}; - - assert(m(my_int(2), my_int(1)) == 2 * 1 + 1 * 16); - } - - // Constraints - { - cuda::std::extents e; - cuda::std::layout_left::mapping> m{e}; - - unused(m); - - static_assert(is_paren_op_avail_v == true, ""); - - // rank consistency - static_assert(is_paren_op_avail_v == false, ""); - - // convertibility - static_assert(is_paren_op_avail_v == false, ""); - - // nothrow-constructibility -#ifndef TEST_COMPILER_BROKEN_SMF_NOEXCEPT - static_assert(is_paren_op_avail_v == false, ""); -#endif // TEST_COMPILER_BROKEN_SMF_NOEXCEPT - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/required_span_size.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/required_span_size.pass.cpp deleted file mode 100644 index a609f53d386..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/required_span_size.pass.cpp +++ /dev/null @@ -1,45 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 
with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - using index_t = int; - using ext2d_t = cuda::std::extents; - - { - cuda::std::extents e; - cuda::std::layout_left::mapping> m{e}; - - assert(m.required_span_size() == 16); - } - - { - ext2d_t e{16, 32}; - cuda::std::layout_left::mapping m{e}; - - assert(m.required_span_size() == 16 * 32); - } - - { - ext2d_t e{16, 0}; - cuda::std::layout_left::mapping m{e}; - - assert(m.required_span_size() == 0); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/stride.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/stride.pass.cpp deleted file mode 100644 index 0890a9c0880..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.left.obs/stride.pass.cpp +++ /dev/null @@ -1,56 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -#include "../mdspan.layout.util/layout_util.hpp" -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - using index_t = size_t; - using ext0d_t = cuda::std::extents; - using ext2d_t = cuda::std::extents; - - { - ext2d_t e{64, 128}; - cuda::std::layout_left::mapping m{e}; - - assert(m.stride(0) == 1); - assert(m.stride(1) == 64); - - static_assert(is_stride_avail_v == true, ""); - } - - { - ext2d_t e{1, 128}; - cuda::std::layout_left::mapping m{e}; - - assert(m.stride(0) == 1); - assert(m.stride(1) == 1); - } - - // constraint: extents_­type?::?rank() > 0 - { - ext0d_t e{}; - cuda::std::layout_left::mapping m{e}; - - unused(m); - - static_assert(is_stride_avail_v == false, ""); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.cons/copy.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.cons/copy.pass.cpp deleted file mode 100644 index 20261835f4c..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.cons/copy.pass.cpp +++ /dev/null @@ -1,49 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -#include "../mdspan.layout.util/layout_util.hpp" - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - using index_t = size_t; - - { - cuda::std::layout_right::mapping> m0{cuda::std::dextents{16, 32}}; - cuda::std::layout_right::mapping> m{m0}; - - static_assert(m.is_exhaustive() == true, ""); - static_assert(m.extents().rank() == 2, ""); - static_assert(m.extents().rank_dynamic() == 2, ""); - - assert(m.extents().extent(0) == 16); - assert(m.extents().extent(1) == 32); - assert(m.stride(0) == 32); - assert(m.stride(1) == 1); - } - - // Constraint: is_constructible_v is true - { - using mapping0_t = cuda::std::layout_right::mapping>; - using mapping1_t = cuda::std::layout_right::mapping>; - using mappingd_t = cuda::std::layout_right::mapping>; - - static_assert(is_cons_avail_v == true, ""); - static_assert(is_cons_avail_v == false, ""); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.cons/layout_left_init.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.cons/layout_left_init.pass.cpp deleted file mode 100644 index 2f917a6e708..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.cons/layout_left_init.pass.cpp +++ /dev/null @@ -1,55 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -#include "../mdspan.layout.util/layout_util.hpp" - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - using index_t = size_t; - - { - cuda::std::layout_left ::mapping> m_right{cuda::std::dextents{16}}; - cuda::std::layout_right::mapping> m(m_right); - - static_assert(m.is_exhaustive() == true, ""); - - assert(m.extents().rank() == 1); - assert(m.extents().rank_dynamic() == 1); - assert(m.extents().extent(0) == 16); - assert(m.stride(0) == 1); - } - - // Constraint: extents_type::rank() <= 1 is true - { - using mapping0_t = cuda::std::layout_left::mapping>; - using mapping1_t = cuda::std::layout_right::mapping>; - - static_assert(is_cons_avail_v == false, ""); - } - - // Constraint: is_constructible_v is true - { - using mapping0_t = cuda::std::layout_left::mapping>; - using mapping1_t = cuda::std::layout_right::mapping>; - using mappingd_t = cuda::std::layout_right::mapping>; - - static_assert(is_cons_avail_v == true, ""); - static_assert(is_cons_avail_v == false, ""); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.cons/layout_stride_init.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.cons/layout_stride_init.pass.cpp deleted file mode 100644 index c7c767bff38..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.cons/layout_stride_init.pass.cpp +++ /dev/null @@ -1,51 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -#include "../mdspan.layout.util/layout_util.hpp" - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - using index_t = size_t; - - { - cuda::std::array a{32, 1}; - cuda::std::layout_stride::mapping> m_stride{ - cuda::std::dextents{16, 32}, a}; - cuda::std::layout_right ::mapping> m(m_stride); - - static_assert(m.is_exhaustive() == true, ""); - - assert(m.extents().rank() == 2); - assert(m.extents().rank_dynamic() == 2); - assert(m.extents().extent(0) == 16); - assert(m.extents().extent(1) == 32); - assert(m.stride(0) == 32); - assert(m.stride(1) == 1); - } - - // Constraint: is_constructible_v is true - { - using mapping0_t = cuda::std::layout_stride::mapping>; - using mapping1_t = cuda::std::layout_right ::mapping>; - using mappingd_t = cuda::std::layout_right ::mapping>; - - static_assert(is_cons_avail_v == true, ""); - static_assert(is_cons_avail_v == false, ""); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.cons/list_init.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.cons/list_init.pass.cpp deleted file mode 100644 index 08fbd4093f2..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.cons/list_init.pass.cpp +++ /dev/null @@ -1,70 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include -#include -#include - -#include "../mdspan.layout.util/layout_util.hpp" - -constexpr auto dyn = cuda::std::dynamic_extent; - -template -using test_right_type = cuda::std::tuple, - cuda::std::integer_sequence>; - -__host__ __device__ void typed_test_default_ctor_right() -{ - typed_test_default_ctor>>(); - typed_test_default_ctor, 10>>(); - typed_test_default_ctor, 5>>(); - typed_test_default_ctor, 10>>(); - typed_test_default_ctor>>(); -} - -__host__ __device__ void typed_test_compatible_right() -{ - typed_test_compatible, _sizes<10>, _exts<10>, _sizes<>>>(); - typed_test_compatible, _sizes<5>, _exts<5, dyn>, _sizes<10>>>(); - typed_test_compatible, _sizes<5, 10>, _exts<5, dyn>, _sizes<10>>>(); - typed_test_compatible, _sizes<5, 10>, _exts, _sizes<5>>>(); - typed_test_compatible, _sizes<5, 10>, _exts<5, 10>, _sizes<>>>(); - typed_test_compatible, _sizes<>, _exts<5, dyn>, _sizes<10>>>(); - typed_test_compatible, _sizes<>, _exts, _sizes<5>>>(); - typed_test_compatible, _sizes<5, 10>, _exts<5, dyn, 15>, _sizes<10>>>(); - typed_test_compatible, _sizes<>, _exts<5, dyn, 15>, _sizes<10>>>(); - typed_test_compatible, _sizes<>, _exts, _sizes<5, 10, 15>>>(); -} - -int main(int, char**) -{ - typed_test_default_ctor_right(); - - typed_test_compatible_right(); - - // TEST(TestLayoutRightListInitialization, test_layout_right_extent_initialization) - { - cuda::std::layout_right::mapping> m{cuda::std::dextents{16, 32}}; - - static_assert(m.is_exhaustive() == true, ""); - static_assert(m.extents().rank() == 2, ""); - static_assert(m.extents().rank_dynamic() == 2, ""); - - assert(m.extents().extent(0) == 16); - assert(m.extents().extent(1) == 32); - assert(m.stride(0) == 
32); - assert(m.stride(1) == 1); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/compare.fail.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/compare.fail.cpp deleted file mode 100644 index bd7146a53a3..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/compare.fail.cpp +++ /dev/null @@ -1,34 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include - -int main(int, char**) -{ - using index_t = size_t; - using ext2d_t = cuda::std::extents; - using ext3d_t = cuda::std::extents; - - // Constraint: rank consistency - // This constraint is implemented in a different way in the reference implementation. 
There will be an overload - // function match but it will return false if the ranks are not consistent - { - constexpr ext2d_t e0; - constexpr ext3d_t e1; - constexpr cuda::std::layout_right::mapping m0{e0}; - constexpr cuda::std::layout_right::mapping m1{e1}; - - static_assert(m0 == m1, ""); // expected-error - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/compare.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/compare.pass.cpp deleted file mode 100644 index efc4aafa9cb..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/compare.pass.cpp +++ /dev/null @@ -1,59 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -#include "../mdspan.layout.util/layout_util.hpp" - -constexpr auto dyn = cuda::std::dynamic_extent; - -__host__ __device__ void typed_test_compare_right() -{ - typed_test_compare, _sizes<10>, _exts<10>, _sizes<>>>(); - typed_test_compare, _sizes<5>, _exts<5, dyn>, _sizes<10>>>(); - typed_test_compare, _sizes<5, 10>, _exts<5, dyn>, _sizes<10>>>(); - typed_test_compare, _sizes<5, 10>, _exts, _sizes<5>>>(); - typed_test_compare, _sizes<5, 10>, _exts<5, 10>, _sizes<>>>(); - typed_test_compare, _sizes<>, _exts<5, dyn>, _sizes<10>>>(); - typed_test_compare, _sizes<>, _exts, _sizes<5>>>(); - typed_test_compare, _sizes<5, 10>, _exts<5, dyn, 15>, _sizes<10>>>(); - typed_test_compare, _sizes<>, _exts<5, dyn, 15>, _sizes<10>>>(); - typed_test_compare, _sizes<>, _exts, _sizes<5, 10, 15>>>(); -} - -int main(int, char**) -{ - typed_test_compare_right(); - - using index_t = size_t; - using ext2d_t = cuda::std::extents; - - { - ext2d_t e{64, 128}; - cuda::std::layout_right::mapping m0{e}; - cuda::std::layout_right::mapping m{m0}; - - assert(m == m0); - } - - { - ext2d_t e0{64, 128}; - ext2d_t e1{16, 32}; - cuda::std::layout_right::mapping m0{e0}; - cuda::std::layout_right::mapping m1{e1}; - - assert(m0 != m1); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/extents.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/extents.pass.cpp deleted file mode 100644 index dca60133489..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/extents.pass.cpp +++ /dev/null @@ -1,49 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
-// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - using index_t = int; - using ext1d_t = cuda::std::extents; - using ext2d_t = cuda::std::extents; - - { - ext2d_t e{16, 32}; - cuda::std::layout_right::mapping m{e}; - - assert(m.extents() == e); - } - - { - ext1d_t e{16}; - cuda::std::layout_left ::mapping m_left{e}; - cuda::std::layout_right::mapping m(m_left); - - assert(m.extents() == e); - } - - { - ext2d_t e{16, 32}; - cuda::std::array a{32, 1}; - cuda::std::layout_stride::mapping m_stride{e, a}; - cuda::std::layout_right ::mapping m(m_stride); - - assert(m.extents() == e); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/is_exhaustive.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/is_exhaustive.pass.cpp deleted file mode 100644 index dc5ff7e84c7..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/is_exhaustive.pass.cpp +++ /dev/null @@ -1,38 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - using index_t = size_t; - - { - cuda::std::layout_right::mapping> m; - - static_assert(m.is_always_exhaustive() == true, ""); - assert(m.is_exhaustive() == true); - } - - { - cuda::std::extents e{16, 32}; - cuda::std::layout_right::mapping> m{e}; - - static_assert(m.is_always_exhaustive() == true, ""); - assert(m.is_exhaustive() == true); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/is_strided.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/is_strided.pass.cpp deleted file mode 100644 index 8bd5ee2b8e0..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/is_strided.pass.cpp +++ /dev/null @@ -1,29 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - { - cuda::std::extents e{64, 128}; - cuda::std::layout_right::mapping> m{e}; - - static_assert(m.is_always_strided() == true, ""); - assert(m.is_strided() == true); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/is_unique.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/is_unique.pass.cpp deleted file mode 100644 index 7676721a433..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/is_unique.pass.cpp +++ /dev/null @@ -1,38 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - using index_t = size_t; - - { - cuda::std::layout_right::mapping> m; - - static_assert(m.is_always_unique() == true, ""); - assert(m.is_unique() == true); - } - - { - cuda::std::extents e{16, 32}; - cuda::std::layout_right::mapping> m{e}; - - static_assert(m.is_always_unique() == true, ""); - assert(m.is_unique() == true); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/paren_op.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/paren_op.pass.cpp deleted file mode 100644 index 0d026d3cc9a..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/paren_op.pass.cpp +++ /dev/null @@ -1,77 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -#include "../mdspan.layout.util/layout_util.hpp" -#include "../my_int.hpp" -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - using index_t = int; - - { - cuda::std::extents e; - cuda::std::layout_right::mapping> m{e}; - - assert(m(5) == 5); - } - - { - cuda::std::extents e{16, 32}; - cuda::std::layout_right::mapping> m{e}; - - assert(m(2, 1) == 2 * 32 + 1 * 1); - } - - { - cuda::std::extents e{16, 32, 8}; - cuda::std::layout_right::mapping> m{e}; - - assert(m(2, 1, 3) == 2 * 32 * 8 + 1 * 8 + 3 * 1); - } - - // Indices are of a type implicitly convertible to index_type - { - cuda::std::extents e{16, 32}; - cuda::std::layout_right::mapping> m{e}; - - assert(m(my_int(2), my_int(1)) == 2 * 32 + 1 * 1); - } - - // Constraints - { - cuda::std::extents e; - cuda::std::layout_right::mapping> m{e}; - - unused(m); - - static_assert(is_paren_op_avail_v == true, ""); - - // rank consistency - static_assert(is_paren_op_avail_v == false, ""); - - // convertibility - static_assert(is_paren_op_avail_v == false, ""); - - // nothrow-constructibility -#ifndef TEST_COMPILER_BROKEN_SMF_NOEXCEPT - static_assert(is_paren_op_avail_v == false, ""); -#endif // TEST_COMPILER_BROKEN_SMF_NOEXCEPT - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/required_span_size.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/required_span_size.pass.cpp deleted file mode 100644 index 35c1a07e8b7..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/required_span_size.pass.cpp +++ /dev/null @@ -1,45 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache 
License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - using index_t = int; - using ext2d_t = cuda::std::extents; - - { - cuda::std::extents e; - cuda::std::layout_right::mapping> m{e}; - - assert(m.required_span_size() == 16); - } - - { - ext2d_t e{16, 32}; - cuda::std::layout_right::mapping m{e}; - - assert(m.required_span_size() == 16 * 32); - } - - { - ext2d_t e{16, 0}; - cuda::std::layout_right::mapping m{e}; - - assert(m.required_span_size() == 0); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/stride.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/stride.pass.cpp deleted file mode 100644 index de77b14c05b..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.right.obs/stride.pass.cpp +++ /dev/null @@ -1,56 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -#include "../mdspan.layout.util/layout_util.hpp" -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - using index_t = size_t; - using ext0d_t = cuda::std::extents; - using ext2d_t = cuda::std::extents; - - { - ext2d_t e{64, 128}; - cuda::std::layout_right::mapping m{e}; - - assert(m.stride(0) == 128); - assert(m.stride(1) == 1); - - static_assert(is_stride_avail_v == true, ""); - } - - { - ext2d_t e{64, 1}; - cuda::std::layout_right::mapping m{e}; - - assert(m.stride(0) == 1); - assert(m.stride(1) == 1); - } - - // constraint: extents_type::rank() > 0 - { - ext0d_t e{}; - cuda::std::layout_right::mapping m{e}; - - unused(m); - - static_assert(is_stride_avail_v == false, ""); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.cons/list_init.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.cons/list_init.pass.cpp deleted file mode 100644 index d9b7ae156ca..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.cons/list_init.pass.cpp +++ /dev/null @@ -1,61 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include -#include - -#define CHECK_MAPPING(m) \ - assert(m.is_exhaustive() == false); \ - assert(m.extents().rank() == 2); \ - assert(m.extents().rank_dynamic() == 2); \ - assert(m.extents().extent(0) == 16); \ - assert(m.extents().extent(1) == 32); \ - assert(m.stride(0) == 1); \ - assert(m.stride(1) == 128); \ - assert(m.strides()[0] == 1); \ - assert(m.strides()[1] == 128) - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - // From a span - { - cuda::std::array a{1, 128}; - cuda::std::span s(a.data(), 2); - cuda::std::layout_stride::mapping> m{cuda::std::dextents{16, 32}, s}; - - CHECK_MAPPING(m); - } - - // TEST(TestLayoutStrideListInitialization, test_list_initialization) - { - cuda::std::layout_stride::mapping> m{ - cuda::std::dextents{16, 32}, cuda::std::array{1, 128}}; - - CHECK_MAPPING(m); - } - - // From another mapping - { - typedef size_t index_t; - - cuda::std::layout_stride::mapping> m0{ - cuda::std::dextents{16, 32}, cuda::std::array{1, 128}}; - cuda::std::layout_stride::mapping> m{m0}; - - CHECK_MAPPING(m); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/compare.fail.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/compare.fail.cpp deleted file mode 100644 index a4ac562c207..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/compare.fail.cpp +++ /dev/null @@ -1,37 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - using index_t = size_t; - using ext2d_t = cuda::std::extents; - using ext3d_t = cuda::std::extents; - - // Constraint: rank consistency - { - constexpr ext2d_t e0; - constexpr ext3d_t e1; - constexpr cuda::std::array a0{1, 64}; - constexpr cuda::std::array a1{1, 64, 64 * 128}; - constexpr cuda::std::layout_stride::mapping m0{e0, a0}; - constexpr cuda::std::layout_stride::mapping m1{e1, a1}; - - static_assert(m0 == m1, ""); // expected-error - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/compare.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/compare.pass.cpp deleted file mode 100644 index bd427cedb9a..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/compare.pass.cpp +++ /dev/null @@ -1,84 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - using index_t = int; - using ext2d_t = cuda::std::extents; - - { - cuda::std::extents e; - cuda::std::array a{1, 16}; - cuda::std::layout_stride::mapping m0{e, a}; - cuda::std::layout_stride::mapping m{m0}; - - assert(m0 == m); - } - - { - using index2_t = int32_t; - - cuda::std::extents e; - cuda::std::array a{1, 16}; - cuda::std::extents e2; - cuda::std::array a2{1, 16}; - cuda::std::layout_stride::mapping m1{e, a}; - cuda::std::layout_stride::mapping m2{e2, a2}; - - assert(m1 == m2); - } - - { - cuda::std::extents e; - cuda::std::array a0{1, 16}; - cuda::std::array a1{1, 32}; - cuda::std::layout_stride::mapping m0{e, a0}; - cuda::std::layout_stride::mapping m1{e, a1}; - - assert(m0 != m1); - } - - { - cuda::std::extents e; - cuda::std::array a{1, 16}; - cuda::std::layout_stride::mapping m{e, a}; - cuda::std::layout_left ::mapping m_left{e}; - - assert(m == m_left); - } - - { - cuda::std::extents e; - cuda::std::array a{32, 1}; - cuda::std::layout_stride::mapping m{e, a}; - cuda::std::layout_right ::mapping m_right{e}; - - assert(m == m_right); - } - - { - cuda::std::extents e0; - cuda::std::extents e1; - cuda::std::array a{1, 16}; - cuda::std::layout_stride::mapping m0{e0, a}; - cuda::std::layout_stride::mapping m1{e1, a}; - - assert(m0 != m1); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/extents.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/extents.pass.cpp deleted file mode 100644 index 41c6edda81c..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/extents.pass.cpp +++ /dev/null @@ -1,32 +0,0 @@ 
-//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - using index_t = int; - using ext2d_t = cuda::std::extents; - - { - ext2d_t e{16, 32}; - cuda::std::array a{1, 16}; - cuda::std::layout_stride::mapping m{e, a}; - - assert(m.extents() == e); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/is_exhaustive.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/is_exhaustive.pass.cpp deleted file mode 100644 index 2bc6e4f8c90..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/is_exhaustive.pass.cpp +++ /dev/null @@ -1,77 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - using index_t = int; - - { - cuda::std::extents e; - cuda::std::array a{1}; - cuda::std::layout_stride::mapping> m{e, a}; - - static_assert(m.is_always_exhaustive() == false, ""); - assert(m.is_exhaustive() == true); - } - - { - cuda::std::extents e; - cuda::std::array a{2}; - cuda::std::layout_stride::mapping> m{e, a}; - - static_assert(m.is_always_exhaustive() == false, ""); - assert(m.is_exhaustive() == false); - } - - { - cuda::std::extents e; - cuda::std::array a{1, 16}; - cuda::std::layout_stride::mapping> m{e, a}; - - static_assert(m.is_always_exhaustive() == false, ""); - assert(m.is_exhaustive() == true); - } - - { - cuda::std::extents e{16, 32}; - cuda::std::array a{1, 128}; - cuda::std::layout_stride::mapping> m{e, a}; - - static_assert(m.is_always_exhaustive() == false, ""); - assert(m.is_exhaustive() == false); - } - - { - cuda::std::extents e{16, 32, 4}; - cuda::std::array a{1, 16 * 4, 16}; - cuda::std::layout_stride::mapping> m{e, a}; - - static_assert(m.is_always_exhaustive() == false, ""); - assert(m.is_exhaustive() == true); - } - - { - cuda::std::extents e{16, 32, 4}; - cuda::std::array a{1, 16 * 4 + 1, 16}; - cuda::std::layout_stride::mapping> m{e, a}; - - static_assert(m.is_always_exhaustive() == false, ""); - assert(m.is_exhaustive() == false); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/is_strided.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/is_strided.pass.cpp deleted file mode 100644 index 93c85a07a50..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/is_strided.pass.cpp +++ /dev/null @@ -1,29 +0,0 @@ 
-//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -int main(int, char**) -{ - { - using dexts = cuda::std::dextents; - cuda::std::array a{1, 128}; - - cuda::std::layout_stride::mapping m{dexts{16, 32}, a}; - - static_assert(m.is_always_strided() == true, ""); - assert(m.is_strided() == true); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/is_unique.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/is_unique.pass.cpp deleted file mode 100644 index ad663c45cec..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/is_unique.pass.cpp +++ /dev/null @@ -1,41 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - using index_t = int; - - { - cuda::std::extents e; - cuda::std::array a{1}; - cuda::std::layout_stride::mapping> m{e, a}; - - static_assert(m.is_always_unique() == true, ""); - assert(m.is_unique() == true); - } - - { - cuda::std::extents e{16, 32}; - cuda::std::array a{1, 16}; - cuda::std::layout_stride::mapping> m{e, a}; - - static_assert(m.is_always_unique() == true, ""); - assert(m.is_unique() == true); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/paren_op.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/paren_op.pass.cpp deleted file mode 100644 index ac933c0c109..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/paren_op.pass.cpp +++ /dev/null @@ -1,87 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -#include "../mdspan.layout.util/layout_util.hpp" -#include "../my_int.hpp" - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - using index_t = int; - - { - cuda::std::extents e; - cuda::std::array a{1}; - cuda::std::layout_stride::mapping> m{e, a}; - - assert(m(8) == 8); - } - - { - cuda::std::extents e{16, 32}; - cuda::std::array a{1, 16}; - cuda::std::layout_stride::mapping> m{e, a}; - - assert(m(8, 16) == 8 * 1 + 16 * 16); - } - - { - cuda::std::extents e{32}; - cuda::std::array a{1, 24}; - cuda::std::layout_stride::mapping> m{e, a}; - - assert(m(8, 16) == 8 * 1 + 16 * 24); - } - - { - cuda::std::extents e{32}; - cuda::std::array a{48, 1}; - cuda::std::layout_stride::mapping> m{e, a}; - - assert(m(8, 16) == 8 * 48 + 16 * 1); - } - - // Indices are of a type implicitly convertible to index_type - { - cuda::std::extents e{16, 32}; - cuda::std::array a{1, 16}; - cuda::std::layout_stride::mapping> m{e, a}; - - assert(m(my_int(8), my_int(16)) == 8 * 1 + 16 * 16); - } - - // Constraints - { - cuda::std::extents e; - cuda::std::array a{1}; - cuda::std::layout_stride::mapping> m{e, a}; - - static_assert(is_paren_op_avail_v == true, ""); - - // rank consistency - static_assert(is_paren_op_avail_v == false, ""); - - // convertibility - static_assert(is_paren_op_avail_v == false, ""); - - // nothrow-constructibility -#ifndef TEST_COMPILER_BROKEN_SMF_NOEXCEPT - static_assert(is_paren_op_avail_v == false, ""); -#endif // TEST_COMPILER_BROKEN_SMF_NOEXCEPT - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/required_span_size.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/required_span_size.pass.cpp deleted file mode 100644 index a2ab8601eeb..00000000000 --- 
a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/required_span_size.pass.cpp +++ /dev/null @@ -1,64 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - using index_t = int; - using ext2d_t = cuda::std::extents; - - { - cuda::std::extents e; - cuda::std::array a{1}; - cuda::std::layout_stride::mapping> m{e, a}; - - assert(m.required_span_size() == 16); - } - - { - ext2d_t e{16, 32}; - cuda::std::array a{1, 16}; - cuda::std::layout_stride::mapping m{e, a}; - - assert(m.required_span_size() == 16 * 32); - } - - { - ext2d_t e{16, 0}; - cuda::std::array a{1, 1}; - cuda::std::layout_stride::mapping m{e, a}; - - assert(m.required_span_size() == 0); - } - - { - cuda::std::extents e{32}; - cuda::std::array a{1, 24}; - cuda::std::layout_stride::mapping m{e, a}; - - assert(m.required_span_size() == 32 * 24 - (24 - 16)); - } - - { - cuda::std::extents e{32}; - cuda::std::array a{48, 1}; - cuda::std::layout_stride::mapping m{e, a}; - - assert(m.required_span_size() == 16 * 48 - (48 - 32)); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/stride.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/stride.pass.cpp deleted file mode 100644 index 32d71626a82..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/stride.pass.cpp 
+++ /dev/null @@ -1,69 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -#include "../mdspan.layout.util/layout_util.hpp" -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - using index_t = size_t; - using ext0d_t = cuda::std::extents; - using ext2d_t = cuda::std::extents; - - auto e = cuda::std::dextents{16, 32}; - auto s_arr = cuda::std::array{1, 128}; - - // From a span - { - cuda::std::span s(s_arr.data(), 2); - cuda::std::layout_stride::mapping m{e, s}; - - assert(m.stride(0) == 1); - assert(m.stride(1) == 128); - - static_assert(is_stride_avail_v == true, ""); - } - - // From an array - { - cuda::std::layout_stride::mapping m{e, s_arr}; - - assert(m.stride(0) == 1); - assert(m.stride(1) == 128); - } - - // From another mapping - { - cuda::std::layout_stride::mapping m0{e, s_arr}; - cuda::std::layout_stride::mapping m{m0}; - - assert(m.stride(0) == 1); - assert(m.stride(1) == 128); - } - - // constraint: extents_­type?::?rank() > 0 - { - ext0d_t e0d{}; - cuda::std::layout_stride::mapping m{e0d, cuda::std::array{}}; - - unused(m); - - static_assert(is_stride_avail_v == false, ""); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/strides.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/strides.pass.cpp deleted file mode 100644 index e7b9f28da92..00000000000 --- 
a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.stride.obs/strides.pass.cpp +++ /dev/null @@ -1,53 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - using index_t = size_t; - using ext2d_t = cuda::std::extents; - - auto e = cuda::std::dextents{16, 32}; - auto s_arr = cuda::std::array{1, 128}; - - // From a span - { - cuda::std::span s(s_arr.data(), 2); - cuda::std::layout_stride::mapping m{e, s}; - - assert(m.strides()[0] == 1); - assert(m.strides()[1] == 128); - } - - // From an array - { - cuda::std::layout_stride::mapping m{e, s_arr}; - - assert(m.strides()[0] == 1); - assert(m.strides()[1] == 128); - } - - // From another mapping - { - cuda::std::layout_stride::mapping m0{e, s_arr}; - cuda::std::layout_stride::mapping m{m0}; - - assert(m.strides()[0] == 1); - assert(m.strides()[1] == 128); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.util/layout_util.hpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.util/layout_util.hpp deleted file mode 100644 index ca93f1a3131..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.layout.util/layout_util.hpp +++ /dev/null @@ -1,166 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
-// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -#include - -template -struct TestLayoutCtors; -template -struct TestLayoutCtors>> -{ - using mapping_type = Mapping; - using extents_type = typename mapping_type::extents_type; - Mapping map = {extents_type{DynamicSizes...}}; -}; - -template -__host__ __device__ void typed_test_default_ctor() -// TYPED_TEST( TestLayoutCtors, default_ctor ) -{ - // Default constructor ensures extents() == Extents() is true. - using TestFixture = TestLayoutCtors; - auto m = typename TestFixture::mapping_type(); - assert(m.extents() == typename TestFixture::extents_type()); - auto m2 = typename TestFixture::mapping_type{}; - assert(m2.extents() == typename TestFixture::extents_type{}); - assert(m == m2); -} - -template -struct TestLayoutCompatCtors; -template -struct TestLayoutCompatCtors, - Mapping2, - cuda::std::integer_sequence>> -{ - using mapping_type1 = Mapping; - using mapping_type2 = Mapping2; - using extents_type1 = cuda::std::remove_reference_t().extents())>; - using extents_type2 = cuda::std::remove_reference_t().extents())>; - Mapping map1 = {extents_type1{DynamicSizes...}}; - Mapping2 map2 = {extents_type2{DynamicSizes2...}}; -}; - -template -__host__ __device__ void typed_test_compatible() -// TYPED_TEST(TestLayout{Left|Right}CompatCtors, compatible_construct_{1|2}) { -// TYPED_TEST(TestLayout{Left|Right}CompatCtors, compatible_assign_{1|2}) { -{ - using TestFixture = TestLayoutCompatCtors; - - // Construct - { - TestFixture t; - - auto m1 = typename TestFixture::mapping_type1(t.map2); - assert(m1.extents() == t.map2.extents()); - - auto m2 = typename TestFixture::mapping_type2(t.map1); - assert(m2.extents() == t.map1.extents()); - } - - // Assign - { - TestFixture t; - -#if 
__MDSPAN_HAS_CXX_17 - if constexpr (cuda::std::is_convertible::value) - { - t.map1 = t.map2; - } - else - { - t.map1 = typename TestFixture::mapping_type1(t.map2); - } -#else - t.map1 = typename TestFixture::mapping_type1(t.map2); -#endif - - assert(t.map1.extents() == t.map2.extents()); - } -} - -template -__host__ __device__ void typed_test_compare() -{ - using TestFixture = TestLayoutCompatCtors; - - { - TestFixture t; - - auto m1 = typename TestFixture::mapping_type1(t.map2); - assert(m1 == t.map2); - - auto m2 = typename TestFixture::mapping_type2(t.map1); - assert(m2 == t.map1); - } -} - -template -using _sizes = cuda::std::integer_sequence; -template -using _exts = cuda::std::extents; - -template -using test_left_type_pair = - cuda::std::tuple, - S1, - typename cuda::std::layout_left::template mapping, - S2>; - -template -using test_right_type_pair = - cuda::std::tuple, - S1, - typename cuda::std::layout_right::template mapping, - S2>; - -template -struct is_cons_avail : cuda::std::false_type -{}; - -template -struct is_cons_avail()}), T1>::value>> - : cuda::std::true_type -{}; - -template -constexpr bool is_cons_avail_v = is_cons_avail::value; - -template -struct is_paren_op_avail : cuda::std::false_type -{}; - -template -struct is_paren_op_avail< - cuda::std::enable_if_t()(cuda::std::declval()...)), - typename T::index_type>::value>, - T, - Indices...> : cuda::std::true_type -{}; - -template -constexpr bool is_paren_op_avail_v = is_paren_op_avail::value; - -template -struct is_stride_avail : cuda::std::false_type -{}; - -template -struct is_stride_avail< - T, - RankType, - cuda::std::enable_if_t().stride(cuda::std::declval())), - typename T::index_type>::value>> : cuda::std::true_type -{}; - -template -constexpr bool is_stride_avail_v = is_stride_avail::value; diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/array_init_extents.pass.cpp 
b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/array_init_extents.pass.cpp deleted file mode 100644 index 3644ec42ff1..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/array_init_extents.pass.cpp +++ /dev/null @@ -1,107 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -#include "../mdspan.mdspan.util/mdspan_util.hpp" -#include "../my_accessor.hpp" -#include "../my_int.hpp" - -constexpr auto dyn = cuda::std::dynamic_extent; - -template -struct is_array_cons_avail : cuda::std::false_type -{}; - -template -struct is_array_cons_avail< - T, - DataHandleT, - SizeType, - N, - cuda::std::enable_if_t(), cuda::std::declval>()}), - T>::value>> : cuda::std::true_type -{}; - -template -constexpr bool is_array_cons_avail_v = is_array_cons_avail::value; - -int main(int, char**) -{ - // extents from cuda::std::array - { - cuda::std::array d{42}; - cuda::std::mdspan> m{d.data(), cuda::std::array{64, 128}}; - - CHECK_MDSPAN_EXTENT(m, d, 64, 128); - } - - // data from cptr, extents from cuda::std::array - { - using mdspan_t = cuda::std::mdspan>; - - cuda::std::array d{42}; - const int* const ptr = d.data(); - - static_assert(is_array_cons_avail_v == true, ""); - - mdspan_t m{ptr, cuda::std::array{64, 128}}; - - static_assert(cuda::std::is_same::value, ""); - - CHECK_MDSPAN_EXTENT(m, d, 64, 128); - } - - // Constraint: (is_convertible_v && ...) 
is true - { - using mdspan_t = cuda::std::mdspan>; - using other_index_t = my_int_non_convertible; - - static_assert(is_array_cons_avail_v == false, ""); - } - - // Constraint: (is_nothrow_constructible && ...) is true -#ifndef TEST_COMPILER_BROKEN_SMF_NOEXCEPT - { - using mdspan_t = cuda::std::mdspan>; - using other_index_t = my_int_non_nothrow_constructible; - - static_assert(is_array_cons_avail_v == false, ""); - } -#endif // TEST_COMPILER_BROKEN_SMF_NOEXCEPT - - // Constraint: N == rank() || N == rank_dynamic() is true - { - using mdspan_t = cuda::std::mdspan>; - - static_assert(is_array_cons_avail_v == false, ""); - } - - // Constraint: is_constructible_v is true - { - using mdspan_t = cuda::std::mdspan, cuda::std::layout_stride>; - - static_assert(is_array_cons_avail_v == false, ""); - } - - // Constraint: is_default_constructible_v is true - { - using mdspan_t = - cuda::std::mdspan, cuda::std::layout_right, Foo::my_accessor>; - - static_assert(is_array_cons_avail_v == false, ""); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/copy.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/copy.pass.cpp deleted file mode 100644 index 46ed00f5f49..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/copy.pass.cpp +++ /dev/null @@ -1,78 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -#include "../mdspan.mdspan.util/mdspan_util.hpp" -#include "../my_accessor.hpp" - -constexpr auto dyn = cuda::std::dynamic_extent; - -template -struct is_copy_cons_avail : cuda::std::false_type -{}; - -template -struct is_copy_cons_avail()}), T1>::value>> - : cuda::std::true_type -{}; - -template -constexpr bool is_copy_cons_avail_v = is_copy_cons_avail::value; - -int main(int, char**) -{ - // copy constructor - { - using ext_t = cuda::std::extents; - using mdspan_t = cuda::std::mdspan; - - static_assert(is_copy_cons_avail_v == true, ""); - - cuda::std::array d{42}; - mdspan_t m0{d.data(), ext_t{64, 128}}; - mdspan_t m{m0}; - - CHECK_MDSPAN_EXTENT(m, d, 64, 128); - } - - // copy constructor with conversion - { - cuda::std::array d{42}; - cuda::std::mdspan> m0{d.data(), cuda::std::extents{}}; - cuda::std::mdspan> m{m0}; - - CHECK_MDSPAN_EXTENT(m, d, 64, 128); - } - - // Constraint: is_constructible_v&> is true - { - using mdspan1_t = cuda::std::mdspan, cuda::std::layout_left>; - using mdspan0_t = cuda::std::mdspan, cuda::std::layout_right>; - - static_assert(is_copy_cons_avail_v == false, ""); - } - - // Constraint: is_constructible_v is true - { - using mdspan1_t = - cuda::std::mdspan, cuda::std::layout_right, Foo::my_accessor>; - using mdspan0_t = cuda::std::mdspan, cuda::std::layout_right>; - - static_assert(is_copy_cons_avail_v == false, ""); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/ctad_c_array.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/ctad_c_array.pass.cpp deleted file mode 100644 index 930a772b596..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/ctad_c_array.pass.cpp +++ /dev/null @@ -1,51 +0,0 @@ 
-//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -// No CTAD in C++14 or earlier -// UNSUPPORTED: c++14 - -#include -#include - -int main(int, char**) -{ -#ifdef __MDSPAN_USE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION - // TEST(TestMdspanCTAD, ctad_carray) - { - int data[5] = {1, 2, 3, 4, 5}; - cuda::std::mdspan m(data); - - static_assert(cuda::std::is_same::value == true, ""); - static_assert(m.is_exhaustive() == true, ""); - - assert(m.data_handle() == &data[0]); - assert(m.rank() == 1); - assert(m.rank_dynamic() == 0); - assert(m.static_extent(0) == 5); - assert(m.extent(0) == 5); - assert(__MDSPAN_OP(m, 2) == 3); - - cuda::std::mdspan m2(data, 3); - - static_assert(cuda::std::is_same::value == true, ""); - static_assert(m2.is_exhaustive() == true, ""); - - assert(m2.data_handle() == &data[0]); - assert(m2.rank() == 1); - assert(m2.rank_dynamic() == 1); - assert(m2.extent(0) == 3); - assert(__MDSPAN_OP(m2, 2) == 3); - } -#endif - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/ctad_const_c_array.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/ctad_const_c_array.pass.cpp deleted file mode 100644 index 7957a3cdd39..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/ctad_const_c_array.pass.cpp +++ /dev/null @@ -1,40 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with 
LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -// No CTAD in C++14 or earlier -// UNSUPPORTED: c++14 - -#include -#include - -int main(int, char**) -{ -#ifdef __MDSPAN_USE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION - // TEST(TestMdspanCTAD, ctad_const_carray) - { - const int data[5] = {1, 2, 3, 4, 5}; - cuda::std::mdspan m(data); - - static_assert(cuda::std::is_same::value == true, ""); - static_assert(m.is_exhaustive() == true, ""); - - assert(m.data_handle() == &data[0]); - assert(m.rank() == 1); - assert(m.rank_dynamic() == 0); - assert(m.static_extent(0) == 5); - assert(m.extent(0) == 5); - assert(__MDSPAN_OP(m, 2) == 3); - } -#endif - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/ctad_copy.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/ctad_copy.pass.cpp deleted file mode 100644 index e8e9e05efdb..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/ctad_copy.pass.cpp +++ /dev/null @@ -1,37 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -// No CTAD in C++14 or earlier -// UNSUPPORTED: c++14 - -#include -#include - -#include "../mdspan.mdspan.util/mdspan_util.hpp" - -int main(int, char**) -{ -#ifdef __MDSPAN_USE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION - constexpr auto dyn = cuda::std::dynamic_extent; - - // copy constructor - { - cuda::std::array d{42}; - cuda::std::mdspan> m0{d.data(), cuda::std::extents{64, 128}}; - cuda::std::mdspan m{m0}; - - CHECK_MDSPAN_EXTENT(m, d, 64, 128); - } -#endif - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/ctad_extents.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/ctad_extents.pass.cpp deleted file mode 100644 index aee3b1a7020..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/ctad_extents.pass.cpp +++ /dev/null @@ -1,76 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -// No CTAD in C++14 or earlier -// UNSUPPORTED: c++14 - -#include -#include - -#define CHECK_MDSPAN(m, d) \ - static_assert(m.is_exhaustive(), ""); \ - assert(m.data_handle() == d.data()); \ - assert(m.rank() == 2); \ - assert(m.rank_dynamic() == 2); \ - assert(m.extent(0) == 64); \ - assert(m.extent(1) == 128) - -int main(int, char**) -{ -#ifdef __MDSPAN_USE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION - // TEST(TestMdspanCTAD, extents_object) - { - cuda::std::array d{42}; - cuda::std::mdspan m{d.data(), cuda::std::extents{64, 128}}; - - CHECK_MDSPAN(m, d); - } - - // TEST(TestMdspanCTAD, extents_object_move) - { - cuda::std::array d{42}; - cuda::std::mdspan m{d.data(), std::move(cuda::std::extents{64, 128})}; - - CHECK_MDSPAN(m, d); - } - - // TEST(TestMdspanCTAD, extents_std_array) - { - cuda::std::array d{42}; - cuda::std::mdspan m{d.data(), cuda::std::array{64, 128}}; - - CHECK_MDSPAN(m, d); - } - - // TEST(TestMdspanCTAD, cptr_extents_std_array) - { - cuda::std::array d{42}; - const int* const ptr = d.data(); - cuda::std::mdspan m{ptr, cuda::std::array{64, 128}}; - - static_assert(cuda::std::is_same::value, ""); - - CHECK_MDSPAN(m, d); - } - - // extents from std::span - { - cuda::std::array d{42}; - cuda::std::array sarr{64, 128}; - cuda::std::mdspan m{d.data(), cuda::std::span{sarr}}; - - CHECK_MDSPAN(m, d); - } -#endif - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/ctad_extents_pack.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/ctad_extents_pack.pass.cpp deleted file mode 100644 index 211ec80fccc..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/ctad_extents_pack.pass.cpp +++ /dev/null @@ -1,38 +0,0 @@ 
-//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -// No CTAD in C++14 or earlier -// UNSUPPORTED: c++14 - -#include -#include - -int main(int, char**) -{ -#ifdef __MDSPAN_USE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION - // TEST(TestMdspanCTAD, extents_pack) - { - cuda::std::array d{42}; - cuda::std::mdspan m(d.data(), 64, 128); - - static_assert(m.is_exhaustive() == true, ""); - - assert(m.data_handle() == d.data()); - assert(m.rank() == 2); - assert(m.rank_dynamic() == 2); - assert(m.extent(0) == 64); - assert(m.extent(1) == 128); - } -#endif - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/ctad_layouts.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/ctad_layouts.pass.cpp deleted file mode 100644 index a82a0ba39e6..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/ctad_layouts.pass.cpp +++ /dev/null @@ -1,59 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -// No CTAD in C++14 or earlier -// UNSUPPORTED: c++14 - -#include -#include - -#define CHECK_MDSPAN(m, d, exhaust, s0, s1) \ - static_assert(m.rank() == 2, ""); \ - static_assert(m.rank_dynamic() == 2, ""); \ - assert(m.data_handle() == d.data()); \ - assert(m.extent(0) == 16); \ - assert(m.extent(1) == 32); \ - assert(m.stride(0) == s0); \ - assert(m.stride(1) == s1); \ - assert(m.is_exhaustive() == exhaust) - -int main(int, char**) -{ -#ifdef __MDSPAN_USE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION - // TEST(TestMdspanCTAD, layout_left) - { - cuda::std::array d{42}; - cuda::std::mdspan m0{d.data(), cuda::std::layout_left::mapping{cuda::std::extents{16, 32}}}; - - CHECK_MDSPAN(m0, d, true, 1, 16); - } - - // TEST(TestMdspanCTAD, layout_right) - { - cuda::std::array d{42}; - cuda::std::mdspan m0{d.data(), cuda::std::layout_right::mapping{cuda::std::extents{16, 32}}}; - - CHECK_MDSPAN(m0, d, true, 32, 1); - } - - // TEST(TestMdspanCTAD, layout_stride) - { - cuda::std::array d{42}; - cuda::std::mdspan m0{d.data(), - cuda::std::layout_stride::mapping{cuda::std::extents{16, 32}, cuda::std::array{1, 128}}}; - - CHECK_MDSPAN(m0, d, false, 1, 128); - } -#endif - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/ctad_mapping.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/ctad_mapping.pass.cpp deleted file mode 100644 index e03fe5b0401..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/ctad_mapping.pass.cpp +++ /dev/null @@ -1,53 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -// No CTAD in C++14 or earlier -// UNSUPPORTED: c++14 - -#include -#include - -#include "../mdspan.mdspan.util/mdspan_util.hpp" - -int main(int, char**) -{ -#ifdef __MDSPAN_USE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION - constexpr auto dyn = cuda::std::dynamic_extent; - - // mapping - { - using data_t = int; - using index_t = size_t; - cuda::std::array d{42}; - cuda::std::layout_left::mapping> map{ - cuda::std::dextents{64, 128}}; - cuda::std::mdspan m{d.data(), map}; - - CHECK_MDSPAN_EXTENT(m, d, 64, 128); - } - - // mapping and accessor - { - using data_t = int; - using index_t = size_t; - cuda::std::array d{42}; - cuda::std::layout_left::mapping> map{ - cuda::std::dextents{64, 128}}; - cuda::std::default_accessor a; - cuda::std::mdspan m{d.data(), map, a}; - - CHECK_MDSPAN_EXTENT(m, d, 64, 128); - } -#endif - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/ctad_pointer.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/ctad_pointer.pass.cpp deleted file mode 100644 index d4a854673e2..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/ctad_pointer.pass.cpp +++ /dev/null @@ -1,57 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -// No CTAD in C++14 or earlier -// UNSUPPORTED: c++14 - -#include -#include - -#define CHECK_MDSPAN(m, d) \ - static_assert(cuda::std::is_same::value, ""); \ - static_assert(m.is_exhaustive(), ""); \ - assert(m.data_handle() == d.data()); \ - assert(m.rank() == 0); \ - assert(m.rank_dynamic() == 0) - -int main(int, char**) -{ -#ifdef __MDSPAN_USE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION - // TEST(TestMdspanCTAD, ctad_pointer) - { - cuda::std::array d = {1, 2, 3, 4, 5}; - int* ptr = d.data(); - cuda::std::mdspan m(ptr); - - CHECK_MDSPAN(m, d); - } - - // TEST(TestMdspanCTAD, ctad_pointer_tmp) - { - cuda::std::array d = {1, 2, 3, 4, 5}; - cuda::std::mdspan m(d.data()); - - CHECK_MDSPAN(m, d); - } - - // TEST(TestMdspanCTAD, ctad_pointer_move) - { - cuda::std::array d = {1, 2, 3, 4, 5}; - int* ptr = d.data(); - cuda::std::mdspan m(std::move(ptr)); - - CHECK_MDSPAN(m, d); - } -#endif - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/custom_accessor.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/custom_accessor.pass.cpp deleted file mode 100644 index cb43d4156d0..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/custom_accessor.pass.cpp +++ /dev/null @@ -1,43 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -#include "../foo_customizations.hpp" -#include "../mdspan.mdspan.util/mdspan_util.hpp" - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - { - using data_t = int; - using acc_t = Foo::foo_accessor; - using index_t = size_t; - - cuda::std::array d{42}; - cuda::std::layout_left::mapping> map{ - cuda::std::dextents{64, 128}}; - acc_t a; - cuda::std::mdspan, cuda::std::layout_left, acc_t> m{d.data(), map, a}; - - static_assert(m.is_exhaustive(), ""); - // assert(m.data_handle() == d.data()); - assert(m.rank() == 2); - assert(m.rank_dynamic() == 2); - assert(m.extent(0) == 64); - assert(m.extent(1) == 128); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/custom_layout.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/custom_layout.pass.cpp deleted file mode 100644 index 962395411f0..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/custom_layout.pass.cpp +++ /dev/null @@ -1,36 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -#include "../foo_customizations.hpp" -#include "../mdspan.mdspan.util/mdspan_util.hpp" - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - { - using data_t = int; - using lay_t = Foo::layout_foo; - using index_t = size_t; - - cuda::std::array d{42}; - lay_t::mapping> map{cuda::std::dextents{64, 128}}; - cuda::std::mdspan, lay_t> m{d.data(), map}; - - CHECK_MDSPAN_EXTENT(m, d, 64, 128); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/data_c_array.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/data_c_array.pass.cpp deleted file mode 100644 index f65e4d0c339..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/data_c_array.pass.cpp +++ /dev/null @@ -1,40 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -int main(int, char**) -{ - { - typedef int data_t; - typedef size_t index_t; - - data_t data[1] = {42}; - cuda::std::mdspan> m(data); - auto val = m(0); - - static_assert(m.is_exhaustive() == true, ""); - - assert(m.data_handle() == data); - assert(m.rank() == 1); - assert(m.rank_dynamic() == 0); - assert(m.extent(0) == 1); - assert(m.static_extent(0) == 1); - assert(m.stride(0) == 1); - assert(val == 42); - assert(m.size() == 1); - assert(m.empty() == false); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/default.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/default.pass.cpp deleted file mode 100644 index ccec9531b47..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/default.pass.cpp +++ /dev/null @@ -1,39 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - { - typedef int data_t; - typedef size_t index_t; - - cuda::std::mdspan> m; - - static_assert(m.is_exhaustive() == true, ""); - - assert(m.data_handle() == nullptr); - assert(m.rank() == 1); - assert(m.rank_dynamic() == 1); - assert(m.extent(0) == 0); - assert(m.static_extent(0) == dyn); - assert(m.stride(0) == 1); - assert(m.size() == 0); - assert(m.empty() == true); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/extents.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/extents.pass.cpp deleted file mode 100644 index c8f433ee182..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/extents.pass.cpp +++ /dev/null @@ -1,80 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -#include "../mdspan.mdspan.util/mdspan_util.hpp" -#include "../my_accessor.hpp" -#include "../my_int.hpp" - -constexpr auto dyn = cuda::std::dynamic_extent; - -template -struct is_extents_cons_avail : cuda::std::false_type -{}; - -template -struct is_extents_cons_avail< - T, - DataHandleType, - ExtentsType, - cuda::std::enable_if_t< - cuda::std::is_same(), cuda::std::declval()}), T>::value>> - : cuda::std::true_type -{}; - -template -constexpr bool is_extents_cons_avail_v = is_extents_cons_avail::value; - -int main(int, char**) -{ - // extents from extents object - { - using ext_t = cuda::std::extents; - cuda::std::array d{42}; - cuda::std::mdspan m{d.data(), ext_t{64, 128}}; - - CHECK_MDSPAN_EXTENT(m, d, 64, 128); - } - - // extents from extents object move - { - using ext_t = cuda::std::extents; - using mdspan_t = cuda::std::mdspan; - - static_assert(is_extents_cons_avail_v == true, ""); - - cuda::std::array d{42}; - mdspan_t m{d.data(), cuda::std::move(ext_t{64, 128})}; - - CHECK_MDSPAN_EXTENT(m, d, 64, 128); - } - - // Constraint: is_constructible_v is true - { - using ext_t = cuda::std::extents; - using mdspan_t = cuda::std::mdspan; - - static_assert(is_extents_cons_avail_v == false, ""); - } - - // Constraint: is_default_constructible_v is true - { - using ext_t = cuda::std::extents; - using mdspan_t = cuda::std::mdspan>; - - static_assert(is_extents_cons_avail_v == false, ""); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/extents_pack.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/extents_pack.pass.cpp deleted file mode 100644 index 145b9273330..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/extents_pack.pass.cpp +++ /dev/null @@ -1,111 
+0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -#include "../mdspan.mdspan.util/mdspan_util.hpp" -#include "../my_accessor.hpp" -#include "../my_int.hpp" - -constexpr auto dyn = cuda::std::dynamic_extent; - -template -struct is_param_pack_cons_avail : cuda::std::false_type -{}; - -template -struct is_param_pack_cons_avail< - cuda::std::enable_if_t< - cuda::std::is_same(), cuda::std::declval()...}), T>::value>, - T, - DataHandleT, - SizeTypes...> : cuda::std::true_type -{}; - -template -constexpr bool is_param_pack_cons_avail_v = is_param_pack_cons_avail::value; - -int main(int, char**) -{ - { - using index_t = int; - cuda::std::array d{42}; - cuda::std::mdspan> m{d.data(), 64, 128}; - - CHECK_MDSPAN_EXTENT(m, d, 64, 128); - } - - { - using index_t = int; - cuda::std::array d{42}; - cuda::std:: - mdspan, cuda::std::layout_right, cuda::std::default_accessor> - m{d.data(), 64, 128}; - - CHECK_MDSPAN_EXTENT(m, d, 64, 128); - } - - { - using mdspan_t = cuda::std::mdspan>; - using other_index_t = my_int; - - cuda::std::array d{42}; - mdspan_t m{d.data(), other_index_t(64), other_index_t(128)}; - - CHECK_MDSPAN_EXTENT(m, d, 64, 128); - - static_assert(is_param_pack_cons_avail_v == true, ""); - } - - // Constraint: (is_convertible_v && ...) is true - { - using mdspan_t = cuda::std::mdspan>; - using other_index_t = my_int_non_convertible; - - static_assert(is_param_pack_cons_avail_v == false, ""); - } - - // Constraint: (is_nothrow_constructible && ...) 
is true -#ifndef TEST_COMPILER_BROKEN_SMF_NOEXCEPT - { - using mdspan_t = cuda::std::mdspan>; - using other_index_t = my_int_non_nothrow_constructible; - - static_assert(is_param_pack_cons_avail_v == false, ""); - } -#endif // TEST_COMPILER_BROKEN_SMF_NOEXCEPT - - // Constraint: N == rank() || N == rank_dynamic() is true - { - using mdspan_t = cuda::std::mdspan>; - - static_assert(is_param_pack_cons_avail_v == false, ""); - } - - // Constraint: is_constructible_v is true - { - using mdspan_t = cuda::std::mdspan, cuda::std::layout_stride>; - - static_assert(is_param_pack_cons_avail_v == false, ""); - } - - // Constraint: is_default_constructible_v is true - { - using mdspan_t = - cuda::std::mdspan, cuda::std::layout_right, Foo::my_accessor>; - - static_assert(is_param_pack_cons_avail_v == false, ""); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/list_init_layout_left.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/list_init_layout_left.pass.cpp deleted file mode 100644 index b0bae32d045..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/list_init_layout_left.pass.cpp +++ /dev/null @@ -1,40 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - { - typedef int data_t; - typedef size_t index_t; - - cuda::std::array d{42}; - cuda::std::mdspan, cuda::std::layout_left> m{d.data(), 16, 32}; - - static_assert(m.is_exhaustive() == true, ""); - - assert(m.data_handle() == d.data()); - assert(m.rank() == 2); - assert(m.rank_dynamic() == 2); - assert(m.extent(0) == 16); - assert(m.extent(1) == 32); - assert(m.stride(0) == 1); - assert(m.stride(1) == 16); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/list_init_layout_right.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/list_init_layout_right.pass.cpp deleted file mode 100644 index 80845f9173c..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/list_init_layout_right.pass.cpp +++ /dev/null @@ -1,40 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - { - typedef int data_t; - typedef size_t index_t; - - cuda::std::array d{42}; - cuda::std::mdspan, cuda::std::layout_right> m{d.data(), 16, 32}; - - static_assert(m.is_exhaustive() == true, ""); - - assert(m.data_handle() == d.data()); - assert(m.rank() == 2); - assert(m.rank_dynamic() == 2); - assert(m.extent(0) == 16); - assert(m.extent(1) == 32); - assert(m.stride(0) == 32); - assert(m.stride(1) == 1); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/list_init_layout_stride.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/list_init_layout_stride.pass.cpp deleted file mode 100644 index 39fd6756553..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/list_init_layout_stride.pass.cpp +++ /dev/null @@ -1,38 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - cuda::std::array d{42}; - - cuda::std::mdspan, cuda::std::layout_stride> m{ - d.data(), - cuda::std::layout_stride::template mapping>{ - cuda::std::dextents{16, 32}, cuda::std::array{1, 128}}}; - - assert(m.data_handle() == d.data()); - assert(m.rank() == 2); - assert(m.rank_dynamic() == 2); - assert(m.extent(0) == 16); - assert(m.extent(1) == 32); - assert(m.stride(0) == 1); - assert(m.stride(1) == 128); - assert(m.is_exhaustive() == false); - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/mapping.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/mapping.pass.cpp deleted file mode 100644 index bee833b1054..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/mapping.pass.cpp +++ /dev/null @@ -1,76 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -#include "../mdspan.mdspan.util/mdspan_util.hpp" -#include "../my_accessor.hpp" - -constexpr auto dyn = cuda::std::dynamic_extent; - -template -struct is_mapping_cons_avail : cuda::std::false_type -{}; - -template -struct is_mapping_cons_avail< - T, - DataHandleType, - MappingType, - cuda::std::enable_if_t< - cuda::std::is_same(), cuda::std::declval()}), T>::value>> - : cuda::std::true_type -{}; - -template -constexpr bool is_mapping_cons_avail_v = is_mapping_cons_avail::value; - -int main(int, char**) -{ - using data_t = int; - using index_t = size_t; - using ext_t = cuda::std::extents; - using mapping_t = cuda::std::layout_left::mapping; - - // mapping - { - using mdspan_t = cuda::std::mdspan; - - static_assert(is_mapping_cons_avail_v == true, ""); - - cuda::std::array d{42}; - mapping_t map{cuda::std::dextents{64, 128}}; - mdspan_t m{d.data(), map}; - - CHECK_MDSPAN_EXTENT(m, d, 64, 128); - } - - // Constraint: is_default_constructible_v is true - { - using mdspan_t = cuda::std::mdspan>; - - static_assert(is_mapping_cons_avail_v == false, ""); - } - - // mapping and accessor - { - cuda::std::array d{42}; - mapping_t map{cuda::std::dextents{64, 128}}; - cuda::std::default_accessor a; - cuda::std::mdspan m{d.data(), map, a}; - - CHECK_MDSPAN_EXTENT(m, d, 64, 128); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/span_init_extents.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/span_init_extents.pass.cpp deleted file mode 100644 index d70b0648d18..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.cons/span_init_extents.pass.cpp +++ /dev/null @@ -1,97 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of 
the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -#include "../mdspan.mdspan.util/mdspan_util.hpp" -#include "../my_accessor.hpp" -#include "../my_int.hpp" - -constexpr auto dyn = cuda::std::dynamic_extent; - -template -struct is_span_cons_avail : cuda::std::false_type -{}; - -template -struct is_span_cons_avail< - T, - DataHandleT, - SizeType, - N, - cuda::std::enable_if_t(), cuda::std::declval>()}), - T>::value>> : cuda::std::true_type -{}; - -template -constexpr bool is_span_cons_avail_v = is_span_cons_avail::value; - -int main(int, char**) -{ - // extents from cuda::std::span - { - using mdspan_t = cuda::std::mdspan>; - using other_index_t = int; - - static_assert(is_span_cons_avail_v == true, ""); - - cuda::std::array d{42}; - cuda::std::array sarr{64, 128}; - - mdspan_t m{d.data(), cuda::std::span{sarr}}; - - CHECK_MDSPAN_EXTENT(m, d, 64, 128); - } - - // Constraint: (is_convertible_v && ...) is true - { - using mdspan_t = cuda::std::mdspan>; - using other_index_t = my_int_non_convertible; - - static_assert(is_span_cons_avail_v == false, ""); - } - - // Constraint: (is_convertible_v && ...) 
is true - { - using mdspan_t = cuda::std::mdspan>; - using other_index_t = my_int_non_convertible; - - static_assert(is_span_cons_avail_v == false, ""); - } - - // Constraint: N == rank() || N == rank_dynamic() is true - { - using mdspan_t = cuda::std::mdspan>; - using other_index_t = int; - - static_assert(is_span_cons_avail_v == false, ""); - } - - // Constraint: is_constructible_v is true - { - using mdspan_t = cuda::std::mdspan, cuda::std::layout_stride>; - - static_assert(is_span_cons_avail_v == false, ""); - } - - // Constraint: is_default_constructible_v is true - { - using mdspan_t = - cuda::std::mdspan, cuda::std::layout_right, Foo::my_accessor>; - - static_assert(is_span_cons_avail_v == false, ""); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/accessor.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/accessor.pass.cpp deleted file mode 100644 index 5823babedfe..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/accessor.pass.cpp +++ /dev/null @@ -1,34 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - { - using data_t = int; - using index_t = size_t; - cuda::std::array d{42}; - cuda::std::layout_left::mapping> map{ - cuda::std::dextents{64, 128}}; - cuda::std::default_accessor const a; - cuda::std::mdspan, cuda::std::layout_left> m{d.data(), map, a}; - - assert(m.accessor().access(d.data(), 0) == a.access(d.data(), 0)); - assert(m.accessor().offset(d.data(), 0) == a.offset(d.data(), 0)); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/brackets_op.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/brackets_op.pass.cpp deleted file mode 100644 index 06a496aa214..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/brackets_op.pass.cpp +++ /dev/null @@ -1,167 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -#include "../my_int.hpp" - -// Will be testing `m[0,0]` when it becomes available -// Alternatively, could use macro `__MDSPAN_OP(m,0,0)` which is turned to either `m[0,0]` or `m(0,0)`, -// depending on if `__cpp_multidimensional_subscript` is defined or not - -constexpr auto dyn = cuda::std::dynamic_extent; - -template -struct is_bracket_op_avail : cuda::std::false_type -{}; - -template -struct is_bracket_op_avail< - cuda::std::enable_if_t()(cuda::std::declval()...)), - typename T::accessor_type::reference>::value>, - T, - OtherIndexTypes...> : cuda::std::true_type -{}; - -template -constexpr bool is_bracket_op_avail_v = is_bracket_op_avail::value; - -template -struct is_bracket_op_array_avail : cuda::std::false_type -{}; - -template -struct is_bracket_op_array_avail< - T, - OtherIndexType, - N, - cuda::std::enable_if_t< - cuda::std::is_same()(cuda::std::declval>())), - typename T::accessor_type::reference>::value>> : cuda::std::true_type -{}; - -template -constexpr bool is_bracket_op_array_avail_v = is_bracket_op_array_avail::value; - -template -struct is_bracket_op_span_avail : cuda::std::false_type -{}; - -template -struct is_bracket_op_span_avail< - T, - OtherIndexType, - N, - cuda::std::enable_if_t< - cuda::std::is_same()(cuda::std::declval>())), - typename T::accessor_type::reference>::value>> : cuda::std::true_type -{}; - -template -constexpr bool is_bracket_op_span_avail_v = is_bracket_op_span_avail::value; - -int main(int, char**) -{ - { - using element_t = int; - using index_t = int; - using ext_t = cuda::std::extents; - using mdspan_t = cuda::std::mdspan; - - cuda::std::array d{42, 43, 44, 45}; - mdspan_t m{d.data(), ext_t{2, 2}}; - - static_assert(is_bracket_op_avail_v == true, ""); - - // param pack - assert(m(0, 0) == 42); - assert(m(0, 1) == 43); - assert(m(1, 0) == 44); 
- assert(m(1, 1) == 45); - - // array of indices - assert(m(cuda::std::array{0, 0}) == 42); - assert(m(cuda::std::array{0, 1}) == 43); - assert(m(cuda::std::array{1, 0}) == 44); - assert(m(cuda::std::array{1, 1}) == 45); - - static_assert(is_bracket_op_array_avail_v == true, ""); - - // span of indices - assert(m(cuda::std::span{cuda::std::array{0, 0}}) == 42); - assert(m(cuda::std::span{cuda::std::array{0, 1}}) == 43); - assert(m(cuda::std::span{cuda::std::array{1, 0}}) == 44); - assert(m(cuda::std::span{cuda::std::array{1, 1}}) == 45); - - static_assert(is_bracket_op_span_avail_v == true, ""); - } - - // Param pack of indices in a type implicitly convertible to index_type - { - using element_t = int; - using index_t = int; - using ext_t = cuda::std::extents; - using mdspan_t = cuda::std::mdspan; - - cuda::std::array d{42, 43, 44, 45}; - mdspan_t m{d.data(), ext_t{2, 2}}; - - assert(m(my_int(0), my_int(0)) == 42); - assert(m(my_int(0), my_int(1)) == 43); - assert(m(my_int(1), my_int(0)) == 44); - assert(m(my_int(1), my_int(1)) == 45); - } - - // Constraint: rank consistency - { - using element_t = int; - using index_t = int; - using mdspan_t = cuda::std::mdspan>; - - static_assert(is_bracket_op_avail_v == false, ""); - - static_assert(is_bracket_op_array_avail_v == false, ""); - - static_assert(is_bracket_op_span_avail_v == false, ""); - } - - // Constraint: convertibility - { - using element_t = int; - using index_t = int; - using mdspan_t = cuda::std::mdspan>; - - static_assert(is_bracket_op_avail_v == false, ""); - - static_assert(is_bracket_op_array_avail_v == false, ""); - - static_assert(is_bracket_op_span_avail_v == false, ""); - } - - // Constraint: nonthrow-constructibility -#ifndef TEST_COMPILER_BROKEN_SMF_NOEXCEPT - { - using element_t = int; - using index_t = int; - using mdspan_t = cuda::std::mdspan>; - - static_assert(is_bracket_op_avail_v == false, ""); - - static_assert(is_bracket_op_array_avail_v == false, ""); - - 
static_assert(is_bracket_op_span_avail_v == false, ""); - } -#endif // TEST_COMPILER_BROKEN_SMF_NOEXCEPT - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/data_handle.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/data_handle.pass.cpp deleted file mode 100644 index 7f48124cb37..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/data_handle.pass.cpp +++ /dev/null @@ -1,70 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -int main(int, char**) -{ - // C array - { - const int d[5] = {1, 2, 3, 4, 5}; -#if defined(__cpp_deduction_guides) && defined(__MDSPAN_USE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION) - cuda::std::mdspan m(d); -#else - cuda::std::mdspan> m(d); -#endif - - assert(m.data_handle() == d); - } - - // std array - { - cuda::std::array d = {1, 2, 3, 4, 5}; -#if defined(__cpp_deduction_guides) && defined(__MDSPAN_USE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION) - cuda::std::mdspan m(d.data()); -#else - cuda::std::mdspan> m(d.data()); -#endif - - assert(m.data_handle() == d.data()); - } - - // C pointer - { - cuda::std::array d = {1, 2, 3, 4, 5}; - int* ptr = d.data(); -#if defined(__cpp_deduction_guides) && defined(__MDSPAN_USE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION) - cuda::std::mdspan m(ptr); -#else - cuda::std::mdspan> m(ptr); -#endif - - assert(m.data_handle() == ptr); - } - - // Copy constructor - { - cuda::std::array d = {1, 2, 3, 4, 5}; 
-#if defined(__cpp_deduction_guides) && defined(__MDSPAN_USE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION) - cuda::std::mdspan m0(d.data()); - cuda::std::mdspan m(m0); -#else - cuda::std::mdspan> m0(d.data()); - cuda::std::mdspan> m(m0); -#endif - - assert(m.data_handle() == m0.data_handle()); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/empty.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/empty.pass.cpp deleted file mode 100644 index 022f06ece3b..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/empty.pass.cpp +++ /dev/null @@ -1,51 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -int main(int, char**) -{ - cuda::std::array storage{1}; - - { - cuda::std::mdspan> m; - - assert(m.empty() == true); - } - - { - cuda::std::mdspan> m{storage.data(), 0}; - - assert(m.empty() == true); - } - - { - cuda::std::mdspan> m{storage.data(), 2}; - - assert(m.empty() == false); - } - - { - cuda::std::mdspan> m{storage.data(), 2, 0}; - - assert(m.empty() == true); - } - - { - cuda::std::mdspan> m{storage.data(), 2, 2}; - - assert(m.empty() == false); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/extent.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/extent.pass.cpp deleted file mode 100644 index 1739c909d48..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/extent.pass.cpp +++ /dev/null @@ -1,64 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - typedef int data_t; - typedef size_t index_t; - - cuda::std::array d{42}; - - { - cuda::std::mdspan> m; - - assert(m.extent(0) == 0); - assert(m.extent(1) == 0); - } - - { - cuda::std::mdspan> m{d.data()}; - - assert(m.extent(0) == 16); - assert(m.extent(1) == 32); - } - - { - cuda::std::mdspan> m{d.data(), 16, 32}; - - assert(m.extent(0) == 16); - assert(m.extent(1) == 32); - } - - { - cuda::std::mdspan, cuda::std::layout_left> m{d.data(), 16, 32}; - - assert(m.extent(0) == 16); - assert(m.extent(1) == 32); - } - - { - using dexts = cuda::std::dextents; - - cuda::std::mdspan, cuda::std::layout_stride> m{ - d.data(), cuda::std::layout_stride::template mapping{dexts{16, 32}, cuda::std::array{1, 128}}}; - - assert(m.extent(0) == 16); - assert(m.extent(1) == 32); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/extents.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/extents.pass.cpp deleted file mode 100644 index 398d8196627..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/extents.pass.cpp +++ /dev/null @@ -1,29 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - { - cuda::std::array d{42}; - cuda::std::extents e{64, 128}; - cuda::std::mdspan> m{d.data(), e}; - - assert(&m.extents() == &m.mapping().extents()); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/is_exhaustive.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/is_exhaustive.pass.cpp deleted file mode 100644 index fe2e43702da..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/is_exhaustive.pass.cpp +++ /dev/null @@ -1,48 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - { - cuda::std::mdspan> m; - - static_assert(m.is_always_exhaustive() == true, ""); - assert(m.is_exhaustive() == true); - } - - cuda::std::array d{42}; - cuda::std::extents e{64, 128}; - - { - cuda::std::mdspan, cuda::std::layout_left> m{d.data(), e}; - - static_assert(m.is_always_exhaustive() == true, ""); - assert(m.is_exhaustive() == true); - } - - { - using dexts = cuda::std::dextents; - - cuda::std::mdspan, cuda::std::layout_stride> m{ - d.data(), cuda::std::layout_stride::template mapping{dexts{16, 32}, cuda::std::array{1, 128}}}; - - static_assert(m.is_always_exhaustive() == false, ""); - assert(m.is_exhaustive() == false); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/is_strided.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/is_strided.pass.cpp deleted file mode 100644 index 487e4cf219e..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/is_strided.pass.cpp +++ /dev/null @@ -1,48 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - { - cuda::std::mdspan> m; - - static_assert(m.is_always_strided() == true, ""); - assert(m.is_strided() == true); - } - - cuda::std::array d{42}; - cuda::std::extents e{64, 128}; - - { - cuda::std::mdspan, cuda::std::layout_left> m{d.data(), e}; - - static_assert(m.is_always_strided() == true, ""); - assert(m.is_strided() == true); - } - - { - using dexts = cuda::std::dextents; - - cuda::std::mdspan, cuda::std::layout_stride> m{ - d.data(), cuda::std::layout_stride::template mapping{dexts{16, 32}, cuda::std::array{1, 128}}}; - - static_assert(m.is_always_strided() == true, ""); - assert(m.is_strided() == true); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/is_unique.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/is_unique.pass.cpp deleted file mode 100644 index e0f1abf7be4..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/is_unique.pass.cpp +++ /dev/null @@ -1,45 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - { - cuda::std::mdspan> m; - - static_assert(m.is_always_unique() == true, ""); - assert(m.is_unique() == true); - } - - cuda::std::array d{42}; - cuda::std::extents e{64, 128}; - - { - cuda::std::mdspan> m{d.data(), e}; - - static_assert(m.is_always_unique() == true, ""); - assert(m.is_unique() == true); - } - - { - cuda::std::mdspan, cuda::std::layout_left> m{d.data(), e}; - - static_assert(m.is_always_unique() == true, ""); - assert(m.is_unique() == true); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/mapping.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/mapping.pass.cpp deleted file mode 100644 index 9eac690dbda..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/mapping.pass.cpp +++ /dev/null @@ -1,46 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - // mapping - { - using data_t = int; - using index_t = size_t; - cuda::std::array d{42}; - cuda::std::layout_left::mapping> map{ - cuda::std::dextents{64, 128}}; - cuda::std::mdspan, cuda::std::layout_left> m{d.data(), map}; - - assert(m.mapping() == map); - } - - // mapping and accessor - { - using data_t = int; - using index_t = size_t; - cuda::std::array d{42}; - cuda::std::layout_left::mapping> map{ - cuda::std::dextents{64, 128}}; - cuda::std::default_accessor a; - cuda::std::mdspan, cuda::std::layout_left> m{d.data(), map, a}; - - assert(m.mapping() == map); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/rank.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/rank.pass.cpp deleted file mode 100644 index 8c66601523a..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/rank.pass.cpp +++ /dev/null @@ -1,54 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - typedef int data_t; - typedef size_t index_t; - - cuda::std::array d{42}; - - { - cuda::std::mdspan> m; - - static_assert(m.rank() == 1, ""); - assert(m.rank_dynamic() == 1); - } - - { - cuda::std::mdspan> m{d.data()}; - - static_assert(m.rank() == 1, ""); - assert(m.rank_dynamic() == 0); - } - - { - cuda::std::mdspan> m{d.data(), 16, 32}; - - static_assert(m.rank() == 2, ""); - assert(m.rank_dynamic() == 2); - } - - { - cuda::std::mdspan> m{d.data(), 16, 32}; - - static_assert(m.rank() == 3, ""); - assert(m.rank_dynamic() == 2); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/size.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/size.pass.cpp deleted file mode 100644 index b347d79d0d4..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/size.pass.cpp +++ /dev/null @@ -1,48 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -template -__host__ __device__ void test_mdspan_size(cuda::std::array& storage, Extents&& e) -{ - using extents_type = cuda::std::remove_cv_t>; - cuda::std::mdspan m(storage.data(), cuda::std::forward(e)); - - static_assert(cuda::std::is_same::value, - "The return type of mdspan::size() must be size_t."); - - // m.size() must not overflow, as long as the product of extents - // is representable as a value of type size_t. - assert(m.size() == N); -} - -int main(int, char**) -{ - // TEST(TestMdspan, MdspanSizeReturnTypeAndPrecondition) - { - cuda::std::array storage; - - static_assert(cuda::std::numeric_limits::max() == 127, "max int8_t != 127"); - test_mdspan_size(storage, cuda::std::extents{}); // 12 * 11 == 132 - } - - { - cuda::std::array storage; - - static_assert(cuda::std::numeric_limits::max() == 255, "max uint8_t != 255"); - test_mdspan_size(storage, cuda::std::extents{}); // 16 * 17 == 272 - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/stride.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/stride.pass.cpp deleted file mode 100644 index 00d01121ec9..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/stride.pass.cpp +++ /dev/null @@ -1,57 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -int main(int, char**) -{ - typedef int data_t; - typedef size_t index_t; - - cuda::std::array d{42}; - - { - cuda::std::mdspan> m; - - assert(m.stride(0) == 1); - assert(m.stride(1) == 1); - } - - { - cuda::std::mdspan> m{d.data(), 16, 32}; - - assert(m.stride(0) == 32); - assert(m.stride(1) == 1); - } - - { - cuda::std::mdspan, cuda::std::layout_left> m{d.data(), 16, 32}; - - assert(m.stride(0) == 1); - assert(m.stride(1) == 16); - } - - { - using dexts = cuda::std::dextents; - - cuda::std::mdspan, cuda::std::layout_stride> m{ - d.data(), cuda::std::layout_stride::template mapping{dexts{16, 32}, cuda::std::array{1, 128}}}; - - assert(m.stride(0) == 1); - assert(m.stride(1) == 128); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/swap.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/swap.pass.cpp deleted file mode 100644 index ecaaaa00625..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.members/swap.pass.cpp +++ /dev/null @@ -1,89 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -__host__ __device__ void test_std_swap_static_extents() -{ - int data1[12] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}; - int data2[12] = {21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}; - - cuda::std::mdspan> m1(data1); - cuda::std::mdspan> m2(data2); - cuda::std::extents exts1; - cuda::std::layout_right::mapping> map1(exts1); - cuda::std::extents exts2; - cuda::std::layout_right::mapping> map2(exts2); - - assert(m1.data_handle() == data1); - assert(m1.mapping() == map1); - auto val1 = m1(0, 0); - assert(val1 == 1); - assert(m2.data_handle() == data2); - assert(m2.mapping() == map2); - auto val2 = m2(0, 0); - assert(val2 == 21); - - cuda::std::swap(m1, m2); - assert(m1.data_handle() == data2); - assert(m1.mapping() == map2); - val1 = m1(0, 0); - assert(val1 == 21); - assert(m2.data_handle() == data1); - assert(m2.mapping() == map1); - val2 = m2(0, 0); - assert(val2 == 1); -} - -__host__ __device__ void test_std_swap_dynamic_extents() -{ - int data1[12] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}; - int data2[12] = {21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}; - - cuda::std::mdspan> m1(data1, 3, 4); - cuda::std::mdspan> m2(data2, 4, 3); - cuda::std::dextents exts1(3, 4); - cuda::std::layout_right::mapping> map1(exts1); - cuda::std::dextents exts2(4, 3); - cuda::std::layout_right::mapping> map2(exts2); - - assert(m1.data_handle() == data1); - assert(m1.mapping() == map1); - auto val1 = m1(0, 0); - assert(val1 == 1); - assert(m2.data_handle() == data2); - assert(m2.mapping() == map2); - auto val2 = m2(0, 0); - assert(val2 == 21); - - cuda::std::swap(m1, m2); - assert(m1.data_handle() == data2); - assert(m1.mapping() == map2); - val1 = m1(0, 0); - assert(val1 == 21); - assert(m2.data_handle() == data1); - assert(m2.mapping() == map1); - val2 = m2(0, 0); - assert(val2 == 1); -} - -int 
main(int, char**) -{ - test_std_swap_static_extents(); - - test_std_swap_dynamic_extents(); - - // TODO port tests for customized layout and accessor - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.util/mdspan_util.hpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.util/mdspan_util.hpp deleted file mode 100644 index 0766577e7b3..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.mdspan.util/mdspan_util.hpp +++ /dev/null @@ -1,16 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -#define CHECK_MDSPAN_EXTENT(m, d, e0, e1) \ - static_assert(m.is_exhaustive(), ""); \ - assert(m.data_handle() == d.data()); \ - assert(m.rank() == 2); \ - assert(m.rank_dynamic() == 2); \ - assert(m.extent(0) == e0); \ - assert(m.extent(1) == e1) diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.submdspan.submdspan/dim_reduction.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.submdspan.submdspan/dim_reduction.pass.cpp deleted file mode 100644 index 3636deec557..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.submdspan.submdspan/dim_reduction.pass.cpp +++ /dev/null @@ -1,74 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -int main(int, char**) -{ - // TEST(TestSubmdspanLayoutRightStaticSizedRankReducing3Dto1D, - // test_submdspan_layout_right_static_sized_rank_reducing_3d_to_1d) - { - cuda::std::array d; - cuda::std::mdspan> m(d.data()); - m(1, 1, 1) = 42; - auto sub0 = cuda::std::submdspan(m, 1, 1, cuda::std::full_extent); - - static_assert(decltype(sub0)::rank() == 1, "unexpected submdspan rank"); - static_assert(sub0.rank() == 1, ""); - static_assert(sub0.rank_dynamic() == 0, ""); - assert(sub0.extent(0) == 4); - assert(sub0(1) == 42); - } - - // TEST(TestSubmdspanLayoutLeftStaticSizedRankReducing3Dto1D, - // test_submdspan_layout_left_static_sized_rank_reducing_3d_to_1d) - { - cuda::std::array d; - cuda::std::mdspan, cuda::std::layout_left> m(d.data()); - m(1, 1, 1) = 42; - auto sub0 = cuda::std::submdspan(m, 1, 1, cuda::std::full_extent); - - static_assert(sub0.rank() == 1, ""); - static_assert(sub0.rank_dynamic() == 0, ""); - assert(sub0.extent(0) == 4); - assert(sub0(1) == 42); - } - - // TEST(TestSubmdspanLayoutRightStaticSizedRankReducingNested3Dto0D, - // test_submdspan_layout_right_static_sized_rank_reducing_nested_3d_to_0d) - { - cuda::std::array d; - cuda::std::mdspan> m(d.data()); - m(1, 1, 1) = 42; - auto sub0 = cuda::std::submdspan(m, 1, cuda::std::full_extent, cuda::std::full_extent); - - static_assert(sub0.rank() == 2, ""); - static_assert(sub0.rank_dynamic() == 0, ""); - assert(sub0.extent(0) == 3); - assert(sub0.extent(1) == 4); - assert(sub0(1, 1) == 42); - - auto sub1 = cuda::std::submdspan(sub0, 1, cuda::std::full_extent); - static_assert(sub1.rank() == 1, ""); - static_assert(sub1.rank_dynamic() == 0, ""); - assert(sub1.extent(0) == 4); - 
assert(sub1(1) == 42); - - auto sub2 = cuda::std::submdspan(sub1, 1); - static_assert(sub2.rank() == 0, ""); - static_assert(sub2.rank_dynamic() == 0, ""); - assert(sub2() == 42); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.submdspan.submdspan/pair_init.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.submdspan.submdspan/pair_init.pass.cpp deleted file mode 100644 index 6da702d260b..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.submdspan.submdspan/pair_init.pass.cpp +++ /dev/null @@ -1,35 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -int main(int, char**) -{ - // TEST(TestSubmdspanLayoutRightStaticSizedPairs, test_submdspan_layout_right_static_sized_pairs) - { - cuda::std::array d; - cuda::std::mdspan> m(d.data()); - m(1, 1, 1) = 42; - auto sub0 = cuda::std::submdspan( - m, cuda::std::pair{1, 2}, cuda::std::pair{1, 3}, cuda::std::pair{1, 4}); - - static_assert(sub0.rank() == 3, ""); - static_assert(sub0.rank_dynamic() == 3, ""); - assert(sub0.extent(0) == 1); - assert(sub0.extent(1) == 2); - assert(sub0.extent(2) == 3); - assert(sub0(0, 0, 0) == 42); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.submdspan.submdspan/return_type.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.submdspan.submdspan/return_type.pass.cpp deleted file mode 100644 index 8cc529f17bd..00000000000 --- 
a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.submdspan.submdspan/return_type.pass.cpp +++ /dev/null @@ -1,385 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. -// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -constexpr auto dyn = cuda::std::dynamic_extent; - -// template - -using submdspan_test_types = cuda::std::tuple< - // LayoutLeft to LayoutLeft - cuda::std::tuple, - cuda::std::dextents, - cuda::std::full_extent_t>, - cuda::std::tuple, - cuda::std::dextents, - cuda::std::pair>, - cuda::std::tuple, - cuda::std::dextents, - int>, - cuda::std::tuple, - cuda::std::dextents, - cuda::std::full_extent_t, - cuda::std::full_extent_t>, - cuda::std::tuple, - cuda::std::dextents, - cuda::std::full_extent_t, - cuda::std::pair>, - cuda::std::tuple, - cuda::std::dextents, - cuda::std::full_extent_t, - int>, - cuda::std::tuple, - cuda::std::dextents, - cuda::std::full_extent_t, - cuda::std::full_extent_t, - cuda::std::pair>, - cuda::std::tuple, - cuda::std::dextents, - cuda::std::full_extent_t, - cuda::std::pair, - int>, - cuda::std::tuple, - cuda::std::dextents, - cuda::std::full_extent_t, - int, - int>, - cuda::std::tuple, - cuda::std::dextents, - cuda::std::pair, - int, - int>, - cuda::std::tuple, - cuda::std::dextents, - cuda::std::full_extent_t, - cuda::std::full_extent_t, - cuda::std::pair, - int, - int, - int>, - cuda::std::tuple, - cuda::std::dextents, - cuda::std::full_extent_t, - cuda::std::pair, - int, - int, - int, - int>, - cuda::std::tuple, - cuda::std::dextents, - cuda::std::full_extent_t, - 
int, - int, - int, - int, - int>, - cuda::std::tuple, - cuda::std::dextents, - cuda::std::pair, - int, - int, - int, - int, - int> - // LayoutRight to LayoutRight - , - cuda::std::tuple, - cuda::std::dextents, - cuda::std::full_extent_t>, - cuda::std::tuple, - cuda::std::dextents, - cuda::std::pair>, - cuda::std::tuple, - cuda::std::dextents, - int>, - cuda::std::tuple, - cuda::std::dextents, - cuda::std::full_extent_t, - cuda::std::full_extent_t>, - cuda::std::tuple, - cuda::std::dextents, - cuda::std::pair, - cuda::std::full_extent_t>, - cuda::std::tuple, - cuda::std::dextents, - int, - cuda::std::full_extent_t>, - cuda::std::tuple, - cuda::std::dextents, - cuda::std::pair, - cuda::std::full_extent_t, - cuda::std::full_extent_t>, - cuda::std::tuple, - cuda::std::dextents, - int, - cuda::std::pair, - cuda::std::full_extent_t>, - cuda::std::tuple, - cuda::std::dextents, - int, - int, - cuda::std::full_extent_t>, - cuda::std::tuple, - cuda::std::dextents, - int, - int, - int, - cuda::std::pair, - cuda::std::full_extent_t, - cuda::std::full_extent_t>, - cuda::std::tuple, - cuda::std::dextents, - int, - int, - int, - int, - cuda::std::pair, - cuda::std::full_extent_t>, - cuda::std::tuple, - cuda::std::dextents, - int, - int, - int, - int, - int, - cuda::std::full_extent_t> - // LayoutRight to LayoutRight Check Extents Preservation - , - cuda::std::tuple, - cuda::std::extents, - cuda::std::full_extent_t>, - cuda::std::tuple, - cuda::std::extents, - cuda::std::pair>, - cuda::std:: - tuple, cuda::std::extents, int>, - cuda::std::tuple, - cuda::std::extents, - cuda::std::full_extent_t, - cuda::std::full_extent_t>, - cuda::std::tuple, - cuda::std::extents, - cuda::std::pair, - cuda::std::full_extent_t>, - cuda::std::tuple, - cuda::std::extents, - int, - cuda::std::full_extent_t>, - cuda::std::tuple, - cuda::std::extents, - cuda::std::pair, - cuda::std::full_extent_t, - cuda::std::full_extent_t>, - cuda::std::tuple, - cuda::std::extents, - int, - cuda::std::pair, - 
cuda::std::full_extent_t>, - cuda::std::tuple, - cuda::std::extents, - int, - int, - cuda::std::full_extent_t>, - cuda::std::tuple, - cuda::std::extents, - int, - int, - int, - cuda::std::pair, - cuda::std::full_extent_t, - cuda::std::full_extent_t>, - cuda::std::tuple, - cuda::std::extents, - int, - int, - int, - int, - cuda::std::pair, - cuda::std::full_extent_t>, - cuda::std::tuple, - cuda::std::extents, - int, - int, - int, - int, - int, - cuda::std::full_extent_t> - - , - cuda::std::tuple, - cuda::std::extents, - cuda::std::full_extent_t, - int, - cuda::std::pair, - int, - int, - cuda::std::full_extent_t>, - cuda::std::tuple, - cuda::std::extents, - int, - cuda::std::full_extent_t, - cuda::std::pair, - int, - cuda::std::full_extent_t, - int>>; - -template -struct TestSubMDSpan; - -template -struct TestSubMDSpan> -{ - using mds_org_t = cuda::std::mdspan; - using mds_sub_t = cuda::std::mdspan; - using map_t = typename mds_org_t::mapping_type; - - using mds_sub_deduced_t = decltype(cuda::std::submdspan(mds_org_t(nullptr, map_t()), SubArgs()...)); - using sub_args_t = cuda::std::tuple; -}; - -// TYPED_TEST(TestSubMDSpan, submdspan_return_type) -template -__host__ __device__ void test_submdspan() -{ - using TestFixture = TestSubMDSpan; - - static_assert(cuda::std::is_same::value, - "SubMDSpan: wrong return type"); -} - -int main(int, char**) -{ - static_assert(cuda::std::tuple_size{} == 40, ""); - - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - 
test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - test_submdspan>(); - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.submdspan.submdspan/tuple_init.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.submdspan.submdspan/tuple_init.pass.cpp deleted file mode 100644 index 91b8f87603c..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan.submdspan.submdspan/tuple_init.pass.cpp +++ /dev/null @@ -1,35 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
-// -//===----------------------------------------------------------------------===// - -// UNSUPPORTED: c++11 -// UNSUPPORTED: msvc && c++14, msvc && c++17 - -#include -#include - -int main(int, char**) -{ - // TEST(TestSubmdspanLayoutRightStaticSizedTuples, test_submdspan_layout_right_static_sized_tuples) - { - cuda::std::array d; - cuda::std::mdspan> m(d.data()); - m(1, 1, 1) = 42; - auto sub0 = cuda::std::submdspan( - m, cuda::std::tuple{1, 2}, cuda::std::tuple{1, 3}, cuda::std::tuple{1, 4}); - - static_assert(sub0.rank() == 3, ""); - static_assert(sub0.rank_dynamic() == 3, ""); - assert(sub0.extent(0) == 1); - assert(sub0.extent(1) == 2); - assert(sub0.extent(2) == 3); - assert(sub0(0, 0, 0) == 42); - } - - return 0; -} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/CustomTestAccessors.h b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/CustomTestAccessors.h new file mode 100644 index 00000000000..31bf5cbddc4 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/CustomTestAccessors.h @@ -0,0 +1,421 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +// Kokkos v. 4.0 +// Copyright (2022) National Technology & Engineering +// Solutions of Sandia, LLC (NTESS). +// +// Under the terms of Contract DE-NA0003525 with NTESS, +// the U.S. Government retains certain rights in this software. 
+// +//===---------------------------------------------------------------------===// + +#ifndef TEST_STD_CONTAINERS_VIEWS_MDSPAN_MDSPAN_CUSTOM_TEST_ACCESSORS_H +#define TEST_STD_CONTAINERS_VIEWS_MDSPAN_MDSPAN_CUSTOM_TEST_ACCESSORS_H + +#include +#include +#include + +// This contains a bunch of accessors and handles which have different properties +// regarding constructibility and convertibility in order to test mdspan constraints + +// non default constructible data handle +template +struct no_default_ctor_handle +{ + T* ptr; + no_default_ctor_handle() = delete; + __host__ __device__ constexpr no_default_ctor_handle(T* ptr_) + : ptr(ptr_) + {} +}; + +// handle that can't convert from T to const T +template +struct not_const_convertible_handle +{ + T* ptr; + __host__ __device__ constexpr not_const_convertible_handle() + : ptr(nullptr) + {} + __host__ __device__ constexpr not_const_convertible_handle(T* ptr_) + : ptr(ptr_) + {} + + __host__ __device__ constexpr T& operator[](size_t i) const + { + return ptr[i]; + } +}; + +// handle where move has side effects +STATIC_TEST_GLOBAL_VAR int move_counted_handle_c = 0; +template +struct move_counted_handle +{ + T* ptr; + constexpr move_counted_handle() = default; + constexpr move_counted_handle(const move_counted_handle&) = default; + template ::value, int> = 0> + __host__ __device__ constexpr move_counted_handle(const move_counted_handle& other) + : ptr(other.ptr){}; + + __host__ __device__ constexpr move_counted_handle(move_counted_handle&& other) + : ptr(other.ptr) + { + if (!cuda::std::__cccl_default_is_constant_evaluated()) + { + move_counted_handle_c++; + } + } + + constexpr move_counted_handle& operator=(const move_counted_handle&) = default; + + __host__ __device__ constexpr move_counted_handle(T* ptr_) + : ptr(ptr_) + {} + + __host__ __device__ constexpr T& operator[](size_t i) const + { + return ptr[i]; + } + + __host__ __device__ static constexpr int& move_counter() noexcept + { + return move_counted_handle_c; 
+ } +}; + +template >::value, int> = 0> +__host__ __device__ constexpr void test_move_counter() +{ + if (!cuda::std::__cccl_default_is_constant_evaluated()) + { + assert((H::move_counter() == 1)); + } +} +template >::value, int> = 0> +__host__ __device__ constexpr void test_move_counter() +{} + +// non-default constructible accessor with a bunch of different data handles +template +struct checked_accessor +{ + size_t N; + using offset_policy = cuda::std::default_accessor; + using element_type = ElementType; + using reference = ElementType&; + using data_handle_type = move_counted_handle; + + __host__ __device__ constexpr checked_accessor(size_t N_) + : N(N_) + {} + template ::value, int> = 0> + __host__ __device__ explicit constexpr checked_accessor(const checked_accessor& other) noexcept + { + N = other.N; + } + + __host__ __device__ constexpr reference access(data_handle_type p, size_t i) const noexcept + { + assert(i < N); + return p[i]; + } + __host__ __device__ constexpr data_handle_type offset(data_handle_type p, size_t i) const noexcept + { + assert(i < N); + return data_handle_type(p.ptr + i); + } +}; + +static_assert(cuda::std::is_constructible, const checked_accessor&>::value, ""); +static_assert(!cuda::std::is_convertible&, checked_accessor>::value, ""); + +template <> +struct checked_accessor +{ + size_t N; + using offset_policy = cuda::std::default_accessor; + using element_type = double; + using reference = double&; + using data_handle_type = no_default_ctor_handle; + + __host__ __device__ constexpr checked_accessor(size_t N_) + : N(N_) + {} + + template ::value, int> = 0> + __host__ __device__ constexpr checked_accessor(checked_accessor&& other) noexcept + { + N = other.N; + } + + __host__ __device__ constexpr reference access(data_handle_type p, size_t i) const noexcept + { + assert(i < N); + return p.ptr[i]; + } + __host__ __device__ constexpr data_handle_type offset(data_handle_type p, size_t i) const noexcept + { + assert(i < N); + return p.ptr + 
i; + } +}; + +template <> +struct checked_accessor +{ + size_t N; + using offset_policy = cuda::std::default_accessor; + using element_type = unsigned; + using reference = unsigned; + using data_handle_type = not_const_convertible_handle; + + __host__ __device__ constexpr checked_accessor() + : N(0) + {} + __host__ __device__ constexpr checked_accessor(size_t N_) + : N(N_) + {} + __host__ __device__ constexpr checked_accessor(const checked_accessor& acc) + : N(acc.N) + {} + + __host__ __device__ constexpr reference access(data_handle_type p, size_t i) const noexcept + { + assert(i < N); + return p[i]; + } + __host__ __device__ constexpr auto offset(data_handle_type p, size_t i) const noexcept + { + assert(i < N); + return p.ptr + i; + } +}; +template <> +struct checked_accessor +{ + size_t N; + using offset_policy = cuda::std::default_accessor; + using element_type = const unsigned; + using reference = unsigned; + using data_handle_type = not_const_convertible_handle; + + __host__ __device__ constexpr checked_accessor() + : N(0) + {} + __host__ __device__ constexpr checked_accessor(size_t N_) + : N(N_) + {} + __host__ __device__ constexpr checked_accessor(const checked_accessor& acc) + : N(acc.N) + {} + + template ::value, int> = 0> + __host__ __device__ constexpr checked_accessor(OtherACC&& acc) + : N(acc.N) + {} + + template ::value, int> = 0> + __host__ __device__ constexpr explicit checked_accessor(OtherACC&& acc) + : N(acc.N) + {} + + __host__ __device__ constexpr reference access(data_handle_type p, size_t i) const noexcept + { + assert(i < N); + return p[i]; + } + __host__ __device__ constexpr auto offset(data_handle_type p, size_t i) const noexcept + { + assert(i < N); + return p.ptr + i; + } +}; + +template <> +struct checked_accessor +{ + size_t N; + using offset_policy = cuda::std::default_accessor; + using element_type = const float; + using reference = const float&; + using data_handle_type = move_counted_handle; + + __host__ __device__ constexpr 
checked_accessor() + : N(0) + {} + __host__ __device__ constexpr checked_accessor(size_t N_) + : N(N_) + {} + __host__ __device__ constexpr checked_accessor(const checked_accessor& acc) + : N(acc.N) + {} + + __host__ __device__ constexpr checked_accessor(checked_accessor&& acc) + : N(acc.N) + {} + + __host__ __device__ constexpr reference access(data_handle_type p, size_t i) const noexcept + { + assert(i < N); + return p[i]; + } + __host__ __device__ constexpr data_handle_type offset(data_handle_type p, size_t i) const noexcept + { + assert(i < N); + return data_handle_type(p.ptr + i); + } +}; + +template <> +struct checked_accessor +{ + size_t N; + using offset_policy = cuda::std::default_accessor; + using element_type = const double; + using reference = const double&; + using data_handle_type = move_counted_handle; + + __host__ __device__ constexpr checked_accessor() + : N(0) + {} + __host__ __device__ constexpr checked_accessor(size_t N_) + : N(N_) + {} + __host__ __device__ constexpr checked_accessor(const checked_accessor& acc) + : N(acc.N) + {} + + __host__ __device__ constexpr reference access(data_handle_type p, size_t i) const noexcept + { + assert(i < N); + return p[i]; + } + __host__ __device__ constexpr data_handle_type offset(data_handle_type p, size_t i) const noexcept + { + assert(i < N); + return data_handle_type(p.ptr + i); + } +}; + +// Data handle pair which has configurable conversion properties +// bool template parameters are used to enable/disable ctors and assignment +// the c is the one for const T the nc for non-const (so we can convert mdspan) +// Note both take non-const T as template parameter though +template +struct conv_test_accessor_c; + +template +struct conv_test_accessor_nc +{ + using offset_policy = cuda::std::default_accessor; + using element_type = T; + using reference = T&; + using data_handle_type = T*; + + constexpr conv_test_accessor_nc() = default; + constexpr conv_test_accessor_nc(const conv_test_accessor_nc&) = default; 
+ + template = 0> + __host__ __device__ constexpr operator conv_test_accessor_c() + { + return conv_test_accessor_c{}; + } + template = 0> + __host__ __device__ constexpr operator conv_test_accessor_c() const + { + return conv_test_accessor_c{}; + } + + __host__ __device__ constexpr reference access(data_handle_type p, size_t i) const noexcept + { + return p[i]; + } + __host__ __device__ constexpr data_handle_type offset(data_handle_type p, size_t i) const noexcept + { + return p + i; + } +}; + +template +struct conv_test_accessor_c +{ + using offset_policy = cuda::std::default_accessor; + using element_type = const T; + using reference = const T&; + using data_handle_type = const T*; + + constexpr conv_test_accessor_c() = default; + constexpr conv_test_accessor_c(const conv_test_accessor_c&) = default; + + template = 0> + __host__ __device__ constexpr conv_test_accessor_c(const conv_test_accessor_nc&) + {} + template = 0> + __host__ __device__ constexpr conv_test_accessor_c(conv_test_accessor_nc&&) + {} + template = 0> + __host__ __device__ constexpr conv_test_accessor_c& operator=(const conv_test_accessor_nc&) + { + return {}; + } + template = 0> + __host__ __device__ constexpr conv_test_accessor_c& operator=(conv_test_accessor_nc&&) + { + return {}; + } + + __host__ __device__ constexpr reference access(data_handle_type p, size_t i) const noexcept + { + return p[i]; + } + __host__ __device__ constexpr data_handle_type offset(data_handle_type p, size_t i) const noexcept + { + return p + i; + } +}; + +template +struct convertible_accessor_but_not_handle +{ + size_t N; + using offset_policy = cuda::std::default_accessor; + using element_type = ElementType; + using reference = ElementType&; + using data_handle_type = not_const_convertible_handle; + + constexpr convertible_accessor_but_not_handle() = default; + template ::value, int> = 0> + __host__ __device__ explicit constexpr convertible_accessor_but_not_handle( + const convertible_accessor_but_not_handle& other) 
noexcept + { + N = other.N; + } + + __host__ __device__ constexpr reference access(data_handle_type p, size_t i) const noexcept + { + assert(i < N); + return p[i]; + } + __host__ __device__ constexpr data_handle_type offset(data_handle_type p, size_t i) const noexcept + { + assert(i < N); + return data_handle_type(p.ptr + i); + } +}; + +#endif // TEST_STD_CONTAINERS_VIEWS_MDSPAN_MDSPAN_CUSTOM_TEST_ACCESSORS_H diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/CustomTestLayouts.h b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/CustomTestLayouts.h new file mode 100644 index 00000000000..60e5f8d700e --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/CustomTestLayouts.h @@ -0,0 +1,568 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +// Kokkos v. 4.0 +// Copyright (2022) National Technology & Engineering +// Solutions of Sandia, LLC (NTESS). +// +// Under the terms of Contract DE-NA0003525 with NTESS, +// the U.S. Government retains certain rights in this software. +// +//===---------------------------------------------------------------------===// + +#ifndef TEST_STD_CONTAINERS_VIEWS_MDSPAN_CUSTOM_TEST_LAYOUTS_H +#define TEST_STD_CONTAINERS_VIEWS_MDSPAN_CUSTOM_TEST_LAYOUTS_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "test_macros.h" + +// Layout that wraps indices to test some idiosyncratic behavior +// - basically it is a layout_left where indices are first wrapped i.e. 
i%Wrap +// - only accepts integers as indices +// - is_always_strided and is_always_unique are false +// - is_strided and is_unique are true if all extents are smaller than Wrap +// - not default constructible +// - not extents constructible +// - not trivially copyable +// - does not check dynamic to static extent conversion in converting ctor +// - check via side-effects that mdspan::swap calls mappings swap via ADL + +__host__ __device__ bool mul_overflow(size_t x, size_t y, size_t* res) +{ + *res = x * y; + return x && ((*res / x) != y); +} + +template +__host__ __device__ inline const T& Min(const T& __a, const T& __b) +{ + return __b < __a ? __b : __a; +} + +struct not_extents_constructible_tag +{}; + +STATIC_TEST_GLOBAL_VAR int layout_wrapping_integral_swap_counter = 0; +template +class layout_wrapping_integral +{ +public: + template + class mapping; +}; + +template +template +class layout_wrapping_integral::mapping +{ + static constexpr typename Extents::index_type Wrap = static_cast(WrapArg); + +public: + using extents_type = Extents; + using index_type = typename extents_type::index_type; + using size_type = typename extents_type::size_type; + using rank_type = typename extents_type::rank_type; + using layout_type = layout_wrapping_integral; + +private: + template = 0> + __host__ __device__ static constexpr bool required_span_size_is_representable(const extents_type& ext) + { + return true; + } + + template = 0> + __host__ __device__ static constexpr bool required_span_size_is_representable(const extents_type& ext) + { + index_type prod = ext.extent(0); + for (rank_type r = 1; r < extents_type::rank(); r++) + { + bool overflowed = mul_overflow(prod, Min(ext.extent(r), Wrap), &prod); + if (overflowed) + { + return false; + } + } + return true; + } + +public: + __host__ __device__ constexpr mapping() noexcept = delete; + __host__ __device__ constexpr mapping(const mapping& other) noexcept + : extents_(other.extents()){}; + template = 0> + __host__ 
__device__ constexpr mapping(extents_type&& ext) noexcept + : extents_(ext) + {} + __host__ __device__ constexpr mapping(const extents_type& ext, not_extents_constructible_tag) noexcept + : extents_(ext) + {} + + template = 0> + __host__ __device__ static constexpr cuda::std::array + get_dyn_extents(const mapping& other) noexcept + { + cuda::std::array dyn_extents; + rank_type count = 0; + for (rank_type r = 0; r != extents_type::rank(); r++) + { + if (extents_type::static_extent(r) == cuda::std::dynamic_extent) + { + dyn_extents[count++] = other.extents().extent(r); + } + } + return dyn_extents; + } + template = 0> + __host__ __device__ static constexpr cuda::std::array + get_dyn_extents(const mapping& other) noexcept + { + return {}; + } + + template < + class OtherExtents, + cuda::std::enable_if_t::value && (Wrap != 8), int> = 0, + cuda::std::enable_if_t::value, int> = 0> + __host__ __device__ constexpr mapping(const mapping& other) noexcept + { + extents_ = extents_type(get_dyn_extents(other)); + } + + template < + class OtherExtents, + cuda::std::enable_if_t::value && (Wrap != 8), int> = 0, + cuda::std::enable_if_t::value, int> = 0> + __host__ __device__ constexpr explicit mapping(const mapping& other) noexcept + { + extents_ = extents_type(get_dyn_extents(other)); + } + + template < + class OtherExtents, + cuda::std::enable_if_t::value && (Wrap == 8), int> = 0, + cuda::std::enable_if_t::value, int> = 0> + __host__ __device__ constexpr mapping(mapping&& other) noexcept + { + extents_ = extents_type(get_dyn_extents(other)); + } + + template < + class OtherExtents, + cuda::std::enable_if_t::value && (Wrap == 8), int> = 0, + cuda::std::enable_if_t::value, int> = 0> + __host__ __device__ constexpr explicit mapping(mapping&& other) noexcept + { + extents_ = extents_type(get_dyn_extents(other)); + } + + __host__ __device__ constexpr mapping& operator=(const mapping& other) noexcept + { + extents_ = other.extents_; + return *this; + }; + + __host__ __device__ 
constexpr const extents_type& extents() const noexcept + { + return extents_; + } + + __host__ __device__ constexpr index_type required_span_size() const noexcept + { + index_type size = 1; + for (size_t r = 0; r != extents_type::rank(); r++) + { + size *= extents_.extent(r) < Wrap ? extents_.extent(r) : Wrap; + } + return size; + } + + struct rank_accumulator + { + __host__ __device__ constexpr rank_accumulator(const extents_type& extents) noexcept + : extents_(extents) + {} + + template + __host__ __device__ constexpr index_type operator()(cuda::std::index_sequence, Indices... idx) const noexcept + { + cuda::std::array idx_a{ + static_cast(static_cast(idx) % Wrap)...}; + cuda::std::array position = {(extents_type::rank() - 1 - Pos)...}; + + index_type res = 0; + for (size_t index = 0; index < extents_type::rank(); ++index) + { + res = idx_a[index] + (extents_.extent(index) < Wrap ? extents_.extent(index) : Wrap) * res; + } + return res; + } + + const extents_type& extents_{}; + }; + + template < + class... Indices, + cuda::std::enable_if_t = 0, + cuda::std::enable_if_t...>::value, int> = 0, + cuda::std::enable_if_t::value...>::value, int> = 0, + cuda::std::enable_if_t::value...>::value, + int> = 0> + __host__ __device__ constexpr index_type operator()(Indices... 
idx) const noexcept + { + return rank_accumulator{extents_}(cuda::std::make_index_sequence(), idx...); + } + + __host__ __device__ static constexpr bool is_always_unique() noexcept + { + return false; + } + __host__ __device__ static constexpr bool is_always_exhaustive() noexcept + { + return true; + } + __host__ __device__ static constexpr bool is_always_strided() noexcept + { + return false; + } + + TEST_NV_DIAG_SUPPRESS(186) // pointless comparison of unsigned integer with zero + + __host__ __device__ constexpr bool is_unique() const noexcept + { + for (rank_type r = 0; r != extents_type::rank(); r++) + { + if (extents_.extent(r) > Wrap) + { + return false; + } + } + return true; + } + __host__ __device__ static constexpr bool is_exhaustive() noexcept + { + return true; + } + __host__ __device__ constexpr bool is_strided() const noexcept + { + for (rank_type r = 0; r != extents_type::rank(); r++) + { + if (extents_.extent(r) > Wrap) + { + return false; + } + } + return true; + } + + template 0), int> = 0> + __host__ __device__ constexpr index_type stride(rank_type r) const noexcept + { + index_type s = 1; + for (rank_type i = extents_type::rank() - 1; i > r; i--) + { + s *= extents_.extent(i); + } + return s; + } + + template = 0> + __host__ __device__ friend constexpr bool operator==(const mapping& lhs, const mapping& rhs) noexcept + { + return lhs.extents() == rhs.extents(); + } + +#if TEST_STD_VER <= 2017 + template = 0> + __host__ __device__ friend constexpr bool operator!=(const mapping& lhs, const mapping& rhs) noexcept + { + return lhs.extents() != rhs.extents(); + } +#endif // TEST_STD_VER <= 2017 + + __host__ __device__ friend constexpr void swap(mapping& x, mapping& y) noexcept + { + swap(x.extents_, y.extents_); + if (!cuda::std::__cccl_default_is_constant_evaluated()) + { + layout_wrapping_integral_swap_counter++; + } + } + + __host__ __device__ static int& swap_counter() + { + return layout_wrapping_integral_swap_counter; + } + +private: + 
extents_type extents_{}; +}; + +template < + class MDS, + cuda::std::enable_if_t>::value, int> = 0> +__host__ __device__ constexpr void test_swap_counter() +{ + if (!cuda::std::__cccl_default_is_constant_evaluated()) + { + assert(MDS::mapping_type::swap_counter() > 0); + } +} +template < + class MDS, + cuda::std::enable_if_t>::value, int> = 0> +__host__ __device__ constexpr void test_swap_counter() +{} + +template +__host__ __device__ constexpr auto construct_mapping(cuda::std::layout_left, Extents exts) +{ + return cuda::std::layout_left::mapping(exts); +} + +template +__host__ __device__ constexpr auto construct_mapping(cuda::std::layout_right, Extents exts) +{ + return cuda::std::layout_right::mapping(exts); +} + +template +__host__ __device__ constexpr auto construct_mapping(layout_wrapping_integral, Extents exts) +{ + return typename layout_wrapping_integral::template mapping(exts, not_extents_constructible_tag{}); +} + +// This layout does not check convertibility of extents for its conversion ctor +// Allows triggering mdspan's ctor static assertion on convertibility of extents +STATIC_TEST_GLOBAL_VAR int always_convertible_layout_swap_counter = 0; +class always_convertible_layout +{ +public: + template + class mapping; +}; + +template +class always_convertible_layout::mapping +{ +public: + using extents_type = Extents; + using index_type = typename extents_type::index_type; + using size_type = typename extents_type::size_type; + using rank_type = typename extents_type::rank_type; + using layout_type = always_convertible_layout; + +private: + template = 0> + __host__ __device__ static constexpr bool required_span_size_is_representable(const extents_type& ext) + { + return true; + } + template = 0> + __host__ __device__ static constexpr bool required_span_size_is_representable(const extents_type& ext) + { + index_type prod = ext.extent(0); + for (rank_type r = 1; r < extents_type::rank(); r++) + { + bool overflowed = mul_overflow(prod, ext.extent(r), &prod); 
+ if (overflowed) + { + return false; + } + } + return true; + } + +public: + __host__ __device__ constexpr mapping() noexcept = delete; + __host__ __device__ constexpr mapping(const mapping& other) noexcept + : extents_(other.extents_) + , offset_(other.offset_) + , scaling_(other.scaling_) + {} + __host__ __device__ constexpr mapping(const extents_type& ext) noexcept + : extents_(ext) + , offset_(0) + , scaling_(1){}; + __host__ __device__ constexpr mapping(const extents_type& ext, index_type offset) noexcept + : extents_(ext) + , offset_(offset) + , scaling_(1){}; + __host__ __device__ constexpr mapping(const extents_type& ext, index_type offset, index_type scaling) noexcept + : extents_(ext) + , offset_(offset) + , scaling_(scaling){}; + + template = 0> + __host__ __device__ constexpr mapping(const mapping& other) noexcept + { + cuda::std::array dyn_extents; + rank_type count = 0; + for (rank_type r = 0; r != extents_type::rank(); r++) + { + if (extents_type::static_extent(r) == cuda::std::dynamic_extent) + { + dyn_extents[count++] = other.extents().extent(r); + } + } + extents_ = extents_type(dyn_extents); + offset_ = other.offset_; + scaling_ = other.scaling_; + } + template = 0> + __host__ __device__ constexpr mapping(const mapping& other) noexcept + { + extents_ = extents_type(); + offset_ = other.offset_; + scaling_ = other.scaling_; + } + + __host__ __device__ constexpr mapping& operator=(const mapping& other) noexcept + { + extents_ = other.extents_; + offset_ = other.offset_; + scaling_ = other.scaling_; + return *this; + }; + + __host__ __device__ constexpr const extents_type& extents() const noexcept + { + return extents_; + } + + __host__ __device__ static constexpr const index_type& Max(const index_type& __a, const index_type& __b) noexcept + { + return __a > __b ? 
__a : __b; + } + + __host__ __device__ constexpr index_type required_span_size() const noexcept + { + index_type size = 1; + for (size_t r = 0; r != extents_type::rank(); r++) + { + size *= extents_.extent(r); + } + return Max(size * scaling_ + offset_, offset_); + } + + struct rank_accumulator + { + __host__ __device__ constexpr rank_accumulator(const extents_type& extents) noexcept + : extents_(extents) + {} + + template + __host__ __device__ constexpr index_type operator()(cuda::std::index_sequence, Indices... idx) const noexcept + { + cuda::std::array idx_a{ + static_cast(static_cast(idx))...}; + cuda::std::array position = {(extents_type::rank() - 1 - Pos)...}; + + index_type res = 0; + for (size_t index = 0; index < extents_type::rank(); ++index) + { + res = idx_a[index] + extents_.extent(index) * res; + } + return res; + } + + const extents_type& extents_{}; + }; + + template < + class... Indices, + cuda::std::enable_if_t = 0, + cuda::std::enable_if_t...>::value, int> = 0, + cuda::std::enable_if_t::value...>::value, int> = 0, + cuda::std::enable_if_t::value...>::value, + int> = 0> + __host__ __device__ constexpr index_type operator()(Indices... 
idx) const noexcept + { + return offset_ + + scaling_ * rank_accumulator{extents_}(cuda::std::make_index_sequence(), idx...); + } + + __host__ __device__ static constexpr bool is_always_unique() noexcept + { + return true; + } + __host__ __device__ static constexpr bool is_always_exhaustive() noexcept + { + return true; + } + __host__ __device__ static constexpr bool is_always_strided() noexcept + { + return true; + } + + __host__ __device__ static constexpr bool is_unique() noexcept + { + return true; + } + __host__ __device__ static constexpr bool is_exhaustive() noexcept + { + return true; + } + __host__ __device__ static constexpr bool is_strided() noexcept + { + return true; + } + + template 0), int> = 0> + __host__ __device__ constexpr index_type stride(rank_type r) const noexcept + { + index_type s = 1; + for (rank_type i = 0; i < r; i++) + { + s *= extents_.extent(i); + } + return s * scaling_; + } + + template + __host__ __device__ friend constexpr auto operator==(const mapping& lhs, const mapping& rhs) noexcept + -> cuda::std::enable_if_t + { + return lhs.extents() == rhs.extents() && lhs.offset_ == rhs.offset && lhs.scaling_ == rhs.scaling_; + } + +#if TEST_STD_VER < 2020 + template + __host__ __device__ friend constexpr auto operator!=(const mapping& lhs, const mapping& rhs) noexcept + -> cuda::std::enable_if_t + { + return !(lhs == rhs); + } +#endif // TEST_STD_VER < 2020 + + __host__ __device__ friend constexpr void swap(mapping& x, mapping& y) noexcept + { + swap(x.extents_, y.extents_); + if (!cuda::std::__cccl_default_is_constant_evaluated()) + { + always_convertible_layout_swap_counter++; + } + } + +private: + template + friend class mapping; + + extents_type extents_{}; + index_type offset_{}; + index_type scaling_{}; +}; +#endif // TEST_STD_CONTAINERS_VIEWS_MDSPAN_CUSTOM_TEST_LAYOUTS_H diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/assign.pass.cpp 
b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/assign.pass.cpp new file mode 100644 index 00000000000..cbb4f6b462a --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/assign.pass.cpp @@ -0,0 +1,158 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// constexpr mdspan& operator=(const mdspan& rhs) = default; + +#include +#include +#include +#include + +#include "../CustomTestLayouts.h" +#include "../MinimalElementType.h" +#include "CustomTestAccessors.h" +#include "test_macros.h" + +// The defaulted assignment operator seems to be deprecated because: +// error: definition of implicit copy assignment operator for 'checked_accessor' is deprecated +// because it has a user-provided copy constructor [-Werror,-Wdeprecated-copy-with-user-provided-copy] +template >::value, int> = 0> +__host__ __device__ constexpr void test_implicit_copy_assignment(MDS& m, MDS& m_org) +{ + m = m_org; +} +template >::value, int> = 0> +__host__ __device__ constexpr void test_implicit_copy_assignment(MDS&, MDS&) +{} + +template +__host__ __device__ constexpr void test_mdspan_types(const H& handle, const M& map, const A& acc) +{ + using MDS = cuda::std::mdspan; + + MDS m_org(handle, map, acc); + MDS m(handle, map, acc); + + test_implicit_copy_assignment(m, m_org); + // even though the following checks out: + static_assert(cuda::std::copyable>, ""); + static_assert(cuda::std::is_assignable, checked_accessor>::value, ""); + + static_assert(noexcept(m = m_org), ""); + assert(m.extents() == map.extents()); + 
test_equality_handle(m, handle); + test_equality_mapping(m, map); + test_equality_accessor(m, acc); + + static_assert(cuda::std::is_trivially_assignable::value + == ((!cuda::std::is_class::value || cuda::std::is_trivially_assignable::value) + && cuda::std::is_trivially_assignable::value + && cuda::std::is_trivially_assignable::value), + ""); +} + +template +__host__ __device__ constexpr void mixin_extents(const H& handle, const L& layout, const A& acc) +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents()), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(7)), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents()), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(2, 3)), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(0, 3)), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(1, 2, 3, 2)), acc); +} + +template +__host__ __device__ constexpr void mixin_layout(const H& handle, const A& acc) +{ + // make sure we test a trivially assignable mapping + static_assert(cuda::std::is_trivially_assignable< + typename cuda::std::layout_left::template mapping>, + const typename cuda::std::layout_left::template mapping>&>::value, + ""); + mixin_extents(handle, cuda::std::layout_left(), acc); + mixin_extents(handle, cuda::std::layout_right(), acc); + // make sure we test a not trivially assignable mapping + static_assert(!cuda::std::is_trivially_assignable< + typename layout_wrapping_integral<4>::template mapping>, + const typename layout_wrapping_integral<4>::template mapping>&>::value, + ""); + mixin_extents(handle, layout_wrapping_integral<4>(), acc); +} + +template ::value, int> = 0> +__host__ __device__ constexpr void mixin_accessor() +{ + cuda::std::array elements{42}; + // make sure we test trivially constructible accessor and data_handle + 
static_assert(cuda::std::is_trivially_copyable>::value, ""); + static_assert(cuda::std::is_trivially_copyable::data_handle_type>::value, ""); + mixin_layout(elements.data(), cuda::std::default_accessor()); + + // Using weird accessor/data_handle + // Make sure they actually got the properties we want to test + // checked_accessor is noexcept copy constructible except for const double + checked_accessor acc(1024); + static_assert(noexcept(checked_accessor(acc)) != cuda::std::is_same::value, ""); + mixin_layout(typename checked_accessor::data_handle_type(elements.data()), acc); +} + +template ::value, int> = 0> +__host__ __device__ TEST_CONSTEXPR_CXX20 void mixin_accessor() +{ + ElementPool elements; + // make sure we test trivially constructible accessor and data_handle + static_assert(cuda::std::is_trivially_copyable>::value, ""); + static_assert(cuda::std::is_trivially_copyable::data_handle_type>::value, ""); + mixin_layout(elements.get_ptr(), cuda::std::default_accessor()); + + // Using weird accessor/data_handle + // Make sure they actually got the properties we want to test + // checked_accessor is noexcept copy constructible except for const double + checked_accessor acc(1024); + static_assert(noexcept(checked_accessor(acc)) != cuda::std::is_same::value, ""); + mixin_layout(typename checked_accessor::data_handle_type(elements.get_ptr()), acc); +} + +__host__ __device__ constexpr bool test() +{ + mixin_accessor(); + mixin_accessor(); + mixin_accessor(); + mixin_accessor(); + return true; +} + +__host__ __device__ TEST_CONSTEXPR_CXX20 bool test_evil() +{ + mixin_accessor(); + mixin_accessor(); + return true; +} + +int main(int, char**) +{ + test(); + test_evil(); + +#if TEST_STD_VER >= 2020 + static_assert(test(), ""); + static_assert(test_evil(), ""); +#endif // TEST_STD_VER >= 2020 + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/conversion.pass.cpp 
b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/conversion.pass.cpp new file mode 100644 index 00000000000..111a255ca2f --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/conversion.pass.cpp @@ -0,0 +1,383 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// template +// constexpr explicit(see below) +// mdspan(const mdspan& other); +// +// Constraints: +// - is_constructible_v&> is true, and +// - is_constructible_v is true. +// Mandates: +// - is_constructible_v is +// - is_constructible_v is true. +// +// Preconditions: +// - For each rank index r of extents_type, static_extent(r) == dynamic_extent || static_extent(r) == other.extent(r) +// is true. +// - [0, map_.required_span_size()) is an accessible range of ptr_ and acc_ for values of ptr_, map_, and acc_ after +// the invocation of this constructor. +// +// Effects: +// - Direct-non-list-initializes ptr_ with other.ptr_, +// - direct-non-list-initializes map_ with other.map_, and +// - direct-non-list-initializes acc_ with other.acc_. 
+// +// Remarks: The expression inside explicit is equivalent to: +// !is_convertible_v&, mapping_type> +// || !is_convertible_v + +#include +#include +#include +#include + +#include "../CustomTestLayouts.h" +#include "../MinimalElementType.h" +#include "CustomTestAccessors.h" +#include "test_macros.h" + +template +__host__ __device__ constexpr void test_implicit_conversion(ToMDS to_mds, FromMDS from_mds) +{ + assert(to_mds.extents() == from_mds.extents()); + test_equality_with_handle(to_mds, from_mds); + test_equality_with_mapping(to_mds, from_mds); + test_equality_with_accessor(to_mds, from_mds); +} + +template +constexpr bool mapping_requirements = + cuda::std::copyable && cuda::std::equality_comparable && cuda::std::is_nothrow_move_constructible::value + && cuda::std::is_nothrow_move_assignable::value && cuda::std::is_nothrow_swappable::value; + +template = 0> +__host__ __device__ constexpr void test_conversion_impl(FromMDS) +{ + static_assert(!cuda::std::is_constructible::value, ""); +} +template = 0, + cuda::std::enable_if_t = 0> +__host__ __device__ constexpr void test_conversion_impl(FromMDS) +{} +template = 0, + cuda::std::enable_if_t = 0, + cuda::std::enable_if_t = 0> +__host__ __device__ constexpr void test_conversion_impl(FromMDS from_mds) +{ + ToMDS to_mds(from_mds); + assert(to_mds.extents() == from_mds.extents()); + test_equality_with_handle(to_mds, from_mds); + test_equality_with_mapping(to_mds, from_mds); + test_equality_with_accessor(to_mds, from_mds); + test_implicit_conversion(from_mds, from_mds); +} +template = 0, + cuda::std::enable_if_t = 0, + cuda::std::enable_if_t = 0> +__host__ __device__ constexpr void test_conversion_impl(FromMDS from_mds) +{ + ToMDS to_mds(from_mds); + assert(to_mds.extents() == from_mds.extents()); + test_equality_with_handle(to_mds, from_mds); + test_equality_with_mapping(to_mds, from_mds); + test_equality_with_accessor(to_mds, from_mds); + static_assert(!cuda::std::is_convertible::value, ""); +} + +template +__host__ 
__device__ constexpr void test_conversion(FromMDS from_mds) +{ + // check some requirements, to see we didn't screw up our test layouts/accessors + static_assert(cuda::std::copyable, ""); + static_assert(cuda::std::equality_comparable, ""); + static_assert(cuda::std::is_nothrow_move_constructible::value, ""); + static_assert(cuda::std::is_nothrow_move_assignable::value, ""); + static_assert(cuda::std::is_nothrow_swappable::value, ""); + static_assert(mapping_requirements, ""); + static_assert(mapping_requirements, ""); + + constexpr bool constructible = + cuda::std::is_constructible::value + && cuda::std::is_constructible::value; + constexpr bool convertible = + cuda::std::is_convertible::value + && cuda::std::is_convertible::value; + constexpr bool passes_mandates = + cuda::std::is_constructible::value + && cuda::std::is_constructible::value; + + test_conversion_impl(from_mds); +} + +template +__host__ __device__ constexpr void +construct_from_mds(const FromH& handle, const FromL& layout, const FromExt& exts, const FromA& acc) +{ + using ToMDS = cuda::std::mdspan; + using FromMDS = cuda::std::mdspan; + test_conversion(FromMDS(handle, construct_mapping(layout, exts), acc)); +} + +template +__host__ __device__ constexpr void mixin_extents(const FromH& handle, const FromL& layout, const FromA& acc) +{ + constexpr size_t D = cuda::std::dynamic_extent; + // constructible and convertible + construct_from_mds, ToA>(handle, layout, cuda::std::dextents(), acc); + construct_from_mds, ToA>(handle, layout, cuda::std::dextents(4), acc); + construct_from_mds, ToA>(handle, layout, cuda::std::extents(), acc); + construct_from_mds, ToA>(handle, layout, cuda::std::dextents(4, 5), acc); + construct_from_mds, ToA>(handle, layout, cuda::std::dextents(4, 5), acc); + construct_from_mds, ToA>(handle, layout, cuda::std::extents(4), acc); + construct_from_mds, ToA>(handle, layout, cuda::std::extents(4), acc); + construct_from_mds, ToA>(handle, layout, cuda::std::extents(4), acc); + 
construct_from_mds, ToA>( + handle, layout, cuda::std::extents(4, 6), acc); + + // not convertible + construct_from_mds, ToA>(handle, layout, cuda::std::dextents(4), acc); + construct_from_mds, ToA>( + handle, layout, cuda::std::extents(4, 6, 7), acc); + + // not constructible + construct_from_mds, ToA>(handle, layout, cuda::std::dextents(4, 5), acc); + construct_from_mds, ToA>( + handle, layout, cuda::std::extents(4, 6), acc); +} + +template +__host__ __device__ constexpr void mixin_layout(const FromH& handle, const FromA& acc) +{ + mixin_extents(handle, cuda::std::layout_left(), acc); + mixin_extents(handle, cuda::std::layout_right(), acc); + // Check layout policy conversion + // different layout policies, but constructible and convertible + static_assert( + cuda::std::is_constructible>, + const cuda::std::layout_right::mapping>&>::value, + ""); + static_assert(cuda::std::is_convertible>&, + cuda::std::layout_left::mapping>>::value, + ""); + // different layout policies, not constructible + static_assert( + !cuda::std::is_constructible>, + const cuda::std::layout_right::mapping>&>::value, + ""); + // different layout policies, constructible and not convertible + static_assert( + cuda::std::is_constructible>, + const cuda::std::layout_right::mapping>&>::value, + ""); + static_assert(!cuda::std::is_convertible>&, + cuda::std::layout_left::mapping>>::value, + ""); + + mixin_extents(handle, cuda::std::layout_right(), acc); + mixin_extents, ToA>(handle, layout_wrapping_integral<4>(), acc); + // different layout policies, constructible and not convertible + static_assert( + !cuda::std::is_constructible::mapping>, + const layout_wrapping_integral<8>::mapping>&>::value, + ""); + static_assert(cuda::std::is_constructible::mapping>, + layout_wrapping_integral<8>::mapping>>::value, + ""); + mixin_extents, ToA>(handle, layout_wrapping_integral<8>(), acc); +} + +// check that we cover all corners with respect to constructibility and convertibility +template ::value + && 
!cuda::std::is_same::value, + int> = 0> +__host__ __device__ constexpr void test_impl(FromA from_acc) +{ + cuda::std::array elements = {42}; + mixin_layout(typename FromA::data_handle_type(elements.data()), from_acc); +} + +template ::value + || cuda::std::is_same::value, + int> = 0> +__host__ __device__ TEST_CONSTEXPR_CXX20 void test_impl(FromA from_acc) +{ + ElementPool elements; + mixin_layout(typename FromA::data_handle_type(elements.get_ptr()), from_acc); +} + +template +__host__ __device__ constexpr bool test(FromA from_acc) +{ + static_assert(cuda::std::copyable, ""); + static_assert(cuda::std::copyable, ""); + static_assert(cuda::std::is_constructible::value == constructible_constref_acc, ""); + static_assert(cuda::std::is_constructible::value == constructible_nonconst_acc, ""); + static_assert( + cuda::std::is_constructible::value + == constructible_constref_handle, + ""); + static_assert(cuda::std::is_constructible::value + == constructible_nonconst_handle, + ""); + static_assert(cuda::std::is_convertible::value == convertible_constref_acc, ""); + static_assert(cuda::std::is_convertible::value == convertible_nonconst_acc, ""); + static_assert( + cuda::std::is_convertible::value + == convertible_constref_handle, + ""); + static_assert(cuda::std::is_convertible::value + == convertible_nonconst_handle, + ""); + + test_impl(from_acc); + return true; +} + +int main(int, char**) +{ + // using shorthands here: t and o for better visual distinguishability + constexpr bool t = true; + constexpr bool o = false; + + // possibility matrix for constructibility and convertibility https://godbolt.org/z/98KGo8Wbc + // you can't have convertibility without constructibility + // and if you take const T& then you also can take T + // this leaves 7 combinations + // const_ref_ctor, const_ref_conv, nonconst_ctor, nonconst_conv, tested + // o o o o X + // o o t o X + // o o t t X + // t o t o X + // t o t t X + // t t t o X + // t t t t X + + // checked_accessor has various 
weird data handles and some weird conversion properties + // conv_test_accessor_c/nc is an accessor pair which has configurable conversion properties, but plain ptr as data + // handle accessor constructible + test>(cuda::std::default_accessor()); + test>(cuda::std::default_accessor()); + test>( + cuda::std::default_accessor()); + test>( + cuda::std::default_accessor()); + test>(checked_accessor(1024)); + test>(checked_accessor(1024)); + test>(checked_accessor(1024)); + test>(checked_accessor(1024)); + test>(checked_accessor(1024)); + test>(checked_accessor(1024)); + test>(checked_accessor(1024)); + test>(conv_test_accessor_nc()); + test>(conv_test_accessor_nc()); +// FIXME: these tests trigger what appears to be a compiler bug on MINGW32 with --target=x86_64-w64-windows-gnu +// https://godbolt.org/z/KK8aj5bs7 +// Bug report: https://github.com/llvm/llvm-project/issues/64077 +#ifndef TEST_COMPILER_MSVC + test>(conv_test_accessor_nc()); + test>(conv_test_accessor_nc()); +#endif + + // ElementType convertible, but accessor not constructible + test>(cuda::std::default_accessor()); + test>(checked_accessor(1024)); + test>(checked_accessor(1024)); + test>(conv_test_accessor_nc()); + test>(conv_test_accessor_nc()); + test>(conv_test_accessor_nc()); + + // FIXME: nvcc cannot cope with mdspan constexpr evaluation in C++17 +#if TEST_STD_VER >= 2020 + + // Ran into trouble with doing it all in one static_assert: exceeding step limit for consteval + static_assert(test>(cuda::std::default_accessor()), + ""); + static_assert( + test>(cuda::std::default_accessor()), ""); +# if TEST_STD_VER >= 2020 + static_assert(test>( + cuda::std::default_accessor()), + ""); + static_assert(test>( + cuda::std::default_accessor()), + ""); +# endif // TEST_STD_VER >= 2020 + static_assert(test>(checked_accessor(1024)), ""); + static_assert(test>(checked_accessor(1024)), ""); + static_assert(test>(checked_accessor(1024)), ""); + static_assert(test>(checked_accessor(1024)), ""); + 
static_assert(test>(checked_accessor(1024)), ""); +# if TEST_STD_VER >= 2020 + static_assert( + test>(checked_accessor(1024)), ""); + static_assert(test>( + checked_accessor(1024)), + ""); +# endif // TEST_STD_VER >= 2020 + static_assert(test>(conv_test_accessor_nc()), + ""); + static_assert(test>(conv_test_accessor_nc()), + ""); + static_assert(test>(conv_test_accessor_nc()), + ""); + static_assert(test>(conv_test_accessor_nc()), + ""); + static_assert(test>(cuda::std::default_accessor()), + ""); + static_assert(test>(checked_accessor(1024)), ""); + static_assert(test>(checked_accessor(1024)), ""); + static_assert(test>(conv_test_accessor_nc()), + ""); + static_assert(test>(conv_test_accessor_nc()), + ""); + static_assert(test>(conv_test_accessor_nc()), + ""); +#endif // TEST_STD_VER >= 2020 + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/conversion.verify.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/conversion.verify.cpp new file mode 100644 index 00000000000..11ebf7a88e3 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/conversion.verify.cpp @@ -0,0 +1,74 @@ +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 +// UNSUPPORTED: nvrtc + +// + +// template +// constexpr explicit(see below) +// mdspan(const mdspan& other); +// +// Constraints: +// - is_constructible_v&> is true, and +// - is_constructible_v is true. +// Mandates: +// - is_constructible_v is +// - is_constructible_v is true. +// +// Preconditions: +// - For each rank index r of extents_type, static_extent(r) == dynamic_extent || static_extent(r) == other.extent(r) +// is true. 
+// - [0, map_.required_span_size()) is an accessible range of ptr_ and acc_ for values of ptr_, map_, and acc_ after +// the invocation of this constructor. +// +// Effects: +// - Direct-non-list-initializes ptr_ with other.ptr_, +// - direct-non-list-initializes map_ with other.map_, and +// - direct-non-list-initializes acc_ with other.acc_. +// +// Remarks: The expression inside explicit is equivalent to: +// !is_convertible_v&, mapping_type> +// || !is_convertible_v + +#include + +#include "../CustomTestLayouts.h" +#include "CustomTestAccessors.h" +#include "test_macros.h" + +__host__ __device__ void cant_construct_data_handle_type() +{ + int data; + cuda::std::mdspan, cuda::std::layout_right, convertible_accessor_but_not_handle> + m_nc(&data); + // expected-error-re@*:* {{{{.*}}no matching constructor for initialization of {{.*}} (aka + // 'not_const_convertible_handle')}} expected-error-re@*:* {{{{(static_assert|static assertion)}} failed + // {{.*}}mdspan: incompatible data_handle_type for mdspan construction}} + cuda::std:: + mdspan, cuda::std::layout_right, convertible_accessor_but_not_handle> + m_c(m_nc); + unused(m_c); +} + +__host__ __device__ void mapping_constructible_despite_extents_compatibility() +{ + int data; + cuda::std::mdspan, always_convertible_layout> m(&data); + // expected-error-re@*:* {{{{(static_assert|static assertion)}} failed {{.*}}mdspan: incompatible extents for mdspan + // construction}} + cuda::std::mdspan, always_convertible_layout> m2(m); + unused(m2); +} + +int main(int, char**) +{ + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.copy.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.copy.pass.cpp new file mode 100644 index 00000000000..b179468bd65 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.copy.pass.cpp @@ -0,0 +1,135 @@ +//===----------------------------------------------------------------------===// +// +// 
Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// constexpr mdspan(const mdspan&) = default; +// +// A specialization of mdspan is a trivially copyable type if its accessor_type, mapping_type, and data_handle_type are +// trivially copyable types. + +#include +#include +#include +#include + +#include "../CustomTestLayouts.h" +#include "../MinimalElementType.h" +#include "CustomTestAccessors.h" +#include "test_macros.h" + +template +__host__ __device__ constexpr void test_mdspan_types(const H& handle, const M& map, const A& acc) +{ + using MDS = cuda::std::mdspan; + + MDS m_org(handle, map, acc); + MDS m(m_org); + static_assert(noexcept(MDS(m_org)) == (noexcept(H(handle)) && noexcept(M(map)) && noexcept(A(acc))), ""); + static_assert(cuda::std::is_trivially_copyable::value + == (cuda::std::is_trivially_copyable::value && cuda::std::is_trivially_copyable::value + && cuda::std::is_trivially_copyable::value), + ""); + assert(m.extents() == map.extents()); + test_equality_handle(m, handle); + test_equality_mapping(m, map); + test_equality_accessor(m, acc); +} + +template +__host__ __device__ constexpr void mixin_extents(const H& handle, const L& layout, const A& acc) +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents()), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(7)), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents()), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(2, 3)), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(0, 
3)), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(1, 2, 3, 2)), acc); +} + +template +__host__ __device__ constexpr void mixin_layout(const H& handle, const A& acc) +{ + // make sure we test a trivially copyable mapping + static_assert( + cuda::std::is_trivially_copyable>>::value, + ""); + mixin_extents(handle, cuda::std::layout_left(), acc); + mixin_extents(handle, cuda::std::layout_right(), acc); + // make sure we test a not trivially copyable mapping + static_assert(!cuda::std::is_trivially_copyable< + typename layout_wrapping_integral<4>::template mapping>>::value, + ""); + mixin_extents(handle, layout_wrapping_integral<4>(), acc); +} + +template ::value, int> = 0> +__host__ __device__ constexpr void mixin_accessor() +{ + cuda::std::array elements{42}; + // make sure we test trivially constructible accessor and data_handle + static_assert(cuda::std::is_trivially_copyable>::value, ""); + static_assert(cuda::std::is_trivially_copyable::data_handle_type>::value, ""); + mixin_layout(elements.data(), cuda::std::default_accessor()); + + // Using weird accessor/data_handle + // Make sure they actually got the properties we want to test + // checked_accessor is noexcept copy constructible except for const double + checked_accessor acc(1024); + static_assert(noexcept(checked_accessor(acc)) != cuda::std::is_same::value, ""); + mixin_layout(typename checked_accessor::data_handle_type(elements.data()), acc); +} + +template ::value, int> = 0> +__host__ __device__ TEST_CONSTEXPR_CXX20 void mixin_accessor() +{ + ElementPool elements; + // make sure we test trivially constructible accessor and data_handle + static_assert(cuda::std::is_trivially_copyable>::value, ""); + static_assert(cuda::std::is_trivially_copyable::data_handle_type>::value, ""); + mixin_layout(elements.get_ptr(), cuda::std::default_accessor()); + + // Using weird accessor/data_handle + // Make sure they actually got the properties we want to test + // checked_accessor is 
noexcept copy constructible except for const double + checked_accessor acc(1024); + static_assert(noexcept(checked_accessor(acc)) != cuda::std::is_same::value, ""); + mixin_layout(typename checked_accessor::data_handle_type(elements.get_ptr()), acc); +} + +__host__ __device__ constexpr bool test() +{ + mixin_accessor(); + mixin_accessor(); + mixin_accessor(); + mixin_accessor(); + return true; +} + +__host__ __device__ TEST_CONSTEXPR_CXX20 bool test_evil() +{ + mixin_accessor(); + mixin_accessor(); + return true; +} + +int main(int, char**) +{ + test(); + test_evil(); + +#if TEST_STD_VER >= 2020 + static_assert(test(), ""); + static_assert(test_evil(), ""); +#endif // TEST_STD_VER >= 2020 + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.default.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.default.pass.cpp new file mode 100644 index 00000000000..9fa852a43f1 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.default.pass.cpp @@ -0,0 +1,168 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// constexpr mdspan(); +// Constraints: +// - rank_dynamic() > 0 is true. +// - is_default_constructible_v is true. +// - is_default_constructible_v is true. +// - is_default_constructible_v is true. +// +// Preconditions: [0, map_.required_span_size()) is an accessible range of ptr_ +// and acc_ for the values of map_ and acc_ after the invocation of this constructor. +// +// Effects: Value-initializes ptr_, map_, and acc_. 
+ +#include +#include +#include +#include + +#include "../CustomTestLayouts.h" +#include "../MinimalElementType.h" +#include "CustomTestAccessors.h" +#include "test_macros.h" + +template 0) && hc && mc && ac, int> = 0> +__host__ __device__ constexpr void test_mdspan_types(const H&, const M&, const A&) +{ + using MDS = cuda::std::mdspan; + + static_assert(hc == cuda::std::is_default_constructible::value, ""); + static_assert(mc == cuda::std::is_default_constructible::value, ""); + static_assert(ac == cuda::std::is_default_constructible::value, ""); + + MDS m; +#if !defined(TEST_COMPILER_GCC) + static_assert(noexcept(MDS()) == (noexcept(H()) && noexcept(M()) && noexcept(A())), ""); +#endif // !TEST_COMPILER_GCC + assert(m.extents() == typename MDS::extents_type()); + test_equality_handle(m, H{}); + test_equality_mapping(m, M{}); + test_equality_accessor(m, A{}); +} +template 0) && hc && mc && ac), int> = 0> +__host__ __device__ constexpr void test_mdspan_types(const H&, const M&, const A&) +{ + using MDS = cuda::std::mdspan; + + static_assert(hc == cuda::std::is_default_constructible::value, ""); + static_assert(mc == cuda::std::is_default_constructible::value, ""); + static_assert(ac == cuda::std::is_default_constructible::value, ""); + static_assert(!cuda::std::is_default_constructible::value, ""); +} + +template +__host__ __device__ constexpr void mixin_extents(const H& handle, const L& layout, const A& acc) +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents()), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(7)), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents()), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(2, 3)), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(0, 3)), acc); + test_mdspan_types( + handle, construct_mapping(layout, cuda::std::extents(1, 2, 3, 2)), 
acc); +} + +template +__host__ __device__ constexpr void mixin_layout(const H& handle, const A& acc) +{ + mixin_extents(handle, cuda::std::layout_left(), acc); + mixin_extents(handle, cuda::std::layout_right(), acc); + + // Use weird layout, make sure it has the properties we want to test + constexpr size_t D = cuda::std::dynamic_extent; + static_assert(!cuda::std::is_default_constructible< + typename layout_wrapping_integral<4>::template mapping>>::value, + ""); + mixin_extents(handle, layout_wrapping_integral<4>(), acc); +} + +template ::value, int> = 0> +__host__ __device__ constexpr void mixin_accessor() +{ + cuda::std::array elements{42}; + mixin_layout(elements.data(), cuda::std::default_accessor()); + + // Using weird accessor/data_handle + // Make sure they actually got the properties we want to test + // checked_accessor is not default constructible except for const double, where it is not noexcept + static_assert( + cuda::std::is_default_constructible>::value == cuda::std::is_same::value, ""); + // checked_accessor's data handle type is not default constructible for double + static_assert(cuda::std::is_default_constructible::data_handle_type>::value + != cuda::std::is_same::value, + ""); + mixin_layout::value, cuda::std::is_same::value>( + typename checked_accessor::data_handle_type(elements.data()), checked_accessor(1024)); +} + +template ::value, int> = 0> +__host__ __device__ TEST_CONSTEXPR_CXX20 void mixin_accessor() +{ + ElementPool elements; + mixin_layout(elements.get_ptr(), cuda::std::default_accessor()); + + // Using weird accessor/data_handle + // Make sure they actually got the properties we want to test + // checked_accessor is not default constructible except for const double, where it is not noexcept + static_assert( + cuda::std::is_default_constructible>::value == cuda::std::is_same::value, ""); + // checked_accessor's data handle type is not default constructible for double + 
static_assert(cuda::std::is_default_constructible::data_handle_type>::value + != cuda::std::is_same::value, + ""); + mixin_layout::value, cuda::std::is_same::value>( + typename checked_accessor::data_handle_type(elements.get_ptr()), checked_accessor(1024)); +} + +__host__ __device__ constexpr bool test() +{ + mixin_accessor(); + mixin_accessor(); + mixin_accessor(); + mixin_accessor(); + return true; +} + +__host__ __device__ TEST_CONSTEXPR_CXX20 bool test_evil() +{ + mixin_accessor(); + mixin_accessor(); + return true; +} + +int main(int, char**) +{ + test(); + test_evil(); + +#if TEST_STD_VER >= 2020 + static_assert(test(), ""); + static_assert(test_evil(), ""); +#endif // TEST_STD_VER >= 2020 + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.dh_array.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.dh_array.pass.cpp new file mode 100644 index 00000000000..cd570f969e8 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.dh_array.pass.cpp @@ -0,0 +1,259 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// template +// constexpr explicit(N != rank_dynamic()) +// mdspan(data_handle_type p, const array& exts); +// +// Constraints: +// - is_convertible_v is true, +// - (is_nothrow_constructible && ...) is true, +// - N == rank() || N == rank_dynamic() is true, +// - is_constructible_v is true, and +// - is_default_constructible_v is true. 
+// +// Preconditions: [0, map_.required_span_size()) is an accessible range of p and acc_ +// for the values of map_ and acc_ after the invocation of this constructor. +// +// Effects: +// - Direct-non-list-initializes ptr_ with cuda::std::move(p), +// - direct-non-list-initializes map_ with extents_type(exts), and +// - value-initializes acc_. + +#include +#include +#include +#include +#include + +#include "../ConvertibleToIntegral.h" +#include "../CustomTestLayouts.h" +#include "../MinimalElementType.h" +#include "CustomTestAccessors.h" +#include "test_macros.h" + +template +__host__ __device__ constexpr auto array_from_extents(const Extents& exts, cuda::std::index_sequence) +{ + return cuda::std::array{exts.extent(Idxs)...}; +} + +template +_CCCL_CONCEPT_FRAGMENT(check_mdspan_ctor_implicit_, + requires(MDS m, typename MDS::data_handle_type h, const Exts& exts)((m = {h, exts}))); + +template +_CCCL_CONCEPT check_mdspan_ctor_implicit = _CCCL_FRAGMENT(check_mdspan_ctor_implicit_, MDS, Exts); + +template +__host__ __device__ constexpr void +test_mdspan_ctor_array(const H& handle, const M& map, const A&, cuda::std::array exts) +{ + using MDS = cuda::std::mdspan; + if (!cuda::std::__cccl_default_is_constant_evaluated()) + { + move_counted_handle::move_counter() = 0; + } + MDS m(handle, exts); + test_move_counter(); + + static_assert(!noexcept(MDS(handle, exts)), ""); + + static_assert(check_mdspan_ctor_implicit == (N == MDS::rank_dynamic()), ""); + + assert(m.extents() == map.extents()); + test_equality_handle(m, handle); + test_equality_mapping(m, map); + test_equality_accessor(m, A{}); +} + +template 0), int> = 0> +__host__ __device__ constexpr cuda::std::array +get_exts_dynamic(const cuda::std::array& exts) +{ + cuda::std::array exts_dynamic{}; + size_t r_dyn = 0; + for (size_t r = 0; r < MDS::rank(); r++) + { + if (MDS::static_extent(r) == cuda::std::dynamic_extent) + { + exts_dynamic[r_dyn++] = exts[r]; + } + } + return exts_dynamic; +} +template = 0> +__host__ 
__device__ constexpr cuda::std::array +get_exts_dynamic(const cuda::std::array&) +{ + return cuda::std::array{}; +} + +template = 0> +__host__ __device__ constexpr void test_mdspan_ctor(const H& handle, const M& map, const A& acc) +{ + using MDS = cuda::std::mdspan; + static_assert(mec == cuda::std::is_constructible::value, ""); + static_assert(ac == cuda::std::is_default_constructible::value, ""); + // test from all extents + const auto exts = array_from_extents(map.extents(), cuda::std::make_index_sequence()); + test_mdspan_ctor_array(handle, map, acc, exts); + + // test from dynamic extents + const auto exts_dynamic = get_exts_dynamic(exts); + test_mdspan_ctor_array(handle, map, acc, exts_dynamic); +} +template = 0> +__host__ __device__ constexpr void test_mdspan_ctor(const H& handle, const M& map, const A& acc) +{ + using MDS = cuda::std::mdspan; + static_assert(mec == cuda::std::is_constructible::value, ""); + static_assert(ac == cuda::std::is_default_constructible::value, ""); + static_assert( + !cuda::std::is_constructible&>::value, + ""); +} + +template +__host__ __device__ constexpr void mixin_extents(const H& handle, const L& layout, const A& acc) +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_mdspan_ctor(handle, construct_mapping(layout, cuda::std::extents()), acc); + test_mdspan_ctor(handle, construct_mapping(layout, cuda::std::extents(7)), acc); + test_mdspan_ctor(handle, construct_mapping(layout, cuda::std::extents()), acc); + test_mdspan_ctor(handle, construct_mapping(layout, cuda::std::extents(2, 3)), acc); + test_mdspan_ctor(handle, construct_mapping(layout, cuda::std::extents(0, 3)), acc); + test_mdspan_ctor( + handle, construct_mapping(layout, cuda::std::extents(1, 2, 3, 2)), acc); +} + +template +__host__ __device__ constexpr void mixin_layout(const H& handle, const A& acc) +{ + mixin_extents(handle, cuda::std::layout_left(), acc); + mixin_extents(handle, cuda::std::layout_right(), acc); + + // Sanity check that this layouts mapping 
is constructible from extents (via its move constructor) + static_assert( + cuda::std::is_constructible::template mapping>, + cuda::std::extents>::value, + ""); + static_assert( + !cuda::std::is_constructible::template mapping>, + const cuda::std::extents&>::value, + ""); + mixin_extents(handle, layout_wrapping_integral<8>(), acc); + // Sanity check that this layouts mapping is not constructible from extents + static_assert( + !cuda::std::is_constructible::template mapping>, + cuda::std::extents>::value, + ""); + static_assert( + !cuda::std::is_constructible::template mapping>, + const cuda::std::extents&>::value, + ""); + mixin_extents(handle, layout_wrapping_integral<4>(), acc); +} + +template ::value, int> = 0> +__host__ __device__ constexpr void mixin_accessor() +{ + cuda::std::array elements{42}; + mixin_layout(elements.data(), cuda::std::default_accessor()); + + // Using weird accessor/data_handle + // Make sure they actually got the properties we want to test + // checked_accessor is not default constructible except for const double, where it is not noexcept + static_assert( + cuda::std::is_default_constructible>::value == cuda::std::is_same::value); + mixin_layout::value>( + typename checked_accessor::data_handle_type(elements.data()), checked_accessor(1024)); +} + +template ::value, int> = 0> +__host__ __device__ TEST_CONSTEXPR_CXX20 void mixin_accessor() +{ + ElementPool elements; + mixin_layout(elements.get_ptr(), cuda::std::default_accessor()); + + // Using weird accessor/data_handle + // Make sure they actually got the properties we want to test + // checked_accessor is not default constructible except for const double, where it is not noexcept + static_assert( + cuda::std::is_default_constructible>::value == cuda::std::is_same::value); + mixin_layout::value>( + typename checked_accessor::data_handle_type(elements.get_ptr()), checked_accessor(1024)); +} + +__host__ __device__ constexpr bool test() +{ + mixin_accessor(); + mixin_accessor(); + 
mixin_accessor(); + mixin_accessor(); + + // test non-constructibility from wrong array type + constexpr size_t D = cuda::std::dynamic_extent; + using mds_t = cuda::std::mdspan>; + // sanity check + static_assert(cuda::std::is_constructible>::value, ""); + static_assert(cuda::std::is_constructible>::value, ""); + // wrong size + static_assert(!cuda::std::is_constructible>::value, ""); + static_assert(!cuda::std::is_constructible>::value, ""); + // not convertible to index_type + static_assert(cuda::std::is_convertible::value, ""); + static_assert(!cuda::std::is_convertible::value, ""); + static_assert(!cuda::std::is_constructible>::value, ""); + + // index_type is not nothrow constructible + using mds_uchar_t = cuda::std::mdspan>; + static_assert(cuda::std::is_convertible::value, ""); + static_assert(cuda::std::is_convertible::value, ""); + static_assert(!cuda::std::is_nothrow_constructible::value, ""); + static_assert(!cuda::std::is_constructible>::value, ""); + + // convertible from non-const to index_type but not from const + using mds_int_t = cuda::std::mdspan>; + static_assert(cuda::std::is_convertible::value, ""); + static_assert(!cuda::std::is_convertible::value, ""); + static_assert(cuda::std::is_nothrow_constructible::value, ""); + static_assert(!cuda::std::is_constructible>::value, ""); + + // can't test a combo where cuda::std::is_nothrow_constructible_v is true, + // but cuda::std::is_convertible_v is false + + // test non-constructibility from wrong handle_type + static_assert(!cuda::std::is_constructible>::value, ""); + + return true; +} + +__host__ __device__ TEST_CONSTEXPR_CXX20 bool test_evil() +{ + mixin_accessor(); + mixin_accessor(); + return true; +} + +int main(int, char**) +{ + test(); + test_evil(); + +#if TEST_STD_VER >= 2020 + static_assert(test(), ""); + static_assert(test_evil(), ""); +#endif // TEST_STD_VER >= 2020 + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.dh_extents.pass.cpp 
b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.dh_extents.pass.cpp new file mode 100644 index 00000000000..95a42fdf522 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.dh_extents.pass.cpp @@ -0,0 +1,183 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// constexpr mdspan(data_handle_type p, const extents_type& ext); +// +// Constraints: +// - is_constructible_v is true, and +// - is_default_constructible_v is true. +// +// Preconditions: [0, map_.required_span_size()) is an accessible range of p and acc_ +// for the values of map_ and acc_ after the invocation of this constructor. +// +// Effects: +// - Direct-non-list-initializes ptr_ with cuda::std::move(p), +// - direct-non-list-initializes map_ with ext, and +// - value-initializes acc_. 
+ +#include +#include +#include +#include + +#include "../CustomTestLayouts.h" +#include "../MinimalElementType.h" +#include "CustomTestAccessors.h" +#include "test_macros.h" + +template = 0> +__host__ __device__ constexpr void test_mdspan_types(const H& handle, const M& map, const A&) +{ + using MDS = cuda::std::mdspan; + + static_assert(mec == cuda::std::is_constructible::value, ""); + static_assert(ac == cuda::std::is_default_constructible::value, ""); + if (!cuda::std::__cccl_default_is_constant_evaluated()) + { + move_counted_handle::move_counter() = 0; + } + // use formulation of constructor which tests that its not explicit + MDS m = {handle, map.extents()}; + test_move_counter(); + + static_assert(!noexcept(MDS(handle, map.extents())), ""); + assert(m.extents() == map.extents()); + test_equality_handle(m, handle); + test_equality_mapping(m, map); + test_equality_accessor(m, A{}); +} +template = 0> +__host__ __device__ constexpr void test_mdspan_types(const H& handle, const M& map, const A&) +{ + using MDS = cuda::std::mdspan; + + static_assert(mec == cuda::std::is_constructible::value, ""); + static_assert(ac == cuda::std::is_default_constructible::value, ""); + static_assert(!cuda::std::is_constructible::value, ""); +} + +template +__host__ __device__ constexpr void mixin_extents(const H& handle, const L& layout, const A& acc) +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents()), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(7)), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents()), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(2, 3)), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(0, 3)), acc); + test_mdspan_types( + handle, construct_mapping(layout, cuda::std::extents(1, 2, 3, 2)), acc); +} + +template +__host__ __device__ constexpr void 
mixin_layout(const H& handle, const A& acc) +{ + mixin_extents(handle, cuda::std::layout_left(), acc); + mixin_extents(handle, cuda::std::layout_right(), acc); + + // Use weird layout, make sure it has the properties we want to test + // Sanity check that this layouts mapping is constructible from extents (via its move constructor) + static_assert( + cuda::std::is_constructible::template mapping>, + cuda::std::extents>::value, + ""); + static_assert( + !cuda::std::is_constructible::template mapping>, + const cuda::std::extents&>::value, + ""); + mixin_extents(handle, layout_wrapping_integral<8>(), acc); + // Sanity check that this layouts mapping is not constructible from extents + static_assert( + !cuda::std::is_constructible::template mapping>, + cuda::std::extents>::value, + ""); + static_assert( + !cuda::std::is_constructible::template mapping>, + const cuda::std::extents&>::value, + ""); + mixin_extents(handle, layout_wrapping_integral<4>(), acc); +} + +template ::value, int> = 0> +__host__ __device__ constexpr void mixin_accessor() +{ + cuda::std::array elements{42}; + mixin_layout(elements.data(), cuda::std::default_accessor()); + + // Using weird accessor/data_handle + // Make sure they actually got the properties we want to test + // checked_accessor is not default constructible except for const double, where it is not noexcept + static_assert( + cuda::std::is_default_constructible>::value == cuda::std::is_same::value, ""); + mixin_layout::value>( + typename checked_accessor::data_handle_type(elements.data()), checked_accessor(1024)); +} + +template ::value, int> = 0> +__host__ __device__ TEST_CONSTEXPR_CXX20 void mixin_accessor() +{ + ElementPool elements; + mixin_layout(elements.get_ptr(), cuda::std::default_accessor()); + + // Using weird accessor/data_handle + // Make sure they actually got the properties we want to test + // checked_accessor is not default constructible except for const double, where it is not noexcept + static_assert( + 
cuda::std::is_default_constructible>::value == cuda::std::is_same::value, ""); + mixin_layout::value>( + typename checked_accessor::data_handle_type(elements.get_ptr()), checked_accessor(1024)); +} + +__host__ __device__ constexpr bool test() +{ + mixin_accessor(); + mixin_accessor(); + mixin_accessor(); + mixin_accessor(); + + // test non-constructibility from wrong extents type + constexpr size_t D = cuda::std::dynamic_extent; + using mds_t = cuda::std::mdspan>; + // sanity check + static_assert(cuda::std::is_constructible>::value, ""); + // wrong size + static_assert(!cuda::std::is_constructible>::value, ""); + static_assert(!cuda::std::is_constructible>::value, ""); + // wrong type in general: note the extents constructor does NOT convert, since it takes by const& + static_assert(!cuda::std::is_constructible>::value, ""); + static_assert(!cuda::std::is_constructible>::value, ""); + + // test non-constructibility from wrong handle_type + static_assert(!cuda::std::is_constructible>::value, ""); + + return true; +} + +__host__ __device__ TEST_CONSTEXPR_CXX20 bool test_evil() +{ + mixin_accessor(); + mixin_accessor(); + return true; +} + +int main(int, char**) +{ + test(); + test_evil(); + +#if TEST_STD_VER >= 2020 + static_assert(test(), ""); + static_assert(test_evil(), ""); +#endif // TEST_STD_VER >= 2020 + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.dh_integers.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.dh_integers.pass.cpp new file mode 100644 index 00000000000..26cbde822e5 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.dh_integers.pass.cpp @@ -0,0 +1,208 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// template +// constexpr explicit mdspan(data_handle_type p, OtherIndexTypes... exts); +// +// Let N be sizeof...(OtherIndexTypes). +// +// Constraints: +// - (is_convertible_v && ...) is true, +// - (is_nothrow_constructible && ...) is true, +// - N == rank() || N == rank_dynamic() is true, +// - is_constructible_v is true, and +// - is_default_constructible_v is true. +// +// Preconditions: [0, map_.required_span_size()) is an accessible range of p and acc_ +// for the values of map_ and acc_ after the invocation of this constructor. +// +// Effects: +// - Direct-non-list-initializes ptr_ with cuda::std::move(p), +// - direct-non-list-initializes map_ with extents_type(static_cast(cuda::std::move(exts))...), and +// - value-initializes acc_. + +#include +#include +#include +#include +#include + +#include "../CustomTestLayouts.h" +#include "../MinimalElementType.h" +#include "CustomTestAccessors.h" +#include "test_macros.h" + +template +_CCCL_CONCEPT_FRAGMENT(check_mdspan_ctor_implicit_, requires(MDS m, Args... args)((m = {args...}))); + +template +_CCCL_CONCEPT check_mdspan_ctor_implicit = _CCCL_FRAGMENT(check_mdspan_ctor_implicit_, MDS, Args...); + +template = 0> +__host__ __device__ constexpr void test_mdspan_types(const H& handle, const M& map, const A&, Idxs... 
idxs) +{ + using MDS = cuda::std::mdspan; + + static_assert(mec == cuda::std::is_constructible::value, ""); + static_assert(ac == cuda::std::is_default_constructible::value, ""); + if (!cuda::std::__cccl_default_is_constant_evaluated()) + { + move_counted_handle::move_counter() = 0; + } + MDS m(handle, idxs...); + test_move_counter(); + + // sanity check that concept works + static_assert(check_mdspan_ctor_implicit>, + ""); + // check that the constructor from integral is explicit + static_assert(!check_mdspan_ctor_implicit, ""); + + assert(m.extents() == map.extents()); + test_equality_handle(m, handle); + test_equality_mapping(m, map); + test_equality_accessor(m, A{}); +} +template = 0> +__host__ __device__ constexpr void test_mdspan_types(const H& handle, const M& map, const A&, Idxs... idxs) +{ + using MDS = cuda::std::mdspan; + + static_assert(mec == cuda::std::is_constructible::value, ""); + static_assert(ac == cuda::std::is_default_constructible::value, ""); + static_assert(!cuda::std::is_constructible::value, ""); +} + +template +__host__ __device__ constexpr void mixin_extents(const H& handle, const L& layout, const A& acc) +{ + constexpr size_t D = cuda::std::dynamic_extent; + // construct from just dynamic extents + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents()), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(7)), acc, 7); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents()), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(2, 3)), acc, 2, 3); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(0, 3)), acc, 0, 3); + test_mdspan_types( + handle, construct_mapping(layout, cuda::std::extents(1, 2, 3, 2)), acc, 1, 2, 3, 2); + + // construct from all extents + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents()), acc, 7); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(2, 3)), acc, 
2, 4, 3); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(0, 3)), acc, 0, 7, 3); + test_mdspan_types( + handle, construct_mapping(layout, cuda::std::extents(1, 2, 3, 2)), acc, 1, 7, 2, 4, 3, 2); +} + +template +__host__ __device__ constexpr void mixin_layout(const H& handle, const A& acc) +{ + mixin_extents(handle, cuda::std::layout_left(), acc); + mixin_extents(handle, cuda::std::layout_right(), acc); + + // Use weird layout, make sure it has the properties we want to test + // Sanity check that this layouts mapping is constructible from extents (via its move constructor) + static_assert( + cuda::std::is_constructible::template mapping>, + cuda::std::extents>::value, + ""); + static_assert( + !cuda::std::is_constructible::template mapping>, + const cuda::std::extents&>::value, + ""); + mixin_extents(handle, layout_wrapping_integral<8>(), acc); + // Sanity check that this layouts mapping is not constructible from extents + static_assert( + !cuda::std::is_constructible::template mapping>, + cuda::std::extents>::value, + ""); + static_assert( + !cuda::std::is_constructible::template mapping>, + const cuda::std::extents&>::value, + ""); + mixin_extents(handle, layout_wrapping_integral<4>(), acc); +} + +template ::value, int> = 0> +__host__ __device__ constexpr void mixin_accessor() +{ + cuda::std::array elements{42}; + mixin_layout(elements.data(), cuda::std::default_accessor()); + + // Using weird accessor/data_handle + // Make sure they actually got the properties we want to test + // checked_accessor is not default constructible except for const double, where it is not noexcept + static_assert( + cuda::std::is_default_constructible>::value == cuda::std::is_same::value, ""); + mixin_layout::value>( + typename checked_accessor::data_handle_type(elements.data()), checked_accessor(1024)); +} + +template ::value, int> = 0> +__host__ __device__ TEST_CONSTEXPR_CXX20 void mixin_accessor() +{ + ElementPool elements; + 
mixin_layout(elements.get_ptr(), cuda::std::default_accessor()); + + // Using weird accessor/data_handle + // Make sure they actually got the properties we want to test + // checked_accessor is not default constructible except for const double, where it is not noexcept + static_assert( + cuda::std::is_default_constructible>::value == cuda::std::is_same::value, ""); + mixin_layout::value>( + typename checked_accessor::data_handle_type(elements.get_ptr()), checked_accessor(1024)); +} + +__host__ __device__ constexpr bool test() +{ + mixin_accessor(); + mixin_accessor(); + mixin_accessor(); + mixin_accessor(); + + // test non-constructibility from wrong integer types + constexpr size_t D = cuda::std::dynamic_extent; + using mds_t = cuda::std::mdspan>; + // sanity check + static_assert(cuda::std::is_constructible::value, ""); + static_assert(cuda::std::is_constructible::value, ""); + // wrong number of arguments + static_assert(!cuda::std::is_constructible::value, ""); + static_assert(!cuda::std::is_constructible::value, ""); + // not convertible to int + static_assert(!cuda::std::is_constructible>::value, ""); + + // test non-constructibility from wrong handle_type + static_assert(!cuda::std::is_constructible::value, ""); + + return true; +} + +__host__ __device__ TEST_CONSTEXPR_CXX20 bool test_evil() +{ + mixin_accessor(); + mixin_accessor(); + return true; +} + +int main(int, char**) +{ + test(); + test_evil(); + +#if TEST_STD_VER >= 2020 + static_assert(test(), ""); + static_assert(test_evil(), ""); +#endif // TEST_STD_VER >= 2020 + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.dh_map.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.dh_map.pass.cpp new file mode 100644 index 00000000000..b553591e678 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.dh_map.pass.cpp @@ -0,0 +1,165 @@ 
+//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// constexpr mdspan(data_handle_type p, const mapping_type& m); +// +// Constraints: is_default_constructible_v is true. +// +// Preconditions: [0, m.required_span_size()) is an accessible range of p and acc_ +// for the value of acc_ after the invocation of this constructor. +// +// Effects: +// - Direct-non-list-initializes ptr_ with cuda::std::move(p), +// - direct-non-list-initializes map_ with m, and +// - value-initializes acc_. + +#include +#include +#include +#include + +#include "../CustomTestLayouts.h" +#include "../MinimalElementType.h" +#include "CustomTestAccessors.h" +#include "test_macros.h" + +template = 0> +__host__ __device__ constexpr void test_mdspan_types(const H& handle, const M& map, const A&) +{ + using MDS = cuda::std::mdspan; + + static_assert(ac == cuda::std::is_default_constructible::value, ""); + if (!cuda::std::__cccl_default_is_constant_evaluated()) + { + move_counted_handle::move_counter() = 0; + } + // use formulation of constructor which tests that it is not explicit + MDS m = {handle, map}; + test_move_counter(); + + static_assert(!noexcept(MDS(handle, map))); + assert(m.extents() == map.extents()); + test_equality_handle(m, handle); + test_equality_mapping(m, map); + test_equality_accessor(m, A{}); +} +template = 0> +__host__ __device__ constexpr void test_mdspan_types(const H& handle, const M& map, const A&) +{ + using MDS = cuda::std::mdspan; + + static_assert(ac == cuda::std::is_default_constructible::value, ""); + 
static_assert(!cuda::std::is_constructible::value, ""); +} + +template +__host__ __device__ constexpr void mixin_extents(const H& handle, const L& layout, const A& acc) +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents()), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(7)), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents()), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(2, 3)), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(0, 3)), acc); + test_mdspan_types( + handle, construct_mapping(layout, cuda::std::extents(1, 2, 3, 2)), acc); +} + +template +__host__ __device__ constexpr void mixin_layout(const H& handle, const A& acc) +{ + mixin_extents(handle, cuda::std::layout_left(), acc); + mixin_extents(handle, cuda::std::layout_right(), acc); + mixin_extents(handle, layout_wrapping_integral<4>(), acc); +} + +template ::value, int> = 0> +__host__ __device__ constexpr void mixin_accessor() +{ + cuda::std::array elements{42}; + mixin_layout(elements.data(), cuda::std::default_accessor()); + + // Using weird accessor/data_handle + // Make sure they actually got the properties we want to test + // checked_accessor is not default constructible except for const double, where it is not noexcept + static_assert( + cuda::std::is_default_constructible>::value == cuda::std::is_same::value, ""); + mixin_layout::value>( + typename checked_accessor::data_handle_type(elements.data()), checked_accessor(1024)); +} + +template ::value, int> = 0> +__host__ __device__ TEST_CONSTEXPR_CXX20 void mixin_accessor() +{ + ElementPool elements; + mixin_layout(elements.get_ptr(), cuda::std::default_accessor()); + + // Using weird accessor/data_handle + // Make sure they actually got the properties we want to test + // checked_accessor is not default constructible except for const double, where it 
is not noexcept + static_assert( + cuda::std::is_default_constructible>::value == cuda::std::is_same::value, ""); + mixin_layout::value>( + typename checked_accessor::data_handle_type(elements.get_ptr()), checked_accessor(1024)); +} + +template +using mapping_t = typename cuda::std::layout_right::template mapping; + +__host__ __device__ constexpr bool test() +{ + mixin_accessor(); + mixin_accessor(); + mixin_accessor(); + mixin_accessor(); + + constexpr size_t D = cuda::std::dynamic_extent; + using mds_t = cuda::std::mdspan>; + + // sanity check + static_assert(cuda::std::is_constructible>>::value, ""); + + // test non-constructibility from wrong mapping type + // wrong rank + static_assert(!cuda::std::is_constructible>>::value, ""); + static_assert(!cuda::std::is_constructible>>::value, ""); + // wrong type in general: note the map constructor does NOT convert, since it takes by const& + static_assert(!cuda::std::is_constructible>>::value, ""); + static_assert(!cuda::std::is_constructible>>::value, + ""); + + // test non-constructibility from wrong handle_type + static_assert(!cuda::std::is_constructible>>::value, + ""); + + return true; +} + +__host__ __device__ TEST_CONSTEXPR_CXX20 bool test_evil() +{ + mixin_accessor(); + mixin_accessor(); + return true; +} + +int main(int, char**) +{ + test(); + test_evil(); + +#if TEST_STD_VER >= 2020 + static_assert(test(), ""); + static_assert(test_evil(), ""); +#endif // TEST_STD_VER >= 2020 + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.dh_map_acc.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.dh_map_acc.pass.cpp new file mode 100644 index 00000000000..12c6ba6f515 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.dh_map_acc.pass.cpp @@ -0,0 +1,171 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM 
Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// constexpr mdspan(data_handle_type p, const mapping_type& m, const accessor_type& a); +// +// Preconditions: [0, m.required_span_size()) is an accessible range of p and a. +// +// Effects: +// - Direct-non-list-initializes ptr_ with cuda::std::move(p), +// - direct-non-list-initializes map_ with m, and +// - direct-non-list-initializes acc_ with a. + +#include +#include +#include +#include + +#include "../CustomTestLayouts.h" +#include "../MinimalElementType.h" +#include "CustomTestAccessors.h" +#include "test_macros.h" + +template +__host__ __device__ constexpr void test_mdspan_types(const H& handle, const M& map, const A& acc) +{ + using MDS = cuda::std::mdspan; + + if (!cuda::std::__cccl_default_is_constant_evaluated()) + { + move_counted_handle::move_counter() = 0; + } + // use formulation of constructor which tests that it is not explicit + MDS m = {handle, map, acc}; + test_move_counter(); + + static_assert(!noexcept(MDS(handle, map, acc)), ""); + assert(m.extents() == map.extents()); + test_equality_handle(m, handle); + test_equality_mapping(m, map); + test_equality_accessor(m, acc); +} + +template +__host__ __device__ constexpr void mixin_extents(const H& handle, const L& layout, const A& acc) +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents()), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(7)), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents()), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(2, 3)), acc); + test_mdspan_types(handle, 
construct_mapping(layout, cuda::std::extents(0, 3)), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(1, 2, 3, 2)), acc); +} + +template +__host__ __device__ constexpr void mixin_layout(const H& handle, const A& acc) +{ + mixin_extents(handle, cuda::std::layout_left(), acc); + mixin_extents(handle, cuda::std::layout_right(), acc); + mixin_extents(handle, layout_wrapping_integral<4>(), acc); +} + +template ::value, int> = 0> +__host__ __device__ constexpr void mixin_accessor() +{ + cuda::std::array elements{42}; + mixin_layout(elements.data(), cuda::std::default_accessor()); + + // Using weird accessor/data_handle + // Make sure they actually got the properties we want to test + // checked_accessor is not default constructible except for const double, where it is not noexcept + static_assert( + cuda::std::is_default_constructible>::value == cuda::std::is_same::value, ""); + // checked_accessor's data handle type is not default constructible for double + static_assert(cuda::std::is_default_constructible::data_handle_type>::value + != cuda::std::is_same::value, + ""); + mixin_layout(typename checked_accessor::data_handle_type(elements.data()), checked_accessor(1024)); +} + +template ::value, int> = 0> +__host__ __device__ TEST_CONSTEXPR_CXX20 void mixin_accessor() +{ + ElementPool elements; + mixin_layout(elements.get_ptr(), cuda::std::default_accessor()); + + // Using weird accessor/data_handle + // Make sure they actually got the properties we want to test + // checked_accessor is not default constructible except for const double, where it is not noexcept + static_assert( + cuda::std::is_default_constructible>::value == cuda::std::is_same::value, ""); + // checked_accessor's data handle type is not default constructible for double + static_assert(cuda::std::is_default_constructible::data_handle_type>::value + != cuda::std::is_same::value, + ""); + mixin_layout(typename checked_accessor::data_handle_type(elements.get_ptr()), 
checked_accessor(1024)); +} + +template +using mapping_t = typename cuda::std::layout_right::template mapping; + +__host__ __device__ constexpr bool test() +{ + mixin_accessor(); + mixin_accessor(); + mixin_accessor(); + mixin_accessor(); + + // test non-constructibility from wrong args + constexpr size_t D = cuda::std::dynamic_extent; + using mds_t = cuda::std::mdspan>; + using acc_t = cuda::std::default_accessor; + + // sanity check + static_assert(cuda::std::is_constructible>, acc_t>::value, + ""); + + // test non-constructibility from wrong accessor + static_assert(!cuda::std::is_constructible>, + cuda::std::default_accessor>::value, + ""); + + // test non-constructibility from wrong mapping type + // wrong rank + static_assert(!cuda::std::is_constructible>, acc_t>::value, + ""); + static_assert( + !cuda::std::is_constructible>, acc_t>::value, ""); + // wrong type in general: note the map constructor does NOT convert, since it takes by const& + static_assert(!cuda::std::is_constructible>, acc_t>::value, + ""); + static_assert( + !cuda::std::is_constructible>, acc_t>::value, ""); + + // test non-constructibility from wrong handle_type + static_assert( + !cuda::std::is_constructible>, acc_t>::value, ""); + + return true; +} + +__host__ __device__ TEST_CONSTEXPR_CXX20 bool test_evil() +{ + mixin_accessor(); + mixin_accessor(); + return true; +} + +int main(int, char**) +{ + test(); + test_evil(); + +#if TEST_STD_VER >= 2020 + static_assert(test(), ""); + static_assert(test_evil(), ""); +#endif // TEST_STD_VER >= 2020 + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.dh_span.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.dh_span.pass.cpp new file mode 100644 index 00000000000..997a3ed5cbb --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.dh_span.pass.cpp @@ -0,0 +1,259 @@ +//===----------------------------------------------------------------------===// 
+// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// template +// constexpr explicit(N != rank_dynamic()) +// mdspan(data_handle_type p, span exts); +// +// Constraints: +// - is_convertible_v is true, +// - (is_nothrow_constructible && ...) is true, +// - N == rank() || N == rank_dynamic() is true, +// - is_constructible_v is true, and +// - is_default_constructible_v is true. +// +// Preconditions: [0, map_.required_span_size()) is an accessible range of p and acc_ +// for the values of map_ and acc_ after the invocation of this constructor. +// +// Effects: +// - Direct-non-list-initializes ptr_ with cuda::std::move(p), +// - direct-non-list-initializes map_ with extents_type(exts), and +// - value-initializes acc_. 
+ +#include +#include +#include +#include +#include + +#include "../ConvertibleToIntegral.h" +#include "../CustomTestLayouts.h" +#include "../MinimalElementType.h" +#include "CustomTestAccessors.h" +#include "test_macros.h" + +template +__host__ __device__ constexpr auto array_from_extents(const Extents& exts, cuda::std::index_sequence) +{ + return cuda::std::array{exts.extent(Idxs)...}; +} + +template +_CCCL_CONCEPT_FRAGMENT(check_mdspan_ctor_implicit_, + requires(MDS m, typename MDS::data_handle_type h, const Exts& exts)((m = {h, exts}))); + +template +_CCCL_CONCEPT check_mdspan_ctor_implicit = _CCCL_FRAGMENT(check_mdspan_ctor_implicit_, MDS, Exts); + +template +__host__ __device__ constexpr void +test_mdspan_ctor_span(const H& handle, const M& map, const A&, cuda::std::span exts) +{ + using MDS = cuda::std::mdspan; + if (!cuda::std::__cccl_default_is_constant_evaluated()) + { + move_counted_handle::move_counter() = 0; + } + MDS m(handle, exts); + test_move_counter(); + + static_assert(!noexcept(MDS(handle, exts)), ""); + static_assert(check_mdspan_ctor_implicit == (N == MDS::rank_dynamic()), ""); + + assert(m.extents() == map.extents()); + test_equality_handle(m, handle); + test_equality_mapping(m, map); + test_equality_accessor(m, A{}); +} + +template 0), int> = 0> +__host__ __device__ constexpr cuda::std::array +get_exts_dynamic(const cuda::std::array& exts) +{ + cuda::std::array exts_dynamic{}; + size_t r_dyn = 0; + for (size_t r = 0; r < MDS::rank(); r++) + { + if (MDS::static_extent(r) == cuda::std::dynamic_extent) + { + exts_dynamic[r_dyn++] = exts[r]; + } + } + return exts_dynamic; +} +template = 0> +__host__ __device__ constexpr cuda::std::array +get_exts_dynamic(const cuda::std::array&) +{ + return cuda::std::array{}; +} + +template = 0> +__host__ __device__ constexpr void test_mdspan_ctor(const H& handle, const M& map, const A& acc) +{ + using MDS = cuda::std::mdspan; + static_assert(mec == cuda::std::is_constructible::value, ""); + static_assert(ac == 
cuda::std::is_default_constructible::value, ""); + // test from all extents + const auto exts = array_from_extents(map.extents(), cuda::std::make_index_sequence()); + test_mdspan_ctor_span( + handle, map, acc, cuda::std::span(exts)); + + // test from dynamic extents + const auto exts_dynamic = get_exts_dynamic(exts); + test_mdspan_ctor_span( + handle, map, acc, cuda::std::span(exts_dynamic)); +} +template = 0> +__host__ __device__ constexpr void test_mdspan_ctor(const H& handle, const M& map, const A& acc) +{ + using MDS = cuda::std::mdspan; + static_assert(mec == cuda::std::is_constructible::value, ""); + static_assert(ac == cuda::std::is_default_constructible::value, ""); + static_assert( + !cuda::std::is_constructible>::value, ""); +} + +template +__host__ __device__ constexpr void mixin_extents(const H& handle, const L& layout, const A& acc) +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_mdspan_ctor(handle, construct_mapping(layout, cuda::std::extents()), acc); + test_mdspan_ctor(handle, construct_mapping(layout, cuda::std::extents(7)), acc); + test_mdspan_ctor(handle, construct_mapping(layout, cuda::std::extents()), acc); + test_mdspan_ctor(handle, construct_mapping(layout, cuda::std::extents(2, 3)), acc); + test_mdspan_ctor(handle, construct_mapping(layout, cuda::std::extents(0, 3)), acc); + test_mdspan_ctor( + handle, construct_mapping(layout, cuda::std::extents(1, 2, 3, 2)), acc); +} + +template +__host__ __device__ constexpr void mixin_layout(const H& handle, const A& acc) +{ + mixin_extents(handle, cuda::std::layout_left(), acc); + mixin_extents(handle, cuda::std::layout_right(), acc); + + // Sanity check that this layouts mapping is constructible from extents (via its move constructor) + static_assert( + cuda::std::is_constructible::template mapping>, + cuda::std::extents>::value, + ""); + static_assert( + !cuda::std::is_constructible::template mapping>, + const cuda::std::extents&>::value, + ""); + mixin_extents(handle, 
layout_wrapping_integral<8>(), acc); + // Sanity check that this layouts mapping is not constructible from extents + static_assert( + !cuda::std::is_constructible::template mapping>, + cuda::std::extents>::value, + ""); + static_assert( + !cuda::std::is_constructible::template mapping>, + const cuda::std::extents&>::value, + ""); + mixin_extents(handle, layout_wrapping_integral<4>(), acc); +} + +template ::value, int> = 0> +__host__ __device__ constexpr void mixin_accessor() +{ + cuda::std::array elements{42}; + mixin_layout(elements.data(), cuda::std::default_accessor()); + + // Using weird accessor/data_handle + // Make sure they actually got the properties we want to test + // checked_accessor is not default constructible except for const double, where it is not noexcept + static_assert( + cuda::std::is_default_constructible>::value == cuda::std::is_same::value, ""); + mixin_layout::value>( + typename checked_accessor::data_handle_type(elements.data()), checked_accessor(1024)); +} + +template ::value, int> = 0> +__host__ __device__ TEST_CONSTEXPR_CXX20 void mixin_accessor() +{ + ElementPool elements; + mixin_layout(elements.get_ptr(), cuda::std::default_accessor()); + + // Using weird accessor/data_handle + // Make sure they actually got the properties we want to test + // checked_accessor is not default constructible except for const double, where it is not noexcept + static_assert( + cuda::std::is_default_constructible>::value == cuda::std::is_same::value, ""); + mixin_layout::value>( + typename checked_accessor::data_handle_type(elements.get_ptr()), checked_accessor(1024)); +} + +__host__ __device__ constexpr bool test() +{ + mixin_accessor(); + mixin_accessor(); + mixin_accessor(); + mixin_accessor(); + + // test non-constructibility from wrong span type + constexpr size_t D = cuda::std::dynamic_extent; + using mds_t = cuda::std::mdspan>; + // sanity check + static_assert(cuda::std::is_constructible>::value, ""); + 
static_assert(cuda::std::is_constructible>::value, ""); + // wrong size + static_assert(!cuda::std::is_constructible>::value, ""); + static_assert(!cuda::std::is_constructible>::value, ""); + // not convertible to index_type + static_assert(cuda::std::is_convertible::value, ""); + static_assert(!cuda::std::is_convertible::value, ""); + static_assert(!cuda::std::is_constructible>::value, ""); + + // index_type is not nothrow constructible + using mds_uchar_t = cuda::std::mdspan>; + static_assert(cuda::std::is_convertible::value, ""); + static_assert(cuda::std::is_convertible::value, ""); + static_assert(!cuda::std::is_nothrow_constructible::value, ""); + static_assert(!cuda::std::is_constructible>::value, ""); + + // convertible from non-const to index_type but not from const + using mds_int_t = cuda::std::mdspan>; + static_assert(cuda::std::is_convertible::value, ""); + static_assert(!cuda::std::is_convertible::value, ""); + static_assert(cuda::std::is_nothrow_constructible::value, ""); + static_assert(!cuda::std::is_constructible>::value, ""); + + // can't test a combo where cuda::std::is_nothrow_constructible_v is true, + // but cuda::std::is_convertible_v is false + + // test non-constructibility from wrong handle_type + static_assert(!cuda::std::is_constructible>::value, ""); + + return true; +} + +__host__ __device__ TEST_CONSTEXPR_CXX20 bool test_evil() +{ + mixin_accessor(); + mixin_accessor(); + return true; +} + +int main(int, char**) +{ + test(); + // test_evil(); + +#if TEST_STD_VER >= 2020 + static_assert(test(), ""); + static_assert(test_evil(), ""); +#endif // TEST_STD_VER >= 2020 + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.move.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.move.pass.cpp new file mode 100644 index 00000000000..3818a7de6ca --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/ctor.move.pass.cpp @@ -0,0 +1,141 @@ 
+//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// constexpr mdspan(mdspan&&) = default; +// +// A specialization of mdspan is a trivially copyable type if its accessor_type, mapping_type, and data_handle_type are +// trivially copyable types. + +#include +#include +#include +#include + +#include "../CustomTestLayouts.h" +#include "../MinimalElementType.h" +#include "CustomTestAccessors.h" +#include "test_macros.h" + +template +__host__ __device__ constexpr void test_mdspan_types(const H& handle, const M& map, const A& acc) +{ + using MDS = cuda::std::mdspan; + + MDS m_org(handle, map, acc); + MDS m(cuda::std::move(m_org)); + static_assert( + cuda::std::is_trivially_move_constructible::value + == (cuda::std::is_trivially_move_constructible::value && cuda::std::is_trivially_move_constructible::value + && cuda::std::is_trivially_move_constructible::value), + ""); + assert(m.extents() == map.extents()); + test_equality_handle(m, handle); + test_equality_mapping(m, map); + test_equality_accessor(m, acc); +} + +template +__host__ __device__ constexpr void mixin_extents(const H& handle, const L& layout, const A& acc) +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents()), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(7)), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents()), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(2, 3)), acc); + test_mdspan_types(handle, 
construct_mapping(layout, cuda::std::extents(0, 3)), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(1, 2, 3, 2)), acc); +} + +template +__host__ __device__ constexpr void mixin_layout(const H& handle, const A& acc) +{ + // make sure we test a trivially copyable mapping + static_assert(cuda::std::is_trivially_move_constructible< + typename cuda::std::layout_left::template mapping>>::value, + ""); + mixin_extents(handle, cuda::std::layout_left(), acc); + mixin_extents(handle, cuda::std::layout_right(), acc); + // make sure we test a not trivially copyable mapping + static_assert(!cuda::std::is_trivially_move_constructible< + typename layout_wrapping_integral<4>::template mapping>>::value, + ""); + mixin_extents(handle, layout_wrapping_integral<4>(), acc); +} + +template ::value, int> = 0> +__host__ __device__ constexpr void mixin_accessor() +{ + cuda::std::array elements{42}; + // make sure we test trivially constructible accessor and data_handle + static_assert(cuda::std::is_trivially_move_constructible>::value, ""); + static_assert( + cuda::std::is_trivially_move_constructible::data_handle_type>::value, ""); + mixin_layout(elements.data(), cuda::std::default_accessor()); + + // Using weird accessor/data_handle + // Make sure they actually got the properties we want to test + // checked_accessor is noexcept copy constructible except for const double + checked_accessor acc(1024); + static_assert(cuda::std::is_trivially_move_constructible::data_handle_type>::value + == cuda::std::is_same::value, + ""); + mixin_layout(typename checked_accessor::data_handle_type(elements.data()), acc); +} + +template ::value, int> = 0> +__host__ __device__ TEST_CONSTEXPR_CXX20 void mixin_accessor() +{ + ElementPool elements; + // make sure we test trivially constructible accessor and data_handle + static_assert(cuda::std::is_trivially_move_constructible>::value, ""); + static_assert( + cuda::std::is_trivially_move_constructible::data_handle_type>::value, 
""); + mixin_layout(elements.get_ptr(), cuda::std::default_accessor()); + + // Using weird accessor/data_handle + // Make sure they actually got the properties we want to test + // checked_accessor is noexcept copy constructible except for const double + checked_accessor acc(1024); + static_assert(cuda::std::is_trivially_move_constructible::data_handle_type>::value + == cuda::std::is_same::value, + ""); + mixin_layout(typename checked_accessor::data_handle_type(elements.get_ptr()), acc); +} + +__host__ __device__ constexpr bool test() +{ + mixin_accessor(); + mixin_accessor(); + mixin_accessor(); + mixin_accessor(); + return true; +} + +__host__ __device__ TEST_CONSTEXPR_CXX20 bool test_evil() +{ + mixin_accessor(); + mixin_accessor(); + return true; +} + +int main(int, char**) +{ + test(); + test_evil(); + +#if TEST_STD_VER >= 2020 + static_assert(test(), ""); + static_assert(test_evil(), ""); +#endif // TEST_STD_VER >= 2020 + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/deduction.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/deduction.pass.cpp new file mode 100644 index 00000000000..88384caced9 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/deduction.pass.cpp @@ -0,0 +1,224 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. 
+// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11, c++14 + +// + +// template +// requires(is_array_v && rank_v == 1) +// mdspan(CArray&) +// -> mdspan, extents>>; +// +// template +// requires(is_pointer_v>) +// mdspan(Pointer&&) +// -> mdspan>, extents>; +// +// template +// requires((is_convertible_v && ...) && sizeof...(Integrals) > 0) +// explicit mdspan(ElementType*, Integrals...) +// -> mdspan...>>; +// +// template +// mdspan(ElementType*, span) +// -> mdspan>; +// +// template +// mdspan(ElementType*, const array&) +// -> mdspan>; +// +// template +// mdspan(ElementType*, const extents&) +// -> mdspan>; +// +// template +// mdspan(ElementType*, const MappingType&) +// -> mdspan; +// +// template +// mdspan(const typename AccessorType::data_handle_type&, const MappingType&, +// const AccessorType&) +// -> mdspan; + +#include +#include +#include +#include + +#include "../CustomTestLayouts.h" +#include "../MinimalElementType.h" +#include "CustomTestAccessors.h" +#include "test_macros.h" + +template +__host__ __device__ constexpr void test_mdspan_types(const H& handle, const M& map, const A& acc) +{ + using MDS = cuda::std::mdspan; + + // deduction from data_handle_type (including non-pointer), mapping and accessor + ASSERT_SAME_TYPE(decltype(cuda::std::mdspan(handle, map, acc)), MDS); + + if constexpr (cuda::std::is_same>::value) + { + // deduction from pointer and mapping + // non-pointer data-handle-types have other accessor + ASSERT_SAME_TYPE(decltype(cuda::std::mdspan(handle, map)), MDS); + if constexpr (cuda::std::is_same::value) + { + // deduction from pointer and extents + ASSERT_SAME_TYPE(decltype(cuda::std::mdspan(handle, map.extents())), MDS); + } + } +} + +template +__host__ __device__ constexpr void mixin_extents(const H& handle, const L& layout, const A& acc) +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents()), 
acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(7)), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents()), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(2, 3)), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(0, 3)), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(1, 2, 3, 2)), acc); +} + +struct SizeTIntType +{ + size_t val; + __host__ __device__ constexpr SizeTIntType(size_t val_) + : val(val_) + {} + __host__ __device__ constexpr operator size_t() const noexcept + { + return size_t(val); + } +}; + +template +_CCCL_CONCEPT_FRAGMENT(can_deduce_layout_, + requires()( // + requires((sizeof(decltype(cuda::std::mdspan(cuda::std::declval(), 10))) > 0)))); + +template +_CCCL_CONCEPT can_deduce_layout = _CCCL_FRAGMENT(can_deduce_layout_, H); + +template , int> = 0> +__host__ __device__ constexpr bool test_no_layout_deduction_guides(const H& handle, const A&) +{ + using T = typename A::element_type; + // deduction from pointer alone + ASSERT_SAME_TYPE(decltype(cuda::std::mdspan(handle)), cuda::std::mdspan>); + // deduction from pointer and integral like + ASSERT_SAME_TYPE(decltype(cuda::std::mdspan(handle, 5, SizeTIntType(6))), + cuda::std::mdspan>); + + // P3029R1: deduction from `integral_constant` + ASSERT_SAME_TYPE(decltype(cuda::std::mdspan(handle, cuda::std::integral_constant{})), + cuda::std::mdspan>); + ASSERT_SAME_TYPE( + decltype(cuda::std::mdspan(handle, cuda::std::integral_constant{}, cuda::std::dynamic_extent)), + cuda::std::mdspan>); + ASSERT_SAME_TYPE( + decltype(cuda::std::mdspan(handle, + cuda::std::integral_constant{}, + cuda::std::dynamic_extent, + cuda::std::integral_constant{})), + cuda::std::mdspan>); + + cuda::std::array exts; + // deduction from pointer and array + ASSERT_SAME_TYPE(decltype(cuda::std::mdspan(handle, exts)), cuda::std::mdspan>); + // deduction from pointer and span + 
ASSERT_SAME_TYPE(decltype(cuda::std::mdspan(handle, cuda::std::span(exts))), + cuda::std::mdspan>); + return true; +} + +template , int> = 0> +__host__ __device__ constexpr bool test_no_layout_deduction_guides(const H&, const A&) +{ + return false; +} + +template +__host__ __device__ constexpr void mixin_layout(const H& handle, const A& acc) +{ + mixin_extents(handle, cuda::std::layout_left(), acc); + mixin_extents(handle, cuda::std::layout_right(), acc); + mixin_extents(handle, layout_wrapping_integral<4>(), acc); + + // checking that there is no deduction happen for non-pointer handle type + assert((test_no_layout_deduction_guides(handle, acc) == cuda::std::is_same::value)); +} + +template ::value, int> = 0> +__host__ __device__ constexpr void mixin_accessor() +{ + cuda::std::array elements{42}; + mixin_layout(elements.data(), cuda::std::default_accessor()); + + // Using weird accessor/data_handle + // Make sure they actually got the properties we want to test + // checked_accessor is noexcept copy constructible except for const double + checked_accessor acc(1024); + static_assert(noexcept(checked_accessor(acc)) != cuda::std::is_same::value, ""); + mixin_layout(typename checked_accessor::data_handle_type(elements.data()), acc); +} + +template ::value, int> = 0> +__host__ __device__ TEST_CONSTEXPR_CXX20 void mixin_accessor() +{ + ElementPool elements; + mixin_layout(elements.get_ptr(), cuda::std::default_accessor()); + + // Using weird accessor/data_handle + // Make sure they actually got the properties we want to test + // checked_accessor is noexcept copy constructible except for const double + checked_accessor acc(1024); + static_assert(noexcept(checked_accessor(acc)) != cuda::std::is_same::value, ""); + mixin_layout(typename checked_accessor::data_handle_type(elements.get_ptr()), acc); +} + +__host__ __device__ constexpr bool test() +{ + mixin_accessor(); + mixin_accessor(); + mixin_accessor(); + mixin_accessor(); + + // deduction from array alone + float 
a[12] = {}; + ASSERT_SAME_TYPE(decltype(cuda::std::mdspan(a)), cuda::std::mdspan>); + unused(a); + + return true; +} + +__host__ __device__ TEST_CONSTEXPR_CXX20 bool test_evil() +{ + mixin_accessor(); + mixin_accessor(); + return true; +} + +int main(int, char**) +{ + test(); + test_evil(); + +#if TEST_STD_VER >= 2020 + static_assert(test(), ""); + static_assert(test_evil(), ""); +#endif // TEST_STD_VER >= 2020 + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/element_type.verify.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/element_type.verify.cpp new file mode 100644 index 00000000000..190258bd9da --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/element_type.verify.cpp @@ -0,0 +1,57 @@ +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 +// UNSUPPORTED: nvrtc + +// + +// template class mdspan; +// +// Mandates: +// - ElementType is a complete object type that is neither an abstract class type nor an array type. +// - is_same_v is true. 
+ +#include + +#include "test_macros.h" + +class AbstractClass +{ +public: + __host__ __device__ virtual void method() = 0; +}; + +__host__ __device__ void not_abstract_class() +{ + // expected-error-re@*:* {{{{(static_assert|static assertion)}} failed {{.*}}mdspan: ElementType template parameter + // may not be an abstract class}} + cuda::std::mdspan> m; + unused(m); +} + +__host__ __device__ void not_array_type() +{ + // expected-error-re@*:* {{{{(static_assert|static assertion)}} failed {{.*}}mdspan: ElementType template parameter + // may not be an array type}} + cuda::std::mdspan> m; + unused(m); +} + +__host__ __device__ void element_type_mismatch() +{ + // expected-error-re@*:* {{{{(static_assert|static assertion)}} failed {{.*}}mdspan: ElementType template parameter + // must match AccessorPolicy::element_type}} + cuda::std::mdspan, cuda::std::layout_right, cuda::std::default_accessor> m; + unused(m); +} + +int main(int, char**) +{ + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/extents.verify.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/extents.verify.cpp new file mode 100644 index 00000000000..ae82ee7d942 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/extents.verify.cpp @@ -0,0 +1,34 @@ +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 +// UNSUPPORTED: nvrtc + +// + +// template class mdspan; +// +// Mandates: +// - Extents is a specialization of extents + +#include + +#include "test_macros.h" + +__host__ __device__ void not_extents() +{ + // expected-error-re@*:* {{{{(static_assert|static assertion)}} failed {{.*}}mdspan: Extents template parameter must + // be a specialization of extents.}} + cuda::std::mdspan m; + unused(m); +} + +int main(int, char**) +{ + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/index_operator.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/index_operator.pass.cpp new file mode 100644 index 00000000000..0a6551c970d --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/index_operator.pass.cpp @@ -0,0 +1,342 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// Test default iteration: +// +// template +// constexpr reference operator[](Indices...) const noexcept; +// +// Constraints: +// * sizeof...(Indices) == extents_type::rank() is true, +// * (is_convertible_v && ...) is true, and +// * (is_nothrow_constructible_v && ...) is true. +// +// Preconditions: +// * extents_type::index-cast(i) is a multidimensional index in extents_. 
+ +#include +#include +#include + +#include "../ConvertibleToIntegral.h" +#include "../CustomTestLayouts.h" +#include "test_macros.h" + +// GCC warns about comma operator changing its meaning inside [] in C++23 +#if defined(TEST_COMPILER_GCC) +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wcomma-subscript" +#endif // TEST_COMPILER_GCC + +template +__host__ __device__ constexpr auto& access(MDS mds, int64_t i0) +{ + return mds[i0]; +} + +#if defined(_LIBCUDACXX_HAS_MULTIARG_OPERATOR_BRACKETS) +template ()[cuda::std::declval()...]), + typename MDS::reference>::value>::value, + int> = 0> +__host__ __device__ constexpr bool check_operator_constraints(MDS m, Indices... idxs) +{ + unused(m[idxs...]); + return true; +} +#else // ^^^ _LIBCUDACXX_HAS_MULTIARG_OPERATOR_BRACKETS ^^^ / vvv!_LIBCUDACXX_HAS_MULTIARG_OPERATOR_BRACKETS vvv +template < + class MDS, + class Index, + class = cuda::std::enable_if_t()[cuda::std::declval()]), + typename MDS::reference>::value>> +__host__ __device__ constexpr bool check_operator_constraints(MDS m, Index idx) +{ + unused(m[idx]); + return true; +} +#endif // !_LIBCUDACXX_HAS_MULTIARG_OPERATOR_BRACKETS + +template +__host__ __device__ constexpr bool check_operator_constraints(MDS, Indices...) 
+{ + return false; +} + +#if defined(_LIBCUDACXX_HAS_MULTIARG_OPERATOR_BRACKETS) +template +__host__ __device__ constexpr auto& access(MDS mds) +{ + return mds[]; +} +template +__host__ __device__ constexpr auto& access(MDS mds, int64_t i0, int64_t i1) +{ + return mds[i0, i1]; +} +template +__host__ __device__ constexpr auto& access(MDS mds, int64_t i0, int64_t i1, int64_t i2) +{ + return mds[i0, i1, i2]; +} +template +__host__ __device__ constexpr auto& access(MDS mds, int64_t i0, int64_t i1, int64_t i2, int64_t i3) +{ + return mds[i0, i1, i2, i3]; +} +#endif // !_LIBCUDACXX_HAS_MULTIARG_OPERATOR_BRACKETS + +// We must ensure that we do not try to access multiarg accessors +template = 0> +__host__ __device__ constexpr void assert_access(MDS mds, Arg arg) +{ + int* ptr1 = &(mds.accessor().access(mds.data_handle(), mds.mapping()(arg))); + int* ptr2 = &access(mds, arg); + assert(ptr1 == ptr2); +} + +template = 0> +__host__ __device__ constexpr void assert_access(MDS mds, Args... args) +{ +#if defined(_LIBCUDACXX_HAS_MULTIARG_OPERATOR_BRACKETS) + int* ptr1 = &(mds.accessor().access(mds.data_handle(), mds.mapping()(args...))); + int* ptr2 = &access(mds, args...); + assert(ptr1 == ptr2); +#else + unused(mds, args...); +#endif // !_LIBCUDACXX_HAS_MULTIARG_OPERATOR_BRACKETS +} + +template = 0> +__host__ __device__ constexpr void iterate(MDS mds, Args... args) +{ + int* ptr1 = &(mds.accessor().access(mds.data_handle(), mds.mapping()(args...))); + assert_access(mds, args...); + + cuda::std::array args_arr{static_cast(args)...}; + int* ptr3 = &mds[args_arr]; + assert(ptr3 == ptr1); + int* ptr4 = &mds[cuda::std::span(args_arr)]; + assert(ptr4 == ptr1); +} + +template = 0> +__host__ __device__ constexpr void iterate(MDS mds, Args... 
args) +{ + constexpr int r = static_cast(MDS::extents_type::rank()) - 1 - static_cast(sizeof...(Args)); + for (typename MDS::index_type i = 0; i < mds.extents().extent(r); i++) + { + iterate(mds, i, args...); + } +} + +template +__host__ __device__ constexpr void test_iteration(Mapping m) +{ + cuda::std::array data{}; + using MDS = cuda::std::mdspan; + MDS mds(data.data(), m); + iterate(mds); +} + +template +__host__ __device__ constexpr void test_layout() +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_iteration(construct_mapping(Layout(), cuda::std::extents(1))); + test_iteration(construct_mapping(Layout(), cuda::std::extents(7))); + test_iteration(construct_mapping(Layout(), cuda::std::extents())); + test_iteration(construct_mapping(Layout(), cuda::std::extents())); + test_iteration(construct_mapping(Layout(), cuda::std::extents(1, 1, 1, 1))); + +#if defined(_LIBCUDACXX_HAS_MULTIARG_OPERATOR_BRACKETS) + test_iteration(construct_mapping(Layout(), cuda::std::extents())); + int data[1]; + // Check operator constraint for number of arguments + static_assert( + check_operator_constraints(cuda::std::mdspan(data, construct_mapping(Layout(), cuda::std::extents(1))), 0), + ""); + static_assert(!check_operator_constraints( + cuda::std::mdspan(data, construct_mapping(Layout(), cuda::std::extents(1))), 0, 0), + ""); + + // Check operator constraint for convertibility of arguments to index_type + static_assert(check_operator_constraints( + cuda::std::mdspan(data, construct_mapping(Layout(), cuda::std::extents(1))), IntType(0)), + ""); + static_assert(!check_operator_constraints( + cuda::std::mdspan(data, construct_mapping(Layout(), cuda::std::extents(1))), IntType(0)), + ""); + + // Check operator constraint for no-throw-constructibility of index_type from arguments + static_assert( + !check_operator_constraints( + cuda::std::mdspan(data, construct_mapping(Layout(), cuda::std::extents(1))), IntType(0)), + ""); + + // Check that mixed integrals work: note the 
second one tests that mdspan casts: layout_wrapping_integral does not + // accept IntType + static_assert(check_operator_constraints( + cuda::std::mdspan(data, construct_mapping(Layout(), cuda::std::extents(1, 1))), + int(0), + size_t(0)), + ""); + static_assert(check_operator_constraints( + cuda::std::mdspan(data, construct_mapping(Layout(), cuda::std::extents(1, 1))), + unsigned(0), + IntType(0)), + ""); + + constexpr bool t = true; + constexpr bool o = false; + static_assert(!check_operator_constraints( + cuda::std::mdspan(data, construct_mapping(Layout(), cuda::std::extents(1, 1))), + unsigned(0), + IntConfig(0)), + ""); + static_assert(check_operator_constraints( + cuda::std::mdspan(data, construct_mapping(Layout(), cuda::std::extents(1, 1))), + unsigned(0), + IntConfig(0)), + ""); + static_assert(check_operator_constraints( + cuda::std::mdspan(data, construct_mapping(Layout(), cuda::std::extents(1, 1))), + unsigned(0), + IntConfig(0)), + ""); + static_assert(!check_operator_constraints( + cuda::std::mdspan(data, construct_mapping(Layout(), cuda::std::extents(1, 1))), + unsigned(0), + IntConfig(0)), + ""); + static_assert(check_operator_constraints( + cuda::std::mdspan(data, construct_mapping(Layout(), cuda::std::extents(1, 1))), + unsigned(0), + IntConfig(0)), + ""); + + // layout_wrapped wouldn't quite work here the way we wrote the check + // IntConfig has configurable conversion properties: convert from const&, convert from non-const, no-throw-ctor from + // const&, no-throw-ctor from non-const + if constexpr (cuda::std::is_same::value) + { + static_assert( + !check_operator_constraints(cuda::std::mdspan(data, construct_mapping(Layout(), cuda::std::extents(1))), + cuda::std::array{IntConfig(0)}), + ""); + static_assert( + !check_operator_constraints(cuda::std::mdspan(data, construct_mapping(Layout(), cuda::std::extents(1))), + cuda::std::array{IntConfig(0)}), + ""); + static_assert( + !check_operator_constraints(cuda::std::mdspan(data, 
construct_mapping(Layout(), cuda::std::extents(1))), + cuda::std::array{IntConfig(0)}), + ""); + static_assert( + !check_operator_constraints(cuda::std::mdspan(data, construct_mapping(Layout(), cuda::std::extents(1))), + cuda::std::array{IntConfig(0)}), + ""); + static_assert( + check_operator_constraints(cuda::std::mdspan(data, construct_mapping(Layout(), cuda::std::extents(1))), + cuda::std::array{IntConfig(0)}), + ""); + static_assert( + check_operator_constraints(cuda::std::mdspan(data, construct_mapping(Layout(), cuda::std::extents(1))), + cuda::std::array{IntConfig(0)}), + ""); + + { + cuda::std::array idx{IntConfig(0)}; + cuda::std::span s(idx); + assert(!check_operator_constraints( + cuda::std::mdspan(data, construct_mapping(Layout(), cuda::std::extents(1))), s)); + } + { + cuda::std::array idx{IntConfig(0)}; + cuda::std::span s(idx); + assert(!check_operator_constraints( + cuda::std::mdspan(data, construct_mapping(Layout(), cuda::std::extents(1))), s)); + } + { + cuda::std::array idx{IntConfig(0)}; + cuda::std::span s(idx); + assert(!check_operator_constraints( + cuda::std::mdspan(data, construct_mapping(Layout(), cuda::std::extents(1))), s)); + } + { + cuda::std::array idx{IntConfig(0)}; + cuda::std::span s(idx); + assert(!check_operator_constraints( + cuda::std::mdspan(data, construct_mapping(Layout(), cuda::std::extents(1))), s)); + } + { + cuda::std::array idx{IntConfig(0)}; + cuda::std::span s(idx); + assert(!check_operator_constraints( + cuda::std::mdspan(data, construct_mapping(Layout(), cuda::std::extents(1))), s)); + } + { + cuda::std::array idx{IntConfig(0)}; + cuda::std::span s(idx); + assert(!check_operator_constraints( + cuda::std::mdspan(data, construct_mapping(Layout(), cuda::std::extents(1))), s)); + } + } +#endif // _LIBCUDACXX_HAS_MULTIARG_OPERATOR_BRACKETS +} + +template +__host__ __device__ constexpr void test_layout_large() +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_iteration(construct_mapping(Layout(), 
cuda::std::extents(3, 5, 6))); + test_iteration(construct_mapping(Layout(), cuda::std::extents(3, 6))); +} + +// mdspan::operator[] casts to index_type before calling mapping +// mapping requirements only require the index operator to mixed integer types not anything convertible to index_type +__host__ __device__ constexpr void test_index_cast_happens() {} + +__host__ __device__ constexpr bool test() +{ + test_layout(); + test_layout(); + test_layout>(); + return true; +} + +__host__ __device__ constexpr bool test_large() +{ + test_layout_large(); + test_layout_large(); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + + // The large test iterates over ~10k loop indices. + // With assertions enabled this triggered the maximum default limit + // for steps in consteval expressions. Assertions roughly double the + // total number of instructions, so this was already close to the maximum. + // test_large(); + return 0; +} + +#if defined(TEST_COMPILER_GCC) +# pragma GCC diagnostic pop +#endif // TEST_COMPILER_GCC diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/mapping.verify.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/mapping.verify.cpp new file mode 100644 index 00000000000..88a2960a157 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/mapping.verify.cpp @@ -0,0 +1,34 @@ +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 +// UNSUPPORTED: nvrtc + +// + +// template class mdspan; +// +// Mandates: +// - LayoutPolicy shall meet the layout mapping policy requirements ([mdspan.layout.policy.reqmts]) + +#include + +#include "test_macros.h" + +__host__ __device__ void not_layout_policy() +{ + // expected-error-re@*:* {{{{(static_assert|static assertion)}} failed {{.*}}mdspan: LayoutPolicy template parameter + // is invalid. A common mistake is to pass a layout mapping instead of a layout policy}} + cuda::std::mdspan, cuda::std::layout_left::template mapping>> m; + unused(m); +} + +int main(int, char**) +{ + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/move.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/move.pass.cpp new file mode 100644 index 00000000000..10ff562fd1b --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/move.pass.cpp @@ -0,0 +1,138 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// constexpr mdspan& operator=(mdspan&&) = default; +// +// A specialization of mdspan is a trivially copyable type if its accessor_type, mapping_type, and data_handle_type are +// trivially copyable types. 
+ +#include +#include +#include +#include + +#include "../CustomTestLayouts.h" +#include "../MinimalElementType.h" +#include "CustomTestAccessors.h" +#include "test_macros.h" + +template +__host__ __device__ constexpr void test_mdspan_types(const H& handle, const M& map, const A& acc) +{ + using MDS = cuda::std::mdspan; + + MDS m_org(handle, map, acc); + MDS m_copy(m_org); + MDS m(cuda::std::move(m_copy)); + + assert(m.extents() == map.extents()); + test_equality_handle(m, handle); + test_equality_mapping(m, map); + test_equality_accessor(m, acc); + + static_assert( + cuda::std::is_trivially_move_assignable::value + == (cuda::std::is_trivially_move_assignable::value && cuda::std::is_trivially_move_assignable::value + && cuda::std::is_trivially_move_assignable::value), + ""); +} + +template +__host__ __device__ constexpr void mixin_extents(const H& handle, const L& layout, const A& acc) +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents()), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(7)), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents()), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(2, 3)), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(0, 3)), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(1, 2, 3, 2)), acc); +} + +template +__host__ __device__ constexpr void mixin_layout(const H& handle, const A& acc) +{ + // make sure we test a trivially copyable mapping + static_assert(cuda::std::is_trivially_move_assignable< + typename cuda::std::layout_left::template mapping>>::value, + ""); + mixin_extents(handle, cuda::std::layout_left(), acc); + mixin_extents(handle, cuda::std::layout_right(), acc); + // make sure we test a not trivially copyable mapping + static_assert(!cuda::std::is_trivially_move_assignable< + typename 
layout_wrapping_integral<4>::template mapping>>::value, + ""); + mixin_extents(handle, layout_wrapping_integral<4>(), acc); +} + +template ::value, int> = 0> +__host__ __device__ constexpr void mixin_accessor() +{ + cuda::std::array elements{42}; + // make sure we test trivially constructible accessor and data_handle + static_assert(cuda::std::is_trivially_copyable>::value, ""); + static_assert(cuda::std::is_trivially_copyable::data_handle_type>::value, ""); + mixin_layout(elements.data(), cuda::std::default_accessor()); + + // Using weird accessor/data_handle + // Make sure they actually got the properties we want to test + // checked_accessor is noexcept copy constructible except for const double + checked_accessor acc(1024); + static_assert(noexcept(checked_accessor(acc)) != cuda::std::is_same::value, ""); + mixin_layout(typename checked_accessor::data_handle_type(elements.data()), acc); +} + +template ::value, int> = 0> +__host__ __device__ TEST_CONSTEXPR_CXX20 void mixin_accessor() +{ + ElementPool elements; + // make sure we test trivially constructible accessor and data_handle + static_assert(cuda::std::is_trivially_copyable>::value, ""); + static_assert(cuda::std::is_trivially_copyable::data_handle_type>::value, ""); + mixin_layout(elements.get_ptr(), cuda::std::default_accessor()); + + // Using weird accessor/data_handle + // Make sure they actually got the properties we want to test + // checked_accessor is noexcept copy constructible except for const double + checked_accessor acc(1024); + static_assert(noexcept(checked_accessor(acc)) != cuda::std::is_same::value, ""); + mixin_layout(typename checked_accessor::data_handle_type(elements.get_ptr()), acc); +} + +__host__ __device__ constexpr bool test() +{ + mixin_accessor(); + mixin_accessor(); + mixin_accessor(); + mixin_accessor(); + return true; +} + +__host__ __device__ TEST_CONSTEXPR_CXX20 bool test_evil() +{ + mixin_accessor(); + mixin_accessor(); + return true; +} + +int main(int, char**) +{ + 
test(); + test_evil(); + + static_assert(test(), ""); +#if TEST_STD_VER >= 2020 + static_assert(test_evil(), ""); +#endif // TEST_STD_VER >= 2020 + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/properties.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/properties.pass.cpp new file mode 100644 index 00000000000..d74388b21a0 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/properties.pass.cpp @@ -0,0 +1,260 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// + +// template> +// class mdspan { +// public: +// static constexpr rank_type rank() noexcept { return extents_type::rank(); } +// static constexpr rank_type rank_dynamic() noexcept { return extents_type::rank_dynamic(); } +// static constexpr size_t static_extent(rank_type r) noexcept +// { return extents_type::static_extent(r); } +// constexpr index_type extent(rank_type r) const noexcept { return extents().extent(r); } +// +// constexpr size_type size() const noexcept; +// [[nodiscard]] constexpr bool empty() const noexcept; +// +// +// constexpr const extents_type& extents() const noexcept { return map_.extents(); } +// constexpr const data_handle_type& data_handle() const noexcept { return ptr_; } +// constexpr const mapping_type& mapping() const noexcept { return map_; } +// constexpr const accessor_type& accessor() const noexcept { return acc_; } +// static constexpr bool is_always_unique() +// { return mapping_type::is_always_unique(); } +// static constexpr bool 
is_always_exhaustive() +// { return mapping_type::is_always_exhaustive(); } +// static constexpr bool is_always_strided() +// { return mapping_type::is_always_strided(); } +// +// constexpr bool is_unique() const +// { return map_.is_unique(); } +// constexpr bool is_exhaustive() const +// { return map_.is_exhaustive(); } +// constexpr bool is_strided() const +// { return map_.is_strided(); } +// constexpr index_type stride(rank_type r) const +// { return map_.stride(r); } +// }; +// +// Each specialization MDS of mdspan models copyable and +// - is_nothrow_move_constructible_v is true, +// - is_nothrow_move_assignable_v is true, and +// - is_nothrow_swappable_v is true. +// A specialization of mdspan is a trivially copyable type if its accessor_type, mapping_type, and data_handle_type are +// trivially copyable types. + +#include +#include +#include +#include + +#include + +#include "../CustomTestLayouts.h" +#include "../MinimalElementType.h" +#include "test_macros.h" + +template 0), int> = 0> +__host__ __device__ constexpr void test_mdspan_size(const MDS& m) +{ + typename MDS::size_type size = 1; + for (typename MDS::rank_type r = 0; r < MDS::rank(); r++) + { + ASSERT_SAME_TYPE(decltype(MDS::static_extent(r)), size_t); + ASSERT_NOEXCEPT(MDS::static_extent(r)); + assert(MDS::static_extent(r) == MDS::extents_type::static_extent(r)); + ASSERT_SAME_TYPE(decltype(m.extent(r)), typename MDS::index_type); + ASSERT_NOEXCEPT(m.extent(r)); + assert(m.extent(r) == m.extents().extent(r)); + size *= m.extent(r); + } + assert(m.size() == size); +} + +template = 0> +__host__ __device__ constexpr void test_mdspan_size(const MDS& m) +{ + assert(m.size() == 1); +} + +template 0), int> = 0> +__host__ __device__ constexpr void test_mdspan_stride(const MDS& m, const M& map) +{ + if (m.is_strided()) + { + for (typename MDS::rank_type r = 0; r < MDS::rank(); r++) + { + ASSERT_SAME_TYPE(decltype(m.stride(r)), typename MDS::index_type); + assert(!noexcept(m.stride(r))); + 
assert(m.stride(r) == map.stride(r)); + } + } +} + +template = 0> +__host__ __device__ constexpr void test_mdspan_stride(const MDS&, const M&) +{} + +template +__host__ __device__ constexpr void test_mdspan_types(const H& handle, const M& map, const A& acc) +{ + using MDS = cuda::std::mdspan; + MDS m(handle, map, acc); + + // ===================================== + // Traits for every mdspan + // ===================================== + static_assert(cuda::std::copyable, ""); + static_assert(cuda::std::is_nothrow_move_constructible::value, ""); + static_assert(cuda::std::is_nothrow_move_assignable::value, ""); + static_assert(cuda::std::is_nothrow_swappable::value, ""); + + // ===================================== + // Invariants coming from data handle + // ===================================== + // data_handle() + ASSERT_SAME_TYPE(decltype(m.data_handle()), const H&); + ASSERT_NOEXCEPT(m.data_handle()); + test_equality_handle(m, handle); + + // ===================================== + // Invariants coming from extents + // ===================================== + + // extents() + ASSERT_SAME_TYPE(decltype(m.extents()), const typename MDS::extents_type&); + ASSERT_NOEXCEPT(m.extents()); + assert(m.extents() == map.extents()); + + // rank() + ASSERT_SAME_TYPE(decltype(m.rank()), typename MDS::rank_type); + ASSERT_NOEXCEPT(m.rank()); + assert(MDS::rank() == MDS::extents_type::rank()); + + // rank_dynamic() + ASSERT_SAME_TYPE(decltype(m.rank_dynamic()), typename MDS::rank_type); + ASSERT_NOEXCEPT(m.rank_dynamic()); + assert(MDS::rank_dynamic() == MDS::extents_type::rank_dynamic()); + + // extent(r), static_extent(r), size() + test_mdspan_size(m); + ASSERT_SAME_TYPE(decltype(m.size()), typename MDS::size_type); + ASSERT_NOEXCEPT(m.size()); + + // empty() + ASSERT_SAME_TYPE(decltype(m.empty()), bool); + ASSERT_NOEXCEPT(m.empty()); + assert(m.empty() == (m.size() == 0)); + + // ===================================== + // Invariants coming from mapping + // 
===================================== + + // mapping() + ASSERT_SAME_TYPE(decltype(m.mapping()), const M&); + ASSERT_NOEXCEPT(m.mapping()); + + // is_[always_]unique/exhaustive/strided() + ASSERT_SAME_TYPE(decltype(MDS::is_always_unique()), bool); + ASSERT_SAME_TYPE(decltype(MDS::is_always_exhaustive()), bool); + ASSERT_SAME_TYPE(decltype(MDS::is_always_strided()), bool); + ASSERT_SAME_TYPE(decltype(m.is_unique()), bool); + ASSERT_SAME_TYPE(decltype(m.is_exhaustive()), bool); + ASSERT_SAME_TYPE(decltype(m.is_strided()), bool); + assert(!noexcept(MDS::is_always_unique())); + assert(!noexcept(MDS::is_always_exhaustive())); + assert(!noexcept(MDS::is_always_strided())); + assert(!noexcept(m.is_unique())); + assert(!noexcept(m.is_exhaustive())); + assert(!noexcept(m.is_strided())); + assert(MDS::is_always_unique() == M::is_always_unique()); + assert(MDS::is_always_exhaustive() == M::is_always_exhaustive()); + assert(MDS::is_always_strided() == M::is_always_strided()); + assert(m.is_unique() == map.is_unique()); + assert(m.is_exhaustive() == map.is_exhaustive()); + assert(m.is_strided() == map.is_strided()); + + // stride(r) + test_mdspan_stride(m, map); + + // ===================================== + // Invariants coming from accessor + // ===================================== + + // accessor() + ASSERT_SAME_TYPE(decltype(m.accessor()), const A&); + ASSERT_NOEXCEPT(m.accessor()); +} + +template +__host__ __device__ constexpr void mixin_extents(const H& handle, const L& layout, const A& acc) +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents()), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(7)), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents()), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(2, 3)), acc); + test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(0, 3)), acc); + 
test_mdspan_types(handle, construct_mapping(layout, cuda::std::extents(1, 2, 3, 2)), acc); +} + +template +__host__ __device__ constexpr void mixin_layout(const H& handle, const A& acc) +{ + mixin_extents(handle, cuda::std::layout_left(), acc); + mixin_extents(handle, cuda::std::layout_right(), acc); + mixin_extents(handle, layout_wrapping_integral<4>(), acc); +} + +template ::value, int> = 0> +__host__ __device__ constexpr void mixin_accessor() +{ + cuda::std::array elements{42}; + mixin_layout(elements.data(), cuda::std::default_accessor()); +} + +template ::value, int> = 0> +__host__ __device__ TEST_CONSTEXPR_CXX20 void mixin_accessor() +{ + ElementPool elements; + mixin_layout(elements.get_ptr(), cuda::std::default_accessor()); +} + +__host__ __device__ constexpr bool test() +{ + mixin_accessor(); + mixin_accessor(); + mixin_accessor(); + mixin_accessor(); + return true; +} + +__host__ __device__ TEST_CONSTEXPR_CXX20 bool test_evil() +{ + mixin_accessor(); + mixin_accessor(); + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + + test_evil(); +#if TEST_STD_VER >= 2020 + static_assert(test_evil(), ""); +#endif // TEST_STD_VER >= 2020 + + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/swap.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/swap.pass.cpp new file mode 100644 index 00000000000..94b240b6d72 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/swap.pass.cpp @@ -0,0 +1,72 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES.
+// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// +// +// friend constexpr void swap(mdspan& x, mdspan& y) noexcept; +// +// Effects: Equivalent to: +// swap(x.ptr_, y.ptr_); +// swap(x.map_, y.map_); +// swap(x.acc_, y.acc_); + +#include +#include +#include +#include + +#include "../CustomTestLayouts.h" +#include "../MinimalElementType.h" +#include "test_macros.h" + +template +__host__ __device__ constexpr void test_swap(MDS a, MDS b) +{ + auto org_a = a; + auto org_b = b; + swap(a, b); + assert(a.extents() == org_b.extents()); + assert(b.extents() == org_a.extents()); + test_equality_handle(a, org_b.data_handle()); + test_equality_handle(b, org_a.data_handle()); + test_equality_mapping(a, org_b.mapping()); + test_equality_mapping(b, org_a.mapping()); + // This check uses a side effect of layout_wrapping_integral::swap to make sure + // mdspan calls the underlying components' swap via ADL + test_swap_counter(); +} + +__host__ __device__ constexpr bool test() +{ + using extents_t = cuda::std::extents; + float data_a[1024] = {}; + float data_b[1024] = {}; + { + cuda::std::mdspan a(data_a, extents_t(12)); + cuda::std::mdspan b(data_b, extents_t(5)); + test_swap(a, b); + } + { + layout_wrapping_integral<4>::template mapping map_a(extents_t(12), not_extents_constructible_tag()), + map_b(extents_t(5), not_extents_constructible_tag()); + cuda::std::mdspan> a(data_a, map_a); + cuda::std::mdspan> b(data_b, map_b); + test_swap(a, b); + } + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/types.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/types.pass.cpp new file mode 100644 index 00000000000..8b932a60bae --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/mdspan/types.pass.cpp @@ -0,0 +1,227 @@ 
+//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +// +// +// template> +// class mdspan { +// public: +// using extents_type = Extents; +// using layout_type = LayoutPolicy; +// using accessor_type = AccessorPolicy; +// using mapping_type = typename layout_type::template mapping; +// using element_type = ElementType; +// using value_type = remove_cv_t; +// using index_type = typename extents_type::index_type; +// using size_type = typename extents_type::size_type; +// using rank_type = typename extents_type::rank_type; +// using data_handle_type = typename accessor_type::data_handle_type; +// using reference = typename accessor_type::reference; +// ... +// }; + +#include +#include +#include +#include + +#include "../CustomTestLayouts.h" +#include "../MinimalElementType.h" +#include "CustomTestAccessors.h" +#include "test_macros.h" + +// Calculated expected size of an mdspan +// Note this expects that only default_accessor is empty +template +__host__ __device__ constexpr size_t expected_size() +{ + size_t sizeof_dht = sizeof(typename MDS::data_handle_type); + size_t result = sizeof_dht; + if (MDS::rank_dynamic() > 0) + { + size_t alignof_idx = alignof(typename MDS::index_type); + size_t sizeof_idx = sizeof(typename MDS::index_type); + // add alignment if necessary + result += sizeof_dht % alignof_idx == 0 ? 
0 : alignof_idx - (sizeof_dht % alignof_idx); + // add sizeof stored extents + result += MDS::rank_dynamic() * sizeof_idx; + } + using A = typename MDS::accessor_type; + if (!cuda::std::is_same>::value) + { + size_t alignof_acc = alignof(A); + size_t sizeof_acc = sizeof(A); + // add alignment if necessary + result += result % alignof_acc == 0 ? 0 : alignof_acc - (result % alignof_acc); + // add sizeof stored accessor + result += sizeof_acc; + } + // add alignment of the mdspan itself + result += result % alignof(MDS) == 0 ? 0 : alignof(MDS) - (result % alignof(MDS)); + return result; +} + +// check triviality +template +constexpr bool trv_df_ctor = cuda::std::is_trivially_default_constructible::value; +template +constexpr bool trv_cp_ctor = cuda::std::is_trivially_copy_constructible::value; +template +constexpr bool trv_mv_ctor = cuda::std::is_trivially_move_constructible::value; +template +constexpr bool trv_dstruct = cuda::std::is_trivially_destructible::value; +template +constexpr bool trv_cp_asgn = cuda::std::is_trivially_copy_assignable::value; +template +constexpr bool trv_mv_asgn = cuda::std::is_trivially_move_assignable::value; + +template +__host__ __device__ constexpr void check_triviality() +{ + static_assert(trv_df_ctor == default_ctor, ""); + static_assert(trv_cp_ctor == copy_ctor, ""); + static_assert(trv_mv_ctor == move_ctor, ""); + static_assert(trv_dstruct == destr, ""); + static_assert(trv_cp_asgn == copy_assign, ""); + static_assert(trv_mv_asgn == move_assign, ""); +} + +// Standard extension +template ::value + || cuda::std::is_same::value, + int> = 0> +__host__ __device__ constexpr void test_mdspan_no_unique_address() +{ + static_assert(sizeof(MDS) == expected_size(), ""); +} + +template ::value + && !cuda::std::is_same::value, + int> = 0> +__host__ __device__ constexpr void test_mdspan_no_unique_address() +{} + +template +__host__ __device__ constexpr void test_mdspan_types() +{ + using MDS = cuda::std::mdspan; + + ASSERT_SAME_TYPE(typename 
MDS::extents_type, E); + ASSERT_SAME_TYPE(typename MDS::layout_type, L); + ASSERT_SAME_TYPE(typename MDS::accessor_type, A); + ASSERT_SAME_TYPE(typename MDS::mapping_type, typename L::template mapping); + ASSERT_SAME_TYPE(typename MDS::element_type, T); + ASSERT_SAME_TYPE(typename MDS::value_type, cuda::std::remove_cv_t); + ASSERT_SAME_TYPE(typename MDS::index_type, typename E::index_type); + ASSERT_SAME_TYPE(typename MDS::size_type, typename E::size_type); + ASSERT_SAME_TYPE(typename MDS::rank_type, typename E::rank_type); + ASSERT_SAME_TYPE(typename MDS::data_handle_type, typename A::data_handle_type); + ASSERT_SAME_TYPE(typename MDS::reference, typename A::reference); + + // This miserably failed with clang-cl - likely because it doesn't honor/enable + // no-unique-address fully by default + // #ifndef TEST_COMPILER_MSVC + test_mdspan_no_unique_address(); + // #endif // TEST_COMPILER_MSVC + + // check default template parameters: + ASSERT_SAME_TYPE(cuda::std::mdspan, + cuda::std::mdspan>); + ASSERT_SAME_TYPE(cuda::std::mdspan, cuda::std::mdspan>); + + // check triviality + using DH = typename MDS::data_handle_type; + using MP = typename MDS::mapping_type; + + check_triviality && trv_cp_ctor && trv_cp_ctor, + trv_mv_ctor && trv_mv_ctor && trv_mv_ctor, + trv_dstruct && trv_dstruct && trv_dstruct, + trv_cp_asgn && trv_cp_asgn && trv_cp_asgn, + trv_mv_asgn && trv_mv_asgn && trv_mv_asgn>(); +} + +template +__host__ __device__ constexpr void mixin_extents() +{ + constexpr size_t D = cuda::std::dynamic_extent; + test_mdspan_types, L, A>(); + test_mdspan_types, L, A>(); + test_mdspan_types, L, A>(); + test_mdspan_types, L, A>(); + test_mdspan_types, L, A>(); + test_mdspan_types, L, A>(); + test_mdspan_types, L, A>(); + test_mdspan_types, L, A>(); +} + +template +__host__ __device__ constexpr void mixin_layout() +{ + mixin_extents(); + mixin_extents(); + mixin_extents, A>(); +} + +template +__host__ __device__ constexpr void mixin_accessor() +{ + mixin_layout>(); + 
mixin_layout>(); +} + +__host__ __device__ constexpr bool test() +{ + mixin_accessor(); + mixin_accessor(); + mixin_accessor(); + mixin_accessor(); + + // sanity checks for triviality + check_triviality>, false, true, true, true, true, true>(); + check_triviality>, false, true, true, true, true, true>(); + check_triviality, cuda::std::layout_right, checked_accessor>, + false, + true, + false, + true, + true, + true>(); + + return true; +} + +__host__ __device__ TEST_CONSTEXPR_CXX20 bool test_evil() +{ + mixin_accessor(); + mixin_accessor(); + + return true; +} + +int main(int, char**) +{ + test(); + test_evil(); + + static_assert(test(), ""); +#if TEST_STD_VER >= 2020 + static_assert(test_evil(), ""); +#endif // TEST_STD_VER >= 2020 + + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/my_accessor.hpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/my_accessor.hpp deleted file mode 100644 index fe875fb22dc..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/my_accessor.hpp +++ /dev/null @@ -1,43 +0,0 @@ -#ifndef _MY_ACCESSOR_HPP -#define _MY_ACCESSOR_HPP - -#include "foo_customizations.hpp" - -namespace Foo -{ -// Same as Foo::foo_accessor but -// 1. Doesn't have a default constructor -// 2. 
Isn't contructible from the default accessor -template -struct my_accessor -{ - using offset_policy = my_accessor; - using element_type = T; - using reference = T&; - using data_handle_type = foo_ptr; - - _LIBCUDACXX_HIDE_FROM_ABI constexpr my_accessor(int* ptr) noexcept - { - flag = ptr; - } - - template - _LIBCUDACXX_HIDE_FROM_ABI constexpr my_accessor(my_accessor other) noexcept - { - flag = other.flag; - } - - _CCCL_HOST_DEVICE constexpr reference access(data_handle_type p, size_t i) const noexcept - { - return p.data[i]; - } - - _CCCL_HOST_DEVICE constexpr data_handle_type offset(data_handle_type p, size_t i) const noexcept - { - return data_handle_type(p.data + i); - } - int* flag; -}; -} // namespace Foo - -#endif diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/my_int.hpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/my_int.hpp deleted file mode 100644 index 4f27784cd61..00000000000 --- a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/my_int.hpp +++ /dev/null @@ -1,65 +0,0 @@ -#ifndef _MY_INT_HPP -#define _MY_INT_HPP - -#include "test_macros.h" - -struct my_int_non_convertible; - -struct my_int -{ - int _val; - - __host__ __device__ my_int(my_int_non_convertible) noexcept; - __host__ __device__ constexpr my_int(int val) - : _val(val){}; - __host__ __device__ constexpr operator int() const noexcept - { - return _val; - } -}; - -template <> -struct cuda::std::is_integral : cuda::std::true_type -{}; - -// Wrapper type that's not implicitly convertible - -struct my_int_non_convertible -{ - my_int _val; - - __host__ __device__ my_int_non_convertible(); - __host__ __device__ my_int_non_convertible(my_int val) - : _val(val) {}; - __host__ __device__ operator my_int() const noexcept - { - return _val; - } -}; - -__host__ __device__ my_int::my_int(my_int_non_convertible) noexcept {} - -template <> -struct cuda::std::is_integral : cuda::std::true_type -{}; - -// Wrapper type that's not nothrow-constructible - -struct 
my_int_non_nothrow_constructible -{ - int _val; - - __host__ __device__ my_int_non_nothrow_constructible(); - __host__ __device__ my_int_non_nothrow_constructible(int val) - : _val(val) {}; - __host__ __device__ operator int() const - { - return _val; - } -}; - -template <> -struct cuda::std::is_integral : cuda::std::true_type -{}; - -#endif diff --git a/pyproject.toml b/pyproject.toml index 20e78ab35ef..86f9909f6fe 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,6 +19,6 @@ extend-select = ["I"] skip = "./.git,./build,./CITATION.md" # ignore short words, and typename parameters like OffsetT ignore-regex = "\\b(.{1,4}|[A-Z]\\w*T)\\b" -ignore-words-list = "inout,imovable,optionN,aCount,quitted,Invokable,countr,unexpect,numer,euclidian,couldn,OffsetT" +ignore-words-list = "inout,imovable,optionN,aCount,quitted,Invokable,countr,unexpect,numer,euclidian,couldn,OffsetT,FromM" builtin = "clear" quiet-level = 3 From 220ce83ea697ebe32dc142f66b98b083383c45ca Mon Sep 17 00:00:00 2001 From: Michael Schellenberger Costa Date: Thu, 30 May 2024 17:50:22 +0200 Subject: [PATCH 2/3] Implement `submdspan_extents` --- .../include/cuda/std/__mdspan/concepts.h | 87 ++++ .../cuda/std/__mdspan/submdspan_extents.h | 193 ++++++++ .../cuda/std/__mdspan/submdspan_helper.h | 252 ++++++++++ libcudacxx/include/cuda/std/mdspan | 2 + libcudacxx/include/cuda/std/version | 1 + .../mdspan/submdspan/strided_slice.pass.cpp | 129 +++++ .../submdspan/submdspan_extent.pass.cpp | 467 ++++++++++++++++++ 7 files changed, 1131 insertions(+) create mode 100644 libcudacxx/include/cuda/std/__mdspan/submdspan_extents.h create mode 100644 libcudacxx/include/cuda/std/__mdspan/submdspan_helper.h create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/submdspan/strided_slice.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/submdspan/submdspan_extent.pass.cpp diff --git a/libcudacxx/include/cuda/std/__mdspan/concepts.h 
b/libcudacxx/include/cuda/std/__mdspan/concepts.h index d53b4bc563d..6dec80fd6c9 100644 --- a/libcudacxx/include/cuda/std/__mdspan/concepts.h +++ b/libcudacxx/include/cuda/std/__mdspan/concepts.h @@ -29,9 +29,23 @@ #endif // no system header #include +#include +#include +#include #include +#include +#include #include +#include +#include +#include #include +#include +#include +#include +#include +#include +#include #if _CCCL_STD_VER >= 2014 @@ -53,6 +67,50 @@ template _CCCL_INLINE_VAR constexpr bool __is_mapping_of = _CCCL_TRAIT(is_same, typename _Layout::template mapping, _Mapping); +// [mdspan.layout.reqmts]/1 +# if _CCCL_STD_VER >= 2020 +template +concept __layout_mapping_req_type = + copyable<_Mapping> && equality_comparable<_Mapping> && // + is_nothrow_move_constructible_v<_Mapping> && is_move_assignable_v<_Mapping> && is_nothrow_swappable_v<_Mapping>; +# else // ^^^ _CCCL_STD_VER >= 2020 ^^^ / vvv _CCCL_STD_VER <= 2017 vvv +template +_CCCL_CONCEPT_FRAGMENT( + __layout_mapping_req_type_, + requires()( // + requires(copyable<_Mapping>), + requires(equality_comparable<_Mapping>), + requires(_CCCL_TRAIT(is_nothrow_move_constructible, _Mapping)), + requires(_CCCL_TRAIT(is_move_assignable, _Mapping)), + requires(_CCCL_TRAIT(is_nothrow_swappable, _Mapping)))); + +template +_CCCL_CONCEPT __layout_mapping_req_type = _CCCL_FRAGMENT(__layout_mapping_req_type_, _Mapping); +# endif // _CCCL_STD_VER <= 2017 + +// [mdspan.layout.reqmts]/2-4 +# if _CCCL_STD_VER >= 2020 +template +concept __layout_mapping_req_types = requires { + requires __is_extents_v; + requires same_as; + requires same_as; + requires __is_mapping_of; +}; +# else // ^^^ _CCCL_STD_VER >= 2020 ^^^ / vvv _CCCL_STD_VER <= 2017 vvv +template +_CCCL_CONCEPT_FRAGMENT( + __layout_mapping_req_types_, + requires()( // + requires(__is_extents_v), + requires(same_as), + requires(same_as), + requires(__is_mapping_of))); + +template +_CCCL_CONCEPT __layout_mapping_req_types = _CCCL_FRAGMENT(__layout_mapping_req_types_, 
_Mapping); +# endif // _CCCL_STD_VER <= 2017 + // [mdspan.layout.stride.expo]/4 # if _CCCL_STD_VER >= 2020 template @@ -96,6 +154,35 @@ _CCCL_CONCEPT __layout_mapping_alike = _CCCL_FRAGMENT(__layout_mapping_alike_, _ } // namespace __mdspan_detail +# if _CCCL_STD_VER >= 2020 + +template +concept __index_pair_like = + __pair_like<_Tp> // + && convertible_to, _IndexType> // + && convertible_to, _IndexType>; + +# else // ^^^ _CCCL_STD_VER >= 2020 ^^^ / vvv _CCCL_STD_VER <= 2017 vvv + +template +_CCCL_CONCEPT_FRAGMENT( + __index_pair_like_, + requires()( // + requires(__pair_like<_Tp>), + requires(convertible_to, _IndexType>), + requires(convertible_to, _IndexType>) // + )); +template +_CCCL_CONCEPT __index_pair_like = _CCCL_FRAGMENT(__index_pair_like_, _Tp, _IndexType); + +# endif // _CCCL_STD_VER <= 2017 + +// [mdspan.submdspan.strided.slice]/3 + +template +_CCCL_CONCEPT __index_like = + _CCCL_TRAIT(is_signed, _Tp) || _CCCL_TRAIT(is_unsigned, _Tp) || __integral_constant_like<_Tp>; + _LIBCUDACXX_END_NAMESPACE_STD #endif // _CCCL_STD_VER >= 2014 diff --git a/libcudacxx/include/cuda/std/__mdspan/submdspan_extents.h b/libcudacxx/include/cuda/std/__mdspan/submdspan_extents.h new file mode 100644 index 00000000000..ac3e8b91703 --- /dev/null +++ b/libcudacxx/include/cuda/std/__mdspan/submdspan_extents.h @@ -0,0 +1,193 @@ +//===----------------------------------------------------------------------===// +// +// Part of libcu++, the C++ Standard Library for your entire system, +// under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. 
+// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCUDACXX___MDSPAN_SUBMDSPAN_EXTENTS_H +#define _LIBCUDACXX___MDSPAN_SUBMDSPAN_EXTENTS_H + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if _CCCL_STD_VER >= 2014 + +_LIBCUDACXX_BEGIN_NAMESPACE_STD + +// Helper to get an index_sequence of all slices that are not convertible to index_type +template +_CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr auto +__filter_slices_convertible_to_index(index_sequence<_FilteredIndices...>, index_sequence<>) noexcept +{ + return index_sequence<_FilteredIndices...>{}; +} + +template +_CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr auto __filter_slices_convertible_to_index( + index_sequence<_SliceIndices...>, index_sequence<_CurrentIndex, _Remaining...>) noexcept +{ + using _SliceType = __get_slice_type<_CurrentIndex, _Slices...>; + if constexpr (convertible_to<_SliceType, _IndexType>) + { + return _CUDA_VSTD::__filter_slices_convertible_to_index<_IndexType, _Slices...>( + index_sequence<_SliceIndices...>{}, index_sequence<_Remaining...>{}); + } + else + { + return _CUDA_VSTD::__filter_slices_convertible_to_index<_IndexType, _Slices...>( + index_sequence<_SliceIndices..., _CurrentIndex>{}, index_sequence<_Remaining...>{}); + } +} + +// [mdspan.sub.extents] +// [mdspan.sub.extents-4.2.2] +template +_CCCL_CONCEPT __subextents_is_index_pair = _CCCL_REQUIRES_EXPR((_Extents, _SliceType))( + requires(__index_pair_like<_SliceType, typename _Extents::index_type>), + requires(__integral_constant_like>), + requires(__integral_constant_like>)); + +// 
[mdspan.sub.extents-4.2.3] +template +_CCCL_CONCEPT __subextents_is_strided_slice_zero_extent = _CCCL_REQUIRES_EXPR((_Extents, _SliceType))( + requires(__is_strided_slice>), + requires(__integral_constant_like), + requires(typename _SliceType::extent_type() == 0)); + +// [mdspan.sub.extents-4.2.4] +template +_CCCL_CONCEPT __subextents_is_strided_slice = _CCCL_REQUIRES_EXPR((_SliceType))( + requires(__is_strided_slice>), + requires(__integral_constant_like), + requires(__integral_constant_like)); + +struct __get_subextent +{ + template + _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI static constexpr size_t __get_static_subextents() noexcept + { + // [mdspan.sub.extents-4.2.1] + if constexpr (convertible_to<_SliceType, full_extent_t>) + { + return _Extents::static_extent(_SliceIndex); + } + // [mdspan.sub.extents-4.2.2] + else if constexpr (__subextents_is_index_pair<_Extents, _SliceType>) + { + return _CUDA_VSTD::__de_ice(tuple_element_t<1, _SliceType>()) + - _CUDA_VSTD::__de_ice(tuple_element_t<0, _SliceType>()); + } + // [mdspan.sub.extents-4.2.3] + else if constexpr (__subextents_is_strided_slice_zero_extent<_Extents, _SliceType>) + { + return 0; + } + // [mdspan.sub.extents-4.2.4] + else if constexpr (__subextents_is_strided_slice<_SliceType>) + { + return 1 + + (_CUDA_VSTD::__de_ice(typename _SliceType::extent_type()) - 1) + / _CUDA_VSTD::__de_ice(typename _SliceType::stride_type()); + } + else + { + // [mdspan.sub.extents-4.2.5] + return dynamic_extent; + } + _CCCL_UNREACHABLE(); + } + + template + _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI static constexpr typename _Extents::index_type + __get_dynamic_subextents(const _Extents& __src, _Slices... __slices) noexcept + { + using _SliceType = __get_slice_type<_SliceIndex, _Slices...>; + // [mdspan.sub.extents-5.1] + if constexpr (__is_strided_slice>) + { + _SliceType& __slice = _CUDA_VSTD::__get_slice_at<_SliceIndex>(__slices...); + return __slice.extent == 0 + ? 
0 + : 1 + (_CUDA_VSTD::__de_ice(__slice.extent) - 1) / _CUDA_VSTD::__de_ice(__slice.stride); + } + // [mdspan.sub.extents-5.2] + else + { + return _CUDA_VSTD::__last_extent_from_slice<_SliceIndex>(__src, __slices...) + - _CUDA_VSTD::__first_extent_from_slice(__slices...); + } + _CCCL_UNREACHABLE(); + } + + template + _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr auto + __impl(index_sequence<_SliceIndices...>, const _Extents& __src, _Slices... __slices) noexcept + { + using _IndexType = typename _Extents::index_type; + using _SubExtents = + extents<_IndexType, + __get_static_subextents<_Extents, _SliceIndices, __get_slice_type<_SliceIndices, _Slices...>>()...>; + return _SubExtents{__get_dynamic_subextents<_SliceIndices>(__src, __slices...)...}; + } + + template + _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr auto + operator()(const _Extents& __src, _Slices... __slices) noexcept + { + const auto __filtered_indices = __filter_slices_convertible_to_index( + index_sequence<>{}, _CUDA_VSTD::index_sequence_for<_Slices...>()); + return __impl(__filtered_indices, __src, __slices...); + } +}; + +template +_CCCL_INLINE_VAR constexpr bool __is_valid_subextents = + convertible_to<_SliceType, _IndexType> || __index_pair_like<_SliceType, _IndexType> + || _CCCL_TRAIT(is_convertible, _SliceType, full_extent_t) || __is_strided_slice>; + +_CCCL_TEMPLATE(class _Extents, class... _Slices) +_CCCL_REQUIRES((_Extents::rank() == sizeof...(_Slices))) +_CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr auto submdspan_extents(const _Extents& __src, _Slices... 
__slices) +{ + static_assert(_CCCL_FOLD_AND((__is_valid_subextents) ), + "[mdspan.sub.extents] For each rank index k of src.extents(), exactly one of the following is true:"); + return __get_subextent{}(__src, __slices...); +} + +template +using __get_subextents_t = + decltype(_CUDA_VSTD::submdspan_extents(_CUDA_VSTD::declval<_Extents>(), _CUDA_VSTD::declval<_Slices>()...)); + +_LIBCUDACXX_END_NAMESPACE_STD + +#endif // _CCCL_STD_VER >= 2014 + +#endif // _LIBCUDACXX___MDSPAN_SUBMDSPAN_EXTENTS_H diff --git a/libcudacxx/include/cuda/std/__mdspan/submdspan_helper.h b/libcudacxx/include/cuda/std/__mdspan/submdspan_helper.h new file mode 100644 index 00000000000..c617480e795 --- /dev/null +++ b/libcudacxx/include/cuda/std/__mdspan/submdspan_helper.h @@ -0,0 +1,252 @@ +//===----------------------------------------------------------------------===// +// +// Part of libcu++, the C++ Standard Library for your entire system, +// under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. 
+// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCUDACXX___MDSPAN_SUBMDSPAN_HELPER_H +#define _LIBCUDACXX___MDSPAN_SUBMDSPAN_HELPER_H + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if _CCCL_STD_VER >= 2014 + +_LIBCUDACXX_BEGIN_NAMESPACE_STD + +// [mdspan.sub.overview]-2.5 +template +_CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr array +__map_rank(size_t __count = 0) noexcept +{ + return {(convertible_to<_SliceTypes, _IndexType> ? dynamic_extent : __count++)...}; +} + +// [mdspan.submdspan.strided.slice] +template +struct strided_slice +{ + using offset_type = _OffsetType; + using extent_type = _ExtentType; + using stride_type = _StrideType; + + static_assert(__index_like, + "[mdspan.submdspan.strided.slice] cuda::std::strided_slice::offset_type must be signed or unsigned or " + "integral-constant-like"); + static_assert(__index_like, + "[mdspan.submdspan.strided.slice] cuda::std::strided_slice::extent_type must be signed or unsigned or " + "integral-constant-like"); + static_assert(__index_like, + "[mdspan.submdspan.strided.slice] cuda::std::strided_slice::stride_type must be signed or unsigned or " + "integral-constant-like"); + + _CCCL_NO_UNIQUE_ADDRESS offset_type offset{}; + _CCCL_NO_UNIQUE_ADDRESS extent_type extent{}; + _CCCL_NO_UNIQUE_ADDRESS stride_type stride{}; +}; + +template +strided_slice(_OffsetType, _ExtentType, _StrideType) -> strided_slice<_OffsetType, _ExtentType, _StrideType>; + +template +_CCCL_INLINE_VAR constexpr bool __is_strided_slice = false; + +template +_CCCL_INLINE_VAR constexpr bool __is_strided_slice> = true; + +struct full_extent_t +{ 
+ explicit full_extent_t() = default; +}; +_CCCL_GLOBAL_CONSTANT full_extent_t full_extent{}; + +// [mdspan.submdspan.submdspan.mapping.result] +template +struct submdspan_mapping_result +{ + static_assert(true, // __is_layout_mapping<_LayoutMapping>, + "[mdspan.submdspan.submdspan.mapping.result] shall meet the layout mapping requirements"); + + _CCCL_NO_UNIQUE_ADDRESS _LayoutMapping mapping{}; + size_t offset{}; +}; + +// [mdspan.submdspan.helpers] +_CCCL_TEMPLATE(class _Tp) +_CCCL_REQUIRES((!__integral_constant_like<_Tp>) ) +_CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr _Tp __de_ice(_Tp __val) +{ + return __val; +} + +_CCCL_TEMPLATE(class _Tp) +_CCCL_REQUIRES(__integral_constant_like<_Tp>) +_CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr _Tp __de_ice(_Tp) +{ + return _Tp::value; +} + +_CCCL_TEMPLATE(class _IndexType, class _From) +_CCCL_REQUIRES(_CCCL_TRAIT(is_integral, _From) _CCCL_AND(!_CCCL_TRAIT(is_same, _From, bool))) +_CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr auto __index_cast(_From&& __from) noexcept +{ + return __from; +} +_CCCL_TEMPLATE(class _IndexType, class _From) +_CCCL_REQUIRES((!_CCCL_TRAIT(is_integral, _From)) || _CCCL_TRAIT(is_same, _From, bool)) +_CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr auto __index_cast(_From&& __from) noexcept +{ + return static_cast<_IndexType>(__from); +} + +template +_CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr decltype(auto) __get_slice_at(_Slices&&... 
__slices) noexcept +{ + return _CUDA_VSTD::get<_Index>(_CUDA_VSTD::forward_as_tuple(_CUDA_VSTD::forward<_Slices>(__slices)...)); +} + +struct __first_extent_from_slice_impl +{ + _CCCL_TEMPLATE(class _IndexType, class _SliceType) + _CCCL_REQUIRES(convertible_to<_SliceType, _IndexType>) + _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI static constexpr _IndexType __get(_SliceType& __slice) noexcept + { + return _CUDA_VSTD::__index_cast<_IndexType>(__slice); + } + _CCCL_TEMPLATE(class _IndexType, class _SliceType) + _CCCL_REQUIRES((!convertible_to<_SliceType, _IndexType>) _CCCL_AND __index_pair_like<_SliceType, _IndexType>) + _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI static constexpr _IndexType __get(_SliceType& __slice) noexcept + { + return _CUDA_VSTD::__index_cast<_IndexType>(_CUDA_VSTD::get<0>(__slice)); + } + _CCCL_TEMPLATE(class _IndexType, class _SliceType) + _CCCL_REQUIRES((!convertible_to<_SliceType, _IndexType>) _CCCL_AND(!__index_pair_like<_SliceType, _IndexType>) + _CCCL_AND __is_strided_slice<_SliceType>) + _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI static constexpr _IndexType __get(_SliceType& __slice) noexcept + { + return _CUDA_VSTD::__index_cast<_IndexType>(_CUDA_VSTD::__de_ice(__slice.offset)); + } + _CCCL_TEMPLATE(class _IndexType, class _SliceType) + _CCCL_REQUIRES((!convertible_to<_SliceType, _IndexType>) _CCCL_AND(!__index_pair_like<_SliceType, _IndexType>) + _CCCL_AND(!__is_strided_slice<_SliceType>)) + _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI static constexpr _IndexType __get(_SliceType&) noexcept + { + return 0; + } +}; + +template +using __get_slice_type = __tuple_element_t<_Index, __tuple_types<_Slices...>>; + +template +_CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr _IndexType __first_extent_from_slice(_Slices... 
__slices) noexcept +{ + static_assert(_CCCL_TRAIT(is_signed, _IndexType) || _CCCL_TRAIT(is_unsigned, _IndexType), + "[mdspan.sub.helpers] mandates IndexType to be a signed or unsigned integral"); + using _SliceType = __get_slice_type<_Index, _Slices...>; + _SliceType& __slice = _CUDA_VSTD::__get_slice_at<_Index>(__slices...); + return __first_extent_from_slice_impl::template __get<_IndexType>(__slice); +} + +template +struct __last_extent_from_slice_impl +{ + _CCCL_TEMPLATE(class _IndexType, class _SliceType, class _Extents) + _CCCL_REQUIRES(convertible_to<_SliceType, _IndexType>) + _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI static constexpr _IndexType + __get(_SliceType& __slice, const _Extents&) noexcept + { + return _CUDA_VSTD::__index_cast<_IndexType>(_CUDA_VSTD::__de_ice(__slice) + 1); + } + _CCCL_TEMPLATE(class _IndexType, class _SliceType, class _Extents) + _CCCL_REQUIRES((!convertible_to<_SliceType, _IndexType>) _CCCL_AND __index_pair_like<_SliceType, _IndexType>) + _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI static constexpr _IndexType + __get(_SliceType& __slice, const _Extents&) noexcept + { + return _CUDA_VSTD::__index_cast<_IndexType>(_CUDA_VSTD::get<1>(__slice)); + } + _CCCL_TEMPLATE(class _IndexType, class _SliceType, class _Extents) + _CCCL_REQUIRES((!convertible_to<_SliceType, _IndexType>) _CCCL_AND(!__index_pair_like<_SliceType, _IndexType>) + _CCCL_AND __is_strided_slice<_SliceType>) + _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI static constexpr _IndexType + __get(_SliceType& __slice, const _Extents&) noexcept + { + return _CUDA_VSTD::__index_cast<_IndexType>( + _CUDA_VSTD::__de_ice(__slice.offset) * _CUDA_VSTD::__de_ice(__slice.extent)); + } + _CCCL_TEMPLATE(class _IndexType, class _SliceType, class _Extents) + _CCCL_REQUIRES((!convertible_to<_SliceType, _IndexType>) _CCCL_AND(!__index_pair_like<_SliceType, _IndexType>) + _CCCL_AND(!__is_strided_slice<_SliceType>)) + _CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI static constexpr _IndexType + 
__get(_SliceType&, const _Extents& __src) noexcept + { + return _CUDA_VSTD::__index_cast<_IndexType>(__src.extent(_Index)); + } +}; + +template +_CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr typename _Extents::index_type +__last_extent_from_slice(const _Extents& __src, _Slices... __slices) noexcept +{ + static_assert(_CCCL_TRAIT(__mdspan_detail::__is_extents, _Extents), + "[mdspan.sub.helpers] mandates Extents to be a specialization of extents"); + using _IndexType = typename _Extents::index_type; + using _SliceType = __get_slice_type<_Index, _Slices...>; + _SliceType& __slice = _CUDA_VSTD::__get_slice_at<_Index>(__slices...); + return __last_extent_from_slice_impl<_Index>::template __get<_IndexType>(__slice, __src); +} + +template +_CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr array<_IndexType, sizeof...(_Slices)> __src_indices( + index_sequence<_SliceIndexes...>, const array<_IndexType, sizeof...(_Slices)>& __indices, _Slices... __slices) noexcept +{ + constexpr array __ranks = _CUDA_VSTD::__map_rank<_IndexType, _Slices...>(); + array<_IndexType, sizeof...(_Slices)> __arr = { + _CUDA_VSTD::__first_extent_from_slice<_IndexType, _SliceIndexes>(__slices)...}; + + for (size_t __index = 0; __index < sizeof...(_Slices); ++__index) + { + if (__ranks[__index] != dynamic_extent) + { + __arr += __indices[__ranks[__index]]; + } + } + return __arr; +} + +template +_CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr array<_IndexType, sizeof...(_Slices)> +__src_indices(const array<_IndexType, sizeof...(_Slices)>& __indices, _Slices... 
__slices) noexcept +{ + return _CUDA_VSTD::__src_indices(index_sequence_for<_Slices...>(), __indices, __slices...); +} + +_LIBCUDACXX_END_NAMESPACE_STD + +#endif // _CCCL_STD_VER >= 2014 + +#endif // _LIBCUDACXX___MDSPAN_SUBMDSPAN_HELPER_H diff --git a/libcudacxx/include/cuda/std/mdspan b/libcudacxx/include/cuda/std/mdspan index 15fdcf27aa1..75e4b8f3705 100644 --- a/libcudacxx/include/cuda/std/mdspan +++ b/libcudacxx/include/cuda/std/mdspan @@ -31,6 +31,8 @@ _CCCL_PUSH_MACROS #include #include #include +#include +#include #include _CCCL_POP_MACROS diff --git a/libcudacxx/include/cuda/std/version b/libcudacxx/include/cuda/std/version index 951680733c4..79f9dd8f60a 100644 --- a/libcudacxx/include/cuda/std/version +++ b/libcudacxx/include/cuda/std/version @@ -96,6 +96,7 @@ # define __cccl_lib_span 202311L # define __cccl_lib_span_initializer_list 202311L // # define __cccl_lib_string_udls 201304L +# define __cccl_lib_submdspan 202207L # define __cccl_lib_tuples_by_type 201304L #endif // _CCCL_STD_VER >= 2014 diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/submdspan/strided_slice.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/submdspan/strided_slice.pass.cpp new file mode 100644 index 00000000000..028d9b8ceef --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/submdspan/strided_slice.pass.cpp @@ -0,0 +1,129 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. 
+// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +#include +#include +#include + +#include "test_macros.h" + +template +struct integral_like : cuda::std::integral_constant +{}; +static_assert(cuda::std::__integral_constant_like>, ""); + +template +struct not_integral_like : cuda::std::integral_constant +{ + __host__ __device__ constexpr not_integral_like(int) noexcept {} +}; +static_assert(!cuda::std::__integral_constant_like>, ""); + +template +_CCCL_CONCEPT_FRAGMENT(can_strided_slice_, + requires()( // + (cuda::std::strided_slice{}) // + )); + +template +_CCCL_CONCEPT can_strided_slice = _CCCL_FRAGMENT(can_strided_slice_, OffsetType, ExtentType, StrideType); + +static_assert(can_strided_slice, ""); +static_assert(can_strided_slice, int, int>, ""); +static_assert(can_strided_slice, int>, ""); +static_assert(can_strided_slice>, ""); + +// We cannot check mandates with the current setup +// static_assert(!can_strided_slice>, ""); +// static_assert(!can_strided_slice, int, int>, ""); +// static_assert(!can_strided_slice, int>, ""); +// static_assert(!can_strided_slice>, ""); + +template ::value, int> = 0> +__host__ __device__ constexpr T construct_from_int(int val) noexcept +{ + return T(val); +} +template ::value, int> = 0> +__host__ __device__ constexpr T construct_from_int(int) noexcept +{ + return T{}; +} + +template +__host__ __device__ constexpr void test() +{ + using strided_slice = cuda::std::strided_slice; + // Ensure we are trivially copy/move constructible + static_assert(cuda::std::is_trivially_copy_constructible::value, ""); + static_assert(cuda::std::is_trivially_move_constructible::value, ""); + +#if defined(_CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS) + // Ensure we properly do not store compile time sizes + static_assert(sizeof(strided_slice) == sizeof(OffsetType) + sizeof(ExtentType) + sizeof(StrideType), ""); +#endif // _CCCL_HAS_NO_ATTRIBUTE_NO_UNIQUE_ADDRESS + + // Ensure we have the 
right alias types + static_assert(cuda::std::is_same::value, ""); + static_assert(cuda::std::is_same::value, ""); + static_assert(cuda::std::is_same::value, ""); + + // Ensure we have the right members with the right types + static_assert(cuda::std::is_same::value, ""); + static_assert(cuda::std::is_same::value, ""); + static_assert(cuda::std::is_same::value, ""); + + { + strided_slice zero_initialized; + assert(zero_initialized.offset == (cuda::std::is_empty::value ? 42 : 0)); + assert(zero_initialized.extent == (cuda::std::is_empty::value ? 42 : 0)); + assert(zero_initialized.stride == (cuda::std::is_empty::value ? 42 : 0)); + } + { + strided_slice value_initialized{}; + assert(value_initialized.offset == (cuda::std::is_empty::value ? 42 : 0)); + assert(value_initialized.extent == (cuda::std::is_empty::value ? 42 : 0)); + assert(value_initialized.stride == (cuda::std::is_empty::value ? 42 : 0)); + } + { + strided_slice list_initialized = { + construct_from_int(1), construct_from_int(2), construct_from_int(3)}; + assert(list_initialized.offset == (cuda::std::is_empty::value ? 42 : 1)); + assert(list_initialized.extent == (cuda::std::is_empty::value ? 42 : 2)); + assert(list_initialized.stride == (cuda::std::is_empty::value ? 42 : 3)); + } + { + strided_slice aggregate_initialized = { + .offset = construct_from_int(1), + .extent = construct_from_int(2), + .stride = construct_from_int(3)}; + assert(aggregate_initialized.offset == (cuda::std::is_empty::value ? 42 : 1)); + assert(aggregate_initialized.extent == (cuda::std::is_empty::value ? 42 : 2)); + assert(aggregate_initialized.stride == (cuda::std::is_empty::value ? 
42 : 3)); + } +} + +__host__ __device__ constexpr bool test() +{ + test(); + test, int, int>(); + test, int>(); + test>(); + + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/submdspan/submdspan_extent.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/submdspan/submdspan_extent.pass.cpp new file mode 100644 index 00000000000..a4d3b67e785 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/submdspan/submdspan_extent.pass.cpp @@ -0,0 +1,467 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11 + +#include +#include +#include + +#include "test_macros.h" + +template +_CCCL_CONCEPT can_submdspan_extents = _CCCL_REQUIRES_EXPR( + (Extent, variadic Slices), const Extent& ext, Slices... 
slices)((cuda::std::submdspan_extents(ext, slices...))); + +__host__ __device__ constexpr bool test() +{ + { // single dimension, all static + cuda::std::extents ext{}; + + using extents_t = decltype(ext); + static_assert(extents_t::rank() == 1); + static_assert(extents_t::rank_dynamic() == 0); + assert(ext.extent(0) == 3); + assert(ext.static_extent(0) == 3); + + { // [mdspan.sub.extents-4.1] + // S_k convertible_to + cuda::std::extents sub_ext = cuda::std::submdspan_extents(ext, 1); + + using subextents_t = decltype(sub_ext); + static_assert(subextents_t::rank() == 0); + static_assert(subextents_t::rank_dynamic() == 0); + unused(sub_ext); + } + + { // [mdspan.sub.extents-4.2.1] + // S_k is_convertible + cuda::std::extents sub_ext = cuda::std::submdspan_extents(ext, cuda::std::full_extent); + + using subextents_t = decltype(sub_ext); + static_assert(subextents_t::rank() == extents_t::rank()); + static_assert(subextents_t::rank_dynamic() == extents_t::rank_dynamic()); + assert(sub_ext.static_extent(0) == ext.static_extent(0)); + unused(sub_ext); + } + + { // [mdspan.sub.extents-4.2.2] + // S_k models index-pair-like and both model integral-contant-like + const auto slice = + cuda::std::pair{cuda::std::integral_constant{}, cuda::std::integral_constant{}}; + cuda::std::extents sub_ext = cuda::std::submdspan_extents(ext, slice); + + using subextents_t = decltype(sub_ext); + static_assert(subextents_t::rank() == extents_t::rank()); + static_assert(subextents_t::rank_dynamic() == extents_t::rank_dynamic()); + assert(sub_ext.extent(0) == 1); + assert(sub_ext.static_extent(0) == 1); + unused(sub_ext); + } + + { // [mdspan.sub.extents-4.2.2] + // S_k models index-pair-like and one does not model integral-contant-like + const auto slice = cuda::std::pair{cuda::std::integral_constant{}, 2}; + cuda::std::extents sub_ext = cuda::std::submdspan_extents(ext, slice); + + using subextents_t = decltype(sub_ext); + static_assert(subextents_t::rank() == extents_t::rank()); + 
static_assert(subextents_t::rank_dynamic() == extents_t::rank_dynamic() + 1); + assert(sub_ext.extent(0) == 1); + assert(sub_ext.static_extent(0) == cuda::std::dynamic_extent); + unused(sub_ext); + } + + { // [mdspan.sub.extents-4.2.3] + // S_k models index-pair-like and one does not model integral-contant-like + const auto slice = cuda::std::pair{1, cuda::std::integral_constant{}}; + cuda::std::extents sub_ext = cuda::std::submdspan_extents(ext, slice); + + using subextents_t = decltype(sub_ext); + static_assert(subextents_t::rank() == extents_t::rank()); + static_assert(subextents_t::rank_dynamic() == extents_t::rank_dynamic() + 1); + assert(sub_ext.extent(0) == 1); + assert(sub_ext.static_extent(0) == cuda::std::dynamic_extent); + unused(sub_ext); + } + + { // [mdspan.sub.extents-4.2.3] + // S_k is a specialization of strided_slice and extend_type models integral-contant-like and is zero + const auto slice = cuda::std::strided_slice{0, cuda::std::integral_constant{}, 1}; + static_assert(cuda::std::__integral_constant_like); + cuda::std::extents sub_ext = cuda::std::submdspan_extents(ext, slice); + + using subextents_t = decltype(sub_ext); + static_assert(subextents_t::rank() == extents_t::rank()); + static_assert(subextents_t::rank_dynamic() == extents_t::rank_dynamic()); + // [mdspan.sub.extents-5.1] + // S_k.extent == 0 ? 
0 : 1 + (de-ice(S_k.extent) - 1) / de-ice(S_k.stride) + assert(sub_ext.extent(0) == 0); + assert(sub_ext.static_extent(0) == 0); + unused(sub_ext); + } + + { // [mdspan.sub.extents-4.2.3] + // S_k is a specialization of strided_slice and extend_type models integral-contant-like and is not zero + const auto slice = cuda::std::strided_slice{0, cuda::std::integral_constant{}, 1}; + static_assert(cuda::std::__integral_constant_like); + cuda::std::extents sub_ext = cuda::std::submdspan_extents(ext, slice); + + using subextents_t = decltype(sub_ext); + static_assert(subextents_t::rank() == extents_t::rank()); + static_assert(subextents_t::rank_dynamic() == extents_t::rank_dynamic() + 1); + assert(sub_ext.extent(0) == 2); + assert(sub_ext.static_extent(0) == cuda::std::dynamic_extent); + unused(sub_ext); + } + + { // [mdspan.sub.extents-4.2.4] + // S_k is a specialization of strided_slice and extend_type and stride_type model integral-contant-like but extent + // is zero + const auto slice = cuda::std::strided_slice{ + 0, cuda::std::integral_constant{}, cuda::std::integral_constant{}}; + static_assert(cuda::std::__integral_constant_like); + static_assert(cuda::std::__integral_constant_like); + cuda::std::extents sub_ext = cuda::std::submdspan_extents(ext, slice); + + using subextents_t = decltype(sub_ext); + static_assert(subextents_t::rank() == extents_t::rank()); + static_assert(subextents_t::rank_dynamic() == extents_t::rank_dynamic()); + // [mdspan.sub.extents-5.1] + // S_k.extent == 0 ? 
0 : 1 + (de-ice(S_k.extent) - 1) / de-ice(S_k.stride) + assert(sub_ext.extent(0) == 0); + assert(sub_ext.static_extent(0) == 0); + unused(sub_ext); + } + + { // [mdspan.sub.extents-4.2.4] + // S_k is a specialization of strided_slice and extend_type and stride_type model integral-contant-like + const auto slice = cuda::std::strided_slice{ + 0, cuda::std::integral_constant{}, cuda::std::integral_constant{}}; + static_assert(cuda::std::__integral_constant_like); + static_assert(cuda::std::__integral_constant_like); + cuda::std::extents sub_ext = cuda::std::submdspan_extents(ext, slice); + + using subextents_t = decltype(sub_ext); + static_assert(subextents_t::rank() == extents_t::rank()); + static_assert(subextents_t::rank_dynamic() == extents_t::rank_dynamic()); + // [mdspan.sub.extents-5.1] + // S_k.extent == 0 ? 0 : 1 + (de-ice(S_k.extent) - 1) / de-ice(S_k.stride) + assert(sub_ext.extent(0) == 1); + assert(sub_ext.static_extent(0) == 1); + unused(sub_ext); + } + + { // Constraints: sizeof...(slices) == Extents::rank + static_assert(!can_submdspan_extents>); + static_assert(can_submdspan_extents, size_t>); + static_assert(!can_submdspan_extents, size_t, size_t>); + } + } + + { // single dimension, all dynamic + cuda::std::extents ext{3}; + + using extents_t = decltype(ext); + static_assert(extents_t::rank() == 1); + static_assert(extents_t::rank_dynamic() == 1); + assert(ext.extent(0) == 3); + assert(ext.static_extent(0) == cuda::std::dynamic_extent); + + { // [mdspan.sub.extents-4.1] + // S_k convertible_to + cuda::std::extents sub_ext = cuda::std::submdspan_extents(ext, 1); + + using subextents_t = decltype(sub_ext); + static_assert(subextents_t::rank() == 0); + static_assert(subextents_t::rank_dynamic() == extents_t::rank_dynamic() - 1); + unused(sub_ext); + } + + { // [mdspan.sub.extents-4.2.1] + // S_k is_convertible + cuda::std::extents sub_ext = cuda::std::submdspan_extents(ext, cuda::std::full_extent); + + using subextents_t = decltype(sub_ext); + 
static_assert(subextents_t::rank() == extents_t::rank()); + static_assert(subextents_t::rank_dynamic() == extents_t::rank_dynamic()); + assert(sub_ext.static_extent(0) == ext.static_extent(0)); + unused(sub_ext); + } + + { // [mdspan.sub.extents-4.2.2] + // S_k models index-pair-like and both model integral-contant-like + const auto slice = + cuda::std::pair{cuda::std::integral_constant{}, cuda::std::integral_constant{}}; + cuda::std::extents sub_ext = cuda::std::submdspan_extents(ext, slice); + + using subextents_t = decltype(sub_ext); + static_assert(subextents_t::rank() == extents_t::rank()); + static_assert(subextents_t::rank_dynamic() == extents_t::rank_dynamic() - 1); + assert(sub_ext.extent(0) == 1); + assert(sub_ext.static_extent(0) == 1); + unused(sub_ext); + } + + { // [mdspan.sub.extents-4.2.2] + // S_k models index-pair-like and one does not model integral-contant-like + const auto slice = cuda::std::pair{cuda::std::integral_constant{}, 2}; + cuda::std::extents sub_ext = cuda::std::submdspan_extents(ext, slice); + + using subextents_t = decltype(sub_ext); + static_assert(subextents_t::rank() == extents_t::rank()); + static_assert(subextents_t::rank_dynamic() == extents_t::rank_dynamic()); + assert(sub_ext.extent(0) == 1); + assert(sub_ext.static_extent(0) == cuda::std::dynamic_extent); + unused(sub_ext); + } + + { // [mdspan.sub.extents-4.2.3] + // S_k models index-pair-like and one does not model integral-contant-like + const auto slice = cuda::std::pair{1, cuda::std::integral_constant{}}; + cuda::std::extents sub_ext = cuda::std::submdspan_extents(ext, slice); + + using subextents_t = decltype(sub_ext); + static_assert(subextents_t::rank() == extents_t::rank()); + static_assert(subextents_t::rank_dynamic() == extents_t::rank_dynamic()); + assert(sub_ext.extent(0) == 1); + assert(sub_ext.static_extent(0) == cuda::std::dynamic_extent); + unused(sub_ext); + } + + { // [mdspan.sub.extents-4.2.3] + // S_k is a specialization of strided_slice and 
extend_type models integral-contant-like and is zero + const auto slice = cuda::std::strided_slice{0, cuda::std::integral_constant{}, 1}; + static_assert(cuda::std::__integral_constant_like); + cuda::std::extents sub_ext = cuda::std::submdspan_extents(ext, slice); + + using subextents_t = decltype(sub_ext); + static_assert(subextents_t::rank() == extents_t::rank()); + static_assert(subextents_t::rank_dynamic() == extents_t::rank_dynamic() - 1); + // [mdspan.sub.extents-5.1] + // S_k.extent == 0 ? 0 : 1 + (de-ice(S_k.extent) - 1) / de-ice(S_k.stride) + assert(sub_ext.extent(0) == 0); + assert(sub_ext.static_extent(0) == 0); + unused(sub_ext); + } + + { // [mdspan.sub.extents-4.2.3] + // S_k is a specialization of strided_slice and extend_type models integral-contant-like and is not zero + const auto slice = cuda::std::strided_slice{0, cuda::std::integral_constant{}, 1}; + static_assert(cuda::std::__integral_constant_like); + cuda::std::extents sub_ext = cuda::std::submdspan_extents(ext, slice); + + using subextents_t = decltype(sub_ext); + static_assert(subextents_t::rank() == extents_t::rank()); + static_assert(subextents_t::rank_dynamic() == extents_t::rank_dynamic()); + assert(sub_ext.extent(0) == 2); + assert(sub_ext.static_extent(0) == cuda::std::dynamic_extent); + unused(sub_ext); + } + + { // [mdspan.sub.extents-4.2.4] + // S_k is a specialization of strided_slice and extend_type and stride_type model integral-contant-like but extent + // is zero + const auto slice = cuda::std::strided_slice{ + 0, cuda::std::integral_constant{}, cuda::std::integral_constant{}}; + static_assert(cuda::std::__integral_constant_like); + static_assert(cuda::std::__integral_constant_like); + cuda::std::extents sub_ext = cuda::std::submdspan_extents(ext, slice); + + using subextents_t = decltype(sub_ext); + static_assert(subextents_t::rank() == extents_t::rank()); + static_assert(subextents_t::rank_dynamic() == extents_t::rank_dynamic() - 1); + // [mdspan.sub.extents-5.1] + // 
S_k.extent == 0 ? 0 : 1 + (de-ice(S_k.extent) - 1) / de-ice(S_k.stride) + assert(sub_ext.extent(0) == 0); + assert(sub_ext.static_extent(0) == 0); + unused(sub_ext); + } + + { // [mdspan.sub.extents-4.2.4] + // S_k is a specialization of strided_slice and extend_type and stride_type model integral-contant-like + const auto slice = cuda::std::strided_slice{ + 0, cuda::std::integral_constant{}, cuda::std::integral_constant{}}; + static_assert(cuda::std::__integral_constant_like); + static_assert(cuda::std::__integral_constant_like); + cuda::std::extents sub_ext = cuda::std::submdspan_extents(ext, slice); + + using subextents_t = decltype(sub_ext); + static_assert(subextents_t::rank() == extents_t::rank()); + static_assert(subextents_t::rank_dynamic() == extents_t::rank_dynamic() - 1); + // [mdspan.sub.extents-5.1] + // S_k.extent == 0 ? 0 : 1 + (de-ice(S_k.extent) - 1) / de-ice(S_k.stride) + assert(sub_ext.extent(0) == 1); + assert(sub_ext.static_extent(0) == 1); + unused(sub_ext); + } + + { // Constraints: sizeof...(slices) == Extents::rank + static_assert(!can_submdspan_extents>); + static_assert(can_submdspan_extents, size_t>); + static_assert(!can_submdspan_extents, size_t, size_t>); + } + } + + { // multi dimension, mixed + cuda::std::extents ext{3}; + + using extents_t = decltype(ext); + static_assert(extents_t::rank() == 3); + static_assert(extents_t::rank_dynamic() == 1); + assert(ext.extent(0) == 2); + assert(ext.extent(1) == 3); + assert(ext.extent(2) == 4); + assert(ext.static_extent(0) == 2); + assert(ext.static_extent(1) == cuda::std::dynamic_extent); + assert(ext.static_extent(2) == 4); + + { // [mdspan.sub.extents-4.1] + // S_k convertible_to + cuda::std::extents sub_ext = cuda::std::submdspan_extents(ext, 1, 2, 1); + + using subextents_t = decltype(sub_ext); + static_assert(subextents_t::rank() == 0); + static_assert(subextents_t::rank_dynamic() == 0); + unused(sub_ext); + } + + { // [mdspan.sub.extents-4.2.1] + // S_k is_convertible + 
cuda::std::extents sub_ext = cuda::std::submdspan_extents(ext, 1, cuda::std::full_extent, 1); + + using subextents_t = decltype(sub_ext); + static_assert(subextents_t::rank() == 1); + static_assert(subextents_t::rank_dynamic() == extents_t::rank_dynamic()); + assert(sub_ext.extent(0) == ext.extent(1)); + unused(sub_ext); + } + + { // [mdspan.sub.extents-4.2.2] + // S_k models index-pair-like and both model integral-contant-like + const auto slice = + cuda::std::pair{cuda::std::integral_constant{}, cuda::std::integral_constant{}}; + cuda::std::extents sub_ext = cuda::std::submdspan_extents(ext, 1, slice, 1); + + using subextents_t = decltype(sub_ext); + static_assert(subextents_t::rank() == 1); + static_assert(subextents_t::rank_dynamic() == extents_t::rank_dynamic() - 1); + assert(sub_ext.extent(0) == 1); + assert(sub_ext.static_extent(0) == 1); + unused(sub_ext); + } + + { // [mdspan.sub.extents-4.2.2] + // S_k models index-pair-like and one does not model integral-contant-like + const auto slice = cuda::std::pair{cuda::std::integral_constant{}, 2}; + cuda::std::extents sub_ext = cuda::std::submdspan_extents(ext, 1, slice, 1); + + using subextents_t = decltype(sub_ext); + static_assert(subextents_t::rank() == extents_t::rank() - 2); + static_assert(subextents_t::rank_dynamic() == extents_t::rank_dynamic()); + assert(sub_ext.extent(0) == 1); + assert(sub_ext.static_extent(0) == cuda::std::dynamic_extent); + unused(sub_ext); + } + + { // [mdspan.sub.extents-4.2.3] + // S_k models index-pair-like and one does not model integral-contant-like + const auto slice = cuda::std::pair{1, cuda::std::integral_constant{}}; + cuda::std::extents sub_ext = cuda::std::submdspan_extents(ext, 1, slice, 1); + + using subextents_t = decltype(sub_ext); + static_assert(subextents_t::rank() == extents_t::rank() - 2); + static_assert(subextents_t::rank_dynamic() == extents_t::rank_dynamic()); + assert(sub_ext.extent(0) == 1); + assert(sub_ext.static_extent(0) == cuda::std::dynamic_extent); 
+ unused(sub_ext); + } + + { // [mdspan.sub.extents-4.2.3] + // S_k is a specialization of strided_slice and extend_type models integral-contant-like and is zero + const auto slice = cuda::std::strided_slice{0, cuda::std::integral_constant{}, 1}; + static_assert(cuda::std::__integral_constant_like); + cuda::std::extents sub_ext = cuda::std::submdspan_extents(ext, 1, slice, 1); + + using subextents_t = decltype(sub_ext); + static_assert(subextents_t::rank() == extents_t::rank() - 2); + static_assert(subextents_t::rank_dynamic() == extents_t::rank_dynamic() - 1); + // [mdspan.sub.extents-5.1] + // S_k.extent == 0 ? 0 : 1 + (de-ice(S_k.extent) - 1) / de-ice(S_k.stride) + assert(sub_ext.extent(0) == 0); + assert(sub_ext.static_extent(0) == 0); + unused(sub_ext); + } + + { // [mdspan.sub.extents-4.2.3] + // S_k is a specialization of strided_slice and extend_type models integral-contant-like and is not zero + const auto slice = cuda::std::strided_slice{0, cuda::std::integral_constant{}, 1}; + static_assert(cuda::std::__integral_constant_like); + cuda::std::extents sub_ext = cuda::std::submdspan_extents(ext, 1, slice, 1); + + using subextents_t = decltype(sub_ext); + static_assert(subextents_t::rank() == extents_t::rank() - 2); + static_assert(subextents_t::rank_dynamic() == extents_t::rank_dynamic()); + assert(sub_ext.extent(0) == 2); + assert(sub_ext.static_extent(0) == cuda::std::dynamic_extent); + unused(sub_ext); + } + + { // [mdspan.sub.extents-4.2.4] + // S_k is a specialization of strided_slice and extend_type and stride_type model integral-contant-like but extent + // is zero + const auto slice = cuda::std::strided_slice{ + 0, cuda::std::integral_constant{}, cuda::std::integral_constant{}}; + static_assert(cuda::std::__integral_constant_like); + static_assert(cuda::std::__integral_constant_like); + cuda::std::extents sub_ext = cuda::std::submdspan_extents(ext, 1, slice, 1); + + using subextents_t = decltype(sub_ext); + static_assert(subextents_t::rank() == 
extents_t::rank() - 2); + static_assert(subextents_t::rank_dynamic() == extents_t::rank_dynamic() - 1); + // [mdspan.sub.extents-5.1] + // S_k.extent == 0 ? 0 : 1 + (de-ice(S_k.extent) - 1) / de-ice(S_k.stride) + assert(sub_ext.extent(0) == 0); + assert(sub_ext.static_extent(0) == 0); + unused(sub_ext); + } + + { // [mdspan.sub.extents-4.2.4] + // S_k is a specialization of strided_slice and extend_type and stride_type model integral-contant-like + const auto slice = cuda::std::strided_slice{ + 0, cuda::std::integral_constant{}, cuda::std::integral_constant{}}; + static_assert(cuda::std::__integral_constant_like); + static_assert(cuda::std::__integral_constant_like); + cuda::std::extents sub_ext = cuda::std::submdspan_extents(ext, 1, slice, 1); + + using subextents_t = decltype(sub_ext); + static_assert(subextents_t::rank() == extents_t::rank() - 2); + static_assert(subextents_t::rank_dynamic() == extents_t::rank_dynamic() - 1); + // [mdspan.sub.extents-5.1] + // S_k.extent == 0 ? 0 : 1 + (de-ice(S_k.extent) - 1) / de-ice(S_k.stride) + assert(sub_ext.extent(0) == 1); + assert(sub_ext.static_extent(0) == 1); + unused(sub_ext); + } + + { // Constraints: sizeof...(slices) == Extents::rank + static_assert(!can_submdspan_extents>); + static_assert(can_submdspan_extents, size_t>); + static_assert(!can_submdspan_extents, size_t, size_t>); + } + } + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} From b6fc321c1bf7d0ec4636b2370575306d8b04bccd Mon Sep 17 00:00:00 2001 From: Michael Schellenberger Costa Date: Fri, 9 Aug 2024 15:55:15 +0200 Subject: [PATCH 3/3] Implement `submdspan_mapping` --- .../cuda/std/__mdspan/submdspan_helper.h | 15 +- .../cuda/std/__mdspan/submdspan_mapping.h | 325 ++++++++++++++ libcudacxx/include/cuda/std/mdspan | 1 + .../views/mdspan/submdspan/helper.h | 55 +++ .../mdspan/submdspan/layout_left.pass.cpp | 413 +++++++++++++++++ .../mdspan/submdspan/layout_right.pass.cpp | 414 ++++++++++++++++++ 6 files 
changed, 1210 insertions(+), 13 deletions(-) create mode 100644 libcudacxx/include/cuda/std/__mdspan/submdspan_mapping.h create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/submdspan/helper.h create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/submdspan/layout_left.pass.cpp create mode 100644 libcudacxx/test/libcudacxx/std/containers/views/mdspan/submdspan/layout_right.pass.cpp diff --git a/libcudacxx/include/cuda/std/__mdspan/submdspan_helper.h b/libcudacxx/include/cuda/std/__mdspan/submdspan_helper.h index c617480e795..2f90614f414 100644 --- a/libcudacxx/include/cuda/std/__mdspan/submdspan_helper.h +++ b/libcudacxx/include/cuda/std/__mdspan/submdspan_helper.h @@ -84,28 +84,17 @@ struct full_extent_t }; _CCCL_GLOBAL_CONSTANT full_extent_t full_extent{}; -// [mdspan.submdspan.submdspan.mapping.result] -template -struct submdspan_mapping_result -{ - static_assert(true, // __is_layout_mapping<_LayoutMapping>, - "[mdspan.submdspan.submdspan.mapping.result] shall meet the layout mapping requirements"); - - _CCCL_NO_UNIQUE_ADDRESS _LayoutMapping mapping{}; - size_t offset{}; -}; - // [mdspan.submdspan.helpers] _CCCL_TEMPLATE(class _Tp) _CCCL_REQUIRES((!__integral_constant_like<_Tp>) ) -_CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr _Tp __de_ice(_Tp __val) +_CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr _Tp __de_ice(_Tp __val) noexcept { return __val; } _CCCL_TEMPLATE(class _Tp) _CCCL_REQUIRES(__integral_constant_like<_Tp>) -_CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr _Tp __de_ice(_Tp) +_CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr auto __de_ice(_Tp) noexcept { return _Tp::value; } diff --git a/libcudacxx/include/cuda/std/__mdspan/submdspan_mapping.h b/libcudacxx/include/cuda/std/__mdspan/submdspan_mapping.h new file mode 100644 index 00000000000..657511d9b7b --- /dev/null +++ b/libcudacxx/include/cuda/std/__mdspan/submdspan_mapping.h @@ -0,0 +1,325 @@ 
+//===----------------------------------------------------------------------===// +// +// Part of libcu++, the C++ Standard Library for your entire system, +// under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCUDACXX___MDSPAN_SUBMDSPAN_MAPPING_H +#define _LIBCUDACXX___MDSPAN_SUBMDSPAN_MAPPING_H + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if _CCCL_STD_VER >= 2014 + +_LIBCUDACXX_BEGIN_NAMESPACE_STD + +// [mdspan.sub.map] + +// [mdspan.submdspan.submdspan.mapping.result] +template +struct submdspan_mapping_result +{ + static_assert(true, // __is_layout_mapping<_LayoutMapping>, + "[mdspan.submdspan.submdspan.mapping.result] shall meet the layout mapping requirements"); + + _CCCL_NO_UNIQUE_ADDRESS _LayoutMapping mapping{}; + size_t offset{}; +}; + +// [mdspan.sub.map.common] +template +_CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr auto +__get_submdspan_strides(const _LayoutMapping& __mapping, _Slices... 
__slices) noexcept +{ + using _SliceType = __get_slice_type<_SliceIndex, _Slices...>; + using _Extents = typename _LayoutMapping::extents_type; + using _IndexType = typename _Extents::index_type; + if constexpr (__is_strided_slice>) + { + _SliceType& __slice = _CUDA_VSTD::__get_slice_at<_SliceIndex>(__slices...); + using __unsigned_stride = make_unsigned_t; + using __unsigned_extent = make_unsigned_t; + return static_cast<_IndexType>( + __mapping.stride(_SliceIndex) + * (static_cast<__unsigned_stride>(__slice.stride) < static_cast<__unsigned_extent>(__slice.extent) + ? _CUDA_VSTD::__de_ice(__slice.stride) + : 1)); + } + else + { + return static_cast<_IndexType>(__mapping.stride(_SliceIndex)); + } +} + +template +_CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr auto +__submdspan_strides(index_sequence<_SliceIndices...>, const _LayoutMapping& __mapping, _Slices... __slices) noexcept +{ + using _Extents = typename _LayoutMapping::extents_type; + using _IndexType = typename _Extents::index_type; + using _SubExtents = __get_subextents_t<_Extents, _Slices...>; + return array<_IndexType, _SubExtents::rank()>{ + _CUDA_VSTD::__get_submdspan_strides<_SliceIndices>(__mapping, __slices...)...}; +} + +template +_CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr auto +__submdspan_strides(const _LayoutMapping& __mapping, _Slices... __slices) +{ + using _Extents = typename _LayoutMapping::extents_type; + using _IndexType = typename _Extents::index_type; + const auto __filtered_indices = __filter_slices_convertible_to_index( + index_sequence<>{}, _CUDA_VSTD::index_sequence_for<_Slices...>()); + return _CUDA_VSTD::__submdspan_strides(__filtered_indices, __mapping, __slices...); +} + +// [mdspan.sub.map.common-8] +template +_CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr size_t +__submdspan_offset(index_sequence<_SliceIndices...>, const _LayoutMapping& __mapping, _Slices... 
__slices) +{ + using _Extents = typename _LayoutMapping::extents_type; + using _IndexType = typename _Extents::index_type; + // If first_(slices...) + const array<_IndexType, _Extents::rank()> __offsets = { + _CUDA_VSTD::__first_extent_from_slice<_IndexType, _SliceIndices>(__slices...)...}; + + using _SubExtents = __get_subextents_t<_Extents, _Slices...>; + for (size_t __index = 0; __index != _SubExtents::rank(); ++__index) + { + // If first_(slices...) equals extents().extent(k) for any rank index k of extents() + if (__offsets[__index] == __mapping.extents().extent(__index)) + { + // then let offset be a value of type size_t equal to (*this).required_span_size() + return static_cast(__mapping.required_span_size()); + } + } + // Otherwise, let offset be a value of type size_t equal to (*this)(first_(slices...)...). + return static_cast(__mapping(__offsets[_SliceIndices]...)); +} + +template +_CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr size_t +__submdspan_offset(const _LayoutMapping& __mapping, _Slices... 
__slices) +{ + return _CUDA_VSTD::__submdspan_offset(_CUDA_VSTD::index_sequence_for<_Slices...>(), __mapping, __slices...); +} + +// [mdspan.sub.map.common-9] +// [mdspan.sub.map.common-9.1] +template +_CCCL_CONCEPT __is_strided_slice_stride_of_one = _CCCL_REQUIRES_EXPR((_SliceType))( + requires(__is_strided_slice>), + requires(__integral_constant_like), + requires(_SliceType::stride_type::value == 1)); + +template +_LIBCUDACXX_HIDE_FROM_ABI constexpr bool __is_unit_stride_slice() +{ + // [mdspan.sub.map.common-9.1] + if constexpr (__is_strided_slice_stride_of_one<_SliceType>) + { + return true; + } + // [mdspan.sub.map.common-9.2] + else if constexpr (__index_pair_like<_SliceType, typename _LayoutMapping::index_type>) + { + return true; + } + // [mdspan.sub.map.common-9.3] + else if constexpr (_CCCL_TRAIT(is_convertible, _SliceType, full_extent_t)) + { + return true; + } + else + { + return false; + } + _CCCL_UNREACHABLE(); +} + +// [mdspan.sub.map.left] +template +_LIBCUDACXX_HIDE_FROM_ABI constexpr bool __can_layout_left() +{ + // [mdspan.sub.map.left-1.2] + if constexpr (_SubExtents::rank() == 0) + { + return true; + } + // [mdspan.sub.map.left-1.3.2] + else if constexpr (sizeof...(_OtherSlices) == 0) + { + return _CUDA_VSTD::__is_unit_stride_slice<_LayoutMapping, _Slice>(); + } + // [mdspan.sub.map.left-1.3.1] + else if constexpr (_CCCL_TRAIT(is_convertible, _Slice, full_extent_t)) + { + return _CUDA_VSTD::__can_layout_left<_LayoutMapping, _SubExtents, _OtherSlices...>(); + } + else + { + return false; + } + _CCCL_UNREACHABLE(); +} + +template +_CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr auto +__submdspan_mapping_impl(const typename layout_left::mapping<_Extents>& __mapping, _Slices... 
__slices) +{ + // [mdspan.sub.map.left-1.1] + if constexpr (_Extents::rank() == 0) + { + return submdspan_mapping_result{__mapping, 0}; + } + + // [mdspan.sub.map.left-1.2] + // [mdspan.sub.map.left-1.3] + using _SubExtents = __get_subextents_t<_Extents, _Slices...>; + const auto __sub_ext = _CUDA_VSTD::submdspan_extents(__mapping.extents(), __slices...); + const auto __offset = _CUDA_VSTD::__submdspan_offset(__mapping, __slices...); + if constexpr (_CUDA_VSTD::__can_layout_left, _SubExtents, _Slices...>()) + { + return submdspan_mapping_result>{layout_left::mapping{__sub_ext}, __offset}; + } + // [mdspan.sub.map.left-1.4] + // TODO: Implement padded layouts + else + { + // [mdspan.sub.map.left-1.5] + const auto __sub_strides = _CUDA_VSTD::__submdspan_strides(__mapping, __slices...); + return submdspan_mapping_result>{ + layout_stride::mapping{__sub_ext, __sub_strides}, __offset}; + } + _CCCL_UNREACHABLE(); +} + +template +_LIBCUDACXX_HIDE_FROM_ABI constexpr bool __can_layout_right() +{ + // [mdspan.sub.map.right-1.2] + if constexpr (_SubExtents::rank() == 0) + { + return true; + } + // [mdspan.sub.map.right-1.3.2] + else if constexpr (sizeof...(_OtherSlices) == 0) + { + return _CUDA_VSTD::__is_unit_stride_slice<_LayoutMapping, _Slice>(); + } + // [mdspan.sub.map.right-1.3.1] + else if constexpr (_CCCL_TRAIT(is_convertible, _Slice, full_extent_t)) + { + return _CUDA_VSTD::__can_layout_left<_LayoutMapping, _SubExtents, _OtherSlices...>(); + } + else + { + return false; + } + _CCCL_UNREACHABLE(); +} + +template +_CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr auto +__submdspan_mapping_impl(const typename layout_right::mapping<_Extents>& __mapping, _Slices... 
__slices) +{ + // [mdspan.sub.map.right-1.1] + if constexpr (_Extents::rank() == 0) + { + return submdspan_mapping_result{__mapping, 0}; + } + else + { + // [mdspan.sub.map.right-1.2] + // [mdspan.sub.map.right-1.3] + using _SubExtents = __get_subextents_t<_Extents, _Slices...>; + const auto __sub_ext = _CUDA_VSTD::submdspan_extents(__mapping.extents(), __slices...); + const auto __offset = _CUDA_VSTD::__submdspan_offset(__mapping, __slices...); + if constexpr (_CUDA_VSTD::__can_layout_right, _SubExtents, _Slices...>()) + { + return submdspan_mapping_result>{layout_right::mapping{__sub_ext}, __offset}; + } + // [mdspan.sub.map.right-1.4] + // TODO: Implement padded layouts + else + { + // [mdspan.sub.map.right-1.5] + const auto __sub_strides = _CUDA_VSTD::__submdspan_strides(__mapping, __slices...); + return submdspan_mapping_result>{ + layout_stride::mapping{__sub_ext, __sub_strides}, __offset}; + } + } + _CCCL_UNREACHABLE(); +} + +template +_CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr auto +__submdspan_mapping_impl(const typename layout_stride::mapping<_Extents>& __mapping, _Slices... __slices) +{ + // [mdspan.sub.map.stride-1.1] + if constexpr (_Extents::rank() == 0) + { + return submdspan_mapping_result{__mapping, 0}; + } + else + { + // [mdspan.sub.map.stride-1.2] + using _SubExtents = __get_subextents_t<_Extents, _Slices...>; + const auto __sub_ext = _CUDA_VSTD::submdspan_extents(__mapping.extents(), __slices...); + const auto __offset = _CUDA_VSTD::__submdspan_offset(__mapping, __slices...); + const auto __sub_strides = _CUDA_VSTD::__submdspan_strides(__mapping, __slices...); + return submdspan_mapping_result{layout_stride::mapping{__sub_ext, __sub_strides}, __offset}; + } +} + +template +_CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr auto +submdspan_mapping(const _LayoutMapping& __mapping, _Slices... 
__slices) +{ + return _CUDA_VSTD::__submdspan_mapping_impl(__mapping, __slices...); +} + +_CCCL_TEMPLATE(class _Tp, class _Extents, class _Layout, class _Accessor, class... _Slices) +_CCCL_REQUIRES(true) +_CCCL_NODISCARD _LIBCUDACXX_HIDE_FROM_ABI constexpr auto +submdspan(const mdspan<_Tp, _Extents, _Layout, _Accessor>& __src, _Slices... __slices) +{ + auto __sub_map_result = _CUDA_VSTD::submdspan_mapping(__src.mapping(), __slices...); + return mdspan(__src.accessor().offset(__src.data_handle(), __sub_map_result.offset), + __sub_map_result.mapping, + typename _Accessor::offset_policy(__src.accessor())); +} + +_LIBCUDACXX_END_NAMESPACE_STD + +#endif // _CCCL_STD_VER >= 2014 + +#endif // _LIBCUDACXX___MDSPAN_SUBMDSPAN_MAPPING_H diff --git a/libcudacxx/include/cuda/std/mdspan b/libcudacxx/include/cuda/std/mdspan index 75e4b8f3705..ac75b2ac700 100644 --- a/libcudacxx/include/cuda/std/mdspan +++ b/libcudacxx/include/cuda/std/mdspan @@ -33,6 +33,7 @@ _CCCL_PUSH_MACROS #include #include #include +#include #include _CCCL_POP_MACROS diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/submdspan/helper.h b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/submdspan/helper.h new file mode 100644 index 00000000000..7d09d8eed44 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/submdspan/helper.h @@ -0,0 +1,55 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. 
+// +//===----------------------------------------------------------------------===// + +#ifndef TEST_STD_CONTAINERS_VIEWS_MDSPAN_SUBMDSPAN_HELPER_H +#define TEST_STD_CONTAINERS_VIEWS_MDSPAN_SUBMDSPAN_HELPER_H + +#include +#include + +_CCCL_TEMPLATE(class MDSpan) +_CCCL_REQUIRES((MDSpan::rank() == 0)) +__host__ __device__ constexpr bool equal_to(const MDSpan& mdspan, const char* expected) +{ + return mdspan[cuda::std::array{}] == expected[0]; +} + +_CCCL_TEMPLATE(class MDSpan) +_CCCL_REQUIRES((MDSpan::rank() == 1)) +__host__ __device__ constexpr bool equal_to(const MDSpan& mdspan, const char* expected) +{ + for (size_t i = 0; i != mdspan.size(); ++i) + { + if (mdspan[i] != expected[i]) + { + return false; + } + } + return true; +} + +_CCCL_TEMPLATE(class MDSpan) +_CCCL_REQUIRES((MDSpan::rank() == 2)) +__host__ __device__ constexpr bool equal_to(const MDSpan& mdspan, cuda::std::array expected) +{ + for (size_t i = 0; i != mdspan.extent(0); ++i) + { + for (size_t j = 0; j != mdspan.extent(1); ++j) + { + const cuda::std::array indices{i, j}; + if (mdspan[indices] != expected[i][j]) + { + return false; + } + } + } + return true; +} + +#endif // TEST_STD_CONTAINERS_VIEWS_MDSPAN_SUBMDSPAN_HELPER_H diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/submdspan/layout_left.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/submdspan/layout_left.pass.cpp new file mode 100644 index 00000000000..805dfd96592 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/submdspan/layout_left.pass.cpp @@ -0,0 +1,413 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. 
+// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11, c++14 + +// + +// constexpr mdspan& operator=(const mdspan& rhs) = default; + +#include +#include +#include + +#include "helper.h" +#include "test_macros.h" + +__host__ __device__ constexpr bool test() +{ + constexpr char data[] = {'H', 'O', 'P', 'P', 'E', 'R'}; + + { // 1d mdspan + // ['H', 'O', 'P', 'P', 'E', 'R'] + cuda::std::mdspan md{data, cuda::std::layout_left::mapping{cuda::std::dims<1>{6}}}; + static_assert(md.rank() == 1); + static_assert(md.rank_dynamic() == 1); + assert(equal_to(md, "HOPPER")); + + using mdspan_t = decltype(md); + static_assert(cuda::std::is_same_v); + + { // full_extent + // ['H', 'O', 'P', 'P', 'E', 'R'] + // [ x x x x x x ] + cuda::std::mdspan sub = cuda::std::submdspan(md, cuda::std::full_extent); + + static_assert(sub.rank() == 1); + static_assert(sub.rank_dynamic() == 1); + + using submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + assert(sub.stride(0) == 1); + assert(sub.extent(0) == 6); + assert(sub.size() == 6); + assert(equal_to(sub, "HOPPER")); + } + + { // Slice of elements from start 0:4 + // ['H', 'O', 'P', 'P', 'E', 'R'] + // [ x x x x ] + const auto slice = cuda::std::pair{0, 4}; + cuda::std::mdspan sub = cuda::std::submdspan(md, slice); + + static_assert(sub.rank() == 1); + static_assert(sub.rank_dynamic() == 1); + + using submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + assert(sub.stride(0) == 1); + assert(sub.extent(0) == 4); + assert(sub.size() == 4); + assert(equal_to(sub, "HOPP")); + } + + { // Slice of elements in the middle 2:5 + // ['H', 'O', 'P', 'P', 'E', 'R'] + // [ x x x ] + const auto slice = cuda::std::pair{2, 5}; + cuda::std::mdspan sub = cuda::std::submdspan(md, slice); + + static_assert(sub.rank() == 1); + static_assert(sub.rank_dynamic() == 1); + + using submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + assert(sub.stride(0) == 
1); + assert(sub.extent(0) == 3); + assert(sub.size() == 3); + assert(equal_to(sub, "PPE")); + } + + { // Slice of elements in the end 3:6 + // ['H', 'O', 'P', 'P', 'E', 'R'] + // [ x x x ] + const auto slice = cuda::std::pair{3, 6}; + cuda::std::mdspan sub = cuda::std::submdspan(md, slice); + + static_assert(sub.rank() == 1); + static_assert(sub.rank_dynamic() == 1); + + using submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + assert(sub.stride(0) == 1); + assert(sub.extent(0) == 3); + assert(sub.size() == 3); + assert(equal_to(sub, "PER")); + } + + { // Slice of elements with strided slice without offset, full size and stride 1 + // ['H', 'O', 'P', 'P', 'E', 'R'] + // [ x x x x x x ] offset + // [ x x x x x x ] size + // [ x x x x x x ] stride + // [ x x x x x x ] + const cuda::std::strided_slice slice{0, md.extent(0), 1}; + cuda::std::mdspan sub = cuda::std::submdspan(md, slice); + + static_assert(sub.rank() == 1); + static_assert(sub.rank_dynamic() == 1); + + using submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + assert(sub.stride(0) == 1); + assert(sub.extent(0) == 6); + assert(sub.size() == 6); + assert(equal_to(sub, "HOPPER")); + } + + { // Slice of elements with strided slice with offset, full remaing size and stride 1 + // ['H', 'O', 'P', 'P', 'E', 'R'] + // [ x x x x ] offset + // [ x x x x ] size + // [ x x x x ] stride + // [ x x x x ] + const cuda::std::strided_slice slice{2, md.extent(0) - 2, 1}; + cuda::std::mdspan sub = cuda::std::submdspan(md, slice); + + static_assert(sub.rank() == 1); + static_assert(sub.rank_dynamic() == 1); + + using submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + assert(sub.stride(0) == 1); + assert(sub.extent(0) == 4); + assert(sub.size() == 4); + assert(equal_to(sub, "PPER")); + } + + { // Slice of elements with strided slice with offset, smaller size and stride 1 + // ['H', 'O', 'P', 'P', 'E', 'R'] + // [ x x x x ] offset + // [ x x ] size + // [ x x ] 
stride + // [ x x ] + const cuda::std::strided_slice slice{2, md.extent(0) - 4, 1}; + cuda::std::mdspan sub = cuda::std::submdspan(md, slice); + + static_assert(sub.rank() == 1); + static_assert(sub.rank_dynamic() == 1); + + using submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + assert(sub.stride(0) == 1); + assert(sub.extent(0) == 2); + assert(sub.size() == 2); + assert(equal_to(sub, "PP")); + } + + { // Slice of elements with strided slice without offset, full size and stride 3 + // ['H', 'O', 'P', 'P', 'E', 'R'] + // [ x x x x x x ] offset + // [ x x x x x x ] size + // [ x x ] stride + // [ x x ] + const cuda::std::strided_slice slice{0, md.extent(0), 3}; + cuda::std::mdspan sub = cuda::std::submdspan(md, slice); + + static_assert(sub.rank() == 1); + static_assert(sub.rank_dynamic() == 1); + + using submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + assert(sub.stride(0) == 3); + assert(sub.extent(0) == 2); + assert(sub.size() == 2); + assert(equal_to(sub, "HP")); + } + + { // Slice of elements with strided slice with offset, full size and stride 3 + // ['H', 'O', 'P', 'P', 'E', 'R'] + // [ x x x x x ] offset + // [ x x x x x ] size + // [ x x ] stride + // [ x x ] + const cuda::std::strided_slice slice{1, md.extent(0) - 1, 3}; + cuda::std::mdspan sub = cuda::std::submdspan(md, slice); + + static_assert(sub.rank() == 1); + static_assert(sub.rank_dynamic() == 1); + + using submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + assert(sub.stride(0) == 3); + assert(sub.extent(0) == 2); + assert(sub.size() == 2); + assert(equal_to(sub, "OE")); + } + + { // Slice of elements with strided slice with offset, size less equal than stride + // ['H', 'O', 'P', 'P', 'E', 'R'] + // [ x x x x x ] offset + // [ x x x ] size + // [ x ] stride + // [ x ] + const cuda::std::strided_slice slice{1, 3, 3}; + cuda::std::mdspan sub = cuda::std::submdspan(md, slice); + + static_assert(sub.rank() == 1); + 
static_assert(sub.rank_dynamic() == 1); + + using submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + assert(sub.stride(0) == 1); + assert(sub.extent(0) == 1); + assert(sub.size() == 1); + assert(equal_to(sub, "O")); + } + + { // Single element, with integral constant + // ['H', 'O', 'P', 'P', 'E', 'R'] + // [ x ] + const auto slice = cuda::std::integral_constant{}; + cuda::std::mdspan sub = cuda::std::submdspan(md, slice); + + static_assert(sub.rank() == 0); + static_assert(sub.rank_dynamic() == 0); + + using submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + assert(sub.size() == 1); + assert(equal_to(sub, "P")); + } + } + + { // 2d mdspan + // ['H', 'P', 'E'] + // ['O', 'P', 'R'] + cuda::std::mdspan md{data, cuda::std::layout_left::mapping{cuda::std::dims<2>{2, 3}}}; + static_assert(md.rank() == 2); + static_assert(md.rank_dynamic() == 2); + + assert(md.stride(0) == 1); + assert(md.stride(1) == md.extent(0)); + assert(md.extent(0) == 2); + assert(md.extent(1) == 3); + assert(md.size() == 6); + assert(equal_to(md, {"HPE", "OPR"})); + + { // full_extent + // ['H', 'P', 'E'] [ x ] [ x x x ] + // ['O', 'P', 'R'] [ x ] [ x x x ] + cuda::std::mdspan sub = cuda::std::submdspan(md, cuda::std::full_extent, cuda::std::full_extent); + + static_assert(sub.rank() == 2); + static_assert(sub.rank_dynamic() == 2); + + using submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + assert(sub.stride(0) == 1); + assert(sub.stride(1) == md.extent(0)); + assert(sub.extent(0) == md.extent(0)); + assert(sub.extent(1) == md.extent(1)); + assert(sub.size() == md.size()); + assert(equal_to(sub, {"HPE", "OPR"})); + } + + { // full extent, then slice of elements from start 0:2 + // ['H', 'P', 'E'] [ x ] [ x x ] + // ['O', 'P', 'R'] [ x ] [ x x ] + const auto slice2 = cuda::std::pair{0, 2}; + cuda::std::mdspan sub = cuda::std::submdspan(md, cuda::std::full_extent, slice2); + + static_assert(sub.rank() == 2); + 
static_assert(sub.rank_dynamic() == 2); + + using submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + assert(sub.stride(0) == md.stride(0)); + assert(sub.stride(1) == md.stride(1)); + assert(sub.extent(0) == md.extent(0)); + assert(sub.extent(1) == 2); + assert(sub.size() == 4); + assert(equal_to(sub, {"HP", "OP"})); + } + + { // Slice of elements from start 0:1, then full extent + // ['H', 'P', 'E'] [ x ] [ x x x ] + // ['O', 'P', 'R'] [ ] [ ] + const auto slice1 = cuda::std::pair{0, 1}; + cuda::std::mdspan sub = cuda::std::submdspan(md, slice1, cuda::std::full_extent); + + static_assert(sub.rank() == 2); + static_assert(sub.rank_dynamic() == 2); + + using submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + assert(sub.stride(0) == md.stride(0)); + assert(sub.stride(1) == md.stride(1)); + assert(sub.extent(0) == 1); + assert(sub.extent(1) == md.extent(1)); + assert(sub.size() == 3); + assert(equal_to(sub, {"HPE", ""})); + } + + { // Slice of elements from middle 1:2, then strided_slice without offset, full size and stride 1 + // ['H', 'P', 'E'] [ ] [ ] + // ['O', 'P', 'R'] [ x ] [ x x x ] + const auto slice1 = cuda::std::pair{1, 2}; + const cuda::std::strided_slice slice2{0, md.extent(1), 1}; + cuda::std::mdspan sub = cuda::std::submdspan(md, slice1, slice2); + + static_assert(sub.rank() == 2); + static_assert(sub.rank_dynamic() == 2); + + using submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + assert(sub.stride(0) == md.stride(0)); + assert(sub.stride(1) == md.stride(1)); + assert(sub.extent(0) == 1); + assert(sub.extent(1) == md.extent(1)); + assert(sub.size() == 3); + assert(equal_to(sub, {"OPR", ""})); + } + + { // Slice of elements from middle 1:2, then strided_slice with offset, full size and stride 1 + // ['H', 'P', 'E'] [ ] [ ] + // ['O', 'P', 'R'] [ x ] [ x x ] + const auto slice1 = cuda::std::pair{1, 2}; + const cuda::std::strided_slice slice2{1, md.extent(1) - 1, 1}; + cuda::std::mdspan sub = 
cuda::std::submdspan(md, slice1, slice2); + + static_assert(sub.rank() == 2); + static_assert(sub.rank_dynamic() == 2); + + using submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + assert(sub.stride(0) == md.stride(0)); + assert(sub.stride(1) == md.stride(1)); + assert(sub.extent(0) == 1); + assert(sub.extent(1) == md.extent(1) - 1); + assert(sub.size() == 2); + assert(equal_to(sub, {"PR", ""})); + } + + { // Slice of elements from middle 1:2, then strided_slice without offset, full size and stride 2 + // ['H', 'P', 'E'] [ ] [ ] + // ['O', 'P', 'R'] [ x ] [ x x ] + const auto slice1 = cuda::std::pair{1, 2}; + const cuda::std::strided_slice slice2{0, md.extent(1), 2}; + cuda::std::mdspan sub = cuda::std::submdspan(md, slice1, slice2); + + static_assert(sub.rank() == 2); + static_assert(sub.rank_dynamic() == 2); + + using submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + assert(sub.stride(0) == md.stride(0)); + assert(sub.stride(1) == 2 * md.stride(1)); + assert(sub.extent(0) == 1); + assert(sub.extent(1) == md.extent(1) - 1); + assert(sub.size() == 2); + assert(equal_to(sub, {"OR", ""})); + } + + { // Slice of elements from middle 1:2, then index + // ['H', 'P', 'E'] [ ] [ ] + // ['O', 'P', 'R'] [ x ] [ x ] + const auto slice1 = cuda::std::pair{1, 2}; + cuda::std::mdspan sub = cuda::std::submdspan(md, slice1, 2); + + static_assert(sub.rank() == 1); + static_assert(sub.rank_dynamic() == 1); + + using submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + assert(sub.stride(0) == md.stride(0)); + assert(sub.extent(0) == 1); + assert(sub.size() == 1); + assert(equal_to(sub, {"R"})); + } + } + return true; +} + +int main(int, char**) +{ + test(); + static_assert(test(), ""); + return 0; +} diff --git a/libcudacxx/test/libcudacxx/std/containers/views/mdspan/submdspan/layout_right.pass.cpp b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/submdspan/layout_right.pass.cpp new file mode 100644 index 
00000000000..60839ae9e82 --- /dev/null +++ b/libcudacxx/test/libcudacxx/std/containers/views/mdspan/submdspan/layout_right.pass.cpp @@ -0,0 +1,414 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: c++11, c++14 + +// + +// constexpr mdspan& operator=(const mdspan& rhs) = default; + +#include +#include +#include + +#include "helper.h" +#include "test_macros.h" + +__host__ __device__ constexpr bool test() +{ + constexpr char data[] = {'H', 'O', 'P', 'P', 'E', 'R'}; + + { // 1d mdspan + // ['H', 'O', 'P', 'P', 'E', 'R'] + cuda::std::mdspan md{data, cuda::std::layout_right::mapping{cuda::std::dims<1>{6}}}; + static_assert(md.rank() == 1); + static_assert(md.rank_dynamic() == 1); + assert(equal_to(md, "HOPPER")); + + using mdspan_t = decltype(md); + static_assert(cuda::std::is_same_v); + + { // full_extent + // ['H', 'O', 'P', 'P', 'E', 'R'] + // [ x x x x x x ] + cuda::std::mdspan sub = cuda::std::submdspan(md, cuda::std::full_extent); + + static_assert(sub.rank() == 1); + static_assert(sub.rank_dynamic() == 1); + + using submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + assert(sub.stride(0) == 1); + assert(sub.extent(0) == 6); + assert(sub.size() == 6); + assert(equal_to(sub, "HOPPER")); + } + + { // Slice of elements from start 0:4 + // ['H', 'O', 'P', 'P', 'E', 'R'] + // [ x x x x ] + const auto slice = cuda::std::pair{0, 4}; + cuda::std::mdspan sub = cuda::std::submdspan(md, slice); + + static_assert(sub.rank() == 1); + static_assert(sub.rank_dynamic() == 1); + + using submdspan_t = decltype(sub); + 
static_assert(cuda::std::is_same_v); + + assert(sub.stride(0) == 1); + assert(sub.extent(0) == 4); + assert(sub.size() == 4); + assert(equal_to(sub, "HOPP")); + } + + { // Slice of elements in the middle 2:5 + // ['H', 'O', 'P', 'P', 'E', 'R'] + // [ x x x ] + const auto slice = cuda::std::pair{2, 5}; + cuda::std::mdspan sub = cuda::std::submdspan(md, slice); + + static_assert(sub.rank() == 1); + static_assert(sub.rank_dynamic() == 1); + + using submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + assert(sub.stride(0) == 1); + assert(sub.extent(0) == 3); + assert(sub.size() == 3); + assert(equal_to(sub, "PPE")); + } + + { // Slice of elements in the end 3:6 + // ['H', 'O', 'P', 'P', 'E', 'R'] + // [ x x x ] + const auto slice = cuda::std::pair{3, 6}; + cuda::std::mdspan sub = cuda::std::submdspan(md, slice); + + static_assert(sub.rank() == 1); + static_assert(sub.rank_dynamic() == 1); + + using submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + assert(sub.stride(0) == 1); + assert(sub.extent(0) == 3); + assert(sub.size() == 3); + assert(equal_to(sub, "PER")); + } + + { // Slice of elements with strided slice without offset, full size and stride 1 + // ['H', 'O', 'P', 'P', 'E', 'R'] + // [ x x x x x x ] offset + // [ x x x x x x ] size + // [ x x x x x x ] stride + // [ x x x x x x ] + const cuda::std::strided_slice slice{0, md.extent(0), 1}; + cuda::std::mdspan sub = cuda::std::submdspan(md, slice); + + static_assert(sub.rank() == 1); + static_assert(sub.rank_dynamic() == 1); + + using submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + assert(sub.stride(0) == 1); + assert(sub.extent(0) == 6); + assert(sub.size() == 6); + assert(equal_to(sub, "HOPPER")); + } + + { // Slice of elements with strided slice with offset, full remaing size and stride 1 + // ['H', 'O', 'P', 'P', 'E', 'R'] + // [ x x x x ] offset + // [ x x x x ] size + // [ x x x x ] stride + // [ x x x x ] + const cuda::std::strided_slice 
slice{2, md.extent(0) - 2, 1}; + cuda::std::mdspan sub = cuda::std::submdspan(md, slice); + + static_assert(sub.rank() == 1); + static_assert(sub.rank_dynamic() == 1); + + using submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + assert(sub.stride(0) == 1); + assert(sub.extent(0) == 4); + assert(sub.size() == 4); + assert(equal_to(sub, "PPER")); + } + + { // Slice of elements with strided slice with offset, smaller size and stride 1 + // ['H', 'O', 'P', 'P', 'E', 'R'] + // [ x x x x ] offset + // [ x x ] size + // [ x x ] stride + // [ x x ] + const cuda::std::strided_slice slice{2, md.extent(0) - 4, 1}; + cuda::std::mdspan sub = cuda::std::submdspan(md, slice); + + static_assert(sub.rank() == 1); + static_assert(sub.rank_dynamic() == 1); + + using submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + assert(sub.stride(0) == 1); + assert(sub.extent(0) == 2); + assert(sub.size() == 2); + assert(equal_to(sub, "PP")); + } + + { // Slice of elements with strided slice without offset, full size and stride 3 + // ['H', 'O', 'P', 'P', 'E', 'R'] + // [ x x x x x x ] offset + // [ x x x x x x ] size + // [ x x ] stride + // [ x x ] + const cuda::std::strided_slice slice{0, md.extent(0), 3}; + cuda::std::mdspan sub = cuda::std::submdspan(md, slice); + + static_assert(sub.rank() == 1); + static_assert(sub.rank_dynamic() == 1); + + using submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + assert(sub.stride(0) == 3); + assert(sub.extent(0) == 2); + assert(sub.size() == 2); + assert(equal_to(sub, "HP")); + } + + { // Slice of elements with strided slice with offset, full size and stride 3 + // ['H', 'O', 'P', 'P', 'E', 'R'] + // [ x x x x x ] offset + // [ x x x x x ] size + // [ x x ] stride + // [ x x ] + const cuda::std::strided_slice slice{1, md.extent(0) - 1, 3}; + cuda::std::mdspan sub = cuda::std::submdspan(md, slice); + + static_assert(sub.rank() == 1); + static_assert(sub.rank_dynamic() == 1); + + using 
submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + assert(sub.stride(0) == 3); + assert(sub.extent(0) == 2); + assert(sub.size() == 2); + assert(equal_to(sub, "OE")); + } + + { // Slice of elements with strided slice with offset, size less equal than stride + // ['H', 'O', 'P', 'P', 'E', 'R'] + // [ x x x x x ] offset + // [ x x x ] size + // [ x ] stride + // [ x ] + const cuda::std::strided_slice slice{1, 3, 3}; + cuda::std::mdspan sub = cuda::std::submdspan(md, slice); + + static_assert(sub.rank() == 1); + static_assert(sub.rank_dynamic() == 1); + + using submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + assert(sub.stride(0) == 1); + assert(sub.extent(0) == 1); + assert(sub.size() == 1); + assert(equal_to(sub, "O")); + } + + { // Single element, with integral constant + // ['H', 'O', 'P', 'P', 'E', 'R'] + // [ x ] + const auto slice = cuda::std::integral_constant{}; + cuda::std::mdspan sub = cuda::std::submdspan(md, slice); + + static_assert(sub.rank() == 0); + static_assert(sub.rank_dynamic() == 0); + + using submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + assert(sub.size() == 1); + assert(equal_to(sub, "P")); + } + } + + { // 2d mdspan + // ['H', 'O', 'P'] + // ['P', 'E', 'R'] + cuda::std::mdspan md{data, cuda::std::layout_right::mapping{cuda::std::dims<2>{2, 3}}}; + static_assert(md.rank() == 2); + static_assert(md.rank_dynamic() == 2); + + assert(md.stride(0) == md.extent(1)); + assert(md.stride(1) == 1); + assert(md.extent(0) == 2); + assert(md.extent(1) == 3); + assert(md.size() == 6); + assert(equal_to(md, {"HOP", "PER"})); + + { // full_extent + // ['H', 'O', 'P'] [ x ] [ x x x ] + // ['P', 'E', 'R'] [ x ] [ x x x ] + cuda::std::mdspan sub = cuda::std::submdspan(md, cuda::std::full_extent, cuda::std::full_extent); + + static_assert(sub.rank() == 2); + static_assert(sub.rank_dynamic() == 2); + + using submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + 
assert(sub.stride(0) == md.extent(1)); + assert(sub.stride(1) == 1); + assert(sub.extent(0) == md.extent(0)); + assert(sub.extent(1) == md.extent(1)); + assert(sub.size() == md.size()); + assert(equal_to(sub, {"HOP", "PER"})); + } + + { // full extent, then slice of elements from start 0:1 + // ['H', 'O', 'P'] [ x ] [ x ] + // ['P', 'E', 'R'] [ x ] [ x ] + const auto slice2 = cuda::std::pair{0, 1}; + cuda::std::mdspan sub = cuda::std::submdspan(md, cuda::std::full_extent, slice2); + + static_assert(sub.rank() == 2); + static_assert(sub.rank_dynamic() == 2); + + using submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + assert(sub.stride(0) == 1); + assert(sub.stride(1) == md.stride(1)); + assert(sub.extent(0) == md.extent(0)); + assert(sub.extent(1) == 1); + assert(sub.size() == 2); + assert(equal_to(sub, {"H", "O"})); + } + + { // Slice of elements from start 1:2, then full extent + // ['H', 'O', 'P'] [ ] [ ] + // ['P', 'E', 'R'] [ x ] [ x x x ] + const auto slice1 = cuda::std::pair{1, 2}; + cuda::std::mdspan sub = cuda::std::submdspan(md, slice1, cuda::std::full_extent); + + static_assert(sub.rank() == 2); + static_assert(sub.rank_dynamic() == 2); + + using submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + assert(sub.stride(0) == md.stride(0)); + assert(sub.stride(1) == md.stride(1)); + assert(sub.extent(0) == 1); + assert(sub.extent(1) == md.extent(1)); + assert(sub.size() == 3); + assert(equal_to(sub, {"PER", ""})); + } + + { // Slice of elements from middle 1:2, then strided_slice without offset, full size and stride 1 + // ['H', 'O', 'P'] [ ] [ ] + // ['P', 'E', 'R'] [ x ] [ x x x ] + const auto slice1 = cuda::std::pair{1, 2}; + const cuda::std::strided_slice slice2{0, md.extent(1), 1}; + cuda::std::mdspan sub = cuda::std::submdspan(md, slice1, slice2); + + static_assert(sub.rank() == 2); + static_assert(sub.rank_dynamic() == 2); + + using submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + 
assert(sub.stride(0) == md.stride(0)); + assert(sub.stride(1) == md.stride(1)); + assert(sub.extent(0) == 1); + assert(sub.extent(1) == md.extent(1)); + assert(sub.size() == 3); + assert(equal_to(sub, {"PER", ""})); + } + + { // Slice of elements from middle 1:2, then strided_slice with offset, full size and stride 1 + // ['H', 'O', 'P'] [ ] [ ] + // ['P', 'E', 'R'] [ x ] [ x x ] + const auto slice1 = cuda::std::pair{1, 2}; + const cuda::std::strided_slice slice2{1, md.extent(1) - 1, 1}; + cuda::std::mdspan sub = cuda::std::submdspan(md, slice1, slice2); + + static_assert(sub.rank() == 2); + static_assert(sub.rank_dynamic() == 2); + + using submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + assert(sub.stride(0) == md.stride(0)); + assert(sub.stride(1) == md.stride(1)); + assert(sub.extent(0) == 1); + assert(sub.extent(1) == md.extent(1) - 1); + assert(sub.size() == 2); + assert(equal_to(sub, {"ER", ""})); + } + + { // Slice of elements from middle 1:2, then strided_slice without offset, full size and stride 2 + // ['H', 'O', 'P'] [ ] [ ] + // ['P', 'E', 'R'] [ x ] [ x ] + const auto slice1 = cuda::std::pair{1, 2}; + const cuda::std::strided_slice slice2{0, 2, 2}; + cuda::std::mdspan sub = cuda::std::submdspan(md, slice1, slice2); + + static_assert(sub.rank() == 2); + static_assert(sub.rank_dynamic() == 2); + + using submdspan_t = decltype(sub); + static_assert(cuda::std::is_same_v); + + assert(sub.stride(0) == md.stride(0)); + assert(sub.stride(1) == 1); + assert(sub.extent(0) == 1); + assert(sub.extent(1) == 1); + assert(sub.size() == 1); + assert(equal_to(sub, {"P", ""})); + } + + { // Slice of elements from middle 1:2, then index + // ['H', 'O', 'P'] [ ] [ ] + // ['P', 'E', 'R'] [ x ] [ x ] + const auto slice1 = cuda::std::pair{1, 2}; + cuda::std::mdspan sub = cuda::std::submdspan(md, slice1, 2); + + static_assert(sub.rank() == 1); + static_assert(sub.rank_dynamic() == 1); + + using submdspan_t = decltype(sub); + 
static_assert(cuda::std::is_same_v); + + assert(sub.stride(0) == md.stride(0)); + assert(sub.extent(0) == 1); + assert(sub.size() == 1); + assert(equal_to(sub, {"R"})); + } + } + + return true; +} + +int main(int, char**) +{ + test(); + // static_assert(test(), ""); + return 0; +}