Revert "Replace shape size_t with int64"
This reverts commit 6740ea6.
ccummingsNV committed Jan 22, 2025
1 parent 6740ea6 commit 7b516ee
Showing 4 changed files with 37 additions and 37 deletions.
28 changes: 14 additions & 14 deletions src/sgl/utils/python/slangpy.cpp
@@ -52,7 +52,7 @@ void NativeMarshall::store_readback(
read_back.append(nb::make_tuple(binding, value, data));
}

- void NativeBoundVariableRuntime::populate_call_shape(std::vector<int64_t>& call_shape, nb::object value)
+ void NativeBoundVariableRuntime::populate_call_shape(std::vector<size_t>& call_shape, nb::object value)
{
if (m_children) {
// We have children, so load each child value and recurse down the tree.
@@ -70,7 +70,7 @@ void NativeBoundVariableRuntime::populate_call_shape(std::vector<int64_t>& call_

// Read the transform and call shape size.
auto tf = m_transform.as_vector();
- int64_t csl = call_shape.size();
+ size_t csl = call_shape.size();

// Get the shape of the value. In the case of none-concrete types,
// only the container shape is needed, as we never map elements.
@@ -82,8 +82,8 @@ void NativeBoundVariableRuntime::populate_call_shape(std::vector<int64_t>& call_
// Apply this shape to the overall call shape.
auto shape = m_shape.as_vector();
for (size_t i = 0; i < tf.size(); ++i) {
- int64_t shape_dim = shape[i];
- int64_t call_idx = tf[i];
+ size_t shape_dim = shape[i];
+ size_t call_idx = tf[i];

// If the call index loaded from the transform is
// out of bounds, this dimension is a sub-element index,
@@ -97,7 +97,7 @@ void NativeBoundVariableRuntime::populate_call_shape(std::vector<int64_t>& call_
//- if current call shape == 1, shape_dim != 1, call is expanded
//- if current call shape != 1, shape_dim == 1, shape is broadcast
//- if current call shape != 1, shape_dim != 1, it's a mismatch
- int64_t& cs = call_shape[call_idx];
+ size_t& cs = call_shape[call_idx];
if (cs != shape_dim) {
if (cs != 1 && shape_dim != 1) {
throw NativeBoundVariableException(
@@ -215,7 +215,7 @@ nb::object NativeBoundVariableRuntime::read_output(CallContext* context, nb::obj
Shape NativeBoundCallRuntime::calculate_call_shape(int call_dimensionality, nb::list args, nb::dict kwargs)
{
// Setup initial call shape of correct dimensionality, with all dimensions set to 1.
- std::vector<int64_t> call_shape(call_dimensionality, 1);
+ std::vector<size_t> call_shape(call_dimensionality, 1);

// Populate call shape for each positional argument.
for (size_t idx = 0; idx < args.size(); ++idx) {
@@ -806,19 +806,19 @@ SGL_PY_EXPORT(utils_slangpy)
[](Shape& self, nb::args args)
{
if (args.size() == 0) {
- new (&self) Shape(std::vector<int64_t>());
+ new (&self) Shape(std::vector<size_t>());
} else if (args.size() == 1) {
if (args[0].is_none()) {
new (&self) Shape(std::nullopt);
} else if (nb::isinstance<nb::tuple>(args[0])) {
- new (&self) Shape(nb::cast<std::vector<int64_t>>(args[0]));
+ new (&self) Shape(nb::cast<std::vector<size_t>>(args[0]));
} else if (nb::isinstance<Shape>(args[0])) {
new (&self) Shape(nb::cast<Shape>(args[0]));
} else {
- new (&self) Shape(nb::cast<std::vector<int64_t>>(args));
+ new (&self) Shape(nb::cast<std::vector<size_t>>(args));
}
} else {
- new (&self) Shape(nb::cast<std::vector<int64_t>>(args));
+ new (&self) Shape(nb::cast<std::vector<size_t>>(args));
}
},
"args"_a,
@@ -832,7 +832,7 @@ SGL_PY_EXPORT(utils_slangpy)
)
.def(
"__getitem__",
- [](const Shape& self, int64_t i) -> int64_t
+ [](const Shape& self, size_t i) -> size_t
{
if (i >= self.size())
throw nb::index_error(); // throwing index_error allows this to be used as a python iterator
@@ -848,9 +848,9 @@ SGL_PY_EXPORT(utils_slangpy)
"as_tuple",
[](Shape& self)
{
- std::vector<int64_t>& v = self.as_vector();
+ std::vector<size_t>& v = self.as_vector();
nb::list py_list;
- for (const int64_t& item : v) {
+ for (const size_t& item : v) {
py_list.append(item);
}
return nb::tuple(py_list);
@@ -873,7 +873,7 @@ SGL_PY_EXPORT(utils_slangpy)
return self.as_vector() == nb::cast<Shape>(other).as_vector();
}

- std::vector<int64_t> v;
+ std::vector<size_t> v;
if (nb::try_cast(other, v)) {
return self.as_vector() == v;
}
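
For context on the populate_call_shape change above: the broadcast rule spelled out in its comments can be shown as a small standalone sketch. The helper name, signature, and example values below are illustrative only and are not part of this commit or the repository.

#include <cstddef>
#include <stdexcept>
#include <vector>

// Illustrative helper: merge one argument's shape into the overall call shape.
// 'transform' maps argument dimension i to a call-shape dimension; indices past
// the end of the call shape are sub-element dimensions and are skipped.
void merge_arg_shape(
    std::vector<size_t>& call_shape,
    const std::vector<size_t>& arg_shape,
    const std::vector<size_t>& transform)
{
    size_t csl = call_shape.size();
    for (size_t i = 0; i < transform.size(); ++i) {
        size_t shape_dim = arg_shape[i];
        size_t call_idx = transform[i];
        if (call_idx >= csl)
            continue; // sub-element index, not part of the call shape
        size_t& cs = call_shape[call_idx];
        if (cs == shape_dim)
            continue; // dimensions already agree
        if (cs != 1 && shape_dim != 1)
            throw std::runtime_error("shape mismatch"); // neither side is 1
        if (cs == 1)
            cs = shape_dim; // call shape of 1 expands to the argument's size
        // otherwise shape_dim == 1: the argument is broadcast, call shape unchanged
    }
}

Starting from a call shape of {1, 8}, an argument of shape {4, 1} with transform {0, 1} produces {4, 8}; a further argument of shape {3, 8} would then throw, since 3 and 4 conflict.
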
2 changes: 1 addition & 1 deletion src/sgl/utils/python/slangpy.h
@@ -261,7 +261,7 @@ class NativeBoundVariableRuntime : public Object {
void set_call_dimensionality(int call_dimensionality) { m_call_dimensionality = call_dimensionality; }

/// Recursively populate the overall kernel call shape.
- void populate_call_shape(std::vector<int64_t>& call_shape, nb::object value);
+ void populate_call_shape(std::vector<size_t>& call_shape, nb::object value);

/// Write call data to shader cursor before dispatch, optionally writing data for read back after the kernel has
/// run.
20 changes: 10 additions & 10 deletions src/sgl/utils/python/slangpybuffer.cpp
@@ -60,16 +60,16 @@ void NativeNDBufferMarshall::write_shader_cursor_pre_dispatch(
field["buffer"] = buffer->storage();

// Write shape vector as an array of ints.
- std::vector<int64_t> shape_vec = buffer->shape().as_vector();
+ std::vector<size_t> shape_vec = buffer->shape().as_vector();
field["shape"]._set_array(&shape_vec[0], shape_vec.size() * 4, TypeReflection::ScalarType::int32, shape_vec.size());

// Generate and write strides vector, clearing strides to 0
// for dimensions that are broadcast.
- std::vector<int64_t> strides_vec = buffer->strides().as_vector();
- std::vector<int64_t> transform = binding->get_transform().as_vector();
- std::vector<int64_t> call_shape = context->call_shape().as_vector();
+ std::vector<size_t> strides_vec = buffer->strides().as_vector();
+ std::vector<size_t> transform = binding->get_transform().as_vector();
+ std::vector<size_t> call_shape = context->call_shape().as_vector();
for (size_t i = 0; i < transform.size(); i++) {
- int64_t csidx = transform[i];
+ size_t csidx = transform[i];
if (call_shape[csidx] != shape_vec[i]) {
strides_vec[i] = 0;
}
@@ -134,7 +134,7 @@ NativeNDBufferMarshall::read_output(CallContext* context, NativeBoundVariableRun
Shape NativeNumpyMarshall::get_shape(nb::object data) const
{
auto ndarray = nb::cast<nb::ndarray<nb::numpy>>(data);
- std::vector<int64_t> shape_vec;
+ std::vector<size_t> shape_vec;
for (size_t i = 0; i < ndarray.ndim(); i++) {
shape_vec.push_back(ndarray.shape(i));
}
@@ -152,15 +152,15 @@ void NativeNumpyMarshall::write_shader_cursor_pre_dispatch(
auto ndarray = nb::cast<nb::ndarray<nb::numpy>>(value);
SGL_CHECK(ndarray.dtype() == m_dtype, "numpy array dtype does not match the expected dtype");

- std::vector<int64_t> shape_vec;
+ std::vector<size_t> shape_vec;
for (size_t i = 0; i < ndarray.ndim(); i++) {
shape_vec.push_back(ndarray.shape(i));
}

- std::vector<int64_t> vector_shape = binding->get_vector_type()->get_shape().as_vector();
+ std::vector<size_t> vector_shape = binding->get_vector_type()->get_shape().as_vector();
for (size_t i = 0; i < vector_shape.size(); i++) {
- int64_t vs_size = vector_shape[vector_shape.size() - i - 1];
- int64_t arr_size = shape_vec[shape_vec.size() - i - 1];
+ size_t vs_size = vector_shape[vector_shape.size() - i - 1];
+ size_t arr_size = shape_vec[shape_vec.size() - i - 1];

SGL_CHECK(
vs_size == arr_size,
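
A note on the stride handling in write_shader_cursor_pre_dispatch above: clearing a dimension's stride to 0 is what implements broadcasting on the GPU side, since every thread along that dimension then reads the same element. A minimal sketch of the idea follows; the helper name and example values are illustrative, not from this commit.

#include <cstddef>
#include <vector>

// Illustrative helper: zero the stride of every dimension whose extent differs
// from the call shape, so the buffer is reused across that dimension.
std::vector<size_t> broadcast_strides(
    std::vector<size_t> strides,             // contiguous strides of the buffer, e.g. {1, 1}
    const std::vector<size_t>& buffer_shape, // e.g. {4, 1}
    const std::vector<size_t>& call_shape,   // e.g. {4, 8}
    const std::vector<size_t>& transform)    // buffer dim -> call dim, e.g. {0, 1}
{
    for (size_t i = 0; i < transform.size(); ++i) {
        size_t csidx = transform[i];
        if (call_shape[csidx] != buffer_shape[i])
            strides[i] = 0; // broadcast: the offset no longer advances along this dim
    }
    return strides; // for the example values above, {1, 1} becomes {1, 0}
}
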
24 changes: 12 additions & 12 deletions src/sgl/utils/slangpy.h
@@ -48,7 +48,7 @@ class SGL_API Shape {
Shape() = default;

/// Constructor from optional 'tuple'.
- Shape(const std::optional<std::vector<int64_t>>& shape)
+ Shape(const std::optional<std::vector<size_t>>& shape)
: m_shape(shape)
{
}
@@ -70,7 +70,7 @@ class SGL_API Shape {
{
auto& this_vec = as_vector();
auto& other_vec = other.as_vector();
- std::vector<int64_t> combined = this_vec;
+ std::vector<size_t> combined = this_vec;
combined.insert(combined.end(), other_vec.begin(), other_vec.end());
return Shape(combined);
}
@@ -83,11 +83,11 @@
}

/// Indexers.
- int64_t operator[](int64_t i) const { return as_vector()[i]; }
- int64_t& operator[](int64_t i) { return as_vector()[i]; }
+ size_t operator[](size_t i) const { return as_vector()[i]; }
+ size_t& operator[](size_t i) { return as_vector()[i]; }

/// Access to internal vector.
- std::vector<int64_t>& as_vector()
+ std::vector<size_t>& as_vector()
{
if (!m_shape) {
SGL_THROW("Shape is invalid");
@@ -96,7 +96,7 @@
}

/// Const access to internal vector.
- const std::vector<int64_t>& as_vector() const
+ const std::vector<size_t>& as_vector() const
{
if (!m_shape) {
SGL_THROW("Shape is invalid");
@@ -108,7 +108,7 @@
bool valid() const { return m_shape.has_value(); }

/// Get size (i.e. number of dimensions) of shape.
- int64_t size() const { return as_vector().size(); }
+ size_t size() const { return as_vector().size(); }

/// Check if concrete shape (no dimensions are -1).
bool concrete() const
@@ -131,9 +131,9 @@
}

/// Total element count (if this represented contiguous array)
- int64_t element_count() const
+ size_t element_count() const
{
- int64_t result = 1;
+ size_t result = 1;
for (auto dim : as_vector()) {
result *= dim;
}
@@ -145,8 +145,8 @@
{
if (valid()) {
auto& shape = as_vector();
- int64_t total = 1;
- std::vector<int64_t> strides(shape.size(), 1);
+ size_t total = 1;
+ std::vector<size_t> strides(shape.size(), 1);
for (int i = (int)shape.size() - 1; i >= 0; --i) {
strides[i] = total;
total *= shape[i];
@@ -158,7 +158,7 @@
}

private:
- std::optional<std::vector<int64_t>> m_shape;
+ std::optional<std::vector<size_t>> m_shape;
};

class SGL_API CallContext : Object {
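
For reference, the row-major stride computation touched in Shape::calc_contiguous_strides above can be summarized with this small sketch; the free function below is illustrative and not part of the header.

#include <cstddef>
#include <vector>

// Minimal sketch of row-major contiguous strides: each stride is the product
// of all faster-varying extents to its right. For shape {2, 3, 4} this yields
// strides {12, 4, 1}, and the running product ends at element_count() == 24.
std::vector<size_t> contiguous_strides(const std::vector<size_t>& shape)
{
    size_t total = 1;
    std::vector<size_t> strides(shape.size(), 1);
    for (int i = (int)shape.size() - 1; i >= 0; --i) {
        strides[i] = total; // product of extents of dimensions i+1 .. n-1
        total *= shape[i];
    }
    return strides;
}
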
