Skip to content

Commit

Permalink
Enable ResizeBilinear and ResizeNearestNeighbor ops
Browse files Browse the repository at this point in the history
Tracked-On:
Signed-off-by: Ratnesh Kumar Rai <[email protected]>
  • Loading branch information
rairatne committed May 29, 2023
1 parent 92708d3 commit 3f389cd
Show file tree
Hide file tree
Showing 5 changed files with 48 additions and 38 deletions.
10 changes: 10 additions & 0 deletions BasePreparedModel.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -86,9 +86,11 @@ bool BasePreparedModel::initialize() {
auto& nnapiOperandType = mModelInfo->getOperand(i).type;
switch (nnapiOperandType) {
case OperandType::FLOAT32:
case OperandType::FLOAT16:
case OperandType::TENSOR_FLOAT32:
case OperandType::TENSOR_FLOAT16:
case OperandType::TENSOR_INT32:
case OperandType::INT32:
break;
default :
ALOGD("GRPC Remote Infer not enabled for %d", nnapiOperandType);
Expand Down Expand Up @@ -410,6 +412,11 @@ void asyncExecute(const Request& request, MeasureTiming measure, BasePreparedMod
} else {
returned = notify(callback, ErrorStatus::NONE, modelInfo->getOutputShapes(), kNoTiming);
}

if (!modelInfo->unmapRuntimeMemPools()) {
ALOGE("Failed to unmap the request pool infos");
}

if (!returned.isOk()) {
ALOGE("hidl callback failed to return properly: %s", returned.description().c_str());
}
Expand Down Expand Up @@ -639,6 +646,9 @@ static std::tuple<ErrorStatus, hidl_vec<V1_2::OutputShape>, Timing> executeSynch
return {ErrorStatus::NONE, modelInfo->getOutputShapes(), timing};
}
ALOGV("Exiting %s", __func__);
if (!modelInfo->unmapRuntimeMemPools()) {
ALOGE("Failed to unmap the request pool infos");
}
return {ErrorStatus::NONE, modelInfo->getOutputShapes(), kNoTiming};
}

Expand Down
2 changes: 1 addition & 1 deletion DetectionClient.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -190,7 +190,7 @@ void DetectionClient::clear_data() {

std::string DetectionClient::remote_infer() {
ClientContext context;
time_point deadline = std::chrono::system_clock::now() + std::chrono::milliseconds(5000);
time_point deadline = std::chrono::system_clock::now() + std::chrono::milliseconds(10000);
context.set_deadline(deadline);

request.mutable_token()->set_data(mToken);
Expand Down
3 changes: 2 additions & 1 deletion ModelManager.h
Original file line number Diff line number Diff line change
Expand Up @@ -186,10 +186,11 @@ class NnapiModelInfo {

std::vector<V1_2::OutputShape> getOutputShapes() { return mOutputShapes; }

void unmapRuntimeMemPools() {
bool unmapRuntimeMemPools() {
for (auto& runtimeInfo : mRequestPoolInfos) {
runtimeInfo.unmap_mem();
}
return true;
}

bool isOmittedInput(int operationIndex, uint32_t index);
Expand Down
34 changes: 18 additions & 16 deletions ngraph_creator/operations/src/ResizeBilinear.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -12,18 +12,6 @@ ResizeBilinear::ResizeBilinear(int operationIndex, GraphMetadata graphMetadata )
}

bool ResizeBilinear::validate() {
// TODO Add FLOAT16 check when VPUX plugin is supported
if (!checkOutputOperandType(0, (int32_t)OperandType::TENSOR_FLOAT32) &&
!checkOutputOperandType(0, (int32_t)OperandType::TENSOR_QUANT8_ASYMM)) {
ALOGE("%s check for output types failed", __func__);
return false;
}

if (!checkInputOperandType(0, (int32_t)OperandType::TENSOR_FLOAT32) &&
!checkInputOperandType(0, (int32_t)OperandType::TENSOR_QUANT8_ASYMM)) {
return false;
}

const auto& inputDimensionsSize = getInputOperandDimensions(0).size();
if (inputDimensionsSize != 4) {
ALOGE("%s Invalid dimensions size for input(%lu)", __func__, inputDimensionsSize);
Expand Down Expand Up @@ -72,10 +60,14 @@ std::shared_ptr<ov::Node> ResizeBilinear::createNode() {
input_height = inputDimensions[1];
}

if (!useNchw) inputNode = transpose(NHWC_NCHW, inputNode);
const auto& inputIndex = mOpModelInfo->getOperationInput(mNnapiOperationIndex, 0);
const auto inputOp = mOpModelInfo->getOperand(inputIndex);
if (!useNchw) { // No conversion needed if useNchw set
inputNode = transpose(NHWC_NCHW, inputNode);
}

// FLOAT16 type check added for future when VPUX plugin support is added
if (checkInputOperandType(1, (int32_t)OperandType::FLOAT32) ||
checkInputOperandType(1, (int32_t)OperandType::FLOAT16)) {
if (checkInputOperandType(1, (int32_t)OperandType::FLOAT32)) {
// In tensorflow lite, resizing by size is supported. Scaling factors are
// calculated based on output shape.
attrs.shape_calculation_mode = ov::op::v4::Interpolate::ShapeCalcMode::sizes;
Expand All @@ -87,6 +79,14 @@ std::shared_ptr<ov::Node> ResizeBilinear::createNode() {
// integer
width_scale = (float)out_width / (float)input_width;
height_scale = (float)out_height / (float)input_height;
} else if (checkInputOperandType(1, (int32_t)OperandType::FLOAT16)) {
attrs.shape_calculation_mode = ov::op::v4::Interpolate::ShapeCalcMode::sizes;
width_scale = mOpModelInfo->ParseOperationInput<_Float16>(mNnapiOperationIndex, 1);
height_scale = mOpModelInfo->ParseOperationInput<_Float16>(mNnapiOperationIndex, 2);
out_width = (int)(input_width * width_scale);
out_height = (int)(input_height * height_scale);
width_scale = (float)out_width / (float)input_width;
height_scale = (float)out_height / (float)input_height;
} else if (checkInputOperandType(1, (int32_t)OperandType::INT32)) {
attrs.shape_calculation_mode = ov::op::v4::Interpolate::ShapeCalcMode::sizes;
out_width = mOpModelInfo->ParseOperationInput<int>(mNnapiOperationIndex, 1);
Expand All @@ -109,7 +109,7 @@ std::shared_ptr<ov::Node> ResizeBilinear::createNode() {
}

// mode is passed as "linear_onnx" for bilinear interpolation
attrs.mode = ov::op::v4::Interpolate::InterpolateMode::linear;
attrs.mode = ov::op::v4::Interpolate::InterpolateMode::linear_onnx;

std::vector<int32_t> output_shape = {out_height, out_width};
auto outputShapeNode = createConstNode(ov::element::i32, {2}, output_shape);
Expand All @@ -128,12 +128,14 @@ std::shared_ptr<ov::Node> ResizeBilinear::createNode() {

outputNode = std::make_shared<ov::op::v4::Interpolate>(inputNode, outputShapeNode, scaleNode,
axesNode, attrs);
auto outputIndex = mOpModelInfo->getOperationOutput(mNnapiOperationIndex, 0);
if (!useNchw) {
outputNode = transpose(NCHW_NHWC, outputNode);
}
return outputNode;
}


} // namespace nnhal
} // namespace neuralnetworks
} // namespace hardware
Expand Down
37 changes: 17 additions & 20 deletions ngraph_creator/operations/src/ResizeNearestNeighbor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -12,18 +12,6 @@ ResizeNearestNeighbor::ResizeNearestNeighbor(int operationIndex, GraphMetadata g
}

bool ResizeNearestNeighbor::validate() {
// TODO Add FLOAT16 check when VPUX plugin is supported
if (!checkOutputOperandType(0, (int32_t)OperandType::TENSOR_FLOAT32) &&
!checkOutputOperandType(0, (int32_t)OperandType::TENSOR_QUANT8_ASYMM)) {
ALOGE("%s check for output types failed", __func__);
return false;
}

if (!checkInputOperandType(0, (int32_t)OperandType::TENSOR_FLOAT32) &&
!checkInputOperandType(0, (int32_t)OperandType::TENSOR_QUANT8_ASYMM)) {
return false;
}

const auto& inputDimensionsSize = getInputOperandDimensions(0).size();
if (inputDimensionsSize != 4) {
ALOGE("%s Invalid dimensions size for input(%lu)", __func__, inputDimensionsSize);
Expand Down Expand Up @@ -72,10 +60,12 @@ std::shared_ptr<ov::Node> ResizeNearestNeighbor::createNode() {
input_height = inputDimensions[1];
}

if (!useNchw) inputNode = transpose(NHWC_NCHW, inputNode);
// FLOAT16 type check added for future when VPUX plugin support is added
if (checkInputOperandType(1, (int32_t)OperandType::FLOAT32) ||
checkInputOperandType(1, (int32_t)OperandType::FLOAT16)) {
const auto& inputIndex = mOpModelInfo->getOperationInput(mNnapiOperationIndex, 0);
const auto inputOp = mOpModelInfo->getOperand(inputIndex);
if (!useNchw) {
inputNode = transpose(NHWC_NCHW, inputNode);
}
if (checkInputOperandType(1, (int32_t)OperandType::FLOAT32)) {
// In tensorflow lite, resizing by size is supported. Scaling factors are
// calculated based on output shape.
attrs.shape_calculation_mode = ov::op::v4::Interpolate::ShapeCalcMode::sizes;
Expand All @@ -87,6 +77,14 @@ std::shared_ptr<ov::Node> ResizeNearestNeighbor::createNode() {
// integer
width_scale = (float)out_width / (float)input_width;
height_scale = (float)out_height / (float)input_height;
} else if (checkInputOperandType(1, (int32_t)OperandType::FLOAT16)) {
attrs.shape_calculation_mode = ov::op::v4::Interpolate::ShapeCalcMode::sizes;
width_scale = mOpModelInfo->ParseOperationInput<_Float16>(mNnapiOperationIndex, 1);
height_scale = mOpModelInfo->ParseOperationInput<_Float16>(mNnapiOperationIndex, 2);
out_width = (int)(input_width * width_scale);
out_height = (int)(input_height * height_scale);
width_scale = (float)out_width / (float)input_width;
height_scale = (float)out_height / (float)input_height;
} else if (checkInputOperandType(1, (int32_t)OperandType::INT32)) {
attrs.shape_calculation_mode = ov::op::v4::Interpolate::ShapeCalcMode::sizes;
out_width = mOpModelInfo->ParseOperationInput<int>(mNnapiOperationIndex, 1);
Expand All @@ -102,15 +100,14 @@ std::shared_ptr<ov::Node> ResizeNearestNeighbor::createNode() {
attrs.coordinate_transformation_mode =
ov::op::v4::Interpolate::CoordinateTransformMode::half_pixel;
} else {
// If none of the align_corners and half_pixel are true, transformation
// If both align_corners and half_pixel are false, transformation
// mode is set to asymmetric
attrs.coordinate_transformation_mode =
ov::op::v4::Interpolate::CoordinateTransformMode::asymmetric;
}

// mode is passed as "nearest" for Nearest Neighbor interpolation
attrs.mode = ov::op::v4::Interpolate::InterpolateMode::nearest;
attrs.nearest_mode = ov::op::v4::Interpolate::NearestMode::floor;
// NOTE(review): mode is set to "linear_onnx" here, but this is the nearest-neighbor op
// (previously InterpolateMode::nearest) — looks like a copy-paste from ResizeBilinear.cpp; confirm intent
attrs.mode = ov::op::v4::Interpolate::InterpolateMode::linear_onnx;

std::vector<int32_t> output_shape = {out_height, out_width};
auto outputShapeNode = createConstNode(ov::element::i32, {2}, output_shape);
Expand Down

0 comments on commit 3f389cd

Please sign in to comment.