diff --git a/src/core/CL/kernels/CLArgMinMaxLayerKernel.cpp b/src/core/CL/kernels/CLArgMinMaxLayerKernel.cpp index 5b72354abea..672131b7549 100644 --- a/src/core/CL/kernels/CLArgMinMaxLayerKernel.cpp +++ b/src/core/CL/kernels/CLArgMinMaxLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2021, 2023 Arm Limited. + * Copyright (c) 2019-2021, 2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -30,9 +30,9 @@ #include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/Utils.h" #include "arm_compute/core/utils/helpers/AdjustVecSize.h" -#include "arm_compute/core/Validate.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -44,10 +44,12 @@ namespace Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, - DataType::S32, DataType::F16, DataType::F32); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32, DataType::S64); + const size_t one_channel = 1u; + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN( + input, one_channel, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S32, DataType::F16, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, one_channel, DataType::S32, DataType::S64); ARM_COMPUTE_RETURN_ERROR_ON_MSG(op != ReductionOperation::ARG_IDX_MAX && op != ReductionOperation::ARG_IDX_MIN, "Only ARG_IDX_MAX and ARG_IDX_MIN are supported"); @@ -57,8 +59,13 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, u if (output->total_size() != 0) { - 
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U32, DataType::S32, DataType::S64, - DataType::U64); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); + } + else + { + // Assume largest possible data type since we don't know. + const auto output_info = TensorInfo(input->tensor_shape(), one_channel, DataType::S64); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); } return Status{}; diff --git a/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp b/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp index c88a852a44e..53441d65918 100644 --- a/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp +++ b/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021, 2023 Arm Limited. + * Copyright (c) 2017-2021, 2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -34,6 +34,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -52,19 +53,24 @@ Status validate_arguments(const ITensorInfo *input, ActivationLayerInfo act_info) { ARM_COMPUTE_UNUSED(epsilon); + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, mean, var); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input, mean, var); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32); + const size_t one_channel = 1u; + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, one_channel, DataType::F16, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mean, var); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, mean, var); ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(get_data_layout_dimension_index( input->data_layout(), DataLayoutDimension::CHANNEL)) != mean->dimension(0)); if (beta != nullptr) { + 
ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(beta); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mean, beta); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, beta); } if (gamma != nullptr) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(gamma); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mean, gamma); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, gamma); } @@ -82,10 +88,16 @@ Status validate_arguments(const ITensorInfo *input, if (output != nullptr && output->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); } + else + { + // No configured output. Since `output` is expected to match `input`, + // there's nothing extra to check in this case. + } return Status{}; } diff --git a/src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp b/src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp index c640b5a8d6f..9046c29c20d 100644 --- a/src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp +++ b/src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021, 2023 Arm Limited. + * Copyright (c) 2018-2021, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -30,6 +30,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -42,16 +43,23 @@ namespace Status validate_arguments(const ITensorInfo *input, const ITensorInfo *block_info, const ITensorInfo *output) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, block_info, output); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(block_info, 1, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input, block_info); + const size_t one_channel = 1u; + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(block_info, one_channel, DataType::S32); ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4); ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN); // Validate output if initialized if (output->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON(output->num_dimensions() > 4); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); } + else + { + // Ignored; dynamic block is deprecated. 
+ } return Status{}; } @@ -62,6 +70,9 @@ Status validate_arguments_static(const ITensorInfo *input, const CropInfo &crop_info) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input); + const size_t one_channel = 1u; + ARM_COMPUTE_RETURN_ERROR_ON(input->num_channels() != one_channel); ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4); ARM_COMPUTE_RETURN_ERROR_ON(block_shape_x <= 0); ARM_COMPUTE_RETURN_ERROR_ON(block_shape_y <= 0); @@ -70,16 +81,23 @@ Status validate_arguments_static(const ITensorInfo *input, const int idx_batch = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES); ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_batch] % (block_shape_x * block_shape_y) != 0); + const TensorShape expected_output_shape = compute_batch_to_space_shape(input->data_layout(), input->tensor_shape(), + block_shape_x, block_shape_y, crop_info); + // Validate output if initialized if (output->total_size() != 0) { - const TensorShape expected_output_shape = compute_batch_to_space_shape( - input->data_layout(), input->tensor_shape(), block_shape_x, block_shape_y, crop_info); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); const TensorInfo expected_output = output->clone()->set_tensor_shape(expected_output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &expected_output); ARM_COMPUTE_RETURN_ERROR_ON(output->num_dimensions() > 4); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); } + else + { + const auto output_info = TensorInfo(expected_output_shape, one_channel, input->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); + } return Status{}; } diff --git a/src/core/CL/kernels/CLBitwiseKernel.cpp b/src/core/CL/kernels/CLBitwiseKernel.cpp index de3fb43de8c..a1a06ed4f2b 100644 --- a/src/core/CL/kernels/CLBitwiseKernel.cpp +++ b/src/core/CL/kernels/CLBitwiseKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2021, 2023 Arm Limited. 
+ * Copyright (c) 2020-2021, 2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -31,6 +31,7 @@ #include "arm_compute/core/utils/helpers/AdjustVecSize.h" #include "arm_compute/core/Validate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -49,17 +50,23 @@ void CLBitwiseKernel::configure(const CLCompileContext &compile_context, BitwiseOperation op) { ARM_COMPUTE_ERROR_ON_NULLPTR(input1); - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::U8); + ARM_COMPUTE_ERROR_ON_SIZE_UNSUPPORTED(input1->info()); +#ifdef ARM_COMPUTE_ASSERTS_ENABLED + const size_t one_channel = 1u; +#endif + ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, one_channel, DataType::U8); if (op != BitwiseOperation::NOT) { ARM_COMPUTE_ERROR_ON_NULLPTR(input2); - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::U8); + ARM_COMPUTE_ERROR_ON_SIZE_UNSUPPORTED(input2->info()); + ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, one_channel, DataType::U8); } ARM_COMPUTE_ERROR_ON_NULLPTR(output); - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8); + ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, one_channel, DataType::U8); - // Output auto inizialitation if not yet initialized + // Output auto initialization if not yet initialized auto_init_if_empty(*(output->info()), *(input1->info())); + ARM_COMPUTE_ERROR_ON_SIZE_UNSUPPORTED(output->info()); auto padding_info = get_padding_info({input1, input2, output}); // Configure kernel window diff --git a/src/core/CL/kernels/CLBoundingBoxTransformKernel.cpp b/src/core/CL/kernels/CLBoundingBoxTransformKernel.cpp index f32c518e29c..08906abf2ce 100644 --- a/src/core/CL/kernels/CLBoundingBoxTransformKernel.cpp +++ b/src/core/CL/kernels/CLBoundingBoxTransformKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021, 2023 Arm Limited. 
+ * Copyright (c) 2018-2021, 2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -33,6 +33,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -47,9 +48,13 @@ Status validate_arguments(const ITensorInfo *boxes, const BoundingBoxTransformInfo &info) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(boxes, pred_boxes, deltas); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(boxes, deltas); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(boxes); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(boxes, DataType::QASYMM16, DataType::F32, DataType::F16); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(deltas, DataType::QASYMM8, DataType::F32, DataType::F16); + const size_t one_channel = 1u; + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(boxes, one_channel, DataType::QASYMM16, DataType::F32, + DataType::F16); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(deltas, one_channel, DataType::QASYMM8, DataType::F32, + DataType::F16); ARM_COMPUTE_RETURN_ERROR_ON(deltas->tensor_shape()[1] != boxes->tensor_shape()[1]); ARM_COMPUTE_RETURN_ERROR_ON(deltas->tensor_shape()[0] % 4 != 0); ARM_COMPUTE_RETURN_ERROR_ON(boxes->tensor_shape()[0] != 4); @@ -62,7 +67,7 @@ Status validate_arguments(const ITensorInfo *boxes, const UniformQuantizationInfo boxes_qinfo = boxes->quantization_info().uniform(); ARM_COMPUTE_RETURN_ERROR_ON(boxes_qinfo.scale != 0.125f); ARM_COMPUTE_RETURN_ERROR_ON(boxes_qinfo.offset != 0); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(deltas, DataType::QASYMM8); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(deltas, one_channel, DataType::QASYMM8); } else { @@ -71,6 +76,7 @@ Status validate_arguments(const ITensorInfo *boxes, if (pred_boxes->total_size() > 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(pred_boxes); 
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(pred_boxes->tensor_shape(), deltas->tensor_shape()); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(pred_boxes, boxes); ARM_COMPUTE_RETURN_ERROR_ON(pred_boxes->num_dimensions() > 2); @@ -81,6 +87,11 @@ Status validate_arguments(const ITensorInfo *boxes, ARM_COMPUTE_RETURN_ERROR_ON(pred_boxes_qinfo.offset != 0); } } + else + { + const auto pred_boxes_info = TensorInfo(deltas->tensor_shape(), one_channel, boxes->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&pred_boxes_info); + } ARM_COMPUTE_RETURN_ERROR_ON(info.scale() <= 0); return Status{}; diff --git a/src/core/CL/kernels/CLChannelShuffleLayerKernel.cpp b/src/core/CL/kernels/CLChannelShuffleLayerKernel.cpp index ec58bf9e7a1..3e5c92927bc 100644 --- a/src/core/CL/kernels/CLChannelShuffleLayerKernel.cpp +++ b/src/core/CL/kernels/CLChannelShuffleLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021, 2023 Arm Limited. + * Copyright (c) 2018-2021, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -33,6 +33,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -43,6 +44,8 @@ namespace { Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, unsigned int num_groups) { + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN); ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_groups < 2, "Channel shuffling with less than 2 groups would be inefficient"); @@ -61,10 +64,16 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, u // Checks performed when output is configured if (output->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); } + else + { + // No configured output. Since `output` is expected to match `input`, + // there's nothing extra to check in this case. + } return Status{}; } diff --git a/src/core/CL/kernels/CLComparisonKernel.cpp b/src/core/CL/kernels/CLComparisonKernel.cpp index a0f9aca54a8..2d3f3972bab 100644 --- a/src/core/CL/kernels/CLComparisonKernel.cpp +++ b/src/core/CL/kernels/CLComparisonKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021, 2023 Arm Limited. + * Copyright (c) 2018-2021, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -25,10 +25,12 @@ #include "arm_compute/core/CL/CLHelpers.h" #include "arm_compute/core/CL/ICLTensor.h" +#include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/utils/helpers/AdjustVecSize.h" #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -51,6 +53,7 @@ Status validate_arguments(const ITensorInfo &input1, const ITensorInfo &output, ComparisonOperation operation) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&input1, &input2); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(&input1); ARM_COMPUTE_RETURN_ERROR_ON(input1.data_type() == DataType::UNKNOWN); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&input1, &input2); @@ -59,13 +62,21 @@ Status validate_arguments(const ITensorInfo &input1, const TensorShape out_shape = TensorShape::broadcast_shape(input1.tensor_shape(), input2.tensor_shape()); ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible"); + const size_t one_channel = 1u; + // Validate in case of configured output if (output.total_size() > 0) { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&output, 1, DataType::U8); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&output, one_channel, DataType::U8); ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, output.tensor_shape(), 0), "Wrong shape for output"); } + else + { + const auto output_info = TensorInfo(out_shape, one_channel, DataType::U8); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); + } return Status{}; } diff --git a/src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.cpp b/src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.cpp index f8ecc4c098f..998f9c426bd 100644 --- 
a/src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.cpp +++ b/src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021, 2023 Arm Limited. + * Copyright (c) 2017-2021, 2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -31,6 +31,7 @@ #include "arm_compute/core/Validate.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/WindowHelpers.h" namespace arm_compute @@ -47,6 +48,7 @@ Status CLDeconvolutionLayerUpsampleKernel::validate(const ITensorInfo *input, { ARM_COMPUTE_UNUSED(info); ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input, output); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); diff --git a/src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.cpp b/src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.cpp index b33e0a8b6f8..f83cf102f4c 100644 --- a/src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.cpp +++ b/src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2021, 2023 Arm Limited. + * Copyright (c) 2019-2021, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -27,10 +27,12 @@ #include "arm_compute/core/CL/CLKernelLibrary.h" #include "arm_compute/core/CL/ICLTensor.h" #include "arm_compute/core/Helpers.h" +#include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/utils/misc/ShapeCalculator.h" #include "arm_compute/core/utils/StringUtils.h" #include "arm_compute/core/Validate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -47,6 +49,7 @@ Status validate_arguments(const ITensorInfo *input, const PadStrideInfo &deconv_info) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output, input_info, weights_info); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input, input_info, weights_info); const DataLayout data_layout = input_info->data_layout(); const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH); @@ -58,8 +61,9 @@ Status validate_arguments(const ITensorInfo *input, ARM_COMPUTE_RETURN_ERROR_ON(weights_info->dimension(idx_w) != deconv_info.stride().first); ARM_COMPUTE_RETURN_ERROR_ON(weights_info->dimension(idx_h) != deconv_info.stride().second); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16, DataType::QASYMM8, - DataType::QASYMM8_SIGNED, DataType::S32); + const size_t one_channel = 1u; + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, one_channel, DataType::F32, DataType::F16, + DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S32); if (!is_qasymm) { ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, input_info, weights_info); @@ -72,9 +76,10 @@ Status validate_arguments(const ITensorInfo *input, if (bias != nullptr) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(bias); if (is_qasymm) { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, one_channel, 
DataType::S32); } else { @@ -83,17 +88,24 @@ Status validate_arguments(const ITensorInfo *input, ARM_COMPUTE_RETURN_ERROR_ON(bias->dimension(0) != weights_info->dimension(idx_b)); } - if (output->total_size() != 0) - { - const PadStrideInfo stride_info(deconv_info.stride().first, deconv_info.stride().second); - auto out_dims = deconvolution_output_dimensions(input_info->dimension(idx_w), input_info->dimension(idx_h), - weights_info->dimension(idx_w), weights_info->dimension(idx_h), - stride_info); + const PadStrideInfo stride_info(deconv_info.stride().first, deconv_info.stride().second); + const auto out_dims = + deconvolution_output_dimensions(input_info->dimension(idx_w), input_info->dimension(idx_h), + weights_info->dimension(idx_w), weights_info->dimension(idx_h), stride_info); - const TensorShape output_shape = - misc::shape_calculator::compute_deconvolution_output_shape(out_dims, *input_info, *weights_info); + const TensorShape output_shape = + misc::shape_calculator::compute_deconvolution_output_shape(out_dims, *input_info, *weights_info); + if (output->total_size() != 0) + { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(output, input); + } + else + { + const auto output_info = TensorInfo(output_shape, one_channel, input->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); } return Status{}; } diff --git a/src/core/CL/kernels/CLDepthToSpaceLayerKernel.cpp b/src/core/CL/kernels/CLDepthToSpaceLayerKernel.cpp index cdf19ab2e1b..f0a04106b3d 100644 --- a/src/core/CL/kernels/CLDepthToSpaceLayerKernel.cpp +++ b/src/core/CL/kernels/CLDepthToSpaceLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2021, 2023 Arm Limited. + * Copyright (c) 2019-2021, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -25,10 +25,12 @@ #include "arm_compute/core/CL/CLHelpers.h" #include "arm_compute/core/CL/ICLTensor.h" +#include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/utils/misc/ShapeCalculator.h" #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -41,6 +43,9 @@ namespace Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, int32_t block_shape) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input); + const size_t one_channel = 1u; + ARM_COMPUTE_RETURN_ERROR_ON(input->num_channels() != one_channel); ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN); ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4); ARM_COMPUTE_RETURN_ERROR_ON(block_shape < 2); @@ -49,18 +54,22 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, i const int idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL); ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_channel] % (block_shape * block_shape) != 0); + const TensorShape output_shape = + compute_depth_to_space_shape(input->tensor_shape(), input->data_layout(), block_shape); + const auto output_info = TensorInfo(output_shape, one_channel, input->data_type()); + // Validate output if initialized if (output->total_size() != 0) { - const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH); - const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT); - ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape()[idx_width] != - (block_shape * input->tensor_shape()[idx_width])); - ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape()[idx_height] != - (block_shape * 
input->tensor_shape()[idx_height])); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &output_info); ARM_COMPUTE_RETURN_ERROR_ON(output->num_dimensions() > 4); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); } + else + { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); + } return Status{}; } diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.cpp index b95abe795f3..4bc3387faf2 100644 --- a/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.cpp +++ b/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2023 Arm Limited. + * Copyright (c) 2019-2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -37,6 +37,7 @@ #include "src/core/CL/CLUtils.h" #include "src/core/CL/CLValidate.h" #include "src/core/CL/ICLKernel.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "src/gpu/cl/kernels/gemm/ClGemmHelpers.h" @@ -62,11 +63,13 @@ Status validate_arguments(const ITensorInfo *input, in_place = true; output = input; } - ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights); + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input, weights); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(input, DataLayout::NHWC); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, - DataType::F16, DataType::F32); + const size_t one_channel = 1u; + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, one_channel, DataType::QASYMM8, + DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON(conv_info.pad_stride_info.stride().first > 1 && dwc_info.m0 != 1); 
ARM_COMPUTE_RETURN_ERROR_ON(conv_info.dilation.x() > 1 && dwc_info.m0 != 1); ARM_COMPUTE_RETURN_ERROR_ON((dwc_info.export_input_to_cl_image == true)); @@ -112,12 +115,13 @@ Status validate_arguments(const ITensorInfo *input, if (biases != nullptr) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(biases); ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != output_shape[idx_c]); ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1); if (is_quantized) { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, one_channel, DataType::S32); } else { @@ -128,14 +132,15 @@ Status validate_arguments(const ITensorInfo *input, if (is_quantized) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output_multipliers, output_shifts); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_multipliers, 1, DataType::S32); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_shifts, 1, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output_multipliers, output_shifts); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_multipliers, one_channel, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_shifts, one_channel, DataType::S32); ARM_COMPUTE_RETURN_ERROR_ON(output_multipliers->num_dimensions() > 1); ARM_COMPUTE_RETURN_ERROR_ON(output_shifts->num_dimensions() > 1); if (is_data_type_quantized_per_channel(weights->data_type())) { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QSYMM8_PER_CHANNEL); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, one_channel, DataType::QSYMM8_PER_CHANNEL); ARM_COMPUTE_RETURN_ERROR_ON(output_shape[idx_c] != output_multipliers->dimension(0)); ARM_COMPUTE_RETURN_ERROR_ON(output_shape[idx_c] != output_shifts->dimension(0)); } @@ -153,9 +158,15 @@ Status validate_arguments(const ITensorInfo *input, if (output->total_size() != 0) { + 
ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); } + else + { + const auto output_info = TensorInfo(output_shape, one_channel, input->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); + } if (is_data_type_quantized(input->data_type())) { diff --git a/src/core/CL/kernels/CLFFTDigitReverseKernel.cpp b/src/core/CL/kernels/CLFFTDigitReverseKernel.cpp index 3d8f875ef7b..b9f2cf85487 100644 --- a/src/core/CL/kernels/CLFFTDigitReverseKernel.cpp +++ b/src/core/CL/kernels/CLFFTDigitReverseKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2021, 2023 Arm Limited. + * Copyright (c) 2019-2021, 2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -30,6 +30,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -43,20 +44,30 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *idx, const FFTDigitReverseKernelInfo &config) { + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, idx); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input, idx); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(input, DataType::F16, DataType::F32); - ARM_COMPUTE_RETURN_ERROR_ON(input->num_channels() != 1 && input->num_channels() != 2); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(idx, 1, DataType::U32); + const size_t one_channel = 1u; + const size_t two_channels = 2u; + ARM_COMPUTE_RETURN_ERROR_ON(input->num_channels() != one_channel && input->num_channels() != two_channels); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(idx, one_channel, DataType::U32); ARM_COMPUTE_RETURN_ERROR_ON(std::set({0, 1}).count(config.axis) == 0); 
ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[config.axis] != idx->tensor_shape().x()); // Checks performed when output is configured if ((output != nullptr) && (output->total_size() != 0)) { - ARM_COMPUTE_RETURN_ERROR_ON(output->num_channels() != 2); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); + ARM_COMPUTE_RETURN_ERROR_ON(output->num_channels() != two_channels); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); } + else + { + const auto output_info = TensorInfo(input->tensor_shape(), two_channels, input->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); + } return Status{}; } diff --git a/src/core/CL/kernels/CLFFTRadixStageKernel.cpp b/src/core/CL/kernels/CLFFTRadixStageKernel.cpp index 3729e6b77dd..19b558b8f55 100644 --- a/src/core/CL/kernels/CLFFTRadixStageKernel.cpp +++ b/src/core/CL/kernels/CLFFTRadixStageKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2021, 2023 Arm Limited. + * Copyright (c) 2019-2021, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -31,6 +31,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -44,8 +45,11 @@ namespace { Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const FFTRadixStageKernelInfo &config) { + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 2, DataType::F16, DataType::F32); + const size_t two_channels = 2u; + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, two_channels, DataType::F16, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON(CLFFTRadixStageKernel::supported_radix().count(config.radix) == 0); ARM_COMPUTE_RETURN_ERROR_ON(std::set({0, 1}).count(config.axis) == 0); ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[config.axis] % config.radix); @@ -53,9 +57,15 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c // Checks performed when output is configured if ((output != nullptr) && (output->total_size() != 0)) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); } + else + { + // No configured output. Since `output` is expected to match `input`, + // there's nothing extra to check in this case. + } return Status{}; } diff --git a/src/core/CL/kernels/CLFFTScaleKernel.cpp b/src/core/CL/kernels/CLFFTScaleKernel.cpp index be6e16b074b..0fffdce3c34 100644 --- a/src/core/CL/kernels/CLFFTScaleKernel.cpp +++ b/src/core/CL/kernels/CLFFTScaleKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2021, 2023 Arm Limited. + * Copyright (c) 2019-2021, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -30,6 +30,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -40,16 +41,26 @@ namespace { Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output) { + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 2, DataType::F16, DataType::F32); + const size_t two_channels = 2u; + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, two_channels, DataType::F16, DataType::F32); // Checks performed when output is configured if ((output != nullptr) && (output->total_size() != 0)) { - ARM_COMPUTE_RETURN_ERROR_ON(output->num_channels() != 1 && output->num_channels() != 2); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); + const size_t one_channel = 1u; + ARM_COMPUTE_RETURN_ERROR_ON(output->num_channels() != one_channel && output->num_channels() != two_channels); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); } + else + { + // No configured output. Since `output` is expected to match `input`, + // there's nothing extra to check in this case. + } return Status{}; } diff --git a/src/core/CL/kernels/CLFillBorderKernel.cpp b/src/core/CL/kernels/CLFillBorderKernel.cpp index 86bb502da38..6bae868ab33 100644 --- a/src/core/CL/kernels/CLFillBorderKernel.cpp +++ b/src/core/CL/kernels/CLFillBorderKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021, 2023 Arm Limited. + * Copyright (c) 2016-2021, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -32,6 +32,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "arm_compute/core/Validate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" #include "support/StringSupport.h" @@ -80,7 +81,8 @@ void CLFillBorderKernel::configure(const CLCompileContext &compile_context, BorderMode border_mode, const PixelValue &constant_border_value) { - ARM_COMPUTE_ERROR_ON(tensor == nullptr); + ARM_COMPUTE_ERROR_ON_NULLPTR(tensor); + ARM_COMPUTE_ERROR_ON_SIZE_UNSUPPORTED(tensor); ARM_COMPUTE_ERROR_ON(tensor->num_channels() != 1); auto padding_info = get_padding_info({tensor}); diff --git a/src/core/CL/kernels/CLFuseBatchNormalizationKernel.cpp b/src/core/CL/kernels/CLFuseBatchNormalizationKernel.cpp index 7da0679ae40..7bace4e6b7b 100644 --- a/src/core/CL/kernels/CLFuseBatchNormalizationKernel.cpp +++ b/src/core/CL/kernels/CLFuseBatchNormalizationKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021, 2023 Arm Limited. + * Copyright (c) 2018-2021, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -32,6 +32,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -53,8 +54,10 @@ Status validate_arguments(const ITensorInfo *input_weights, { ARM_COMPUTE_UNUSED(epsilon); ARM_COMPUTE_ERROR_ON_NULLPTR(input_weights, bn_mean, bn_var); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input_weights, bn_mean, bn_var); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input_weights); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input_weights, 1, DataType::F16, DataType::F32); + const size_t one_channel = 1u; + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input_weights, one_channel, DataType::F16, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(bn_mean, bn_var); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_weights, bn_mean, bn_var); ARM_COMPUTE_RETURN_ERROR_ON(input_bias == nullptr && fused_bias == nullptr); @@ -74,34 +77,49 @@ Status validate_arguments(const ITensorInfo *input_weights, // Validate bias if (input_bias != nullptr) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input_bias); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(bn_mean, input_bias); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_weights, input_bias); } // Validate beta if (bn_beta != nullptr) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(bn_beta); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(bn_mean, bn_beta); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_weights, bn_beta); } // Validate gamma if (bn_gamma != nullptr) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(bn_gamma); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(bn_mean, bn_gamma); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_weights, bn_gamma); } // Validate output weights if (fused_weights != nullptr && 
fused_weights->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(fused_weights); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input_weights, fused_weights); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input_weights, fused_weights); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_weights, fused_weights); } + else + { + // No configured output. Since `fused_weights` is expected to match + // `input_weights`, there's nothing extra to check in this case. + } // Validate output bias if (fused_bias != nullptr && fused_bias->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(fused_bias); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(bn_mean, fused_bias); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_weights, fused_bias); } + else + { + const auto fused_bias_info = TensorInfo(bn_mean->tensor_shape(), one_channel, input_weights->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&fused_bias_info); + } return Status{}; } diff --git a/src/core/CL/kernels/CLGatherKernel.cpp b/src/core/CL/kernels/CLGatherKernel.cpp index 904bb072823..e3cf51aa61a 100644 --- a/src/core/CL/kernels/CLGatherKernel.cpp +++ b/src/core/CL/kernels/CLGatherKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021, 2023-2024 Arm Limited. + * Copyright (c) 2018-2021, 2023-2024, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -24,8 +24,10 @@ #include "src/core/CL/kernels/CLGatherKernel.h" #include "arm_compute/core/CL/ICLTensor.h" +#include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/utils/misc/ShapeCalculator.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -40,22 +42,32 @@ inline Status validate_arguments(const ITensorInfo *input, const ITensorInfo *indices, const ITensorInfo *output, int axis) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, indices, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input, indices); const uint32_t actual_axis = wrap_around(axis, static_cast<int>(input->num_dimensions())); ARM_COMPUTE_RETURN_ERROR_ON((input->num_dimensions() + indices->num_dimensions() - 1) > 4); ARM_COMPUTE_RETURN_ERROR_ON(actual_axis >= input->num_dimensions()); ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN); + const size_t one_channel = 1u; + ARM_COMPUTE_RETURN_ERROR_ON(input->num_channels() != one_channel); + + const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_gather_shape( + input->tensor_shape(), indices->tensor_shape(), actual_axis); if (output->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output); - TensorShape output_shape = arm_compute::misc::shape_calculator::compute_gather_shape( - input->tensor_shape(), indices->tensor_shape(), actual_axis); ARM_COMPUTE_RETURN_ERROR_ON(output_shape.total_size() != output->tensor_shape().total_size()); } + else + { + const auto output_info = TensorInfo(output_shape, one_channel, input->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); + } - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(indices, 1, DataType::U32, DataType::S32); + 
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(indices, one_channel, DataType::U32, DataType::S32); return Status{}; } diff --git a/src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp b/src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp index b9ff72b9289..20fe0eccd51 100644 --- a/src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp +++ b/src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2021, 2023 Arm Limited. + * Copyright (c) 2019-2021, 2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -33,6 +33,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -44,25 +45,33 @@ namespace Status validate_arguments(const ITensorInfo *anchors, const ITensorInfo *all_anchors, const ComputeAnchorsInfo &info) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(anchors, all_anchors); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(anchors); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(anchors); ARM_COMPUTE_RETURN_ERROR_ON(anchors->dimension(0) != info.values_per_roi()); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(anchors, DataType::QSYMM16, DataType::F16, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON(anchors->num_dimensions() > 2); + const size_t feature_height = info.feat_height(); + const size_t feature_width = info.feat_width(); + const size_t num_anchors = anchors->dimension(1); + const TensorShape output_shape(info.values_per_roi(), feature_width * feature_height * num_anchors); + const size_t one_channel = 1u; + const auto all_anchors_info = + TensorInfo(output_shape, one_channel, anchors->data_type(), anchors->quantization_info()); if (all_anchors->total_size() > 0) { - size_t feature_height = info.feat_height(); - size_t feature_width = info.feat_width(); - size_t num_anchors = anchors->dimension(1); + 
ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(all_anchors); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(all_anchors, anchors); - ARM_COMPUTE_RETURN_ERROR_ON(all_anchors->num_dimensions() > 2); - ARM_COMPUTE_RETURN_ERROR_ON(all_anchors->dimension(0) != info.values_per_roi()); - ARM_COMPUTE_RETURN_ERROR_ON(all_anchors->dimension(1) != feature_height * feature_width * num_anchors); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(all_anchors, &all_anchors_info); if (is_data_type_quantized(anchors->data_type())) { ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(anchors, all_anchors); } } + else + { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&all_anchors_info); + } return Status{}; } } // namespace diff --git a/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp b/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp index b13eb165565..762bb6163e9 100644 --- a/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp +++ b/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2021, 2023 Arm Limited. + * Copyright (c) 2019-2021, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -32,6 +32,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -44,32 +45,54 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const InstanceNormalizationLayerKernelInfo &info) { + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON_MSG(info.epsilon == 0.f, "Epsilon must be different than 0"); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(input, DataType::F16, DataType::F32); if (output != nullptr && output->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->num_channels() != output->num_channels(), "Input and output have different number of channels"); } + else + { + // No configured output. Since `output` is expected to match `input`, + // there's nothing extra to check in this case. 
+ } return Status{}; } Status validate_arguments_meanvar(const ITensorInfo *input, const ITensorInfo *output) { + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(input, DataType::F16, DataType::F32); if (output != nullptr && output->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->num_channels() != output->num_channels(), "Input and output have different number of channels"); } + else + { + const auto data_layout = input->data_layout(); + const unsigned int channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL); + const unsigned int batches_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES); + const unsigned int input_channel = input->dimension(channel_idx); + const unsigned int input_batches = input->dimension(batches_idx); + const TensorShape out_shape(input_channel, 2u, input_batches); + const auto output_info = TensorInfo(out_shape, input->num_channels(), input->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); + } return Status{}; } diff --git a/src/core/CL/kernels/CLL2NormalizeLayerKernel.cpp b/src/core/CL/kernels/CLL2NormalizeLayerKernel.cpp index 9ed9d7c5b00..1334cda53c5 100644 --- a/src/core/CL/kernels/CLL2NormalizeLayerKernel.cpp +++ b/src/core/CL/kernels/CLL2NormalizeLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021, 2023 Arm Limited. + * Copyright (c) 2017-2021, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -33,6 +33,7 @@ #include "arm_compute/core/Validate.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -50,6 +51,7 @@ validate_arguments(const ITensorInfo *input, const ITensorInfo *sum, const ITens const uint32_t actual_axis = wrap_around(axis, max_input_tensor_dim); ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, sum, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input, sum); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, sum); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32); @@ -64,11 +66,17 @@ validate_arguments(const ITensorInfo *input, const ITensorInfo *sum, const ITens if (output->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(input->tensor_shape(), output->tensor_shape()); } + else + { + // No configured output. Since `output` is expected to match `input`, + // there's nothing extra to check in this case. + } return Status{}; } diff --git a/src/core/CL/kernels/CLMaxUnpoolingLayerKernel.cpp b/src/core/CL/kernels/CLMaxUnpoolingLayerKernel.cpp index e560f1de4a6..b9762497463 100644 --- a/src/core/CL/kernels/CLMaxUnpoolingLayerKernel.cpp +++ b/src/core/CL/kernels/CLMaxUnpoolingLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2021, 2023 Arm Limited. + * Copyright (c) 2020-2021, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -33,6 +33,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -49,6 +50,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *indices) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output, indices); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input, indices); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32); @@ -67,11 +69,20 @@ Status validate_arguments(const ITensorInfo *input, ARM_COMPUTE_RETURN_ERROR_ON_MSG(pool_type != PoolingType::MAX, "Pooling indices only supported for MAX pooling method"); ARM_COMPUTE_RETURN_ERROR_ON_MSG((pool_size != Size2D(2, 2)), "Pooling indices only supported for pool size 2x2"); + + const TensorShape output_shape = compute_unpool_shape(*input, pool_info); + if (output->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output); } + else + { + const auto output_info = TensorInfo(output_shape, input->num_channels(), input->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); + } return Status{}; } diff --git a/src/core/CL/kernels/CLMeanStdDevNormalizationKernel.cpp b/src/core/CL/kernels/CLMeanStdDevNormalizationKernel.cpp index 8632bdf6232..a3165d112d9 100644 --- a/src/core/CL/kernels/CLMeanStdDevNormalizationKernel.cpp +++ b/src/core/CL/kernels/CLMeanStdDevNormalizationKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2023 Arm Limited. + * Copyright (c) 2019-2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -33,6 +33,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -46,15 +47,22 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, f ARM_COMPUTE_UNUSED(epsilon); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->num_dimensions() > 2, "Input tensor cannot have more than 2 dimensions"); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32); // Checks performed when output is configured if ((output != nullptr) && (output->total_size() != 0)) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); } + else + { + // No configured output. Since `output` is expected to match `input`, + // there's nothing extra to check in this case. + } return Status{}; } } // namespace diff --git a/src/core/CL/kernels/CLNormalizationLayerKernel.cpp b/src/core/CL/kernels/CLNormalizationLayerKernel.cpp index b636c485e72..da84c9a68ba 100644 --- a/src/core/CL/kernels/CLNormalizationLayerKernel.cpp +++ b/src/core/CL/kernels/CLNormalizationLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021, 2023 Arm Limited. + * Copyright (c) 2017-2021, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -35,6 +35,7 @@ #include "src/core/AccessWindowStatic.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/NormalizationHelpers.h" #include "src/core/helpers/WindowHelpers.h" @@ -46,20 +47,27 @@ namespace { Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, NormalizationLayerInfo norm_info) { + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(input, DataLayout::NCHW, DataLayout::NHWC); - ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON_MSG(!(norm_info.norm_size() % 2), "Normalization size should be odd"); // Checks performed when output is configured if (output->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output); } + else + { + // No configured output. Since `output` is expected to match `input`, + // there's nothing extra to check in this case. + } return Status{}; } diff --git a/src/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.cpp b/src/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.cpp index 59352a8fb7c..0ad74e83756 100644 --- a/src/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.cpp +++ b/src/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021, 2023 Arm Limited. + * Copyright (c) 2018-2021, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -34,6 +34,7 @@ #include "src/core/AccessWindowStatic.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -45,7 +46,8 @@ namespace Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *mean, const ITensorInfo *std) { - ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output, mean, std); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input, mean, std); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32); @@ -61,10 +63,16 @@ validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const IT // Checks performed when output is configured if (output->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output); } + else + { + // No configured output. Since `output` is expected to match `input`, + // there's nothing extra to check in this case. + } return Status{}; } diff --git a/src/core/CL/kernels/CLPadLayerKernel.cpp b/src/core/CL/kernels/CLPadLayerKernel.cpp index 0ac285038ee..e6dc1e60542 100644 --- a/src/core/CL/kernels/CLPadLayerKernel.cpp +++ b/src/core/CL/kernels/CLPadLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2021, 2023 Arm Limited. + * Copyright (c) 2019-2021, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -25,9 +25,11 @@ #include "arm_compute/core/CL/CLHelpers.h" #include "arm_compute/core/CL/ICLTensor.h" +#include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/utils/helpers/AdjustVecSize.h" #include "arm_compute/core/utils/misc/ShapeCalculator.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -43,6 +45,7 @@ Status validate_arguments(const ITensorInfo *input, PaddingMode mode) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input); ARM_COMPUTE_UNUSED(constant_value); ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN); ARM_COMPUTE_RETURN_ERROR_ON((padding.size() < 1) || (padding.size() > input->num_dimensions())); @@ -58,13 +61,19 @@ Status validate_arguments(const ITensorInfo *input, } } + const TensorShape padded_shape = misc::shape_calculator::compute_padded_shape(input->tensor_shape(), padding); + if (output->total_size() > 0) { - TensorShape padded_shape = misc::shape_calculator::compute_padded_shape(input->tensor_shape(), padding); - + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(output, input); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), padded_shape); } + else + { + const auto output_info = TensorInfo(padded_shape, input->num_channels(), input->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); + } return Status{}; } diff --git a/src/core/CL/kernels/CLPriorBoxLayerKernel.cpp b/src/core/CL/kernels/CLPriorBoxLayerKernel.cpp index 7dcdf1de6f1..8da660c41ae 100644 --- a/src/core/CL/kernels/CLPriorBoxLayerKernel.cpp +++ b/src/core/CL/kernels/CLPriorBoxLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021 Arm Limited. + * Copyright (c) 2018-2021, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -32,6 +32,7 @@ #include "arm_compute/core/utils/misc/ShapeCalculator.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -48,6 +49,7 @@ Status validate_arguments(const ITensorInfo *input1, const PriorBoxLayerInfo &info) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input1, input2, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input1, input2, output); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input1, input2); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input1, input2); @@ -77,10 +79,9 @@ Status validate_arguments(const ITensorInfo *input1, "Max size should be greater than min size"); } - if (output != nullptr && output->total_size() != 0) - { - ARM_COMPUTE_RETURN_ERROR_ON(output->dimension(1) != 2); - } + // There is no default configure, so we expect output to be initialized. + ARM_COMPUTE_RETURN_ERROR_ON(output->total_size() == 0); + ARM_COMPUTE_RETURN_ERROR_ON(output->dimension(1) != 2); return Status{}; } diff --git a/src/core/CL/kernels/CLQLSTMLayerNormalizationKernel.cpp b/src/core/CL/kernels/CLQLSTMLayerNormalizationKernel.cpp index 731fcb8e048..0701b3444af 100644 --- a/src/core/CL/kernels/CLQLSTMLayerNormalizationKernel.cpp +++ b/src/core/CL/kernels/CLQLSTMLayerNormalizationKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2021, 2023 Arm Limited. + * Copyright (c) 2020-2021, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -28,6 +28,7 @@ #include "arm_compute/core/utils/quantization/AsymmHelpers.h" #include "arm_compute/core/utils/StringUtils.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -66,6 +67,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weight, bias, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input, weight, bias); ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->num_dimensions() > 2, "Input tensor cannot have more than 2 dimensions"); ARM_COMPUTE_RETURN_ERROR_ON_MSG(weight->num_dimensions() > 1, "Weight tensor cannot have more than 1 dimensions"); @@ -81,9 +83,15 @@ Status validate_arguments(const ITensorInfo *input, // Checks performed when output is configured if (output->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); } + else + { + // No configured output. Since `output` is expected to match `input`, + // there's nothing extra to check in this case. + } return Status{}; } } // namespace diff --git a/src/core/CL/kernels/CLROIAlignLayerKernel.cpp b/src/core/CL/kernels/CLROIAlignLayerKernel.cpp index c97910ef790..712226d1dc8 100644 --- a/src/core/CL/kernels/CLROIAlignLayerKernel.cpp +++ b/src/core/CL/kernels/CLROIAlignLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021, 2023 Arm Limited. + * Copyright (c) 2018-2021, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -33,6 +33,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -49,6 +50,7 @@ Status validate_arguments(const ITensorInfo *input, const ROIPoolingLayerInfo &pool_info) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, rois, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input, rois); ARM_COMPUTE_RETURN_ERROR_ON(rois->dimension(0) != 5); ARM_COMPUTE_RETURN_ERROR_ON(rois->num_dimensions() > 2); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); @@ -57,12 +59,19 @@ Status validate_arguments(const ITensorInfo *input, ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(input, DataLayout::NHWC, DataLayout::NCHW); ARM_COMPUTE_RETURN_ERROR_ON((pool_info.pooled_width() == 0) || (pool_info.pooled_height() == 0)); + const TensorShape output_shape = compute_roi_align_shape(*input, *rois, pool_info); + if (output->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(compute_roi_align_shape(*input, *rois, pool_info), - output->tensor_shape()); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output_shape, output->tensor_shape()); + } + else + { + const auto output_info = TensorInfo(output_shape, input->num_channels(), input->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); } if (is_data_type_quantized_asymmetric(input->data_type())) diff --git a/src/core/CL/kernels/CLROIPoolingLayerKernel.cpp b/src/core/CL/kernels/CLROIPoolingLayerKernel.cpp index 1b2c414a496..1564ca72967 100644 --- a/src/core/CL/kernels/CLROIPoolingLayerKernel.cpp +++ b/src/core/CL/kernels/CLROIPoolingLayerKernel.cpp @@ -1,5 +1,5 @@ /* - 
* Copyright (c) 2017-2021, 2023 Arm Limited. + * Copyright (c) 2017-2021, 2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -33,6 +33,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -54,10 +55,9 @@ Status CLROIPoolingLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const ROIPoolingLayerInfo &pool_info) { - ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, rois, output); - //Validate arguments ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, rois, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input, rois); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(rois, 1, DataType::U16); ARM_COMPUTE_RETURN_ERROR_ON(rois->dimension(0) != 5); ARM_COMPUTE_RETURN_ERROR_ON(rois->num_dimensions() > 2); @@ -65,13 +65,19 @@ Status CLROIPoolingLayerKernel::validate(const ITensorInfo *input, ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16, DataType::QASYMM8); ARM_COMPUTE_RETURN_ERROR_ON((pool_info.pooled_width() == 0) || (pool_info.pooled_height() == 0)); + const TensorShape output_shape(pool_info.pooled_width(), pool_info.pooled_height(), input->dimension(2), + rois->dimension(1)); + if (output->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); - ARM_COMPUTE_RETURN_ERROR_ON((output->dimension(0) != pool_info.pooled_width()) || - (output->dimension(1) != pool_info.pooled_height())); - ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(2) != output->dimension(2)); - ARM_COMPUTE_RETURN_ERROR_ON(rois->dimension(1) != output->dimension(3)); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape); + } + else + { + const TensorInfo output_info(output_shape, input->num_channels(), 
input->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); } return Status{}; diff --git a/src/core/CL/kernels/CLRangeKernel.cpp b/src/core/CL/kernels/CLRangeKernel.cpp index 622f6210b9b..6ddd8b04fd2 100644 --- a/src/core/CL/kernels/CLRangeKernel.cpp +++ b/src/core/CL/kernels/CLRangeKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021, 2023 Arm Limited. + * Copyright (c) 2018-2021, 2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -30,6 +30,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -43,6 +44,8 @@ constexpr unsigned int vector_size_byte_opencl = 16; Status validate_arguments(const ITensorInfo *output, const float start, const float end, const float step) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output); + ARM_COMPUTE_RETURN_ERROR_ON(output->total_size() == 0); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::S8, DataType::QASYMM8, DataType::U16, DataType::S16, DataType::U32, DataType::S32, DataType::F16, DataType::F32); diff --git a/src/core/CL/kernels/CLReductionOperationKernel.cpp b/src/core/CL/kernels/CLReductionOperationKernel.cpp index c8665f8fbda..ada594dacc2 100644 --- a/src/core/CL/kernels/CLReductionOperationKernel.cpp +++ b/src/core/CL/kernels/CLReductionOperationKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021, 2023 Arm Limited. + * Copyright (c) 2017-2021, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -35,6 +35,7 @@ #include "src/core/AccessWindowStatic.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -46,6 +47,7 @@ namespace Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); if (input->num_channels() == 1) { @@ -68,10 +70,20 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, u ARM_COMPUTE_RETURN_ERROR_ON_MSG((op == ReductionOperation::ARG_IDX_MAX) || (op == ReductionOperation::ARG_IDX_MIN), "Not supported reduction operation, use CLArgMinMaxLayer"); + const TensorShape output_shape = + arm_compute::misc::shape_calculator::compute_reduced_shape(input->tensor_shape(), axis, true); + if (output->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output_shape, output->tensor_shape()); + } + else + { + const TensorInfo output_info(output_shape, input->num_channels(), input->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); } return Status{}; diff --git a/src/core/CL/kernels/CLReorgLayerKernel.cpp b/src/core/CL/kernels/CLReorgLayerKernel.cpp index 9fd21943e89..9773b39a952 100644 --- a/src/core/CL/kernels/CLReorgLayerKernel.cpp +++ b/src/core/CL/kernels/CLReorgLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021, 2023 Arm Limited. + * Copyright (c) 2018-2021, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -32,6 +32,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "arm_compute/core/Validate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -45,6 +46,7 @@ namespace Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, int32_t stride) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN); ARM_COMPUTE_RETURN_ERROR_ON(input->data_layout() == DataLayout::UNKNOWN); @@ -57,14 +59,20 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, i ARM_COMPUTE_RETURN_ERROR_ON_MSG((input->tensor_shape()[idx_height] % stride) != 0, "The height of the input tensor must be a multiple of stride"); + const TensorShape output_shape = misc::shape_calculator::compute_reorg_output_shape(*input, stride); + // Validate output if initialized if (output->total_size() != 0) { - const TensorInfo tensor_info_output = - output->clone()->set_tensor_shape(misc::shape_calculator::compute_reorg_output_shape(*input, stride)); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); } + else + { + const TensorInfo output_info(output_shape, input->num_channels(), input->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); + } return Status{}; } diff --git a/src/core/CL/kernels/CLReverseKernel.cpp b/src/core/CL/kernels/CLReverseKernel.cpp index 9722441bdbe..007b83bf0fe 100644 --- a/src/core/CL/kernels/CLReverseKernel.cpp +++ b/src/core/CL/kernels/CLReverseKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021, 
2023-2024 Arm Limited. + * Copyright (c) 2018-2021, 2023-2024, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -32,6 +32,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -45,6 +46,7 @@ validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const IT { ARM_COMPUTE_UNUSED(use_inverted_axis); ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, axis); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input, axis); #ifndef __aarch64__ ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->element_size() > 4, @@ -61,10 +63,16 @@ validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const IT // Checks performed when output is configured if (output->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output); } + else + { + // No configured output. Since `output` is expected to match `input`, + // there's nothing extra to check in this case. + } return Status{}; } diff --git a/src/core/CL/kernels/CLSelectKernel.cpp b/src/core/CL/kernels/CLSelectKernel.cpp index 703c64d8d37..55bc2e4c1d0 100644 --- a/src/core/CL/kernels/CLSelectKernel.cpp +++ b/src/core/CL/kernels/CLSelectKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021, 2023 Arm Limited. + * Copyright (c) 2018-2021, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -32,6 +32,7 @@ #include "arm_compute/core/utils/helpers/AdjustVecSize.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -43,6 +44,7 @@ namespace Status validate_arguments(const ITensorInfo *c, const ITensorInfo *x, const ITensorInfo *y, const ITensorInfo *output) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(c, x, y, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(c, x, y); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(x); ARM_COMPUTE_RETURN_ERROR_ON(x->data_type() == DataType::UNKNOWN); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(x, y); @@ -57,9 +59,15 @@ Status validate_arguments(const ITensorInfo *c, const ITensorInfo *x, const ITen if (output->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(x, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(x, output); } + else + { + // No configured output. Since `output` is expected to match `x`, + // there's nothing extra to check in this case. + } return Status{}; } diff --git a/src/core/CL/kernels/CLSpaceToBatchLayerKernel.cpp b/src/core/CL/kernels/CLSpaceToBatchLayerKernel.cpp index f4c0839ad2a..4a123e2bec4 100644 --- a/src/core/CL/kernels/CLSpaceToBatchLayerKernel.cpp +++ b/src/core/CL/kernels/CLSpaceToBatchLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021, 2023 Arm Limited. + * Copyright (c) 2018-2021, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -25,10 +25,12 @@ #include "arm_compute/core/CL/CLHelpers.h" #include "arm_compute/core/CL/ICLTensor.h" +#include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/utils/misc/ShapeCalculator.h" #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -45,6 +47,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, block_info, paddings, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input, block_info, paddings, output); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(block_info, 1, DataType::S32); ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN); ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4); @@ -53,15 +56,13 @@ Status validate_arguments(const ITensorInfo *input, ARM_COMPUTE_RETURN_ERROR_ON(paddings->num_dimensions() > 2); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(paddings->tensor_shape(), TensorShape{2, 2}); - // Validate output if initialized - if (output->total_size() != 0) - { - const DataLayout data_layout = input->data_layout(); - const int idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL); - ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_channel] != output->tensor_shape()[idx_channel]); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output); - } + // There is no default config, so we expect output to be initialized. 
+ ARM_COMPUTE_RETURN_ERROR_ON(output->total_size() == 0); + const DataLayout data_layout = input->data_layout(); + const int idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL); + ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_channel] != output->tensor_shape()[idx_channel]); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output); return Status{}; } @@ -73,19 +74,27 @@ Status validate_arguments_static(const ITensorInfo *input, const ITensorInfo *output) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN); ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4); ARM_COMPUTE_RETURN_ERROR_ON(block_shape_x < 1 || block_shape_y < 1); + const TensorShape expected_output_shape = misc::shape_calculator::compute_space_to_batch_shape( + input, block_shape_x, block_shape_y, padding_left, padding_right); + // Validate output if initialized if (output->total_size() != 0) { - TensorShape expected_output_shape = misc::shape_calculator::compute_space_to_batch_shape( - input, block_shape_x, block_shape_y, padding_left, padding_right); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), expected_output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output); } + else + { + const TensorInfo output_info(expected_output_shape, input->num_channels(), input->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); + } return Status{}; } diff --git a/src/core/CL/kernels/CLSpaceToDepthLayerKernel.cpp b/src/core/CL/kernels/CLSpaceToDepthLayerKernel.cpp index 25662b5c628..024301f2b8b 100644 --- a/src/core/CL/kernels/CLSpaceToDepthLayerKernel.cpp +++ 
b/src/core/CL/kernels/CLSpaceToDepthLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2021, 2023 Arm Limited. + * Copyright (c) 2019-2021, 2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -25,10 +25,12 @@ #include "arm_compute/core/CL/CLHelpers.h" #include "arm_compute/core/CL/ICLTensor.h" +#include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/utils/misc/ShapeCalculator.h" #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -41,13 +43,17 @@ namespace Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, int32_t block_shape) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN); ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4); ARM_COMPUTE_RETURN_ERROR_ON(block_shape < 1); + const TensorShape output_shape = compute_space_to_depth_shape(input, block_shape); + // Validate output if initialized if (output->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); const DataLayout data_layout = input->data_layout(); const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH); const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT); @@ -58,8 +64,14 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, i ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_batch] != output->tensor_shape()[idx_batch]); ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape()[idx_channel] % (block_shape * block_shape) != 0); ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape().total_size() != output->tensor_shape().total_size()); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output_shape, 
output->tensor_shape()); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); } + else + { + const TensorInfo output_info(output_shape, input->num_channels(), input->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); + } return Status{}; } diff --git a/src/core/CL/kernels/CLStackLayerKernel.cpp b/src/core/CL/kernels/CLStackLayerKernel.cpp index 23e26716e7e..f2bc04f7c71 100644 --- a/src/core/CL/kernels/CLStackLayerKernel.cpp +++ b/src/core/CL/kernels/CLStackLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021 Arm Limited. + * Copyright (c) 2018-2021, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -32,6 +32,7 @@ #include "arm_compute/core/utils/misc/ShapeCalculator.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -49,19 +50,27 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN); ARM_COMPUTE_RETURN_ERROR_ON(idx_input >= num_tensors); ARM_COMPUTE_RETURN_ERROR_ON(axis > input->num_dimensions()); ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4); + const TensorShape output_shape = compute_stack_shape(*input, axis, num_tensors); + if (output->total_size() != 0) { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), - compute_stack_shape(*input, axis, num_tensors)); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output); } + else + { + const 
TensorInfo output_info(output_shape, input->num_channels(), input->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); + } return Status{}; } diff --git a/src/core/CL/kernels/CLStridedSliceKernel.cpp b/src/core/CL/kernels/CLStridedSliceKernel.cpp index 20cd835069b..b4152a39c49 100644 --- a/src/core/CL/kernels/CLStridedSliceKernel.cpp +++ b/src/core/CL/kernels/CLStridedSliceKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021, 2023-2024 Arm Limited. + * Copyright (c) 2018-2021, 2023-2024, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -29,6 +29,7 @@ #include "arm_compute/core/utils/misc/ShapeCalculator.h" #include "arm_compute/core/utils/StringUtils.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "src/core/utils/helpers/bit_ops.h" @@ -49,6 +50,7 @@ Status validate_arguments(const ITensorInfo *input, int32_t shrink_axis_mask) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN); ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape().num_dimensions() > 4); @@ -66,10 +68,15 @@ Status validate_arguments(const ITensorInfo *input, // Checks output if configured if (output->total_size() != 0) { - const TensorInfo exp_output_info = output->clone()->set_tensor_shape(exp_output_shape); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &exp_output_info); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(exp_output_shape, output->tensor_shape()); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); } + else + { + const TensorInfo output_info(exp_output_shape, input->num_channels(), input->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); + } return Status{}; } diff --git a/src/core/CL/kernels/CLTileKernel.cpp 
b/src/core/CL/kernels/CLTileKernel.cpp index fa996c4008c..6ca4727a186 100644 --- a/src/core/CL/kernels/CLTileKernel.cpp +++ b/src/core/CL/kernels/CLTileKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021, 2023 Arm Limited. + * Copyright (c) 2018-2021, 2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -24,9 +24,11 @@ #include "src/core/CL/kernels/CLTileKernel.h" #include "arm_compute/core/CL/ICLTensor.h" +#include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/utils/misc/ShapeCalculator.h" #include "arm_compute/core/utils/StringUtils.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/StringSupport.h" @@ -38,18 +40,26 @@ namespace Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const Multiples &multiples) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN); ARM_COMPUTE_RETURN_ERROR_ON(multiples.size() > 4); ARM_COMPUTE_RETURN_ERROR_ON(multiples.empty()); ARM_COMPUTE_RETURN_ERROR_ON(std::any_of(multiples.begin(), multiples.end(), [](uint32_t e) { return e == 0; })); + const TensorShape output_shape = misc::shape_calculator::compute_tiled_shape(input->tensor_shape(), multiples); + // Validate output if initialized if (output->total_size() != 0) { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS( - misc::shape_calculator::compute_tiled_shape(input->tensor_shape(), multiples), output->tensor_shape()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output_shape, output->tensor_shape()); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); } + else + { + const TensorInfo output_info(output_shape, input->num_channels(), input->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); + } return 
Status{}; } diff --git a/src/core/CPP/kernels/CPPBoxWithNonMaximaSuppressionLimitKernel.cpp b/src/core/CPP/kernels/CPPBoxWithNonMaximaSuppressionLimitKernel.cpp index 02686eb4f6a..f884aa10d32 100644 --- a/src/core/CPP/kernels/CPPBoxWithNonMaximaSuppressionLimitKernel.cpp +++ b/src/core/CPP/kernels/CPPBoxWithNonMaximaSuppressionLimitKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020, 2022 Arm Limited. + * Copyright (c) 2018-2020, 2022, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -25,6 +25,7 @@ #include "arm_compute/core/Helpers.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/WindowHelpers.h" #include @@ -381,6 +382,8 @@ void CPPBoxWithNonMaximaSuppressionLimitKernel::configure(const ITensor * const BoxNMSLimitInfo info) { ARM_COMPUTE_ERROR_ON_NULLPTR(scores_in, boxes_in, scores_out, boxes_out, classes); + ARM_COMPUTE_ERROR_ON_SIZE_UNSUPPORTED(scores_in->info(), boxes_in->info(), scores_out->info(), boxes_out->info(), + classes->info()); ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(scores_in, 1, DataType::F16, DataType::F32); ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(scores_in, boxes_in, scores_out); const unsigned int num_classes = scores_in->info()->dimension(0); @@ -396,8 +399,10 @@ void CPPBoxWithNonMaximaSuppressionLimitKernel::configure(const ITensor * ARM_COMPUTE_ERROR_ON(scores_out->info()->dimension(0) != classes->info()->dimension(0)); if (keeps != nullptr) { + ARM_COMPUTE_ERROR_ON_SIZE_UNSUPPORTED(keeps->info()); ARM_COMPUTE_ERROR_ON_MSG(keeps_size == nullptr, "keeps_size cannot be nullptr if keeps has to be provided as output"); + ARM_COMPUTE_ERROR_ON_SIZE_UNSUPPORTED(keeps_size->info()); ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(scores_in, keeps); ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(keeps_size, 1, DataType::U32); ARM_COMPUTE_ERROR_ON(scores_out->info()->dimension(0) != keeps->info()->dimension(0)); @@ -405,10 +410,12 @@ void CPPBoxWithNonMaximaSuppressionLimitKernel::configure(const ITensor * } if 
(batch_splits_in != nullptr) { + ARM_COMPUTE_ERROR_ON_SIZE_UNSUPPORTED(batch_splits_in->info()); ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(scores_in, batch_splits_in); } if (batch_splits_out != nullptr) { + ARM_COMPUTE_ERROR_ON_SIZE_UNSUPPORTED(batch_splits_out->info()); ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(scores_in, batch_splits_out); } diff --git a/src/core/CPP/kernels/CPPNonMaximumSuppressionKernel.cpp b/src/core/CPP/kernels/CPPNonMaximumSuppressionKernel.cpp index 1224ec14a71..a9c22a618c8 100644 --- a/src/core/CPP/kernels/CPPNonMaximumSuppressionKernel.cpp +++ b/src/core/CPP/kernels/CPPNonMaximumSuppressionKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2020, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -26,6 +26,7 @@ #include "arm_compute/core/Helpers.h" #include "arm_compute/core/Validate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" @@ -43,6 +44,7 @@ Status validate_arguments(const ITensorInfo *bboxes, const float iou_threshold) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(bboxes, scores, output_indices); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(bboxes, scores, output_indices); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bboxes, 1, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_indices, 1, DataType::S32); ARM_COMPUTE_RETURN_ERROR_ON_MSG(bboxes->num_dimensions() > 2, diff --git a/src/core/CPP/kernels/CPPPermuteKernel.cpp b/src/core/CPP/kernels/CPPPermuteKernel.cpp index e68090d82bd..c2befd858d9 100644 --- a/src/core/CPP/kernels/CPPPermuteKernel.cpp +++ b/src/core/CPP/kernels/CPPPermuteKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2020 Arm Limited. + * Copyright (c) 2017-2020, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -24,8 +24,10 @@ #include "arm_compute/core/CPP/kernels/CPPPermuteKernel.h" #include "arm_compute/core/Helpers.h" +#include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/utils/misc/ShapeCalculator.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" @@ -38,6 +40,8 @@ namespace { Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const PermutationVector &perm) { + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN); ARM_COMPUTE_RETURN_ERROR_ON_MSG(perm.num_dimensions() > 4, "Only up to 4D permutation vectors are supported"); @@ -46,9 +50,15 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c // Validate configured output if (output->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); } + else + { + const auto output_info = TensorInfo(output_shape, input->num_channels(), input->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); + } return Status{}; } diff --git a/src/core/CPP/kernels/CPPTopKVKernel.cpp b/src/core/CPP/kernels/CPPTopKVKernel.cpp index 6ffb68e770b..c31014744da 100644 --- a/src/core/CPP/kernels/CPPTopKVKernel.cpp +++ b/src/core/CPP/kernels/CPPTopKVKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2020, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -27,6 +27,7 @@ #include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/utils/misc/Traits.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" @@ -53,9 +54,13 @@ Status validate_arguments(const ITensorInfo *predictions, const unsigned int k) { ARM_COMPUTE_UNUSED(k); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(predictions, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, - DataType::S32, DataType::F16, DataType::F32); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(targets, 1, DataType::U32); + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(predictions, targets, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(predictions, targets); + const size_t one_channel = 1u; + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(predictions, one_channel, DataType::QASYMM8, + DataType::QASYMM8_SIGNED, DataType::S32, DataType::F16, + DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(targets, one_channel, DataType::U32); ARM_COMPUTE_RETURN_ERROR_ON(predictions->num_dimensions() > 2); ARM_COMPUTE_RETURN_ERROR_ON(targets->num_dimensions() > 1); @@ -63,8 +68,14 @@ Status validate_arguments(const ITensorInfo *predictions, // Validate configured output if (output->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), targets->tensor_shape()); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, one_channel, DataType::U8); + } + else + { + const auto output_info = TensorInfo(targets->tensor_shape(), one_channel, DataType::U8); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); } return Status{}; diff --git a/src/core/CPP/kernels/CPPUpsampleKernel.cpp b/src/core/CPP/kernels/CPPUpsampleKernel.cpp index b1efe324460..6f468a496bc 
100644 --- a/src/core/CPP/kernels/CPPUpsampleKernel.cpp +++ b/src/core/CPP/kernels/CPPUpsampleKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2020 Arm Limited. + * Copyright (c) 2017-2020, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -25,6 +25,7 @@ #include "arm_compute/core/Helpers.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/WindowHelpers.h" #include @@ -44,6 +45,7 @@ bool CPPUpsampleKernel::is_parallelisable() const void CPPUpsampleKernel::configure(const ITensor *input, ITensor *output, const PadStrideInfo &info) { ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_ERROR_ON_SIZE_UNSUPPORTED(input->info(), output->info()); _input = input; _output = output; diff --git a/src/core/NEON/kernels/NEBatchToSpaceLayerKernel.cpp b/src/core/NEON/kernels/NEBatchToSpaceLayerKernel.cpp index f299bb94a41..4126838a74a 100644 --- a/src/core/NEON/kernels/NEBatchToSpaceLayerKernel.cpp +++ b/src/core/NEON/kernels/NEBatchToSpaceLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020, 2023 Arm Limited. + * Copyright (c) 2019-2020, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -30,6 +30,7 @@ #include "arm_compute/core/utils/misc/ShapeCalculator.h" #include "arm_compute/core/Validate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" @@ -42,6 +43,7 @@ namespace Status validate_arguments(const ITensorInfo *input, const ITensorInfo *block_info, const ITensorInfo *output) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, block_info, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input, block_info); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(block_info, 1, DataType::S32); ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4); ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN); @@ -49,9 +51,14 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *block_inf // Validate output if initialized if (output->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON(output->num_dimensions() > 4); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); } + else + { + // We don't know the shape, but dynamic block shape is deprecated anyway. 
+ } return Status{}; } @@ -62,6 +69,7 @@ Status validate_arguments_static(const ITensorInfo *input, const CropInfo &crop_info) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4); ARM_COMPUTE_RETURN_ERROR_ON(block_shape_x <= 0); ARM_COMPUTE_RETURN_ERROR_ON(block_shape_y <= 0); @@ -69,16 +77,22 @@ Status validate_arguments_static(const ITensorInfo *input, const DataLayout data_layout = input->data_layout(); const int idx_batch = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES); ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_batch] % (block_shape_x * block_shape_y) != 0); + + const TensorShape expected_output_shape = compute_batch_to_space_shape(input->data_layout(), input->tensor_shape(), + block_shape_x, block_shape_y, crop_info); + // Validate output if initialized if (output->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON(output->num_dimensions() > 4); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); - - const TensorShape expected_output_shape = compute_batch_to_space_shape( - input->data_layout(), input->tensor_shape(), block_shape_x, block_shape_y, crop_info); - const TensorInfo expected_output = output->clone()->set_tensor_shape(expected_output_shape); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &expected_output); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(expected_output_shape, output->tensor_shape()); + } + else + { + const TensorInfo output_info(expected_output_shape, input->num_channels(), input->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); } return Status{}; diff --git a/src/core/NEON/kernels/NEBitwiseAndKernel.cpp b/src/core/NEON/kernels/NEBitwiseAndKernel.cpp index a59bbd233b9..8721dd188cf 100644 --- a/src/core/NEON/kernels/NEBitwiseAndKernel.cpp +++ 
b/src/core/NEON/kernels/NEBitwiseAndKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021 Arm Limited. + * Copyright (c) 2016-2021, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -28,6 +28,7 @@ #include "arm_compute/core/Types.h" #include "arm_compute/core/Validate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "src/core/NEON/wrapper/wrapper.h" @@ -70,6 +71,7 @@ void NEBitwiseAndKernel::configure(const ITensor *input1, const ITensor *input2, set_format_if_unknown(*input1->info(), Format::U8); set_format_if_unknown(*input2->info(), Format::U8); + ARM_COMPUTE_ERROR_ON_SIZE_UNSUPPORTED(input1->info(), input2->info(), output->info()); ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input1, input2, output); ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::U8); ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::U8); diff --git a/src/core/NEON/kernels/NEBitwiseNotKernel.cpp b/src/core/NEON/kernels/NEBitwiseNotKernel.cpp index ecd181a7af0..c05f4ef5eb8 100644 --- a/src/core/NEON/kernels/NEBitwiseNotKernel.cpp +++ b/src/core/NEON/kernels/NEBitwiseNotKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021 Arm Limited. + * Copyright (c) 2016-2021, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -28,6 +28,7 @@ #include "arm_compute/core/Types.h" #include "arm_compute/core/Validate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" @@ -64,6 +65,7 @@ void NEBitwiseNotKernel::configure(const ITensor *input, ITensor *output) set_format_if_unknown(*output->info(), Format::U8); set_format_if_unknown(*input->info(), Format::U8); + ARM_COMPUTE_ERROR_ON_SIZE_UNSUPPORTED(input->info(), output->info()); ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, output); ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8); ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8); diff --git a/src/core/NEON/kernels/NEBitwiseOrKernel.cpp b/src/core/NEON/kernels/NEBitwiseOrKernel.cpp index 4c906134aa1..47737f89c71 100644 --- a/src/core/NEON/kernels/NEBitwiseOrKernel.cpp +++ b/src/core/NEON/kernels/NEBitwiseOrKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021 Arm Limited. + * Copyright (c) 2016-2021, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -28,6 +28,7 @@ #include "arm_compute/core/Types.h" #include "arm_compute/core/Validate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" @@ -72,6 +73,7 @@ void NEBitwiseOrKernel::configure(const ITensor *input1, const ITensor *input2, ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::U8); ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8); ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input1, input2, output); + ARM_COMPUTE_ERROR_ON_SIZE_UNSUPPORTED(input1->info(), input2->info(), output->info()); _input1 = input1; _input2 = input2; diff --git a/src/core/NEON/kernels/NEBitwiseXorKernel.cpp b/src/core/NEON/kernels/NEBitwiseXorKernel.cpp index dbbed2483c3..9f16ba4f90d 100644 --- a/src/core/NEON/kernels/NEBitwiseXorKernel.cpp +++ b/src/core/NEON/kernels/NEBitwiseXorKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021 Arm Limited. + * Copyright (c) 2016-2021, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -28,6 +28,7 @@ #include "arm_compute/core/Types.h" #include "arm_compute/core/Validate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" @@ -67,6 +68,7 @@ void NEBitwiseXorKernel::configure(const ITensor *input1, const ITensor *input2, set_format_if_unknown(*input1->info(), Format::U8); set_format_if_unknown(*input2->info(), Format::U8); + ARM_COMPUTE_ERROR_ON_SIZE_UNSUPPORTED(input1->info(), input2->info(), output->info()); ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input1, input2, output); ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::U8); ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::U8); diff --git a/src/core/NEON/kernels/NEBoundingBoxTransformKernel.cpp b/src/core/NEON/kernels/NEBoundingBoxTransformKernel.cpp index 694def1a3ad..46c33aea19f 100644 --- a/src/core/NEON/kernels/NEBoundingBoxTransformKernel.cpp +++ b/src/core/NEON/kernels/NEBoundingBoxTransformKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022, 2024 Arm Limited. + * Copyright (c) 2019-2022, 2024, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -99,6 +99,7 @@ Status validate_arguments(const ITensorInfo *boxes, const BoundingBoxTransformInfo &info) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(boxes, pred_boxes, deltas); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(boxes, deltas); ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(boxes); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(boxes, DataType::QASYMM16, DataType::F32, DataType::F16); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(deltas, DataType::QASYMM8, DataType::F32, DataType::F16); @@ -123,6 +124,7 @@ Status validate_arguments(const ITensorInfo *boxes, if (pred_boxes->total_size() > 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(pred_boxes); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(pred_boxes->tensor_shape(), deltas->tensor_shape()); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(pred_boxes, deltas); ARM_COMPUTE_RETURN_ERROR_ON(pred_boxes->num_dimensions() > 2); @@ -133,6 +135,11 @@ Status validate_arguments(const ITensorInfo *boxes, ARM_COMPUTE_RETURN_ERROR_ON(pred_qinfo.offset != 0); } } + else + { + // No configured output. Since `pred_boxes` is expected to match `deltas`, + // there's nothing extra to check in this case. + } return Status{}; } diff --git a/src/core/NEON/kernels/NEChannelShuffleLayerKernel.cpp b/src/core/NEON/kernels/NEChannelShuffleLayerKernel.cpp index 3b53b7055f1..bf40e502e19 100644 --- a/src/core/NEON/kernels/NEChannelShuffleLayerKernel.cpp +++ b/src/core/NEON/kernels/NEChannelShuffleLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021 Arm Limited. + * Copyright (c) 2018-2021, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -41,6 +41,8 @@ namespace { Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, unsigned int num_groups) { + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input); // Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use CPU FP16 instructions. ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN); ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(input, DataLayout::NCHW, DataLayout::NHWC); @@ -59,10 +61,16 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, u // Checks performed when output is configured if (output->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output); } + else + { + // No configured output. Since `output` is expected to match `input`, + // there's nothing extra to check in this case. 
+ } return Status{}; } diff --git a/src/core/NEON/kernels/NECropKernel.cpp b/src/core/NEON/kernels/NECropKernel.cpp index d98c3b78751..7339b326e98 100644 --- a/src/core/NEON/kernels/NECropKernel.cpp +++ b/src/core/NEON/kernels/NECropKernel.cpp @@ -229,6 +229,8 @@ Status NECropKernel::validate(const ITensorInfo *input, float extrapolation_value) { ARM_COMPUTE_UNUSED(extrapolation_value); + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, crop_boxes, box_ind, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input, crop_boxes, box_ind); const auto *uk = get_implementation(CropSelectorData{input->data_type()}); ARM_COMPUTE_RETURN_ERROR_ON(uk == nullptr || uk->ukernel == nullptr); @@ -243,11 +245,17 @@ Status NECropKernel::validate(const ITensorInfo *input, ARM_COMPUTE_RETURN_ERROR_ON(box_ind->tensor_shape()[0] <= crop_box_ind); if (output->total_size() > 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(output, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output); ARM_COMPUTE_RETURN_ERROR_ON(output->num_dimensions() != 3); ARM_COMPUTE_RETURN_ERROR_ON(output->has_padding()); } + else + { + // Complicated, but since it's crop, `output` should be no larger than + // `input` anyway, so there's nothing extra to check in this case. + } return Status{}; } diff --git a/src/core/NEON/kernels/NEDepthToSpaceLayerKernel.cpp b/src/core/NEON/kernels/NEDepthToSpaceLayerKernel.cpp index e0eb5cf2020..1556fcca2b6 100644 --- a/src/core/NEON/kernels/NEDepthToSpaceLayerKernel.cpp +++ b/src/core/NEON/kernels/NEDepthToSpaceLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020, 2023 Arm Limited. + * Copyright (c) 2019-2020, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -26,10 +26,12 @@ #include "arm_compute/core/CoreTypes.h" #include "arm_compute/core/Helpers.h" #include "arm_compute/core/ITensor.h" +#include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/Types.h" #include "arm_compute/core/utils/misc/ShapeCalculator.h" #include "arm_compute/core/Validate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "src/cpu/kernels/depth_to_space/list.h" @@ -43,6 +45,7 @@ namespace Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, int32_t block_shape) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN); ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4); ARM_COMPUTE_RETURN_ERROR_ON(block_shape < 2); @@ -50,18 +53,29 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, i const DataLayout data_layout = input->data_layout(); const int idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL); ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_channel] % (block_shape * block_shape) != 0); + + const TensorShape output_shape = + misc::shape_calculator::compute_depth_to_space_shape(input->tensor_shape(), input->data_layout(), block_shape); + // Validate output if initialized if (output->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH); const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT); ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape()[idx_width] != (block_shape * input->tensor_shape()[idx_width])); ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape()[idx_height] != (block_shape * input->tensor_shape()[idx_height])); + 
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output_shape, output->tensor_shape()); ARM_COMPUTE_RETURN_ERROR_ON(output->num_dimensions() > 4); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); } + else + { + const TensorInfo output_info(output_shape, input->num_channels(), input->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); + } return Status{}; } diff --git a/src/core/NEON/kernels/NEFillBorderKernel.cpp b/src/core/NEON/kernels/NEFillBorderKernel.cpp index 00b0c0ae8d4..9b5acf42794 100644 --- a/src/core/NEON/kernels/NEFillBorderKernel.cpp +++ b/src/core/NEON/kernels/NEFillBorderKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021 Arm Limited. + * Copyright (c) 2016-2021, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -31,6 +31,7 @@ #include "arm_compute/core/Validate.h" #include "arm_compute/core/Window.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/WindowHelpers.h" #include "src/core/NEON/kernels/NEFillBorderKernel.h" @@ -108,6 +109,7 @@ void NEFillBorderKernel::configure(ITensor *tensor, const PixelValue &constant_border_value) { ARM_COMPUTE_ERROR_ON_NULLPTR(tensor); + ARM_COMPUTE_ERROR_ON_SIZE_UNSUPPORTED(tensor->info()); _tensor = tensor; configure(tensor->info(), border_size, border_mode, constant_border_value); } @@ -118,6 +120,7 @@ void NEFillBorderKernel::configure(ITensorInfo *tensor, const PixelValue &constant_border_value) { ARM_COMPUTE_ERROR_ON_NULLPTR(tensor); + ARM_COMPUTE_ERROR_ON_SIZE_UNSUPPORTED(tensor); //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use CPU FP16 instructions. 
ARM_COMPUTE_ERROR_ON(tensor->data_type() == DataType::UNKNOWN); diff --git a/src/core/NEON/kernels/NEFuseBatchNormalizationKernel.cpp b/src/core/NEON/kernels/NEFuseBatchNormalizationKernel.cpp index cbe5136fb1b..5139fd9eda8 100644 --- a/src/core/NEON/kernels/NEFuseBatchNormalizationKernel.cpp +++ b/src/core/NEON/kernels/NEFuseBatchNormalizationKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2022 Arm Limited. + * Copyright (c) 2018-2022, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -162,6 +162,7 @@ Status validate_arguments(const ITensorInfo *input_weights, { ARM_COMPUTE_UNUSED(epsilon); ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input_weights, bn_mean, bn_var); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input_weights, bn_mean, bn_var); ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input_weights); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input_weights, 1, DataType::F16, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(bn_mean, bn_var); @@ -182,18 +183,21 @@ Status validate_arguments(const ITensorInfo *input_weights, // Validate bias if (input_bias != nullptr) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input_bias); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(bn_mean, input_bias); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_weights, input_bias); } // Validate beta if (bn_beta != nullptr) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(bn_beta); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(bn_mean, bn_beta); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_weights, bn_beta); } // Validate gamma if (bn_gamma != nullptr) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(bn_gamma); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(bn_mean, bn_gamma); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_weights, bn_gamma); } @@ -201,16 +205,30 @@ Status validate_arguments(const ITensorInfo *input_weights, // Validate output weights if (fused_weights != nullptr && fused_weights->total_size() != 0) { + 
ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(fused_weights); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input_weights, fused_weights); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input_weights, fused_weights); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_weights, fused_weights); } + else + { + const auto fused_weights_info = + TensorInfo(input_weights->tensor_shape(), input_weights->num_channels(), input_weights->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&fused_weights_info); + } // Validate output bias if (fused_bias != nullptr && fused_bias->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(fused_bias); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(bn_mean, fused_bias); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_weights, fused_bias); } + else + { + const auto fused_bias_info = + TensorInfo(bn_mean->tensor_shape(), input_weights->num_channels(), input_weights->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&fused_bias_info); + } return Status{}; } diff --git a/src/core/NEON/kernels/NEGatherKernel.cpp b/src/core/NEON/kernels/NEGatherKernel.cpp index f1d457d399f..d611da3a6a0 100644 --- a/src/core/NEON/kernels/NEGatherKernel.cpp +++ b/src/core/NEON/kernels/NEGatherKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2023 Arm Limited. + * Copyright (c) 2019-2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -31,6 +31,7 @@ #include "arm_compute/core/Validate.h" #include "arm_compute/core/Window.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" @@ -41,6 +42,7 @@ namespace Status validate_arguments(const ITensorInfo *input, const ITensorInfo *indices, const ITensorInfo *output, int axis) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, indices, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input, indices); ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4); if (axis < 0) @@ -53,13 +55,20 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *indices, Coordinates::num_max_dimensions); ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN); + const TensorShape output_shape = + arm_compute::misc::shape_calculator::compute_gather_shape(input->tensor_shape(), indices->tensor_shape(), axis); + if (output->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output); - TensorShape output_shape = arm_compute::misc::shape_calculator::compute_gather_shape( - input->tensor_shape(), indices->tensor_shape(), axis); - ARM_COMPUTE_RETURN_ERROR_ON(output_shape.total_size() != output->tensor_shape().total_size()); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output_shape, output->tensor_shape()); + } + else + { + const TensorInfo output_info(output_shape, input->num_channels(), input->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); } ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(indices, 1, DataType::U32, DataType::S32); diff --git a/src/core/NEON/kernels/NEGenerateProposalsLayerKernel.cpp b/src/core/NEON/kernels/NEGenerateProposalsLayerKernel.cpp index e23e3d020ff..58f452adf9c 100644 --- 
a/src/core/NEON/kernels/NEGenerateProposalsLayerKernel.cpp +++ b/src/core/NEON/kernels/NEGenerateProposalsLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022, 2024 Arm Limited. + * Copyright (c) 2019-2022, 2024, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -90,25 +90,33 @@ const ComputeAllAnchorsKernel *get_implementation(const ComputeAllAnchorsData &d Status validate_arguments(const ITensorInfo *anchors, const ITensorInfo *all_anchors, const ComputeAnchorsInfo &info) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(anchors, all_anchors); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(anchors); ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(anchors); ARM_COMPUTE_RETURN_ERROR_ON(anchors->dimension(0) != info.values_per_roi()); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(anchors, DataType::QSYMM16, DataType::F16, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON(anchors->num_dimensions() > 2); + + const size_t feature_height = info.feat_height(); + const size_t feature_width = info.feat_width(); + const size_t num_anchors = anchors->dimension(1); + const TensorShape output_shape(info.values_per_roi(), feature_width * feature_height * num_anchors); if (all_anchors->total_size() > 0) { - const size_t feature_height = info.feat_height(); - const size_t feature_width = info.feat_width(); - const size_t num_anchors = anchors->dimension(1); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(all_anchors); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(all_anchors, anchors); ARM_COMPUTE_RETURN_ERROR_ON(all_anchors->num_dimensions() > 2); - ARM_COMPUTE_RETURN_ERROR_ON(all_anchors->dimension(0) != info.values_per_roi()); - ARM_COMPUTE_RETURN_ERROR_ON(all_anchors->dimension(1) != feature_height * feature_width * num_anchors); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(all_anchors->tensor_shape(), output_shape); if (is_data_type_quantized(anchors->data_type())) { ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(anchors, all_anchors); } } + else + { + 
const TensorInfo all_anchors_info(output_shape, anchors->num_channels(), anchors->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&all_anchors_info); + } return Status{}; } diff --git a/src/core/NEON/kernels/NEInstanceNormalizationLayerKernel.cpp b/src/core/NEON/kernels/NEInstanceNormalizationLayerKernel.cpp index 5883731088e..c6ed7dc62c3 100644 --- a/src/core/NEON/kernels/NEInstanceNormalizationLayerKernel.cpp +++ b/src/core/NEON/kernels/NEInstanceNormalizationLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022, 2024 Arm Limited. + * Copyright (c) 2019-2022, 2024, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -96,6 +96,8 @@ const InstanceNormKernel *get_implementation(const InstanceNormSelectorData &dat Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, float gamma, float beta, float epsilon) { + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input); ARM_COMPUTE_UNUSED(gamma); ARM_COMPUTE_UNUSED(beta); @@ -107,12 +109,18 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, f if (output != nullptr && output->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->num_channels() != output->num_channels(), "Input and output have different number of channels"); } + else + { + // No configured output. Since `output` is expected to match `input`, + // there's nothing extra to check in this case. 
+ } return Status{}; } diff --git a/src/core/NEON/kernels/NEL2NormalizeLayerKernel.cpp b/src/core/NEON/kernels/NEL2NormalizeLayerKernel.cpp index eea57a17d38..128010450b9 100644 --- a/src/core/NEON/kernels/NEL2NormalizeLayerKernel.cpp +++ b/src/core/NEON/kernels/NEL2NormalizeLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2022 Arm Limited. + * Copyright (c) 2017-2022, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -33,6 +33,7 @@ #include "src/common/cpuinfo/CpuIsaInfo.h" #include "src/core/common/Registrars.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "src/core/NEON/NEMath.h" @@ -114,6 +115,7 @@ validate_arguments(const ITensorInfo *input, const ITensorInfo *sum, const ITens const uint32_t actual_axis = wrap_around(axis, max_input_tensor_dim); ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, sum, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input, sum); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, sum); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON_MSG(actual_axis > 2, "Actual axis greater than 2 is not supported"); @@ -127,11 +129,17 @@ validate_arguments(const ITensorInfo *input, const ITensorInfo *sum, const ITens if (output->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(input->tensor_shape(), output->tensor_shape()); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output); } + else + { + // No configured output. Since `output` is expected to match `input`, + // there's nothing extra to check in this case. 
+ } return Status{}; } diff --git a/src/core/NEON/kernels/NELogicalKernel.cpp b/src/core/NEON/kernels/NELogicalKernel.cpp index 6be62845283..7aac458e316 100644 --- a/src/core/NEON/kernels/NELogicalKernel.cpp +++ b/src/core/NEON/kernels/NELogicalKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2021 Arm Limited. + * Copyright (c) 2020-2021, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -24,9 +24,11 @@ #include "src/core/NEON/kernels/NELogicalKernel.h" #include "arm_compute/core/Helpers.h" +#include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/Validate.h" #include "src/common/utils/Validate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" @@ -297,12 +299,16 @@ Status NELogicalKernel::validate(const ITensorInfo *input1, const ITensorInfo *output, LogicalOperation op) { + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input1); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input1); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::U8); ARM_COMPUTE_RETURN_ERROR_ON(op == LogicalOperation::Unknown); TensorShape out_shape = input1->tensor_shape(); if (op != LogicalOperation::Not) { + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input2); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input2); out_shape = TensorShape::broadcast_shape(input1->tensor_shape(), input2->tensor_shape()); ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible"); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input1, input2); @@ -311,9 +317,15 @@ Status NELogicalKernel::validate(const ITensorInfo *input1, // Checks performed when output is configured if ((output != nullptr) && (output->total_size() != 0)) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON(detail::have_different_dimensions(out_shape, output->tensor_shape(), 0)); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input1, output); } + else + { 
+ const TensorInfo output_info(out_shape, input1->num_channels(), input1->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); + } return Status{}; } diff --git a/src/core/NEON/kernels/NENormalizationLayerKernel.cpp b/src/core/NEON/kernels/NENormalizationLayerKernel.cpp index 8399c6c49db..5034027300d 100644 --- a/src/core/NEON/kernels/NENormalizationLayerKernel.cpp +++ b/src/core/NEON/kernels/NENormalizationLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021, 2023 Arm Limited. + * Copyright (c) 2017-2021, 2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -51,6 +51,7 @@ Status validate_arguments(const ITensorInfo *input, const NormalizationLayerInfo &norm_info) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, input_squared, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input, input_squared); ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32); @@ -61,10 +62,16 @@ Status validate_arguments(const ITensorInfo *input, // Checks performed when output is configured if (output->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output); } + else + { + // No configured output. Since `output` is expected to match `input`, + // there's nothing extra to check in this case. + } return Status{}; } diff --git a/src/core/NEON/kernels/NEPriorBoxLayerKernel.cpp b/src/core/NEON/kernels/NEPriorBoxLayerKernel.cpp index 15e933e66eb..99866a0ea4d 100644 --- a/src/core/NEON/kernels/NEPriorBoxLayerKernel.cpp +++ b/src/core/NEON/kernels/NEPriorBoxLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021 Arm Limited. + * Copyright (c) 2018-2021, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -28,6 +28,7 @@ #include "arm_compute/core/Types.h" #include "arm_compute/core/Validate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" @@ -43,6 +44,7 @@ Status validate_arguments(const ITensorInfo *input1, const PriorBoxLayerInfo &info) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input1, input2, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input1, input2, output); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input1, input2); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input1, input2); @@ -72,11 +74,10 @@ Status validate_arguments(const ITensorInfo *input1, "Max size should be greater than min size"); } - if (output != nullptr && output->total_size() != 0) - { - ARM_COMPUTE_RETURN_ERROR_ON(output->dimension(1) != 2); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input1, output); - } + // There's no default configuration, so we expect that output is initialized. + ARM_COMPUTE_RETURN_ERROR_ON(output->total_size() == 0); + ARM_COMPUTE_RETURN_ERROR_ON(output->dimension(1) != 2); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input1, output); return Status{}; } diff --git a/src/core/NEON/kernels/NEQLSTMLayerNormalizationKernel.cpp b/src/core/NEON/kernels/NEQLSTMLayerNormalizationKernel.cpp index 8e1ed3a2a5c..a82d572a268 100644 --- a/src/core/NEON/kernels/NEQLSTMLayerNormalizationKernel.cpp +++ b/src/core/NEON/kernels/NEQLSTMLayerNormalizationKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2021, 2023 Arm Limited. + * Copyright (c) 2020-2021, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -144,6 +144,7 @@ Status NEQLSTMLayerNormalizationKernel::validate(const ITensorInfo *input, ARM_COMPUTE_UNUSED(output, bias, weight, input); ARM_COMPUTE_ERROR_ON_NULLPTR(input, weight, bias, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input, weight, bias); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QSYMM16); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weight, 1, DataType::QSYMM16); @@ -158,9 +159,15 @@ Status NEQLSTMLayerNormalizationKernel::validate(const ITensorInfo *input, if (output->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output); } + else + { + // No configured output. Since `output` is expected to match `input`, + // there's nothing extra to check in this case. + } return Status{}; } diff --git a/src/core/NEON/kernels/NEROIAlignLayerKernel.cpp b/src/core/NEON/kernels/NEROIAlignLayerKernel.cpp index 29e9ccb656d..340ec8b7bd2 100644 --- a/src/core/NEON/kernels/NEROIAlignLayerKernel.cpp +++ b/src/core/NEON/kernels/NEROIAlignLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022, 2024 Arm Limited. + * Copyright (c) 2019-2022, 2024, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -101,6 +101,7 @@ Status validate_arguments(const ITensorInfo *input, const ROIPoolingLayerInfo &pool_info) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, rois, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input, rois); ARM_COMPUTE_RETURN_ERROR_ON(rois->dimension(0) != 5); ARM_COMPUTE_RETURN_ERROR_ON(rois->num_dimensions() > 2); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, @@ -109,12 +110,19 @@ Status validate_arguments(const ITensorInfo *input, ARM_COMPUTE_RETURN_ERROR_ON((pool_info.pooled_width() == 0) || (pool_info.pooled_height() == 0)); ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input); + const TensorShape output_shape = compute_roi_align_shape(*input, *rois, pool_info); + if (output->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(compute_roi_align_shape(*input, *rois, pool_info), - output->tensor_shape()); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output_shape, output->tensor_shape()); + } + else + { + const TensorInfo output_info(output_shape, input->num_channels(), input->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); } if (input->data_type() == DataType::QASYMM8 || input->data_type() == DataType::QASYMM8_SIGNED) diff --git a/src/core/NEON/kernels/NEROIPoolingLayerKernel.cpp b/src/core/NEON/kernels/NEROIPoolingLayerKernel.cpp index 1a3810fb56e..46c20b8371a 100644 --- a/src/core/NEON/kernels/NEROIPoolingLayerKernel.cpp +++ b/src/core/NEON/kernels/NEROIPoolingLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021 Arm Limited. + * Copyright (c) 2017-2021, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -44,6 +44,7 @@ Status validate_arguments(const ITensorInfo *input, const ROIPoolingLayerInfo &pool_info) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output, rois); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input, rois); //Validate arguments ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(rois, DataType::U16); @@ -52,13 +53,19 @@ Status validate_arguments(const ITensorInfo *input, ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(input, DataType::F32, DataType::QASYMM8); ARM_COMPUTE_RETURN_ERROR_ON((pool_info.pooled_width() == 0) || (pool_info.pooled_height() == 0)); + const TensorShape output_shape(pool_info.pooled_width(), pool_info.pooled_height(), input->dimension(2), + rois->dimension(1)); + if (output->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); - ARM_COMPUTE_RETURN_ERROR_ON((output->dimension(0) != pool_info.pooled_width()) || - (output->dimension(1) != pool_info.pooled_height())); - ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(2) != output->dimension(2)); - ARM_COMPUTE_RETURN_ERROR_ON(rois->dimension(1) != output->dimension(3)); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape); + } + else + { + const TensorInfo output_info(output_shape, input->num_channels(), input->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); } return Status{}; diff --git a/src/core/NEON/kernels/NERangeKernel.cpp b/src/core/NEON/kernels/NERangeKernel.cpp index 87b7b76b729..31a1d64d795 100644 --- a/src/core/NEON/kernels/NERangeKernel.cpp +++ b/src/core/NEON/kernels/NERangeKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021, 2023 Arm Limited. + * Copyright (c) 2018-2021, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -31,6 +31,7 @@ #include "arm_compute/core/Validate.h" #include "src/core/common/Registrars.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "src/core/NEON/NEAsymm.h" @@ -95,6 +96,7 @@ const RangeUKernel *get_implementation(const RangeSelectorData &data) Status validate_arguments(const ITensorInfo &output, const float start, const float end, const float step) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output); const auto *uk = get_implementation(RangeSelectorData{output.data_type()}); ARM_COMPUTE_RETURN_ERROR_ON(uk == nullptr || uk->ukernel == nullptr); diff --git a/src/core/NEON/kernels/NEReorgLayerKernel.cpp b/src/core/NEON/kernels/NEReorgLayerKernel.cpp index 227570405cc..0f2a0608ed4 100644 --- a/src/core/NEON/kernels/NEReorgLayerKernel.cpp +++ b/src/core/NEON/kernels/NEReorgLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021 Arm Limited. + * Copyright (c) 2018-2021, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -31,6 +31,7 @@ #include "arm_compute/core/utils/misc/ShapeCalculator.h" #include "arm_compute/core/Validate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" @@ -43,6 +44,8 @@ namespace { Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, int32_t stride) { + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input); //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use CPU FP16 instructions. 
ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN); ARM_COMPUTE_RETURN_ERROR_ON(input->data_layout() == DataLayout::UNKNOWN); @@ -56,14 +59,21 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, i ARM_COMPUTE_RETURN_ERROR_ON_MSG((input->tensor_shape()[idx_height] % stride) != 0, "The height of the input tensor must be a multiple of stride"); + const TensorShape output_shape = misc::shape_calculator::compute_reorg_output_shape(*input, stride); + // Validate output if initialized if (output->total_size() != 0) { - const TensorInfo tensor_info_output = - output->clone()->set_tensor_shape(misc::shape_calculator::compute_reorg_output_shape(*input, stride)); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); + const TensorInfo tensor_info_output = output->clone()->set_tensor_shape(output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); } + else + { + const auto output_info = TensorInfo(output_shape, input->num_channels(), input->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); + } return Status{}; } diff --git a/src/core/NEON/kernels/NESelectKernel.cpp b/src/core/NEON/kernels/NESelectKernel.cpp index 7789b828ea5..a9961c7b948 100644 --- a/src/core/NEON/kernels/NESelectKernel.cpp +++ b/src/core/NEON/kernels/NESelectKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2022 Arm Limited. + * Copyright (c) 2018-2022, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -155,6 +155,7 @@ Status NESelectKernel::validate(const ITensorInfo *c, const ITensorInfo *x, const ITensorInfo *y, const ITensorInfo *output) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(c, x, y); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(c, x, y); ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(x); ARM_COMPUTE_RETURN_ERROR_ON(x->data_type() == DataType::UNKNOWN); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(x, y); @@ -169,9 +170,15 @@ NESelectKernel::validate(const ITensorInfo *c, const ITensorInfo *x, const ITens if (output != nullptr && output->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(x, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(x, output); } + else + { + // No configured output. Since `output` is expected to match `x`, + // there's nothing extra to check in this case. + } return Status{}; } diff --git a/src/core/NEON/kernels/NESpaceToBatchLayerKernel.cpp b/src/core/NEON/kernels/NESpaceToBatchLayerKernel.cpp index da023aeb964..c22a38d1e83 100644 --- a/src/core/NEON/kernels/NESpaceToBatchLayerKernel.cpp +++ b/src/core/NEON/kernels/NESpaceToBatchLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2020, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -25,10 +25,12 @@ #include "arm_compute/core/Helpers.h" #include "arm_compute/core/ITensor.h" +#include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/Types.h" #include "arm_compute/core/utils/misc/ShapeCalculator.h" #include "arm_compute/core/Validate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "src/core/NEON/wrapper/wrapper.h" @@ -48,6 +50,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, block_info, paddings, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input, block_info, paddings, output); ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(block_info, 1, DataType::S32); ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4); @@ -56,15 +59,13 @@ Status validate_arguments(const ITensorInfo *input, ARM_COMPUTE_RETURN_ERROR_ON(paddings->num_dimensions() > 2); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(paddings->tensor_shape(), TensorShape{2, 2}); - // Validate output if initialized - if (output->total_size() != 0) - { - const DataLayout data_layout = input->data_layout(); - const int idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL); - ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_channel] != output->tensor_shape()[idx_channel]); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output); - } + // There's no default config so we expect output to be initialized. 
+ ARM_COMPUTE_RETURN_ERROR_ON(output->total_size() == 0); + const DataLayout data_layout = input->data_layout(); + const int idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL); + ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_channel] != output->tensor_shape()[idx_channel]); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output); return Status{}; } @@ -76,19 +77,27 @@ Status validate_arguments_static(const ITensorInfo *input, const ITensorInfo *output) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN); ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4); ARM_COMPUTE_RETURN_ERROR_ON(block_shape_x < 1 || block_shape_y < 1); + const TensorShape expected_output_shape = misc::shape_calculator::compute_space_to_batch_shape( + input, block_shape_x, block_shape_y, padding_left, padding_right); + // Validate output if initialized if (output->total_size() != 0) { - TensorShape expected_output_shape = misc::shape_calculator::compute_space_to_batch_shape( - input, block_shape_x, block_shape_y, padding_left, padding_right); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), expected_output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output); } + else + { + const TensorInfo output_info(expected_output_shape, input->num_channels(), input->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); + } return Status{}; } diff --git a/src/core/NEON/kernels/NESpaceToDepthLayerKernel.cpp b/src/core/NEON/kernels/NESpaceToDepthLayerKernel.cpp index b49c5ee344b..a74105d41d6 100644 --- 
a/src/core/NEON/kernels/NESpaceToDepthLayerKernel.cpp +++ b/src/core/NEON/kernels/NESpaceToDepthLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2020, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -25,10 +25,12 @@ #include "arm_compute/core/Helpers.h" #include "arm_compute/core/ITensor.h" +#include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/Types.h" #include "arm_compute/core/utils/misc/ShapeCalculator.h" #include "arm_compute/core/Validate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "src/core/NEON/wrapper/wrapper.h" @@ -45,14 +47,18 @@ namespace Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, int32_t block_shape) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN); ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4); ARM_COMPUTE_RETURN_ERROR_ON(block_shape < 1); + const TensorShape output_shape = misc::shape_calculator::compute_space_to_depth_shape(input, block_shape); + // Validate output if initialized if (output->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); const DataLayout data_layout = input->data_layout(); const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH); const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT); @@ -63,8 +69,14 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, i ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_batch] != output->tensor_shape()[idx_batch]); ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape()[idx_channel] % (block_shape * block_shape) != 0); ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape().total_size() != output->tensor_shape().total_size()); + 
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); } + else + { + const TensorInfo output_info(output_shape, input->num_channels(), input->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); + } return Status{}; } diff --git a/src/core/NEON/kernels/NEStackLayerKernel.cpp b/src/core/NEON/kernels/NEStackLayerKernel.cpp index 225e4fcfd2a..4f206e8781c 100644 --- a/src/core/NEON/kernels/NEStackLayerKernel.cpp +++ b/src/core/NEON/kernels/NEStackLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021, 2023 Arm Limited. + * Copyright (c) 2018-2021, 2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -32,6 +32,7 @@ #include "arm_compute/core/Validate.h" #include "arm_compute/core/Window.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/Utils.h" #include "src/core/helpers/WindowHelpers.h" @@ -50,6 +51,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input); // Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use CPU FP16 instructions. 
ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN); ARM_COMPUTE_RETURN_ERROR_ON(idx_input >= num_tensors); @@ -57,13 +59,20 @@ Status validate_arguments(const ITensorInfo *input, ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4); ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() != rank); + const TensorShape output_shape = compute_stack_shape(*input, axis, num_tensors); + if (output->total_size() != 0) { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), - compute_stack_shape(*input, axis, num_tensors)); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output); } + else + { + const TensorInfo output_info(output_shape, input->num_channels(), input->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); + } return Status{}; } diff --git a/src/core/NEON/kernels/NETileKernel.cpp b/src/core/NEON/kernels/NETileKernel.cpp index 577ce5b69e3..351f1ce2ab8 100644 --- a/src/core/NEON/kernels/NETileKernel.cpp +++ b/src/core/NEON/kernels/NETileKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Arm Limited. + * Copyright (c) 2018-2020, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -31,6 +31,7 @@ #include "arm_compute/core/Validate.h" #include "arm_compute/core/Window.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" @@ -41,18 +42,26 @@ namespace Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const Multiples &multiples) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN); ARM_COMPUTE_RETURN_ERROR_ON(multiples.size() > 4); ARM_COMPUTE_RETURN_ERROR_ON(multiples.empty()); ARM_COMPUTE_RETURN_ERROR_ON(std::any_of(multiples.begin(), multiples.end(), [](uint32_t e) { return e == 0; })); + const TensorShape output_shape = misc::shape_calculator::compute_tiled_shape(input->tensor_shape(), multiples); + // Validate output if initialized if (output->total_size() != 0) { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS( - misc::shape_calculator::compute_tiled_shape(input->tensor_shape(), multiples), output->tensor_shape()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output_shape, output->tensor_shape()); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); } + else + { + const TensorInfo output_info(output_shape, input->num_channels(), input->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); + } return Status{}; } diff --git a/src/cpu/kernels/CpuAddMulAddKernel.cpp b/src/cpu/kernels/CpuAddMulAddKernel.cpp index 428c1a280cb..d201b91b4b1 100644 --- a/src/cpu/kernels/CpuAddMulAddKernel.cpp +++ b/src/cpu/kernels/CpuAddMulAddKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023, 2025 Arm Limited. + * Copyright (c) 2023, 2025-2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -66,6 +66,7 @@ Status validate_arguments(const ITensorInfo *input1, const ActivationLayerInfo &act_info) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input1, input2, bn_mul, bn_add, final_output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input1, input2, bn_mul, bn_add); ARM_COMPUTE_RETURN_ERROR_ON_MSG(policy != ConvertPolicy::SATURATE, "Only Saturate Policy is supported"); @@ -100,16 +101,28 @@ Status validate_arguments(const ITensorInfo *input1, // Validate in case we have add layer's output (intermediate) initialized if (add_output != nullptr && add_output->total_size() > 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(add_output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input1, add_output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input1, add_output); } + else + { + // No configured output. Since `add_output` is expected to match `input1`, + // there's nothing extra to check in this case. + } // Validate in case final output has been initialized if (final_output->total_size() > 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(final_output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input1, final_output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input1, final_output); } + else + { + // No configured output. Since `final_output` is expected to match `input1`, + // there's nothing extra to check in this case. + } const auto uk = CpuAddMulAddKernel::get_implementation( DataTypeISASelectorData{input1->data_type(), CPUInfo::get().get_isa()}); diff --git a/src/cpu/kernels/CpuCastKernel.cpp b/src/cpu/kernels/CpuCastKernel.cpp index 6f98b76e295..a755224d08b 100644 --- a/src/cpu/kernels/CpuCastKernel.cpp +++ b/src/cpu/kernels/CpuCastKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2025 Arm Limited. + * Copyright (c) 2016-2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -76,6 +76,8 @@ static const std::vector available_kernels = { Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy) { + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(dst); ARM_COMPUTE_UNUSED(policy); @@ -154,8 +156,15 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, Conver // Validate in case of configured dst if (dst->total_size() > 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst); } + else + { + // The data type must be given, but the shape is expected to match src. + const TensorInfo dst_info(src->tensor_shape(), src->num_channels(), dst->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); + } return Status{}; } diff --git a/src/cpu/kernels/CpuConvertQuantizedSignednessKernel.cpp b/src/cpu/kernels/CpuConvertQuantizedSignednessKernel.cpp index 29f168632e2..3a0428c07e8 100644 --- a/src/cpu/kernels/CpuConvertQuantizedSignednessKernel.cpp +++ b/src/cpu/kernels/CpuConvertQuantizedSignednessKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2021, 2025 Arm Limited. + * Copyright (c) 2019-2021, 2025-2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -31,6 +31,7 @@ #include "arm_compute/core/Window.h" #include "src/common/utils/profile/acl_profile.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "src/core/NEON/wrapper/wrapper.h" @@ -46,14 +47,22 @@ namespace Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED); // Validate output if initialized if (dst->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(src->tensor_shape(), dst->tensor_shape()); } + else + { + // No configured output. Since `dst` is expected to match `src` (except + // for data type, which changes nothing in terms of size), there's nothing + // extra to check in this case. + } return Status{}; } diff --git a/src/cpu/kernels/CpuDirectConv3dKernel.cpp b/src/cpu/kernels/CpuDirectConv3dKernel.cpp index b3cba2e3f46..f6811a1867a 100644 --- a/src/cpu/kernels/CpuDirectConv3dKernel.cpp +++ b/src/cpu/kernels/CpuDirectConv3dKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022, 2024-2025 Arm Limited. + * Copyright (c) 2021-2022, 2024-2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -27,6 +27,7 @@ #include "arm_compute/core/Helpers.h" #include "arm_compute/core/ITensor.h" #include "arm_compute/core/Steps.h" +#include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/Types.h" #include "arm_compute/core/Utils.h" #include "arm_compute/core/utils/misc/ShapeCalculator.h" @@ -68,6 +69,7 @@ Status validate_arguments(const ITensorInfo *src0, const Conv3dInfo &conv_info) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src0, src1, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src0, src1); ARM_COMPUTE_RETURN_ERROR_ON(src0->data_layout() != DataLayout::NDHWC); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(src0, src1, dst); ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src0); @@ -90,6 +92,7 @@ Status validate_arguments(const ITensorInfo *src0, if (src2 != nullptr) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src2); if (is_data_type_quantized(src0->data_type())) { ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src2, 1, DataType::S32); @@ -103,16 +106,20 @@ Status validate_arguments(const ITensorInfo *src0, ARM_COMPUTE_RETURN_ERROR_ON_MSG(src2->num_dimensions() > 1, "Biases should be one dimensional"); } + const TensorShape output_shape = + misc::shape_calculator::compute_conv3d_shape(src0->tensor_shape(), src1->tensor_shape(), conv_info); + // Checks performed when output is configured if (dst->total_size() != 0) { - TensorShape output_shape = - misc::shape_calculator::compute_conv3d_shape(src0->tensor_shape(), src1->tensor_shape(), conv_info); - - DataType data_type = src0->data_type(); - + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), output_shape); - ARM_COMPUTE_RETURN_ERROR_ON(dst->data_type() != data_type); + ARM_COMPUTE_RETURN_ERROR_ON(dst->data_type() != src0->data_type()); + } + else + { + const TensorInfo dst_info(output_shape, src0->num_channels(), src0->data_type()); + 
ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); } return Status{}; diff --git a/src/cpu/kernels/CpuDynamicGemmKernel.cpp b/src/cpu/kernels/CpuDynamicGemmKernel.cpp index e261af983fb..73a7bca1ec0 100644 --- a/src/cpu/kernels/CpuDynamicGemmKernel.cpp +++ b/src/cpu/kernels/CpuDynamicGemmKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2025 Arm Limited. + * Copyright (c) 2025-2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -27,6 +27,7 @@ #include "arm_compute/function_info/GEMMInfo.h" #include "src/common/utils/profile/acl_profile.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/MemoryHelpers.h" #include "src/cpu/kernels/dynamic_gemm/heuristics/CpuDynamicGemmKernelHeuristics.h" #include "src/cpu/utils/CpuAuxTensorHandler.h" @@ -138,6 +139,8 @@ void CpuDynamicGemmKernel::run_op(ITensorPack &tensors, const Window &window, co ITensor *d = tensors.get_tensor(ACL_DST); ITensor *pack_b = tensors.get_tensor(offset_int_vec(_base_aux_slot + PackedRHS)); + ARM_COMPUTE_ERROR_ON_SIZE_UNSUPPORTED(a->info(), b->info(), c->info(), d->info(), pack_b->info()); + ARM_COMPUTE_EXIT_ON_MSG( a->info()->dimension(0) != b->info()->dimension(1), "The product AB is defined only if the number of columns in A is equal to the number of rows in B"); diff --git a/src/cpu/kernels/CpuElementwiseUnaryKernel.cpp b/src/cpu/kernels/CpuElementwiseUnaryKernel.cpp index c82f226e900..746145761fd 100644 --- a/src/cpu/kernels/CpuElementwiseUnaryKernel.cpp +++ b/src/cpu/kernels/CpuElementwiseUnaryKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2023, 2025 Arm Limited. + * Copyright (c) 2018-2023, 2025-2026 Arm Limited. 
 * * SPDX-License-Identifier: MIT * @@ -26,6 +26,7 @@ #include "arm_compute/core/Error.h" #include "arm_compute/core/Helpers.h" #include "arm_compute/core/ITensor.h" +#include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/Utils.h" #include "arm_compute/core/Validate.h" @@ -214,6 +215,11 @@ void CpuElementwiseUnaryKernel::configure(ElementWiseUnary op, const ITensorInfo Status CpuElementwiseUnaryKernel::validate(ElementWiseUnary op, const ITensorInfo &src, const ITensorInfo &dst) { ARM_COMPUTE_TRACE_EVENT(ARM_COMPUTE_PROF_CAT_CPU, ARM_COMPUTE_PROF_LVL_CPU, "CpuElementwiseUnaryKernel::validate"); + if (!src.is_dynamic()) + { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&src); + ARM_COMPUTE_RETURN_ERROR_ON(dst.is_dynamic()); + } ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(&src); const auto *uk = CpuElementwiseUnaryKernel::get_implementation( @@ -239,11 +245,20 @@ Status CpuElementwiseUnaryKernel::validate(ElementWiseUnary op, const ITensorInf default: ARM_COMPUTE_ERROR("ElementWiseUnary operation not supported"); } + // Validate in case of configured dst if (dst.total_size() > 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst); + ARM_COMPUTE_RETURN_ERROR_ON(src.is_dynamic()); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&src, &dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&src, &dst); } + else + { + // No configured output, or it's dynamic. Since `dst` is expected to + // match `src`, there's nothing extra to check in this case. 
+ } return Status{}; } @@ -256,6 +271,12 @@ void CpuElementwiseUnaryKernel::run_op(ITensorPack &tensors, const Window &windo auto src = tensors.get_const_tensor(TensorType::ACL_SRC); auto dst = tensors.get_tensor(TensorType::ACL_DST); + if (src->info()->is_dynamic()) + { + ARM_COMPUTE_ERROR_ON_SIZE_UNSUPPORTED(src->info(), dst->info()); + ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(src, dst); + } + _run_method(src, dst, window, _op, _lut.get()); } diff --git a/src/cpu/kernels/CpuFillKernel.cpp b/src/cpu/kernels/CpuFillKernel.cpp index 61a66fbf1b5..b0dc7d361a5 100644 --- a/src/cpu/kernels/CpuFillKernel.cpp +++ b/src/cpu/kernels/CpuFillKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021, 2025 Arm Limited. + * Copyright (c) 2018-2021, 2025-2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -30,6 +30,7 @@ #include "arm_compute/core/Window.h" #include "src/common/utils/profile/acl_profile.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" @@ -43,6 +44,7 @@ void CpuFillKernel::configure(const ITensorInfo *tensor, const PixelValue &const { ARM_COMPUTE_TRACE_EVENT(ARM_COMPUTE_PROF_CAT_CPU, ARM_COMPUTE_PROF_LVL_CPU, "CpuFillKernel::configure"); ARM_COMPUTE_ERROR_ON_NULLPTR(tensor); + ARM_COMPUTE_ERROR_ON_SIZE_UNSUPPORTED(tensor); _constant_value = constant_value; // Configure kernel window diff --git a/src/cpu/kernels/CpuFloorKernel.cpp b/src/cpu/kernels/CpuFloorKernel.cpp index 1a494becc93..11a864eaf18 100644 --- a/src/cpu/kernels/CpuFloorKernel.cpp +++ b/src/cpu/kernels/CpuFloorKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2022, 2025 Arm Limited. + * Copyright (c) 2017-2022, 2025-2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -52,6 +52,7 @@ static const std::vector available_kernels = { Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src); const auto *uk = CpuFloorKernel::get_implementation(DataTypeISASelectorData{src->data_type(), CPUInfo::get().get_isa()}); @@ -60,9 +61,15 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst) // Validate in case of configured output if (dst->total_size() > 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst); } + else + { + // No configured output. Since `dst` is expected to match `src`, + // there's nothing extra to check in this case. + } return Status{}; } diff --git a/src/cpu/kernels/CpuMaxUnpoolingLayerKernel.cpp b/src/cpu/kernels/CpuMaxUnpoolingLayerKernel.cpp index e72744653e2..dd195f7d97e 100644 --- a/src/cpu/kernels/CpuMaxUnpoolingLayerKernel.cpp +++ b/src/cpu/kernels/CpuMaxUnpoolingLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2023, 2025 Arm Limited. + * Copyright (c) 2020-2023, 2025-2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -63,6 +63,7 @@ Status validate_arguments(const ITensorInfo *src, const PoolingLayerInfo &pool_info) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, indices, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src, indices); ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32); @@ -81,11 +82,20 @@ Status validate_arguments(const ITensorInfo *src, ARM_COMPUTE_RETURN_ERROR_ON_MSG(pool_type != PoolingType::MAX, "Pooling indices only supported for MAX pooling method"); ARM_COMPUTE_RETURN_ERROR_ON_MSG((pool_size != Size2D(2, 2)), "Pooling indices only supported for pool size 2x2"); + + const TensorShape output_shape = compute_unpool_shape(*src, pool_info); + if (dst->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(src, dst); } + else + { + const TensorInfo dst_info(output_shape, src->num_channels(), src->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); + } return Status{}; } diff --git a/src/cpu/kernels/CpuPool3dKernel.cpp b/src/cpu/kernels/CpuPool3dKernel.cpp index 0fca93f238d..7df8ecd4406 100644 --- a/src/cpu/kernels/CpuPool3dKernel.cpp +++ b/src/cpu/kernels/CpuPool3dKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, 2025 Arm Limited. + * Copyright (c) 2022, 2025-2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -58,6 +58,7 @@ static const std::vector available_kernels = { Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const Pooling3dLayerInfo &pool_info) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_layout() != DataLayout::NDHWC, "Only NDHWC layout supported"); ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F16, DataType::F32, DataType::QASYMM8, @@ -97,13 +98,19 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const ARM_COMPUTE_RETURN_ERROR_ON_MSG((output_width < 1 || output_height < 1 || output_depth < 1), "Calculated output dimension size is invalid"); + const TensorShape output_shape = compute_pool3d_shape(src->tensor_shape(), pool_info); + if (dst->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(src, dst); - TensorInfo out_info( - TensorInfo(compute_pool3d_shape(src->tensor_shape(), pool_info), 1, dst->data_type(), DataLayout::NDHWC)); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &out_info); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output_shape, dst->tensor_shape()); + } + else + { + const TensorInfo dst_info(output_shape, src->num_channels(), src->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); } const auto *uk = diff --git a/src/cpu/kernels/CpuScaleKernel.cpp b/src/cpu/kernels/CpuScaleKernel.cpp index b9b6b4192b1..bdea0432243 100644 --- a/src/cpu/kernels/CpuScaleKernel.cpp +++ b/src/cpu/kernels/CpuScaleKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2023, 2025 Arm Limited. + * Copyright (c) 2016-2023, 2025-2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -29,6 +29,7 @@ #include "src/common/utils/profile/acl_profile.h" #include "src/core/common/Registrars.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/ScaleHelpers.h" #include "src/core/helpers/WindowHelpers.h" #include "src/cpu/kernels/scale/neon/list.h" @@ -341,11 +342,12 @@ Status validate_arguments(const ITensorInfo *src, ITensorInfo *dst, const ScaleKernelInfo &info) { + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); const auto *uk = CpuScaleKernel::get_implementation( ScaleKernelDataTypeISASelectorData{src->data_type(), CPUInfo::get().get_isa(), info.interpolation_policy}); ARM_COMPUTE_RETURN_ERROR_ON(uk == nullptr || uk->ukernel == nullptr); - ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src, dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst); ARM_COMPUTE_RETURN_ERROR_ON(dst == src); ARM_COMPUTE_RETURN_ERROR_ON(src->num_channels() != 1); @@ -367,6 +369,11 @@ Status validate_arguments(const ITensorInfo *src, info.interpolation_policy != InterpolationPolicy::BILINEAR || info.border_mode != BorderMode::REPLICATE)); + if (offsets != nullptr) + { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(offsets); + } + if (info.interpolation_policy == InterpolationPolicy::NEAREST_NEIGHBOR && offsets != nullptr) { ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(offsets, 1, DataType::S32); @@ -377,6 +384,7 @@ Status validate_arguments(const ITensorInfo *src, ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(offsets, 1, DataType::S32); if (dx != nullptr && dy != nullptr) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dx, dy); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dx, 1, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dy, 1, DataType::F32); } diff --git a/src/cpu/kernels/CpuScatterKernel.cpp b/src/cpu/kernels/CpuScatterKernel.cpp index 48c80abecfc..64d9cb05029 100644 --- a/src/cpu/kernels/CpuScatterKernel.cpp +++ 
b/src/cpu/kernels/CpuScatterKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2024-2025 Arm Limited. + * Copyright (c) 2024-2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -121,6 +121,8 @@ Status CpuScatterKernel::validate(const ITensorInfo *updates, { ARM_COMPUTE_TRACE_EVENT(ARM_COMPUTE_PROF_CAT_CPU, ARM_COMPUTE_PROF_LVL_CPU, "CpuScatterKernel::validate"); ARM_COMPUTE_UNUSED(scatter_info); + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(updates, indices, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(updates, indices, dst); const TensorShape &ind_shape = indices->tensor_shape(); const TensorShape &upt_shape = updates->tensor_shape(); diff --git a/src/cpu/kernels/CpuTopKVKernel.cpp b/src/cpu/kernels/CpuTopKVKernel.cpp index 063338177eb..c0d5b5c090f 100644 --- a/src/cpu/kernels/CpuTopKVKernel.cpp +++ b/src/cpu/kernels/CpuTopKVKernel.cpp @@ -59,14 +59,17 @@ static const std::vector available_kernels = { Status validate_arguments(const ITensorInfo &predictions, const ITensorInfo &targets, const ITensorInfo &dst, uint32_t k) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&predictions, &targets); ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(&predictions); // predictions (logical shape [C, N], where N defaults to 1 if dimension 1 is absent) - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&predictions, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, - DataType::S32, DataType::F16, DataType::F32); + const size_t one_channel = 1u; + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&predictions, one_channel, DataType::QASYMM8, + DataType::QASYMM8_SIGNED, DataType::S32, DataType::F16, + DataType::F32); // targets (class indices) - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&targets, 1, DataType::U32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&targets, one_channel, DataType::U32); const unsigned int C = predictions.tensor_shape()[0]; // classes const unsigned int N = predictions.tensor_shape()[1]; // batch (defaults to 1 if not 
present) @@ -91,9 +94,15 @@ validate_arguments(const ITensorInfo &predictions, const ITensorInfo &targets, c // If dst is already configured, validate it if (dst.total_size() != 0) { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&dst, 1, DataType::U8); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&dst, one_channel, DataType::U8); ARM_COMPUTE_RETURN_ERROR_ON_MSG(dst.tensor_shape() != out_shape, "dst shape must be [N]"); } + else + { + const auto dst_info = TensorInfo(out_shape, one_channel, DataType::U8); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); + } const auto uk = CpuTopKVKernel::get_implementation( CpuTopKVKernelDataTypeISASelectorData{predictions.data_type(), CPUInfo::get().get_isa()}); diff --git a/src/gpu/cl/kernels/ClActivationKernel.cpp b/src/gpu/cl/kernels/ClActivationKernel.cpp index a85296f7cd7..ec3a8719a2c 100644 --- a/src/gpu/cl/kernels/ClActivationKernel.cpp +++ b/src/gpu/cl/kernels/ClActivationKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021, 2023 Arm Limited. + * Copyright (c) 2016-2021, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -33,6 +33,7 @@ #include "arm_compute/function_info/ActivationLayerInfo.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -50,6 +51,8 @@ namespace { Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const ActivationLayerInfo &act_info) { + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM16, DataType::F16, DataType::F32); @@ -93,9 +96,15 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const // Checks performed when destination is configured if ((dst != nullptr) && (dst->total_size() != 0)) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst); } + else + { + // No configured output. Since `dst` is expected to match `src`, + // there's nothing extra to check in this case. + } return Status{}; } diff --git a/src/gpu/cl/kernels/ClBatchConcatenateKernel.cpp b/src/gpu/cl/kernels/ClBatchConcatenateKernel.cpp index a853f6bc1b4..53cc9a83246 100644 --- a/src/gpu/cl/kernels/ClBatchConcatenateKernel.cpp +++ b/src/gpu/cl/kernels/ClBatchConcatenateKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2023 Arm Limited. + * Copyright (c) 2019-2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -32,6 +32,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" #include "support/StringSupport.h" @@ -47,6 +48,7 @@ namespace Status validate_arguments(const ITensorInfo *src, unsigned int batch_offset, const ITensorInfo *dst) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src, dst); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst); diff --git a/src/gpu/cl/kernels/ClCastKernel.cpp b/src/gpu/cl/kernels/ClCastKernel.cpp index 2d8cfceb913..56276ebcfbf 100644 --- a/src/gpu/cl/kernels/ClCastKernel.cpp +++ b/src/gpu/cl/kernels/ClCastKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2024 Arm Limited. + * Copyright (c) 2016-2024, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -34,6 +34,7 @@ #include "arm_compute/core/Validate.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -50,6 +51,8 @@ namespace Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy) { ARM_COMPUTE_UNUSED(policy); + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON(src == dst); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN( @@ -70,8 +73,15 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, Conver // Validate in case of configured dst if (dst->total_size() > 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst); } + else + { + // The data type must be given and the shape is the same as src. + const TensorInfo dst_info(src->tensor_shape(), src->num_channels(), dst->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); + } return Status{}; } diff --git a/src/gpu/cl/kernels/ClCol2ImKernel.cpp b/src/gpu/cl/kernels/ClCol2ImKernel.cpp index 9972e07f05b..d93e74f8778 100644 --- a/src/gpu/cl/kernels/ClCol2ImKernel.cpp +++ b/src/gpu/cl/kernels/ClCol2ImKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021, 2023 Arm Limited. + * Copyright (c) 2017-2021, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -28,10 +28,12 @@ #include "arm_compute/core/CL/ICLTensor.h" #include "arm_compute/core/CL/OpenCL.h" #include "arm_compute/core/Helpers.h" +#include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/utils/misc/ShapeCalculator.h" #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -54,20 +56,28 @@ Status validate_arguments(const ITensorInfo *src, unsigned int num_groups) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32); + const TensorShape output_shape = compute_col2im_shape(*src, convolved_dims, true, num_groups); + // Checks performed when output is configured if (dst->total_size() != 0) { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS( - dst->tensor_shape(), compute_col2im_shape(*src, convolved_dims, true, num_groups)); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(src, dst); ARM_COMPUTE_RETURN_ERROR_ON_MSG(dst->data_layout() != DataLayout::NCHW, "Col2Im output's data layout must always be NCHW"); } + else + { + const TensorInfo dst_info(output_shape, src->num_channels(), src->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); + } return Status{}; } diff --git a/src/gpu/cl/kernels/ClConvertFullyConnectedWeightsKernel.cpp b/src/gpu/cl/kernels/ClConvertFullyConnectedWeightsKernel.cpp index 85d3c3939c3..ff3a9aeb12c 100644 --- 
a/src/gpu/cl/kernels/ClConvertFullyConnectedWeightsKernel.cpp +++ b/src/gpu/cl/kernels/ClConvertFullyConnectedWeightsKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021 Arm Limited. + * Copyright (c) 2018-2021, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -30,6 +30,7 @@ #include "arm_compute/core/Utils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -96,6 +97,7 @@ Status ClConvertFullyConnectedWeightsKernel::validate(const ITensorInfo *src, DataLayout data_layout) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN); ARM_COMPUTE_RETURN_ERROR_ON(src->num_dimensions() != 2); @@ -105,9 +107,15 @@ Status ClConvertFullyConnectedWeightsKernel::validate(const ITensorInfo *src, // Checks performed when dst is configured if (dst->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst); } + else + { + // No configured output. Since `dst` is expected to match `src`, + // there's nothing extra to check in this case. + } return Status{}; } diff --git a/src/gpu/cl/kernels/ClCopyKernel.cpp b/src/gpu/cl/kernels/ClCopyKernel.cpp index c80ef664f52..172c513cecd 100644 --- a/src/gpu/cl/kernels/ClCopyKernel.cpp +++ b/src/gpu/cl/kernels/ClCopyKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021, 2023 Arm Limited. + * Copyright (c) 2018-2021, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -33,6 +33,7 @@ #include "arm_compute/core/Validate.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -49,10 +50,12 @@ namespace Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, Window *dst_window = nullptr) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src); // Validate dst if initialized if (dst->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(src, dst); if (dst_window == nullptr) @@ -64,6 +67,11 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, Window ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(src->tensor_shape(), dst_window->shape()); } } + else + { + // No configured output. Since `dst` is expected to match (or be smaller + // than) `src`, there's nothing extra to check in this case. + } return Status{}; } diff --git a/src/gpu/cl/kernels/ClCropKernel.cpp b/src/gpu/cl/kernels/ClCropKernel.cpp index 6c5066779f2..bb9dc7312cf 100644 --- a/src/gpu/cl/kernels/ClCropKernel.cpp +++ b/src/gpu/cl/kernels/ClCropKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2021, 2024 Arm Limited. + * Copyright (c) 2019-2021, 2024, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -105,6 +105,8 @@ Status ClCropKernel::validate(const ITensorInfo *src, Window *dst_window) { ARM_COMPUTE_UNUSED(extrapolation_value, dst_window); + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src, dst); ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN); ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(src, DataLayout::NHWC); ARM_COMPUTE_RETURN_ERROR_ON(src->tensor_shape().num_dimensions() > 4); @@ -117,12 +119,12 @@ Status ClCropKernel::validate(const ITensorInfo *src, { ARM_COMPUTE_RETURN_ERROR_ON(dst_window->x().step() != 1); } - if (dst->total_size() > 0) - { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(dst, DataType::F32); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(src, dst); - ARM_COMPUTE_RETURN_ERROR_ON(dst->num_dimensions() > 3); - } + + // There's no default configuration, so we expect that dst is initialized. + ARM_COMPUTE_RETURN_ERROR_ON(dst->total_size() == 0); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(dst, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(src, dst); + ARM_COMPUTE_RETURN_ERROR_ON(dst->num_dimensions() > 3); return Status{}; } diff --git a/src/gpu/cl/kernels/ClDepthConcatenateKernel.cpp b/src/gpu/cl/kernels/ClDepthConcatenateKernel.cpp index ec44d88f01b..e9049f7b480 100644 --- a/src/gpu/cl/kernels/ClDepthConcatenateKernel.cpp +++ b/src/gpu/cl/kernels/ClDepthConcatenateKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2023 Arm Limited. + * Copyright (c) 2017-2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -32,6 +32,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" #include "support/StringSupport.h" @@ -47,6 +48,7 @@ namespace Status validate_arguments(const ITensorInfo *src, unsigned int depth_offset, const ITensorInfo *dst) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src, dst); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32); diff --git a/src/gpu/cl/kernels/ClDequantizeKernel.cpp b/src/gpu/cl/kernels/ClDequantizeKernel.cpp index 53429ab1aaf..027bd1ca114 100644 --- a/src/gpu/cl/kernels/ClDequantizeKernel.cpp +++ b/src/gpu/cl/kernels/ClDequantizeKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021, 2023 Arm Limited. + * Copyright (c) 2017-2021, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -32,6 +32,7 @@ #include "arm_compute/core/Validate.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -48,16 +49,25 @@ namespace Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src); + const size_t one_channel = 1u; + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, one_channel, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8_PER_CHANNEL, DataType::QSYMM8, DataType::QSYMM16); if (dst->tensor_shape().total_size() > 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(dst); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, 1, DataType::F16, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, one_channel, DataType::F16, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst); } + else + { + // Assume the larger one of the possible data types. + const TensorInfo dst_info(src->tensor_shape(), one_channel, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); + } return Status{}; } diff --git a/src/gpu/cl/kernels/ClDirectConv2dKernel.cpp b/src/gpu/cl/kernels/ClDirectConv2dKernel.cpp index 7cf1958c1ba..dfcfe32d7e8 100644 --- a/src/gpu/cl/kernels/ClDirectConv2dKernel.cpp +++ b/src/gpu/cl/kernels/ClDirectConv2dKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2023 Arm Limited. + * Copyright (c) 2017-2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -38,6 +38,7 @@ #include "src/core/AccessWindowStatic.h" #include "src/core/CL/CLUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "src/gpu/cl/kernels/gemm/ClGemmHelpers.h" @@ -60,6 +61,8 @@ Status validate_arguments(const ITensorInfo *src, const ActivationLayerInfo &act_info, const DirectConvComputeKernelInfo &desc) { + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, weights, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src, weights); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8_SIGNED, DataType::QASYMM8, DataType::F16, DataType::F32); @@ -130,6 +133,7 @@ Status validate_arguments(const ITensorInfo *src, if (biases != nullptr) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(biases); if (is_data_type_quantized_asymmetric(src->data_type())) { ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32); @@ -143,13 +147,20 @@ Status validate_arguments(const ITensorInfo *src, ARM_COMPUTE_RETURN_ERROR_ON_MSG(biases->num_dimensions() > 1, "Biases should be one dimensional"); } + const TensorShape output_shape = misc::shape_calculator::compute_deep_convolution_shape(*src, *weights, conv_info); + // Checks performed when dst is configured if (dst->total_size() != 0) { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS( - dst->tensor_shape(), misc::shape_calculator::compute_deep_convolution_shape(*src, *weights, conv_info)); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst); } + else + { + const TensorInfo output_info(output_shape, src->num_channels(), src->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); + } const auto 
data_type = src->data_type(); if (is_data_type_quantized(data_type)) diff --git a/src/gpu/cl/kernels/ClDirectConv3dKernel.cpp b/src/gpu/cl/kernels/ClDirectConv3dKernel.cpp index 8002520a879..0c0bb5e78de 100644 --- a/src/gpu/cl/kernels/ClDirectConv3dKernel.cpp +++ b/src/gpu/cl/kernels/ClDirectConv3dKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2023 Arm Limited. + * Copyright (c) 2021-2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -24,12 +24,14 @@ #include "src/gpu/cl/kernels/ClDirectConv3dKernel.h" #include "arm_compute/core/CL/ICLTensor.h" +#include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/utils/helpers/AdjustVecSize.h" #include "arm_compute/core/utils/misc/ShapeCalculator.h" #include "arm_compute/core/utils/quantization/AsymmHelpers.h" #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -47,6 +49,8 @@ Status validate_arguments(const ITensorInfo *src0, const ITensorInfo *dst, const Conv3dInfo &conv3d_info) { + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src0, src1, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src0, src1); ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_LAYOUT(src0, src1, dst); ARM_COMPUTE_RETURN_ERROR_ON_MSG(src0->data_layout() != DataLayout::NDHWC, "Only NDHWC layout supported"); @@ -72,6 +76,7 @@ Status validate_arguments(const ITensorInfo *src0, if (src2 != nullptr) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src2); if (is_data_type_quantized(src0->data_type())) { ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src2, 1, DataType::S32); @@ -85,15 +90,22 @@ Status validate_arguments(const ITensorInfo *src0, ARM_COMPUTE_RETURN_ERROR_ON_MSG(src2->num_dimensions() > 1, "Biases should be one dimensional"); } + const TensorShape output_shape = + misc::shape_calculator::compute_conv3d_shape(src0->tensor_shape(), src1->tensor_shape(), conv3d_info); + // Checks performed when 
dst is configured if (dst->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); ARM_COMPUTE_RETURN_ERROR_ON_MSG(dst->dimension(0) != src1->dimension(0), "Weights and dst OFMs should match"); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS( - dst->tensor_shape(), - misc::shape_calculator::compute_conv3d_shape(src0->tensor_shape(), src1->tensor_shape(), conv3d_info)); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src0, dst); } + else + { + const TensorInfo dst_info(output_shape, src0->num_channels(), src0->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); + } return Status{}; } diff --git a/src/gpu/cl/kernels/ClElementwiseKernel.cpp b/src/gpu/cl/kernels/ClElementwiseKernel.cpp index cdb3527a921..859f01c4c0a 100644 --- a/src/gpu/cl/kernels/ClElementwiseKernel.cpp +++ b/src/gpu/cl/kernels/ClElementwiseKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021, 2023 Arm Limited. + * Copyright (c) 2018-2021, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -25,6 +25,7 @@ #include "arm_compute/core/CL/CLHelpers.h" #include "arm_compute/core/CL/ICLTensor.h" +#include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/Utils.h" #include "arm_compute/core/utils/ActivationFunctionUtils.h" #include "arm_compute/core/utils/helpers/AdjustVecSize.h" @@ -32,6 +33,7 @@ #include "src/common/utils/Validate.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -101,7 +103,7 @@ Status validate_arguments_with_float_only_supported_rules(const ITensorInfo &src const ITensorInfo &src2, const ITensorInfo &dst) { - ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(&src1, &src2, &dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&src1, &src2); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(&src1); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&src1, 1, DataType::F16, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&src1, &src2); @@ -117,11 +119,16 @@ Status validate_arguments_with_float_only_supported_rules(const ITensorInfo &src // Validate in case of configured dst if (dst.total_size() > 0) { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&dst, 1, DataType::F16, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&src1, &dst); ARM_COMPUTE_RETURN_ON_ERROR( validate_in_place_output_shape(in_place, src1_in_place, src1, src2, dst, out_shape)); } + else + { + const TensorInfo dst_info(out_shape, src1.num_channels(), src1.data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); + } return Status{}; } @@ -129,6 +136,7 @@ Status validate_arguments_with_float_only_supported_rules(const ITensorInfo &src Status validate_arguments_divide_operation(const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst) { 
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src1, src2, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src1, src2); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src1); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src1, 1, DataType::F16, DataType::F32, DataType::S32); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src1, src2); @@ -144,11 +152,16 @@ Status validate_arguments_divide_operation(const ITensorInfo *src1, const ITenso // Validate in case of configured dst if (dst->total_size() > 0) { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, 1, DataType::F16, DataType::F32, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src1, dst); ARM_COMPUTE_RETURN_ON_ERROR( validate_in_place_output_shape(in_place, src1_in_place, *src1, *src2, *dst, out_shape)); } + else + { + const TensorInfo dst_info(out_shape, src1->num_channels(), src1->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); + } return Status{}; } @@ -156,6 +169,7 @@ Status validate_arguments_divide_operation(const ITensorInfo *src1, const ITenso Status validate_arguments_with_arithmetic_rules(const ITensorInfo &src1, const ITensorInfo &src2, const ITensorInfo &dst) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&src1, &src2); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(&src1); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&src1, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S16, DataType::QSYMM16, @@ -180,6 +194,7 @@ validate_arguments_with_arithmetic_rules(const ITensorInfo &src1, const ITensorI // Validate in case of configured dst if (dst.total_size() > 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&src1, &dst); ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, dst.tensor_shape(), 0), "Wrong shape for dst"); @@ -192,6 +207,11 @@ validate_arguments_with_arithmetic_rules(const 
ITensorInfo &src1, const ITensorI ARM_COMPUTE_RETURN_ERROR_ON_MSG(offset != 0, "For quantized symmetric, offset must be zero"); } } + else + { + const TensorInfo dst_info(out_shape, src1.num_channels(), src1.data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); + } return Status{}; } diff --git a/src/gpu/cl/kernels/ClElementwiseUnaryKernel.cpp b/src/gpu/cl/kernels/ClElementwiseUnaryKernel.cpp index f7c198ee54a..58747e49b12 100644 --- a/src/gpu/cl/kernels/ClElementwiseUnaryKernel.cpp +++ b/src/gpu/cl/kernels/ClElementwiseUnaryKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021, 2023 Arm Limited. + * Copyright (c) 2018-2021, 2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -30,6 +30,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" #include "support/StringSupport.h" @@ -46,6 +47,7 @@ constexpr unsigned int vector_size_byte_opencl = 16; Status validate_arguments(const ITensorInfo &src, const ITensorInfo &dst, const ElementWiseUnary op) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&src); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(&src); if (op == ElementWiseUnary::LOGICAL_NOT) { @@ -68,10 +70,16 @@ Status validate_arguments(const ITensorInfo &src, const ITensorInfo &dst, const // Validate in case of configured dst if (dst.total_size() > 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(&dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&src, &dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&src, &dst); } + else + { + // No configured output. Since `dst` is expected to match `src`, + // there's nothing extra to check in this case. 
+ } return Status{}; } diff --git a/src/gpu/cl/kernels/ClFillKernel.cpp b/src/gpu/cl/kernels/ClFillKernel.cpp index 96ad503730e..798ea789c44 100644 --- a/src/gpu/cl/kernels/ClFillKernel.cpp +++ b/src/gpu/cl/kernels/ClFillKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021 Arm Limited. + * Copyright (c) 2018-2021, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -32,6 +32,7 @@ #include "arm_compute/core/Validate.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -92,8 +93,8 @@ void ClFillKernel::configure(const CLCompileContext &compile_context, Status ClFillKernel::validate(const ITensorInfo *tensor, const PixelValue &constant_value, Window *window) { - ARM_COMPUTE_UNUSED(tensor); ARM_COMPUTE_UNUSED(constant_value); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(tensor); if (window != nullptr) { ARM_COMPUTE_RETURN_ERROR_ON(window->x().step() != 1); diff --git a/src/gpu/cl/kernels/ClFloorKernel.cpp b/src/gpu/cl/kernels/ClFloorKernel.cpp index 358e84012bf..3c82b949932 100644 --- a/src/gpu/cl/kernels/ClFloorKernel.cpp +++ b/src/gpu/cl/kernels/ClFloorKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021, 2023 Arm Limited. + * Copyright (c) 2017-2021, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -33,6 +33,7 @@ #include "arm_compute/core/Validate.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -49,15 +50,22 @@ namespace Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F16, DataType::F32); // Validate in case of configured output if (dst->total_size() > 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst); } + else + { + // No configured output. Since `dst` is expected to match `src`, + // there's nothing extra to check in this case. + } return Status{}; } diff --git a/src/gpu/cl/kernels/ClGemmLowpMatrixMultiplyNativeKernel.cpp b/src/gpu/cl/kernels/ClGemmLowpMatrixMultiplyNativeKernel.cpp index e0d925dfb29..6e1a3f8307c 100644 --- a/src/gpu/cl/kernels/ClGemmLowpMatrixMultiplyNativeKernel.cpp +++ b/src/gpu/cl/kernels/ClGemmLowpMatrixMultiplyNativeKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2023 Arm Limited. + * Copyright (c) 2019-2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -34,6 +34,7 @@ #include "arm_compute/core/Validate.h" #include "src/core/AccessWindowStatic.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -57,14 +58,17 @@ Status validate_arguments(const ITensorInfo *src0, const GEMMReshapeInfo &gemm_info) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src0, src1, dst); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src0, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src0, src1); + const size_t one_channel = 1u; + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src0, one_channel, DataType::QASYMM8, + DataType::QASYMM8_SIGNED); if (src0->data_type() == DataType::QASYMM8) { ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src0, src1); } else { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src1, 1, DataType::QASYMM8, DataType::QSYMM8, + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src1, one_channel, DataType::QASYMM8, DataType::QSYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8_PER_CHANNEL); } ARM_COMPUTE_RETURN_ERROR_ON_MSG(src0->num_dimensions() > 4, @@ -100,12 +104,18 @@ Status validate_arguments(const ITensorInfo *src0, ARM_COMPUTE_RETURN_ERROR_ON(src0->dimension(1) != static_cast<unsigned int>(m)); } + const TensorShape output_shape = misc::shape_calculator::compute_mm_shape(*src0, *src1, gemm_info); + if (dst->total_size() != 0) { - const TensorInfo tensor_info_dst = - dst->clone()->set_tensor_shape(misc::shape_calculator::compute_mm_shape(*src0, *src1, gemm_info)); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &tensor_info_dst); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, 
one_channel, DataType::S32); + } + else + { + const TensorInfo dst_info(output_shape, one_channel, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); } return Status{}; diff --git a/src/gpu/cl/kernels/ClGemmLowpMatrixMultiplyReshapedKernel.cpp b/src/gpu/cl/kernels/ClGemmLowpMatrixMultiplyReshapedKernel.cpp index ddbc809cdde..df415dfbf8a 100644 --- a/src/gpu/cl/kernels/ClGemmLowpMatrixMultiplyReshapedKernel.cpp +++ b/src/gpu/cl/kernels/ClGemmLowpMatrixMultiplyReshapedKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2023 Arm Limited. + * Copyright (c) 2019-2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -33,6 +33,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "arm_compute/core/Validate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -58,7 +59,10 @@ Status validate_arguments(const ITensorInfo *src0, const GEMMReshapeInfo &gemm_info) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src0, src1, dst); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src0, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src0, src1); + const size_t one_channel = 1u; + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src0, one_channel, DataType::QASYMM8, + DataType::QASYMM8_SIGNED); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src0, src1); ARM_COMPUTE_RETURN_ERROR_ON_MSG(src0->num_dimensions() > 4, "The number of dimensions for the LHS matrix must be <= 4"); @@ -98,11 +102,18 @@ Status validate_arguments(const ITensorInfo *src0, ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src0, &tensor_info_reshaped0); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src1, &tensor_info_reshaped1); + const TensorShape output_shape = compute_mm_shape(*src0, *src1, gemm_info); + if (dst->total_size() != 0) { - const TensorInfo tensor_info_dst = 
dst->clone()->set_tensor_shape(compute_mm_shape(*src0, *src1, gemm_info)); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &tensor_info_dst); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, 1, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), output_shape); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, one_channel, DataType::S32); + } + else + { + const TensorInfo dst_info(output_shape, one_channel, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); } return Status{}; diff --git a/src/gpu/cl/kernels/ClGemmLowpMatrixMultiplyReshapedOnlyRhsKernel.cpp b/src/gpu/cl/kernels/ClGemmLowpMatrixMultiplyReshapedOnlyRhsKernel.cpp index 2f1f3b8df0f..768d774595d 100644 --- a/src/gpu/cl/kernels/ClGemmLowpMatrixMultiplyReshapedOnlyRhsKernel.cpp +++ b/src/gpu/cl/kernels/ClGemmLowpMatrixMultiplyReshapedOnlyRhsKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2023 Arm Limited. + * Copyright (c) 2019-2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -34,6 +34,7 @@ #include "arm_compute/core/Validate.h" #include "src/core/AccessWindowStatic.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -64,14 +65,25 @@ Status validate_arguments(const ITensorInfo *src0, const ITensorInfo *output_shifts) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src0, src1, dst); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src0, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED); + if (vector_sum_col != nullptr) + { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(vector_sum_col); + } + if (vector_sum_row != nullptr) + { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(vector_sum_row); + } + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src0, src1); + const size_t one_channel = 1u; + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src0, one_channel, DataType::QASYMM8, + DataType::QASYMM8_SIGNED); if (src0->data_type() == DataType::QASYMM8) { ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src0, src1); } else { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src1, 1, DataType::QASYMM8, DataType::QSYMM8, + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src1, one_channel, DataType::QASYMM8, DataType::QSYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8_PER_CHANNEL); } ARM_COMPUTE_RETURN_ERROR_ON_MSG(src0->num_dimensions() > 4, @@ -114,23 +126,25 @@ Status validate_arguments(const ITensorInfo *src0, ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src1, &tensor_info_reshaped1); const TensorShape expected_dst_shape = compute_mm_shape(*src0, *src1, gemm_info); + const DataType expected_data_type = + (output_stage.type == GEMMLowpOutputStageType::NONE) ? 
DataType::S32 : src0->data_type(); if (dst->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); const TensorInfo tensor_info_dst = dst->clone()->set_tensor_shape(expected_dst_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &tensor_info_dst); - if (output_stage.type == GEMMLowpOutputStageType::NONE) - { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, 1, DataType::S32); - } - else - { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src0, dst); - } + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, one_channel, expected_data_type); + } + else + { + const TensorInfo dst_info(expected_dst_shape, one_channel, expected_data_type); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); } if (bias != nullptr) { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(bias); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, one_channel, DataType::S32); ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1); ARM_COMPUTE_RETURN_ERROR_ON(expected_dst_shape[0] != bias->dimension(0)); } @@ -145,14 +159,14 @@ Status validate_arguments(const ITensorInfo *src0, // If a_offset == 0, vector_sum_col can be a nullptr if (gemm_info.a_offset != 0) { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, 1, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, one_channel, DataType::S32); ARM_COMPUTE_RETURN_ERROR_ON(vector_sum_col->dimension(0) != expected_dst_shape[0]); } // If b_offset == 0, vector_sum_row can be a nullptr if (gemm_info.b_offset != 0) { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, 1, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, one_channel, DataType::S32); // Check if mm result is a 3D reinterpretation const bool reinterpret_as_3d = @@ -196,9 +210,10 @@ Status validate_arguments(const ITensorInfo *src0, if 
(output_multipliers != nullptr && output_shifts != nullptr) { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_multipliers, 1, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output_multipliers, output_shifts); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_multipliers, one_channel, DataType::S32); ARM_COMPUTE_RETURN_ERROR_ON(output_multipliers->num_dimensions() > 1); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_shifts, 1, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_shifts, one_channel, DataType::S32); ARM_COMPUTE_RETURN_ERROR_ON(output_shifts->num_dimensions() > 1); if (output_stage.is_quantized_per_channel) { diff --git a/src/gpu/cl/kernels/ClGemmLowpMatrixMultiplyReshapedOnlyRhsMMULKernel.cpp b/src/gpu/cl/kernels/ClGemmLowpMatrixMultiplyReshapedOnlyRhsMMULKernel.cpp index 030c11d069f..4564eb0a7e4 100644 --- a/src/gpu/cl/kernels/ClGemmLowpMatrixMultiplyReshapedOnlyRhsMMULKernel.cpp +++ b/src/gpu/cl/kernels/ClGemmLowpMatrixMultiplyReshapedOnlyRhsMMULKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022-2023 Arm Limited. + * Copyright (c) 2022-2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -30,6 +30,7 @@ #include "arm_compute/core/utils/misc/ShapeCalculator.h" #include "arm_compute/core/utils/StringUtils.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -57,9 +58,20 @@ Status validate_arguments(const ITensorInfo *src0, const ITensorInfo *output_shifts) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src0, src1, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src0, src1); + if (vector_sum_col != nullptr) + { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(vector_sum_col); + } + if (vector_sum_row != nullptr) + { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(vector_sum_row); + } ARM_COMPUTE_RETURN_ERROR_ON_MSG(!arm_matrix_multiply_supported(CLKernelLibrary::get().get_device()), "The extension cl_arm_matrix_multiply is not supported on the target platform"); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src0, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED); + const size_t one_channel = 1u; + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src0, one_channel, DataType::QASYMM8, + DataType::QASYMM8_SIGNED); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src0, src1); ARM_COMPUTE_RETURN_ERROR_ON_MSG(src0->num_dimensions() > 4, "The number of dimensions for the LHS matrix must be <= 4"); @@ -101,23 +113,24 @@ Status validate_arguments(const ITensorInfo *src0, ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src1, &tensor_info_reshaped1); const TensorShape expected_dst_shape = compute_mm_shape(*src0, *src1, gemm_info); + const DataType expected_data_type = + (output_stage.type == GEMMLowpOutputStageType::NONE) ? 
DataType::S32 : src0->data_type(); if (dst->total_size() != 0) { - const TensorInfo tensor_info_dst = dst->clone()->set_tensor_shape(expected_dst_shape); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &tensor_info_dst); - if (output_stage.type == GEMMLowpOutputStageType::NONE) - { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, 1, DataType::S32); - } - else - { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src0, dst); - } + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), expected_dst_shape); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, one_channel, expected_data_type); + } + else + { + const TensorInfo dst_info(expected_dst_shape, one_channel, expected_data_type); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); } if (bias != nullptr) { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(bias); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, one_channel, DataType::S32); ARM_COMPUTE_RETURN_ERROR_ON(expected_dst_shape[0] != bias->dimension(0)); } @@ -131,14 +144,14 @@ Status validate_arguments(const ITensorInfo *src0, // If a_offset == 0, vector_sum_col can be a nullptr if (gemm_info.a_offset != 0) { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, 1, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, one_channel, DataType::S32); ARM_COMPUTE_RETURN_ERROR_ON(vector_sum_col->dimension(0) != expected_dst_shape[0]); } // If b_offset == 0, vector_sum_row can be a nullptr if (gemm_info.b_offset != 0) { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, 1, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, one_channel, DataType::S32); // Check if mm result is a 3D reinterpretation const bool reinterpret_as_3d = @@ -182,9 +195,10 @@ Status 
validate_arguments(const ITensorInfo *src0, if (output_multipliers != nullptr && output_shifts != nullptr) { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_multipliers, 1, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output_multipliers, output_shifts); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_multipliers, one_channel, DataType::S32); ARM_COMPUTE_RETURN_ERROR_ON(output_multipliers->num_dimensions() > 1); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_shifts, 1, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_shifts, one_channel, DataType::S32); ARM_COMPUTE_RETURN_ERROR_ON(output_shifts->num_dimensions() > 1); if (output_stage.is_quantized_per_channel) { diff --git a/src/gpu/cl/kernels/ClGemmLowpOffsetContributionKernel.cpp b/src/gpu/cl/kernels/ClGemmLowpOffsetContributionKernel.cpp index d93dbde95a8..ce53db879b7 100644 --- a/src/gpu/cl/kernels/ClGemmLowpOffsetContributionKernel.cpp +++ b/src/gpu/cl/kernels/ClGemmLowpOffsetContributionKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2023 Arm Limited. + * Copyright (c) 2017-2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -31,6 +31,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "arm_compute/core/Validate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" #include "support/StringSupport.h" @@ -50,10 +51,21 @@ Status validate_arguments(const ITensorInfo *mm_result, int32_t a_offset, int32_t b_offset) { + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(mm_result); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(mm_result); + if (vector_sum_col != nullptr) + { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(vector_sum_col); + } + if (vector_sum_row != nullptr) + { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(vector_sum_row); + } ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mm_result, 1, DataType::S32); if (bias != nullptr) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(bias); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32); ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1); ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) != bias->dimension(0)); diff --git a/src/gpu/cl/kernels/ClGemmLowpOffsetContributionOutputStageKernel.cpp b/src/gpu/cl/kernels/ClGemmLowpOffsetContributionOutputStageKernel.cpp index 26f479f61a5..e39f019649c 100644 --- a/src/gpu/cl/kernels/ClGemmLowpOffsetContributionOutputStageKernel.cpp +++ b/src/gpu/cl/kernels/ClGemmLowpOffsetContributionOutputStageKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2023 Arm Limited. + * Copyright (c) 2018-2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -32,6 +32,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "arm_compute/core/Validate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -56,18 +57,30 @@ Status validate_arguments(const ITensorInfo *mm_result, const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts) { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mm_result, 1, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(mm_result, output_multipliers, output_shifts); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(mm_result, output_multipliers, output_shifts); + if (vector_sum_col != nullptr) + { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(vector_sum_col); + } + if (vector_sum_row != nullptr) + { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(vector_sum_row); + } + const size_t one_channel = 1u; + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mm_result, one_channel, DataType::S32); if (bias != nullptr) { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(bias); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, one_channel, DataType::S32); ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1); ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) != bias->dimension(0)); } - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_multipliers, 1, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_multipliers, one_channel, DataType::S32); ARM_COMPUTE_RETURN_ERROR_ON(output_multipliers->num_dimensions() > 1); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_shifts, 1, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_shifts, one_channel, DataType::S32); ARM_COMPUTE_RETURN_ERROR_ON(output_shifts->num_dimensions() > 1); if (output_stage.is_quantized_per_channel) { @@ 
-78,14 +91,14 @@ Status validate_arguments(const ITensorInfo *mm_result, // If a_offset == 0, vector_sum_col can be a nullptr if (a_offset != 0) { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, 1, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, one_channel, DataType::S32); ARM_COMPUTE_RETURN_ERROR_ON(vector_sum_col->dimension(0) != mm_result->dimension(0)); } // If b_offset == 0, vector_sum_row can be a nullptr if (b_offset != 0) { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, 1, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, one_channel, DataType::S32); // Check if input is a 3D reinterpretation const bool reinterpret_as_3d = @@ -125,10 +138,17 @@ Status validate_arguments(const ITensorInfo *mm_result, // Checks performed when output is configured if ((dst != nullptr) && (dst->total_size() != 0)) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); ARM_COMPUTE_RETURN_ERROR_ON(output_stage.output_data_type != dst->data_type()); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, one_channel, DataType::QASYMM8, + DataType::QASYMM8_SIGNED); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mm_result, dst); } + else + { + const TensorInfo dst_info(mm_result->tensor_shape(), one_channel, output_stage.output_data_type); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); + } ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_min_bound > output_stage.gemmlowp_max_bound); ARM_COMPUTE_RETURN_ERROR_ON_MSG(output_stage.gemmlowp_multipliers.size() != output_stage.gemmlowp_shifts.size(), diff --git a/src/gpu/cl/kernels/ClGemmLowpQuantizeDownInt32ScaleByFixedPointKernel.cpp b/src/gpu/cl/kernels/ClGemmLowpQuantizeDownInt32ScaleByFixedPointKernel.cpp index 7b7beab12c8..36c2ba82d37 100644 --- 
a/src/gpu/cl/kernels/ClGemmLowpQuantizeDownInt32ScaleByFixedPointKernel.cpp +++ b/src/gpu/cl/kernels/ClGemmLowpQuantizeDownInt32ScaleByFixedPointKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2023 Arm Limited. + * Copyright (c) 2020-2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -33,6 +33,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "arm_compute/core/Validate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -52,11 +53,15 @@ Status validate_arguments(const ITensorInfo *src, const GEMMLowpOutputStageInfo *info) { ARM_COMPUTE_UNUSED(info); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src); + const size_t one_channel = 1u; + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, one_channel, DataType::S32); // Check biases if exist if (bias != nullptr) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(bias); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, bias); ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1); ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(0) != bias->dimension(0)); @@ -64,9 +69,15 @@ Status validate_arguments(const ITensorInfo *src, if (dst->total_size() != 0) { - ARM_COMPUTE_RETURN_ERROR_ON_MSG(dst->data_type() != info->output_data_type, "Mismatching dst data type"); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, one_channel, info->output_data_type); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst); } + else + { + const TensorInfo dst_info(src->tensor_shape(), one_channel, info->output_data_type); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); + } return Status{}; } diff --git a/src/gpu/cl/kernels/ClGemmLowpQuantizeDownInt32ScaleByFloatKernel.cpp 
b/src/gpu/cl/kernels/ClGemmLowpQuantizeDownInt32ScaleByFloatKernel.cpp index 52ebd32d46f..612386e3ec9 100644 --- a/src/gpu/cl/kernels/ClGemmLowpQuantizeDownInt32ScaleByFloatKernel.cpp +++ b/src/gpu/cl/kernels/ClGemmLowpQuantizeDownInt32ScaleByFloatKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2023 Arm Limited. + * Copyright (c) 2018-2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -33,6 +33,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "arm_compute/core/Validate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -51,7 +52,10 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const GEMMLowpOutputStageInfo *info) { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src); + const size_t one_channel = 1u; + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, one_channel, DataType::S32); ARM_COMPUTE_RETURN_ERROR_ON((info->output_data_type != DataType::QASYMM8) && (info->output_data_type != DataType::QASYMM8_SIGNED)); ARM_COMPUTE_RETURN_ERROR_ON( @@ -65,6 +69,7 @@ Status validate_arguments(const ITensorInfo *src, // Check biases if exist if (bias != nullptr) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(bias); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, bias); ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1); ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(0) != bias->dimension(0)); @@ -72,9 +77,15 @@ Status validate_arguments(const ITensorInfo *src, if (dst->total_size() != 0) { - ARM_COMPUTE_RETURN_ERROR_ON_MSG(dst->data_type() != info->output_data_type, "Mismatching output data type"); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, one_channel, info->output_data_type); 
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst); } + else + { + const TensorInfo dst_info(src->tensor_shape(), one_channel, info->output_data_type); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); + } return Status{}; } diff --git a/src/gpu/cl/kernels/ClGemmLowpQuantizeDownInt32ScaleKernel.cpp b/src/gpu/cl/kernels/ClGemmLowpQuantizeDownInt32ScaleKernel.cpp index 31434ce61b0..8d4cffed3ff 100644 --- a/src/gpu/cl/kernels/ClGemmLowpQuantizeDownInt32ScaleKernel.cpp +++ b/src/gpu/cl/kernels/ClGemmLowpQuantizeDownInt32ScaleKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2023 Arm Limited. + * Copyright (c) 2020-2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -26,12 +26,14 @@ #include "arm_compute/core/CL/CLHelpers.h" #include "arm_compute/core/CL/ICLTensor.h" #include "arm_compute/core/Helpers.h" +#include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/Utils.h" #include "arm_compute/core/utils/helpers/AdjustVecSize.h" #include "arm_compute/core/utils/quantization/AsymmHelpers.h" #include "arm_compute/core/utils/StringUtils.h" #include "arm_compute/core/Validate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -50,7 +52,10 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const GEMMLowpOutputStageInfo *output_stage) { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src); + const size_t one_channel = 1u; + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, one_channel, DataType::S32); ARM_COMPUTE_RETURN_ERROR_ON((output_stage->output_data_type != DataType::QASYMM8) && (output_stage->output_data_type != DataType::QASYMM8_SIGNED)); ARM_COMPUTE_RETURN_ERROR_ON( @@ -64,6 +69,7 @@ Status validate_arguments(const ITensorInfo *src, // Check biases if exist if (bias 
!= nullptr) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(bias); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, bias); ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1); ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(0) != bias->dimension(0)); @@ -71,10 +77,15 @@ Status validate_arguments(const ITensorInfo *src, if (dst->total_size() != 0) { - ARM_COMPUTE_RETURN_ERROR_ON_MSG(dst->data_type() != output_stage->output_data_type, - "Mismatching output data type"); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, one_channel, output_stage->output_data_type); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst); } + else + { + const TensorInfo dst_info(src->tensor_shape(), one_channel, output_stage->output_data_type); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); + } return Status{}; } diff --git a/src/gpu/cl/kernels/ClGemmLowpReductionKernel.cpp b/src/gpu/cl/kernels/ClGemmLowpReductionKernel.cpp index ee4a191fed5..1c65069e294 100644 --- a/src/gpu/cl/kernels/ClGemmLowpReductionKernel.cpp +++ b/src/gpu/cl/kernels/ClGemmLowpReductionKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2023 Arm Limited. + * Copyright (c) 2017-2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -26,10 +26,13 @@ #include "arm_compute/core/CL/CLHelpers.h" #include "arm_compute/core/CL/ICLTensor.h" #include "arm_compute/core/KernelDescriptors.h" +#include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/Utils.h" #include "arm_compute/core/utils/helpers/AdjustVecSize.h" +#include "arm_compute/core/utils/misc/ShapeCalculator.h" #include "arm_compute/core/utils/StringUtils.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -37,6 +40,7 @@ namespace arm_compute { +using namespace misc::shape_calculator; namespace opencl { namespace kernels @@ -46,15 +50,25 @@ namespace Status validate_arguments_matrix_a_reduction(const ITensorInfo *src, const ITensorInfo *dst) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src); + const size_t one_channel = 1u; + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, one_channel, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8); + // Output vector matches the number of rows of the input matrix. 
+ const TensorShape output_shape = compute_reductionB_shape(*src); + const auto output_data_type = DataType::S32; + if (dst->total_size() > 0) { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, 1, DataType::S32); - ARM_COMPUTE_RETURN_ERROR_ON_MSG( - dst->dimension(0) != src->dimension(1), - "Output vector must have length equal to the number of rows of the input matrix"); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, one_channel, output_data_type); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), output_shape); + } + else + { + const TensorInfo dst_info(output_shape, one_channel, output_data_type); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); } return Status{}; } @@ -62,15 +76,25 @@ Status validate_arguments_matrix_a_reduction(const ITensorInfo *src, const ITens Status validate_arguments_matrix_b_reduction(const ITensorInfo *src, const ITensorInfo *dst) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src); + const size_t one_channel = 1u; + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, one_channel, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8, DataType::QSYMM8_PER_CHANNEL); + // Output vector matches the number of columns of the input matrix. 
+ const TensorShape output_shape = compute_reductionA_shape(*src); + const auto output_data_type = DataType::S32; + if (dst->total_size() > 0) { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, 1, DataType::S32); - ARM_COMPUTE_RETURN_ERROR_ON_MSG( - dst->dimension(0) != src->dimension(0), - "Output vector must have length equal to the number of columns of the input matrix"); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, one_channel, output_data_type); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), output_shape); + } + else + { + const TensorInfo dst_info(output_shape, one_channel, output_data_type); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); } return Status{}; } diff --git a/src/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.cpp b/src/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.cpp index fd23aa99244..98abd1f0096 100644 --- a/src/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.cpp +++ b/src/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2021, 2023 Arm Limited. + * Copyright (c) 2019-2021, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -36,6 +36,7 @@ #include "src/core/AccessWindowStatic.h" #include "src/core/CL/CLUtils.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "src/core/utils/helpers/float_ops.h" @@ -64,6 +65,7 @@ Status validate_arguments(const ITensorInfo *src0, { ARM_COMPUTE_UNUSED(alpha); ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src0, src1, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src0, src1); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src0, 1, DataType::F32, DataType::F16); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src0, src1); ARM_COMPUTE_RETURN_ERROR_ON_MSG(src0->num_dimensions() > 4, @@ -108,6 +110,7 @@ Status validate_arguments(const ITensorInfo *src0, const unsigned int src2_dim0 = src2->dimension(0); const unsigned int src2_dim1 = src2->dimension(1); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src2); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src2, src1); if (gemm_info.broadcast_bias) { @@ -120,13 +123,19 @@ Status validate_arguments(const ITensorInfo *src0, } } + const TensorShape output_shape = misc::shape_calculator::compute_mm_shape(*src0, *src1, gemm_info); + if (dst->total_size() != 0) { - const TensorInfo tensor_info_dst = - dst->clone()->set_tensor_shape(misc::shape_calculator::compute_mm_shape(*src0, *src1, gemm_info)); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &tensor_info_dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src0, dst); } + else + { + const TensorInfo dst_info(output_shape, src0->num_channels(), src0->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); + } return Status{}; } diff --git a/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.cpp b/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.cpp index 
4fe6bddb36a..3bf286d0402 100644 --- a/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.cpp +++ b/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021, 2023 Arm Limited. + * Copyright (c) 2018-2021, 2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -36,6 +36,7 @@ #include "src/core/CL/CLUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "src/core/utils/helpers/float_ops.h" @@ -65,6 +66,7 @@ Status validate_arguments(const ITensorInfo *src0, { ARM_COMPUTE_UNUSED(alpha); ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src0, src1, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src0, src1); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src0); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src0, 1, DataType::F16, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src0, src1); @@ -107,6 +109,7 @@ Status validate_arguments(const ITensorInfo *src0, const unsigned int src2_dim0 = src2->dimension(0); const unsigned int src2_dim1 = src2->dimension(1); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src2); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src2, src1); if (gemm_info.broadcast_bias) { @@ -130,13 +133,19 @@ Status validate_arguments(const ITensorInfo *src0, ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src0, &tensor_info_reshaped0); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src1, &tensor_info_reshaped1); + const TensorShape output_shape = misc::shape_calculator::compute_mm_shape(*src0, *src1, gemm_info); + if (dst->total_size() != 0) { - const TensorInfo tensor_info_dst = - dst->clone()->set_tensor_shape(misc::shape_calculator::compute_mm_shape(*src0, *src1, gemm_info)); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &tensor_info_dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); + 
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src0, dst); } + else + { + const TensorInfo dst_info(output_shape, src0->num_channels(), src0->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); + } return Status{}; } diff --git a/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedOnlyRhsKernel.cpp b/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedOnlyRhsKernel.cpp index 1b19f1ec5bb..bdb66d796a3 100644 --- a/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedOnlyRhsKernel.cpp +++ b/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedOnlyRhsKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2023 Arm Limited. + * Copyright (c) 2019-2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -30,6 +30,7 @@ #include "src/core/CL/CLUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "src/core/utils/helpers/float_ops.h" @@ -59,6 +60,7 @@ Status validate_arguments(const ITensorInfo *src0, { ARM_COMPUTE_UNUSED(alpha); ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src0, src1, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src0, src1); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src0); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src0, 1, DataType::F16, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src0, src1); @@ -93,6 +95,7 @@ Status validate_arguments(const ITensorInfo *src0, const unsigned int src2_dim0 = src2->dimension(0); const unsigned int src2_dim1 = src2->dimension(1); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src2); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src2, src0); if (gemm_info.broadcast_bias) { @@ -121,13 +124,19 @@ Status validate_arguments(const ITensorInfo *src0, } ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src1, &tensor_info_reshaped1); + const TensorShape output_shape = 
misc::shape_calculator::compute_mm_shape(*src0, *src1, gemm_info); + if (dst->total_size() != 0) { - const TensorInfo tensor_info_dst = - dst->clone()->set_tensor_shape(misc::shape_calculator::compute_mm_shape(*src0, *src1, gemm_info)); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &tensor_info_dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src0, dst); } + else + { + const TensorInfo dst_info(output_shape, src0->num_channels(), src0->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); + } return Status{}; } diff --git a/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedOnlyRhsMMULKernel.cpp b/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedOnlyRhsMMULKernel.cpp index 50271fcb585..85899ab78c7 100644 --- a/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedOnlyRhsMMULKernel.cpp +++ b/src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedOnlyRhsMMULKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022-2023, 2025 Arm Limited. + * Copyright (c) 2022-2023, 2025-2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -35,6 +35,7 @@ #include "arm_compute/core/Validate.h" #include "src/core/CL/CLUtils.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "src/core/utils/helpers/float_ops.h" @@ -69,6 +70,7 @@ Status validate_arguments(const ITensorInfo *src0, { ARM_COMPUTE_UNUSED(alpha); ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src0, src1, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src0, src1); ARM_COMPUTE_RETURN_ERROR_ON_MSG(!arm_matrix_multiply_supported(CLKernelLibrary::get().get_device()), "The extension cl_arm_matrix_multiply is not supported on the target platform"); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src0, 1, DataType::F16, DataType::F32); @@ -147,6 +149,7 @@ Status validate_arguments(const ITensorInfo *src0, const unsigned int src2_dim0 = src2->dimension(0); const unsigned int src2_dim1 = src2->dimension(1); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src2); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src2, src1); if (gemm_info.broadcast_bias) { @@ -169,13 +172,19 @@ Status validate_arguments(const ITensorInfo *src0, ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src1, &tensor_info_reshaped1); + const TensorShape output_shape = misc::shape_calculator::compute_mm_shape(*src0, *src1, gemm_info); + if (dst->total_size() != 0) { - const TensorInfo tensor_info_dst = - dst->clone()->set_tensor_shape(misc::shape_calculator::compute_mm_shape(*src0, *src1, gemm_info)); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &tensor_info_dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src0, dst); } + else + { + const TensorInfo dst_info(output_shape, src0->num_channels(), src0->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); + } return Status{}; } diff --git 
a/src/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.cpp b/src/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.cpp index eea2a169a33..2a329c6877e 100644 --- a/src/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.cpp +++ b/src/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021, 2023 Arm Limited. + * Copyright (c) 2018-2021, 2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -34,6 +34,7 @@ #include "src/core/AccessWindowStatic.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -53,6 +54,7 @@ Status validate_arguments(const ITensorInfo *src, bool reinterpret_input_as_3d) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.m0 == 0); ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.k0 == 0); ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.v0 == 0); @@ -65,14 +67,21 @@ Status validate_arguments(const ITensorInfo *src, ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN); + const TensorShape output_shape = + misc::shape_calculator::compute_lhs_reshaped_shape(*src, lhs_info, reinterpret_input_as_3d); + if (dst->total_size() != 0) { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS( - dst->tensor_shape(), - misc::shape_calculator::compute_lhs_reshaped_shape(*src, lhs_info, reinterpret_input_as_3d)); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(src, dst); } + else + { + const TensorInfo dst_info(output_shape, src->num_channels(), src->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); + } return Status{}; } diff --git 
a/src/gpu/cl/kernels/ClGemmReshapeRhsMatrixKernel.cpp b/src/gpu/cl/kernels/ClGemmReshapeRhsMatrixKernel.cpp index b9ce3873c73..08f13b233e2 100644 --- a/src/gpu/cl/kernels/ClGemmReshapeRhsMatrixKernel.cpp +++ b/src/gpu/cl/kernels/ClGemmReshapeRhsMatrixKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021 Arm Limited. + * Copyright (c) 2018-2021, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -34,6 +34,7 @@ #include "src/core/AccessWindowStatic.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "src/gpu/cl/kernels/gemm/ClGemmHelpers.h" @@ -50,6 +51,8 @@ namespace { Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const GEMMRHSMatrixInfo &rhs_info) { + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON(rhs_info.n0 == 0); ARM_COMPUTE_RETURN_ERROR_ON(rhs_info.k0 == 0); ARM_COMPUTE_RETURN_ERROR_ON(rhs_info.h0 == 0); @@ -71,13 +74,20 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const ARM_COMPUTE_RETURN_ON_ERROR(gemm::validate_image2d_support_on_rhs(tensor_reshaped_info, rhs_info)); } + const TensorShape output_shape = misc::shape_calculator::compute_rhs_reshaped_shape(*src, rhs_info); + if (dst->total_size() != 0) { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS( - dst->tensor_shape(), misc::shape_calculator::compute_rhs_reshaped_shape(*src, rhs_info)); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(src, dst); } + else + { + const TensorInfo dst_info(output_shape, src->num_channels(), src->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); + } return Status{}; } diff 
--git a/src/gpu/cl/kernels/ClHeightConcatenateKernel.cpp b/src/gpu/cl/kernels/ClHeightConcatenateKernel.cpp index 2e1cefc6e7a..f4a341c64ac 100644 --- a/src/gpu/cl/kernels/ClHeightConcatenateKernel.cpp +++ b/src/gpu/cl/kernels/ClHeightConcatenateKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2023 Arm Limited. + * Copyright (c) 2019-2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -32,6 +32,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" #include "support/StringSupport.h" @@ -47,6 +48,7 @@ namespace Status validate_arguments(const ITensorInfo *src, unsigned int height_offset, const ITensorInfo *dst) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src, dst); ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst); ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(Window::DimY) + height_offset > dst->dimension(Window::DimY)); diff --git a/src/gpu/cl/kernels/ClIm2ColKernel.cpp b/src/gpu/cl/kernels/ClIm2ColKernel.cpp index ef7a52828f1..823fcc381a3 100644 --- a/src/gpu/cl/kernels/ClIm2ColKernel.cpp +++ b/src/gpu/cl/kernels/ClIm2ColKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021, 2023 Arm Limited. + * Copyright (c) 2017-2021, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -35,6 +35,7 @@ #include "src/core/AccessWindowStatic.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -69,13 +70,14 @@ Status validate_arguments(const ITensorInfo *src, const Size2D &dilation, unsigned int num_groups) { + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src); const unsigned int channel_idx = get_data_layout_dimension_index(src->data_layout(), DataLayoutDimension::CHANNEL); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized(src->data_type()) && has_bias); - ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(dst); ARM_COMPUTE_RETURN_ERROR_ON((dilation.x() < 1) || (dilation.y() < 1)); ARM_COMPUTE_RETURN_ERROR_ON(src->data_layout() == DataLayout::UNKNOWN); ARM_COMPUTE_RETURN_ERROR_ON(num_groups == 0); @@ -89,14 +91,21 @@ Status validate_arguments(const ITensorInfo *src, const unsigned total_height = src->dimension(height_idx) + conv_info.pad_top() + conv_info.pad_bottom(); ARM_COMPUTE_RETURN_ERROR_ON((total_width < kernel_dims.width) || (total_height < kernel_dims.height)); + const TensorShape output_shape = + compute_im2col_conv_shape(src, kernel_dims, conv_info, has_bias, dilation, num_groups == 1, num_groups); + if (dst->total_size() > 0) { - const TensorInfo tensor_info_output = dst->clone()->set_tensor_shape( - compute_im2col_conv_shape(src, kernel_dims, conv_info, has_bias, dilation, num_groups == 1, num_groups)); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &tensor_info_output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), output_shape); 
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(src, dst); } + else + { + const TensorInfo dst_info(output_shape, src->num_channels(), src->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); + } return Status{}; } diff --git a/src/gpu/cl/kernels/ClIndirectConv2dAddressPrecalculationKernel.cpp b/src/gpu/cl/kernels/ClIndirectConv2dAddressPrecalculationKernel.cpp index 8c493d08c64..efd1f38ac31 100644 --- a/src/gpu/cl/kernels/ClIndirectConv2dAddressPrecalculationKernel.cpp +++ b/src/gpu/cl/kernels/ClIndirectConv2dAddressPrecalculationKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022-2023 Arm Limited. + * Copyright (c) 2022-2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -27,10 +27,12 @@ #include "arm_compute/core/CL/ICLTensor.h" #include "arm_compute/core/ITensor.h" #include "arm_compute/core/KernelDescriptors.h" +#include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/utils/misc/ShapeCalculator.h" #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -50,8 +52,11 @@ Status validate_arguments(const ITensorInfo *src, const PadStrideInfo &conv_info, const DirectConvComputeKernelInfo &desc) { + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, weights, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src, weights); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F16, DataType::F32); + const size_t one_channel = 1u; + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, one_channel, DataType::F16, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, weights); ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(src, DataLayout::NHWC); 
ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(0) != src->dimension(0), @@ -60,14 +65,21 @@ Status validate_arguments(const ITensorInfo *src, ARM_COMPUTE_RETURN_ERROR_ON_MSG(desc.m0 <= 0 || desc.m0 > 8, "M0 can only be greater than 0 and less than or equal to 8"); + const TensorShape output_shape = misc::shape_calculator::compute_indirect_buffer_shape( + src->tensor_shape(), src->data_layout(), weights->tensor_shape(), conv_info, desc); + const auto output_data_type = DataType::S32; + // Checks performed when dst is configured if (dst->total_size() != 0) { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS( - dst->tensor_shape(), - misc::shape_calculator::compute_indirect_buffer_shape(src->tensor_shape(), src->data_layout(), - weights->tensor_shape(), conv_info, desc)); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, 1, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), output_shape); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, one_channel, output_data_type); + } + else + { + const TensorInfo dst_info(output_shape, one_channel, output_data_type); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); } return Status{}; diff --git a/src/gpu/cl/kernels/ClIndirectConv2dKernel.cpp b/src/gpu/cl/kernels/ClIndirectConv2dKernel.cpp index 3510b6970cb..720a0ed30ca 100644 --- a/src/gpu/cl/kernels/ClIndirectConv2dKernel.cpp +++ b/src/gpu/cl/kernels/ClIndirectConv2dKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022-2023 Arm Limited. + * Copyright (c) 2022-2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -33,6 +33,7 @@ #include "src/core/CL/CLUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "src/gpu/cl/kernels/gemm/ClGemmHelpers.h" @@ -57,6 +58,8 @@ Status validate_arguments(const ITensorInfo *src, const DirectConvComputeKernelInfo &desc) { ARM_COMPUTE_UNUSED(act_info); + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, weights, indirect_buffer, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src, weights, indirect_buffer); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F16, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(indirect_buffer, 1, DataType::S32); @@ -93,6 +96,7 @@ Status validate_arguments(const ITensorInfo *src, if (biases != nullptr) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(biases); if (is_data_type_quantized_asymmetric(src->data_type())) { ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32); @@ -106,13 +110,20 @@ Status validate_arguments(const ITensorInfo *src, ARM_COMPUTE_RETURN_ERROR_ON_MSG(biases->num_dimensions() > 1, "Biases should be one dimensional"); } + const TensorShape output_shape = misc::shape_calculator::compute_deep_convolution_shape(*src, *weights, conv_info); + // Checks performed when dst is configured if (dst->total_size() != 0) { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS( - dst->tensor_shape(), misc::shape_calculator::compute_deep_convolution_shape(*src, *weights, conv_info)); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst); } + else + { + const TensorInfo dst_info(output_shape, src->num_channels(), src->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); 
+ } return Status{}; } diff --git a/src/gpu/cl/kernels/ClMatMulLowpNativeKernel.cpp b/src/gpu/cl/kernels/ClMatMulLowpNativeKernel.cpp index 0bb6b0c0838..a0229abc218 100644 --- a/src/gpu/cl/kernels/ClMatMulLowpNativeKernel.cpp +++ b/src/gpu/cl/kernels/ClMatMulLowpNativeKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023 Arm Limited. + * Copyright (c) 2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -35,6 +35,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "src/common/utils/Log.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "src/gpu/cl/ClCompileContext.h" @@ -95,6 +96,7 @@ Status ClMatMulLowpNativeKernel::validate(const ITensorInfo *lhs, const ActivationLayerInfo &act_info) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lhs, rhs, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(lhs, rhs); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lhs, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, rhs); ARM_COMPUTE_RETURN_ON_ERROR(validate_matmul_kernel_info(matmul_kernel_info)); @@ -111,13 +113,20 @@ Status ClMatMulLowpNativeKernel::validate(const ITensorInfo *lhs, if (dst->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); const TensorInfo tensor_info_output = dst->clone()->set_tensor_shape(expected_output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &tensor_info_output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, dst); } + else + { + const TensorInfo dst_info(expected_output_shape, lhs->num_channels(), lhs->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); + } if (bias != nullptr) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(bias); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32); ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1); ARM_COMPUTE_RETURN_ERROR_ON(expected_output_shape[0] != 
bias->dimension(0)); diff --git a/src/gpu/cl/kernels/ClMatMulLowpNativeMMULKernel.cpp b/src/gpu/cl/kernels/ClMatMulLowpNativeMMULKernel.cpp index 1df0ca0410f..02241fdcef7 100644 --- a/src/gpu/cl/kernels/ClMatMulLowpNativeMMULKernel.cpp +++ b/src/gpu/cl/kernels/ClMatMulLowpNativeMMULKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023 Arm Limited. + * Copyright (c) 2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -34,6 +34,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "src/common/utils/Log.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/gpu/cl/ClCompileContext.h" #include "src/gpu/cl/kernels/helpers/MatMulKernelHelpers.h" @@ -97,6 +98,7 @@ Status ClMatMulLowpNativeMMULKernel::validate(const ITensorInfo *lhs, const ActivationLayerInfo &act_info) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lhs, rhs, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(lhs, rhs); ARM_COMPUTE_RETURN_ERROR_ON_MSG(!arm_matrix_multiply_supported(CLKernelLibrary::get().get_device()), "The extension cl_arm_matrix_multiply is not supported on the target platform"); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lhs, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED); @@ -116,13 +118,20 @@ Status ClMatMulLowpNativeMMULKernel::validate(const ITensorInfo *lhs, if (dst->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); const TensorInfo tensor_info_output = dst->clone()->set_tensor_shape(expected_output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &tensor_info_output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, dst); } + else + { + const TensorInfo dst_info(expected_output_shape, lhs->num_channels(), lhs->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); + } if (bias != nullptr) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(bias); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32); 
ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1); ARM_COMPUTE_RETURN_ERROR_ON(expected_output_shape[0] != bias->dimension(0)); diff --git a/src/gpu/cl/kernels/ClMatMulNativeKernel.cpp b/src/gpu/cl/kernels/ClMatMulNativeKernel.cpp index a1fa9fa9ab2..0a22bb841c9 100644 --- a/src/gpu/cl/kernels/ClMatMulNativeKernel.cpp +++ b/src/gpu/cl/kernels/ClMatMulNativeKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023 Arm Limited. + * Copyright (c) 2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -34,6 +34,7 @@ #include "src/common/utils/Log.h" #include "src/core/CL/CLUtils.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "src/gpu/cl/kernels/gemm/ClGemmHelpers.h" @@ -120,6 +121,7 @@ Status ClMatMulNativeKernel::validate(const ITensorInfo *lhs, { ARM_COMPUTE_UNUSED(act_info); ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lhs, rhs, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(lhs, rhs); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lhs, 1, DataType::F32, DataType::F16); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, rhs); ARM_COMPUTE_RETURN_ON_ERROR(validate_matmul_kernel_info(matmul_kernel_info)); @@ -132,13 +134,20 @@ Status ClMatMulNativeKernel::validate(const ITensorInfo *lhs, if (dst->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); const TensorInfo tensor_info_dst = dst->clone()->set_tensor_shape(expected_output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &tensor_info_dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, dst); } + else + { + const TensorInfo dst_info(expected_output_shape, lhs->num_channels(), lhs->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); + } if (bias != nullptr) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(bias); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(bias, lhs); ARM_COMPUTE_RETURN_ERROR_ON_MSG((bias->num_dimensions() > 1), "Multi 
dimensional bias is unsupported."); ARM_COMPUTE_RETURN_ERROR_ON_MSG(bias->dimension(0) != expected_output_shape[0], diff --git a/src/gpu/cl/kernels/ClMatMulNativeMMULKernel.cpp b/src/gpu/cl/kernels/ClMatMulNativeMMULKernel.cpp index 76bf846e745..8154af1304f 100644 --- a/src/gpu/cl/kernels/ClMatMulNativeMMULKernel.cpp +++ b/src/gpu/cl/kernels/ClMatMulNativeMMULKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023 Arm Limited. + * Copyright (c) 2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -33,6 +33,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "src/common/utils/Log.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/gpu/cl/kernels/helpers/MatMulKernelHelpers.h" #include "support/Cast.h" @@ -90,6 +91,7 @@ Status ClMatMulNativeMMULKernel::validate(const ITensorInfo *lhs, const MatMulKernelInfo &matmul_kernel_info) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lhs, rhs, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(lhs, rhs); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lhs, 1, DataType::F32, DataType::F16); ARM_COMPUTE_RETURN_ERROR_ON_MSG(!arm_matrix_multiply_supported(CLKernelLibrary::get().get_device()), "The extension cl_arm_matrix_multiply is not supported on the target platform"); @@ -107,13 +109,20 @@ Status ClMatMulNativeMMULKernel::validate(const ITensorInfo *lhs, if (dst->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); const TensorInfo tensor_info_dst = dst->clone()->set_tensor_shape(expected_output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &tensor_info_dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, dst); } + else + { + const TensorInfo dst_info(expected_output_shape, lhs->num_channels(), lhs->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); + } if (bias != nullptr) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(bias); ARM_COMPUTE_RETURN_ERROR_ON_MSG((bias->num_dimensions() > 1), "Multi 
dimensional bias is unsupported."); ARM_COMPUTE_RETURN_ERROR_ON_MSG(bias->dimension(0) != expected_output_shape[0], "First dimension of bias and output tensors must match."); diff --git a/src/gpu/cl/kernels/ClMulKernel.cpp b/src/gpu/cl/kernels/ClMulKernel.cpp index 3b59c2a7fc2..fb111b68524 100644 --- a/src/gpu/cl/kernels/ClMulKernel.cpp +++ b/src/gpu/cl/kernels/ClMulKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021, 2023 Arm Limited. + * Copyright (c) 2016-2021, 2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -34,6 +34,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -59,6 +60,7 @@ Status validate_arguments(const ITensorInfo *src1, ARM_COMPUTE_UNUSED(rounding_policy); ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src1, src2, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src1, src2); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src1); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src1, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S16, DataType::QSYMM16, @@ -80,6 +82,7 @@ Status validate_arguments(const ITensorInfo *src1, // Validate in case of configured dst if (dst->total_size() > 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S16, DataType::QSYMM16, DataType::F16, DataType::S32, DataType::F32); @@ -114,6 +117,11 @@ Status validate_arguments(const ITensorInfo *src1, "Wrong shape for dst"); } } + else + { + const TensorInfo dst_info(out_shape, src1->num_channels(), src1->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); + } return Status{}; } @@ -361,8 +369,11 @@ Status validate_arguments_complex(const ITensorInfo *src1, const ITensorInfo *dst, const 
ActivationLayerInfo &act_info) { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src1, 2, DataType::F16, DataType::F32); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src2, 2, DataType::F16, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src1, src2, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src1, src2); + const size_t two_channels = 2u; + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src1, two_channels, DataType::F16, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src2, two_channels, DataType::F16, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src1, src2); const TensorShape &out_shape = TensorShape::broadcast_shape(src1->tensor_shape(), src2->tensor_shape()); @@ -373,11 +384,17 @@ Status validate_arguments_complex(const ITensorInfo *src1, // Validate in case of configured dst if (dst->total_size() > 0) { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, 2, DataType::F16, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); + ARM_COMPUTE_RETURN_ERROR_ON(dst->num_channels() != two_channels); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src1, dst); ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, dst->tensor_shape(), 0), "Wrong shape for dst"); } + else + { + const TensorInfo dst_info(out_shape, src1->num_channels(), src1->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); + } return Status{}; } diff --git a/src/gpu/cl/kernels/ClPermuteKernel.cpp b/src/gpu/cl/kernels/ClPermuteKernel.cpp index a4755782ed8..6c4d230df28 100644 --- a/src/gpu/cl/kernels/ClPermuteKernel.cpp +++ b/src/gpu/cl/kernels/ClPermuteKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021 Arm Limited. + * Copyright (c) 2018-2021, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -33,6 +33,7 @@ #include "arm_compute/core/Validate.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -56,6 +57,7 @@ TensorShape get_dst_shape(const ITensorInfo *src, const PermutationVector &perm) Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const PermutationVector &perm) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN); ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->num_dimensions() < 1 || src->num_dimensions() > 4, "Permutation up to 4-D src tensor is supported"); @@ -66,14 +68,21 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const ARM_COMPUTE_RETURN_ERROR_ON_MSG(p >= perm.num_dimensions(), "Permutation vector has invalid values"); } + const TensorShape dst_shape = misc::shape_calculator::compute_permutation_output_shape(*src, perm); + // Validate configured dst if (dst->total_size() != 0) { - const TensorShape dst_shape = misc::shape_calculator::compute_permutation_output_shape(*src, perm); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), dst_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(src, dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst); } + else + { + const TensorInfo dst_info(dst_shape, src->num_channels(), src->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); + } return Status{}; } } // namespace diff --git a/src/gpu/cl/kernels/ClPool2dKernel.cpp b/src/gpu/cl/kernels/ClPool2dKernel.cpp index 41ab4d6922b..7798bebbe25 100644 --- a/src/gpu/cl/kernels/ClPool2dKernel.cpp +++ b/src/gpu/cl/kernels/ClPool2dKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021, 2023 Arm 
Limited. + * Copyright (c) 2017-2021, 2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -30,6 +30,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -50,6 +51,7 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *indices) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32); @@ -75,6 +77,8 @@ Status validate_arguments(const ITensorInfo *src, ARM_COMPUTE_RETURN_ERROR_ON_MSG((output_width < 1 || output_height < 1), "Calculated output dimension size is invalid"); + const TensorShape output_shape = compute_pool_shape(*src, pool_info); + // Check indices if (indices) { @@ -86,18 +90,23 @@ Status validate_arguments(const ITensorInfo *src, if (indices->total_size() != 0) { - TensorInfo idx_info(TensorInfo(compute_pool_shape(*src, pool_info), 1, DataType::U32)); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(indices, &idx_info); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(indices); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(indices->tensor_shape(), output_shape); } } // Checks performed when dst is configured if (dst->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(src, dst); - TensorInfo out_info(TensorInfo(compute_pool_shape(*src, pool_info), 1, dst->data_type())); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &out_info); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), output_shape); + } + else + { + const TensorInfo 
dst_info(output_shape, src->num_channels(), src->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); } return Status{}; diff --git a/src/gpu/cl/kernels/ClPool3dKernel.cpp b/src/gpu/cl/kernels/ClPool3dKernel.cpp index a08c5d4be71..defd49b0bf2 100644 --- a/src/gpu/cl/kernels/ClPool3dKernel.cpp +++ b/src/gpu/cl/kernels/ClPool3dKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022-2023 Arm Limited. + * Copyright (c) 2022-2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -30,6 +30,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -48,6 +49,7 @@ namespace Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const Pooling3dLayerInfo &pool_info) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_layout() != DataLayout::NDHWC, "Only NDHWC layout supported"); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src); @@ -85,13 +87,21 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const ARM_COMPUTE_RETURN_ERROR_ON_MSG((output_width < 1 || output_height < 1 || output_depth < 1), "Calculated output dimension size is invalid"); + + const TensorShape output_shape = compute_pool3d_shape(src->tensor_shape(), pool_info); + // Checks performed when dst is configured if (dst->total_size() != 0) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(src, dst); - TensorInfo out_info(TensorInfo(compute_pool3d_shape(src->tensor_shape(), pool_info), 1, dst->data_type())); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &out_info); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), 
output_shape); + } + else + { + const TensorInfo dst_info(output_shape, src->num_channels(), src->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); } return Status{}; diff --git a/src/gpu/cl/kernels/ClQuantizeKernel.cpp b/src/gpu/cl/kernels/ClQuantizeKernel.cpp index a01e31559f9..c4c6d323622 100644 --- a/src/gpu/cl/kernels/ClQuantizeKernel.cpp +++ b/src/gpu/cl/kernels/ClQuantizeKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021, 2023-2024 Arm Limited. + * Copyright (c) 2017-2021, 2023-2024, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -35,6 +35,7 @@ #include "arm_compute/core/Validate.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" #include "support/StringSupport.h" @@ -50,6 +51,7 @@ namespace Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src, dst); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F32, DataType::F16); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src); diff --git a/src/gpu/cl/kernels/ClReshapeKernel.cpp b/src/gpu/cl/kernels/ClReshapeKernel.cpp index 53889f3a6b4..7db409b9da1 100644 --- a/src/gpu/cl/kernels/ClReshapeKernel.cpp +++ b/src/gpu/cl/kernels/ClReshapeKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2022 Arm Limited. + * Copyright (c) 2017-2022, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -32,6 +32,7 @@ #include "arm_compute/core/Utils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -49,15 +50,16 @@ namespace Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN); - if (dst->tensor_shape().total_size() != 0) - { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(src, dst); - ARM_COMPUTE_RETURN_ERROR_ON(src->tensor_shape().total_size() != dst->tensor_shape().total_size()); - } + // There's no default config so we expect the output to be initialized. + ARM_COMPUTE_RETURN_ERROR_ON(dst->tensor_shape().total_size() == 0); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(src, dst); + ARM_COMPUTE_RETURN_ERROR_ON(src->tensor_shape().total_size() != dst->tensor_shape().total_size()); return Status{}; } diff --git a/src/gpu/cl/kernels/ClScaleKernel.cpp b/src/gpu/cl/kernels/ClScaleKernel.cpp index 4305acad262..663bd09fc94 100644 --- a/src/gpu/cl/kernels/ClScaleKernel.cpp +++ b/src/gpu/cl/kernels/ClScaleKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2023 Arm Limited. + * Copyright (c) 2016-2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -32,6 +32,7 @@ #include "src/core/AccessWindowStatic.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/WindowHelpers.h" #include "src/core/utils/ScaleUtils.h" #include "support/Cast.h" @@ -65,6 +66,7 @@ calculate_scale_factors(const ITensorInfo *src, const ITensorInfo *dst, DataLayo Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const ScaleKernelInfo &info) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src, dst); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::U8, DataType::S16, DataType::F16, DataType::F32); diff --git a/src/gpu/cl/kernels/ClScatterKernel.cpp b/src/gpu/cl/kernels/ClScatterKernel.cpp index 19adc1ef344..41738b4de5e 100644 --- a/src/gpu/cl/kernels/ClScatterKernel.cpp +++ b/src/gpu/cl/kernels/ClScatterKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2024 Arm Limited. + * Copyright (c) 2024, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -31,6 +31,7 @@ #include "arm_compute/core/utils/helpers/AdjustVecSize.h" #include "src/common/utils/Log.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -58,6 +59,8 @@ Status ClScatterKernel::validate(const ITensorInfo *updates, const ScatterInfo &info) { ARM_COMPUTE_UNUSED(info); + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(updates, indices, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(updates, indices, dst); const TensorShape &ind_shape = indices->tensor_shape(); const TensorShape &upt_shape = updates->tensor_shape(); diff --git a/src/gpu/cl/kernels/ClSoftmaxKernel.cpp b/src/gpu/cl/kernels/ClSoftmaxKernel.cpp index 796345a9237..a2bfa96b8fb 100644 --- a/src/gpu/cl/kernels/ClSoftmaxKernel.cpp +++ b/src/gpu/cl/kernels/ClSoftmaxKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021, 2023 Arm Limited. + * Copyright (c) 2017-2021, 2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -43,6 +43,7 @@ #include "arm_compute/core/Validate.h" #include "arm_compute/core/Window.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" #include "support/StringSupport.h" @@ -63,6 +64,7 @@ ClSoftmaxKernel::ClSoftmaxKernel() Status ClSoftmaxKernel::validate(const ITensorInfo &src, const ITensorInfo &dst, const SoftmaxKernelInfo &info) { ARM_COMPUTE_UNUSED(src, dst, info); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&src, &dst); ARM_COMPUTE_RETURN_ERROR_ON(src.num_dimensions() > 4); diff --git a/src/gpu/cl/kernels/ClTransposeKernel.cpp b/src/gpu/cl/kernels/ClTransposeKernel.cpp index f95a215107b..7631af54285 100644 --- a/src/gpu/cl/kernels/ClTransposeKernel.cpp +++ b/src/gpu/cl/kernels/ClTransposeKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021, 2023 Arm Limited. + * Copyright (c) 2017-2021, 2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -34,6 +34,7 @@ #include "arm_compute/core/Validate.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -108,19 +109,25 @@ void ClTransposeKernel::configure(const CLCompileContext &compile_context, const Status ClTransposeKernel::validate(const ITensorInfo *src, const ITensorInfo *dst) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN); + const TensorShape output_shape = misc::shape_calculator::compute_transposed_shape(*src); + // Validate configured dst if (dst->total_size() != 0) { - const TensorInfo dst_info = - src->clone()->set_tensor_shape(misc::shape_calculator::compute_transposed_shape(*src)); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &dst_info); - + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(dst); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(src, dst); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst); } + else + { + const TensorInfo dst_info(output_shape, src->num_channels(), src->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&dst_info); + } return Status{}; } diff --git a/src/gpu/cl/kernels/ClTransposedConvolutionKernel.cpp b/src/gpu/cl/kernels/ClTransposedConvolutionKernel.cpp index 76f39ac5001..e32a7e118a2 100644 --- a/src/gpu/cl/kernels/ClTransposedConvolutionKernel.cpp +++ b/src/gpu/cl/kernels/ClTransposedConvolutionKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022-2023 Arm Limited. + * Copyright (c) 2022-2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -24,12 +24,14 @@ #include "src/gpu/cl/kernels/ClTransposedConvolutionKernel.h" #include "arm_compute/core/CL/ICLTensor.h" +#include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/utils/helpers/AdjustVecSize.h" #include "arm_compute/core/utils/misc/ShapeCalculator.h" #include "arm_compute/core/utils/quantization/AsymmHelpers.h" #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -48,6 +50,8 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const PadStrideInfo &deconv_info) { + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input, weights); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32, DataType::QASYMM8_SIGNED, DataType::QASYMM8); @@ -66,6 +70,7 @@ Status validate_arguments(const ITensorInfo *input, if (biases != nullptr) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(biases); if (is_data_type_quantized_asymmetric(input->data_type())) { ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32); @@ -81,23 +86,29 @@ Status validate_arguments(const ITensorInfo *input, ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(input, DataLayout::NHWC); } + const size_t input_width = input->dimension(width_idx); + const size_t input_height = input->dimension(height_idx); + const size_t weights_width = weights->dimension(width_idx); + const size_t weights_height = weights->dimension(height_idx); + + const auto out_dims = + deconvolution_output_dimensions(input_width, input_height, weights_width, weights_height, deconv_info); + const TensorShape output_shape = + misc::shape_calculator::compute_deconvolution_output_shape(out_dims, *input, 
*weights); + // Checks performed when output is configured if (output->total_size() != 0) { - const size_t input_width = input->dimension(width_idx); - const size_t input_height = input->dimension(height_idx); - const size_t weights_width = weights->dimension(width_idx); - const size_t weights_height = weights->dimension(height_idx); - - auto out_dims = - deconvolution_output_dimensions(input_width, input_height, weights_width, weights_height, deconv_info); - TensorShape output_shape = - misc::shape_calculator::compute_deconvolution_output_shape(out_dims, *input, *weights); - + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(output, DataLayout::NHWC); } + else + { + const TensorInfo output_info(output_shape, input->num_channels(), input->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); + } return Status{}; } diff --git a/src/gpu/cl/kernels/ClWeightsReshapeKernel.cpp b/src/gpu/cl/kernels/ClWeightsReshapeKernel.cpp index af80c4d7968..c06af2740fa 100644 --- a/src/gpu/cl/kernels/ClWeightsReshapeKernel.cpp +++ b/src/gpu/cl/kernels/ClWeightsReshapeKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021 Arm Limited. + * Copyright (c) 2017-2021, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -25,8 +25,10 @@ #include "arm_compute/core/CL/ICLTensor.h" #include "arm_compute/core/Error.h" +#include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/utils/misc/ShapeCalculator.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -47,6 +49,7 @@ Status validate_arguments(const ITensorInfo *input, unsigned int num_groups) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN); ARM_COMPUTE_RETURN_ERROR_ON(num_groups == 0); ARM_COMPUTE_RETURN_ERROR_ON(input->data_layout() == DataLayout::NHWC && num_groups > 1); @@ -55,6 +58,7 @@ Status validate_arguments(const ITensorInfo *input, if (biases != nullptr) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(biases); ARM_COMPUTE_RETURN_ERROR_ON(!is_data_type_float(input->data_type())); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases); ARM_COMPUTE_RETURN_ERROR_ON((input->num_dimensions() == 4) && (biases->num_dimensions() != 1)); @@ -66,14 +70,21 @@ Status validate_arguments(const ITensorInfo *input, (biases->dimension(0) != input->tensor_shape()[3] || biases->dimension(1) != input->tensor_shape()[4])); } + const TensorShape output_shape = compute_weights_reshaped_shape(*input, biases != nullptr, num_groups); + // Checks performed when output is configured if (output->total_size() != 0) { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS( - output->tensor_shape(), compute_weights_reshaped_shape(*input, biases != nullptr, num_groups)); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output); } + else + { 
+ const TensorInfo output_info(output_shape, input->num_channels(), input->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); + } return Status{}; } diff --git a/src/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.cpp b/src/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.cpp index 15195025cee..7de29df4344 100644 --- a/src/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.cpp +++ b/src/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2023 Arm Limited. + * Copyright (c) 2018-2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -31,6 +31,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/WindowHelpers.h" #include "src/core/utils/helpers/tensor_info.h" #include "support/Cast.h" @@ -47,6 +48,7 @@ namespace Status validate_arguments(const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src1, src2, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src1, src2, dst); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src1); ARM_COMPUTE_RETURN_ERROR_ON(src1->data_type() == DataType::UNKNOWN); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src1, src2, dst); diff --git a/src/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.cpp b/src/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.cpp index c4f84e3e453..97d867f1f4a 100644 --- a/src/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.cpp +++ b/src/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2023 Arm Limited. + * Copyright (c) 2018-2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -32,6 +32,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/WindowHelpers.h" #include "src/core/utils/helpers/tensor_info.h" #include "support/Cast.h" @@ -52,6 +53,7 @@ Status validate_arguments(const ITensorInfo *src1, const ITensorInfo *dst) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src1, src2, src3, src4, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src1, src2, src3, src4, dst); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src1); ARM_COMPUTE_RETURN_ERROR_ON(src1->data_type() == DataType::UNKNOWN); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src1, src2, src3, src4, dst); diff --git a/src/gpu/cl/kernels/ClWidthConcatenateKernel.cpp b/src/gpu/cl/kernels/ClWidthConcatenateKernel.cpp index 989de4a7b74..a1b2bf193d8 100644 --- a/src/gpu/cl/kernels/ClWidthConcatenateKernel.cpp +++ b/src/gpu/cl/kernels/ClWidthConcatenateKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2023 Arm Limited. + * Copyright (c) 2018-2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -32,6 +32,7 @@ #include "arm_compute/core/utils/StringUtils.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" #include "support/StringSupport.h" @@ -47,6 +48,7 @@ namespace Status validate_arguments(const ITensorInfo *src, unsigned int width_offset, const ITensorInfo *dst) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(src, dst); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src); ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN); diff --git a/src/gpu/cl/kernels/ClWinogradFilterTransformKernel.cpp b/src/gpu/cl/kernels/ClWinogradFilterTransformKernel.cpp index 58c01d4da56..4232902fa10 100644 --- a/src/gpu/cl/kernels/ClWinogradFilterTransformKernel.cpp +++ b/src/gpu/cl/kernels/ClWinogradFilterTransformKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2023 Arm Limited. + * Copyright (c) 2018-2023, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -35,6 +35,7 @@ #include "arm_compute/core/Window.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -52,6 +53,8 @@ namespace { Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const WinogradInfo &winograd_info) { + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); @@ -68,15 +71,20 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c input->dimension(idx_h) != kernel_size.height); ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4); + const TensorShape output_shape = compute_winograd_filter_transform_shape(*input, winograd_info); + // Checks performed when output is configured if (output->total_size() != 0) { - const TensorInfo tensor_info_output = - input->clone()->set_tensor_shape(compute_winograd_filter_transform_shape(*input, winograd_info)); - - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); } + else + { + const TensorInfo output_info(output_shape, input->num_channels(), input->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); + } return Status{}; } diff --git a/src/gpu/cl/kernels/ClWinogradInputTransformKernel.cpp b/src/gpu/cl/kernels/ClWinogradInputTransformKernel.cpp index 54c48986fcd..ae92ad795e9 100644 --- a/src/gpu/cl/kernels/ClWinogradInputTransformKernel.cpp +++ b/src/gpu/cl/kernels/ClWinogradInputTransformKernel.cpp @@ -1,5 +1,5 @@ /* - * 
Copyright (c) 2018-2023 Arm Limited. + * Copyright (c) 2018-2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -29,12 +29,14 @@ #include "arm_compute/core/CL/OpenCL.h" #include "arm_compute/core/Error.h" #include "arm_compute/core/Helpers.h" +#include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/Types.h" #include "arm_compute/core/utils/misc/ShapeCalculator.h" #include "arm_compute/core/utils/StringUtils.h" #include "src/core/AccessWindowStatic.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -50,6 +52,8 @@ namespace { Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const WinogradInfo &winograd_info) { + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); @@ -66,15 +70,21 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c ARM_COMPUTE_UNUSED(output_tile_size); ARM_COMPUTE_UNUSED(kernel_size); + const TensorShape output_shape = + misc::shape_calculator::compute_winograd_input_transform_shape(*input, winograd_info); + // Validate configured output if (output->total_size() != 0) { - const TensorShape output_shape = - misc::shape_calculator::compute_winograd_input_transform_shape(*input, winograd_info); - + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); } + else + { + const TensorInfo output_info(output_shape, input->num_channels(), input->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); + } return Status{}; } diff --git 
a/src/gpu/cl/kernels/ClWinogradOutputTransformKernel.cpp b/src/gpu/cl/kernels/ClWinogradOutputTransformKernel.cpp index 89c80c55ef3..d66f316cdd9 100644 --- a/src/gpu/cl/kernels/ClWinogradOutputTransformKernel.cpp +++ b/src/gpu/cl/kernels/ClWinogradOutputTransformKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2023 Arm Limited. + * Copyright (c) 2018-2023, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -38,6 +38,7 @@ #include "src/core/AccessWindowStatic.h" #include "src/core/CL/CLValidate.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" #include "support/Cast.h" @@ -62,6 +63,8 @@ Status validate_arguments(const ITensorInfo *input, const ActivationLayerInfo &act_info) { ARM_COMPUTE_UNUSED(act_info); + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); @@ -88,19 +91,25 @@ Status validate_arguments(const ITensorInfo *input, if (bias != nullptr) { + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(bias); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, bias); ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != bias->dimension(0)); } + const TensorShape output_shape = compute_winograd_output_transform_shape(*input, winograd_info); + // Checks performed when output is configured if (output->total_size() != 0) { - const TensorInfo tensor_info_output = - input->clone()->set_tensor_shape(compute_winograd_output_transform_shape(*input, winograd_info)); - - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); } + else + { + const TensorInfo 
output_info(output_shape, input->num_channels(), input->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); + } return Status{}; } diff --git a/src/runtime/CL/functions/CLCropResize.cpp b/src/runtime/CL/functions/CLCropResize.cpp index 6111bee6a79..2fdb61a4f0e 100644 --- a/src/runtime/CL/functions/CLCropResize.cpp +++ b/src/runtime/CL/functions/CLCropResize.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2021, 2024 Arm Limited. + * Copyright (c) 2019-2021, 2024, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -96,8 +96,9 @@ Status CLCropResize::validate(const ITensorInfo *input, ARM_COMPUTE_RETURN_ERROR_ON(method == InterpolationPolicy::AREA); ARM_COMPUTE_RETURN_ERROR_ON(boxes->tensor_shape()[0] != 4); ARM_COMPUTE_RETURN_ERROR_ON(boxes->tensor_shape()[1] != box_ind->tensor_shape()[0]); - TensorInfo temp_info; - ARM_COMPUTE_RETURN_ON_ERROR(CLCrop::validate(input->clone().get(), &temp_info, {0, 0}, {1, 1}, + auto temp_info = output->clone(); + temp_info->set_tensor_shape(TensorShape(input->dimension(0), crop_size.x, crop_size.y)); + ARM_COMPUTE_RETURN_ON_ERROR(CLCrop::validate(input->clone().get(), temp_info.get(), {0, 0}, {1, 1}, input->dimension(3) - 1, extrapolation_value)); if (output->total_size() > 0) { diff --git a/src/runtime/CPP/functions/CPPBoxWithNonMaximaSuppressionLimit.cpp b/src/runtime/CPP/functions/CPPBoxWithNonMaximaSuppressionLimit.cpp index 196ceca5317..a8ddd52fe27 100644 --- a/src/runtime/CPP/functions/CPPBoxWithNonMaximaSuppressionLimit.cpp +++ b/src/runtime/CPP/functions/CPPBoxWithNonMaximaSuppressionLimit.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021, 2024 Arm Limited. + * Copyright (c) 2018-2021, 2024, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -27,6 +27,7 @@ #include "arm_compute/runtime/Scheduler.h" #include "src/common/utils/Log.h" +#include "src/core/CPP/Validate.h" namespace arm_compute { @@ -253,6 +254,7 @@ Status validate(const ITensorInfo *scores_in, { ARM_COMPUTE_UNUSED(batch_splits_in, batch_splits_out, keeps, keeps_size, info); ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(scores_in, boxes_in, scores_out, boxes_out, classes); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(scores_in, boxes_in, scores_out, boxes_out, classes); ARM_COMPUTE_RETURN_ERROR_ON_DYNAMIC_SHAPE(scores_in, boxes_in, scores_out, boxes_out, classes); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(scores_in, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32); diff --git a/src/runtime/CPP/functions/CPPDetectionOutputLayer.cpp b/src/runtime/CPP/functions/CPPDetectionOutputLayer.cpp index 27584134139..2182fa6aa0b 100644 --- a/src/runtime/CPP/functions/CPPDetectionOutputLayer.cpp +++ b/src/runtime/CPP/functions/CPPDetectionOutputLayer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021, 2024 Arm Limited. + * Copyright (c) 2018-2021, 2024, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -25,9 +25,11 @@ #include "arm_compute/core/Error.h" #include "arm_compute/core/Helpers.h" +#include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/Validate.h" #include "src/common/utils/Log.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include @@ -43,6 +45,7 @@ Status validate_arguments(const ITensorInfo *input_loc, DetectionOutputLayerInfo info) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input_loc, input_conf, input_priorbox, output); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input_loc, input_conf, input_priorbox); ARM_COMPUTE_RETURN_ERROR_ON_DYNAMIC_SHAPE(input_loc, input_conf, input_priorbox, output); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input_loc, 1, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_loc, input_conf, input_priorbox); @@ -61,14 +64,21 @@ Status validate_arguments(const ITensorInfo *input_loc, input_conf->tensor_shape()[0], "Number of priors must match number of confidence predictions."); + const unsigned int max_size = info.keep_top_k() * (input_loc->num_dimensions() > 1 ? input_loc->dimension(1) : 1); + const TensorShape output_shape(7U, max_size); + // Validate configured output if (output->total_size() != 0) { - const unsigned int max_size = - info.keep_top_k() * (input_loc->num_dimensions() > 1 ? 
input_loc->dimension(1) : 1); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), TensorShape(7U, max_size)); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_loc, output); } + else + { + const TensorInfo output_info(output_shape, input_loc->num_channels(), input_loc->data_type()); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_info); + } return Status{}; } diff --git a/src/runtime/CPP/functions/CPPDetectionPostProcessLayer.cpp b/src/runtime/CPP/functions/CPPDetectionPostProcessLayer.cpp index a8b398a3957..8eb4ad52768 100644 --- a/src/runtime/CPP/functions/CPPDetectionPostProcessLayer.cpp +++ b/src/runtime/CPP/functions/CPPDetectionPostProcessLayer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2021, 2024 Arm Limited. + * Copyright (c) 2019-2021, 2024, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -28,6 +28,7 @@ #include "arm_compute/core/Validate.h" #include "src/common/utils/Log.h" +#include "src/core/CPP/Validate.h" #include "src/core/helpers/AutoConfiguration.h" #include @@ -49,11 +50,14 @@ Status validate_arguments(const ITensorInfo *input_box_encoding, const unsigned int kBatchSize, const unsigned int kNumCoordBox) { - ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input_box_encoding, input_class_score, input_anchors); + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input_box_encoding, input_class_score, input_anchors, output_boxes, + output_classes, output_scores, num_detection); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(input_box_encoding, input_class_score, input_anchors); ARM_COMPUTE_RETURN_ERROR_ON_DYNAMIC_SHAPE(input_box_encoding, input_class_score, input_anchors, output_boxes, output_classes, output_scores, num_detection); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input_box_encoding, 1, DataType::F32, DataType::QASYMM8, - DataType::QASYMM8_SIGNED); + const 
size_t one_channel = 1u; + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input_box_encoding, one_channel, DataType::F32, + DataType::QASYMM8, DataType::QASYMM8_SIGNED); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_box_encoding, input_anchors); ARM_COMPUTE_RETURN_ERROR_ON_MSG(input_box_encoding->num_dimensions() > 3, "The location input tensor shape should be [4, N, kBatchSize]."); @@ -90,29 +94,56 @@ Status validate_arguments(const ITensorInfo *input_box_encoding, const unsigned int num_detected_boxes = info.max_detections() * info.max_classes_per_detection(); + const TensorShape output_boxes_shape(4U, num_detected_boxes, 1U); + const TensorShape output_classes_shape(num_detected_boxes, 1U); + const TensorShape output_scores_shape(num_detected_boxes, 1U); + const TensorShape num_detection_shape(1U); + const auto output_data_type = DataType::F32; + // Validate configured outputs if (output_boxes->total_size() != 0) { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output_boxes->tensor_shape(), - TensorShape(4U, num_detected_boxes, 1U)); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_boxes, 1, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output_boxes); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output_boxes->tensor_shape(), output_boxes_shape); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_boxes, one_channel, output_data_type); + } + else + { + const TensorInfo output_boxes_info(output_boxes_shape, one_channel, output_data_type); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_boxes_info); } if (output_classes->total_size() != 0) { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output_classes->tensor_shape(), - TensorShape(num_detected_boxes, 1U)); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_classes, 1, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output_classes); + 
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output_classes->tensor_shape(), output_classes_shape); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_classes, one_channel, output_data_type); + } + else + { + const TensorInfo output_classes_info(output_classes_shape, one_channel, output_data_type); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_classes_info); } if (output_scores->total_size() != 0) { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output_scores->tensor_shape(), - TensorShape(num_detected_boxes, 1U)); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_scores, 1, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(output_scores); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output_scores->tensor_shape(), output_scores_shape); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_scores, one_channel, output_data_type); + } + else + { + const TensorInfo output_scores_info(output_scores_shape, one_channel, output_data_type); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&output_scores_info); } if (num_detection->total_size() != 0) { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(num_detection->tensor_shape(), TensorShape(1U)); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(num_detection, 1, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(num_detection); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(num_detection->tensor_shape(), num_detection_shape); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(num_detection, one_channel, output_data_type); + } + else + { + const TensorInfo num_detection_info(num_detection_shape, one_channel, output_data_type); + ARM_COMPUTE_RETURN_ERROR_ON_SIZE_UNSUPPORTED(&num_detection_info); } return Status{}; diff --git a/tests/validation/NEON/Gather.cpp b/tests/validation/NEON/Gather.cpp index a9bcc1e35a8..11605e76a83 100644 --- a/tests/validation/NEON/Gather.cpp +++ b/tests/validation/NEON/Gather.cpp @@ -1,5 +1,5 @@ 
/* - * Copyright (c) 2019-2022, 2025 Arm Limited. + * Copyright (c) 2019-2022, 2025-2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -80,7 +80,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip( TensorInfo(TensorShape(27U, 27U), 1, DataType::F16), }), make("Axis", { - 0, 1, -2, 0,