diff --git a/arm_compute/runtime/ITensorAllocator.h b/arm_compute/runtime/ITensorAllocator.h index 1aed08fe33..6041f18e4b 100644 --- a/arm_compute/runtime/ITensorAllocator.h +++ b/arm_compute/runtime/ITensorAllocator.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021, 2024-2025 Arm Limited. + * Copyright (c) 2016-2021, 2024-2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -110,10 +110,26 @@ class ITensorAllocator /** Interface to be implemented by the child class to unlock the memory allocation after the CPU is done accessing it. */ virtual void unlock() = 0; + /** Returns whether allocator currently owns the TensorInfo metadata object. */ + bool owns_info() const; + /** Returns whether the allocator has imported external backing memory. */ + bool is_imported() const; + /** Track import state for derived allocators when ownership of backing memory changes. */ + void set_imported(bool imported); + /** For external TensorInfo metadata, import state determines resizable behavior from allocator perspective. */ + bool allocator_considers_resizable() const; + /** Set the resizable flag on the owned TensorInfo only if the allocator owns it. + * + * @return True if the TensorInfo was mutated, false otherwise. + */ + bool set_resizable_if_info_owned(bool is_resizable); + private: TensorInfo _info_owned{}; /**< Tensor's metadata. */ TensorInfo *_info_external{nullptr}; /**< External Tensor's metadata */ size_t _alignment{}; /**< Tensor's alignment in bytes */ + bool _owns_info{true}; /**< True when allocator owns metadata; false for soft_init(). */ + bool _is_imported{false}; /**< True when memory was imported instead of allocated by allocator. 
*/ }; } // namespace arm_compute #endif // ACL_ARM_COMPUTE_RUNTIME_ITENSORALLOCATOR_H diff --git a/src/runtime/CL/CLTensorAllocator.cpp b/src/runtime/CL/CLTensorAllocator.cpp index b109288bfb..c3b15dd487 100644 --- a/src/runtime/CL/CLTensorAllocator.cpp +++ b/src/runtime/CL/CLTensorAllocator.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021, 2024 Arm Limited. + * Copyright (c) 2016-2021, 2024, 2026 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -157,7 +157,8 @@ void CLTensorAllocator::allocate() } // Lock allocator - info().set_is_resizable(false); + set_imported(false); + set_resizable_if_info_owned(false); } void CLTensorAllocator::free() @@ -165,7 +166,8 @@ void CLTensorAllocator::free() _mapping = nullptr; _memory.set_region(nullptr); clear_quantization_arrays(_scale, _offset); - info().set_is_resizable(true); + set_imported(false); + set_resizable_if_info_owned(true); } bool CLTensorAllocator::is_allocated() const @@ -182,7 +184,8 @@ Status CLTensorAllocator::import_memory(cl::Buffer buffer) _memory.set_owned_region(std::make_unique<CLBufferMemoryRegion>(buffer)); - info().set_is_resizable(false); + set_imported(true); + set_resizable_if_info_owned(false); return Status{}; } diff --git a/src/runtime/ITensorAllocator.cpp b/src/runtime/ITensorAllocator.cpp index fe3d2804cb..f3a66a71f6 100644 --- a/src/runtime/ITensorAllocator.cpp +++ b/src/runtime/ITensorAllocator.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2021 Arm Limited. + * Copyright (c) 2016-2021, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -35,12 +35,16 @@ void ITensorAllocator::init(const TensorInfo &input, size_t alignment) _info_owned = input; _info_external = nullptr; _alignment = alignment; + _owns_info = true; + _is_imported = false; } void ITensorAllocator::soft_init(TensorInfo &input, size_t alignment) { _info_external = &input; _alignment = alignment; + _owns_info = false; + _is_imported = false; } TensorInfo &ITensorAllocator::info() @@ -57,3 +61,38 @@ size_t ITensorAllocator::alignment() const { return _alignment; } + +bool ITensorAllocator::owns_info() const +{ + return _owns_info; +} + +bool ITensorAllocator::is_imported() const +{ + return _is_imported; +} + +void ITensorAllocator::set_imported(bool imported) +{ + _is_imported = imported; +} + +bool ITensorAllocator::allocator_considers_resizable() const +{ + if (_owns_info) + { + const TensorInfo *info_ptr = (_info_external != nullptr) ? _info_external : &_info_owned; + return (info_ptr != nullptr) ? info_ptr->is_resizable() : true; + } + return !_is_imported; +} + +bool ITensorAllocator::set_resizable_if_info_owned(bool is_resizable) +{ + if (_owns_info) + { + info().set_is_resizable(is_resizable); + return true; + } + return false; +} diff --git a/src/runtime/TensorAllocator.cpp b/src/runtime/TensorAllocator.cpp index b803f77522..82ad5ee04b 100644 --- a/src/runtime/TensorAllocator.cpp +++ b/src/runtime/TensorAllocator.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2020, 2024 Arm Limited. + * Copyright (c) 2016-2020, 2024, 2026 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -71,7 +71,7 @@ TensorAllocator::TensorAllocator(IMemoryManageable *owner) : _owner(owner), _ass TensorAllocator::~TensorAllocator() { - info().set_is_resizable(true); + set_resizable_if_info_owned(true); } TensorAllocator::TensorAllocator(TensorAllocator &&o) noexcept @@ -142,13 +142,15 @@ void TensorAllocator::allocate() { _associated_memory_group->finalize_memory(_owner, _memory, info().total_size(), alignment_to_use); } - info().set_is_resizable(false); + set_imported(false); + set_resizable_if_info_owned(false); } void TensorAllocator::free() { _memory.set_region(nullptr); - info().set_is_resizable(true); + set_imported(false); + set_resizable_if_info_owned(true); } bool TensorAllocator::is_allocated() const @@ -163,7 +165,8 @@ Status TensorAllocator::import_memory(void *memory) ARM_COMPUTE_RETURN_ERROR_ON(alignment() != 0 && !arm_compute::utility::check_aligned(memory, alignment())); _memory.set_owned_region(std::make_unique<MemoryRegion>(memory, info().total_size())); - info().set_is_resizable(false); + set_imported(true); + set_resizable_if_info_owned(false); return Status{}; } diff --git a/tests/validation/UNIT/shared_import_memory_test.cpp b/tests/validation/UNIT/shared_import_memory_test.cpp new file mode 100644 index 0000000000..457534d384 --- /dev/null +++ b/tests/validation/UNIT/shared_import_memory_test.cpp @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2026 Arm Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "arm_compute/core/Coordinates.h" +#include "arm_compute/core/TensorInfo.h" +#include "arm_compute/core/TensorShape.h" +#include "arm_compute/runtime/Tensor.h" + +#include "tests/framework/Asserts.h" +#include "tests/framework/Macros.h" +#include "tests/validation/Validation.h" + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +TEST_SUITE(UNIT) +TEST_SUITE(TensorInfo) + +TEST_CASE(ImportMemoryDoesNotMutateExternalInfo, framework::DatasetMode::ALL) +{ + TensorInfo out_info(TensorShape(16U, 4U, 4U), 1, DataType::F32, DataLayout::NHWC); + out_info.set_is_resizable(true); + + Tensor out_tensor; + out_tensor.allocator()->init(out_info); + out_tensor.allocator()->allocate(); + + // Simulate a shared TensorInfo used as a view wrapper. 
+ TensorInfo shared_info(out_info); + shared_info.set_is_resizable(true); + + Tensor view_tensor; + view_tensor.allocator()->soft_init(shared_info); + + // Ensure it's still resizable before import. + ARM_COMPUTE_EXPECT(shared_info.is_resizable(), framework::LogLevel::ERRORS); + + // Import memory into the view tensor. + ARM_COMPUTE_ASSERT(bool(view_tensor.allocator()->import_memory(out_tensor.buffer()))); + + // Regression assert: import_memory must NOT mutate the caller-owned shared_info. + ARM_COMPUTE_EXPECT(shared_info.is_resizable(), framework::LogLevel::ERRORS); + + // extend_padding should succeed (not throw). + ARM_COMPUTE_EXPECT_NO_THROW(shared_info.extend_padding(PaddingSize(1, 1, 1, 1)), framework::LogLevel::ERRORS); +} + +TEST_SUITE_END() // TensorInfo +TEST_SUITE_END() // UNIT +} // namespace validation +} // namespace test +} // namespace arm_compute