/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_LITE_SRC_LITERT_KERNEL_CPU_NNACL_CXX_UTILS_H_
#define MINDSPORE_LITE_SRC_LITERT_KERNEL_CPU_NNACL_CXX_UTILS_H_

#include <stddef.h>
#include <stdint.h>

namespace mindspore::nnacl {
// Default memory hooks: allocate and release buffers through the given allocator handle.
void *DefaultAllocatorMalloc(void *allocator, size_t sz);
void DefaultAllocatorFree(void *allocator, void *ptr);
// Default parallel-launch hook: run `task` with `param` over `taskNr` work items on the thread pool.
int DefaultThreadPoolParallelLunch(void *threadPool, void *task, void *param, int taskNr);
// Default pack-data sharing hooks: fetch (and later release) packed tensor data via the sharing manager.
void *DefaultGetSharingPackData(void *manager, const void *tensor_data, const size_t size, bool *is_packed);
void DefaultFreeSharingPackData(void *manager, void *tensor_data);
// Default thread-number tuning hook: adjust the thread count from per-unit load/store cost and unit count.
int DefaultUpdateThreadNumPass(int32_t kernel_type, int64_t per_unit_load_num, int64_t per_unit_store_num,
                               int64_t unit_num, int thread_num);
}  // namespace mindspore::nnacl
#endif  // MINDSPORE_LITE_SRC_LITERT_KERNEL_CPU_NNACL_CXX_UTILS_H_