/*
 * Copyright (c) 2019-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_NE_DETECTION_POSTPROCESS_H
#define ARM_COMPUTE_NE_DETECTION_POSTPROCESS_H

#include "arm_compute/runtime/NEON/INESimpleFunction.h"

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CPP/functions/CPPDetectionPostProcessLayer.h"
#include "arm_compute/runtime/IMemoryManager.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/NEON/functions/NEDequantizationLayer.h"
#include "arm_compute/runtime/Tensor.h"

#include <map>

namespace arm_compute
{
class ITensor;

/** NE Function to generate the detection output based on center size encoded boxes, class prediction and anchors
 * by doing non maximum suppression.
 *
 * @note Intended for use with MultiBox detection method.
46 */ 47 class NEDetectionPostProcessLayer : public IFunction 48 { 49 public: 50 /** Constructor */ 51 NEDetectionPostProcessLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr); 52 /** Prevent instances of this class from being copied (As this class contains pointers) */ 53 NEDetectionPostProcessLayer(const NEDetectionPostProcessLayer &) = delete; 54 /** Prevent instances of this class from being copied (As this class contains pointers) */ 55 NEDetectionPostProcessLayer &operator=(const NEDetectionPostProcessLayer &) = delete; 56 /** Default destructor */ 57 ~NEDetectionPostProcessLayer() = default; 58 /** Configure the detection output layer NE function 59 * 60 * @param[in] input_box_encoding The bounding box input tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F32. 61 * @param[in] input_score The class prediction input tensor. Data types supported: same as @p input_box_encoding. 62 * @param[in] input_anchors The anchors input tensor. Data types supported: same as @p input_box_encoding. 63 * @param[out] output_boxes The boxes output tensor. Data types supported: F32. 64 * @param[out] output_classes The classes output tensor. Data types supported: Same as @p output_boxes. 65 * @param[out] output_scores The scores output tensor. Data types supported: Same as @p output_boxes. 66 * @param[out] num_detection The number of output detection. Data types supported: Same as @p output_boxes. 67 * @param[in] info (Optional) DetectionPostProcessLayerInfo information. 68 * 69 * @note Output contains all the detections. Of those, only the ones selected by the valid region are valid. 
70 */ 71 void configure(const ITensor *input_box_encoding, const ITensor *input_score, const ITensor *input_anchors, 72 ITensor *output_boxes, ITensor *output_classes, ITensor *output_scores, ITensor *num_detection, DetectionPostProcessLayerInfo info = DetectionPostProcessLayerInfo()); 73 /** Static function to check if given info will lead to a valid configuration of @ref NEDetectionPostProcessLayer 74 * 75 * @param[in] input_box_encoding The bounding box input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F32. 76 * @param[in] input_class_score The class prediction input tensor info. Data types supported: same as @p input_box_encoding. 77 * @param[in] input_anchors The anchors input tensor info. Data types supported: same as @p input_box_encoding. 78 * @param[in] output_boxes The output tensor info. Data types supported: F32. 79 * @param[in] output_classes The output tensor info. Data types supported: Same as @p output_boxes. 80 * @param[in] output_scores The output tensor info. Data types supported: Same as @p output_boxes. 81 * @param[in] num_detection The number of output detection tensor info. Data types supported: Same as @p output_boxes. 82 * @param[in] info (Optional) DetectionPostProcessLayerInfo information. 83 * 84 * @return a status 85 */ 86 static Status validate(const ITensorInfo *input_box_encoding, const ITensorInfo *input_class_score, const ITensorInfo *input_anchors, 87 ITensorInfo *output_boxes, ITensorInfo *output_classes, ITensorInfo *output_scores, ITensorInfo *num_detection, 88 DetectionPostProcessLayerInfo info = DetectionPostProcessLayerInfo()); 89 // Inherited methods overridden: 90 void run() override; 91 92 private: 93 MemoryGroup _memory_group; 94 95 NEDequantizationLayer _dequantize; 96 CPPDetectionPostProcessLayer _detection_post_process; 97 98 Tensor _decoded_scores; 99 bool _run_dequantize; 100 }; 101 } // namespace arm_compute 102 #endif /* ARM_COMPUTE_NE_DETECTION_POSTPROCESS_H */ 103