/*
 * Copyright (c) 2018-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_CLELEMENTWISEOPERATIONS_H
#define ARM_COMPUTE_CLELEMENTWISEOPERATIONS_H

#include "arm_compute/runtime/CL/ICLOperator.h"
#include "arm_compute/runtime/IFunction.h"

#include <memory>

namespace arm_compute
{
class ICLTensor;
class CLCompileContext;
class ITensorInfo;

namespace experimental
{
/** Basic function to run @ref CLSaturatedArithmeticOperationKernel for addition
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
 * @note The function performs an arithmetic addition between two tensors.
 */
class CLArithmeticAddition : public ICLOperator
{
public:
    /** Default Constructor */
    CLArithmeticAddition();
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8) -> U8
     * - (U8,U8) -> S16
     * - (S16,U8) -> S16
     * - (U8,S16) -> S16
     * - (S16,S16) -> S16
     * - (S32,S32) -> S32
     * - (F16,F16) -> F16
     * - (F32,F32) -> F32
     * - (QASYMM8,QASYMM8) -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16) -> QSYMM16
     *
     * @param[in] compile_context The compile context to be used.
     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] policy Policy to use to handle overflow.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
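     *
     * A minimal configuration sketch (illustrative only; the tensor shapes, the data type and the use of the
     * library's default compile context are assumptions, not requirements of this interface):
     * @code
     * TensorInfo in1_info(TensorShape(27U, 13U, 2U), 1, DataType::F32);
     * TensorInfo in2_info(TensorShape(27U, 13U, 2U), 1, DataType::F32);
     * TensorInfo out_info(TensorShape(27U, 13U, 2U), 1, DataType::F32);
     *
     * experimental::CLArithmeticAddition add;
     * // Assumes the library-wide default compile context is appropriate for this kernel.
     * add.configure(CLKernelLibrary::get().get_compile_context(), &in1_info, &in2_info, &out_info, ConvertPolicy::SATURATE);
     * @endcode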
     */
    void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, ConvertPolicy policy,
                   const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLSaturatedArithmeticOperationKernel for addition
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8) -> U8
     * - (U8,U8) -> S16
     * - (S16,U8) -> S16
     * - (U8,S16) -> S16
     * - (S16,S16) -> S16
     * - (S32,S32) -> S32
     * - (F16,F16) -> F16
     * - (F32,F32) -> F32
     * - (QASYMM8,QASYMM8) -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16) -> QSYMM16
     *
     * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] input2 Second tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] output Output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] policy Policy to use to handle overflow.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;
};

/** Basic function to run @ref CLSaturatedArithmeticOperationKernel for subtraction
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
 * @note The function performs an arithmetic subtraction between two tensors.
 */
class CLArithmeticSubtraction : public ICLOperator
{
public:
    /** Default Constructor */
    CLArithmeticSubtraction();
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8) -> U8
     * - (U8,U8) -> S16
     * - (S16,U8) -> S16
     * - (U8,S16) -> S16
     * - (S16,S16) -> S16
     * - (S32,S32) -> S32
     * - (F16,F16) -> F16
     * - (F32,F32) -> F32
     * - (QASYMM8,QASYMM8) -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16) -> QSYMM16
     *
     * @param[in] compile_context The compile context to be used.
     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] policy Policy to use to handle overflow.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
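     *
     * Once configured, the experimental operators are executed through an @ref ITensorPack rather than the
     * tensors passed at configuration time. A sketch (sub is an already configured operator and src0, src1,
     * dst stand for pre-allocated ICLTensor objects; the ACL_SRC_0/ACL_SRC_1/ACL_DST slots are the convention
     * assumed here and may differ between library versions):
     * @code
     * ITensorPack pack;
     * pack.add_tensor(TensorType::ACL_SRC_0, &src0);
     * pack.add_tensor(TensorType::ACL_SRC_1, &src1);
     * pack.add_tensor(TensorType::ACL_DST, &dst);
     * sub.run(pack);
     * @endcode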
     */
    void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, ConvertPolicy policy,
                   const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLSaturatedArithmeticOperationKernel for subtraction
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8) -> U8
     * - (U8,U8) -> S16
     * - (S16,U8) -> S16
     * - (U8,S16) -> S16
     * - (S16,S16) -> S16
     * - (S32,S32) -> S32
     * - (F16,F16) -> F16
     * - (F32,F32) -> F32
     * - (QASYMM8,QASYMM8) -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16) -> QSYMM16
     *
     * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] input2 Second tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] output Output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] policy Policy to use to handle overflow.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;
};

/** Basic function to run @ref CLArithmeticOperationKernel for division
 *
 * @note The tensor data type for the inputs must be F16/F32.
 * @note The function performs an arithmetic division between two tensors.
 */
class CLArithmeticDivision : public ICLOperator
{
public:
    /** Default Constructor */
    CLArithmeticDivision();
    /** Initialise the kernel's inputs and output.
     *
     * @param[in] compile_context The compile context to be used.
     * @param[in, out] input1 First tensor input. Data types supported: F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Same as @p input1.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: Same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticDivision
     *
     * @param[in] input1 First tensor input info. Data types supported: F16/F32.
     * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
     * @param[in] output Output tensor info. Data types supported: Same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
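     *
     * A typical pre-flight check before calling configure() (sketch; the *_info objects are assumed to be
     * TensorInfo instances describing the intended tensors):
     * @code
     * Status st = experimental::CLArithmeticDivision::validate(&in1_info, &in2_info, &out_info);
     * if(st.error_code() != ErrorCode::OK)
     * {
     *     // Handle the invalid configuration, e.g. report st.error_description().
     * }
     * @endcode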
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;
};

/** Basic function to run @ref CLArithmeticOperationKernel for max
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
 * @note The function performs a max operation between two tensors.
 */
class CLElementwiseMax : public ICLOperator
{
public:
    /** Default Constructor */
    CLElementwiseMax();
    /** Initialise the kernel's inputs and output.
     *
     * @param[in] compile_context The compile context to be used.
     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: same as @p input1.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for max
     *
     * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     * @param[in] input2 Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;
};

/** Basic function to run @ref CLArithmeticOperationKernel for min
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
 * @note The function performs a min operation between two tensors.
 */
class CLElementwiseMin : public ICLOperator
{
public:
    /** Default Constructor */
    CLElementwiseMin();
    /** Initialise the kernel's inputs and output.
     *
     * @param[in] compile_context The compile context to be used.
     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: same as @p input1.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for min
     *
     * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     * @param[in] input2 Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;
};

/** Basic function to run @ref CLArithmeticOperationKernel for squared difference
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
 * @note The function performs a squared difference operation between two tensors (i.e., out[i] = (in1[i] - in2[i])^2).
 */
class CLElementwiseSquaredDiff : public ICLOperator
{
public:
    /** Default Constructor */
    CLElementwiseSquaredDiff();
    /** Initialise the kernel's inputs and output.
     *
     * @param[in] compile_context The compile context to be used.
     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: same as @p input1.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for squared difference
     *
     * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     * @param[in] input2 Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;
};

/** Basic function to run @ref CLArithmeticOperationKernel for power
 *
 * @note The tensor data type for the inputs must be F16/F32.
 * @note The function performs an elementwise power operation, raising in1 to the power of in2 (i.e., out[i] = in1[i] ^ in2[i]).
 */
class CLElementwisePower : public ICLOperator
{
public:
    /** Default Constructor */
    CLElementwisePower();
    /** Initialise the kernel's inputs and output.
     *
     * @param[in] compile_context The compile context to be used.
     * @param[in, out] input1 First tensor input. Data types supported: F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: F16/F32.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for power
     *
     * @param[in] input1 First tensor input info. Data types supported: F16/F32.
     * @param[in] input2 Second tensor input info. Data types supported: F16/F32.
     * @param[in] output Output tensor info. Data types supported: F16/F32.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;
};
} // namespace experimental

/** Basic function to run @ref CLSaturatedArithmeticOperationKernel for addition
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
 * @note The function performs an arithmetic addition between two tensors.
 */
class CLArithmeticAddition : public IFunction
{
public:
    /** Default Constructor */
    CLArithmeticAddition();
    /** Default Destructor */
    ~CLArithmeticAddition();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticAddition(const CLArithmeticAddition &) = delete;
    /** Default move constructor */
    CLArithmeticAddition(CLArithmeticAddition &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticAddition &operator=(const CLArithmeticAddition &) = delete;
    /** Default move assignment operator */
    CLArithmeticAddition &operator=(CLArithmeticAddition &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8) -> U8
     * - (U8,U8) -> S16
     * - (S16,U8) -> S16
     * - (U8,S16) -> S16
     * - (S16,S16) -> S16
     * - (S32,S32) -> S32
     * - (F16,F16) -> F16
     * - (F32,F32) -> F32
     * - (QASYMM8,QASYMM8) -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16) -> QSYMM16
     *
     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] policy Policy to use to handle overflow.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8) -> U8
     * - (U8,U8) -> S16
     * - (S16,U8) -> S16
     * - (U8,S16) -> S16
     * - (S16,S16) -> S16
     * - (S32,S32) -> S32
     * - (F16,F16) -> F16
     * - (F32,F32) -> F32
     * - (QASYMM8,QASYMM8) -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16) -> QSYMM16
     *
     * @param[in] compile_context The compile context to be used.
     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] policy Policy to use to handle overflow.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy,
                   const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLSaturatedArithmeticOperationKernel for addition
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8) -> U8
     * - (U8,U8) -> S16
     * - (S16,U8) -> S16
     * - (U8,S16) -> S16
     * - (S16,S16) -> S16
     * - (S32,S32) -> S32
     * - (F16,F16) -> F16
     * - (F32,F32) -> F32
     * - (QASYMM8,QASYMM8) -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16) -> QSYMM16
     *
     * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] input2 Second tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] output Output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] policy Policy to use to handle overflow.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;
    std::unique_ptr<Impl> _impl;
};

/** Basic function to run @ref CLSaturatedArithmeticOperationKernel for subtraction
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
 * @note The function performs an arithmetic subtraction between two tensors.
 */
class CLArithmeticSubtraction : public IFunction
{
public:
    /** Default Constructor */
    CLArithmeticSubtraction();
    /** Default Destructor */
    ~CLArithmeticSubtraction();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticSubtraction(const CLArithmeticSubtraction &) = delete;
    /** Default move constructor */
    CLArithmeticSubtraction(CLArithmeticSubtraction &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticSubtraction &operator=(const CLArithmeticSubtraction &) = delete;
    /** Default move assignment operator */
    CLArithmeticSubtraction &operator=(CLArithmeticSubtraction &&);
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8) -> U8
     * - (U8,U8) -> S16
     * - (S16,U8) -> S16
     * - (U8,S16) -> S16
     * - (S16,S16) -> S16
     * - (S32,S32) -> S32
     * - (F16,F16) -> F16
     * - (F32,F32) -> F32
     * - (QASYMM8,QASYMM8) -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16) -> QSYMM16
     *
     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] policy Policy to use to handle overflow.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs, output and conversion policy.
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8) -> U8
     * - (U8,U8) -> S16
     * - (S16,U8) -> S16
     * - (U8,S16) -> S16
     * - (S16,S16) -> S16
     * - (S32,S32) -> S32
     * - (F16,F16) -> F16
     * - (F32,F32) -> F32
     * - (QASYMM8,QASYMM8) -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16) -> QSYMM16
     *
     * @param[in] compile_context The compile context to be used.
     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] policy Policy to use to handle overflow.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy,
                   const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLSaturatedArithmeticOperationKernel for subtraction
     *
     * Valid configurations (Input1,Input2) -> Output :
     *
     * - (U8,U8) -> U8
     * - (U8,U8) -> S16
     * - (S16,U8) -> S16
     * - (U8,S16) -> S16
     * - (S16,S16) -> S16
     * - (S32,S32) -> S32
     * - (F16,F16) -> F16
     * - (F32,F32) -> F32
     * - (QASYMM8,QASYMM8) -> QASYMM8
     * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
     * - (QSYMM16,QSYMM16) -> QSYMM16
     *
     * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] input2 Second tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] output Output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
     * @param[in] policy Policy to use to handle overflow.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;
    std::unique_ptr<Impl> _impl;
};

/** Basic function to run @ref CLArithmeticOperationKernel for division
 *
 * @note The tensor data type for the inputs must be F16/F32.
 * @note The function performs an arithmetic division between two tensors.
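 *
 * A minimal end-to-end sketch of the runtime interface (illustrative only; the shapes, the data type and the
 * scheduler initialisation are assumptions about the surrounding application, not part of this class):
 * @code
 * CLScheduler::get().default_init();
 *
 * CLTensor a, b, dst;
 * a.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));
 * b.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));
 * dst.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));
 *
 * CLArithmeticDivision div_op;
 * div_op.configure(&a, &b, &dst);
 *
 * a.allocator()->allocate();
 * b.allocator()->allocate();
 * dst.allocator()->allocate();
 * // ... map the inputs and fill a and b ...
 *
 * div_op.run();
 * CLScheduler::get().sync();
 * @endcode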
 */
class CLArithmeticDivision : public IFunction
{
public:
    /** Default Constructor */
    CLArithmeticDivision();
    /** Default Destructor */
    ~CLArithmeticDivision();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticDivision(const CLArithmeticDivision &) = delete;
    /** Default move constructor */
    CLArithmeticDivision(CLArithmeticDivision &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLArithmeticDivision &operator=(const CLArithmeticDivision &) = delete;
    /** Default move assignment operator */
    CLArithmeticDivision &operator=(CLArithmeticDivision &&);
    /** Initialise the kernel's inputs and output.
     *
     * @param[in, out] input1 First tensor input. Data types supported: F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Same as @p input1.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: Same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs and output.
     *
     * @param[in] compile_context The compile context to be used.
     * @param[in, out] input1 First tensor input. Data types supported: F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Same as @p input1.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: Same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticDivision
     *
     * @param[in] input1 First tensor input info. Data types supported: F16/F32.
     * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
     * @param[in] output Output tensor info. Data types supported: Same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;
    std::unique_ptr<Impl> _impl;
};

/** Basic function to run @ref CLArithmeticOperationKernel for max
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
 * @note The function performs a max operation between two tensors.
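 *
 * The optional @ref ActivationLayerInfo argument of configure() fuses an activation into the operation so that no
 * separate activation pass is needed. A sketch (a, b and dst are assumed to be already initialised CLTensor objects):
 * @code
 * CLElementwiseMax max_op;
 * max_op.configure(&a, &b, &dst, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f));
 * @endcode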
 */
class CLElementwiseMax : public IFunction
{
public:
    /** Default Constructor */
    CLElementwiseMax();
    /** Default Destructor */
    ~CLElementwiseMax();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseMax(const CLElementwiseMax &) = delete;
    /** Default move constructor */
    CLElementwiseMax(CLElementwiseMax &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseMax &operator=(const CLElementwiseMax &) = delete;
    /** Default move assignment operator */
    CLElementwiseMax &operator=(CLElementwiseMax &&);
    /** Initialise the kernel's inputs and output.
     *
     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: same as @p input1.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs and output.
     *
     * @param[in] compile_context The compile context to be used.
     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: same as @p input1.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for max
     *
     * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     * @param[in] input2 Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;
    std::unique_ptr<Impl> _impl;
};

/** Basic function to run @ref CLArithmeticOperationKernel for min
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
 * @note The function performs a min operation between two tensors.
 */
class CLElementwiseMin : public IFunction
{
public:
    /** Default Constructor */
    CLElementwiseMin();
    /** Default Destructor */
    ~CLElementwiseMin();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseMin(const CLElementwiseMin &) = delete;
    /** Default move constructor */
    CLElementwiseMin(CLElementwiseMin &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseMin &operator=(const CLElementwiseMin &) = delete;
    /** Default move assignment operator */
    CLElementwiseMin &operator=(CLElementwiseMin &&);
    /** Initialise the kernel's inputs and output.
     *
     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: same as @p input1.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs and output.
     *
     * @param[in] compile_context The compile context to be used.
     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: same as @p input1.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for min
     *
     * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32.
     * @param[in] input2 Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;
    std::unique_ptr<Impl> _impl;
};

/** Basic function to run @ref CLArithmeticOperationKernel for squared difference
 *
 * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
 * @note The function performs a squared difference operation between two tensors (i.e., out[i] = (in1[i] - in2[i])^2).
 */
class CLElementwiseSquaredDiff : public IFunction
{
public:
    /** Default Constructor */
    CLElementwiseSquaredDiff();
    /** Default Destructor */
    ~CLElementwiseSquaredDiff();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseSquaredDiff(const CLElementwiseSquaredDiff &) = delete;
    /** Default move constructor */
    CLElementwiseSquaredDiff(CLElementwiseSquaredDiff &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwiseSquaredDiff &operator=(const CLElementwiseSquaredDiff &) = delete;
    /** Default move assignment operator */
    CLElementwiseSquaredDiff &operator=(CLElementwiseSquaredDiff &&);
    /** Initialise the kernel's inputs and output.
     *
     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: same as @p input1.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs and output.
     *
     * @param[in] compile_context The compile context to be used.
     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: same as @p input1.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
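     *
     * The [in, out] qualifier on the inputs refers to broadcasting: when an input has size 1 in dimension 0 it
     * can be broadcast against the other input, and its TensorInfo may be updated by the kernel. A sketch using
     * the plain overload (the shapes are assumptions chosen to illustrate the broadcast):
     * @code
     * CLTensor wide, row, dst;
     * wide.allocator()->init(TensorInfo(TensorShape(27U, 13U), 1, DataType::F32));
     * row.allocator()->init(TensorInfo(TensorShape(1U, 13U), 1, DataType::F32)); // size 1 in dimension 0
     * dst.allocator()->init(TensorInfo(TensorShape(27U, 13U), 1, DataType::F32));
     *
     * CLElementwiseSquaredDiff sq_diff;
     * sq_diff.configure(&wide, &row, &dst);
     * @endcode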
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for squared difference
     *
     * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
     * @param[in] input2 Second tensor input info. Data types supported: same as @p input1.
     * @param[in] output Output tensor info. Data types supported: same as @p input1.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;
    std::unique_ptr<Impl> _impl;
};

/** Basic function to run @ref CLArithmeticOperationKernel for power
 *
 * @note The tensor data type for the inputs must be F16/F32.
 * @note The function performs an elementwise power operation, raising in1 to the power of in2 (i.e., out[i] = in1[i] ^ in2[i]).
 */
class CLElementwisePower : public IFunction
{
public:
    /** Default Constructor */
    CLElementwisePower();
    /** Default Destructor */
    ~CLElementwisePower();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwisePower(const CLElementwisePower &) = delete;
    /** Default move constructor */
    CLElementwisePower(CLElementwisePower &&);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLElementwisePower &operator=(const CLElementwisePower &) = delete;
    /** Default move assignment operator */
    CLElementwisePower &operator=(CLElementwisePower &&);
    /** Initialise the kernel's inputs and output.
     *
     * @param[in, out] input1 First tensor input. Data types supported: F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: F16/F32.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Initialise the kernel's inputs and output.
     *
     * @param[in] compile_context The compile context to be used.
     * @param[in, out] input1 First tensor input. Data types supported: F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[in, out] input2 Second tensor input. Data types supported: F16/F32.
     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
     * @param[out] output Output tensor. Data types supported: F16/F32.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for power
     *
     * @param[in] input1 First tensor input info. Data types supported: F16/F32.
     * @param[in] input2 Second tensor input info. Data types supported: F16/F32.
     * @param[in] output Output tensor info. Data types supported: F16/F32.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());

    // Inherited methods overridden:
    void run() override;

private:
    struct Impl;
    std::unique_ptr<Impl> _impl;
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_CLELEMENTWISEOPERATIONS_H */