//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/backends/CpuTensorHandleFwd.hpp>
#include <armnn/backends/ITensorHandle.hpp>

#include <InternalTypes.hpp>

#include <armnn/Deprecated.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Exceptions.hpp>
#include <armnn/Types.hpp>
#include <armnn/Tensor.hpp>

#include <backendsCommon/WorkloadInfo.hpp>

namespace armnn
{

// A helper function that returns the bias data type required for a given input data type.
DataType GetBiasDataType(DataType inputDataType);

struct WorkloadInfo;

struct QueueDescriptor
{
    std::vector<ITensorHandle*> m_Inputs;
    std::vector<ITensorHandle*> m_Outputs;
    void* m_AdditionalInfoObject;

    void ValidateInputsOutputs(const std::string& descName,
                               unsigned int numExpectedIn,
                               unsigned int numExpectedOut) const;

    template <typename T>
    const T* GetAdditionalInformation() const
    {
        return static_cast<T*>(m_AdditionalInfoObject);
    }

protected:
    ~QueueDescriptor() = default;
    QueueDescriptor()
        : m_AdditionalInfoObject(nullptr)
    {}
    QueueDescriptor(QueueDescriptor const&) = default;
    QueueDescriptor& operator=(QueueDescriptor const&) = default;
};

// Base class for queue descriptors which contain parameters.
template <typename LayerDescriptor>
struct QueueDescriptorWithParameters : public QueueDescriptor
{
    LayerDescriptor m_Parameters;

protected:
    ~QueueDescriptorWithParameters() = default;
    QueueDescriptorWithParameters() = default;
    QueueDescriptorWithParameters(QueueDescriptorWithParameters const&) = default;
    QueueDescriptorWithParameters& operator=(QueueDescriptorWithParameters const&) = default;
};

struct MapQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct UnmapQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct MemCopyQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

using InputQueueDescriptor = MemCopyQueueDescriptor;
using OutputQueueDescriptor = MemCopyQueueDescriptor;

struct MemImportQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct MemSyncQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Softmax layer workload data.
struct SoftmaxQueueDescriptor : QueueDescriptorWithParameters<SoftmaxDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};
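// Illustrative usage (not part of this header's API): a backend workload typically receives
// a queue descriptor together with a WorkloadInfo, validates the handle counts, and caches
// the tensor handles. The workload type below is hypothetical; only the descriptor calls
// and members are real.
//
//     struct ExampleSoftmaxWorkload
//     {
//         ExampleSoftmaxWorkload(const SoftmaxQueueDescriptor& descriptor, const WorkloadInfo& info)
//         {
//             // Throws if the descriptor does not carry exactly one input and one output.
//             descriptor.ValidateInputsOutputs("ExampleSoftmaxWorkload", 1, 1);
//             m_Input  = descriptor.m_Inputs[0];
//             m_Output = descriptor.m_Outputs[0];
//             // m_Parameters comes from QueueDescriptorWithParameters<SoftmaxDescriptor>.
//             m_Beta = descriptor.m_Parameters.m_Beta;
//         }
//
//         ITensorHandle* m_Input;
//         ITensorHandle* m_Output;
//         float m_Beta;
//     };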
// Splitter layer workload data.
struct SplitterQueueDescriptor : QueueDescriptorWithParameters<ViewsDescriptor>
{
    struct ViewOrigin
    {
        ViewOrigin() {}
        ViewOrigin(std::vector<unsigned int> const& origin) : m_Origin(origin) {}

        // View origin (the size of the vector is the same as the number of dimensions of the view).
        std::vector<unsigned int> m_Origin;
    };

    // A view defines a tensor that will be carved from the input tensor.
    // View origins are stored here; the extents are defined by the sizes of the output tensors.
    std::vector<ViewOrigin> m_ViewOrigins;

    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Concat layer workload data.
struct ConcatQueueDescriptor : QueueDescriptorWithParameters<OriginsDescriptor>
{
    struct ViewOrigin
    {
        ViewOrigin() {}
        ViewOrigin(const std::vector<unsigned int>& origin) : m_Origin(origin) {}

        // View origin (the size of the vector is the same as the number of dimensions of the view).
        std::vector<unsigned int> m_Origin;
    };

    // A view defines a sub-area of the output tensor that will be filled with the corresponding input tensor.
    // View origins are stored here; the extents are defined by the sizes of the input tensors.
    std::vector<ViewOrigin> m_ViewOrigins;

    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Deprecated. Use ConcatQueueDescriptor instead.
using MergerQueueDescriptor = ConcatQueueDescriptor;

// Stack layer workload data.
struct StackQueueDescriptor : QueueDescriptorWithParameters<StackDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Activation layer workload data.
struct ActivationQueueDescriptor : QueueDescriptorWithParameters<ActivationDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct ArgMinMaxQueueDescriptor : QueueDescriptorWithParameters<ArgMinMaxDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Fill layer workload data.
struct FillQueueDescriptor : QueueDescriptorWithParameters<FillDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Fully connected layer workload data.
struct FullyConnectedQueueDescriptor : QueueDescriptorWithParameters<FullyConnectedDescriptor>
{
    FullyConnectedQueueDescriptor()
        : m_Weight(nullptr)
        , m_Bias(nullptr)
    {
    }

    const ConstCpuTensorHandle* m_Weight;
    const ConstCpuTensorHandle* m_Bias;

    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Permute layer workload data.
struct PermuteQueueDescriptor : QueueDescriptorWithParameters<PermuteDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Pooling 2D layer workload data.
struct Pooling2dQueueDescriptor : QueueDescriptorWithParameters<Pooling2dDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Convolution 2D layer workload data.
struct Convolution2dQueueDescriptor : QueueDescriptorWithParameters<Convolution2dDescriptor>
{
    Convolution2dQueueDescriptor()
        : m_Weight(nullptr)
        , m_Bias(nullptr)
    {
    }

    const ConstCpuTensorHandle* m_Weight;
    const ConstCpuTensorHandle* m_Bias;

    void Validate(const WorkloadInfo& workloadInfo) const;
};
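// Illustrative sketch (assumptions flagged inline): filling in a Convolution2dQueueDescriptor
// before a workload is created. ScopedCpuTensorHandle (forward-declared via
// CpuTensorHandleFwd.hpp) is assumed here to be the concrete ConstCpuTensorHandle that owns
// a copy of a ConstTensor; the tensors and handles are hypothetical.
//
//     Convolution2dQueueDescriptor descriptor;
//     descriptor.m_Parameters.m_StrideX     = 1;
//     descriptor.m_Parameters.m_StrideY     = 1;
//     descriptor.m_Parameters.m_BiasEnabled = true;
//
//     ScopedCpuTensorHandle weightsHandle(weightsConstTensor); // hypothetical ConstTensor
//     ScopedCpuTensorHandle biasHandle(biasConstTensor);       // hypothetical ConstTensor
//     descriptor.m_Weight = &weightsHandle;
//     descriptor.m_Bias   = &biasHandle;                       // only read when m_BiasEnabled
//
//     descriptor.m_Inputs.push_back(inputHandle);   // ITensorHandle* from the backend
//     descriptor.m_Outputs.push_back(outputHandle); // ITensorHandle* from the backend
//     descriptor.Validate(workloadInfo);            // WorkloadInfo with matching TensorInfos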
// Depthwise Convolution 2D layer workload data.
struct DepthwiseConvolution2dQueueDescriptor : QueueDescriptorWithParameters<DepthwiseConvolution2dDescriptor>
{
    DepthwiseConvolution2dQueueDescriptor()
        : m_Weight(nullptr)
        , m_Bias(nullptr)
    {
    }

    const ConstCpuTensorHandle* m_Weight;
    const ConstCpuTensorHandle* m_Bias;

    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct DetectionPostProcessQueueDescriptor : QueueDescriptorWithParameters<DetectionPostProcessDescriptor>
{
    DetectionPostProcessQueueDescriptor()
        : m_Anchors(nullptr)
    {
    }

    const ConstCpuTensorHandle* m_Anchors;

    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Normalization layer workload data.
struct NormalizationQueueDescriptor : QueueDescriptorWithParameters<NormalizationDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Add layer workload data.
struct AdditionQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Multiplication layer workload data.
struct MultiplicationQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Division layer workload data.
struct DivisionQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Subtraction layer workload data.
struct SubtractionQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Maximum layer workload data.
struct MaximumQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Mean layer workload data.
struct MeanQueueDescriptor : QueueDescriptorWithParameters<MeanDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Pad layer workload data.
struct PadQueueDescriptor : QueueDescriptorWithParameters<PadDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct QuantizeQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Deprecated. Use ComparisonQueueDescriptor instead.
struct EqualQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};
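// Illustrative sketch: the parameterless binary descriptors above (Addition, Multiplication,
// Division, Subtraction, Maximum) all carry exactly two inputs and one output; a backend
// workload would typically validate them as below. The handle names are hypothetical.
//
//     AdditionQueueDescriptor additionDescriptor;
//     additionDescriptor.m_Inputs  = { lhsHandle, rhsHandle }; // ITensorHandle*
//     additionDescriptor.m_Outputs = { sumHandle };
//     additionDescriptor.ValidateInputsOutputs("Addition", 2, 1);
//
// For the convolution-style descriptors above, GetBiasDataType (declared at the top of this
// header) gives the bias type matching an input type; for example, quantized 8-bit inputs
// take 32-bit signed integer biases.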
// Batch norm layer workload data.
struct BatchNormalizationQueueDescriptor : QueueDescriptorWithParameters<BatchNormalizationDescriptor>
{
    BatchNormalizationQueueDescriptor()
        : m_Mean(nullptr)
        , m_Variance(nullptr)
        , m_Beta(nullptr)
        , m_Gamma(nullptr)
    {
    }

    const ConstCpuTensorHandle* m_Mean;
    const ConstCpuTensorHandle* m_Variance;
    const ConstCpuTensorHandle* m_Beta;
    const ConstCpuTensorHandle* m_Gamma;

    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct RankQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct ResizeBilinearQueueDescriptor : QueueDescriptorWithParameters<ResizeBilinearDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct ResizeQueueDescriptor : QueueDescriptorWithParameters<ResizeDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct FakeQuantizationQueueDescriptor : QueueDescriptorWithParameters<FakeQuantizationDescriptor>
{
    FakeQuantizationQueueDescriptor()
        : m_Min(nullptr)
        , m_Max(nullptr)
    {
    }

    const ConstCpuTensorHandle* m_Min;
    const ConstCpuTensorHandle* m_Max;

    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct InstanceNormalizationQueueDescriptor : QueueDescriptorWithParameters<InstanceNormalizationDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct L2NormalizationQueueDescriptor : QueueDescriptorWithParameters<L2NormalizationDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct LogSoftmaxQueueDescriptor : QueueDescriptorWithParameters<LogSoftmaxDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct ConstantQueueDescriptor : QueueDescriptor
{
    ConstantQueueDescriptor()
        : m_LayerOutput(nullptr)
    {
    }

    const ConstCpuTensorHandle* m_LayerOutput;

    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct ReshapeQueueDescriptor : QueueDescriptorWithParameters<ReshapeDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct SpaceToBatchNdQueueDescriptor : QueueDescriptorWithParameters<SpaceToBatchNdDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct SpaceToDepthQueueDescriptor : QueueDescriptorWithParameters<SpaceToDepthDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct FloorQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};
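// Illustrative sketch: BatchNormalizationQueueDescriptor (above) carries its per-channel
// statistics as constant tensor handles, alongside the epsilon in its layer descriptor.
// The ConstTensor values and handle names are hypothetical; ScopedCpuTensorHandle is assumed
// to own a ConstTensor copy, as in the convolution sketch earlier.
//
//     BatchNormalizationQueueDescriptor descriptor;
//     descriptor.m_Parameters.m_Eps = 1e-5f;
//
//     ScopedCpuTensorHandle meanHandle(meanConstTensor);
//     ScopedCpuTensorHandle varianceHandle(varianceConstTensor);
//     ScopedCpuTensorHandle betaHandle(betaConstTensor);
//     ScopedCpuTensorHandle gammaHandle(gammaConstTensor);
//     descriptor.m_Mean     = &meanHandle;
//     descriptor.m_Variance = &varianceHandle;
//     descriptor.m_Beta     = &betaHandle;
//     descriptor.m_Gamma    = &gammaHandle;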
struct LstmQueueDescriptor : QueueDescriptorWithParameters<LstmDescriptor>
{
    LstmQueueDescriptor()
        : m_InputToInputWeights(nullptr)
        , m_InputToForgetWeights(nullptr)
        , m_InputToCellWeights(nullptr)
        , m_InputToOutputWeights(nullptr)
        , m_RecurrentToInputWeights(nullptr)
        , m_RecurrentToForgetWeights(nullptr)
        , m_RecurrentToCellWeights(nullptr)
        , m_RecurrentToOutputWeights(nullptr)
        , m_CellToInputWeights(nullptr)
        , m_CellToForgetWeights(nullptr)
        , m_CellToOutputWeights(nullptr)
        , m_InputGateBias(nullptr)
        , m_ForgetGateBias(nullptr)
        , m_CellBias(nullptr)
        , m_OutputGateBias(nullptr)
        , m_ProjectionWeights(nullptr)
        , m_ProjectionBias(nullptr)
        , m_InputLayerNormWeights(nullptr)
        , m_ForgetLayerNormWeights(nullptr)
        , m_CellLayerNormWeights(nullptr)
        , m_OutputLayerNormWeights(nullptr)
    {
    }

    const ConstCpuTensorHandle* m_InputToInputWeights;
    const ConstCpuTensorHandle* m_InputToForgetWeights;
    const ConstCpuTensorHandle* m_InputToCellWeights;
    const ConstCpuTensorHandle* m_InputToOutputWeights;
    const ConstCpuTensorHandle* m_RecurrentToInputWeights;
    const ConstCpuTensorHandle* m_RecurrentToForgetWeights;
    const ConstCpuTensorHandle* m_RecurrentToCellWeights;
    const ConstCpuTensorHandle* m_RecurrentToOutputWeights;
    const ConstCpuTensorHandle* m_CellToInputWeights;
    const ConstCpuTensorHandle* m_CellToForgetWeights;
    const ConstCpuTensorHandle* m_CellToOutputWeights;
    const ConstCpuTensorHandle* m_InputGateBias;
    const ConstCpuTensorHandle* m_ForgetGateBias;
    const ConstCpuTensorHandle* m_CellBias;
    const ConstCpuTensorHandle* m_OutputGateBias;
    const ConstCpuTensorHandle* m_ProjectionWeights;
    const ConstCpuTensorHandle* m_ProjectionBias;
    const ConstCpuTensorHandle* m_InputLayerNormWeights;
    const ConstCpuTensorHandle* m_ForgetLayerNormWeights;
    const ConstCpuTensorHandle* m_CellLayerNormWeights;
    const ConstCpuTensorHandle* m_OutputLayerNormWeights;

    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct ConvertBf16ToFp32QueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct ConvertFp32ToBf16QueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct ConvertFp16ToFp32QueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct ConvertFp32ToFp16QueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct BatchToSpaceNdQueueDescriptor : QueueDescriptorWithParameters<BatchToSpaceNdDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct StridedSliceQueueDescriptor : QueueDescriptorWithParameters<StridedSliceDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};
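// Note on the LSTM members above (the exact rules live in the corresponding Validate
// implementation): the input-gate members (m_InputToInputWeights, m_RecurrentToInputWeights,
// m_InputGateBias) are only used when CIFG is disabled, the m_CellTo*Weights when peephole
// connections are enabled, the m_Projection* members when the projection layer is enabled,
// and the m_*LayerNormWeights when layer normalization is enabled; unused members stay null.
// A minimal, hypothetical configuration:
//
//     LstmQueueDescriptor lstmDescriptor;
//     lstmDescriptor.m_Parameters.m_CifgEnabled       = true;  // input-gate members stay null
//     lstmDescriptor.m_Parameters.m_PeepholeEnabled   = false; // m_CellTo*Weights stay null
//     lstmDescriptor.m_Parameters.m_ProjectionEnabled = false; // m_Projection* stay null
//     lstmDescriptor.m_Parameters.m_LayerNormEnabled  = false; // m_*LayerNormWeights stay null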
// Minimum layer workload data.
struct MinimumQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

// Deprecated. Use ComparisonQueueDescriptor instead.
struct GreaterQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct DebugQueueDescriptor : QueueDescriptor
{
    DebugQueueDescriptor()
        : m_Guid(0)
        , m_SlotIndex(0) // initialise to avoid an indeterminate value
    {}

    void Validate(const WorkloadInfo& workloadInfo) const;

    LayerGuid m_Guid;
    std::string m_LayerName;
    unsigned int m_SlotIndex;
};

struct RsqrtQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct GatherQueueDescriptor : QueueDescriptorWithParameters<GatherDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct PreCompiledQueueDescriptor : QueueDescriptorWithParameters<PreCompiledDescriptor>
{
    PreCompiledQueueDescriptor()
        : m_PreCompiledObject(nullptr)
    {
    }

    void* m_PreCompiledObject;

    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct DequantizeQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct MergeQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct SwitchQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct PreluQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct TransposeConvolution2dQueueDescriptor : QueueDescriptorWithParameters<TransposeConvolution2dDescriptor>
{
    TransposeConvolution2dQueueDescriptor()
        : m_Weight(nullptr)
        , m_Bias(nullptr)
    {}

    const ConstCpuTensorHandle* m_Weight;
    const ConstCpuTensorHandle* m_Bias;

    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct TransposeQueueDescriptor : QueueDescriptorWithParameters<TransposeDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};
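// Illustrative sketch: DebugQueueDescriptor (above) is unusual in carrying graph metadata
// rather than trainable constants; a debug workload can use it to tag the tensor it traces.
// The values and handles shown are hypothetical.
//
//     DebugQueueDescriptor debugDescriptor;
//     debugDescriptor.m_Guid      = tracedLayerGuid; // LayerGuid of the layer being traced
//     debugDescriptor.m_LayerName = "conv1";
//     debugDescriptor.m_SlotIndex = 0;               // output slot being captured
//     debugDescriptor.m_Inputs.push_back(inputHandle);
//     debugDescriptor.m_Outputs.push_back(outputHandle);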
struct QLstmQueueDescriptor : QueueDescriptorWithParameters<QLstmDescriptor>
{
    QLstmQueueDescriptor()
        : m_InputToInputWeights(nullptr)
        , m_InputToForgetWeights(nullptr)
        , m_InputToCellWeights(nullptr)
        , m_InputToOutputWeights(nullptr)
        , m_RecurrentToInputWeights(nullptr)
        , m_RecurrentToForgetWeights(nullptr)
        , m_RecurrentToCellWeights(nullptr)
        , m_RecurrentToOutputWeights(nullptr)
        , m_CellToInputWeights(nullptr)
        , m_CellToForgetWeights(nullptr)
        , m_CellToOutputWeights(nullptr)
        , m_InputGateBias(nullptr)
        , m_ForgetGateBias(nullptr)
        , m_CellBias(nullptr)
        , m_OutputGateBias(nullptr)
        , m_ProjectionWeights(nullptr)
        , m_ProjectionBias(nullptr)
        , m_InputLayerNormWeights(nullptr)
        , m_ForgetLayerNormWeights(nullptr)
        , m_CellLayerNormWeights(nullptr)
        , m_OutputLayerNormWeights(nullptr)
    {
    }

    const ConstCpuTensorHandle* m_InputToInputWeights;
    const ConstCpuTensorHandle* m_InputToForgetWeights;
    const ConstCpuTensorHandle* m_InputToCellWeights;
    const ConstCpuTensorHandle* m_InputToOutputWeights;
    const ConstCpuTensorHandle* m_RecurrentToInputWeights;
    const ConstCpuTensorHandle* m_RecurrentToForgetWeights;
    const ConstCpuTensorHandle* m_RecurrentToCellWeights;
    const ConstCpuTensorHandle* m_RecurrentToOutputWeights;
    const ConstCpuTensorHandle* m_CellToInputWeights;
    const ConstCpuTensorHandle* m_CellToForgetWeights;
    const ConstCpuTensorHandle* m_CellToOutputWeights;
    const ConstCpuTensorHandle* m_InputGateBias;
    const ConstCpuTensorHandle* m_ForgetGateBias;
    const ConstCpuTensorHandle* m_CellBias;
    const ConstCpuTensorHandle* m_OutputGateBias;
    const ConstCpuTensorHandle* m_ProjectionWeights;
    const ConstCpuTensorHandle* m_ProjectionBias;
    const ConstCpuTensorHandle* m_InputLayerNormWeights;
    const ConstCpuTensorHandle* m_ForgetLayerNormWeights;
    const ConstCpuTensorHandle* m_CellLayerNormWeights;
    const ConstCpuTensorHandle* m_OutputLayerNormWeights;

    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct QuantizedLstmQueueDescriptor : QueueDescriptor
{
    QuantizedLstmQueueDescriptor()
        : m_InputToInputWeights(nullptr)
        , m_InputToForgetWeights(nullptr)
        , m_InputToCellWeights(nullptr)
        , m_InputToOutputWeights(nullptr)

        , m_RecurrentToInputWeights(nullptr)
        , m_RecurrentToForgetWeights(nullptr)
        , m_RecurrentToCellWeights(nullptr)
        , m_RecurrentToOutputWeights(nullptr)

        , m_InputGateBias(nullptr)
        , m_ForgetGateBias(nullptr)
        , m_CellBias(nullptr)
        , m_OutputGateBias(nullptr)
    {}

    const ConstCpuTensorHandle* m_InputToInputWeights;
    const ConstCpuTensorHandle* m_InputToForgetWeights;
    const ConstCpuTensorHandle* m_InputToCellWeights;
    const ConstCpuTensorHandle* m_InputToOutputWeights;

    const ConstCpuTensorHandle* m_RecurrentToInputWeights;
    const ConstCpuTensorHandle* m_RecurrentToForgetWeights;
    const ConstCpuTensorHandle* m_RecurrentToCellWeights;
    const ConstCpuTensorHandle* m_RecurrentToOutputWeights;

    const ConstCpuTensorHandle* m_InputGateBias;
    const ConstCpuTensorHandle* m_ForgetGateBias;
    const ConstCpuTensorHandle* m_CellBias;
    const ConstCpuTensorHandle* m_OutputGateBias;

    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct AbsQueueDescriptor : QueueDescriptor
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct SliceQueueDescriptor : QueueDescriptorWithParameters<SliceDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct DepthToSpaceQueueDescriptor : QueueDescriptorWithParameters<DepthToSpaceDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct ComparisonQueueDescriptor : QueueDescriptorWithParameters<ComparisonDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct ElementwiseUnaryQueueDescriptor : QueueDescriptorWithParameters<ElementwiseUnaryDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

struct LogicalBinaryQueueDescriptor : QueueDescriptorWithParameters<LogicalBinaryDescriptor>
{
    void Validate(const WorkloadInfo& workloadInfo) const;
};

} // namespace armnn
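// Illustrative sketch: ComparisonQueueDescriptor and LogicalBinaryQueueDescriptor select
// their operation through an enum in their layer descriptor rather than through
// per-operation structs, which is why the deprecated Equal/Greater descriptors above have
// direct replacements. The handles and WorkloadInfo are hypothetical.
//
//     armnn::ComparisonQueueDescriptor comparisonDescriptor;
//     comparisonDescriptor.m_Parameters =
//         armnn::ComparisonDescriptor(armnn::ComparisonOperation::Greater);
//     comparisonDescriptor.m_Inputs  = { lhsHandle, rhsHandle };
//     comparisonDescriptor.m_Outputs = { resultHandle }; // output tensor is Boolean
//     comparisonDescriptor.Validate(workloadInfo);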