/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_DELEGATES_HEXAGON_HEXAGON_DELEGATE_H_
#define TENSORFLOW_LITE_DELEGATES_HEXAGON_HEXAGON_DELEGATE_H_

#include "tensorflow/lite/c/common.h"

#ifdef __cplusplus
extern "C" {
#endif  // __cplusplus

// Use TfLiteHexagonDelegateOptionsDefault() for default options.
struct TFL_CAPI_EXPORT TfLiteHexagonDelegateOptions {
  // This corresponds to the debug level in the Hexagon SDK. 0 (default)
  // means no debug.
  int debug_level;

  // This corresponds to powersave_level in the Hexagon SDK, where 0 (default)
  // means high performance, which in turn means more power consumption.
  int powersave_level;

  // If set to true, performance information about the graph will be dumped
  // to standard output; this includes CPU cycles.
  // WARNING: Experimental and subject to change anytime.
  bool print_graph_profile;

  // If set to true, the graph structure will be dumped to standard output.
  // This is usually helpful to see which nodes actually executed on the DSP.
  // Combined with 'debug_level', more information will be printed.
  // WARNING: Experimental and subject to change anytime.
  bool print_graph_debug;

  // This sets the maximum number of Hexagon graphs created with
  // hexagon_nn_init. Each graph corresponds to one delegated node subset in
  // the TFLite model.
  int max_delegated_partitions;

  // This sets the minimum number of nodes per graph created with
  // hexagon_nn_init. Defaults to 2.
  int min_nodes_per_partition;

  // If true, the Hexagon graph will adapt to inputs with a dynamic batch
  // dimension; the related options below need to be set as well.
  // Currently only supported when the whole graph is delegated and the batch
  // dimension is at index 0.
  // WARNING: Experimental and subject to change anytime.
  bool enable_dynamic_batch_size;

  // Maximum value of the batch dimension when evaluating graphs with a
  // dynamic batch. The input to the graph can have a batch value bigger than
  // this number; internally, the graph will run multiple times, each with
  // batch dimension <= max_batch_size. You should choose this value based on
  // memory/latency tradeoffs.
  // This needs to be set only if 'enable_dynamic_batch_size' is true.
  // Not needed for fixed graphs.
  // WARNING: Experimental and subject to change anytime.
  int max_batch_size;

  // Each element identifies the index of the batch dimension in a single
  // input. input_batch_dimensions->data[i] is the index of the batch
  // dimension for input[i]. If the graph has 1 input then the size of the
  // array should be 1, and so on.
  // This needs to be set only if 'enable_dynamic_batch_size' is true.
  // Not needed for fixed graphs.
  // If input[i] doesn't have a dynamic batch, then input_batch_dimensions[i]
  // should be -1.
  // The delegate will take ownership of the pointer.
  // WARNING: Experimental and subject to change anytime.
  TfLiteIntArray* input_batch_dimensions;

  // Each element identifies the index of the batch dimension in a single
  // output. output_batch_dimensions->data[i] is the index of the batch
  // dimension for output[i]. If the graph has 1 output then the size of the
  // array should be 1, and so on.
  // This needs to be set only if 'enable_dynamic_batch_size' is true.
  // Not needed for fixed graphs.
  // If output[i] doesn't have a dynamic batch, then output_batch_dimensions[i]
  // should be -1.
  // The delegate will take ownership of the pointer.
  // WARNING: Experimental and subject to change anytime.
  TfLiteIntArray* output_batch_dimensions;
};

// Returns a delegate that uses the Hexagon SDK for op execution.
// Must outlive the interpreter.
TfLiteDelegate* TFL_CAPI_EXPORT
TfLiteHexagonDelegateCreate(const TfLiteHexagonDelegateOptions* options);

// Returns TfLiteHexagonDelegateOptions populated with default values.
TFL_CAPI_EXPORT TfLiteHexagonDelegateOptions
TfLiteHexagonDelegateOptionsDefault();

// Does any needed cleanup and deletes 'delegate'.
void TFL_CAPI_EXPORT TfLiteHexagonDelegateDelete(TfLiteDelegate* delegate);

// Initializes the DSP connection.
// This should be called before any usage of the delegate.
// "lib_directory_path": Path to the directory which holds the shared
// libraries for the Hexagon NN libraries on the device.
void TFL_CAPI_EXPORT TfLiteHexagonInitWithPath(const char* lib_directory_path);

// Same as the method above, but doesn't accept the path parameter.
// Assumes the environment setup is already done. Only initializes Hexagon.
void TFL_CAPI_EXPORT TfLiteHexagonInit();

// Cleans up and switches off the DSP connection.
// This should be called after all processing is done and the delegate is
// deleted.
void TFL_CAPI_EXPORT TfLiteHexagonTearDown();
#ifdef __cplusplus
}
#endif  // __cplusplus

#endif  // TENSORFLOW_LITE_DELEGATES_HEXAGON_HEXAGON_DELEGATE_H_
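// Example usage (a sketch for illustration only, not part of this API): the
// flow below assumes a std::unique_ptr<tflite::Interpreter> named
// 'interpreter' has already been built from a model via InterpreterBuilder,
// and that the Hexagon NN shared libraries are already set up on the device
// (otherwise call TfLiteHexagonInitWithPath with their directory instead).
//
//   TfLiteHexagonInit();
//   TfLiteHexagonDelegateOptions options =
//       TfLiteHexagonDelegateOptionsDefault();
//   TfLiteDelegate* delegate = TfLiteHexagonDelegateCreate(&options);
//   if (interpreter->ModifyGraphWithDelegate(delegate) != kTfLiteOk) {
//     // The delegate could not be applied; handle the error or run on CPU.
//   }
//   // ... fill inputs, call interpreter->Invoke(), read outputs ...
//   interpreter.reset();                    // Destroy the interpreter first.
//   TfLiteHexagonDelegateDelete(delegate);  // Then delete the delegate.
//   TfLiteHexagonTearDown();                // Finally, close the DSP connection.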