/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_MODEL_HINTS_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_MODEL_HINTS_H_

#include <cstdint>

namespace tflite {
namespace gpu {

struct ModelHints {
  using ModelHint = uint64_t;

  // By default we want the fastest inference.
  static constexpr ModelHint kFastestInference = 0x00000000;
  // Can reduce compilation time, but inference can be slower.
  static constexpr ModelHint kReduceKernelsCount = 0x00000001;
  // Can reduce tuning time, but inference can be slower.
  static constexpr ModelHint kFastTuning = 0x00000002;

  // Experimental.
  // Can improve performance and reduce memory consumption, but slows down
  // initialization a lot and creates more unique kernels.
  static constexpr ModelHint kAllowSpecialKernels = 0x00000004;

  // Adds a hint bit to the set. Adding kFastestInference clears all
  // previously added hints.
  void Add(ModelHint hint) {
    if (hint == kFastestInference) {
      hints = kFastestInference;
    } else {
      hints |= hint;
    }
  }

  // Returns true if the given hint bit is set.
  bool Check(ModelHint hint) const { return hints & hint; }

  uint64_t hints = kFastestInference;
};

}  // namespace gpu
}  // namespace tflite

#endif  // TENSORFLOW_LITE_DELEGATES_GPU_COMMON_MODEL_HINTS_H_
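
// Illustrative usage sketch (not part of the original header): shows how a
// caller might combine hints before handing them to the GPU backend. The
// surrounding setup code is assumed; only the ModelHints API above is used.
//
//   tflite::gpu::ModelHints hints;
//   hints.Add(tflite::gpu::ModelHints::kReduceKernelsCount);
//   hints.Add(tflite::gpu::ModelHints::kFastTuning);
//
//   // Check() reports whether a particular hint bit is set.
//   if (hints.Check(tflite::gpu::ModelHints::kFastTuning)) {
//     // Tuning will favor speed over peak inference performance.
//   }
//
//   // Adding kFastestInference resets every previously added hint.
//   hints.Add(tflite::gpu::ModelHints::kFastestInference);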