
Searched refs:CpuBackendContext (Results 1 – 25 of 59) sorted by relevance


/external/tensorflow/tensorflow/lite/kernels/
cpu_backend_context.cc
47 CpuBackendContext::CpuInfo::~CpuInfo() { in ~CpuInfo()
53 bool CpuBackendContext::CpuInfo::EnsureInitialized() { in EnsureInitialized()
60 CpuBackendContext::CpuInfo::InitStatus
61 CpuBackendContext::CpuInfo::Initialize() { in Initialize()
69 bool CpuBackendContext::CpuInfo::Avx2Fma() { in Avx2Fma()
74 bool CpuBackendContext::CpuInfo::Avx() { in Avx()
78 bool CpuBackendContext::CpuInfo::Avx512() { in Avx512()
85 CpuBackendContext::CpuInfo::~CpuInfo() {} in ~CpuInfo()
87 bool CpuBackendContext::CpuInfo::EnsureInitialized() { in EnsureInitialized()
95 bool CpuBackendContext::CpuInfo::Avx2Fma() { return false; } in Avx2Fma()
[all …]
cpu_backend_context.h
33 class CpuBackendContext final : public TfLiteInternalBackendContext {
35 static CpuBackendContext* GetFromContext(TfLiteContext* context);
37 CpuBackendContext();
38 ~CpuBackendContext() override;
118 CpuBackendContext(const CpuBackendContext&) = delete;
add_n.cc
72 CpuBackendContext* cpu_backend_context = in Prepare()
73 CpuBackendContext::GetFromContext(context); in Prepare()
114 CpuBackendContext* cpu_backend_context = in EvalAddN()
115 CpuBackendContext::GetFromContext(context); in EvalAddN()
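The add_n.cc hits above show the access pattern used throughout these kernels: each kernel retrieves the shared, per-interpreter CpuBackendContext through the static GetFromContext() declared in cpu_backend_context.h. A minimal sketch of that pattern, assuming a hypothetical kernel entry point (only the GetFromContext() call is taken from the snippets above):

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"

namespace tflite {

// Hypothetical kernel Eval; real kernels (add_n.cc, mirror_pad.cc) do the
// same lookup in Prepare() and Eval().
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  CpuBackendContext* cpu_backend_context =
      CpuBackendContext::GetFromContext(context);
  // The backend context bundles shared CPU-kernel state (thread pool, gemm
  // backends, CpuInfo feature detection) so kernels need not each create
  // their own.
  (void)cpu_backend_context;
  (void)node;
  return kTfLiteOk;
}

}  // namespace tflite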
cpu_backend_threadpool.h
38 CpuBackendContext* cpu_backend_context) { in Execute()
50 CpuBackendContext* cpu_backend_context) {
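cpu_backend_threadpool.h supplies the Execute() template seen above: it runs tasks_count caller-defined tasks on the context's thread pool and returns only once all of them have finished. A minimal sketch, assuming the Task interface that cpu_backend_threadpool_test.cc exercises below; FillTask and FillInParallel are hypothetical names:

#include <vector>

#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_threadpool.h"

// Hypothetical task: fills data_[begin_, end_) with each index value.
class FillTask : public tflite::cpu_backend_threadpool::Task {
 public:
  FillTask(int* data, int begin, int end)
      : data_(data), begin_(begin), end_(end) {}
  void Run() override {
    for (int i = begin_; i < end_; ++i) data_[i] = i;
  }

 private:
  int* data_;
  int begin_;
  int end_;
};

void FillInParallel(int* data, int size, tflite::CpuBackendContext* context) {
  const int num_tasks = 4;  // illustrative; real kernels size this by cost
  std::vector<FillTask> tasks;
  tasks.reserve(num_tasks);
  for (int t = 0; t < num_tasks; ++t) {
    tasks.emplace_back(data, size * t / num_tasks, size * (t + 1) / num_tasks);
  }
  // Blocks until every task's Run() has completed.
  tflite::cpu_backend_threadpool::Execute(num_tasks, tasks.data(), context);
}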
cpu_backend_gemm.h
120 CpuBackendContext* context) { in Gemm()
180 CpuBackendContext* context) { in Gemm()
205 CpuBackendContext* context) { in Gemm()
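cpu_backend_gemm.h is where most of the compute-heavy matches land: a templated Gemm() that takes three MatrixParams describing operand shapes and layouts, a GemmParams carrying bias, clamping, and quantization options, and the CpuBackendContext for threading. A hedged float-path sketch following the layout convention the TFLite kernels use (row-major LHS, column-major RHS and destination); GemmFloatExample is a hypothetical wrapper:

#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_params.h"

// Hypothetical wrapper: dst (m x n) = lhs (m x k) * rhs (k x n).
void GemmFloatExample(const float* lhs, const float* rhs, float* dst, int m,
                      int k, int n, tflite::CpuBackendContext* context) {
  using tflite::cpu_backend_gemm::GemmParams;
  using tflite::cpu_backend_gemm::MatrixParams;
  using tflite::cpu_backend_gemm::Order;

  MatrixParams<float> lhs_params;
  lhs_params.order = Order::kRowMajor;
  lhs_params.rows = m;
  lhs_params.cols = k;

  MatrixParams<float> rhs_params;
  rhs_params.order = Order::kColMajor;
  rhs_params.rows = k;
  rhs_params.cols = n;

  MatrixParams<float> dst_params;
  dst_params.order = Order::kColMajor;
  dst_params.rows = m;
  dst_params.cols = n;

  // Default-constructed GemmParams<float, float>: floating-point flavor,
  // no bias, no clamping.
  GemmParams<float, float> gemm_params;
  tflite::cpu_backend_gemm::Gemm(lhs_params, lhs, rhs_params, rhs, dst_params,
                                 dst, gemm_params, context);
}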
cpu_backend_gemm_x86.h
43 CpuBackendContext* context) { in Run()
73 CpuBackendContext* context) {
fully_connected.cc
570 CpuBackendContext::GetFromContext(context)); in EvalHybridDense()
725 CpuBackendContext* cpu_backend_context = in EvalHybrid()
726 CpuBackendContext::GetFromContext(context); in EvalHybrid()
795 CpuBackendContext* cpu_backend_context) { in FullyConnectedInt8()
852 CpuBackendContext* cpu_backend_context) { in FullyConnectedPerChannelInt8()
1000 CpuBackendContext::GetFromContext(context)); in EvalQuantized()
1032 CpuBackendContext::GetFromContext(context)); in EvalQuantized()
1041 CpuBackendContext::GetFromContext(context)) in EvalQuantized()
1044 CpuBackendContext::GetFromContext(context)); in EvalQuantized()
1069 CpuBackendContext::GetFromContext(context)) in EvalQuantized()
[all …]
lstm_eval.h
124 TfLiteTensor* cell_state, TfLiteTensor* output, CpuBackendContext* context);
170 bool* compute_row_sums, CpuBackendContext* context);
196 CpuBackendContext* context);
cpu_backend_gemm_eigen.h
33 CpuBackendContext* /* context */);
gru_cell.h
39 tflite::CpuBackendContext* cpu_backend_context);
cpu_backend_threadpool_test.cc
65 CpuBackendContext context; in TestGenerateArrayOfIncrementingInts()
cpu_backend_gemm_gemmlowp.h
87 CpuBackendContext* context) {
144 CpuBackendContext* context) {
cpu_backend_gemm_eigen.cc
41 const GemmParams<float, float>& params, CpuBackendContext* /* context */) { in Run() argument
mirror_pad.cc
201 CpuBackendContext* cpu_backend_context = in Eval()
202 CpuBackendContext::GetFromContext(context); in Eval()
lstm_eval.cc
48 CpuBackendContext* cpu_backend_context) { in MatrixBatchVectorMultiplyAccumulate()
191 float* output, CpuBackendContext* context) { in CalculateLstmGateFloat()
309 CpuBackendContext* context) { in CalculateLstmOutputFloat()
371 CpuBackendContext* context, in CalculateLstmGateHybrid()
481 CpuBackendContext* context, float* scratch0, int8_t* scratch1, in CalculateLstmOutputHybrid()
553 CpuBackendContext* context, in CalculateLstmGateInteger8x8_16()
665 CpuBackendContext* context, int16_t* scratch0, int8_t* scratch1, in CalculateLstmOutputInteger8x8_16()
878 float* output_ptr, CpuBackendContext* context) { in LstmStepFloat()
1091 CpuBackendContext* context) { in LstmStepHybrid()
1435 int8_t* scratch4, int32_t* scratch5, CpuBackendContext* context) { in LstmStepInteger8x8_16()
[all …]
/external/tensorflow/tensorflow/lite/kernels/internal/
tensor_utils.h
35 class CpuBackendContext; variable
47 CpuBackendContext* __restrict__ context);
55 bool* compute_row_sums, CpuBackendContext* context);
67 CpuBackendContext* context) { in MatrixBatchVectorMultiplyAccumulate()
108 int32_t* scratch, int16_t* output, CpuBackendContext* context);
139 int32_t* scratch, int8_t* output, CpuBackendContext* context);
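tensor_utils.h declares the MatrixBatchVectorMultiplyAccumulate family that accounts for most of the remaining hits; the NEON, SSE, and portable headers below are its per-architecture implementations, and the CpuBackendContext* parameter feeds threading and scratch state to the quantized overloads. A hedged sketch of the reference semantics only (not one of the real overloads): each batch accumulates matrix * vector into its slice of the result.

// result[b] += matrix (m_rows x m_cols, row-major) * vector[b], for each of
// n_batch vectors of length m_cols. Illustrative function name.
void MatrixBatchVectorMultiplyAccumulateRef(const float* matrix, int m_rows,
                                            int m_cols, const float* vectors,
                                            int n_batch, float* results) {
  for (int b = 0; b < n_batch; ++b) {
    const float* vector = vectors + b * m_cols;
    float* result = results + b * m_rows;
    for (int r = 0; r < m_rows; ++r) {
      const float* row = matrix + r * m_cols;
      float acc = result[r];
      for (int c = 0; c < m_cols; ++c) acc += row[c] * vector[c];
      result[r] = acc;
    }
  }
}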
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/
sse_tensor_utils_impl.h
42 bool* compute_row_sums, CpuBackendContext* context);
60 float* __restrict__ result, CpuBackendContext* context);
69 bool* compute_row_sums, CpuBackendContext* context);
neon_tensor_utils_impl.h
52 CpuBackendContext* context);
60 bool* compute_row_sums, CpuBackendContext* context);
94 int32_t* scratch, int8_t* output, CpuBackendContext* context);
100 int32_t* scratch, int16_t* output, CpuBackendContext* context);
neon_tensor_utils.h
50 CpuBackendContext* context) { in MatrixBatchVectorMultiplyAccumulate()
60 bool* compute_row_sums, CpuBackendContext* context) { in MatrixBatchVectorMultiplyAccumulate()
109 int32_t* scratch, int16_t* output, CpuBackendContext* context) { in MatrixBatchVectorMultiplyAccumulate()
119 int32_t* scratch, int8_t* output, CpuBackendContext* context) { in MatrixBatchVectorMultiplyAccumulate()
sse_tensor_utils.h
63 bool* compute_row_sums, CpuBackendContext* context) { in MatrixBatchVectorMultiplyAccumulate()
74 CpuBackendContext* __restrict__ context) { in MatrixBatchVectorMultiplyAccumulate()
123 int32_t* scratch, int16_t* output, CpuBackendContext* context) { in MatrixBatchVectorMultiplyAccumulate()
133 int32_t* scratch, int8_t* output, CpuBackendContext* context) { in MatrixBatchVectorMultiplyAccumulate()
batch_matmul.h
31 CpuBackendContext* context) { in BatchMatMul()
121 bool* compute_row_sums, CpuBackendContext* context) { in BatchMatMul()
275 CpuBackendContext* context) { in BatchMatMul()
381 CpuBackendContext* context) { in BatchMatMul()
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/sparse_ops/
fully_connected.h
85 int thread_end, const CpuBackendContext& cpu_backend_context) { in FullyConnectedSparseWeight1x16Impl()
121 int thread_end, const CpuBackendContext& cpu_backend_context) { in FullyConnectedSparseWeight1x4Impl()
161 int thread_end, const CpuBackendContext& cpu_backend_context_x) in FullyConnectedSparseWeight1x4Task()
196 const CpuBackendContext& cpu_backend_context;
205 CpuBackendContext* cpu_backend_context) { in FullyConnectedSparseWeight1x16()
229 CpuBackendContext* cpu_backend_context) { in FullyConnectedSparseWeight1x4()
/external/tensorflow/tensorflow/lite/kernels/internal/reference/
portable_tensor_utils_impl.h
29 class CpuBackendContext; variable
72 bool* compute_row_sums, CpuBackendContext* context);
78 CpuBackendContext* context);
121 int32_t* scratch, int16_t* output, CpuBackendContext* context);
127 int32_t* scratch, int8_t* output, CpuBackendContext* context);
portable_tensor_utils.h
80 bool* compute_row_sums, CpuBackendContext* context) { in MatrixBatchVectorMultiplyAccumulate()
93 CpuBackendContext* context) { in MatrixBatchVectorMultiplyAccumulate()
142 int32_t* scratch, int16_t* output, CpuBackendContext* context) { in MatrixBatchVectorMultiplyAccumulate()
152 int32_t* scratch, int8_t* output, CpuBackendContext* context) { in MatrixBatchVectorMultiplyAccumulate()
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/integer_ops/
fully_connected.h
37 DstScalar* output_data, CpuBackendContext* cpu_backend_context) { in FullyConnectedPerChannel()
106 DstScalar* output_data, CpuBackendContext* cpu_backend_context) { in FullyConnected()
