
Searched refs:in0 (Results 1 – 22 of 22) sorted by relevance

/packages/modules/NeuralNetworks/runtime/test/specs/V1_3/
fully_connected_quant8_signed.mod.py
   18  in0 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{4, 1, 5, 1}, 0.5f, -1")   [variable]
   26  model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act_relu).To(out0)
   29  input0 = {in0: # input 0
   41  in0 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 5}, 0.2, -128")  # batch = 1, input_size = 5   [variable]
   46  model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
   49  input0 = {in0: # input 0
   60  in0 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 5}, 0.2, -128")  # batch = 1, input_size = 5   [variable]
   65  model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
   68  input0 = {in0: # input 0
   83  in0 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{3, 1}, 0.5f, -128")   [variable]
[all …]
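
For context, each of these hits is a fragment of an NNAPI test-generator spec (*.mod.py). Below is a minimal sketch of what one quant8_signed fully_connected spec looks like end to end, assuming the standard test_generator DSL (Model, Input, Parameter, Int32Scalar, Output, Example); the weight, bias, and example values are illustrative, not the contents of any file listed above.

    # Sketch only: operand values are made up, but chosen to be numerically consistent.
    model = Model()
    in0 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{3, 1}, 0.5f, -128")      # activations
    weights = Parameter("op2", "TENSOR_QUANT8_ASYMM_SIGNED",
                        "{1, 1}, 0.5f, -128", [-124])                           # real value 2.0
    bias = Parameter("b0", "TENSOR_INT32", "{1}, 0.25f, 0", [16])               # real value 4.0
    act = Int32Scalar("act", 0)                                                 # fused activation: NONE
    out0 = Output("op3", "TENSOR_QUANT8_ASYMM_SIGNED", "{3, 1}, 1.f, -127")
    model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)

    input0 = {in0: [-124, -64, -96]}    # dequantized: [2, 32, 16]
    output0 = {out0: [-119, -59, -91]}  # dequantized: 2*x + 4 = [8, 68, 36]
    Example((input0, output0))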
/packages/services/BuiltInPrintService/jni/plugins/
wprint_scaler.c
  430  static inline void _scale_row_down_9in(uint8 *_RESTRICT_ in0, uint8 *_RESTRICT_ in1,   [argument, in _scale_row_down_9in()]
  449  acc_r += (uint32) in0[(in_col * 3) + 0] * curr_weight * top_weight;   [in _scale_row_down_9in()]
  459  acc_g += (uint32) in0[(in_col * 3) + 1] * curr_weight * top_weight;   [in _scale_row_down_9in()]
  469  acc_b += (uint32) in0[(in_col * 3) + 2] * curr_weight * top_weight;   [in _scale_row_down_9in()]
  493  static inline void _scale_row_down_8in(uint8 *_RESTRICT_ in0, uint8 *_RESTRICT_ in1,   [argument, in _scale_row_down_8in()]
  513  acc_r += (uint32) in0[(in_col * 3) + 0] * curr_weight * top_weight;   [in _scale_row_down_8in()]
  522  acc_g += (uint32) in0[(in_col * 3) + 1] * curr_weight * top_weight;   [in _scale_row_down_8in()]
  531  acc_b += (uint32) in0[(in_col * 3) + 2] * curr_weight * top_weight;   [in _scale_row_down_8in()]
  554  static inline void _scale_row_down_7in(uint8 *_RESTRICT_ in0, uint8 *_RESTRICT_ in1,   [argument, in _scale_row_down_7in()]
  572  acc_r += (uint32) in0[(in_col * 3) + 0] * curr_weight * top_weight;   [in _scale_row_down_7in()]
[all …]
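
The wprint_scaler.c hits all show the same pattern: when scaling a page down, each output pixel accumulates contributions from several interleaved-RGB input rows (in0, in1, …), each sample weighted by a per-column weight and a per-row weight. A rough Python sketch of that accumulation follows, with hypothetical weight tables and a simple integer normalization in place of the fixed-point handling the C code does.

    def scale_rows_down(rows, row_weights, col_weights, weight_sum):
        """rows: interleaved-RGB byte rows contributing to one output row;
        row_weights: one weight per row; col_weights: for each output column,
        a list of (in_col, weight) pairs; weight_sum: total weight for normalization."""
        out = bytearray()
        for contrib in col_weights:                      # one entry per output column
            acc_r = acc_g = acc_b = 0
            for row, row_w in zip(rows, row_weights):
                for in_col, col_w in contrib:
                    acc_r += row[in_col * 3 + 0] * col_w * row_w
                    acc_g += row[in_col * 3 + 1] * col_w * row_w
                    acc_b += row[in_col * 3 + 2] * col_w * row_w
            out += bytes((min(acc_r // weight_sum, 255),
                          min(acc_g // weight_sum, 255),
                          min(acc_b // weight_sum, 255)))
        return out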
/packages/modules/NeuralNetworks/runtime/test/specs/V1_2/
fully_connected_v1_2.mod.py
   19  in0 = Input("op1", "TENSOR_FLOAT32", "{3, 1}")   [variable]
   24  model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
   27  in0: ("TENSOR_QUANT8_ASYMM", 0.5, 127),
   34  input0 = {in0: # input 0
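
Line 27 of fully_connected_v1_2.mod.py belongs to a type variation: in V1_2 specs a single float spec is also emitted as a quantized test by mapping each operand to a (type, scale, zeroPoint) triple. A hedged sketch of that mechanism, assuming the test_generator's DataTypeConverter/AddVariations API; the exact operand mapping shown is an assumption, not read from the file.

    # Sketch: re-emit the float example as a TENSOR_QUANT8_ASYMM test.
    quant8 = DataTypeConverter().Identify({
        in0: ("TENSOR_QUANT8_ASYMM", 0.5, 127),
        weights: ("TENSOR_QUANT8_ASYMM", 0.5, 127),
        bias: ("TENSOR_INT32", 0.25, 0),
        out0: ("TENSOR_QUANT8_ASYMM", 1.0, 127),
    })
    Example((input0, output0)).AddVariations("relaxed", quant8)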
/packages/modules/NeuralNetworks/runtime/test/specs/V1_0/
fully_connected_float.mod.py
   18  in0 = Input("op1", "TENSOR_FLOAT32", "{3, 1}")   [variable]
   23  model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
   26  input0 = {in0: # input 0
fully_connected_quant8.mod.py
   18  in0 = Input("op1", "TENSOR_QUANT8_ASYMM", "{3, 1}, 0.5f, 0")   [variable]
   23  model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
   26  input0 = {in0: # input 0
fully_connected_float_2.mod.py
   18  in0 = Input("op1", "TENSOR_FLOAT32", "{2, 8}")   [variable]
   48  model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act_relu).To(out0)
   51  input0 = {in0: # input 0
fully_connected_quant8_2.mod.py
   18  in0 = Input("op1", "TENSOR_QUANT8_ASYMM", "{4, 1, 5, 1}, 0.5f, 127")   [variable]
   26  model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act_relu).To(out0)
   29  input0 = {in0: # input 0
fully_connected_float_3.mod.py
   18  in0 = Input("op1", "TENSOR_FLOAT32", "{2, 2}")   [variable]
   23  model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
   26  input0 = {in0: # input 0
fully_connected_quant8_large.mod.py
   18  in0 = Input("op1", "TENSOR_QUANT8_ASYMM", "{1, 5}, 0.2, 0")  # batch = 1, input_size = 5   [variable]
   23  model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
   26  input0 = {in0: # input 0
fully_connected_float_large.mod.py
   18  in0 = Input("op1", "TENSOR_FLOAT32", "{1, 5}")  # batch = 1, input_size = 5   [variable]
   23  model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
   26  input0 = {in0: # input 0
fully_connected_float_large_weights_as_inputs.mod.py
   18  in0 = Input("op1", "TENSOR_FLOAT32", "{1, 5}")  # batch = 1, input_size = 5   [variable]
   23  model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
   26  input0 = {in0: # input 0
fully_connected_quant8_weights_as_inputs.mod.py
   18  in0 = Input("op1", "TENSOR_QUANT8_ASYMM", "{3, 1}, 0.5f, 0")   [variable]
   23  model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
   26  input0 = {in0: # input 0
fully_connected_quant8_large_weights_as_inputs.mod.py
   18  in0 = Input("op1", "TENSOR_QUANT8_ASYMM", "{1, 5}, 0.2, 0")  # batch = 1, input_size = 5   [variable]
   23  model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
   26  input0 = {in0: # input 0
fully_connected_float_weights_as_inputs.mod.py
   18  in0 = Input("op1", "TENSOR_FLOAT32", "{3, 1}")   [variable]
   23  model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
   26  input0 = {in0: # input 0
/packages/modules/NeuralNetworks/runtime/test/specs/V1_1/
fully_connected_float_4d_simple.mod.py
   22  in0 = Input("op1", "TENSOR_FLOAT32", "{4, 1, 5, 1}")   [variable]
   31  model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
   34  input0 = {in0: # input 0
fully_connected_float_2_relaxed.mod.py
   18  in0 = Input("op1", "TENSOR_FLOAT32", "{2, 8}")   [variable]
   48  model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act_relu).To(out0)
   52  input0 = {in0: # input 0
fully_connected_float_4d_simple_relaxed.mod.py
   22  in0 = Input("op1", "TENSOR_FLOAT32", "{4, 1, 5, 1}")   [variable]
   31  model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
   35  input0 = {in0: # input 0
fully_connected_float_large_relaxed.mod.py
   18  in0 = Input("op1", "TENSOR_FLOAT32", "{1, 5}")  # batch = 1, input_size = 5   [variable]
   23  model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
   27  input0 = {in0: # input 0
fully_connected_float_relaxed.mod.py
   18  in0 = Input("op1", "TENSOR_FLOAT32", "{3, 1}")   [variable]
   23  model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
   27  input0 = {in0: # input 0
fully_connected_float_weights_as_inputs_relaxed.mod.py
   18  in0 = Input("op1", "TENSOR_FLOAT32", "{3, 1}")   [variable]
   23  model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
   27  input0 = {in0: # input 0
fully_connected_float_large_weights_as_inputs_relaxed.mod.py
   18  in0 = Input("op1", "TENSOR_FLOAT32", "{1, 5}")  # batch = 1, input_size = 5   [variable]
   23  model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
   27  input0 = {in0: # input 0
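
The *_relaxed variants in this V1_1 directory mirror the V1_0 specs; judging from these hits, the substantive difference is a relaxed-precision flag set on the model. A minimal sketch, assuming the test_generator's RelaxedExecution toggle:

    model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
    model = model.RelaxedExecution(True)   # FP16-tolerant variant of the same test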
/packages/modules/NeuralNetworks/runtime/test/
TestValidation.cpp
  1299  float in0[] = {0.0f, 0.0f}, in1[] = {1.0f, 1.0f}, out0[2];   [local, in TEST_F()]
  1301  ASSERT_EQ(ANeuralNetworksExecution_setInput(execution, 0, nullptr, &in0, sizeof(in0)),   [in TEST_F()]
  1311  const size_t memorySize = std::max(sizeof(in0), sizeof(out0));   [in TEST_F()]
  1319  auto testTooLate = [this, execution, &in0, &out0, memory] {   [in TEST_F()]
  1338  ANeuralNetworksExecution_setInput(execution, 0, nullptr, &in0, sizeof(in0)),   [in TEST_F()]
  1344  0, sizeof(in0)),   [in TEST_F()]
  1490  float in0[] = {0.0f, 0.0f}, in1[] = {1.0f, 1.0f}, out0[2];   [local, in testConcurrentExecution()]
  1492  ASSERT_EQ(ANeuralNetworksExecution_setInput(execution, 0, nullptr, &in0, sizeof(in0)),   [in testConcurrentExecution()]
  3139  float in0[] = {0.0f, 0.0f}, in1[] = {1.0f, 1.0f}, out0[2];   [local, in TEST_F()]
  3141  ASSERT_EQ(ANeuralNetworksExecution_setInput(execution, 0, nullptr, &in0, sizeof(in0)),   [in TEST_F()]
[all …]