Local declarations of type68 in /frameworks/ml/nn/runtime/test/generated/models/:
| D | conv2d_v1_2.model.cpp | local declaration: OperandType type68(Type::TENSOR_FLOAT16, {3}); |
|   | 4034 | CreateModel_channel_nhwc_float16() |
|   | 4285 | CreateModel_channel_nhwc_weight_as_input_float16() |
|   | 4547 | CreateModel_channel_nchw_float16() |
|   | 4798 | CreateModel_channel_nchw_weight_as_input_float16() |
|   | 5067 | CreateModel_channel_dynamic_output_shape_nhwc_float16() |
|   | 5323 | CreateModel_channel_dynamic_output_shape_nhwc_weight_as_input_float16() |
|   | 5590 | CreateModel_channel_dynamic_output_shape_nchw_float16() |
|   | 5846 | CreateModel_channel_dynamic_output_shape_nchw_weight_as_input_float16() |
|   | 6164 | CreateModel_large_nhwc_float16() |
|   | 6466 | CreateModel_large_nhwc_weight_as_input_float16() |
|   | [all …] |
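
Each generated CreateModel_* function declares operand types like the one above through the test wrapper's OperandType(type, dimensions, ...) constructor. As a minimal, hedged sketch (not taken from the generated file; the function name is illustrative), the same three-element TENSOR_FLOAT16 operand expressed directly against the public NNAPI C API would look roughly like this:

```cpp
#include <android/NeuralNetworks.h>
#include <cstdint>

// Illustrative sketch only: a C-API analogue of the generated declaration
//   OperandType type68(Type::TENSOR_FLOAT16, {3});
// The function name is hypothetical, not part of the generated tests.
void AddFloat16TensorOperand(ANeuralNetworksModel* model) {
    static const uint32_t dims[] = {3};           // one dimension of extent 3
    ANeuralNetworksOperandType type68;
    type68.type = ANEURALNETWORKS_TENSOR_FLOAT16; // half-precision tensor
    type68.dimensionCount = 1;
    type68.dimensions = dims;
    type68.scale = 0.0f;                          // ignored for float tensors
    type68.zeroPoint = 0;                         // ignored for float tensors
    ANeuralNetworksModel_addOperand(model, &type68);
}
```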
|
| D | transpose_conv2d.model.cpp | local declaration: OperandType type68(Type::TENSOR_QUANT8_ASYMM, {1, 1, 2, 2}, 0.25f, 100); |
|   | 3037 | CreateModel_nchw_none_channelQuant8() |
|   | 3084 | CreateModel_nchw_none_channelQuant8_weight_as_input() |
|   | 3132 | CreateModel_nchw_none_channelQuant8_2() |
|   | 3179 | CreateModel_nchw_none_channelQuant8_weight_as_input_2() |
|   | 3699 | CreateModel_nchw_relu_channelQuant8() |
|   | 3746 | CreateModel_nchw_relu_channelQuant8_weight_as_input() |
|   | 3794 | CreateModel_nchw_relu_channelQuant8_2() |
|   | 3841 | CreateModel_nchw_relu_channelQuant8_weight_as_input_2() |
|   | 4361 | CreateModel_nchw_relu1_channelQuant8() |
|   | 4408 | CreateModel_nchw_relu1_channelQuant8_weight_as_input() |
|   | [all …] |
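
The quantized declarations above carry a scale and a zero point in addition to the element type and shape: each stored byte q represents the real value scale * (q - zeroPoint), so with scale 0.25f and zeroPoint 100 the byte 100 means 0.0f and 104 means 1.0f. A minimal sketch of the same TENSOR_QUANT8_ASYMM operand in the NNAPI C API (the function name is made up for illustration):

```cpp
#include <android/NeuralNetworks.h>
#include <cstdint>

// Illustrative sketch of the quantized operand declared above:
//   OperandType type68(Type::TENSOR_QUANT8_ASYMM, {1, 1, 2, 2}, 0.25f, 100);
void AddQuant8Operand(ANeuralNetworksModel* model) {
    static const uint32_t dims[] = {1, 1, 2, 2};
    ANeuralNetworksOperandType type68;
    type68.type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM;
    type68.dimensionCount = 4;
    type68.dimensions = dims;
    type68.scale = 0.25f;    // real = scale * (quantized - zeroPoint)
    type68.zeroPoint = 100;  // the byte value that represents 0.0f
    ANeuralNetworksModel_addOperand(model, &type68);
}
```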
|
| D | grouped_conv2d.model.cpp | local declaration: OperandType type68(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.1f, 80); |
|   | 6755 | CreateModel_dynamic_output_shape_nhwc_none_channelQuant8_2() |
|   | 6810 | CreateModel_dynamic_output_shape_nhwc_none_channelQuant8_weight_as_input_2() |
|   | 7529 | CreateModel_dynamic_output_shape_nhwc_relu_channelQuant8_2() |
|   | 7584 | CreateModel_dynamic_output_shape_nhwc_relu_channelQuant8_weight_as_input_2() |
|   | 8303 | CreateModel_dynamic_output_shape_nhwc_relu1_channelQuant8_2() |
|   | 8358 | CreateModel_dynamic_output_shape_nhwc_relu1_channelQuant8_weight_as_input_2() |
|   | 9077 | CreateModel_dynamic_output_shape_nhwc_relu6_channelQuant8_2() |
|   | 9132 | CreateModel_dynamic_output_shape_nhwc_relu6_channelQuant8_weight_as_input_2() |
|   | 9851 | CreateModel_dynamic_output_shape_nchw_none_channelQuant8_2() |
|   | 9906 | CreateModel_dynamic_output_shape_nchw_none_channelQuant8_weight_as_input_2() |
|   | [all …] |
|
| D | avg_pool_v1_2.model.cpp | local declaration: OperandType type68(Type::FLOAT16, {}); |
|   | 4397 | CreateModel_zero_sized_nhwc_float16() |
|   | 4846 | CreateModel_zero_sized_nchw_float16() |
|   | 5297 | CreateModel_zero_sized_dynamic_output_shape_nhwc_float16() |
|   | 5746 | CreateModel_zero_sized_dynamic_output_shape_nchw_float16() |
|   | 6166 | CreateModel_zero_sized_nhwc_float16_2() |
|   | 6575 | CreateModel_zero_sized_nchw_float16_2() |
|   | 6990 | CreateModel_zero_sized_dynamic_output_shape_nhwc_float16_2() |
|   | 7403 | CreateModel_zero_sized_dynamic_output_shape_nchw_float16_2() |
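
Here the empty dimension list marks a scalar (rank-0) FLOAT16 operand rather than a tensor. A minimal sketch of the equivalent description in the NNAPI C API, again with an illustrative function name; scalars use a dimension count of 0 and a null dimensions pointer:

```cpp
#include <android/NeuralNetworks.h>

// Illustrative sketch of the scalar operand declared above:
//   OperandType type68(Type::FLOAT16, {});
void AddFloat16ScalarOperand(ANeuralNetworksModel* model) {
    ANeuralNetworksOperandType type68;
    type68.type = ANEURALNETWORKS_FLOAT16;  // scalar half-float, not a tensor type
    type68.dimensionCount = 0;              // rank 0: a scalar
    type68.dimensions = nullptr;
    type68.scale = 0.0f;
    type68.zeroPoint = 0;
    ANeuralNetworksModel_addOperand(model, &type68);
}
```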
|
| D | depthwise_conv2d_v1_2.model.cpp | local declaration: OperandType type68(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 0.25f, 0); |
|   | 6875 | CreateModel_large_nhwc_quant8_2() |
|   | 7147 | CreateModel_large_nhwc_weight_as_input_quant8_2() |
|   | 7425 | CreateModel_large_nchw_quant8_2() |
|   | 7700 | CreateModel_large_nchw_weight_as_input_quant8_2() |
|   | 7977 | CreateModel_large_dynamic_output_shape_nhwc_quant8_2() |
|   | 8249 | CreateModel_large_dynamic_output_shape_nhwc_weight_as_input_quant8_2() |
|   | 8527 | CreateModel_large_dynamic_output_shape_nchw_quant8_2() |
|   | 8802 | CreateModel_large_dynamic_output_shape_nchw_weight_as_input_quant8_2() |
|
| D | resize_nearest_neighbor.model.cpp | local declaration: OperandType type68(Type::TENSOR_QUANT16_ASYMM, {1, 8}, 0.125f, 0); |
|   | 8260 | CreateModel_zero_sized_nhwc_quant8() |
|   | 8624 | CreateModel_zero_sized_nchw_quant8() |
|   | 8991 | CreateModel_zero_sized_dynamic_output_shape_nhwc_quant8() |
|   | 9356 | CreateModel_zero_sized_dynamic_output_shape_nchw_quant8() |
|   | 9724 | CreateModel_zero_sized_nhwc_quant8_2() |
|   | 10088 | CreateModel_zero_sized_nchw_quant8_2() |
|   | 10455 | CreateModel_zero_sized_dynamic_output_shape_nhwc_quant8_2() |
|   | 10820 | CreateModel_zero_sized_dynamic_output_shape_nchw_quant8_2() |
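
TENSOR_QUANT16_ASYMM is the 16-bit asymmetric quantized type introduced with NNAPI 1.2; its values decode with the same affine rule as the 8-bit type, just over uint16_t storage. A small hedged helper (purely illustrative, not part of the generated tests) showing how a value with scale 0.125f and zeroPoint 0, as declared above, maps back to a real number:

```cpp
#include <cstdint>

// Illustrative helper: decode a TENSOR_QUANT16_ASYMM value using the
// affine rule real = scale * (quantized - zeroPoint). With scale = 0.125f
// and zeroPoint = 0, a stored value of 16 represents 2.0f.
float DequantizeQuant16Asymm(uint16_t q, float scale, int32_t zeroPoint) {
    return scale * (static_cast<int32_t>(q) - zeroPoint);
}
```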
|
| D | max_pool_v1_2.model.cpp | local declaration: OperandType type68(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.1f, 128); |
|   | 4349 | CreateModel_zero_sized_dynamic_output_shape_nhwc_quant8() |
|   | 4799 | CreateModel_zero_sized_dynamic_output_shape_nchw_quant8() |
|   | 6051 | CreateModel_zero_sized_dynamic_output_shape_nhwc_quant8_2() |
|   | 6465 | CreateModel_zero_sized_dynamic_output_shape_nchw_quant8_2() |
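
In the zero_sized and dynamic_output_shape variants listed above, every extent of the operand is 0, the convention NNAPI uses for dimensions that are not fixed when the model is built and are only resolved (or found to be empty) at execution time. A minimal sketch of such an operand in the C API, with an illustrative function name:

```cpp
#include <android/NeuralNetworks.h>
#include <cstdint>

// Illustrative sketch of the operand declared above:
//   OperandType type68(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.1f, 128);
// A dimension of 0 marks an extent that is unspecified at build time.
void AddUnspecifiedShapeQuant8Operand(ANeuralNetworksModel* model) {
    static const uint32_t dims[] = {0, 0, 0, 0};
    ANeuralNetworksOperandType type68;
    type68.type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM;
    type68.dimensionCount = 4;
    type68.dimensions = dims;
    type68.scale = 0.1f;
    type68.zeroPoint = 128;
    ANeuralNetworksModel_addOperand(model, &type68);
}
```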
|
| D | softmax_v1_2.model.cpp | local declaration: OperandType type68(Type::TENSOR_QUANT16_ASYMM, {0, 4}, 0.125f, 0); |
|   | 10345 | CreateModel_zero_sized_quant8() |
|   | 10698 | CreateModel_zero_sized_dynamic_output_shape_quant8() |
|
| D | roi_align.model.cpp | local declaration: OperandType type68(Type::TENSOR_FLOAT16, {5, 1, 2, 2}); |
|   | 2788 | CreateModel_nchw_float16_4() |
|