/external/XNNPACK/src/operators/
D | convolution-nhwc.c |
      114  xnn_operator_t convolution_op)  in generate_gemms_up_to_max_mr() argument
      117  if (convolution_op->code_cache == NULL) {  in generate_gemms_up_to_max_mr()
      120  convolution_op->ukernel.gemm.gemm_cases[0].generated_code_offset[XNN_UARCH_DEFAULT] =  in generate_gemms_up_to_max_mr()
      122  log2_input_element_size, convolution_op->code_cache);  in generate_gemms_up_to_max_mr()
      124  convolution_op->ukernel.gemm.gemm_cases[mr - 1].generated_code_offset[XNN_UARCH_DEFAULT] =  in generate_gemms_up_to_max_mr()
      126  log2_input_element_size, convolution_op->code_cache);  in generate_gemms_up_to_max_mr()
      180  xnn_operator_t convolution_op)  in generate_igemms_up_to_max_mr() argument
      183  if (convolution_op->code_cache == NULL) {  in generate_igemms_up_to_max_mr()
      186  convolution_op->ukernel.igemm.igemm_cases[0].generated_code_offset[XNN_UARCH_DEFAULT] =  in generate_igemms_up_to_max_mr()
      188  log2_input_element_size, kernel_size, 1, convolution_op->code_cache);  in generate_igemms_up_to_max_mr()
      [all …]
|
D | convolution-nchw.c |
      51   xnn_operator_t convolution_op = NULL;  in xnn_create_convolution2d_nchw_f32() local
      214  convolution_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));  in xnn_create_convolution2d_nchw_f32()
      215  if (convolution_op == NULL) {  in xnn_create_convolution2d_nchw_f32()
      223  convolution_op->weights_cache = caches->weights_cache;  in xnn_create_convolution2d_nchw_f32()
      300  convolution_op->packed_weights.pointer = xnn_allocate_simd_memory(packed_weights_size);  in xnn_create_convolution2d_nchw_f32()
      301  if (convolution_op->packed_weights.pointer == NULL) {  in xnn_create_convolution2d_nchw_f32()
      307  convolution_op->num_nonzero_values = num_nonzero_values;  in xnn_create_convolution2d_nchw_f32()
      308  convolution_op->num_nonzero_blocks = num_nonzero_blocks;  in xnn_create_convolution2d_nchw_f32()
      309  convolution_op->num_output_channel_blocks = num_output_channel_blocks;  in xnn_create_convolution2d_nchw_f32()
      311  float* nonzero_values = convolution_op->packed_weights.pointer;  in xnn_create_convolution2d_nchw_f32()
      [all …]
|
/external/XNNPACK/bench/ |
D | f16-igemm.cc |
      98   xnn_operator convolution_op = { };  in f16_igemm() local
      99   convolution_op.indirection_buffer = reinterpret_cast<const void**>(i.data());  in f16_igemm()
      100  convolution_op.input = a.data();  in f16_igemm()
      101  convolution_op.input_pixel_stride = input_pixel_stride;  in f16_igemm()
      102  convolution_op.zero_buffer = z.data();  in f16_igemm()
      103  convolution_op.groups = 1;  in f16_igemm()
      104  convolution_op.group_input_channels = group_input_channels;  in f16_igemm()
      105  convolution_op.batch_size = 1;  in f16_igemm()
      106  convolution_op.input_height = input_height;  in f16_igemm()
      107  convolution_op.input_width = input_width;  in f16_igemm()
      [all …]
|
D | f16-dwconv.cc |
      103  xnn_operator convolution_op = { };  in f16_dwconv() local
      104  convolution_op.indirection_buffer = reinterpret_cast<const void**>(i.data());  in f16_dwconv()
      105  convolution_op.input = a.data();  in f16_dwconv()
      106  convolution_op.input_pixel_stride = channels;  in f16_dwconv()
      107  convolution_op.zero_buffer = z.data();  in f16_dwconv()
      108  convolution_op.input_height = input_height;  in f16_dwconv()
      109  convolution_op.input_width = input_width;  in f16_dwconv()
      110  convolution_op.output_height = output_height;  in f16_dwconv()
      111  convolution_op.output_width = output_width;  in f16_dwconv()
      112  convolution_op.kernel_height = kernel_height;  in f16_dwconv()
      [all …]
|
D | f32-igemm.cc |
      95   xnn_operator convolution_op = { };  in f32_igemm() local
      96   convolution_op.indirection_buffer = reinterpret_cast<const void**>(i.data());  in f32_igemm()
      97   convolution_op.input = a.data();  in f32_igemm()
      98   convolution_op.input_pixel_stride = input_pixel_stride;  in f32_igemm()
      99   convolution_op.zero_buffer = z.data();  in f32_igemm()
      100  convolution_op.groups = 1;  in f32_igemm()
      101  convolution_op.group_input_channels = group_input_channels;  in f32_igemm()
      102  convolution_op.batch_size = 1;  in f32_igemm()
      103  convolution_op.input_height = input_height;  in f32_igemm()
      104  convolution_op.input_width = input_width;  in f32_igemm()
      [all …]
|
D | f32-dwconv.cc |
      95   xnn_operator convolution_op = { };  in f32_dwconv() local
      96   convolution_op.indirection_buffer = reinterpret_cast<const void**>(i.data());  in f32_dwconv()
      97   convolution_op.input = a.data();  in f32_dwconv()
      98   convolution_op.input_pixel_stride = channels;  in f32_dwconv()
      99   convolution_op.zero_buffer = z.data();  in f32_dwconv()
      100  convolution_op.input_height = input_height;  in f32_dwconv()
      101  convolution_op.input_width = input_width;  in f32_dwconv()
      102  convolution_op.output_height = output_height;  in f32_dwconv()
      103  convolution_op.output_width = output_width;  in f32_dwconv()
      104  convolution_op.kernel_height = kernel_height;  in f32_dwconv()
      [all …]
|
D | qs8-dwconv.cc |
      101  xnn_operator convolution_op = { };  in DWConvBenchmark() local
      102  convolution_op.indirection_buffer = reinterpret_cast<const void**>(i.data());  in DWConvBenchmark()
      103  convolution_op.input = a.data();  in DWConvBenchmark()
      104  convolution_op.input_pixel_stride = channels;  in DWConvBenchmark()
      105  convolution_op.zero_buffer = z.data();  in DWConvBenchmark()
      106  convolution_op.input_height = input_height;  in DWConvBenchmark()
      107  convolution_op.input_width = input_width;  in DWConvBenchmark()
      108  convolution_op.output_height = output_height;  in DWConvBenchmark()
      109  convolution_op.output_width = output_width;  in DWConvBenchmark()
      110  convolution_op.kernel_height = kernel_height;  in DWConvBenchmark()
      [all …]
|
D | convolution.cc |
      84   for (xnn_operator_t& convolution_op : convolution_operators) {  in xnnpack_convolution_qu8()
      96   0 /* flags */, NULL, &convolution_op);  in xnnpack_convolution_qu8()
      130  for (xnn_operator_t& convolution_op : convolution_operators) {  in xnnpack_convolution_qu8()
      131  status = xnn_delete_operator(convolution_op);  in xnnpack_convolution_qu8()
      136  convolution_op = nullptr;  in xnnpack_convolution_qu8()
      205  for (xnn_operator_t& convolution_op : convolution_operators) {  in xnnpack_convolution_qs8()
      216  0 /* flags */, NULL, &convolution_op);  in xnnpack_convolution_qs8()
      250  for (xnn_operator_t& convolution_op : convolution_operators) {  in xnnpack_convolution_qs8()
      251  status = xnn_delete_operator(convolution_op);  in xnnpack_convolution_qs8()
      256  convolution_op = nullptr;  in xnnpack_convolution_qs8()
      [all …]
|
/external/XNNPACK/test/ |
D | convolution-operator-tester.h |
      668  xnn_operator_t convolution_op = nullptr;  in TestNHWCxQC8() local
      692  &convolution_op);  in TestNHWCxQC8()
      697  ASSERT_NE(nullptr, convolution_op);  in TestNHWCxQC8()
      704  …tr<xnn_operator, decltype(&xnn_delete_operator)> auto_convolution_op(convolution_op, xnn_delete_op…  in TestNHWCxQC8()
      708  convolution_op,  in TestNHWCxQC8()
      714  xnn_run_operator(convolution_op, nullptr /* thread pool */));  in TestNHWCxQC8()
      884  xnn_operator_t convolution_op = nullptr;  in TestNHWCxQS8() local
      908  &convolution_op);  in TestNHWCxQS8()
      913  ASSERT_NE(nullptr, convolution_op);  in TestNHWCxQS8()
      920  …tr<xnn_operator, decltype(&xnn_delete_operator)> auto_convolution_op(convolution_op, xnn_delete_op…  in TestNHWCxQS8()
      [all …]
|
/external/XNNPACK/include/ |
D | xnnpack.h |
      1696  xnn_operator_t convolution_op,
      2121  xnn_operator_t convolution_op,
      2420  xnn_operator_t convolution_op,
      2923  xnn_operator_t convolution_op,
      2988  xnn_operator_t convolution_op,
      3296  xnn_operator_t convolution_op,
|
/external/tensorflow/tensorflow/tools/api/golden/v1/ |
D | tensorflow.layers.-conv2-d-transpose.pbtxt | 192 name: "convolution_op"
|
D | tensorflow.layers.-separable-conv1-d.pbtxt | 192 name: "convolution_op"
|
D | tensorflow.layers.-conv2-d.pbtxt | 191 name: "convolution_op"
|
D | tensorflow.layers.-conv3-d-transpose.pbtxt | 192 name: "convolution_op"
|
D | tensorflow.layers.-conv3-d.pbtxt | 191 name: "convolution_op"
|
D | tensorflow.layers.-separable-conv2-d.pbtxt | 192 name: "convolution_op"
|
D | tensorflow.layers.-conv1-d.pbtxt | 191 name: "convolution_op"
|
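The `convolution_op` entries in the v1 API goldens above appear because the legacy `tf.compat.v1.layers` convolution classes subclass the Keras `Conv` base layer, which gained a public `convolution_op` method (see the RELEASE.md entry below). A minimal check, assuming TensorFlow 2.7 or newer; this snippet is illustrative and not part of the goldens:

```python
# Hypothetical smoke test: confirms that the legacy tf.compat.v1.layers
# convolution classes inherit the public convolution_op method from the
# Keras Conv base class in releases whose API goldens list it.
import tensorflow as tf

for cls in (tf.compat.v1.layers.Conv1D,
            tf.compat.v1.layers.Conv2D,
            tf.compat.v1.layers.Conv3D,
            tf.compat.v1.layers.Conv2DTranspose,
            tf.compat.v1.layers.Conv3DTranspose,
            tf.compat.v1.layers.SeparableConv1D,
            tf.compat.v1.layers.SeparableConv2D):
    print(cls.__name__, hasattr(cls, "convolution_op"))  # expected: True for each
```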
/external/tensorflow/ |
D | RELEASE.md |
      1300  * `tf.keras.layers.Conv` now includes a public `convolution_op` method.
      1306  return self.convolution_op(inputs, (self.kernel - mean) / tf.sqrt(var +
      1307  1e-10))` Alternatively, you can override `convolution_op`: `python class
      1308  StandardizedConv2D(tf.keras.Layer): def convolution_op(self, inputs,
      1311  super().convolution_op(inputs, (kernel - mean) / tf.sqrt(var + 1e-10))`
|
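The RELEASE.md matches above are a word-wrapped excerpt of the note that made `convolution_op` public on `tf.keras.layers.Conv`. Below is a cleaned-up, runnable sketch of the two usage patterns it describes. The moment computation is filled in from the elided surrounding lines, the second class is renamed so both fit in one file, and it subclasses `tf.keras.layers.Conv2D` rather than the `tf.keras.Layer` shown in the wrapped text (only `Conv` subclasses provide `convolution_op`); treat the details as illustrative.

```python
import tensorflow as tf

# Pattern 1: call the public convolution_op helper from a custom call().
class StandardizedConv2D(tf.keras.layers.Conv2D):
    def call(self, inputs):
        mean, var = tf.nn.moments(self.kernel, axes=[0, 1, 2], keepdims=True)
        # Convolve with a standardized kernel; bias/activation handling is
        # intentionally omitted, as in the release-note example.
        return self.convolution_op(inputs, (self.kernel - mean) / tf.sqrt(var + 1e-10))

# Pattern 2: override convolution_op itself and let the stock call() drive it.
class StandardizedConv2DV2(tf.keras.layers.Conv2D):
    def convolution_op(self, inputs, kernel):
        mean, var = tf.nn.moments(kernel, axes=[0, 1, 2], keepdims=True)
        return super().convolution_op(inputs, (kernel - mean) / tf.sqrt(var + 1e-10))

layer = StandardizedConv2DV2(filters=8, kernel_size=3, padding="same")
print(layer(tf.random.normal([1, 16, 16, 3])).shape)  # (1, 16, 16, 8)
```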