
Searched refs:out_channels (Results 1 – 25 of 29) sorted by relevance

/external/libaom/libaom/av1/encoder/
cnn.c
199 channels_per_branch[b] = layer_config->out_channels; in find_cnn_out_channels()
201 channels_per_branch[b] = layer_config->out_channels; in find_cnn_out_channels()
211 channels_per_branch[branch] = layer_config->out_channels; in find_cnn_out_channels()
236 int *out_height, int *out_channels) { in av1_find_cnn_output_size() argument
283 out_channels[output_num] = channels_per_branch[layer_config->branch]; in av1_find_cnn_output_size()
396 const int cstep = layer_config->in_channels * layer_config->out_channels; in av1_cnn_convolve_c()
405 for (int i = 0; i < layer_config->out_channels; ++i) { in av1_cnn_convolve_c()
418 int off = k * layer_config->out_channels + i; in av1_cnn_convolve_c()
445 for (int i = 0; i < layer_config->out_channels; ++i) { in av1_cnn_convolve_c()
458 int off = k * layer_config->out_channels + i; in av1_cnn_convolve_c()
[all …]
cnn.h
109 int out_channels; member
168 int *out_height, int *out_channels);
/external/webrtc/modules/audio_coding/codecs/opus/test/
lapped_transform_unittest.cc
30 size_t out_channels, in ProcessAudioBlock() argument
32 RTC_CHECK_EQ(in_channels, out_channels); in ProcessAudioBlock()
33 for (size_t i = 0; i < out_channels; ++i) { in ProcessAudioBlock()
52 size_t out_channels, in ProcessAudioBlock() argument
54 RTC_CHECK_EQ(in_channels, out_channels); in ProcessAudioBlock()
/external/tensorflow/tensorflow/tools/graph_transforms/
flatten_atrous.cc
72 const int32 out_channels = filter.dim_size(3); in FlattenAtrousConv() local
81 in_channels, out_channels})); in FlattenAtrousConv()
90 for (int c_out = 0; c_out < out_channels; ++c_out) { in FlattenAtrousConv()
/external/tensorflow/tensorflow/python/kernel_tests/
cudnn_deterministic_base.py
100 depth=3, height=3, width=3, in_channels=3, out_channels=2)
120 height=3, width=3, in_channels=8, out_channels=8)
134 height=7, width=7, in_channels=8, out_channels=128)
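
The cudnn_deterministic_base.py matches above configure small 3-D convolutions purely by their depth/height/width/in_channels/out_channels sizes. As a minimal sketch (assumed usage, not the test code itself), those sizes map onto `tf.nn.conv3d` roughly as follows, with an NDHWC input and a filter of shape `[depth, height, width, in_channels, out_channels]`:

    # Minimal sketch, not the test itself: the sizes from the first
    # cudnn_deterministic_base.py excerpt mapped onto tf.nn.conv3d.
    import tensorflow as tf

    depth, height, width, in_channels, out_channels = 3, 3, 3, 3, 2
    x = tf.random.normal([1, 8, 8, 8, in_channels])       # one NDHWC input volume (sizes assumed)
    filters = tf.random.normal([depth, height, width, in_channels, out_channels])
    y = tf.nn.conv3d(x, filters, strides=[1, 1, 1, 1, 1], padding="SAME")
    print(y.shape)  # (1, 8, 8, 8, 2): the last dimension is out_channels
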
depthtospace_op_test.py
283 def compareToTranspose(self, batch_size, in_height, in_width, out_channels, argument
285 in_channels = out_channels * block_size * block_size
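
The depthtospace_op_test.py excerpt above encodes the defining channel relation of depth-to-space: the input needs block_size * block_size times as many channels as the output, because each group of block_size² channels is rearranged into one block_size × block_size spatial block. A minimal sketch of that relation with `tf.nn.depth_to_space` (the concrete shapes are illustrative, not from the test):

    # Minimal sketch of in_channels = out_channels * block_size * block_size,
    # the relation used by depthtospace_op_test.py; shapes here are illustrative.
    import tensorflow as tf

    block_size, out_channels = 2, 3
    in_channels = out_channels * block_size * block_size   # 12
    x = tf.random.normal([1, 4, 4, in_channels])           # NHWC input
    y = tf.nn.depth_to_space(x, block_size)
    print(y.shape)  # (1, 8, 8, 3): spatial dims grow by block_size, channels drop to out_channels
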
/external/tensorflow/tensorflow/core/api_def/base_api/
api_def_Conv3DBackpropFilter.pbtxt
12 Shape `[depth, rows, cols, in_channels, out_channels]`.
20 out_channels]`.
api_def_Conv3DBackpropInput.pbtxt
12 Shape `[depth, rows, cols, in_channels, out_channels]`.
20 out_channels]`.
api_def_Conv2DBackpropFilter.pbtxt
14 `[filter_height, filter_width, in_channels, out_channels]` tensor.
20 4-D with shape `[batch, out_height, out_width, out_channels]`.
28 `[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t.
api_def_Conv3DBackpropFilterV2.pbtxt
14 `[filter_depth, filter_height, filter_width, in_channels, out_channels]`
22 out_channels]`.
api_def_Conv3DBackpropInputV2.pbtxt
14 Shape `[depth, rows, cols, in_channels, out_channels]`.
22 out_channels]`.
api_def_DepthwiseConv2dNativeBackpropFilter.pbtxt
24 out_backprop shape is `[batch, out_height, out_width, out_channels]`.
32 `[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t.
api_def_Conv2DBackpropInput.pbtxt
14 `[filter_height, filter_width, in_channels, out_channels]`.
20 4-D with shape `[batch, out_height, out_width, out_channels]`.
api_def_Conv2D.pbtxt
14 `[filter_height, filter_width, in_channels, out_channels]`
71 `[filter_height, filter_width, in_channels, out_channels]`, this op
api_def_FusedPadConv2D.pbtxt
20 `[filter_height, filter_width, in_channels, out_channels]`.
api_def_Conv3D.pbtxt
13 out_channels]`. `in_channels` must match between `input` and `filter`.
api_def_DepthwiseConv2dNativeBackpropInput.pbtxt
23 out_backprop shape is `[batch, out_height, out_width, out_channels]`.
api_def_FusedResizeAndPadConv2D.pbtxt
27 `[filter_height, filter_width, in_channels, out_channels]`.
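
Taken together, the api_def matches above describe one convention: convolution filters are laid out as `[filter_height, filter_width, in_channels, out_channels]` (with a leading `filter_depth` for the 3-D ops), `in_channels` must match the input's channel dimension, and the output has shape `[batch, out_height, out_width, out_channels]`. A minimal 2-D sketch of that layout (shapes chosen arbitrarily):

    # Minimal sketch of the filter layout spelled out in the Conv2D api_defs:
    # filters are [filter_height, filter_width, in_channels, out_channels].
    import tensorflow as tf

    in_channels, out_channels = 8, 16
    x = tf.random.normal([1, 32, 32, in_channels])          # [batch, height, width, in_channels]
    filters = tf.random.normal([3, 3, in_channels, out_channels])
    y = tf.nn.conv2d(x, filters, strides=1, padding="SAME")
    print(y.shape)  # (1, 32, 32, 16): [batch, out_height, out_width, out_channels]
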
/external/tensorflow/tensorflow/lite/examples/ios/simple/
ios_image_load.mm
26 int* out_channels) {
55 *out_channels = 0;
80 *out_channels = channels;
ios_image_load.h
21 int* out_height, int* out_channels);
/external/tensorflow/tensorflow/core/kernels/mkl/
mkl_fused_batch_norm_op_test.cc
138 int out_channels = 6; in VerifyTensorsCloseForGrad() local
144 {filter_height, filter_width, in_channels, out_channels}); in VerifyTensorsCloseForGrad()
147 Tensor y_backprop(dtype, {batch, height, width, out_channels}); in VerifyTensorsCloseForGrad()
150 Tensor scale(dtype, {out_channels}); in VerifyTensorsCloseForGrad()
152 Tensor mean(dtype, {out_channels}); in VerifyTensorsCloseForGrad()
154 Tensor variance(dtype, {out_channels}); in VerifyTensorsCloseForGrad()
157 Tensor res_sp3(dtype, {out_channels}); in VerifyTensorsCloseForGrad()
/external/libaom/libaom/test/
cnn_test.cc
39 int out_width, out_height, out_channels; in RunCNNTest() local
41 &out_height, &out_channels); in RunCNNTest()
47 (float *)aom_malloc(sizeof(*output_) * out_size * out_channels); in RunCNNTest()
49 for (int channel = 0; channel < out_channels; ++channel) { in RunCNNTest()
53 const int output_chs[1] = { out_channels }; in RunCNNTest()
124 layer_config->out_channels; in AssignLayerWeightsBiases()
125 bias_offset += layer_config->out_channels; in AssignLayerWeightsBiases()
/external/tensorflow/tensorflow/python/ops/
conv2d_benchmark.py
180 out_channels = [4, 16, 32]
185 data_types, data_formats, in_channels, out_channels, hw_strides,
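
The conv2d_benchmark.py matches above show `out_channels = [4, 16, 32]` being passed alongside data types, data formats, input channels, and strides, which suggests the benchmark times combinations of these parameters. A hypothetical sketch of such a cross-product sweep (the other value lists and the loop are assumptions, not the benchmark's actual code):

    # Hypothetical sketch of a parameter sweep; only out_channels = [4, 16, 32]
    # comes from the excerpt above, the other lists are illustrative.
    import itertools

    in_channels = [1, 3, 8]
    out_channels = [4, 16, 32]
    hw_strides = [1, 2]

    for ic, oc, stride in itertools.product(in_channels, out_channels, hw_strides):
        # each combination would configure one timed conv2d run
        print(f"benchmark case: in_channels={ic}, out_channels={oc}, stride={stride}")
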
/external/webrtc/modules/audio_coding/test/
TestStereo.h
79 int out_channels,
/external/webrtc/modules/audio_processing/test/
debug_dump_test.cc
240 const int out_channels = config.num_channels(); in ReadAndDeinterleave() local
247 RTC_CHECK_LE(out_channels, channels); in ReadAndDeinterleave()
248 for (int channel = 0; channel < out_channels; ++channel) { in ReadAndDeinterleave()
