/external/wpa_supplicant_8/src/p2p/ |
D | p2p_utils.c |
    87 for (i = 0; i < a->channels; i++) { in p2p_reg_class_intersect()
    88 for (j = 0; j < b->channels; j++) { in p2p_reg_class_intersect()
    91 res->channel[res->channels] = a->channel[i]; in p2p_reg_class_intersect()
    92 res->channels++; in p2p_reg_class_intersect()
    93 if (res->channels == P2P_MAX_REG_CLASS_CHANNELS) in p2p_reg_class_intersect()
    102 * @a: First set of supported channels
    103 * @b: Second set of supported channels
    104 * @res: Data structure for returning the intersection of support channels
    106 * This function can be used to find a common set of supported channels. Both
    107 * input channels sets are assumed to use the same country code. If different
    [all …]
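
The doc comment above describes intersecting two supported-channel sets within one regulatory class. A minimal C sketch of that idea, using a hypothetical chan_set struct and cap standing in for wpa_supplicant's real types:

    #include <stddef.h>

    #define MAX_CHANNELS 20  /* stand-in for P2P_MAX_REG_CLASS_CHANNELS */

    /* Hypothetical simplified view of one regulatory-class channel list. */
    struct chan_set {
        unsigned char channel[MAX_CHANNELS];
        size_t channels;              /* number of valid entries in channel[] */
    };

    /* Keep only the channels present in both a and b, stopping at the cap. */
    static void chan_set_intersect(const struct chan_set *a,
                                   const struct chan_set *b,
                                   struct chan_set *res)
    {
        size_t i, j;

        res->channels = 0;
        for (i = 0; i < a->channels; i++) {
            for (j = 0; j < b->channels; j++) {
                if (a->channel[i] != b->channel[j])
                    continue;
                res->channel[res->channels++] = a->channel[i];
                if (res->channels == MAX_CHANNELS)
                    return;
                break;
            }
        }
    }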
|
/external/tensorflow/tensorflow/core/kernels/ |
D | resize_bilinear_op_gpu.cu.cc |
    39 int channels, int out_height, in ResizeBilinearKernel() argument
    42 // out_idx = c + channels * (x + out_width * (y + out_height * b)) in ResizeBilinearKernel()
    44 const int c = idx % channels; in ResizeBilinearKernel()
    45 idx /= channels; in ResizeBilinearKernel()
    66 channels + in ResizeBilinearKernel()
    70 channels + in ResizeBilinearKernel()
    74 channels + in ResizeBilinearKernel()
    78 channels + in ResizeBilinearKernel()
    91 int channels, int resized_height, int resized_width, T* output_grad) { in ResizeBilinearGradKernel() argument
    93 // in_idx = c + channels * (x + resized_width * (y + resized_height * b)) in ResizeBilinearGradKernel()
    [all …]
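
The out_idx comment is the usual NHWC flattening. A small standalone sketch (hypothetical helper name) that packs such an index and then peels it apart the same way the kernel does:

    #include <stdio.h>

    /* Flatten (b, y, x, c) into one NHWC index, matching
     * out_idx = c + channels * (x + out_width * (y + out_height * b)). */
    static int nhwc_index(int b, int y, int x, int c,
                          int out_height, int out_width, int channels)
    {
        return c + channels * (x + out_width * (y + out_height * b));
    }

    int main(void)
    {
        const int out_height = 4, out_width = 6, channels = 3;
        int idx = nhwc_index(1, 2, 5, 2, out_height, out_width, channels);

        /* Unpack the same index, innermost dimension first. */
        int c = idx % channels; idx /= channels;
        int x = idx % out_width; idx /= out_width;
        int y = idx % out_height; idx /= out_height;
        int b = idx;

        printf("b=%d y=%d x=%d c=%d\n", b, y, x, c);  /* prints b=1 y=2 x=5 c=2 */
        return 0;
    }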
|
D | resize_nearest_neighbor_op_gpu.cu.cc |
    38 const int in_width, const int channels, const int out_height, in ResizeNearestNeighborNHWC() argument
    43 int c = n % channels; in ResizeNearestNeighborNHWC()
    44 n /= channels; in ResizeNearestNeighborNHWC()
    50 const T* bottom_data_n = bottom_data + n * channels * in_height * in_width; in ResizeNearestNeighborNHWC()
    61 const int idx = (in_y * in_width + in_x) * channels + c; in ResizeNearestNeighborNHWC()
    69 const int in_width, const int channels, const int out_height, in LegacyResizeNearestNeighborNHWC() argument
    74 int c = n % channels; in LegacyResizeNearestNeighborNHWC()
    75 n /= channels; in LegacyResizeNearestNeighborNHWC()
    81 const T* bottom_data_n = bottom_data + n * channels * in_height * in_width; in LegacyResizeNearestNeighborNHWC()
    90 const int idx = (in_y * in_width + in_x) * channels + c; in LegacyResizeNearestNeighborNHWC()
    [all …]
|
D | maxpooling_op_gpu.cu.cc |
    62 // const int output_size = batch * channels * pooled_height * pooled_width;
    67 const int nthreads, const dtype* bottom_data, const int channels, in MaxPoolForwardNCHW() argument
    75 int c = (index / pooled_width / pooled_height) % channels; in MaxPoolForwardNCHW()
    76 int n = index / pooled_width / pooled_height / channels; in MaxPoolForwardNCHW()
    85 const int offset = n * channels * height * width; in MaxPoolForwardNCHW()
    107 // (so channels = outer_channels, output_size = real output size / 4).
    110 const int width, const int channels, const int pooled_height, in MaxPoolForwardNoMaskKernel_NCHW_VECT_C() argument
    119 int c = (index / pooled_width / pooled_height) % channels; in MaxPoolForwardNoMaskKernel_NCHW_VECT_C()
    120 int n = index / pooled_width / pooled_height / channels; in MaxPoolForwardNoMaskKernel_NCHW_VECT_C()
    128 const int32* bottom_data_n = bottom_data + n * channels * height * width; in MaxPoolForwardNoMaskKernel_NCHW_VECT_C()
    [all …]
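
The NCHW pooling kernels peel the same kind of flat output index apart, with the channel dimension in a different position than in the NHWC case above. A rough sketch of that decomposition (illustrative only, not the TensorFlow kernel itself):

    /* Decompose a flat NCHW output index into (n, c, ph, pw), mirroring
     * c = (index / pooled_width / pooled_height) % channels.
     * The matching input image then starts at n * channels * height * width. */
    void nchw_unpack(int index, int channels,
                     int pooled_height, int pooled_width,
                     int *n, int *c, int *ph, int *pw)
    {
        *pw = index % pooled_width;
        *ph = (index / pooled_width) % pooled_height;
        *c  = (index / pooled_width / pooled_height) % channels;
        *n  = index / pooled_width / pooled_height / channels;
    }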
|
D | adjust_contrast_op.cc |
    56 const int64 channels = input.dim_size(input.dims() - 1); in Compute() local
    78 const int64 batch = input.NumElements() / (height * width * channels); in Compute()
    79 const int64 shape[4] = {batch, height, width, channels}; in Compute()
    153 int64 channels = 0; member
    164 const int64 channels = input.dim_size(input.dims() - 1); in Compute() local
    175 const int64 batch = input.NumElements() / (height * width * channels); in Compute()
    183 options.channels = channels; in Compute()
    206 const int64 channels = options.channels; in DoCompute() local
    214 TensorShape({batch, channels}), &mean_values)); in DoCompute()
    216 auto input_data = input->shaped<float, 3>({batch, image_size, channels}); in DoCompute()
    [all …]
|
D | resize_bicubic_op_test.cc |
    116 const int channels = images.dimension(3); in ResizeBicubicBaseline() local
    119 ASSERT_EQ(channels, output.dimension(3)); in ResizeBicubicBaseline()
    139 for (int64 c = 0; c < channels; ++c) { in ResizeBicubicBaseline()
    161 const int target_width, int channels) { in RunRandomTest() argument
    163 << channels << " to " << target_height << "x" << target_width in RunRandomTest()
    164 << "x" << channels; in RunRandomTest()
    166 TensorShape({batch_size, in_height, in_width, channels})); in RunRandomTest()
    174 TensorShape({batch_size, target_height, target_width, channels}))); in RunRandomTest()
    186 void RunManyRandomTests(int channels) { in RunManyRandomTests() argument
    193 channels); in RunManyRandomTests()
    [all …]
|
/external/libpng/contrib/tools/ |
D | cvtcolor.c |
    53 int channels = 0; in main() local
    92 ++channels; in main()
    95 ++channels; in main()
    98 ++channels; in main()
    101 ++channels; in main()
    108 int components = channels; in main()
    114 if (components < channels) in main()
    123 if ((channels & 1) == 0) in main()
    125 double alpha = c[channels-1]; in main()
    128 for (i=0; i<channels-1; ++i) c[i] /= alpha; in main()
    [all …]
|
/external/libopus/src/ |
D | opus_decoder.c |
    58 int channels; member
    84 celt_assert(st->channels == 1 || st->channels == 2); in validate_opus_decoder()
    88 celt_assert(st->DecControl.nChannelsAPI == st->channels); in validate_opus_decoder()
    102 int opus_decoder_get_size(int channels) in opus_decoder_get_size() argument
    106 if (channels<1 || channels > 2) in opus_decoder_get_size()
    112 celtDecSizeBytes = celt_decoder_get_size(channels); in opus_decoder_get_size()
    116 int opus_decoder_init(OpusDecoder *st, opus_int32 Fs, int channels) in opus_decoder_init() argument
    123 || (channels!=1&&channels!=2)) in opus_decoder_init()
    126 OPUS_CLEAR((char*)st, opus_decoder_get_size(channels)); in opus_decoder_init()
    137 st->stream_channels = st->channels = channels; in opus_decoder_init()
    [all …]
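
opus_decoder_get_size()/opus_decoder_init() accept only 1 or 2 channels; typical callers go through opus_decoder_create(), roughly like this (a minimal sketch, assuming the usual <opus/opus.h> install path):

    #include <stdio.h>
    #include <opus/opus.h>

    int main(void)
    {
        int err = 0;
        /* 48 kHz stereo; any channel count outside 1..2 makes this fail. */
        OpusDecoder *dec = opus_decoder_create(48000, 2, &err);
        if (err != OPUS_OK || dec == NULL) {
            fprintf(stderr, "opus_decoder_create: %s\n", opus_strerror(err));
            return 1;
        }
        /* ... feed packets to opus_decode() here ... */
        opus_decoder_destroy(dec);
        return 0;
    }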
|
D | opus_encoder.c |
    71 int channels; member
    168 int opus_encoder_get_size(int channels) in opus_encoder_get_size() argument
    172 if (channels<1 || channels > 2) in opus_encoder_get_size()
    178 celtEncSizeBytes = celt_encoder_get_size(channels); in opus_encoder_get_size()
    182 int opus_encoder_init(OpusEncoder* st, opus_int32 Fs, int channels, int application) in opus_encoder_init() argument
    189 if((Fs!=48000&&Fs!=24000&&Fs!=16000&&Fs!=12000&&Fs!=8000)||(channels!=1&&channels!=2)|| in opus_encoder_init()
    194 OPUS_CLEAR((char*)st, opus_encoder_get_size(channels)); in opus_encoder_init()
    205 st->stream_channels = st->channels = channels; in opus_encoder_init()
    215 st->silk_mode.nChannelsAPI = channels; in opus_encoder_init()
    216 st->silk_mode.nChannelsInternal = channels; in opus_encoder_init()
    [all …]
|
/external/mesa3d/src/mesa/main/ |
D | format_parser.py |
    52 self.name = None # Set when the channels are added to the format
    53 self.shift = -1 # Set when the channels are added to the format
    54 self.index = -1 # Set when the channels are added to the format
    106 A Swizzle is a mapping from one set of channels in one format to the
    107 channels in another. Each channel in the destination format is
    119 case, the source channels are represented by the characters "x", "y",
    123 channels maps to the first luminance-alpha channel and the alpha channel
    208 a permutation with no channels added or removed, then this
    229 …def __init__(self, name, layout, block_width, block_height, block_depth, channels, swizzle, colors… argument
    230 """Constructs a Format from some metadata and a list of channels.
    [all …]
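
The Swizzle docstring describes remapping source channels named "x", "y", "z", "w" (plus the constants "0" and "1") onto destination channels. A toy C sketch of applying such a mapping to one 8-bit RGBA pixel, using a hypothetical apply_swizzle helper rather than Mesa's parser:

    #include <stdio.h>

    /* Apply a 4-character swizzle such as "xxxw" or "yzwx" to an RGBA pixel.
     * 'x'..'w' select source channels 0..3; '0' and '1' are constants. */
    static void apply_swizzle(const unsigned char src[4], const char *swizzle,
                              unsigned char dst[4])
    {
        for (int i = 0; i < 4; i++) {
            switch (swizzle[i]) {
            case 'x': dst[i] = src[0]; break;
            case 'y': dst[i] = src[1]; break;
            case 'z': dst[i] = src[2]; break;
            case 'w': dst[i] = src[3]; break;
            case '0': dst[i] = 0;      break;
            case '1': dst[i] = 255;    break;
            }
        }
    }

    int main(void)
    {
        unsigned char rgba[4] = { 10, 20, 30, 40 }, out[4];
        apply_swizzle(rgba, "xxxw", out);   /* luminance-style replication */
        printf("%u %u %u %u\n", out[0], out[1], out[2], out[3]);  /* 10 10 10 40 */
        return 0;
    }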
|
/external/mesa3d/prebuilt-intermediates/isl/ |
D | isl_format_layout.c |
    37 .channels = {
    57 .channels = {
    77 .channels = {
    97 .channels = {
    117 .channels = {
    137 .channels = {
    157 .channels = {
    177 .channels = {
    197 .channels = {
    217 .channels = {
    [all …]
|
/external/autotest/client/cros/audio/ |
D | sox_utils.py |
    13 def _raw_format_args(channels, bits, rate): argument
    16 @param channels: Number of channels.
    24 args += _format_args(channels, bits, rate)
    28 def _format_args(channels, bits, rate): argument
    31 @param channels: Number of channels.
    38 return ['-c', str(channels), '-b', str(bits), '-r', str(rate)]
    42 filename, channels=2, bits=16, rate=48000, duration=None, frequencies=440, argument
    47 @param channels: The number of channels.
    59 args += _raw_format_args(channels, bits, rate)
    61 args += _format_args(channels, bits, rate)
    [all …]
|
/external/autotest/client/site_tests/audio_Microphone/ |
D | audio_Microphone.py |
    23 self, filesize, duration, channels, rate, bits=16): argument
    24 expected = duration * channels * (bits / 8) * rate
    29 def verify_alsa_capture(self, channels, rate, device, bits=16): argument
    32 recorded_file.name, duration=DURATION, channels=channels,
    36 DURATION, channels, rate, bits)
    39 def verify_cras_capture(self, channels, rate): argument
    42 recorded_file.name, duration=DURATION, channels=channels,
    46 DURATION, channels, rate)
    64 channels = alsa_utils.get_record_device_supported_channels(
    66 if channels is None:
    [all …]
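
The file-size check on line 24 is plain raw-PCM arithmetic: bytes = seconds * channels * (bits / 8) * sample rate. A quick sketch of that calculation (hypothetical helper name):

    #include <stdio.h>

    /* Expected size in bytes of an uncompressed PCM capture. */
    static long long pcm_bytes(double seconds, int channels, int bits, int rate)
    {
        return (long long)(seconds * channels * (bits / 8) * rate);
    }

    int main(void)
    {
        /* e.g. 3 s of 16-bit stereo at 48 kHz -> 576000 bytes */
        printf("%lld\n", pcm_bytes(3.0, 2, 16, 48000));
        return 0;
    }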
|
/external/webrtc/webrtc/modules/audio_coding/neteq/ |
D | normal_unittest.cc |
    36 size_t channels = 1; in TEST() local
    37 BackgroundNoise bgn(channels); in TEST()
    41 Expand expand(&bgn, &sync_buffer, &random_vector, &statistics, fs, channels); in TEST()
    50 size_t channels = 1; in TEST() local
    51 BackgroundNoise bgn(channels); in TEST()
    56 channels); in TEST()
    60 rtc::scoped_ptr<int16_t[]> mute_factor_array(new int16_t[channels]); in TEST()
    61 for (size_t i = 0; i < channels; ++i) { in TEST()
    64 AudioMultiVector output(channels); in TEST()
    96 size_t channels = 2; in TEST() local
    [all …]
|
/external/libopus/tests/ |
D | test_opus_projection.c |
    189 void test_creation_arguments(const int channels, const int mapping_family) in test_creation_arguments() argument
    202 int order_plus_one = (int)floor(sqrt((float)channels)); in test_creation_arguments()
    203 int nondiegetic_channels = channels - order_plus_one * order_plus_one; in test_creation_arguments()
    208 st_enc = opus_projection_ambisonics_encoder_create(Fs, channels, in test_creation_arguments()
    226 st_dec = opus_projection_decoder_create(Fs, channels, streams, in test_creation_arguments()
    240 fprintf(stderr, "Channels: %d, Family: %d\n", channels, mapping_family); in test_creation_arguments()
    241 fprintf(stderr, "Order+1: %d, Non-diegetic Channels: %d\n", in test_creation_arguments()
    249 void generate_music(short *buf, opus_int32 len, opus_int32 channels) in generate_music() argument
    253 a = (opus_int32 *)malloc(sizeof(opus_int32) * channels); in generate_music()
    254 b = (opus_int32 *)malloc(sizeof(opus_int32) * channels); in generate_music()
    [all …]
|
/external/webrtc/webrtc/modules/audio_processing/ |
D | audio_buffer.cc |
    127 input_buffer_->fbuf()->channels()[0]); in CopyFrom()
    128 data_ptr = input_buffer_->fbuf_const()->channels(); in CopyFrom()
    136 process_buffer_->channels()[i], in CopyFrom()
    139 data_ptr = process_buffer_->channels(); in CopyFrom()
    146 data_->fbuf()->channels()[i]); in CopyFrom()
    159 data_ptr = process_buffer_->channels(); in CopyTo()
    162 FloatS16ToFloat(data_->fbuf()->channels()[i], in CopyTo()
    192 return data_->ibuf_const()->channels(); in channels_const()
    195 int16_t* const* AudioBuffer::channels() { in channels() function in webrtc::AudioBuffer
    197 return data_->ibuf()->channels(); in channels()
    [all …]
|
/external/adhd/cras/src/server/ |
D | cras_audio_area.c |
    42 if (!(src->channels[src_idx].ch_set & in cras_audio_area_copy()
    43 dst->channels[dst_idx].ch_set)) in cras_audio_area_copy()
    46 schan = src->channels[src_idx].buf + in cras_audio_area_copy()
    47 src_offset * src->channels[src_idx].step_bytes; in cras_audio_area_copy()
    48 dchan = dst->channels[dst_idx].buf + in cras_audio_area_copy()
    49 dst_offset * dst->channels[dst_idx].step_bytes; in cras_audio_area_copy()
    53 dst->channels[dst_idx].step_bytes, in cras_audio_area_copy()
    54 src->channels[src_idx].step_bytes, in cras_audio_area_copy()
    80 channel_area_set_channel(area->channels, CRAS_CH_FL); in cras_audio_area_config_channels()
    81 channel_area_set_channel(area->channels, CRAS_CH_FR); in cras_audio_area_config_channels()
    [all …]
|
/external/wpa_supplicant_8/src/common/ |
D | ieee802_11_common.c |
    714 * for HT40 and VHT. DFS channels are not covered.
    740 /* 2.407 GHz, channels 1..13 */ in ieee80211_freq_to_channel_ext()
    786 /* 5 GHz, channels 36..48 */ in ieee80211_freq_to_channel_ext()
    805 /* 5 GHz, channels 52..64 */ in ieee80211_freq_to_channel_ext()
    824 /* 5 GHz, channels 149..169 */ in ieee80211_freq_to_channel_ext()
    845 /* 5 GHz, channels 100..140 */ in ieee80211_freq_to_channel_ext()
    960 case 12: /* channels 1..11 */ in ieee80211_chan_to_freq_us()
    961 case 32: /* channels 1..7; 40 MHz */ in ieee80211_chan_to_freq_us()
    962 case 33: /* channels 5..11; 40 MHz */ in ieee80211_chan_to_freq_us()
    966 case 1: /* channels 36,40,44,48 */ in ieee80211_chan_to_freq_us()
    [all …]
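
The comments list the usual band layouts. A simplified sketch of the frequency-to-channel arithmetic those non-DFS comments imply (hypothetical helper; wpa_supplicant's real conversion also handles HT40/VHT offsets and further operating classes):

    /* Map an operating frequency in MHz to an IEEE 802.11 channel number.
     * Returns -1 if the frequency is outside the ranges handled here. */
    int freq_to_channel(int freq_mhz)
    {
        if (freq_mhz >= 2412 && freq_mhz <= 2472)   /* 2.4 GHz, channels 1..13 */
            return (freq_mhz - 2407) / 5;
        if (freq_mhz == 2484)                       /* channel 14 (Japan) */
            return 14;
        if (freq_mhz >= 5180 && freq_mhz <= 5885)   /* 5 GHz, channels 36 and up */
            return (freq_mhz - 5000) / 5;
        return -1;
    }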
|
/external/adhd/cras/src/dsp/ |
D | dsp_util.c |
    380 int channels, int frames) in dsp_util_deinterleave_s16le() argument
    382 float *output_ptr[channels]; in dsp_util_deinterleave_s16le()
    386 if (channels == 2) { in dsp_util_deinterleave_s16le()
    392 for (i = 0; i < channels; i++) in dsp_util_deinterleave_s16le()
    396 for (j = 0; j < channels; j++) in dsp_util_deinterleave_s16le()
    402 int channels, int frames) in dsp_util_deinterleave_s24le() argument
    404 float *output_ptr[channels]; in dsp_util_deinterleave_s24le()
    407 for (i = 0; i < channels; i++) in dsp_util_deinterleave_s24le()
    411 for (j = 0; j < channels; j++, input++) in dsp_util_deinterleave_s24le()
    417 int channels, int frames) in dsp_util_deinterleave_s243le() argument
    [all …]
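
dsp_util_deinterleave_s16le() splits interleaved signed 16-bit frames into one float buffer per channel. A generic scalar sketch of that conversion, scaling into [-1, 1) (illustrative only, not the optimized paths in dsp_util.c):

    #include <stdint.h>

    /* Deinterleave interleaved s16le samples into per-channel float buffers,
     * scaling each sample by 1/32768 so full scale maps into [-1.0, 1.0). */
    void deinterleave_s16le(const int16_t *input, float *const *output,
                            int channels, int frames)
    {
        for (int i = 0; i < frames; i++)
            for (int ch = 0; ch < channels; ch++)
                output[ch][i] = *input++ / 32768.0f;
    }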
|
/external/swiftshader/third_party/llvm-7.0/llvm/unittests/ExecutionEngine/Orc/ |
D | RPCUtilsTest.cpp |
    172 auto Channels = createPairedQueueChannels(); in TEST() local
    173 DummyRPCEndpoint Server(*Channels.first); in TEST()
    178 auto Channels = createPairedQueueChannels(); in TEST() local
    179 DummyRPCEndpoint Client(*Channels.first); in TEST()
    180 DummyRPCEndpoint Server(*Channels.second); in TEST()
    222 auto Channels = createPairedQueueChannels(); in TEST() local
    223 DummyRPCEndpoint Client(*Channels.first); in TEST()
    224 DummyRPCEndpoint Server(*Channels.second); in TEST()
    267 auto Channels = createPairedQueueChannels(); in TEST() local
    268 DummyRPCEndpoint Client(*Channels.first); in TEST()
    [all …]
|
/external/tensorflow/tensorflow/lite/examples/label_image/ |
D | bitmap_helpers.cc |
    32 int height, int channels, bool top_down) { in decode_bmp() argument
    33 std::vector<uint8_t> output(height * width * channels); in decode_bmp()
    40 src_pos = ((height - 1 - i) * row_size) + j * channels; in decode_bmp()
    42 src_pos = i * row_size + j * channels; in decode_bmp()
    45 dst_pos = (i * width + j) * channels; in decode_bmp()
    47 switch (channels) { in decode_bmp()
    65 LOG(FATAL) << "Unexpected number of channels: " << channels; in decode_bmp()
    74 int* height, int* channels, Settings* s) { in read_bmp() argument
    99 *channels = bpp / 8; in read_bmp()
    102 LOG(INFO) << "width, height, channels: " << *width << ", " << *height in read_bmp()
    [all …]
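
decode_bmp() picks the source row differently for bottom-up images (the BMP default) and top-down images. A compact sketch of that addressing, including the 4-byte row padding the BMP format requires (hypothetical helper name):

    #include <stddef.h>

    /* Byte offset of pixel (row i, column j) in a BMP pixel array.
     * Rows are padded to a multiple of 4 bytes, and unless the height in the
     * header is negative (top_down), row 0 of the image is stored last. */
    size_t bmp_src_pos(int i, int j, int width, int height,
                       int channels, int top_down)
    {
        size_t row_size = ((size_t)channels * width + 3) / 4 * 4;

        if (top_down)
            return (size_t)i * row_size + (size_t)j * channels;
        return (size_t)(height - 1 - i) * row_size + (size_t)j * channels;
    }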
|
/external/libopus/include/ |
D | opus_multistream.h |
    107 * single packet, enabling support for up to 255 channels. Unlike an
    124 * is configured to decode them to either 1 or 2 channels, respectively.
    141 * The output channels specified by the encoder
    197 * encoded channels (<code>streams +
    209 int channels,
    220 * @param channels <tt>int</tt>: Number of channels in the input signal.
    223 * coded channels (<code>streams +
    227 * This must be no more than the number of channels.
    233 * encoded channels (<code>streams +
    235 * more than the number of input channels.
    [all …]
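
The multistream API spreads `channels` outputs across `streams` elementary streams, `coupled_streams` of which carry stereo pairs, with `mapping` routing decoded channels to outputs. A minimal creation sketch for plain stereo carried as one coupled stream (assuming the usual <opus/opus_multistream.h> install path):

    #include <stdio.h>
    #include <opus/opus_multistream.h>

    int main(void)
    {
        int err = 0;
        /* Stereo as one coupled stream: channels = 2, streams = 1,
         * coupled_streams = 1; mapping sends outputs 0/1 to the left/right
         * channels of that stream. */
        const unsigned char mapping[2] = { 0, 1 };
        OpusMSEncoder *enc = opus_multistream_encoder_create(
            48000, 2, 1, 1, mapping, OPUS_APPLICATION_AUDIO, &err);
        if (err != OPUS_OK || enc == NULL) {
            fprintf(stderr, "create failed: %s\n", opus_strerror(err));
            return 1;
        }
        /* ... call opus_multistream_encode() per frame ... */
        opus_multistream_encoder_destroy(enc);
        return 0;
    }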
|
/external/webrtc/webrtc/common_audio/ |
D | blocker_unittest.cc |
    164 input_cb.channels(), in TEST_F()
    165 input_chunk_cb.channels(), in TEST_F()
    166 actual_output_cb.channels(), in TEST_F()
    167 output_chunk_cb.channels(), in TEST_F()
    171 ValidateSignalEquality(expected_output_cb.channels(), in TEST_F()
    172 actual_output_cb.channels(), in TEST_F()
    217 input_cb.channels(), in TEST_F()
    218 input_chunk_cb.channels(), in TEST_F()
    219 actual_output_cb.channels(), in TEST_F()
    220 output_chunk_cb.channels(), in TEST_F()
    [all …]
|
/external/tensorflow/tensorflow/python/keras/layers/ |
D | pooling.py |
    216 `(batch, height, width, channels)` while `channels_first` corresponds to
    217 inputs with shape `(batch, channels, height, width)`.
    299 `(batch, height, width, channels)` while `channels_first`
    301 `(batch, channels, height, width)`.
    308 4D tensor with shape `(batch_size, rows, cols, channels)`.
    310 4D tensor with shape `(batch_size, channels, rows, cols)`.
    314 4D tensor with shape `(batch_size, pooled_rows, pooled_cols, channels)`.
    316 4D tensor with shape `(batch_size, channels, pooled_rows, pooled_cols)`.
    349 `(batch, height, width, channels)` while `channels_first`
    351 `(batch, channels, height, width)`.
    [all …]
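
The docstrings contrast `channels_last` (`(batch, rows, cols, channels)`) with `channels_first` (`(batch, channels, rows, cols)`). A small C sketch of how the pooled spatial size is computed and where `channels` lands in each layout (general pooling formula, not Keras internals; helper name is hypothetical):

    #include <stdio.h>

    /* Output length of a pooling window along one spatial axis.
     * "valid" padding: floor((size - pool) / stride) + 1
     * "same"  padding: ceil(size / stride)                      */
    static int pooled_dim(int size, int pool, int stride, int same_padding)
    {
        if (same_padding)
            return (size + stride - 1) / stride;
        return (size - pool) / stride + 1;
    }

    int main(void)
    {
        int batch = 32, rows = 28, cols = 28, channels = 3;
        int pr = pooled_dim(rows, 2, 2, 0), pc = pooled_dim(cols, 2, 2, 0);

        /* channels_last:  (batch, pooled_rows, pooled_cols, channels) */
        printf("channels_last:  (%d, %d, %d, %d)\n", batch, pr, pc, channels);
        /* channels_first: (batch, channels, pooled_rows, pooled_cols) */
        printf("channels_first: (%d, %d, %d, %d)\n", batch, channels, pr, pc);
        return 0;
    }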
|
/external/python/cpython2/Lib/test/ |
D | test_ossaudiodev.py |
    101 (fmt, channels, rate) = config
    103 dsp.channels(channels) == channels and
    112 result = dsp.setparameters(fmt, channels, rate, False)
    113 self.assertEqual(result, (fmt, channels, rate),
    116 result = dsp.setparameters(fmt, channels, rate, True)
    117 self.assertEqual(result, (fmt, channels, rate),
    122 # channels currently exceeds even Hollywood's ambitions, and
    128 channels = 2
    131 (fmt, channels, -50), # impossible rate
    133 (fmt, channels, rate) = config
    [all …]
|