
Searched refs:output_frame (Results 1 – 12 of 12) sorted by relevance

/external/webrtc/modules/audio_coding/acm2/
acm_receive_test.cc
99 AudioFrame output_frame; in Run() local
102 acm_->PlayoutData10Ms(output_freq_hz_, &output_frame, &muted)); in Run()
103 ASSERT_EQ(output_freq_hz_, output_frame.sample_rate_hz_); in Run()
107 EXPECT_EQ(samples_per_block, output_frame.samples_per_channel_); in Run()
109 if (output_frame.speech_type_ == webrtc::AudioFrame::kPLC) { in Run()
114 EXPECT_EQ(exptected_output_channels_, output_frame.num_channels_); in Run()
117 ASSERT_TRUE(audio_sink_->WriteAudioFrame(output_frame)); in Run()
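
A minimal sketch (not taken from the test above) of the pull-based playout loop these hits exercise: a 10 ms block is requested from an already-configured webrtc::AudioCodingModule and the resulting AudioFrame fields are checked. Header paths and the exact PlayoutData10Ms() signature may differ between WebRTC revisions.

  #include <cassert>
  #include <cstddef>

  #include "api/audio/audio_frame.h"  // Path varies across WebRTC revisions.
  #include "modules/audio_coding/include/audio_coding_module.h"

  // Pull one 10 ms block of decoded audio, mirroring AcmReceiveTest::Run().
  void PullOneBlock(webrtc::AudioCodingModule* acm, int output_freq_hz) {
    webrtc::AudioFrame output_frame;
    bool muted = false;
    if (acm->PlayoutData10Ms(output_freq_hz, &output_frame, &muted) != 0) {
      return;  // Decode/playout error.
    }
    // A 10 ms block carries output_freq_hz / 100 samples per channel;
    // speech_type_ == AudioFrame::kPLC marks concealed (lost-packet) audio.
    assert(output_frame.sample_rate_hz_ == output_freq_hz);
    assert(output_frame.samples_per_channel_ ==
           static_cast<std::size_t>(output_freq_hz / 100));
  }
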
/external/tensorflow/tensorflow/core/common_runtime/
propagator_state.cc
88 FrameState* output_frame = input_frame; in PropagateOutputs() local
94 DCHECK_EQ(input_frame, output_frame); in PropagateOutputs()
99 FindOrCreateChildFrame(input_frame, input_iter, *item, &output_frame); in PropagateOutputs()
101 mutex_lock l(output_frame->mu); in PropagateOutputs()
102 output_iter = output_frame->GetIteration(0); in PropagateOutputs()
105 output_frame->AddLoopInv(item, (*outputs)[0], ready); in PropagateOutputs()
107 int activated = output_frame->ActivateNodesLocked( in PropagateOutputs()
109 output_frame->AdjustOutstandingOpsLocked(output_iter, activated, ready); in PropagateOutputs()
111 output_frame->num_pending_inputs--; in PropagateOutputs()
124 output_frame = input_frame->parent_frame; in PropagateOutputs()
[all …]
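
The TensorFlow hits show how PropagateOutputs() picks the frame that receives a node's outputs. Below is an illustrative toy paraphrase of just that decision; FrameState, NodeKind, and the helper are simplified stand-ins, not TensorFlow's real executor types.

  // Toy version of the output-frame selection visible above: outputs normally
  // stay in the producing frame, Enter pushes them into a child frame, and
  // Exit pops them back out to the parent frame.
  struct FrameState {
    FrameState* parent_frame = nullptr;  // Enclosing frame (loop nesting).
  };

  enum class NodeKind { kNormal, kEnter, kExit };

  FrameState* SelectOutputFrame(NodeKind kind, FrameState* input_frame,
                                FrameState* child_frame) {
    FrameState* output_frame = input_frame;
    if (kind == NodeKind::kEnter) {
      output_frame = child_frame;  // Found or created by the executor.
    } else if (kind == NodeKind::kExit) {
      output_frame = input_frame->parent_frame;
    }
    return output_frame;
  }
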
/external/webrtc/modules/audio_processing/
audio_processing_performance_unittest.cc
64 output_frame.resize(2); in AudioFrameData()
65 output_frame[0] = &output_frame_channels[0]; in AudioFrameData()
66 output_frame[1] = &output_frame_channels[max_frame_size]; in AudioFrameData()
70 std::vector<float*> output_frame; member
252 frame_data_.output_stream_config, &frame_data_.output_frame[0]); in ProcessCapture()
280 frame_data_.output_stream_config, &frame_data_.output_frame[0]); in ProcessRender()
audio_processing_impl_locking_unittest.cc
96 output_frame.resize(2); in AudioFrameData()
97 output_frame[0] = &output_frame_channels[0]; in AudioFrameData()
98 output_frame[1] = &output_frame_channels[max_frame_size]; in AudioFrameData()
105 std::vector<float*> output_frame; member
646 &frame_data_.output_frame[0]); in CallApmCaptureSide()
877 output_stream_config, &frame_data_.output_frame[0]); in CallApmRenderSide()
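
Both audio_processing test fixtures above build the same deinterleaved layout: one contiguous float buffer backs all channels, and a std::vector<float*> exposes it as the float* const* that AudioProcessing::ProcessStream() consumes. A self-contained sketch of that layout follows; the APM call itself is only indicated in a comment, since its configuration is omitted here.

  #include <cstddef>
  #include <vector>

  struct AudioFrameData {
    explicit AudioFrameData(std::size_t max_frame_size)
        : output_frame_channels(2 * max_frame_size) {
      output_frame.resize(2);
      output_frame[0] = &output_frame_channels[0];               // Channel 0.
      output_frame[1] = &output_frame_channels[max_frame_size];  // Channel 1.
    }
    std::vector<float> output_frame_channels;  // Backing storage, 2 channels.
    std::vector<float*> output_frame;          // Per-channel pointers.
  };

  // Usage against a configured AudioProcessing instance would then look like:
  //   apm->ProcessStream(input_ptrs, input_config, output_config,
  //                      &frame_data.output_frame[0]);
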
/external/libaom/av1/encoder/
temporal_filter.h
106 YV12_BUFFER_CONFIG *output_frame; member
323 YV12_BUFFER_CONFIG *output_frame);
temporal_filter.c
892 accum, count, tf_ctx->output_frame); in av1_tf_do_filtering_row()
898 const int filter_y_stride = tf_ctx->output_frame->y_stride; in av1_tf_do_filtering_row()
906 tf_ctx->output_frame->y_buffer + filter_offset, filter_y_stride, in av1_tf_do_filtering_row()
1169 YV12_BUFFER_CONFIG *output_frame) { in init_tf_ctx() argument
1175 tf_ctx->output_frame = output_frame; in init_tf_ctx()
1240 YV12_BUFFER_CONFIG *output_frame) { in av1_temporal_filter() argument
1254 compute_frame_diff, output_frame); in av1_temporal_filter()
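
The temporal_filter.c hits address the filtered output plane through the YV12_BUFFER_CONFIG stored in tf_ctx->output_frame. A small sketch of that addressing, assuming a libaom checkout and the 8-bit path; OutputPixel is a hypothetical helper, not an encoder function.

  #include <stdint.h>
  #include "aom_scale/yv12config.h"

  // Returns the address of luma pixel (row, col) in the filtered output frame,
  // using the plane pointer and stride the way av1_tf_do_filtering_row() does.
  static uint8_t *OutputPixel(YV12_BUFFER_CONFIG *output_frame, int row, int col) {
    return output_frame->y_buffer + row * output_frame->y_stride + col;
  }
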
/external/webrtc/modules/desktop_capture/win/
wgc_capture_session.h
45 HRESULT GetFrame(std::unique_ptr<DesktopFrame>* output_frame);
wgc_capture_session.cc
213 std::unique_ptr<DesktopFrame>* output_frame) { in GetFrame() argument
354 *output_frame = std::make_unique<WgcDesktopFrame>(size, row_data_length, in GetFrame()
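
WgcCaptureSession::GetFrame() hands the captured frame back through a std::unique_ptr out-parameter. A generic sketch of that idiom; Frame and its fields are placeholders, not WebRTC's DesktopFrame.

  #include <cstdint>
  #include <memory>
  #include <vector>

  struct Frame {
    int width = 0;
    int height = 0;
    std::vector<uint8_t> pixels;  // BGRA, width * height * 4 bytes.
  };

  bool GetFrame(std::unique_ptr<Frame>* output_frame) {
    auto frame = std::make_unique<Frame>();
    frame->width = 640;
    frame->height = 480;
    frame->pixels.resize(frame->width * frame->height * 4);
    *output_frame = std::move(frame);  // Transfer ownership to the caller.
    return true;
  }
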
/external/webrtc/modules/audio_processing/ns/
noise_suppressor.cc
97 rtc::ArrayView<float, kNsFrameSize> output_frame) { in OverlapAndAdd() argument
99 output_frame[i] = overlap_memory[i] + extended_frame[i]; in OverlapAndAdd()
103 output_frame.begin() + kOverlapSize); in OverlapAndAdd()
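
OverlapAndAdd() in noise_suppressor.cc mixes the stored tail of the previous frame into the first samples of the current one. A self-contained sketch of that step with assumed sizes (the real code uses rtc::ArrayView and the noise-suppressor constants):

  #include <algorithm>
  #include <array>
  #include <cstddef>

  constexpr std::size_t kNsFrameSize = 160;  // Assumed: 10 ms at 16 kHz.
  constexpr std::size_t kOverlapSize = 96;   // Assumed overlap length.
  constexpr std::size_t kExtendedSize = kNsFrameSize + kOverlapSize;

  void OverlapAndAdd(const std::array<float, kExtendedSize>& extended_frame,
                     std::array<float, kOverlapSize>& overlap_memory,
                     std::array<float, kNsFrameSize>& output_frame) {
    // First kOverlapSize samples: add the tail remembered from the last frame.
    for (std::size_t i = 0; i < kOverlapSize; ++i) {
      output_frame[i] = overlap_memory[i] + extended_frame[i];
    }
    // Remaining samples are copied straight from the extended frame.
    std::copy(extended_frame.begin() + kOverlapSize,
              extended_frame.begin() + kNsFrameSize,
              output_frame.begin() + kOverlapSize);
    // Save the new tail for the next call.
    std::copy(extended_frame.begin() + kNsFrameSize, extended_frame.end(),
              overlap_memory.begin());
  }
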
/external/tensorflow/tensorflow/lite/g3doc/tutorials/
pose_classification.ipynb
370 " output_frame = cv2.cvtColor(output_overlay, cv2.COLOR_RGB2BGR)\n",
371 " cv2.imwrite(os.path.join(images_out_folder, image_name), output_frame)\n",
/external/speex/doc/
manual.lyx
2413 speex_decode_int(dec_state, &bits, output_frame);
2428 output_frame
4542 speex_echo_cancellation(echo_state, input_frame, echo_frame, output_frame);
4562 output_frame
4607 speex_echo_cancellation(echo_state, input_frame, echo_frame, output_frame);
4690 speex_echo_capture(echo_state, input_frame, output_frame);
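
The manual excerpts above use the libspeexdsp echo canceller; a minimal sketch of those calls is below. Frame and filter lengths are typical example values, the capture/playback I/O that fills the buffers is omitted, and in a real application the echo state persists for the whole call so the adaptive filter can converge.

  #include <speex/speex_echo.h>

  void RunEchoCanceller(void) {
    const int frame_size = 160;                 // e.g. 20 ms at 8 kHz.
    const int filter_length = 10 * frame_size;  // ~200 ms echo tail.
    SpeexEchoState* echo_state = speex_echo_state_init(frame_size, filter_length);

    spx_int16_t input_frame[160] = {0};   // Microphone capture (filled elsewhere).
    spx_int16_t echo_frame[160] = {0};    // Signal that was sent to the speaker.
    spx_int16_t output_frame[160];        // Capture with the echo removed.
    // Called once per captured frame in a real application:
    speex_echo_cancellation(echo_state, input_frame, echo_frame, output_frame);

    speex_echo_state_destroy(echo_state);
  }
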
/external/libaom/doc/dev_guide/
av1_encoder.dox
456 the output_frame, which is the frame to be