Searched refs:AudioFrame (Results 1 – 25 of 105) sorted by relevance


/external/webrtc/api/audio/test/
audio_frame_unittest.cc
22 bool AllSamplesAre(int16_t sample, const AudioFrame& frame) { in AllSamplesAre()
42 AudioFrame frame; in TEST()
48 AudioFrame frame; in TEST()
55 AudioFrame frame; in TEST()
67 AudioFrame frame; in TEST()
70 AudioFrame::kPLC, AudioFrame::kVadActive, kNumChannelsMono); in TEST()
75 EXPECT_EQ(AudioFrame::kPLC, frame.speech_type_); in TEST()
76 EXPECT_EQ(AudioFrame::kVadActive, frame.vad_activity_); in TEST()
84 kSampleRateHz, AudioFrame::kPLC, AudioFrame::kVadActive, in TEST()
91 AudioFrame frame; in TEST()
[all …]
/external/webrtc/audio/utility/
audio_frame_operations.h
34 static void Add(const AudioFrame& frame_to_add, AudioFrame* result_frame);
39 RTC_DEPRECATED static int MonoToStereo(AudioFrame* frame);
44 RTC_DEPRECATED static int StereoToMono(AudioFrame* frame);
55 static int QuadToStereo(AudioFrame* frame);
70 static void DownmixChannels(size_t dst_channels, AudioFrame* frame);
77 AudioFrame* frame);
81 static void SwapStereoChannels(AudioFrame* frame);
88 static void Mute(AudioFrame* frame,
93 static void Mute(AudioFrame* frame);
96 static void ApplyHalfGain(AudioFrame* frame);
[all …]
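
The audio_frame_operations.h matches above outline the static AudioFrameOperations helper API (Add, DownmixChannels, SwapStereoChannels, Mute, ApplyHalfGain, plus the deprecated mono/stereo converters). A minimal usage sketch follows; the include paths, the webrtc namespace, and the MixAndAttenuate helper name are assumptions made only for illustration.

    // Hypothetical helper built from the AudioFrameOperations declarations above.
    #include "api/audio/audio_frame.h"
    #include "audio/utility/audio_frame_operations.h"

    void MixAndAttenuate(const webrtc::AudioFrame& contribution,
                         webrtc::AudioFrame* mix) {
      // Add() sums the samples of the two frames and merges their VAD activity
      // into *mix (see the audio_frame_operations.cc matches below).
      webrtc::AudioFrameOperations::Add(contribution, mix);
      // ApplyHalfGain() halves the amplitude of the mixed result.
      webrtc::AudioFrameOperations::ApplyHalfGain(mix);
    }
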
audio_frame_operations.cc
32 void AudioFrameOperations::Add(const AudioFrame& frame_to_add, in Add()
33 AudioFrame* result_frame) { in Add()
47 if (result_frame->vad_activity_ == AudioFrame::kVadActive || in Add()
48 frame_to_add.vad_activity_ == AudioFrame::kVadActive) { in Add()
49 result_frame->vad_activity_ = AudioFrame::kVadActive; in Add()
50 } else if (result_frame->vad_activity_ == AudioFrame::kVadUnknown || in Add()
51 frame_to_add.vad_activity_ == AudioFrame::kVadUnknown) { in Add()
52 result_frame->vad_activity_ = AudioFrame::kVadUnknown; in Add()
56 result_frame->speech_type_ = AudioFrame::kUndefined; in Add()
75 int AudioFrameOperations::MonoToStereo(AudioFrame* frame) { in MonoToStereo()
[all …]
audio_frame_operations_unittest.cc
27 AudioFrame frame_;
36 AudioFrame* frame) { in SetFrameData()
46 void SetFrameData(int16_t left, int16_t right, AudioFrame* frame) { in SetFrameData()
54 void SetFrameData(int16_t data, AudioFrame* frame) { in SetFrameData()
62 void VerifyFramesAreEqual(const AudioFrame& frame1, const AudioFrame& frame2) { in VerifyFramesAreEqual()
74 void InitFrame(AudioFrame* frame, in InitFrame()
81 RTC_DCHECK_GE(AudioFrame::kMaxDataSizeSamples, in InitFrame()
92 int16_t GetChannelData(const AudioFrame& frame, size_t channel, size_t index) { in GetChannelData()
98 void VerifyFrameDataBounds(const AudioFrame& frame, in VerifyFrameDataBounds()
112 frame_.samples_per_channel_ = AudioFrame::kMaxDataSizeSamples; in TEST_F()
[all …]
channel_mixer_unittest.cc
41 AudioFrame frame_;
44 void SetFrameData(int16_t data, AudioFrame* frame) { in SetFrameData()
52 void SetMonoData(int16_t center, AudioFrame* frame) { in SetMonoData()
61 void SetStereoData(int16_t left, int16_t right, AudioFrame* frame) { in SetStereoData()
78 AudioFrame* frame) { in SetFiveOneData()
101 AudioFrame* frame) { in SetSevenOneData()
118 bool AllSamplesEquals(int16_t sample, const AudioFrame* frame) { in AllSamplesEquals()
129 void VerifyFramesAreEqual(const AudioFrame& frame1, const AudioFrame& frame2) { in VerifyFramesAreEqual()
173 AudioFrame::kNormalSpeech, AudioFrame::kVadActive, in TEST_F()
222 AudioFrame mono_frame; in TEST_F()
[all …]
/external/webrtc/api/audio/
audio_frame.cc
22 AudioFrame::AudioFrame() { in AudioFrame() function in webrtc::AudioFrame
27 void swap(AudioFrame& a, AudioFrame& b) { in swap()
42 RTC_DCHECK_LE(length_a, AudioFrame::kMaxDataSizeSamples); in swap()
43 RTC_DCHECK_LE(length_b, AudioFrame::kMaxDataSizeSamples); in swap()
49 void AudioFrame::Reset() { in Reset()
54 void AudioFrame::ResetWithoutMuting() { in ResetWithoutMuting()
71 void AudioFrame::UpdateFrame(uint32_t timestamp, in UpdateFrame()
99 void AudioFrame::CopyFrom(const AudioFrame& src) { in CopyFrom()
124 void AudioFrame::UpdateProfileTimeStamp() { in UpdateProfileTimeStamp()
128 int64_t AudioFrame::ElapsedProfileTimeMs() const { in ElapsedProfileTimeMs()
[all …]
audio_frame.h
36 class AudioFrame {
61 AudioFrame();
63 friend void swap(AudioFrame& a, AudioFrame& b);
81 void CopyFrom(const AudioFrame& src);
172 RTC_DISALLOW_COPY_AND_ASSIGN(AudioFrame);
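
Between audio_frame.cc and audio_frame.h, the matches above cover the core AudioFrame surface: default construction, Reset(), UpdateFrame(), CopyFrom(), swap(), the kMaxDataSizeSamples bound, and the SpeechType/VADActivity enums used throughout the other results. The sketch below fills a 10 ms mono frame; the UpdateFrame() parameter order is inferred from the call sites listed in frame_combiner.cc and the unit tests, and the constants are illustrative.

    // Sketch, assuming UpdateFrame(timestamp, data, samples_per_channel,
    // sample_rate_hz, speech_type, vad_activity, num_channels).
    #include <cstddef>
    #include <cstdint>
    #include "api/audio/audio_frame.h"

    void FillSilentMonoFrame(webrtc::AudioFrame* frame) {
      constexpr int kRateHz = 16000;
      constexpr size_t kSamples = kRateHz / 100;  // 10 ms per frame.
      const int16_t data[kSamples] = {0};
      frame->UpdateFrame(/*timestamp=*/0, data, kSamples, kRateHz,
                         webrtc::AudioFrame::kNormalSpeech,
                         webrtc::AudioFrame::kVadPassive,
                         /*num_channels=*/1);
    }
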
/external/webrtc/modules/audio_mixer/
frame_combiner_unittest.cc
58 AudioFrame frame1;
59 AudioFrame frame2;
60 AudioFrame audio_frame_for_mixing;
65 sample_rate_hz, AudioFrame::kNormalSpeech, in SetUpFrames()
66 AudioFrame::kVadActive, number_of_channels); in SetUpFrames()
76 const std::vector<AudioFrame*> all_frames = {&frame1, &frame2}; in TEST()
82 const std::vector<AudioFrame*> frames_to_combine( in TEST()
97 AudioFrame::kMaxDataSizeSamples) { in TEST()
100 const std::vector<AudioFrame*> all_frames = {&frame1, &frame2}; in TEST()
106 const std::vector<AudioFrame*> frames_to_combine( in TEST()
[all …]
frame_combiner.cc
38 void SetAudioFrameFields(const std::vector<AudioFrame*>& mix_list, in SetAudioFrameFields()
42 AudioFrame* audio_frame_for_mixing) { in SetAudioFrameFields()
51 0, nullptr, samples_per_channel, sample_rate, AudioFrame::kUndefined, in SetAudioFrameFields()
52 AudioFrame::kVadUnknown, number_of_channels); in SetAudioFrameFields()
64 void MixFewFramesWithNoLimiter(const std::vector<AudioFrame*>& mix_list, in MixFewFramesWithNoLimiter()
65 AudioFrame* audio_frame_for_mixing) { in MixFewFramesWithNoLimiter()
77 void MixToFloatFrame(const std::vector<AudioFrame*>& mix_list, in MixToFloatFrame()
90 const AudioFrame* const frame = mix_list[i]; in MixToFloatFrame()
113 AudioFrame* audio_frame_for_mixing) { in InterleaveToAudioFrame()
137 AudioFrame::kMaxDataSizeSamples, in FrameCombiner()
[all …]
audio_mixer_impl_unittest.cc
42 void ResetFrame(AudioFrame* frame) { in ResetFrame()
48 frame->vad_activity_ = AudioFrame::kVadActive; in ResetFrame()
49 frame->speech_type_ = AudioFrame::kNormalSpeech; in ResetFrame()
62 AudioFrame frame_for_mixing;
79 (int sample_rate_hz, AudioFrame* audio_frame),
85 AudioFrame* fake_frame() { return &fake_frame_; } in fake_frame()
93 AudioFrame* audio_frame) { in FakeAudioFrameWithInfo()
101 AudioFrame fake_frame_;
119 const std::vector<AudioFrame>& frames, in MixAndCompare()
150 AudioFrame* mix_frame, in MixMonoAtGivenNativeRate()
[all …]
audio_frame_manipulator.h
22 uint32_t AudioMixerCalculateEnergy(const AudioFrame& audio_frame);
26 void Ramp(float start_gain, float target_gain, AudioFrame* audio_frame);
29 void RemixFrame(size_t target_number_of_channels, AudioFrame* frame);
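
audio_frame_manipulator.h contributes three free helpers used by the mixer: AudioMixerCalculateEnergy(), Ramp(), and RemixFrame(). A sketch of fading a frame in and forcing it to stereo is below; the include paths and the assumption that these functions live in the webrtc namespace come from the file locations shown, not from the results themselves.

    // Sketch only; everything beyond the three declarations above is an assumption.
    #include "api/audio/audio_frame.h"
    #include "modules/audio_mixer/audio_frame_manipulator.h"

    void PrepareForMixing(webrtc::AudioFrame* frame) {
      // Linear fade-in across the frame.
      webrtc::Ramp(/*start_gain=*/0.0f, /*target_gain=*/1.0f, frame);
      // Match a stereo mixer output (target channel count chosen for illustration).
      webrtc::RemixFrame(/*target_number_of_channels=*/2, frame);
    }
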
/external/webrtc/modules/audio_coding/acm2/
call_statistics.cc
19 void CallStatistics::DecodedByNetEq(AudioFrame::SpeechType speech_type, in DecodedByNetEq()
26 case AudioFrame::kNormalSpeech: { in DecodedByNetEq()
30 case AudioFrame::kPLC: { in DecodedByNetEq()
34 case AudioFrame::kCodecPLC: { in DecodedByNetEq()
38 case AudioFrame::kCNG: { in DecodedByNetEq()
42 case AudioFrame::kPLCCNG: { in DecodedByNetEq()
46 case AudioFrame::kUndefined: { in DecodedByNetEq()
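
The call_statistics.cc switch enumerates every AudioFrame::SpeechType that NetEq can report: kNormalSpeech, kPLC, kCodecPLC, kCNG, kPLCCNG, and kUndefined. A small classification sketch in the same spirit follows (IsConcealedOutput is a hypothetical name, not part of the results).

    // Illustrative only: group the SpeechType values listed above into
    // "concealment" versus everything else, reading the public speech_type_ member.
    #include "api/audio/audio_frame.h"

    bool IsConcealedOutput(const webrtc::AudioFrame& frame) {
      switch (frame.speech_type_) {
        case webrtc::AudioFrame::kPLC:
        case webrtc::AudioFrame::kCodecPLC:
        case webrtc::AudioFrame::kPLCCNG:
          return true;   // Packet-loss concealment produced this audio.
        default:
          return false;  // Normal speech, comfort noise, or undefined.
      }
    }
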
call_statistics_unittest.cc
38 call_stats.DecodedByNetEq(AudioFrame::kNormalSpeech, false); in TEST()
39 call_stats.DecodedByNetEq(AudioFrame::kPLC, false); in TEST()
40 call_stats.DecodedByNetEq(AudioFrame::kCodecPLC, false); in TEST()
41 call_stats.DecodedByNetEq(AudioFrame::kPLCCNG, true); // Let this be muted. in TEST()
42 call_stats.DecodedByNetEq(AudioFrame::kCNG, false); in TEST()
acm_remixing_unittest.cc
30 AudioFrame in; in TEST()
47 AudioFrame in; in TEST()
66 AudioFrame in; in TEST()
85 AudioFrame in; in TEST()
109 AudioFrame in; in TEST()
132 AudioFrame in; in TEST()
150 AudioFrame in; in TEST()
167 AudioFrame in; in TEST()
audio_coding_module.cc
59 int Add10MsData(const AudioFrame& audio_frame) override;
85 AudioFrame* audio_frame,
126 int Add10MsDataInternal(const AudioFrame& audio_frame, InputData* input_data)
151 int PreprocessToAddData(const AudioFrame& in_frame,
152 const AudioFrame** ptr_out)
175 AudioFrame preprocess_frame_ RTC_GUARDED_BY(acm_mutex_);
333 int AudioCodingModuleImpl::Add10MsData(const AudioFrame& audio_frame) { in Add10MsData()
343 int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame, in Add10MsDataInternal()
377 const AudioFrame* ptr_frame; in Add10MsDataInternal()
420 int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame, in PreprocessToAddData()
[all …]
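
audio_coding_module.cc shows the send-side entry point Add10MsData(const AudioFrame&) and its internal preprocessing path. A minimal caller sketch follows; the non-negative-on-success return convention and the public header path are assumptions beyond what the results show.

    // Sketch: hand one 10 ms AudioFrame to an existing AudioCodingModule.
    #include "api/audio/audio_frame.h"
    #include "modules/audio_coding/include/audio_coding_module.h"

    bool Send10Ms(webrtc::AudioCodingModule* acm,
                  const webrtc::AudioFrame& frame) {
      // Add10MsData() queues the frame for encoding; a negative return is
      // assumed to signal an error here.
      return acm->Add10MsData(frame) >= 0;
    }
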
acm_receiver_unittest.cc
89 AudioFrame frame; in InsertOnePacketOfSilence()
161 AudioFrame frame; in TEST_F()
192 const AudioFrame::VADActivity expected_vad_activity = in RunVerifyAudioFrame()
193 output_sample_rate_hz > 16000 ? AudioFrame::kVadActive in RunVerifyAudioFrame()
194 : AudioFrame::kVadPassive; in RunVerifyAudioFrame()
203 AudioFrame frame; in RunVerifyAudioFrame()
218 EXPECT_EQ(AudioFrame::kNormalSpeech, frame.speech_type_); in RunVerifyAudioFrame()
265 AudioFrame frame; in TEST_F()
273 EXPECT_EQ(AudioFrame::kVadPassive, frame.vad_activity_); in TEST_F()
297 AudioFrame frame; in TEST_F()
[all …]
/external/webrtc/audio/
audio_transport_impl.cc
35 AudioFrame* audio_frame) { in InitializeCaptureFrame()
51 AudioFrame* audio_frame) { in ProcessCaptureFrame()
68 int Resample(const AudioFrame& frame, in Resample()
115 AudioFrame::kMaxDataSizeBytes); in RecordedDataIsAvailable()
127 std::unique_ptr<AudioFrame> audio_frame(new AudioFrame()); in RecordedDataIsAvailable()
143 if (audio_frame->vad_activity_ != AudioFrame::kVadUnknown) { in RecordedDataIsAvailable()
144 bool vad_active = audio_frame->vad_activity_ == AudioFrame::kVadActive; in RecordedDataIsAvailable()
159 std::unique_ptr<AudioFrame> audio_frame_copy(new AudioFrame()); in RecordedDataIsAvailable()
191 AudioFrame::kMaxDataSizeBytes); in NeedMorePlayData()
227 AudioFrame::kMaxDataSizeBytes); in PullRenderData()
remix_resample_unittest.cc
41 AudioFrame src_frame_;
42 AudioFrame dst_frame_;
43 AudioFrame golden_frame_;
49 void SetMonoFrame(float data, int sample_rate_hz, AudioFrame* frame) { in SetMonoFrame()
61 void SetMonoFrame(float data, AudioFrame* frame) { in SetMonoFrame()
70 AudioFrame* frame) { in SetStereoFrame()
83 void SetStereoFrame(float left, float right, AudioFrame* frame) { in SetStereoFrame()
94 AudioFrame* frame) { in SetQuadFrame()
108 void VerifyParams(const AudioFrame& ref_frame, const AudioFrame& test_frame) { in VerifyParams()
117 float ComputeSNR(const AudioFrame& ref_frame, in ComputeSNR()
[all …]
audio_state_unittest.cc
65 (int sample_rate_hz, AudioFrame*),
83 std::vector<uint32_t> ComputeChannelLevels(AudioFrame* audio_frame) { in ComputeChannelLevels()
124 ::testing::Field(&AudioFrame::sample_rate_hz_, ::testing::Eq(8000)), in TEST()
125 ::testing::Field(&AudioFrame::num_channels_, ::testing::Eq(2u))))) in TEST()
128 ::testing::Invoke([](AudioFrame* audio_frame) { in TEST()
170 ::testing::Field(&AudioFrame::sample_rate_hz_, in TEST()
172 ::testing::Field(&AudioFrame::num_channels_, ::testing::Eq(1u))))) in TEST()
175 ::testing::Invoke([](AudioFrame* audio_frame) { in TEST()
182 ::testing::Field(&AudioFrame::sample_rate_hz_, in TEST()
184 ::testing::Field(&AudioFrame::num_channels_, ::testing::Eq(1u))))) in TEST()
[all …]
remix_resample.cc
21 void RemixAndResample(const AudioFrame& src_frame, in RemixAndResample()
23 AudioFrame* dst_frame) { in RemixAndResample()
38 AudioFrame* dst_frame) { in RemixAndResample()
41 int16_t downmixed_audio[AudioFrame::kMaxDataSizeSamples]; in RemixAndResample()
71 AudioFrame::kMaxDataSizeSamples); in RemixAndResample()
remix_resample.h
27 void RemixAndResample(const AudioFrame& src_frame,
29 AudioFrame* dst_frame);
39 AudioFrame* dst_frame);
/external/webrtc/modules/audio_coding/neteq/
neteq_unittest.cc
302 for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) { in TEST_F()
324 for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) { in TEST_F()
362 AudioFrame output; in CheckBgn()
393 ASSERT_EQ(AudioFrame::kNormalSpeech, output.speech_type_); in CheckBgn()
423 memset(output.mutable_data(), 1, AudioFrame::kMaxDataSizeBytes); in CheckBgn()
428 if (output.speech_type_ == AudioFrame::kPLCCNG) { in CheckBgn()
437 EXPECT_EQ(AudioFrame::kPLC, output.speech_type_); in CheckBgn()
503 EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_); in TEST_F()
517 EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_); in TEST_F()
533 EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_); in TEST_F()
[all …]
neteq_impl_unittest.cc
202 AudioFrame output; in TestDtmfPacket()
208 EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_); in TestDtmfPacket()
492 AudioFrame output; in TEST_F()
498 EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_); in TEST_F()
585 AudioFrame output; in TEST_F()
590 EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_); in TEST_F()
632 EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_); in TEST_F()
676 AudioFrame output; in TEST_F()
682 EXPECT_EQ(AudioFrame::kPLC, output.speech_type_); in TEST_F()
703 EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_) in TEST_F()
[all …]
/external/webrtc/modules/audio_processing/include/
audio_frame_proxies.h
16 class AudioFrame; variable
27 int ProcessAudioFrame(AudioProcessing* ap, AudioFrame* frame);
37 int ProcessReverseAudioFrame(AudioProcessing* ap, AudioFrame* frame);
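
audio_frame_proxies.h declares two free functions that run an AudioProcessing instance on an AudioFrame in place: ProcessAudioFrame() and ProcessReverseAudioFrame(). The sketch below threads a render and a capture frame through them; the kNoError comparison and the audio_processing.h include path are assumptions beyond what the results show.

    // Sketch using only the two proxy functions declared above.
    #include "api/audio/audio_frame.h"
    #include "modules/audio_processing/include/audio_frame_proxies.h"
    #include "modules/audio_processing/include/audio_processing.h"

    bool ProcessCaptureAndRender(webrtc::AudioProcessing* apm,
                                 webrtc::AudioFrame* render,
                                 webrtc::AudioFrame* capture) {
      // Far-end (render) audio first, so echo cancellation has its reference.
      if (webrtc::ProcessReverseAudioFrame(apm, render) !=
          webrtc::AudioProcessing::kNoError) {
        return false;
      }
      // Then process the near-end (capture) frame in place.
      return webrtc::ProcessAudioFrame(apm, capture) ==
             webrtc::AudioProcessing::kNoError;
    }
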
audio_frame_proxies.cc
18 int ProcessAudioFrame(AudioProcessing* ap, AudioFrame* frame) { in ProcessAudioFrame()
36 ? AudioFrame::VADActivity::kVadActive in ProcessAudioFrame()
37 : AudioFrame::VADActivity::kVadPassive; in ProcessAudioFrame()
43 int ProcessReverseAudioFrame(AudioProcessing* ap, AudioFrame* frame) { in ProcessReverseAudioFrame()
