/*
 *  Copyright 2018 The WebRTC Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "api/audio/audio_frame.h"

#include <stdint.h>
#include <string.h>  // memcmp

#include "test/gtest.h"

namespace webrtc {

namespace {

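// Returns true if every sample in the frame's backing buffer equals `sample`.
// The loop covers max_16bit_samples() so tests can verify the entire buffer,
// not just the active samples_per_channel() * num_channels() region.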
bool AllSamplesAre(int16_t sample, const AudioFrame& frame) {
  const int16_t* frame_data = frame.data();
  for (size_t i = 0; i < frame.max_16bit_samples(); i++) {
    if (frame_data[i] != sample) {
      return false;
    }
  }
  return true;
}

constexpr uint32_t kTimestamp = 27;
constexpr int kSampleRateHz = 16000;
constexpr size_t kNumChannelsMono = 1;
constexpr size_t kNumChannelsStereo = 2;
constexpr size_t kNumChannels5_1 = 6;
constexpr size_t kSamplesPerChannel = kSampleRateHz / 100;

}  // namespace

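// A default-constructed AudioFrame reports muted() and exposes an all-zero
// buffer.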
TEST(AudioFrameTest, FrameStartsMuted) {
  AudioFrame frame;
  EXPECT_TRUE(frame.muted());
  EXPECT_TRUE(AllSamplesAre(0, frame));
}

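// Calling mutable_data() unmutes the frame; the buffer it exposes is still
// all zeros.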
TEST(AudioFrameTest, UnmutedFrameIsInitiallyZeroed) {
  AudioFrame frame;
  frame.mutable_data();
  EXPECT_FALSE(frame.muted());
  EXPECT_TRUE(AllSamplesAre(0, frame));
}

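// Mute() re-mutes the frame and zeroes out previously written samples.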
TEST(AudioFrameTest, MutedFrameBufferIsZeroed) {
  AudioFrame frame;
  int16_t* frame_data = frame.mutable_data();
  for (size_t i = 0; i < frame.max_16bit_samples(); i++) {
    frame_data[i] = 17;
  }
  ASSERT_TRUE(AllSamplesAre(17, frame));
  frame.Mute();
  EXPECT_TRUE(frame.muted());
  EXPECT_TRUE(AllSamplesAre(0, frame));
}

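// UpdateFrame() with valid mono data copies the samples and metadata; passing
// nullptr as data mutes the frame instead.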
TEST(AudioFrameTest, UpdateFrameMono) {
  AudioFrame frame;
  int16_t samples[kNumChannelsMono * kSamplesPerChannel] = {17};
  frame.UpdateFrame(kTimestamp, samples, kSamplesPerChannel, kSampleRateHz,
                    AudioFrame::kPLC, AudioFrame::kVadActive, kNumChannelsMono);

  EXPECT_EQ(kTimestamp, frame.timestamp_);
  EXPECT_EQ(kSamplesPerChannel, frame.samples_per_channel());
  EXPECT_EQ(kSampleRateHz, frame.sample_rate_hz());
  EXPECT_EQ(AudioFrame::kPLC, frame.speech_type_);
  EXPECT_EQ(AudioFrame::kVadActive, frame.vad_activity_);
  EXPECT_EQ(kNumChannelsMono, frame.num_channels());
  EXPECT_EQ(CHANNEL_LAYOUT_MONO, frame.channel_layout());

  EXPECT_FALSE(frame.muted());
  EXPECT_EQ(0, memcmp(samples, frame.data(), sizeof(samples)));

  frame.UpdateFrame(kTimestamp, nullptr /* data */, kSamplesPerChannel,
                    kSampleRateHz, AudioFrame::kPLC, AudioFrame::kVadActive,
                    kNumChannelsMono);
  EXPECT_TRUE(frame.muted());
  EXPECT_TRUE(AllSamplesAre(0, frame));
}

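// The channel layout is derived from the channel count given to UpdateFrame().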
TEST(AudioFrameTest, UpdateFrameMultiChannel) {
  AudioFrame frame;
  frame.UpdateFrame(kTimestamp, nullptr /* data */, kSamplesPerChannel,
                    kSampleRateHz, AudioFrame::kPLC, AudioFrame::kVadActive,
                    kNumChannelsStereo);
  EXPECT_EQ(kSamplesPerChannel, frame.samples_per_channel());
  EXPECT_EQ(kNumChannelsStereo, frame.num_channels());
  EXPECT_EQ(CHANNEL_LAYOUT_STEREO, frame.channel_layout());

  frame.UpdateFrame(kTimestamp, nullptr /* data */, kSamplesPerChannel,
                    kSampleRateHz, AudioFrame::kPLC, AudioFrame::kVadActive,
                    kNumChannels5_1);
  EXPECT_EQ(kSamplesPerChannel, frame.samples_per_channel());
  EXPECT_EQ(kNumChannels5_1, frame.num_channels());
  EXPECT_EQ(CHANNEL_LAYOUT_5_1, frame.channel_layout());
}

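// CopyFrom() duplicates metadata, mute state, and sample data.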
TEST(AudioFrameTest, CopyFrom) {
  AudioFrame frame1;
  AudioFrame frame2;

  int16_t samples[kNumChannelsMono * kSamplesPerChannel] = {17};
  frame2.UpdateFrame(kTimestamp, samples, kSamplesPerChannel, kSampleRateHz,
                     AudioFrame::kPLC, AudioFrame::kVadActive,
                     kNumChannelsMono);
  frame1.CopyFrom(frame2);

  EXPECT_EQ(frame2.timestamp_, frame1.timestamp_);
  EXPECT_EQ(frame2.samples_per_channel_, frame1.samples_per_channel_);
  EXPECT_EQ(frame2.sample_rate_hz_, frame1.sample_rate_hz_);
  EXPECT_EQ(frame2.speech_type_, frame1.speech_type_);
  EXPECT_EQ(frame2.vad_activity_, frame1.vad_activity_);
  EXPECT_EQ(frame2.num_channels_, frame1.num_channels_);

  EXPECT_EQ(frame2.muted(), frame1.muted());
  EXPECT_EQ(0, memcmp(frame2.data(), frame1.data(), sizeof(samples)));

  frame2.UpdateFrame(kTimestamp, nullptr /* data */, kSamplesPerChannel,
                     kSampleRateHz, AudioFrame::kPLC, AudioFrame::kVadActive,
                     kNumChannelsMono);
  frame1.CopyFrom(frame2);

  EXPECT_EQ(frame2.muted(), frame1.muted());
  EXPECT_EQ(0, memcmp(frame2.data(), frame1.data(), sizeof(samples)));
}

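// swap() exchanges all state between two frames, including sample data and
// the absolute capture timestamp.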
TEST(AudioFrameTest, SwapFrames) {
  AudioFrame frame1, frame2;
  int16_t samples1[kNumChannelsMono * kSamplesPerChannel];
  for (size_t i = 0; i < kNumChannelsMono * kSamplesPerChannel; ++i) {
    samples1[i] = i;
  }
  frame1.UpdateFrame(kTimestamp, samples1, kSamplesPerChannel, kSampleRateHz,
                     AudioFrame::kPLC, AudioFrame::kVadActive,
                     kNumChannelsMono);
  frame1.set_absolute_capture_timestamp_ms(12345678);
  const auto frame1_channel_layout = frame1.channel_layout();

  int16_t samples2[(kNumChannelsMono + 1) * (kSamplesPerChannel + 1)];
  for (size_t i = 0; i < (kNumChannelsMono + 1) * (kSamplesPerChannel + 1);
       ++i) {
    samples2[i] = 1000 + i;
  }
  frame2.UpdateFrame(kTimestamp + 1, samples2, kSamplesPerChannel + 1,
                     kSampleRateHz + 1, AudioFrame::kNormalSpeech,
                     AudioFrame::kVadPassive, kNumChannelsMono + 1);
  const auto frame2_channel_layout = frame2.channel_layout();

  swap(frame1, frame2);

  EXPECT_EQ(kTimestamp + 1, frame1.timestamp_);
  ASSERT_EQ(kSamplesPerChannel + 1, frame1.samples_per_channel_);
  EXPECT_EQ(kSampleRateHz + 1, frame1.sample_rate_hz_);
  EXPECT_EQ(AudioFrame::kNormalSpeech, frame1.speech_type_);
  EXPECT_EQ(AudioFrame::kVadPassive, frame1.vad_activity_);
  ASSERT_EQ(kNumChannelsMono + 1, frame1.num_channels_);
  for (size_t i = 0; i < (kNumChannelsMono + 1) * (kSamplesPerChannel + 1);
       ++i) {
    EXPECT_EQ(samples2[i], frame1.data()[i]);
  }
  EXPECT_FALSE(frame1.absolute_capture_timestamp_ms());
  EXPECT_EQ(frame2_channel_layout, frame1.channel_layout());

  EXPECT_EQ(kTimestamp, frame2.timestamp_);
  ASSERT_EQ(kSamplesPerChannel, frame2.samples_per_channel_);
  EXPECT_EQ(kSampleRateHz, frame2.sample_rate_hz_);
  EXPECT_EQ(AudioFrame::kPLC, frame2.speech_type_);
  EXPECT_EQ(AudioFrame::kVadActive, frame2.vad_activity_);
  ASSERT_EQ(kNumChannelsMono, frame2.num_channels_);
  for (size_t i = 0; i < kNumChannelsMono * kSamplesPerChannel; ++i) {
    EXPECT_EQ(samples1[i], frame2.data()[i]);
  }
  EXPECT_EQ(12345678, frame2.absolute_capture_timestamp_ms());
  EXPECT_EQ(frame1_channel_layout, frame2.channel_layout());
}

}  // namespace webrtc