• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
3  *
4  *  Use of this source code is governed by a BSD-style license
5  *  that can be found in the LICENSE file in the root of the source
6  *  tree. An additional intellectual property rights grant can be found
7  *  in the file PATENTS.  All contributing project authors may
8  *  be found in the AUTHORS file in the root of the source tree.
9  */
10 
11 #include "modules/video_coding/utility/simulcast_test_fixture_impl.h"
12 
13 #include <algorithm>
14 #include <map>
15 #include <memory>
16 #include <vector>
17 
18 #include "api/video/encoded_image.h"
19 #include "api/video_codecs/sdp_video_format.h"
20 #include "api/video_codecs/video_encoder.h"
21 #include "common_video/libyuv/include/webrtc_libyuv.h"
22 #include "modules/video_coding/include/video_codec_interface.h"
23 #include "modules/video_coding/include/video_coding_defines.h"
24 #include "rtc_base/checks.h"
25 #include "test/gtest.h"
26 
27 using ::testing::_;
28 using ::testing::AllOf;
29 using ::testing::Field;
30 using ::testing::Return;
31 
32 namespace webrtc {
33 namespace test {
34 
35 namespace {
36 
// Full resolution of the generated test input frames; the three simulcast
// streams use this at 1/4, 1/2 and full size (see DefaultSettings()).
const int kDefaultWidth = 1280;
const int kDefaultHeight = 720;
// Number of simulcast streams configured by this fixture.
const int kNumberOfSimulcastStreams = 3;
// Solid Y/U/V color that test frames are filled with (see CreateImage());
// the decode callback verifies the output against these values.
const int kColorY = 66;
const int kColorU = 22;
const int kColorV = 33;
// Per-stream rate configuration in kbps, lowest-resolution stream first.
const int kMaxBitrates[kNumberOfSimulcastStreams] = {150, 600, 1200};
const int kMinBitrates[kNumberOfSimulcastStreams] = {50, 150, 600};
const int kTargetBitrates[kNumberOfSimulcastStreams] = {100, 450, 1000};
const float kMaxFramerates[kNumberOfSimulcastStreams] = {30, 30, 30};
// Temporal-layer count per stream: three layers for VP8/H264, none otherwise.
const int kDefaultTemporalLayerProfile[3] = {3, 3, 3};
const int kNoTemporalLayerProfile[3] = {0, 0, 0};

// Encoder capabilities/settings shared by every InitEncode() call below.
const VideoEncoder::Capabilities kCapabilities(false);
const VideoEncoder::Settings kSettings(kCapabilities, 1, 1200);
52 
// Stores the three given values into the first three slots of
// |expected_values|; convenience helper for per-stream expectation arrays.
template <typename T>
void SetExpectedValues3(T value0, T value1, T value2, T* expected_values) {
  const T values[] = {value0, value1, value2};
  std::copy(values, values + 3, expected_values);
}
59 
// Indices of the three I420 planes, plus the total plane count, used to
// index |plane_colors| in CreateImage().
enum PlaneType {
  kYPlane = 0,
  kUPlane = 1,
  kVPlane = 2,
  kNumOfPlanes = 3,
};
66 
67 }  // namespace
68 
69 class SimulcastTestFixtureImpl::TestEncodedImageCallback
70     : public EncodedImageCallback {
71  public:
TestEncodedImageCallback()72   TestEncodedImageCallback() {
73     memset(temporal_layer_, -1, sizeof(temporal_layer_));
74     memset(layer_sync_, false, sizeof(layer_sync_));
75   }
76 
OnEncodedImage(const EncodedImage & encoded_image,const CodecSpecificInfo * codec_specific_info,const RTPFragmentationHeader * fragmentation)77   Result OnEncodedImage(const EncodedImage& encoded_image,
78                         const CodecSpecificInfo* codec_specific_info,
79                         const RTPFragmentationHeader* fragmentation) override {
80     bool is_vp8 = (codec_specific_info->codecType == kVideoCodecVP8);
81     bool is_h264 = (codec_specific_info->codecType == kVideoCodecH264);
82     // Only store the base layer.
83     if (encoded_image.SpatialIndex().value_or(0) == 0) {
84       if (encoded_image._frameType == VideoFrameType::kVideoFrameKey) {
85         // TODO(nisse): Why not size() ?
86         encoded_key_frame_.SetEncodedData(
87             EncodedImageBuffer::Create(encoded_image.capacity()));
88         encoded_key_frame_.set_size(encoded_image.size());
89         encoded_key_frame_._frameType = VideoFrameType::kVideoFrameKey;
90         encoded_key_frame_._completeFrame = encoded_image._completeFrame;
91         memcpy(encoded_key_frame_.data(), encoded_image.data(),
92                encoded_image.size());
93       } else {
94         encoded_frame_.SetEncodedData(
95             EncodedImageBuffer::Create(encoded_image.capacity()));
96         encoded_frame_.set_size(encoded_image.size());
97         memcpy(encoded_frame_.data(), encoded_image.data(),
98                encoded_image.size());
99       }
100     }
101     if (is_vp8) {
102       layer_sync_[encoded_image.SpatialIndex().value_or(0)] =
103           codec_specific_info->codecSpecific.VP8.layerSync;
104       temporal_layer_[encoded_image.SpatialIndex().value_or(0)] =
105           codec_specific_info->codecSpecific.VP8.temporalIdx;
106     } else if (is_h264) {
107       layer_sync_[encoded_image.SpatialIndex().value_or(0)] =
108           codec_specific_info->codecSpecific.H264.base_layer_sync;
109       temporal_layer_[encoded_image.SpatialIndex().value_or(0)] =
110           codec_specific_info->codecSpecific.H264.temporal_idx;
111     }
112     return Result(Result::OK, encoded_image.Timestamp());
113   }
114   // This method only makes sense for VP8.
GetLastEncodedFrameInfo(int * temporal_layer,bool * layer_sync,int stream)115   void GetLastEncodedFrameInfo(int* temporal_layer,
116                                bool* layer_sync,
117                                int stream) {
118     *temporal_layer = temporal_layer_[stream];
119     *layer_sync = layer_sync_[stream];
120   }
GetLastEncodedKeyFrame(EncodedImage * encoded_key_frame)121   void GetLastEncodedKeyFrame(EncodedImage* encoded_key_frame) {
122     *encoded_key_frame = encoded_key_frame_;
123   }
GetLastEncodedFrame(EncodedImage * encoded_frame)124   void GetLastEncodedFrame(EncodedImage* encoded_frame) {
125     *encoded_frame = encoded_frame_;
126   }
127 
128  private:
129   EncodedImage encoded_key_frame_;
130   EncodedImage encoded_frame_;
131   int temporal_layer_[kNumberOfSimulcastStreams];
132   bool layer_sync_[kNumberOfSimulcastStreams];
133 };
134 
// Counts decoded frames and sanity-checks that decoded pixels match the
// solid color the fixture encodes (kColorY/U/V), within codec tolerance.
class SimulcastTestFixtureImpl::TestDecodedImageCallback
    : public DecodedImageCallback {
 public:
  TestDecodedImageCallback() : decoded_frames_(0) {}
  int32_t Decoded(VideoFrame& decoded_image) override {
    rtc::scoped_refptr<I420BufferInterface> i420_buffer =
        decoded_image.video_frame_buffer()->ToI420();
    // Only the first row of each plane is checked; frames are filled with a
    // uniform color, so one row is representative.
    for (int i = 0; i < decoded_image.width(); ++i) {
      EXPECT_NEAR(kColorY, i420_buffer->DataY()[i], 1);
    }

    // TODO(mikhal): Verify the difference between U,V and the original.
    for (int i = 0; i < i420_buffer->ChromaWidth(); ++i) {
      EXPECT_NEAR(kColorU, i420_buffer->DataU()[i], 4);
      EXPECT_NEAR(kColorV, i420_buffer->DataV()[i], 4);
    }
    decoded_frames_++;
    return 0;
  }
  int32_t Decoded(VideoFrame& decoded_image, int64_t decode_time_ms) override {
    // The fixture never exercises this overload.
    RTC_NOTREACHED();
    return -1;
  }
  void Decoded(VideoFrame& decoded_image,
               absl::optional<int32_t> decode_time_ms,
               absl::optional<uint8_t> qp) override {
    // Forward to the basic overload; decode time and QP are ignored here.
    Decoded(decoded_image);
  }
  // Number of frames delivered to this callback so far.
  int DecodedFrames() { return decoded_frames_; }

 private:
  int decoded_frames_;
};
168 
169 namespace {
170 
// Fills the visible |width| of every row in a plane with |value| and zeroes
// the remaining stride padding, so image size and allocated size (stride)
// can be told apart in later checks.
void SetPlane(uint8_t* data, uint8_t value, int width, int height, int stride) {
  uint8_t* row = data;
  for (int y = 0; y < height; ++y) {
    std::fill(row, row + width, value);
    std::fill(row + width, row + stride, uint8_t{0});
    row += stride;
  }
}
180 
181 // Fills in an I420Buffer from |plane_colors|.
CreateImage(const rtc::scoped_refptr<I420Buffer> & buffer,int plane_colors[kNumOfPlanes])182 void CreateImage(const rtc::scoped_refptr<I420Buffer>& buffer,
183                  int plane_colors[kNumOfPlanes]) {
184   SetPlane(buffer->MutableDataY(), plane_colors[0], buffer->width(),
185            buffer->height(), buffer->StrideY());
186 
187   SetPlane(buffer->MutableDataU(), plane_colors[1], buffer->ChromaWidth(),
188            buffer->ChromaHeight(), buffer->StrideU());
189 
190   SetPlane(buffer->MutableDataV(), plane_colors[2], buffer->ChromaWidth(),
191            buffer->ChromaHeight(), buffer->StrideV());
192 }
193 
ConfigureStream(int width,int height,int max_bitrate,int min_bitrate,int target_bitrate,float max_framerate,SimulcastStream * stream,int num_temporal_layers)194 void ConfigureStream(int width,
195                      int height,
196                      int max_bitrate,
197                      int min_bitrate,
198                      int target_bitrate,
199                      float max_framerate,
200                      SimulcastStream* stream,
201                      int num_temporal_layers) {
202   assert(stream);
203   stream->width = width;
204   stream->height = height;
205   stream->maxBitrate = max_bitrate;
206   stream->minBitrate = min_bitrate;
207   stream->targetBitrate = target_bitrate;
208   stream->maxFramerate = max_framerate;
209   if (num_temporal_layers >= 0) {
210     stream->numberOfTemporalLayers = num_temporal_layers;
211   }
212   stream->qpMax = 45;
213   stream->active = true;
214 }
215 
216 }  // namespace
217 
// Writes the fixture's canonical three-stream simulcast configuration into
// |settings| for |codec_type|. |temporal_layer_profile| supplies the number
// of temporal layers per stream. With |reverse_layer_order| set, the
// full-resolution stream goes into simulcastStream[0] and the quarter-size
// one into [2] instead of the usual low-to-high order.
void SimulcastTestFixtureImpl::DefaultSettings(
    VideoCodec* settings,
    const int* temporal_layer_profile,
    VideoCodecType codec_type,
    bool reverse_layer_order) {
  RTC_CHECK(settings);
  memset(settings, 0, sizeof(VideoCodec));
  settings->codecType = codec_type;
  // 96 to 127 dynamic payload types for video codecs
  settings->plType = 120;
  settings->startBitrate = 300;
  settings->minBitrate = 30;
  settings->maxBitrate = 0;
  settings->maxFramerate = 30;
  settings->width = kDefaultWidth;
  settings->height = kDefaultHeight;
  settings->numberOfSimulcastStreams = kNumberOfSimulcastStreams;
  settings->active = true;
  ASSERT_EQ(3, kNumberOfSimulcastStreams);
  // Maps stream rank (low/mid/high resolution) to a simulcastStream slot.
  int layer_order[3] = {0, 1, 2};
  if (reverse_layer_order) {
    layer_order[0] = 2;
    layer_order[2] = 0;
  }
  settings->timing_frame_thresholds = {kDefaultTimingFramesDelayMs,
                                       kDefaultOutlierFrameSizePercent};
  // Quarter, half and full resolution streams with increasing bitrates.
  ConfigureStream(kDefaultWidth / 4, kDefaultHeight / 4, kMaxBitrates[0],
                  kMinBitrates[0], kTargetBitrates[0], kMaxFramerates[0],
                  &settings->simulcastStream[layer_order[0]],
                  temporal_layer_profile[0]);
  ConfigureStream(kDefaultWidth / 2, kDefaultHeight / 2, kMaxBitrates[1],
                  kMinBitrates[1], kTargetBitrates[1], kMaxFramerates[1],
                  &settings->simulcastStream[layer_order[1]],
                  temporal_layer_profile[1]);
  ConfigureStream(kDefaultWidth, kDefaultHeight, kMaxBitrates[2],
                  kMinBitrates[2], kTargetBitrates[2], kMaxFramerates[2],
                  &settings->simulcastStream[layer_order[2]],
                  temporal_layer_profile[2]);
  // Codec-specific defaults; only VP8 and H264 are configured here.
  if (codec_type == kVideoCodecVP8) {
    settings->VP8()->denoisingOn = true;
    settings->VP8()->automaticResizeOn = false;
    settings->VP8()->frameDroppingOn = true;
    settings->VP8()->keyFrameInterval = 3000;
  } else {
    settings->H264()->frameDroppingOn = true;
    settings->H264()->keyFrameInterval = 3000;
  }
}
266 
// Creates encoder and decoder via the supplied factories for |video_format|
// and initializes them with the default simulcast settings. VP8 and H264 get
// the 3-3-3 temporal-layer profile; all other codecs get none.
SimulcastTestFixtureImpl::SimulcastTestFixtureImpl(
    std::unique_ptr<VideoEncoderFactory> encoder_factory,
    std::unique_ptr<VideoDecoderFactory> decoder_factory,
    SdpVideoFormat video_format)
    : codec_type_(PayloadStringToCodecType(video_format.name)) {
  encoder_ = encoder_factory->CreateVideoEncoder(video_format);
  decoder_ = decoder_factory->CreateVideoDecoder(video_format);
  SetUpCodec((codec_type_ == kVideoCodecVP8 || codec_type_ == kVideoCodecH264)
                 ? kDefaultTemporalLayerProfile
                 : kNoTemporalLayerProfile);
}
278 
SimulcastTestFixtureImpl::~SimulcastTestFixtureImpl() {
  // Release the codec resources acquired by InitEncode/InitDecode.
  encoder_->Release();
  decoder_->Release();
}
283 
// Registers callbacks, applies default settings with the given temporal
// layer profile, initializes encoder/decoder, and creates the input frame
// used by all tests.
void SimulcastTestFixtureImpl::SetUpCodec(const int* temporal_layer_profile) {
  encoder_->RegisterEncodeCompleteCallback(&encoder_callback_);
  decoder_->RegisterDecodeCompleteCallback(&decoder_callback_);
  DefaultSettings(&settings_, temporal_layer_profile, codec_type_);
  SetUpRateAllocator();
  EXPECT_EQ(0, encoder_->InitEncode(&settings_, kSettings));
  EXPECT_EQ(0, decoder_->InitDecode(&settings_, 1));
  input_buffer_ = I420Buffer::Create(kDefaultWidth, kDefaultHeight);
  input_buffer_->InitializeData();
  input_frame_ = std::make_unique<webrtc::VideoFrame>(
      webrtc::VideoFrame::Builder()
          .set_video_frame_buffer(input_buffer_)
          .set_rotation(webrtc::kVideoRotation_0)
          .set_timestamp_us(0)
          .build());
}
300 
SetUpRateAllocator()301 void SimulcastTestFixtureImpl::SetUpRateAllocator() {
302   rate_allocator_.reset(new SimulcastRateAllocator(settings_));
303 }
304 
SetRates(uint32_t bitrate_kbps,uint32_t fps)305 void SimulcastTestFixtureImpl::SetRates(uint32_t bitrate_kbps, uint32_t fps) {
306   encoder_->SetRates(VideoEncoder::RateControlParameters(
307       rate_allocator_->Allocate(
308           VideoBitrateAllocationParameters(bitrate_kbps * 1000, fps)),
309       static_cast<double>(fps)));
310 }
311 
RunActiveStreamsTest(const std::vector<bool> active_streams)312 void SimulcastTestFixtureImpl::RunActiveStreamsTest(
313     const std::vector<bool> active_streams) {
314   std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
315                                           VideoFrameType::kVideoFrameDelta);
316   UpdateActiveStreams(active_streams);
317   // Set sufficient bitrate for all streams so we can test active without
318   // bitrate being an issue.
319   SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30);
320 
321   ExpectStreams(VideoFrameType::kVideoFrameKey, active_streams);
322   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
323   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
324 
325   ExpectStreams(VideoFrameType::kVideoFrameDelta, active_streams);
326   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
327   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
328 }
329 
UpdateActiveStreams(const std::vector<bool> active_streams)330 void SimulcastTestFixtureImpl::UpdateActiveStreams(
331     const std::vector<bool> active_streams) {
332   ASSERT_EQ(static_cast<int>(active_streams.size()), kNumberOfSimulcastStreams);
333   for (size_t i = 0; i < active_streams.size(); ++i) {
334     settings_.simulcastStream[i].active = active_streams[i];
335   }
336   // Re initialize the allocator and encoder with the new settings.
337   // TODO(bugs.webrtc.org/8807): Currently, we do a full "hard"
338   // reconfiguration of the allocator and encoder. When the video bitrate
339   // allocator has support for updating active streams without a
340   // reinitialization, we can just call that here instead.
341   SetUpRateAllocator();
342   EXPECT_EQ(0, encoder_->InitEncode(&settings_, kSettings));
343 }
344 
ExpectStreams(VideoFrameType frame_type,const std::vector<bool> expected_streams_active)345 void SimulcastTestFixtureImpl::ExpectStreams(
346     VideoFrameType frame_type,
347     const std::vector<bool> expected_streams_active) {
348   ASSERT_EQ(static_cast<int>(expected_streams_active.size()),
349             kNumberOfSimulcastStreams);
350   if (expected_streams_active[0]) {
351     EXPECT_CALL(
352         encoder_callback_,
353         OnEncodedImage(
354             AllOf(Field(&EncodedImage::_frameType, frame_type),
355                   Field(&EncodedImage::_encodedWidth, kDefaultWidth / 4),
356                   Field(&EncodedImage::_encodedHeight, kDefaultHeight / 4)),
357             _, _))
358         .Times(1)
359         .WillRepeatedly(Return(
360             EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
361   }
362   if (expected_streams_active[1]) {
363     EXPECT_CALL(
364         encoder_callback_,
365         OnEncodedImage(
366             AllOf(Field(&EncodedImage::_frameType, frame_type),
367                   Field(&EncodedImage::_encodedWidth, kDefaultWidth / 2),
368                   Field(&EncodedImage::_encodedHeight, kDefaultHeight / 2)),
369             _, _))
370         .Times(1)
371         .WillRepeatedly(Return(
372             EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
373   }
374   if (expected_streams_active[2]) {
375     EXPECT_CALL(encoder_callback_,
376                 OnEncodedImage(
377                     AllOf(Field(&EncodedImage::_frameType, frame_type),
378                           Field(&EncodedImage::_encodedWidth, kDefaultWidth),
379                           Field(&EncodedImage::_encodedHeight, kDefaultHeight)),
380                     _, _))
381         .Times(1)
382         .WillRepeatedly(Return(
383             EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
384   }
385 }
386 
ExpectStreams(VideoFrameType frame_type,int expected_video_streams)387 void SimulcastTestFixtureImpl::ExpectStreams(VideoFrameType frame_type,
388                                              int expected_video_streams) {
389   ASSERT_GE(expected_video_streams, 0);
390   ASSERT_LE(expected_video_streams, kNumberOfSimulcastStreams);
391   std::vector<bool> expected_streams_active(kNumberOfSimulcastStreams, false);
392   for (int i = 0; i < expected_video_streams; ++i) {
393     expected_streams_active[i] = true;
394   }
395   ExpectStreams(frame_type, expected_streams_active);
396 }
397 
VerifyTemporalIdxAndSyncForAllSpatialLayers(TestEncodedImageCallback * encoder_callback,const int * expected_temporal_idx,const bool * expected_layer_sync,int num_spatial_layers)398 void SimulcastTestFixtureImpl::VerifyTemporalIdxAndSyncForAllSpatialLayers(
399     TestEncodedImageCallback* encoder_callback,
400     const int* expected_temporal_idx,
401     const bool* expected_layer_sync,
402     int num_spatial_layers) {
403   int temporal_layer = -1;
404   bool layer_sync = false;
405   for (int i = 0; i < num_spatial_layers; i++) {
406     encoder_callback->GetLastEncodedFrameInfo(&temporal_layer, &layer_sync, i);
407     EXPECT_EQ(expected_temporal_idx[i], temporal_layer);
408     EXPECT_EQ(expected_layer_sync[i], layer_sync);
409   }
410 }
411 
412 // We currently expect all active streams to generate a key frame even though
413 // a key frame was only requested for some of them.
// Requests a key frame on one stream at a time and verifies that each
// request makes all three streams emit a key frame.
void SimulcastTestFixtureImpl::TestKeyFrameRequestsOnAllStreams() {
  SetRates(kMaxBitrates[2], 30);  // To get all three streams.
  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                          VideoFrameType::kVideoFrameDelta);
  // First frame: all streams start with a key frame.
  ExpectStreams(VideoFrameType::kVideoFrameKey, kNumberOfSimulcastStreams);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  // Steady state: delta frames on all streams.
  ExpectStreams(VideoFrameType::kVideoFrameDelta, kNumberOfSimulcastStreams);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  // Key frame requested on stream 0 only; key frames expected everywhere.
  frame_types[0] = VideoFrameType::kVideoFrameKey;
  ExpectStreams(VideoFrameType::kVideoFrameKey, kNumberOfSimulcastStreams);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  // Key frame requested on stream 1 only.
  std::fill(frame_types.begin(), frame_types.end(),
            VideoFrameType::kVideoFrameDelta);
  frame_types[1] = VideoFrameType::kVideoFrameKey;
  ExpectStreams(VideoFrameType::kVideoFrameKey, kNumberOfSimulcastStreams);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  // Key frame requested on stream 2 only.
  std::fill(frame_types.begin(), frame_types.end(),
            VideoFrameType::kVideoFrameDelta);
  frame_types[2] = VideoFrameType::kVideoFrameKey;
  ExpectStreams(VideoFrameType::kVideoFrameKey, kNumberOfSimulcastStreams);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  // No requests pending: back to delta frames on all streams.
  std::fill(frame_types.begin(), frame_types.end(),
            VideoFrameType::kVideoFrameDelta);
  ExpectStreams(VideoFrameType::kVideoFrameDelta, kNumberOfSimulcastStreams);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}
450 
// Below the minimum bitrate of the lowest stream: only the base stream is
// encoded; the other two are padding only.
void SimulcastTestFixtureImpl::TestPaddingAllStreams() {
  // We should always encode the base layer.
  SetRates(kMinBitrates[0] - 1, 30);
  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                          VideoFrameType::kVideoFrameDelta);
  ExpectStreams(VideoFrameType::kVideoFrameKey, 1);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}
463 
// Exactly the minimum bitrate of the lowest stream: only that stream is
// encoded; the other two remain padding.
void SimulcastTestFixtureImpl::TestPaddingTwoStreams() {
  // We have just enough to get only the first stream and padding for two.
  SetRates(kMinBitrates[0], 30);
  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                          VideoFrameType::kVideoFrameDelta);
  ExpectStreams(VideoFrameType::kVideoFrameKey, 1);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}
476 
// Just below the threshold for enabling the second stream: only the first
// stream is encoded (maxed out), the other two are padding.
void SimulcastTestFixtureImpl::TestPaddingTwoStreamsOneMaxedOut() {
  // We are just below limit of sending second stream, so we should get
  // the first stream maxed out (at |maxBitrate|), and padding for two.
  SetRates(kTargetBitrates[0] + kMinBitrates[1] - 1, 30);
  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                          VideoFrameType::kVideoFrameDelta);
  ExpectStreams(VideoFrameType::kVideoFrameKey, 1);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}
490 
// Enough bitrate for the two lower streams: both are encoded, the highest
// stream is padding only.
void SimulcastTestFixtureImpl::TestPaddingOneStream() {
  // We have just enough to send two streams, so padding for one stream.
  SetRates(kTargetBitrates[0] + kMinBitrates[1], 30);
  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                          VideoFrameType::kVideoFrameDelta);
  ExpectStreams(VideoFrameType::kVideoFrameKey, 2);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  ExpectStreams(VideoFrameType::kVideoFrameDelta, 2);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}
503 
// Just below the threshold for enabling the third stream: the two lower
// streams are encoded at their caps, the highest stream stays padding.
void SimulcastTestFixtureImpl::TestPaddingOneStreamTwoMaxedOut() {
  // We are just below limit of sending third stream, so we should get
  // first stream's rate maxed out at |targetBitrate|, second at |maxBitrate|.
  SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] - 1, 30);
  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                          VideoFrameType::kVideoFrameDelta);
  ExpectStreams(VideoFrameType::kVideoFrameKey, 2);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  ExpectStreams(VideoFrameType::kVideoFrameDelta, 2);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}
517 
// Exactly enough bitrate for all three streams: all of them are encoded.
void SimulcastTestFixtureImpl::TestSendAllStreams() {
  // We have just enough to send all streams.
  SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2], 30);
  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                          VideoFrameType::kVideoFrameDelta);
  ExpectStreams(VideoFrameType::kVideoFrameKey, 3);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  ExpectStreams(VideoFrameType::kVideoFrameDelta, 3);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}
530 
// Ramps the total bitrate down and back up, verifying that upper streams are
// dropped as bitrate falls and re-enabled (with a key frame) as it recovers.
void SimulcastTestFixtureImpl::TestDisablingStreams() {
  // We should get three media streams.
  SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30);
  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                          VideoFrameType::kVideoFrameDelta);
  ExpectStreams(VideoFrameType::kVideoFrameKey, 3);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  ExpectStreams(VideoFrameType::kVideoFrameDelta, 3);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  // We should only get two streams and padding for one.
  SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
  ExpectStreams(VideoFrameType::kVideoFrameDelta, 2);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  // We should only get the first stream and padding for two.
  SetRates(kTargetBitrates[0] + kMinBitrates[1] / 2, 30);
  ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  // We don't have enough bitrate for the thumbnail stream, but we should get
  // it anyway with current configuration.
  SetRates(kTargetBitrates[0] - 1, 30);
  ExpectStreams(VideoFrameType::kVideoFrameDelta, 1);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  // We should only get two streams and padding for one.
  SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
  // We get a key frame because a new stream is being enabled.
  ExpectStreams(VideoFrameType::kVideoFrameKey, 2);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  // We should get all three streams.
  SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kTargetBitrates[2], 30);
  // We get a key frame because a new stream is being enabled.
  ExpectStreams(VideoFrameType::kVideoFrameKey, 3);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}
576 
TestActiveStreams()577 void SimulcastTestFixtureImpl::TestActiveStreams() {
578   // All streams on.
579   RunActiveStreamsTest({true, true, true});
580   // All streams off.
581   RunActiveStreamsTest({false, false, false});
582   // Low stream off.
583   RunActiveStreamsTest({false, true, true});
584   // Middle stream off.
585   RunActiveStreamsTest({true, false, true});
586   // High stream off.
587   RunActiveStreamsTest({true, true, false});
588   // Only low stream turned on.
589   RunActiveStreamsTest({true, false, false});
590   // Only middle stream turned on.
591   RunActiveStreamsTest({false, true, false});
592   // Only high stream turned on.
593   RunActiveStreamsTest({false, false, true});
594 }
595 
// Reconfigures the encoder to a single stream at |width|x|height| capped at
// 100 kbps, encodes one key frame, then restores the default simulcast
// configuration and encodes again.
void SimulcastTestFixtureImpl::SwitchingToOneStream(int width, int height) {
  const int* temporal_layer_profile = nullptr;
  // Disable all streams except the last and set the bitrate of the last to
  // 100 kbps. This verifies the way GTP switches to screenshare mode.
  if (codec_type_ == kVideoCodecVP8) {
    settings_.VP8()->numberOfTemporalLayers = 1;
    temporal_layer_profile = kDefaultTemporalLayerProfile;
  } else {
    temporal_layer_profile = kNoTemporalLayerProfile;
  }
  settings_.maxBitrate = 100;
  settings_.startBitrate = 100;
  settings_.width = width;
  settings_.height = height;
  // Zero out the lower streams; only the last stream stays usable.
  for (int i = 0; i < settings_.numberOfSimulcastStreams - 1; ++i) {
    settings_.simulcastStream[i].maxBitrate = 0;
    settings_.simulcastStream[i].width = settings_.width;
    settings_.simulcastStream[i].height = settings_.height;
    settings_.simulcastStream[i].numberOfTemporalLayers = 1;
  }
  // Setting input image to new resolution.
  input_buffer_ = I420Buffer::Create(settings_.width, settings_.height);
  input_buffer_->InitializeData();

  input_frame_ = std::make_unique<webrtc::VideoFrame>(
      webrtc::VideoFrame::Builder()
          .set_video_frame_buffer(input_buffer_)
          .set_rotation(webrtc::kVideoRotation_0)
          .set_timestamp_us(0)
          .build());

  // The for loop above did not set the bitrate of the highest layer.
  settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].maxBitrate =
      0;
  // The highest layer has to correspond to the non-simulcast resolution.
  settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].width =
      settings_.width;
  settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].height =
      settings_.height;
  SetUpRateAllocator();
  EXPECT_EQ(0, encoder_->InitEncode(&settings_, kSettings));

  // Encode one frame and verify.
  SetRates(kMaxBitrates[0] + kMaxBitrates[1], 30);
  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
                                          VideoFrameType::kVideoFrameDelta);
  // A single key frame at the requested (non-simulcast) resolution.
  EXPECT_CALL(
      encoder_callback_,
      OnEncodedImage(AllOf(Field(&EncodedImage::_frameType,
                                 VideoFrameType::kVideoFrameKey),
                           Field(&EncodedImage::_encodedWidth, width),
                           Field(&EncodedImage::_encodedHeight, height)),
                     _, _))
      .Times(1)
      .WillRepeatedly(Return(
          EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));

  // Switch back.
  DefaultSettings(&settings_, temporal_layer_profile, codec_type_);
  // Start at the lowest bitrate for enabling base stream.
  settings_.startBitrate = kMinBitrates[0];
  SetUpRateAllocator();
  EXPECT_EQ(0, encoder_->InitEncode(&settings_, kSettings));
  SetRates(settings_.startBitrate, 30);
  ExpectStreams(VideoFrameType::kVideoFrameKey, 1);
  // Resize |input_frame_| to the new resolution.
  input_buffer_ = I420Buffer::Create(settings_.width, settings_.height);
  input_buffer_->InitializeData();
  input_frame_ = std::make_unique<webrtc::VideoFrame>(
      webrtc::VideoFrame::Builder()
          .set_video_frame_buffer(input_buffer_)
          .set_rotation(webrtc::kVideoRotation_0)
          .set_timestamp_us(0)
          .build());
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
}
673 
TestSwitchingToOneStream()674 void SimulcastTestFixtureImpl::TestSwitchingToOneStream() {
675   SwitchingToOneStream(1024, 768);
676 }
677 
TestSwitchingToOneOddStream()678 void SimulcastTestFixtureImpl::TestSwitchingToOneOddStream() {
679   SwitchingToOneStream(1023, 769);
680 }
681 
TestSwitchingToOneSmallStream()682 void SimulcastTestFixtureImpl::TestSwitchingToOneSmallStream() {
683   SwitchingToOneStream(4, 4);
684 }
685 
686 // Test the layer pattern and sync flag for various spatial-temporal patterns.
687 // 3-3-3 pattern: 3 temporal layers for all spatial streams, so same
688 // temporal_layer id and layer_sync is expected for all streams.
TestSpatioTemporalLayers333PatternEncoder()689 void SimulcastTestFixtureImpl::TestSpatioTemporalLayers333PatternEncoder() {
690   bool is_h264 = codec_type_ == kVideoCodecH264;
691   TestEncodedImageCallback encoder_callback;
692   encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
693   SetRates(kMaxBitrates[2], 30);  // To get all three streams.
694 
695   int expected_temporal_idx[3] = {-1, -1, -1};
696   bool expected_layer_sync[3] = {false, false, false};
697 
698   // First frame: #0.
699   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
700   SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
701   SetExpectedValues3<bool>(!is_h264, !is_h264, !is_h264, expected_layer_sync);
702   VerifyTemporalIdxAndSyncForAllSpatialLayers(
703       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
704 
705   // Next frame: #1.
706   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
707   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
708   SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
709   SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
710   VerifyTemporalIdxAndSyncForAllSpatialLayers(
711       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
712 
713   // Next frame: #2.
714   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
715   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
716   SetExpectedValues3<int>(1, 1, 1, expected_temporal_idx);
717   SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
718   VerifyTemporalIdxAndSyncForAllSpatialLayers(
719       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
720 
721   // Next frame: #3.
722   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
723   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
724   SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
725   SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
726   VerifyTemporalIdxAndSyncForAllSpatialLayers(
727       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
728 
729   // Next frame: #4.
730   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
731   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
732   SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
733   SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
734   VerifyTemporalIdxAndSyncForAllSpatialLayers(
735       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
736 
737   // Next frame: #5.
738   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
739   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
740   SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
741   SetExpectedValues3<bool>(is_h264, is_h264, is_h264, expected_layer_sync);
742   VerifyTemporalIdxAndSyncForAllSpatialLayers(
743       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
744 }
745 
746 // Test the layer pattern and sync flag for various spatial-temporal patterns.
747 // 3-2-1 pattern: 3 temporal layers for lowest resolution, 2 for middle, and
748 // 1 temporal layer for highest resolution.
749 // For this profile, we expect the temporal index pattern to be:
750 // 1st stream: 0, 2, 1, 2, ....
751 // 2nd stream: 0, 1, 0, 1, ...
752 // 3rd stream: -1, -1, -1, -1, ....
753 // Regarding the 3rd stream, note that a stream/encoder with 1 temporal layer
754 // should always have temporal layer idx set to kNoTemporalIdx = -1.
755 // Since CodecSpecificInfoVP8.temporalIdx is uint8_t, this will wrap to 255.
756 // TODO(marpan): Although this seems safe for now, we should fix this.
TestSpatioTemporalLayers321PatternEncoder()757 void SimulcastTestFixtureImpl::TestSpatioTemporalLayers321PatternEncoder() {
758   EXPECT_EQ(codec_type_, kVideoCodecVP8);
759   int temporal_layer_profile[3] = {3, 2, 1};
760   SetUpCodec(temporal_layer_profile);
761   TestEncodedImageCallback encoder_callback;
762   encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
763   SetRates(kMaxBitrates[2], 30);  // To get all three streams.
764 
765   int expected_temporal_idx[3] = {-1, -1, -1};
766   bool expected_layer_sync[3] = {false, false, false};
767 
768   // First frame: #0.
769   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
770   SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
771   SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
772   VerifyTemporalIdxAndSyncForAllSpatialLayers(
773       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
774 
775   // Next frame: #1.
776   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
777   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
778   SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
779   SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
780   VerifyTemporalIdxAndSyncForAllSpatialLayers(
781       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
782 
783   // Next frame: #2.
784   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
785   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
786   SetExpectedValues3<int>(1, 0, 255, expected_temporal_idx);
787   SetExpectedValues3<bool>(true, false, false, expected_layer_sync);
788   VerifyTemporalIdxAndSyncForAllSpatialLayers(
789       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
790 
791   // Next frame: #3.
792   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
793   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
794   SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
795   SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
796   VerifyTemporalIdxAndSyncForAllSpatialLayers(
797       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
798 
799   // Next frame: #4.
800   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
801   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
802   SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
803   SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
804   VerifyTemporalIdxAndSyncForAllSpatialLayers(
805       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
806 
807   // Next frame: #5.
808   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
809   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
810   SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
811   SetExpectedValues3<bool>(false, true, false, expected_layer_sync);
812   VerifyTemporalIdxAndSyncForAllSpatialLayers(
813       &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
814 }
815 
TestStrideEncodeDecode()816 void SimulcastTestFixtureImpl::TestStrideEncodeDecode() {
817   TestEncodedImageCallback encoder_callback;
818   TestDecodedImageCallback decoder_callback;
819   encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
820   decoder_->RegisterDecodeCompleteCallback(&decoder_callback);
821 
822   SetRates(kMaxBitrates[2], 30);  // To get all three streams.
823   // Setting two (possibly) problematic use cases for stride:
824   // 1. stride > width 2. stride_y != stride_uv/2
825   int stride_y = kDefaultWidth + 20;
826   int stride_uv = ((kDefaultWidth + 1) / 2) + 5;
827   input_buffer_ = I420Buffer::Create(kDefaultWidth, kDefaultHeight, stride_y,
828                                      stride_uv, stride_uv);
829   input_frame_ = std::make_unique<webrtc::VideoFrame>(
830       webrtc::VideoFrame::Builder()
831           .set_video_frame_buffer(input_buffer_)
832           .set_rotation(webrtc::kVideoRotation_0)
833           .set_timestamp_us(0)
834           .build());
835 
836   // Set color.
837   int plane_offset[kNumOfPlanes];
838   plane_offset[kYPlane] = kColorY;
839   plane_offset[kUPlane] = kColorU;
840   plane_offset[kVPlane] = kColorV;
841   CreateImage(input_buffer_, plane_offset);
842 
843   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
844 
845   // Change color.
846   plane_offset[kYPlane] += 1;
847   plane_offset[kUPlane] += 1;
848   plane_offset[kVPlane] += 1;
849   CreateImage(input_buffer_, plane_offset);
850   input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
851   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
852 
853   EncodedImage encoded_frame;
854   // Only encoding one frame - so will be a key frame.
855   encoder_callback.GetLastEncodedKeyFrame(&encoded_frame);
856   EXPECT_EQ(0, decoder_->Decode(encoded_frame, false, 0));
857   encoder_callback.GetLastEncodedFrame(&encoded_frame);
858   decoder_->Decode(encoded_frame, false, 0);
859   EXPECT_EQ(2, decoder_callback.DecodedFrames());
860 }
861 
TestDecodeWidthHeightSet()862 void SimulcastTestFixtureImpl::TestDecodeWidthHeightSet() {
863   MockEncodedImageCallback encoder_callback;
864   MockDecodedImageCallback decoder_callback;
865 
866   EncodedImage encoded_frame[3];
867   SetRates(kMaxBitrates[2], 30);  // To get all three streams.
868   encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
869   decoder_->RegisterDecodeCompleteCallback(&decoder_callback);
870 
871   EXPECT_CALL(encoder_callback, OnEncodedImage(_, _, _))
872       .Times(3)
873       .WillRepeatedly(
874           ::testing::Invoke([&](const EncodedImage& encoded_image,
875                                 const CodecSpecificInfo* codec_specific_info,
876                                 const RTPFragmentationHeader* fragmentation) {
877             EXPECT_EQ(encoded_image._frameType, VideoFrameType::kVideoFrameKey);
878 
879             size_t index = encoded_image.SpatialIndex().value_or(0);
880             // TODO(nisse): Why not size()
881             encoded_frame[index].SetEncodedData(
882                 EncodedImageBuffer::Create(encoded_image.capacity()));
883             encoded_frame[index].set_size(encoded_image.size());
884             encoded_frame[index]._frameType = encoded_image._frameType;
885             encoded_frame[index]._completeFrame = encoded_image._completeFrame;
886             memcpy(encoded_frame[index].data(), encoded_image.data(),
887                    encoded_image.size());
888             return EncodedImageCallback::Result(
889                 EncodedImageCallback::Result::OK, 0);
890           }));
891   EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL));
892 
893   EXPECT_CALL(decoder_callback, Decoded(_, _, _))
894       .WillOnce(::testing::Invoke([](VideoFrame& decodedImage,
895                                      absl::optional<int32_t> decode_time_ms,
896                                      absl::optional<uint8_t> qp) {
897         EXPECT_EQ(decodedImage.width(), kDefaultWidth / 4);
898         EXPECT_EQ(decodedImage.height(), kDefaultHeight / 4);
899       }));
900   EXPECT_EQ(0, decoder_->Decode(encoded_frame[0], false, 0));
901 
902   EXPECT_CALL(decoder_callback, Decoded(_, _, _))
903       .WillOnce(::testing::Invoke([](VideoFrame& decodedImage,
904                                      absl::optional<int32_t> decode_time_ms,
905                                      absl::optional<uint8_t> qp) {
906         EXPECT_EQ(decodedImage.width(), kDefaultWidth / 2);
907         EXPECT_EQ(decodedImage.height(), kDefaultHeight / 2);
908       }));
909   EXPECT_EQ(0, decoder_->Decode(encoded_frame[1], false, 0));
910 
911   EXPECT_CALL(decoder_callback, Decoded(_, _, _))
912       .WillOnce(::testing::Invoke([](VideoFrame& decodedImage,
913                                      absl::optional<int32_t> decode_time_ms,
914                                      absl::optional<uint8_t> qp) {
915         EXPECT_EQ(decodedImage.width(), kDefaultWidth);
916         EXPECT_EQ(decodedImage.height(), kDefaultHeight);
917       }));
918   EXPECT_EQ(0, decoder_->Decode(encoded_frame[2], false, 0));
919 }
920 
921 }  // namespace test
922 }  // namespace webrtc
923