1 /*
2 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10 #include <algorithm> // max
11 #include <vector>
12
13 #include "testing/gtest/include/gtest/gtest.h"
14
15 #include "webrtc/base/bind.h"
16 #include "webrtc/base/checks.h"
17 #include "webrtc/base/criticalsection.h"
18 #include "webrtc/base/event.h"
19 #include "webrtc/base/logging.h"
20 #include "webrtc/base/platform_thread.h"
21 #include "webrtc/base/scoped_ptr.h"
22 #include "webrtc/call.h"
23 #include "webrtc/call/transport_adapter.h"
24 #include "webrtc/frame_callback.h"
25 #include "webrtc/modules/rtp_rtcp/include/rtp_header_parser.h"
26 #include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h"
27 #include "webrtc/modules/rtp_rtcp/source/rtcp_sender.h"
28 #include "webrtc/modules/rtp_rtcp/source/rtcp_utility.h"
29 #include "webrtc/modules/rtp_rtcp/source/rtp_format_vp9.h"
30 #include "webrtc/modules/video_coding/codecs/vp9/include/vp9.h"
31 #include "webrtc/system_wrappers/include/critical_section_wrapper.h"
32 #include "webrtc/system_wrappers/include/ref_count.h"
33 #include "webrtc/system_wrappers/include/sleep.h"
34 #include "webrtc/test/call_test.h"
35 #include "webrtc/test/configurable_frame_size_encoder.h"
36 #include "webrtc/test/fake_texture_frame.h"
37 #include "webrtc/test/null_transport.h"
38 #include "webrtc/test/testsupport/perf_test.h"
39 #include "webrtc/video/send_statistics_proxy.h"
40 #include "webrtc/video_frame.h"
41 #include "webrtc/video_send_stream.h"
42
43 namespace webrtc {
44
45 enum VideoFormat { kGeneric, kVP8, };
46
47 void ExpectEqualFrames(const VideoFrame& frame1, const VideoFrame& frame2);
48 void ExpectEqualTextureFrames(const VideoFrame& frame1,
49 const VideoFrame& frame2);
50 void ExpectEqualBufferFrames(const VideoFrame& frame1,
51 const VideoFrame& frame2);
52 void ExpectEqualFramesVector(const std::vector<VideoFrame>& frames1,
53 const std::vector<VideoFrame>& frames2);
54 VideoFrame CreateVideoFrame(int width, int height, uint8_t data);
55
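// Test fixture used by every test in this file. The TestNackRetransmission,
// TestPacketFragmentationSize and TestVp9NonFlexMode helpers implement logic
// that is shared by several of the TEST_F cases below.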
56 class VideoSendStreamTest : public test::CallTest {
57 protected:
58 void TestNackRetransmission(uint32_t retransmit_ssrc,
59 uint8_t retransmit_payload_type);
60 void TestPacketFragmentationSize(VideoFormat format, bool with_fec);
61
62 void TestVp9NonFlexMode(uint8_t num_temporal_layers,
63 uint8_t num_spatial_layers);
64 };
65
66 TEST_F(VideoSendStreamTest, CanStartStartedStream) {
67 Call::Config call_config;
68 CreateSenderCall(call_config);
69
70 test::NullTransport transport;
71 CreateSendConfig(1, 0, &transport);
72 CreateVideoStreams();
73 video_send_stream_->Start();
74 video_send_stream_->Start();
75 DestroyStreams();
76 }
77
78 TEST_F(VideoSendStreamTest, CanStopStoppedStream) {
79 Call::Config call_config;
80 CreateSenderCall(call_config);
81
82 test::NullTransport transport;
83 CreateSendConfig(1, 0, &transport);
84 CreateVideoStreams();
85 video_send_stream_->Stop();
86 video_send_stream_->Stop();
87 DestroyStreams();
88 }
89
90 TEST_F(VideoSendStreamTest, SupportsCName) {
91 static std::string kCName = "PjQatC14dGfbVwGPUOA9IH7RlsFDbWl4AhXEiDsBizo=";
92 class CNameObserver : public test::SendTest {
93 public:
94 CNameObserver() : SendTest(kDefaultTimeoutMs) {}
95
96 private:
97 Action OnSendRtcp(const uint8_t* packet, size_t length) override {
98 RTCPUtility::RTCPParserV2 parser(packet, length, true);
99 EXPECT_TRUE(parser.IsValid());
100
101 RTCPUtility::RTCPPacketTypes packet_type = parser.Begin();
102 while (packet_type != RTCPUtility::RTCPPacketTypes::kInvalid) {
103 if (packet_type == RTCPUtility::RTCPPacketTypes::kSdesChunk) {
104 EXPECT_EQ(parser.Packet().CName.CName, kCName);
105 observation_complete_.Set();
106 }
107
108 packet_type = parser.Iterate();
109 }
110
111 return SEND_PACKET;
112 }
113
114 void ModifyVideoConfigs(
115 VideoSendStream::Config* send_config,
116 std::vector<VideoReceiveStream::Config>* receive_configs,
117 VideoEncoderConfig* encoder_config) override {
118 send_config->rtp.c_name = kCName;
119 }
120
121 void PerformTest() override {
122 EXPECT_TRUE(Wait()) << "Timed out while waiting for RTCP with CNAME.";
123 }
124 } test;
125
126 RunBaseTest(&test);
127 }
128
129 TEST_F(VideoSendStreamTest, SupportsAbsoluteSendTime) {
130 class AbsoluteSendTimeObserver : public test::SendTest {
131 public:
132 AbsoluteSendTimeObserver() : SendTest(kDefaultTimeoutMs) {
133 EXPECT_TRUE(parser_->RegisterRtpHeaderExtension(
134 kRtpExtensionAbsoluteSendTime, test::kAbsSendTimeExtensionId));
135 }
136
137 Action OnSendRtp(const uint8_t* packet, size_t length) override {
138 RTPHeader header;
139 EXPECT_TRUE(parser_->Parse(packet, length, &header));
140
141 EXPECT_FALSE(header.extension.hasTransmissionTimeOffset);
142 EXPECT_TRUE(header.extension.hasAbsoluteSendTime);
143 EXPECT_EQ(header.extension.transmissionTimeOffset, 0);
144 EXPECT_GT(header.extension.absoluteSendTime, 0u);
145 observation_complete_.Set();
146
147 return SEND_PACKET;
148 }
149
150 void ModifyVideoConfigs(
151 VideoSendStream::Config* send_config,
152 std::vector<VideoReceiveStream::Config>* receive_configs,
153 VideoEncoderConfig* encoder_config) override {
154 send_config->rtp.extensions.clear();
155 send_config->rtp.extensions.push_back(RtpExtension(
156 RtpExtension::kAbsSendTime, test::kAbsSendTimeExtensionId));
157 }
158
159 void PerformTest() override {
160 EXPECT_TRUE(Wait()) << "Timed out while waiting for single RTP packet.";
161 }
162 } test;
163
164 RunBaseTest(&test);
165 }
166
167 TEST_F(VideoSendStreamTest, SupportsTransmissionTimeOffset) {
168 static const int kEncodeDelayMs = 5;
169 class TransmissionTimeOffsetObserver : public test::SendTest {
170 public:
171 TransmissionTimeOffsetObserver()
172 : SendTest(kDefaultTimeoutMs),
173 encoder_(Clock::GetRealTimeClock(), kEncodeDelayMs) {
174 EXPECT_TRUE(parser_->RegisterRtpHeaderExtension(
175 kRtpExtensionTransmissionTimeOffset, test::kTOffsetExtensionId));
176 }
177
178 private:
179 Action OnSendRtp(const uint8_t* packet, size_t length) override {
180 RTPHeader header;
181 EXPECT_TRUE(parser_->Parse(packet, length, &header));
182
183 EXPECT_TRUE(header.extension.hasTransmissionTimeOffset);
184 EXPECT_FALSE(header.extension.hasAbsoluteSendTime);
185 EXPECT_GT(header.extension.transmissionTimeOffset, 0);
186 EXPECT_EQ(header.extension.absoluteSendTime, 0u);
187 observation_complete_.Set();
188
189 return SEND_PACKET;
190 }
191
192 void ModifyVideoConfigs(
193 VideoSendStream::Config* send_config,
194 std::vector<VideoReceiveStream::Config>* receive_configs,
195 VideoEncoderConfig* encoder_config) override {
196 send_config->encoder_settings.encoder = &encoder_;
197 send_config->rtp.extensions.clear();
198 send_config->rtp.extensions.push_back(
199 RtpExtension(RtpExtension::kTOffset, test::kTOffsetExtensionId));
200 }
201
202 void PerformTest() override {
203 EXPECT_TRUE(Wait()) << "Timed out while waiting for a single RTP packet.";
204 }
205
206 test::DelayedEncoder encoder_;
207 } test;
208
209 RunBaseTest(&test);
210 }
211
212 TEST_F(VideoSendStreamTest, SupportsTransportWideSequenceNumbers) {
213 static const uint8_t kExtensionId = 13;
214 class TransportWideSequenceNumberObserver : public test::SendTest {
215 public:
216 TransportWideSequenceNumberObserver()
217 : SendTest(kDefaultTimeoutMs), encoder_(Clock::GetRealTimeClock()) {
218 EXPECT_TRUE(parser_->RegisterRtpHeaderExtension(
219 kRtpExtensionTransportSequenceNumber, kExtensionId));
220 }
221
222 private:
223 Action OnSendRtp(const uint8_t* packet, size_t length) override {
224 RTPHeader header;
225 EXPECT_TRUE(parser_->Parse(packet, length, &header));
226
227 EXPECT_TRUE(header.extension.hasTransportSequenceNumber);
228 EXPECT_FALSE(header.extension.hasTransmissionTimeOffset);
229 EXPECT_FALSE(header.extension.hasAbsoluteSendTime);
230
231 observation_complete_.Set();
232
233 return SEND_PACKET;
234 }
235
236 void ModifyVideoConfigs(
237 VideoSendStream::Config* send_config,
238 std::vector<VideoReceiveStream::Config>* receive_configs,
239 VideoEncoderConfig* encoder_config) override {
240 send_config->encoder_settings.encoder = &encoder_;
241 send_config->rtp.extensions.clear();
242 send_config->rtp.extensions.push_back(
243 RtpExtension(RtpExtension::kTransportSequenceNumber, kExtensionId));
244 }
245
246 void PerformTest() override {
247 EXPECT_TRUE(Wait()) << "Timed out while waiting for a single RTP packet.";
248 }
249
250 test::FakeEncoder encoder_;
251 } test;
252
253 RunBaseTest(&test);
254 }
255
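// Minimal ReceiveStatistics stub that reports a fixed loss state for a single
// SSRC. Tests use it to fake RTCP receiver reports (e.g. to trigger FEC or
// stream suspension) without running a real receiver.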
256 class FakeReceiveStatistics : public NullReceiveStatistics {
257 public:
258 FakeReceiveStatistics(uint32_t send_ssrc,
259 uint32_t last_sequence_number,
260 uint32_t cumulative_lost,
261 uint8_t fraction_lost)
262 : lossy_stats_(new LossyStatistician(last_sequence_number,
263 cumulative_lost,
264 fraction_lost)) {
265 stats_map_[send_ssrc] = lossy_stats_.get();
266 }
267
268 StatisticianMap GetActiveStatisticians() const override { return stats_map_; }
269
270 StreamStatistician* GetStatistician(uint32_t ssrc) const override {
271 return lossy_stats_.get();
272 }
273
274 private:
275 class LossyStatistician : public StreamStatistician {
276 public:
277 LossyStatistician(uint32_t extended_max_sequence_number,
278 uint32_t cumulative_lost,
279 uint8_t fraction_lost) {
280 stats_.fraction_lost = fraction_lost;
281 stats_.cumulative_lost = cumulative_lost;
282 stats_.extended_max_sequence_number = extended_max_sequence_number;
283 }
284 bool GetStatistics(RtcpStatistics* statistics, bool reset) override {
285 *statistics = stats_;
286 return true;
287 }
288 void GetDataCounters(size_t* bytes_received,
289 uint32_t* packets_received) const override {
290 *bytes_received = 0;
291 *packets_received = 0;
292 }
293 void GetReceiveStreamDataCounters(
294 StreamDataCounters* data_counters) const override {}
295 uint32_t BitrateReceived() const override { return 0; }
296 bool IsRetransmitOfOldPacket(const RTPHeader& header,
297 int64_t min_rtt) const override {
298 return false;
299 }
300
301 bool IsPacketInOrder(uint16_t sequence_number) const override {
302 return true;
303 }
304
305 RtcpStatistics stats_;
306 };
307
308 rtc::scoped_ptr<LossyStatistician> lossy_stats_;
309 StatisticianMap stats_map_;
310 };
311
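// Observer that feeds lossy receiver reports back to the sender and verifies
// that RED/ULPFEC packets start flowing, optionally also checking the
// abs-send-time and transport-wide sequence number header extensions.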
312 class FecObserver : public test::SendTest {
313 public:
314 explicit FecObserver(bool header_extensions_enabled)
315 : SendTest(VideoSendStreamTest::kDefaultTimeoutMs),
316 send_count_(0),
317 received_media_(false),
318 received_fec_(false),
319 header_extensions_enabled_(header_extensions_enabled) {}
320
321 private:
322 Action OnSendRtp(const uint8_t* packet, size_t length) override {
323 RTPHeader header;
324 EXPECT_TRUE(parser_->Parse(packet, length, &header));
325
326 // Send lossy receive reports to trigger FEC enabling.
327 if (send_count_++ % 2 != 0) {
328 // Receive statistics reporting having lost 50% of the packets.
329 FakeReceiveStatistics lossy_receive_stats(
330 VideoSendStreamTest::kVideoSendSsrcs[0], header.sequenceNumber,
331 send_count_ / 2, 127);
332 RTCPSender rtcp_sender(false, Clock::GetRealTimeClock(),
333 &lossy_receive_stats, nullptr,
334 transport_adapter_.get());
335
336 rtcp_sender.SetRTCPStatus(RtcpMode::kReducedSize);
337 rtcp_sender.SetRemoteSSRC(VideoSendStreamTest::kVideoSendSsrcs[0]);
338
339 RTCPSender::FeedbackState feedback_state;
340
341 EXPECT_EQ(0, rtcp_sender.SendRTCP(feedback_state, kRtcpRr));
342 }
343
344 int encapsulated_payload_type = -1;
345 if (header.payloadType == VideoSendStreamTest::kRedPayloadType) {
346 encapsulated_payload_type = static_cast<int>(packet[header.headerLength]);
347 if (encapsulated_payload_type !=
348 VideoSendStreamTest::kFakeVideoSendPayloadType)
349 EXPECT_EQ(VideoSendStreamTest::kUlpfecPayloadType,
350 encapsulated_payload_type);
351 } else {
352 EXPECT_EQ(VideoSendStreamTest::kFakeVideoSendPayloadType,
353 header.payloadType);
354 }
355
356 if (header_extensions_enabled_) {
357 EXPECT_TRUE(header.extension.hasAbsoluteSendTime);
358 uint32_t kHalf24BitsSpace = 0xFFFFFF / 2;
359 if (header.extension.absoluteSendTime <= kHalf24BitsSpace &&
360 prev_header_.extension.absoluteSendTime > kHalf24BitsSpace) {
361 // 24 bits wrap.
362 EXPECT_GT(prev_header_.extension.absoluteSendTime,
363 header.extension.absoluteSendTime);
364 } else {
365 EXPECT_GE(header.extension.absoluteSendTime,
366 prev_header_.extension.absoluteSendTime);
367 }
368 EXPECT_TRUE(header.extension.hasTransportSequenceNumber);
369 uint16_t seq_num_diff = header.extension.transportSequenceNumber -
370 prev_header_.extension.transportSequenceNumber;
371 EXPECT_EQ(1, seq_num_diff);
372 }
373
374 if (encapsulated_payload_type != -1) {
375 if (encapsulated_payload_type ==
376 VideoSendStreamTest::kUlpfecPayloadType) {
377 received_fec_ = true;
378 } else {
379 received_media_ = true;
380 }
381 }
382
383 if (received_media_ && received_fec_ && send_count_ > 100)
384 observation_complete_.Set();
385
386 prev_header_ = header;
387
388 return SEND_PACKET;
389 }
390
391 void ModifyVideoConfigs(
392 VideoSendStream::Config* send_config,
393 std::vector<VideoReceiveStream::Config>* receive_configs,
394 VideoEncoderConfig* encoder_config) override {
395 transport_adapter_.reset(
396 new internal::TransportAdapter(send_config->send_transport));
397 transport_adapter_->Enable();
398 send_config->rtp.fec.red_payload_type =
399 VideoSendStreamTest::kRedPayloadType;
400 send_config->rtp.fec.ulpfec_payload_type =
401 VideoSendStreamTest::kUlpfecPayloadType;
402 if (header_extensions_enabled_) {
403 send_config->rtp.extensions.push_back(RtpExtension(
404 RtpExtension::kAbsSendTime, test::kAbsSendTimeExtensionId));
405 send_config->rtp.extensions.push_back(
406 RtpExtension(RtpExtension::kTransportSequenceNumber,
407 test::kTransportSequenceNumberExtensionId));
408 }
409 }
410
411 void PerformTest() override {
412 EXPECT_TRUE(Wait()) << "Timed out waiting for FEC and media packets.";
413 }
414
415 rtc::scoped_ptr<internal::TransportAdapter> transport_adapter_;
416 int send_count_;
417 bool received_media_;
418 bool received_fec_;
419 bool header_extensions_enabled_;
420 RTPHeader prev_header_;
421 };
422
423 TEST_F(VideoSendStreamTest, SupportsFecWithExtensions) {
424 FecObserver test(true);
425
426 RunBaseTest(&test);
427 }
428
429 TEST_F(VideoSendStreamTest, SupportsFecWithoutExtensions) {
430 FecObserver test(false);
431
432 RunBaseTest(&test);
433 }
434
435 void VideoSendStreamTest::TestNackRetransmission(
436 uint32_t retransmit_ssrc,
437 uint8_t retransmit_payload_type) {
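// Observer that NACKs the second RTP packet once three packets have been seen,
// then waits for the retransmission to arrive on the expected SSRC and payload
// type (media SSRC for plain NACK, RTX SSRC for NACK-over-RTX).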
438 class NackObserver : public test::SendTest {
439 public:
440 explicit NackObserver(uint32_t retransmit_ssrc,
441 uint8_t retransmit_payload_type)
442 : SendTest(kDefaultTimeoutMs),
443 send_count_(0),
444 retransmit_ssrc_(retransmit_ssrc),
445 retransmit_payload_type_(retransmit_payload_type),
446 nacked_sequence_number_(-1) {
447 }
448
449 private:
450 Action OnSendRtp(const uint8_t* packet, size_t length) override {
451 RTPHeader header;
452 EXPECT_TRUE(parser_->Parse(packet, length, &header));
453
454 // Nack second packet after receiving the third one.
455 if (++send_count_ == 3) {
456 uint16_t nack_sequence_number = header.sequenceNumber - 1;
457 nacked_sequence_number_ = nack_sequence_number;
458 NullReceiveStatistics null_stats;
459 RTCPSender rtcp_sender(false, Clock::GetRealTimeClock(), &null_stats,
460 nullptr, transport_adapter_.get());
461
462 rtcp_sender.SetRTCPStatus(RtcpMode::kReducedSize);
463 rtcp_sender.SetRemoteSSRC(kVideoSendSsrcs[0]);
464
465 RTCPSender::FeedbackState feedback_state;
466
467 EXPECT_EQ(0,
468 rtcp_sender.SendRTCP(
469 feedback_state, kRtcpNack, 1, &nack_sequence_number));
470 }
471
472 uint16_t sequence_number = header.sequenceNumber;
473
474 if (header.ssrc == retransmit_ssrc_ &&
475 retransmit_ssrc_ != kVideoSendSsrcs[0]) {
476 // Not kVideoSendSsrcs[0], assume correct RTX packet. Extract sequence
477 // number.
478 const uint8_t* rtx_header = packet + header.headerLength;
479 sequence_number = (rtx_header[0] << 8) + rtx_header[1];
480 }
481
482 if (sequence_number == nacked_sequence_number_) {
483 EXPECT_EQ(retransmit_ssrc_, header.ssrc);
484 EXPECT_EQ(retransmit_payload_type_, header.payloadType);
485 observation_complete_.Set();
486 }
487
488 return SEND_PACKET;
489 }
490
491 void ModifyVideoConfigs(
492 VideoSendStream::Config* send_config,
493 std::vector<VideoReceiveStream::Config>* receive_configs,
494 VideoEncoderConfig* encoder_config) override {
495 transport_adapter_.reset(
496 new internal::TransportAdapter(send_config->send_transport));
497 transport_adapter_->Enable();
498 send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
499 send_config->rtp.rtx.payload_type = retransmit_payload_type_;
500 if (retransmit_ssrc_ != kVideoSendSsrcs[0])
501 send_config->rtp.rtx.ssrcs.push_back(retransmit_ssrc_);
502 }
503
504 void PerformTest() override {
505 EXPECT_TRUE(Wait()) << "Timed out while waiting for NACK retransmission.";
506 }
507
508 rtc::scoped_ptr<internal::TransportAdapter> transport_adapter_;
509 int send_count_;
510 uint32_t retransmit_ssrc_;
511 uint8_t retransmit_payload_type_;
512 int nacked_sequence_number_;
513 } test(retransmit_ssrc, retransmit_payload_type);
514
515 RunBaseTest(&test);
516 }
517
518 TEST_F(VideoSendStreamTest, RetransmitsNack) {
519 // Normal NACKs should use the send SSRC.
520 TestNackRetransmission(kVideoSendSsrcs[0], kFakeVideoSendPayloadType);
521 }
522
523 TEST_F(VideoSendStreamTest, RetransmitsNackOverRtx) {
524 // NACKs over RTX should use a separate SSRC.
525 TestNackRetransmission(kSendRtxSsrcs[0], kSendRtxPayloadType);
526 }
527
528 void VideoSendStreamTest::TestPacketFragmentationSize(VideoFormat format,
529 bool with_fec) {
530 // Use a fake encoder to output a frame of every size in the range [90, 290],
531 // for each size making sure that the exact number of payload bytes received
532 // is correct and that packets are fragmented to respect max packet size.
533 static const size_t kMaxPacketSize = 128;
534 static const size_t start = 90;
535 static const size_t stop = 290;
536
537 // Observer that verifies that the expected number of packets and bytes
538 // arrive for each frame size, from start_size to stop_size.
539 class FrameFragmentationTest : public test::SendTest,
540 public EncodedFrameObserver {
541 public:
542 FrameFragmentationTest(size_t max_packet_size,
543 size_t start_size,
544 size_t stop_size,
545 bool test_generic_packetization,
546 bool use_fec)
547 : SendTest(kLongTimeoutMs),
548 encoder_(stop),
549 max_packet_size_(max_packet_size),
550 stop_size_(stop_size),
551 test_generic_packetization_(test_generic_packetization),
552 use_fec_(use_fec),
553 packet_count_(0),
554 accumulated_size_(0),
555 accumulated_payload_(0),
556 fec_packet_received_(false),
557 current_size_rtp_(start_size),
558 current_size_frame_(static_cast<int32_t>(start_size)) {
559 // Fragmentation is required; this test doesn't make sense without it.
560 encoder_.SetFrameSize(start_size);
561 RTC_DCHECK_GT(stop_size, max_packet_size);
562 }
563
564 private:
565 Action OnSendRtp(const uint8_t* packet, size_t size) override {
566 size_t length = size;
567 RTPHeader header;
568 EXPECT_TRUE(parser_->Parse(packet, length, &header));
569
570 EXPECT_LE(length, max_packet_size_);
571
572 if (use_fec_) {
573 uint8_t payload_type = packet[header.headerLength];
574 bool is_fec = header.payloadType == kRedPayloadType &&
575 payload_type == kUlpfecPayloadType;
576 if (is_fec) {
577 fec_packet_received_ = true;
578 return SEND_PACKET;
579 }
580 }
581
582 accumulated_size_ += length;
583
584 if (use_fec_)
585 TriggerLossReport(header);
586
587 if (test_generic_packetization_) {
588 size_t overhead = header.headerLength + header.paddingLength;
589 // Only remove payload header and RED header if the packet actually
590 // contains payload.
591 if (length > overhead) {
592 overhead += (1 /* Generic header */);
593 if (use_fec_)
594 overhead += 1; // RED for FEC header.
595 }
596 EXPECT_GE(length, overhead);
597 accumulated_payload_ += length - overhead;
598 }
599
600 // Marker bit set indicates last packet of a frame.
601 if (header.markerBit) {
602 if (use_fec_ && accumulated_payload_ == current_size_rtp_ - 1) {
603 // With FEC enabled, frame size is incremented asynchronously, so
604 // "old" frames one byte too small may arrive. Accept, but don't
605 // increase expected frame size.
606 accumulated_size_ = 0;
607 accumulated_payload_ = 0;
608 return SEND_PACKET;
609 }
610
611 EXPECT_GE(accumulated_size_, current_size_rtp_);
612 if (test_generic_packetization_) {
613 EXPECT_EQ(current_size_rtp_, accumulated_payload_);
614 }
615
616 // Last packet of frame; reset counters.
617 accumulated_size_ = 0;
618 accumulated_payload_ = 0;
619 if (current_size_rtp_ == stop_size_) {
620 // Done! (Don't increase the size again; more packets may still arrive at stop_size.)
621 observation_complete_.Set();
622 } else {
623 // Increase next expected frame size. If testing with FEC, make sure
624 // a FEC packet has been received for this frame size before
625 // proceeding, to make sure that redundancy packets don't exceed
626 // size limit.
627 if (!use_fec_) {
628 ++current_size_rtp_;
629 } else if (fec_packet_received_) {
630 fec_packet_received_ = false;
631 ++current_size_rtp_;
632 ++current_size_frame_;
633 }
634 }
635 }
636
637 return SEND_PACKET;
638 }
639
640 void TriggerLossReport(const RTPHeader& header) {
641 // Send lossy receive reports to trigger FEC enabling.
642 if (packet_count_++ % 2 != 0) {
643 // Receive statistics reporting having lost 50% of the packets.
644 FakeReceiveStatistics lossy_receive_stats(
645 kVideoSendSsrcs[0], header.sequenceNumber, packet_count_ / 2, 127);
646 RTCPSender rtcp_sender(false, Clock::GetRealTimeClock(),
647 &lossy_receive_stats, nullptr,
648 transport_adapter_.get());
649
650 rtcp_sender.SetRTCPStatus(RtcpMode::kReducedSize);
651 rtcp_sender.SetRemoteSSRC(kVideoSendSsrcs[0]);
652
653 RTCPSender::FeedbackState feedback_state;
654
655 EXPECT_EQ(0, rtcp_sender.SendRTCP(feedback_state, kRtcpRr));
656 }
657 }
658
659 virtual void EncodedFrameCallback(const EncodedFrame& encoded_frame) {
660 // Increase frame size for next encoded frame, in the context of the
661 // encoder thread.
662 if (!use_fec_ &&
663 current_size_frame_.Value() < static_cast<int32_t>(stop_size_)) {
664 ++current_size_frame_;
665 }
666 encoder_.SetFrameSize(static_cast<size_t>(current_size_frame_.Value()));
667 }
668
669 Call::Config GetSenderCallConfig() override {
670 Call::Config config;
671 const int kMinBitrateBps = 30000;
672 config.bitrate_config.min_bitrate_bps = kMinBitrateBps;
673 return config;
674 }
675
676 void ModifyVideoConfigs(
677 VideoSendStream::Config* send_config,
678 std::vector<VideoReceiveStream::Config>* receive_configs,
679 VideoEncoderConfig* encoder_config) override {
680 transport_adapter_.reset(
681 new internal::TransportAdapter(send_config->send_transport));
682 transport_adapter_->Enable();
683 if (use_fec_) {
684 send_config->rtp.fec.red_payload_type = kRedPayloadType;
685 send_config->rtp.fec.ulpfec_payload_type = kUlpfecPayloadType;
686 }
687
688 if (!test_generic_packetization_)
689 send_config->encoder_settings.payload_name = "VP8";
690
691 send_config->encoder_settings.encoder = &encoder_;
692 send_config->rtp.max_packet_size = kMaxPacketSize;
693 send_config->post_encode_callback = this;
694
695 // Make sure there is at least one extension header, to make the RTP
696 // header larger than the base length of 12 bytes.
697 EXPECT_FALSE(send_config->rtp.extensions.empty());
698 }
699
700 void PerformTest() override {
701 EXPECT_TRUE(Wait()) << "Timed out while observing incoming RTP packets.";
702 }
703
704 rtc::scoped_ptr<internal::TransportAdapter> transport_adapter_;
705 test::ConfigurableFrameSizeEncoder encoder_;
706
707 const size_t max_packet_size_;
708 const size_t stop_size_;
709 const bool test_generic_packetization_;
710 const bool use_fec_;
711
712 uint32_t packet_count_;
713 size_t accumulated_size_;
714 size_t accumulated_payload_;
715 bool fec_packet_received_;
716
717 size_t current_size_rtp_;
718 Atomic32 current_size_frame_;
719 };
720
721 // Don't auto-increment the frame size when FEC is used; keep sending the
722 // current frame size until a FEC packet has been received.
723 FrameFragmentationTest test(
724 kMaxPacketSize, start, stop, format == kGeneric, with_fec);
725
726 RunBaseTest(&test);
727 }
728
729 // TODO(sprang): Is there any way of speeding up these tests?
730 TEST_F(VideoSendStreamTest, FragmentsGenericAccordingToMaxPacketSize) {
731 TestPacketFragmentationSize(kGeneric, false);
732 }
733
734 TEST_F(VideoSendStreamTest, FragmentsGenericAccordingToMaxPacketSizeWithFec) {
735 TestPacketFragmentationSize(kGeneric, true);
736 }
737
738 TEST_F(VideoSendStreamTest, FragmentsVp8AccordingToMaxPacketSize) {
739 TestPacketFragmentationSize(kVP8, false);
740 }
741
742 TEST_F(VideoSendStreamTest, FragmentsVp8AccordingToMaxPacketSizeWithFec) {
743 TestPacketFragmentationSize(kVP8, true);
744 }
745
746 // The test will go through a number of phases.
747 // 1. Start sending packets.
748 // 2. As soon as the RTP stream has been detected, signal a low REMB value to
749 // suspend the stream.
750 // 3. Wait until |kSuspendTimeFrames| have been captured without seeing any RTP
751 // packets.
752 // 4. Signal a high REMB and then wait for the RTP stream to start again.
753 // When the stream is detected again, and the stats show that the stream
754 // is no longer suspended, the test ends.
755 TEST_F(VideoSendStreamTest, SuspendBelowMinBitrate) {
756 static const int kSuspendTimeFrames = 60; // Suspend for 2 seconds @ 30 fps.
757
758 class RembObserver : public test::SendTest, public I420FrameCallback {
759 public:
760 RembObserver()
761 : SendTest(kDefaultTimeoutMs),
762 clock_(Clock::GetRealTimeClock()),
763 test_state_(kBeforeSuspend),
764 rtp_count_(0),
765 last_sequence_number_(0),
766 suspended_frame_count_(0),
767 low_remb_bps_(0),
768 high_remb_bps_(0) {
769 }
770
771 private:
772 Action OnSendRtp(const uint8_t* packet, size_t length) override {
773 rtc::CritScope lock(&crit_);
774 ++rtp_count_;
775 RTPHeader header;
776 EXPECT_TRUE(parser_->Parse(packet, length, &header));
777 last_sequence_number_ = header.sequenceNumber;
778
779 if (test_state_ == kBeforeSuspend) {
780 // The stream has started. Try to suspend it.
781 SendRtcpFeedback(low_remb_bps_);
782 test_state_ = kDuringSuspend;
783 } else if (test_state_ == kDuringSuspend) {
784 if (header.paddingLength == 0) {
785 // Received non-padding packet during suspension period. Reset the
786 // counter.
787 suspended_frame_count_ = 0;
788 }
789 SendRtcpFeedback(0); // REMB is only sent if value is > 0.
790 } else if (test_state_ == kWaitingForPacket) {
791 if (header.paddingLength == 0) {
792 // Non-padding packet observed. Test is almost complete. Will just
793 // have to wait for the stats to change.
794 test_state_ = kWaitingForStats;
795 }
796 SendRtcpFeedback(0); // REMB is only sent if value is > 0.
797 } else if (test_state_ == kWaitingForStats) {
798 VideoSendStream::Stats stats = stream_->GetStats();
799 if (stats.suspended == false) {
800 // Stats flipped to false. Test is complete.
801 observation_complete_.Set();
802 }
803 SendRtcpFeedback(0); // REMB is only sent if value is > 0.
804 }
805
806 return SEND_PACKET;
807 }
808
809 // This method implements the I420FrameCallback.
810 void FrameCallback(VideoFrame* video_frame) override {
811 rtc::CritScope lock(&crit_);
812 if (test_state_ == kDuringSuspend &&
813 ++suspended_frame_count_ > kSuspendTimeFrames) {
814 VideoSendStream::Stats stats = stream_->GetStats();
815 EXPECT_TRUE(stats.suspended);
816 SendRtcpFeedback(high_remb_bps_);
817 test_state_ = kWaitingForPacket;
818 }
819 }
820
821 void set_low_remb_bps(int value) {
822 rtc::CritScope lock(&crit_);
823 low_remb_bps_ = value;
824 }
825
826 void set_high_remb_bps(int value) {
827 rtc::CritScope lock(&crit_);
828 high_remb_bps_ = value;
829 }
830
831 void OnVideoStreamsCreated(
832 VideoSendStream* send_stream,
833 const std::vector<VideoReceiveStream*>& receive_streams) override {
834 stream_ = send_stream;
835 }
836
837 void ModifyVideoConfigs(
838 VideoSendStream::Config* send_config,
839 std::vector<VideoReceiveStream::Config>* receive_configs,
840 VideoEncoderConfig* encoder_config) override {
841 transport_adapter_.reset(
842 new internal::TransportAdapter(send_config->send_transport));
843 transport_adapter_->Enable();
844 send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
845 send_config->pre_encode_callback = this;
846 send_config->suspend_below_min_bitrate = true;
847 int min_bitrate_bps = encoder_config->streams[0].min_bitrate_bps;
848 set_low_remb_bps(min_bitrate_bps - 10000);
849 int threshold_window = std::max(min_bitrate_bps / 10, 10000);
850 ASSERT_GT(encoder_config->streams[0].max_bitrate_bps,
851 min_bitrate_bps + threshold_window + 5000);
852 set_high_remb_bps(min_bitrate_bps + threshold_window + 5000);
853 }
854
855 void PerformTest() override {
856 EXPECT_TRUE(Wait()) << "Timed out during suspend-below-min-bitrate test.";
857 }
858
859 enum TestState {
860 kBeforeSuspend,
861 kDuringSuspend,
862 kWaitingForPacket,
863 kWaitingForStats
864 };
865
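// Builds a minimal receiver report, optionally carrying a REMB with
// |remb_value|, and sends it back to the send stream through the transport
// adapter.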
866 virtual void SendRtcpFeedback(int remb_value)
867 EXCLUSIVE_LOCKS_REQUIRED(crit_) {
868 FakeReceiveStatistics receive_stats(kVideoSendSsrcs[0],
869 last_sequence_number_, rtp_count_, 0);
870 RTCPSender rtcp_sender(false, clock_, &receive_stats, nullptr,
871 transport_adapter_.get());
872
873 rtcp_sender.SetRTCPStatus(RtcpMode::kReducedSize);
874 rtcp_sender.SetRemoteSSRC(kVideoSendSsrcs[0]);
875 if (remb_value > 0) {
876 rtcp_sender.SetREMBStatus(true);
877 rtcp_sender.SetREMBData(remb_value, std::vector<uint32_t>());
878 }
879 RTCPSender::FeedbackState feedback_state;
880 EXPECT_EQ(0, rtcp_sender.SendRTCP(feedback_state, kRtcpRr));
881 }
882
883 rtc::scoped_ptr<internal::TransportAdapter> transport_adapter_;
884 Clock* const clock_;
885 VideoSendStream* stream_;
886
887 rtc::CriticalSection crit_;
888 TestState test_state_ GUARDED_BY(crit_);
889 int rtp_count_ GUARDED_BY(crit_);
890 int last_sequence_number_ GUARDED_BY(crit_);
891 int suspended_frame_count_ GUARDED_BY(crit_);
892 int low_remb_bps_ GUARDED_BY(crit_);
893 int high_remb_bps_ GUARDED_BY(crit_);
894 } test;
895
896 RunBaseTest(&test);
897 }
898
899 TEST_F(VideoSendStreamTest, NoPaddingWhenVideoIsMuted) {
900 class NoPaddingWhenVideoIsMuted : public test::SendTest {
901 public:
902 NoPaddingWhenVideoIsMuted()
903 : SendTest(kDefaultTimeoutMs),
904 clock_(Clock::GetRealTimeClock()),
905 last_packet_time_ms_(-1),
906 capturer_(nullptr) {
907 }
908
909 private:
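// Record the time of each outgoing RTP packet and stop the capturer; the
// RTCP handler below then waits for RTP (including padding) to cease for
// kVideoMutedThresholdMs.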
910 Action OnSendRtp(const uint8_t* packet, size_t length) override {
911 rtc::CritScope lock(&crit_);
912 last_packet_time_ms_ = clock_->TimeInMilliseconds();
913 capturer_->Stop();
914 return SEND_PACKET;
915 }
916
917 Action OnSendRtcp(const uint8_t* packet, size_t length) override {
918 rtc::CritScope lock(&crit_);
919 const int kVideoMutedThresholdMs = 10000;
920 if (last_packet_time_ms_ > 0 &&
921 clock_->TimeInMilliseconds() - last_packet_time_ms_ >
922 kVideoMutedThresholdMs)
923 observation_complete_.Set();
924 // Send a receiver report back to the sender to keep the RTCP feedback path alive.
925 FakeReceiveStatistics receive_stats(kVideoSendSsrcs[0], 1, 1, 0);
926 RTCPSender rtcp_sender(false, Clock::GetRealTimeClock(), &receive_stats,
927 nullptr, transport_adapter_.get());
928
929 rtcp_sender.SetRTCPStatus(RtcpMode::kReducedSize);
930 rtcp_sender.SetRemoteSSRC(kVideoSendSsrcs[0]);
931
932 RTCPSender::FeedbackState feedback_state;
933
934 EXPECT_EQ(0, rtcp_sender.SendRTCP(feedback_state, kRtcpRr));
935 return SEND_PACKET;
936 }
937
938 test::PacketTransport* CreateReceiveTransport() override {
939 test::PacketTransport* transport = new test::PacketTransport(
940 nullptr, this, test::PacketTransport::kReceiver,
941 FakeNetworkPipe::Config());
942 transport_adapter_.reset(new internal::TransportAdapter(transport));
943 transport_adapter_->Enable();
944 return transport;
945 }
946
947 size_t GetNumVideoStreams() const override { return 3; }
948
949 virtual void OnFrameGeneratorCapturerCreated(
950 test::FrameGeneratorCapturer* frame_generator_capturer) {
951 rtc::CritScope lock(&crit_);
952 capturer_ = frame_generator_capturer;
953 }
954
955 void PerformTest() override {
956 EXPECT_TRUE(Wait())
957 << "Timed out while waiting for RTP packets to stop being sent.";
958 }
959
960 Clock* const clock_;
961 rtc::scoped_ptr<internal::TransportAdapter> transport_adapter_;
962 rtc::CriticalSection crit_;
963 int64_t last_packet_time_ms_ GUARDED_BY(crit_);
964 test::FrameGeneratorCapturer* capturer_ GUARDED_BY(crit_);
965 } test;
966
967 RunBaseTest(&test);
968 }
969
970 // This test first observes "high" bitrate use at which point it sends a REMB to
971 // indicate that it should be lowered significantly. The test then observes that
972 // the bitrate observed is sinking well below the min-transmit-bitrate threshold
973 // to verify that the min-transmit bitrate respects incoming REMB.
974 //
975 // Note that the test starts at "high" bitrate and does not ramp up to "higher"
976 // bitrate, since no receiver report block or REMB is sent in the initial phase.
977 TEST_F(VideoSendStreamTest, MinTransmitBitrateRespectsRemb) {
978 static const int kMinTransmitBitrateBps = 400000;
979 static const int kHighBitrateBps = 150000;
980 static const int kRembBitrateBps = 80000;
981 static const int kRembRespectedBitrateBps = 100000;
982 class BitrateObserver : public test::SendTest {
983 public:
984 BitrateObserver()
985 : SendTest(kDefaultTimeoutMs),
986 bitrate_capped_(false) {
987 }
988
989 private:
990 virtual Action OnSendRtp(const uint8_t* packet, size_t length) {
991 if (RtpHeaderParser::IsRtcp(packet, length))
992 return DROP_PACKET;
993
994 RTPHeader header;
995 if (!parser_->Parse(packet, length, &header))
996 return DROP_PACKET;
997 RTC_DCHECK(stream_ != nullptr);
998 VideoSendStream::Stats stats = stream_->GetStats();
999 if (!stats.substreams.empty()) {
1000 EXPECT_EQ(1u, stats.substreams.size());
1001 int total_bitrate_bps =
1002 stats.substreams.begin()->second.total_bitrate_bps;
1003 test::PrintResult("bitrate_stats_",
1004 "min_transmit_bitrate_low_remb",
1005 "bitrate_bps",
1006 static_cast<size_t>(total_bitrate_bps),
1007 "bps",
1008 false);
1009 if (total_bitrate_bps > kHighBitrateBps) {
1010 rtp_rtcp_->SetREMBData(kRembBitrateBps,
1011 std::vector<uint32_t>(1, header.ssrc));
1012 rtp_rtcp_->Process();
1013 bitrate_capped_ = true;
1014 } else if (bitrate_capped_ &&
1015 total_bitrate_bps < kRembRespectedBitrateBps) {
1016 observation_complete_.Set();
1017 }
1018 }
1019 // Packets don't have to be delivered, since the test itself acts as the receiver.
1020 return DROP_PACKET;
1021 }
1022
1023 void OnVideoStreamsCreated(
1024 VideoSendStream* send_stream,
1025 const std::vector<VideoReceiveStream*>& receive_streams) override {
1026 stream_ = send_stream;
1027 RtpRtcp::Configuration config;
1028 config.outgoing_transport = feedback_transport_.get();
1029 rtp_rtcp_.reset(RtpRtcp::CreateRtpRtcp(config));
1030 rtp_rtcp_->SetREMBStatus(true);
1031 rtp_rtcp_->SetRTCPStatus(RtcpMode::kReducedSize);
1032 }
1033
1034 void ModifyVideoConfigs(
1035 VideoSendStream::Config* send_config,
1036 std::vector<VideoReceiveStream::Config>* receive_configs,
1037 VideoEncoderConfig* encoder_config) override {
1038 feedback_transport_.reset(
1039 new internal::TransportAdapter(send_config->send_transport));
1040 feedback_transport_->Enable();
1041 encoder_config->min_transmit_bitrate_bps = kMinTransmitBitrateBps;
1042 }
1043
1044 void PerformTest() override {
1045 EXPECT_TRUE(Wait())
1046 << "Timeout while waiting for low bitrate stats after REMB.";
1047 }
1048
1049 rtc::scoped_ptr<RtpRtcp> rtp_rtcp_;
1050 rtc::scoped_ptr<internal::TransportAdapter> feedback_transport_;
1051 VideoSendStream* stream_;
1052 bool bitrate_capped_;
1053 } test;
1054
1055 RunBaseTest(&test);
1056 }
1057
1058 TEST_F(VideoSendStreamTest, CanReconfigureToUseStartBitrateAbovePreviousMax) {
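// Fake encoder that records the most recently configured start/target bitrate
// so the test can inspect what the stream actually applied.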
1059 class StartBitrateObserver : public test::FakeEncoder {
1060 public:
1061 StartBitrateObserver()
1062 : FakeEncoder(Clock::GetRealTimeClock()), start_bitrate_kbps_(0) {}
1063 int32_t InitEncode(const VideoCodec* config,
1064 int32_t number_of_cores,
1065 size_t max_payload_size) override {
1066 rtc::CritScope lock(&crit_);
1067 start_bitrate_kbps_ = config->startBitrate;
1068 return FakeEncoder::InitEncode(config, number_of_cores, max_payload_size);
1069 }
1070
1071 int32_t SetRates(uint32_t new_target_bitrate, uint32_t framerate) override {
1072 rtc::CritScope lock(&crit_);
1073 start_bitrate_kbps_ = new_target_bitrate;
1074 return FakeEncoder::SetRates(new_target_bitrate, framerate);
1075 }
1076
1077 int GetStartBitrateKbps() const {
1078 rtc::CritScope lock(&crit_);
1079 return start_bitrate_kbps_;
1080 }
1081
1082 private:
1083 mutable rtc::CriticalSection crit_;
1084 int start_bitrate_kbps_ GUARDED_BY(crit_);
1085 };
1086
1087 CreateSenderCall(Call::Config());
1088
1089 test::NullTransport transport;
1090 CreateSendConfig(1, 0, &transport);
1091
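// Request a start bitrate that is twice the configured per-stream max; the
// encoder should initially be capped at the stream's max bitrate.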
1092 Call::Config::BitrateConfig bitrate_config;
1093 bitrate_config.start_bitrate_bps =
1094 2 * video_encoder_config_.streams[0].max_bitrate_bps;
1095 sender_call_->SetBitrateConfig(bitrate_config);
1096
1097 StartBitrateObserver encoder;
1098 video_send_config_.encoder_settings.encoder = &encoder;
1099
1100 CreateVideoStreams();
1101
1102 EXPECT_EQ(video_encoder_config_.streams[0].max_bitrate_bps / 1000,
1103 encoder.GetStartBitrateKbps());
1104
1105 video_encoder_config_.streams[0].max_bitrate_bps =
1106 2 * bitrate_config.start_bitrate_bps;
1107 video_send_stream_->ReconfigureVideoEncoder(video_encoder_config_);
1108
1109 // The new start bitrate should now be reconfigured above the previous max. As
1110 // there's no network connection, this shouldn't be flaky, since no bitrate
1111 // estimate should have been reported in between.
1112 EXPECT_EQ(bitrate_config.start_bitrate_bps / 1000,
1113 encoder.GetStartBitrateKbps());
1114
1115 DestroyStreams();
1116 }
1117
1118 TEST_F(VideoSendStreamTest, CapturesTextureAndVideoFrames) {
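// Collects the frames delivered to the pre-encode callback so they can be
// compared against the injected input frames.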
1119 class FrameObserver : public I420FrameCallback {
1120 public:
1121 FrameObserver() : output_frame_event_(false, false) {}
1122
1123 void FrameCallback(VideoFrame* video_frame) override {
1124 output_frames_.push_back(*video_frame);
1125 output_frame_event_.Set();
1126 }
1127
1128 void WaitOutputFrame() {
1129 const int kWaitFrameTimeoutMs = 3000;
1130 EXPECT_TRUE(output_frame_event_.Wait(kWaitFrameTimeoutMs))
1131 << "Timeout while waiting for output frames.";
1132 }
1133
1134 const std::vector<VideoFrame>& output_frames() const {
1135 return output_frames_;
1136 }
1137
1138 private:
1139 // Delivered output frames.
1140 std::vector<VideoFrame> output_frames_;
1141
1142 // Indicate an output frame has arrived.
1143 rtc::Event output_frame_event_;
1144 };
1145
1146 // Initialize send stream.
1147 CreateSenderCall(Call::Config());
1148
1149 test::NullTransport transport;
1150 CreateSendConfig(1, 0, &transport);
1151 FrameObserver observer;
1152 video_send_config_.pre_encode_callback = &observer;
1153 CreateVideoStreams();
1154
1155 // Prepare five input frames. Send ordinary VideoFrames and texture frames
1156 // alternately.
1157 std::vector<VideoFrame> input_frames;
1158 int width = static_cast<int>(video_encoder_config_.streams[0].width);
1159 int height = static_cast<int>(video_encoder_config_.streams[0].height);
1160 test::FakeNativeHandle* handle1 = new test::FakeNativeHandle();
1161 test::FakeNativeHandle* handle2 = new test::FakeNativeHandle();
1162 test::FakeNativeHandle* handle3 = new test::FakeNativeHandle();
1163 input_frames.push_back(test::FakeNativeHandle::CreateFrame(
1164 handle1, width, height, 1, 1, kVideoRotation_0));
1165 input_frames.push_back(test::FakeNativeHandle::CreateFrame(
1166 handle2, width, height, 2, 2, kVideoRotation_0));
1167 input_frames.push_back(CreateVideoFrame(width, height, 3));
1168 input_frames.push_back(CreateVideoFrame(width, height, 4));
1169 input_frames.push_back(test::FakeNativeHandle::CreateFrame(
1170 handle3, width, height, 5, 5, kVideoRotation_0));
1171
1172 video_send_stream_->Start();
1173 for (size_t i = 0; i < input_frames.size(); i++) {
1174 video_send_stream_->Input()->IncomingCapturedFrame(input_frames[i]);
1175 // Do not send the next frame too fast, so the frame dropper won't drop it.
1176 if (i < input_frames.size() - 1)
1177 SleepMs(1000 / video_encoder_config_.streams[0].max_framerate);
1178 // Wait until the output frame is received before sending the next input
1179 // frame; otherwise the previous input frame may be replaced before it is delivered.
1180 observer.WaitOutputFrame();
1181 }
1182 video_send_stream_->Stop();
1183
1184 // Check that the input and output frames are the same. render_time_ms and
1185 // timestamp are not compared because the capturer sets those values.
1186 ExpectEqualFramesVector(input_frames, observer.output_frames());
1187
1188 DestroyStreams();
1189 }
1190
1191 void ExpectEqualFrames(const VideoFrame& frame1, const VideoFrame& frame2) {
1192 if (frame1.native_handle() != nullptr || frame2.native_handle() != nullptr)
1193 ExpectEqualTextureFrames(frame1, frame2);
1194 else
1195 ExpectEqualBufferFrames(frame1, frame2);
1196 }
1197
1198 void ExpectEqualTextureFrames(const VideoFrame& frame1,
1199 const VideoFrame& frame2) {
1200 EXPECT_EQ(frame1.native_handle(), frame2.native_handle());
1201 EXPECT_EQ(frame1.width(), frame2.width());
1202 EXPECT_EQ(frame1.height(), frame2.height());
1203 EXPECT_EQ(frame1.render_time_ms(), frame2.render_time_ms());
1204 }
1205
1206 void ExpectEqualBufferFrames(const VideoFrame& frame1,
1207 const VideoFrame& frame2) {
1208 EXPECT_EQ(frame1.width(), frame2.width());
1209 EXPECT_EQ(frame1.height(), frame2.height());
1210 EXPECT_EQ(frame1.stride(kYPlane), frame2.stride(kYPlane));
1211 EXPECT_EQ(frame1.stride(kUPlane), frame2.stride(kUPlane));
1212 EXPECT_EQ(frame1.stride(kVPlane), frame2.stride(kVPlane));
1213 EXPECT_EQ(frame1.render_time_ms(), frame2.render_time_ms());
1214 ASSERT_EQ(frame1.allocated_size(kYPlane), frame2.allocated_size(kYPlane));
1215 EXPECT_EQ(0,
1216 memcmp(frame1.buffer(kYPlane),
1217 frame2.buffer(kYPlane),
1218 frame1.allocated_size(kYPlane)));
1219 ASSERT_EQ(frame1.allocated_size(kUPlane), frame2.allocated_size(kUPlane));
1220 EXPECT_EQ(0,
1221 memcmp(frame1.buffer(kUPlane),
1222 frame2.buffer(kUPlane),
1223 frame1.allocated_size(kUPlane)));
1224 ASSERT_EQ(frame1.allocated_size(kVPlane), frame2.allocated_size(kVPlane));
1225 EXPECT_EQ(0,
1226 memcmp(frame1.buffer(kVPlane),
1227 frame2.buffer(kVPlane),
1228 frame1.allocated_size(kVPlane)));
1229 }
1230
1231 void ExpectEqualFramesVector(const std::vector<VideoFrame>& frames1,
1232 const std::vector<VideoFrame>& frames2) {
1233 EXPECT_EQ(frames1.size(), frames2.size());
1234 for (size_t i = 0; i < std::min(frames1.size(), frames2.size()); ++i)
1235 ExpectEqualFrames(frames1[i], frames2[i]);
1236 }
1237
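// Creates an I420 frame whose three planes alias one buffer filled with
// |data|; |data| is also used as timestamp and render time so individual
// frames can be told apart when comparing input and output.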
1238 VideoFrame CreateVideoFrame(int width, int height, uint8_t data) {
1239 const int kSizeY = width * height * 2;
1240 rtc::scoped_ptr<uint8_t[]> buffer(new uint8_t[kSizeY]);
1241 memset(buffer.get(), data, kSizeY);
1242 VideoFrame frame;
1243 frame.CreateFrame(buffer.get(), buffer.get(), buffer.get(), width, height,
1244 width, width / 2, width / 2);
1245 frame.set_timestamp(data);
1246 frame.set_render_time_ms(data);
1247 return frame;
1248 }
1249
1250 TEST_F(VideoSendStreamTest, EncoderIsProperlyInitializedAndDestroyed) {
1251 class EncoderStateObserver : public test::SendTest, public VideoEncoder {
1252 public:
1253 EncoderStateObserver()
1254 : SendTest(kDefaultTimeoutMs),
1255 initialized_(false),
1256 callback_registered_(false),
1257 num_releases_(0),
1258 released_(false) {}
1259
1260 bool IsReleased() {
1261 rtc::CritScope lock(&crit_);
1262 return released_;
1263 }
1264
1265 bool IsReadyForEncode() {
1266 rtc::CritScope lock(&crit_);
1267 return initialized_ && callback_registered_;
1268 }
1269
1270 size_t num_releases() {
1271 rtc::CritScope lock(&crit_);
1272 return num_releases_;
1273 }
1274
1275 private:
1276 int32_t InitEncode(const VideoCodec* codecSettings,
1277 int32_t numberOfCores,
1278 size_t maxPayloadSize) override {
1279 rtc::CritScope lock(&crit_);
1280 EXPECT_FALSE(initialized_);
1281 initialized_ = true;
1282 released_ = false;
1283 return 0;
1284 }
1285
1286 int32_t Encode(const VideoFrame& inputImage,
1287 const CodecSpecificInfo* codecSpecificInfo,
1288 const std::vector<FrameType>* frame_types) override {
1289 EXPECT_TRUE(IsReadyForEncode());
1290
1291 observation_complete_.Set();
1292 return 0;
1293 }
1294
1295 int32_t RegisterEncodeCompleteCallback(
1296 EncodedImageCallback* callback) override {
1297 rtc::CritScope lock(&crit_);
1298 EXPECT_TRUE(initialized_);
1299 callback_registered_ = true;
1300 return 0;
1301 }
1302
1303 int32_t Release() override {
1304 rtc::CritScope lock(&crit_);
1305 EXPECT_TRUE(IsReadyForEncode());
1306 EXPECT_FALSE(released_);
1307 initialized_ = false;
1308 callback_registered_ = false;
1309 released_ = true;
1310 ++num_releases_;
1311 return 0;
1312 }
1313
1314 int32_t SetChannelParameters(uint32_t packetLoss, int64_t rtt) override {
1315 EXPECT_TRUE(IsReadyForEncode());
1316 return 0;
1317 }
1318
1319 int32_t SetRates(uint32_t newBitRate, uint32_t frameRate) override {
1320 EXPECT_TRUE(IsReadyForEncode());
1321 return 0;
1322 }
1323
1324 void OnVideoStreamsCreated(
1325 VideoSendStream* send_stream,
1326 const std::vector<VideoReceiveStream*>& receive_streams) override {
1327 // Encoder initialization should be done in stream construction before
1328 // starting.
1329 EXPECT_TRUE(IsReadyForEncode());
1330 stream_ = send_stream;
1331 }
1332
1333 void ModifyVideoConfigs(
1334 VideoSendStream::Config* send_config,
1335 std::vector<VideoReceiveStream::Config>* receive_configs,
1336 VideoEncoderConfig* encoder_config) override {
1337 send_config->encoder_settings.encoder = this;
1338 encoder_config_ = *encoder_config;
1339 }
1340
1341 void PerformTest() override {
1342 EXPECT_TRUE(Wait()) << "Timed out while waiting for Encode.";
1343 EXPECT_EQ(0u, num_releases());
1344 stream_->ReconfigureVideoEncoder(encoder_config_);
1345 EXPECT_EQ(0u, num_releases());
1346 stream_->Stop();
1347 // Encoder should not be released before destroying the VideoSendStream.
1348 EXPECT_FALSE(IsReleased());
1349 EXPECT_TRUE(IsReadyForEncode());
1350 stream_->Start();
1351 // Sanity check, make sure we still encode frames with this encoder.
1352 EXPECT_TRUE(Wait()) << "Timed out while waiting for Encode.";
1353 }
1354
1355 rtc::CriticalSection crit_;
1356 VideoSendStream* stream_;
1357 bool initialized_ GUARDED_BY(crit_);
1358 bool callback_registered_ GUARDED_BY(crit_);
1359 size_t num_releases_ GUARDED_BY(crit_);
1360 bool released_ GUARDED_BY(crit_);
1361 VideoEncoderConfig encoder_config_;
1362 } test_encoder;
1363
1364 RunBaseTest(&test_encoder);
1365
1366 EXPECT_TRUE(test_encoder.IsReleased());
1367 EXPECT_EQ(1u, test_encoder.num_releases());
1368 }
1369
1370 TEST_F(VideoSendStreamTest, EncoderSetupPropagatesCommonEncoderConfigValues) {
1371 class VideoCodecConfigObserver : public test::SendTest,
1372 public test::FakeEncoder {
1373 public:
1374 VideoCodecConfigObserver()
1375 : SendTest(kDefaultTimeoutMs),
1376 FakeEncoder(Clock::GetRealTimeClock()),
1377 num_initializations_(0) {}
1378
1379 private:
1380 void ModifyVideoConfigs(
1381 VideoSendStream::Config* send_config,
1382 std::vector<VideoReceiveStream::Config>* receive_configs,
1383 VideoEncoderConfig* encoder_config) override {
1384 send_config->encoder_settings.encoder = this;
1385 encoder_config_ = *encoder_config;
1386 }
1387
1388 void OnVideoStreamsCreated(
1389 VideoSendStream* send_stream,
1390 const std::vector<VideoReceiveStream*>& receive_streams) override {
1391 stream_ = send_stream;
1392 }
1393
1394 int32_t InitEncode(const VideoCodec* config,
1395 int32_t number_of_cores,
1396 size_t max_payload_size) override {
1397 if (num_initializations_ == 0) {
1398 // Verify default values.
1399 EXPECT_EQ(kRealtimeVideo, config->mode);
1400 } else {
1401 // Verify that changed values are propagated.
1402 EXPECT_EQ(kScreensharing, config->mode);
1403 }
1404 ++num_initializations_;
1405 return FakeEncoder::InitEncode(config, number_of_cores, max_payload_size);
1406 }
1407
1408 void PerformTest() override {
1409 EXPECT_EQ(1u, num_initializations_) << "VideoEncoder not initialized.";
1410
1411 encoder_config_.content_type = VideoEncoderConfig::ContentType::kScreen;
1412 stream_->ReconfigureVideoEncoder(encoder_config_);
1413 EXPECT_EQ(2u, num_initializations_)
1414 << "ReconfigureVideoEncoder did not reinitialize the encoder with "
1415 "new encoder settings.";
1416 }
1417
1418 size_t num_initializations_;
1419 VideoSendStream* stream_;
1420 VideoEncoderConfig encoder_config_;
1421 } test;
1422
1423 RunBaseTest(&test);
1424 }
1425
1426 static const size_t kVideoCodecConfigObserverNumberOfTemporalLayers = 4;
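// Observer templated on the codec-specific settings struct (VideoCodecVP8,
// VideoCodecVP9 or VideoCodecH264). VerifyCodecSpecifics is specialized per
// codec below to check that encoder_specific_settings reach the VideoCodec.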
1427 template <typename T>
1428 class VideoCodecConfigObserver : public test::SendTest,
1429 public test::FakeEncoder {
1430 public:
1431 VideoCodecConfigObserver(VideoCodecType video_codec_type,
1432 const char* codec_name)
1433 : SendTest(VideoSendStreamTest::kDefaultTimeoutMs),
1434 FakeEncoder(Clock::GetRealTimeClock()),
1435 video_codec_type_(video_codec_type),
1436 codec_name_(codec_name),
1437 num_initializations_(0) {
1438 memset(&encoder_settings_, 0, sizeof(encoder_settings_));
1439 }
1440
1441 private:
1442 void ModifyVideoConfigs(
1443 VideoSendStream::Config* send_config,
1444 std::vector<VideoReceiveStream::Config>* receive_configs,
1445 VideoEncoderConfig* encoder_config) override {
1446 send_config->encoder_settings.encoder = this;
1447 send_config->encoder_settings.payload_name = codec_name_;
1448
1449 for (size_t i = 0; i < encoder_config->streams.size(); ++i) {
1450 encoder_config->streams[i].temporal_layer_thresholds_bps.resize(
1451 kVideoCodecConfigObserverNumberOfTemporalLayers - 1);
1452 }
1453
1454 encoder_config->encoder_specific_settings = &encoder_settings_;
1455 encoder_config_ = *encoder_config;
1456 }
1457
1458 void OnVideoStreamsCreated(
1459 VideoSendStream* send_stream,
1460 const std::vector<VideoReceiveStream*>& receive_streams) override {
1461 stream_ = send_stream;
1462 }
1463
1464 int32_t InitEncode(const VideoCodec* config,
1465 int32_t number_of_cores,
1466 size_t max_payload_size) override {
1467 EXPECT_EQ(video_codec_type_, config->codecType);
1468 VerifyCodecSpecifics(*config);
1469 ++num_initializations_;
1470 return FakeEncoder::InitEncode(config, number_of_cores, max_payload_size);
1471 }
1472
1473 void VerifyCodecSpecifics(const VideoCodec& config) const;
1474
1475 void PerformTest() override {
1476 EXPECT_EQ(1u, num_initializations_) << "VideoEncoder not initialized.";
1477
1478 encoder_settings_.frameDroppingOn = true;
1479 stream_->ReconfigureVideoEncoder(encoder_config_);
1480 EXPECT_EQ(2u, num_initializations_)
1481 << "ReconfigureVideoEncoder did not reinitialize the encoder with "
1482 "new encoder settings.";
1483 }
1484
1485 int32_t Encode(const VideoFrame& input_image,
1486 const CodecSpecificInfo* codec_specific_info,
1487 const std::vector<FrameType>* frame_types) override {
1488 // Silently skip the encode; FakeEncoder::Encode doesn't produce VP8.
1489 return 0;
1490 }
1491
1492 T encoder_settings_;
1493 const VideoCodecType video_codec_type_;
1494 const char* const codec_name_;
1495 size_t num_initializations_;
1496 VideoSendStream* stream_;
1497 VideoEncoderConfig encoder_config_;
1498 };
1499
1500 template <>
1501 void VideoCodecConfigObserver<VideoCodecH264>::VerifyCodecSpecifics(
1502 const VideoCodec& config) const {
1503 EXPECT_EQ(0, memcmp(&config.codecSpecific.H264, &encoder_settings_,
1504 sizeof(encoder_settings_)));
1505 }
1506 template <>
1507 void VideoCodecConfigObserver<VideoCodecVP8>::VerifyCodecSpecifics(
1508 const VideoCodec& config) const {
1509 // Check that the number of temporal layers has propagated properly to
1510 // VideoCodec.
1511 EXPECT_EQ(kVideoCodecConfigObserverNumberOfTemporalLayers,
1512 config.codecSpecific.VP8.numberOfTemporalLayers);
1513
1514 for (unsigned char i = 0; i < config.numberOfSimulcastStreams; ++i) {
1515 EXPECT_EQ(kVideoCodecConfigObserverNumberOfTemporalLayers,
1516 config.simulcastStream[i].numberOfTemporalLayers);
1517 }
1518
1519 // Set the expected number of temporal layers to what should have been applied
1520 // when reconfiguring the encoder; it intentionally does not match the set config.
1521 VideoCodecVP8 encoder_settings = encoder_settings_;
1522 encoder_settings.numberOfTemporalLayers =
1523 kVideoCodecConfigObserverNumberOfTemporalLayers;
1524 EXPECT_EQ(0, memcmp(&config.codecSpecific.VP8, &encoder_settings,
1525 sizeof(encoder_settings_)));
1526 }
1527 template <>
1528 void VideoCodecConfigObserver<VideoCodecVP9>::VerifyCodecSpecifics(
1529 const VideoCodec& config) const {
1530 // Check that the number of temporal layers has propagated properly to
1531 // VideoCodec.
1532 EXPECT_EQ(kVideoCodecConfigObserverNumberOfTemporalLayers,
1533 config.codecSpecific.VP9.numberOfTemporalLayers);
1534
1535 for (unsigned char i = 0; i < config.numberOfSimulcastStreams; ++i) {
1536 EXPECT_EQ(kVideoCodecConfigObserverNumberOfTemporalLayers,
1537 config.simulcastStream[i].numberOfTemporalLayers);
1538 }
1539
1540 // Set expected temporal layers as they should have been set when
1541 // reconfiguring the encoder and not match the set config.
1542 VideoCodecVP9 encoder_settings = encoder_settings_;
1543 encoder_settings.numberOfTemporalLayers =
1544 kVideoCodecConfigObserverNumberOfTemporalLayers;
1545 EXPECT_EQ(0, memcmp(&config.codecSpecific.VP9, &encoder_settings,
1546 sizeof(encoder_settings_)));
1547 }
1548
TEST_F(VideoSendStreamTest, EncoderSetupPropagatesVp8Config) {
  VideoCodecConfigObserver<VideoCodecVP8> test(kVideoCodecVP8, "VP8");
  RunBaseTest(&test);
}

TEST_F(VideoSendStreamTest, EncoderSetupPropagatesVp9Config) {
  VideoCodecConfigObserver<VideoCodecVP9> test(kVideoCodecVP9, "VP9");
  RunBaseTest(&test);
}

TEST_F(VideoSendStreamTest, EncoderSetupPropagatesH264Config) {
  VideoCodecConfigObserver<VideoCodecH264> test(kVideoCodecH264, "H264");
  RunBaseTest(&test);
}

TEST_F(VideoSendStreamTest, RtcpSenderReportContainsMediaBytesSent) {
  class RtcpSenderReportTest : public test::SendTest {
   public:
    RtcpSenderReportTest() : SendTest(kDefaultTimeoutMs),
                             rtp_packets_sent_(0),
                             media_bytes_sent_(0) {}

   private:
    Action OnSendRtp(const uint8_t* packet, size_t length) override {
      rtc::CritScope lock(&crit_);
      RTPHeader header;
      EXPECT_TRUE(parser_->Parse(packet, length, &header));
      ++rtp_packets_sent_;
      media_bytes_sent_ += length - header.headerLength - header.paddingLength;
      return SEND_PACKET;
    }

    Action OnSendRtcp(const uint8_t* packet, size_t length) override {
      rtc::CritScope lock(&crit_);
      RTCPUtility::RTCPParserV2 parser(packet, length, true);
      EXPECT_TRUE(parser.IsValid());

      RTCPUtility::RTCPPacketTypes packet_type = parser.Begin();
      while (packet_type != RTCPUtility::RTCPPacketTypes::kInvalid) {
        if (packet_type == RTCPUtility::RTCPPacketTypes::kSr) {
          // Only compare sent media bytes if SenderPacketCount matches the
          // number of sent RTP packets (a new RTP packet could be sent before
          // the RTCP packet).
          if (parser.Packet().SR.SenderOctetCount > 0 &&
              parser.Packet().SR.SenderPacketCount == rtp_packets_sent_) {
            EXPECT_EQ(media_bytes_sent_, parser.Packet().SR.SenderOctetCount);
            observation_complete_.Set();
          }
        }
        packet_type = parser.Iterate();
      }

      return SEND_PACKET;
    }

    void PerformTest() override {
      EXPECT_TRUE(Wait()) << "Timed out while waiting for RTCP sender report.";
    }

    rtc::CriticalSection crit_;
    size_t rtp_packets_sent_ GUARDED_BY(&crit_);
    size_t media_bytes_sent_ GUARDED_BY(&crit_);
  } test;

  RunBaseTest(&test);
}

TEST_F(VideoSendStreamTest, TranslatesTwoLayerScreencastToTargetBitrate) {
  static const int kScreencastTargetBitrateKbps = 200;
  class ScreencastTargetBitrateTest : public test::SendTest,
                                      public test::FakeEncoder {
   public:
    ScreencastTargetBitrateTest()
        : SendTest(kDefaultTimeoutMs),
          test::FakeEncoder(Clock::GetRealTimeClock()) {}

   private:
    int32_t InitEncode(const VideoCodec* config,
                       int32_t number_of_cores,
                       size_t max_payload_size) override {
      EXPECT_EQ(static_cast<unsigned int>(kScreencastTargetBitrateKbps),
                config->targetBitrate);
      observation_complete_.Set();
      return test::FakeEncoder::InitEncode(
          config, number_of_cores, max_payload_size);
    }
    void ModifyVideoConfigs(
        VideoSendStream::Config* send_config,
        std::vector<VideoReceiveStream::Config>* receive_configs,
        VideoEncoderConfig* encoder_config) override {
      send_config->encoder_settings.encoder = this;
      EXPECT_EQ(1u, encoder_config->streams.size());
      EXPECT_TRUE(
          encoder_config->streams[0].temporal_layer_thresholds_bps.empty());
      encoder_config->streams[0].temporal_layer_thresholds_bps.push_back(
          kScreencastTargetBitrateKbps * 1000);
      encoder_config->content_type = VideoEncoderConfig::ContentType::kScreen;
    }

    void PerformTest() override {
      EXPECT_TRUE(Wait())
          << "Timed out while waiting for the encoder to be initialized.";
    }
  } test;

  RunBaseTest(&test);
}

// Disabled on LinuxAsan:
// https://bugs.chromium.org/p/webrtc/issues/detail?id=5382
#if defined(ADDRESS_SANITIZER) && defined(WEBRTC_LINUX)
#define MAYBE_ReconfigureBitratesSetsEncoderBitratesCorrectly \
  DISABLED_ReconfigureBitratesSetsEncoderBitratesCorrectly
#else
#define MAYBE_ReconfigureBitratesSetsEncoderBitratesCorrectly \
  ReconfigureBitratesSetsEncoderBitratesCorrectly
#endif

TEST_F(VideoSendStreamTest,
       MAYBE_ReconfigureBitratesSetsEncoderBitratesCorrectly) {
  // These bitrates are chosen to be "kind of odd" so that they are not
  // accidentally mistaken for the default values.
  static const int kMinBitrateKbps = 137;
  static const int kStartBitrateKbps = 345;
  static const int kLowerMaxBitrateKbps = 312;
  static const int kMaxBitrateKbps = 413;
  static const int kIncreasedStartBitrateKbps = 451;
  static const int kIncreasedMaxBitrateKbps = 597;
  class EncoderBitrateThresholdObserver : public test::SendTest,
                                          public test::FakeEncoder {
   public:
    EncoderBitrateThresholdObserver()
        : SendTest(kDefaultTimeoutMs),
          FakeEncoder(Clock::GetRealTimeClock()),
          num_initializations_(0) {}

   private:
    int32_t InitEncode(const VideoCodec* codecSettings,
                       int32_t numberOfCores,
                       size_t maxPayloadSize) override {
      if (num_initializations_ == 0) {
        EXPECT_EQ(static_cast<unsigned int>(kMinBitrateKbps),
                  codecSettings->minBitrate);
        EXPECT_EQ(static_cast<unsigned int>(kStartBitrateKbps),
                  codecSettings->startBitrate);
        EXPECT_EQ(static_cast<unsigned int>(kMaxBitrateKbps),
                  codecSettings->maxBitrate);
        observation_complete_.Set();
      } else if (num_initializations_ == 1) {
        EXPECT_EQ(static_cast<unsigned int>(kLowerMaxBitrateKbps),
                  codecSettings->maxBitrate);
        // The start bitrate should be kept (-1) and capped to the max bitrate.
        // Since this is not an end-to-end call, no receiver should have been
        // returning a REMB that could lower this estimate.
        EXPECT_EQ(codecSettings->startBitrate, codecSettings->maxBitrate);
      } else if (num_initializations_ == 2) {
        EXPECT_EQ(static_cast<unsigned int>(kIncreasedMaxBitrateKbps),
                  codecSettings->maxBitrate);
        EXPECT_EQ(static_cast<unsigned int>(kIncreasedStartBitrateKbps),
                  codecSettings->startBitrate);
      }
      ++num_initializations_;
      return FakeEncoder::InitEncode(codecSettings, numberOfCores,
                                     maxPayloadSize);
    }

    Call::Config GetSenderCallConfig() override {
      Call::Config config;
      config.bitrate_config.min_bitrate_bps = kMinBitrateKbps * 1000;
      config.bitrate_config.start_bitrate_bps = kStartBitrateKbps * 1000;
      config.bitrate_config.max_bitrate_bps = kMaxBitrateKbps * 1000;
      return config;
    }

    void ModifyVideoConfigs(
        VideoSendStream::Config* send_config,
        std::vector<VideoReceiveStream::Config>* receive_configs,
        VideoEncoderConfig* encoder_config) override {
      send_config->encoder_settings.encoder = this;
      // Set bitrates lower/higher than min/max to make sure they are properly
      // capped.
      encoder_config->streams.front().min_bitrate_bps = kMinBitrateKbps * 1000;
      encoder_config->streams.front().max_bitrate_bps = kMaxBitrateKbps * 1000;
      encoder_config_ = *encoder_config;
    }

    void OnCallsCreated(Call* sender_call, Call* receiver_call) override {
      call_ = sender_call;
    }

    void OnVideoStreamsCreated(
        VideoSendStream* send_stream,
        const std::vector<VideoReceiveStream*>& receive_streams) override {
      send_stream_ = send_stream;
    }

    void PerformTest() override {
      Call::Config::BitrateConfig bitrate_config;
      bitrate_config.start_bitrate_bps = kIncreasedStartBitrateKbps * 1000;
      bitrate_config.max_bitrate_bps = kIncreasedMaxBitrateKbps * 1000;
      call_->SetBitrateConfig(bitrate_config);
      EXPECT_TRUE(Wait())
          << "Timed out while waiting for the encoder to be configured.";
      encoder_config_.streams[0].min_bitrate_bps = 0;
      encoder_config_.streams[0].max_bitrate_bps = kLowerMaxBitrateKbps * 1000;
      send_stream_->ReconfigureVideoEncoder(encoder_config_);
      EXPECT_EQ(2, num_initializations_)
          << "Encoder should have been reconfigured with the new value.";
      encoder_config_.streams[0].target_bitrate_bps =
          encoder_config_.streams[0].min_bitrate_bps;
      encoder_config_.streams[0].max_bitrate_bps =
          kIncreasedMaxBitrateKbps * 1000;
      send_stream_->ReconfigureVideoEncoder(encoder_config_);
      EXPECT_EQ(3, num_initializations_)
          << "Encoder should have been reconfigured with the new value.";
    }

    int num_initializations_;
    webrtc::Call* call_;
    webrtc::VideoSendStream* send_stream_;
    webrtc::VideoEncoderConfig encoder_config_;
  } test;

  RunBaseTest(&test);
}

TEST_F(VideoSendStreamTest, ReportsSentResolution) {
  static const size_t kNumStreams = 3;
  // Unusual resolutions to make sure that they are the ones being reported.
  static const struct {
    int width;
    int height;
  } kEncodedResolution[kNumStreams] = {
      {241, 181}, {300, 121}, {121, 221}};
  class ScreencastTargetBitrateTest : public test::SendTest,
                                      public test::FakeEncoder {
   public:
    ScreencastTargetBitrateTest()
        : SendTest(kDefaultTimeoutMs),
          test::FakeEncoder(Clock::GetRealTimeClock()) {}

   private:
    int32_t Encode(const VideoFrame& input_image,
                   const CodecSpecificInfo* codecSpecificInfo,
                   const std::vector<FrameType>* frame_types) override {
      CodecSpecificInfo specifics;
      memset(&specifics, 0, sizeof(specifics));
      specifics.codecType = kVideoCodecGeneric;

      uint8_t buffer[16] = {0};
      EncodedImage encoded(buffer, sizeof(buffer), sizeof(buffer));
      encoded._timeStamp = input_image.timestamp();
      encoded.capture_time_ms_ = input_image.render_time_ms();

      for (size_t i = 0; i < kNumStreams; ++i) {
        specifics.codecSpecific.generic.simulcast_idx = static_cast<uint8_t>(i);
        encoded._frameType = (*frame_types)[i];
        encoded._encodedWidth = kEncodedResolution[i].width;
        encoded._encodedHeight = kEncodedResolution[i].height;
        RTC_DCHECK(callback_ != nullptr);
        if (callback_->Encoded(encoded, &specifics, nullptr) != 0)
          return -1;
      }

      observation_complete_.Set();
      return 0;
    }
    void ModifyVideoConfigs(
        VideoSendStream::Config* send_config,
        std::vector<VideoReceiveStream::Config>* receive_configs,
        VideoEncoderConfig* encoder_config) override {
      send_config->encoder_settings.encoder = this;
      EXPECT_EQ(kNumStreams, encoder_config->streams.size());
    }

    size_t GetNumVideoStreams() const override { return kNumStreams; }

    void PerformTest() override {
      EXPECT_TRUE(Wait())
          << "Timed out while waiting for the encoder to send one frame.";
      VideoSendStream::Stats stats = send_stream_->GetStats();

      for (size_t i = 0; i < kNumStreams; ++i) {
        ASSERT_TRUE(stats.substreams.find(kVideoSendSsrcs[i]) !=
                    stats.substreams.end())
            << "No stats for SSRC: " << kVideoSendSsrcs[i]
            << ", stats should exist as soon as frames have been encoded.";
        VideoSendStream::StreamStats ssrc_stats =
            stats.substreams[kVideoSendSsrcs[i]];
        EXPECT_EQ(kEncodedResolution[i].width, ssrc_stats.width);
        EXPECT_EQ(kEncodedResolution[i].height, ssrc_stats.height);
      }
    }

    void OnVideoStreamsCreated(
        VideoSendStream* send_stream,
        const std::vector<VideoReceiveStream*>& receive_streams) override {
      send_stream_ = send_stream;
    }

    VideoSendStream* send_stream_;
  } test;

  RunBaseTest(&test);
}

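// Helper for the VP9-specific tests below: it sends with a real VP9 encoder,
// depacketizes every outgoing RTP packet with RtpDepacketizerVp9, verifies the
// header fields that are common to all configurations, and lets subclasses
// check mode-specific fields by overriding InspectHeader().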
class Vp9HeaderObserver : public test::SendTest {
 public:
  Vp9HeaderObserver()
      : SendTest(VideoSendStreamTest::kLongTimeoutMs),
        vp9_encoder_(VP9Encoder::Create()),
        vp9_settings_(VideoEncoder::GetDefaultVp9Settings()),
        packets_sent_(0),
        frames_sent_(0) {}

  virtual void ModifyVideoConfigsHook(
      VideoSendStream::Config* send_config,
      std::vector<VideoReceiveStream::Config>* receive_configs,
      VideoEncoderConfig* encoder_config) {}

  virtual void InspectHeader(const RTPVideoHeaderVP9& vp9) = 0;

 private:
  const int kVp9PayloadType = 105;

  void ModifyVideoConfigs(
      VideoSendStream::Config* send_config,
      std::vector<VideoReceiveStream::Config>* receive_configs,
      VideoEncoderConfig* encoder_config) override {
    encoder_config->encoder_specific_settings = &vp9_settings_;
    send_config->encoder_settings.encoder = vp9_encoder_.get();
    send_config->encoder_settings.payload_name = "VP9";
    send_config->encoder_settings.payload_type = kVp9PayloadType;
    ModifyVideoConfigsHook(send_config, receive_configs, encoder_config);
    EXPECT_EQ(1u, encoder_config->streams.size());
    encoder_config->streams[0].temporal_layer_thresholds_bps.resize(
        vp9_settings_.numberOfTemporalLayers - 1);
    encoder_config_ = *encoder_config;
  }

  void PerformTest() override {
    EXPECT_TRUE(Wait()) << "Test timed out waiting for VP9 packet, num frames "
                        << frames_sent_;
  }

  Action OnSendRtp(const uint8_t* packet, size_t length) override {
    RTPHeader header;
    EXPECT_TRUE(parser_->Parse(packet, length, &header));

    EXPECT_EQ(kVp9PayloadType, header.payloadType);
    const uint8_t* payload = packet + header.headerLength;
    size_t payload_length = length - header.headerLength - header.paddingLength;

    bool new_packet = packets_sent_ == 0 ||
                      IsNewerSequenceNumber(header.sequenceNumber,
                                            last_header_.sequenceNumber);
    if (payload_length > 0 && new_packet) {
      RtpDepacketizer::ParsedPayload parsed;
      RtpDepacketizerVp9 depacketizer;
      EXPECT_TRUE(depacketizer.Parse(&parsed, payload, payload_length));
      EXPECT_EQ(RtpVideoCodecTypes::kRtpVideoVp9, parsed.type.Video.codec);
      // Verify common fields for all configurations.
      VerifyCommonHeader(parsed.type.Video.codecHeader.VP9);
      CompareConsecutiveFrames(header, parsed.type.Video);
      // Verify configuration specific settings.
      InspectHeader(parsed.type.Video.codecHeader.VP9);

      ++packets_sent_;
      if (header.markerBit) {
        ++frames_sent_;
      }
      last_header_ = header;
      last_vp9_ = parsed.type.Video.codecHeader.VP9;
    }
    return SEND_PACKET;
  }

 protected:
  bool ContinuousPictureId(const RTPVideoHeaderVP9& vp9) const {
    if (last_vp9_.picture_id > vp9.picture_id) {
      return vp9.picture_id == 0;  // Wrap.
    } else {
      return vp9.picture_id == last_vp9_.picture_id + 1;
    }
  }

  void VerifySpatialIdxWithinFrame(const RTPVideoHeaderVP9& vp9) const {
    bool new_layer = vp9.spatial_idx != last_vp9_.spatial_idx;
    EXPECT_EQ(new_layer, vp9.beginning_of_frame);
    EXPECT_EQ(new_layer, last_vp9_.end_of_frame);
    EXPECT_EQ(new_layer ? last_vp9_.spatial_idx + 1 : last_vp9_.spatial_idx,
              vp9.spatial_idx);
  }

  void VerifyFixedTemporalLayerStructure(const RTPVideoHeaderVP9& vp9,
                                         uint8_t num_layers) const {
    switch (num_layers) {
      case 0:
        VerifyTemporalLayerStructure0(vp9);
        break;
      case 1:
        VerifyTemporalLayerStructure1(vp9);
        break;
      case 2:
        VerifyTemporalLayerStructure2(vp9);
        break;
      case 3:
        VerifyTemporalLayerStructure3(vp9);
        break;
      default:
        RTC_NOTREACHED();
    }
  }

  void VerifyTemporalLayerStructure0(const RTPVideoHeaderVP9& vp9) const {
    EXPECT_EQ(kNoTl0PicIdx, vp9.tl0_pic_idx);
    EXPECT_EQ(kNoTemporalIdx, vp9.temporal_idx);  // no tid
    EXPECT_FALSE(vp9.temporal_up_switch);
  }

  void VerifyTemporalLayerStructure1(const RTPVideoHeaderVP9& vp9) const {
    EXPECT_NE(kNoTl0PicIdx, vp9.tl0_pic_idx);
    EXPECT_EQ(0, vp9.temporal_idx);  // 0,0,0,...
    EXPECT_FALSE(vp9.temporal_up_switch);
  }

  void VerifyTemporalLayerStructure2(const RTPVideoHeaderVP9& vp9) const {
    EXPECT_NE(kNoTl0PicIdx, vp9.tl0_pic_idx);
    EXPECT_GE(vp9.temporal_idx, 0);  // 0,1,0,1,... (tid reset on I-frames).
    EXPECT_LE(vp9.temporal_idx, 1);
    EXPECT_EQ(vp9.temporal_idx > 0, vp9.temporal_up_switch);
    if (IsNewPictureId(vp9)) {
      uint8_t expected_tid =
          (!vp9.inter_pic_predicted || last_vp9_.temporal_idx == 1) ? 0 : 1;
      EXPECT_EQ(expected_tid, vp9.temporal_idx);
    }
  }

  void VerifyTemporalLayerStructure3(const RTPVideoHeaderVP9& vp9) const {
    EXPECT_NE(kNoTl0PicIdx, vp9.tl0_pic_idx);
    EXPECT_GE(vp9.temporal_idx, 0);  // 0,2,1,2,... (tid reset on I-frames).
    EXPECT_LE(vp9.temporal_idx, 2);
    if (IsNewPictureId(vp9) && vp9.inter_pic_predicted) {
      EXPECT_NE(vp9.temporal_idx, last_vp9_.temporal_idx);
      switch (vp9.temporal_idx) {
        case 0:
          EXPECT_EQ(2, last_vp9_.temporal_idx);
          EXPECT_FALSE(vp9.temporal_up_switch);
          break;
        case 1:
          EXPECT_EQ(2, last_vp9_.temporal_idx);
          EXPECT_TRUE(vp9.temporal_up_switch);
          break;
        case 2:
          EXPECT_EQ(last_vp9_.temporal_idx == 0, vp9.temporal_up_switch);
          break;
      }
    }
  }

  void VerifyTl0Idx(const RTPVideoHeaderVP9& vp9) const {
    if (vp9.tl0_pic_idx == kNoTl0PicIdx)
      return;

    uint8_t expected_tl0_idx = last_vp9_.tl0_pic_idx;
    if (vp9.temporal_idx == 0)
      ++expected_tl0_idx;
    EXPECT_EQ(expected_tl0_idx, vp9.tl0_pic_idx);
  }

  bool IsNewPictureId(const RTPVideoHeaderVP9& vp9) const {
    return frames_sent_ > 0 && (vp9.picture_id != last_vp9_.picture_id);
  }

  // Flexible mode (F=1):     Non-flexible mode (F=0):
  //
  //      +-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+
  //      |I|P|L|F|B|E|V|-|     |I|P|L|F|B|E|V|-|
  //      +-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+
  // I:   |M| PICTURE ID  |  I: |M| PICTURE ID  |
  //      +-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+
  // M:   | EXTENDED PID  |  M: | EXTENDED PID  |
  //      +-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+
  // L:   |  T  |U|  S  |D|  L: |  T  |U|  S  |D|
  //      +-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+
  // P,F: | P_DIFF    |X|N|     |   TL0PICIDX   |
  //      +-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+
  // X:   |EXTENDED P_DIFF|  V: | SS  ..        |
  //      +-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+
  // V:   | SS  ..        |
  //      +-+-+-+-+-+-+-+-+
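  // The single-letter flags above correspond, roughly, to the
  // RTPVideoHeaderVP9 fields checked below: I - picture_id present,
  // P - inter_pic_predicted, L - layer indices (temporal_idx/spatial_idx)
  // present, F - flexible_mode, B - beginning_of_frame, E - end_of_frame,
  // V - ss_data_available.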
  void VerifyCommonHeader(const RTPVideoHeaderVP9& vp9) const {
    EXPECT_EQ(kMaxTwoBytePictureId, vp9.max_picture_id);       // M:1
    EXPECT_NE(kNoPictureId, vp9.picture_id);                   // I:1
    EXPECT_EQ(vp9_settings_.flexibleMode, vp9.flexible_mode);  // F
    EXPECT_GE(vp9.spatial_idx, 0);                             // S
    EXPECT_LT(vp9.spatial_idx, vp9_settings_.numberOfSpatialLayers);
    if (vp9.ss_data_available)  // V
      VerifySsData(vp9);

    if (frames_sent_ == 0)
      EXPECT_FALSE(vp9.inter_pic_predicted);  // P

    if (!vp9.inter_pic_predicted) {
      EXPECT_TRUE(vp9.temporal_idx == 0 || vp9.temporal_idx == kNoTemporalIdx);
      EXPECT_FALSE(vp9.temporal_up_switch);
    }
  }

  // Scalability structure (SS).
  //
  //      +-+-+-+-+-+-+-+-+
  // V:   | N_S |Y|G|-|-|-|
  //      +-+-+-+-+-+-+-+-+
  // Y:   |    WIDTH      |  N_S + 1 times
  //      +-+-+-+-+-+-+-+-+
  //      |    HEIGHT     |
  //      +-+-+-+-+-+-+-+-+
  // G:   |      N_G      |
  //      +-+-+-+-+-+-+-+-+
  // N_G: |  T  |U| R |-|-|  N_G times
  //      +-+-+-+-+-+-+-+-+
  //      |    P_DIFF     |  R times
  //      +-+-+-+-+-+-+-+-+
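  // Only the resolution part of the SS is verified here: N_S + 1 must match
  // the configured number of spatial layers, Y must be set, and each spatial
  // layer is expected to have half the width/height of the layer above it.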
  void VerifySsData(const RTPVideoHeaderVP9& vp9) const {
    EXPECT_TRUE(vp9.ss_data_available);             // V
    EXPECT_EQ(vp9_settings_.numberOfSpatialLayers,  // N_S + 1
              vp9.num_spatial_layers);
    EXPECT_TRUE(vp9.spatial_layer_resolution_present);  // Y:1
    size_t expected_width = encoder_config_.streams[0].width;
    size_t expected_height = encoder_config_.streams[0].height;
    for (int i = vp9.num_spatial_layers - 1; i >= 0; --i) {
      EXPECT_EQ(expected_width, vp9.width[i]);    // WIDTH
      EXPECT_EQ(expected_height, vp9.height[i]);  // HEIGHT
      expected_width /= 2;
      expected_height /= 2;
    }
  }

  void CompareConsecutiveFrames(const RTPHeader& header,
                                const RTPVideoHeader& video) const {
    const RTPVideoHeaderVP9& vp9 = video.codecHeader.VP9;

    bool new_frame = packets_sent_ == 0 ||
                     IsNewerTimestamp(header.timestamp, last_header_.timestamp);
    EXPECT_EQ(new_frame, video.isFirstPacket);
    if (!new_frame) {
      EXPECT_FALSE(last_header_.markerBit);
      EXPECT_EQ(last_header_.timestamp, header.timestamp);
      EXPECT_EQ(last_vp9_.picture_id, vp9.picture_id);
      EXPECT_EQ(last_vp9_.temporal_idx, vp9.temporal_idx);
      EXPECT_EQ(last_vp9_.tl0_pic_idx, vp9.tl0_pic_idx);
      VerifySpatialIdxWithinFrame(vp9);
      return;
    }
    // New frame.
    EXPECT_TRUE(vp9.beginning_of_frame);

    // Compare with last packet in previous frame.
    if (frames_sent_ == 0)
      return;
    EXPECT_TRUE(last_vp9_.end_of_frame);
    EXPECT_TRUE(last_header_.markerBit);
    EXPECT_TRUE(ContinuousPictureId(vp9));
    VerifyTl0Idx(vp9);
  }

  rtc::scoped_ptr<VP9Encoder> vp9_encoder_;
  VideoCodecVP9 vp9_settings_;
  webrtc::VideoEncoderConfig encoder_config_;
  RTPHeader last_header_;
  RTPVideoHeaderVP9 last_vp9_;
  size_t packets_sent_;
  size_t frames_sent_;
};

TEST_F(VideoSendStreamTest, Vp9NonFlexMode_1Tl1SLayers) {
  const uint8_t kNumTemporalLayers = 1;
  const uint8_t kNumSpatialLayers = 1;
  TestVp9NonFlexMode(kNumTemporalLayers, kNumSpatialLayers);
}

TEST_F(VideoSendStreamTest, Vp9NonFlexMode_2Tl1SLayers) {
  const uint8_t kNumTemporalLayers = 2;
  const uint8_t kNumSpatialLayers = 1;
  TestVp9NonFlexMode(kNumTemporalLayers, kNumSpatialLayers);
}

TEST_F(VideoSendStreamTest, Vp9NonFlexMode_3Tl1SLayers) {
  const uint8_t kNumTemporalLayers = 3;
  const uint8_t kNumSpatialLayers = 1;
  TestVp9NonFlexMode(kNumTemporalLayers, kNumSpatialLayers);
}

TEST_F(VideoSendStreamTest, Vp9NonFlexMode_1Tl2SLayers) {
  const uint8_t kNumTemporalLayers = 1;
  const uint8_t kNumSpatialLayers = 2;
  TestVp9NonFlexMode(kNumTemporalLayers, kNumSpatialLayers);
}

TEST_F(VideoSendStreamTest, Vp9NonFlexMode_2Tl2SLayers) {
  const uint8_t kNumTemporalLayers = 2;
  const uint8_t kNumSpatialLayers = 2;
  TestVp9NonFlexMode(kNumTemporalLayers, kNumSpatialLayers);
}

TEST_F(VideoSendStreamTest, Vp9NonFlexMode_3Tl2SLayers) {
  const uint8_t kNumTemporalLayers = 3;
  const uint8_t kNumSpatialLayers = 2;
  TestVp9NonFlexMode(kNumTemporalLayers, kNumSpatialLayers);
}


void VideoSendStreamTest::TestVp9NonFlexMode(uint8_t num_temporal_layers,
                                             uint8_t num_spatial_layers) {
  static const size_t kNumFramesToSend = 100;
  // Set to a value < kNumFramesToSend and coprime to the length of the
  // temporal layer structures, to verify that the temporal id is reset on key
  // frames.
  static const int kKeyFrameInterval = 31;
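  // Being odd, 31 is coprime to the 1-, 2- and 4-picture temporal patterns
  // used for one, two and three temporal layers (see the structure checks in
  // Vp9HeaderObserver), so key frames land at varying positions in the
  // pattern over the course of the test.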
  class NonFlexibleMode : public Vp9HeaderObserver {
   public:
    NonFlexibleMode(uint8_t num_temporal_layers, uint8_t num_spatial_layers)
        : num_temporal_layers_(num_temporal_layers),
          num_spatial_layers_(num_spatial_layers),
          l_field_(num_temporal_layers > 1 || num_spatial_layers > 1) {}
    void ModifyVideoConfigsHook(
        VideoSendStream::Config* send_config,
        std::vector<VideoReceiveStream::Config>* receive_configs,
        VideoEncoderConfig* encoder_config) override {
      vp9_settings_.flexibleMode = false;
      vp9_settings_.frameDroppingOn = false;
      vp9_settings_.keyFrameInterval = kKeyFrameInterval;
      vp9_settings_.numberOfTemporalLayers = num_temporal_layers_;
      vp9_settings_.numberOfSpatialLayers = num_spatial_layers_;
    }

    void InspectHeader(const RTPVideoHeaderVP9& vp9) override {
      bool ss_data_expected = !vp9.inter_pic_predicted &&
                              vp9.beginning_of_frame && vp9.spatial_idx == 0;
      EXPECT_EQ(ss_data_expected, vp9.ss_data_available);
      EXPECT_EQ(vp9.spatial_idx > 0, vp9.inter_layer_predicted);  // D
      EXPECT_EQ(!vp9.inter_pic_predicted,
                frames_sent_ % kKeyFrameInterval == 0);

      if (IsNewPictureId(vp9)) {
        EXPECT_EQ(0, vp9.spatial_idx);
        EXPECT_EQ(num_spatial_layers_ - 1, last_vp9_.spatial_idx);
      }

      VerifyFixedTemporalLayerStructure(vp9,
                                        l_field_ ? num_temporal_layers_ : 0);

      if (frames_sent_ > kNumFramesToSend)
        observation_complete_.Set();
    }
    const uint8_t num_temporal_layers_;
    const uint8_t num_spatial_layers_;
    const bool l_field_;
  } test(num_temporal_layers, num_spatial_layers);

  RunBaseTest(&test);
}

#if !defined(MEMORY_SANITIZER)
// Fails under MemorySanitizer:
// See https://code.google.com/p/webrtc/issues/detail?id=5402.
TEST_F(VideoSendStreamTest, Vp9FlexModeRefCount) {
  class FlexibleMode : public Vp9HeaderObserver {
    void ModifyVideoConfigsHook(
        VideoSendStream::Config* send_config,
        std::vector<VideoReceiveStream::Config>* receive_configs,
        VideoEncoderConfig* encoder_config) override {
      encoder_config->content_type = VideoEncoderConfig::ContentType::kScreen;
      vp9_settings_.flexibleMode = true;
      vp9_settings_.numberOfTemporalLayers = 1;
      vp9_settings_.numberOfSpatialLayers = 2;
    }

    void InspectHeader(const RTPVideoHeaderVP9& vp9_header) override {
      EXPECT_TRUE(vp9_header.flexible_mode);
      EXPECT_EQ(kNoTl0PicIdx, vp9_header.tl0_pic_idx);
      if (vp9_header.inter_pic_predicted) {
        EXPECT_GT(vp9_header.num_ref_pics, 0u);
        observation_complete_.Set();
      }
    }
  } test;

  RunBaseTest(&test);
}
#endif

}  // namespace webrtc