1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #include "modules/rtp_rtcp/source/rtp_sender_video.h"
12
13 #include <stdlib.h>
14 #include <string.h>
15
16 #include <algorithm>
17 #include <limits>
18 #include <memory>
19 #include <string>
20 #include <utility>
21
22 #include "absl/algorithm/container.h"
23 #include "absl/memory/memory.h"
24 #include "absl/strings/match.h"
25 #include "api/crypto/frame_encryptor_interface.h"
26 #include "api/transport/rtp/dependency_descriptor.h"
27 #include "modules/remote_bitrate_estimator/test/bwe_test_logging.h"
28 #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
29 #include "modules/rtp_rtcp/source/absolute_capture_time_sender.h"
30 #include "modules/rtp_rtcp/source/byte_io.h"
31 #include "modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h"
32 #include "modules/rtp_rtcp/source/rtp_descriptor_authentication.h"
33 #include "modules/rtp_rtcp/source/rtp_format.h"
34 #include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h"
35 #include "modules/rtp_rtcp/source/rtp_header_extensions.h"
36 #include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
37 #include "modules/rtp_rtcp/source/time_util.h"
38 #include "rtc_base/checks.h"
39 #include "rtc_base/logging.h"
40 #include "rtc_base/trace_event.h"
41
42 namespace webrtc {
43
44 namespace {
// Size in bytes of the RED header (RFC 2198) prepended to a media payload
// when RED encapsulation is used for FEC (see BuildRedPayload()).
constexpr size_t kRedForFecHeaderLength = 1;
// If no retransmittable frame has been seen in an upper temporal layer for
// this long (~4 frame intervals at 30 fps), conditional retransmission
// re-enables NACK protection for that layer
// (see UpdateConditionalRetransmit()).
constexpr int64_t kMaxUnretransmittableFrameIntervalMs = 33 * 4;
47
BuildRedPayload(const RtpPacketToSend & media_packet,RtpPacketToSend * red_packet)48 void BuildRedPayload(const RtpPacketToSend& media_packet,
49 RtpPacketToSend* red_packet) {
50 uint8_t* red_payload = red_packet->AllocatePayload(
51 kRedForFecHeaderLength + media_packet.payload_size());
52 RTC_DCHECK(red_payload);
53 red_payload[0] = media_packet.PayloadType();
54
55 auto media_payload = media_packet.payload();
56 memcpy(&red_payload[kRedForFecHeaderLength], media_payload.data(),
57 media_payload.size());
58 }
59
MinimizeDescriptor(RTPVideoHeader * video_header)60 bool MinimizeDescriptor(RTPVideoHeader* video_header) {
61 if (auto* vp8 =
62 absl::get_if<RTPVideoHeaderVP8>(&video_header->video_type_header)) {
63 // Set minimum fields the RtpPacketizer is using to create vp8 packets.
64 // nonReference is the only field that doesn't require extra space.
65 bool non_reference = vp8->nonReference;
66 vp8->InitRTPVideoHeaderVP8();
67 vp8->nonReference = non_reference;
68 return true;
69 }
70 // TODO(danilchap): Reduce vp9 codec specific descriptor too.
71 return false;
72 }
73
IsBaseLayer(const RTPVideoHeader & video_header)74 bool IsBaseLayer(const RTPVideoHeader& video_header) {
75 switch (video_header.codec) {
76 case kVideoCodecVP8: {
77 const auto& vp8 =
78 absl::get<RTPVideoHeaderVP8>(video_header.video_type_header);
79 return (vp8.temporalIdx == 0 || vp8.temporalIdx == kNoTemporalIdx);
80 }
81 case kVideoCodecVP9: {
82 const auto& vp9 =
83 absl::get<RTPVideoHeaderVP9>(video_header.video_type_header);
84 return (vp9.temporal_idx == 0 || vp9.temporal_idx == kNoTemporalIdx);
85 }
86 case kVideoCodecH264:
87 // TODO(kron): Implement logic for H264 once WebRTC supports temporal
88 // layers for H264.
89 break;
90 default:
91 break;
92 }
93 return true;
94 }
95
#if RTC_TRACE_EVENTS_ENABLED
// Returns a short human-readable name for |frame_type|; used only to
// annotate trace events.
const char* FrameTypeToString(VideoFrameType frame_type) {
  if (frame_type == VideoFrameType::kEmptyFrame)
    return "empty";
  if (frame_type == VideoFrameType::kVideoFrameKey)
    return "video_key";
  if (frame_type == VideoFrameType::kVideoFrameDelta)
    return "video_delta";
  RTC_NOTREACHED();
  return "";
}
#endif
111
IsNoopDelay(const PlayoutDelay & delay)112 bool IsNoopDelay(const PlayoutDelay& delay) {
113 return delay.min_ms == -1 && delay.max_ms == -1;
114 }
115
116 } // namespace
117
// Constructs the video RTP sender from |config|. Configuration values are
// copied up front; the pointed-to |config.rtp_sender| and |config.clock|
// must outlive this object.
RTPSenderVideo::RTPSenderVideo(const Config& config)
    : rtp_sender_(config.rtp_sender),
      clock_(config.clock),
      // Either retransmit every layer, or the base layer plus (conditionally)
      // higher temporal layers — see UpdateConditionalRetransmit().
      retransmission_settings_(
          config.enable_retransmit_all_layers
              ? kRetransmitAllLayers
              : (kRetransmitBaseLayer | kConditionallyRetransmitHigherLayers)),
      last_rotation_(kVideoRotation_0),
      transmit_color_space_next_frame_(false),
      // {-1, -1} is the "no delay specified" sentinel (see IsNoopDelay()).
      current_playout_delay_{-1, -1},
      playout_delay_pending_(false),
      red_payload_type_(config.red_payload_type),
      fec_generator_(config.fec_generator),
      fec_type_(config.fec_type),
      fec_overhead_bytes_(config.fec_overhead_bytes),
      // 1000 ms rate windows, reported in bits per second.
      video_bitrate_(1000, RateStatistics::kBpsScale),
      packetization_overhead_bitrate_(1000, RateStatistics::kBpsScale),
      frame_encryptor_(config.frame_encryptor),
      require_frame_encryption_(config.require_frame_encryption),
      // Generic-descriptor authentication is enabled unless the field trial
      // is explicitly "Disabled".
      generic_descriptor_auth_experiment_(!absl::StartsWith(
          config.field_trials->Lookup("WebRTC-GenericDescriptorAuth"),
          "Disabled")),
      absolute_capture_time_sender_(config.clock),
      // Create a transformer delegate only when a frame transformer was
      // supplied; otherwise frames are sent directly by SendVideo().
      frame_transformer_delegate_(
          config.frame_transformer
              ? new rtc::RefCountedObject<
                    RTPSenderVideoFrameTransformerDelegate>(
                    this,
                    config.frame_transformer,
                    rtp_sender_->SSRC(),
                    config.send_transport_queue)
              : nullptr) {
  if (frame_transformer_delegate_)
    frame_transformer_delegate_->Init();
}
153
// Detaches from the frame-transformer delegate (if any) so the delegate
// stops calling back into this object after destruction.
RTPSenderVideo::~RTPSenderVideo() {
  if (frame_transformer_delegate_)
    frame_transformer_delegate_->Reset();
}
158
LogAndSendToNetwork(std::vector<std::unique_ptr<RtpPacketToSend>> packets,size_t unpacketized_payload_size)159 void RTPSenderVideo::LogAndSendToNetwork(
160 std::vector<std::unique_ptr<RtpPacketToSend>> packets,
161 size_t unpacketized_payload_size) {
162 int64_t now_ms = clock_->TimeInMilliseconds();
163 #if BWE_TEST_LOGGING_COMPILE_TIME_ENABLE
164 if (fec_generator_) {
165 uint32_t fec_rate_kbps = fec_generator_->CurrentFecRate().kbps();
166 for (const auto& packet : packets) {
167 if (packet->packet_type() ==
168 RtpPacketMediaType::kForwardErrorCorrection) {
169 const uint32_t ssrc = packet->Ssrc();
170 BWE_TEST_LOGGING_PLOT_WITH_SSRC(1, "VideoFecBitrate_kbps", now_ms,
171 fec_rate_kbps, ssrc);
172 }
173 }
174 }
175 #endif
176
177 {
178 MutexLock lock(&stats_mutex_);
179 size_t packetized_payload_size = 0;
180 for (const auto& packet : packets) {
181 if (*packet->packet_type() == RtpPacketMediaType::kVideo) {
182 video_bitrate_.Update(packet->size(), now_ms);
183 packetized_payload_size += packet->payload_size();
184 }
185 }
186 // AV1 and H264 packetizers may produce less packetized bytes than
187 // unpacketized.
188 if (packetized_payload_size >= unpacketized_payload_size) {
189 packetization_overhead_bitrate_.Update(
190 packetized_payload_size - unpacketized_payload_size,
191 clock_->TimeInMilliseconds());
192 }
193 }
194
195 rtp_sender_->EnqueuePackets(std::move(packets));
196 }
197
FecPacketOverhead() const198 size_t RTPSenderVideo::FecPacketOverhead() const {
199 size_t overhead = fec_overhead_bytes_;
200 if (red_enabled()) {
201 // The RED overhead is due to a small header.
202 overhead += kRedForFecHeaderLength;
203
204 if (fec_type_ == VideoFecGenerator::FecType::kUlpFec) {
205 // For ULPFEC, the overhead is the FEC headers plus RED for FEC header
206 // (see above) plus anything in RTP header beyond the 12 bytes base header
207 // (CSRC list, extensions...)
208 // This reason for the header extensions to be included here is that
209 // from an FEC viewpoint, they are part of the payload to be protected.
210 // (The base RTP header is already protected by the FEC header.)
211 overhead +=
212 rtp_sender_->FecOrPaddingPacketMaxRtpHeaderLength() - kRtpHeaderSize;
213 }
214 }
215 return overhead;
216 }
217
SetVideoStructure(const FrameDependencyStructure * video_structure)218 void RTPSenderVideo::SetVideoStructure(
219 const FrameDependencyStructure* video_structure) {
220 if (frame_transformer_delegate_) {
221 frame_transformer_delegate_->SetVideoStructureUnderLock(video_structure);
222 return;
223 }
224 // Lock is being held by SetVideoStructure() caller.
225 SetVideoStructureUnderLock(video_structure);
226 }
227
SetVideoStructureUnderLock(const FrameDependencyStructure * video_structure)228 void RTPSenderVideo::SetVideoStructureUnderLock(
229 const FrameDependencyStructure* video_structure) {
230 RTC_DCHECK_RUNS_SERIALIZED(&send_checker_);
231 if (video_structure == nullptr) {
232 video_structure_ = nullptr;
233 return;
234 }
235 // Simple sanity checks video structure is set up.
236 RTC_DCHECK_GT(video_structure->num_decode_targets, 0);
237 RTC_DCHECK_GT(video_structure->templates.size(), 0);
238
239 int structure_id = 0;
240 if (video_structure_) {
241 if (*video_structure_ == *video_structure) {
242 // Same structure (just a new key frame), no update required.
243 return;
244 }
245 // When setting different video structure make sure structure_id is updated
246 // so that templates from different structures do not collide.
247 static constexpr int kMaxTemplates = 64;
248 structure_id =
249 (video_structure_->structure_id + video_structure_->templates.size()) %
250 kMaxTemplates;
251 }
252
253 video_structure_ =
254 std::make_unique<FrameDependencyStructure>(*video_structure);
255 video_structure_->structure_id = structure_id;
256 }
257
// Writes the RTP header extensions relevant for this frame into |packet|.
// Which extensions are set depends on the packet's position in the frame:
// most frame-level metadata (color space, rotation, content type, timing)
// rides only on the last packet, absolute capture time only on the first,
// and the playout-delay extension on every packet of the frame.
void RTPSenderVideo::AddRtpHeaderExtensions(
    const RTPVideoHeader& video_header,
    const absl::optional<AbsoluteCaptureTime>& absolute_capture_time,
    bool first_packet,
    bool last_packet,
    RtpPacketToSend* packet) const {
  // Send color space when changed or if the frame is a key frame. Keep
  // sending color space information until the first base layer frame to
  // guarantee that the information is retrieved by the receiver.
  bool set_color_space =
      video_header.color_space != last_color_space_ ||
      video_header.frame_type == VideoFrameType::kVideoFrameKey ||
      transmit_color_space_next_frame_;
  // Color space requires two-byte header extensions if HDR metadata is
  // included. Therefore, it's best to add this extension first so that the
  // other extensions in the same packet are written as two-byte headers at
  // once.
  if (last_packet && set_color_space && video_header.color_space)
    packet->SetExtension<ColorSpaceExtension>(video_header.color_space.value());

  // According to
  // http://www.etsi.org/deliver/etsi_ts/126100_126199/126114/12.07.00_60/
  // ts_126114v120700p.pdf Section 7.4.5:
  // The MTSI client shall add the payload bytes as defined in this clause
  // onto the last RTP packet in each group of packets which make up a key
  // frame (I-frame or IDR frame in H.264 (AVC), or an IRAP picture in H.265
  // (HEVC)). The MTSI client may also add the payload bytes onto the last RTP
  // packet in each group of packets which make up another type of frame
  // (e.g. a P-Frame) only if the current value is different from the previous
  // value sent.
  // Set rotation when key frame or when changed (to follow standard).
  // Or when different from 0 (to follow current receiver implementation).
  bool set_video_rotation =
      video_header.frame_type == VideoFrameType::kVideoFrameKey ||
      video_header.rotation != last_rotation_ ||
      video_header.rotation != kVideoRotation_0;
  if (last_packet && set_video_rotation)
    packet->SetExtension<VideoOrientation>(video_header.rotation);

  // Report content type only for key frames.
  if (last_packet &&
      video_header.frame_type == VideoFrameType::kVideoFrameKey &&
      video_header.content_type != VideoContentType::UNSPECIFIED)
    packet->SetExtension<VideoContentTypeExtension>(video_header.content_type);

  if (last_packet &&
      video_header.video_timing.flags != VideoSendTiming::kInvalid)
    packet->SetExtension<VideoTimingExtension>(video_header.video_timing);

  // If transmitted, add to all packets; ack logic depends on this.
  if (playout_delay_pending_) {
    packet->SetExtension<PlayoutDelayLimits>(current_playout_delay_);
  }

  if (first_packet && absolute_capture_time) {
    packet->SetExtension<AbsoluteCaptureTimeExtension>(*absolute_capture_time);
  }

  if (video_header.generic) {
    bool extension_is_set = false;
    if (video_structure_ != nullptr) {
      // Prefer the dependency descriptor when a video structure is set.
      DependencyDescriptor descriptor;
      descriptor.first_packet_in_frame = first_packet;
      descriptor.last_packet_in_frame = last_packet;
      // The wire format carries only the low 16 bits of the frame id.
      descriptor.frame_number = video_header.generic->frame_id & 0xFFFF;
      descriptor.frame_dependencies.spatial_id =
          video_header.generic->spatial_index;
      descriptor.frame_dependencies.temporal_id =
          video_header.generic->temporal_index;
      for (int64_t dep : video_header.generic->dependencies) {
        descriptor.frame_dependencies.frame_diffs.push_back(
            video_header.generic->frame_id - dep);
      }
      descriptor.frame_dependencies.chain_diffs =
          video_header.generic->chain_diffs;
      descriptor.frame_dependencies.decode_target_indications =
          video_header.generic->decode_target_indications;
      RTC_DCHECK_EQ(
          descriptor.frame_dependencies.decode_target_indications.size(),
          video_structure_->num_decode_targets);

      if (first_packet) {
        descriptor.active_decode_targets_bitmask =
            active_decode_targets_tracker_.ActiveDecodeTargetsBitmask();
      }
      // To avoid extra structure copy, temporary share ownership of the
      // video_structure with the dependency descriptor.
      if (video_header.frame_type == VideoFrameType::kVideoFrameKey &&
          first_packet) {
        descriptor.attached_structure =
            absl::WrapUnique(video_structure_.get());
      }
      extension_is_set = packet->SetExtension<RtpDependencyDescriptorExtension>(
          *video_structure_,
          active_decode_targets_tracker_.ActiveChainsBitmask(), descriptor);

      // Remove the temporary shared ownership.
      // release() (not reset()) so video_structure_ keeps sole ownership and
      // the descriptor's destructor does not double-free it.
      descriptor.attached_structure.release();
    }

    // Do not use generic frame descriptor when dependency descriptor is stored.
    if (!extension_is_set) {
      RtpGenericFrameDescriptor generic_descriptor;
      generic_descriptor.SetFirstPacketInSubFrame(first_packet);
      generic_descriptor.SetLastPacketInSubFrame(last_packet);

      // Frame-level details are only signaled on the first packet.
      if (first_packet) {
        generic_descriptor.SetFrameId(
            static_cast<uint16_t>(video_header.generic->frame_id));
        for (int64_t dep : video_header.generic->dependencies) {
          generic_descriptor.AddFrameDependencyDiff(
              video_header.generic->frame_id - dep);
        }

        uint8_t spatial_bimask = 1 << video_header.generic->spatial_index;
        generic_descriptor.SetSpatialLayersBitmask(spatial_bimask);

        generic_descriptor.SetTemporalLayer(
            video_header.generic->temporal_index);

        if (video_header.frame_type == VideoFrameType::kVideoFrameKey) {
          generic_descriptor.SetResolution(video_header.width,
                                           video_header.height);
        }
      }

      packet->SetExtension<RtpGenericFrameDescriptorExtension00>(
          generic_descriptor);
    }
  }
}
389
// Packetizes one encoded frame (|payload|) into RTP packets, optionally
// encrypts, applies FEC/RED, and enqueues everything for sending.
// Returns true on success (an empty frame counts as trivially sent), false
// if the payload is empty, encryption fails, or packetization fails.
// Must run serialized on the send path (checked below).
bool RTPSenderVideo::SendVideo(
    int payload_type,
    absl::optional<VideoCodecType> codec_type,
    uint32_t rtp_timestamp,
    int64_t capture_time_ms,
    rtc::ArrayView<const uint8_t> payload,
    RTPVideoHeader video_header,
    absl::optional<int64_t> expected_retransmission_time_ms) {
#if RTC_TRACE_EVENTS_ENABLED
  TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", capture_time_ms, "Send", "type",
                          FrameTypeToString(video_header.frame_type));
#endif
  RTC_CHECK_RUNS_SERIALIZED(&send_checker_);

  // Empty frames carry no media to packetize; report success.
  if (video_header.frame_type == VideoFrameType::kEmptyFrame)
    return true;

  if (payload.empty())
    return false;

  int32_t retransmission_settings = retransmission_settings_;
  if (codec_type == VideoCodecType::kVideoCodecH264) {
    // Backward compatibility for older receivers without temporal layer logic.
    retransmission_settings = kRetransmitBaseLayer | kRetransmitHigherLayers;
  }

  MaybeUpdateCurrentPlayoutDelay(video_header);
  if (video_header.frame_type == VideoFrameType::kVideoFrameKey &&
      !IsNoopDelay(current_playout_delay_)) {
    // Force playout delay on key-frames, if set.
    playout_delay_pending_ = true;
  }

  if (video_structure_ != nullptr && video_header.generic) {
    active_decode_targets_tracker_.OnFrame(
        video_structure_->decode_target_protected_by_chain,
        video_header.generic->active_decode_targets,
        video_header.frame_type == VideoFrameType::kVideoFrameKey,
        video_header.generic->frame_id, video_header.generic->chain_diffs);
  }

  // Maximum size of packet including rtp headers.
  // Extra space left in case packet will be resent using fec or rtx.
  int packet_capacity = rtp_sender_->MaxRtpPacketSize() - FecPacketOverhead() -
                        (rtp_sender_->RtxStatus() ? kRtxHeaderSize : 0);

  std::unique_ptr<RtpPacketToSend> single_packet =
      rtp_sender_->AllocatePacket();
  RTC_DCHECK_LE(packet_capacity, single_packet->capacity());
  single_packet->SetPayloadType(payload_type);
  single_packet->SetTimestamp(rtp_timestamp);
  single_packet->set_capture_time_ms(capture_time_ms);

  const absl::optional<AbsoluteCaptureTime> absolute_capture_time =
      absolute_capture_time_sender_.OnSendPacket(
          AbsoluteCaptureTimeSender::GetSource(single_packet->Ssrc(),
                                               single_packet->Csrcs()),
          single_packet->Timestamp(), kVideoPayloadTypeFrequency,
          Int64MsToUQ32x32(single_packet->capture_time_ms() + NtpOffsetMs()),
          /*estimated_capture_clock_offset=*/absl::nullopt);

  // One template per packet position in the frame (only/first/middle/last);
  // each position can carry a different set of header extensions.
  auto first_packet = std::make_unique<RtpPacketToSend>(*single_packet);
  auto middle_packet = std::make_unique<RtpPacketToSend>(*single_packet);
  auto last_packet = std::make_unique<RtpPacketToSend>(*single_packet);
  // Simplest way to estimate how much extensions would occupy is to set them.
  AddRtpHeaderExtensions(video_header, absolute_capture_time,
                         /*first_packet=*/true, /*last_packet=*/true,
                         single_packet.get());
  AddRtpHeaderExtensions(video_header, absolute_capture_time,
                         /*first_packet=*/true, /*last_packet=*/false,
                         first_packet.get());
  AddRtpHeaderExtensions(video_header, absolute_capture_time,
                         /*first_packet=*/false, /*last_packet=*/false,
                         middle_packet.get());
  AddRtpHeaderExtensions(video_header, absolute_capture_time,
                         /*first_packet=*/false, /*last_packet=*/true,
                         last_packet.get());

  RTC_DCHECK_GT(packet_capacity, single_packet->headers_size());
  RTC_DCHECK_GT(packet_capacity, first_packet->headers_size());
  RTC_DCHECK_GT(packet_capacity, middle_packet->headers_size());
  RTC_DCHECK_GT(packet_capacity, last_packet->headers_size());
  // Payload limits are expressed relative to the middle packet (the one with
  // the smallest header), with per-position reductions for the others.
  RtpPacketizer::PayloadSizeLimits limits;
  limits.max_payload_len = packet_capacity - middle_packet->headers_size();

  RTC_DCHECK_GE(single_packet->headers_size(), middle_packet->headers_size());
  limits.single_packet_reduction_len =
      single_packet->headers_size() - middle_packet->headers_size();

  RTC_DCHECK_GE(first_packet->headers_size(), middle_packet->headers_size());
  limits.first_packet_reduction_len =
      first_packet->headers_size() - middle_packet->headers_size();

  RTC_DCHECK_GE(last_packet->headers_size(), middle_packet->headers_size());
  limits.last_packet_reduction_len =
      last_packet->headers_size() - middle_packet->headers_size();

  bool has_generic_descriptor =
      first_packet->HasExtension<RtpGenericFrameDescriptorExtension00>() ||
      first_packet->HasExtension<RtpDependencyDescriptorExtension>();

  // Minimization of the vp8 descriptor may erase temporal_id, so save it.
  const uint8_t temporal_id = GetTemporalId(video_header);
  if (has_generic_descriptor) {
    MinimizeDescriptor(&video_header);
  }

  // TODO(benwright@webrtc.org) - Allocate enough to always encrypt inline.
  rtc::Buffer encrypted_video_payload;
  if (frame_encryptor_ != nullptr) {
    // Frame encryption requires the generic descriptor (it carries the
    // authenticated header data).
    if (!has_generic_descriptor) {
      return false;
    }

    const size_t max_ciphertext_size =
        frame_encryptor_->GetMaxCiphertextByteSize(cricket::MEDIA_TYPE_VIDEO,
                                                   payload.size());
    encrypted_video_payload.SetSize(max_ciphertext_size);

    size_t bytes_written = 0;

    // Enable header authentication if the field trial isn't disabled.
    std::vector<uint8_t> additional_data;
    if (generic_descriptor_auth_experiment_) {
      additional_data = RtpDescriptorAuthentication(video_header);
    }

    if (frame_encryptor_->Encrypt(
            cricket::MEDIA_TYPE_VIDEO, first_packet->Ssrc(), additional_data,
            payload, encrypted_video_payload, &bytes_written) != 0) {
      return false;
    }

    encrypted_video_payload.SetSize(bytes_written);
    // From here on, packetize the ciphertext instead of the plaintext.
    payload = encrypted_video_payload;
  } else if (require_frame_encryption_) {
    RTC_LOG(LS_WARNING)
        << "No FrameEncryptor is attached to this video sending stream but "
           "one is required since require_frame_encryptor is set";
  }

  std::unique_ptr<RtpPacketizer> packetizer =
      RtpPacketizer::Create(codec_type, payload, limits, video_header);

  // TODO(bugs.webrtc.org/10714): retransmission_settings_ should generally be
  // replaced by expected_retransmission_time_ms.has_value(). For now, though,
  // only VP8 with an injected frame buffer controller actually controls it.
  const bool allow_retransmission =
      expected_retransmission_time_ms.has_value()
          ? AllowRetransmission(temporal_id, retransmission_settings,
                                expected_retransmission_time_ms.value())
          : false;
  const size_t num_packets = packetizer->NumPackets();

  if (num_packets == 0)
    return false;

  bool first_frame = first_frame_sent_();
  std::vector<std::unique_ptr<RtpPacketToSend>> rtp_packets;
  for (size_t i = 0; i < num_packets; ++i) {
    std::unique_ptr<RtpPacketToSend> packet;
    int expected_payload_capacity;
    // Choose right packet template:
    if (num_packets == 1) {
      packet = std::move(single_packet);
      expected_payload_capacity =
          limits.max_payload_len - limits.single_packet_reduction_len;
    } else if (i == 0) {
      packet = std::move(first_packet);
      expected_payload_capacity =
          limits.max_payload_len - limits.first_packet_reduction_len;
    } else if (i == num_packets - 1) {
      packet = std::move(last_packet);
      expected_payload_capacity =
          limits.max_payload_len - limits.last_packet_reduction_len;
    } else {
      // Middle packets are copies of the middle template, which may be
      // needed more than once.
      packet = std::make_unique<RtpPacketToSend>(*middle_packet);
      expected_payload_capacity = limits.max_payload_len;
    }

    packet->set_first_packet_of_frame(i == 0);

    if (!packetizer->NextPacket(packet.get()))
      return false;
    RTC_DCHECK_LE(packet->payload_size(), expected_payload_capacity);
    if (!rtp_sender_->AssignSequenceNumber(packet.get()))
      return false;

    packet->set_allow_retransmission(allow_retransmission);

    // Put packetization finish timestamp into extension.
    if (packet->HasExtension<VideoTimingExtension>()) {
      packet->set_packetization_finish_time_ms(clock_->TimeInMilliseconds());
    }

    // No FEC protection for upper temporal layers, if used.
    if (fec_type_.has_value() &&
        (temporal_id == 0 || temporal_id == kNoTemporalIdx)) {
      if (fec_generator_) {
        fec_generator_->AddPacketAndGenerateFec(*packet);
      } else {
        // Deferred FEC generation, just mark packet.
        packet->set_fec_protect_packet(true);
      }
    }

    if (red_enabled()) {
      // Wrap the media packet in RED encapsulation (see BuildRedPayload()).
      std::unique_ptr<RtpPacketToSend> red_packet(new RtpPacketToSend(*packet));
      BuildRedPayload(*packet, red_packet.get());
      red_packet->SetPayloadType(*red_payload_type_);
      red_packet->set_is_red(true);

      // Send |red_packet| instead of |packet| for allocated sequence number.
      red_packet->set_packet_type(RtpPacketMediaType::kVideo);
      red_packet->set_allow_retransmission(packet->allow_retransmission());
      rtp_packets.emplace_back(std::move(red_packet));
    } else {
      packet->set_packet_type(RtpPacketMediaType::kVideo);
      rtp_packets.emplace_back(std::move(packet));
    }

    if (first_frame) {
      if (i == 0) {
        RTC_LOG(LS_INFO)
            << "Sent first RTP packet of the first video frame (pre-pacer)";
      }
      if (i == num_packets - 1) {
        RTC_LOG(LS_INFO)
            << "Sent last RTP packet of the first video frame (pre-pacer)";
      }
    }
  }

  if (fec_generator_) {
    // Fetch any FEC packets generated from the media frame and add them to
    // the list of packets to send.
    auto fec_packets = fec_generator_->GetFecPackets();
    // FEC on the media SSRC needs sequence numbers from the media sender;
    // FEC with its own SSRC manages its own sequence space.
    const bool generate_sequence_numbers = !fec_generator_->FecSsrc();
    for (auto& fec_packet : fec_packets) {
      if (generate_sequence_numbers) {
        rtp_sender_->AssignSequenceNumber(fec_packet.get());
      }
      rtp_packets.emplace_back(std::move(fec_packet));
    }
  }

  LogAndSendToNetwork(std::move(rtp_packets), payload.size());

  // Update details about the last sent frame.
  last_rotation_ = video_header.rotation;

  if (video_header.color_space != last_color_space_) {
    last_color_space_ = video_header.color_space;
    // Keep re-sending color space until a base-layer frame carries it
    // (see the comment in AddRtpHeaderExtensions()).
    transmit_color_space_next_frame_ = !IsBaseLayer(video_header);
  } else {
    transmit_color_space_next_frame_ =
        transmit_color_space_next_frame_ ? !IsBaseLayer(video_header) : false;
  }

  if (video_header.frame_type == VideoFrameType::kVideoFrameKey ||
      (IsBaseLayer(video_header) &&
       !(video_header.generic.has_value()
             ? absl::c_linear_search(
                   video_header.generic->decode_target_indications,
                   DecodeTargetIndication::kDiscardable)
             : false))) {
    // This frame has guaranteed delivery, no need to populate playout
    // delay extensions until it changes again.
    playout_delay_pending_ = false;
  }

  TRACE_EVENT_ASYNC_END1("webrtc", "Video", capture_time_ms, "timestamp",
                         rtp_timestamp);
  return true;
}
665
SendEncodedImage(int payload_type,absl::optional<VideoCodecType> codec_type,uint32_t rtp_timestamp,const EncodedImage & encoded_image,RTPVideoHeader video_header,absl::optional<int64_t> expected_retransmission_time_ms)666 bool RTPSenderVideo::SendEncodedImage(
667 int payload_type,
668 absl::optional<VideoCodecType> codec_type,
669 uint32_t rtp_timestamp,
670 const EncodedImage& encoded_image,
671 RTPVideoHeader video_header,
672 absl::optional<int64_t> expected_retransmission_time_ms) {
673 if (frame_transformer_delegate_) {
674 // The frame will be sent async once transformed.
675 return frame_transformer_delegate_->TransformFrame(
676 payload_type, codec_type, rtp_timestamp, encoded_image, video_header,
677 expected_retransmission_time_ms);
678 }
679 return SendVideo(payload_type, codec_type, rtp_timestamp,
680 encoded_image.capture_time_ms_, encoded_image, video_header,
681 expected_retransmission_time_ms);
682 }
683
VideoBitrateSent() const684 uint32_t RTPSenderVideo::VideoBitrateSent() const {
685 MutexLock lock(&stats_mutex_);
686 return video_bitrate_.Rate(clock_->TimeInMilliseconds()).value_or(0);
687 }
688
PacketizationOverheadBps() const689 uint32_t RTPSenderVideo::PacketizationOverheadBps() const {
690 MutexLock lock(&stats_mutex_);
691 return packetization_overhead_bitrate_.Rate(clock_->TimeInMilliseconds())
692 .value_or(0);
693 }
694
AllowRetransmission(uint8_t temporal_id,int32_t retransmission_settings,int64_t expected_retransmission_time_ms)695 bool RTPSenderVideo::AllowRetransmission(
696 uint8_t temporal_id,
697 int32_t retransmission_settings,
698 int64_t expected_retransmission_time_ms) {
699 if (retransmission_settings == kRetransmitOff)
700 return false;
701
702 MutexLock lock(&stats_mutex_);
703 // Media packet storage.
704 if ((retransmission_settings & kConditionallyRetransmitHigherLayers) &&
705 UpdateConditionalRetransmit(temporal_id,
706 expected_retransmission_time_ms)) {
707 retransmission_settings |= kRetransmitHigherLayers;
708 }
709
710 if (temporal_id == kNoTemporalIdx)
711 return true;
712
713 if ((retransmission_settings & kRetransmitBaseLayer) && temporal_id == 0)
714 return true;
715
716 if ((retransmission_settings & kRetransmitHigherLayers) && temporal_id > 0)
717 return true;
718
719 return false;
720 }
721
GetTemporalId(const RTPVideoHeader & header)722 uint8_t RTPSenderVideo::GetTemporalId(const RTPVideoHeader& header) {
723 struct TemporalIdGetter {
724 uint8_t operator()(const RTPVideoHeaderVP8& vp8) { return vp8.temporalIdx; }
725 uint8_t operator()(const RTPVideoHeaderVP9& vp9) {
726 return vp9.temporal_idx;
727 }
728 uint8_t operator()(const RTPVideoHeaderH264&) { return kNoTemporalIdx; }
729 uint8_t operator()(const RTPVideoHeaderLegacyGeneric&) {
730 return kNoTemporalIdx;
731 }
732 uint8_t operator()(const absl::monostate&) { return kNoTemporalIdx; }
733 };
734 return absl::visit(TemporalIdGetter(), header.video_type_header);
735 }
736
// Updates per-layer frame statistics and decides whether a frame in upper
// temporal layer |temporal_id| should get NACK protection: true when this
// layer has gone quiet, or when no lower-layer frame is expected soon enough
// to beat a retransmission. Called with stats_mutex_ held (see
// AllowRetransmission()).
bool RTPSenderVideo::UpdateConditionalRetransmit(
    uint8_t temporal_id,
    int64_t expected_retransmission_time_ms) {
  int64_t now_ms = clock_->TimeInMilliseconds();
  // Update stats for any temporal layer.
  TemporalLayerStats* current_layer_stats =
      &frame_stats_by_temporal_layer_[temporal_id];
  current_layer_stats->frame_rate_fp1000s.Update(1, now_ms);
  int64_t tl_frame_interval = now_ms - current_layer_stats->last_frame_time_ms;
  current_layer_stats->last_frame_time_ms = now_ms;

  // Conditional retransmit only applies to upper layers.
  if (temporal_id != kNoTemporalIdx && temporal_id > 0) {
    if (tl_frame_interval >= kMaxUnretransmittableFrameIntervalMs) {
      // Too long since a retransmittable frame in this layer, enable NACK
      // protection.
      return true;
    } else {
      // Estimate when the next frame of any lower layer will be sent.
      const int64_t kUndefined = std::numeric_limits<int64_t>::max();
      int64_t expected_next_frame_time = kUndefined;
      for (int i = temporal_id - 1; i >= 0; --i) {
        TemporalLayerStats* stats = &frame_stats_by_temporal_layer_[i];
        absl::optional<uint32_t> rate = stats->frame_rate_fp1000s.Rate(now_ms);
        if (rate) {
          // |rate| is a fixed-point frame rate (the "fp1000s" suffix —
          // presumably frames/second scaled by 1000), making
          // 1000000 / *rate the expected inter-frame interval in ms.
          int64_t tl_next = stats->last_frame_time_ms + 1000000 / *rate;
          // Only consider frames not already too far in the past to matter.
          if (tl_next - now_ms > -expected_retransmission_time_ms &&
              tl_next < expected_next_frame_time) {
            expected_next_frame_time = tl_next;
          }
        }
      }

      if (expected_next_frame_time == kUndefined ||
          expected_next_frame_time - now_ms > expected_retransmission_time_ms) {
        // The next frame in a lower layer is expected at a later time (or
        // unable to tell due to lack of data) than a retransmission is
        // estimated to be able to arrive, so allow this packet to be nacked.
        return true;
      }
    }
  }

  return false;
}
782
// Validates the playout delay requested in |header| and, if it represents a
// real change, merges it into current_playout_delay_ and marks the
// playout-delay extension as pending. A value of -1 for min or max means
// "unspecified" and is filled in from the current delay.
void RTPSenderVideo::MaybeUpdateCurrentPlayoutDelay(
    const RTPVideoHeader& header) {
  // {-1, -1} means the frame requests no playout delay at all.
  if (IsNoopDelay(header.playout_delay)) {
    return;
  }

  PlayoutDelay requested_delay = header.playout_delay;

  // Reject values that cannot be represented in the extension.
  if (requested_delay.min_ms > PlayoutDelayLimits::kMaxMs ||
      requested_delay.max_ms > PlayoutDelayLimits::kMaxMs) {
    RTC_DLOG(LS_ERROR)
        << "Requested playout delay values out of range, ignored";
    return;
  }
  // Reject min > max (unless max is the "unspecified" sentinel).
  if (requested_delay.max_ms != -1 &&
      requested_delay.min_ms > requested_delay.max_ms) {
    RTC_DLOG(LS_ERROR) << "Requested playout delay values out of order";
    return;
  }

  // Nothing pending yet: adopt the request as-is.
  if (!playout_delay_pending_) {
    current_playout_delay_ = requested_delay;
    playout_delay_pending_ = true;
    return;
  }

  // A delay is already pending; ignore a request that changes nothing
  // (treating -1 components as "same as current").
  if ((requested_delay.min_ms == -1 ||
       requested_delay.min_ms == current_playout_delay_.min_ms) &&
      (requested_delay.max_ms == -1 ||
       requested_delay.max_ms == current_playout_delay_.max_ms)) {
    // No change, ignore.
    return;
  }

  // Fill in unspecified components from the current delay, clamping so that
  // min <= max still holds.
  if (requested_delay.min_ms == -1) {
    RTC_DCHECK_GE(requested_delay.max_ms, 0);
    requested_delay.min_ms =
        std::min(current_playout_delay_.min_ms, requested_delay.max_ms);
  }
  if (requested_delay.max_ms == -1) {
    requested_delay.max_ms =
        std::max(current_playout_delay_.max_ms, requested_delay.min_ms);
  }

  current_playout_delay_ = requested_delay;
  playout_delay_pending_ = true;
}
830
831 } // namespace webrtc
832