/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "video/rtp_video_stream_receiver.h"

#include <algorithm>
#include <limits>
#include <memory>
#include <utility>
#include <vector>

#include "absl/algorithm/container.h"
#include "absl/base/macros.h"
#include "absl/memory/memory.h"
#include "absl/types/optional.h"
#include "media/base/media_constants.h"
#include "modules/pacing/packet_router.h"
#include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
#include "modules/rtp_rtcp/include/receive_statistics.h"
#include "modules/rtp_rtcp/include/rtp_cvo.h"
#include "modules/rtp_rtcp/include/ulpfec_receiver.h"
#include "modules/rtp_rtcp/source/create_video_rtp_depacketizer.h"
#include "modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h"
#include "modules/rtp_rtcp/source/rtp_format.h"
#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h"
#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h"
#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
#include "modules/rtp_rtcp/source/rtp_rtcp_config.h"
#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h"
#include "modules/rtp_rtcp/source/video_rtp_depacketizer.h"
#include "modules/rtp_rtcp/source/video_rtp_depacketizer_raw.h"
#include "modules/utility/include/process_thread.h"
#include "modules/video_coding/deprecated/nack_module.h"
#include "modules/video_coding/frame_object.h"
#include "modules/video_coding/h264_sprop_parameter_sets.h"
#include "modules/video_coding/h264_sps_pps_tracker.h"
#include "modules/video_coding/packet_buffer.h"
#include "rtc_base/checks.h"
#include "rtc_base/location.h"
#include "rtc_base/logging.h"
#include "rtc_base/strings/string_builder.h"
#include "system_wrappers/include/field_trial.h"
#include "system_wrappers/include/metrics.h"
#include "system_wrappers/include/ntp_time.h"
#include "video/receive_statistics_proxy.h"

namespace webrtc {

namespace {
// TODO(philipel): Change kPacketBufferStartSize back to 32 in M63 see:
// crbug.com/752886
constexpr int kPacketBufferStartSize = 512;
constexpr int kPacketBufferMaxSize = 2048;

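// Returns the maximum number of packets the packet buffer may hold. The cap
// can be overridden through the WebRTC-PacketBufferMaxSize field trial; for
// instance, a group value of "1024" (e.g. supplied via the
// --force-fieldtrials=WebRTC-PacketBufferMaxSize/1024/ switch in Chromium)
// would cap the buffer at 1024 packets instead of the default 2048.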
int PacketBufferMaxSize() {
  // The group specified by the field trial must be a positive power of 2, in
  // which case it is used as the size. All other values result in the default
  // value being used.
  const std::string group_name =
      webrtc::field_trial::FindFullName("WebRTC-PacketBufferMaxSize");
  int packet_buffer_max_size = kPacketBufferMaxSize;
  if (!group_name.empty() &&
      (sscanf(group_name.c_str(), "%d", &packet_buffer_max_size) != 1 ||
       packet_buffer_max_size <= 0 ||
       // Verify that the number is a positive power of 2.
       (packet_buffer_max_size & (packet_buffer_max_size - 1)) != 0)) {
    RTC_LOG(LS_WARNING) << "Invalid packet buffer max size: " << group_name;
    packet_buffer_max_size = kPacketBufferMaxSize;
  }
  return packet_buffer_max_size;
}

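// Assembles a receive-only RTP/RTCP module for this stream. The module is
// used to generate outgoing RTCP feedback (compound mode) and to track
// receive statistics; it never sends media.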
std::unique_ptr<RtpRtcp> CreateRtpRtcpModule(
    Clock* clock,
    ReceiveStatistics* receive_statistics,
    Transport* outgoing_transport,
    RtcpRttStats* rtt_stats,
    RtcpPacketTypeCounterObserver* rtcp_packet_type_counter_observer,
    RtcpCnameCallback* rtcp_cname_callback,
    uint32_t local_ssrc) {
  RtpRtcpInterface::Configuration configuration;
  configuration.clock = clock;
  configuration.audio = false;
  configuration.receiver_only = true;
  configuration.receive_statistics = receive_statistics;
  configuration.outgoing_transport = outgoing_transport;
  configuration.rtt_stats = rtt_stats;
  configuration.rtcp_packet_type_counter_observer =
      rtcp_packet_type_counter_observer;
  configuration.rtcp_cname_callback = rtcp_cname_callback;
  configuration.local_media_ssrc = local_ssrc;

  std::unique_ptr<RtpRtcp> rtp_rtcp = RtpRtcp::DEPRECATED_Create(configuration);
  rtp_rtcp->SetRTCPStatus(RtcpMode::kCompound);

  return rtp_rtcp;
}

static const int kPacketLogIntervalMs = 10000;

}  // namespace

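// RtcpFeedbackBuffer collects key frame requests, NACKs and loss
// notifications under a lock so that related feedback can later be flushed
// together as a single compound RTCP message instead of several separate
// ones.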
RtpVideoStreamReceiver::RtcpFeedbackBuffer::RtcpFeedbackBuffer(
    KeyFrameRequestSender* key_frame_request_sender,
    NackSender* nack_sender,
    LossNotificationSender* loss_notification_sender)
    : key_frame_request_sender_(key_frame_request_sender),
      nack_sender_(nack_sender),
      loss_notification_sender_(loss_notification_sender),
      request_key_frame_(false) {
  RTC_DCHECK(key_frame_request_sender_);
  RTC_DCHECK(nack_sender_);
  RTC_DCHECK(loss_notification_sender_);
}

void RtpVideoStreamReceiver::RtcpFeedbackBuffer::RequestKeyFrame() {
  MutexLock lock(&mutex_);
  request_key_frame_ = true;
}

void RtpVideoStreamReceiver::RtcpFeedbackBuffer::SendNack(
    const std::vector<uint16_t>& sequence_numbers,
    bool buffering_allowed) {
  RTC_DCHECK(!sequence_numbers.empty());
  MutexLock lock(&mutex_);
  nack_sequence_numbers_.insert(nack_sequence_numbers_.end(),
                                sequence_numbers.cbegin(),
                                sequence_numbers.cend());
  if (!buffering_allowed) {
    // Note that while *buffering* is not allowed, *batching* is, meaning that
    // previously buffered messages may be sent along with the current message.
    SendRtcpFeedback(ConsumeRtcpFeedbackLocked());
  }
}

void RtpVideoStreamReceiver::RtcpFeedbackBuffer::SendLossNotification(
    uint16_t last_decoded_seq_num,
    uint16_t last_received_seq_num,
    bool decodability_flag,
    bool buffering_allowed) {
  RTC_DCHECK(buffering_allowed);
  MutexLock lock(&mutex_);
  RTC_DCHECK(!lntf_state_)
      << "SendLossNotification() called twice in a row with no call to "
         "SendBufferedRtcpFeedback() in between.";
  lntf_state_ = absl::make_optional<LossNotificationState>(
      last_decoded_seq_num, last_received_seq_num, decodability_flag);
}

void RtpVideoStreamReceiver::RtcpFeedbackBuffer::SendBufferedRtcpFeedback() {
  SendRtcpFeedback(ConsumeRtcpFeedback());
}

RtpVideoStreamReceiver::RtcpFeedbackBuffer::ConsumedRtcpFeedback
RtpVideoStreamReceiver::RtcpFeedbackBuffer::ConsumeRtcpFeedback() {
  MutexLock lock(&mutex_);
  return ConsumeRtcpFeedbackLocked();
}

RtpVideoStreamReceiver::RtcpFeedbackBuffer::ConsumedRtcpFeedback
RtpVideoStreamReceiver::RtcpFeedbackBuffer::ConsumeRtcpFeedbackLocked() {
  ConsumedRtcpFeedback feedback;
  std::swap(feedback.request_key_frame, request_key_frame_);
  std::swap(feedback.nack_sequence_numbers, nack_sequence_numbers_);
  std::swap(feedback.lntf_state, lntf_state_);
  return feedback;
}

void RtpVideoStreamReceiver::RtcpFeedbackBuffer::SendRtcpFeedback(
    ConsumedRtcpFeedback feedback) {
  if (feedback.lntf_state) {
    // If either a NACK or a key frame request is sent, we should buffer
    // the LNTF and wait for them (NACK or key frame request) to trigger
    // the compound feedback message.
    // Otherwise, the LNTF should be sent out immediately.
    const bool buffering_allowed =
        feedback.request_key_frame || !feedback.nack_sequence_numbers.empty();

    loss_notification_sender_->SendLossNotification(
        feedback.lntf_state->last_decoded_seq_num,
        feedback.lntf_state->last_received_seq_num,
        feedback.lntf_state->decodability_flag, buffering_allowed);
  }

  if (feedback.request_key_frame) {
    key_frame_request_sender_->RequestKeyFrame();
  } else if (!feedback.nack_sequence_numbers.empty()) {
    nack_sender_->SendNack(feedback.nack_sequence_numbers, true);
  }
}

// DEPRECATED
RtpVideoStreamReceiver::RtpVideoStreamReceiver(
    Clock* clock,
    Transport* transport,
    RtcpRttStats* rtt_stats,
    PacketRouter* packet_router,
    const VideoReceiveStream::Config* config,
    ReceiveStatistics* rtp_receive_statistics,
    ReceiveStatisticsProxy* receive_stats_proxy,
    ProcessThread* process_thread,
    NackSender* nack_sender,
    KeyFrameRequestSender* keyframe_request_sender,
    video_coding::OnCompleteFrameCallback* complete_frame_callback,
    rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor,
    rtc::scoped_refptr<FrameTransformerInterface> frame_transformer)
    : RtpVideoStreamReceiver(clock,
                             transport,
                             rtt_stats,
                             packet_router,
                             config,
                             rtp_receive_statistics,
                             receive_stats_proxy,
                             receive_stats_proxy,
                             process_thread,
                             nack_sender,
                             keyframe_request_sender,
                             complete_frame_callback,
                             frame_decryptor,
                             frame_transformer) {}

RtpVideoStreamReceiver::RtpVideoStreamReceiver(
    Clock* clock,
    Transport* transport,
    RtcpRttStats* rtt_stats,
    PacketRouter* packet_router,
    const VideoReceiveStream::Config* config,
    ReceiveStatistics* rtp_receive_statistics,
    RtcpPacketTypeCounterObserver* rtcp_packet_type_counter_observer,
    RtcpCnameCallback* rtcp_cname_callback,
    ProcessThread* process_thread,
    NackSender* nack_sender,
    KeyFrameRequestSender* keyframe_request_sender,
    video_coding::OnCompleteFrameCallback* complete_frame_callback,
    rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor,
    rtc::scoped_refptr<FrameTransformerInterface> frame_transformer)
    : clock_(clock),
      config_(*config),
      packet_router_(packet_router),
      process_thread_(process_thread),
      ntp_estimator_(clock),
      rtp_header_extensions_(config_.rtp.extensions),
      forced_playout_delay_max_ms_("max_ms", absl::nullopt),
      forced_playout_delay_min_ms_("min_ms", absl::nullopt),
      rtp_receive_statistics_(rtp_receive_statistics),
      ulpfec_receiver_(UlpfecReceiver::Create(config->rtp.remote_ssrc,
                                              this,
                                              config->rtp.extensions)),
      receiving_(false),
      last_packet_log_ms_(-1),
      rtp_rtcp_(CreateRtpRtcpModule(clock,
                                    rtp_receive_statistics_,
                                    transport,
                                    rtt_stats,
                                    rtcp_packet_type_counter_observer,
                                    rtcp_cname_callback,
                                    config_.rtp.local_ssrc)),
      complete_frame_callback_(complete_frame_callback),
      keyframe_request_sender_(keyframe_request_sender),
      // TODO(bugs.webrtc.org/10336): Let |rtcp_feedback_buffer_| communicate
      // directly with |rtp_rtcp_|.
      rtcp_feedback_buffer_(this, nack_sender, this),
      packet_buffer_(clock_, kPacketBufferStartSize, PacketBufferMaxSize()),
      has_received_frame_(false),
      frames_decryptable_(false),
      absolute_capture_time_receiver_(clock) {
  constexpr bool remb_candidate = true;
  if (packet_router_)
    packet_router_->AddReceiveRtpModule(rtp_rtcp_.get(), remb_candidate);

  RTC_DCHECK(config_.rtp.rtcp_mode != RtcpMode::kOff)
      << "A stream should not be configured with RTCP disabled. This value is "
         "reserved for internal usage.";
  // TODO(pbos): What's an appropriate local_ssrc for receive-only streams?
  RTC_DCHECK(config_.rtp.local_ssrc != 0);
  RTC_DCHECK(config_.rtp.remote_ssrc != config_.rtp.local_ssrc);

  rtp_rtcp_->SetRTCPStatus(config_.rtp.rtcp_mode);
  rtp_rtcp_->SetRemoteSSRC(config_.rtp.remote_ssrc);

  static const int kMaxPacketAgeToNack = 450;
  const int max_reordering_threshold = (config_.rtp.nack.rtp_history_ms > 0)
                                           ? kMaxPacketAgeToNack
                                           : kDefaultMaxReorderingThreshold;
  rtp_receive_statistics_->SetMaxReorderingThreshold(config_.rtp.remote_ssrc,
                                                     max_reordering_threshold);
  // TODO(nisse): For historic reasons, we applied the above
  // max_reordering_threshold also for RTX stats, which makes little sense
  // since we don't NACK rtx packets. Consider deleting the below block, and
  // rely on the default threshold.
  if (config_.rtp.rtx_ssrc) {
    rtp_receive_statistics_->SetMaxReorderingThreshold(
        config_.rtp.rtx_ssrc, max_reordering_threshold);
  }
  if (config_.rtp.rtcp_xr.receiver_reference_time_report)
    rtp_rtcp_->SetRtcpXrRrtrStatus(true);

  ParseFieldTrial(
      {&forced_playout_delay_max_ms_, &forced_playout_delay_min_ms_},
      field_trial::FindFullName("WebRTC-ForcePlayoutDelay"));

  process_thread_->RegisterModule(rtp_rtcp_.get(), RTC_FROM_HERE);

  if (config_.rtp.lntf.enabled) {
    loss_notification_controller_ =
        std::make_unique<LossNotificationController>(&rtcp_feedback_buffer_,
                                                     &rtcp_feedback_buffer_);
  }

  if (config_.rtp.nack.rtp_history_ms != 0) {
    nack_module_ = std::make_unique<DEPRECATED_NackModule>(
        clock_, &rtcp_feedback_buffer_, &rtcp_feedback_buffer_);
    process_thread_->RegisterModule(nack_module_.get(), RTC_FROM_HERE);
  }

  reference_finder_ =
      std::make_unique<video_coding::RtpFrameReferenceFinder>(this);

  // Only construct the encrypted receiver if frame encryption is enabled.
  if (config_.crypto_options.sframe.require_frame_encryption) {
    buffered_frame_decryptor_ =
        std::make_unique<BufferedFrameDecryptor>(this, this);
    if (frame_decryptor != nullptr) {
      buffered_frame_decryptor_->SetFrameDecryptor(std::move(frame_decryptor));
    }
  }

  if (frame_transformer) {
    frame_transformer_delegate_ = new rtc::RefCountedObject<
        RtpVideoStreamReceiverFrameTransformerDelegate>(
        this, std::move(frame_transformer), rtc::Thread::Current(),
        config_.rtp.remote_ssrc);
    frame_transformer_delegate_->Init();
  }
}

RtpVideoStreamReceiver::~RtpVideoStreamReceiver() {
  RTC_DCHECK(secondary_sinks_.empty());

  if (nack_module_) {
    process_thread_->DeRegisterModule(nack_module_.get());
  }

  process_thread_->DeRegisterModule(rtp_rtcp_.get());

  if (packet_router_)
    packet_router_->RemoveReceiveRtpModule(rtp_rtcp_.get());
  UpdateHistograms();
  if (frame_transformer_delegate_)
    frame_transformer_delegate_->Reset();
}

void RtpVideoStreamReceiver::AddReceiveCodec(
    const VideoCodec& video_codec,
    const std::map<std::string, std::string>& codec_params,
    bool raw_payload) {
  payload_type_map_.emplace(
      video_codec.plType,
      raw_payload ? std::make_unique<VideoRtpDepacketizerRaw>()
                  : CreateVideoRtpDepacketizer(video_codec.codecType));
  pt_codec_params_.emplace(video_codec.plType, codec_params);
}

absl::optional<Syncable::Info> RtpVideoStreamReceiver::GetSyncInfo() const {
  Syncable::Info info;
  if (rtp_rtcp_->RemoteNTP(&info.capture_time_ntp_secs,
                           &info.capture_time_ntp_frac, nullptr, nullptr,
                           &info.capture_time_source_clock) != 0) {
    return absl::nullopt;
  }
  {
    MutexLock lock(&sync_info_lock_);
    if (!last_received_rtp_timestamp_ || !last_received_rtp_system_time_ms_) {
      return absl::nullopt;
    }
    info.latest_received_capture_timestamp = *last_received_rtp_timestamp_;
    info.latest_receive_time_ms = *last_received_rtp_system_time_ms_;
  }

  // Leaves info.current_delay_ms uninitialized.
  return info;
}

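// Parses the generic frame information carried in either the dependency
// descriptor extension or the older generic frame descriptor extension, and
// fills in the generic part of |video_header|. Returns kDropPacket when a
// descriptor is present but cannot be used, and kNoGenericDescriptor when
// neither extension is present.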
RtpVideoStreamReceiver::ParseGenericDependenciesResult
RtpVideoStreamReceiver::ParseGenericDependenciesExtension(
    const RtpPacketReceived& rtp_packet,
    RTPVideoHeader* video_header) {
  if (rtp_packet.HasExtension<RtpDependencyDescriptorExtension>()) {
    webrtc::DependencyDescriptor dependency_descriptor;
    if (!rtp_packet.GetExtension<RtpDependencyDescriptorExtension>(
            video_structure_.get(), &dependency_descriptor)) {
      // The descriptor is present but failed to parse. Either it is invalid,
      // or the packet is too old (it arrived after the relevant
      // video_structure_ was replaced), or too new (it arrived before the
      // relevant video_structure_ has arrived). Drop such packets to be on
      // the safe side.
      // TODO(bugs.webrtc.org/10342): Stash too new packet.
      RTC_LOG(LS_WARNING) << "ssrc: " << rtp_packet.Ssrc()
                          << " Failed to parse dependency descriptor.";
      return kDropPacket;
    }
    if (dependency_descriptor.attached_structure != nullptr &&
        !dependency_descriptor.first_packet_in_frame) {
      RTC_LOG(LS_WARNING) << "ssrc: " << rtp_packet.Ssrc()
                          << " Invalid dependency descriptor: structure "
                             "attached to non first packet of a frame.";
      return kDropPacket;
    }
    video_header->is_first_packet_in_frame =
        dependency_descriptor.first_packet_in_frame;
    video_header->is_last_packet_in_frame =
        dependency_descriptor.last_packet_in_frame;

    int64_t frame_id =
        frame_id_unwrapper_.Unwrap(dependency_descriptor.frame_number);
    auto& generic_descriptor_info = video_header->generic.emplace();
    generic_descriptor_info.frame_id = frame_id;
    generic_descriptor_info.spatial_index =
        dependency_descriptor.frame_dependencies.spatial_id;
    generic_descriptor_info.temporal_index =
        dependency_descriptor.frame_dependencies.temporal_id;
    for (int fdiff : dependency_descriptor.frame_dependencies.frame_diffs) {
      generic_descriptor_info.dependencies.push_back(frame_id - fdiff);
    }
    generic_descriptor_info.decode_target_indications =
        dependency_descriptor.frame_dependencies.decode_target_indications;
    if (dependency_descriptor.resolution) {
      video_header->width = dependency_descriptor.resolution->Width();
      video_header->height = dependency_descriptor.resolution->Height();
    }

    // The FrameDependencyStructure is sent in the dependency descriptor of
    // the first packet of a key frame and is required to parse the dependency
    // descriptor of all following packets until the next key frame.
    // Save it if there is a (potentially) new structure.
    if (dependency_descriptor.attached_structure) {
      RTC_DCHECK(dependency_descriptor.first_packet_in_frame);
      if (video_structure_frame_id_ > frame_id) {
        RTC_LOG(LS_WARNING)
            << "Key frame with id " << frame_id << " and structure id "
            << dependency_descriptor.attached_structure->structure_id
            << " arrived, but is older than the latest received key frame "
               "with id "
            << *video_structure_frame_id_ << " and structure id "
            << video_structure_->structure_id;
        return kDropPacket;
      }
      video_structure_ = std::move(dependency_descriptor.attached_structure);
      video_structure_frame_id_ = frame_id;
      video_header->frame_type = VideoFrameType::kVideoFrameKey;
    } else {
      video_header->frame_type = VideoFrameType::kVideoFrameDelta;
    }
    return kHasGenericDescriptor;
  }

  RtpGenericFrameDescriptor generic_frame_descriptor;
  if (!rtp_packet.GetExtension<RtpGenericFrameDescriptorExtension00>(
          &generic_frame_descriptor)) {
    return kNoGenericDescriptor;
  }

  video_header->is_first_packet_in_frame =
      generic_frame_descriptor.FirstPacketInSubFrame();
  video_header->is_last_packet_in_frame =
      generic_frame_descriptor.LastPacketInSubFrame();

  if (generic_frame_descriptor.FirstPacketInSubFrame()) {
    video_header->frame_type =
        generic_frame_descriptor.FrameDependenciesDiffs().empty()
            ? VideoFrameType::kVideoFrameKey
            : VideoFrameType::kVideoFrameDelta;

    auto& generic_descriptor_info = video_header->generic.emplace();
    int64_t frame_id =
        frame_id_unwrapper_.Unwrap(generic_frame_descriptor.FrameId());
    generic_descriptor_info.frame_id = frame_id;
    generic_descriptor_info.spatial_index =
        generic_frame_descriptor.SpatialLayer();
    generic_descriptor_info.temporal_index =
        generic_frame_descriptor.TemporalLayer();
    for (uint16_t fdiff : generic_frame_descriptor.FrameDependenciesDiffs()) {
      generic_descriptor_info.dependencies.push_back(frame_id - fdiff);
    }
  }
  video_header->width = generic_frame_descriptor.Width();
  video_header->height = generic_frame_descriptor.Height();
  return kHasGenericDescriptor;
}

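// Handles a single depacketized RTP packet. Extrapolates the absolute
// capture time, fills the RTPVideoHeader from header extensions and generic
// descriptors, updates the loss notification and NACK modules, patches H264
// payloads with out-of-band SPS/PPS when needed, and finally inserts the
// packet into the packet buffer.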
void RtpVideoStreamReceiver::OnReceivedPayloadData(
    rtc::CopyOnWriteBuffer codec_payload,
    const RtpPacketReceived& rtp_packet,
    const RTPVideoHeader& video) {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  auto packet = std::make_unique<video_coding::PacketBuffer::Packet>(
      rtp_packet, video, ntp_estimator_.Estimate(rtp_packet.Timestamp()),
      clock_->TimeInMilliseconds());

  // Try to extrapolate absolute capture time if it is missing.
  packet->packet_info.set_absolute_capture_time(
      absolute_capture_time_receiver_.OnReceivePacket(
          AbsoluteCaptureTimeReceiver::GetSource(packet->packet_info.ssrc(),
                                                 packet->packet_info.csrcs()),
          packet->packet_info.rtp_timestamp(),
          // Assume frequency is the same one for all video frames.
          kVideoPayloadTypeFrequency,
          packet->packet_info.absolute_capture_time()));

  RTPVideoHeader& video_header = packet->video_header;
  video_header.rotation = kVideoRotation_0;
  video_header.content_type = VideoContentType::UNSPECIFIED;
  video_header.video_timing.flags = VideoSendTiming::kInvalid;
  video_header.is_last_packet_in_frame |= rtp_packet.Marker();

  if (const auto* vp9_header =
          absl::get_if<RTPVideoHeaderVP9>(&video_header.video_type_header)) {
    video_header.is_last_packet_in_frame |= vp9_header->end_of_frame;
    video_header.is_first_packet_in_frame |= vp9_header->beginning_of_frame;
  }

  rtp_packet.GetExtension<VideoOrientation>(&video_header.rotation);
  rtp_packet.GetExtension<VideoContentTypeExtension>(
      &video_header.content_type);
  rtp_packet.GetExtension<VideoTimingExtension>(&video_header.video_timing);
  if (forced_playout_delay_max_ms_ && forced_playout_delay_min_ms_) {
    video_header.playout_delay.max_ms = *forced_playout_delay_max_ms_;
    video_header.playout_delay.min_ms = *forced_playout_delay_min_ms_;
  } else {
    rtp_packet.GetExtension<PlayoutDelayLimits>(&video_header.playout_delay);
  }

  ParseGenericDependenciesResult generic_descriptor_state =
      ParseGenericDependenciesExtension(rtp_packet, &video_header);
  if (generic_descriptor_state == kDropPacket)
    return;

  // Color space should only be transmitted in the last packet of a frame;
  // therefore, ignore it otherwise so that last_color_space_ is not reset by
  // mistake.
  if (video_header.is_last_packet_in_frame) {
    video_header.color_space = rtp_packet.GetExtension<ColorSpaceExtension>();
    if (video_header.color_space ||
        video_header.frame_type == VideoFrameType::kVideoFrameKey) {
      // Store color space since it's only transmitted when changed or for key
      // frames. Color space will be cleared if a key frame is transmitted
      // without color space information.
      last_color_space_ = video_header.color_space;
    } else if (last_color_space_) {
      video_header.color_space = last_color_space_;
    }
  }

  if (loss_notification_controller_) {
    if (rtp_packet.recovered()) {
      // TODO(bugs.webrtc.org/10336): Implement support for reordering.
      RTC_LOG(LS_INFO)
          << "LossNotificationController does not support reordering.";
    } else if (generic_descriptor_state == kNoGenericDescriptor) {
      RTC_LOG(LS_WARNING) << "LossNotificationController requires generic "
                             "frame descriptor, but it is missing.";
    } else {
      if (video_header.is_first_packet_in_frame) {
        RTC_DCHECK(video_header.generic);
        LossNotificationController::FrameDetails frame;
        frame.is_keyframe =
            video_header.frame_type == VideoFrameType::kVideoFrameKey;
        frame.frame_id = video_header.generic->frame_id;
        frame.frame_dependencies = video_header.generic->dependencies;
        loss_notification_controller_->OnReceivedPacket(
            rtp_packet.SequenceNumber(), &frame);
      } else {
        loss_notification_controller_->OnReceivedPacket(
            rtp_packet.SequenceNumber(), nullptr);
      }
    }
  }

  if (nack_module_) {
    const bool is_keyframe =
        video_header.is_first_packet_in_frame &&
        video_header.frame_type == VideoFrameType::kVideoFrameKey;

    packet->times_nacked = nack_module_->OnReceivedPacket(
        rtp_packet.SequenceNumber(), is_keyframe, rtp_packet.recovered());
  } else {
    packet->times_nacked = -1;
  }

  if (codec_payload.size() == 0) {
    NotifyReceiverOfEmptyPacket(packet->seq_num);
    rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
    return;
  }

  if (packet->codec() == kVideoCodecH264) {
    // Only when we start to receive packets do we know which payload type
    // will be used. Once the payload type is known, insert the correct
    // SPS/PPS into the tracker.
    if (packet->payload_type != last_payload_type_) {
      last_payload_type_ = packet->payload_type;
      InsertSpsPpsIntoTracker(packet->payload_type);
    }

    video_coding::H264SpsPpsTracker::FixedBitstream fixed =
        tracker_.CopyAndFixBitstream(
            rtc::MakeArrayView(codec_payload.cdata(), codec_payload.size()),
            &packet->video_header);

    switch (fixed.action) {
      case video_coding::H264SpsPpsTracker::kRequestKeyframe:
        rtcp_feedback_buffer_.RequestKeyFrame();
        rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
        ABSL_FALLTHROUGH_INTENDED;
      case video_coding::H264SpsPpsTracker::kDrop:
        return;
      case video_coding::H264SpsPpsTracker::kInsert:
        packet->video_payload = std::move(fixed.bitstream);
        break;
    }

  } else {
    packet->video_payload = std::move(codec_payload);
  }

  rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
  frame_counter_.Add(packet->timestamp);
  OnInsertedPacket(packet_buffer_.InsertPacket(std::move(packet)));
}

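// Callback from the ULPFEC receiver. Both RED-decapsulated media packets and
// FEC-recovered packets arrive here (see the TODO below) and are re-parsed
// before being fed through the regular receive path.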
void RtpVideoStreamReceiver::OnRecoveredPacket(const uint8_t* rtp_packet,
                                               size_t rtp_packet_length) {
  RtpPacketReceived packet;
  if (!packet.Parse(rtp_packet, rtp_packet_length))
    return;
  if (packet.PayloadType() == config_.rtp.red_payload_type) {
    RTC_LOG(LS_WARNING) << "Discarding recovered packet with RED encapsulation";
    return;
  }

  packet.IdentifyExtensions(rtp_header_extensions_);
  packet.set_payload_type_frequency(kVideoPayloadTypeFrequency);
  // TODO(nisse): UlpfecReceiverImpl::ProcessReceivedFec passes both
  // original (decapsulated) media packets and recovered packets to
  // this callback. We need a way to distinguish, for setting
  // packet.recovered() correctly. Ideally, move RED decapsulation out
  // of the Ulpfec implementation.

  ReceivePacket(packet);
}

// This method handles both regular RTP packets and packets recovered
// via FlexFEC.
void RtpVideoStreamReceiver::OnRtpPacket(const RtpPacketReceived& packet) {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);

  if (!receiving_) {
    return;
  }

  if (!packet.recovered()) {
    // TODO(nisse): Exclude out-of-order packets?
    int64_t now_ms = clock_->TimeInMilliseconds();
    {
      MutexLock lock(&sync_info_lock_);
      last_received_rtp_timestamp_ = packet.Timestamp();
      last_received_rtp_system_time_ms_ = now_ms;
    }
    // Periodically log the RTP header of incoming packets.
    if (now_ms - last_packet_log_ms_ > kPacketLogIntervalMs) {
      rtc::StringBuilder ss;
      ss << "Packet received on SSRC: " << packet.Ssrc()
         << " with payload type: " << static_cast<int>(packet.PayloadType())
         << ", timestamp: " << packet.Timestamp()
         << ", sequence number: " << packet.SequenceNumber()
         << ", arrival time: " << packet.arrival_time_ms();
      int32_t time_offset;
      if (packet.GetExtension<TransmissionOffset>(&time_offset)) {
        ss << ", toffset: " << time_offset;
      }
      uint32_t send_time;
      if (packet.GetExtension<AbsoluteSendTime>(&send_time)) {
        ss << ", abs send time: " << send_time;
      }
      RTC_LOG(LS_INFO) << ss.str();
      last_packet_log_ms_ = now_ms;
    }
  }

  ReceivePacket(packet);

  // Update receive statistics after ReceivePacket.
  // Receive statistics will be reset if the payload type changes (make sure
  // that the first packet is included in the stats).
  if (!packet.recovered()) {
    rtp_receive_statistics_->OnRtpPacket(packet);
  }

  for (RtpPacketSinkInterface* secondary_sink : secondary_sinks_) {
    secondary_sink->OnRtpPacket(packet);
  }
}

void RtpVideoStreamReceiver::RequestKeyFrame() {
  // TODO(bugs.webrtc.org/10336): Allow the sender to ignore key frame requests
  // issued by anything other than the LossNotificationController if it (the
  // sender) is relying on LNTF alone.
  if (keyframe_request_sender_) {
    keyframe_request_sender_->RequestKeyFrame();
  } else {
    rtp_rtcp_->SendPictureLossIndication();
  }
}

void RtpVideoStreamReceiver::SendLossNotification(
    uint16_t last_decoded_seq_num,
    uint16_t last_received_seq_num,
    bool decodability_flag,
    bool buffering_allowed) {
  RTC_DCHECK(config_.rtp.lntf.enabled);
  rtp_rtcp_->SendLossNotification(last_decoded_seq_num, last_received_seq_num,
                                  decodability_flag, buffering_allowed);
}

bool RtpVideoStreamReceiver::IsUlpfecEnabled() const {
  return config_.rtp.ulpfec_payload_type != -1;
}

bool RtpVideoStreamReceiver::IsRetransmissionsEnabled() const {
  return config_.rtp.nack.rtp_history_ms > 0;
}

void RtpVideoStreamReceiver::RequestPacketRetransmit(
    const std::vector<uint16_t>& sequence_numbers) {
  rtp_rtcp_->SendNack(sequence_numbers);
}

bool RtpVideoStreamReceiver::IsDecryptable() const {
  return frames_decryptable_.load();
}

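// Handles the result of a packet buffer insertion. The returned packets are
// in order and have correct frame-boundary flags; whenever a complete frame
// (first..last packet) has been collected, the payloads are assembled into a
// single bitstream by the codec-specific depacketizer and handed to
// OnAssembledFrame(). A key frame is requested if the buffer had to be
// cleared.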
void RtpVideoStreamReceiver::OnInsertedPacket(
    video_coding::PacketBuffer::InsertResult result) {
  video_coding::PacketBuffer::Packet* first_packet = nullptr;
  int max_nack_count;
  int64_t min_recv_time;
  int64_t max_recv_time;
  std::vector<rtc::ArrayView<const uint8_t>> payloads;
  RtpPacketInfos::vector_type packet_infos;

  bool frame_boundary = true;
  for (auto& packet : result.packets) {
    // PacketBuffer promises frame boundaries are correctly set on each
    // packet. Document that assumption with the DCHECKs.
    RTC_DCHECK_EQ(frame_boundary, packet->is_first_packet_in_frame());
    if (packet->is_first_packet_in_frame()) {
      first_packet = packet.get();
      max_nack_count = packet->times_nacked;
      min_recv_time = packet->packet_info.receive_time_ms();
      max_recv_time = packet->packet_info.receive_time_ms();
      payloads.clear();
      packet_infos.clear();
    } else {
      max_nack_count = std::max(max_nack_count, packet->times_nacked);
      min_recv_time =
          std::min(min_recv_time, packet->packet_info.receive_time_ms());
      max_recv_time =
          std::max(max_recv_time, packet->packet_info.receive_time_ms());
    }
    payloads.emplace_back(packet->video_payload);
    packet_infos.push_back(packet->packet_info);

    frame_boundary = packet->is_last_packet_in_frame();
    if (packet->is_last_packet_in_frame()) {
      auto depacketizer_it = payload_type_map_.find(first_packet->payload_type);
      RTC_CHECK(depacketizer_it != payload_type_map_.end());

      rtc::scoped_refptr<EncodedImageBuffer> bitstream =
          depacketizer_it->second->AssembleFrame(payloads);
      if (!bitstream) {
        // Failed to assemble a frame. Discard and continue.
        continue;
      }

      const video_coding::PacketBuffer::Packet& last_packet = *packet;
      OnAssembledFrame(std::make_unique<video_coding::RtpFrameObject>(
          first_packet->seq_num,                    //
          last_packet.seq_num,                      //
          last_packet.marker_bit,                   //
          max_nack_count,                           //
          min_recv_time,                            //
          max_recv_time,                            //
          first_packet->timestamp,                  //
          first_packet->ntp_time_ms,                //
          last_packet.video_header.video_timing,    //
          first_packet->payload_type,               //
          first_packet->codec(),                    //
          last_packet.video_header.rotation,        //
          last_packet.video_header.content_type,    //
          first_packet->video_header,               //
          last_packet.video_header.color_space,     //
          RtpPacketInfos(std::move(packet_infos)),  //
          std::move(bitstream)));
    }
  }
  RTC_DCHECK(frame_boundary);
  if (result.buffer_cleared) {
    RequestKeyFrame();
  }
}

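// Receives a fully assembled frame. Feeds the loss notification controller,
// requests a key frame if the first frame ever assembled is not a key frame,
// resets the reference finder when the codec changes, and then routes the
// frame to the decryptor, the frame transformer, or directly to the
// reference finder.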
void RtpVideoStreamReceiver::OnAssembledFrame(
    std::unique_ptr<video_coding::RtpFrameObject> frame) {
  RTC_DCHECK_RUN_ON(&network_tc_);
  RTC_DCHECK(frame);

  const absl::optional<RTPVideoHeader::GenericDescriptorInfo>& descriptor =
      frame->GetRtpVideoHeader().generic;

  if (loss_notification_controller_ && descriptor) {
    loss_notification_controller_->OnAssembledFrame(
        frame->first_seq_num(), descriptor->frame_id,
        absl::c_linear_search(descriptor->decode_target_indications,
                              DecodeTargetIndication::kDiscardable),
        descriptor->dependencies);
  }

  // If frames arrive before a key frame, they would not be decodable.
  // In that case, request a key frame ASAP.
  if (!has_received_frame_) {
    if (frame->FrameType() != VideoFrameType::kVideoFrameKey) {
      // |loss_notification_controller_|, if present, would have already
      // requested a key frame when the first packet for the non-key frame
      // had arrived, so no need to replicate the request.
      if (!loss_notification_controller_) {
        RequestKeyFrame();
      }
    }
    has_received_frame_ = true;
  }

  MutexLock lock(&reference_finder_lock_);
  // Reset |reference_finder_| if |frame| is new and the codec has changed.
  if (current_codec_) {
    bool frame_is_newer =
        AheadOf(frame->Timestamp(), last_assembled_frame_rtp_timestamp_);

    if (frame->codec_type() != current_codec_) {
      if (frame_is_newer) {
        // When we reset the |reference_finder_| we don't want new picture ids
        // to overlap with old picture ids. To ensure that doesn't happen we
        // start from the |last_completed_picture_id_| and add an offset in
        // case of reordering.
        reference_finder_ =
            std::make_unique<video_coding::RtpFrameReferenceFinder>(
                this, last_completed_picture_id_ +
                          std::numeric_limits<uint16_t>::max());
        current_codec_ = frame->codec_type();
      } else {
        // Old frame from before the codec switch, discard it.
        return;
      }
    }

    if (frame_is_newer) {
      last_assembled_frame_rtp_timestamp_ = frame->Timestamp();
    }
  } else {
    current_codec_ = frame->codec_type();
    last_assembled_frame_rtp_timestamp_ = frame->Timestamp();
  }

  if (buffered_frame_decryptor_ != nullptr) {
    buffered_frame_decryptor_->ManageEncryptedFrame(std::move(frame));
  } else if (frame_transformer_delegate_) {
    frame_transformer_delegate_->TransformFrame(std::move(frame));
  } else {
    reference_finder_->ManageFrame(std::move(frame));
  }
}

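// Called (via the reference finder) once a frame's references are known.
// Remembers the last RTP sequence number per picture id, which is later used
// by FrameContinuous()/FrameDecoded() to clear NACK and buffer state, and
// forwards the frame to the registered complete-frame callback.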
void RtpVideoStreamReceiver::OnCompleteFrame(
    std::unique_ptr<video_coding::EncodedFrame> frame) {
  {
    MutexLock lock(&last_seq_num_mutex_);
    video_coding::RtpFrameObject* rtp_frame =
        static_cast<video_coding::RtpFrameObject*>(frame.get());
    last_seq_num_for_pic_id_[rtp_frame->id.picture_id] =
        rtp_frame->last_seq_num();
  }
  last_completed_picture_id_ =
      std::max(last_completed_picture_id_, frame->id.picture_id);
  complete_frame_callback_->OnCompleteFrame(std::move(frame));
}

void RtpVideoStreamReceiver::OnDecryptedFrame(
    std::unique_ptr<video_coding::RtpFrameObject> frame) {
  MutexLock lock(&reference_finder_lock_);
  reference_finder_->ManageFrame(std::move(frame));
}

void RtpVideoStreamReceiver::OnDecryptionStatusChange(
    FrameDecryptorInterface::Status status) {
  frames_decryptable_.store(
      (status == FrameDecryptorInterface::Status::kOk) ||
      (status == FrameDecryptorInterface::Status::kRecoverable));
}

void RtpVideoStreamReceiver::SetFrameDecryptor(
    rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor) {
  RTC_DCHECK_RUN_ON(&network_tc_);
  if (buffered_frame_decryptor_ == nullptr) {
    buffered_frame_decryptor_ =
        std::make_unique<BufferedFrameDecryptor>(this, this);
  }
  buffered_frame_decryptor_->SetFrameDecryptor(std::move(frame_decryptor));
}

void RtpVideoStreamReceiver::SetDepacketizerToDecoderFrameTransformer(
    rtc::scoped_refptr<FrameTransformerInterface> frame_transformer) {
  RTC_DCHECK_RUN_ON(&network_tc_);
  frame_transformer_delegate_ =
      new rtc::RefCountedObject<RtpVideoStreamReceiverFrameTransformerDelegate>(
          this, std::move(frame_transformer), rtc::Thread::Current(),
          config_.rtp.remote_ssrc);
  frame_transformer_delegate_->Init();
}

void RtpVideoStreamReceiver::UpdateRtt(int64_t max_rtt_ms) {
  if (nack_module_)
    nack_module_->UpdateRtt(max_rtt_ms);
}

absl::optional<int64_t> RtpVideoStreamReceiver::LastReceivedPacketMs() const {
  return packet_buffer_.LastReceivedPacketMs();
}

absl::optional<int64_t> RtpVideoStreamReceiver::LastReceivedKeyframePacketMs()
    const {
  return packet_buffer_.LastReceivedKeyframePacketMs();
}

void RtpVideoStreamReceiver::AddSecondarySink(RtpPacketSinkInterface* sink) {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  RTC_DCHECK(!absl::c_linear_search(secondary_sinks_, sink));
  secondary_sinks_.push_back(sink);
}

void RtpVideoStreamReceiver::RemoveSecondarySink(
    const RtpPacketSinkInterface* sink) {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  auto it = absl::c_find(secondary_sinks_, sink);
  if (it == secondary_sinks_.end()) {
    // We might be rolling-back a call whose setup failed mid-way. In such a
    // case, it's simpler to remove "everything" rather than remember what
    // has already been added.
    RTC_LOG(LS_WARNING) << "Removal of unknown sink.";
    return;
  }
  secondary_sinks_.erase(it);
}

void RtpVideoStreamReceiver::ManageFrame(
    std::unique_ptr<video_coding::RtpFrameObject> frame) {
  MutexLock lock(&reference_finder_lock_);
  reference_finder_->ManageFrame(std::move(frame));
}

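// Demultiplexes a single incoming RTP packet: empty payloads are treated as
// padding/keep-alive, RED packets are routed to the ULPFEC path, and media
// packets are parsed with the payload-type specific depacketizer before
// being handed to OnReceivedPayloadData().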
void RtpVideoStreamReceiver::ReceivePacket(const RtpPacketReceived& packet) {
  if (packet.payload_size() == 0) {
    // Padding or keep-alive packet.
    // TODO(nisse): Could drop empty packets earlier, but need to figure out
    // how they should be counted in stats.
    NotifyReceiverOfEmptyPacket(packet.SequenceNumber());
    return;
  }
  if (packet.PayloadType() == config_.rtp.red_payload_type) {
    ParseAndHandleEncapsulatingHeader(packet);
    return;
  }

  const auto type_it = payload_type_map_.find(packet.PayloadType());
  if (type_it == payload_type_map_.end()) {
    return;
  }
  absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed_payload =
      type_it->second->Parse(packet.PayloadBuffer());
  if (parsed_payload == absl::nullopt) {
    RTC_LOG(LS_WARNING) << "Failed parsing payload.";
    return;
  }

  OnReceivedPayloadData(std::move(parsed_payload->video_payload), packet,
                        parsed_payload->video_header);
}

void RtpVideoStreamReceiver::ParseAndHandleEncapsulatingHeader(
    const RtpPacketReceived& packet) {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  if (packet.PayloadType() == config_.rtp.red_payload_type &&
      packet.payload_size() > 0) {
    if (packet.payload()[0] == config_.rtp.ulpfec_payload_type) {
      // Notify video_receiver about received FEC packets to avoid NACKing
      // these packets.
      NotifyReceiverOfEmptyPacket(packet.SequenceNumber());
    }
    if (!ulpfec_receiver_->AddReceivedRedPacket(
            packet, config_.rtp.ulpfec_payload_type)) {
      return;
    }
    ulpfec_receiver_->ProcessReceivedFec();
  }
}

// For a video stream without picture ids and without RTX, the
// RtpFrameReferenceFinder needs to know about padding in order to calculate
// frame references correctly.
void RtpVideoStreamReceiver::NotifyReceiverOfEmptyPacket(uint16_t seq_num) {
  {
    MutexLock lock(&reference_finder_lock_);
    reference_finder_->PaddingReceived(seq_num);
  }
  OnInsertedPacket(packet_buffer_.InsertPadding(seq_num));
  if (nack_module_) {
    nack_module_->OnReceivedPacket(seq_num, /* is_keyframe = */ false,
                                   /* is_recovered = */ false);
  }
  if (loss_notification_controller_) {
    // TODO(bugs.webrtc.org/10336): Handle empty packets.
    RTC_LOG(LS_WARNING)
        << "LossNotificationController does not expect empty packets.";
  }
}

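// Feeds an incoming RTCP packet to the RTP/RTCP module. Once a valid RTT and
// a recent sender report are available, the RTP-to-NTP estimator is updated
// and the resulting remote-to-local clock offset is propagated to the
// absolute capture time receiver.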
bool RtpVideoStreamReceiver::DeliverRtcp(const uint8_t* rtcp_packet,
                                         size_t rtcp_packet_length) {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);

  if (!receiving_) {
    return false;
  }

  rtp_rtcp_->IncomingRtcpPacket(rtcp_packet, rtcp_packet_length);

  int64_t rtt = 0;
  rtp_rtcp_->RTT(config_.rtp.remote_ssrc, &rtt, nullptr, nullptr, nullptr);
  if (rtt == 0) {
    // Waiting for valid rtt.
    return true;
  }
  uint32_t ntp_secs = 0;
  uint32_t ntp_frac = 0;
  uint32_t rtp_timestamp = 0;
  uint32_t received_ntp_secs = 0;
  uint32_t received_ntp_frac = 0;
  if (rtp_rtcp_->RemoteNTP(&ntp_secs, &ntp_frac, &received_ntp_secs,
                           &received_ntp_frac, &rtp_timestamp) != 0) {
    // Waiting for RTCP.
    return true;
  }
  NtpTime received_ntp(received_ntp_secs, received_ntp_frac);
  int64_t time_since_received =
      clock_->CurrentNtpInMilliseconds() - received_ntp.ToMs();
  // Don't use old SRs to estimate time.
  if (time_since_received <= 1) {
    ntp_estimator_.UpdateRtcpTimestamp(rtt, ntp_secs, ntp_frac, rtp_timestamp);
    absl::optional<int64_t> remote_to_local_clock_offset_ms =
        ntp_estimator_.EstimateRemoteToLocalClockOffsetMs();
    if (remote_to_local_clock_offset_ms.has_value()) {
      absolute_capture_time_receiver_.SetRemoteToLocalClockOffset(
          Int64MsToQ32x32(*remote_to_local_clock_offset_ms));
    }
  }

  return true;
}

void RtpVideoStreamReceiver::FrameContinuous(int64_t picture_id) {
  if (!nack_module_)
    return;

  int seq_num = -1;
  {
    MutexLock lock(&last_seq_num_mutex_);
    auto seq_num_it = last_seq_num_for_pic_id_.find(picture_id);
    if (seq_num_it != last_seq_num_for_pic_id_.end())
      seq_num = seq_num_it->second;
  }
  if (seq_num != -1)
    nack_module_->ClearUpTo(seq_num);
}

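// Called when the frame with the given picture id has been decoded. Clears
// packet buffer and reference finder state up to the last sequence number of
// that frame, and drops the bookkeeping for older picture ids.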
void RtpVideoStreamReceiver::FrameDecoded(int64_t picture_id) {
  int seq_num = -1;
  {
    MutexLock lock(&last_seq_num_mutex_);
    auto seq_num_it = last_seq_num_for_pic_id_.find(picture_id);
    if (seq_num_it != last_seq_num_for_pic_id_.end()) {
      seq_num = seq_num_it->second;
      last_seq_num_for_pic_id_.erase(last_seq_num_for_pic_id_.begin(),
                                     ++seq_num_it);
    }
  }
  if (seq_num != -1) {
    packet_buffer_.ClearTo(seq_num);
    MutexLock lock(&reference_finder_lock_);
    reference_finder_->ClearTo(seq_num);
  }
}

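// RTCP is disabled while the network is reported as down and restored to the
// configured mode when it comes back up.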
void RtpVideoStreamReceiver::SignalNetworkState(NetworkState state) {
  rtp_rtcp_->SetRTCPStatus(state == kNetworkUp ? config_.rtp.rtcp_mode
                                               : RtcpMode::kOff);
}

void RtpVideoStreamReceiver::StartReceive() {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  receiving_ = true;
}

void RtpVideoStreamReceiver::StopReceive() {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  receiving_ = false;
}

void RtpVideoStreamReceiver::UpdateHistograms() {
  FecPacketCounter counter = ulpfec_receiver_->GetPacketCounter();
  if (counter.first_packet_time_ms == -1)
    return;

  int64_t elapsed_sec =
      (clock_->TimeInMilliseconds() - counter.first_packet_time_ms) / 1000;
  if (elapsed_sec < metrics::kMinRunTimeInSeconds)
    return;

  if (counter.num_packets > 0) {
    RTC_HISTOGRAM_PERCENTAGE(
        "WebRTC.Video.ReceivedFecPacketsInPercent",
        static_cast<int>(counter.num_fec_packets * 100 / counter.num_packets));
  }
  if (counter.num_fec_packets > 0) {
    RTC_HISTOGRAM_PERCENTAGE("WebRTC.Video.RecoveredMediaPacketsInPercentOfFec",
                             static_cast<int>(counter.num_recovered_packets *
                                              100 / counter.num_fec_packets));
  }
  if (config_.rtp.ulpfec_payload_type != -1) {
    RTC_HISTOGRAM_COUNTS_10000(
        "WebRTC.Video.FecBitrateReceivedInKbps",
        static_cast<int>(counter.num_bytes * 8 / elapsed_sec / 1000));
  }
}

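// Looks up out-of-band codec parameters (the H264 sprop-parameter-sets fmtp
// attribute) for the given payload type, decodes the base64-encoded SPS/PPS
// NAL units, and seeds the SPS/PPS tracker with them.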
void RtpVideoStreamReceiver::InsertSpsPpsIntoTracker(uint8_t payload_type) {
  auto codec_params_it = pt_codec_params_.find(payload_type);
  if (codec_params_it == pt_codec_params_.end())
    return;

  RTC_LOG(LS_INFO) << "Found out of band supplied codec parameters for"
                      " payload type: "
                   << static_cast<int>(payload_type);

  H264SpropParameterSets sprop_decoder;
  auto sprop_base64_it =
      codec_params_it->second.find(cricket::kH264FmtpSpropParameterSets);

  if (sprop_base64_it == codec_params_it->second.end())
    return;

  if (!sprop_decoder.DecodeSprop(sprop_base64_it->second.c_str()))
    return;

  tracker_.InsertSpsPpsNalus(sprop_decoder.sps_nalu(),
                             sprop_decoder.pps_nalu());
}

}  // namespace webrtc