/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "video/rtp_video_stream_receiver2.h"

#include <algorithm>
#include <limits>
#include <memory>
#include <utility>
#include <vector>

#include "absl/algorithm/container.h"
#include "absl/base/macros.h"
#include "absl/memory/memory.h"
#include "absl/types/optional.h"
#include "media/base/media_constants.h"
#include "modules/pacing/packet_router.h"
#include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
#include "modules/rtp_rtcp/include/receive_statistics.h"
#include "modules/rtp_rtcp/include/rtp_cvo.h"
#include "modules/rtp_rtcp/include/ulpfec_receiver.h"
#include "modules/rtp_rtcp/source/create_video_rtp_depacketizer.h"
#include "modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h"
#include "modules/rtp_rtcp/source/rtp_format.h"
#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h"
#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h"
#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
#include "modules/rtp_rtcp/source/rtp_rtcp_config.h"
#include "modules/rtp_rtcp/source/video_rtp_depacketizer.h"
#include "modules/rtp_rtcp/source/video_rtp_depacketizer_raw.h"
#include "modules/utility/include/process_thread.h"
#include "modules/video_coding/frame_object.h"
#include "modules/video_coding/h264_sprop_parameter_sets.h"
#include "modules/video_coding/h264_sps_pps_tracker.h"
#include "modules/video_coding/nack_module2.h"
#include "modules/video_coding/packet_buffer.h"
#include "rtc_base/checks.h"
#include "rtc_base/location.h"
#include "rtc_base/logging.h"
#include "rtc_base/strings/string_builder.h"
#include "system_wrappers/include/field_trial.h"
#include "system_wrappers/include/metrics.h"
#include "system_wrappers/include/ntp_time.h"
#include "video/receive_statistics_proxy2.h"

namespace webrtc {

namespace {
// TODO(philipel): Change kPacketBufferStartSize back to 32 in M63 see:
// crbug.com/752886
constexpr int kPacketBufferStartSize = 512;
constexpr int kPacketBufferMaxSize = 2048;

int PacketBufferMaxSize() {
  // The group here must be a positive power of 2, in which case that is used
  // as size. All other values shall result in the default value being used.
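  // A positive power of two x satisfies (x & (x - 1)) == 0, which is the bit
  // trick used below (e.g. 2048 & 2047 == 0, while 1500 & 1499 != 0); zero and
  // negative values are rejected by the explicit <= 0 check.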
  const std::string group_name =
      webrtc::field_trial::FindFullName("WebRTC-PacketBufferMaxSize");
  int packet_buffer_max_size = kPacketBufferMaxSize;
  if (!group_name.empty() &&
      (sscanf(group_name.c_str(), "%d", &packet_buffer_max_size) != 1 ||
       packet_buffer_max_size <= 0 ||
       // Verify that the number is a positive power of 2.
       (packet_buffer_max_size & (packet_buffer_max_size - 1)) != 0)) {
    RTC_LOG(LS_WARNING) << "Invalid packet buffer max size: " << group_name;
    packet_buffer_max_size = kPacketBufferMaxSize;
  }
  return packet_buffer_max_size;
}

std::unique_ptr<ModuleRtpRtcpImpl2> CreateRtpRtcpModule(
    Clock* clock,
    ReceiveStatistics* receive_statistics,
    Transport* outgoing_transport,
    RtcpRttStats* rtt_stats,
    RtcpPacketTypeCounterObserver* rtcp_packet_type_counter_observer,
    RtcpCnameCallback* rtcp_cname_callback,
    uint32_t local_ssrc) {
  RtpRtcpInterface::Configuration configuration;
  configuration.clock = clock;
  configuration.audio = false;
  configuration.receiver_only = true;
  configuration.receive_statistics = receive_statistics;
  configuration.outgoing_transport = outgoing_transport;
  configuration.rtt_stats = rtt_stats;
  configuration.rtcp_packet_type_counter_observer =
      rtcp_packet_type_counter_observer;
  configuration.rtcp_cname_callback = rtcp_cname_callback;
  configuration.local_media_ssrc = local_ssrc;

  std::unique_ptr<ModuleRtpRtcpImpl2> rtp_rtcp =
      ModuleRtpRtcpImpl2::Create(configuration);
  rtp_rtcp->SetRTCPStatus(RtcpMode::kCompound);

  return rtp_rtcp;
}

std::unique_ptr<NackModule2> MaybeConstructNackModule(
    TaskQueueBase* current_queue,
    const VideoReceiveStream::Config& config,
    Clock* clock,
    NackSender* nack_sender,
    KeyFrameRequestSender* keyframe_request_sender) {
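  // A zero NACK history means retransmissions are disabled for this stream,
  // so no NackModule2 is created and lost packets are never NACKed.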
  if (config.rtp.nack.rtp_history_ms == 0)
    return nullptr;

  return std::make_unique<NackModule2>(current_queue, clock, nack_sender,
                                       keyframe_request_sender);
}

static const int kPacketLogIntervalMs = 10000;

}  // namespace

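// RtcpFeedbackBuffer collects key frame requests, NACKs and loss
// notifications issued on the worker thread and forwards them, possibly
// batched into combined feedback, when SendBufferedRtcpFeedback() is called.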
RtpVideoStreamReceiver2::RtcpFeedbackBuffer::RtcpFeedbackBuffer(
    KeyFrameRequestSender* key_frame_request_sender,
    NackSender* nack_sender,
    LossNotificationSender* loss_notification_sender)
    : key_frame_request_sender_(key_frame_request_sender),
      nack_sender_(nack_sender),
      loss_notification_sender_(loss_notification_sender),
      request_key_frame_(false) {
  RTC_DCHECK(key_frame_request_sender_);
  RTC_DCHECK(nack_sender_);
  RTC_DCHECK(loss_notification_sender_);
}

void RtpVideoStreamReceiver2::RtcpFeedbackBuffer::RequestKeyFrame() {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  request_key_frame_ = true;
}

void RtpVideoStreamReceiver2::RtcpFeedbackBuffer::SendNack(
    const std::vector<uint16_t>& sequence_numbers,
    bool buffering_allowed) {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  RTC_DCHECK(!sequence_numbers.empty());
  nack_sequence_numbers_.insert(nack_sequence_numbers_.end(),
                                sequence_numbers.cbegin(),
                                sequence_numbers.cend());
  if (!buffering_allowed) {
    // Note that while *buffering* is not allowed, *batching* is, meaning that
    // previously buffered messages may be sent along with the current message.
    SendBufferedRtcpFeedback();
  }
}

void RtpVideoStreamReceiver2::RtcpFeedbackBuffer::SendLossNotification(
    uint16_t last_decoded_seq_num,
    uint16_t last_received_seq_num,
    bool decodability_flag,
    bool buffering_allowed) {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  RTC_DCHECK(buffering_allowed);
  RTC_DCHECK(!lntf_state_)
      << "SendLossNotification() called twice in a row with no call to "
         "SendBufferedRtcpFeedback() in between.";
  lntf_state_ = absl::make_optional<LossNotificationState>(
      last_decoded_seq_num, last_received_seq_num, decodability_flag);
}

void RtpVideoStreamReceiver2::RtcpFeedbackBuffer::SendBufferedRtcpFeedback() {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);

  bool request_key_frame = false;
  std::vector<uint16_t> nack_sequence_numbers;
  absl::optional<LossNotificationState> lntf_state;

  std::swap(request_key_frame, request_key_frame_);
  std::swap(nack_sequence_numbers, nack_sequence_numbers_);
  std::swap(lntf_state, lntf_state_);

  if (lntf_state) {
    // If either a NACK or a key frame request is sent, we should buffer
    // the LNTF and wait for them (NACK or key frame request) to trigger
    // the compound feedback message.
    // Otherwise, the LNTF should be sent out immediately.
    const bool buffering_allowed =
        request_key_frame || !nack_sequence_numbers.empty();

    loss_notification_sender_->SendLossNotification(
        lntf_state->last_decoded_seq_num, lntf_state->last_received_seq_num,
        lntf_state->decodability_flag, buffering_allowed);
  }

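  // If a key frame was requested, the buffered NACKs are dropped rather than
  // sent; the incoming key frame resynchronizes the decoder and makes the
  // individual retransmissions unnecessary.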
  if (request_key_frame) {
    key_frame_request_sender_->RequestKeyFrame();
  } else if (!nack_sequence_numbers.empty()) {
    nack_sender_->SendNack(nack_sequence_numbers, true);
  }
}

RtpVideoStreamReceiver2::RtpVideoStreamReceiver2(
    TaskQueueBase* current_queue,
    Clock* clock,
    Transport* transport,
    RtcpRttStats* rtt_stats,
    PacketRouter* packet_router,
    const VideoReceiveStream::Config* config,
    ReceiveStatistics* rtp_receive_statistics,
    RtcpPacketTypeCounterObserver* rtcp_packet_type_counter_observer,
    RtcpCnameCallback* rtcp_cname_callback,
    ProcessThread* process_thread,
    NackSender* nack_sender,
    KeyFrameRequestSender* keyframe_request_sender,
    video_coding::OnCompleteFrameCallback* complete_frame_callback,
    rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor,
    rtc::scoped_refptr<FrameTransformerInterface> frame_transformer)
    : clock_(clock),
      config_(*config),
      packet_router_(packet_router),
      process_thread_(process_thread),
      ntp_estimator_(clock),
      rtp_header_extensions_(config_.rtp.extensions),
      forced_playout_delay_max_ms_("max_ms", absl::nullopt),
      forced_playout_delay_min_ms_("min_ms", absl::nullopt),
      rtp_receive_statistics_(rtp_receive_statistics),
      ulpfec_receiver_(UlpfecReceiver::Create(config->rtp.remote_ssrc,
                                              this,
                                              config->rtp.extensions)),
      receiving_(false),
      last_packet_log_ms_(-1),
      rtp_rtcp_(CreateRtpRtcpModule(clock,
                                    rtp_receive_statistics_,
                                    transport,
                                    rtt_stats,
                                    rtcp_packet_type_counter_observer,
                                    rtcp_cname_callback,
                                    config_.rtp.local_ssrc)),
      complete_frame_callback_(complete_frame_callback),
      keyframe_request_sender_(keyframe_request_sender),
      // TODO(bugs.webrtc.org/10336): Let |rtcp_feedback_buffer_| communicate
      // directly with |rtp_rtcp_|.
      rtcp_feedback_buffer_(this, nack_sender, this),
      nack_module_(MaybeConstructNackModule(current_queue,
                                            config_,
                                            clock_,
                                            &rtcp_feedback_buffer_,
                                            &rtcp_feedback_buffer_)),
      packet_buffer_(clock_, kPacketBufferStartSize, PacketBufferMaxSize()),
      has_received_frame_(false),
      frames_decryptable_(false),
      absolute_capture_time_receiver_(clock) {
  constexpr bool remb_candidate = true;
  if (packet_router_)
    packet_router_->AddReceiveRtpModule(rtp_rtcp_.get(), remb_candidate);

  RTC_DCHECK(config_.rtp.rtcp_mode != RtcpMode::kOff)
      << "A stream should not be configured with RTCP disabled. This value is "
         "reserved for internal usage.";
  // TODO(pbos): What's an appropriate local_ssrc for receive-only streams?
  RTC_DCHECK(config_.rtp.local_ssrc != 0);
  RTC_DCHECK(config_.rtp.remote_ssrc != config_.rtp.local_ssrc);

  rtp_rtcp_->SetRTCPStatus(config_.rtp.rtcp_mode);
  rtp_rtcp_->SetRemoteSSRC(config_.rtp.remote_ssrc);

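  // With NACK enabled, allow considerably more reordering before the receive
  // statistics count a late packet as lost, since retransmitted packets can
  // arrive far out of order.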
  static const int kMaxPacketAgeToNack = 450;
  const int max_reordering_threshold = (config_.rtp.nack.rtp_history_ms > 0)
                                           ? kMaxPacketAgeToNack
                                           : kDefaultMaxReorderingThreshold;
  rtp_receive_statistics_->SetMaxReorderingThreshold(config_.rtp.remote_ssrc,
                                                     max_reordering_threshold);
  // TODO(nisse): For historic reasons, we applied the above
  // max_reordering_threshold also for RTX stats, which makes little sense
  // since we don't NACK rtx packets. Consider deleting the below block, and
  // rely on the default threshold.
  if (config_.rtp.rtx_ssrc) {
    rtp_receive_statistics_->SetMaxReorderingThreshold(
        config_.rtp.rtx_ssrc, max_reordering_threshold);
  }
  if (config_.rtp.rtcp_xr.receiver_reference_time_report)
    rtp_rtcp_->SetRtcpXrRrtrStatus(true);

  ParseFieldTrial(
      {&forced_playout_delay_max_ms_, &forced_playout_delay_min_ms_},
      field_trial::FindFullName("WebRTC-ForcePlayoutDelay"));

  process_thread_->RegisterModule(rtp_rtcp_.get(), RTC_FROM_HERE);

  if (config_.rtp.lntf.enabled) {
    loss_notification_controller_ =
        std::make_unique<LossNotificationController>(&rtcp_feedback_buffer_,
                                                     &rtcp_feedback_buffer_);
  }

  reference_finder_ =
      std::make_unique<video_coding::RtpFrameReferenceFinder>(this);

  // Only construct the encrypted receiver if frame encryption is enabled.
  if (config_.crypto_options.sframe.require_frame_encryption) {
    buffered_frame_decryptor_ =
        std::make_unique<BufferedFrameDecryptor>(this, this);
    if (frame_decryptor != nullptr) {
      buffered_frame_decryptor_->SetFrameDecryptor(std::move(frame_decryptor));
    }
  }

  if (frame_transformer) {
    frame_transformer_delegate_ = new rtc::RefCountedObject<
        RtpVideoStreamReceiverFrameTransformerDelegate>(
        this, std::move(frame_transformer), rtc::Thread::Current(),
        config_.rtp.remote_ssrc);
    frame_transformer_delegate_->Init();
  }
}

RtpVideoStreamReceiver2::~RtpVideoStreamReceiver2() {
  RTC_DCHECK(secondary_sinks_.empty());

  process_thread_->DeRegisterModule(rtp_rtcp_.get());

  if (packet_router_)
    packet_router_->RemoveReceiveRtpModule(rtp_rtcp_.get());
  UpdateHistograms();
  if (frame_transformer_delegate_)
    frame_transformer_delegate_->Reset();
}

void RtpVideoStreamReceiver2::AddReceiveCodec(
    const VideoCodec& video_codec,
    const std::map<std::string, std::string>& codec_params,
    bool raw_payload) {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  payload_type_map_.emplace(
      video_codec.plType,
      raw_payload ? std::make_unique<VideoRtpDepacketizerRaw>()
                  : CreateVideoRtpDepacketizer(video_codec.codecType));
  pt_codec_params_.emplace(video_codec.plType, codec_params);
}

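// Used by the stream synchronizer to line up audio and video playout: returns
// the most recent sender-report NTP/RTP mapping together with the last
// received RTP timestamp and its local receive time.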
absl::optional<Syncable::Info> RtpVideoStreamReceiver2::GetSyncInfo() const {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  Syncable::Info info;
  if (rtp_rtcp_->RemoteNTP(&info.capture_time_ntp_secs,
                           &info.capture_time_ntp_frac, nullptr, nullptr,
                           &info.capture_time_source_clock) != 0) {
    return absl::nullopt;
  }

  if (!last_received_rtp_timestamp_ || !last_received_rtp_system_time_ms_) {
    return absl::nullopt;
  }
  info.latest_received_capture_timestamp = *last_received_rtp_timestamp_;
  info.latest_receive_time_ms = *last_received_rtp_system_time_ms_;

  // Leaves info.current_delay_ms uninitialized.
  return info;
}

RtpVideoStreamReceiver2::ParseGenericDependenciesResult
RtpVideoStreamReceiver2::ParseGenericDependenciesExtension(
    const RtpPacketReceived& rtp_packet,
    RTPVideoHeader* video_header) {
  if (rtp_packet.HasExtension<RtpDependencyDescriptorExtension>()) {
    webrtc::DependencyDescriptor dependency_descriptor;
    if (!rtp_packet.GetExtension<RtpDependencyDescriptorExtension>(
            video_structure_.get(), &dependency_descriptor)) {
      // The descriptor is present but failed to parse. Either it is invalid,
      // or the packet is too old (the relevant video_structure_ has already
      // changed) or too new (the relevant video_structure_ has not arrived
      // yet). Drop such packets to be on the safe side.
      // TODO(bugs.webrtc.org/10342): Stash too new packet.
      RTC_LOG(LS_WARNING) << "ssrc: " << rtp_packet.Ssrc()
                          << " Failed to parse dependency descriptor.";
      return kDropPacket;
    }
    if (dependency_descriptor.attached_structure != nullptr &&
        !dependency_descriptor.first_packet_in_frame) {
      RTC_LOG(LS_WARNING) << "ssrc: " << rtp_packet.Ssrc()
                          << " Invalid dependency descriptor: structure "
                             "attached to non first packet of a frame.";
      return kDropPacket;
    }
    video_header->is_first_packet_in_frame =
        dependency_descriptor.first_packet_in_frame;
    video_header->is_last_packet_in_frame =
        dependency_descriptor.last_packet_in_frame;

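    // The frame number in the descriptor is a small wrapping counter;
    // frame_id_unwrapper_ expands it into a monotonically increasing 64-bit
    // frame id so that dependencies expressed as backward diffs stay valid
    // across wrap-around.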
    int64_t frame_id =
        frame_id_unwrapper_.Unwrap(dependency_descriptor.frame_number);
    auto& generic_descriptor_info = video_header->generic.emplace();
    generic_descriptor_info.frame_id = frame_id;
    generic_descriptor_info.spatial_index =
        dependency_descriptor.frame_dependencies.spatial_id;
    generic_descriptor_info.temporal_index =
        dependency_descriptor.frame_dependencies.temporal_id;
    for (int fdiff : dependency_descriptor.frame_dependencies.frame_diffs) {
      generic_descriptor_info.dependencies.push_back(frame_id - fdiff);
    }
    generic_descriptor_info.decode_target_indications =
        dependency_descriptor.frame_dependencies.decode_target_indications;
    if (dependency_descriptor.resolution) {
      video_header->width = dependency_descriptor.resolution->Width();
      video_header->height = dependency_descriptor.resolution->Height();
    }

    // The FrameDependencyStructure is sent in the dependency descriptor of the
    // first packet of a key frame and is required to parse the dependency
    // descriptor in all following packets until the next key frame.
    // Save it if there is a (potentially) new structure.
    if (dependency_descriptor.attached_structure) {
      RTC_DCHECK(dependency_descriptor.first_packet_in_frame);
      if (video_structure_frame_id_ > frame_id) {
        RTC_LOG(LS_WARNING)
            << "Key frame with id " << frame_id << " and structure id "
            << dependency_descriptor.attached_structure->structure_id
            << " is older than the latest received key frame with id "
            << *video_structure_frame_id_ << " and structure id "
            << video_structure_->structure_id;
        return kDropPacket;
      }
      video_structure_ = std::move(dependency_descriptor.attached_structure);
      video_structure_frame_id_ = frame_id;
      video_header->frame_type = VideoFrameType::kVideoFrameKey;
    } else {
      video_header->frame_type = VideoFrameType::kVideoFrameDelta;
    }
    return kHasGenericDescriptor;
  }

  RtpGenericFrameDescriptor generic_frame_descriptor;
  if (!rtp_packet.GetExtension<RtpGenericFrameDescriptorExtension00>(
          &generic_frame_descriptor)) {
    return kNoGenericDescriptor;
  }

  video_header->is_first_packet_in_frame =
      generic_frame_descriptor.FirstPacketInSubFrame();
  video_header->is_last_packet_in_frame =
      generic_frame_descriptor.LastPacketInSubFrame();

  if (generic_frame_descriptor.FirstPacketInSubFrame()) {
    video_header->frame_type =
        generic_frame_descriptor.FrameDependenciesDiffs().empty()
            ? VideoFrameType::kVideoFrameKey
            : VideoFrameType::kVideoFrameDelta;

    auto& generic_descriptor_info = video_header->generic.emplace();
    int64_t frame_id =
        frame_id_unwrapper_.Unwrap(generic_frame_descriptor.FrameId());
    generic_descriptor_info.frame_id = frame_id;
    generic_descriptor_info.spatial_index =
        generic_frame_descriptor.SpatialLayer();
    generic_descriptor_info.temporal_index =
        generic_frame_descriptor.TemporalLayer();
    for (uint16_t fdiff : generic_frame_descriptor.FrameDependenciesDiffs()) {
      generic_descriptor_info.dependencies.push_back(frame_id - fdiff);
    }
  }
  video_header->width = generic_frame_descriptor.Width();
  video_header->height = generic_frame_descriptor.Height();
  return kHasGenericDescriptor;
}

void RtpVideoStreamReceiver2::OnReceivedPayloadData(
    rtc::CopyOnWriteBuffer codec_payload,
    const RtpPacketReceived& rtp_packet,
    const RTPVideoHeader& video) {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  auto packet = std::make_unique<video_coding::PacketBuffer::Packet>(
      rtp_packet, video, ntp_estimator_.Estimate(rtp_packet.Timestamp()),
      clock_->TimeInMilliseconds());

  // Try to extrapolate absolute capture time if it is missing.
  packet->packet_info.set_absolute_capture_time(
      absolute_capture_time_receiver_.OnReceivePacket(
          AbsoluteCaptureTimeReceiver::GetSource(packet->packet_info.ssrc(),
                                                 packet->packet_info.csrcs()),
          packet->packet_info.rtp_timestamp(),
          // Assume frequency is the same one for all video frames.
          kVideoPayloadTypeFrequency,
          packet->packet_info.absolute_capture_time()));

  RTPVideoHeader& video_header = packet->video_header;
  video_header.rotation = kVideoRotation_0;
  video_header.content_type = VideoContentType::UNSPECIFIED;
  video_header.video_timing.flags = VideoSendTiming::kInvalid;
  video_header.is_last_packet_in_frame |= rtp_packet.Marker();

  if (const auto* vp9_header =
          absl::get_if<RTPVideoHeaderVP9>(&video_header.video_type_header)) {
    video_header.is_last_packet_in_frame |= vp9_header->end_of_frame;
    video_header.is_first_packet_in_frame |= vp9_header->beginning_of_frame;
  }

  rtp_packet.GetExtension<VideoOrientation>(&video_header.rotation);
  rtp_packet.GetExtension<VideoContentTypeExtension>(
      &video_header.content_type);
  rtp_packet.GetExtension<VideoTimingExtension>(&video_header.video_timing);
  if (forced_playout_delay_max_ms_ && forced_playout_delay_min_ms_) {
    video_header.playout_delay.max_ms = *forced_playout_delay_max_ms_;
    video_header.playout_delay.min_ms = *forced_playout_delay_min_ms_;
  } else {
    rtp_packet.GetExtension<PlayoutDelayLimits>(&video_header.playout_delay);
  }

  ParseGenericDependenciesResult generic_descriptor_state =
      ParseGenericDependenciesExtension(rtp_packet, &video_header);
  if (generic_descriptor_state == kDropPacket)
    return;

  // Color space should only be transmitted in the last packet of a frame, so
  // it is ignored for other packets to avoid resetting last_color_space_ by
  // mistake.
  if (video_header.is_last_packet_in_frame) {
    video_header.color_space = rtp_packet.GetExtension<ColorSpaceExtension>();
    if (video_header.color_space ||
        video_header.frame_type == VideoFrameType::kVideoFrameKey) {
      // Store color space since it's only transmitted when changed or for key
      // frames. Color space will be cleared if a key frame is transmitted
      // without color space information.
      last_color_space_ = video_header.color_space;
    } else if (last_color_space_) {
      video_header.color_space = last_color_space_;
    }
  }

  if (loss_notification_controller_) {
    if (rtp_packet.recovered()) {
      // TODO(bugs.webrtc.org/10336): Implement support for reordering.
      RTC_LOG(LS_INFO)
          << "LossNotificationController does not support reordering.";
    } else if (generic_descriptor_state == kNoGenericDescriptor) {
      RTC_LOG(LS_WARNING) << "LossNotificationController requires generic "
                             "frame descriptor, but it is missing.";
    } else {
      if (video_header.is_first_packet_in_frame) {
        RTC_DCHECK(video_header.generic);
        LossNotificationController::FrameDetails frame;
        frame.is_keyframe =
            video_header.frame_type == VideoFrameType::kVideoFrameKey;
        frame.frame_id = video_header.generic->frame_id;
        frame.frame_dependencies = video_header.generic->dependencies;
        loss_notification_controller_->OnReceivedPacket(
            rtp_packet.SequenceNumber(), &frame);
      } else {
        loss_notification_controller_->OnReceivedPacket(
            rtp_packet.SequenceNumber(), nullptr);
      }
    }
  }

  if (nack_module_) {
    const bool is_keyframe =
        video_header.is_first_packet_in_frame &&
        video_header.frame_type == VideoFrameType::kVideoFrameKey;

    packet->times_nacked = nack_module_->OnReceivedPacket(
        rtp_packet.SequenceNumber(), is_keyframe, rtp_packet.recovered());
  } else {
    packet->times_nacked = -1;
  }

  if (codec_payload.size() == 0) {
    NotifyReceiverOfEmptyPacket(packet->seq_num);
    rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
    return;
  }

  if (packet->codec() == kVideoCodecH264) {
    // Only when we start to receive packets do we know which payload type
    // will be used. Once the payload type is known, insert the correct
    // SPS/PPS into the tracker.
    if (packet->payload_type != last_payload_type_) {
      last_payload_type_ = packet->payload_type;
      InsertSpsPpsIntoTracker(packet->payload_type);
    }

    video_coding::H264SpsPpsTracker::FixedBitstream fixed =
        tracker_.CopyAndFixBitstream(
            rtc::MakeArrayView(codec_payload.cdata(), codec_payload.size()),
            &packet->video_header);

    switch (fixed.action) {
      case video_coding::H264SpsPpsTracker::kRequestKeyframe:
        rtcp_feedback_buffer_.RequestKeyFrame();
        rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
        ABSL_FALLTHROUGH_INTENDED;
      case video_coding::H264SpsPpsTracker::kDrop:
        return;
      case video_coding::H264SpsPpsTracker::kInsert:
        packet->video_payload = std::move(fixed.bitstream);
        break;
    }

  } else {
    packet->video_payload = std::move(codec_payload);
  }

  rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
  frame_counter_.Add(packet->timestamp);
  OnInsertedPacket(packet_buffer_.InsertPacket(std::move(packet)));
}

void RtpVideoStreamReceiver2::OnRecoveredPacket(const uint8_t* rtp_packet,
                                                size_t rtp_packet_length) {
  RtpPacketReceived packet;
  if (!packet.Parse(rtp_packet, rtp_packet_length))
    return;
  if (packet.PayloadType() == config_.rtp.red_payload_type) {
    RTC_LOG(LS_WARNING) << "Discarding recovered packet with RED encapsulation";
    return;
  }

  packet.IdentifyExtensions(rtp_header_extensions_);
  packet.set_payload_type_frequency(kVideoPayloadTypeFrequency);
  // TODO(nisse): UlpfecReceiverImpl::ProcessReceivedFec passes both
  // original (decapsulated) media packets and recovered packets to
  // this callback. We need a way to distinguish, for setting
  // packet.recovered() correctly. Ideally, move RED decapsulation out
  // of the Ulpfec implementation.

  ReceivePacket(packet);
}

// This method handles both regular RTP packets and packets recovered
// via FlexFEC.
void RtpVideoStreamReceiver2::OnRtpPacket(const RtpPacketReceived& packet) {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);

  if (!receiving_) {
    return;
  }

  if (!packet.recovered()) {
    // TODO(nisse): Exclude out-of-order packets?
    int64_t now_ms = clock_->TimeInMilliseconds();

    last_received_rtp_timestamp_ = packet.Timestamp();
    last_received_rtp_system_time_ms_ = now_ms;

    // Periodically log the RTP header of incoming packets.
    if (now_ms - last_packet_log_ms_ > kPacketLogIntervalMs) {
      rtc::StringBuilder ss;
      ss << "Packet received on SSRC: " << packet.Ssrc()
         << " with payload type: " << static_cast<int>(packet.PayloadType())
         << ", timestamp: " << packet.Timestamp()
         << ", sequence number: " << packet.SequenceNumber()
         << ", arrival time: " << packet.arrival_time_ms();
      int32_t time_offset;
      if (packet.GetExtension<TransmissionOffset>(&time_offset)) {
        ss << ", toffset: " << time_offset;
      }
      uint32_t send_time;
      if (packet.GetExtension<AbsoluteSendTime>(&send_time)) {
        ss << ", abs send time: " << send_time;
      }
      RTC_LOG(LS_INFO) << ss.str();
      last_packet_log_ms_ = now_ms;
    }
  }

  ReceivePacket(packet);

  // Update receive statistics after ReceivePacket.
  // Receive statistics will be reset if the payload type changes (make sure
  // that the first packet is included in the stats).
  if (!packet.recovered()) {
    rtp_receive_statistics_->OnRtpPacket(packet);
  }

  for (RtpPacketSinkInterface* secondary_sink : secondary_sinks_) {
    secondary_sink->OnRtpPacket(packet);
  }
}

void RtpVideoStreamReceiver2::RequestKeyFrame() {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  // TODO(bugs.webrtc.org/10336): Allow the sender to ignore key frame requests
  // issued by anything other than the LossNotificationController if it (the
  // sender) is relying on LNTF alone.
  if (keyframe_request_sender_) {
    keyframe_request_sender_->RequestKeyFrame();
  } else {
    rtp_rtcp_->SendPictureLossIndication();
  }
}

void RtpVideoStreamReceiver2::SendLossNotification(
    uint16_t last_decoded_seq_num,
    uint16_t last_received_seq_num,
    bool decodability_flag,
    bool buffering_allowed) {
  RTC_DCHECK(config_.rtp.lntf.enabled);
  rtp_rtcp_->SendLossNotification(last_decoded_seq_num, last_received_seq_num,
                                  decodability_flag, buffering_allowed);
}

bool RtpVideoStreamReceiver2::IsUlpfecEnabled() const {
  return config_.rtp.ulpfec_payload_type != -1;
}

bool RtpVideoStreamReceiver2::IsRetransmissionsEnabled() const {
  return config_.rtp.nack.rtp_history_ms > 0;
}

void RtpVideoStreamReceiver2::RequestPacketRetransmit(
    const std::vector<uint16_t>& sequence_numbers) {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  rtp_rtcp_->SendNack(sequence_numbers);
}

bool RtpVideoStreamReceiver2::IsDecryptable() const {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  return frames_decryptable_;
}

void RtpVideoStreamReceiver2::OnInsertedPacket(
    video_coding::PacketBuffer::InsertResult result) {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  video_coding::PacketBuffer::Packet* first_packet = nullptr;
  int max_nack_count;
  int64_t min_recv_time;
  int64_t max_recv_time;
  std::vector<rtc::ArrayView<const uint8_t>> payloads;
  RtpPacketInfos::vector_type packet_infos;

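  // result.packets holds the packets of zero or more newly completed frames,
  // in order. The first/last packet flags delimit each frame; the loop below
  // collects payloads and packet infos per frame and assembles one
  // RtpFrameObject per completed frame.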
  bool frame_boundary = true;
  for (auto& packet : result.packets) {
    // PacketBuffer promises frame boundaries are correctly set on each
    // packet. Document that assumption with the DCHECKs.
    RTC_DCHECK_EQ(frame_boundary, packet->is_first_packet_in_frame());
    if (packet->is_first_packet_in_frame()) {
      first_packet = packet.get();
      max_nack_count = packet->times_nacked;
      min_recv_time = packet->packet_info.receive_time_ms();
      max_recv_time = packet->packet_info.receive_time_ms();
      payloads.clear();
      packet_infos.clear();
    } else {
      max_nack_count = std::max(max_nack_count, packet->times_nacked);
      min_recv_time =
          std::min(min_recv_time, packet->packet_info.receive_time_ms());
      max_recv_time =
          std::max(max_recv_time, packet->packet_info.receive_time_ms());
    }
    payloads.emplace_back(packet->video_payload);
    packet_infos.push_back(packet->packet_info);

    frame_boundary = packet->is_last_packet_in_frame();
    if (packet->is_last_packet_in_frame()) {
      auto depacketizer_it = payload_type_map_.find(first_packet->payload_type);
      RTC_CHECK(depacketizer_it != payload_type_map_.end());

      rtc::scoped_refptr<EncodedImageBuffer> bitstream =
          depacketizer_it->second->AssembleFrame(payloads);
      if (!bitstream) {
        // Failed to assemble a frame. Discard and continue.
        continue;
      }

      const video_coding::PacketBuffer::Packet& last_packet = *packet;
      OnAssembledFrame(std::make_unique<video_coding::RtpFrameObject>(
          first_packet->seq_num,                    //
          last_packet.seq_num,                      //
          last_packet.marker_bit,                   //
          max_nack_count,                           //
          min_recv_time,                            //
          max_recv_time,                            //
          first_packet->timestamp,                  //
          first_packet->ntp_time_ms,                //
          last_packet.video_header.video_timing,    //
          first_packet->payload_type,               //
          first_packet->codec(),                    //
          last_packet.video_header.rotation,        //
          last_packet.video_header.content_type,    //
          first_packet->video_header,               //
          last_packet.video_header.color_space,     //
          RtpPacketInfos(std::move(packet_infos)),  //
          std::move(bitstream)));
    }
  }
  RTC_DCHECK(frame_boundary);
  if (result.buffer_cleared) {
    RequestKeyFrame();
  }
}

void RtpVideoStreamReceiver2::OnAssembledFrame(
    std::unique_ptr<video_coding::RtpFrameObject> frame) {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  RTC_DCHECK(frame);

  const absl::optional<RTPVideoHeader::GenericDescriptorInfo>& descriptor =
      frame->GetRtpVideoHeader().generic;

  if (loss_notification_controller_ && descriptor) {
    loss_notification_controller_->OnAssembledFrame(
        frame->first_seq_num(), descriptor->frame_id,
        absl::c_linear_search(descriptor->decode_target_indications,
                              DecodeTargetIndication::kDiscardable),
        descriptor->dependencies);
  }

  // If frames arrive before a key frame, they would not be decodable.
  // In that case, request a key frame ASAP.
  if (!has_received_frame_) {
    if (frame->FrameType() != VideoFrameType::kVideoFrameKey) {
      // |loss_notification_controller_|, if present, would have already
      // requested a key frame when the first packet for the non-key frame
      // had arrived, so no need to replicate the request.
      if (!loss_notification_controller_) {
        RequestKeyFrame();
      }
    }
    has_received_frame_ = true;
  }

  // Reset |reference_finder_| if |frame| is new and the codec has changed.
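  // AheadOf() compares RTP timestamps with wrap-around handling, so "newer"
  // stays correct when the 32-bit timestamp rolls over.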
  if (current_codec_) {
    bool frame_is_newer =
        AheadOf(frame->Timestamp(), last_assembled_frame_rtp_timestamp_);

    if (frame->codec_type() != current_codec_) {
      if (frame_is_newer) {
        // When we reset the |reference_finder_| we don't want new picture ids
        // to overlap with old picture ids. To ensure that doesn't happen we
        // start from the |last_completed_picture_id_| and add an offset in
        // case of reordering.
        reference_finder_ =
            std::make_unique<video_coding::RtpFrameReferenceFinder>(
                this, last_completed_picture_id_ +
                          std::numeric_limits<uint16_t>::max());
        current_codec_ = frame->codec_type();
      } else {
        // Old frame from before the codec switch, discard it.
        return;
      }
    }

    if (frame_is_newer) {
      last_assembled_frame_rtp_timestamp_ = frame->Timestamp();
    }
  } else {
    current_codec_ = frame->codec_type();
    last_assembled_frame_rtp_timestamp_ = frame->Timestamp();
  }

  if (buffered_frame_decryptor_ != nullptr) {
    buffered_frame_decryptor_->ManageEncryptedFrame(std::move(frame));
  } else if (frame_transformer_delegate_) {
    frame_transformer_delegate_->TransformFrame(std::move(frame));
  } else {
    reference_finder_->ManageFrame(std::move(frame));
  }
}

void RtpVideoStreamReceiver2::OnCompleteFrame(
    std::unique_ptr<video_coding::EncodedFrame> frame) {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  video_coding::RtpFrameObject* rtp_frame =
      static_cast<video_coding::RtpFrameObject*>(frame.get());
  last_seq_num_for_pic_id_[rtp_frame->id.picture_id] =
      rtp_frame->last_seq_num();

  last_completed_picture_id_ =
      std::max(last_completed_picture_id_, frame->id.picture_id);
  complete_frame_callback_->OnCompleteFrame(std::move(frame));
}

void RtpVideoStreamReceiver2::OnDecryptedFrame(
    std::unique_ptr<video_coding::RtpFrameObject> frame) {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  reference_finder_->ManageFrame(std::move(frame));
}

void RtpVideoStreamReceiver2::OnDecryptionStatusChange(
    FrameDecryptorInterface::Status status) {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  // Called from BufferedFrameDecryptor::DecryptFrame.
  frames_decryptable_ =
      (status == FrameDecryptorInterface::Status::kOk) ||
      (status == FrameDecryptorInterface::Status::kRecoverable);
}

void RtpVideoStreamReceiver2::SetFrameDecryptor(
    rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor) {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  if (buffered_frame_decryptor_ == nullptr) {
    buffered_frame_decryptor_ =
        std::make_unique<BufferedFrameDecryptor>(this, this);
  }
  buffered_frame_decryptor_->SetFrameDecryptor(std::move(frame_decryptor));
}

void RtpVideoStreamReceiver2::SetDepacketizerToDecoderFrameTransformer(
    rtc::scoped_refptr<FrameTransformerInterface> frame_transformer) {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  frame_transformer_delegate_ =
      new rtc::RefCountedObject<RtpVideoStreamReceiverFrameTransformerDelegate>(
          this, std::move(frame_transformer), rtc::Thread::Current(),
          config_.rtp.remote_ssrc);
  frame_transformer_delegate_->Init();
}

void RtpVideoStreamReceiver2::UpdateRtt(int64_t max_rtt_ms) {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  if (nack_module_)
    nack_module_->UpdateRtt(max_rtt_ms);
}

absl::optional<int64_t> RtpVideoStreamReceiver2::LastReceivedPacketMs() const {
  return packet_buffer_.LastReceivedPacketMs();
}

absl::optional<int64_t> RtpVideoStreamReceiver2::LastReceivedKeyframePacketMs()
    const {
  return packet_buffer_.LastReceivedKeyframePacketMs();
}

void RtpVideoStreamReceiver2::AddSecondarySink(RtpPacketSinkInterface* sink) {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  RTC_DCHECK(!absl::c_linear_search(secondary_sinks_, sink));
  secondary_sinks_.push_back(sink);
}

void RtpVideoStreamReceiver2::RemoveSecondarySink(
    const RtpPacketSinkInterface* sink) {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  auto it = absl::c_find(secondary_sinks_, sink);
  if (it == secondary_sinks_.end()) {
    // We might be rolling-back a call whose setup failed mid-way. In such a
    // case, it's simpler to remove "everything" rather than remember what
    // has already been added.
    RTC_LOG(LS_WARNING) << "Removal of unknown sink.";
    return;
  }
  secondary_sinks_.erase(it);
}

void RtpVideoStreamReceiver2::ManageFrame(
    std::unique_ptr<video_coding::RtpFrameObject> frame) {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  reference_finder_->ManageFrame(std::move(frame));
}

void RtpVideoStreamReceiver2::ReceivePacket(const RtpPacketReceived& packet) {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  if (packet.payload_size() == 0) {
    // Padding or keep-alive packet.
    // TODO(nisse): Could drop empty packets earlier, but need to figure out
    // how they should be counted in stats.
    NotifyReceiverOfEmptyPacket(packet.SequenceNumber());
    return;
  }
  if (packet.PayloadType() == config_.rtp.red_payload_type) {
    ParseAndHandleEncapsulatingHeader(packet);
    return;
  }

  const auto type_it = payload_type_map_.find(packet.PayloadType());
  if (type_it == payload_type_map_.end()) {
    return;
  }
  absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed_payload =
      type_it->second->Parse(packet.PayloadBuffer());
  if (parsed_payload == absl::nullopt) {
    RTC_LOG(LS_WARNING) << "Failed parsing payload.";
    return;
  }

  OnReceivedPayloadData(std::move(parsed_payload->video_payload), packet,
                        parsed_payload->video_header);
}

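// Handles packets on the RED payload type (RFC 2198), which carry ULPFEC
// (RFC 5109) on the media SSRC. The packets are handed to the ULPFEC
// receiver, which decapsulates them and attempts recovery of missing media
// packets; recovered packets come back through OnRecoveredPacket().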
void RtpVideoStreamReceiver2::ParseAndHandleEncapsulatingHeader(
    const RtpPacketReceived& packet) {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  if (packet.PayloadType() == config_.rtp.red_payload_type &&
      packet.payload_size() > 0) {
    if (packet.payload()[0] == config_.rtp.ulpfec_payload_type) {
      // Notify video_receiver about received FEC packets to avoid NACKing
      // these packets.
      NotifyReceiverOfEmptyPacket(packet.SequenceNumber());
    }
    if (!ulpfec_receiver_->AddReceivedRedPacket(
            packet, config_.rtp.ulpfec_payload_type)) {
      return;
    }
    ulpfec_receiver_->ProcessReceivedFec();
  }
}

// In the case of a video stream without picture ids and no rtx the
// RtpFrameReferenceFinder will need to know about padding to
// correctly calculate frame references.
void RtpVideoStreamReceiver2::NotifyReceiverOfEmptyPacket(uint16_t seq_num) {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);

  reference_finder_->PaddingReceived(seq_num);

  OnInsertedPacket(packet_buffer_.InsertPadding(seq_num));
  if (nack_module_) {
    nack_module_->OnReceivedPacket(seq_num, /* is_keyframe = */ false,
                                   /* is_recovered = */ false);
  }
  if (loss_notification_controller_) {
    // TODO(bugs.webrtc.org/10336): Handle empty packets.
    RTC_LOG(LS_WARNING)
        << "LossNotificationController does not expect empty packets.";
  }
}

bool RtpVideoStreamReceiver2::DeliverRtcp(const uint8_t* rtcp_packet,
                                          size_t rtcp_packet_length) {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);

  if (!receiving_) {
    return false;
  }

  rtp_rtcp_->IncomingRtcpPacket(rtcp_packet, rtcp_packet_length);

  int64_t rtt = 0;
  rtp_rtcp_->RTT(config_.rtp.remote_ssrc, &rtt, nullptr, nullptr, nullptr);
  if (rtt == 0) {
    // Waiting for valid rtt.
    return true;
  }
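  // RemoteNTP() returns the NTP/RTP timestamp pair of the last received
  // sender report; together with the RTT it is used to keep the RTP-to-NTP
  // estimator up to date.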
  uint32_t ntp_secs = 0;
  uint32_t ntp_frac = 0;
  uint32_t rtp_timestamp = 0;
  uint32_t received_ntp_secs = 0;
  uint32_t received_ntp_frac = 0;
  if (rtp_rtcp_->RemoteNTP(&ntp_secs, &ntp_frac, &received_ntp_secs,
                           &received_ntp_frac, &rtp_timestamp) != 0) {
    // Waiting for RTCP.
    return true;
  }
  NtpTime received_ntp(received_ntp_secs, received_ntp_frac);
  int64_t time_since_received =
      clock_->CurrentNtpInMilliseconds() - received_ntp.ToMs();
  // Don't use old SRs to estimate time.
  if (time_since_received <= 1) {
    ntp_estimator_.UpdateRtcpTimestamp(rtt, ntp_secs, ntp_frac, rtp_timestamp);
    absl::optional<int64_t> remote_to_local_clock_offset_ms =
        ntp_estimator_.EstimateRemoteToLocalClockOffsetMs();
    if (remote_to_local_clock_offset_ms.has_value()) {
      absolute_capture_time_receiver_.SetRemoteToLocalClockOffset(
          Int64MsToQ32x32(*remote_to_local_clock_offset_ms));
    }
  }

  return true;
}

void RtpVideoStreamReceiver2::FrameContinuous(int64_t picture_id) {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  if (!nack_module_)
    return;

  int seq_num = -1;
  auto seq_num_it = last_seq_num_for_pic_id_.find(picture_id);
  if (seq_num_it != last_seq_num_for_pic_id_.end())
    seq_num = seq_num_it->second;
  if (seq_num != -1)
    nack_module_->ClearUpTo(seq_num);
}

void RtpVideoStreamReceiver2::FrameDecoded(int64_t picture_id) {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  // Running on the decoder thread.
  int seq_num = -1;
  auto seq_num_it = last_seq_num_for_pic_id_.find(picture_id);
  if (seq_num_it != last_seq_num_for_pic_id_.end()) {
    seq_num = seq_num_it->second;
    last_seq_num_for_pic_id_.erase(last_seq_num_for_pic_id_.begin(),
                                   ++seq_num_it);
  }

  if (seq_num != -1) {
    packet_buffer_.ClearTo(seq_num);
    reference_finder_->ClearTo(seq_num);
  }
}

void RtpVideoStreamReceiver2::SignalNetworkState(NetworkState state) {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  rtp_rtcp_->SetRTCPStatus(state == kNetworkUp ? config_.rtp.rtcp_mode
                                               : RtcpMode::kOff);
}

void RtpVideoStreamReceiver2::StartReceive() {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  receiving_ = true;
}

void RtpVideoStreamReceiver2::StopReceive() {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);
  receiving_ = false;
}

void RtpVideoStreamReceiver2::UpdateHistograms() {
  FecPacketCounter counter = ulpfec_receiver_->GetPacketCounter();
  if (counter.first_packet_time_ms == -1)
    return;

  int64_t elapsed_sec =
      (clock_->TimeInMilliseconds() - counter.first_packet_time_ms) / 1000;
  if (elapsed_sec < metrics::kMinRunTimeInSeconds)
    return;

  if (counter.num_packets > 0) {
    RTC_HISTOGRAM_PERCENTAGE(
        "WebRTC.Video.ReceivedFecPacketsInPercent",
        static_cast<int>(counter.num_fec_packets * 100 / counter.num_packets));
  }
  if (counter.num_fec_packets > 0) {
    RTC_HISTOGRAM_PERCENTAGE("WebRTC.Video.RecoveredMediaPacketsInPercentOfFec",
                             static_cast<int>(counter.num_recovered_packets *
                                              100 / counter.num_fec_packets));
  }
  if (config_.rtp.ulpfec_payload_type != -1) {
    RTC_HISTOGRAM_COUNTS_10000(
        "WebRTC.Video.FecBitrateReceivedInKbps",
        static_cast<int>(counter.num_bytes * 8 / elapsed_sec / 1000));
  }
}

void RtpVideoStreamReceiver2::InsertSpsPpsIntoTracker(uint8_t payload_type) {
  RTC_DCHECK_RUN_ON(&worker_task_checker_);

  auto codec_params_it = pt_codec_params_.find(payload_type);
  if (codec_params_it == pt_codec_params_.end())
    return;

  RTC_LOG(LS_INFO) << "Found out of band supplied codec parameters for"
                      " payload type: "
                   << static_cast<int>(payload_type);

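  // The sprop-parameter-sets fmtp attribute (RFC 6184) carries base64-encoded
  // SPS and PPS NAL units out of band; decode them and seed the
  // H264SpsPpsTracker so they can be inserted into the bitstream when needed.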
  H264SpropParameterSets sprop_decoder;
  auto sprop_base64_it =
      codec_params_it->second.find(cricket::kH264FmtpSpropParameterSets);

  if (sprop_base64_it == codec_params_it->second.end())
    return;

  if (!sprop_decoder.DecodeSprop(sprop_base64_it->second.c_str()))
    return;

  tracker_.InsertSpsPpsNalus(sprop_decoder.sps_nalu(),
                             sprop_decoder.pps_nalu());
}

}  // namespace webrtc