/*
 *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 *
 */

#ifdef RTC_ENABLE_VP9

#include "modules/video_coding/codecs/vp9/vp9_impl.h"

#include <algorithm>
#include <limits>
#include <utility>
#include <vector>

#include "absl/memory/memory.h"
#include "api/video/color_space.h"
#include "api/video/i010_buffer.h"
#include "common_video/include/video_frame_buffer.h"
#include "common_video/libyuv/include/webrtc_libyuv.h"
#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "modules/video_coding/codecs/vp9/svc_rate_allocator.h"
#include "modules/video_coding/utility/vp9_uncompressed_header_parser.h"
#include "rtc_base/checks.h"
#include "rtc_base/experiments/rate_control_settings.h"
#include "rtc_base/keep_ref_until_done.h"
#include "rtc_base/logging.h"
#include "rtc_base/time_utils.h"
#include "rtc_base/trace_event.h"
#include "system_wrappers/include/field_trial.h"
#include "vpx/vp8cx.h"
#include "vpx/vp8dx.h"
#include "vpx/vpx_decoder.h"
#include "vpx/vpx_encoder.h"

namespace webrtc {

namespace {
// Maps from gof_idx to encoder internal reference frame buffer index. These
// maps work for 1, 2 and 3 temporal layers with GOF length of 1, 2 and 4
// frames.
uint8_t kRefBufIdx[4] = {0, 0, 0, 1};
uint8_t kUpdBufIdx[4] = {0, 0, 1, 0};
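// E.g. with 3 temporal layers (GOF length 4, pattern TL0-TL2-TL1-TL2): the
// TL0 frame references and updates buffer 0, the first TL2 frame references
// buffer 0, the TL1 frame references buffer 0 and updates buffer 1, and the
// last TL2 frame references buffer 1. Top-layer frames update no buffer.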

// Maximum allowed PID difference for the different per-layer frame-rate case.
const int kMaxAllowedPidDiff = 30;

constexpr double kLowRateFactor = 1.0;
constexpr double kHighRateFactor = 2.0;

// TODO(ilnik): Tune these thresholds further.
// Selected using ConferenceMotion_1280_720_50.yuv clip.
// No toggling observed on any link capacity from 100-2000kbps.
// HD was reached consistently when link capacity was 1500kbps.
// Set resolutions are a bit more conservative than svc_config.cc sets, e.g.
// for 300kbps resolution converged to 270p instead of 360p.
constexpr int kLowVp9QpThreshold = 149;
constexpr int kHighVp9QpThreshold = 205;
63
64 // These settings correspond to the settings in vpx_codec_enc_cfg.
65 struct Vp9RateSettings {
66 uint32_t rc_undershoot_pct;
67 uint32_t rc_overshoot_pct;
68 uint32_t rc_buf_sz;
69 uint32_t rc_buf_optimal_sz;
70 uint32_t rc_dropframe_thresh;
71 };

// Only positive speeds, range for real-time coding currently is: 5 - 8.
// Lower means slower/better quality, higher means faster/lower quality.
int GetCpuSpeed(int width, int height) {
#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) || defined(ANDROID)
  return 8;
#else
  // For smaller resolutions, use lower speed setting (get some coding gain at
  // the cost of increased encoding complexity).
  if (width * height <= 352 * 288)
    return 5;
  else
    return 7;
#endif
}
// Helper function for extracting VP9 colorspace.
ColorSpace ExtractVP9ColorSpace(vpx_color_space_t space_t,
                                vpx_color_range_t range_t,
                                unsigned int bit_depth) {
  ColorSpace::PrimaryID primaries = ColorSpace::PrimaryID::kUnspecified;
  ColorSpace::TransferID transfer = ColorSpace::TransferID::kUnspecified;
  ColorSpace::MatrixID matrix = ColorSpace::MatrixID::kUnspecified;
  switch (space_t) {
    case VPX_CS_BT_601:
    case VPX_CS_SMPTE_170:
      primaries = ColorSpace::PrimaryID::kSMPTE170M;
      transfer = ColorSpace::TransferID::kSMPTE170M;
      matrix = ColorSpace::MatrixID::kSMPTE170M;
      break;
    case VPX_CS_SMPTE_240:
      primaries = ColorSpace::PrimaryID::kSMPTE240M;
      transfer = ColorSpace::TransferID::kSMPTE240M;
      matrix = ColorSpace::MatrixID::kSMPTE240M;
      break;
    case VPX_CS_BT_709:
      primaries = ColorSpace::PrimaryID::kBT709;
      transfer = ColorSpace::TransferID::kBT709;
      matrix = ColorSpace::MatrixID::kBT709;
      break;
    case VPX_CS_BT_2020:
      primaries = ColorSpace::PrimaryID::kBT2020;
      switch (bit_depth) {
        case 8:
          transfer = ColorSpace::TransferID::kBT709;
          break;
        case 10:
          transfer = ColorSpace::TransferID::kBT2020_10;
          break;
        default:
          RTC_NOTREACHED();
          break;
      }
      matrix = ColorSpace::MatrixID::kBT2020_NCL;
      break;
    case VPX_CS_SRGB:
      primaries = ColorSpace::PrimaryID::kBT709;
      transfer = ColorSpace::TransferID::kIEC61966_2_1;
      matrix = ColorSpace::MatrixID::kBT709;
      break;
    default:
      break;
  }

  ColorSpace::RangeID range = ColorSpace::RangeID::kInvalid;
  switch (range_t) {
    case VPX_CR_STUDIO_RANGE:
      range = ColorSpace::RangeID::kLimited;
      break;
    case VPX_CR_FULL_RANGE:
      range = ColorSpace::RangeID::kFull;
      break;
    default:
      break;
  }
  return ColorSpace(primaries, transfer, matrix, range);
}

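// Returns the half-open range [first_active, last_active + 1) of spatial
// layers with a non-zero bitrate allocation, or {0, 0} if no layer is active.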
std::pair<size_t, size_t> GetActiveLayers(
    const VideoBitrateAllocation& allocation) {
  for (size_t sl_idx = 0; sl_idx < kMaxSpatialLayers; ++sl_idx) {
    if (allocation.GetSpatialLayerSum(sl_idx) > 0) {
      size_t last_layer = sl_idx + 1;
      while (last_layer < kMaxSpatialLayers &&
             allocation.GetSpatialLayerSum(last_layer) > 0) {
        ++last_layer;
      }
      return std::make_pair(sl_idx, last_layer);
    }
  }
  return {0, 0};
}

uint32_t Interpolate(uint32_t low,
                     uint32_t high,
                     double bandwidth_headroom_factor) {
  RTC_DCHECK_GE(bandwidth_headroom_factor, kLowRateFactor);
  RTC_DCHECK_LE(bandwidth_headroom_factor, kHighRateFactor);

  // |factor| is between 0.0 and 1.0.
  const double factor = bandwidth_headroom_factor - kLowRateFactor;

  return static_cast<uint32_t>(((1.0 - factor) * low) + (factor * high) + 0.5);
}

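// Returns rate-control settings interpolated between the low and high presets
// below, keyed on the bandwidth headroom factor. E.g. a factor of 1.5 blends
// the presets 50/50 and yields (after rounding) rc_undershoot_pct = 75,
// rc_overshoot_pct = 25, rc_buf_sz = 550, rc_buf_optimal_sz = 367 and
// rc_dropframe_thresh = 23.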
Vp9RateSettings GetRateSettings(double bandwidth_headroom_factor) {
  static const Vp9RateSettings low_settings{100u, 0u, 100u, 33u, 40u};
  static const Vp9RateSettings high_settings{50u, 50u, 1000u, 700u, 5u};

  if (bandwidth_headroom_factor <= kLowRateFactor) {
    return low_settings;
  } else if (bandwidth_headroom_factor >= kHighRateFactor) {
    return high_settings;
  }

  Vp9RateSettings settings;
  settings.rc_undershoot_pct =
      Interpolate(low_settings.rc_undershoot_pct,
                  high_settings.rc_undershoot_pct, bandwidth_headroom_factor);
  settings.rc_overshoot_pct =
      Interpolate(low_settings.rc_overshoot_pct, high_settings.rc_overshoot_pct,
                  bandwidth_headroom_factor);
  settings.rc_buf_sz =
      Interpolate(low_settings.rc_buf_sz, high_settings.rc_buf_sz,
                  bandwidth_headroom_factor);
  settings.rc_buf_optimal_sz =
      Interpolate(low_settings.rc_buf_optimal_sz,
                  high_settings.rc_buf_optimal_sz, bandwidth_headroom_factor);
  settings.rc_dropframe_thresh =
      Interpolate(low_settings.rc_dropframe_thresh,
                  high_settings.rc_dropframe_thresh, bandwidth_headroom_factor);
  return settings;
}

void UpdateRateSettings(vpx_codec_enc_cfg_t* config,
                        const Vp9RateSettings& new_settings) {
  config->rc_undershoot_pct = new_settings.rc_undershoot_pct;
  config->rc_overshoot_pct = new_settings.rc_overshoot_pct;
  config->rc_buf_sz = new_settings.rc_buf_sz;
  config->rc_buf_optimal_sz = new_settings.rc_buf_optimal_sz;
  config->rc_dropframe_thresh = new_settings.rc_dropframe_thresh;
}

}  // namespace

void VP9EncoderImpl::EncoderOutputCodedPacketCallback(vpx_codec_cx_pkt* pkt,
                                                      void* user_data) {
  VP9EncoderImpl* enc = static_cast<VP9EncoderImpl*>(user_data);
  enc->GetEncodedLayerFrame(pkt);
}

VP9EncoderImpl::VP9EncoderImpl(const cricket::VideoCodec& codec)
    : encoded_image_(),
      encoded_complete_callback_(nullptr),
      profile_(
          ParseSdpForVP9Profile(codec.params).value_or(VP9Profile::kProfile0)),
      inited_(false),
      timestamp_(0),
      cpu_speed_(3),
      rc_max_intra_target_(0),
      encoder_(nullptr),
      config_(nullptr),
      raw_(nullptr),
      input_image_(nullptr),
      force_key_frame_(true),
      pics_since_key_(0),
      num_temporal_layers_(0),
      num_spatial_layers_(0),
      num_active_spatial_layers_(0),
      first_active_layer_(0),
      layer_deactivation_requires_key_frame_(
          field_trial::IsEnabled("WebRTC-Vp9IssueKeyFrameOnLayerDeactivation")),
      is_svc_(false),
      inter_layer_pred_(InterLayerPredMode::kOn),
      external_ref_control_(false),  // Set in InitEncode because of tests.
      trusted_rate_controller_(RateControlSettings::ParseFromFieldTrials()
                                   .LibvpxVp9TrustedRateController()),
      dynamic_rate_settings_(
          RateControlSettings::ParseFromFieldTrials().Vp9DynamicRateSettings()),
      layer_buffering_(false),
      full_superframe_drop_(true),
      first_frame_in_picture_(true),
      ss_info_needed_(false),
      force_all_active_layers_(false),
      is_flexible_mode_(false),
      variable_framerate_experiment_(ParseVariableFramerateConfig(
          "WebRTC-VP9VariableFramerateScreenshare")),
      variable_framerate_controller_(
          variable_framerate_experiment_.framerate_limit),
      quality_scaler_experiment_(
          ParseQualityScalerConfig("WebRTC-VP9QualityScaler")),
      num_steady_state_frames_(0),
      config_changed_(true) {
  codec_ = {};
  memset(&svc_params_, 0, sizeof(vpx_svc_extra_cfg_t));
}

VP9EncoderImpl::~VP9EncoderImpl() {
  Release();
}

void VP9EncoderImpl::SetFecControllerOverride(
    FecControllerOverride* fec_controller_override) {
  // Ignored.
}

int VP9EncoderImpl::Release() {
  int ret_val = WEBRTC_VIDEO_CODEC_OK;

  if (encoder_ != nullptr) {
    if (inited_) {
      if (vpx_codec_destroy(encoder_)) {
        ret_val = WEBRTC_VIDEO_CODEC_MEMORY;
      }
    }
    delete encoder_;
    encoder_ = nullptr;
  }
  if (config_ != nullptr) {
    delete config_;
    config_ = nullptr;
  }
  if (raw_ != nullptr) {
    vpx_img_free(raw_);
    raw_ = nullptr;
  }
  inited_ = false;
  return ret_val;
}

bool VP9EncoderImpl::ExplicitlyConfiguredSpatialLayers() const {
  // We check target_bitrate_bps of the 0th layer to see if the spatial layers
  // (i.e. bitrates) were explicitly configured.
  return codec_.spatialLayers[0].targetBitrate > 0;
}

bool VP9EncoderImpl::SetSvcRates(
    const VideoBitrateAllocation& bitrate_allocation) {
  std::pair<size_t, size_t> current_layers =
      GetActiveLayers(current_bitrate_allocation_);
  std::pair<size_t, size_t> new_layers = GetActiveLayers(bitrate_allocation);

  const bool layer_activation_requires_key_frame =
      inter_layer_pred_ == InterLayerPredMode::kOff ||
      inter_layer_pred_ == InterLayerPredMode::kOnKeyPic;
  const bool lower_layers_enabled = new_layers.first < current_layers.first;
  const bool higher_layers_enabled = new_layers.second > current_layers.second;
  const bool disabled_layers = new_layers.first > current_layers.first ||
                               new_layers.second < current_layers.second;

  if (lower_layers_enabled ||
      (higher_layers_enabled && layer_activation_requires_key_frame) ||
      (disabled_layers && layer_deactivation_requires_key_frame_)) {
    force_key_frame_ = true;
  }

  if (current_layers != new_layers) {
    ss_info_needed_ = true;
  }

  config_->rc_target_bitrate = bitrate_allocation.get_sum_kbps();

  if (ExplicitlyConfiguredSpatialLayers()) {
    for (size_t sl_idx = 0; sl_idx < num_spatial_layers_; ++sl_idx) {
      const bool was_layer_active = (config_->ss_target_bitrate[sl_idx] > 0);
      config_->ss_target_bitrate[sl_idx] =
          bitrate_allocation.GetSpatialLayerSum(sl_idx) / 1000;

      for (size_t tl_idx = 0; tl_idx < num_temporal_layers_; ++tl_idx) {
        config_->layer_target_bitrate[sl_idx * num_temporal_layers_ + tl_idx] =
            bitrate_allocation.GetTemporalLayerSum(sl_idx, tl_idx) / 1000;
      }

      if (!was_layer_active) {
        // Reset frame rate controller if layer is resumed after pause.
        framerate_controller_[sl_idx].Reset();
      }

      framerate_controller_[sl_idx].SetTargetRate(
          codec_.spatialLayers[sl_idx].maxFramerate);
    }
  } else {
    float rate_ratio[VPX_MAX_LAYERS] = {0};
    float total = 0;
    for (int i = 0; i < num_spatial_layers_; ++i) {
      if (svc_params_.scaling_factor_num[i] <= 0 ||
          svc_params_.scaling_factor_den[i] <= 0) {
        RTC_LOG(LS_ERROR) << "Scaling factors not specified!";
        return false;
      }
      rate_ratio[i] = static_cast<float>(svc_params_.scaling_factor_num[i]) /
                      svc_params_.scaling_factor_den[i];
      total += rate_ratio[i];
    }

    for (int i = 0; i < num_spatial_layers_; ++i) {
      RTC_CHECK_GT(total, 0);
      config_->ss_target_bitrate[i] = static_cast<unsigned int>(
          config_->rc_target_bitrate * rate_ratio[i] / total);
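      // Temporal-layer targets are cumulative: layer_target_bitrate[tl] holds
      // the sum for TL0..tl. The splits below give the base layer ~2/3 of the
      // spatial layer rate with two temporal layers, and 1/2, 3/4 and the
      // full rate with three.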
      if (num_temporal_layers_ == 1) {
        config_->layer_target_bitrate[i] = config_->ss_target_bitrate[i];
      } else if (num_temporal_layers_ == 2) {
        config_->layer_target_bitrate[i * num_temporal_layers_] =
            config_->ss_target_bitrate[i] * 2 / 3;
        config_->layer_target_bitrate[i * num_temporal_layers_ + 1] =
            config_->ss_target_bitrate[i];
      } else if (num_temporal_layers_ == 3) {
        config_->layer_target_bitrate[i * num_temporal_layers_] =
            config_->ss_target_bitrate[i] / 2;
        config_->layer_target_bitrate[i * num_temporal_layers_ + 1] =
            config_->layer_target_bitrate[i * num_temporal_layers_] +
            (config_->ss_target_bitrate[i] / 4);
        config_->layer_target_bitrate[i * num_temporal_layers_ + 2] =
            config_->ss_target_bitrate[i];
      } else {
        RTC_LOG(LS_ERROR) << "Unsupported number of temporal layers: "
                          << num_temporal_layers_;
        return false;
      }

      framerate_controller_[i].SetTargetRate(codec_.maxFramerate);
    }
  }

  num_active_spatial_layers_ = 0;
  first_active_layer_ = 0;
  bool seen_active_layer = false;
  bool expect_no_more_active_layers = false;
  for (int i = 0; i < num_spatial_layers_; ++i) {
    if (config_->ss_target_bitrate[i] > 0) {
      RTC_DCHECK(!expect_no_more_active_layers)
          << "Deactivating a middle layer is not supported.";
      if (!seen_active_layer) {
        first_active_layer_ = i;
      }
      num_active_spatial_layers_ = i + 1;
      seen_active_layer = true;
    } else {
      expect_no_more_active_layers = seen_active_layer;
    }
  }

  if (higher_layers_enabled && !force_key_frame_) {
    // Prohibit dropping of all layers for the next frame, so the newly enabled
    // layer will have a valid spatial reference.
    for (size_t i = 0; i < num_spatial_layers_; ++i) {
      svc_drop_frame_.framedrop_thresh[i] = 0;
    }
    force_all_active_layers_ = true;
  }

  current_bitrate_allocation_ = bitrate_allocation;
  config_changed_ = true;
  return true;
}

void VP9EncoderImpl::SetRates(const RateControlParameters& parameters) {
  if (!inited_) {
    RTC_LOG(LS_WARNING) << "SetRates() called while uninitialized.";
    return;
  }
  if (encoder_->err) {
    RTC_LOG(LS_WARNING) << "Encoder in error state: " << encoder_->err;
    return;
  }
  if (parameters.framerate_fps < 1.0) {
    RTC_LOG(LS_WARNING) << "Unsupported framerate: "
                        << parameters.framerate_fps;
    return;
  }

  codec_.maxFramerate = static_cast<uint32_t>(parameters.framerate_fps + 0.5);

  if (dynamic_rate_settings_) {
    // Tweak rate control settings based on available network headroom.
    UpdateRateSettings(
        config_, GetRateSettings(parameters.bandwidth_allocation.bps<double>() /
                                 parameters.bitrate.get_sum_bps()));
  }

  bool res = SetSvcRates(parameters.bitrate);
  RTC_DCHECK(res) << "Failed to set new bitrate allocation";
  config_changed_ = true;
}

// TODO(eladalon): s/inst/codec_settings/g.
int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
                               const Settings& settings) {
  if (inst == nullptr) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  if (inst->maxFramerate < 1) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  // Allow zero to represent an unspecified maxBitRate.
  if (inst->maxBitrate > 0 && inst->startBitrate > inst->maxBitrate) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  if (inst->width < 1 || inst->height < 1) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  if (settings.number_of_cores < 1) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  if (inst->VP9().numberOfTemporalLayers > 3) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  // libvpx probably does not support more than 3 spatial layers.
  if (inst->VP9().numberOfSpatialLayers > 3) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }

  int ret_val = Release();
  if (ret_val < 0) {
    return ret_val;
  }
  if (encoder_ == nullptr) {
    encoder_ = new vpx_codec_ctx_t;
  }
  if (config_ == nullptr) {
    config_ = new vpx_codec_enc_cfg_t;
  }
  timestamp_ = 0;
  if (&codec_ != inst) {
    codec_ = *inst;
  }

  force_key_frame_ = true;
  pics_since_key_ = 0;

  num_spatial_layers_ = inst->VP9().numberOfSpatialLayers;
  RTC_DCHECK_GT(num_spatial_layers_, 0);
  num_temporal_layers_ = inst->VP9().numberOfTemporalLayers;
  if (num_temporal_layers_ == 0) {
    num_temporal_layers_ = 1;
  }

  framerate_controller_ = std::vector<FramerateController>(
      num_spatial_layers_, FramerateController(codec_.maxFramerate));

  is_svc_ = (num_spatial_layers_ > 1 || num_temporal_layers_ > 1);

  encoded_image_._completeFrame = true;
  // Populate encoder configuration with default values.
  if (vpx_codec_enc_config_default(vpx_codec_vp9_cx(), config_, 0)) {
    return WEBRTC_VIDEO_CODEC_ERROR;
  }

  vpx_img_fmt img_fmt = VPX_IMG_FMT_NONE;
  unsigned int bits_for_storage = 8;
  switch (profile_) {
    case VP9Profile::kProfile0:
      img_fmt = VPX_IMG_FMT_I420;
      bits_for_storage = 8;
      config_->g_bit_depth = VPX_BITS_8;
      config_->g_profile = 0;
      config_->g_input_bit_depth = 8;
      break;
    case VP9Profile::kProfile1:
      // Encoding of profile 1 is not implemented. It would require extended
      // support for I444, I422, and I440 buffers.
      RTC_NOTREACHED();
      break;
    case VP9Profile::kProfile2:
      img_fmt = VPX_IMG_FMT_I42016;
      bits_for_storage = 16;
      config_->g_bit_depth = VPX_BITS_10;
      config_->g_profile = 2;
      config_->g_input_bit_depth = 10;
      break;
  }

  // Create a wrapper for the image - setting image data to nullptr. The
  // actual pointer will be set in encode. Setting align to 1, as it is
  // meaningless (actual memory is not allocated).
  raw_ =
      vpx_img_wrap(nullptr, img_fmt, codec_.width, codec_.height, 1, nullptr);
  raw_->bit_depth = bits_for_storage;

  config_->g_w = codec_.width;
  config_->g_h = codec_.height;
  config_->rc_target_bitrate = inst->startBitrate;  // in kbit/s
  config_->g_error_resilient = is_svc_ ? VPX_ERROR_RESILIENT_DEFAULT : 0;
  // Setting the time base of the codec.
  config_->g_timebase.num = 1;
  config_->g_timebase.den = 90000;
  config_->g_lag_in_frames = 0;  // 0 - no frame lagging.
  config_->g_threads = 1;
  // Rate control settings.
  config_->rc_dropframe_thresh = inst->VP9().frameDroppingOn ? 30 : 0;
  config_->rc_end_usage = VPX_CBR;
  config_->g_pass = VPX_RC_ONE_PASS;
  config_->rc_min_quantizer =
      codec_.mode == VideoCodecMode::kScreensharing ? 8 : 2;
  config_->rc_max_quantizer = 52;
  config_->rc_undershoot_pct = 50;
  config_->rc_overshoot_pct = 50;
  config_->rc_buf_initial_sz = 500;
  config_->rc_buf_optimal_sz = 600;
  config_->rc_buf_sz = 1000;
  // Set the maximum target size of any key-frame.
  rc_max_intra_target_ = MaxIntraTarget(config_->rc_buf_optimal_sz);
  // Key-frame interval is enforced manually by this wrapper.
  config_->kf_mode = VPX_KF_DISABLED;
  // TODO(webm:1592): work-around for libvpx issue, as it can still
  // put some key-frames at will even in VPX_KF_DISABLED kf_mode.
  config_->kf_max_dist = inst->VP9().keyFrameInterval;
  config_->kf_min_dist = config_->kf_max_dist;
  if (quality_scaler_experiment_.enabled) {
    // In this experiment, the WebRTC-wide quality scaler is used instead of
    // libvpx's internal scaler.
    config_->rc_resize_allowed = 0;
  } else {
    config_->rc_resize_allowed = inst->VP9().automaticResizeOn ? 1 : 0;
  }
  // Determine number of threads based on the image size and #cores.
  config_->g_threads =
      NumberOfThreads(config_->g_w, config_->g_h, settings.number_of_cores);

  cpu_speed_ = GetCpuSpeed(config_->g_w, config_->g_h);

  is_flexible_mode_ = inst->VP9().flexibleMode;

  inter_layer_pred_ = inst->VP9().interLayerPred;

  if (num_spatial_layers_ > 1 &&
      codec_.mode == VideoCodecMode::kScreensharing && !is_flexible_mode_) {
    RTC_LOG(LS_ERROR) << "Flexible mode is required for screenshare with "
                         "several spatial layers";
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }

  // External reference control is required for different frame rates on
  // spatial layers because libvpx generates rtp incompatible references in
  // this case.
  external_ref_control_ =
      !field_trial::IsDisabled("WebRTC-Vp9ExternalRefCtrl") ||
      (num_spatial_layers_ > 1 &&
       codec_.mode == VideoCodecMode::kScreensharing) ||
      inter_layer_pred_ == InterLayerPredMode::kOn;

  if (num_temporal_layers_ == 1) {
    gof_.SetGofInfoVP9(kTemporalStructureMode1);
    config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_NOLAYERING;
    config_->ts_number_layers = 1;
    config_->ts_rate_decimator[0] = 1;
    config_->ts_periodicity = 1;
    config_->ts_layer_id[0] = 0;
  } else if (num_temporal_layers_ == 2) {
    gof_.SetGofInfoVP9(kTemporalStructureMode2);
    config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_0101;
    config_->ts_number_layers = 2;
    config_->ts_rate_decimator[0] = 2;
    config_->ts_rate_decimator[1] = 1;
    config_->ts_periodicity = 2;
    config_->ts_layer_id[0] = 0;
    config_->ts_layer_id[1] = 1;
  } else if (num_temporal_layers_ == 3) {
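    // "0212" pattern: TL0 on every 4th frame, TL1 halfway through the GOF and
    // TL2 in between. The rate decimators 4/2/1 give the layers 1/4, 1/2 and
    // the full frame rate, respectively.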
    gof_.SetGofInfoVP9(kTemporalStructureMode3);
    config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_0212;
    config_->ts_number_layers = 3;
    config_->ts_rate_decimator[0] = 4;
    config_->ts_rate_decimator[1] = 2;
    config_->ts_rate_decimator[2] = 1;
    config_->ts_periodicity = 4;
    config_->ts_layer_id[0] = 0;
    config_->ts_layer_id[1] = 2;
    config_->ts_layer_id[2] = 1;
    config_->ts_layer_id[3] = 2;
  } else {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }

  if (external_ref_control_) {
    config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_BYPASS;
    if (num_temporal_layers_ > 1 && num_spatial_layers_ > 1 &&
        codec_.mode == VideoCodecMode::kScreensharing) {
      // External reference control for several temporal layers with different
      // frame rates on spatial layers is not implemented yet.
      return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
    }
  }
  ref_buf_.clear();

  return InitAndSetControlSettings(inst);
}

int VP9EncoderImpl::NumberOfThreads(int width,
                                    int height,
                                    int number_of_cores) {
  // Keep the number of encoder threads equal to the possible number of column
  // tiles, which is (1, 2, 4, 8). See comments below for
  // VP9E_SET_TILE_COLUMNS.
  if (width * height >= 1280 * 720 && number_of_cores > 4) {
    return 4;
  } else if (width * height >= 640 * 360 && number_of_cores > 2) {
    return 2;
  } else {
    // Use 2 threads for low res on ARM.
#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) || \
    defined(WEBRTC_ANDROID)
    if (width * height >= 320 * 180 && number_of_cores > 2) {
      return 2;
    }
#endif
    // Use 1 thread for resolutions below VGA.
    return 1;
  }
}

int VP9EncoderImpl::InitAndSetControlSettings(const VideoCodec* inst) {
  // Set QP-min/max per spatial and temporal layer.
  int tot_num_layers = num_spatial_layers_ * num_temporal_layers_;
  for (int i = 0; i < tot_num_layers; ++i) {
    svc_params_.max_quantizers[i] = config_->rc_max_quantizer;
    svc_params_.min_quantizers[i] = config_->rc_min_quantizer;
  }
  config_->ss_number_layers = num_spatial_layers_;
  if (ExplicitlyConfiguredSpatialLayers()) {
    for (int i = 0; i < num_spatial_layers_; ++i) {
      const auto& layer = codec_.spatialLayers[i];
      RTC_CHECK_GT(layer.width, 0);
      const int scale_factor = codec_.width / layer.width;
      RTC_DCHECK_GT(scale_factor, 0);

      // Ensure scale factor is an integer.
      if (scale_factor * layer.width != codec_.width) {
        return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
      }

      // Ensure scale factor is the same in both dimensions.
      if (scale_factor * layer.height != codec_.height) {
        return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
      }

      // Ensure scale factor is a power of two.
      const bool is_pow_of_two = (scale_factor & (scale_factor - 1)) == 0;
      if (!is_pow_of_two) {
        return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
      }

      svc_params_.scaling_factor_num[i] = 1;
      svc_params_.scaling_factor_den[i] = scale_factor;

      RTC_DCHECK_GT(codec_.spatialLayers[i].maxFramerate, 0);
      RTC_DCHECK_LE(codec_.spatialLayers[i].maxFramerate, codec_.maxFramerate);
      if (i > 0) {
        // Frame rate of a high spatial layer is supposed to be equal to or
        // higher than the frame rate of a low spatial layer.
        RTC_DCHECK_GE(codec_.spatialLayers[i].maxFramerate,
                      codec_.spatialLayers[i - 1].maxFramerate);
      }
    }
  } else {
    int scaling_factor_num = 256;
    for (int i = num_spatial_layers_ - 1; i >= 0; --i) {
      // 1:2 scaling in each dimension.
      svc_params_.scaling_factor_num[i] = scaling_factor_num;
      svc_params_.scaling_factor_den[i] = 256;
      scaling_factor_num /= 2;
    }
  }

  SvcRateAllocator init_allocator(codec_);
  current_bitrate_allocation_ =
      init_allocator.Allocate(VideoBitrateAllocationParameters(
          inst->startBitrate * 1000, inst->maxFramerate));
  if (!SetSvcRates(current_bitrate_allocation_)) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }

  const vpx_codec_err_t rv = vpx_codec_enc_init(
      encoder_, vpx_codec_vp9_cx(), config_,
      config_->g_bit_depth == VPX_BITS_8 ? 0 : VPX_CODEC_USE_HIGHBITDEPTH);
  if (rv != VPX_CODEC_OK) {
    RTC_LOG(LS_ERROR) << "Init error: " << vpx_codec_err_to_string(rv);
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }
  vpx_codec_control(encoder_, VP8E_SET_CPUUSED, cpu_speed_);
  vpx_codec_control(encoder_, VP8E_SET_MAX_INTRA_BITRATE_PCT,
                    rc_max_intra_target_);
  vpx_codec_control(encoder_, VP9E_SET_AQ_MODE,
                    inst->VP9().adaptiveQpMode ? 3 : 0);

  vpx_codec_control(encoder_, VP9E_SET_FRAME_PARALLEL_DECODING, 0);
  vpx_codec_control(encoder_, VP9E_SET_SVC_GF_TEMPORAL_REF, 0);

  if (is_svc_) {
    vpx_codec_control(encoder_, VP9E_SET_SVC, 1);
    vpx_codec_control(encoder_, VP9E_SET_SVC_PARAMETERS, &svc_params_);
  }

  if (num_spatial_layers_ > 1) {
    switch (inter_layer_pred_) {
      case InterLayerPredMode::kOn:
        vpx_codec_control(encoder_, VP9E_SET_SVC_INTER_LAYER_PRED, 0);
        break;
      case InterLayerPredMode::kOff:
        vpx_codec_control(encoder_, VP9E_SET_SVC_INTER_LAYER_PRED, 1);
        break;
      case InterLayerPredMode::kOnKeyPic:
        vpx_codec_control(encoder_, VP9E_SET_SVC_INTER_LAYER_PRED, 2);
        break;
      default:
        RTC_NOTREACHED();
    }

    memset(&svc_drop_frame_, 0, sizeof(svc_drop_frame_));
    const bool reverse_constrained_drop_mode =
        inter_layer_pred_ == InterLayerPredMode::kOn &&
        codec_.mode == VideoCodecMode::kScreensharing &&
        num_spatial_layers_ > 1;
    if (reverse_constrained_drop_mode) {
      // Screenshare dropping mode: drop a layer only together with all lower
      // layers. This ensures that drops on lower layers won't reduce the frame
      // rate of higher layers and that the reference structure stays
      // RTP-compatible.
#if 0
      // CONSTRAINED_FROM_ABOVE_DROP is not defined in the available version of
      // libvpx.
      svc_drop_frame_.framedrop_mode = CONSTRAINED_FROM_ABOVE_DROP;
#else
      abort();
#endif
      svc_drop_frame_.max_consec_drop = 5;
      for (size_t i = 0; i < num_spatial_layers_; ++i) {
        svc_drop_frame_.framedrop_thresh[i] = config_->rc_dropframe_thresh;
      }
      // No buffering is needed because the highest layer is always present in
      // all frames in CONSTRAINED_FROM_ABOVE_DROP mode.
      layer_buffering_ = false;
    } else {
      // Configure encoder to drop entire superframe whenever it needs to drop
      // a layer. This mode is preferred over per-layer dropping which causes
      // quality flickering and is not compatible with RTP non-flexible mode.
      svc_drop_frame_.framedrop_mode =
          full_superframe_drop_ ? FULL_SUPERFRAME_DROP : CONSTRAINED_LAYER_DROP;
      // Buffering is needed only for constrained layer drop, as it's not clear
      // which frame is the last.
      layer_buffering_ = !full_superframe_drop_;
      svc_drop_frame_.max_consec_drop = std::numeric_limits<int>::max();
      for (size_t i = 0; i < num_spatial_layers_; ++i) {
        svc_drop_frame_.framedrop_thresh[i] = config_->rc_dropframe_thresh;
      }
    }
    vpx_codec_control(encoder_, VP9E_SET_SVC_FRAME_DROP_LAYER,
                      &svc_drop_frame_);
  }

  // Register callback for getting each spatial layer.
  vpx_codec_priv_output_cx_pkt_cb_pair_t cbp = {
      VP9EncoderImpl::EncoderOutputCodedPacketCallback,
      reinterpret_cast<void*>(this)};
  vpx_codec_control(encoder_, VP9E_REGISTER_CX_CALLBACK,
                    reinterpret_cast<void*>(&cbp));

  // Control function to set the number of column tiles in encoding a frame, in
  // log2 unit: e.g., 0 = 1 tile column, 1 = 2 tile columns, 2 = 4 tile columns.
  // The number of tile columns will be capped by the encoder based on image
  // size (minimum width of tile column is 256 pixels, maximum is 4096).
  vpx_codec_control(encoder_, VP9E_SET_TILE_COLUMNS, (config_->g_threads >> 1));
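  // E.g. with g_threads == 4 this passes (4 >> 1) == 2, i.e. 2^2 = 4 tile
  // columns, matching one encoder thread per tile column.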

  // Turn on row-based multithreading.
  vpx_codec_control(encoder_, VP9E_SET_ROW_MT, 1);

#if !defined(WEBRTC_ARCH_ARM) && !defined(WEBRTC_ARCH_ARM64) && \
    !defined(ANDROID)
  // Do not enable the denoiser on ARM since optimization is pending.
  // Denoiser is on by default on other platforms.
  vpx_codec_control(encoder_, VP9E_SET_NOISE_SENSITIVITY,
                    inst->VP9().denoisingOn ? 1 : 0);
#endif

  if (codec_.mode == VideoCodecMode::kScreensharing) {
    // Adjust internal parameters to screen content.
    vpx_codec_control(encoder_, VP9E_SET_TUNE_CONTENT, 1);
  }
  // Enable encoder skip of static/low content blocks.
  vpx_codec_control(encoder_, VP8E_SET_STATIC_THRESHOLD, 1);
  inited_ = true;
  config_changed_ = true;
  return WEBRTC_VIDEO_CODEC_OK;
}

uint32_t VP9EncoderImpl::MaxIntraTarget(uint32_t optimal_buffer_size) {
  // Set max to the optimal buffer level (normalized by target BR),
  // and scaled by a scale_par.
  // Max target size = scale_par * optimal_buffer_size * targetBR[Kbps].
  // This value is presented in percentage of perFrameBw:
  // perFrameBw = targetBR[Kbps] * 1000 / framerate.
  // The target in % is as follows:
  float scale_par = 0.5;
  uint32_t target_pct =
      optimal_buffer_size * scale_par * codec_.maxFramerate / 10;
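  // E.g. with the configured rc_buf_optimal_sz of 600 ms at 30 fps this gives
  // 600 * 0.5 * 30 / 10 = 900, i.e. a key frame may target up to 9x the
  // average per-frame bandwidth.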
  // Don't go below 3 times the per frame bandwidth.
  const uint32_t min_intra_size = 300;
  return (target_pct < min_intra_size) ? min_intra_size : target_pct;
}

int VP9EncoderImpl::Encode(const VideoFrame& input_image,
                           const std::vector<VideoFrameType>* frame_types) {
  if (!inited_) {
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }
  if (encoded_complete_callback_ == nullptr) {
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }
  if (num_active_spatial_layers_ == 0) {
    // All spatial layers are disabled, return without encoding anything.
    return WEBRTC_VIDEO_CODEC_OK;
  }

  // We only support one stream at the moment.
  if (frame_types && !frame_types->empty()) {
    if ((*frame_types)[0] == VideoFrameType::kVideoFrameKey) {
      force_key_frame_ = true;
    }
  }

  if (pics_since_key_ + 1 ==
      static_cast<size_t>(codec_.VP9()->keyFrameInterval)) {
    force_key_frame_ = true;
  }

  vpx_svc_layer_id_t layer_id = {0};
  if (!force_key_frame_) {
    const size_t gof_idx = (pics_since_key_ + 1) % gof_.num_frames_in_gof;
    layer_id.temporal_layer_id = gof_.temporal_idx[gof_idx];

    if (VideoCodecMode::kScreensharing == codec_.mode) {
      const uint32_t frame_timestamp_ms =
          1000 * input_image.timestamp() / kVideoPayloadTypeFrequency;

      // To ensure that several rate-limiters with different limits don't
      // interfere, they must be queried in order of increasing limit.

      bool use_steady_state_limiter =
          variable_framerate_experiment_.enabled &&
          input_image.update_rect().IsEmpty() &&
          num_steady_state_frames_ >=
              variable_framerate_experiment_.frames_before_steady_state;

      // Need to check all frame limiters, even if lower layers are disabled,
      // because the variable frame-rate limiter should be checked after the
      // first layer. It's easier to overwrite the active-layer decision
      // afterwards than to handle every case here.
      for (uint8_t sl_idx = 0; sl_idx < num_active_spatial_layers_; ++sl_idx) {
        const float layer_fps =
            framerate_controller_[layer_id.spatial_layer_id].GetTargetRate();
        // Use the steady-state rate-limiter at the correct place.
        if (use_steady_state_limiter &&
            layer_fps > variable_framerate_experiment_.framerate_limit - 1e-9) {
          if (variable_framerate_controller_.DropFrame(frame_timestamp_ms)) {
            layer_id.spatial_layer_id = num_active_spatial_layers_;
          }
          // Break always: if the rate limiter triggered a frame drop, there is
          // no need to continue; otherwise, the layer rate is below the
          // remaining limiters' thresholds.
          break;
        }
        if (framerate_controller_[sl_idx].DropFrame(frame_timestamp_ms)) {
          ++layer_id.spatial_layer_id;
        } else {
          break;
        }
      }

      if (use_steady_state_limiter &&
          layer_id.spatial_layer_id < num_active_spatial_layers_) {
        variable_framerate_controller_.AddFrame(frame_timestamp_ms);
      }
    }

    if (force_all_active_layers_) {
      layer_id.spatial_layer_id = first_active_layer_;
      force_all_active_layers_ = false;
    }

    RTC_DCHECK_LE(layer_id.spatial_layer_id, num_active_spatial_layers_);
    if (layer_id.spatial_layer_id >= num_active_spatial_layers_) {
      // Drop entire picture.
      return WEBRTC_VIDEO_CODEC_OK;
    }
  }

  // Need to set temporal layer id on ALL layers, even disabled ones.
  // Otherwise libvpx might produce frames on a disabled layer:
  // http://crbug.com/1051476
  for (int sl_idx = 0; sl_idx < num_spatial_layers_; ++sl_idx) {
    layer_id.temporal_layer_id_per_spatial[sl_idx] = layer_id.temporal_layer_id;
  }

  if (layer_id.spatial_layer_id < first_active_layer_) {
    layer_id.spatial_layer_id = first_active_layer_;
  }

  vpx_codec_control(encoder_, VP9E_SET_SVC_LAYER_ID, &layer_id);

  if (num_spatial_layers_ > 1) {
    // Update frame dropping settings as they may change on per-frame basis.
    vpx_codec_control(encoder_, VP9E_SET_SVC_FRAME_DROP_LAYER,
                      &svc_drop_frame_);
  }

  if (config_changed_) {
    if (vpx_codec_enc_config_set(encoder_, config_)) {
      return WEBRTC_VIDEO_CODEC_ERROR;
    }
    config_changed_ = false;
  }

  RTC_DCHECK_EQ(input_image.width(), raw_->d_w);
  RTC_DCHECK_EQ(input_image.height(), raw_->d_h);

  // Set input image for use in the callback.
  // This is needed because the callback requires some information from
  // input_image. Saving only the required fields (such as the timestamp)
  // would also work.
  input_image_ = &input_image;

  // Keep reference to buffer until encode completes.
  rtc::scoped_refptr<I420BufferInterface> i420_buffer;
  const I010BufferInterface* i010_buffer;
  rtc::scoped_refptr<const I010BufferInterface> i010_copy;
  switch (profile_) {
    case VP9Profile::kProfile0: {
      i420_buffer = input_image.video_frame_buffer()->ToI420();
      // Image in vpx_image_t format.
      // Input image is const. VPX's raw image is not defined as const.
      raw_->planes[VPX_PLANE_Y] = const_cast<uint8_t*>(i420_buffer->DataY());
      raw_->planes[VPX_PLANE_U] = const_cast<uint8_t*>(i420_buffer->DataU());
      raw_->planes[VPX_PLANE_V] = const_cast<uint8_t*>(i420_buffer->DataV());
      raw_->stride[VPX_PLANE_Y] = i420_buffer->StrideY();
      raw_->stride[VPX_PLANE_U] = i420_buffer->StrideU();
      raw_->stride[VPX_PLANE_V] = i420_buffer->StrideV();
      break;
    }
    case VP9Profile::kProfile1: {
      RTC_NOTREACHED();
      break;
    }
    case VP9Profile::kProfile2: {
      // We can inject kI010 frames directly for encode. All other formats
      // should be converted to it.
      switch (input_image.video_frame_buffer()->type()) {
        case VideoFrameBuffer::Type::kI010: {
          i010_buffer = input_image.video_frame_buffer()->GetI010();
          break;
        }
        default: {
          i010_copy =
              I010Buffer::Copy(*input_image.video_frame_buffer()->ToI420());
          i010_buffer = i010_copy.get();
        }
      }
      raw_->planes[VPX_PLANE_Y] = const_cast<uint8_t*>(
          reinterpret_cast<const uint8_t*>(i010_buffer->DataY()));
      raw_->planes[VPX_PLANE_U] = const_cast<uint8_t*>(
          reinterpret_cast<const uint8_t*>(i010_buffer->DataU()));
      raw_->planes[VPX_PLANE_V] = const_cast<uint8_t*>(
          reinterpret_cast<const uint8_t*>(i010_buffer->DataV()));
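      // I010 strides are counted in 16-bit samples while vpx_image_t strides
      // are in bytes, hence the factor of two below.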
      raw_->stride[VPX_PLANE_Y] = i010_buffer->StrideY() * 2;
      raw_->stride[VPX_PLANE_U] = i010_buffer->StrideU() * 2;
      raw_->stride[VPX_PLANE_V] = i010_buffer->StrideV() * 2;
      break;
    }
  }

  vpx_enc_frame_flags_t flags = 0;
  if (force_key_frame_) {
    flags = VPX_EFLAG_FORCE_KF;
  }

  if (external_ref_control_) {
    vpx_svc_ref_frame_config_t ref_config =
        SetReferences(force_key_frame_, layer_id.spatial_layer_id);

    if (VideoCodecMode::kScreensharing == codec_.mode) {
      for (uint8_t sl_idx = 0; sl_idx < num_active_spatial_layers_; ++sl_idx) {
        ref_config.duration[sl_idx] = static_cast<int64_t>(
            90000 / (std::min(static_cast<float>(codec_.maxFramerate),
                              framerate_controller_[sl_idx].GetTargetRate())));
      }
    }

    vpx_codec_control(encoder_, VP9E_SET_SVC_REF_FRAME_CONFIG, &ref_config);
  }

  first_frame_in_picture_ = true;

  // TODO(ssilkin): Frame duration should be specified per spatial layer
  // since their frame rates can be different. For now, calculate frame
  // duration based on the target frame rate of the highest spatial layer,
  // whose frame rate is supposed to be equal to or higher than the frame rate
  // of the low spatial layers. Also, the timestamp should represent the actual
  // time passed since the previous frame (not the 'expected' time). Then the
  // rate controller can drain the buffer more accurately.
  RTC_DCHECK_GE(framerate_controller_.size(), num_active_spatial_layers_);
  float target_framerate_fps =
      (codec_.mode == VideoCodecMode::kScreensharing)
          ? std::min(static_cast<float>(codec_.maxFramerate),
                     framerate_controller_[num_active_spatial_layers_ - 1]
                         .GetTargetRate())
          : codec_.maxFramerate;
  uint32_t duration = static_cast<uint32_t>(90000 / target_framerate_fps);
  const vpx_codec_err_t rv = vpx_codec_encode(encoder_, raw_, timestamp_,
                                              duration, flags, VPX_DL_REALTIME);
  if (rv != VPX_CODEC_OK) {
    RTC_LOG(LS_ERROR) << "Encoding error: " << vpx_codec_err_to_string(rv)
                      << "\n"
                         "Details: "
                      << vpx_codec_error(encoder_) << "\n"
                      << vpx_codec_error_detail(encoder_);
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  timestamp_ += duration;

  if (layer_buffering_) {
    const bool end_of_picture = true;
    DeliverBufferedFrame(end_of_picture);
  }

  return WEBRTC_VIDEO_CODEC_OK;
}

void VP9EncoderImpl::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
                                           absl::optional<int>* spatial_idx,
                                           const vpx_codec_cx_pkt& pkt,
                                           uint32_t timestamp) {
  RTC_CHECK(codec_specific != nullptr);
  codec_specific->codecType = kVideoCodecVP9;
  CodecSpecificInfoVP9* vp9_info = &(codec_specific->codecSpecific.VP9);

  vp9_info->first_frame_in_picture = first_frame_in_picture_;
  vp9_info->flexible_mode = is_flexible_mode_;

  if (pkt.data.frame.flags & VPX_FRAME_IS_KEY) {
    pics_since_key_ = 0;
  } else if (first_frame_in_picture_) {
    ++pics_since_key_;
  }

  vpx_svc_layer_id_t layer_id = {0};
  vpx_codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id);

  // Can't have keyframe with non-zero temporal layer.
  RTC_DCHECK(pics_since_key_ != 0 || layer_id.temporal_layer_id == 0);

  RTC_CHECK_GT(num_temporal_layers_, 0);
  RTC_CHECK_GT(num_active_spatial_layers_, 0);
  if (num_temporal_layers_ == 1) {
    RTC_CHECK_EQ(layer_id.temporal_layer_id, 0);
    vp9_info->temporal_idx = kNoTemporalIdx;
  } else {
    vp9_info->temporal_idx = layer_id.temporal_layer_id;
  }
  if (num_active_spatial_layers_ == 1) {
    RTC_CHECK_EQ(layer_id.spatial_layer_id, 0);
    *spatial_idx = absl::nullopt;
  } else {
    *spatial_idx = layer_id.spatial_layer_id;
  }

  // TODO(asapersson): this info has to be obtained from the encoder.
  vp9_info->temporal_up_switch = false;

  const bool is_key_pic = (pics_since_key_ == 0);
  const bool is_inter_layer_pred_allowed =
      (inter_layer_pred_ == InterLayerPredMode::kOn ||
       (inter_layer_pred_ == InterLayerPredMode::kOnKeyPic && is_key_pic));

  // Always set inter_layer_predicted to true on a high-layer frame if
  // inter-layer prediction (ILP) is allowed, even if the encoder didn't
  // actually use it. Setting inter_layer_predicted to false would allow the
  // receiver to decode a high-layer frame without decoding the low-layer
  // frame. If that happened (e.g. if the low-layer frame was lost), the
  // receiver would not be able to decode the next high-layer frame which
  // uses ILP.
  vp9_info->inter_layer_predicted =
      first_frame_in_picture_ ? false : is_inter_layer_pred_allowed;

  // Mark all low spatial layer frames as references (not just frames of
  // active low spatial layers) if inter-layer prediction is enabled, since
  // these frames are indirect references of the high spatial layer, which can
  // later be enabled without a key frame.
  vp9_info->non_ref_for_inter_layer_pred =
      !is_inter_layer_pred_allowed ||
      layer_id.spatial_layer_id + 1 == num_spatial_layers_;

  // Always populate this, so that the packetizer can properly set the marker
  // bit.
  vp9_info->num_spatial_layers = num_active_spatial_layers_;
  vp9_info->first_active_layer = first_active_layer_;

  vp9_info->num_ref_pics = 0;
  FillReferenceIndices(pkt, pics_since_key_, vp9_info->inter_layer_predicted,
                       vp9_info);
  if (vp9_info->flexible_mode) {
    vp9_info->gof_idx = kNoGofIdx;
  } else {
    vp9_info->gof_idx =
        static_cast<uint8_t>(pics_since_key_ % gof_.num_frames_in_gof);
    vp9_info->temporal_up_switch = gof_.temporal_up_switch[vp9_info->gof_idx];
    RTC_DCHECK(vp9_info->num_ref_pics == gof_.num_ref_pics[vp9_info->gof_idx] ||
               vp9_info->num_ref_pics == 0);
  }

  vp9_info->inter_pic_predicted = (!is_key_pic && vp9_info->num_ref_pics > 0);

  // Write SS on key frame of independently coded spatial layers and on base
  // temporal/spatial layer frame if number of layers changed without issuing
  // a key picture (inter-layer prediction is enabled).
  const bool is_key_frame = is_key_pic && !vp9_info->inter_layer_predicted;
  if (is_key_frame || (ss_info_needed_ && layer_id.temporal_layer_id == 0 &&
                       layer_id.spatial_layer_id == first_active_layer_)) {
    vp9_info->ss_data_available = true;
    vp9_info->spatial_layer_resolution_present = true;
    // Signal disabled layers.
    for (size_t i = 0; i < first_active_layer_; ++i) {
      vp9_info->width[i] = 0;
      vp9_info->height[i] = 0;
    }
    for (size_t i = first_active_layer_; i < num_active_spatial_layers_; ++i) {
      vp9_info->width[i] = codec_.width * svc_params_.scaling_factor_num[i] /
                           svc_params_.scaling_factor_den[i];
      vp9_info->height[i] = codec_.height * svc_params_.scaling_factor_num[i] /
                            svc_params_.scaling_factor_den[i];
    }
    if (vp9_info->flexible_mode) {
      vp9_info->gof.num_frames_in_gof = 0;
    } else {
      vp9_info->gof.CopyGofInfoVP9(gof_);
    }

    ss_info_needed_ = false;
  } else {
    vp9_info->ss_data_available = false;
  }

  first_frame_in_picture_ = false;
}

void VP9EncoderImpl::FillReferenceIndices(const vpx_codec_cx_pkt& pkt,
                                          const size_t pic_num,
                                          const bool inter_layer_predicted,
                                          CodecSpecificInfoVP9* vp9_info) {
  vpx_svc_layer_id_t layer_id = {0};
  vpx_codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id);

  const bool is_key_frame =
      (pkt.data.frame.flags & VPX_FRAME_IS_KEY) ? true : false;

  std::vector<RefFrameBuffer> ref_buf_list;

  if (is_svc_) {
    vpx_svc_ref_frame_config_t enc_layer_conf = {{0}};
    vpx_codec_control(encoder_, VP9E_GET_SVC_REF_FRAME_CONFIG, &enc_layer_conf);
    int ref_buf_flags = 0;

    if (enc_layer_conf.reference_last[layer_id.spatial_layer_id]) {
      const size_t fb_idx =
          enc_layer_conf.lst_fb_idx[layer_id.spatial_layer_id];
      RTC_DCHECK(ref_buf_.find(fb_idx) != ref_buf_.end());
      if (std::find(ref_buf_list.begin(), ref_buf_list.end(),
                    ref_buf_.at(fb_idx)) == ref_buf_list.end()) {
        ref_buf_list.push_back(ref_buf_.at(fb_idx));
        ref_buf_flags |= 1 << fb_idx;
      }
    }

    if (enc_layer_conf.reference_alt_ref[layer_id.spatial_layer_id]) {
      const size_t fb_idx =
          enc_layer_conf.alt_fb_idx[layer_id.spatial_layer_id];
      RTC_DCHECK(ref_buf_.find(fb_idx) != ref_buf_.end());
      if (std::find(ref_buf_list.begin(), ref_buf_list.end(),
                    ref_buf_.at(fb_idx)) == ref_buf_list.end()) {
        ref_buf_list.push_back(ref_buf_.at(fb_idx));
        ref_buf_flags |= 1 << fb_idx;
      }
    }

    if (enc_layer_conf.reference_golden[layer_id.spatial_layer_id]) {
      const size_t fb_idx =
          enc_layer_conf.gld_fb_idx[layer_id.spatial_layer_id];
      RTC_DCHECK(ref_buf_.find(fb_idx) != ref_buf_.end());
      if (std::find(ref_buf_list.begin(), ref_buf_list.end(),
                    ref_buf_.at(fb_idx)) == ref_buf_list.end()) {
        ref_buf_list.push_back(ref_buf_.at(fb_idx));
        ref_buf_flags |= 1 << fb_idx;
      }
    }

    RTC_LOG(LS_VERBOSE) << "Frame " << pic_num << " sl "
                        << layer_id.spatial_layer_id << " tl "
                        << layer_id.temporal_layer_id << " referenced buffers "
                        << (ref_buf_flags & (1 << 0) ? 1 : 0)
                        << (ref_buf_flags & (1 << 1) ? 1 : 0)
                        << (ref_buf_flags & (1 << 2) ? 1 : 0)
                        << (ref_buf_flags & (1 << 3) ? 1 : 0)
                        << (ref_buf_flags & (1 << 4) ? 1 : 0)
                        << (ref_buf_flags & (1 << 5) ? 1 : 0)
                        << (ref_buf_flags & (1 << 6) ? 1 : 0)
                        << (ref_buf_flags & (1 << 7) ? 1 : 0);

  } else if (!is_key_frame) {
    RTC_DCHECK_EQ(num_spatial_layers_, 1);
    RTC_DCHECK_EQ(num_temporal_layers_, 1);
    // In non-SVC mode the encoder doesn't provide a reference list. Assume
    // each frame references the previous one, which is stored in buffer 0.
    ref_buf_list.push_back(ref_buf_.at(0));
  }

  size_t max_ref_temporal_layer_id = 0;

  std::vector<size_t> ref_pid_list;

  vp9_info->num_ref_pics = 0;
  for (const RefFrameBuffer& ref_buf : ref_buf_list) {
    RTC_DCHECK_LE(ref_buf.pic_num, pic_num);
    if (ref_buf.pic_num < pic_num) {
      if (inter_layer_pred_ != InterLayerPredMode::kOn) {
        // RTP spec limits temporal prediction to the same spatial layer.
        // It is safe to ignore this requirement if inter-layer prediction is
        // enabled for all frames when all base frames are relayed to receiver.
        RTC_DCHECK_EQ(ref_buf.spatial_layer_id, layer_id.spatial_layer_id);
      } else {
        RTC_DCHECK_LE(ref_buf.spatial_layer_id, layer_id.spatial_layer_id);
      }
      RTC_DCHECK_LE(ref_buf.temporal_layer_id, layer_id.temporal_layer_id);

      // The encoder may reference several spatial layers on the same previous
      // frame if some spatial layers were skipped on the current frame.
      // We shouldn't put duplicate references, as that may break some old
      // clients and isn't RTP-compatible.
      if (std::find(ref_pid_list.begin(), ref_pid_list.end(),
                    ref_buf.pic_num) != ref_pid_list.end()) {
        continue;
      }
      ref_pid_list.push_back(ref_buf.pic_num);

      const size_t p_diff = pic_num - ref_buf.pic_num;
      RTC_DCHECK_LE(p_diff, 127UL);

      vp9_info->p_diff[vp9_info->num_ref_pics] = static_cast<uint8_t>(p_diff);
      ++vp9_info->num_ref_pics;

      max_ref_temporal_layer_id =
          std::max(max_ref_temporal_layer_id, ref_buf.temporal_layer_id);
    } else {
      RTC_DCHECK(inter_layer_predicted);
      // The RTP spec only allows using the previous spatial layer for
      // inter-layer prediction.
      RTC_DCHECK_EQ(ref_buf.spatial_layer_id + 1, layer_id.spatial_layer_id);
    }
  }

  vp9_info->temporal_up_switch =
      (max_ref_temporal_layer_id <
       static_cast<size_t>(layer_id.temporal_layer_id));
}

void VP9EncoderImpl::UpdateReferenceBuffers(const vpx_codec_cx_pkt& pkt,
                                            const size_t pic_num) {
  vpx_svc_layer_id_t layer_id = {0};
  vpx_codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id);

  RefFrameBuffer frame_buf(pic_num, layer_id.spatial_layer_id,
                           layer_id.temporal_layer_id);

  if (is_svc_) {
    vpx_svc_ref_frame_config_t enc_layer_conf = {{0}};
    vpx_codec_control(encoder_, VP9E_GET_SVC_REF_FRAME_CONFIG, &enc_layer_conf);
    const int update_buffer_slot =
        enc_layer_conf.update_buffer_slot[layer_id.spatial_layer_id];

    for (size_t i = 0; i < kNumVp9Buffers; ++i) {
      if (update_buffer_slot & (1 << i)) {
        ref_buf_[i] = frame_buf;
      }
    }

    RTC_LOG(LS_VERBOSE) << "Frame " << pic_num << " sl "
                        << layer_id.spatial_layer_id << " tl "
                        << layer_id.temporal_layer_id << " updated buffers "
                        << (update_buffer_slot & (1 << 0) ? 1 : 0)
                        << (update_buffer_slot & (1 << 1) ? 1 : 0)
                        << (update_buffer_slot & (1 << 2) ? 1 : 0)
                        << (update_buffer_slot & (1 << 3) ? 1 : 0)
                        << (update_buffer_slot & (1 << 4) ? 1 : 0)
                        << (update_buffer_slot & (1 << 5) ? 1 : 0)
                        << (update_buffer_slot & (1 << 6) ? 1 : 0)
                        << (update_buffer_slot & (1 << 7) ? 1 : 0);
  } else {
    RTC_DCHECK_EQ(num_spatial_layers_, 1);
    RTC_DCHECK_EQ(num_temporal_layers_, 1);
    // In non-SVC mode the encoder doesn't provide a reference list. Assume
    // each frame is a reference frame and store it in buffer 0.
    ref_buf_[0] = frame_buf;
  }
}

vpx_svc_ref_frame_config_t VP9EncoderImpl::SetReferences(
    bool is_key_pic,
    size_t first_active_spatial_layer_id) {
  // kRefBufIdx, kUpdBufIdx need to be updated to support longer GOFs.
  RTC_DCHECK_LE(gof_.num_frames_in_gof, 4);

  vpx_svc_ref_frame_config_t ref_config;
  memset(&ref_config, 0, sizeof(ref_config));

  const size_t num_temporal_refs = std::max(1, num_temporal_layers_ - 1);
  const bool is_inter_layer_pred_allowed =
      inter_layer_pred_ == InterLayerPredMode::kOn ||
      (inter_layer_pred_ == InterLayerPredMode::kOnKeyPic && is_key_pic);
  absl::optional<int> last_updated_buf_idx;

  // Put the temporal reference into LAST and the spatial reference into
  // GOLDEN. Update the frame buffer (i.e. store the encoded frame) if the
  // current frame is a temporal reference (i.e. it belongs to a low temporal
  // layer) or if it is a spatial reference. In the latter case, always store
  // the spatial reference in the last reference frame buffer.
  // For the case of 3 temporal and 3 spatial layers we need 6 frame buffers
  // for temporal references plus 1 buffer for the spatial reference. 7 buffers
  // in total.
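  // With that layout and kNumVp9Buffers == 8, buffers 0-1 hold the temporal
  // references of SL0, buffers 2-3 those of SL1, buffers 4-5 those of SL2,
  // and buffer 7 holds the spatial reference.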

  for (size_t sl_idx = first_active_spatial_layer_id;
       sl_idx < num_active_spatial_layers_; ++sl_idx) {
    const size_t curr_pic_num = is_key_pic ? 0 : pics_since_key_ + 1;
    const size_t gof_idx = curr_pic_num % gof_.num_frames_in_gof;

    if (!is_key_pic) {
      // Set up the temporal reference.
      const int buf_idx = sl_idx * num_temporal_refs + kRefBufIdx[gof_idx];

      // The last reference frame buffer is reserved for the spatial reference.
      // It is not supposed to be used for temporal prediction.
      RTC_DCHECK_LT(buf_idx, kNumVp9Buffers - 1);

      const int pid_diff = curr_pic_num - ref_buf_[buf_idx].pic_num;
      // An incorrect spatial layer may be in the buffer due to a key-frame.
      const bool same_spatial_layer =
          ref_buf_[buf_idx].spatial_layer_id == sl_idx;
      bool correct_pid = false;
      if (is_flexible_mode_) {
        correct_pid = pid_diff > 0 && pid_diff < kMaxAllowedPidDiff;
      } else {
        // The code below assumes a single temporal reference.
        RTC_DCHECK_EQ(gof_.num_ref_pics[gof_idx], 1);
        correct_pid = pid_diff == gof_.pid_diff[gof_idx][0];
      }

      if (same_spatial_layer && correct_pid) {
        ref_config.lst_fb_idx[sl_idx] = buf_idx;
        ref_config.reference_last[sl_idx] = 1;
      } else {
        // This reference doesn't match the one specified by the GOF. This can
        // only happen if a spatial layer is enabled dynamically without a key
        // frame. Spatial prediction is supposed to be enabled in this case.
        RTC_DCHECK(is_inter_layer_pred_allowed &&
                   sl_idx > first_active_spatial_layer_id);
      }
    }

    if (is_inter_layer_pred_allowed && sl_idx > first_active_spatial_layer_id) {
      // Set up the spatial reference.
      RTC_DCHECK(last_updated_buf_idx);
      ref_config.gld_fb_idx[sl_idx] = *last_updated_buf_idx;
      ref_config.reference_golden[sl_idx] = 1;
    } else {
      RTC_DCHECK(ref_config.reference_last[sl_idx] != 0 ||
                 sl_idx == first_active_spatial_layer_id ||
                 inter_layer_pred_ == InterLayerPredMode::kOff);
    }

    last_updated_buf_idx.reset();

    if (gof_.temporal_idx[gof_idx] < num_temporal_layers_ - 1 ||
        num_temporal_layers_ == 1) {
      last_updated_buf_idx = sl_idx * num_temporal_refs + kUpdBufIdx[gof_idx];

      // Ensure the last frame buffer is not used for temporal prediction (it
      // is reserved for the spatial reference).
      RTC_DCHECK_LT(*last_updated_buf_idx, kNumVp9Buffers - 1);
    } else if (is_inter_layer_pred_allowed) {
      last_updated_buf_idx = kNumVp9Buffers - 1;
    }

    if (last_updated_buf_idx) {
      ref_config.update_buffer_slot[sl_idx] = 1 << *last_updated_buf_idx;
    }
  }

  return ref_config;
}
1459
GetEncodedLayerFrame(const vpx_codec_cx_pkt * pkt)1460 int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
1461 RTC_DCHECK_EQ(pkt->kind, VPX_CODEC_CX_FRAME_PKT);
1462
1463 if (pkt->data.frame.sz == 0) {
1464 // Ignore dropped frame.
1465 return WEBRTC_VIDEO_CODEC_OK;
1466 }
1467
1468 vpx_svc_layer_id_t layer_id = {0};
1469 vpx_codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id);
1470
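  // With layer buffering enabled, each encoded spatial layer is held back
  // until the next packet arrives; this packet's arrival proves the buffered
  // frame was not the last layer of its picture, hence end_of_picture below
  // is false.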
  if (layer_buffering_) {
    // Deliver the buffered low spatial layer frame.
    const bool end_of_picture = false;
    DeliverBufferedFrame(end_of_picture);
  }

  // TODO(nisse): Introduce some buffer cache or buffer pool, to reduce
  // allocations and/or copy operations.
  encoded_image_.SetEncodedData(EncodedImageBuffer::Create(
      static_cast<const uint8_t*>(pkt->data.frame.buf), pkt->data.frame.sz));

  const bool is_key_frame = (pkt->data.frame.flags & VPX_FRAME_IS_KEY) != 0;
  // Ensure the encoder issued a key frame on request.
  RTC_DCHECK(is_key_frame || !force_key_frame_);

  // Check if the encoded frame is a key frame.
  encoded_image_._frameType = VideoFrameType::kVideoFrameDelta;
  if (is_key_frame) {
    encoded_image_._frameType = VideoFrameType::kVideoFrameKey;
    force_key_frame_ = false;
  }
  RTC_DCHECK_LE(encoded_image_.size(), encoded_image_.capacity());

  codec_specific_ = {};
  absl::optional<int> spatial_index;
  PopulateCodecSpecific(&codec_specific_, &spatial_index, *pkt,
                        input_image_->timestamp());
  encoded_image_.SetSpatialIndex(spatial_index);

  UpdateReferenceBuffers(*pkt, pics_since_key_);

  TRACE_COUNTER1("webrtc", "EncodedFrameSize", encoded_image_.size());
  encoded_image_.SetTimestamp(input_image_->timestamp());
  encoded_image_._encodedHeight =
      pkt->data.frame.height[layer_id.spatial_layer_id];
  encoded_image_._encodedWidth =
      pkt->data.frame.width[layer_id.spatial_layer_id];
  int qp = -1;
  vpx_codec_control(encoder_, VP8E_GET_LAST_QUANTIZER, &qp);
  encoded_image_.qp_ = qp;

  if (!layer_buffering_) {
    const bool end_of_picture = encoded_image_.SpatialIndex().value_or(0) + 1 ==
                                num_active_spatial_layers_;
    DeliverBufferedFrame(end_of_picture);
  }

  return WEBRTC_VIDEO_CODEC_OK;
}

void VP9EncoderImpl::DeliverBufferedFrame(bool end_of_picture) {
  if (encoded_image_.size() > 0) {
    if (num_spatial_layers_ > 1) {
      // Restore frame-dropping settings, as dropping may be temporarily
      // forbidden due to dynamically enabled layers.
      for (size_t i = 0; i < num_spatial_layers_; ++i) {
        svc_drop_frame_.framedrop_thresh[i] = config_->rc_dropframe_thresh;
      }
    }

    codec_specific_.codecSpecific.VP9.end_of_picture = end_of_picture;

    // No data partitioning in VP9, so 1 partition only.
    int part_idx = 0;
    RTPFragmentationHeader frag_info;
    frag_info.VerifyAndAllocateFragmentationHeader(1);
    frag_info.fragmentationOffset[part_idx] = 0;
    frag_info.fragmentationLength[part_idx] = encoded_image_.size();

    encoded_complete_callback_->OnEncodedImage(encoded_image_, &codec_specific_,
                                               &frag_info);

    if (codec_.mode == VideoCodecMode::kScreensharing) {
      const uint8_t spatial_idx = encoded_image_.SpatialIndex().value_or(0);
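      // kVideoPayloadTypeFrequency is the 90 kHz RTP video clock, so this
      // converts the RTP timestamp to milliseconds.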
      const uint32_t frame_timestamp_ms =
          1000 * encoded_image_.Timestamp() / kVideoPayloadTypeFrequency;
      framerate_controller_[spatial_idx].AddFrame(frame_timestamp_ms);

      const size_t steady_state_size = SteadyStateSize(
          spatial_idx, codec_specific_.codecSpecific.VP9.temporal_idx);

      // Only frames on spatial layers whose frame rate may be limited in a
      // steady state are considered for steady-state detection.
      if (framerate_controller_[spatial_idx].GetTargetRate() >
          variable_framerate_experiment_.framerate_limit + 1e-9) {
        if (encoded_image_.qp_ <=
                variable_framerate_experiment_.steady_state_qp &&
            encoded_image_.size() <= steady_state_size) {
          ++num_steady_state_frames_;
        } else {
          num_steady_state_frames_ = 0;
        }
      }
    }
    encoded_image_.set_size(0);
  }
}

int VP9EncoderImpl::RegisterEncodeCompleteCallback(
    EncodedImageCallback* callback) {
  encoded_complete_callback_ = callback;
  return WEBRTC_VIDEO_CODEC_OK;
}

VideoEncoder::EncoderInfo VP9EncoderImpl::GetEncoderInfo() const {
  EncoderInfo info;
  info.supports_native_handle = false;
  info.implementation_name = "libvpx";
  if (quality_scaler_experiment_.enabled) {
    info.scaling_settings = VideoEncoder::ScalingSettings(
        quality_scaler_experiment_.low_qp, quality_scaler_experiment_.high_qp);
  } else {
    info.scaling_settings = VideoEncoder::ScalingSettings::kOff;
  }
  info.has_trusted_rate_controller = trusted_rate_controller_;
  info.is_hardware_accelerated = false;
  info.has_internal_source = false;
  if (inited_) {
    // Find the max configured fps of any active spatial layer.
    float max_fps = 0.0;
    for (size_t si = 0; si < num_spatial_layers_; ++si) {
      if (codec_.spatialLayers[si].active &&
          codec_.spatialLayers[si].maxFramerate > max_fps) {
        max_fps = codec_.spatialLayers[si].maxFramerate;
      }
    }

    for (size_t si = 0; si < num_spatial_layers_; ++si) {
      info.fps_allocation[si].clear();
      if (!codec_.spatialLayers[si].active) {
        continue;
      }

      // This spatial layer may already use a fraction of the total frame rate.
      const float sl_fps_fraction =
          codec_.spatialLayers[si].maxFramerate / max_fps;
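      // E.g., with three temporal layers the encoder is typically configured
      // with rate decimators {4, 2, 1}, so a layer running at max_fps
      // advertises fractions of 1/4, 1/2 and 1 of kMaxFramerateFraction.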
      for (size_t ti = 0; ti < num_temporal_layers_; ++ti) {
        const uint32_t decimator =
            num_temporal_layers_ <= 1 ? 1 : config_->ts_rate_decimator[ti];
        RTC_DCHECK_GT(decimator, 0);
        info.fps_allocation[si].push_back(
            rtc::saturated_cast<uint8_t>(EncoderInfo::kMaxFramerateFraction *
                                         (sl_fps_fraction / decimator)));
      }
    }
  }
  return info;
}

size_t VP9EncoderImpl::SteadyStateSize(int sid, int tid) {
  const size_t bitrate_bps = current_bitrate_allocation_.GetBitrate(
      sid, tid == kNoTemporalIdx ? 0 : tid);
  const float fps = (codec_.mode == VideoCodecMode::kScreensharing)
                        ? std::min(static_cast<float>(codec_.maxFramerate),
                                   framerate_controller_[sid].GetTargetRate())
                        : codec_.maxFramerate;
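  // Expected per-frame size at the target rate, reduced by the configured
  // undershoot margin. E.g., 100 kbps at 5 fps with 30% undershoot gives
  // 100000 / (8 * 5) * (100 - 30) / 100 + 0.5, roughly 1750 bytes per frame.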
  return static_cast<size_t>(
      bitrate_bps / (8 * fps) *
          (100 -
           variable_framerate_experiment_.steady_state_undershoot_percentage) /
          100 +
      0.5);
}

// static
VP9EncoderImpl::VariableFramerateExperiment
VP9EncoderImpl::ParseVariableFramerateConfig(std::string group_name) {
  FieldTrialFlag enabled = FieldTrialFlag("Enabled");
  FieldTrialParameter<double> framerate_limit("min_fps", 5.0);
  FieldTrialParameter<int> qp("min_qp", 32);
  FieldTrialParameter<int> undershoot_percentage("undershoot", 30);
  FieldTrialParameter<int> frames_before_steady_state(
      "frames_before_steady_state", 5);
  ParseFieldTrial({&enabled, &framerate_limit, &qp, &undershoot_percentage,
                   &frames_before_steady_state},
                  field_trial::FindFullName(group_name));
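  // E.g., a field-trial string of "Enabled,min_fps:7.5,min_qp:28" turns the
  // experiment on and overrides those two parameters; omitted keys keep the
  // defaults declared above.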
  VariableFramerateExperiment config;
  config.enabled = enabled.Get();
  config.framerate_limit = framerate_limit.Get();
  config.steady_state_qp = qp.Get();
  config.steady_state_undershoot_percentage = undershoot_percentage.Get();
  config.frames_before_steady_state = frames_before_steady_state.Get();

  return config;
}

// static
VP9EncoderImpl::QualityScalerExperiment
VP9EncoderImpl::ParseQualityScalerConfig(std::string group_name) {
  FieldTrialFlag disabled = FieldTrialFlag("Disabled");
  FieldTrialParameter<int> low_qp("low_qp", kLowVp9QpThreshold);
  FieldTrialParameter<int> high_qp("high_qp", kHighVp9QpThreshold);
  ParseFieldTrial({&disabled, &low_qp, &high_qp},
                  field_trial::FindFullName(group_name));
  QualityScalerExperiment config;
  config.enabled = !disabled.Get();
  RTC_LOG(LS_INFO) << "WebRTC quality scaler for VP9 is "
                   << (config.enabled ? "enabled." : "disabled.");
  config.low_qp = low_qp.Get();
  config.high_qp = high_qp.Get();

  return config;
}

VP9DecoderImpl::VP9DecoderImpl()
    : decode_complete_callback_(nullptr),
      inited_(false),
      decoder_(nullptr),
      key_frame_required_(true) {}

VP9DecoderImpl::~VP9DecoderImpl() {
  inited_ = true;  // In order to do the actual release.
  Release();
  int num_buffers_in_use = frame_buffer_pool_.GetNumBuffersInUse();
  if (num_buffers_in_use > 0) {
    // The frame buffers are reference counted and frames are exposed after
    // decoding. There may be valid use cases where previous frames are still
    // referenced after ~VP9DecoderImpl; that is not a leak.
    RTC_LOG(LS_INFO) << num_buffers_in_use
                     << " Vp9FrameBuffers are still "
                        "referenced during ~VP9DecoderImpl.";
  }
}

int VP9DecoderImpl::InitDecode(const VideoCodec* inst, int number_of_cores) {
  int ret_val = Release();
  if (ret_val < 0) {
    return ret_val;
  }

  if (decoder_ == nullptr) {
    decoder_ = new vpx_codec_ctx_t;
  }
  vpx_codec_dec_cfg_t cfg;
  memset(&cfg, 0, sizeof(cfg));

#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
  // We focus on WebRTC fuzzing here, not libvpx itself. Use a single thread
  // for fuzzing, because:
  // - libvpx's VP9 single-thread decoder is more fuzzer friendly: it detects
  //   errors earlier than the multi-threaded version.
  // - It keeps peak CPU usage under control (not depending on the input).
  cfg.threads = 1;
#else
  if (!inst) {
    // No config provided - we don't know the resolution to decode yet.
    // Set the thread count to one in the meantime.
    cfg.threads = 1;
  } else {
    // We want to use multithreading when decoding high resolution videos, but
    // not too many threads, in order to avoid overhead when many streams are
    // decoded concurrently.
    // Set 2 threads as the target for a 1280x720 pixel count, and then scale
    // up linearly from there - but cap at the physical core count.
    // For common resolutions this results in:
    //  1 for 360p
    //  2 for 720p
    //  4 for 1080p
    //  8 for 1440p
    // 18 for 4K
    int num_threads =
        std::max(1, 2 * (inst->width * inst->height) / (1280 * 720));
    cfg.threads = std::min(number_of_cores, num_threads);
    current_codec_ = *inst;
  }
#endif

  num_cores_ = number_of_cores;

  vpx_codec_flags_t flags = 0;
  if (vpx_codec_dec_init(decoder_, vpx_codec_vp9_dx(), &cfg, flags)) {
    return WEBRTC_VIDEO_CODEC_MEMORY;
  }

  if (!frame_buffer_pool_.InitializeVpxUsePool(decoder_)) {
    return WEBRTC_VIDEO_CODEC_MEMORY;
  }

  inited_ = true;
  // Always start with a complete key frame.
  key_frame_required_ = true;
  if (inst && inst->buffer_pool_size) {
    if (!frame_buffer_pool_.Resize(*inst->buffer_pool_size)) {
      return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
    }
  }

  vpx_codec_err_t status =
      vpx_codec_control(decoder_, VP9D_SET_LOOP_FILTER_OPT, 1);
  if (status != VPX_CODEC_OK) {
    RTC_LOG(LS_ERROR) << "Failed to enable VP9D_SET_LOOP_FILTER_OPT. "
                      << vpx_codec_error(decoder_);
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }

  return WEBRTC_VIDEO_CODEC_OK;
}

int VP9DecoderImpl::Decode(const EncodedImage& input_image,
                           bool missing_frames,
                           int64_t /*render_time_ms*/) {
  if (!inited_) {
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }
  if (decode_complete_callback_ == nullptr) {
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }

  if (input_image._frameType == VideoFrameType::kVideoFrameKey) {
    absl::optional<vp9::FrameInfo> frame_info =
        vp9::ParseIntraFrameInfo(input_image.data(), input_image.size());
    if (frame_info) {
      if (frame_info->frame_width != current_codec_.width ||
          frame_info->frame_height != current_codec_.height) {
        // The resolution has changed: tear down and re-init a new decoder in
        // order to get correct sizing.
        Release();
        current_codec_.width = frame_info->frame_width;
        current_codec_.height = frame_info->frame_height;
        int reinit_status = InitDecode(&current_codec_, num_cores_);
        if (reinit_status != WEBRTC_VIDEO_CODEC_OK) {
          RTC_LOG(LS_WARNING) << "Failed to re-init decoder.";
          return reinit_status;
        }
      }
    } else {
      RTC_LOG(LS_WARNING) << "Failed to parse VP9 header from key-frame.";
    }
  }

  // Always start with a complete key frame.
  if (key_frame_required_) {
    if (input_image._frameType != VideoFrameType::kVideoFrameKey)
      return WEBRTC_VIDEO_CODEC_ERROR;
    // We have a key frame - is it complete?
    if (input_image._completeFrame) {
      key_frame_required_ = false;
    } else {
      return WEBRTC_VIDEO_CODEC_ERROR;
    }
  }
  vpx_codec_iter_t iter = nullptr;
  vpx_image_t* img;
  const uint8_t* buffer = input_image.data();
  if (input_image.size() == 0) {
    buffer = nullptr;  // Triggers full frame concealment.
  }
  // During decode libvpx may get and release buffers from
  // |frame_buffer_pool_|. In practice libvpx keeps a few (~3-4) buffers alive
  // at a time.
  if (vpx_codec_decode(decoder_, buffer,
                       static_cast<unsigned int>(input_image.size()), 0,
                       VPX_DL_REALTIME)) {
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  // |img->fb_priv| contains the image data, a reference counted
  // Vp9FrameBuffer. It may be released by libvpx during future
  // vpx_codec_decode or vpx_codec_destroy calls.
  img = vpx_codec_get_frame(decoder_, &iter);
  int qp;
  vpx_codec_err_t vpx_ret =
      vpx_codec_control(decoder_, VPXD_GET_LAST_QUANTIZER, &qp);
  RTC_DCHECK_EQ(vpx_ret, VPX_CODEC_OK);
  int ret =
      ReturnFrame(img, input_image.Timestamp(), qp, input_image.ColorSpace());
  if (ret != 0) {
    return ret;
  }
  return WEBRTC_VIDEO_CODEC_OK;
}

int VP9DecoderImpl::ReturnFrame(
    const vpx_image_t* img,
    uint32_t timestamp,
    int qp,
    const webrtc::ColorSpace* explicit_color_space) {
  if (img == nullptr) {
    // Decoder OK and nullptr image => No show frame.
    return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
  }

  // This buffer contains all of |img|'s image data, a reference counted
  // Vp9FrameBuffer. (libvpx is done with the buffers after a few
  // vpx_codec_decode calls or vpx_codec_destroy).
  Vp9FrameBufferPool::Vp9FrameBuffer* img_buffer =
      static_cast<Vp9FrameBufferPool::Vp9FrameBuffer*>(img->fb_priv);

  // The buffer can be used directly by the VideoFrame (without copy) by
  // using a Wrapped*Buffer.
  rtc::scoped_refptr<VideoFrameBuffer> img_wrapped_buffer;
  switch (img->bit_depth) {
    case 8:
      if (img->fmt == VPX_IMG_FMT_I420) {
        img_wrapped_buffer = WrapI420Buffer(
            img->d_w, img->d_h, img->planes[VPX_PLANE_Y],
            img->stride[VPX_PLANE_Y], img->planes[VPX_PLANE_U],
            img->stride[VPX_PLANE_U], img->planes[VPX_PLANE_V],
            img->stride[VPX_PLANE_V],
            // WrappedI420Buffer's mechanism for allowing the release of its
            // frame buffer is through a callback function. This is where we
            // should release |img_buffer|.
            rtc::KeepRefUntilDone(img_buffer));
      } else if (img->fmt == VPX_IMG_FMT_I444) {
        img_wrapped_buffer = WrapI444Buffer(
            img->d_w, img->d_h, img->planes[VPX_PLANE_Y],
            img->stride[VPX_PLANE_Y], img->planes[VPX_PLANE_U],
            img->stride[VPX_PLANE_U], img->planes[VPX_PLANE_V],
            img->stride[VPX_PLANE_V],
            // WrappedI444Buffer's mechanism for allowing the release of its
            // frame buffer is through a callback function. This is where we
            // should release |img_buffer|.
            rtc::KeepRefUntilDone(img_buffer));
      } else {
        RTC_LOG(LS_ERROR)
            << "Unsupported pixel format produced by the decoder: "
            << static_cast<int>(img->fmt);
        return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
      }
      break;
    case 10:
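      // libvpx reports plane strides in bytes; WrapI010Buffer takes them in
      // 16-bit sample units, hence the division by 2 below.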
      img_wrapped_buffer = WrapI010Buffer(
          img->d_w, img->d_h,
          reinterpret_cast<const uint16_t*>(img->planes[VPX_PLANE_Y]),
          img->stride[VPX_PLANE_Y] / 2,
          reinterpret_cast<const uint16_t*>(img->planes[VPX_PLANE_U]),
          img->stride[VPX_PLANE_U] / 2,
          reinterpret_cast<const uint16_t*>(img->planes[VPX_PLANE_V]),
          img->stride[VPX_PLANE_V] / 2, rtc::KeepRefUntilDone(img_buffer));
      break;
    default:
      RTC_LOG(LS_ERROR) << "Unsupported bit depth produced by the decoder: "
                        << img->bit_depth;
      return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
  }

  auto builder = VideoFrame::Builder()
                     .set_video_frame_buffer(img_wrapped_buffer)
                     .set_timestamp_rtp(timestamp);
  if (explicit_color_space) {
    builder.set_color_space(*explicit_color_space);
  } else {
    builder.set_color_space(
        ExtractVP9ColorSpace(img->cs, img->range, img->bit_depth));
  }
  VideoFrame decoded_image = builder.build();

  decode_complete_callback_->Decoded(decoded_image, absl::nullopt, qp);
  return WEBRTC_VIDEO_CODEC_OK;
}

int VP9DecoderImpl::RegisterDecodeCompleteCallback(
    DecodedImageCallback* callback) {
  decode_complete_callback_ = callback;
  return WEBRTC_VIDEO_CODEC_OK;
}

int VP9DecoderImpl::Release() {
  int ret_val = WEBRTC_VIDEO_CODEC_OK;

  if (decoder_ != nullptr) {
    if (inited_) {
      // When a codec is destroyed libvpx will release any buffers of
      // |frame_buffer_pool_| it is currently using.
      if (vpx_codec_destroy(decoder_)) {
        ret_val = WEBRTC_VIDEO_CODEC_MEMORY;
      }
    }
    delete decoder_;
    decoder_ = nullptr;
  }
  // Releases buffers from the pool. Any buffers not in use are deleted.
  // Buffers still referenced externally are deleted once fully released, not
  // returning to the pool.
  frame_buffer_pool_.ClearPool();
  inited_ = false;
  return ret_val;
}

const char* VP9DecoderImpl::ImplementationName() const {
  return "libvpx";
}

}  // namespace webrtc

#endif  // RTC_ENABLE_VP9