/* SPDX-License-Identifier: MIT */
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"
#include "dm_helpers.h"
#include "core_types.h"
#include "resource.h"
#include "dccg.h"
#include "dce/dce_hwseq.h"
#include "clk_mgr.h"
#include "reg_helper.h"
#include "abm.h"
#include "hubp.h"
#include "dchubbub.h"
#include "timing_generator.h"
#include "opp.h"
#include "ipp.h"
#include "mpc.h"
#include "mcif_wb.h"
#include "dc_dmub_srv.h"
#include "dcn35_hwseq.h"
#include "dcn35/dcn35_dccg.h"
#include "link_hwss.h"
#include "dpcd_defs.h"
#include "dce/dmub_outbox.h"
#include "link.h"
#include "dcn10/dcn10_hwseq.h"
#include "inc/link_enc_cfg.h"
#include "dcn30/dcn30_vpg.h"
#include "dce/dce_i2c_hw.h"
#include "dsc.h"
#include "dcn20/dcn20_optc.h"
#include "dcn30/dcn30_cm_common.h"
#include "dcn31/dcn31_hwseq.h"
#include "dcn20/dcn20_hwseq.h"
#include "dc_state_priv.h"

#define DC_LOGGER_INIT(logger) \
	struct dal_logger *dc_logger = logger

#define CTX \
	hws->ctx
#define REG(reg)\
	hws->regs->reg
#define DC_LOGGER \
	dc_logger


#undef FN
#define FN(reg_name, field_name) \
	hws->shifts->field_name, hws->masks->field_name
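
/*
 * Register access in this file goes through the generic reg_helper.h
 * macros, specialized by CTX/REG()/FN() above. As a rough sketch of how
 * this composes (simplified; the real macros in reg_helper.h handle
 * variadic field lists and the shift/mask lookup), a call like
 *
 *	REG_UPDATE(DMU_CLK_CNTL, RBBMIF_FGCG_REP_DIS, 1);
 *
 * performs a read-modify-write of hws->regs->DMU_CLK_CNTL, using
 * hws->shifts->RBBMIF_FGCG_REP_DIS and hws->masks->RBBMIF_FGCG_REP_DIS
 * to place the field value.
 */
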
#if 0
static void enable_memory_low_power(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	int i;

	if (dc->debug.enable_mem_low_power.bits.dmcu) {
		// Force ERAM to shutdown if DMCU is not enabled
		if (dc->debug.disable_dmcu || dc->config.disable_dmcu) {
			REG_UPDATE(DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, 3);
		}
	}
	/* dcn35 has MEM_PWR enabled by default, make sure to wake them up */
	// Set default OPTC memory power states
	if (dc->debug.enable_mem_low_power.bits.optc) {
		// Shutdown when unassigned and light sleep in VBLANK
		REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1);
	}

	if (dc->debug.enable_mem_low_power.bits.vga) {
		// Power down VGA memory
		REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1);
	}

	if (dc->debug.enable_mem_low_power.bits.mpc &&
	    dc->res_pool->mpc->funcs->set_mpc_mem_lp_mode)
		dc->res_pool->mpc->funcs->set_mpc_mem_lp_mode(dc->res_pool->mpc);

	if (dc->debug.enable_mem_low_power.bits.vpg && dc->res_pool->stream_enc[0]->vpg->funcs->vpg_powerdown) {
		// Power down VPGs
		for (i = 0; i < dc->res_pool->stream_enc_count; i++)
			dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg);
#if defined(CONFIG_DRM_AMD_DC_DP2_0)
		for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++)
			dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg);
#endif
	}

}
#endif

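/*
 * Toggle fine-grain clock gating for the DMU sub-blocks. The *_FGCG_REP_DIS
 * fields are disable bits, hence they are programmed with the inverse of
 * @enable.
 */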
void dcn35_set_dmu_fgcg(struct dce_hwseq *hws, bool enable)
{
	REG_UPDATE_3(DMU_CLK_CNTL,
			RBBMIF_FGCG_REP_DIS, !enable,
			IHC_FGCG_REP_DIS, !enable,
			LONO_FGCG_REP_DIS, !enable
	);
}

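/*
 * Master I/O enable for the HPO block, which hosts the DP 128b/132b
 * stream/link encoders on this ASIC family.
 */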
void dcn35_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)
{
	REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, !!enable);
}

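/*
 * One-time hardware init for DCN3.5: bring up clocks and the DCCG, init
 * link encoders and audio, blank DP displays before detection, restore
 * backlight/ABM state, and query DMCUB feature caps. When seamless boot
 * is requested, powering down pipes is deferred until we know which pipes
 * the first mode set will use (see the comment above init_pipes below).
 */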
void dcn35_init_hw(struct dc *dc)
{
	struct abm **abms = dc->res_pool->multiple_abms;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	uint32_t user_level = MAX_BACKLIGHT_LEVEL;
	int i;

	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

	//dcn35_set_dmu_fgcg(hws, dc->debug.enable_fine_grain_clock_gating.bits.dmu);

	if (!dcb->funcs->is_accelerated_mode(dcb)) {
		/* this calls into dmub fw to do the init */
		hws->funcs.bios_golden_init(dc);
	}

	// Initialize the dccg
	if (res_pool->dccg->funcs->dccg_init)
		res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	//enable_memory_low_power(dc);

	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (res_pool->hubbub) {
			(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
					dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
					&res_pool->ref_clocks.dccg_ref_clock_inKhz);

			(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
					res_pool->ref_clocks.dccg_ref_clock_inKhz,
					&res_pool->ref_clocks.dchub_ref_clock_inKhz);
		} else {
			// Not all ASICs have DCCG sw component
			res_pool->ref_clocks.dccg_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
			res_pool->ref_clocks.dchub_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
		}
	} else
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		if (link->ep_type != DISPLAY_ENDPOINT_PHY)
			continue;

		link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
		    link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			link->link_status.link_active = true;
			if (link->link_enc->funcs->fec_is_active &&
					link->link_enc->funcs->fec_is_active(link->link_enc))
				link->fec_state = dc_link_fec_enabled;
		}
	}

	/* we want to turn off all dp displays before doing detection */
	dc->link_srv->blank_all_dp_displays(dc);
/*
	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);
*/
	if (res_pool->hubbub && res_pool->hubbub->funcs->dchubbub_init)
		res_pool->hubbub->funcs->dchubbub_init(dc->res_pool->hubbub);
	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
		// we want to turn off edp displays if odm is enabled and no seamless boot
		if (!dc->caps.seamless_odm) {
			for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
				struct timing_generator *tg = dc->res_pool->timing_generators[i];
				uint32_t num_opps, opp_id_src0, opp_id_src1;

				num_opps = 1;
				if (tg) {
					if (tg->funcs->is_tg_enabled(tg) && tg->funcs->get_optc_source) {
						tg->funcs->get_optc_source(tg, &num_opps,
								&opp_id_src0, &opp_id_src1);
					}
				}

				if (num_opps > 1) {
					dc->link_srv->blank_all_edp_displays(dc);
					break;
				}
			}
		}

		hws->funcs.init_pipes(dc, dc->current_state);
		if (dc->res_pool->hubbub->funcs->allow_self_refresh_control &&
		    !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter)
			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
					!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
	}
	if (res_pool->dccg->funcs->dccg_root_gate_disable_control) {
		for (i = 0; i < res_pool->pipe_count; i++)
			res_pool->dccg->funcs->dccg_root_gate_disable_control(res_pool->dccg, i, 0);
	}

	for (i = 0; i < res_pool->audio_count; i++) {
		struct audio *audio = res_pool->audios[i];

		audio->funcs->hw_init(audio);
	}

	for (i = 0; i < dc->link_count; i++) {
		struct dc_link *link = dc->links[i];

		if (link->panel_cntl) {
			backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
			user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
		}
	}
	if (dc->ctx->dmub_srv) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			if (abms[i] != NULL && abms[i]->funcs != NULL)
				abms[i]->funcs->abm_init(abms[i], backlight, user_level);
		}
	}

	/* Power AFMT / HDMI memory. TODO: may move to enable/disable output to save power */
	REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	// Set i2c to light sleep until engine is setup
	if (dc->debug.enable_mem_low_power.bits.i2c)
		REG_UPDATE(DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, 0);

	if (hws->funcs.setup_hpo_hw_control)
		hws->funcs.setup_hpo_hw_control(hws, false);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}

	if (dc->debug.disable_mem_low_power) {
		REG_UPDATE(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, 1);
	}
	if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
		dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);

	if (dc->clk_mgr && dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);

	if (dc->clk_mgr && dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled)
		dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);

	if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
		dc->res_pool->hubbub->funcs->force_pstate_change_control(
				dc->res_pool->hubbub, false, false);

	if (dc->res_pool->hubbub->funcs->init_crb)
		dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);

	if (dc->res_pool->hubbub->funcs->set_request_limit && dc->config.sdpif_request_limit_words_per_umc > 0)
		dc->res_pool->hubbub->funcs->set_request_limit(dc->res_pool->hubbub,
				dc->ctx->dc_bios->vram_info.num_chans,
				dc->config.sdpif_request_limit_words_per_umc);
	// Get DMCUB capabilities
	if (dc->ctx->dmub_srv) {
		dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
		dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
		dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
	}

	if (dc->res_pool->pg_cntl) {
		if (dc->res_pool->pg_cntl->funcs->init_pg_status)
			dc->res_pool->pg_cntl->funcs->init_pg_status(dc->res_pool->pg_cntl);
	}
}

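/*
 * Enable or disable DSC for a stream, replicating the configuration across
 * every pipe in the ODM combine chain. pic_width and num_slices_h are
 * divided by the ODM factor (opp_cnt) so each DSC instance handles only
 * its own ODM slice, then restored afterwards.
 */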
static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
{
	struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct pipe_ctx *odm_pipe;
	int opp_cnt = 1;

	DC_LOGGER_INIT(stream->ctx->logger);

	ASSERT(dsc);
	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
		opp_cnt++;

	if (enable) {
		struct dsc_config dsc_cfg;
		struct dsc_optc_config dsc_optc_cfg = {0};
		enum optc_dsc_mode optc_dsc_mode;
		struct dcn_dsc_state dsc_state = {0};

		if (!dsc) {
			DC_LOG_DSC("DSC is NULL for tg instance %d:", pipe_ctx->stream_res.tg->inst);
			return;
		}

		if (dsc->funcs->dsc_read_state) {
			dsc->funcs->dsc_read_state(dsc, &dsc_state);
			if (!dsc_state.dsc_fw_en) {
				DC_LOG_DSC("DSC has been disabled for tg instance %d:", pipe_ctx->stream_res.tg->inst);
				return;
			}
		}
		/* Enable DSC hw block */
		dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
		dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
		dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
		dsc_cfg.color_depth = stream->timing.display_color_depth;
		dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
		dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
		ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
		dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;

		dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
		dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
			struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;

			ASSERT(odm_dsc);
			odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
			odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
		}
		dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt;
		dsc_cfg.pic_width *= opp_cnt;

		optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;

		/* Enable DSC in OPTC */
		DC_LOG_DSC("Setting optc DSC config for tg instance %d:", pipe_ctx->stream_res.tg->inst);
		pipe_ctx->stream_res.tg->funcs->set_dsc_config(pipe_ctx->stream_res.tg,
				optc_dsc_mode,
				dsc_optc_cfg.bytes_per_pixel,
				dsc_optc_cfg.slice_width);
	} else {
		/* disable DSC in OPTC */
		pipe_ctx->stream_res.tg->funcs->set_dsc_config(
				pipe_ctx->stream_res.tg,
				OPTC_DSC_DISABLED, 0, 0);

		/* disable DSC block */
		dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc);
		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
			ASSERT(odm_pipe->stream_res.dsc);
			odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc);
		}
	}
}

// Given any pipe_ctx, return the total ODM combine factor, and optionally return
// the OPP ids which are used
static unsigned int get_odm_config(struct pipe_ctx *pipe_ctx, unsigned int *opp_instances)
{
	unsigned int opp_count = 1;
	struct pipe_ctx *odm_pipe;

	// First get to the top pipe
	for (odm_pipe = pipe_ctx; odm_pipe->prev_odm_pipe; odm_pipe = odm_pipe->prev_odm_pipe)
		;

	// First pipe is always used
	if (opp_instances)
		opp_instances[0] = odm_pipe->stream_res.opp->inst;

	// Find and count odm pipes, if any
	for (odm_pipe = odm_pipe->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
		if (opp_instances)
			opp_instances[opp_count] = odm_pipe->stream_res.opp->inst;
		opp_count++;
	}

	return opp_count;
}

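/*
 * Reprogram ODM combine (or bypass) on the OTG for the given pipe, keep
 * DSC in sync with the new ODM factor, and disconnect DSC from any pipe
 * that just left the ODM chain.
 */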
void dcn35_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
	struct pipe_ctx *odm_pipe;
	int opp_cnt = 0;
	int opp_inst[MAX_PIPES] = {0};
	int odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, false);
	int last_odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, true);

	opp_cnt = get_odm_config(pipe_ctx, opp_inst);

	if (opp_cnt > 1)
		pipe_ctx->stream_res.tg->funcs->set_odm_combine(
				pipe_ctx->stream_res.tg,
				opp_inst, opp_cnt,
				odm_slice_width, last_odm_slice_width);
	else
		pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);

	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
		odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
				odm_pipe->stream_res.opp,
				true);
	}

	if (pipe_ctx->stream_res.dsc) {
		struct pipe_ctx *current_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];

		update_dsc_on_stream(pipe_ctx, pipe_ctx->stream->timing.flags.DSC);

		/* Check if no longer using pipe for ODM, then need to disconnect DSC for that pipe */
		if (!pipe_ctx->next_odm_pipe && current_pipe_ctx->next_odm_pipe &&
		    current_pipe_ctx->next_odm_pipe->stream_res.dsc) {
			struct display_stream_compressor *dsc = current_pipe_ctx->next_odm_pipe->stream_res.dsc;

			/* disconnect DSC block from stream */
			dsc->funcs->dsc_disconnect(dsc);
		}
	}
}

void dcn35_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on)
{
	if (!hws->ctx->dc->debug.root_clock_optimization.bits.dpp)
		return;

	if (hws->ctx->dc->res_pool->dccg->funcs->dpp_root_clock_control) {
		hws->ctx->dc->res_pool->dccg->funcs->dpp_root_clock_control(
				hws->ctx->dc->res_pool->dccg, dpp_inst, clock_on);
	}
}

void dcn35_dpstream_root_clock_control(struct dce_hwseq *hws, unsigned int dp_hpo_inst, bool clock_on)
{
	if (!hws->ctx->dc->debug.root_clock_optimization.bits.dpstream)
		return;

	if (hws->ctx->dc->res_pool->dccg->funcs->set_dpstreamclk_root_clock_gating) {
		hws->ctx->dc->res_pool->dccg->funcs->set_dpstreamclk_root_clock_gating(
				hws->ctx->dc->res_pool->dccg, dp_hpo_inst, clock_on);
	}
}

void dcn35_physymclk_root_clock_control(struct dce_hwseq *hws, unsigned int phy_inst, bool clock_on)
{
	if (!hws->ctx->dc->debug.root_clock_optimization.bits.physymclk)
		return;

	if (hws->ctx->dc->res_pool->dccg->funcs->set_physymclk_root_clock_gating) {
		hws->ctx->dc->res_pool->dccg->funcs->set_physymclk_root_clock_gating(
				hws->ctx->dc->res_pool->dccg, phy_inst, clock_on);
	}
}

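/*
 * Power gate/ungate a DSC instance through its PGFSM domain (DOMAIN16..19
 * map to DSC0..3). DC_IP_REQUEST_CNTL is asserted around the update so the
 * request is accepted, then restored if it was not already set.
 */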
void dcn35_dsc_pg_control(
		struct dce_hwseq *hws,
		unsigned int dsc_inst,
		bool power_on)
{
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? 0 : 2;
	uint32_t org_ip_request_cntl = 0;

	if (hws->ctx->dc->debug.disable_dsc_power_gate)
		return;
	if (hws->ctx->dc->debug.ignore_pg)
		return;
	REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);

	switch (dsc_inst) {
	case 0: /* DSC0 */
		REG_UPDATE(DOMAIN16_PG_CONFIG,
				DOMAIN_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN16_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DSC1 */
		REG_UPDATE(DOMAIN17_PG_CONFIG,
				DOMAIN_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN17_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DSC2 */
		REG_UPDATE(DOMAIN18_PG_CONFIG,
				DOMAIN_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN18_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DSC3 */
		REG_UPDATE(DOMAIN19_PG_CONFIG,
				DOMAIN_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN19_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}

	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
}

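/*
 * Program the DOMAIN_POWER_FORCEON bits: the HUBP/DPP domains are always
 * kept forced on here, while the DSC domains honor @enable unless DSC
 * power gating is disabled via debug options.
 */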
void dcn35_enable_power_gating_plane(struct dce_hwseq *hws, bool enable)
{
	bool force_on = true; /* disable power gating */
	uint32_t org_ip_request_cntl = 0;

	if (hws->ctx->dc->debug.disable_hubp_power_gate)
		return;
	if (hws->ctx->dc->debug.ignore_pg)
		return;
	REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
	/* DCHUBP0/1/2/3/4/5 */
	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
	/* DPP0/1/2/3/4/5 */
	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);

	force_on = true; /* disable power gating */
	if (enable && !hws->ctx->dc->debug.disable_dsc_power_gate)
		force_on = false;

	/* DSC0/1/2/3/4 */
	REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
}

/* In headless boot cases, DIG may be turned
 * on which causes HW/SW discrepancies.
 * To avoid this, power down hardware on boot
 * if DIG is turned on
 */
void dcn35_power_down_on_boot(struct dc *dc)
{
	struct dc_link *edp_links[MAX_NUM_EDP];
	struct dc_link *edp_link = NULL;
	int edp_num;
	int i = 0;

	dc_get_edp_links(dc, edp_links, &edp_num);
	if (edp_num)
		edp_link = edp_links[0];

	if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
			edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
			dc->hwseq->funcs.edp_backlight_control &&
			dc->hwseq->funcs.power_down &&
			dc->hwss.edp_power_control) {
		dc->hwseq->funcs.edp_backlight_control(edp_link, false);
		dc->hwseq->funcs.power_down(dc);
		dc->hwss.edp_power_control(edp_link, false);
	} else {
		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
					link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
					dc->hwseq->funcs.power_down) {
				dc->hwseq->funcs.power_down(dc);
				break;
			}
		}
	}

	/*
	 * Call update_clocks with empty context
	 * to send DISPLAY_OFF
	 * Otherwise DISPLAY_OFF may not be asserted
	 */
	if (dc->clk_mgr->funcs->set_low_power_state)
		dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);

	if (dc->clk_mgr->clks.pwr_state == DCN_PWR_STATE_LOW_POWER)
		dc_allow_idle_optimizations(dc, true);
}

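/*
 * Gatekeeper for DMCUB idle power optimizations: allow entry only when
 * every active stream is an embedded (eDP) panel on PWRSEQ0 with PSR or
 * Replay available, and at most one eDP is active.
 */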
bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
	if (dc->debug.dmcub_emulation)
		return true;

	if (enable) {
		uint32_t num_active_edp = 0;
		int i;

		for (i = 0; i < dc->current_state->stream_count; ++i) {
			struct dc_stream_state *stream = dc->current_state->streams[i];
			struct dc_link *link = stream->link;
			bool is_psr = link && !link->panel_config.psr.disable_psr &&
				      (link->psr_settings.psr_version == DC_PSR_VERSION_1 ||
				       link->psr_settings.psr_version == DC_PSR_VERSION_SU_1);
			bool is_replay = link && link->replay_settings.replay_feature_enabled;

			/* Ignore streams that are disabled. */
			if (stream->dpms_off)
				continue;

			/* Active external displays block idle optimizations. */
			if (!dc_is_embedded_signal(stream->signal))
				return false;

			/* If not PWRSEQ0 we can't enter idle optimizations */
			if (link && link->link_index != 0)
				return false;

			/* Check for panel power features required for idle optimizations. */
			if (!is_psr && !is_replay)
				return false;

			num_active_edp += 1;
		}

		/* If more than one active eDP then disallow. */
		if (num_active_edp > 1)
			return false;
	}

	// TODO: review other cases when idle optimization is allowed
	dc_dmub_srv_apply_idle_power_optimizations(dc, enable);

	return true;
}

void dcn35_z10_restore(const struct dc *dc)
{
	if (dc->debug.disable_z10)
		return;

	dc_dmub_srv_apply_idle_power_optimizations(dc, false);

	dcn31_z10_restore(dc);
}

void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
	bool can_apply_seamless_boot = false;
	bool tg_enabled[MAX_PIPES] = {false};

	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization) {
			can_apply_seamless_boot = true;
			break;
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is an assumption that pipe_ctx is not mapped irregularly
		 * to a non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		/* Blank controller using driver code instead of
		 * command table.
		 */
		if (tg->funcs->is_tg_enabled(tg)) {
			if (hws->funcs.init_blank != NULL) {
				hws->funcs.init_blank(dc, tg);
				tg->funcs->lock(tg);
			} else {
				tg->funcs->lock(tg);
				tg->funcs->set_blank(tg, true);
				hwss_wait_for_blank_complete(tg);
			}
		}
	}

	/* Reset det size */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		struct hubp *hubp = dc->res_pool->hubps[i];

		/* Do not need to reset for seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		if (hubbub && hubp) {
			if (hubbub->funcs->program_det_size)
				hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
			if (hubbub->funcs->program_det_segments)
				hubbub->funcs->program_det_segments(hubbub, hubp->inst, 0);
		}
	}

	/* num_opp will be equal to number of mpcc */
	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Cannot reset the MPC mux if seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		dc->res_pool->mpc->funcs->mpc_init_single_inst(
				dc->res_pool->mpc, i);
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct hubp *hubp = dc->res_pool->hubps[i];
		struct dpp *dpp = dc->res_pool->dpps[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is an assumption that pipe_ctx is not mapped irregularly
		 * to a non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (can_apply_seamless_boot &&
		    pipe_ctx->stream != NULL &&
		    pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
				pipe_ctx->stream_res.tg)) {
			// Enable double buffering for OTG_BLANK no matter if
			// seamless boot is enabled or not to suppress global sync
			// signals when OTG blanked. This is to prevent pipe from
			// requesting data while in PSR.
			tg->funcs->tg_init(tg);
			hubp->power_gated = true;
			tg_enabled[i] = true;
			continue;
		}

		/* Disable on the current state so the new one isn't cleared. */
		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		hubp->funcs->hubp_reset(hubp);
		dpp->funcs->dpp_reset(dpp);

		pipe_ctx->stream_res.tg = tg;
		pipe_ctx->pipe_idx = i;

		pipe_ctx->plane_res.hubp = hubp;
		pipe_ctx->plane_res.dpp = dpp;
		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
		hubp->mpcc_id = dpp->inst;
		hubp->opp_id = OPP_ID_INVALID;
		hubp->power_gated = false;

		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];

		hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx);

		if (tg->funcs->is_tg_enabled(tg))
			tg->funcs->unlock(tg);

		dc->hwss.disable_plane(dc, context, pipe_ctx);

		pipe_ctx->stream_res.tg = NULL;
		pipe_ctx->plane_res.hubp = NULL;

		if (tg->funcs->is_tg_enabled(tg)) {
			if (tg->funcs->init_odm)
				tg->funcs->init_odm(tg);
		}

		tg->funcs->tg_init(tg);
	}

	/* Clean up MPC tree */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (tg_enabled[i]) {
			if (dc->res_pool->opps[i]->mpc_tree_params.opp_list) {
				if (dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot) {
					int bot_id = dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot->mpcc_id;

					if ((bot_id < MAX_MPCC) && (bot_id < MAX_PIPES) && (!tg_enabled[bot_id]))
						dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
				}
			}
		}
	}

	if (pg_cntl != NULL) {
		if (pg_cntl->funcs->dsc_pg_control != NULL) {
			uint32_t num_opps = 0;
			uint32_t opp_id_src0 = OPP_ID_INVALID;
			uint32_t opp_id_src1 = OPP_ID_INVALID;

			// Step 1: Find out which OPTC is running and has OPTC DSC enabled.
			// We can't use res_pool->res_cap->num_timing_generator here because
			// it records the default pipe count built into the driver, not the
			// pipe count of the current chip; some ASICs are fused to fewer
			// display pipes than the default. dcnxx_resource_construct obtains
			// the real information.
			for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
				uint32_t optc_dsc_state = 0;
				struct timing_generator *tg = dc->res_pool->timing_generators[i];

				if (tg->funcs->is_tg_enabled(tg)) {
					if (tg->funcs->get_dsc_status)
						tg->funcs->get_dsc_status(tg, &optc_dsc_state);
					// Only one OPTC with DSC is ON, so if we got one result,
					// we would exit this block. Non-zero value means DSC enabled.
					if (optc_dsc_state != 0) {
						tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
						break;
					}
				}
			}

			// Step 2: Power down every DSC except the one feeding the running OPTC
			for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
				struct dcn_dsc_state s = {0};

				dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);

				if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
				    s.dsc_clock_en && s.dsc_fw_en)
					continue;

				pg_cntl->funcs->dsc_pg_control(pg_cntl, dc->res_pool->dscs[i]->inst, false);
			}
		}
	}
}

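/*
 * Per-plane power up: ungate DCHUB clocks, re-init the HUBP, enable the
 * OPP pipe clock, program the system aperture when GPUVM is in use, and
 * re-arm the flip interrupt for immediate flips on the top pipe.
 */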
void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
			struct dc_state *context)
{
	/* enable DCFCLK current DCHUB */
	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);

	/* initialize HUBP on power up */
	pipe_ctx->plane_res.hubp->funcs->hubp_init(pipe_ctx->plane_res.hubp);

	/* make sure OPP_PIPE_CLOCK_EN = 1 */
	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
			pipe_ctx->stream_res.opp,
			true);
	/* TODO: insert PG here */
	if (dc->vm_pa_config.valid) {
		struct vm_system_aperture_param apt;

		apt.sys_default.quad_part = 0;

		apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.start_addr;
		apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.end_addr;

		// Program system aperture settings
		pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt);
	}

	if (!pipe_ctx->top_pipe
		&& pipe_ctx->plane_state
		&& pipe_ctx->plane_state->flip_int_enabled
		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
		pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
}

/* disable HW used by plane.
 * note: cannot disable until disconnect is complete
 */
void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;

	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);

	/* In flip immediate with pipe splitting case GSL is used for
	 * synchronization so we must disable it when the plane is disabled.
	 */
	if (pipe_ctx->stream_res.gsl_group != 0)
		dcn20_setup_gsl_group_as_lock(dc, pipe_ctx, false);
/*
	if (hubp->funcs->hubp_update_mall_sel)
		hubp->funcs->hubp_update_mall_sel(hubp, 0, false);
*/
	dc->hwss.set_flip_control_gsl(pipe_ctx, false);

	hubp->funcs->hubp_clk_cntl(hubp, false);

	dpp->funcs->dpp_dppclk_control(dpp, false, false);
	/* TODO: need to support both cases */
	hubp->power_gated = true;

	hubp->funcs->hubp_reset(hubp);
	dpp->funcs->dpp_reset(dpp);

	pipe_ctx->stream = NULL;
	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
	pipe_ctx->top_pipe = NULL;
	pipe_ctx->bottom_pipe = NULL;
	pipe_ctx->plane_state = NULL;
}

void dcn35_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	bool is_phantom = dc_state_get_pipe_subvp_type(state, pipe_ctx) == SUBVP_PHANTOM;
	struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL;

	DC_LOGGER_INIT(dc->ctx->logger);

	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
		return;

	if (hws->funcs.plane_atomic_disable)
		hws->funcs.plane_atomic_disable(dc, pipe_ctx);

	/* Turn back off the phantom OTG after the phantom plane is fully disabled
	 */
	if (is_phantom)
		if (tg && tg->funcs->disable_phantom_crtc)
			tg->funcs->disable_phantom_crtc(tg);

	DC_LOG_DC("Power down front end %d\n",
		  pipe_ctx->pipe_idx);
}

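/*
 * Build the set of power-gating domains that may be gated for the given
 * state: start by assuming everything can gate, then clear the flag for
 * every block instance the state still uses. sequential_ono additionally
 * keeps lower-numbered HUBP/DPP instances powered whenever a higher one
 * must stay on.
 */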
void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
	struct pg_block_update *update_state)
{
	bool hpo_frl_stream_enc_acquired = false;
	bool hpo_dp_stream_enc_acquired = false;
	int i = 0, j = 0;
	int edp_num = 0;
	struct dc_link *edp_links[MAX_NUM_EDP] = { NULL };

	memset(update_state, 0, sizeof(struct pg_block_update));

	for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++) {
		if (context->res_ctx.is_hpo_dp_stream_enc_acquired[i] &&
				dc->res_pool->hpo_dp_stream_enc[i]) {
			hpo_dp_stream_enc_acquired = true;
			break;
		}
	}

	if (!hpo_frl_stream_enc_acquired && !hpo_dp_stream_enc_acquired)
		update_state->pg_res_update[PG_HPO] = true;

	update_state->pg_res_update[PG_DWB] = true;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		for (j = 0; j < PG_HW_PIPE_RESOURCES_NUM_ELEMENT; j++)
			update_state->pg_pipe_res_update[j][i] = true;

		if (!pipe_ctx)
			continue;

		if (pipe_ctx->plane_res.hubp)
			update_state->pg_pipe_res_update[PG_HUBP][pipe_ctx->plane_res.hubp->inst] = false;

		if (pipe_ctx->plane_res.dpp && pipe_ctx->plane_res.hubp)
			update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->plane_res.hubp->inst] = false;

		if (pipe_ctx->plane_res.dpp || pipe_ctx->stream_res.opp)
			update_state->pg_pipe_res_update[PG_MPCC][pipe_ctx->plane_res.mpcc_inst] = false;

		if (pipe_ctx->stream_res.dsc) {
			update_state->pg_pipe_res_update[PG_DSC][pipe_ctx->stream_res.dsc->inst] = false;
			if (dc->caps.sequential_ono) {
				update_state->pg_pipe_res_update[PG_HUBP][pipe_ctx->stream_res.dsc->inst] = false;
				update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->stream_res.dsc->inst] = false;

				/* All HUBP/DPP instances must be powered if the DSC inst != HUBP inst */
				if (!pipe_ctx->top_pipe && pipe_ctx->plane_res.hubp &&
				    pipe_ctx->plane_res.hubp->inst != pipe_ctx->stream_res.dsc->inst) {
					for (j = 0; j < dc->res_pool->pipe_count; ++j) {
						update_state->pg_pipe_res_update[PG_HUBP][j] = false;
						update_state->pg_pipe_res_update[PG_DPP][j] = false;
					}
				}
			}
		}

		if (pipe_ctx->stream_res.opp)
			update_state->pg_pipe_res_update[PG_OPP][pipe_ctx->stream_res.opp->inst] = false;

		if (pipe_ctx->stream_res.hpo_dp_stream_enc)
			update_state->pg_pipe_res_update[PG_DPSTREAM][pipe_ctx->stream_res.hpo_dp_stream_enc->inst] = false;
	}

	for (i = 0; i < dc->link_count; i++) {
		update_state->pg_pipe_res_update[PG_PHYSYMCLK][dc->links[i]->link_enc_hw_inst] = true;
		if (dc->links[i]->type != dc_connection_none)
			update_state->pg_pipe_res_update[PG_PHYSYMCLK][dc->links[i]->link_enc_hw_inst] = false;
	}

	/* Domain24 controls all the otg, mpc, opp; as long as one otg is still up, avoid enabling OTG PG */
	for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];

		if (tg && tg->funcs->is_tg_enabled(tg)) {
			update_state->pg_pipe_res_update[PG_OPTC][i] = false;
			break;
		}
	}

	dc_get_edp_links(dc, edp_links, &edp_num);
	if (edp_num == 0 ||
		((!edp_links[0] || !edp_links[0]->edp_sink_present) &&
			(!edp_links[1] || !edp_links[1]->edp_sink_present))) {
		/* No eDP on this config; keep Domain24 power on. For S0i3 this is handled by dmub fw */
		update_state->pg_pipe_res_update[PG_OPTC][0] = false;
	}

	if (dc->caps.sequential_ono) {
		for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
			if (!update_state->pg_pipe_res_update[PG_HUBP][i] &&
			    !update_state->pg_pipe_res_update[PG_DPP][i]) {
				for (j = i - 1; j >= 0; j--) {
					update_state->pg_pipe_res_update[PG_HUBP][j] = false;
					update_state->pg_pipe_res_update[PG_DPP][j] = false;
				}

				break;
			}
		}
	}
}

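/*
 * Build the set of power-gating domains that must be ungated before
 * applying @context: newly added pipes mark all of their blocks, while
 * unchanged pipes only mark blocks whose instance assignment changed
 * relative to the current state.
 */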
void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
	struct pg_block_update *update_state)
{
	bool hpo_frl_stream_enc_acquired = false;
	bool hpo_dp_stream_enc_acquired = false;
	int i = 0, j = 0;

	memset(update_state, 0, sizeof(struct pg_block_update));

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *cur_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];

		if (cur_pipe == NULL || new_pipe == NULL)
			continue;

		if ((!cur_pipe->plane_state && new_pipe->plane_state) ||
		    (!cur_pipe->stream && new_pipe->stream) ||
		    (cur_pipe->stream != new_pipe->stream && new_pipe->stream)) {
			// New pipe addition
			for (j = 0; j < PG_HW_PIPE_RESOURCES_NUM_ELEMENT; j++) {
				if (j == PG_HUBP && new_pipe->plane_res.hubp)
					update_state->pg_pipe_res_update[j][new_pipe->plane_res.hubp->inst] = true;

				if (j == PG_DPP && new_pipe->plane_res.dpp)
					update_state->pg_pipe_res_update[j][new_pipe->plane_res.dpp->inst] = true;

				if (j == PG_MPCC && new_pipe->plane_res.dpp)
					update_state->pg_pipe_res_update[j][new_pipe->plane_res.mpcc_inst] = true;

				if (j == PG_DSC && new_pipe->stream_res.dsc)
					update_state->pg_pipe_res_update[j][new_pipe->stream_res.dsc->inst] = true;

				if (j == PG_OPP && new_pipe->stream_res.opp)
					update_state->pg_pipe_res_update[j][new_pipe->stream_res.opp->inst] = true;

				if (j == PG_OPTC && new_pipe->stream_res.tg)
					update_state->pg_pipe_res_update[j][new_pipe->stream_res.tg->inst] = true;

				if (j == PG_DPSTREAM && new_pipe->stream_res.hpo_dp_stream_enc)
					update_state->pg_pipe_res_update[j][new_pipe->stream_res.hpo_dp_stream_enc->inst] = true;
			}
		} else if (cur_pipe->plane_state == new_pipe->plane_state ||
				cur_pipe == new_pipe) {
			// Unchanged pipes
			for (j = 0; j < PG_HW_PIPE_RESOURCES_NUM_ELEMENT; j++) {
				if (j == PG_HUBP &&
				    cur_pipe->plane_res.hubp != new_pipe->plane_res.hubp &&
				    new_pipe->plane_res.hubp)
					update_state->pg_pipe_res_update[j][new_pipe->plane_res.hubp->inst] = true;

				if (j == PG_DPP &&
				    cur_pipe->plane_res.dpp != new_pipe->plane_res.dpp &&
				    new_pipe->plane_res.dpp)
					update_state->pg_pipe_res_update[j][new_pipe->plane_res.dpp->inst] = true;

				if (j == PG_OPP &&
				    cur_pipe->stream_res.opp != new_pipe->stream_res.opp &&
				    new_pipe->stream_res.opp)
					update_state->pg_pipe_res_update[j][new_pipe->stream_res.opp->inst] = true;

				if (j == PG_DSC &&
				    cur_pipe->stream_res.dsc != new_pipe->stream_res.dsc &&
				    new_pipe->stream_res.dsc)
					update_state->pg_pipe_res_update[j][new_pipe->stream_res.dsc->inst] = true;

				if (j == PG_OPTC &&
				    cur_pipe->stream_res.tg != new_pipe->stream_res.tg &&
				    new_pipe->stream_res.tg)
					update_state->pg_pipe_res_update[j][new_pipe->stream_res.tg->inst] = true;

				if (j == PG_DPSTREAM &&
				    cur_pipe->stream_res.hpo_dp_stream_enc != new_pipe->stream_res.hpo_dp_stream_enc &&
				    new_pipe->stream_res.hpo_dp_stream_enc)
					update_state->pg_pipe_res_update[j][new_pipe->stream_res.hpo_dp_stream_enc->inst] = true;
			}
		}
	}

	for (i = 0; i < dc->link_count; i++)
		if (dc->links[i]->type != dc_connection_none)
			update_state->pg_pipe_res_update[PG_PHYSYMCLK][dc->links[i]->link_enc_hw_inst] = true;

	for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++) {
		if (context->res_ctx.is_hpo_dp_stream_enc_acquired[i] &&
				dc->res_pool->hpo_dp_stream_enc[i]) {
			hpo_dp_stream_enc_acquired = true;
			break;
		}
	}

	if (hpo_frl_stream_enc_acquired || hpo_dp_stream_enc_acquired)
		update_state->pg_res_update[PG_HPO] = true;

	if (hpo_frl_stream_enc_acquired)
		update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true;

	if (dc->caps.sequential_ono) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];

			if (new_pipe->stream_res.dsc && !new_pipe->top_pipe &&
			    update_state->pg_pipe_res_update[PG_DSC][new_pipe->stream_res.dsc->inst]) {
				update_state->pg_pipe_res_update[PG_HUBP][new_pipe->stream_res.dsc->inst] = true;
				update_state->pg_pipe_res_update[PG_DPP][new_pipe->stream_res.dsc->inst] = true;

				/* All HUBP/DPP instances must be powered if the DSC inst != HUBP inst */
				if (new_pipe->plane_res.hubp &&
				    new_pipe->plane_res.hubp->inst != new_pipe->stream_res.dsc->inst) {
					for (j = 0; j < dc->res_pool->pipe_count; ++j) {
						update_state->pg_pipe_res_update[PG_HUBP][j] = true;
						update_state->pg_pipe_res_update[PG_DPP][j] = true;
					}
				}
			}
		}

		for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
			if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
			    update_state->pg_pipe_res_update[PG_DPP][i]) {
				for (j = i - 1; j >= 0; j--) {
					update_state->pg_pipe_res_update[PG_HUBP][j] = true;
					update_state->pg_pipe_res_update[PG_DPP][j] = true;
				}

				break;
			}
		}
	}
}

/**
 * dcn35_hw_block_power_down() - power down sequence
 *
 * The following sequence describes the ON-OFF (ONO) for power down:
 *
 * ONO Region 3, DCPG 25: hpo - SKIPPED
 * ONO Region 4, DCPG 0: dchubp0, dpp0
 * ONO Region 6, DCPG 1: dchubp1, dpp1
 * ONO Region 8, DCPG 2: dchubp2, dpp2
 * ONO Region 10, DCPG 3: dchubp3, dpp3
 * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will power down at IPS2 entry
 * ONO Region 5, DCPG 16: dsc0
 * ONO Region 7, DCPG 17: dsc1
 * ONO Region 9, DCPG 18: dsc2
 * ONO Region 11, DCPG 19: dsc3
 * ONO Region 2, DCPG 24: mpc opp optc dwb
 * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED. Will be powered down after the LONO timer is armed
 *
 * If sequential ONO is specified, the order is modified to descend from ONO Region 11 to ONO Region 0.
 *
 * @dc: Current DC state
 * @update_state: update PG sequence states for HW block
 */
void dcn35_hw_block_power_down(struct dc *dc,
	struct pg_block_update *update_state)
{
	int i = 0;
	struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;

	if (!pg_cntl)
		return;
	if (dc->debug.ignore_pg)
		return;

	if (update_state->pg_res_update[PG_HPO]) {
		if (pg_cntl->funcs->hpo_pg_control)
			pg_cntl->funcs->hpo_pg_control(pg_cntl, false);
	}

	if (!dc->caps.sequential_ono) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
			    update_state->pg_pipe_res_update[PG_DPP][i]) {
				if (pg_cntl->funcs->hubp_dpp_pg_control)
					pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, false);
			}
		}

		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
			if (update_state->pg_pipe_res_update[PG_DSC][i]) {
				if (pg_cntl->funcs->dsc_pg_control)
					pg_cntl->funcs->dsc_pg_control(pg_cntl, i, false);
			}
		}
	} else {
		for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
			if (update_state->pg_pipe_res_update[PG_DSC][i]) {
				if (pg_cntl->funcs->dsc_pg_control)
					pg_cntl->funcs->dsc_pg_control(pg_cntl, i, false);
			}

			if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
			    update_state->pg_pipe_res_update[PG_DPP][i]) {
				if (pg_cntl->funcs->hubp_dpp_pg_control)
					pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, false);
			}
		}
	}

	/* This requires all the clients to unregister OPTC interrupts; let dmub fw handle it */
	if (pg_cntl->funcs->plane_otg_pg_control)
		pg_cntl->funcs->plane_otg_pg_control(pg_cntl, false);

	// Domains 22, 23 and 25 are currently always on.
}

/**
 * dcn35_hw_block_power_up() - power up sequence
 *
 * The following sequence describes the ON-OFF (ONO) for power up:
 *
 * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED
 * ONO Region 2, DCPG 24: mpc opp optc dwb
 * ONO Region 5, DCPG 16: dsc0
 * ONO Region 7, DCPG 17: dsc1
 * ONO Region 9, DCPG 18: dsc2
 * ONO Region 11, DCPG 19: dsc3
 * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will power up at IPS2 exit
 * ONO Region 4, DCPG 0: dchubp0, dpp0
 * ONO Region 6, DCPG 1: dchubp1, dpp1
 * ONO Region 8, DCPG 2: dchubp2, dpp2
 * ONO Region 10, DCPG 3: dchubp3, dpp3
 * ONO Region 3, DCPG 25: hpo - SKIPPED
 *
 * If sequential ONO is specified, the order is modified to ascend from ONO Region 0 to ONO Region 11.
 *
 * @dc: Current DC state
 * @update_state: update PG sequence states for HW block
 */
void dcn35_hw_block_power_up(struct dc *dc,
	struct pg_block_update *update_state)
{
	int i = 0;
	struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;

	if (!pg_cntl)
		return;
	if (dc->debug.ignore_pg)
		return;
	// Domains 22, 23 and 25 are currently always on.
	/* This requires all the clients to unregister OPTC interrupts; let dmub fw handle it */
	if (pg_cntl->funcs->plane_otg_pg_control)
		pg_cntl->funcs->plane_otg_pg_control(pg_cntl, true);

	if (!dc->caps.sequential_ono) {
		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++)
			if (update_state->pg_pipe_res_update[PG_DSC][i]) {
				if (pg_cntl->funcs->dsc_pg_control)
					pg_cntl->funcs->dsc_pg_control(pg_cntl, i, true);
			}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
		    update_state->pg_pipe_res_update[PG_DPP][i]) {
			if (pg_cntl->funcs->hubp_dpp_pg_control)
				pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, true);
		}

		if (dc->caps.sequential_ono) {
			if (update_state->pg_pipe_res_update[PG_DSC][i]) {
				if (pg_cntl->funcs->dsc_pg_control)
					pg_cntl->funcs->dsc_pg_control(pg_cntl, i, true);
			}
		}
	}
	if (update_state->pg_res_update[PG_HPO]) {
		if (pg_cntl->funcs->hpo_pg_control)
			pg_cntl->funcs->hpo_pg_control(pg_cntl, true);
	}
}
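
/*
 * Gate/ungate root clocks for the blocks flagged in @update_state. Root
 * clocks are enabled before blocks are powered up and disabled after
 * blocks are powered down (see dcn35_prepare_bandwidth() and
 * dcn35_optimize_bandwidth() below for the calling order).
 */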
void dcn35_root_clock_control(struct dc *dc,
	struct pg_block_update *update_state, bool power_on)
{
	int i = 0;
	struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;

	if (!pg_cntl)
		return;
	/* enable root clock first when powering up */
	if (power_on) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
			    update_state->pg_pipe_res_update[PG_DPP][i]) {
				if (dc->hwseq->funcs.dpp_root_clock_control)
					dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on);
			}
			if (update_state->pg_pipe_res_update[PG_DPSTREAM][i])
				if (dc->hwseq->funcs.dpstream_root_clock_control)
					dc->hwseq->funcs.dpstream_root_clock_control(dc->hwseq, i, power_on);
		}

		for (i = 0; i < dc->res_pool->dig_link_enc_count; i++)
			if (update_state->pg_pipe_res_update[PG_PHYSYMCLK][i])
				if (dc->hwseq->funcs.physymclk_root_clock_control)
					dc->hwseq->funcs.physymclk_root_clock_control(dc->hwseq, i, power_on);
	}
	for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
		if (update_state->pg_pipe_res_update[PG_DSC][i]) {
			if (power_on) {
				if (dc->res_pool->dccg->funcs->enable_dsc)
					dc->res_pool->dccg->funcs->enable_dsc(dc->res_pool->dccg, i);
			} else {
				if (dc->res_pool->dccg->funcs->disable_dsc)
					dc->res_pool->dccg->funcs->disable_dsc(dc->res_pool->dccg, i);
			}
		}
	}
	/* disable root clock first when powering down */
	if (!power_on) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
			    update_state->pg_pipe_res_update[PG_DPP][i]) {
				if (dc->hwseq->funcs.dpp_root_clock_control)
					dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on);
			}
			if (update_state->pg_pipe_res_update[PG_DPSTREAM][i])
				if (dc->hwseq->funcs.dpstream_root_clock_control)
					dc->hwseq->funcs.dpstream_root_clock_control(dc->hwseq, i, power_on);
		}

		for (i = 0; i < dc->res_pool->dig_link_enc_count; i++)
			if (update_state->pg_pipe_res_update[PG_PHYSYMCLK][i])
				if (dc->hwseq->funcs.physymclk_root_clock_control)
					dc->hwseq->funcs.physymclk_root_clock_control(dc->hwseq, i, power_on);
	}
}

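/*
 * Before bandwidth is raised for a new state: ungate root clocks and
 * power up any blocks the state needs, then run the common DCN2.0
 * bandwidth programming.
 */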
void dcn35_prepare_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct pg_block_update pg_update_state;

	if (dc->hwss.calc_blocks_to_ungate) {
		dc->hwss.calc_blocks_to_ungate(dc, context, &pg_update_state);

		if (dc->hwss.root_clock_control)
			dc->hwss.root_clock_control(dc, &pg_update_state, true);
		/* power up required HW blocks */
		if (dc->hwss.hw_block_power_up)
			dc->hwss.hw_block_power_up(dc, &pg_update_state);
	}

	dcn20_prepare_bandwidth(dc, context);
}

void dcn35_optimize_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct pg_block_update pg_update_state;

	dcn20_optimize_bandwidth(dc, context);

	if (dc->hwss.calc_blocks_to_gate) {
		dc->hwss.calc_blocks_to_gate(dc, context, &pg_update_state);
		/* try to power down unused blocks */
		if (dc->hwss.hw_block_power_down)
			dc->hwss.hw_block_power_down(dc, &pg_update_state);

		if (dc->hwss.root_clock_control)
			dc->hwss.root_clock_control(dc, &pg_update_state, false);
	}
}

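/*
 * Program DRR (variable V-total) limits on each pipe's OTG and arm the
 * static screen trigger (OTG_TRIG_A). When IPS is supported and a panel
 * runs at >= 120 Hz, the static-frame count is derived from the panel
 * frame rate instead of the fixed default of 2.
 */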
void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
		int num_pipes, struct dc_crtc_timing_adjust adjust)
{
	int i = 0;
	struct drr_params params = {0};
	// DRR set trigger event mapped to OTG_TRIG_A
	unsigned int event_triggers = 0x2; // Bit[1]: OTG_TRIG_A
	// Note DRR trigger events are generated regardless of whether num frames met.
	unsigned int num_frames = 2;

	params.vertical_total_max = adjust.v_total_max;
	params.vertical_total_min = adjust.v_total_min;
	params.vertical_total_mid = adjust.v_total_mid;
	params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;

	for (i = 0; i < num_pipes; i++) {
		/* dc_state_destruct() might null the stream resources, so fetch tg
		 * here first to avoid a race condition. The lifetime of the pointee
		 * itself (the timing_generator object) is not a problem here.
		 */
		struct timing_generator *tg = pipe_ctx[i]->stream_res.tg;

		if ((tg != NULL) && tg->funcs) {
			if (pipe_ctx[i]->stream && pipe_ctx[i]->stream->ctx->dc->debug.static_screen_wait_frames) {
				struct dc_crtc_timing *timing = &pipe_ctx[i]->stream->timing;
				struct dc *dc = pipe_ctx[i]->stream->ctx->dc;
				unsigned int frame_rate = timing->pix_clk_100hz / (timing->h_total * timing->v_total);

				if (frame_rate >= 120 && dc->caps.ips_support &&
				    dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) {
					/* IPS enabled case */
					num_frames = 2 * (frame_rate % 60);
				}
			}
			set_drr_and_clear_adjust_pending(pipe_ctx[i], pipe_ctx[i]->stream, &params);
			if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
				if (tg->funcs->set_static_screen_control)
					tg->funcs->set_static_screen_control(
							tg, event_triggers, num_frames);
		}
	}
}
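
/*
 * Translate generic static-screen trigger flags into the OTG event mask:
 * bit 9 = surface update, bit 3 = cursor update, bit 0 = force trigger.
 */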
void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx,
		int num_pipes, const struct dc_static_screen_params *params)
{
	unsigned int i;
	unsigned int triggers = 0;

	if (params->triggers.surface_update)
		triggers |= 0x200; /* bit 9: 10 0000 0000 */
	if (params->triggers.cursor_update)
		triggers |= 0x8; /* bit 3 */
	if (params->triggers.force_trigger)
		triggers |= 0x1;
	for (i = 0; i < num_pipes; i++)
		pipe_ctx[i]->stream_res.tg->funcs->
			set_static_screen_control(pipe_ctx[i]->stream_res.tg,
					triggers, params->num_frames);
}

void dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx,
		int num_pipes, uint32_t v_total_min, uint32_t v_total_max)
{
	int i = 0;
	struct long_vtotal_params params = {0};

	params.vertical_total_max = v_total_max;
	params.vertical_total_min = v_total_min;

	for (i = 0; i < num_pipes; i++) {
		if (!pipe_ctx[i])
			continue;

		if (pipe_ctx[i]->stream) {
			struct dc_crtc_timing *timing = &pipe_ctx[i]->stream->timing;

			if (timing)
				params.vertical_blank_start = timing->v_total - timing->v_front_porch;
			else
				params.vertical_blank_start = 0;

			if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs &&
			    pipe_ctx[i]->stream_res.tg->funcs->set_long_vtotal)
				pipe_ctx[i]->stream_res.tg->funcs->set_long_vtotal(pipe_ctx[i]->stream_res.tg, &params);
		}
	}
}

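/*
 * Worked example for the check below (illustrative numbers, not taken
 * from a specific panel): a 148 MHz pixel clock tunneled over HBR
 * (270 MHz symbol clock) with 4 lanes gives
 * avg_pix_per_tu_x1000 = (1000 * 148 * 64) / (270 * 4) = 8770,
 * i.e. about 8.77 pixels per TU, well above the ~2.02 cutoff, so the
 * half pixel rate policy is not blocked for that mode.
 */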
static bool should_avoid_empty_tu(struct pipe_ctx *pipe_ctx)
{
	/* Calculate average pixel count per TU, return false if under ~2.00 to
	 * avoid empty TUs. This is only required for DPIA tunneling as empty TUs
	 * are legal to generate for native DP links. Assume TU size 64 as there
	 * is currently no scenario where it's reprogrammed from HW default.
	 * MTPs have no such limitation, so this does not affect MST use cases.
	 */
	unsigned int pix_clk_mhz;
	unsigned int symclk_mhz;
	unsigned int avg_pix_per_tu_x1000;
	unsigned int tu_size_bytes = 64;
	struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
	struct dc_link_settings *link_settings = &pipe_ctx->link_config.dp_link_settings;
	const struct dc *dc = pipe_ctx->stream->link->dc;

	if (pipe_ctx->stream->link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
		return false;

	// Not necessary for MST configurations
	if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
		return false;

	pix_clk_mhz = timing->pix_clk_100hz / 10000;

	// If this is true, can't block due to dynamic ODM
	if (pix_clk_mhz > dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz)
		return false;

	switch (link_settings->link_rate) {
	case LINK_RATE_LOW:
		symclk_mhz = 162;
		break;
	case LINK_RATE_HIGH:
		symclk_mhz = 270;
		break;
	case LINK_RATE_HIGH2:
		symclk_mhz = 540;
		break;
	case LINK_RATE_HIGH3:
		symclk_mhz = 810;
		break;
	default:
		// We shouldn't be tunneling any other rates, something is wrong
		ASSERT(0);
		return false;
	}

	avg_pix_per_tu_x1000 = (1000 * pix_clk_mhz * tu_size_bytes)
			/ (symclk_mhz * link_settings->lane_count);

	// Add small empirically-decided margin to account for potential jitter
	return (avg_pix_per_tu_x1000 < 2020);
}

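/*
 * Policy check for running the DP DIG at half pixel rate: requires
 * horizontal timing divisible by 2, no DPIA empty-TU risk, an 8b/10b DP
 * signal, and the enable_dp_dig_pixel_rate_div_policy debug flag.
 */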
bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
{
	struct dc *dc = pipe_ctx->stream->ctx->dc;

	if (!is_h_timing_divisible_by_2(pipe_ctx->stream))
		return false;

	if (should_avoid_empty_tu(pipe_ctx))
		return false;

	if (dc_is_dp_signal(pipe_ctx->stream->signal) && !dc->link_srv->dp_is_128b_132b_signal(pipe_ctx) &&
	    dc->debug.enable_dp_dig_pixel_rate_div_policy)
		return true;

	return false;
}