1 /*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
30 #include "resource.h"
31 #include "custom_float.h"
32 #include "dcn10_hwseq.h"
33 #include "dcn10/dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
35 #include "abm.h"
36 #include "dmcu.h"
37 #include "dcn10/dcn10_optc.h"
38 #include "dcn10/dcn10_dpp.h"
39 #include "dcn10/dcn10_mpc.h"
40 #include "timing_generator.h"
41 #include "opp.h"
42 #include "ipp.h"
43 #include "mpc.h"
44 #include "reg_helper.h"
45 #include "dcn10/dcn10_hubp.h"
46 #include "dcn10/dcn10_hubbub.h"
47 #include "dcn10/dcn10_cm_common.h"
48 #include "dccg.h"
49 #include "clk_mgr.h"
50 #include "link_hwss.h"
51 #include "dpcd_defs.h"
52 #include "dsc.h"
53 #include "dce/dmub_psr.h"
54 #include "dc_dmub_srv.h"
55 #include "dce/dmub_hw_lock_mgr.h"
56 #include "dc_trace.h"
57 #include "dce/dmub_outbox.h"
58 #include "link.h"
59 #include "dc_state_priv.h"
60
61 #define DC_LOGGER \
62 dc_logger
63 #define DC_LOGGER_INIT(logger) \
64 struct dal_logger *dc_logger = logger
65
66 #define CTX \
67 hws->ctx
68 #define REG(reg)\
69 hws->regs->reg
70
71 #undef FN
72 #define FN(reg_name, field_name) \
73 hws->shifts->field_name, hws->masks->field_name
74
75 /*print is 17 wide, first two characters are spaces*/
76 #define DTN_INFO_MICRO_SEC(ref_cycle) \
77 print_microsec(dc_ctx, log_ctx, ref_cycle)
78
79 #define GAMMA_HW_POINTS_NUM 256
80
81 #define PGFSM_POWER_ON 0
82 #define PGFSM_POWER_OFF 2
83
/*
 * Print a reference-clock cycle count as microseconds with three
 * fractional digits into the DTN log.
 *
 * @dc_ctx:    dc context, used to look up the DCHUB reference clock.
 * @log_ctx:   log buffer the DTN_INFO macro writes into.
 * @ref_cycle: duration expressed in DCHUB reference clock cycles.
 */
static void print_microsec(struct dc_context *dc_ctx,
		struct dc_log_buffer_ctx *log_ctx,
		uint32_t ref_cycle)
{
	/* dchub_ref_clock_inKhz / 1000 yields the reference clock in MHz */
	const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
	static const unsigned int frac = 1000;
	/*
	 * Microseconds scaled by 'frac' so integer math keeps three decimal
	 * places.  NOTE(review): ref_cycle * frac is 32-bit and can wrap for
	 * cycle counts above ~4.29M -- assumed small here, confirm callers.
	 */
	uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;

	DTN_INFO(" %11d.%03d",
			us_x10 / frac,
			us_x10 % frac);
}
96
/**
 * dcn10_lock_all_pipes - Lock or unlock the top pipe of every active tree.
 * @dc: dc struct reference.
 * @context: state whose pipes are to be (un)locked.
 * @lock: true to lock, false to unlock.
 *
 * Only the top pipe of each pipe tree is touched to avoid redundant
 * (un)locking.  Pipes with no stream, with neither a current nor a new
 * plane state, with a disabled timing generator, or that are SubVP
 * phantom pipes are skipped.
 */
void dcn10_lock_all_pipes(struct dc *dc,
	struct dc_state *context,
	bool lock)
{
	struct pipe_ctx *pipe_ctx;
	struct pipe_ctx *old_pipe_ctx;
	struct timing_generator *tg;
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
		pipe_ctx = &context->res_ctx.pipe_ctx[i];
		tg = pipe_ctx->stream_res.tg;

		/*
		 * Only lock the top pipe's tg to prevent redundant
		 * (un)locking. Also skip if pipe is disabled.
		 */
		if (pipe_ctx->top_pipe ||
		    !pipe_ctx->stream ||
		    (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) ||
		    !tg->funcs->is_tg_enabled(tg) ||
		    dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM)
			continue;

		/*
		 * Pass the request straight through; the previous
		 * if (lock) ... true; else ... false; branch only
		 * re-derived the literal from 'lock'.
		 */
		dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
	}
}
128
/*
 * Dump the MPC and DPP CRC result registers to the DTN log.  Each
 * register is read only if it exists in this ASIC's register list
 * (REG() evaluates to 0 for registers that are not defined).
 */
static void log_mpc_crc(struct dc *dc,
		struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct dce_hwseq *hws = dc->hwseq;

	if (REG(MPC_CRC_RESULT_GB))
		DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
				REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
	if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
		DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
				REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
}
142
dcn10_log_hubbub_state(struct dc * dc,struct dc_log_buffer_ctx * log_ctx)143 static void dcn10_log_hubbub_state(struct dc *dc,
144 struct dc_log_buffer_ctx *log_ctx)
145 {
146 struct dc_context *dc_ctx = dc->ctx;
147 struct dcn_hubbub_wm wm;
148 int i;
149
150 memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
151 dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
152
153 DTN_INFO("HUBBUB WM: data_urgent pte_meta_urgent"
154 " sr_enter sr_exit dram_clk_change\n");
155
156 for (i = 0; i < 4; i++) {
157 struct dcn_hubbub_wm_set *s;
158
159 s = &wm.sets[i];
160 DTN_INFO("WM_Set[%d]:", s->wm_set);
161 DTN_INFO_MICRO_SEC(s->data_urgent);
162 DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
163 DTN_INFO_MICRO_SEC(s->sr_enter);
164 DTN_INFO_MICRO_SEC(s->sr_exit);
165 DTN_INFO_MICRO_SEC(s->dram_clk_change);
166 DTN_INFO("\n");
167 }
168
169 DTN_INFO("\n");
170 }
171
/*
 * Dump per-HUBP state to the DTN log: surface configuration, then the
 * RQ (request), DLG (delay) and TTU (time-to-underflow) register sets.
 * Pipes whose HUBP is blanked are skipped in every section.
 */
static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	int i;

	DTN_INFO(
		"HUBP: format addr_hi width height rot mir sw_mode dcc_en blank_en clock_en ttu_dis underflow min_ttu_vblank qos_low_wm qos_high_wm\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct hubp *hubp = pool->hubps[i];
		/* hubp_read_state() below refreshes this cached register state */
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);

		hubp->funcs->hubp_read_state(hubp);

		if (!s->blank_en) {
			DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh %6d %8d %8d %7d %8xh",
					hubp->inst,
					s->pixel_format,
					s->inuse_addr_hi,
					s->viewport_width,
					s->viewport_height,
					s->rotation_angle,
					s->h_mirror_en,
					s->sw_mode,
					s->dcc_en,
					s->blank_en,
					s->clock_en,
					s->ttu_disable,
					s->underflow_status);
			/* watermarks printed in microseconds */
			DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
			DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
			DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
			DTN_INFO("\n");
		}
	}

	/* Request (RQ) registers: chunk/group sizing for luma (L) and chroma (C) */
	DTN_INFO("\n=========RQ========\n");
	DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
		" min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
		" min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;

		if (!s->blank_en)
			DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
				pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
				rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
				rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
				rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
				rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
				rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
				rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
				rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
				rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
	}

	/* Delay (DLG) registers: prefetch/vready scheduling attributes */
	DTN_INFO("========DLG========\n");
	DTN_INFO("HUBP: rc_hbe dlg_vbe min_d_y_n rc_per_ht rc_x_a_s "
			" dst_y_a_s dst_y_pf dst_y_vvb dst_y_rvb dst_y_vfl dst_y_rfl rf_pix_fq"
			" vratio_pf vrat_pf_c rc_pg_vbl rc_pg_vbc rc_mc_vbl rc_mc_vbc rc_pg_fll"
			" rc_pg_flc rc_mc_fll rc_mc_flc pr_nom_l pr_nom_c rc_pg_nl rc_pg_nc "
			" mr_nom_l mr_nom_c rc_mc_nl rc_mc_nc rc_ld_pl rc_ld_pc rc_ld_l "
			" rc_ld_c cha_cur0 ofst_cur1 cha_cur1 vr_af_vc0 ddrq_limt x_rt_dlay"
			" x_rp_dlay x_rr_sfl rc_td_grp\n");

	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;

		if (!s->blank_en)
			DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
				" %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
				" %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %xh\n",
				pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
				dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
				dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
				dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
				dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
				dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
				dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
				dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
				dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
				dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
				dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
				dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
				dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
				dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
				dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
				dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
				dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
				dlg_regs->xfc_reg_remote_surface_flip_latency, dlg_regs->refcyc_per_tdlut_group);
	}

	/* Time-to-underflow (TTU) registers: QoS watermarks and delivery rates */
	DTN_INFO("========TTU========\n");
	DTN_INFO("HUBP: qos_ll_wm qos_lh_wm mn_ttu_vb qos_l_flp rc_rd_p_l rc_rd_l rc_rd_p_c"
			" rc_rd_c rc_rd_c0 rc_rd_pc0 rc_rd_c1 rc_rd_pc1 qos_lf_l qos_rds_l"
			" qos_lf_c qos_rds_c qos_lf_c0 qos_rds_c0 qos_lf_c1 qos_rds_c1\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;

		if (!s->blank_en)
			DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
				pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
				ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
				ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
				ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
				ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
				ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
				ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
	}
	DTN_INFO("\n");
}
286
/*
 * Dump the color pipeline state to the DTN log: per-DPP gamma LUT modes
 * and gamut-remap matrix, DPP color caps, per-MPCC blending state, and
 * MPC color caps.  Disabled DPPs are skipped.
 */
static void dcn10_log_color_state(struct dc *dc,
				  struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	bool is_gamut_remap_available = false;
	int i;

	DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
		 " GAMUT adjust "
		 "C11 C12 C13 C14 "
		 "C21 C22 C23 C24 "
		 "C31 C32 C33 C34 \n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dpp *dpp = pool->dpps[i];
		struct dcn_dpp_state s = {0};

		dpp->funcs->dpp_read_state(dpp, &s);
		/* gamut remap readback is an optional hook on older DPPs */
		if (dpp->funcs->dpp_get_gamut_remap) {
			dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
			is_gamut_remap_available = true;
		}

		if (!s.is_enabled)
			continue;

		/* decode the input/degamma/regamma LUT mode registers to names */
		DTN_INFO("[%2d]: %11xh %11s %9s %9s",
				dpp->inst,
				s.igam_input_format,
				(s.igam_lut_mode == 0) ? "BypassFixed" :
					((s.igam_lut_mode == 1) ? "BypassFloat" :
					((s.igam_lut_mode == 2) ? "RAM" :
					((s.igam_lut_mode == 3) ? "RAM" :
								 "Unknown"))),
				(s.dgam_lut_mode == 0) ? "Bypass" :
					((s.dgam_lut_mode == 1) ? "sRGB" :
					((s.dgam_lut_mode == 2) ? "Ycc" :
					((s.dgam_lut_mode == 3) ? "RAM" :
					((s.dgam_lut_mode == 4) ? "RAM" :
								 "Unknown")))),
				(s.rgam_lut_mode == 0) ? "Bypass" :
					((s.rgam_lut_mode == 1) ? "sRGB" :
					((s.rgam_lut_mode == 2) ? "Ycc" :
					((s.rgam_lut_mode == 3) ? "RAM" :
					((s.rgam_lut_mode == 4) ? "RAM" :
								 "Unknown")))));
		if (is_gamut_remap_available)
			/* 3x4 gamut remap matrix, row-major C11..C34 */
			DTN_INFO("  %12s  "
				 "%010lld %010lld %010lld %010lld "
				 "%010lld %010lld %010lld %010lld "
				 "%010lld %010lld %010lld %010lld",
				 (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
					((s.gamut_remap.gamut_adjust_type == 1) ? "HW" : "SW"),
				 s.gamut_remap.temperature_matrix[0].value,
				 s.gamut_remap.temperature_matrix[1].value,
				 s.gamut_remap.temperature_matrix[2].value,
				 s.gamut_remap.temperature_matrix[3].value,
				 s.gamut_remap.temperature_matrix[4].value,
				 s.gamut_remap.temperature_matrix[5].value,
				 s.gamut_remap.temperature_matrix[6].value,
				 s.gamut_remap.temperature_matrix[7].value,
				 s.gamut_remap.temperature_matrix[8].value,
				 s.gamut_remap.temperature_matrix[9].value,
				 s.gamut_remap.temperature_matrix[10].value,
				 s.gamut_remap.temperature_matrix[11].value);

		DTN_INFO("\n");
	}
	DTN_INFO("\n");
	DTN_INFO("DPP Color Caps: input_lut_shared:%d icsc:%d"
		 " dgam_ram:%d dgam_rom: srgb:%d,bt2020:%d,gamma2_2:%d,pq:%d,hlg:%d"
		 " post_csc:%d gamcor:%d dgam_rom_for_yuv:%d 3d_lut:%d"
		 " blnd_lut:%d oscs:%d\n\n",
		 dc->caps.color.dpp.input_lut_shared,
		 dc->caps.color.dpp.icsc,
		 dc->caps.color.dpp.dgam_ram,
		 dc->caps.color.dpp.dgam_rom_caps.srgb,
		 dc->caps.color.dpp.dgam_rom_caps.bt2020,
		 dc->caps.color.dpp.dgam_rom_caps.gamma2_2,
		 dc->caps.color.dpp.dgam_rom_caps.pq,
		 dc->caps.color.dpp.dgam_rom_caps.hlg,
		 dc->caps.color.dpp.post_csc,
		 dc->caps.color.dpp.gamma_corr,
		 dc->caps.color.dpp.dgam_rom_for_yuv,
		 dc->caps.color.dpp.hw_3d_lut,
		 dc->caps.color.dpp.ogam_ram,
		 dc->caps.color.dpp.ocsc);

	DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n");
	for (i = 0; i < pool->mpcc_count; i++) {
		struct mpcc_state s = {0};

		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
		/* opp_id == 0xf appears to mark an unused MPCC -- confirm */
		if (s.opp_id != 0xf)
			DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d\n",
				i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
				s.idle);
	}
	DTN_INFO("\n");
	DTN_INFO("MPC Color Caps: gamut_remap:%d, 3dlut:%d, ogam_ram:%d, ocsc:%d\n\n",
		 dc->caps.color.mpc.gamut_remap,
		 dc->caps.color.mpc.num_3dluts,
		 dc->caps.color.mpc.ogam_ram,
		 dc->caps.color.mpc.ocsc);
}
393
/*
 * dcn10_log_hw_state - Dump a full DCN hardware state snapshot to the
 * DTN log: HUBBUB watermarks, HUBP state, color pipeline, OTG timing,
 * DSC, stream/link encoders, calculated clocks, CRCs, and (if present)
 * the DP HPO stream/link encoders.
 */
void dcn10_log_hw_state(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	int i;

	DTN_INFO_BEGIN();

	dcn10_log_hubbub_state(dc, log_ctx);

	dcn10_log_hubp_states(dc, log_ctx);

	/* prefer the per-ASIC color logger when the hwseq provides one */
	if (dc->hwss.log_color_state)
		dc->hwss.log_color_state(dc, log_ctx);
	else
		dcn10_log_color_state(dc, log_ctx);

	DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel h_bs h_be h_ss h_se hpol htot vtot underflow blank_en\n");

	for (i = 0; i < pool->timing_generator_count; i++) {
		struct timing_generator *tg = pool->timing_generators[i];
		struct dcn_otg_state s = {0};
		/* Read shared OTG state registers for all DCNx */
		optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);

		/*
		 * For DCN2 and greater, a register on the OPP is used to
		 * determine if the CRTC is blanked instead of the OTG. So use
		 * dpg_is_blanked() if exists, otherwise fallback on otg.
		 *
		 * TODO: Implement DCN-specific read_otg_state hooks.
		 */
		if (pool->opps[i]->funcs->dpg_is_blanked)
			s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
		else
			s.blank_enabled = tg->funcs->is_blanked(tg);

		//only print if OTG master is enabled
		if ((s.otg_enabled & 1) == 0)
			continue;

		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d %9d %8d\n",
				tg->inst,
				s.v_blank_start,
				s.v_blank_end,
				s.v_sync_a_start,
				s.v_sync_a_end,
				s.v_sync_a_pol,
				s.v_total_max,
				s.v_total_min,
				s.v_total_max_sel,
				s.v_total_min_sel,
				s.h_blank_start,
				s.h_blank_end,
				s.h_sync_a_start,
				s.h_sync_a_end,
				s.h_sync_a_pol,
				s.h_total,
				s.v_total,
				s.underflow_occurred_status,
				s.blank_enabled);

		// Clear underflow for debug purposes
		// We want to keep underflow sticky bit on for the longevity tests outside of test environment.
		// This function is called only from Windows or Diags test environment, hence it's safe to clear
		// it from here without affecting the original intent.
		tg->funcs->clear_optc_underflow(tg);
	}
	DTN_INFO("\n");

	// dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
	// TODO: Update golden log header to reflect this name change
	DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n");
	for (i = 0; i < pool->res_cap->num_dsc; i++) {
		struct display_stream_compressor *dsc = pool->dscs[i];
		struct dcn_dsc_state s = {0};

		dsc->funcs->dsc_read_state(dsc, &s);
		DTN_INFO("[%d]: %-9d %-12d %-10d\n",
		dsc->inst,
			s.dsc_clock_en,
			s.dsc_slice_width,
			s.dsc_bits_per_pixel);
		DTN_INFO("\n");
	}
	DTN_INFO("\n");

	DTN_INFO("S_ENC: DSC_MODE SEC_GSP7_LINE_NUM"
			" VBID6_LINE_REFERENCE VBID6_LINE_NUM SEC_GSP7_ENABLE SEC_STREAM_ENABLE\n");
	for (i = 0; i < pool->stream_enc_count; i++) {
		struct stream_encoder *enc = pool->stream_enc[i];
		struct enc_state s = {0};

		/* enc_read_state is optional; skip encoders that lack it */
		if (enc->funcs->enc_read_state) {
			enc->funcs->enc_read_state(enc, &s);
			DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
				enc->id,
				s.dsc_mode,
				s.sec_gsp_pps_line_num,
				s.vbid6_line_reference,
				s.vbid6_line_num,
				s.sec_gsp_pps_enable,
				s.sec_stream_enable);
			DTN_INFO("\n");
		}
	}
	DTN_INFO("\n");

	DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS DP_LINK_TRAINING_COMPLETE\n");
	for (i = 0; i < dc->link_count; i++) {
		struct link_encoder *lenc = dc->links[i]->link_enc;

		struct link_enc_state s = {0};

		/* link_enc may be NULL (e.g. dynamically assigned encoders) */
		if (lenc && lenc->funcs->read_state) {
			lenc->funcs->read_state(lenc, &s);
			DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
				i,
				s.dphy_fec_en,
				s.dphy_fec_ready_shadow,
				s.dphy_fec_active_status,
				s.dp_link_training_complete);
			DTN_INFO("\n");
		}
	}
	DTN_INFO("\n");

	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
		"dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);

	log_mpc_crc(dc, log_ctx);

	/* DP HPO (128b/132b) encoder sections, only on pools that have them */
	{
		if (pool->hpo_dp_stream_enc_count > 0) {
			DTN_INFO("DP HPO S_ENC: Enabled OTG Format Depth Vid SDP Compressed Link\n");
			for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
				struct hpo_dp_stream_encoder_state hpo_dp_se_state = {0};
				struct hpo_dp_stream_encoder *hpo_dp_stream_enc = pool->hpo_dp_stream_enc[i];

				if (hpo_dp_stream_enc && hpo_dp_stream_enc->funcs->read_state) {
					hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state);

					DTN_INFO("[%d]: %d %d %6s %d %d %d %d %d\n",
							hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0,
							hpo_dp_se_state.stream_enc_enabled,
							hpo_dp_se_state.otg_inst,
							(hpo_dp_se_state.pixel_encoding == 0) ? "4:4:4" :
									((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" :
									(hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"),
							(hpo_dp_se_state.component_depth == 0) ? 6 :
									((hpo_dp_se_state.component_depth == 1) ? 8 :
									(hpo_dp_se_state.component_depth == 2) ? 10 : 12),
							hpo_dp_se_state.vid_stream_enabled,
							hpo_dp_se_state.sdp_enabled,
							hpo_dp_se_state.compressed_format,
							hpo_dp_se_state.mapped_to_link_enc);
				}
			}

			DTN_INFO("\n");
		}

		/* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
		if (pool->hpo_dp_link_enc_count) {
			DTN_INFO("DP HPO L_ENC: Enabled Mode Lanes Stream Slots VC Rate X VC Rate Y\n");

			for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
				struct hpo_dp_link_encoder *hpo_dp_link_enc = pool->hpo_dp_link_enc[i];
				struct hpo_dp_link_enc_state hpo_dp_le_state = {0};

				if (hpo_dp_link_enc->funcs->read_state) {
					hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
					DTN_INFO("[%d]: %d %6s %d %d %d %d %d\n",
							hpo_dp_link_enc->inst,
							hpo_dp_le_state.link_enc_enabled,
							(hpo_dp_le_state.link_mode == 0) ? "TPS1" :
									(hpo_dp_le_state.link_mode == 1) ? "TPS2" :
									(hpo_dp_le_state.link_mode == 2) ? "ACTIVE" : "TEST",
							hpo_dp_le_state.lane_count,
							hpo_dp_le_state.stream_src[0],
							hpo_dp_le_state.slot_count[0],
							hpo_dp_le_state.vc_rate_x[0],
							hpo_dp_le_state.vc_rate_y[0]);
					DTN_INFO("\n");
				}
			}

			DTN_INFO("\n");
		}
	}

	DTN_INFO_END();
}
595
dcn10_did_underflow_occur(struct dc * dc,struct pipe_ctx * pipe_ctx)596 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
597 {
598 struct hubp *hubp = pipe_ctx->plane_res.hubp;
599 struct timing_generator *tg = pipe_ctx->stream_res.tg;
600
601 if (tg->funcs->is_optc_underflow_occurred(tg)) {
602 tg->funcs->clear_optc_underflow(tg);
603 return true;
604 }
605
606 if (hubp->funcs->hubp_get_underflow_status(hubp)) {
607 hubp->funcs->hubp_clear_underflow(hubp);
608 return true;
609 }
610 return false;
611 }
612
/*
 * Enable or disable plane power gating by forcing the HUBP and DPP
 * power domains on (forced on == gating disabled).
 */
void dcn10_enable_power_gating_plane(
	struct dce_hwseq *hws,
	bool enable)
{
	/* forcing the domains on is what disables power gating */
	bool force_on = !enable;

	/* DCHUBP0/1/2/3 */
	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);

	/* DPP0/1/2/3 */
	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
}
634
/*
 * Take the display controllers out of VGA mode.  No-op when none of the
 * four VGA controls is enabled; otherwise disables all of them and kicks
 * the VGA_TEST sequence so the DCHUBP timing is updated (see HW note).
 */
void dcn10_disable_vga(
	struct dce_hwseq *hws)
{
	unsigned int in_vga1_mode = 0;
	unsigned int in_vga2_mode = 0;
	unsigned int in_vga3_mode = 0;
	unsigned int in_vga4_mode = 0;

	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
	REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
	REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
	REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);

	/* nothing to do if no controller is in VGA mode */
	if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
			in_vga3_mode == 0 && in_vga4_mode == 0)
		return;

	REG_WRITE(D1VGA_CONTROL, 0);
	REG_WRITE(D2VGA_CONTROL, 0);
	REG_WRITE(D3VGA_CONTROL, 0);
	REG_WRITE(D4VGA_CONTROL, 0);

	/* HW Engineer's Notes:
	 *  During switch from vga->extended, if we set the VGA_TEST_ENABLE and
	 *  then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
	 *
	 *  Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
	 *  VGA_TEST_ENABLE, to leave it in the same state as before.
	 */
	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
}
667
668 /**
669 * dcn10_dpp_pg_control - DPP power gate control.
670 *
671 * @hws: dce_hwseq reference.
672 * @dpp_inst: DPP instance reference.
673 * @power_on: true if we want to enable power gate, false otherwise.
674 *
675 * Enable or disable power gate in the specific DPP instance.
676 */
dcn10_dpp_pg_control(struct dce_hwseq * hws,unsigned int dpp_inst,bool power_on)677 void dcn10_dpp_pg_control(
678 struct dce_hwseq *hws,
679 unsigned int dpp_inst,
680 bool power_on)
681 {
682 uint32_t power_gate = power_on ? 0 : 1;
683 uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
684
685 if (hws->ctx->dc->debug.disable_dpp_power_gate)
686 return;
687 if (REG(DOMAIN1_PG_CONFIG) == 0)
688 return;
689
690 switch (dpp_inst) {
691 case 0: /* DPP0 */
692 REG_UPDATE(DOMAIN1_PG_CONFIG,
693 DOMAIN1_POWER_GATE, power_gate);
694
695 REG_WAIT(DOMAIN1_PG_STATUS,
696 DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
697 1, 1000);
698 break;
699 case 1: /* DPP1 */
700 REG_UPDATE(DOMAIN3_PG_CONFIG,
701 DOMAIN3_POWER_GATE, power_gate);
702
703 REG_WAIT(DOMAIN3_PG_STATUS,
704 DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
705 1, 1000);
706 break;
707 case 2: /* DPP2 */
708 REG_UPDATE(DOMAIN5_PG_CONFIG,
709 DOMAIN5_POWER_GATE, power_gate);
710
711 REG_WAIT(DOMAIN5_PG_STATUS,
712 DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
713 1, 1000);
714 break;
715 case 3: /* DPP3 */
716 REG_UPDATE(DOMAIN7_PG_CONFIG,
717 DOMAIN7_POWER_GATE, power_gate);
718
719 REG_WAIT(DOMAIN7_PG_STATUS,
720 DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
721 1, 1000);
722 break;
723 default:
724 BREAK_TO_DEBUGGER();
725 break;
726 }
727 }
728
729 /**
730 * dcn10_hubp_pg_control - HUBP power gate control.
731 *
732 * @hws: dce_hwseq reference.
733 * @hubp_inst: DPP instance reference.
734 * @power_on: true if we want to enable power gate, false otherwise.
735 *
736 * Enable or disable power gate in the specific HUBP instance.
737 */
dcn10_hubp_pg_control(struct dce_hwseq * hws,unsigned int hubp_inst,bool power_on)738 void dcn10_hubp_pg_control(
739 struct dce_hwseq *hws,
740 unsigned int hubp_inst,
741 bool power_on)
742 {
743 uint32_t power_gate = power_on ? 0 : 1;
744 uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
745
746 if (hws->ctx->dc->debug.disable_hubp_power_gate)
747 return;
748 if (REG(DOMAIN0_PG_CONFIG) == 0)
749 return;
750
751 switch (hubp_inst) {
752 case 0: /* DCHUBP0 */
753 REG_UPDATE(DOMAIN0_PG_CONFIG,
754 DOMAIN0_POWER_GATE, power_gate);
755
756 REG_WAIT(DOMAIN0_PG_STATUS,
757 DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
758 1, 1000);
759 break;
760 case 1: /* DCHUBP1 */
761 REG_UPDATE(DOMAIN2_PG_CONFIG,
762 DOMAIN2_POWER_GATE, power_gate);
763
764 REG_WAIT(DOMAIN2_PG_STATUS,
765 DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
766 1, 1000);
767 break;
768 case 2: /* DCHUBP2 */
769 REG_UPDATE(DOMAIN4_PG_CONFIG,
770 DOMAIN4_POWER_GATE, power_gate);
771
772 REG_WAIT(DOMAIN4_PG_STATUS,
773 DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
774 1, 1000);
775 break;
776 case 3: /* DCHUBP3 */
777 REG_UPDATE(DOMAIN6_PG_CONFIG,
778 DOMAIN6_POWER_GATE, power_gate);
779
780 REG_WAIT(DOMAIN6_PG_STATUS,
781 DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
782 1, 1000);
783 break;
784 default:
785 BREAK_TO_DEBUGGER();
786 break;
787 }
788 }
789
/*
 * Un-gate the front-end (DPP + HUBP) resources for one plane/pipe.
 * Root clock is enabled first (when the hook exists), then the DPP and
 * HUBP domains are powered up inside an IP_REQUEST_EN bracket -- the
 * order of these register accesses is deliberate; do not reorder.
 */
static void power_on_plane_resources(
	struct dce_hwseq *hws,
	int plane_id)
{
	DC_LOGGER_INIT(hws->ctx->logger);

	if (hws->funcs.dpp_root_clock_control)
		hws->funcs.dpp_root_clock_control(hws, plane_id, true);

	if (REG(DC_IP_REQUEST_CNTL)) {
		/* open the request window for power-state changes */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 1);

		if (hws->funcs.dpp_pg_control)
			hws->funcs.dpp_pg_control(hws, plane_id, true);

		if (hws->funcs.hubp_pg_control)
			hws->funcs.hubp_pg_control(hws, plane_id, true);

		/* close the request window again */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 0);
		DC_LOG_DEBUG(
				"Un-gated front end for pipe %d\n", plane_id);
	}
}
815
/*
 * Undo the DEGVIDCN10_253 stutter workaround: blank HUBP0 again and
 * power-gate it (inside an IP_REQUEST_EN bracket).  No-op unless
 * apply_DEGVIDCN10_253_wa() previously marked the WA as applied.
 */
static void undo_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];

	if (!hws->wa_state.DEGVIDCN10_253_applied)
		return;

	hubp->funcs->set_blank(hubp, true);

	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	/* power-gate HUBP0 (power_on == false) */
	hws->funcs.hubp_pg_control(hws, 0, false);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hws->wa_state.DEGVIDCN10_253_applied = false;
}
835
/*
 * DEGVIDCN10_253 workaround: when every HUBP is power-gated, stutter
 * cannot be entered; power HUBP0 back up (blanked) to enable stutter.
 * Skipped when stutter is disabled for debug or the WA flag is not set
 * for this ASIC.  Reversed by undo_DEGVIDCN10_253_wa().
 */
static void apply_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];
	int i;

	if (dc->debug.disable_stutter)
		return;

	if (!hws->wa.DEGVIDCN10_253)
		return;

	/* the WA only applies when ALL pipes are power gated */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (!dc->res_pool->hubps[i]->power_gated)
			return;
	}

	/* all pipe power gated, apply work around to enable stutter. */

	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	/* power HUBP0 up (power_on == true) */
	hws->funcs.hubp_pg_control(hws, 0, true);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hubp->funcs->set_hubp_blank_en(hubp, false);
	hws->wa_state.DEGVIDCN10_253_applied = true;
}
865
/*
 * Run the VBIOS golden init sequence: global DCN power-gating init via
 * the BIOS command table, then per-pipe disable.  Also restores the
 * DCHUBBUB self-refresh force-enable bit if the command table flipped
 * it (S0i3 DF-sleep workaround, see comment below).
 */
void dcn10_bios_golden_init(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *bp = dc->ctx->dc_bios;
	int i;
	bool allow_self_fresh_force_enable = true;

	/* some ASICs handle golden init in a dedicated S0i3 WA hook */
	if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
		return;

	/* snapshot the self-refresh force-enable state before the command table runs */
	if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
		allow_self_fresh_force_enable =
				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);


	/* WA for making DF sleep when idle after resume from S0i3.
	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
	 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
	 * before calling command table and it changed to 1 after,
	 * it should be set back to 0.
	 */

	/* initialize dcn global */
	bp->funcs->enable_disp_power_gating(bp,
			CONTROLLER_ID_D0, ASIC_PIPE_INIT);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		/* initialize dcn per pipe */
		bp->funcs->enable_disp_power_gating(bp,
				CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
	}

	/* Note: allow_self_fresh_force_enable can only be false if the
	 * is_allow_self_refresh_enabled hook exists (set above), so the
	 * short-circuit below never calls a NULL hook.
	 */
	if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
		if (allow_self_fresh_force_enable == false &&
				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
										!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);

}
905
false_optc_underflow_wa(struct dc * dc,const struct dc_stream_state * stream,struct timing_generator * tg)906 static void false_optc_underflow_wa(
907 struct dc *dc,
908 const struct dc_stream_state *stream,
909 struct timing_generator *tg)
910 {
911 int i;
912 bool underflow;
913
914 if (!dc->hwseq->wa.false_optc_underflow)
915 return;
916
917 underflow = tg->funcs->is_optc_underflow_occurred(tg);
918
919 for (i = 0; i < dc->res_pool->pipe_count; i++) {
920 struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
921
922 if (old_pipe_ctx->stream != stream)
923 continue;
924
925 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
926 }
927
928 if (tg->funcs->set_blank_data_double_buffer)
929 tg->funcs->set_blank_data_double_buffer(tg, true);
930
931 if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
932 tg->funcs->clear_optc_underflow(tg);
933 }
934
calculate_vready_offset_for_group(struct pipe_ctx * pipe)935 static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
936 {
937 struct pipe_ctx *other_pipe;
938 int vready_offset = pipe->pipe_dlg_param.vready_offset;
939
940 /* Always use the largest vready_offset of all connected pipes */
941 for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
942 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
943 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
944 }
945 for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
946 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
947 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
948 }
949 for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
950 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
951 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
952 }
953 for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
954 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
955 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
956 }
957
958 return vready_offset;
959 }
960
/*
 * Program the OTG/OPTC timing and start the CRTC for @pipe_ctx's stream.
 *
 * Only the head pipe of an MPC combine (top_pipe == NULL) programs the back
 * end; children share it and return immediately.
 *
 * Return: DC_OK on success, DC_ERROR_UNEXPECTED if the pixel clock cannot
 * be programmed or the CRTC fails to enable.
 */
enum dc_status dcn10_enable_stream_timing(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	enum dc_color_space color_space;
	struct tg_color black_color = {0};

	/* by upper caller loop, pipe0 is parent pipe and be called first.
	 * back end is set up by for pipe0. Other children pipe share back end
	 * with pipe 0. No program is needed.
	 */
	if (pipe_ctx->top_pipe != NULL)
		return DC_OK;

	/* TODO check if timing_changed, disable stream if timing changed */

	/* HW program guide assume display already disable
	 * by unplug sequence. OTG assume stop.
	 */
	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);

	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
			pipe_ctx->clock_source,
			&pipe_ctx->stream_res.pix_clk_params,
			dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
			&pipe_ctx->pll_settings)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* Take the OTG's reference on HDMI symclk and update the TX state. */
	if (dc_is_hdmi_tmds_signal(stream->signal)) {
		stream->link->phy_state.symclk_ref_cnts.otg = 1;
		if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
		else
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
	}

	pipe_ctx->stream_res.tg->funcs->program_timing(
			pipe_ctx->stream_res.tg,
			&stream->timing,
			calculate_vready_offset_for_group(pipe_ctx),
			pipe_ctx->pipe_dlg_param.vstartup_start,
			pipe_ctx->pipe_dlg_param.vupdate_offset,
			pipe_ctx->pipe_dlg_param.vupdate_width,
			pipe_ctx->pipe_dlg_param.pstate_keepout,
			pipe_ctx->stream->signal,
			true);

#if 0 /* move to after enable_crtc */
	/* TODO: OPP FMT, ABM. etc. should be done here. */
	/* or FPGA now. instance 0 only. TODO: move to opp.c */

	inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;

	pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
				pipe_ctx->stream_res.opp,
				&stream->bit_depth_params,
				&stream->clamping);
#endif
	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;

	if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
		pipe_ctx->stream_res.tg->funcs->set_blank_color(
				pipe_ctx->stream_res.tg,
				&black_color);

	/* Blank the OTG before enabling it so no stale data scans out. */
	if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
			!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
		pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
		hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
		false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
	}

	/* VTG is within DCHUB command block. DCFCLK is always on */
	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* TODO program crtc source select for non-virtual signal*/
	/* TODO program FMT */
	/* TODO setup link_enc */
	/* TODO set stream attributes */
	/* TODO program audio */
	/* TODO enable stream if timing changed */
	/* TODO unblank stream if DP */

	return DC_OK;
}
1063
/*
 * Tear down the back end (DPMS/audio/OTG state) for @pipe_ctx.  The shared
 * back end is only disabled when this is the parent (top) pipe; children
 * merely release their reference.
 */
static void dcn10_reset_back_end_for_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	int i;
	struct dc_link *link;
	DC_LOGGER_INIT(dc->ctx->logger);
	if (pipe_ctx->stream_res.stream_enc == NULL) {
		pipe_ctx->stream = NULL;
		return;
	}

	link = pipe_ctx->stream->link;
	/* DPMS may already disable or */
	/* dpms_off status is incorrect due to fastboot
	 * feature. When system resume from S4 with second
	 * screen only, the dpms_off would be true but
	 * VBIOS lit up eDP, so check link status too.
	 */
	if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
		dc->link_srv->set_dpms_off(pipe_ctx);
	else if (pipe_ctx->stream_res.audio)
		dc->hwss.disable_audio_stream(pipe_ctx);

	if (pipe_ctx->stream_res.audio) {
		/*disable az_endpoint*/
		pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

		/*free audio*/
		if (dc->caps.dynamic_audio == true) {
			/*we have to dynamic arbitrate the audio endpoints*/
			/*we free the resource, need reset is_audio_acquired*/
			update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
					pipe_ctx->stream_res.audio, false);
			pipe_ctx->stream_res.audio = NULL;
		}
	}

	/* by upper caller loop, parent pipe: pipe0, will be reset last.
	 * back end share by all pipes and will be disable only when disable
	 * parent pipe.
	 */
	if (pipe_ctx->top_pipe == NULL) {

		if (pipe_ctx->stream_res.abm)
			dc->hwss.set_abm_immediate_disable(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);

		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
		set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);
		/* Drop the OTG's reference on HDMI symclk taken at enable time. */
		if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
			pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
	}

	/* Only clear the stream pointer if this pipe_ctx really belongs to the
	 * current state; otherwise leave it untouched.
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
			break;

	if (i == dc->res_pool->pipe_count)
		return;

	pipe_ctx->stream = NULL;
	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
1131
dcn10_hw_wa_force_recovery(struct dc * dc)1132 static bool dcn10_hw_wa_force_recovery(struct dc *dc)
1133 {
1134 struct hubp *hubp ;
1135 unsigned int i;
1136
1137 if (!dc->debug.recovery_enabled)
1138 return false;
1139 /*
1140 DCHUBP_CNTL:HUBP_BLANK_EN=1
1141 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
1142 DCHUBP_CNTL:HUBP_DISABLE=1
1143 DCHUBP_CNTL:HUBP_DISABLE=0
1144 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
1145 DCSURF_PRIMARY_SURFACE_ADDRESS
1146 DCHUBP_CNTL:HUBP_BLANK_EN=0
1147 */
1148
1149 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1150 struct pipe_ctx *pipe_ctx =
1151 &dc->current_state->res_ctx.pipe_ctx[i];
1152 if (pipe_ctx != NULL) {
1153 hubp = pipe_ctx->plane_res.hubp;
1154 /*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
1155 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1156 hubp->funcs->set_hubp_blank_en(hubp, true);
1157 }
1158 }
1159 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
1160 hubbub1_soft_reset(dc->res_pool->hubbub, true);
1161
1162 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1163 struct pipe_ctx *pipe_ctx =
1164 &dc->current_state->res_ctx.pipe_ctx[i];
1165 if (pipe_ctx != NULL) {
1166 hubp = pipe_ctx->plane_res.hubp;
1167 /*DCHUBP_CNTL:HUBP_DISABLE=1*/
1168 if (hubp != NULL && hubp->funcs->hubp_disable_control)
1169 hubp->funcs->hubp_disable_control(hubp, true);
1170 }
1171 }
1172 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1173 struct pipe_ctx *pipe_ctx =
1174 &dc->current_state->res_ctx.pipe_ctx[i];
1175 if (pipe_ctx != NULL) {
1176 hubp = pipe_ctx->plane_res.hubp;
1177 /*DCHUBP_CNTL:HUBP_DISABLE=0*/
1178 if (hubp != NULL && hubp->funcs->hubp_disable_control)
1179 hubp->funcs->hubp_disable_control(hubp, true);
1180 }
1181 }
1182 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
1183 hubbub1_soft_reset(dc->res_pool->hubbub, false);
1184 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1185 struct pipe_ctx *pipe_ctx =
1186 &dc->current_state->res_ctx.pipe_ctx[i];
1187 if (pipe_ctx != NULL) {
1188 hubp = pipe_ctx->plane_res.hubp;
1189 /*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
1190 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1191 hubp->funcs->set_hubp_blank_en(hubp, true);
1192 }
1193 }
1194 return true;
1195
1196 }
1197
/*
 * Verify with HUBBUB that allow-p-state-change is still asserted.  On
 * failure: optionally dump HW state, trace pipe state, break to the
 * debugger, run the force-recovery workaround, then verify once more.
 */
void dcn10_verify_allow_pstate_change_high(struct dc *dc)
{
	struct hubbub *hubbub = dc->res_pool->hubbub;
	static bool should_log_hw_state; /* prevent hw state log by default */

	if (!hubbub->funcs->verify_allow_pstate_change_high)
		return;

	if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
		int i = 0;

		if (should_log_hw_state)
			dcn10_log_hw_state(dc, NULL);

		/* NOTE(review): 'i' exists only for the trace macro below. */
		TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
		BREAK_TO_DEBUGGER();
		if (dcn10_hw_wa_force_recovery(dc)) {
			/*check again*/
			if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub))
				BREAK_TO_DEBUGGER();
		}
	}
}
1221
1222 /* trigger HW to start disconnect plane from stream on the next vsync */
/*
 * Remove this pipe's MPCC from its OPP's MPC tree and request HUBP
 * disconnect.  The disconnect itself completes on the next vsync.
 */
void dcn10_plane_atomic_disconnect(struct dc *dc,
		struct dc_state *state,
		struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	int dpp_id = pipe_ctx->plane_res.dpp->inst;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params;
	struct mpcc *mpcc_to_remove = NULL;
	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;

	mpc_tree_params = &(opp->mpc_tree_params);
	mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);

	/*Already reset*/
	if (mpcc_to_remove == NULL)
		return;

	mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
	// Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
	// so don't wait for MPCC_IDLE in the programming sequence
	if (dc_state_get_pipe_subvp_type(state, pipe_ctx) != SUBVP_PHANTOM)
		opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;

	/* A later optimize pass must run once the disconnect completes. */
	dc->optimized_required = true;

	if (hubp->funcs->hubp_disconnect)
		hubp->funcs->hubp_disconnect(hubp);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
1256
1257 /**
1258 * dcn10_plane_atomic_power_down - Power down plane components.
1259 *
1260 * @dc: dc struct reference. used for grab hwseq.
1261 * @dpp: dpp struct reference.
1262 * @hubp: hubp struct reference.
1263 *
1264 * Keep in mind that this operation requires a power gate configuration;
1265 * however, requests for switch power gate are precisely controlled to avoid
1266 * problems. For this reason, power gate request is usually disabled. This
1267 * function first needs to enable the power gate request before disabling DPP
1268 * and HUBP. Finally, it disables the power gate request again.
1269 */
void dcn10_plane_atomic_power_down(struct dc *dc,
		struct dpp *dpp,
		struct hubp *hubp)
{
	struct dce_hwseq *hws = dc->hwseq;
	DC_LOGGER_INIT(dc->ctx->logger);

	if (REG(DC_IP_REQUEST_CNTL)) {
		/* Open the power-gate request window. */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 1);

		if (hws->funcs.dpp_pg_control)
			hws->funcs.dpp_pg_control(hws, dpp->inst, false);

		if (hws->funcs.hubp_pg_control)
			hws->funcs.hubp_pg_control(hws, hubp->inst, false);

		/* Reset block state while the blocks are gated. */
		hubp->funcs->hubp_reset(hubp);
		dpp->funcs->dpp_reset(dpp);

		/* Close the power-gate request window again. */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 0);
		DC_LOG_DEBUG(
				"Power gated front end %d\n", hubp->inst);
	}

	/* Also stop the DPP root clock where the hook exists. */
	if (hws->funcs.dpp_root_clock_control)
		hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
}
1299
1300 /* disable HW used by plane.
1301 * note: cannot disable until disconnect is complete
1302 */
dcn10_plane_atomic_disable(struct dc * dc,struct pipe_ctx * pipe_ctx)1303 void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
1304 {
1305 struct dce_hwseq *hws = dc->hwseq;
1306 struct hubp *hubp = pipe_ctx->plane_res.hubp;
1307 struct dpp *dpp = pipe_ctx->plane_res.dpp;
1308 int opp_id = hubp->opp_id;
1309
1310 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
1311
1312 hubp->funcs->hubp_clk_cntl(hubp, false);
1313
1314 dpp->funcs->dpp_dppclk_control(dpp, false, false);
1315
1316 if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
1317 pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
1318 pipe_ctx->stream_res.opp,
1319 false);
1320
1321 hubp->power_gated = true;
1322 dc->optimized_required = false; /* We're powering off, no need to optimize */
1323
1324 hws->funcs.plane_atomic_power_down(dc,
1325 pipe_ctx->plane_res.dpp,
1326 pipe_ctx->plane_res.hubp);
1327
1328 pipe_ctx->stream = NULL;
1329 memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
1330 memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
1331 pipe_ctx->top_pipe = NULL;
1332 pipe_ctx->bottom_pipe = NULL;
1333 pipe_ctx->plane_state = NULL;
1334 }
1335
dcn10_disable_plane(struct dc * dc,struct dc_state * state,struct pipe_ctx * pipe_ctx)1336 void dcn10_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
1337 {
1338 struct dce_hwseq *hws = dc->hwseq;
1339 DC_LOGGER_INIT(dc->ctx->logger);
1340
1341 if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1342 return;
1343
1344 hws->funcs.plane_atomic_disable(dc, pipe_ctx);
1345
1346 apply_DEGVIDCN10_253_wa(dc);
1347
1348 DC_LOG_DC("Power down front end %d\n",
1349 pipe_ctx->pipe_idx);
1350 }
1351
/*
 * Bring all pipes to a known blanked/disabled state at init, except those
 * carrying a seamless-boot stream: reset DET sizes and MPC muxes, detach and
 * disable planes, and power-gate DSCs not feeding the active OPTC.
 */
void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	bool can_apply_seamless_boot = false;
	bool tg_enabled[MAX_PIPES] = {false};

	/* Any stream flagged for seamless boot keeps its pipes untouched. */
	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization) {
			can_apply_seamless_boot = true;
			break;
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		/* Blank controller using driver code instead of
		 * command table.
		 */
		if (tg->funcs->is_tg_enabled(tg)) {
			if (hws->funcs.init_blank != NULL) {
				hws->funcs.init_blank(dc, tg);
				tg->funcs->lock(tg);
			} else {
				tg->funcs->lock(tg);
				tg->funcs->set_blank(tg, true);
				hwss_wait_for_blank_complete(tg);
			}
		}
	}

	/* Reset det size */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		struct hubp *hubp = dc->res_pool->hubps[i];

		/* Do not need to reset for seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		if (hubbub && hubp) {
			if (hubbub->funcs->program_det_size)
				hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
			if (hubbub->funcs->program_det_segments)
				hubbub->funcs->program_det_segments(hubbub, hubp->inst, 0);
		}
	}

	/* num_opp will be equal to number of mpcc */
	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Cannot reset the MPC mux if seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		dc->res_pool->mpc->funcs->mpc_init_single_inst(
				dc->res_pool->mpc, i);
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct hubp *hubp = dc->res_pool->hubps[i];
		struct dpp *dpp = dc->res_pool->dpps[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (can_apply_seamless_boot &&
			pipe_ctx->stream != NULL &&
			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
				pipe_ctx->stream_res.tg)) {
			// Enable double buffering for OTG_BLANK no matter if
			// seamless boot is enabled or not to suppress global sync
			// signals when OTG blanked. This is to prevent pipe from
			// requesting data while in PSR.
			tg->funcs->tg_init(tg);
			hubp->power_gated = true;
			tg_enabled[i] = true;
			continue;
		}

		/* Disable on the current state so the new one isn't cleared. */
		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		hubp->funcs->hubp_reset(hubp);
		dpp->funcs->dpp_reset(dpp);

		/* Populate a minimal pipe_ctx so the disconnect/disable hooks
		 * below have valid resources to operate on.
		 */
		pipe_ctx->stream_res.tg = tg;
		pipe_ctx->pipe_idx = i;

		pipe_ctx->plane_res.hubp = hubp;
		pipe_ctx->plane_res.dpp = dpp;
		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
		hubp->mpcc_id = dpp->inst;
		hubp->opp_id = OPP_ID_INVALID;
		hubp->power_gated = false;

		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];

		hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx);

		if (tg->funcs->is_tg_enabled(tg))
			tg->funcs->unlock(tg);

		dc->hwss.disable_plane(dc, context, pipe_ctx);

		pipe_ctx->stream_res.tg = NULL;
		pipe_ctx->plane_res.hubp = NULL;

		if (tg->funcs->is_tg_enabled(tg)) {
			if (tg->funcs->init_odm)
				tg->funcs->init_odm(tg);
		}

		tg->funcs->tg_init(tg);
	}

	/* Clean up MPC tree */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (tg_enabled[i]) {
			if (dc->res_pool->opps[i]->mpc_tree_params.opp_list) {
				if (dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot) {
					int bot_id = dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot->mpcc_id;

					if ((bot_id < MAX_MPCC) && (bot_id < MAX_PIPES) && (!tg_enabled[bot_id]))
						dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
				}
			}
		}
	}

	/* Power gate DSCs */
	if (hws->funcs.dsc_pg_control != NULL) {
		uint32_t num_opps = 0;
		uint32_t opp_id_src0 = OPP_ID_INVALID;
		uint32_t opp_id_src1 = OPP_ID_INVALID;

		// Step 1: To find out which OPTC is running & OPTC DSC is ON
		// We can't use res_pool->res_cap->num_timing_generator to check
		// Because it records display pipes default setting built in driver,
		// not display pipes of the current chip.
		// Some ASICs would be fused display pipes less than the default setting.
		// In dcnxx_resource_construct function, driver would obtain real information.
		for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
			uint32_t optc_dsc_state = 0;
			struct timing_generator *tg = dc->res_pool->timing_generators[i];

			if (tg->funcs->is_tg_enabled(tg)) {
				if (tg->funcs->get_dsc_status)
					tg->funcs->get_dsc_status(tg, &optc_dsc_state);
				// Only one OPTC with DSC is ON, so if we got one result, we would exit this block.
				// non-zero value is DSC enabled
				if (optc_dsc_state != 0) {
					tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
					break;
				}
			}
		}

		// Step 2: To power down DSC but skip DSC of running OPTC
		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
			struct dcn_dsc_state s = {0};

			dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);

			if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
				s.dsc_clock_en && s.dsc_fw_en)
				continue;

			hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false);
		}
	}
}
1541
/*
 * One-time hardware init for DCN10-class ASICs: clocks, DCCG, VBIOS golden
 * init, reference clock discovery, link encoders, pipe init, audio/ABM/DMCU
 * bring-up, and clock gating enable.
 */
void dcn10_init_hw(struct dc *dc)
{
	int i;
	struct abm *abm = dc->res_pool->abm;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	uint32_t user_level = MAX_BACKLIGHT_LEVEL;
	bool is_optimized_init_done = false;

	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

	/* Align bw context with hw config when system resume. */
	if (dc->clk_mgr && dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
		dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
		dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz;
	}

	// Initialize the dccg
	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
		dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	if (!dcb->funcs->is_accelerated_mode(dcb))
		hws->funcs.disable_vga(dc->hwseq);

	if (!dc_dmub_srv_optimized_init_done(dc->ctx->dmub_srv))
		hws->funcs.bios_golden_init(dc);


	/* Derive DCCG/DCHUB reference clocks from the VBIOS crystal frequency. */
	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (res_pool->dccg && res_pool->hubbub) {

			(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
					dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
					&res_pool->ref_clocks.dccg_ref_clock_inKhz);

			(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
					res_pool->ref_clocks.dccg_ref_clock_inKhz,
					&res_pool->ref_clocks.dchub_ref_clock_inKhz);
		} else {
			// Not all ASICs have DCCG sw component
			res_pool->ref_clocks.dccg_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
			res_pool->ref_clocks.dchub_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
		}
	} else
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		if (!is_optimized_init_done)
			link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			link->link_status.link_active = true;
			if (link->link_enc->funcs->fec_is_active &&
					link->link_enc->funcs->fec_is_active(link->link_enc))
				link->fec_state = dc_link_fec_enabled;
		}
	}

	/* we want to turn off all dp displays before doing detection */
	dc->link_srv->blank_all_dp_displays(dc);

	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);

	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
		if (!is_optimized_init_done) {
			hws->funcs.init_pipes(dc, dc->current_state);
			if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
				dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
						!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
		}
	}

	if (!is_optimized_init_done) {

		for (i = 0; i < res_pool->audio_count; i++) {
			struct audio *audio = res_pool->audios[i];

			audio->funcs->hw_init(audio);
		}

		/* Restore backlight levels from the panel controllers. */
		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			if (link->panel_cntl) {
				backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
				user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
			}
		}

		if (abm != NULL)
			abm->funcs->abm_init(abm, backlight, user_level);

		if (dmcu != NULL && !dmcu->auto_load_dmcu)
			dmcu->funcs->dmcu_init(dmcu);
	}

	if (abm != NULL && dmcu != NULL)
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);

	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
	if (!is_optimized_init_done)
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}

	if (dc->clk_mgr && dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
}
1681
1682 /* In headless boot cases, DIG may be turned
1683 * on which causes HW/SW discrepancies.
1684 * To avoid this, power down hardware on boot
1685 * if DIG is turned on
1686 */
dcn10_power_down_on_boot(struct dc * dc)1687 void dcn10_power_down_on_boot(struct dc *dc)
1688 {
1689 struct dc_link *edp_links[MAX_NUM_EDP];
1690 struct dc_link *edp_link = NULL;
1691 int edp_num;
1692 int i = 0;
1693
1694 dc_get_edp_links(dc, edp_links, &edp_num);
1695 if (edp_num)
1696 edp_link = edp_links[0];
1697
1698 if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
1699 edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
1700 dc->hwseq->funcs.edp_backlight_control &&
1701 dc->hwseq->funcs.power_down &&
1702 dc->hwss.edp_power_control) {
1703 dc->hwseq->funcs.edp_backlight_control(edp_link, false);
1704 dc->hwseq->funcs.power_down(dc);
1705 dc->hwss.edp_power_control(edp_link, false);
1706 } else {
1707 for (i = 0; i < dc->link_count; i++) {
1708 struct dc_link *link = dc->links[i];
1709
1710 if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
1711 link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
1712 dc->hwseq->funcs.power_down) {
1713 dc->hwseq->funcs.power_down(dc);
1714 break;
1715 }
1716
1717 }
1718 }
1719
1720 /*
1721 * Call update_clocks with empty context
1722 * to send DISPLAY_OFF
1723 * Otherwise DISPLAY_OFF may not be asserted
1724 */
1725 if (dc->clk_mgr->funcs->set_low_power_state)
1726 dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
1727 }
1728
/*
 * Reset back ends for pipes whose stream is gone or needs reprogramming in
 * the new @context.  Iterates in reverse so the parent pipe is reset last,
 * then re-enables stream gating and powers down the old clock source.
 */
void dcn10_reset_hw_ctx_wrap(
		struct dc *dc,
		struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;

	/* Reset Back End*/
	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
		struct pipe_ctx *pipe_ctx_old =
			&dc->current_state->res_ctx.pipe_ctx[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (!pipe_ctx_old->stream)
			continue;

		/* Children share the parent's back end; reset via parent only. */
		if (pipe_ctx_old->top_pipe)
			continue;

		if (!pipe_ctx->stream ||
				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
			struct clock_source *old_clk = pipe_ctx_old->clock_source;

			dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
			if (hws->funcs.enable_stream_gating)
				hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
			if (old_clk)
				old_clk->funcs->cs_power_down(old_clk);
		}
	}
}
1760
patch_address_for_sbs_tb_stereo(struct pipe_ctx * pipe_ctx,PHYSICAL_ADDRESS_LOC * addr)1761 static bool patch_address_for_sbs_tb_stereo(
1762 struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1763 {
1764 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1765 bool sec_split = pipe_ctx->top_pipe &&
1766 pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1767 if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1768 (pipe_ctx->stream->timing.timing_3d_format ==
1769 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1770 pipe_ctx->stream->timing.timing_3d_format ==
1771 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1772 *addr = plane_state->address.grph_stereo.left_addr;
1773 plane_state->address.grph_stereo.left_addr =
1774 plane_state->address.grph_stereo.right_addr;
1775 return true;
1776 } else {
1777 if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1778 plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1779 plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1780 plane_state->address.grph_stereo.right_addr =
1781 plane_state->address.grph_stereo.left_addr;
1782 plane_state->address.grph_stereo.right_meta_addr =
1783 plane_state->address.grph_stereo.left_meta_addr;
1784 }
1785 }
1786 return false;
1787 }
1788
dcn10_update_plane_addr(const struct dc * dc,struct pipe_ctx * pipe_ctx)1789 void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
1790 {
1791 bool addr_patched = false;
1792 PHYSICAL_ADDRESS_LOC addr;
1793 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1794
1795 if (plane_state == NULL)
1796 return;
1797
1798 addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
1799
1800 pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
1801 pipe_ctx->plane_res.hubp,
1802 &plane_state->address,
1803 plane_state->flip_immediate);
1804
1805 plane_state->status.requested_address = plane_state->address;
1806
1807 if (plane_state->flip_immediate)
1808 plane_state->status.current_address = plane_state->address;
1809
1810 if (addr_patched)
1811 pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
1812 }
1813
/*
 * Program the DPP input (degamma) transfer function for a plane.
 *
 * Also programs the input LUT from the plane's gamma_correction when the
 * debug regamma-only override is off, the correction is not an identity
 * ramp, and the surface format uses a LUT (dce_use_lut).
 *
 * Returns false if the pipe has no DPP or a predefined transfer function
 * is unsupported; true otherwise.
 */
bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
			const struct dc_plane_state *plane_state)
{
	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
	const struct dc_transfer_func *tf = NULL;
	bool result = true;

	if (dpp_base == NULL)
		return false;

	tf = &plane_state->in_transfer_func;

	if (!dpp_base->ctx->dc->debug.always_use_regamma
		&& !plane_state->gamma_correction.is_identity
			&& dce_use_lut(plane_state->format))
		dpp_base->funcs->dpp_program_input_lut(dpp_base, &plane_state->gamma_correction);

	if (tf->type == TF_TYPE_PREDEFINED) {
		switch (tf->tf) {
		case TRANSFER_FUNCTION_SRGB:
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
			break;
		case TRANSFER_FUNCTION_BT709:
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
			break;
		case TRANSFER_FUNCTION_LINEAR:
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
			break;
		case TRANSFER_FUNCTION_PQ:
			/* No fixed-function PQ curve: translate to a user PWL. */
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
			cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
			result = true;
			break;
		default:
			result = false;
			break;
		}
	} else if (tf->type == TF_TYPE_BYPASS) {
		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
	} else {
		/* Arbitrary curve: convert to the degamma PWL HW format. */
		cm_helper_translate_curve_to_degamma_hw_format(tf,
					&dpp_base->degamma_params);
		dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
				&dpp_base->degamma_params);
		result = true;
	}

	return result;
}
1864
#define MAX_NUM_HW_POINTS 0x200

/*
 * log_tf - dump a transfer function's point table to the gamma log targets.
 *
 * DC_LOG_GAMMA is default logging of all hw points
 * DC_LOG_ALL_GAMMA logs all points, not only hw points
 * DC_LOG_ALL_TF_POINTS logs all channels of the tf
 */
static void log_tf(struct dc_context *ctx,
		const struct dc_transfer_func *tf, uint32_t hw_points_num)
{
	int i = 0;

	DC_LOG_GAMMA("Gamma Correction TF");
	DC_LOG_ALL_GAMMA("Logging all tf points...");
	DC_LOG_ALL_TF_CHANNELS("Logging all channels...");

	/* Hardware points: red channel goes to the default gamma log. */
	for (i = 0; i < hw_points_num; i++) {
		DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
	}

	/* Points beyond the hw range: red only under the verbose log. */
	for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
		DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
	}
}
1891
/*
 * Program the DPP output (regamma) transfer function for a stream.
 *
 * Predefined sRGB uses the fixed-function HW curve.  Any other curve is
 * translated into the regamma PWL format (expensive, see note below); if
 * translation fails, regamma is bypassed.
 *
 * Returns false if there is no stream or no DPP on the pipe, else true.
 */
bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
				const struct dc_stream_state *stream)
{
	struct dpp *dpp = pipe_ctx->plane_res.dpp;

	if (!stream)
		return false;

	if (dpp == NULL)
		return false;

	dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;

	if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED &&
			stream->out_transfer_func.tf == TRANSFER_FUNCTION_SRGB)
		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);

	/* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
	 * update.
	 */
	else if (cm_helper_translate_curve_to_hw_format(dc->ctx,
			&stream->out_transfer_func,
			&dpp->regamma_params, false)) {
		dpp->funcs->dpp_program_regamma_pwl(
				dpp,
				&dpp->regamma_params, OPP_REGAMMA_USER);
	} else
		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);

	if (stream->ctx) {
		/* Dump the programmed curve for gamma debugging. */
		log_tf(stream->ctx,
		       &stream->out_transfer_func,
		       dpp->regamma_params.hw_points_num);
	}

	return true;
}
1929
/*
 * Lock or unlock register updates for a pipe via the TG master update lock.
 */
void dcn10_pipe_control_lock(
	struct dc *dc,
	struct pipe_ctx *pipe,
	bool lock)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct timing_generator *tg;

	/* The TG master update lock covers everything on that TG, so
	 * only the top pipe of the tree needs to take it. */
	if (!pipe || pipe->top_pipe)
		return;

	tg = pipe->stream_res.tg;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	if (lock)
		tg->funcs->lock(tg);
	else
		tg->funcs->unlock(tg);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
1954
1955 /**
1956 * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
1957 *
1958 * Software keepout workaround to prevent cursor update locking from stalling
1959 * out cursor updates indefinitely or from old values from being retained in
1960 * the case where the viewport changes in the same frame as the cursor.
1961 *
1962 * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
1963 * too close to VUPDATE, then stall out until VUPDATE finishes.
1964 *
1965 * TODO: Optimize cursor programming to be once per frame before VUPDATE
1966 * to avoid the need for this workaround.
1967 *
1968 * @dc: Current DC state
1969 * @pipe_ctx: Pipe_ctx pointer for delayed cursor update
1970 *
1971 * Return: void
1972 */
static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct crtc_position position;
	uint32_t vupdate_start, vupdate_end;
	unsigned int lines_to_vupdate, us_to_vupdate, vpos;
	unsigned int us_per_line, us_vupdate;

	/* Cannot compute the keepout window without these hooks. */
	if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
		return;

	if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
		return;

	dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
				       &vupdate_end);

	dc->hwss.get_position(&pipe_ctx, 1, &position);
	vpos = position.vertical_count;

	if (vpos <= vupdate_start) {
		/* VPOS is in VACTIVE or back porch. */
		lines_to_vupdate = vupdate_start - vpos;
	} else {
		/* VPOS is past VUPDATE start: wrap to next frame's VUPDATE. */
		lines_to_vupdate = stream->timing.v_total - vpos + vupdate_start;
	}

	/* Calculate time until VUPDATE in microseconds. */
	us_per_line =
		stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
	us_to_vupdate = lines_to_vupdate * us_per_line;

	/* Stall out until the cursor update completes. */
	if (vupdate_end < vupdate_start)
		vupdate_end += stream->timing.v_total;

	/* Position is in the range of vupdate start and end*/
	if (lines_to_vupdate > stream->timing.v_total - vupdate_end + vupdate_start)
		us_to_vupdate = 0;

	/* 70 us is a conservative estimate of cursor update time*/
	if (us_to_vupdate > 70)
		return;

	/* Too close to VUPDATE: busy-wait past the VUPDATE window. */
	us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
	udelay(us_to_vupdate + us_vupdate);
}
2020
/*
 * Take or release the cursor update lock for a stream's MPCC tree, going
 * through the DMUB HW lock manager when the link requires it.
 */
void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
{
	/* The cursor lock is per MPCC tree; only the top pipe locks. */
	if (!pipe || pipe->top_pipe)
		return;

	/* Keep the lock from stalling out in-flight cursor updates. */
	if (lock)
		delay_cursor_until_vupdate(dc, pipe);

	if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
		union dmub_hw_lock_flags hw_locks = { 0 };
		struct dmub_hw_lock_inst_flags inst_flags = { 0 };

		hw_locks.bits.lock_cursor = 1;
		inst_flags.opp_inst = pipe->stream_res.opp->inst;

		dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv, lock,
				     &hw_locks, &inst_flags);
	} else {
		dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
						      pipe->stream_res.opp->inst,
						      lock);
	}
}
2046
wait_for_reset_trigger_to_occur(struct dc_context * dc_ctx,struct timing_generator * tg)2047 static bool wait_for_reset_trigger_to_occur(
2048 struct dc_context *dc_ctx,
2049 struct timing_generator *tg)
2050 {
2051 bool rc = false;
2052
2053 DC_LOGGER_INIT(dc_ctx->logger);
2054
2055 /* To avoid endless loop we wait at most
2056 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
2057 const uint32_t frames_to_wait_on_triggered_reset = 10;
2058 int i;
2059
2060 for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
2061
2062 if (!tg->funcs->is_counter_moving(tg)) {
2063 DC_ERROR("TG counter is not moving!\n");
2064 break;
2065 }
2066
2067 if (tg->funcs->did_triggered_reset_occur(tg)) {
2068 rc = true;
2069 /* usually occurs at i=1 */
2070 DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
2071 i);
2072 break;
2073 }
2074
2075 /* Wait for one frame. */
2076 tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
2077 tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
2078 }
2079
2080 if (false == rc)
2081 DC_ERROR("GSL: Timeout on reset trigger!\n");
2082
2083 return rc;
2084 }
2085
/*
 * reduceSizeAndFraction - reduce a fraction in place by dividing out common
 * prime factors (all primes below 1000).
 *
 * @numerator:          fraction numerator, reduced in place
 * @denominator:        fraction denominator, reduced in place
 * @checkUint32Bounary: when true, stop early once both values fit in 32 bits
 *
 * Returns true on success.  When @checkUint32Bounary is true, success means
 * both reduced values fit in 32 bits; otherwise the result is always true.
 *
 * The return type is bool (was uint64_t): the value is a pure truth flag
 * and the caller only compares it against false.
 */
static bool reduceSizeAndFraction(uint64_t *numerator,
				  uint64_t *denominator,
				  bool checkUint32Bounary)
{
	int i;
	bool ret = checkUint32Bounary == false;
	const uint64_t max_int32 = 0xffffffff;
	uint64_t num, denom;
	static const uint16_t prime_numbers[] = {
		2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
		47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103,
		107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,
		167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
		229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
		283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353,
		359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
		431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487,
		491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
		571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
		641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
		709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
		787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
		859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
		941, 947, 953, 967, 971, 977, 983, 991, 997};
	int count = ARRAY_SIZE(prime_numbers);

	num = *numerator;
	denom = *denominator;
	for (i = 0; i < count; i++) {
		uint32_t num_remainder, denom_remainder;
		uint64_t num_result, denom_result;

		/* Early out once both values fit in 32 bits. */
		if (checkUint32Bounary &&
				num <= max_int32 && denom <= max_int32) {
			ret = true;
			break;
		}
		/* Divide out this prime for as long as it divides both. */
		do {
			num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
			denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
			if (num_remainder == 0 && denom_remainder == 0) {
				num = num_result;
				denom = denom_result;
			}
		} while (num_remainder == 0 && denom_remainder == 0);
	}
	*numerator = num;
	*denominator = denom;
	return ret;
}
2135
is_low_refresh_rate(struct pipe_ctx * pipe)2136 static bool is_low_refresh_rate(struct pipe_ctx *pipe)
2137 {
2138 uint32_t master_pipe_refresh_rate =
2139 pipe->stream->timing.pix_clk_100hz * 100 /
2140 pipe->stream->timing.h_total /
2141 pipe->stream->timing.v_total;
2142 return master_pipe_refresh_rate <= 30;
2143 }
2144
get_clock_divider(struct pipe_ctx * pipe,bool account_low_refresh_rate)2145 static uint8_t get_clock_divider(struct pipe_ctx *pipe,
2146 bool account_low_refresh_rate)
2147 {
2148 uint32_t clock_divider = 1;
2149 uint32_t numpipes = 1;
2150
2151 if (account_low_refresh_rate && is_low_refresh_rate(pipe))
2152 clock_divider *= 2;
2153
2154 if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2155 clock_divider *= 2;
2156
2157 while (pipe->next_odm_pipe) {
2158 pipe = pipe->next_odm_pipe;
2159 numpipes++;
2160 }
2161 clock_divider *= numpipes;
2162
2163 return clock_divider;
2164 }
2165
/*
 * Align the DP DTO pixel clocks of a sync group to the embedded display.
 *
 * vblank_alignment_dto_params packs the embedded display's parameters:
 * bits [63:48] v_total, [47:32] h_total (15 bits each), [31:0] pix_clk in
 * 100Hz units.  Each non-embedded pipe gets its DP DTO phase/modulo
 * overridden so that phase/modulo = embedded_clk * (own frame size) /
 * (dp_ref_clk * embedded frame size), i.e. an exact ratio of the embedded
 * panel's clock.
 *
 * Returns the master pipe index (the embedded display when present,
 * otherwise the first successfully overridden pipe), or -1 on failure or
 * when no override was applied.
 */
static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
		struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	int i, master = -1, embedded = -1;
	struct dc_crtc_timing *hw_crtc_timing;
	uint64_t phase[MAX_PIPES];
	uint64_t modulo[MAX_PIPES];
	unsigned int pclk = 0;

	uint32_t embedded_pix_clk_100hz;
	uint16_t embedded_h_total;
	uint16_t embedded_v_total;
	uint32_t dp_ref_clk_100hz =
		dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;

	DC_LOGGER_INIT(dc_ctx->logger);

	hw_crtc_timing = kcalloc(MAX_PIPES, sizeof(*hw_crtc_timing), GFP_KERNEL);
	if (!hw_crtc_timing)
		return master;

	if (dc->config.vblank_alignment_dto_params &&
		dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
		/* Unpack the embedded display's timing parameters. */
		embedded_h_total =
			(dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF;
		embedded_v_total =
			(dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF;
		embedded_pix_clk_100hz =
			dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;

		/* First pass: read HW timings, compute DTO phase/modulo. */
		for (i = 0; i < group_size; i++) {
			grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
					grouped_pipes[i]->stream_res.tg,
					&hw_crtc_timing[i]);
			dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
				dc->res_pool->dp_clock_source,
				grouped_pipes[i]->stream_res.tg->inst,
				&pclk);
			hw_crtc_timing[i].pix_clk_100hz = pclk;
			if (dc_is_embedded_signal(
					grouped_pipes[i]->stream->signal)) {
				/* The embedded display is always the master. */
				embedded = i;
				master = i;
				phase[i] = embedded_pix_clk_100hz*(uint64_t)100;
				modulo[i] = dp_ref_clk_100hz*100;
			} else {

				/* Scale by this pipe's frame size over the
				 * embedded frame size (divided by the pipe's
				 * clock divider). */
				phase[i] = (uint64_t)embedded_pix_clk_100hz*
					hw_crtc_timing[i].h_total*
					hw_crtc_timing[i].v_total;
				phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true));
				modulo[i] = (uint64_t)dp_ref_clk_100hz*
					embedded_h_total*
					embedded_v_total;

				if (reduceSizeAndFraction(&phase[i],
						&modulo[i], true) == false) {
					/*
					 * this will help to stop reporting
					 * this timing synchronizable
					 */
					DC_SYNC_INFO("Failed to reduce DTO parameters\n");
					grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;
				}
			}
		}

		/* Second pass: apply overrides, refresh cached pixel clocks. */
		for (i = 0; i < group_size; i++) {
			if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
				dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst,
					phase[i], modulo[i]);
				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst, &pclk);
				grouped_pipes[i]->stream->timing.pix_clk_100hz =
					pclk*get_clock_divider(grouped_pipes[i], false);
				/* No embedded display: first overridden pipe
				 * becomes the master. */
				if (master == -1)
					master = i;
			}
		}

	}

	kfree(hw_crtc_timing);
	return master;
}
2255
/*
 * dcn10_enable_vblanks_synchronization - align the vblanks of a group of
 * pipes by overriding their DP DTOs toward a common master and shifting
 * slave timings via align_vblanks (no OTG reset involved).
 *
 * DPG dimensions are temporarily reprogrammed to (width, 2*height + 1)
 * while the alignment is in progress and restored afterwards.
 */
void dcn10_enable_vblanks_synchronization(
	struct dc *dc,
	int group_index,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width = 0, height = 0, master;

	DC_LOGGER_INIT(dc_ctx->logger);

	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);

		/* Cannot sync a disabled OTG; abort the whole procedure. */
		if (!tg->funcs->is_tg_enabled(tg)) {
			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
			return;
		}

		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

	/* Reset per-stream sync state before realigning. */
	for (i = 0; i < group_size; i++) {
		if (grouped_pipes[i]->stream == NULL)
			continue;
		grouped_pipes[i]->stream->vblank_synchronized = false;
		grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;
	}

	DC_SYNC_INFO("Aligning DP DTOs\n");

	master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);

	DC_SYNC_INFO("Synchronizing VBlanks\n");

	if (master >= 0) {
		for (i = 0; i < group_size; i++) {
			if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
				grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
					grouped_pipes[master]->stream_res.tg,
					grouped_pipes[i]->stream_res.tg,
					grouped_pipes[master]->stream->timing.pix_clk_100hz,
					grouped_pipes[i]->stream->timing.pix_clk_100hz,
					get_clock_divider(grouped_pipes[master], false),
					get_clock_divider(grouped_pipes[i], false));
			/* NOTE(review): this statement is outside the if above
			 * (the if has no braces), so the flag is also set for
			 * the master and non-synchronizable pipes — confirm
			 * that is intended. */
			grouped_pipes[i]->stream->vblank_synchronized = true;
		}
		grouped_pipes[master]->stream->vblank_synchronized = true;
		DC_SYNC_INFO("Sync complete\n");
	}

	/* Restore the original DPG dimensions. */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
	}
}
2320
/*
 * dcn10_enable_timing_synchronization - synchronize the OTGs of a pipe
 * group by arming a reset trigger on each slave against pipe 0's OTG,
 * waiting for the reset to fire, then disarming.
 *
 * SubVP phantom pipes are excluded.  DPG dimensions are temporarily
 * reprogrammed to (width, 2*height + 1) during the procedure and restored
 * at the end.
 */
void dcn10_enable_timing_synchronization(
	struct dc *dc,
	struct dc_state *state,
	int group_index,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width = 0, height = 0;

	DC_LOGGER_INIT(dc_ctx->logger);

	DC_SYNC_INFO("Setting up OTG reset trigger\n");

	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);

		/* Cannot sync a disabled OTG; abort the whole procedure. */
		if (!tg->funcs->is_tg_enabled(tg)) {
			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
			return;
		}

		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

	for (i = 0; i < group_size; i++) {
		if (grouped_pipes[i]->stream == NULL)
			continue;

		if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream->vblank_synchronized = false;
	}

	/* Arm each slave OTG to reset against pipe 0's OTG instance. */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
				grouped_pipes[i]->stream_res.tg,
				grouped_pipes[0]->stream_res.tg->inst);
	}

	DC_SYNC_INFO("Waiting for trigger\n");

	/* Need to get only check 1 pipe for having reset as all the others are
	 * synchronized. Look at last pipe programmed to reset.
	 */

	/* NOTE(review): grouped_pipes[1] is indexed unconditionally here,
	 * which assumes group_size >= 2 — confirm against callers. */
	if (grouped_pipes[1]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[1]) != SUBVP_PHANTOM)
		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);

	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
				grouped_pipes[i]->stream_res.tg);
	}

	/* Restore the original DPG dimensions. */
	for (i = 1; i < group_size; i++) {
		if (dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
			continue;

		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
	}

	DC_SYNC_INFO("Sync complete\n");
}
2403
dcn10_enable_per_frame_crtc_position_reset(struct dc * dc,int group_size,struct pipe_ctx * grouped_pipes[])2404 void dcn10_enable_per_frame_crtc_position_reset(
2405 struct dc *dc,
2406 int group_size,
2407 struct pipe_ctx *grouped_pipes[])
2408 {
2409 struct dc_context *dc_ctx = dc->ctx;
2410 int i;
2411
2412 DC_LOGGER_INIT(dc_ctx->logger);
2413
2414 DC_SYNC_INFO("Setting up\n");
2415 for (i = 0; i < group_size; i++)
2416 if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
2417 grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
2418 grouped_pipes[i]->stream_res.tg,
2419 0,
2420 &grouped_pipes[i]->stream->triggered_crtc_reset);
2421
2422 DC_SYNC_INFO("Waiting for trigger\n");
2423
2424 for (i = 0; i < group_size; i++)
2425 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
2426
2427 DC_SYNC_INFO("Multi-display sync is complete\n");
2428 }
2429
/*
 * Read the MMHUB system-aperture registers and convert them into the
 * byte addresses expected by hubp_set_vm_system_aperture_settings.
 */
static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
		struct vm_system_aperture_param *apt,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC physical_page_number;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;

	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);

	REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			LOGICAL_ADDR, &logical_addr_low);

	REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			LOGICAL_ADDR, &logical_addr_high);

	/* Page number -> byte address: << 12 suggests 4K pages; the << 18
	 * granularity of the low/high aperture bounds follows the register
	 * layout — NOTE(review): confirm against the register spec. */
	apt->sys_default.quad_part = physical_page_number.quad_part << 12;
	apt->sys_low.quad_part = (int64_t)logical_addr_low << 18;
	apt->sys_high.quad_part = (int64_t)logical_addr_high << 18;
}
2453
2454 /* Temporary read settings, future will get values from kmd directly */
/*
 * Read the MMHUB VM context0 page-table registers (base, start, end,
 * fault-default) and rebase the page-table base from UMA space into the
 * DCN address space using the SDPIF frame-buffer base/offset.
 */
static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
		struct vm_context0_param *vm0,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC fb_base;
	PHYSICAL_ADDRESS_LOC fb_offset;
	uint32_t fb_base_value;
	uint32_t fb_offset_value;

	REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
	REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);

	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
			PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
			PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);

	/*
	 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
	 * Therefore we need to do
	 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
	 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
	 */
	/* FB base/offset registers hold bits [47:24] of the address. */
	fb_base.quad_part = (uint64_t)fb_base_value << 24;
	fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
	vm0->pte_base.quad_part += fb_base.quad_part;
	vm0->pte_base.quad_part -= fb_offset.quad_part;
}
2498
2499
dcn10_program_pte_vm(struct dce_hwseq * hws,struct hubp * hubp)2500 static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
2501 {
2502 struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
2503 struct vm_system_aperture_param apt = {0};
2504 struct vm_context0_param vm0 = {0};
2505
2506 mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
2507 mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
2508
2509 hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
2510 hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
2511 }
2512
/*
 * dcn10_enable_plane - power up and clock the per-pipe resources needed
 * before a plane can be programmed (HUBP power/clock, OPP pipe clock,
 * optional GPU VM PTE setup, flip interrupt).
 */
static void dcn10_enable_plane(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	/* Undo the DEGVIDCN10_253 workaround before powering planes up. */
	undo_DEGVIDCN10_253_wa(dc);

	power_on_plane_resources(dc->hwseq,
		pipe_ctx->plane_res.hubp->inst);

	/* enable DCFCLK current DCHUB */
	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);

	/* make sure OPP_PIPE_CLOCK_EN = 1 */
	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
			pipe_ctx->stream_res.opp,
			true);

	/* GPU VM configurations need the HUBP aperture/PTE programmed. */
	if (dc->config.gpu_vm_support)
		dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	/* Re-enable the flip interrupt on the top pipe if requested. */
	if (!pipe_ctx->top_pipe
		&& pipe_ctx->plane_state
		&& pipe_ctx->plane_state->flip_int_enabled
		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
			pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);

}
2551
dcn10_program_gamut_remap(struct pipe_ctx * pipe_ctx)2552 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2553 {
2554 int i = 0;
2555 struct dpp_grph_csc_adjustment adjust;
2556 memset(&adjust, 0, sizeof(adjust));
2557 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2558
2559
2560 if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2561 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2562 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2563 adjust.temperature_matrix[i] =
2564 pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2565 } else if (pipe_ctx->plane_state &&
2566 pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2567 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2568 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2569 adjust.temperature_matrix[i] =
2570 pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
2571 }
2572
2573 pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
2574 }
2575
2576
dcn10_is_rear_mpo_fix_required(struct pipe_ctx * pipe_ctx,enum dc_color_space colorspace)2577 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2578 {
2579 if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
2580 if (pipe_ctx->top_pipe) {
2581 struct pipe_ctx *top = pipe_ctx->top_pipe;
2582
2583 while (top->top_pipe)
2584 top = top->top_pipe; // Traverse to top pipe_ctx
2585 if (top->plane_state && top->plane_state->layer_index == 0 && !top->plane_state->global_alpha)
2586 // Global alpha used by top plane for PIP overlay
2587 // Pre-multiplied/per-pixel alpha used by MPO
2588 // Check top plane's global alpha to ensure layer_index > 0 not caused by PIP
2589 return true; // MPO in use and front plane not hidden
2590 }
2591 }
2592 return false;
2593 }
2594
/*
 * Program the output CSC with the RGB bias entries (matrix[3/7/11]) forced
 * to zero, then restore the caller's matrix.  Used to fix rear-plane MPO
 * brightness (see dcn10_program_output_csc).
 *
 * NOTE(review): restoration writes matrix[3] into all three bias slots,
 * so this assumes matrix[3] == matrix[7] == matrix[11] on entry, as the
 * caller's comment states — confirm for all matrix sources.
 */
static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
{
	// Override rear plane RGB bias to fix MPO brightness
	uint16_t rgb_bias = matrix[3];

	matrix[3] = 0;
	matrix[7] = 0;
	matrix[11] = 0;
	pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
	matrix[3] = rgb_bias;
	matrix[7] = rgb_bias;
	matrix[11] = rgb_bias;
}
2608
/*
 * Program the output color-space conversion for a pipe: either the
 * stream's custom CSC matrix (with the rear-plane MPO bias fix applied
 * when required) or the default matrix for the given colorspace.
 */
void dcn10_program_output_csc(struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		enum dc_color_space colorspace,
		uint16_t *matrix,
		int opp_id)
{
	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {

			/* MPO is broken with RGB colorspaces when OCSC matrix
			 * brightness offset >= 0 on DCN1 due to OCSC before MPC
			 * Blending adds offsets from front + rear to rear plane
			 *
			 * Fix is to set RGB bias to 0 on rear plane, top plane
			 * black value pixels add offset instead of rear + front
			 */

			/* Sign-reinterpret the bias to test its polarity. */
			int16_t rgb_bias = matrix[3];
			// matrix[3/7/11] are all the same offset value

			if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
				dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
			} else {
				pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
			}
		}
	} else {
		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
			pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
	}
}
2640
dcn10_update_dpp(struct dpp * dpp,struct dc_plane_state * plane_state)2641 static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2642 {
2643 struct dc_bias_and_scale bns_params = {0};
2644
2645 // program the input csc
2646 dpp->funcs->dpp_setup(dpp,
2647 plane_state->format,
2648 EXPANSION_MODE_ZERO,
2649 plane_state->input_csc_color_matrix,
2650 plane_state->color_space,
2651 NULL);
2652
2653 //set scale and bias registers
2654 build_prescale_params(&bns_params, plane_state);
2655 if (dpp->funcs->dpp_program_bias_and_scale)
2656 dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
2657 }
2658
/*
 * dcn10_update_visual_confirm_color - Apply the pipe's visual-confirm color.
 *
 * Copies the pipe's visual confirm color into the plane state (for later
 * inspection) and programs it as the MPCC background color, if the MPC
 * implementation supports set_bg_color.
 */
void dcn10_update_visual_confirm_color(struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		int mpcc_id)
{
	struct mpc *mpc = dc->res_pool->mpc;

	if (mpc->funcs->set_bg_color) {
		memcpy(&pipe_ctx->plane_state->visual_confirm_color, &(pipe_ctx->visual_confirm_color), sizeof(struct tg_color));
		mpc->funcs->set_bg_color(mpc, &(pipe_ctx->visual_confirm_color), mpcc_id);
	}
}
2670
/*
 * dcn10_update_mpcc - Build blending config and (re)insert the pipe's MPCC.
 *
 * Derives the alpha blending mode from the plane's per-pixel/global alpha
 * settings, then either updates the existing MPCC in place (non-full
 * updates) or removes and re-inserts the MPCC into the OPP's MPC tree
 * (full updates).
 */
void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct mpcc_blnd_cfg blnd_cfg = {0};
	/* Per-pixel alpha only matters when there is a pipe below to blend with. */
	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
	int mpcc_id;
	struct mpcc *new_mpcc;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);

	blnd_cfg.overlap_only = false;
	blnd_cfg.global_gain = 0xff;

	if (per_pixel_alpha) {
		/* DCN1.0 has output CM before MPC which seems to screw with
		 * pre-multiplied alpha.
		 */
		blnd_cfg.pre_multiplied_alpha = (is_rgb_cspace(
				pipe_ctx->stream->output_color_space)
						&& pipe_ctx->plane_state->pre_multiplied_alpha);
		if (pipe_ctx->plane_state->global_alpha) {
			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
			blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
		} else {
			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
		}
	} else {
		blnd_cfg.pre_multiplied_alpha = false;
		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
	}

	if (pipe_ctx->plane_state->global_alpha)
		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
	else
		blnd_cfg.global_alpha = 0xff;

	/*
	 * TODO: remove hack
	 * Note: currently there is a bug in init_hw such that
	 * on resume from hibernate, BIOS sets up MPCC0, and
	 * we do mpcc_remove but the mpcc cannot go to idle
	 * after remove. This cause us to pick mpcc1 here,
	 * which causes a pstate hang for yet unknown reason.
	 */
	mpcc_id = hubp->inst;

	/* If there is no full update, don't need to touch MPC tree*/
	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
		dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);
		return;
	}

	/* check if this MPCC is already being used */
	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
	/* remove MPCC if being used */
	if (new_mpcc != NULL)
		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
	else
		if (dc->debug.sanity_checks)
			mpc->funcs->assert_mpcc_idle_before_connect(
					dc->res_pool->mpc, mpcc_id);

	/* Call MPC to insert new plane */
	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
			mpc_tree_params,
			&blnd_cfg,
			NULL,
			NULL,
			hubp->inst,
			mpcc_id);
	dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);

	ASSERT(new_mpcc != NULL);
	/* Record routing so later programming knows this HUBP's OPP and MPCC. */
	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
	hubp->mpcc_id = mpcc_id;
}
2748
update_scaler(struct pipe_ctx * pipe_ctx)2749 static void update_scaler(struct pipe_ctx *pipe_ctx)
2750 {
2751 bool per_pixel_alpha =
2752 pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2753
2754 pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2755 pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
2756 /* scaler configuration */
2757 pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2758 pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
2759 }
2760
/*
 * dcn10_update_dchubp_dpp - Program HUBP/DPP for a plane update.
 *
 * For full updates this reprograms DPP clock dividers/DTO, VTG selection and
 * the HUBP DLG/TTU/RQ registers. For partial updates only the blocks whose
 * update flags are set are touched (input CSC, MPCC blending, scaler,
 * viewport, cursor, gamut remap/output CSC, surface config), followed by the
 * plane address and unblank if the pipe tree is visible.
 */
static void dcn10_update_dchubp_dpp(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	struct plane_size size = plane_state->plane_size;
	unsigned int compat_level = 0;
	bool should_divided_by_2 = false;

	/* depends on DML calculation, DPP clock value may change dynamically */
	/* If request max dpp clk is lower than current dispclk, no need to
	 * divided by 2
	 */
	if (plane_state->update_flags.bits.full_update) {

		/* new calculated dispclk, dppclk are stored in
		 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current
		 * dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz.
		 * dcn10_validate_bandwidth compute new dispclk, dppclk.
		 * dispclk will put in use after optimize_bandwidth when
		 * ramp_up_dispclk_with_dpp is called.
		 * there are two places for dppclk be put in use. One location
		 * is the same as the location as dispclk. Another is within
		 * update_dchubp_dpp which happens between pre_bandwidth and
		 * optimize_bandwidth.
		 * dppclk updated within update_dchubp_dpp will cause new
		 * clock values of dispclk and dppclk not be in use at the same
		 * time. when clocks are decreased, this may cause dppclk is
		 * lower than previous configuration and let pipe stuck.
		 * for example, eDP + external dp, change resolution of DP from
		 * 1920x1080x144hz to 1280x960x60hz.
		 * before change: dispclk = 337889 dppclk = 337889
		 * change mode, dcn10_validate_bandwidth calculate
		 * dispclk = 143122 dppclk = 143122
		 * update_dchubp_dpp be executed before dispclk be updated,
		 * dispclk = 337889, but dppclk use new value dispclk /2 =
		 * 168944. this will cause pipe pstate warning issue.
		 * solution: between pre_bandwidth and optimize_bandwidth, while
		 * dispclk is going to be decreased, keep dppclk = dispclk
		 **/
		if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
				dc->clk_mgr->clks.dispclk_khz)
			should_divided_by_2 = false;
		else
			should_divided_by_2 =
					context->bw_ctx.bw.dcn.clk.dppclk_khz <=
					dc->clk_mgr->clks.dispclk_khz / 2;

		dpp->funcs->dpp_dppclk_control(
				dpp,
				should_divided_by_2,
				true);

		/* With a DCCG, program an exact DPP DTO; otherwise track dispclk. */
		if (dc->res_pool->dccg)
			dc->res_pool->dccg->funcs->update_dpp_dto(
					dc->res_pool->dccg,
					dpp->inst,
					pipe_ctx->plane_res.bw.dppclk_khz);
		else
			dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
						dc->clk_mgr->clks.dispclk_khz / 2 :
							dc->clk_mgr->clks.dispclk_khz;
	}

	/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
	 * VTG is within DCHUBBUB which is commond block share by each pipe HUBP.
	 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
	 */
	if (plane_state->update_flags.bits.full_update) {
		hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);

		hubp->funcs->hubp_setup(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs,
			&pipe_ctx->rq_regs,
			&pipe_ctx->pipe_dlg_param);
		hubp->funcs->hubp_setup_interdependent(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs);
	}

	/* Surface size seen by the HUBP is the scaler viewport. */
	size.surface_size = pipe_ctx->plane_res.scl_data.viewport;

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.bpp_change)
		dcn10_update_dpp(dpp, plane_state);

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.per_pixel_alpha_change ||
		plane_state->update_flags.bits.global_alpha_change)
		hws->funcs.update_mpcc(dc, pipe_ctx);

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.per_pixel_alpha_change ||
		plane_state->update_flags.bits.global_alpha_change ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.position_change) {
		update_scaler(pipe_ctx);
	}

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.position_change) {
		hubp->funcs->mem_program_viewport(
			hubp,
			&pipe_ctx->plane_res.scl_data.viewport,
			&pipe_ctx->plane_res.scl_data.viewport_c);
	}

	/* Reprogram the cursor if one is set for the stream. */
	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
		dc->hwss.set_cursor_attribute(pipe_ctx);
		dc->hwss.set_cursor_position(pipe_ctx);

		if (dc->hwss.set_cursor_sdr_white_level)
			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
	}

	if (plane_state->update_flags.bits.full_update) {
		/*gamut remap*/
		dc->hwss.program_gamut_remap(pipe_ctx);

		dc->hwss.program_output_csc(dc,
				pipe_ctx,
				pipe_ctx->stream->output_color_space,
				pipe_ctx->stream->csc_color_matrix.matrix,
				pipe_ctx->stream_res.opp->inst);
	}

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.pixel_format_change ||
		plane_state->update_flags.bits.horizontal_mirror_change ||
		plane_state->update_flags.bits.rotation_change ||
		plane_state->update_flags.bits.swizzle_change ||
		plane_state->update_flags.bits.dcc_change ||
		plane_state->update_flags.bits.bpp_change ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.plane_size_change) {
		hubp->funcs->hubp_program_surface_config(
			hubp,
			plane_state->format,
			&plane_state->tiling_info,
			&size,
			plane_state->rotation,
			&plane_state->dcc,
			plane_state->horizontal_mirror,
			compat_level);
	}

	hubp->power_gated = false;

	dc->hwss.update_plane_addr(dc, pipe_ctx);

	/* Only unblank if something in this pipe tree is actually visible. */
	if (is_pipe_tree_visible(pipe_ctx))
		hubp->funcs->set_blank(hubp, false);
}
2922
/*
 * dcn10_blank_pixel_data - Blank or unblank the OTG for a pipe.
 *
 * Programs the blank color for the stream's output color space, then either
 * unblanks the timing generator and restores the ABM level, or disables ABM
 * and blanks the timing generator after waiting for VBLANK.
 */
void dcn10_blank_pixel_data(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		bool blank)
{
	enum dc_color_space color_space;
	struct tg_color black_color = {0};
	struct stream_resource *stream_res = &pipe_ctx->stream_res;
	struct dc_stream_state *stream = pipe_ctx->stream;

	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;


	if (stream_res->tg->funcs->set_blank_color)
		stream_res->tg->funcs->set_blank_color(
				stream_res->tg,
				&black_color);

	if (!blank) {
		/* Unblank first, then re-enable ABM at the stream's level. */
		if (stream_res->tg->funcs->set_blank)
			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
		if (stream_res->abm) {
			dc->hwss.set_pipe(pipe_ctx);
			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
		}
	} else {
		/* Disable ABM before blanking; blank at VBLANK to avoid tearing. */
		dc->hwss.set_abm_immediate_disable(pipe_ctx);
		if (stream_res->tg->funcs->set_blank) {
			stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
		}
	}
}
2966
/*
 * dcn10_set_hdr_multiplier - Program the DPP HDR multiplier.
 *
 * Converts the plane's fixed-point hdr_mult to the hardware's custom float
 * format (1 sign bit, 6 exponent bits, 12 mantissa bits). A zero multiplier
 * is treated as "unset" and the 1.0 default (0x1f000) is programmed instead.
 */
void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
{
	struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
	uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
	struct custom_float_format fmt;

	fmt.exponenta_bits = 6;
	fmt.mantissa_bits = 12;
	fmt.sign = true;


	if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
		convert_to_custom_float_format(multiplier, &fmt, &hw_mult);

	pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
			pipe_ctx->plane_res.dpp, hw_mult);
}
2984
/*
 * dcn10_program_pipe - Program one pipe for the new state.
 *
 * For the top pipe of a tree this also programs OTG global sync, VTG params,
 * the vupdate interrupt and blank state. Then HUBP/DPP are updated, the HDR
 * multiplier is set, and input/output transfer functions are programmed as
 * dictated by the plane's update flags.
 *
 * NOTE(review): pipe_ctx->plane_state is dereferenced unconditionally below,
 * so callers are assumed to only pass pipes with a plane attached — confirm
 * against call sites.
 */
void dcn10_program_pipe(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	if (pipe_ctx->top_pipe == NULL) {
		bool blank = !is_pipe_tree_visible(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->program_global_sync(
				pipe_ctx->stream_res.tg,
				calculate_vready_offset_for_group(pipe_ctx),
				pipe_ctx->pipe_dlg_param.vstartup_start,
				pipe_ctx->pipe_dlg_param.vupdate_offset,
				pipe_ctx->pipe_dlg_param.vupdate_width,
				pipe_ctx->pipe_dlg_param.pstate_keepout);

		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);

		if (hws->funcs.setup_vupdate_interrupt)
			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);

		hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
	}

	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		dcn10_enable_plane(dc, pipe_ctx, context);

	dcn10_update_dchubp_dpp(dc, pipe_ctx, context);

	hws->funcs.set_hdr_multiplier(pipe_ctx);

	if (pipe_ctx->plane_state->update_flags.bits.full_update ||
			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
			pipe_ctx->plane_state->update_flags.bits.gamma_change)
		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);

	/* dcn10_translate_regamma_to_hw_format takes 750us to finish
	 * only do gamma programming for full update.
	 * TODO: This can be further optimized/cleaned up
	 * Always call this for now since it does memcmp inside before
	 * doing heavy calculation and programming
	 */
	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
}
3033
/*
 * dcn10_wait_for_pending_cleared - Wait for pending OTG updates to take effect.
 *
 * For every enabled top pipe with a stream and plane, waits for VBLANK then
 * VACTIVE so that VUPDATE is guaranteed to have occurred before the caller
 * proceeds (e.g. before re-locking the pipe).
 */
void dcn10_wait_for_pending_cleared(struct dc *dc,
		struct dc_state *context)
{
	struct pipe_ctx *pipe_ctx;
	struct timing_generator *tg;
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe_ctx = &context->res_ctx.pipe_ctx[i];
		tg = pipe_ctx->stream_res.tg;

		/*
		 * Only wait for top pipe's tg pending bit
		 * Also skip if pipe is disabled.
		 */
		if (pipe_ctx->top_pipe ||
		    !pipe_ctx->stream || !pipe_ctx->plane_state ||
		    !tg->funcs->is_tg_enabled(tg))
			continue;

		/*
		 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
		 * For some reason waiting for OTG_UPDATE_PENDING cleared
		 * seems to not trigger the update right away, and if we
		 * lock again before VUPDATE then we don't get a separated
		 * operation.
		 */
		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
	}
}
3065
/*
 * dcn10_post_unlock_program_front_end - Front-end work after pipe unlock.
 *
 * Applies the false-OTC-underflow workaround on planeless streams, disables
 * planes flagged for disable (once), runs optimize_bandwidth if any pipe was
 * disabled, and applies the DEGVIDCN10-254 watermark-change workaround.
 */
void dcn10_post_unlock_program_front_end(
		struct dc *dc,
		struct dc_state *context)
{
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Only top-of-tree, non-ODM-secondary pipes carry the stream. */
		if (!pipe_ctx->top_pipe &&
			!pipe_ctx->prev_odm_pipe &&
			pipe_ctx->stream) {
			struct timing_generator *tg = pipe_ctx->stream_res.tg;

			if (context->stream_status[i].plane_count == 0)
				false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
			dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);

	/* One optimize_bandwidth call suffices if anything was disabled. */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
			dc->hwss.optimize_bandwidth(dc, context);
			break;
		}

	if (dc->hwseq->wa.DEGVIDCN10_254)
		hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
}
3098
dcn10_stereo_hw_frame_pack_wa(struct dc * dc,struct dc_state * context)3099 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
3100 {
3101 uint8_t i;
3102
3103 for (i = 0; i < context->stream_count; i++) {
3104 if (context->streams[i]->timing.timing_3d_format
3105 == TIMING_3D_FORMAT_HW_FRAME_PACKING) {
3106 /*
3107 * Disable stutter
3108 */
3109 hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
3110 break;
3111 }
3112 }
3113 }
3114
/*
 * dcn10_prepare_bandwidth - Raise clocks/watermarks before a state change.
 *
 * Updates clocks without lowering them (safe_to_lower = false), programs
 * watermarks (recording whether a later optimize is required), applies the
 * stereo frame-pack stutter workaround, and optionally notifies pplib of
 * watermark ranges in override mode.
 */
void dcn10_prepare_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	int min_fclk_khz, min_dcfclk_khz, socclk_khz;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	/* No streams: PHY clock requirement drops to zero. */
	if (context->stream_count == 0)
		context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			false);

	dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);
	dcn10_stereo_hw_frame_pack_wa(dc, context);

	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
		DC_FP_START();
		dcn_get_soc_clks(
			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
		DC_FP_END();
		dcn_bw_notify_pplib_of_wm_ranges(
			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
	}

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
3152
/*
 * dcn10_optimize_bandwidth - Lower clocks/watermarks after a state change.
 *
 * Mirror of dcn10_prepare_bandwidth but with safe_to_lower = true so clocks
 * may now be reduced to the new state's requirements; watermarks are
 * reprogrammed and the stereo frame-pack workaround reapplied.
 */
void dcn10_optimize_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	int min_fclk_khz, min_dcfclk_khz, socclk_khz;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	/* No streams: PHY clock requirement drops to zero. */
	if (context->stream_count == 0)
		context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			true);

	hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);

	dcn10_stereo_hw_frame_pack_wa(dc, context);

	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
		DC_FP_START();
		dcn_get_soc_clks(
			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
		DC_FP_END();
		dcn_bw_notify_pplib_of_wm_ranges(
			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
	}

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
3191
dcn10_set_drr(struct pipe_ctx ** pipe_ctx,int num_pipes,struct dc_crtc_timing_adjust adjust)3192 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
3193 int num_pipes, struct dc_crtc_timing_adjust adjust)
3194 {
3195 int i = 0;
3196 struct drr_params params = {0};
3197 // DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
3198 unsigned int event_triggers = 0x800;
3199 // Note DRR trigger events are generated regardless of whether num frames met.
3200 unsigned int num_frames = 2;
3201
3202 params.vertical_total_max = adjust.v_total_max;
3203 params.vertical_total_min = adjust.v_total_min;
3204 params.vertical_total_mid = adjust.v_total_mid;
3205 params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
3206 /* TODO: If multiple pipes are to be supported, you need
3207 * some GSL stuff. Static screen triggers may be programmed differently
3208 * as well.
3209 */
3210 for (i = 0; i < num_pipes; i++) {
3211 /* dc_state_destruct() might null the stream resources, so fetch tg
3212 * here first to avoid a race condition. The lifetime of the pointee
3213 * itself (the timing_generator object) is not a problem here.
3214 */
3215 struct timing_generator *tg = pipe_ctx[i]->stream_res.tg;
3216
3217 if ((tg != NULL) && tg->funcs) {
3218 set_drr_and_clear_adjust_pending(pipe_ctx[i], pipe_ctx[i]->stream, ¶ms);
3219 if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
3220 if (tg->funcs->set_static_screen_control)
3221 tg->funcs->set_static_screen_control(
3222 tg, event_triggers, num_frames);
3223 }
3224 }
3225 }
3226
dcn10_get_position(struct pipe_ctx ** pipe_ctx,int num_pipes,struct crtc_position * position)3227 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
3228 int num_pipes,
3229 struct crtc_position *position)
3230 {
3231 int i = 0;
3232
3233 /* TODO: handle pipes > 1
3234 */
3235 for (i = 0; i < num_pipes; i++)
3236 pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
3237 }
3238
dcn10_set_static_screen_control(struct pipe_ctx ** pipe_ctx,int num_pipes,const struct dc_static_screen_params * params)3239 void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
3240 int num_pipes, const struct dc_static_screen_params *params)
3241 {
3242 unsigned int i;
3243 unsigned int triggers = 0;
3244
3245 if (params->triggers.surface_update)
3246 triggers |= 0x80;
3247 if (params->triggers.cursor_update)
3248 triggers |= 0x2;
3249 if (params->triggers.force_trigger)
3250 triggers |= 0x1;
3251
3252 for (i = 0; i < num_pipes; i++)
3253 pipe_ctx[i]->stream_res.tg->funcs->
3254 set_static_screen_control(pipe_ctx[i]->stream_res.tg,
3255 triggers, params->num_frames);
3256 }
3257
dcn10_config_stereo_parameters(struct dc_stream_state * stream,struct crtc_stereo_flags * flags)3258 static void dcn10_config_stereo_parameters(
3259 struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
3260 {
3261 enum view_3d_format view_format = stream->view_format;
3262 enum dc_timing_3d_format timing_3d_format =\
3263 stream->timing.timing_3d_format;
3264 bool non_stereo_timing = false;
3265
3266 if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
3267 timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
3268 timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
3269 non_stereo_timing = true;
3270
3271 if (non_stereo_timing == false &&
3272 view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
3273
3274 flags->PROGRAM_STEREO = 1;
3275 flags->PROGRAM_POLARITY = 1;
3276 if (timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE ||
3277 timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
3278 timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
3279 timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3280
3281 if (stream->link && stream->link->ddc) {
3282 enum display_dongle_type dongle = \
3283 stream->link->ddc->dongle_type;
3284
3285 if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
3286 dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
3287 dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
3288 flags->DISABLE_STEREO_DP_SYNC = 1;
3289 }
3290 }
3291 flags->RIGHT_EYE_POLARITY =\
3292 stream->timing.flags.RIGHT_EYE_3D_POLARITY;
3293 if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
3294 flags->FRAME_PACKED = 1;
3295 }
3296
3297 return;
3298 }
3299
dcn10_setup_stereo(struct pipe_ctx * pipe_ctx,struct dc * dc)3300 void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
3301 {
3302 struct crtc_stereo_flags flags = { 0 };
3303 struct dc_stream_state *stream = pipe_ctx->stream;
3304
3305 dcn10_config_stereo_parameters(stream, &flags);
3306
3307 if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3308 if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
3309 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3310 } else {
3311 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3312 }
3313
3314 pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
3315 pipe_ctx->stream_res.opp,
3316 flags.PROGRAM_STEREO == 1,
3317 &stream->timing);
3318
3319 pipe_ctx->stream_res.tg->funcs->program_stereo(
3320 pipe_ctx->stream_res.tg,
3321 &stream->timing,
3322 &flags);
3323
3324 return;
3325 }
3326
get_hubp_by_inst(struct resource_pool * res_pool,int mpcc_inst)3327 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3328 {
3329 int i;
3330
3331 for (i = 0; i < res_pool->pipe_count; i++) {
3332 if (res_pool->hubps[i]->inst == mpcc_inst)
3333 return res_pool->hubps[i];
3334 }
3335 ASSERT(false);
3336 return NULL;
3337 }
3338
dcn10_wait_for_mpcc_disconnect(struct dc * dc,struct resource_pool * res_pool,struct pipe_ctx * pipe_ctx)3339 void dcn10_wait_for_mpcc_disconnect(
3340 struct dc *dc,
3341 struct resource_pool *res_pool,
3342 struct pipe_ctx *pipe_ctx)
3343 {
3344 struct dce_hwseq *hws = dc->hwseq;
3345 int mpcc_inst;
3346
3347 if (dc->debug.sanity_checks) {
3348 hws->funcs.verify_allow_pstate_change_high(dc);
3349 }
3350
3351 if (!pipe_ctx->stream_res.opp)
3352 return;
3353
3354 for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
3355 if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
3356 struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
3357
3358 if (pipe_ctx->stream_res.tg &&
3359 pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
3360 res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
3361 pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
3362 hubp->funcs->set_blank(hubp, true);
3363 }
3364 }
3365
3366 if (dc->debug.sanity_checks) {
3367 hws->funcs.verify_allow_pstate_change_high(dc);
3368 }
3369
3370 }
3371
/*
 * dcn10_dummy_display_power_gating - No-op power gating stub.
 *
 * DCN10 performs no per-controller display power gating through this hook;
 * it always reports success so generic sequencer code can call it
 * unconditionally.
 */
bool dcn10_dummy_display_power_gating(
	struct dc *dc,
	uint8_t controller_id,
	struct dc_bios *dcb,
	enum pipe_gating_control power_gating)
{
	return true;
}
3380
dcn10_update_pending_status(struct pipe_ctx * pipe_ctx)3381 void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
3382 {
3383 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
3384 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3385 bool flip_pending;
3386 struct dc *dc = pipe_ctx->stream->ctx->dc;
3387
3388 if (plane_state == NULL)
3389 return;
3390
3391 flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
3392 pipe_ctx->plane_res.hubp);
3393
3394 plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
3395
3396 if (!flip_pending)
3397 plane_state->status.current_address = plane_state->status.requested_address;
3398
3399 if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
3400 tg->funcs->is_stereo_left_eye) {
3401 plane_state->status.is_right_eye =
3402 !tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
3403 }
3404
3405 if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
3406 struct dce_hwseq *hwseq = dc->hwseq;
3407 struct timing_generator *tg = dc->res_pool->timing_generators[0];
3408 unsigned int cur_frame = tg->funcs->get_frame_count(tg);
3409
3410 if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
3411 struct hubbub *hubbub = dc->res_pool->hubbub;
3412
3413 hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
3414 hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
3415 }
3416 }
3417 }
3418
/*
 * dcn10_update_dchub - Forward DCHUB init data to the HUBBUB.
 *
 * On DCN the HUBBUB owns the DCHUB programming sequence, so this simply
 * delegates to its update_dchub implementation.
 */
void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
{
	struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;

	/* In DCN, this programming sequence is owned by the hubbub */
	hubbub->funcs->update_dchub(hubbub, dh_data);
}
3426
dcn10_can_pipe_disable_cursor(struct pipe_ctx * pipe_ctx)3427 static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
3428 {
3429 struct pipe_ctx *test_pipe, *split_pipe;
3430 const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
3431 struct rect r1 = scl_data->recout, r2, r2_half;
3432 int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
3433 int cur_layer = pipe_ctx->plane_state->layer_index;
3434
3435 /**
3436 * Disable the cursor if there's another pipe above this with a
3437 * plane that contains this pipe's viewport to prevent double cursor
3438 * and incorrect scaling artifacts.
3439 */
3440 for (test_pipe = pipe_ctx->top_pipe; test_pipe;
3441 test_pipe = test_pipe->top_pipe) {
3442 // Skip invisible layer and pipe-split plane on same layer
3443 if (!test_pipe->plane_state ||
3444 !test_pipe->plane_state->visible ||
3445 test_pipe->plane_state->layer_index == cur_layer)
3446 continue;
3447
3448 r2 = test_pipe->plane_res.scl_data.recout;
3449 r2_r = r2.x + r2.width;
3450 r2_b = r2.y + r2.height;
3451 split_pipe = test_pipe;
3452
3453 /**
3454 * There is another half plane on same layer because of
3455 * pipe-split, merge together per same height.
3456 */
3457 for (split_pipe = pipe_ctx->top_pipe; split_pipe;
3458 split_pipe = split_pipe->top_pipe)
3459 if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
3460 r2_half = split_pipe->plane_res.scl_data.recout;
3461 r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
3462 r2.width = r2.width + r2_half.width;
3463 r2_r = r2.x + r2.width;
3464 break;
3465 }
3466
3467 if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
3468 return true;
3469 }
3470
3471 return false;
3472 }
3473
dcn10_set_cursor_position(struct pipe_ctx * pipe_ctx)3474 void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
3475 {
3476 struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
3477 struct hubp *hubp = pipe_ctx->plane_res.hubp;
3478 struct dpp *dpp = pipe_ctx->plane_res.dpp;
3479 struct dc_cursor_mi_param param = {
3480 .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
3481 .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
3482 .viewport = pipe_ctx->plane_res.scl_data.viewport,
3483 .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
3484 .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
3485 .rotation = pipe_ctx->plane_state->rotation,
3486 .mirror = pipe_ctx->plane_state->horizontal_mirror,
3487 .stream = pipe_ctx->stream,
3488 };
3489 bool pipe_split_on = false;
3490 bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
3491 (pipe_ctx->prev_odm_pipe != NULL);
3492
3493 int x_plane = pipe_ctx->plane_state->dst_rect.x;
3494 int y_plane = pipe_ctx->plane_state->dst_rect.y;
3495 int x_pos = pos_cpy.x;
3496 int y_pos = pos_cpy.y;
3497
3498 if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
3499 if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
3500 (pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) {
3501 pipe_split_on = true;
3502 }
3503 }
3504
3505 /**
3506 * DC cursor is stream space, HW cursor is plane space and drawn
3507 * as part of the framebuffer.
3508 *
3509 * Cursor position can't be negative, but hotspot can be used to
3510 * shift cursor out of the plane bounds. Hotspot must be smaller
3511 * than the cursor size.
3512 */
3513
3514 /**
3515 * Translate cursor from stream space to plane space.
3516 *
3517 * If the cursor is scaled then we need to scale the position
3518 * to be in the approximately correct place. We can't do anything
3519 * about the actual size being incorrect, that's a limitation of
3520 * the hardware.
3521 */
3522 if (param.rotation == ROTATION_ANGLE_90 || param.rotation == ROTATION_ANGLE_270) {
3523 x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.height /
3524 pipe_ctx->plane_state->dst_rect.width;
3525 y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.width /
3526 pipe_ctx->plane_state->dst_rect.height;
3527 } else {
3528 x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
3529 pipe_ctx->plane_state->dst_rect.width;
3530 y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
3531 pipe_ctx->plane_state->dst_rect.height;
3532 }
3533
3534 /**
3535 * If the cursor's source viewport is clipped then we need to
3536 * translate the cursor to appear in the correct position on
3537 * the screen.
3538 *
3539 * This translation isn't affected by scaling so it needs to be
3540 * done *after* we adjust the position for the scale factor.
3541 *
3542 * This is only done by opt-in for now since there are still
3543 * some usecases like tiled display that might enable the
3544 * cursor on both streams while expecting dc to clip it.
3545 */
3546 if (pos_cpy.translate_by_source) {
3547 x_pos += pipe_ctx->plane_state->src_rect.x;
3548 y_pos += pipe_ctx->plane_state->src_rect.y;
3549 }
3550
3551 /**
3552 * If the position is negative then we need to add to the hotspot
3553 * to shift the cursor outside the plane.
3554 */
3555
3556 if (x_pos < 0) {
3557 pos_cpy.x_hotspot -= x_pos;
3558 x_pos = 0;
3559 }
3560
3561 if (y_pos < 0) {
3562 pos_cpy.y_hotspot -= y_pos;
3563 y_pos = 0;
3564 }
3565
3566 pos_cpy.x = (uint32_t)x_pos;
3567 pos_cpy.y = (uint32_t)y_pos;
3568
3569 if (pipe_ctx->plane_state->address.type
3570 == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
3571 pos_cpy.enable = false;
3572
3573 if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
3574 pos_cpy.enable = false;
3575
3576
3577 if (param.rotation == ROTATION_ANGLE_0) {
3578 int viewport_width =
3579 pipe_ctx->plane_res.scl_data.viewport.width;
3580 int viewport_x =
3581 pipe_ctx->plane_res.scl_data.viewport.x;
3582
3583 if (param.mirror) {
3584 if (pipe_split_on || odm_combine_on) {
3585 if (pos_cpy.x >= viewport_width + viewport_x) {
3586 pos_cpy.x = 2 * viewport_width
3587 - pos_cpy.x + 2 * viewport_x;
3588 } else {
3589 uint32_t temp_x = pos_cpy.x;
3590
3591 pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3592 if (temp_x >= viewport_x +
3593 (int)hubp->curs_attr.width || pos_cpy.x
3594 <= (int)hubp->curs_attr.width +
3595 pipe_ctx->plane_state->src_rect.x) {
3596 pos_cpy.x = 2 * viewport_width - temp_x;
3597 }
3598 }
3599 } else {
3600 pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3601 }
3602 }
3603 }
3604 // Swap axis and mirror horizontally
3605 else if (param.rotation == ROTATION_ANGLE_90) {
3606 uint32_t temp_x = pos_cpy.x;
3607
3608 pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
3609 (pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
3610 pos_cpy.y = temp_x;
3611 }
3612 // Swap axis and mirror vertically
3613 else if (param.rotation == ROTATION_ANGLE_270) {
3614 uint32_t temp_y = pos_cpy.y;
3615 int viewport_height =
3616 pipe_ctx->plane_res.scl_data.viewport.height;
3617 int viewport_y =
3618 pipe_ctx->plane_res.scl_data.viewport.y;
3619
3620 /**
3621 * Display groups that are 1xnY, have pos_cpy.x > 2 * viewport.height
3622 * For pipe split cases:
3623 * - apply offset of viewport.y to normalize pos_cpy.x
3624 * - calculate the pos_cpy.y as before
3625 * - shift pos_cpy.y back by same offset to get final value
3626 * - since we iterate through both pipes, use the lower
3627 * viewport.y for offset
3628 * For non pipe split cases, use the same calculation for
3629 * pos_cpy.y as the 180 degree rotation case below,
3630 * but use pos_cpy.x as our input because we are rotating
3631 * 270 degrees
3632 */
3633 if (pipe_split_on || odm_combine_on) {
3634 int pos_cpy_x_offset;
3635 int other_pipe_viewport_y;
3636
3637 if (pipe_split_on) {
3638 if (pipe_ctx->bottom_pipe) {
3639 other_pipe_viewport_y =
3640 pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
3641 } else {
3642 other_pipe_viewport_y =
3643 pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
3644 }
3645 } else {
3646 if (pipe_ctx->next_odm_pipe) {
3647 other_pipe_viewport_y =
3648 pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
3649 } else {
3650 other_pipe_viewport_y =
3651 pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
3652 }
3653 }
3654 pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
3655 other_pipe_viewport_y : viewport_y;
3656 pos_cpy.x -= pos_cpy_x_offset;
3657 if (pos_cpy.x > viewport_height) {
3658 pos_cpy.x = pos_cpy.x - viewport_height;
3659 pos_cpy.y = viewport_height - pos_cpy.x;
3660 } else {
3661 pos_cpy.y = 2 * viewport_height - pos_cpy.x;
3662 }
3663 pos_cpy.y += pos_cpy_x_offset;
3664 } else {
3665 pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
3666 }
3667 pos_cpy.x = temp_y;
3668 }
3669 // Mirror horizontally and vertically
3670 else if (param.rotation == ROTATION_ANGLE_180) {
3671 int viewport_width =
3672 pipe_ctx->plane_res.scl_data.viewport.width;
3673 int viewport_x =
3674 pipe_ctx->plane_res.scl_data.viewport.x;
3675
3676 if (!param.mirror) {
3677 if (pipe_split_on || odm_combine_on) {
3678 if (pos_cpy.x >= viewport_width + viewport_x) {
3679 pos_cpy.x = 2 * viewport_width
3680 - pos_cpy.x + 2 * viewport_x;
3681 } else {
3682 uint32_t temp_x = pos_cpy.x;
3683
3684 pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3685 if (temp_x >= viewport_x +
3686 (int)hubp->curs_attr.width || pos_cpy.x
3687 <= (int)hubp->curs_attr.width +
3688 pipe_ctx->plane_state->src_rect.x) {
3689 pos_cpy.x = temp_x + viewport_width;
3690 }
3691 }
3692 } else {
3693 pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3694 }
3695 }
3696
3697 /**
3698 * Display groups that are 1xnY, have pos_cpy.y > viewport.height
3699 * Calculation:
3700 * delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
3701 * pos_cpy.y_new = viewport.y + delta_from_bottom
3702 * Simplify it as:
3703 * pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
3704 */
3705 pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
3706 pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
3707 }
3708
3709 hubp->funcs->set_cursor_position(hubp, &pos_cpy, ¶m);
3710 dpp->funcs->set_cursor_position(dpp, &pos_cpy, ¶m, hubp->curs_attr.width, hubp->curs_attr.height);
3711 }
3712
dcn10_set_cursor_attribute(struct pipe_ctx * pipe_ctx)3713 void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3714 {
3715 struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3716
3717 pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3718 pipe_ctx->plane_res.hubp, attributes);
3719 pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3720 pipe_ctx->plane_res.dpp, attributes);
3721 }
3722
dcn10_set_cursor_sdr_white_level(struct pipe_ctx * pipe_ctx)3723 void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3724 {
3725 uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3726 struct fixed31_32 multiplier;
3727 struct dpp_cursor_attributes opt_attr = { 0 };
3728 uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3729 struct custom_float_format fmt;
3730
3731 if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
3732 return;
3733
3734 fmt.exponenta_bits = 5;
3735 fmt.mantissa_bits = 10;
3736 fmt.sign = true;
3737
3738 if (sdr_white_level > 80) {
3739 multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3740 convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3741 }
3742
3743 opt_attr.scale = hw_scale;
3744 opt_attr.bias = 0;
3745
3746 pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3747 pipe_ctx->plane_res.dpp, &opt_attr);
3748 }
3749
3750 /*
3751 * apply_front_porch_workaround TODO FPGA still need?
3752 *
3753 * This is a workaround for a bug that has existed since R5xx and has not been
3754 * fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
3755 */
apply_front_porch_workaround(struct dc_crtc_timing * timing)3756 static void apply_front_porch_workaround(
3757 struct dc_crtc_timing *timing)
3758 {
3759 if (timing->flags.INTERLACE == 1) {
3760 if (timing->v_front_porch < 2)
3761 timing->v_front_porch = 2;
3762 } else {
3763 if (timing->v_front_porch < 1)
3764 timing->v_front_porch = 1;
3765 }
3766 }
3767
dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx * pipe_ctx)3768 int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
3769 {
3770 const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3771 struct dc_crtc_timing patched_crtc_timing;
3772 int vesa_sync_start;
3773 int asic_blank_end;
3774 int interlace_factor;
3775
3776 patched_crtc_timing = *dc_crtc_timing;
3777 apply_front_porch_workaround(&patched_crtc_timing);
3778
3779 interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
3780
3781 vesa_sync_start = patched_crtc_timing.v_addressable +
3782 patched_crtc_timing.v_border_bottom +
3783 patched_crtc_timing.v_front_porch;
3784
3785 asic_blank_end = (patched_crtc_timing.v_total -
3786 vesa_sync_start -
3787 patched_crtc_timing.v_border_top)
3788 * interlace_factor;
3789
3790 return asic_blank_end -
3791 pipe_ctx->pipe_dlg_param.vstartup_start + 1;
3792 }
3793
/*
 * Compute the scanline window for the VUPDATE interrupt.
 * start_line is the vupdate offset wrapped into [0, v_total);
 * end_line trails it by two lines (modulo v_total).
 */
void dcn10_calc_vupdate_position(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		uint32_t *start_line,
		uint32_t *end_line)
{
	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
	int v_total = timing->v_total;
	int vupdate = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);

	if (vupdate >= 0)
		*start_line = vupdate - ((vupdate / v_total) * v_total);
	else
		*start_line = vupdate + ((-vupdate / v_total) + 1) * v_total - 1;
	*end_line = (*start_line + 2) % v_total;
}
3809
/*
 * Compute the scanline window for the periodic vline interrupt.
 * The requested line offset is interpreted relative to the stream's
 * configured reference point (VUPDATE or VSYNC).
 */
static void dcn10_cal_vline_position(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		uint32_t *start_line,
		uint32_t *end_line)
{
	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
	int vline_pos = pipe_ctx->stream->periodic_interrupt.lines_offset;

	switch (pipe_ctx->stream->periodic_interrupt.ref_point) {
	case START_V_UPDATE:
		/* Bias the requested offset one line towards zero */
		if (vline_pos > 0)
			vline_pos--;
		else if (vline_pos < 0)
			vline_pos++;

		/* Rebase onto VUPDATE and wrap into [0, v_total) */
		vline_pos += dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
		if (vline_pos >= 0)
			*start_line = vline_pos - ((vline_pos / timing->v_total) * timing->v_total);
		else
			*start_line = vline_pos + ((-vline_pos / timing->v_total) + 1) * timing->v_total - 1;
		*end_line = (*start_line + 2) % timing->v_total;
		break;
	case START_V_SYNC:
		/* VSYNC is line 0, so start_line is just the requested offset */
		*start_line = vline_pos;
		*end_line = (*start_line + 2) % timing->v_total;
		break;
	default:
		ASSERT(0);
		break;
	}
}
3838
dcn10_setup_periodic_interrupt(struct dc * dc,struct pipe_ctx * pipe_ctx)3839 void dcn10_setup_periodic_interrupt(
3840 struct dc *dc,
3841 struct pipe_ctx *pipe_ctx)
3842 {
3843 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3844 uint32_t start_line = 0;
3845 uint32_t end_line = 0;
3846
3847 dcn10_cal_vline_position(dc, pipe_ctx, &start_line, &end_line);
3848
3849 tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3850 }
3851
dcn10_setup_vupdate_interrupt(struct dc * dc,struct pipe_ctx * pipe_ctx)3852 void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
3853 {
3854 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3855 int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3856
3857 if (start_line < 0) {
3858 ASSERT(0);
3859 start_line = 0;
3860 }
3861
3862 if (tg->funcs->setup_vertical_interrupt2)
3863 tg->funcs->setup_vertical_interrupt2(tg, start_line);
3864 }
3865
dcn10_unblank_stream(struct pipe_ctx * pipe_ctx,struct dc_link_settings * link_settings)3866 void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3867 struct dc_link_settings *link_settings)
3868 {
3869 struct encoder_unblank_param params = {0};
3870 struct dc_stream_state *stream = pipe_ctx->stream;
3871 struct dc_link *link = stream->link;
3872 struct dce_hwseq *hws = link->dc->hwseq;
3873
3874 /* only 3 items below are used by unblank */
3875 params.timing = pipe_ctx->stream->timing;
3876
3877 params.link_settings.link_rate = link_settings->link_rate;
3878
3879 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3880 if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3881 params.timing.pix_clk_100hz /= 2;
3882 pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, ¶ms);
3883 }
3884
3885 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3886 hws->funcs.edp_backlight_control(link, true);
3887 }
3888 }
3889
dcn10_send_immediate_sdp_message(struct pipe_ctx * pipe_ctx,const uint8_t * custom_sdp_message,unsigned int sdp_message_size)3890 void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
3891 const uint8_t *custom_sdp_message,
3892 unsigned int sdp_message_size)
3893 {
3894 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3895 pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3896 pipe_ctx->stream_res.stream_enc,
3897 custom_sdp_message,
3898 sdp_message_size);
3899 }
3900 }
/*
 * Request a new DISPCLK or DPPCLK frequency. The request is validated
 * against the clock manager's reported range, recorded in the current
 * state's clock struct, and then pushed via update_clocks.
 * Note: the stepping parameter is currently unused.
 */
enum dc_status dcn10_set_clock(struct dc *dc,
		enum dc_clock_type clock_type,
		uint32_t clk_khz,
		uint32_t stepping)
{
	struct dc_state *context = dc->current_state;
	struct dc_clock_config clock_cfg = {0};
	struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;

	if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
		return DC_FAIL_UNSUPPORTED_1;

	dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
			context, clock_type, &clock_cfg);

	/* Validate the request against the reported limits */
	if (clk_khz > clock_cfg.max_clock_khz)
		return DC_FAIL_CLK_EXCEED_MAX;

	if (clk_khz < clock_cfg.min_clock_khz)
		return DC_FAIL_CLK_BELOW_MIN;

	if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
		return DC_FAIL_CLK_BELOW_CFG_REQUIRED;

	/* Record the request so the clock update below picks it up */
	switch (clock_type) {
	case DC_CLOCK_TYPE_DISPCLK:
		current_clocks->dispclk_khz = clk_khz;
		break;
	case DC_CLOCK_TYPE_DPPCLK:
		current_clocks->dppclk_khz = clk_khz;
		break;
	default:
		return DC_ERROR_UNEXPECTED;
	}

	if (dc->clk_mgr->funcs->update_clocks)
		dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
				context, true);

	return DC_OK;
}
3939
dcn10_get_clock(struct dc * dc,enum dc_clock_type clock_type,struct dc_clock_config * clock_cfg)3940 void dcn10_get_clock(struct dc *dc,
3941 enum dc_clock_type clock_type,
3942 struct dc_clock_config *clock_cfg)
3943 {
3944 struct dc_state *context = dc->current_state;
3945
3946 if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3947 dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
3948
3949 }
3950
dcn10_get_dcc_en_bits(struct dc * dc,int * dcc_en_bits)3951 void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
3952 {
3953 struct resource_pool *pool = dc->res_pool;
3954 int i;
3955
3956 for (i = 0; i < pool->pipe_count; i++) {
3957 struct hubp *hubp = pool->hubps[i];
3958 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
3959
3960 hubp->funcs->hubp_read_state(hubp);
3961
3962 if (!s->blank_en)
3963 dcc_en_bits[i] = s->dcc_en ? 1 : 0;
3964 }
3965 }
3966