1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 */
24
25 #include <linux/slab.h>
26 #include <linux/mm.h>
27
28 #include "dm_services.h"
29
30 #include "dc.h"
31
32 #include "core_status.h"
33 #include "core_types.h"
34 #include "hw_sequencer.h"
35 #include "dce/dce_hwseq.h"
36
37 #include "resource.h"
38
39 #include "clk_mgr.h"
40 #include "clock_source.h"
41 #include "dc_bios_types.h"
42
43 #include "bios_parser_interface.h"
44 #include "bios/bios_parser_helper.h"
45 #include "include/irq_service_interface.h"
46 #include "transform.h"
47 #include "dmcu.h"
48 #include "dpp.h"
49 #include "timing_generator.h"
50 #include "abm.h"
51 #include "virtual/virtual_link_encoder.h"
52 #include "hubp.h"
53
54 #include "link_hwss.h"
55 #include "link_encoder.h"
56 #include "link_enc_cfg.h"
57
58 #include "dc_link.h"
59 #include "dc_link_ddc.h"
60 #include "dm_helpers.h"
61 #include "mem_input.h"
62
63 #include "dc_link_dp.h"
64 #include "dc_dmub_srv.h"
65
66 #include "dsc.h"
67
68 #include "vm_helper.h"
69
70 #include "dce/dce_i2c.h"
71
72 #include "dmub/dmub_srv.h"
73
74 #include "i2caux_interface.h"
75 #include "dce/dmub_hw_lock_mgr.h"
76
77 #include "dc_trace.h"
78
79 #define CTX \
80 dc->ctx
81
82 #define DC_LOGGER \
83 dc->ctx->logger
84
85 static const char DC_BUILD_ID[] = "production-build";
86
87 /**
88 * DOC: Overview
89 *
90 * DC is the OS-agnostic component of the amdgpu DC driver.
91 *
92 * DC maintains and validates a set of structs representing the state of the
93 * driver and writes that state to AMD hardware.
94 *
95 * Main DC HW structs:
96 *
97 * struct dc - The central struct. One per driver. Created on driver load,
98 * destroyed on driver unload.
99 *
100 * struct dc_context - One per driver.
101 * Used as a backpointer by most other structs in dc.
102 *
103 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
104 * plugpoints). Created on driver load, destroyed on driver unload.
105 *
106 * struct dc_sink - One per display. Created on boot or hotplug.
107 * Destroyed on shutdown or hotunplug. A dc_link can have a local sink
108 * (the display directly attached). It may also have one or more remote
109 * sinks (in the Multi-Stream Transport case).
110 *
111 * struct resource_pool - One per driver. Represents the hw blocks not in the
112 * main pipeline. Not directly accessible by dm.
113 *
114 * Main dc state structs:
115 *
116 * These structs can be created and destroyed as needed. There is a full set of
117 * these structs in dc->current_state representing the currently programmed state.
118 *
119 * struct dc_state - The global DC state to track global state information,
120 * such as bandwidth values.
121 *
122 * struct dc_stream_state - Represents the hw configuration for the pipeline from
123 * a framebuffer to a display. Maps one-to-one with dc_sink.
124 *
125 * struct dc_plane_state - Represents a framebuffer. Each stream has at least one,
126 * and may have more in the Multi-Plane Overlay case.
127 *
128 * struct resource_context - Represents the programmable state of everything in
129 * the resource_pool. Not directly accessible by dm.
130 *
131 * struct pipe_ctx - A member of struct resource_context. Represents the
132 * internal hardware pipeline components. Each dc_plane_state has either
133 * one or two (in the pipe-split case).
134 */
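
/*
 * Rough ownership picture of the objects described above. This is an
 * illustrative ASCII sketch added for orientation only; it is not part of
 * the driver and omits many details.
 *
 *   struct dc
 *    +-- dc->links[]          one struct dc_link per connector
 *    |     +-- local/remote   struct dc_sink(s), one per display
 *    +-- dc->res_pool         struct resource_pool, shared hw blocks
 *    +-- dc->current_state    struct dc_state
 *          +-- streams[]      struct dc_stream_state, one per dc_sink
 *          |     +-- plane state(s)  struct dc_plane_state, >= 1 per stream
 *          +-- res_ctx        struct resource_context / struct pipe_ctx
 */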
135
136 /*******************************************************************************
137 * Private functions
138 ******************************************************************************/
139
140 static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
141 {
142 if (new > *original)
143 *original = new;
144 }
145
146 static void destroy_links(struct dc *dc)
147 {
148 uint32_t i;
149
150 for (i = 0; i < dc->link_count; i++) {
151 if (NULL != dc->links[i])
152 link_destroy(&dc->links[i]);
153 }
154 }
155
156 static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
157 {
158 int i;
159 uint32_t count = 0;
160
161 for (i = 0; i < num_links; i++) {
162 if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
163 links[i]->is_internal_display)
164 count++;
165 }
166
167 return count;
168 }
169
170 static int get_seamless_boot_stream_count(struct dc_state *ctx)
171 {
172 uint8_t i;
173 uint8_t seamless_boot_stream_count = 0;
174
175 for (i = 0; i < ctx->stream_count; i++)
176 if (ctx->streams[i]->apply_seamless_boot_optimization)
177 seamless_boot_stream_count++;
178
179 return seamless_boot_stream_count;
180 }
181
182 static bool create_links(
183 struct dc *dc,
184 uint32_t num_virtual_links)
185 {
186 int i;
187 int connectors_num;
188 struct dc_bios *bios = dc->ctx->dc_bios;
189
190 dc->link_count = 0;
191
192 connectors_num = bios->funcs->get_connectors_number(bios);
193
194 DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);
195
196 if (connectors_num > ENUM_ID_COUNT) {
197 dm_error(
198 "DC: Number of connectors %d exceeds maximum of %d!\n",
199 connectors_num,
200 ENUM_ID_COUNT);
201 return false;
202 }
203
204 dm_output_to_console(
205 "DC: %s: connectors_num: physical:%d, virtual:%d\n",
206 __func__,
207 connectors_num,
208 num_virtual_links);
209
210 for (i = 0; i < connectors_num; i++) {
211 struct link_init_data link_init_params = {0};
212 struct dc_link *link;
213
214 DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);
215
216 link_init_params.ctx = dc->ctx;
217 /* next BIOS object table connector */
218 link_init_params.connector_index = i;
219 link_init_params.link_index = dc->link_count;
220 link_init_params.dc = dc;
221 link = link_create(&link_init_params);
222
223 if (link) {
224 dc->links[dc->link_count] = link;
225 link->dc = dc;
226 ++dc->link_count;
227 }
228 }
229
230 DC_LOG_DC("BIOS object table - end");
231
232 for (i = 0; i < num_virtual_links; i++) {
233 struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
234 struct encoder_init_data enc_init = {0};
235
236 if (link == NULL) {
237 BREAK_TO_DEBUGGER();
238 goto failed_alloc;
239 }
240
241 link->link_index = dc->link_count;
242 dc->links[dc->link_count] = link;
243 dc->link_count++;
244
245 link->ctx = dc->ctx;
246 link->dc = dc;
247 link->connector_signal = SIGNAL_TYPE_VIRTUAL;
248 link->link_id.type = OBJECT_TYPE_CONNECTOR;
249 link->link_id.id = CONNECTOR_ID_VIRTUAL;
250 link->link_id.enum_id = ENUM_ID_1;
251 link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
252
253 if (!link->link_enc) {
254 BREAK_TO_DEBUGGER();
255 goto failed_alloc;
256 }
257
258 link->link_status.dpcd_caps = &link->dpcd_caps;
259
260 enc_init.ctx = dc->ctx;
261 enc_init.channel = CHANNEL_ID_UNKNOWN;
262 enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
263 enc_init.transmitter = TRANSMITTER_UNKNOWN;
264 enc_init.connector = link->link_id;
265 enc_init.encoder.type = OBJECT_TYPE_ENCODER;
266 enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
267 enc_init.encoder.enum_id = ENUM_ID_1;
268 virtual_link_encoder_construct(link->link_enc, &enc_init);
269 }
270
271 dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count);
272
273 return true;
274
275 failed_alloc:
276 return false;
277 }
278
279 static struct dc_perf_trace *dc_perf_trace_create(void)
280 {
281 return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
282 }
283
284 static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
285 {
286 kfree(*perf_trace);
287 *perf_trace = NULL;
288 }
289
290 /**
291 * dc_stream_adjust_vmin_vmax:
292 *
293 * Looks up the pipe context of dc_stream_state and updates the
294 * vertical_total_min and vertical_total_max of DRR (Dynamic Refresh
295 * Rate), a power-saving feature that reduces the panel refresh rate
296 * while the screen content is static.
297 *
298 * @dc: dc reference
299 * @stream: Initial dc stream state
300 * @adjust: Updated parameters for vertical_total_min and vertical_total_max
301 */
302 bool dc_stream_adjust_vmin_vmax(struct dc *dc,
303 struct dc_stream_state *stream,
304 struct dc_crtc_timing_adjust *adjust)
305 {
306 int i;
307 bool ret = false;
308
309 stream->adjust.v_total_max = adjust->v_total_max;
310 stream->adjust.v_total_mid = adjust->v_total_mid;
311 stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
312 stream->adjust.v_total_min = adjust->v_total_min;
313
314 for (i = 0; i < MAX_PIPES; i++) {
315 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
316
317 if (pipe->stream == stream && pipe->stream_res.tg) {
318 dc->hwss.set_drr(&pipe,
319 1,
320 *adjust);
321
322 ret = true;
323 }
324 }
325 return ret;
326 }
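
/*
 * Illustrative example only (not part of the driver): a DM-side caller can
 * request a DRR range by filling a dc_crtc_timing_adjust and passing it in.
 * The local variable names below are hypothetical and error handling is
 * omitted.
 *
 *	struct dc_crtc_timing_adjust adjust = { 0 };
 *
 *	adjust.v_total_min = vtotal_at_max_refresh;
 *	adjust.v_total_max = vtotal_at_min_refresh;
 *	dc_stream_adjust_vmin_vmax(dc, stream, &adjust);
 */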
327
328 /**
329  * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of
330  * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh
331  * Rate).
332  *
333  * @dc: [in] dc reference
334  * @stream: [in] Initial dc stream state
335  * @refresh_rate: [out] VTOTAL last used by DRR
336  *
337  * Only works when the timing generator in use provides a
338  * get_last_used_drr_vtotal hook for the DC version in question.
339  *
340  * Return: true if a VTOTAL value was read back, false otherwise.
341  */
342 bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
343 struct dc_stream_state *stream,
344 uint32_t *refresh_rate)
345 {
346 bool status = false;
347
348 int i = 0;
349
350 for (i = 0; i < MAX_PIPES; i++) {
351 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
352
353 if (pipe->stream == stream && pipe->stream_res.tg) {
354 /* Only execute if a function pointer has been defined for
355 * the DC version in question
356 */
357 if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
358 pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);
359
360 status = true;
361
362 break;
363 }
364 }
365 }
366
367 return status;
368 }
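
/*
 * Illustrative example only: reading back the VTOTAL last used by DRR, e.g.
 * for reporting the effective refresh rate. "vtotal" is a hypothetical local
 * variable.
 *
 *	uint32_t vtotal = 0;
 *
 *	if (dc_stream_get_last_used_drr_vtotal(dc, stream, &vtotal))
 *		DC_LOG_DC("last DRR VTOTAL: %u", vtotal);
 */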
369
370 bool dc_stream_get_crtc_position(struct dc *dc,
371 struct dc_stream_state **streams, int num_streams,
372 unsigned int *v_pos, unsigned int *nom_v_pos)
373 {
374 /* TODO: Support multiple streams */
375 const struct dc_stream_state *stream = streams[0];
376 int i;
377 bool ret = false;
378 struct crtc_position position;
379
380 for (i = 0; i < MAX_PIPES; i++) {
381 struct pipe_ctx *pipe =
382 &dc->current_state->res_ctx.pipe_ctx[i];
383
384 if (pipe->stream == stream && pipe->stream_res.stream_enc) {
385 dc->hwss.get_position(&pipe, 1, &position);
386
387 *v_pos = position.vertical_count;
388 *nom_v_pos = position.nominal_vcount;
389 ret = true;
390 }
391 }
392 return ret;
393 }
394
395 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
396 bool dc_stream_forward_dmcu_crc_window(struct dc *dc, struct dc_stream_state *stream,
397 struct crc_params *crc_window)
398 {
399 int i;
400 struct dmcu *dmcu = dc->res_pool->dmcu;
401 struct pipe_ctx *pipe;
402 struct crc_region tmp_win, *crc_win;
403 struct otg_phy_mux mapping_tmp, *mux_mapping;
404
405 /*crc window can't be null*/
406 if (!crc_window)
407 return false;
408
409 if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) {
410 crc_win = &tmp_win;
411 mux_mapping = &mapping_tmp;
412 /*set crc window*/
413 tmp_win.x_start = crc_window->windowa_x_start;
414 tmp_win.y_start = crc_window->windowa_y_start;
415 tmp_win.x_end = crc_window->windowa_x_end;
416 tmp_win.y_end = crc_window->windowa_y_end;
417
418 for (i = 0; i < MAX_PIPES; i++) {
419 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
420 if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
421 break;
422 }
423
424 /* Stream not found */
425 if (i == MAX_PIPES)
426 return false;
427
428
429 /*set mux routing info*/
430 mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst;
431 mapping_tmp.otg_output_num = pipe->stream_res.tg->inst;
432
433 dmcu->funcs->forward_crc_window(dmcu, crc_win, mux_mapping);
434 } else {
435 DC_LOG_DC("dmcu is not initialized");
436 return false;
437 }
438
439 return true;
440 }
441
442 bool dc_stream_stop_dmcu_crc_win_update(struct dc *dc, struct dc_stream_state *stream)
443 {
444 int i;
445 struct dmcu *dmcu = dc->res_pool->dmcu;
446 struct pipe_ctx *pipe;
447 struct otg_phy_mux mapping_tmp, *mux_mapping;
448
449 if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) {
450 mux_mapping = &mapping_tmp;
451
452 for (i = 0; i < MAX_PIPES; i++) {
453 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
454 if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
455 break;
456 }
457
458 /* Stream not found */
459 if (i == MAX_PIPES)
460 return false;
461
462
463 /*set mux routing info*/
464 mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst;
465 mapping_tmp.otg_output_num = pipe->stream_res.tg->inst;
466
467 dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
468 } else {
469 DC_LOG_DC("dmcu is not initialized");
470 return false;
471 }
472
473 return true;
474 }
475 #endif
476
477 /**
478 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
479 * @dc: DC Object
480 * @stream: The stream to configure CRC on.
481 * @enable: Enable CRC if true, disable otherwise.
482 * @crc_window: CRC window (x/y start/end) information
483 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
484 * once.
485 *
486 * By default, only CRC0 is configured, and the entire frame is used to
487 * calculate the crc.
488 */
489 bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
490 struct crc_params *crc_window, bool enable, bool continuous)
491 {
492 int i;
493 struct pipe_ctx *pipe;
494 struct crc_params param;
495 struct timing_generator *tg;
496
497 for (i = 0; i < MAX_PIPES; i++) {
498 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
499 if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
500 break;
501 }
502 /* Stream not found */
503 if (i == MAX_PIPES)
504 return false;
505
506 /* By default, capture the full frame */
507 param.windowa_x_start = 0;
508 param.windowa_y_start = 0;
509 param.windowa_x_end = pipe->stream->timing.h_addressable;
510 param.windowa_y_end = pipe->stream->timing.v_addressable;
511 param.windowb_x_start = 0;
512 param.windowb_y_start = 0;
513 param.windowb_x_end = pipe->stream->timing.h_addressable;
514 param.windowb_y_end = pipe->stream->timing.v_addressable;
515
516 if (crc_window) {
517 param.windowa_x_start = crc_window->windowa_x_start;
518 param.windowa_y_start = crc_window->windowa_y_start;
519 param.windowa_x_end = crc_window->windowa_x_end;
520 param.windowa_y_end = crc_window->windowa_y_end;
521 param.windowb_x_start = crc_window->windowb_x_start;
522 param.windowb_y_start = crc_window->windowb_y_start;
523 param.windowb_x_end = crc_window->windowb_x_end;
524 param.windowb_y_end = crc_window->windowb_y_end;
525 }
526
527 param.dsc_mode = pipe->stream->timing.flags.DSC ? 1:0;
528 param.odm_mode = pipe->next_odm_pipe ? 1:0;
529
530 /* Default to the union of both windows */
531 param.selection = UNION_WINDOW_A_B;
532 param.continuous_mode = continuous;
533 param.enable = enable;
534
535 tg = pipe->stream_res.tg;
536
537 /* Only call if supported */
538 if (tg->funcs->configure_crc)
539 return tg->funcs->configure_crc(tg, &param);
540 DC_LOG_WARNING("CRC capture not supported.");
541 return false;
542 }
543
544 /**
545 * dc_stream_get_crc() - Get CRC values for the given stream.
546 * @dc: DC object
547 * @stream: The DC stream state of the stream to get CRCs from.
548 * @r_cr: CRC value for the first of the 3 channels stored here.
549 * @g_y: CRC value for the second of the 3 channels stored here.
550 * @b_cb: CRC value for the third of the 3 channels stored here.
551 *
552 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
553 * Return false if stream is not found, or if CRCs are not enabled.
554 */
555 bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
556 uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
557 {
558 int i;
559 struct pipe_ctx *pipe;
560 struct timing_generator *tg;
561
562 for (i = 0; i < MAX_PIPES; i++) {
563 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
564 if (pipe->stream == stream)
565 break;
566 }
567 /* Stream not found */
568 if (i == MAX_PIPES)
569 return false;
570
571 tg = pipe->stream_res.tg;
572
573 if (tg->funcs->get_crc)
574 return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
575 DC_LOG_WARNING("CRC capture not supported.");
576 return false;
577 }
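
/*
 * Illustrative example only: enabling continuous full-frame CRC capture and
 * reading the per-channel CRCs back. A NULL crc_window keeps the default
 * window covering the whole addressable area (see dc_stream_configure_crc).
 * Error handling is omitted.
 *
 *	uint32_t r_cr, g_y, b_cb;
 *
 *	dc_stream_configure_crc(dc, stream, NULL, true, true);
 *	(wait for at least one frame to complete)
 *	if (dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb))
 *		DC_LOG_DC("CRC: 0x%08x 0x%08x 0x%08x", r_cr, g_y, b_cb);
 */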
578
579 void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
580 enum dc_dynamic_expansion option)
581 {
582 /* OPP FMT dyn expansion updates*/
583 int i;
584 struct pipe_ctx *pipe_ctx;
585
586 for (i = 0; i < MAX_PIPES; i++) {
587 if (dc->current_state->res_ctx.pipe_ctx[i].stream
588 == stream) {
589 pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
590 pipe_ctx->stream_res.opp->dyn_expansion = option;
591 pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
592 pipe_ctx->stream_res.opp,
593 COLOR_SPACE_YCBCR601,
594 stream->timing.display_color_depth,
595 stream->signal);
596 }
597 }
598 }
599
600 void dc_stream_set_dither_option(struct dc_stream_state *stream,
601 enum dc_dither_option option)
602 {
603 struct bit_depth_reduction_params params;
604 struct dc_link *link = stream->link;
605 struct pipe_ctx *pipes = NULL;
606 int i;
607
608 for (i = 0; i < MAX_PIPES; i++) {
609 if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
610 stream) {
611 pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
612 break;
613 }
614 }
615
616 if (!pipes)
617 return;
618 if (option > DITHER_OPTION_MAX)
619 return;
620
621 stream->dither_option = option;
622
623 memset(&params, 0, sizeof(params));
624 resource_build_bit_depth_reduction_params(stream, &params);
625 stream->bit_depth_params = params;
626
627 if (pipes->plane_res.xfm &&
628 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
629 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
630 pipes->plane_res.xfm,
631 pipes->plane_res.scl_data.lb_params.depth,
632 &stream->bit_depth_params);
633 }
634
635 pipes->stream_res.opp->funcs->
636 opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
637 }
638
639 bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
640 {
641 int i;
642 bool ret = false;
643 struct pipe_ctx *pipes;
644
645 for (i = 0; i < MAX_PIPES; i++) {
646 if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
647 pipes = &dc->current_state->res_ctx.pipe_ctx[i];
648 dc->hwss.program_gamut_remap(pipes);
649 ret = true;
650 }
651 }
652
653 return ret;
654 }
655
656 bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
657 {
658 int i;
659 bool ret = false;
660 struct pipe_ctx *pipes;
661
662 for (i = 0; i < MAX_PIPES; i++) {
663 if (dc->current_state->res_ctx.pipe_ctx[i].stream
664 == stream) {
665
666 pipes = &dc->current_state->res_ctx.pipe_ctx[i];
667 dc->hwss.program_output_csc(dc,
668 pipes,
669 stream->output_color_space,
670 stream->csc_color_matrix.matrix,
671 pipes->stream_res.opp->inst);
672 ret = true;
673 }
674 }
675
676 return ret;
677 }
678
679 void dc_stream_set_static_screen_params(struct dc *dc,
680 struct dc_stream_state **streams,
681 int num_streams,
682 const struct dc_static_screen_params *params)
683 {
684 int i, j;
685 struct pipe_ctx *pipes_affected[MAX_PIPES];
686 int num_pipes_affected = 0;
687
688 for (i = 0; i < num_streams; i++) {
689 struct dc_stream_state *stream = streams[i];
690
691 for (j = 0; j < MAX_PIPES; j++) {
692 if (dc->current_state->res_ctx.pipe_ctx[j].stream
693 == stream) {
694 pipes_affected[num_pipes_affected++] =
695 &dc->current_state->res_ctx.pipe_ctx[j];
696 }
697 }
698 }
699
700 dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
701 }
702
703 static void dc_destruct(struct dc *dc)
704 {
705 if (dc->current_state) {
706 dc_release_state(dc->current_state);
707 dc->current_state = NULL;
708 }
709
710 destroy_links(dc);
711
712 if (dc->clk_mgr) {
713 dc_destroy_clk_mgr(dc->clk_mgr);
714 dc->clk_mgr = NULL;
715 }
716
717 dc_destroy_resource_pool(dc);
718
719 if (dc->ctx->gpio_service)
720 dal_gpio_service_destroy(&dc->ctx->gpio_service);
721
722 if (dc->ctx->created_bios)
723 dal_bios_parser_destroy(&dc->ctx->dc_bios);
724
725 dc_perf_trace_destroy(&dc->ctx->perf_trace);
726
727 kfree(dc->ctx);
728 dc->ctx = NULL;
729
730 kfree(dc->bw_vbios);
731 dc->bw_vbios = NULL;
732
733 kfree(dc->bw_dceip);
734 dc->bw_dceip = NULL;
735
736 #ifdef CONFIG_DRM_AMD_DC_DCN
737 kfree(dc->dcn_soc);
738 dc->dcn_soc = NULL;
739
740 kfree(dc->dcn_ip);
741 dc->dcn_ip = NULL;
742
743 #endif
744 kfree(dc->vm_helper);
745 dc->vm_helper = NULL;
746
747 }
748
749 static bool dc_construct_ctx(struct dc *dc,
750 const struct dc_init_data *init_params)
751 {
752 struct dc_context *dc_ctx;
753 enum dce_version dc_version = DCE_VERSION_UNKNOWN;
754
755 dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
756 if (!dc_ctx)
757 return false;
758
759 dc_ctx->cgs_device = init_params->cgs_device;
760 dc_ctx->driver_context = init_params->driver;
761 dc_ctx->dc = dc;
762 dc_ctx->asic_id = init_params->asic_id;
763 dc_ctx->dc_sink_id_count = 0;
764 dc_ctx->dc_stream_id_count = 0;
765 dc_ctx->dce_environment = init_params->dce_environment;
766
767 /* Create logger */
768
769 dc_version = resource_parse_asic_id(init_params->asic_id);
770 dc_ctx->dce_version = dc_version;
771
772 dc_ctx->perf_trace = dc_perf_trace_create();
773 if (!dc_ctx->perf_trace) {
774 kfree(dc_ctx);
775 ASSERT_CRITICAL(false);
776 return false;
777 }
778
779 dc->ctx = dc_ctx;
780
781 return true;
782 }
783
784 static bool dc_construct(struct dc *dc,
785 const struct dc_init_data *init_params)
786 {
787 struct dc_context *dc_ctx;
788 struct bw_calcs_dceip *dc_dceip;
789 struct bw_calcs_vbios *dc_vbios;
790 #ifdef CONFIG_DRM_AMD_DC_DCN
791 struct dcn_soc_bounding_box *dcn_soc;
792 struct dcn_ip_params *dcn_ip;
793 #endif
794
795 dc->config = init_params->flags;
796
797 // Allocate memory for the vm_helper
798 dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
799 if (!dc->vm_helper) {
800 dm_error("%s: failed to create dc->vm_helper\n", __func__);
801 goto fail;
802 }
803
804 memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));
805
806 dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
807 if (!dc_dceip) {
808 dm_error("%s: failed to create dceip\n", __func__);
809 goto fail;
810 }
811
812 dc->bw_dceip = dc_dceip;
813
814 dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
815 if (!dc_vbios) {
816 dm_error("%s: failed to create vbios\n", __func__);
817 goto fail;
818 }
819
820 dc->bw_vbios = dc_vbios;
821 #ifdef CONFIG_DRM_AMD_DC_DCN
822 dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
823 if (!dcn_soc) {
824 dm_error("%s: failed to create dcn_soc\n", __func__);
825 goto fail;
826 }
827
828 dc->dcn_soc = dcn_soc;
829
830 dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
831 if (!dcn_ip) {
832 dm_error("%s: failed to create dcn_ip\n", __func__);
833 goto fail;
834 }
835
836 dc->dcn_ip = dcn_ip;
837 #endif
838
839 if (!dc_construct_ctx(dc, init_params)) {
840 dm_error("%s: failed to create ctx\n", __func__);
841 goto fail;
842 }
843
844 dc_ctx = dc->ctx;
845
846 /* Resource should construct all asic specific resources.
847 * This should be the only place where we need to parse the asic id
848 */
849 if (init_params->vbios_override)
850 dc_ctx->dc_bios = init_params->vbios_override;
851 else {
852 /* Create BIOS parser */
853 struct bp_init_data bp_init_data;
854
855 bp_init_data.ctx = dc_ctx;
856 bp_init_data.bios = init_params->asic_id.atombios_base_address;
857
858 dc_ctx->dc_bios = dal_bios_parser_create(
859 &bp_init_data, dc_ctx->dce_version);
860
861 if (!dc_ctx->dc_bios) {
862 ASSERT_CRITICAL(false);
863 goto fail;
864 }
865
866 dc_ctx->created_bios = true;
867 }
868
869 dc->vendor_signature = init_params->vendor_signature;
870
871 /* Create GPIO service */
872 dc_ctx->gpio_service = dal_gpio_service_create(
873 dc_ctx->dce_version,
874 dc_ctx->dce_environment,
875 dc_ctx);
876
877 if (!dc_ctx->gpio_service) {
878 ASSERT_CRITICAL(false);
879 goto fail;
880 }
881
882 dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
883 if (!dc->res_pool)
884 goto fail;
885
886 /* set i2c speed if not done by the respective dcnxxx__resource.c */
887 if (dc->caps.i2c_speed_in_khz_hdcp == 0)
888 dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
889 if (dc->caps.max_optimizable_video_width == 0)
890 dc->caps.max_optimizable_video_width = 5120;
891 dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
892 if (!dc->clk_mgr)
893 goto fail;
894 #ifdef CONFIG_DRM_AMD_DC_DCN
895 dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
896
897 if (dc->res_pool->funcs->update_bw_bounding_box) {
898 DC_FP_START();
899 dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
900 DC_FP_END();
901 }
902 #endif
903
904 /* Creation of current_state must occur after dc->dml
905 * is initialized in dc_create_resource_pool because
906 * on creation it copies the contents of dc->dml
907 */
908
909 dc->current_state = dc_create_state(dc);
910
911 if (!dc->current_state) {
912 dm_error("%s: failed to create validate ctx\n", __func__);
913 goto fail;
914 }
915
916 dc_resource_state_construct(dc, dc->current_state);
917
918 if (!create_links(dc, init_params->num_virtual_links))
919 goto fail;
920
921 /* Initialise DIG link encoder resource tracking variables. */
922 link_enc_cfg_init(dc, dc->current_state);
923
924 return true;
925
926 fail:
927 return false;
928 }
929
930 static void disable_all_writeback_pipes_for_stream(
931 const struct dc *dc,
932 struct dc_stream_state *stream,
933 struct dc_state *context)
934 {
935 int i;
936
937 for (i = 0; i < stream->num_wb_info; i++)
938 stream->writeback_info[i].wb_enabled = false;
939 }
940
941 static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context,
942 struct dc_stream_state *stream, bool lock)
943 {
944 int i;
945
946 /* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
947 if (dc->hwss.interdependent_update_lock)
948 dc->hwss.interdependent_update_lock(dc, context, lock);
949 else {
950 for (i = 0; i < dc->res_pool->pipe_count; i++) {
951 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
952 struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
953
954 // Copied conditions that were previously in dce110_apply_ctx_for_surface
955 if (stream == pipe_ctx->stream) {
956 if (!pipe_ctx->top_pipe &&
957 (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
958 dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
959 }
960 }
961 }
962 }
963
964 static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
965 {
966 int i, j;
967 struct dc_state *dangling_context = dc_create_state(dc);
968 struct dc_state *current_ctx;
969
970 if (dangling_context == NULL)
971 return;
972
973 dc_resource_state_copy_construct(dc->current_state, dangling_context);
974
975 for (i = 0; i < dc->res_pool->pipe_count; i++) {
976 struct dc_stream_state *old_stream =
977 dc->current_state->res_ctx.pipe_ctx[i].stream;
978 bool should_disable = true;
979
980 for (j = 0; j < context->stream_count; j++) {
981 if (old_stream == context->streams[j]) {
982 should_disable = false;
983 break;
984 }
985 }
986 if (should_disable && old_stream) {
987 dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
988 disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
989
990 if (dc->hwss.apply_ctx_for_surface) {
991 apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
992 dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
993 apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
994 dc->hwss.post_unlock_program_front_end(dc, dangling_context);
995 }
996 if (dc->hwss.program_front_end_for_ctx) {
997 dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
998 dc->hwss.program_front_end_for_ctx(dc, dangling_context);
999 dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
1000 dc->hwss.post_unlock_program_front_end(dc, dangling_context);
1001 }
1002 }
1003 }
1004
1005 current_ctx = dc->current_state;
1006 dc->current_state = dangling_context;
1007 dc_release_state(current_ctx);
1008 }
1009
1010 static void disable_vbios_mode_if_required(
1011 struct dc *dc,
1012 struct dc_state *context)
1013 {
1014 unsigned int i, j;
1015
1016 /* check if timing_changed, disable stream*/
1017 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1018 struct dc_stream_state *stream = NULL;
1019 struct dc_link *link = NULL;
1020 struct pipe_ctx *pipe = NULL;
1021
1022 pipe = &context->res_ctx.pipe_ctx[i];
1023 stream = pipe->stream;
1024 if (stream == NULL)
1025 continue;
1026
1027 if (stream->apply_seamless_boot_optimization)
1028 continue;
1029
1030 // only looking for first odm pipe
1031 if (pipe->prev_odm_pipe)
1032 continue;
1033
1034 if (stream->link->local_sink &&
1035 stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
1036 link = stream->link;
1037 }
1038
1039 if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
1040 unsigned int enc_inst, tg_inst = 0;
1041 unsigned int pix_clk_100hz;
1042
1043 enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
1044 if (enc_inst != ENGINE_ID_UNKNOWN) {
1045 for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
1046 if (dc->res_pool->stream_enc[j]->id == enc_inst) {
1047 tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
1048 dc->res_pool->stream_enc[j]);
1049 break;
1050 }
1051 }
1052
1053 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
1054 dc->res_pool->dp_clock_source,
1055 tg_inst, &pix_clk_100hz);
1056
1057 if (link->link_status.link_active) {
1058 uint32_t requested_pix_clk_100hz =
1059 pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;
1060
1061 if (pix_clk_100hz != requested_pix_clk_100hz) {
1062 core_link_disable_stream(pipe);
1063 pipe->stream->dpms_off = false;
1064 }
1065 }
1066 }
1067 }
1068 }
1069 }
1070
1071 static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
1072 {
1073 int i;
1074 PERF_TRACE();
1075 for (i = 0; i < MAX_PIPES; i++) {
1076 int count = 0;
1077 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
1078
1079 if (!pipe->plane_state)
1080 continue;
1081
1082 /* Timeout 100 ms */
1083 while (count < 100000) {
1084 /* Must set to false to start with, due to OR in update function */
1085 pipe->plane_state->status.is_flip_pending = false;
1086 dc->hwss.update_pending_status(pipe);
1087 if (!pipe->plane_state->status.is_flip_pending)
1088 break;
1089 udelay(1);
1090 count++;
1091 }
1092 ASSERT(!pipe->plane_state->status.is_flip_pending);
1093 }
1094 PERF_TRACE();
1095 }
1096
1097 /*******************************************************************************
1098 * Public functions
1099 ******************************************************************************/
1100
1101 struct dc *dc_create(const struct dc_init_data *init_params)
1102 {
1103 struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
1104 unsigned int full_pipe_count;
1105
1106 if (!dc)
1107 return NULL;
1108
1109 if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
1110 if (!dc_construct_ctx(dc, init_params))
1111 goto destruct_dc;
1112 } else {
1113 if (!dc_construct(dc, init_params))
1114 goto destruct_dc;
1115
1116 full_pipe_count = dc->res_pool->pipe_count;
1117 if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
1118 full_pipe_count--;
1119 dc->caps.max_streams = min(
1120 full_pipe_count,
1121 dc->res_pool->stream_enc_count);
1122
1123 dc->caps.max_links = dc->link_count;
1124 dc->caps.max_audios = dc->res_pool->audio_count;
1125 dc->caps.linear_pitch_alignment = 64;
1126
1127 dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
1128
1129 dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;
1130
1131 if (dc->res_pool->dmcu != NULL)
1132 dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
1133 }
1134
1135 /* Populate versioning information */
1136 dc->versions.dc_ver = DC_VER;
1137
1138 dc->build_id = DC_BUILD_ID;
1139
1140 DC_LOG_DC("Display Core initialized\n");
1141
1142
1143
1144 return dc;
1145
1146 destruct_dc:
1147 dc_destruct(dc);
1148 kfree(dc);
1149 return NULL;
1150 }
1151
1152 static void detect_edp_presence(struct dc *dc)
1153 {
1154 struct dc_link *edp_links[MAX_NUM_EDP];
1155 struct dc_link *edp_link = NULL;
1156 enum dc_connection_type type;
1157 int i;
1158 int edp_num;
1159
1160 get_edp_links(dc, edp_links, &edp_num);
1161 if (!edp_num)
1162 return;
1163
1164 for (i = 0; i < edp_num; i++) {
1165 edp_link = edp_links[i];
1166 if (dc->config.edp_not_connected) {
1167 edp_link->edp_sink_present = false;
1168 } else {
1169 dc_link_detect_sink(edp_link, &type);
1170 edp_link->edp_sink_present = (type != dc_connection_none);
1171 }
1172 }
1173 }
1174
1175 void dc_hardware_init(struct dc *dc)
1176 {
1177
1178 detect_edp_presence(dc);
1179 if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
1180 dc->hwss.init_hw(dc);
1181 }
1182
1183 void dc_init_callbacks(struct dc *dc,
1184 const struct dc_callback_init *init_params)
1185 {
1186 #ifdef CONFIG_DRM_AMD_DC_HDCP
1187 dc->ctx->cp_psp = init_params->cp_psp;
1188 #endif
1189 }
1190
1191 void dc_deinit_callbacks(struct dc *dc)
1192 {
1193 #ifdef CONFIG_DRM_AMD_DC_HDCP
1194 memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
1195 #endif
1196 }
1197
1198 void dc_destroy(struct dc **dc)
1199 {
1200 dc_destruct(*dc);
1201 kfree(*dc);
1202 *dc = NULL;
1203 }
1204
1205 static void enable_timing_multisync(
1206 struct dc *dc,
1207 struct dc_state *ctx)
1208 {
1209 int i, multisync_count = 0;
1210 int pipe_count = dc->res_pool->pipe_count;
1211 struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };
1212
1213 for (i = 0; i < pipe_count; i++) {
1214 if (!ctx->res_ctx.pipe_ctx[i].stream ||
1215 !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
1216 continue;
1217 if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
1218 continue;
1219 multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
1220 multisync_count++;
1221 }
1222
1223 if (multisync_count > 0) {
1224 dc->hwss.enable_per_frame_crtc_position_reset(
1225 dc, multisync_count, multisync_pipes);
1226 }
1227 }
1228
1229 static void program_timing_sync(
1230 struct dc *dc,
1231 struct dc_state *ctx)
1232 {
1233 int i, j, k;
1234 int group_index = 0;
1235 int num_group = 0;
1236 int pipe_count = dc->res_pool->pipe_count;
1237 struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
1238
1239 for (i = 0; i < pipe_count; i++) {
1240 if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
1241 continue;
1242
1243 unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
1244 }
1245
1246 for (i = 0; i < pipe_count; i++) {
1247 int group_size = 1;
1248 enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE;
1249 struct pipe_ctx *pipe_set[MAX_PIPES];
1250
1251 if (!unsynced_pipes[i])
1252 continue;
1253
1254 pipe_set[0] = unsynced_pipes[i];
1255 unsynced_pipes[i] = NULL;
1256
1257 /* Add tg to the set, search rest of the tg's for ones with
1258 * same timing, add all tgs with same timing to the group
1259 */
1260 for (j = i + 1; j < pipe_count; j++) {
1261 if (!unsynced_pipes[j])
1262 continue;
1263 if (sync_type != TIMING_SYNCHRONIZABLE &&
1264 dc->hwss.enable_vblanks_synchronization &&
1265 unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
1266 resource_are_vblanks_synchronizable(
1267 unsynced_pipes[j]->stream,
1268 pipe_set[0]->stream)) {
1269 sync_type = VBLANK_SYNCHRONIZABLE;
1270 pipe_set[group_size] = unsynced_pipes[j];
1271 unsynced_pipes[j] = NULL;
1272 group_size++;
1273 } else
1274 if (sync_type != VBLANK_SYNCHRONIZABLE &&
1275 resource_are_streams_timing_synchronizable(
1276 unsynced_pipes[j]->stream,
1277 pipe_set[0]->stream)) {
1278 sync_type = TIMING_SYNCHRONIZABLE;
1279 pipe_set[group_size] = unsynced_pipes[j];
1280 unsynced_pipes[j] = NULL;
1281 group_size++;
1282 }
1283 }
1284
1285 /* set first unblanked pipe as master */
1286 for (j = 0; j < group_size; j++) {
1287 bool is_blanked;
1288
1289 if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
1290 is_blanked =
1291 pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
1292 else
1293 is_blanked =
1294 pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
1295 if (!is_blanked) {
1296 if (j == 0)
1297 break;
1298
1299 swap(pipe_set[0], pipe_set[j]);
1300 break;
1301 }
1302 }
1303
1304 for (k = 0; k < group_size; k++) {
1305 struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);
1306
1307 status->timing_sync_info.group_id = num_group;
1308 status->timing_sync_info.group_size = group_size;
1309 if (k == 0)
1310 status->timing_sync_info.master = true;
1311 else
1312 status->timing_sync_info.master = false;
1313
1314 }
1315 /* remove any other unblanked pipes as they have already been synced */
1316 for (j = j + 1; j < group_size; j++) {
1317 bool is_blanked;
1318
1319 if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
1320 is_blanked =
1321 pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
1322 else
1323 is_blanked =
1324 pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
1325 if (!is_blanked) {
1326 group_size--;
1327 pipe_set[j] = pipe_set[group_size];
1328 j--;
1329 }
1330 }
1331
1332 if (group_size > 1) {
1333 if (sync_type == TIMING_SYNCHRONIZABLE) {
1334 dc->hwss.enable_timing_synchronization(
1335 dc, group_index, group_size, pipe_set);
1336 } else
1337 if (sync_type == VBLANK_SYNCHRONIZABLE) {
1338 dc->hwss.enable_vblanks_synchronization(
1339 dc, group_index, group_size, pipe_set);
1340 }
1341 group_index++;
1342 }
1343 num_group++;
1344 }
1345 }
1346
1347 static bool context_changed(
1348 struct dc *dc,
1349 struct dc_state *context)
1350 {
1351 uint8_t i;
1352
1353 if (context->stream_count != dc->current_state->stream_count)
1354 return true;
1355
1356 for (i = 0; i < dc->current_state->stream_count; i++) {
1357 if (dc->current_state->streams[i] != context->streams[i])
1358 return true;
1359 }
1360
1361 return false;
1362 }
1363
1364 bool dc_validate_seamless_boot_timing(const struct dc *dc,
1365 const struct dc_sink *sink,
1366 struct dc_crtc_timing *crtc_timing)
1367 {
1368 struct timing_generator *tg;
1369 struct stream_encoder *se = NULL;
1370
1371 struct dc_crtc_timing hw_crtc_timing = {0};
1372
1373 struct dc_link *link = sink->link;
1374 unsigned int i, enc_inst, tg_inst = 0;
1375
1376 /* Support seamless boot on EDP displays only */
1377 if (sink->sink_signal != SIGNAL_TYPE_EDP) {
1378 return false;
1379 }
1380
1381 /* Check for enabled DIG to identify enabled display */
1382 if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
1383 return false;
1384
1385 enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
1386
1387 if (enc_inst == ENGINE_ID_UNKNOWN)
1388 return false;
1389
1390 for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
1391 if (dc->res_pool->stream_enc[i]->id == enc_inst) {
1392
1393 se = dc->res_pool->stream_enc[i];
1394
1395 tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
1396 dc->res_pool->stream_enc[i]);
1397 break;
1398 }
1399 }
1400
1401 // tg_inst not found
1402 if (i == dc->res_pool->stream_enc_count)
1403 return false;
1404
1405 if (tg_inst >= dc->res_pool->timing_generator_count)
1406 return false;
1407
1408 tg = dc->res_pool->timing_generators[tg_inst];
1409
1410 if (!tg->funcs->get_hw_timing)
1411 return false;
1412
1413 if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
1414 return false;
1415
1416 if (crtc_timing->h_total != hw_crtc_timing.h_total)
1417 return false;
1418
1419 if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
1420 return false;
1421
1422 if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
1423 return false;
1424
1425 if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
1426 return false;
1427
1428 if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
1429 return false;
1430
1431 if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
1432 return false;
1433
1434 if (crtc_timing->v_total != hw_crtc_timing.v_total)
1435 return false;
1436
1437 if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
1438 return false;
1439
1440 if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
1441 return false;
1442
1443 if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
1444 return false;
1445
1446 if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
1447 return false;
1448
1449 if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
1450 return false;
1451
1452 /* block DSC for now, as VBIOS does not currently support DSC timings */
1453 if (crtc_timing->flags.DSC)
1454 return false;
1455
1456 if (dc_is_dp_signal(link->connector_signal)) {
1457 unsigned int pix_clk_100hz;
1458
1459 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
1460 dc->res_pool->dp_clock_source,
1461 tg_inst, &pix_clk_100hz);
1462
1463 if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
1464 return false;
1465
1466 if (!se->funcs->dp_get_pixel_format)
1467 return false;
1468
1469 if (!se->funcs->dp_get_pixel_format(
1470 se,
1471 &hw_crtc_timing.pixel_encoding,
1472 &hw_crtc_timing.display_color_depth))
1473 return false;
1474
1475 if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
1476 return false;
1477
1478 if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
1479 return false;
1480 }
1481
1482 if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
1483 return false;
1484 }
1485
1486 if (is_edp_ilr_optimization_required(link, crtc_timing)) {
1487 DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
1488 return false;
1489 }
1490
1491 return true;
1492 }
1493
1494 static inline bool should_update_pipe_for_stream(
1495 struct dc_state *context,
1496 struct pipe_ctx *pipe_ctx,
1497 struct dc_stream_state *stream)
1498 {
1499 return (pipe_ctx->stream && pipe_ctx->stream == stream);
1500 }
1501
1502 static inline bool should_update_pipe_for_plane(
1503 struct dc_state *context,
1504 struct pipe_ctx *pipe_ctx,
1505 struct dc_plane_state *plane_state)
1506 {
1507 return (pipe_ctx->plane_state == plane_state);
1508 }
1509
1510 void dc_enable_stereo(
1511 struct dc *dc,
1512 struct dc_state *context,
1513 struct dc_stream_state *streams[],
1514 uint8_t stream_count)
1515 {
1516 int i, j;
1517 struct pipe_ctx *pipe;
1518
1519 for (i = 0; i < MAX_PIPES; i++) {
1520 if (context != NULL) {
1521 pipe = &context->res_ctx.pipe_ctx[i];
1522 } else {
1523 context = dc->current_state;
1524 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1525 }
1526
1527 for (j = 0; pipe && j < stream_count; j++) {
1528 if (should_update_pipe_for_stream(context, pipe, streams[j]) &&
1529 dc->hwss.setup_stereo)
1530 dc->hwss.setup_stereo(pipe, dc);
1531 }
1532 }
1533 }
1534
1535 void dc_trigger_sync(struct dc *dc, struct dc_state *context)
1536 {
1537 if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
1538 enable_timing_multisync(dc, context);
1539 program_timing_sync(dc, context);
1540 }
1541 }
1542
1543 static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
1544 {
1545 int i;
1546 unsigned int stream_mask = 0;
1547
1548 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1549 if (context->res_ctx.pipe_ctx[i].stream)
1550 stream_mask |= 1 << i;
1551 }
1552
1553 return stream_mask;
1554 }
1555
1556 #if defined(CONFIG_DRM_AMD_DC_DCN)
1557 void dc_z10_restore(struct dc *dc)
1558 {
1559 if (dc->hwss.z10_restore)
1560 dc->hwss.z10_restore(dc);
1561 }
1562
1563 void dc_z10_save_init(struct dc *dc)
1564 {
1565 if (dc->hwss.z10_save_init)
1566 dc->hwss.z10_save_init(dc);
1567 }
1568 #endif
1569 /*
1570 * Applies the given context to HW and copies it into the current context.
1571 * It's up to the user to release the src context afterwards.
1572 */
1573 static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
1574 {
1575 struct dc_bios *dcb = dc->ctx->dc_bios;
1576 enum dc_status result = DC_ERROR_UNEXPECTED;
1577 struct pipe_ctx *pipe;
1578 int i, k, l;
1579 struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
1580
1581 #if defined(CONFIG_DRM_AMD_DC_DCN)
1582 dc_z10_restore(dc);
1583 dc_allow_idle_optimizations(dc, false);
1584 #endif
1585
1586 for (i = 0; i < context->stream_count; i++)
1587 dc_streams[i] = context->streams[i];
1588
1589 if (!dcb->funcs->is_accelerated_mode(dcb)) {
1590 disable_vbios_mode_if_required(dc, context);
1591 dc->hwss.enable_accelerated_mode(dc, context);
1592 }
1593
1594 if (context->stream_count > get_seamless_boot_stream_count(context) ||
1595 context->stream_count == 0)
1596 dc->hwss.prepare_bandwidth(dc, context);
1597
1598 disable_dangling_plane(dc, context);
1599 /* re-program planes for existing stream, in case we need to
1600 * free up plane resource for later use
1601 */
1602 if (dc->hwss.apply_ctx_for_surface) {
1603 for (i = 0; i < context->stream_count; i++) {
1604 if (context->streams[i]->mode_changed)
1605 continue;
1606 apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
1607 dc->hwss.apply_ctx_for_surface(
1608 dc, context->streams[i],
1609 context->stream_status[i].plane_count,
1610 context); /* use new pipe config in new context */
1611 apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
1612 dc->hwss.post_unlock_program_front_end(dc, context);
1613 }
1614 }
1615
1616 /* Program hardware */
1617 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1618 pipe = &context->res_ctx.pipe_ctx[i];
1619 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
1620 }
1621
1622 result = dc->hwss.apply_ctx_to_hw(dc, context);
1623
1624 if (result != DC_OK)
1625 return result;
1626
1627 dc_trigger_sync(dc, context);
1628
1629 /* Program all planes within new context*/
1630 if (dc->hwss.program_front_end_for_ctx) {
1631 dc->hwss.interdependent_update_lock(dc, context, true);
1632 dc->hwss.program_front_end_for_ctx(dc, context);
1633 dc->hwss.interdependent_update_lock(dc, context, false);
1634 dc->hwss.post_unlock_program_front_end(dc, context);
1635 }
1636 for (i = 0; i < context->stream_count; i++) {
1637 const struct dc_link *link = context->streams[i]->link;
1638
1639 if (!context->streams[i]->mode_changed)
1640 continue;
1641
1642 if (dc->hwss.apply_ctx_for_surface) {
1643 apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
1644 dc->hwss.apply_ctx_for_surface(
1645 dc, context->streams[i],
1646 context->stream_status[i].plane_count,
1647 context);
1648 apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
1649 dc->hwss.post_unlock_program_front_end(dc, context);
1650 }
1651
1652 /*
1653 * enable stereo
1654 * TODO rework dc_enable_stereo call to work with validation sets?
1655 */
1656 for (k = 0; k < MAX_PIPES; k++) {
1657 pipe = &context->res_ctx.pipe_ctx[k];
1658
1659 for (l = 0 ; pipe && l < context->stream_count; l++) {
1660 if (context->streams[l] &&
1661 context->streams[l] == pipe->stream &&
1662 dc->hwss.setup_stereo)
1663 dc->hwss.setup_stereo(pipe, dc);
1664 }
1665 }
1666
1667 CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
1668 context->streams[i]->timing.h_addressable,
1669 context->streams[i]->timing.v_addressable,
1670 context->streams[i]->timing.h_total,
1671 context->streams[i]->timing.v_total,
1672 context->streams[i]->timing.pix_clk_100hz / 10);
1673 }
1674
1675 dc_enable_stereo(dc, context, dc_streams, context->stream_count);
1676
1677 if (context->stream_count > get_seamless_boot_stream_count(context) ||
1678 context->stream_count == 0) {
1679 /* Must wait for no flips to be pending before doing optimize bw */
1680 wait_for_no_pipes_pending(dc, context);
1681 /* pplib is notified if disp_num changed */
1682 dc->hwss.optimize_bandwidth(dc, context);
1683 /* Need to do otg sync again as otg could be out of sync due to otg
1684 * workaround applied during clock update
1685 */
1686 dc_trigger_sync(dc, context);
1687 }
1688
1689 if (dc->ctx->dce_version >= DCE_VERSION_MAX)
1690 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
1691 else
1692 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
1693
1694 context->stream_mask = get_stream_mask(dc, context);
1695
1696 if (context->stream_mask != dc->current_state->stream_mask)
1697 dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);
1698
1699 for (i = 0; i < context->stream_count; i++)
1700 context->streams[i]->mode_changed = false;
1701
1702 dc_release_state(dc->current_state);
1703
1704 dc->current_state = context;
1705
1706 dc_retain_state(dc->current_state);
1707
1708 return result;
1709 }
1710
1711 bool dc_commit_state(struct dc *dc, struct dc_state *context)
1712 {
1713 enum dc_status result = DC_ERROR_UNEXPECTED;
1714 int i;
1715
1716 if (!context_changed(dc, context))
1717 return DC_OK;
1718
1719 DC_LOG_DC("%s: %d streams\n",
1720 __func__, context->stream_count);
1721
1722 for (i = 0; i < context->stream_count; i++) {
1723 struct dc_stream_state *stream = context->streams[i];
1724
1725 dc_stream_log(dc, stream);
1726 }
1727
1728 result = dc_commit_state_no_check(dc, context);
1729
1730 return (result == DC_OK);
1731 }
1732
1733 #if defined(CONFIG_DRM_AMD_DC_DCN)
1734 bool dc_acquire_release_mpc_3dlut(
1735 struct dc *dc, bool acquire,
1736 struct dc_stream_state *stream,
1737 struct dc_3dlut **lut,
1738 struct dc_transfer_func **shaper)
1739 {
1740 int pipe_idx;
1741 bool ret = false;
1742 bool found_pipe_idx = false;
1743 const struct resource_pool *pool = dc->res_pool;
1744 struct resource_context *res_ctx = &dc->current_state->res_ctx;
1745 int mpcc_id = 0;
1746
1747 if (pool && res_ctx) {
1748 if (acquire) {
1749 /*find pipe idx for the given stream*/
1750 for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
1751 if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
1752 found_pipe_idx = true;
1753 mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
1754 break;
1755 }
1756 }
1757 } else
1758 found_pipe_idx = true;/*for release pipe_idx is not required*/
1759
1760 if (found_pipe_idx) {
1761 if (acquire && pool->funcs->acquire_post_bldn_3dlut)
1762 ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
1763 else if (!acquire && pool->funcs->release_post_bldn_3dlut)
1764 ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
1765 }
1766 }
1767 return ret;
1768 }
1769 #endif
1770 static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
1771 {
1772 int i;
1773 struct pipe_ctx *pipe;
1774
1775 for (i = 0; i < MAX_PIPES; i++) {
1776 pipe = &context->res_ctx.pipe_ctx[i];
1777
1778 if (!pipe->plane_state)
1779 continue;
1780
1781 /* Must set to false to start with, due to OR in update function */
1782 pipe->plane_state->status.is_flip_pending = false;
1783 dc->hwss.update_pending_status(pipe);
1784 if (pipe->plane_state->status.is_flip_pending)
1785 return true;
1786 }
1787 return false;
1788 }
1789
1790 void dc_post_update_surfaces_to_stream(struct dc *dc)
1791 {
1792 int i;
1793 struct dc_state *context = dc->current_state;
1794
1795 if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0)
1796 return;
1797
1798 post_surface_trace(dc);
1799
1800 if (dc->ctx->dce_version >= DCE_VERSION_MAX)
1801 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
1802 else
1803 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
1804
1805 if (is_flip_pending_in_pipes(dc, context))
1806 return;
1807
1808 for (i = 0; i < dc->res_pool->pipe_count; i++)
1809 if (context->res_ctx.pipe_ctx[i].stream == NULL ||
1810 context->res_ctx.pipe_ctx[i].plane_state == NULL) {
1811 context->res_ctx.pipe_ctx[i].pipe_idx = i;
1812 dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
1813 }
1814
1815 dc->hwss.optimize_bandwidth(dc, context);
1816
1817 dc->optimized_required = false;
1818 dc->wm_optimized_required = false;
1819 }
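/*
 * Illustrative usage sketch (comment only, not compiled): after a FAST update
 * has been committed, a DM may call dc_post_update_surfaces_to_stream() once
 * the previous frame's flips have landed so bandwidth and clocks can be
 * optimized back down. The worker/callback context below is an assumption.
 *
 *	// e.g. from a delayed work item or the next frame's prepare step
 *	if (dc->optimized_required)
 *		dc_post_update_surfaces_to_stream(dc);
 */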
1820
1821 static void init_state(struct dc *dc, struct dc_state *context)
1822 {
1823 /* Each context must have its own instance of VBA, and in order to
1824 * initialize it and obtain IP and SOC parameters, the base DML instance
1825 * from DC is initially copied into every context
1826 */
1827 #ifdef CONFIG_DRM_AMD_DC_DCN
1828 memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
1829 #endif
1830 }
1831
1832 struct dc_state *dc_create_state(struct dc *dc)
1833 {
1834 struct dc_state *context = kvzalloc(sizeof(struct dc_state),
1835 GFP_KERNEL);
1836
1837 if (!context)
1838 return NULL;
1839
1840 init_state(dc, context);
1841
1842 kref_init(&context->refcount);
1843
1844 return context;
1845 }
1846
1847 struct dc_state *dc_copy_state(struct dc_state *src_ctx)
1848 {
1849 int i, j;
1850 struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
1851
1852 if (!new_ctx)
1853 return NULL;
1854 memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
1855
1856 for (i = 0; i < MAX_PIPES; i++) {
1857 struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
1858
1859 if (cur_pipe->top_pipe)
1860 cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
1861
1862 if (cur_pipe->bottom_pipe)
1863 cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
1864
1865 if (cur_pipe->prev_odm_pipe)
1866 cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
1867
1868 if (cur_pipe->next_odm_pipe)
1869 cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
1870
1871 }
1872
1873 for (i = 0; i < new_ctx->stream_count; i++) {
1874 dc_stream_retain(new_ctx->streams[i]);
1875 for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
1876 dc_plane_state_retain(
1877 new_ctx->stream_status[i].plane_states[j]);
1878 }
1879
1880 kref_init(&new_ctx->refcount);
1881
1882 return new_ctx;
1883 }
1884
1885 void dc_retain_state(struct dc_state *context)
1886 {
1887 kref_get(&context->refcount);
1888 }
1889
1890 static void dc_state_free(struct kref *kref)
1891 {
1892 struct dc_state *context = container_of(kref, struct dc_state, refcount);
1893 dc_resource_state_destruct(context);
1894 kvfree(context);
1895 }
1896
1897 void dc_release_state(struct dc_state *context)
1898 {
1899 kref_put(&context->refcount, dc_state_free);
1900 }
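/*
 * Illustrative sketch (comment only, not compiled) of the refcounted dc_state
 * lifecycle: every state obtained from dc_create_state()/dc_copy_state() or
 * pinned with dc_retain_state() must eventually be balanced by
 * dc_release_state(). commit_minimal_transition_state() below follows the
 * same create / copy-construct / commit / release pattern.
 *
 *	struct dc_state *new_state = dc_create_state(dc);
 *
 *	if (!new_state)
 *		return false;
 *
 *	dc_resource_state_copy_construct(dc->current_state, new_state);
 *	// ...add or remove streams and planes on new_state...
 *	// a successful commit retains the state itself, so the local
 *	// reference is always dropped:
 *	dc_release_state(new_state);
 */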
1901
1902 bool dc_set_generic_gpio_for_stereo(bool enable,
1903 struct gpio_service *gpio_service)
1904 {
1905 enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
1906 struct gpio_pin_info pin_info;
1907 struct gpio *generic;
1908 struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
1909 GFP_KERNEL);
1910
1911 if (!config)
1912 return false;
1913 pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);
1914
1915 if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
1916 kfree(config);
1917 return false;
1918 } else {
1919 generic = dal_gpio_service_create_generic_mux(
1920 gpio_service,
1921 pin_info.offset,
1922 pin_info.mask);
1923 }
1924
1925 if (!generic) {
1926 kfree(config);
1927 return false;
1928 }
1929
1930 gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);
1931
1932 config->enable_output_from_mux = enable;
1933 config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;
1934
1935 if (gpio_result == GPIO_RESULT_OK)
1936 gpio_result = dal_mux_setup_config(generic, config);
1937
1938 if (gpio_result == GPIO_RESULT_OK) {
1939 dal_gpio_close(generic);
1940 dal_gpio_destroy_generic_mux(&generic);
1941 kfree(config);
1942 return true;
1943 } else {
1944 dal_gpio_close(generic);
1945 dal_gpio_destroy_generic_mux(&generic);
1946 kfree(config);
1947 return false;
1948 }
1949 }
1950
1951 static bool is_surface_in_context(
1952 const struct dc_state *context,
1953 const struct dc_plane_state *plane_state)
1954 {
1955 int j;
1956
1957 for (j = 0; j < MAX_PIPES; j++) {
1958 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1959
1960 if (plane_state == pipe_ctx->plane_state) {
1961 return true;
1962 }
1963 }
1964
1965 return false;
1966 }
1967
1968 static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
1969 {
1970 union surface_update_flags *update_flags = &u->surface->update_flags;
1971 enum surface_update_type update_type = UPDATE_TYPE_FAST;
1972
1973 if (!u->plane_info)
1974 return UPDATE_TYPE_FAST;
1975
1976 if (u->plane_info->color_space != u->surface->color_space) {
1977 update_flags->bits.color_space_change = 1;
1978 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1979 }
1980
1981 if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
1982 update_flags->bits.horizontal_mirror_change = 1;
1983 elevate_update_type(&update_type, UPDATE_TYPE_MED);
1984 }
1985
1986 if (u->plane_info->rotation != u->surface->rotation) {
1987 update_flags->bits.rotation_change = 1;
1988 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1989 }
1990
1991 if (u->plane_info->format != u->surface->format) {
1992 update_flags->bits.pixel_format_change = 1;
1993 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1994 }
1995
1996 if (u->plane_info->stereo_format != u->surface->stereo_format) {
1997 update_flags->bits.stereo_format_change = 1;
1998 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
1999 }
2000
2001 if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
2002 update_flags->bits.per_pixel_alpha_change = 1;
2003 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2004 }
2005
2006 if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
2007 update_flags->bits.global_alpha_change = 1;
2008 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2009 }
2010
2011 if (u->plane_info->dcc.enable != u->surface->dcc.enable
2012 || u->plane_info->dcc.independent_64b_blks != u->surface->dcc.independent_64b_blks
2013 || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
2014 /* During DCC on/off, stutter period is calculated before
2015 * DCC has fully transitioned. This results in incorrect
2016 * stutter period calculation. Triggering a full update will
2017 * recalculate stutter period.
2018 */
2019 update_flags->bits.dcc_change = 1;
2020 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2021 }
2022
2023 if (resource_pixel_format_to_bpp(u->plane_info->format) !=
2024 resource_pixel_format_to_bpp(u->surface->format)) {
2025 /* different bytes per element will require full bandwidth
2026 * and DML calculation
2027 */
2028 update_flags->bits.bpp_change = 1;
2029 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2030 }
2031
2032 if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
2033 || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
2034 update_flags->bits.plane_size_change = 1;
2035 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2036 }
2037
2038
2039 if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
2040 sizeof(union dc_tiling_info)) != 0) {
2041 update_flags->bits.swizzle_change = 1;
2042 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2043
2044 /* todo: the checks below are HW dependent, we should add a hook to
2045 * DCE/N resource and validate them there.
2046 */
2047 if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
2048 /* swizzled mode requires RQ to be setup properly,
2049 * thus need to run DML to calculate RQ settings
2050 */
2051 update_flags->bits.bandwidth_change = 1;
2052 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2053 }
2054 }
2055
2056 /* This should be UPDATE_TYPE_FAST if nothing has changed. */
2057 return update_type;
2058 }
2059
2060 static enum surface_update_type get_scaling_info_update_type(
2061 const struct dc *dc,
2062 const struct dc_surface_update *u)
2063 {
2064 union surface_update_flags *update_flags = &u->surface->update_flags;
2065
2066 if (!u->scaling_info)
2067 return UPDATE_TYPE_FAST;
2068
2069 if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
2070 || u->scaling_info->clip_rect.height != u->surface->clip_rect.height
2071 || u->scaling_info->dst_rect.width != u->surface->dst_rect.width
2072 || u->scaling_info->dst_rect.height != u->surface->dst_rect.height
2073 || u->scaling_info->scaling_quality.integer_scaling !=
2074 u->surface->scaling_quality.integer_scaling
2075 ) {
2076 update_flags->bits.scaling_change = 1;
2077
2078 if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
2079 || u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
2080 && (u->scaling_info->dst_rect.width < u->surface->src_rect.width
2081 || u->scaling_info->dst_rect.height < u->surface->src_rect.height))
2082 /* Making dst rect smaller requires a bandwidth change */
2083 update_flags->bits.bandwidth_change = 1;
2084 }
2085
2086 if (u->scaling_info->src_rect.width != u->surface->src_rect.width
2087 || u->scaling_info->src_rect.height != u->surface->src_rect.height) {
2088
2089 update_flags->bits.scaling_change = 1;
2090 if (u->scaling_info->src_rect.width > u->surface->src_rect.width
2091 || u->scaling_info->src_rect.height > u->surface->src_rect.height)
2092 /* Making src rect bigger requires a bandwidth change */
2093 update_flags->bits.clock_change = 1;
2094 }
2095
2096 if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width &&
2097 (u->scaling_info->clip_rect.width > u->surface->clip_rect.width ||
2098 u->scaling_info->clip_rect.height > u->surface->clip_rect.height))
2099 /* Changing clip size of a large surface may result in MPC slice count change */
2100 update_flags->bits.bandwidth_change = 1;
2101
2102 if (u->scaling_info->src_rect.x != u->surface->src_rect.x
2103 || u->scaling_info->src_rect.y != u->surface->src_rect.y
2104 || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
2105 || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
2106 || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
2107 || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
2108 update_flags->bits.position_change = 1;
2109
2110 if (update_flags->bits.clock_change
2111 || update_flags->bits.bandwidth_change
2112 || update_flags->bits.scaling_change)
2113 return UPDATE_TYPE_FULL;
2114
2115 if (update_flags->bits.position_change)
2116 return UPDATE_TYPE_MED;
2117
2118 return UPDATE_TYPE_FAST;
2119 }
2120
2121 static enum surface_update_type det_surface_update(const struct dc *dc,
2122 const struct dc_surface_update *u)
2123 {
2124 const struct dc_state *context = dc->current_state;
2125 enum surface_update_type type;
2126 enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2127 union surface_update_flags *update_flags = &u->surface->update_flags;
2128
2129 if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
2130 update_flags->raw = 0xFFFFFFFF;
2131 return UPDATE_TYPE_FULL;
2132 }
2133
2134 update_flags->raw = 0; // Reset all flags
2135
2136 type = get_plane_info_update_type(u);
2137 elevate_update_type(&overall_type, type);
2138
2139 type = get_scaling_info_update_type(dc, u);
2140 elevate_update_type(&overall_type, type);
2141
2142 if (u->flip_addr)
2143 update_flags->bits.addr_update = 1;
2144
2145 if (u->in_transfer_func)
2146 update_flags->bits.in_transfer_func_change = 1;
2147
2148 if (u->input_csc_color_matrix)
2149 update_flags->bits.input_csc_change = 1;
2150
2151 if (u->coeff_reduction_factor)
2152 update_flags->bits.coeff_reduction_change = 1;
2153
2154 if (u->gamut_remap_matrix)
2155 update_flags->bits.gamut_remap_change = 1;
2156
2157 if (u->gamma) {
2158 enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
2159
2160 if (u->plane_info)
2161 format = u->plane_info->format;
2162 else if (u->surface)
2163 format = u->surface->format;
2164
2165 if (dce_use_lut(format))
2166 update_flags->bits.gamma_change = 1;
2167 }
2168
2169 if (u->hdr_mult.value)
2170 if (u->hdr_mult.value != u->surface->hdr_mult.value) {
2171 update_flags->bits.hdr_mult = 1;
2172 elevate_update_type(&overall_type, UPDATE_TYPE_MED);
2173 }
2174
2175 if (update_flags->bits.in_transfer_func_change) {
2176 type = UPDATE_TYPE_MED;
2177 elevate_update_type(&overall_type, type);
2178 }
2179
2180 if (update_flags->bits.input_csc_change
2181 || update_flags->bits.coeff_reduction_change
2182 || update_flags->bits.gamma_change
2183 || update_flags->bits.gamut_remap_change) {
2184 type = UPDATE_TYPE_FULL;
2185 elevate_update_type(&overall_type, type);
2186 }
2187
2188 return overall_type;
2189 }
2190
2191 static enum surface_update_type check_update_surfaces_for_stream(
2192 struct dc *dc,
2193 struct dc_surface_update *updates,
2194 int surface_count,
2195 struct dc_stream_update *stream_update,
2196 const struct dc_stream_status *stream_status)
2197 {
2198 int i;
2199 enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2200
2201 #if defined(CONFIG_DRM_AMD_DC_DCN)
2202 if (dc->idle_optimizations_allowed)
2203 overall_type = UPDATE_TYPE_FULL;
2204
2205 #endif
2206 if (stream_status == NULL || stream_status->plane_count != surface_count)
2207 overall_type = UPDATE_TYPE_FULL;
2208
2209 if (stream_update && stream_update->pending_test_pattern) {
2210 overall_type = UPDATE_TYPE_FULL;
2211 }
2212
2213 /* some stream updates require passive update */
2214 if (stream_update) {
2215 union stream_update_flags *su_flags = &stream_update->stream->update_flags;
2216
2217 if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
2218 (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
2219 stream_update->integer_scaling_update)
2220 su_flags->bits.scaling = 1;
2221
2222 if (stream_update->out_transfer_func)
2223 su_flags->bits.out_tf = 1;
2224
2225 if (stream_update->abm_level)
2226 su_flags->bits.abm_level = 1;
2227
2228 if (stream_update->dpms_off)
2229 su_flags->bits.dpms_off = 1;
2230
2231 if (stream_update->gamut_remap)
2232 su_flags->bits.gamut_remap = 1;
2233
2234 if (stream_update->wb_update)
2235 su_flags->bits.wb_update = 1;
2236
2237 if (stream_update->dsc_config)
2238 su_flags->bits.dsc_changed = 1;
2239
2240 if (su_flags->raw != 0)
2241 overall_type = UPDATE_TYPE_FULL;
2242
2243 if (stream_update->output_csc_transform || stream_update->output_color_space)
2244 su_flags->bits.out_csc = 1;
2245 }
2246
2247 for (i = 0 ; i < surface_count; i++) {
2248 enum surface_update_type type =
2249 det_surface_update(dc, &updates[i]);
2250
2251 elevate_update_type(&overall_type, type);
2252 }
2253
2254 return overall_type;
2255 }
2256
2257 /*
2258 * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
2259 *
2260 * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
2261 */
2262 enum surface_update_type dc_check_update_surfaces_for_stream(
2263 struct dc *dc,
2264 struct dc_surface_update *updates,
2265 int surface_count,
2266 struct dc_stream_update *stream_update,
2267 const struct dc_stream_status *stream_status)
2268 {
2269 int i;
2270 enum surface_update_type type;
2271
2272 if (stream_update)
2273 stream_update->stream->update_flags.raw = 0;
2274 for (i = 0; i < surface_count; i++)
2275 updates[i].surface->update_flags.raw = 0;
2276
2277 type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
2278 if (type == UPDATE_TYPE_FULL) {
2279 if (stream_update) {
2280 uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
2281 stream_update->stream->update_flags.raw = 0xFFFFFFFF;
2282 stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
2283 }
2284 for (i = 0; i < surface_count; i++)
2285 updates[i].surface->update_flags.raw = 0xFFFFFFFF;
2286 }
2287
2288 if (type == UPDATE_TYPE_FAST) {
2289 // If there's an available clock comparator, we use that.
2290 if (dc->clk_mgr->funcs->are_clock_states_equal) {
2291 if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
2292 dc->optimized_required = true;
2293 // Otherwise we fall back to a memcmp.
2294 } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
2295 dc->optimized_required = true;
2296 }
2297
2298 dc->optimized_required |= dc->wm_optimized_required;
2299 }
2300
2301 return type;
2302 }
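/*
 * Illustrative usage sketch (comment only, not compiled): a caller can use
 * dc_check_update_surfaces_for_stream() to decide how heavy the subsequent
 * programming needs to be before committing anything. The branch bodies are
 * placeholders.
 *
 *	const struct dc_stream_status *status = dc_stream_get_status(stream);
 *	enum surface_update_type type =
 *		dc_check_update_surfaces_for_stream(dc, srf_updates,
 *				surface_count, stream_update, status);
 *
 *	if (type == UPDATE_TYPE_FULL) {
 *		// a new dc_state must be built and bandwidth re-validated
 *	} else {
 *		// MED/FAST: the current state can be programmed in place
 *	}
 */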
2303
2304 static struct dc_stream_status *stream_get_status(
2305 struct dc_state *ctx,
2306 struct dc_stream_state *stream)
2307 {
2308 uint8_t i;
2309
2310 for (i = 0; i < ctx->stream_count; i++) {
2311 if (stream == ctx->streams[i]) {
2312 return &ctx->stream_status[i];
2313 }
2314 }
2315
2316 return NULL;
2317 }
2318
2319 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
2320
2321 static void copy_surface_update_to_plane(
2322 struct dc_plane_state *surface,
2323 struct dc_surface_update *srf_update)
2324 {
2325 if (srf_update->flip_addr) {
2326 surface->address = srf_update->flip_addr->address;
2327 surface->flip_immediate =
2328 srf_update->flip_addr->flip_immediate;
2329 surface->time.time_elapsed_in_us[surface->time.index] =
2330 srf_update->flip_addr->flip_timestamp_in_us -
2331 surface->time.prev_update_time_in_us;
2332 surface->time.prev_update_time_in_us =
2333 srf_update->flip_addr->flip_timestamp_in_us;
2334 surface->time.index++;
2335 if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
2336 surface->time.index = 0;
2337
2338 surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
2339 }
2340
2341 if (srf_update->scaling_info) {
2342 surface->scaling_quality =
2343 srf_update->scaling_info->scaling_quality;
2344 surface->dst_rect =
2345 srf_update->scaling_info->dst_rect;
2346 surface->src_rect =
2347 srf_update->scaling_info->src_rect;
2348 surface->clip_rect =
2349 srf_update->scaling_info->clip_rect;
2350 }
2351
2352 if (srf_update->plane_info) {
2353 surface->color_space =
2354 srf_update->plane_info->color_space;
2355 surface->format =
2356 srf_update->plane_info->format;
2357 surface->plane_size =
2358 srf_update->plane_info->plane_size;
2359 surface->rotation =
2360 srf_update->plane_info->rotation;
2361 surface->horizontal_mirror =
2362 srf_update->plane_info->horizontal_mirror;
2363 surface->stereo_format =
2364 srf_update->plane_info->stereo_format;
2365 surface->tiling_info =
2366 srf_update->plane_info->tiling_info;
2367 surface->visible =
2368 srf_update->plane_info->visible;
2369 surface->per_pixel_alpha =
2370 srf_update->plane_info->per_pixel_alpha;
2371 surface->global_alpha =
2372 srf_update->plane_info->global_alpha;
2373 surface->global_alpha_value =
2374 srf_update->plane_info->global_alpha_value;
2375 surface->dcc =
2376 srf_update->plane_info->dcc;
2377 surface->layer_index =
2378 srf_update->plane_info->layer_index;
2379 }
2380
2381 if (srf_update->gamma &&
2382 (surface->gamma_correction !=
2383 srf_update->gamma)) {
2384 memcpy(&surface->gamma_correction->entries,
2385 &srf_update->gamma->entries,
2386 sizeof(struct dc_gamma_entries));
2387 surface->gamma_correction->is_identity =
2388 srf_update->gamma->is_identity;
2389 surface->gamma_correction->num_entries =
2390 srf_update->gamma->num_entries;
2391 surface->gamma_correction->type =
2392 srf_update->gamma->type;
2393 }
2394
2395 if (srf_update->in_transfer_func &&
2396 (surface->in_transfer_func !=
2397 srf_update->in_transfer_func)) {
2398 surface->in_transfer_func->sdr_ref_white_level =
2399 srf_update->in_transfer_func->sdr_ref_white_level;
2400 surface->in_transfer_func->tf =
2401 srf_update->in_transfer_func->tf;
2402 surface->in_transfer_func->type =
2403 srf_update->in_transfer_func->type;
2404 memcpy(&surface->in_transfer_func->tf_pts,
2405 &srf_update->in_transfer_func->tf_pts,
2406 sizeof(struct dc_transfer_func_distributed_points));
2407 }
2408
2409 if (srf_update->func_shaper &&
2410 (surface->in_shaper_func !=
2411 srf_update->func_shaper))
2412 memcpy(surface->in_shaper_func, srf_update->func_shaper,
2413 sizeof(*surface->in_shaper_func));
2414
2415 if (srf_update->lut3d_func &&
2416 (surface->lut3d_func !=
2417 srf_update->lut3d_func))
2418 memcpy(surface->lut3d_func, srf_update->lut3d_func,
2419 sizeof(*surface->lut3d_func));
2420
2421 if (srf_update->hdr_mult.value)
2422 surface->hdr_mult =
2423 srf_update->hdr_mult;
2424
2425 if (srf_update->blend_tf &&
2426 (surface->blend_tf !=
2427 srf_update->blend_tf))
2428 memcpy(surface->blend_tf, srf_update->blend_tf,
2429 sizeof(*surface->blend_tf));
2430
2431 if (srf_update->input_csc_color_matrix)
2432 surface->input_csc_color_matrix =
2433 *srf_update->input_csc_color_matrix;
2434
2435 if (srf_update->coeff_reduction_factor)
2436 surface->coeff_reduction_factor =
2437 *srf_update->coeff_reduction_factor;
2438
2439 if (srf_update->gamut_remap_matrix)
2440 surface->gamut_remap_matrix =
2441 *srf_update->gamut_remap_matrix;
2442 }
2443
2444 static void copy_stream_update_to_stream(struct dc *dc,
2445 struct dc_state *context,
2446 struct dc_stream_state *stream,
2447 struct dc_stream_update *update)
2448 {
2449 struct dc_context *dc_ctx = dc->ctx;
2450
2451 if (update == NULL || stream == NULL)
2452 return;
2453
2454 if (update->src.height && update->src.width)
2455 stream->src = update->src;
2456
2457 if (update->dst.height && update->dst.width)
2458 stream->dst = update->dst;
2459
2460 if (update->out_transfer_func &&
2461 stream->out_transfer_func != update->out_transfer_func) {
2462 stream->out_transfer_func->sdr_ref_white_level =
2463 update->out_transfer_func->sdr_ref_white_level;
2464 stream->out_transfer_func->tf = update->out_transfer_func->tf;
2465 stream->out_transfer_func->type =
2466 update->out_transfer_func->type;
2467 memcpy(&stream->out_transfer_func->tf_pts,
2468 &update->out_transfer_func->tf_pts,
2469 sizeof(struct dc_transfer_func_distributed_points));
2470 }
2471
2472 if (update->hdr_static_metadata)
2473 stream->hdr_static_metadata = *update->hdr_static_metadata;
2474
2475 if (update->abm_level)
2476 stream->abm_level = *update->abm_level;
2477
2478 if (update->periodic_interrupt)
2479 stream->periodic_interrupt = *update->periodic_interrupt;
2480
2481 if (update->gamut_remap)
2482 stream->gamut_remap_matrix = *update->gamut_remap;
2483
2484 /* Note: updating this after mode set is currently not a use case;
2485 * however, if it arises, OCSC would need to be reprogrammed at a
2486 * minimum
2487 */
2488 if (update->output_color_space)
2489 stream->output_color_space = *update->output_color_space;
2490
2491 if (update->output_csc_transform)
2492 stream->csc_color_matrix = *update->output_csc_transform;
2493
2494 if (update->vrr_infopacket)
2495 stream->vrr_infopacket = *update->vrr_infopacket;
2496
2497 if (update->dpms_off)
2498 stream->dpms_off = *update->dpms_off;
2499
2500 if (update->vsc_infopacket)
2501 stream->vsc_infopacket = *update->vsc_infopacket;
2502
2503 if (update->vsp_infopacket)
2504 stream->vsp_infopacket = *update->vsp_infopacket;
2505
2506 if (update->dither_option)
2507 stream->dither_option = *update->dither_option;
2508
2509 if (update->pending_test_pattern)
2510 stream->test_pattern = *update->pending_test_pattern;
2511 /* update current stream with writeback info */
2512 if (update->wb_update) {
2513 int i;
2514
2515 stream->num_wb_info = update->wb_update->num_wb_info;
2516 ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
2517 for (i = 0; i < stream->num_wb_info; i++)
2518 stream->writeback_info[i] =
2519 update->wb_update->writeback_info[i];
2520 }
2521 if (update->dsc_config) {
2522 struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
2523 uint32_t old_dsc_enabled = stream->timing.flags.DSC;
2524 uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
2525 update->dsc_config->num_slices_v != 0);
2526
2527 /* Use a temporary context for validating the new DSC config */
2528 struct dc_state *dsc_validate_context = dc_create_state(dc);
2529
2530 if (dsc_validate_context) {
2531 dc_resource_state_copy_construct(dc->current_state, dsc_validate_context);
2532
2533 stream->timing.dsc_cfg = *update->dsc_config;
2534 stream->timing.flags.DSC = enable_dsc;
2535 if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
2536 stream->timing.dsc_cfg = old_dsc_cfg;
2537 stream->timing.flags.DSC = old_dsc_enabled;
2538 update->dsc_config = NULL;
2539 }
2540
2541 dc_release_state(dsc_validate_context);
2542 } else {
2543 DC_ERROR("Failed to allocate new validate context for DSC change\n");
2544 update->dsc_config = NULL;
2545 }
2546 }
2547 }
2548
2549 void dc_reset_state(struct dc *dc, struct dc_state *context)
2550 {
2551 dc_resource_state_destruct(context);
2552
2553 /* clear the structure, but don't reset the reference count */
2554 memset(context, 0, offsetof(struct dc_state, refcount));
2555
2556 init_state(dc, context);
2557 }
2558
2559 static bool update_planes_and_stream_state(struct dc *dc,
2560 struct dc_surface_update *srf_updates, int surface_count,
2561 struct dc_stream_state *stream,
2562 struct dc_stream_update *stream_update,
2563 enum surface_update_type *new_update_type,
2564 struct dc_state **new_context)
2565 {
2566 struct dc_state *context;
2567 int i, j;
2568 enum surface_update_type update_type;
2569 const struct dc_stream_status *stream_status;
2570 struct dc_context *dc_ctx = dc->ctx;
2571
2572 stream_status = dc_stream_get_status(stream);
2573
2574 if (!stream_status) {
2575 if (surface_count) /* Only an error condition if surf_count non-zero*/
2576 ASSERT(false);
2577
2578 return false; /* Cannot commit surface to stream that is not committed */
2579 }
2580
2581 context = dc->current_state;
2582
2583 update_type = dc_check_update_surfaces_for_stream(
2584 dc, srf_updates, surface_count, stream_update, stream_status);
2585
2586 /* update current stream with the new updates */
2587 copy_stream_update_to_stream(dc, context, stream, stream_update);
2588
2589 /* do not perform surface update if surface has invalid dimensions
2590 * (all zero) and no scaling_info is provided
2591 */
2592 if (surface_count > 0) {
2593 for (i = 0; i < surface_count; i++) {
2594 if ((srf_updates[i].surface->src_rect.width == 0 ||
2595 srf_updates[i].surface->src_rect.height == 0 ||
2596 srf_updates[i].surface->dst_rect.width == 0 ||
2597 srf_updates[i].surface->dst_rect.height == 0) &&
2598 (!srf_updates[i].scaling_info ||
2599 srf_updates[i].scaling_info->src_rect.width == 0 ||
2600 srf_updates[i].scaling_info->src_rect.height == 0 ||
2601 srf_updates[i].scaling_info->dst_rect.width == 0 ||
2602 srf_updates[i].scaling_info->dst_rect.height == 0)) {
2603 DC_ERROR("Invalid src/dst rects in surface update!\n");
2604 return false;
2605 }
2606 }
2607 }
2608
2609 if (update_type >= update_surface_trace_level)
2610 update_surface_trace(dc, srf_updates, surface_count);
2611
2612 if (update_type >= UPDATE_TYPE_FULL) {
2613 struct dc_plane_state *new_planes[MAX_SURFACES] = {0};
2614
2615 for (i = 0; i < surface_count; i++)
2616 new_planes[i] = srf_updates[i].surface;
2617
2618 /* initialize scratch memory for building context */
2619 context = dc_create_state(dc);
2620 if (context == NULL) {
2621 DC_ERROR("Failed to allocate new validate context!\n");
2622 return false;
2623 }
2624
2625 dc_resource_state_copy_construct(
2626 dc->current_state, context);
2627
2628 /*remove old surfaces from context */
2629 if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
2630
2631 BREAK_TO_DEBUGGER();
2632 goto fail;
2633 }
2634
2635 /* add surface to context */
2636 if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
2637
2638 BREAK_TO_DEBUGGER();
2639 goto fail;
2640 }
2641 }
2642
2643 /* save update parameters into surface */
2644 for (i = 0; i < surface_count; i++) {
2645 struct dc_plane_state *surface = srf_updates[i].surface;
2646
2647 copy_surface_update_to_plane(surface, &srf_updates[i]);
2648
2649 if (update_type >= UPDATE_TYPE_MED) {
2650 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2651 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2652
2653 if (pipe_ctx->plane_state != surface)
2654 continue;
2655
2656 resource_build_scaling_params(pipe_ctx);
2657 }
2658 }
2659 }
2660
2661 if (update_type == UPDATE_TYPE_FULL) {
2662 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
2663 BREAK_TO_DEBUGGER();
2664 goto fail;
2665 }
2666 }
2667
2668 *new_context = context;
2669 *new_update_type = update_type;
2670
2671 return true;
2672
2673 fail:
2674 dc_release_state(context);
2675
2676 return false;
2677
2678 }
2679
2680 static void commit_planes_do_stream_update(struct dc *dc,
2681 struct dc_stream_state *stream,
2682 struct dc_stream_update *stream_update,
2683 enum surface_update_type update_type,
2684 struct dc_state *context)
2685 {
2686 int j;
2687
2688 // Stream updates
2689 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2690 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2691
2692 if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {
2693
2694 if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt)
2695 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx);
2696
2697 if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
2698 stream_update->vrr_infopacket ||
2699 stream_update->vsc_infopacket ||
2700 stream_update->vsp_infopacket) {
2701 resource_build_info_frame(pipe_ctx);
2702 dc->hwss.update_info_frame(pipe_ctx);
2703 }
2704
2705 if (stream_update->hdr_static_metadata &&
2706 stream->use_dynamic_meta &&
2707 dc->hwss.set_dmdata_attributes &&
2708 pipe_ctx->stream->dmdata_address.quad_part != 0)
2709 dc->hwss.set_dmdata_attributes(pipe_ctx);
2710
2711 if (stream_update->gamut_remap)
2712 dc_stream_set_gamut_remap(dc, stream);
2713
2714 if (stream_update->output_csc_transform)
2715 dc_stream_program_csc_matrix(dc, stream);
2716
2717 if (stream_update->dither_option) {
2718 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
2719 resource_build_bit_depth_reduction_params(pipe_ctx->stream,
2720 &pipe_ctx->stream->bit_depth_params);
2721 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
2722 &stream->bit_depth_params,
2723 &stream->clamping);
2724 while (odm_pipe) {
2725 odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
2726 &stream->bit_depth_params,
2727 &stream->clamping);
2728 odm_pipe = odm_pipe->next_odm_pipe;
2729 }
2730 }
2731
2732
2733 /* Below here is for full front-end updates only */
2734 if (update_type == UPDATE_TYPE_FAST)
2735 continue;
2736
2737 if (stream_update->dsc_config)
2738 dp_update_dsc_config(pipe_ctx);
2739
2740 if (stream_update->pending_test_pattern) {
2741 dc_link_dp_set_test_pattern(stream->link,
2742 stream->test_pattern.type,
2743 stream->test_pattern.color_space,
2744 stream->test_pattern.p_link_settings,
2745 stream->test_pattern.p_custom_pattern,
2746 stream->test_pattern.cust_pattern_size);
2747 }
2748
2749 if (stream_update->dpms_off) {
2750 if (*stream_update->dpms_off) {
2751 core_link_disable_stream(pipe_ctx);
2752 /* for dpms, keep acquired resources*/
2753 if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
2754 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
2755
2756 dc->optimized_required = true;
2757
2758 } else {
2759 if (get_seamless_boot_stream_count(context) == 0)
2760 dc->hwss.prepare_bandwidth(dc, dc->current_state);
2761
2762 core_link_enable_stream(dc->current_state, pipe_ctx);
2763 }
2764 }
2765
2766 if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
2767 bool should_program_abm = true;
2768
2769 // if otg funcs defined check if blanked before programming
2770 if (pipe_ctx->stream_res.tg->funcs->is_blanked)
2771 if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
2772 should_program_abm = false;
2773
2774 if (should_program_abm) {
2775 if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
2776 dc->hwss.set_abm_immediate_disable(pipe_ctx);
2777 } else {
2778 pipe_ctx->stream_res.abm->funcs->set_abm_level(
2779 pipe_ctx->stream_res.abm, stream->abm_level);
2780 }
2781 }
2782 }
2783 }
2784 }
2785 }
2786
2787 static void commit_planes_for_stream(struct dc *dc,
2788 struct dc_surface_update *srf_updates,
2789 int surface_count,
2790 struct dc_stream_state *stream,
2791 struct dc_stream_update *stream_update,
2792 enum surface_update_type update_type,
2793 struct dc_state *context)
2794 {
2795 int i, j;
2796 struct pipe_ctx *top_pipe_to_program = NULL;
2797 bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
2798
2799 #if defined(CONFIG_DRM_AMD_DC_DCN)
2800 dc_z10_restore(dc);
2801 #endif
2802
2803 if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
2804 /* The seamless boot optimization flag keeps clocks and watermarks high
2805 * until the first flip. After the first flip, optimization is required
2806 * to lower bandwidth. Note that UEFI is expected to light up only a
2807 * single display on POST, so we only expect one stream with the
2808 * seamless boot flag set.
2809 */
2810 if (stream->apply_seamless_boot_optimization) {
2811 stream->apply_seamless_boot_optimization = false;
2812
2813 if (get_seamless_boot_stream_count(context) == 0)
2814 dc->optimized_required = true;
2815 }
2816 }
2817
2818 if (update_type == UPDATE_TYPE_FULL) {
2819 #if defined(CONFIG_DRM_AMD_DC_DCN)
2820 dc_allow_idle_optimizations(dc, false);
2821
2822 #endif
2823 if (get_seamless_boot_stream_count(context) == 0)
2824 dc->hwss.prepare_bandwidth(dc, context);
2825
2826 context_clock_trace(dc, context);
2827 }
2828
2829 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2830 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2831
2832 if (!pipe_ctx->top_pipe &&
2833 !pipe_ctx->prev_odm_pipe &&
2834 pipe_ctx->stream &&
2835 pipe_ctx->stream == stream) {
2836 top_pipe_to_program = pipe_ctx;
2837 }
2838 }
2839
2840 #ifdef CONFIG_DRM_AMD_DC_DCN
2841 if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
2842 struct pipe_ctx *mpcc_pipe;
2843 struct pipe_ctx *odm_pipe;
2844
2845 for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
2846 for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
2847 odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
2848 }
2849 #endif
2850
2851 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
2852 if (top_pipe_to_program &&
2853 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
2854 if (should_use_dmub_lock(stream->link)) {
2855 union dmub_hw_lock_flags hw_locks = { 0 };
2856 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
2857
2858 hw_locks.bits.lock_dig = 1;
2859 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
2860
2861 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
2862 true,
2863 &hw_locks,
2864 &inst_flags);
2865 } else
2866 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
2867 top_pipe_to_program->stream_res.tg);
2868 }
2869
2870 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
2871 dc->hwss.interdependent_update_lock(dc, context, true);
2872 else
2873 /* Lock the top pipe while updating plane addrs, since freesync requires
2874 * plane addr update event triggers to be synchronized.
2875 * top_pipe_to_program is expected to never be NULL
2876 */
2877 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
2878
2879 // Stream updates
2880 if (stream_update)
2881 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
2882
2883 if (surface_count == 0) {
2884 /*
2885 * When turning off the screen there is no need to program the front end a
2886 * second time; just return after programming blank.
2887 */
2888 if (dc->hwss.apply_ctx_for_surface)
2889 dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
2890 if (dc->hwss.program_front_end_for_ctx)
2891 dc->hwss.program_front_end_for_ctx(dc, context);
2892
2893 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
2894 dc->hwss.interdependent_update_lock(dc, context, false);
2895 else
2896 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
2897 dc->hwss.post_unlock_program_front_end(dc, context);
2898 return;
2899 }
2900
2901 if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
2902 for (i = 0; i < surface_count; i++) {
2903 struct dc_plane_state *plane_state = srf_updates[i].surface;
2904 /*set logical flag for lock/unlock use*/
2905 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2906 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2907 if (!pipe_ctx->plane_state)
2908 continue;
2909 if (should_update_pipe_for_plane(context, pipe_ctx, plane_state))
2910 continue;
2911 pipe_ctx->plane_state->triplebuffer_flips = false;
2912 if (update_type == UPDATE_TYPE_FAST &&
2913 dc->hwss.program_triplebuffer != NULL &&
2914 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
2915 /*triple buffer for VUpdate only*/
2916 pipe_ctx->plane_state->triplebuffer_flips = true;
2917 }
2918 }
2919 if (update_type == UPDATE_TYPE_FULL) {
2920 /* force vsync flip when reconfiguring pipes to prevent underflow */
2921 plane_state->flip_immediate = false;
2922 }
2923 }
2924 }
2925
2926 // Update Type FULL, Surface updates
2927 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2928 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2929
2930 if (!pipe_ctx->top_pipe &&
2931 !pipe_ctx->prev_odm_pipe &&
2932 should_update_pipe_for_stream(context, pipe_ctx, stream)) {
2933 struct dc_stream_status *stream_status = NULL;
2934
2935 if (!pipe_ctx->plane_state)
2936 continue;
2937
2938 /* Below here is for full front-end updates only */
2939 if (update_type == UPDATE_TYPE_FAST)
2940 continue;
2941
2942 ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
2943
2944 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
2945 /*turn off triple buffer for full update*/
2946 dc->hwss.program_triplebuffer(
2947 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
2948 }
2949 stream_status =
2950 stream_get_status(context, pipe_ctx->stream);
2951
2952 if (dc->hwss.apply_ctx_for_surface)
2953 dc->hwss.apply_ctx_for_surface(
2954 dc, pipe_ctx->stream, stream_status->plane_count, context);
2955 }
2956 }
2957 if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
2958 dc->hwss.program_front_end_for_ctx(dc, context);
2959 #ifdef CONFIG_DRM_AMD_DC_DCN
2960 if (dc->debug.validate_dml_output) {
2961 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2962 struct pipe_ctx cur_pipe = context->res_ctx.pipe_ctx[i];
2963 if (cur_pipe.stream == NULL)
2964 continue;
2965
2966 cur_pipe.plane_res.hubp->funcs->validate_dml_output(
2967 cur_pipe.plane_res.hubp, dc->ctx,
2968 &context->res_ctx.pipe_ctx[i].rq_regs,
2969 &context->res_ctx.pipe_ctx[i].dlg_regs,
2970 &context->res_ctx.pipe_ctx[i].ttu_regs);
2971 }
2972 }
2973 #endif
2974 }
2975
2976 // Update Type FAST, Surface updates
2977 if (update_type == UPDATE_TYPE_FAST) {
2978 if (dc->hwss.set_flip_control_gsl)
2979 for (i = 0; i < surface_count; i++) {
2980 struct dc_plane_state *plane_state = srf_updates[i].surface;
2981
2982 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2983 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2984
2985 if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
2986 continue;
2987
2988 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
2989 continue;
2990
2991 // GSL has to be used for flip immediate
2992 dc->hwss.set_flip_control_gsl(pipe_ctx,
2993 pipe_ctx->plane_state->flip_immediate);
2994 }
2995 }
2996
2997 /* Perform requested Updates */
2998 for (i = 0; i < surface_count; i++) {
2999 struct dc_plane_state *plane_state = srf_updates[i].surface;
3000
3001 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3002 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3003
3004 if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3005 continue;
3006
3007 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3008 continue;
3009
3010 /*program triple buffer after lock based on flip type*/
3011 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
3012 /*only enable triplebuffer for fast_update*/
3013 dc->hwss.program_triplebuffer(
3014 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
3015 }
3016 if (pipe_ctx->plane_state->update_flags.bits.addr_update)
3017 dc->hwss.update_plane_addr(dc, pipe_ctx);
3018 }
3019 }
3020
3021 }
3022
3023 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
3024 dc->hwss.interdependent_update_lock(dc, context, false);
3025 else
3026 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
3027
3028 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
3029 if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
3030 top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3031 top_pipe_to_program->stream_res.tg,
3032 CRTC_STATE_VACTIVE);
3033 top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3034 top_pipe_to_program->stream_res.tg,
3035 CRTC_STATE_VBLANK);
3036 top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3037 top_pipe_to_program->stream_res.tg,
3038 CRTC_STATE_VACTIVE);
3039
3040 if (stream && should_use_dmub_lock(stream->link)) {
3041 union dmub_hw_lock_flags hw_locks = { 0 };
3042 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
3043
3044 hw_locks.bits.lock_dig = 1;
3045 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
3046
3047 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
3048 false,
3049 &hw_locks,
3050 &inst_flags);
3051 } else
3052 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
3053 top_pipe_to_program->stream_res.tg);
3054 }
3055
3056 if (update_type != UPDATE_TYPE_FAST)
3057 dc->hwss.post_unlock_program_front_end(dc, context);
3058
3059 // Fire manual trigger only when bottom plane is flipped
3060 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3061 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3062
3063 if (!pipe_ctx->plane_state)
3064 continue;
3065
3066 if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
3067 !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) ||
3068 !pipe_ctx->plane_state->update_flags.bits.addr_update ||
3069 pipe_ctx->plane_state->skip_manual_trigger)
3070 continue;
3071
3072 if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
3073 pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
3074 }
3075 }
3076
3077 static bool commit_minimal_transition_state(struct dc *dc,
3078 struct dc_state *transition_base_context)
3079 {
3080 struct dc_state *transition_context = dc_create_state(dc);
3081 enum pipe_split_policy tmp_policy;
3082 enum dc_status ret = DC_ERROR_UNEXPECTED;
3083 unsigned int i, j;
3084
3085 if (!transition_context)
3086 return false;
3087
3088 tmp_policy = dc->debug.pipe_split_policy;
3089 dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
3090
3091 dc_resource_state_copy_construct(transition_base_context, transition_context);
3092
3093 //commit minimal state
3094 if (dc->res_pool->funcs->validate_bandwidth(dc, transition_context, false)) {
3095 for (i = 0; i < transition_context->stream_count; i++) {
3096 struct dc_stream_status *stream_status = &transition_context->stream_status[i];
3097
3098 for (j = 0; j < stream_status->plane_count; j++) {
3099 struct dc_plane_state *plane_state = stream_status->plane_states[j];
3100
3101 /* force vsync flip when reconfiguring pipes to prevent underflow
3102 * and corruption
3103 */
3104 plane_state->flip_immediate = false;
3105 }
3106 }
3107
3108 ret = dc_commit_state_no_check(dc, transition_context);
3109 }
3110
3111 // always release; dc_commit_state_no_check retains the state in the good case
3112 dc_release_state(transition_context);
3113
3114 //restore previous pipe split policy
3115 dc->debug.pipe_split_policy = tmp_policy;
3116
3117 if (ret != DC_OK) {
3118 //this should never happen
3119 BREAK_TO_DEBUGGER();
3120 return false;
3121 }
3122
3123 //force full surface update
3124 for (i = 0; i < dc->current_state->stream_count; i++) {
3125 for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
3126 dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF;
3127 }
3128 }
3129
3130 return true;
3131 }
3132
3133 bool dc_update_planes_and_stream(struct dc *dc,
3134 struct dc_surface_update *srf_updates, int surface_count,
3135 struct dc_stream_state *stream,
3136 struct dc_stream_update *stream_update)
3137 {
3138 struct dc_state *context;
3139 enum surface_update_type update_type;
3140 int i;
3141
3142 /* In cases where MPO and split or ODM are used, transitions can
3143 * cause underflow. Apply the stream configuration with minimal pipe
3144 * split first to avoid unsupported transitions for active pipes.
3145 */
3146 bool force_minimal_pipe_splitting = false;
3147 bool is_plane_addition = false;
3148
3149 struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
3150
3151 if (cur_stream_status &&
3152 dc->current_state->stream_count > 0 &&
3153 dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
3154 /* determine if minimal transition is required */
3155 if (cur_stream_status->plane_count > surface_count) {
3156 force_minimal_pipe_splitting = true;
3157 } else if (cur_stream_status->plane_count < surface_count) {
3158 force_minimal_pipe_splitting = true;
3159 is_plane_addition = true;
3160 }
3161 }
3162
3163 /* on plane addition, minimal state is the current one */
3164 if (force_minimal_pipe_splitting && is_plane_addition &&
3165 !commit_minimal_transition_state(dc, dc->current_state))
3166 return false;
3167
3168 if (!update_planes_and_stream_state(
3169 dc,
3170 srf_updates,
3171 surface_count,
3172 stream,
3173 stream_update,
3174 &update_type,
3175 &context))
3176 return false;
3177
3178 /* on plane addition, minimal state is the new one */
3179 if (force_minimal_pipe_splitting && !is_plane_addition) {
3180 if (!commit_minimal_transition_state(dc, context)) {
3181 dc_release_state(context);
3182 return false;
3183 }
3184
3185 update_type = UPDATE_TYPE_FULL;
3186 }
3187
3188 commit_planes_for_stream(
3189 dc,
3190 srf_updates,
3191 surface_count,
3192 stream,
3193 stream_update,
3194 update_type,
3195 context);
3196
3197 if (dc->current_state != context) {
3198
3199 /* Since memory free requires elevated IRQL, an interrupt
3200 * request is generated by mem free. If this happens
3201 * between freeing and reassigning the context, our vsync
3202 * interrupt will call into dc and cause a memory
3203 * corruption BSOD. Hence, we first reassign the context,
3204 * then free the old context.
3205 */
3206
3207 struct dc_state *old = dc->current_state;
3208
3209 dc->current_state = context;
3210 dc_release_state(old);
3211
3212 // clear any forced full updates
3213 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3214 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3215
3216 if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
3217 pipe_ctx->plane_state->force_full_update = false;
3218 }
3219 }
3220 return true;
3221 }
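/*
 * Illustrative usage sketch (comment only, not compiled): a typical flip path
 * through dc_update_planes_and_stream(). The srf_updates array is filled by
 * the DM; the helper name dm_build_surface_updates() is hypothetical.
 *
 *	struct dc_surface_update srf_updates[MAX_SURFACES] = { 0 };
 *	struct dc_stream_update stream_update = { 0 };
 *	int surface_count;
 *
 *	surface_count = dm_build_surface_updates(new_plane_states, srf_updates);
 *	stream_update.stream = stream;
 *
 *	if (!dc_update_planes_and_stream(dc, srf_updates, surface_count,
 *					 stream, &stream_update))
 *		DRM_ERROR("failed to update planes and stream\n");
 */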
3222
3223 void dc_commit_updates_for_stream(struct dc *dc,
3224 struct dc_surface_update *srf_updates,
3225 int surface_count,
3226 struct dc_stream_state *stream,
3227 struct dc_stream_update *stream_update,
3228 struct dc_state *state)
3229 {
3230 const struct dc_stream_status *stream_status;
3231 enum surface_update_type update_type;
3232 struct dc_state *context;
3233 struct dc_context *dc_ctx = dc->ctx;
3234 int i, j;
3235
3236 stream_status = dc_stream_get_status(stream);
3237 context = dc->current_state;
3238
3239 update_type = dc_check_update_surfaces_for_stream(
3240 dc, srf_updates, surface_count, stream_update, stream_status);
3241
3242 if (update_type >= update_surface_trace_level)
3243 update_surface_trace(dc, srf_updates, surface_count);
3244
3245
3246 if (update_type >= UPDATE_TYPE_FULL) {
3247
3248 /* initialize scratch memory for building context */
3249 context = dc_create_state(dc);
3250 if (context == NULL) {
3251 DC_ERROR("Failed to allocate new validate context!\n");
3252 return;
3253 }
3254
3255 dc_resource_state_copy_construct(state, context);
3256
3257 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3258 struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
3259 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3260
3261 if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
3262 new_pipe->plane_state->force_full_update = true;
3263 }
3264 } else if (update_type == UPDATE_TYPE_FAST && dc_ctx->dce_version >= DCE_VERSION_MAX) {
3265 /*
3266 * Previous frame finished and HW is ready for optimization.
3267 *
3268 * Only relevant for DCN behavior where we can guarantee the optimization
3269 * is safe to apply - retain the legacy behavior for DCE.
3270 */
3271 dc_post_update_surfaces_to_stream(dc);
3272 }
3273
3274
3275 for (i = 0; i < surface_count; i++) {
3276 struct dc_plane_state *surface = srf_updates[i].surface;
3277
3278 copy_surface_update_to_plane(surface, &srf_updates[i]);
3279
3280 if (update_type >= UPDATE_TYPE_MED) {
3281 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3282 struct pipe_ctx *pipe_ctx =
3283 &context->res_ctx.pipe_ctx[j];
3284
3285 if (pipe_ctx->plane_state != surface)
3286 continue;
3287
3288 resource_build_scaling_params(pipe_ctx);
3289 }
3290 }
3291 }
3292
3293 copy_stream_update_to_stream(dc, context, stream, stream_update);
3294
3295 if (update_type >= UPDATE_TYPE_FULL) {
3296 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
3297 DC_ERROR("Mode validation failed for stream update!\n");
3298 dc_release_state(context);
3299 return;
3300 }
3301 }
3302
3303 TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
3304
3305 commit_planes_for_stream(
3306 dc,
3307 srf_updates,
3308 surface_count,
3309 stream,
3310 stream_update,
3311 update_type,
3312 context);
3313 /* update current_state */
3314 if (dc->current_state != context) {
3315
3316 struct dc_state *old = dc->current_state;
3317
3318 dc->current_state = context;
3319 dc_release_state(old);
3320
3321 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3322 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3323
3324 if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
3325 pipe_ctx->plane_state->force_full_update = false;
3326 }
3327 }
3328
3329 /* Legacy optimization path for DCE. */
3330 if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
3331 dc_post_update_surfaces_to_stream(dc);
3332 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
3333 }
3334
3335 return;
3336
3337 }
3338
3339 uint8_t dc_get_current_stream_count(struct dc *dc)
3340 {
3341 return dc->current_state->stream_count;
3342 }
3343
3344 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
3345 {
3346 if (i < dc->current_state->stream_count)
3347 return dc->current_state->streams[i];
3348 return NULL;
3349 }
3350
3351 struct dc_stream_state *dc_stream_find_from_link(const struct dc_link *link)
3352 {
3353 uint8_t i;
3354 struct dc_context *ctx = link->ctx;
3355
3356 for (i = 0; i < ctx->dc->current_state->stream_count; i++) {
3357 if (ctx->dc->current_state->streams[i]->link == link)
3358 return ctx->dc->current_state->streams[i];
3359 }
3360
3361 return NULL;
3362 }
3363
3364 enum dc_irq_source dc_interrupt_to_irq_source(
3365 struct dc *dc,
3366 uint32_t src_id,
3367 uint32_t ext_id)
3368 {
3369 return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
3370 }
3371
3372 /*
3373 * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
3374 */
3375 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
3376 {
3377
3378 if (dc == NULL)
3379 return false;
3380
3381 return dal_irq_service_set(dc->res_pool->irqs, src, enable);
3382 }
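/*
 * Illustrative usage sketch (comment only, not compiled): an OS interrupt
 * layer would typically translate its hardware src/ext ids into a
 * dc_irq_source first, unmask it with dc_interrupt_set(), and acknowledge it
 * with dc_interrupt_ack() when servicing the interrupt.
 *
 *	enum dc_irq_source src = dc_interrupt_to_irq_source(dc, src_id, ext_id);
 *
 *	dc_interrupt_set(dc, src, true);	// unmask the source
 *	// ...later, in the interrupt handler...
 *	dc_interrupt_ack(dc, src);
 */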
3383
3384 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
3385 {
3386 dal_irq_service_ack(dc->res_pool->irqs, src);
3387 }
3388
3389 void dc_power_down_on_boot(struct dc *dc)
3390 {
3391 if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
3392 dc->hwss.power_down_on_boot)
3393 dc->hwss.power_down_on_boot(dc);
3394 }
3395
3396 void dc_set_power_state(
3397 struct dc *dc,
3398 enum dc_acpi_cm_power_state power_state)
3399 {
3400 struct kref refcount;
3401 struct display_mode_lib *dml;
3402
3403 if (!dc->current_state)
3404 return;
3405
3406 switch (power_state) {
3407 case DC_ACPI_CM_POWER_STATE_D0:
3408 dc_resource_state_construct(dc, dc->current_state);
3409
3410 #if defined(CONFIG_DRM_AMD_DC_DCN)
3411 dc_z10_restore(dc);
3412 #endif
3413 if (dc->ctx->dmub_srv)
3414 dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);
3415
3416 dc->hwss.init_hw(dc);
3417
3418 if (dc->hwss.init_sys_ctx != NULL &&
3419 dc->vm_pa_config.valid) {
3420 dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
3421 }
3422
3423 break;
3424 default:
3425 ASSERT(dc->current_state->stream_count == 0);
3426 /* Zero out the current context so that on resume we start with
3427 * clean state, and dc hw programming optimizations will not
3428 * cause any trouble.
3429 */
3430 dml = kzalloc(sizeof(struct display_mode_lib),
3431 GFP_KERNEL);
3432
3433 ASSERT(dml);
3434 if (!dml)
3435 return;
3436
3437 /* Preserve refcount */
3438 refcount = dc->current_state->refcount;
3439 /* Preserve display mode lib */
3440 memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));
3441
3442 dc_resource_state_destruct(dc->current_state);
3443 memset(dc->current_state, 0,
3444 sizeof(*dc->current_state));
3445
3446 dc->current_state->refcount = refcount;
3447 dc->current_state->bw_ctx.dml = *dml;
3448
3449 kfree(dml);
3450
3451 break;
3452 }
3453 }
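
/*
 * Illustrative usage sketch (not part of this file): how a DM-layer
 * suspend/resume path is expected to drive the power-state handlers.
 * Ordering relative to the rest of the suspend/resume sequence is
 * simplified here.
 *
 *	suspend:
 *		dc_set_power_state(dc, DC_ACPI_CM_POWER_STATE_D3);
 *
 *	resume:
 *		dc_set_power_state(dc, DC_ACPI_CM_POWER_STATE_D0);
 *		dc_resume(dc);
 */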

void dc_resume(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++)
		core_link_resume(dc->links[i]);
}

bool dc_is_dmcu_initialized(struct dc *dc)
{
	struct dmcu *dmcu = dc->res_pool->dmcu;

	if (dmcu)
		return dmcu->funcs->is_dmcu_initialized(dmcu);
	return false;
}

bool dc_submit_i2c(
		struct dc *dc,
		uint32_t link_index,
		struct i2c_command *cmd)
{
	struct dc_link *link = dc->links[link_index];
	struct ddc_service *ddc = link->ddc;

	return dce_i2c_submit_command(
		dc->res_pool,
		ddc->ddc_pin,
		cmd);
}

bool dc_submit_i2c_oem(
		struct dc *dc,
		struct i2c_command *cmd)
{
	struct ddc_service *ddc = dc->res_pool->oem_device;

	return dce_i2c_submit_command(
		dc->res_pool,
		ddc->ddc_pin,
		cmd);
}
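
/*
 * Illustrative usage sketch (not part of this file): building a single
 * write transaction for dc_submit_i2c().  The slave address, buffer and
 * speed are hypothetical example values.
 *
 *	struct i2c_payload payload = {
 *		.write = true,
 *		.address = 0x50,
 *		.length = sizeof(buf),
 *		.data = buf,
 *	};
 *	struct i2c_command cmd = {
 *		.payloads = &payload,
 *		.number_of_payloads = 1,
 *		.engine = I2C_COMMAND_ENGINE_DEFAULT,
 *		.speed = 100,
 *	};
 *
 *	if (!dc_submit_i2c(dc, link_index, &cmd))
 *		...
 */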

static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
{
	if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	dc_sink_retain(sink);

	dc_link->remote_sinks[dc_link->sink_count] = sink;
	dc_link->sink_count++;

	return true;
}

/*
 * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
 *
 * EDID length is in bytes
 */
struct dc_sink *dc_link_add_remote_sink(
		struct dc_link *link,
		const uint8_t *edid,
		int len,
		struct dc_sink_init_data *init_data)
{
	struct dc_sink *dc_sink;
	enum dc_edid_status edid_status;

	if (len > DC_MAX_EDID_BUFFER_SIZE) {
		dm_error("Max EDID buffer size breached!\n");
		return NULL;
	}

	if (!init_data) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	if (!init_data->link) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dc_sink = dc_sink_create(init_data);

	if (!dc_sink)
		return NULL;

	memmove(dc_sink->dc_edid.raw_edid, edid, len);
	dc_sink->dc_edid.length = len;

	if (!link_add_remote_sink_helper(
			link,
			dc_sink))
		goto fail_add_sink;

	edid_status = dm_helpers_parse_edid_caps(
			link->ctx,
			&dc_sink->dc_edid,
			&dc_sink->edid_caps);

	/*
	 * Treat the device as having no EDID if EDID
	 * parsing fails
	 */
	if (edid_status != EDID_OK) {
		dc_sink->dc_edid.length = 0;
		dm_error("Bad EDID, status %d!\n", edid_status);
	}

	return dc_sink;

fail_add_sink:
	dc_sink_release(dc_sink);
	return NULL;
}
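
/*
 * Illustrative usage sketch (not part of this file): attaching a remote
 * (MST) sink after reading its EDID, and detaching it again on unplug.
 * The init_data fields shown are the minimum a caller would typically
 * fill in; the values are hypothetical.
 *
 *	struct dc_sink_init_data init_data = {
 *		.link = link,
 *		.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST,
 *	};
 *	struct dc_sink *sink;
 *
 *	sink = dc_link_add_remote_sink(link, edid, edid_len, &init_data);
 *	...
 *	dc_link_remove_remote_sink(link, sink);
 */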

/*
 * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
 *
 * Note that this just removes the struct dc_sink - it doesn't
 * program hardware or alter other members of dc_link
 */
void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
{
	int i;

	if (!link->sink_count) {
		BREAK_TO_DEBUGGER();
		return;
	}

	for (i = 0; i < link->sink_count; i++) {
		if (link->remote_sinks[i] == sink) {
			dc_sink_release(sink);
			link->remote_sinks[i] = NULL;

			/* shrink array to remove empty place */
			while (i < link->sink_count - 1) {
				link->remote_sinks[i] = link->remote_sinks[i+1];
				i++;
			}
			link->remote_sinks[i] = NULL;
			link->sink_count--;
			return;
		}
	}
}

void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
{
	info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
	info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
	info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
	info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
	info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
	info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
	info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
	info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
	info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
}

enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
{
	if (dc->hwss.set_clock)
		return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
	return DC_ERROR_UNEXPECTED;
}

void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
{
	if (dc->hwss.get_clock)
		dc->hwss.get_clock(dc, clock_type, clock_cfg);
}
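
/*
 * Illustrative usage sketch (not part of this file): querying a clock and
 * then programming it back through the optional hwss hooks.  The clock type
 * and stepping value are hypothetical, and not every ASIC implements these
 * hooks (in which case dc_set_clock() returns DC_ERROR_UNEXPECTED).
 *
 *	struct dc_clock_config cfg = {0};
 *
 *	dc_get_clock(dc, DC_CLOCK_TYPE_DISPCLK, &cfg);
 *	if (dc_set_clock(dc, DC_CLOCK_TYPE_DISPCLK, cfg.current_clock_khz, 0) != DC_OK)
 *		...
 */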

/* enable/disable eDP PSR without specifying a stream for eDP */
bool dc_set_psr_allow_active(struct dc *dc, bool enable)
{
	int i;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		struct dc_link *link;
		struct dc_stream_state *stream = dc->current_state->streams[i];

		link = stream->link;
		if (!link)
			continue;

		if (link->psr_settings.psr_feature_enabled) {
			if (enable && !link->psr_settings.psr_allow_active) {
				if (!dc_link_set_psr_allow_active(link, true, false, false))
					return false;
			} else if (!enable && link->psr_settings.psr_allow_active) {
				if (!dc_link_set_psr_allow_active(link, false, true, false))
					return false;
			}
		}
	}

	return true;
}
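
/*
 * Illustrative usage sketch (not part of this file): a display update path
 * disengaging PSR on every PSR-capable link before touching the hardware,
 * then re-allowing it once programming is complete.
 *
 *	if (!dc_set_psr_allow_active(dc, false))
 *		...
 *	...
 *	dc_set_psr_allow_active(dc, true);
 */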

#if defined(CONFIG_DRM_AMD_DC_DCN)

void dc_allow_idle_optimizations(struct dc *dc, bool allow)
{
	if (dc->debug.disable_idle_power_optimizations)
		return;

	if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
		if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
			return;

	if (allow == dc->idle_optimizations_allowed)
		return;

	if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
		dc->idle_optimizations_allowed = allow;
}
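
/*
 * Illustrative usage sketch (not part of this file): bracketing a
 * programming sequence that must not race idle power-state entry, e.g.
 * a cursor or plane update that is incompatible with the current idle
 * optimizations.
 *
 *	dc_allow_idle_optimizations(dc, false);
 *	...
 *	dc_allow_idle_optimizations(dc, true);
 */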

/*
 * blank all streams, and set min and max memory clock to
 * lowest and highest DPM level, respectively
 */
void dc_unlock_memory_clock_frequency(struct dc *dc)
{
	unsigned int i;

	for (i = 0; i < MAX_PIPES; i++)
		if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
			core_link_disable_stream(&dc->current_state->res_ctx.pipe_ctx[i]);

	dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);
	dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
}

/*
 * set min memory clock to the min required for current mode,
 * max to maxDPM, and unblank streams
 */
void dc_lock_memory_clock_frequency(struct dc *dc)
{
	unsigned int i;

	dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);
	dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);
	dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);

	for (i = 0; i < MAX_PIPES; i++)
		if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
			core_link_enable_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
}
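
/*
 * Illustrative usage sketch (not part of this file): an operation that
 * needs the full memclk DPM range while streams are blanked, followed by
 * pinning memclk to the current mode's requirement again.
 *
 *	dc_unlock_memory_clock_frequency(dc);
 *	...
 *	dc_lock_memory_clock_frequency(dc);
 */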

bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
		struct dc_cursor_attributes *cursor_attr)
{
	if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr))
		return true;
	return false;
}

/* cleanup on driver unload */
void dc_hardware_release(struct dc *dc)
{
	if (dc->hwss.hardware_release)
		dc->hwss.hardware_release(dc);
}
#endif

/**
 * dc_enable_dmub_notifications - Returns whether dmub notifications can be enabled
 * @dc: dc structure
 *
 * Returns: True if dmub notifications can be enabled, False otherwise
 */
bool dc_enable_dmub_notifications(struct dc *dc)
{
	/* dmub aux needs dmub notifications to be enabled */
	return dc->debug.enable_dmub_aux_for_legacy_ddc;
}
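
/*
 * Illustrative usage sketch (not part of this file): a DM-layer caller
 * gating its AUX path on DMUB support before registering for notifications.
 *
 *	if (dc_enable_dmub_notifications(dc)) {
 *		(register notification handling, then route AUX transfers
 *		 through dc_process_dmub_aux_transfer_async())
 *	}
 */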

/**
 * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message
 *	Sets port index appropriately for legacy DDC
 * @dc: dc structure
 * @link_index: link index
 * @payload: aux payload
 *
 * Returns: True if successful, False if failure
 */
bool dc_process_dmub_aux_transfer_async(struct dc *dc,
				uint32_t link_index,
				struct aux_payload *payload)
{
	uint8_t action;
	union dmub_rb_cmd cmd = {0};
	struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;

	ASSERT(payload->length <= 16);

	cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
	cmd.dp_aux_access.header.payload_bytes = 0;
	cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;
	cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
	cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
	cmd.dp_aux_access.aux_control.timeout = 0;
	cmd.dp_aux_access.aux_control.dpaux.address = payload->address;
	cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux;
	cmd.dp_aux_access.aux_control.dpaux.length = payload->length;

	/* set aux action */
	if (payload->i2c_over_aux) {
		if (payload->write) {
			if (payload->mot)
				action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT;
			else
				action = DP_AUX_REQ_ACTION_I2C_WRITE;
		} else {
			if (payload->mot)
				action = DP_AUX_REQ_ACTION_I2C_READ_MOT;
			else
				action = DP_AUX_REQ_ACTION_I2C_READ;
		}
	} else {
		if (payload->write)
			action = DP_AUX_REQ_ACTION_DPCD_WRITE;
		else
			action = DP_AUX_REQ_ACTION_DPCD_READ;
	}

	cmd.dp_aux_access.aux_control.dpaux.action = action;

	if (payload->length && payload->write) {
		memcpy(cmd.dp_aux_access.aux_control.dpaux.data,
			payload->data,
			payload->length);
	}

	dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
	dc_dmub_srv_cmd_execute(dmub_srv);
	dc_dmub_srv_wait_idle(dmub_srv);

	return true;
}
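
/*
 * Illustrative usage sketch (not part of this file): a native-AUX DPCD read
 * routed through DMUB.  The DPCD address, length and buffer are hypothetical
 * example values; the reply is delivered later via a DMUB notification
 * rather than returned in-line.
 *
 *	struct aux_payload payload = {
 *		.i2c_over_aux = false,
 *		.write = false,
 *		.address = 0x200,
 *		.length = 1,
 *		.data = buf,
 *	};
 *
 *	if (!dc_process_dmub_aux_transfer_async(dc, link_index, &payload))
 *		...
 */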

/**
 * dc_disable_accelerated_mode - disable accelerated mode
 * @dc: dc structure
 */
void dc_disable_accelerated_mode(struct dc *dc)
{
	bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
}
