1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 */
24
25 #include "dm_services.h"
26
27 #include "dc.h"
28
29 #include "core_status.h"
30 #include "core_types.h"
31 #include "hw_sequencer.h"
32 #include "dce/dce_hwseq.h"
33
34 #include "resource.h"
35
36 #include "gpio_service_interface.h"
37 #include "clk_mgr.h"
38 #include "clock_source.h"
39 #include "dc_bios_types.h"
40
41 #include "bios_parser_interface.h"
42 #include "bios/bios_parser_helper.h"
43 #include "include/irq_service_interface.h"
44 #include "transform.h"
45 #include "dmcu.h"
46 #include "dpp.h"
47 #include "timing_generator.h"
48 #include "abm.h"
49 #include "virtual/virtual_link_encoder.h"
50 #include "hubp.h"
51
52 #include "link_hwss.h"
53 #include "link_encoder.h"
54 #include "link_enc_cfg.h"
55
56 #include "link.h"
57 #include "dm_helpers.h"
58 #include "mem_input.h"
59
60 #include "dc_dmub_srv.h"
61
62 #include "dsc.h"
63
64 #include "vm_helper.h"
65
66 #include "dce/dce_i2c.h"
67
68 #include "dmub/dmub_srv.h"
69
70 #include "dce/dmub_psr.h"
71
72 #include "dce/dmub_hw_lock_mgr.h"
73
74 #include "dc_trace.h"
75
76 #include "hw_sequencer_private.h"
77
78 #include "dce/dmub_outbox.h"
79
80 #define CTX \
81 dc->ctx
82
83 #define DC_LOGGER \
84 dc->ctx->logger
85
86 static const char DC_BUILD_ID[] = "production-build";
87
88 /**
89 * DOC: Overview
90 *
91 * DC is the OS-agnostic component of the amdgpu DC driver.
92 *
93 * DC maintains and validates a set of structs representing the state of the
 * driver and writes that state to AMD hardware.
95 *
96 * Main DC HW structs:
97 *
98 * struct dc - The central struct. One per driver. Created on driver load,
99 * destroyed on driver unload.
100 *
101 * struct dc_context - One per driver.
102 * Used as a backpointer by most other structs in dc.
103 *
104 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
105 * plugpoints). Created on driver load, destroyed on driver unload.
106 *
107 * struct dc_sink - One per display. Created on boot or hotplug.
108 * Destroyed on shutdown or hotunplug. A dc_link can have a local sink
109 * (the display directly attached). It may also have one or more remote
 * sinks (in the Multi-Stream Transport case).
111 *
112 * struct resource_pool - One per driver. Represents the hw blocks not in the
113 * main pipeline. Not directly accessible by dm.
114 *
115 * Main dc state structs:
116 *
117 * These structs can be created and destroyed as needed. There is a full set of
118 * these structs in dc->current_state representing the currently programmed state.
119 *
120 * struct dc_state - The global DC state to track global state information,
121 * such as bandwidth values.
122 *
123 * struct dc_stream_state - Represents the hw configuration for the pipeline from
124 * a framebuffer to a display. Maps one-to-one with dc_sink.
125 *
126 * struct dc_plane_state - Represents a framebuffer. Each stream has at least one,
127 * and may have more in the Multi-Plane Overlay case.
128 *
129 * struct resource_context - Represents the programmable state of everything in
130 * the resource_pool. Not directly accessible by dm.
131 *
132 * struct pipe_ctx - A member of struct resource_context. Represents the
133 * internal hardware pipeline components. Each dc_plane_state has either
134 * one or two (in the pipe-split case).
135 */
136
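/*
 * A minimal sketch of the expected call order from the DM side, using only
 * entry points defined in this file (error handling and the stream/plane
 * commit sequence are omitted):
 *
 *	struct dc *dc = dc_create(&init_data);
 *
 *	dc_hardware_init(dc);
 *	...create links/streams and commit state...
 *	dc_destroy(&dc);
 */
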
137 /* Private functions */
138
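/* Promote *original to @new when @new is a more significant update type. */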
static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
140 {
141 if (new > *original)
142 *original = new;
143 }
144
static void destroy_links(struct dc *dc)
146 {
147 uint32_t i;
148
149 for (i = 0; i < dc->link_count; i++) {
150 if (NULL != dc->links[i])
151 dc->link_srv->destroy_link(&dc->links[i]);
152 }
153 }
154
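/* Count links driving internal panels (eDP or links flagged as internal displays). */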
static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
156 {
157 int i;
158 uint32_t count = 0;
159
160 for (i = 0; i < num_links; i++) {
161 if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
162 links[i]->is_internal_display)
163 count++;
164 }
165
166 return count;
167 }
168
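/* Count streams in the given state that are flagged for seamless boot optimization. */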
static int get_seamless_boot_stream_count(struct dc_state *ctx)
170 {
171 uint8_t i;
172 uint8_t seamless_boot_stream_count = 0;
173
174 for (i = 0; i < ctx->stream_count; i++)
175 if (ctx->streams[i]->apply_seamless_boot_optimization)
176 seamless_boot_stream_count++;
177
178 return seamless_boot_stream_count;
179 }
180
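/* Construct one dc_link per BIOS connector, one per USB4 DPIA port, and one
 * per requested virtual link (virtual links get a virtual link encoder).
 */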
static bool create_links(
182 struct dc *dc,
183 uint32_t num_virtual_links)
184 {
185 int i;
186 int connectors_num;
187 struct dc_bios *bios = dc->ctx->dc_bios;
188
189 dc->link_count = 0;
190
191 connectors_num = bios->funcs->get_connectors_number(bios);
192
193 DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);
194
195 if (connectors_num > ENUM_ID_COUNT) {
196 dm_error(
197 "DC: Number of connectors %d exceeds maximum of %d!\n",
198 connectors_num,
199 ENUM_ID_COUNT);
200 return false;
201 }
202
203 dm_output_to_console(
204 "DC: %s: connectors_num: physical:%d, virtual:%d\n",
205 __func__,
206 connectors_num,
207 num_virtual_links);
208
209 for (i = 0; i < connectors_num; i++) {
210 struct link_init_data link_init_params = {0};
211 struct dc_link *link;
212
213 DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);
214
215 link_init_params.ctx = dc->ctx;
216 /* next BIOS object table connector */
217 link_init_params.connector_index = i;
218 link_init_params.link_index = dc->link_count;
219 link_init_params.dc = dc;
220 link = dc->link_srv->create_link(&link_init_params);
221
222 if (link) {
223 dc->links[dc->link_count] = link;
224 link->dc = dc;
225 ++dc->link_count;
226 }
227 }
228
229 DC_LOG_DC("BIOS object table - end");
230
231 /* Create a link for each usb4 dpia port */
232 for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
233 struct link_init_data link_init_params = {0};
234 struct dc_link *link;
235
236 link_init_params.ctx = dc->ctx;
237 link_init_params.connector_index = i;
238 link_init_params.link_index = dc->link_count;
239 link_init_params.dc = dc;
240 link_init_params.is_dpia_link = true;
241
242 link = dc->link_srv->create_link(&link_init_params);
243 if (link) {
244 dc->links[dc->link_count] = link;
245 link->dc = dc;
246 ++dc->link_count;
247 }
248 }
249
250 for (i = 0; i < num_virtual_links; i++) {
251 struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
252 struct encoder_init_data enc_init = {0};
253
254 if (link == NULL) {
255 BREAK_TO_DEBUGGER();
256 goto failed_alloc;
257 }
258
259 link->link_index = dc->link_count;
260 dc->links[dc->link_count] = link;
261 dc->link_count++;
262
263 link->ctx = dc->ctx;
264 link->dc = dc;
265 link->connector_signal = SIGNAL_TYPE_VIRTUAL;
266 link->link_id.type = OBJECT_TYPE_CONNECTOR;
267 link->link_id.id = CONNECTOR_ID_VIRTUAL;
268 link->link_id.enum_id = ENUM_ID_1;
269 link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
270
271 if (!link->link_enc) {
272 BREAK_TO_DEBUGGER();
273 goto failed_alloc;
274 }
275
276 link->link_status.dpcd_caps = &link->dpcd_caps;
277
278 enc_init.ctx = dc->ctx;
279 enc_init.channel = CHANNEL_ID_UNKNOWN;
280 enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
281 enc_init.transmitter = TRANSMITTER_UNKNOWN;
282 enc_init.connector = link->link_id;
283 enc_init.encoder.type = OBJECT_TYPE_ENCODER;
284 enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
285 enc_init.encoder.enum_id = ENUM_ID_1;
286 virtual_link_encoder_construct(link->link_enc, &enc_init);
287 }
288
289 dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count);
290
291 return true;
292
293 failed_alloc:
294 return false;
295 }
296
297 /* Create additional DIG link encoder objects if fewer than the platform
298 * supports were created during link construction. This can happen if the
299 * number of physical connectors is less than the number of DIGs.
300 */
static bool create_link_encoders(struct dc *dc)
302 {
303 bool res = true;
304 unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
305 unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
306 int i;
307
308 /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
309 * link encoders and physical display endpoints and does not require
310 * additional link encoder objects.
311 */
312 if (num_usb4_dpia == 0)
313 return res;
314
315 /* Create as many link encoder objects as the platform supports. DPIA
316 * endpoints can be programmably mapped to any DIG.
317 */
318 if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
319 for (i = 0; i < num_dig_link_enc; i++) {
320 struct link_encoder *link_enc = dc->res_pool->link_encoders[i];
321
322 if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
323 link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
324 (enum engine_id)(ENGINE_ID_DIGA + i));
325 if (link_enc) {
326 dc->res_pool->link_encoders[i] = link_enc;
327 dc->res_pool->dig_link_enc_count++;
328 } else {
329 res = false;
330 }
331 }
332 }
333 }
334
335 return res;
336 }
337
338 /* Destroy any additional DIG link encoder objects created by
339 * create_link_encoders().
340 * NB: Must only be called after destroy_links().
341 */
static void destroy_link_encoders(struct dc *dc)
343 {
344 unsigned int num_usb4_dpia;
345 unsigned int num_dig_link_enc;
346 int i;
347
348 if (!dc->res_pool)
349 return;
350
351 num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
352 num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
353
354 /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
355 * link encoders and physical display endpoints and does not require
356 * additional link encoder objects.
357 */
358 if (num_usb4_dpia == 0)
359 return;
360
361 for (i = 0; i < num_dig_link_enc; i++) {
362 struct link_encoder *link_enc = dc->res_pool->link_encoders[i];
363
364 if (link_enc) {
365 link_enc->funcs->destroy(&link_enc);
366 dc->res_pool->link_encoders[i] = NULL;
367 dc->res_pool->dig_link_enc_count--;
368 }
369 }
370 }
371
static struct dc_perf_trace *dc_perf_trace_create(void)
373 {
374 return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
375 }
376
static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
378 {
379 kfree(*perf_trace);
380 *perf_trace = NULL;
381 }
382
383 /**
384 * dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR
385 * @dc: dc reference
386 * @stream: Initial dc stream state
387 * @adjust: Updated parameters for vertical_total_min and vertical_total_max
388 *
389 * Looks up the pipe context of dc_stream_state and updates the
390 * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
 * Rate, which is a power-saving feature that reduces the panel refresh
 * rate while the screen is static.
393 *
394 * Return: %true if the pipe context is found and adjusted;
395 * %false if the pipe context is not found.
396 */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
398 struct dc_stream_state *stream,
399 struct dc_crtc_timing_adjust *adjust)
400 {
401 int i;
402
403 /*
 * Don't adjust DRR while there are bandwidth optimizations pending, to
405 * avoid conflicting with firmware updates.
406 */
407 if (dc->ctx->dce_version > DCE_VERSION_MAX)
408 if (dc->optimized_required || dc->wm_optimized_required)
409 return false;
410
411 stream->adjust.v_total_max = adjust->v_total_max;
412 stream->adjust.v_total_mid = adjust->v_total_mid;
413 stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
414 stream->adjust.v_total_min = adjust->v_total_min;
415
416 for (i = 0; i < MAX_PIPES; i++) {
417 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
418
419 if (pipe->stream == stream && pipe->stream_res.tg) {
420 dc->hwss.set_drr(&pipe,
421 1,
422 *adjust);
423
424 return true;
425 }
426 }
427 return false;
428 }
429
430 /**
431 * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of
432 * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate)
433 *
434 * @dc: [in] dc reference
435 * @stream: [in] Initial dc stream state
 * @refresh_rate: [out] last VTOTAL used by DRR
437 *
438 * Return: %true if the pipe context is found and there is an associated
439 * timing_generator for the DC;
440 * %false if the pipe context is not found or there is no
441 * timing_generator for the DC.
442 */
bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
444 struct dc_stream_state *stream,
445 uint32_t *refresh_rate)
446 {
447 bool status = false;
448
449 int i = 0;
450
451 for (i = 0; i < MAX_PIPES; i++) {
452 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
453
454 if (pipe->stream == stream && pipe->stream_res.tg) {
455 /* Only execute if a function pointer has been defined for
456 * the DC version in question
457 */
458 if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
459 pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);
460
461 status = true;
462
463 break;
464 }
465 }
466 }
467
468 return status;
469 }
470
bool dc_stream_get_crtc_position(struct dc *dc,
472 struct dc_stream_state **streams, int num_streams,
473 unsigned int *v_pos, unsigned int *nom_v_pos)
474 {
475 /* TODO: Support multiple streams */
476 const struct dc_stream_state *stream = streams[0];
477 int i;
478 bool ret = false;
479 struct crtc_position position;
480
481 for (i = 0; i < MAX_PIPES; i++) {
482 struct pipe_ctx *pipe =
483 &dc->current_state->res_ctx.pipe_ctx[i];
484
485 if (pipe->stream == stream && pipe->stream_res.stream_enc) {
486 dc->hwss.get_position(&pipe, 1, &position);
487
488 *v_pos = position.vertical_count;
489 *nom_v_pos = position.nominal_vcount;
490 ret = true;
491 }
492 }
493 return ret;
494 }
495
496 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
497 static inline void
dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv,
499 struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
500 {
501 union dmub_rb_cmd cmd = {0};
502
503 cmd.secure_display.roi_info.phy_id = mux_mapping->phy_output_num;
504 cmd.secure_display.roi_info.otg_id = mux_mapping->otg_output_num;
505
506 if (is_stop) {
507 cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
508 cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_STOP_UPDATE;
509 } else {
510 cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
511 cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY;
512 cmd.secure_display.roi_info.x_start = rect->x;
513 cmd.secure_display.roi_info.y_start = rect->y;
514 cmd.secure_display.roi_info.x_end = rect->x + rect->width;
515 cmd.secure_display.roi_info.y_end = rect->y + rect->height;
516 }
517
518 dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
519 }
520
521 static inline void
dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu,
523 struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
524 {
525 if (is_stop)
526 dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
527 else
528 dmcu->funcs->forward_crc_window(dmcu, rect, mux_mapping);
529 }
530
531 bool
dc_stream_forward_crc_window(struct dc_stream_state *stream,
533 struct rect *rect, bool is_stop)
534 {
535 struct dmcu *dmcu;
536 struct dc_dmub_srv *dmub_srv;
537 struct otg_phy_mux mux_mapping;
538 struct pipe_ctx *pipe;
539 int i;
540 struct dc *dc = stream->ctx->dc;
541
542 for (i = 0; i < MAX_PIPES; i++) {
543 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
544 if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
545 break;
546 }
547
548 /* Stream not found */
549 if (i == MAX_PIPES)
550 return false;
551
552 mux_mapping.phy_output_num = stream->link->link_enc_hw_inst;
553 mux_mapping.otg_output_num = pipe->stream_res.tg->inst;
554
555 dmcu = dc->res_pool->dmcu;
556 dmub_srv = dc->ctx->dmub_srv;
557
558 /* forward to dmub */
559 if (dmub_srv)
560 dc_stream_forward_dmub_crc_window(dmub_srv, rect, &mux_mapping, is_stop);
561 /* forward to dmcu */
562 else if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
563 dc_stream_forward_dmcu_crc_window(dmcu, rect, &mux_mapping, is_stop);
564 else
565 return false;
566
567 return true;
568 }
569 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
570
571 /**
572 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
573 * @dc: DC Object
574 * @stream: The stream to configure CRC on.
575 * @enable: Enable CRC if true, disable otherwise.
576 * @crc_window: CRC window (x/y start/end) information
577 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
578 * once.
579 *
580 * By default, only CRC0 is configured, and the entire frame is used to
581 * calculate the CRC.
582 *
583 * Return: %false if the stream is not found or CRC capture is not supported;
584 * %true if the stream has been configured.
585 */
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
587 struct crc_params *crc_window, bool enable, bool continuous)
588 {
589 struct pipe_ctx *pipe;
590 struct crc_params param;
591 struct timing_generator *tg;
592
593 pipe = resource_get_otg_master_for_stream(
594 &dc->current_state->res_ctx, stream);
595
596 /* Stream not found */
597 if (pipe == NULL)
598 return false;
599
600 /* By default, capture the full frame */
601 param.windowa_x_start = 0;
602 param.windowa_y_start = 0;
603 param.windowa_x_end = pipe->stream->timing.h_addressable;
604 param.windowa_y_end = pipe->stream->timing.v_addressable;
605 param.windowb_x_start = 0;
606 param.windowb_y_start = 0;
607 param.windowb_x_end = pipe->stream->timing.h_addressable;
608 param.windowb_y_end = pipe->stream->timing.v_addressable;
609
610 if (crc_window) {
611 param.windowa_x_start = crc_window->windowa_x_start;
612 param.windowa_y_start = crc_window->windowa_y_start;
613 param.windowa_x_end = crc_window->windowa_x_end;
614 param.windowa_y_end = crc_window->windowa_y_end;
615 param.windowb_x_start = crc_window->windowb_x_start;
616 param.windowb_y_start = crc_window->windowb_y_start;
617 param.windowb_x_end = crc_window->windowb_x_end;
618 param.windowb_y_end = crc_window->windowb_y_end;
619 }
620
621 param.dsc_mode = pipe->stream->timing.flags.DSC ? 1:0;
622 param.odm_mode = pipe->next_odm_pipe ? 1:0;
623
624 /* Default to the union of both windows */
625 param.selection = UNION_WINDOW_A_B;
626 param.continuous_mode = continuous;
627 param.enable = enable;
628
629 tg = pipe->stream_res.tg;
630
631 /* Only call if supported */
632 if (tg->funcs->configure_crc)
return tg->funcs->configure_crc(tg, &param);
634 DC_LOG_WARNING("CRC capture not supported.");
635 return false;
636 }
637
638 /**
639 * dc_stream_get_crc() - Get CRC values for the given stream.
640 *
641 * @dc: DC object.
642 * @stream: The DC stream state of the stream to get CRCs from.
643 * @r_cr: CRC value for the red component.
644 * @g_y: CRC value for the green component.
645 * @b_cb: CRC value for the blue component.
646 *
647 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
648 *
649 * Return:
650 * %false if stream is not found, or if CRCs are not enabled.
651 */
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
653 uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
654 {
655 int i;
656 struct pipe_ctx *pipe;
657 struct timing_generator *tg;
658
659 for (i = 0; i < MAX_PIPES; i++) {
660 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
661 if (pipe->stream == stream)
662 break;
663 }
664 /* Stream not found */
665 if (i == MAX_PIPES)
666 return false;
667
668 tg = pipe->stream_res.tg;
669
670 if (tg->funcs->get_crc)
671 return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
672 DC_LOG_WARNING("CRC capture not supported.");
673 return false;
674 }
675
void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
677 enum dc_dynamic_expansion option)
678 {
/* OPP FMT dyn expansion updates */
680 int i;
681 struct pipe_ctx *pipe_ctx;
682
683 for (i = 0; i < MAX_PIPES; i++) {
684 if (dc->current_state->res_ctx.pipe_ctx[i].stream
685 == stream) {
686 pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
687 pipe_ctx->stream_res.opp->dyn_expansion = option;
688 pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
689 pipe_ctx->stream_res.opp,
690 COLOR_SPACE_YCBCR601,
691 stream->timing.display_color_depth,
692 stream->signal);
693 }
694 }
695 }
696
void dc_stream_set_dither_option(struct dc_stream_state *stream,
698 enum dc_dither_option option)
699 {
700 struct bit_depth_reduction_params params;
701 struct dc_link *link = stream->link;
702 struct pipe_ctx *pipes = NULL;
703 int i;
704
705 for (i = 0; i < MAX_PIPES; i++) {
706 if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
707 stream) {
708 pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
709 break;
710 }
711 }
712
713 if (!pipes)
714 return;
715 if (option > DITHER_OPTION_MAX)
716 return;
717
718 stream->dither_option = option;
719
memset(&params, 0, sizeof(params));
resource_build_bit_depth_reduction_params(stream, &params);
722 stream->bit_depth_params = params;
723
724 if (pipes->plane_res.xfm &&
725 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
726 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
727 pipes->plane_res.xfm,
728 pipes->plane_res.scl_data.lb_params.depth,
729 &stream->bit_depth_params);
730 }
731
732 pipes->stream_res.opp->funcs->
opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
734 }
735
bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
737 {
738 int i;
739 bool ret = false;
740 struct pipe_ctx *pipes;
741
742 for (i = 0; i < MAX_PIPES; i++) {
743 if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
744 pipes = &dc->current_state->res_ctx.pipe_ctx[i];
745 dc->hwss.program_gamut_remap(pipes);
746 ret = true;
747 }
748 }
749
750 return ret;
751 }
752
bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
754 {
755 int i;
756 bool ret = false;
757 struct pipe_ctx *pipes;
758
759 for (i = 0; i < MAX_PIPES; i++) {
760 if (dc->current_state->res_ctx.pipe_ctx[i].stream
761 == stream) {
762
763 pipes = &dc->current_state->res_ctx.pipe_ctx[i];
764 dc->hwss.program_output_csc(dc,
765 pipes,
766 stream->output_color_space,
767 stream->csc_color_matrix.matrix,
768 pipes->stream_res.opp->inst);
769 ret = true;
770 }
771 }
772
773 return ret;
774 }
775
void dc_stream_set_static_screen_params(struct dc *dc,
777 struct dc_stream_state **streams,
778 int num_streams,
779 const struct dc_static_screen_params *params)
780 {
781 int i, j;
782 struct pipe_ctx *pipes_affected[MAX_PIPES];
783 int num_pipes_affected = 0;
784
785 for (i = 0; i < num_streams; i++) {
786 struct dc_stream_state *stream = streams[i];
787
788 for (j = 0; j < MAX_PIPES; j++) {
789 if (dc->current_state->res_ctx.pipe_ctx[j].stream
790 == stream) {
791 pipes_affected[num_pipes_affected++] =
792 &dc->current_state->res_ctx.pipe_ctx[j];
793 }
794 }
795 }
796
797 dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
798 }
799
static void dc_destruct(struct dc *dc)
801 {
802 // reset link encoder assignment table on destruct
803 if (dc->res_pool && dc->res_pool->funcs->link_encs_assign)
804 link_enc_cfg_init(dc, dc->current_state);
805
806 if (dc->current_state) {
807 dc_release_state(dc->current_state);
808 dc->current_state = NULL;
809 }
810
811 destroy_links(dc);
812
813 destroy_link_encoders(dc);
814
815 if (dc->clk_mgr) {
816 dc_destroy_clk_mgr(dc->clk_mgr);
817 dc->clk_mgr = NULL;
818 }
819
820 dc_destroy_resource_pool(dc);
821
822 if (dc->link_srv)
823 link_destroy_link_service(&dc->link_srv);
824
825 if (dc->ctx->gpio_service)
826 dal_gpio_service_destroy(&dc->ctx->gpio_service);
827
828 if (dc->ctx->created_bios)
829 dal_bios_parser_destroy(&dc->ctx->dc_bios);
830
831 dc_perf_trace_destroy(&dc->ctx->perf_trace);
832
833 kfree(dc->ctx);
834 dc->ctx = NULL;
835
836 kfree(dc->bw_vbios);
837 dc->bw_vbios = NULL;
838
839 kfree(dc->bw_dceip);
840 dc->bw_dceip = NULL;
841
842 kfree(dc->dcn_soc);
843 dc->dcn_soc = NULL;
844
845 kfree(dc->dcn_ip);
846 dc->dcn_ip = NULL;
847
848 kfree(dc->vm_helper);
849 dc->vm_helper = NULL;
850
851 }
852
static bool dc_construct_ctx(struct dc *dc,
854 const struct dc_init_data *init_params)
855 {
856 struct dc_context *dc_ctx;
857
858 dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
859 if (!dc_ctx)
860 return false;
861
862 dc_ctx->cgs_device = init_params->cgs_device;
863 dc_ctx->driver_context = init_params->driver;
864 dc_ctx->dc = dc;
865 dc_ctx->asic_id = init_params->asic_id;
866 dc_ctx->dc_sink_id_count = 0;
867 dc_ctx->dc_stream_id_count = 0;
868 dc_ctx->dce_environment = init_params->dce_environment;
869 dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets;
870 dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets;
871
872 /* Create logger */
873
874 dc_ctx->dce_version = resource_parse_asic_id(init_params->asic_id);
875
876 dc_ctx->perf_trace = dc_perf_trace_create();
877 if (!dc_ctx->perf_trace) {
878 kfree(dc_ctx);
879 ASSERT_CRITICAL(false);
880 return false;
881 }
882
883 dc->ctx = dc_ctx;
884
885 dc->link_srv = link_create_link_service();
886 if (!dc->link_srv)
887 return false;
888
889 return true;
890 }
891
static bool dc_construct(struct dc *dc,
893 const struct dc_init_data *init_params)
894 {
895 struct dc_context *dc_ctx;
896 struct bw_calcs_dceip *dc_dceip;
897 struct bw_calcs_vbios *dc_vbios;
898 struct dcn_soc_bounding_box *dcn_soc;
899 struct dcn_ip_params *dcn_ip;
900
901 dc->config = init_params->flags;
902
903 // Allocate memory for the vm_helper
904 dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
905 if (!dc->vm_helper) {
906 dm_error("%s: failed to create dc->vm_helper\n", __func__);
907 goto fail;
908 }
909
910 memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));
911
912 dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
913 if (!dc_dceip) {
914 dm_error("%s: failed to create dceip\n", __func__);
915 goto fail;
916 }
917
918 dc->bw_dceip = dc_dceip;
919
920 dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
921 if (!dc_vbios) {
922 dm_error("%s: failed to create vbios\n", __func__);
923 goto fail;
924 }
925
926 dc->bw_vbios = dc_vbios;
927 dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
928 if (!dcn_soc) {
929 dm_error("%s: failed to create dcn_soc\n", __func__);
930 goto fail;
931 }
932
933 dc->dcn_soc = dcn_soc;
934
935 dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
936 if (!dcn_ip) {
937 dm_error("%s: failed to create dcn_ip\n", __func__);
938 goto fail;
939 }
940
941 dc->dcn_ip = dcn_ip;
942
943 if (!dc_construct_ctx(dc, init_params)) {
944 dm_error("%s: failed to create ctx\n", __func__);
945 goto fail;
946 }
947
948 dc_ctx = dc->ctx;
949
950 /* Resource should construct all asic specific resources.
951 * This should be the only place where we need to parse the asic id
952 */
953 if (init_params->vbios_override)
954 dc_ctx->dc_bios = init_params->vbios_override;
955 else {
956 /* Create BIOS parser */
957 struct bp_init_data bp_init_data;
958
959 bp_init_data.ctx = dc_ctx;
960 bp_init_data.bios = init_params->asic_id.atombios_base_address;
961
962 dc_ctx->dc_bios = dal_bios_parser_create(
963 &bp_init_data, dc_ctx->dce_version);
964
965 if (!dc_ctx->dc_bios) {
966 ASSERT_CRITICAL(false);
967 goto fail;
968 }
969
970 dc_ctx->created_bios = true;
971 }
972
973 dc->vendor_signature = init_params->vendor_signature;
974
975 /* Create GPIO service */
976 dc_ctx->gpio_service = dal_gpio_service_create(
977 dc_ctx->dce_version,
978 dc_ctx->dce_environment,
979 dc_ctx);
980
981 if (!dc_ctx->gpio_service) {
982 ASSERT_CRITICAL(false);
983 goto fail;
984 }
985
986 dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
987 if (!dc->res_pool)
988 goto fail;
989
990 /* set i2c speed if not done by the respective dcnxxx__resource.c */
991 if (dc->caps.i2c_speed_in_khz_hdcp == 0)
992 dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
993 if (dc->caps.max_optimizable_video_width == 0)
994 dc->caps.max_optimizable_video_width = 5120;
995 dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
996 if (!dc->clk_mgr)
997 goto fail;
998 #ifdef CONFIG_DRM_AMD_DC_FP
999 dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
1000
1001 if (dc->res_pool->funcs->update_bw_bounding_box) {
1002 DC_FP_START();
1003 dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
1004 DC_FP_END();
1005 }
1006 #endif
1007
1008 /* Creation of current_state must occur after dc->dml
1009 * is initialized in dc_create_resource_pool because
1010 * on creation it copies the contents of dc->dml
1011 */
1012
1013 dc->current_state = dc_create_state(dc);
1014
1015 if (!dc->current_state) {
1016 dm_error("%s: failed to create validate ctx\n", __func__);
1017 goto fail;
1018 }
1019
1020 if (!create_links(dc, init_params->num_virtual_links))
1021 goto fail;
1022
1023 /* Create additional DIG link encoder objects if fewer than the platform
1024 * supports were created during link construction.
1025 */
1026 if (!create_link_encoders(dc))
1027 goto fail;
1028
1029 dc_resource_state_construct(dc, dc->current_state);
1030
1031 return true;
1032
1033 fail:
1034 return false;
1035 }
1036
static void disable_all_writeback_pipes_for_stream(
1038 const struct dc *dc,
1039 struct dc_stream_state *stream,
1040 struct dc_state *context)
1041 {
1042 int i;
1043
1044 for (i = 0; i < stream->num_wb_info; i++)
1045 stream->writeback_info[i].wb_enabled = false;
1046 }
1047
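/* Lock or unlock the pipes affected by @stream: use the global interdependent
 * update lock when the hw sequencer provides one, otherwise fall back to
 * locking each affected OPP-head pipe individually (the DCE110 path).
 */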
static void apply_ctx_interdependent_lock(struct dc *dc,
1049 struct dc_state *context,
1050 struct dc_stream_state *stream,
1051 bool lock)
1052 {
1053 int i;
1054
1055 /* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
1056 if (dc->hwss.interdependent_update_lock)
1057 dc->hwss.interdependent_update_lock(dc, context, lock);
1058 else {
1059 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1060 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1061 struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1062
1063 // Copied conditions that were previously in dce110_apply_ctx_for_surface
1064 if (stream == pipe_ctx->stream) {
1065 if (resource_is_pipe_type(pipe_ctx, OPP_HEAD) &&
1066 (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
1067 dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
1068 }
1069 }
1070 }
1071 }
1072
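/* Select the visual confirm (debug border) color for a pipe based on the
 * dc->debug.visual_confirm option currently in effect.
 */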
static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
1074 {
1075 if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
1076 memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color));
1077
1078 if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
1079 get_hdr_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
1080 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
1081 get_surface_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
1082 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
1083 get_surface_tile_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
1084 else {
1085 if (dc->ctx->dce_version < DCN_VERSION_2_0)
1086 color_space_to_black_color(
1087 dc, pipe_ctx->stream->output_color_space, &(pipe_ctx->visual_confirm_color));
1088 }
1089 if (dc->ctx->dce_version >= DCN_VERSION_2_0) {
1090 if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE)
1091 get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
1092 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP)
1093 get_subvp_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color));
1094 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH)
1095 get_mclk_switch_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color));
1096 }
1097 }
1098 }
1099
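/* Disable planes and writeback on pipes whose stream is not carried over into
 * the new context, so nothing is left scanning out of a surface that is about
 * to be released. Phantom (SubVP) OTGs are briefly re-enabled so the disable
 * programming takes effect, then returned to their default disabled state.
 */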
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
1101 {
1102 int i, j;
1103 struct dc_state *dangling_context = dc_create_state(dc);
1104 struct dc_state *current_ctx;
1105 struct pipe_ctx *pipe;
1106 struct timing_generator *tg;
1107
1108 if (dangling_context == NULL)
1109 return;
1110
1111 dc_resource_state_copy_construct(dc->current_state, dangling_context);
1112
1113 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1114 struct dc_stream_state *old_stream =
1115 dc->current_state->res_ctx.pipe_ctx[i].stream;
1116 bool should_disable = true;
1117 bool pipe_split_change = false;
1118
1119 if ((context->res_ctx.pipe_ctx[i].top_pipe) &&
1120 (dc->current_state->res_ctx.pipe_ctx[i].top_pipe))
1121 pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx !=
1122 dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx;
1123 else
1124 pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe !=
1125 dc->current_state->res_ctx.pipe_ctx[i].top_pipe;
1126
1127 for (j = 0; j < context->stream_count; j++) {
1128 if (old_stream == context->streams[j]) {
1129 should_disable = false;
1130 break;
1131 }
1132 }
1133 if (!should_disable && pipe_split_change &&
1134 dc->current_state->stream_count != context->stream_count)
1135 should_disable = true;
1136
1137 if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe &&
1138 !dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) {
1139 struct pipe_ctx *old_pipe, *new_pipe;
1140
1141 old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1142 new_pipe = &context->res_ctx.pipe_ctx[i];
1143
1144 if (old_pipe->plane_state && !new_pipe->plane_state)
1145 should_disable = true;
1146 }
1147
1148 if (should_disable && old_stream) {
1149 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1150 tg = pipe->stream_res.tg;
1151 /* When disabling plane for a phantom pipe, we must turn on the
1152 * phantom OTG so the disable programming gets the double buffer
1153 * update. Otherwise the pipe will be left in a partially disabled
1154 * state that can result in underflow or hang when enabling it
1155 * again for different use.
1156 */
1157 if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
1158 if (tg->funcs->enable_crtc) {
1159 int main_pipe_width, main_pipe_height;
1160
1161 main_pipe_width = old_stream->mall_stream_config.paired_stream->dst.width;
1162 main_pipe_height = old_stream->mall_stream_config.paired_stream->dst.height;
1163 if (dc->hwss.blank_phantom)
1164 dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
1165 tg->funcs->enable_crtc(tg);
1166 }
1167 }
1168 dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
1169 disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
1170
1171 if (pipe->stream && pipe->plane_state)
1172 dc_update_viusal_confirm_color(dc, context, pipe);
1173
1174 if (dc->hwss.apply_ctx_for_surface) {
1175 apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
1176 dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
1177 apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
1178 dc->hwss.post_unlock_program_front_end(dc, dangling_context);
1179 }
1180 if (dc->hwss.program_front_end_for_ctx) {
1181 dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
1182 dc->hwss.program_front_end_for_ctx(dc, dangling_context);
1183 dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
1184 dc->hwss.post_unlock_program_front_end(dc, dangling_context);
1185 }
/* We need to put the phantom OTG back into its default (disabled) state or we
* can get corruption when transitioning from one SubVP config to a different one.
* The OTG is set to disable on the falling edge of VUPDATE so the plane disable
* will still get its double buffer update.
1190 */
1191 if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
1192 if (tg->funcs->disable_phantom_crtc)
1193 tg->funcs->disable_phantom_crtc(tg);
1194 }
1195 }
1196 }
1197
1198 current_ctx = dc->current_state;
1199 dc->current_state = dangling_context;
1200 dc_release_state(current_ctx);
1201 }
1202
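/* If an eDP stream lit up by VBIOS/GOP is running at a pixel clock different
 * from what the new context requests, turn it off now so the timing can be
 * reprogrammed cleanly.
 */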
static void disable_vbios_mode_if_required(
1204 struct dc *dc,
1205 struct dc_state *context)
1206 {
1207 unsigned int i, j;
1208
1209 /* check if timing_changed, disable stream*/
1210 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1211 struct dc_stream_state *stream = NULL;
1212 struct dc_link *link = NULL;
1213 struct pipe_ctx *pipe = NULL;
1214
1215 pipe = &context->res_ctx.pipe_ctx[i];
1216 stream = pipe->stream;
1217 if (stream == NULL)
1218 continue;
1219
1220 if (stream->apply_seamless_boot_optimization)
1221 continue;
1222
1223 // only looking for first odm pipe
1224 if (pipe->prev_odm_pipe)
1225 continue;
1226
1227 if (stream->link->local_sink &&
1228 stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
1229 link = stream->link;
1230 }
1231
1232 if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
1233 unsigned int enc_inst, tg_inst = 0;
1234 unsigned int pix_clk_100hz;
1235
1236 enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
1237 if (enc_inst != ENGINE_ID_UNKNOWN) {
1238 for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
1239 if (dc->res_pool->stream_enc[j]->id == enc_inst) {
1240 tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
1241 dc->res_pool->stream_enc[j]);
1242 break;
1243 }
1244 }
1245
1246 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
1247 dc->res_pool->dp_clock_source,
1248 tg_inst, &pix_clk_100hz);
1249
1250 if (link->link_status.link_active) {
1251 uint32_t requested_pix_clk_100hz =
1252 pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;
1253
1254 if (pix_clk_100hz != requested_pix_clk_100hz) {
1255 dc->link_srv->set_dpms_off(pipe);
1256 pipe->stream->dpms_off = false;
1257 }
1258 }
1259 }
1260 }
1261 }
1262 }
1263
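/* Poll each active (non-phantom) pipe until its pending flip completes,
 * waiting up to roughly 100 ms per pipe.
 */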
static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
1265 {
1266 int i;
1267 PERF_TRACE();
1268 for (i = 0; i < MAX_PIPES; i++) {
1269 int count = 0;
1270 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
1271
1272 if (!pipe->plane_state || pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
1273 continue;
1274
1275 /* Timeout 100 ms */
1276 while (count < 100000) {
1277 /* Must set to false to start with, due to OR in update function */
1278 pipe->plane_state->status.is_flip_pending = false;
1279 dc->hwss.update_pending_status(pipe);
1280 if (!pipe->plane_state->status.is_flip_pending)
1281 break;
1282 udelay(1);
1283 count++;
1284 }
1285 ASSERT(!pipe->plane_state->status.is_flip_pending);
1286 }
1287 PERF_TRACE();
1288 }
1289
1290 /* Public functions */
1291
struct dc *dc_create(const struct dc_init_data *init_params)
1293 {
1294 struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
1295 unsigned int full_pipe_count;
1296
1297 if (!dc)
1298 return NULL;
1299
1300 if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
1301 if (!dc_construct_ctx(dc, init_params))
1302 goto destruct_dc;
1303 } else {
1304 if (!dc_construct(dc, init_params))
1305 goto destruct_dc;
1306
1307 full_pipe_count = dc->res_pool->pipe_count;
1308 if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
1309 full_pipe_count--;
1310 dc->caps.max_streams = min(
1311 full_pipe_count,
1312 dc->res_pool->stream_enc_count);
1313
1314 dc->caps.max_links = dc->link_count;
1315 dc->caps.max_audios = dc->res_pool->audio_count;
1316 dc->caps.linear_pitch_alignment = 64;
1317
1318 dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
1319
1320 dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;
1321
1322 if (dc->res_pool->dmcu != NULL)
1323 dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
1324 }
1325
1326 dc->dcn_reg_offsets = init_params->dcn_reg_offsets;
1327 dc->nbio_reg_offsets = init_params->nbio_reg_offsets;
1328
1329 /* Populate versioning information */
1330 dc->versions.dc_ver = DC_VER;
1331
1332 dc->build_id = DC_BUILD_ID;
1333
1334 DC_LOG_DC("Display Core initialized\n");
1335
1336
1337
1338 return dc;
1339
1340 destruct_dc:
1341 dc_destruct(dc);
1342 kfree(dc);
1343 return NULL;
1344 }
1345
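/* Record for each eDP link whether a panel is actually present, honoring the
 * edp_not_connected config override.
 */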
static void detect_edp_presence(struct dc *dc)
1347 {
1348 struct dc_link *edp_links[MAX_NUM_EDP];
1349 struct dc_link *edp_link = NULL;
1350 enum dc_connection_type type;
1351 int i;
1352 int edp_num;
1353
1354 dc_get_edp_links(dc, edp_links, &edp_num);
1355 if (!edp_num)
1356 return;
1357
1358 for (i = 0; i < edp_num; i++) {
1359 edp_link = edp_links[i];
1360 if (dc->config.edp_not_connected) {
1361 edp_link->edp_sink_present = false;
1362 } else {
1363 dc_link_detect_connection_type(edp_link, &type);
1364 edp_link->edp_sink_present = (type != dc_connection_none);
1365 }
1366 }
1367 }
1368
void dc_hardware_init(struct dc *dc)
1370 {
1371
1372 detect_edp_presence(dc);
1373 if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
1374 dc->hwss.init_hw(dc);
1375 }
1376
void dc_init_callbacks(struct dc *dc,
1378 const struct dc_callback_init *init_params)
1379 {
1380 dc->ctx->cp_psp = init_params->cp_psp;
1381 }
1382
void dc_deinit_callbacks(struct dc *dc)
1384 {
1385 memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
1386 }
1387
void dc_destroy(struct dc **dc)
1389 {
1390 dc_destruct(*dc);
1391 kfree(*dc);
1392 *dc = NULL;
1393 }
1394
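/* Collect pipes whose stream uses a triggered CRTC reset sourced from another
 * stream and enable per-frame CRTC position reset for that group.
 */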
static void enable_timing_multisync(
1396 struct dc *dc,
1397 struct dc_state *ctx)
1398 {
1399 int i, multisync_count = 0;
1400 int pipe_count = dc->res_pool->pipe_count;
1401 struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };
1402
1403 for (i = 0; i < pipe_count; i++) {
1404 if (!ctx->res_ctx.pipe_ctx[i].stream ||
1405 !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
1406 continue;
1407 if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
1408 continue;
1409 multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
1410 multisync_count++;
1411 }
1412
1413 if (multisync_count > 0) {
1414 dc->hwss.enable_per_frame_crtc_position_reset(
1415 dc, multisync_count, multisync_pipes);
1416 }
1417 }
1418
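/* Group active OTG-master pipes whose streams have identical (or vblank-
 * synchronizable) timings and enable timing/vblank synchronization for each
 * group, with the first unblanked pipe acting as master.
 */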
static void program_timing_sync(
1420 struct dc *dc,
1421 struct dc_state *ctx)
1422 {
1423 int i, j, k;
1424 int group_index = 0;
1425 int num_group = 0;
1426 int pipe_count = dc->res_pool->pipe_count;
1427 struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
1428
1429 for (i = 0; i < pipe_count; i++) {
1430 if (!ctx->res_ctx.pipe_ctx[i].stream
1431 || ctx->res_ctx.pipe_ctx[i].top_pipe
1432 || ctx->res_ctx.pipe_ctx[i].prev_odm_pipe)
1433 continue;
1434
1435 unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
1436 }
1437
1438 for (i = 0; i < pipe_count; i++) {
1439 int group_size = 1;
1440 enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE;
1441 struct pipe_ctx *pipe_set[MAX_PIPES];
1442
1443 if (!unsynced_pipes[i])
1444 continue;
1445
1446 pipe_set[0] = unsynced_pipes[i];
1447 unsynced_pipes[i] = NULL;
1448
/* Add tg to the set, then search the remaining tgs for ones with the
* same timing and add all tgs with matching timing to the group.
1451 */
1452 for (j = i + 1; j < pipe_count; j++) {
1453 if (!unsynced_pipes[j])
1454 continue;
1455 if (sync_type != TIMING_SYNCHRONIZABLE &&
1456 dc->hwss.enable_vblanks_synchronization &&
1457 unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
1458 resource_are_vblanks_synchronizable(
1459 unsynced_pipes[j]->stream,
1460 pipe_set[0]->stream)) {
1461 sync_type = VBLANK_SYNCHRONIZABLE;
1462 pipe_set[group_size] = unsynced_pipes[j];
1463 unsynced_pipes[j] = NULL;
1464 group_size++;
1465 } else
1466 if (sync_type != VBLANK_SYNCHRONIZABLE &&
1467 resource_are_streams_timing_synchronizable(
1468 unsynced_pipes[j]->stream,
1469 pipe_set[0]->stream)) {
1470 sync_type = TIMING_SYNCHRONIZABLE;
1471 pipe_set[group_size] = unsynced_pipes[j];
1472 unsynced_pipes[j] = NULL;
1473 group_size++;
1474 }
1475 }
1476
1477 /* set first unblanked pipe as master */
1478 for (j = 0; j < group_size; j++) {
1479 bool is_blanked;
1480
1481 if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
1482 is_blanked =
1483 pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
1484 else
1485 is_blanked =
1486 pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
1487 if (!is_blanked) {
1488 if (j == 0)
1489 break;
1490
1491 swap(pipe_set[0], pipe_set[j]);
1492 break;
1493 }
1494 }
1495
1496 for (k = 0; k < group_size; k++) {
1497 struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);
1498
1499 status->timing_sync_info.group_id = num_group;
1500 status->timing_sync_info.group_size = group_size;
1501 if (k == 0)
1502 status->timing_sync_info.master = true;
1503 else
1504 status->timing_sync_info.master = false;
1505
1506 }
1507
/* remove any other pipes that have already been synced */
1509 if (dc->config.use_pipe_ctx_sync_logic) {
1510 /* check pipe's syncd to decide which pipe to be removed */
1511 for (j = 1; j < group_size; j++) {
1512 if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) {
1513 group_size--;
1514 pipe_set[j] = pipe_set[group_size];
1515 j--;
1516 } else
1517 /* link slave pipe's syncd with master pipe */
1518 pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
1519 }
1520 } else {
1521 for (j = j + 1; j < group_size; j++) {
1522 bool is_blanked;
1523
1524 if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
1525 is_blanked =
1526 pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
1527 else
1528 is_blanked =
1529 pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
1530 if (!is_blanked) {
1531 group_size--;
1532 pipe_set[j] = pipe_set[group_size];
1533 j--;
1534 }
1535 }
1536 }
1537
1538 if (group_size > 1) {
1539 if (sync_type == TIMING_SYNCHRONIZABLE) {
1540 dc->hwss.enable_timing_synchronization(
1541 dc, group_index, group_size, pipe_set);
1542 } else
1543 if (sync_type == VBLANK_SYNCHRONIZABLE) {
1544 dc->hwss.enable_vblanks_synchronization(
1545 dc, group_index, group_size, pipe_set);
1546 }
1547 group_index++;
1548 }
1549 num_group++;
1550 }
1551 }
1552
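/* Return true if the requested stream set differs from the committed state,
 * either in count, in membership, or because a link no longer has valid state.
 */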
static bool streams_changed(struct dc *dc,
1554 struct dc_stream_state *streams[],
1555 uint8_t stream_count)
1556 {
1557 uint8_t i;
1558
1559 if (stream_count != dc->current_state->stream_count)
1560 return true;
1561
1562 for (i = 0; i < dc->current_state->stream_count; i++) {
1563 if (dc->current_state->streams[i] != streams[i])
1564 return true;
1565 if (!streams[i]->link->link_state_valid)
1566 return true;
1567 }
1568
1569 return false;
1570 }
1571
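/* Check whether the timing already programmed by VBIOS/GOP on the eDP path
 * matches @crtc_timing closely enough that the full mode programming can be
 * skipped (seamless boot).
 */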
bool dc_validate_boot_timing(const struct dc *dc,
1573 const struct dc_sink *sink,
1574 struct dc_crtc_timing *crtc_timing)
1575 {
1576 struct timing_generator *tg;
1577 struct stream_encoder *se = NULL;
1578
1579 struct dc_crtc_timing hw_crtc_timing = {0};
1580
1581 struct dc_link *link = sink->link;
1582 unsigned int i, enc_inst, tg_inst = 0;
1583
1584 /* Support seamless boot on EDP displays only */
1585 if (sink->sink_signal != SIGNAL_TYPE_EDP) {
1586 return false;
1587 }
1588
1589 if (dc->debug.force_odm_combine)
1590 return false;
1591
1592 /* Check for enabled DIG to identify enabled display */
1593 if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
1594 return false;
1595
1596 enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
1597
1598 if (enc_inst == ENGINE_ID_UNKNOWN)
1599 return false;
1600
1601 for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
1602 if (dc->res_pool->stream_enc[i]->id == enc_inst) {
1603
1604 se = dc->res_pool->stream_enc[i];
1605
1606 tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
1607 dc->res_pool->stream_enc[i]);
1608 break;
1609 }
1610 }
1611
1612 // tg_inst not found
1613 if (i == dc->res_pool->stream_enc_count)
1614 return false;
1615
1616 if (tg_inst >= dc->res_pool->timing_generator_count)
1617 return false;
1618
1619 if (tg_inst != link->link_enc->preferred_engine)
1620 return false;
1621
1622 tg = dc->res_pool->timing_generators[tg_inst];
1623
1624 if (!tg->funcs->get_hw_timing)
1625 return false;
1626
1627 if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
1628 return false;
1629
1630 if (crtc_timing->h_total != hw_crtc_timing.h_total)
1631 return false;
1632
1633 if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
1634 return false;
1635
1636 if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
1637 return false;
1638
1639 if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
1640 return false;
1641
1642 if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
1643 return false;
1644
1645 if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
1646 return false;
1647
1648 if (crtc_timing->v_total != hw_crtc_timing.v_total)
1649 return false;
1650
1651 if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
1652 return false;
1653
1654 if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
1655 return false;
1656
1657 if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
1658 return false;
1659
1660 if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
1661 return false;
1662
1663 if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
1664 return false;
1665
1666 /* block DSC for now, as VBIOS does not currently support DSC timings */
1667 if (crtc_timing->flags.DSC)
1668 return false;
1669
1670 if (dc_is_dp_signal(link->connector_signal)) {
1671 unsigned int pix_clk_100hz;
1672 uint32_t numOdmPipes = 1;
1673 uint32_t id_src[4] = {0};
1674
1675 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
1676 dc->res_pool->dp_clock_source,
1677 tg_inst, &pix_clk_100hz);
1678
1679 if (tg->funcs->get_optc_source)
1680 tg->funcs->get_optc_source(tg,
1681 &numOdmPipes, &id_src[0], &id_src[1]);
1682
1683 if (numOdmPipes == 2)
1684 pix_clk_100hz *= 2;
1685 if (numOdmPipes == 4)
1686 pix_clk_100hz *= 4;
1687
1688 // Note: In rare cases, HW pixclk may differ from crtc's pixclk
1689 // slightly due to rounding issues in 10 kHz units.
1690 if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
1691 return false;
1692
1693 if (!se->funcs->dp_get_pixel_format)
1694 return false;
1695
1696 if (!se->funcs->dp_get_pixel_format(
1697 se,
1698 &hw_crtc_timing.pixel_encoding,
1699 &hw_crtc_timing.display_color_depth))
1700 return false;
1701
1702 if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
1703 return false;
1704
1705 if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
1706 return false;
1707 }
1708
1709 if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
1710 return false;
1711 }
1712
1713 if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)
1714 return false;
1715
1716 if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) {
1717 DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
1718 return false;
1719 }
1720
1721 return true;
1722 }
1723
static inline bool should_update_pipe_for_stream(
1725 struct dc_state *context,
1726 struct pipe_ctx *pipe_ctx,
1727 struct dc_stream_state *stream)
1728 {
1729 return (pipe_ctx->stream && pipe_ctx->stream == stream);
1730 }
1731
static inline bool should_update_pipe_for_plane(
1733 struct dc_state *context,
1734 struct pipe_ctx *pipe_ctx,
1735 struct dc_plane_state *plane_state)
1736 {
1737 return (pipe_ctx->plane_state == plane_state);
1738 }
1739
void dc_enable_stereo(
1741 struct dc *dc,
1742 struct dc_state *context,
1743 struct dc_stream_state *streams[],
1744 uint8_t stream_count)
1745 {
1746 int i, j;
1747 struct pipe_ctx *pipe;
1748
1749 for (i = 0; i < MAX_PIPES; i++) {
1750 if (context != NULL) {
1751 pipe = &context->res_ctx.pipe_ctx[i];
1752 } else {
1753 context = dc->current_state;
1754 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1755 }
1756
1757 for (j = 0; pipe && j < stream_count; j++) {
1758 if (should_update_pipe_for_stream(context, pipe, streams[j]) &&
1759 dc->hwss.setup_stereo)
1760 dc->hwss.setup_stereo(pipe, dc);
1761 }
1762 }
1763 }
1764
void dc_trigger_sync(struct dc *dc, struct dc_state *context)
1766 {
1767 if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
1768 enable_timing_multisync(dc, context);
1769 program_timing_sync(dc, context);
1770 }
1771 }
1772
1773 static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
1774 {
1775 int i;
1776 unsigned int stream_mask = 0;
1777
1778 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1779 if (context->res_ctx.pipe_ctx[i].stream)
1780 stream_mask |= 1 << i;
1781 }
1782
1783 return stream_mask;
1784 }
1785
1786 void dc_z10_restore(const struct dc *dc)
1787 {
1788 if (dc->hwss.z10_restore)
1789 dc->hwss.z10_restore(dc);
1790 }
1791
1792 void dc_z10_save_init(struct dc *dc)
1793 {
1794 if (dc->hwss.z10_save_init)
1795 dc->hwss.z10_save_init(dc);
1796 }
1797
1798 /**
1799 * dc_commit_state_no_check - Apply context to the hardware
1800 *
1801 * @dc: DC object with the current status to be updated
1802 * @context: New state that will become the current status at the end of this function
1803 *
1804 * Applies the given context to the hardware and copies it into the current context.
1805 * It's up to the user to release the src context afterwards.
1806 *
1807 * Return: an enum dc_status result code for the operation
1808 */
1809 static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
1810 {
1811 struct dc_bios *dcb = dc->ctx->dc_bios;
1812 enum dc_status result = DC_ERROR_UNEXPECTED;
1813 struct pipe_ctx *pipe;
1814 int i, k, l;
1815 struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
1816 struct dc_state *old_state;
1817 bool subvp_prev_use = false;
1818
1819 dc_z10_restore(dc);
1820 dc_allow_idle_optimizations(dc, false);
1821
1822 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1823 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1824
1825 /* Check old context for SubVP */
1826 subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
1827 if (subvp_prev_use)
1828 break;
1829 }
1830
1831 for (i = 0; i < context->stream_count; i++)
1832 dc_streams[i] = context->streams[i];
1833
1834 if (!dcb->funcs->is_accelerated_mode(dcb)) {
1835 disable_vbios_mode_if_required(dc, context);
1836 dc->hwss.enable_accelerated_mode(dc, context);
1837 }
1838
1839 if (context->stream_count > get_seamless_boot_stream_count(context) ||
1840 context->stream_count == 0)
1841 dc->hwss.prepare_bandwidth(dc, context);
1842
1843 /* When SubVP is active, all HW programming must be done while
1844 * SubVP lock is acquired
1845 */
1846 if (dc->hwss.subvp_pipe_control_lock)
1847 dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
1848
1849 if (dc->hwss.update_dsc_pg)
1850 dc->hwss.update_dsc_pg(dc, context, false);
1851
1852 disable_dangling_plane(dc, context);
1853 /* re-program planes for existing stream, in case we need to
1854 * free up plane resource for later use
1855 */
1856 if (dc->hwss.apply_ctx_for_surface) {
1857 for (i = 0; i < context->stream_count; i++) {
1858 if (context->streams[i]->mode_changed)
1859 continue;
1860 apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
1861 dc->hwss.apply_ctx_for_surface(
1862 dc, context->streams[i],
1863 context->stream_status[i].plane_count,
1864 context); /* use new pipe config in new context */
1865 apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
1866 dc->hwss.post_unlock_program_front_end(dc, context);
1867 }
1868 }
1869
1870 /* Program hardware */
1871 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1872 pipe = &context->res_ctx.pipe_ctx[i];
1873 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
1874 }
1875
1876 result = dc->hwss.apply_ctx_to_hw(dc, context);
1877
1878 if (result != DC_OK) {
1879 /* Application of dc_state to hardware stopped. */
1880 dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
1881 return result;
1882 }
1883
1884 dc_trigger_sync(dc, context);
1885
1886 /* Full update should unconditionally be triggered when dc_commit_state_no_check is called */
1887 for (i = 0; i < context->stream_count; i++) {
1888 uint32_t prev_dsc_changed = context->streams[i]->update_flags.bits.dsc_changed;
1889
1890 context->streams[i]->update_flags.raw = 0xFFFFFFFF;
1891 context->streams[i]->update_flags.bits.dsc_changed = prev_dsc_changed;
1892 }
1893
1894 /* Program all planes within new context*/
1895 if (dc->hwss.program_front_end_for_ctx) {
1896 dc->hwss.interdependent_update_lock(dc, context, true);
1897 dc->hwss.program_front_end_for_ctx(dc, context);
1898 dc->hwss.interdependent_update_lock(dc, context, false);
1899 dc->hwss.post_unlock_program_front_end(dc, context);
1900 }
1901
1902 if (dc->hwss.commit_subvp_config)
1903 dc->hwss.commit_subvp_config(dc, context);
1904 if (dc->hwss.subvp_pipe_control_lock)
1905 dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use);
1906
1907 for (i = 0; i < context->stream_count; i++) {
1908 const struct dc_link *link = context->streams[i]->link;
1909
1910 if (!context->streams[i]->mode_changed)
1911 continue;
1912
1913 if (dc->hwss.apply_ctx_for_surface) {
1914 apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
1915 dc->hwss.apply_ctx_for_surface(
1916 dc, context->streams[i],
1917 context->stream_status[i].plane_count,
1918 context);
1919 apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
1920 dc->hwss.post_unlock_program_front_end(dc, context);
1921 }
1922
1923 /*
1924 * enable stereo
1925 * TODO rework dc_enable_stereo call to work with validation sets?
1926 */
1927 for (k = 0; k < MAX_PIPES; k++) {
1928 pipe = &context->res_ctx.pipe_ctx[k];
1929
1930 for (l = 0 ; pipe && l < context->stream_count; l++) {
1931 if (context->streams[l] &&
1932 context->streams[l] == pipe->stream &&
1933 dc->hwss.setup_stereo)
1934 dc->hwss.setup_stereo(pipe, dc);
1935 }
1936 }
1937
1938 CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
1939 context->streams[i]->timing.h_addressable,
1940 context->streams[i]->timing.v_addressable,
1941 context->streams[i]->timing.h_total,
1942 context->streams[i]->timing.v_total,
1943 context->streams[i]->timing.pix_clk_100hz / 10);
1944 }
1945
1946 dc_enable_stereo(dc, context, dc_streams, context->stream_count);
1947
1948 if (context->stream_count > get_seamless_boot_stream_count(context) ||
1949 context->stream_count == 0) {
1950 /* Must wait for no flips to be pending before doing optimize bw */
1951 wait_for_no_pipes_pending(dc, context);
1952 /* pplib is notified if disp_num changed */
1953 dc->hwss.optimize_bandwidth(dc, context);
1954 /* Need to do otg sync again as otg could be out of sync due to otg
1955 * workaround applied during clock update
1956 */
1957 dc_trigger_sync(dc, context);
1958 }
1959
1960 if (dc->hwss.update_dsc_pg)
1961 dc->hwss.update_dsc_pg(dc, context, true);
1962
1963 if (dc->ctx->dce_version >= DCE_VERSION_MAX)
1964 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
1965 else
1966 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
1967
1968 context->stream_mask = get_stream_mask(dc, context);
1969
1970 if (context->stream_mask != dc->current_state->stream_mask)
1971 dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);
1972
1973 for (i = 0; i < context->stream_count; i++)
1974 context->streams[i]->mode_changed = false;
1975
1976 /* Clear update flags that were set earlier to avoid redundant programming */
1977 for (i = 0; i < context->stream_count; i++) {
1978 context->streams[i]->update_flags.raw = 0x0;
1979 }
1980
1981 old_state = dc->current_state;
1982 dc->current_state = context;
1983
1984 dc_release_state(old_state);
1985
1986 dc_retain_state(dc->current_state);
1987
1988 return result;
1989 }
1990
1991 static bool commit_minimal_transition_state(struct dc *dc,
1992 struct dc_state *transition_base_context);
1993
1994 /**
1995 * dc_commit_streams - Commit current stream state
1996 *
1997 * @dc: DC object with the commit state to be configured in the hardware
1998 * @streams: Array with a list of stream state
1999 * @stream_count: Total of streams
2000 *
2001 * Function responsible for committing the stream changes to the hardware.
2002 *
2003 * Return:
2004 * Return DC_OK if everything works as expected; otherwise, return a dc_status
2005 * code.
2006 */
2007 enum dc_status dc_commit_streams(struct dc *dc,
2008 struct dc_stream_state *streams[],
2009 uint8_t stream_count)
2010 {
2011 int i, j;
2012 struct dc_state *context;
2013 enum dc_status res = DC_OK;
2014 struct dc_validation_set set[MAX_STREAMS] = {0};
2015 struct pipe_ctx *pipe;
2016 bool handle_exit_odm2to1 = false;
2017
2018 if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW)
2019 return res;
2020
2021 if (!streams_changed(dc, streams, stream_count))
2022 return res;
2023
2024 DC_LOG_DC("%s: %d streams\n", __func__, stream_count);
2025
2026 for (i = 0; i < stream_count; i++) {
2027 struct dc_stream_state *stream = streams[i];
2028 struct dc_stream_status *status = dc_stream_get_status(stream);
2029
2030 dc_stream_log(dc, stream);
2031
2032 set[i].stream = stream;
2033
2034 if (status) {
2035 set[i].plane_count = status->plane_count;
2036 for (j = 0; j < status->plane_count; j++)
2037 set[i].plane_states[j] = status->plane_states[j];
2038 }
2039 }
2040
2041 /* The ODM Combine 2:1 power optimization is only applied in the single-stream
2042 * scenario; it uses more pipes than needed to reduce power consumption.
2043 * We need to switch off this feature to make room for new streams.
2044 */
2045 if (stream_count > dc->current_state->stream_count &&
2046 dc->current_state->stream_count == 1) {
2047 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2048 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
2049 if (pipe->next_odm_pipe)
2050 handle_exit_odm2to1 = true;
2051 }
2052 }
2053
2054 if (handle_exit_odm2to1)
2055 res = commit_minimal_transition_state(dc, dc->current_state);
2056
2057 context = dc_create_state(dc);
2058 if (!context)
2059 goto context_alloc_fail;
2060
2061 dc_resource_state_copy_construct_current(dc, context);
2062
2063 res = dc_validate_with_context(dc, set, stream_count, context, false);
2064 if (res != DC_OK) {
2065 BREAK_TO_DEBUGGER();
2066 goto fail;
2067 }
2068
2069 res = dc_commit_state_no_check(dc, context);
2070
2071 for (i = 0; i < stream_count; i++) {
2072 for (j = 0; j < context->stream_count; j++) {
2073 if (streams[i]->stream_id == context->streams[j]->stream_id)
2074 streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst;
2075
2076 if (dc_is_embedded_signal(streams[i]->signal)) {
2077 struct dc_stream_status *status = dc_stream_get_status_from_state(context, streams[i]);
2078
2079 if (dc->hwss.is_abm_supported)
2080 status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]);
2081 else
2082 status->is_abm_supported = true;
2083 }
2084 }
2085 }
2086
2087 fail:
2088 dc_release_state(context);
2089
2090 context_alloc_fail:
2091
2092 DC_LOG_DC("%s Finished.\n", __func__);
2093
2094 return res;
2095 }
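/*
 * Illustrative usage sketch (not part of the driver): a DM-side caller would
 * typically gather the stream states it wants active and hand them to
 * dc_commit_streams() in one call, checking the returned dc_status. The
 * variable names ("streams", "count", "primary_stream") are hypothetical.
 *
 *	struct dc_stream_state *streams[MAX_STREAMS];
 *	uint8_t count = 0;
 *
 *	streams[count++] = primary_stream;
 *	if (dc_commit_streams(dc, streams, count) != DC_OK) {
 *		// handle the failed commit, e.g. log and keep the previous state
 *	}
 */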
2096
2097 bool dc_acquire_release_mpc_3dlut(
2098 struct dc *dc, bool acquire,
2099 struct dc_stream_state *stream,
2100 struct dc_3dlut **lut,
2101 struct dc_transfer_func **shaper)
2102 {
2103 int pipe_idx;
2104 bool ret = false;
2105 bool found_pipe_idx = false;
2106 const struct resource_pool *pool = dc->res_pool;
2107 struct resource_context *res_ctx = &dc->current_state->res_ctx;
2108 int mpcc_id = 0;
2109
2110 if (pool && res_ctx) {
2111 if (acquire) {
2112 /*find pipe idx for the given stream*/
2113 for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
2114 if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
2115 found_pipe_idx = true;
2116 mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
2117 break;
2118 }
2119 }
2120 } else
2121 found_pipe_idx = true; /* for release, pipe_idx is not required */
2122
2123 if (found_pipe_idx) {
2124 if (acquire && pool->funcs->acquire_post_bldn_3dlut)
2125 ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
2126 else if (!acquire && pool->funcs->release_post_bldn_3dlut)
2127 ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
2128 }
2129 }
2130 return ret;
2131 }
2132
2133 static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
2134 {
2135 int i;
2136 struct pipe_ctx *pipe;
2137
2138 for (i = 0; i < MAX_PIPES; i++) {
2139 pipe = &context->res_ctx.pipe_ctx[i];
2140
2141 // Don't check flip pending on phantom pipes
2142 if (!pipe->plane_state || (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM))
2143 continue;
2144
2145 /* Must set to false to start with, due to OR in update function */
2146 pipe->plane_state->status.is_flip_pending = false;
2147 dc->hwss.update_pending_status(pipe);
2148 if (pipe->plane_state->status.is_flip_pending)
2149 return true;
2150 }
2151 return false;
2152 }
2153
2154 /* Perform updates here which need to be deferred until next vupdate
2155 *
2156 * i.e. blnd lut, 3dlut, and shaper lut bypass regs are double buffered
2157 * but forcing lut memory to shutdown state is immediate. This causes
2158 * single frame corruption as lut gets disabled mid-frame unless shutdown
2159 * is deferred until after entering bypass.
2160 */
2161 static void process_deferred_updates(struct dc *dc)
2162 {
2163 int i = 0;
2164
2165 if (dc->debug.enable_mem_low_power.bits.cm) {
2166 ASSERT(dc->dcn_ip->max_num_dpp);
2167 for (i = 0; i < dc->dcn_ip->max_num_dpp; i++)
2168 if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update)
2169 dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]);
2170 }
2171 }
2172
2173 void dc_post_update_surfaces_to_stream(struct dc *dc)
2174 {
2175 int i;
2176 struct dc_state *context = dc->current_state;
2177
2178 if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0)
2179 return;
2180
2181 post_surface_trace(dc);
2182
2183 /*
2184 * Only relevant for DCN behavior where we can guarantee the optimization
2185 * is safe to apply - retain the legacy behavior for DCE.
2186 */
2187
2188 if (dc->ctx->dce_version < DCE_VERSION_MAX)
2189 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
2190 else {
2191 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
2192
2193 if (is_flip_pending_in_pipes(dc, context))
2194 return;
2195
2196 for (i = 0; i < dc->res_pool->pipe_count; i++)
2197 if (context->res_ctx.pipe_ctx[i].stream == NULL ||
2198 context->res_ctx.pipe_ctx[i].plane_state == NULL) {
2199 context->res_ctx.pipe_ctx[i].pipe_idx = i;
2200 dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
2201 }
2202
2203 process_deferred_updates(dc);
2204
2205 dc->hwss.optimize_bandwidth(dc, context);
2206
2207 if (dc->hwss.update_dsc_pg)
2208 dc->hwss.update_dsc_pg(dc, context, true);
2209 }
2210
2211 dc->optimized_required = false;
2212 dc->wm_optimized_required = false;
2213 }
2214
2215 static void init_state(struct dc *dc, struct dc_state *context)
2216 {
2217 /* Each context must have its own instance of VBA, and in order to
2218 * initialize and obtain IP and SOC, the base DML instance from DC is
2219 * initially copied into every context.
2220 */
2221 memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
2222 }
2223
2224 struct dc_state *dc_create_state(struct dc *dc)
2225 {
2226 struct dc_state *context = kvzalloc(sizeof(struct dc_state),
2227 GFP_KERNEL);
2228
2229 if (!context)
2230 return NULL;
2231
2232 init_state(dc, context);
2233
2234 kref_init(&context->refcount);
2235
2236 return context;
2237 }
2238
2239 struct dc_state *dc_copy_state(struct dc_state *src_ctx)
2240 {
2241 int i, j;
2242 struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
2243
2244 if (!new_ctx)
2245 return NULL;
2246 memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
2247
2248 for (i = 0; i < MAX_PIPES; i++) {
2249 struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
2250
2251 if (cur_pipe->top_pipe)
2252 cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
2253
2254 if (cur_pipe->bottom_pipe)
2255 cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
2256
2257 if (cur_pipe->prev_odm_pipe)
2258 cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
2259
2260 if (cur_pipe->next_odm_pipe)
2261 cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
2262
2263 }
2264
2265 for (i = 0; i < new_ctx->stream_count; i++) {
2266 dc_stream_retain(new_ctx->streams[i]);
2267 for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
2268 dc_plane_state_retain(
2269 new_ctx->stream_status[i].plane_states[j]);
2270 }
2271
2272 kref_init(&new_ctx->refcount);
2273
2274 return new_ctx;
2275 }
2276
2277 void dc_retain_state(struct dc_state *context)
2278 {
2279 kref_get(&context->refcount);
2280 }
2281
2282 static void dc_state_free(struct kref *kref)
2283 {
2284 struct dc_state *context = container_of(kref, struct dc_state, refcount);
2285 dc_resource_state_destruct(context);
2286 kvfree(context);
2287 }
2288
2289 void dc_release_state(struct dc_state *context)
2290 {
2291 kref_put(&context->refcount, dc_state_free);
2292 }
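/*
 * Illustrative dc_state lifecycle sketch (not part of the driver): contexts
 * are refcounted, so a caller building a candidate state typically creates
 * it, seeds it from the current state, and releases it when done;
 * dc_retain_state() balances any additional reference taken. The variable
 * name "ctx" is hypothetical.
 *
 *	struct dc_state *ctx = dc_create_state(dc);
 *
 *	if (ctx) {
 *		dc_resource_state_copy_construct_current(dc, ctx);
 *		// ... validate and/or commit ctx ...
 *		dc_release_state(ctx);	// drops the kref taken by dc_create_state()
 *	}
 */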
2293
2294 bool dc_set_generic_gpio_for_stereo(bool enable,
2295 struct gpio_service *gpio_service)
2296 {
2297 enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
2298 struct gpio_pin_info pin_info;
2299 struct gpio *generic;
2300 struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
2301 GFP_KERNEL);
2302
2303 if (!config)
2304 return false;
2305 pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);
2306
2307 if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
2308 kfree(config);
2309 return false;
2310 } else {
2311 generic = dal_gpio_service_create_generic_mux(
2312 gpio_service,
2313 pin_info.offset,
2314 pin_info.mask);
2315 }
2316
2317 if (!generic) {
2318 kfree(config);
2319 return false;
2320 }
2321
2322 gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);
2323
2324 config->enable_output_from_mux = enable;
2325 config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;
2326
2327 if (gpio_result == GPIO_RESULT_OK)
2328 gpio_result = dal_mux_setup_config(generic, config);
2329
2330 if (gpio_result == GPIO_RESULT_OK) {
2331 dal_gpio_close(generic);
2332 dal_gpio_destroy_generic_mux(&generic);
2333 kfree(config);
2334 return true;
2335 } else {
2336 dal_gpio_close(generic);
2337 dal_gpio_destroy_generic_mux(&generic);
2338 kfree(config);
2339 return false;
2340 }
2341 }
2342
2343 static bool is_surface_in_context(
2344 const struct dc_state *context,
2345 const struct dc_plane_state *plane_state)
2346 {
2347 int j;
2348
2349 for (j = 0; j < MAX_PIPES; j++) {
2350 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2351
2352 if (plane_state == pipe_ctx->plane_state) {
2353 return true;
2354 }
2355 }
2356
2357 return false;
2358 }
2359
2360 static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
2361 {
2362 union surface_update_flags *update_flags = &u->surface->update_flags;
2363 enum surface_update_type update_type = UPDATE_TYPE_FAST;
2364
2365 if (!u->plane_info)
2366 return UPDATE_TYPE_FAST;
2367
2368 if (u->plane_info->color_space != u->surface->color_space) {
2369 update_flags->bits.color_space_change = 1;
2370 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2371 }
2372
2373 if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
2374 update_flags->bits.horizontal_mirror_change = 1;
2375 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2376 }
2377
2378 if (u->plane_info->rotation != u->surface->rotation) {
2379 update_flags->bits.rotation_change = 1;
2380 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2381 }
2382
2383 if (u->plane_info->format != u->surface->format) {
2384 update_flags->bits.pixel_format_change = 1;
2385 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2386 }
2387
2388 if (u->plane_info->stereo_format != u->surface->stereo_format) {
2389 update_flags->bits.stereo_format_change = 1;
2390 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2391 }
2392
2393 if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
2394 update_flags->bits.per_pixel_alpha_change = 1;
2395 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2396 }
2397
2398 if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
2399 update_flags->bits.global_alpha_change = 1;
2400 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2401 }
2402
2403 if (u->plane_info->dcc.enable != u->surface->dcc.enable
2404 || u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk
2405 || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
2406 /* During DCC on/off, stutter period is calculated before
2407 * DCC has fully transitioned. This results in incorrect
2408 * stutter period calculation. Triggering a full update will
2409 * recalculate stutter period.
2410 */
2411 update_flags->bits.dcc_change = 1;
2412 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2413 }
2414
2415 if (resource_pixel_format_to_bpp(u->plane_info->format) !=
2416 resource_pixel_format_to_bpp(u->surface->format)) {
2417 /* different bytes per element will require full bandwidth
2418 * and DML calculation
2419 */
2420 update_flags->bits.bpp_change = 1;
2421 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2422 }
2423
2424 if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
2425 || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
2426 update_flags->bits.plane_size_change = 1;
2427 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2428 }
2429
2430
2431 if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
2432 sizeof(union dc_tiling_info)) != 0) {
2433 update_flags->bits.swizzle_change = 1;
2434 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2435
2436 /* todo: the checks below are HW dependent, we should add a hook to
2437 * the DCE/N resource and validate them there.
2438 */
2439 if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
2440 /* swizzled mode requires RQ to be set up properly,
2441 * thus we need to run DML to calculate RQ settings
2442 */
2443 update_flags->bits.bandwidth_change = 1;
2444 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2445 }
2446 }
2447
2448 /* This should be UPDATE_TYPE_FAST if nothing has changed. */
2449 return update_type;
2450 }
2451
2452 static enum surface_update_type get_scaling_info_update_type(
2453 const struct dc *dc,
2454 const struct dc_surface_update *u)
2455 {
2456 union surface_update_flags *update_flags = &u->surface->update_flags;
2457
2458 if (!u->scaling_info)
2459 return UPDATE_TYPE_FAST;
2460
2461 if (u->scaling_info->dst_rect.width != u->surface->dst_rect.width
2462 || u->scaling_info->dst_rect.height != u->surface->dst_rect.height
2463 || u->scaling_info->scaling_quality.integer_scaling !=
2464 u->surface->scaling_quality.integer_scaling
2465 ) {
2466 update_flags->bits.scaling_change = 1;
2467
2468 if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
2469 || u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
2470 && (u->scaling_info->dst_rect.width < u->surface->src_rect.width
2471 || u->scaling_info->dst_rect.height < u->surface->src_rect.height))
2472 /* Making dst rect smaller requires a bandwidth change */
2473 update_flags->bits.bandwidth_change = 1;
2474 }
2475
2476 if (u->scaling_info->src_rect.width != u->surface->src_rect.width
2477 || u->scaling_info->src_rect.height != u->surface->src_rect.height) {
2478
2479 update_flags->bits.scaling_change = 1;
2480 if (u->scaling_info->src_rect.width > u->surface->src_rect.width
2481 || u->scaling_info->src_rect.height > u->surface->src_rect.height)
2482 /* Making src rect bigger requires a bandwidth change */
2483 update_flags->bits.clock_change = 1;
2484 }
2485
2486 if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width &&
2487 (u->scaling_info->clip_rect.width > u->surface->clip_rect.width ||
2488 u->scaling_info->clip_rect.height > u->surface->clip_rect.height))
2489 /* Changing clip size of a large surface may result in MPC slice count change */
2490 update_flags->bits.bandwidth_change = 1;
2491
2492 if (u->scaling_info->src_rect.x != u->surface->src_rect.x
2493 || u->scaling_info->src_rect.y != u->surface->src_rect.y
2494 || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
2495 || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
2496 || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
2497 || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
2498 update_flags->bits.position_change = 1;
2499
2500 if (update_flags->bits.clock_change
2501 || update_flags->bits.bandwidth_change
2502 || update_flags->bits.scaling_change)
2503 return UPDATE_TYPE_FULL;
2504
2505 if (update_flags->bits.position_change)
2506 return UPDATE_TYPE_MED;
2507
2508 return UPDATE_TYPE_FAST;
2509 }
2510
2511 static enum surface_update_type det_surface_update(const struct dc *dc,
2512 const struct dc_surface_update *u)
2513 {
2514 const struct dc_state *context = dc->current_state;
2515 enum surface_update_type type;
2516 enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2517 union surface_update_flags *update_flags = &u->surface->update_flags;
2518
2519 if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
2520 update_flags->raw = 0xFFFFFFFF;
2521 return UPDATE_TYPE_FULL;
2522 }
2523
2524 update_flags->raw = 0; // Reset all flags
2525
2526 type = get_plane_info_update_type(u);
2527 elevate_update_type(&overall_type, type);
2528
2529 type = get_scaling_info_update_type(dc, u);
2530 elevate_update_type(&overall_type, type);
2531
2532 if (u->flip_addr) {
2533 update_flags->bits.addr_update = 1;
2534 if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) {
2535 update_flags->bits.tmz_changed = 1;
2536 elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
2537 }
2538 }
2539 if (u->in_transfer_func)
2540 update_flags->bits.in_transfer_func_change = 1;
2541
2542 if (u->input_csc_color_matrix)
2543 update_flags->bits.input_csc_change = 1;
2544
2545 if (u->coeff_reduction_factor)
2546 update_flags->bits.coeff_reduction_change = 1;
2547
2548 if (u->gamut_remap_matrix)
2549 update_flags->bits.gamut_remap_change = 1;
2550
2551 if (u->gamma) {
2552 enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
2553
2554 if (u->plane_info)
2555 format = u->plane_info->format;
2556 else if (u->surface)
2557 format = u->surface->format;
2558
2559 if (dce_use_lut(format))
2560 update_flags->bits.gamma_change = 1;
2561 }
2562
2563 if (u->lut3d_func || u->func_shaper)
2564 update_flags->bits.lut_3d = 1;
2565
2566 if (u->hdr_mult.value)
2567 if (u->hdr_mult.value != u->surface->hdr_mult.value) {
2568 update_flags->bits.hdr_mult = 1;
2569 elevate_update_type(&overall_type, UPDATE_TYPE_MED);
2570 }
2571
2572 if (update_flags->bits.in_transfer_func_change) {
2573 type = UPDATE_TYPE_MED;
2574 elevate_update_type(&overall_type, type);
2575 }
2576
2577 if (update_flags->bits.lut_3d) {
2578 type = UPDATE_TYPE_FULL;
2579 elevate_update_type(&overall_type, type);
2580 }
2581
2582 if (dc->debug.enable_legacy_fast_update &&
2583 (update_flags->bits.gamma_change ||
2584 update_flags->bits.gamut_remap_change ||
2585 update_flags->bits.input_csc_change ||
2586 update_flags->bits.coeff_reduction_change)) {
2587 type = UPDATE_TYPE_FULL;
2588 elevate_update_type(&overall_type, type);
2589 }
2590 return overall_type;
2591 }
2592
2593 static enum surface_update_type check_update_surfaces_for_stream(
2594 struct dc *dc,
2595 struct dc_surface_update *updates,
2596 int surface_count,
2597 struct dc_stream_update *stream_update,
2598 const struct dc_stream_status *stream_status)
2599 {
2600 int i;
2601 enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2602
2603 if (dc->idle_optimizations_allowed)
2604 overall_type = UPDATE_TYPE_FULL;
2605
2606 if (stream_status == NULL || stream_status->plane_count != surface_count)
2607 overall_type = UPDATE_TYPE_FULL;
2608
2609 if (stream_update && stream_update->pending_test_pattern) {
2610 overall_type = UPDATE_TYPE_FULL;
2611 }
2612
2613 /* some stream updates require passive update */
2614 if (stream_update) {
2615 union stream_update_flags *su_flags = &stream_update->stream->update_flags;
2616
2617 if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
2618 (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
2619 stream_update->integer_scaling_update)
2620 su_flags->bits.scaling = 1;
2621
2622 if (dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
2623 su_flags->bits.out_tf = 1;
2624
2625 if (stream_update->abm_level)
2626 su_flags->bits.abm_level = 1;
2627
2628 if (stream_update->dpms_off)
2629 su_flags->bits.dpms_off = 1;
2630
2631 if (stream_update->gamut_remap)
2632 su_flags->bits.gamut_remap = 1;
2633
2634 if (stream_update->wb_update)
2635 su_flags->bits.wb_update = 1;
2636
2637 if (stream_update->dsc_config)
2638 su_flags->bits.dsc_changed = 1;
2639
2640 if (stream_update->mst_bw_update)
2641 su_flags->bits.mst_bw = 1;
2642
2643 if (stream_update->stream && stream_update->stream->freesync_on_desktop &&
2644 (stream_update->vrr_infopacket || stream_update->allow_freesync ||
2645 stream_update->vrr_active_variable || stream_update->vrr_active_fixed))
2646 su_flags->bits.fams_changed = 1;
2647
2648 if (su_flags->raw != 0)
2649 overall_type = UPDATE_TYPE_FULL;
2650
2651 if (stream_update->output_csc_transform || stream_update->output_color_space)
2652 su_flags->bits.out_csc = 1;
2653
2654 /* Output transfer function changes do not require bandwidth recalculation,
2655 * so don't trigger a full update
2656 */
2657 if (!dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
2658 su_flags->bits.out_tf = 1;
2659 }
2660
2661 for (i = 0 ; i < surface_count; i++) {
2662 enum surface_update_type type =
2663 det_surface_update(dc, &updates[i]);
2664
2665 elevate_update_type(&overall_type, type);
2666 }
2667
2668 return overall_type;
2669 }
2670
2671 /*
2672 * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
2673 *
2674 * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
2675 */
2676 enum surface_update_type dc_check_update_surfaces_for_stream(
2677 struct dc *dc,
2678 struct dc_surface_update *updates,
2679 int surface_count,
2680 struct dc_stream_update *stream_update,
2681 const struct dc_stream_status *stream_status)
2682 {
2683 int i;
2684 enum surface_update_type type;
2685
2686 if (stream_update)
2687 stream_update->stream->update_flags.raw = 0;
2688 for (i = 0; i < surface_count; i++)
2689 updates[i].surface->update_flags.raw = 0;
2690
2691 type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
2692 if (type == UPDATE_TYPE_FULL) {
2693 if (stream_update) {
2694 uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
2695 stream_update->stream->update_flags.raw = 0xFFFFFFFF;
2696 stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
2697 }
2698 for (i = 0; i < surface_count; i++)
2699 updates[i].surface->update_flags.raw = 0xFFFFFFFF;
2700 }
2701
2702 if (type == UPDATE_TYPE_FAST) {
2703 // If there's an available clock comparator, we use that.
2704 if (dc->clk_mgr->funcs->are_clock_states_equal) {
2705 if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
2706 dc->optimized_required = true;
2707 // Else we fall back to a mem compare.
2708 } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
2709 dc->optimized_required = true;
2710 }
2711
2712 dc->optimized_required |= dc->wm_optimized_required;
2713 }
2714
2715 return type;
2716 }
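/*
 * Illustrative sketch (not part of the driver): callers use the returned
 * surface_update_type to decide how much reprogramming a flip needs. A FAST
 * update can be applied against the current state directly, while a FULL
 * update requires building and validating a new dc_state first. The names
 * "srf_updates", "surface_count", "stream_update" and "stream" are
 * hypothetical caller-side variables.
 *
 *	enum surface_update_type type;
 *
 *	type = dc_check_update_surfaces_for_stream(dc, srf_updates, surface_count,
 *			stream_update, dc_stream_get_status(stream));
 *	if (type == UPDATE_TYPE_FULL) {
 *		// allocate a new context, re-add planes and validate bandwidth
 *	} else {
 *		// program the update against dc->current_state
 *	}
 */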
2717
2718 static struct dc_stream_status *stream_get_status(
2719 struct dc_state *ctx,
2720 struct dc_stream_state *stream)
2721 {
2722 uint8_t i;
2723
2724 for (i = 0; i < ctx->stream_count; i++) {
2725 if (stream == ctx->streams[i]) {
2726 return &ctx->stream_status[i];
2727 }
2728 }
2729
2730 return NULL;
2731 }
2732
2733 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
2734
2735 static void copy_surface_update_to_plane(
2736 struct dc_plane_state *surface,
2737 struct dc_surface_update *srf_update)
2738 {
2739 if (srf_update->flip_addr) {
2740 surface->address = srf_update->flip_addr->address;
2741 surface->flip_immediate =
2742 srf_update->flip_addr->flip_immediate;
2743 surface->time.time_elapsed_in_us[surface->time.index] =
2744 srf_update->flip_addr->flip_timestamp_in_us -
2745 surface->time.prev_update_time_in_us;
2746 surface->time.prev_update_time_in_us =
2747 srf_update->flip_addr->flip_timestamp_in_us;
2748 surface->time.index++;
2749 if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
2750 surface->time.index = 0;
2751
2752 surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
2753 }
2754
2755 if (srf_update->scaling_info) {
2756 surface->scaling_quality =
2757 srf_update->scaling_info->scaling_quality;
2758 surface->dst_rect =
2759 srf_update->scaling_info->dst_rect;
2760 surface->src_rect =
2761 srf_update->scaling_info->src_rect;
2762 surface->clip_rect =
2763 srf_update->scaling_info->clip_rect;
2764 }
2765
2766 if (srf_update->plane_info) {
2767 surface->color_space =
2768 srf_update->plane_info->color_space;
2769 surface->format =
2770 srf_update->plane_info->format;
2771 surface->plane_size =
2772 srf_update->plane_info->plane_size;
2773 surface->rotation =
2774 srf_update->plane_info->rotation;
2775 surface->horizontal_mirror =
2776 srf_update->plane_info->horizontal_mirror;
2777 surface->stereo_format =
2778 srf_update->plane_info->stereo_format;
2779 surface->tiling_info =
2780 srf_update->plane_info->tiling_info;
2781 surface->visible =
2782 srf_update->plane_info->visible;
2783 surface->per_pixel_alpha =
2784 srf_update->plane_info->per_pixel_alpha;
2785 surface->global_alpha =
2786 srf_update->plane_info->global_alpha;
2787 surface->global_alpha_value =
2788 srf_update->plane_info->global_alpha_value;
2789 surface->dcc =
2790 srf_update->plane_info->dcc;
2791 surface->layer_index =
2792 srf_update->plane_info->layer_index;
2793 }
2794
2795 if (srf_update->gamma &&
2796 (surface->gamma_correction !=
2797 srf_update->gamma)) {
2798 memcpy(&surface->gamma_correction->entries,
2799 &srf_update->gamma->entries,
2800 sizeof(struct dc_gamma_entries));
2801 surface->gamma_correction->is_identity =
2802 srf_update->gamma->is_identity;
2803 surface->gamma_correction->num_entries =
2804 srf_update->gamma->num_entries;
2805 surface->gamma_correction->type =
2806 srf_update->gamma->type;
2807 }
2808
2809 if (srf_update->in_transfer_func &&
2810 (surface->in_transfer_func !=
2811 srf_update->in_transfer_func)) {
2812 surface->in_transfer_func->sdr_ref_white_level =
2813 srf_update->in_transfer_func->sdr_ref_white_level;
2814 surface->in_transfer_func->tf =
2815 srf_update->in_transfer_func->tf;
2816 surface->in_transfer_func->type =
2817 srf_update->in_transfer_func->type;
2818 memcpy(&surface->in_transfer_func->tf_pts,
2819 &srf_update->in_transfer_func->tf_pts,
2820 sizeof(struct dc_transfer_func_distributed_points));
2821 }
2822
2823 if (srf_update->func_shaper &&
2824 (surface->in_shaper_func !=
2825 srf_update->func_shaper))
2826 memcpy(surface->in_shaper_func, srf_update->func_shaper,
2827 sizeof(*surface->in_shaper_func));
2828
2829 if (srf_update->lut3d_func &&
2830 (surface->lut3d_func !=
2831 srf_update->lut3d_func))
2832 memcpy(surface->lut3d_func, srf_update->lut3d_func,
2833 sizeof(*surface->lut3d_func));
2834
2835 if (srf_update->hdr_mult.value)
2836 surface->hdr_mult =
2837 srf_update->hdr_mult;
2838
2839 if (srf_update->blend_tf &&
2840 (surface->blend_tf !=
2841 srf_update->blend_tf))
2842 memcpy(surface->blend_tf, srf_update->blend_tf,
2843 sizeof(*surface->blend_tf));
2844
2845 if (srf_update->input_csc_color_matrix)
2846 surface->input_csc_color_matrix =
2847 *srf_update->input_csc_color_matrix;
2848
2849 if (srf_update->coeff_reduction_factor)
2850 surface->coeff_reduction_factor =
2851 *srf_update->coeff_reduction_factor;
2852
2853 if (srf_update->gamut_remap_matrix)
2854 surface->gamut_remap_matrix =
2855 *srf_update->gamut_remap_matrix;
2856 }
2857
2858 static void copy_stream_update_to_stream(struct dc *dc,
2859 struct dc_state *context,
2860 struct dc_stream_state *stream,
2861 struct dc_stream_update *update)
2862 {
2863 struct dc_context *dc_ctx = dc->ctx;
2864
2865 if (update == NULL || stream == NULL)
2866 return;
2867
2868 if (update->src.height && update->src.width)
2869 stream->src = update->src;
2870
2871 if (update->dst.height && update->dst.width)
2872 stream->dst = update->dst;
2873
2874 if (update->out_transfer_func &&
2875 stream->out_transfer_func != update->out_transfer_func) {
2876 stream->out_transfer_func->sdr_ref_white_level =
2877 update->out_transfer_func->sdr_ref_white_level;
2878 stream->out_transfer_func->tf = update->out_transfer_func->tf;
2879 stream->out_transfer_func->type =
2880 update->out_transfer_func->type;
2881 memcpy(&stream->out_transfer_func->tf_pts,
2882 &update->out_transfer_func->tf_pts,
2883 sizeof(struct dc_transfer_func_distributed_points));
2884 }
2885
2886 if (update->hdr_static_metadata)
2887 stream->hdr_static_metadata = *update->hdr_static_metadata;
2888
2889 if (update->abm_level)
2890 stream->abm_level = *update->abm_level;
2891
2892 if (update->periodic_interrupt)
2893 stream->periodic_interrupt = *update->periodic_interrupt;
2894
2895 if (update->gamut_remap)
2896 stream->gamut_remap_matrix = *update->gamut_remap;
2897
2898 /* Note: updating this after mode set is currently not a use case;
2899 * however, if it arises, OCSC would need to be reprogrammed at a
2900 * minimum
2901 */
2902 if (update->output_color_space)
2903 stream->output_color_space = *update->output_color_space;
2904
2905 if (update->output_csc_transform)
2906 stream->csc_color_matrix = *update->output_csc_transform;
2907
2908 if (update->vrr_infopacket)
2909 stream->vrr_infopacket = *update->vrr_infopacket;
2910
2911 if (update->allow_freesync)
2912 stream->allow_freesync = *update->allow_freesync;
2913
2914 if (update->vrr_active_variable)
2915 stream->vrr_active_variable = *update->vrr_active_variable;
2916
2917 if (update->vrr_active_fixed)
2918 stream->vrr_active_fixed = *update->vrr_active_fixed;
2919
2920 if (update->crtc_timing_adjust)
2921 stream->adjust = *update->crtc_timing_adjust;
2922
2923 if (update->dpms_off)
2924 stream->dpms_off = *update->dpms_off;
2925
2926 if (update->hfvsif_infopacket)
2927 stream->hfvsif_infopacket = *update->hfvsif_infopacket;
2928
2929 if (update->vtem_infopacket)
2930 stream->vtem_infopacket = *update->vtem_infopacket;
2931
2932 if (update->vsc_infopacket)
2933 stream->vsc_infopacket = *update->vsc_infopacket;
2934
2935 if (update->vsp_infopacket)
2936 stream->vsp_infopacket = *update->vsp_infopacket;
2937
2938 if (update->adaptive_sync_infopacket)
2939 stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket;
2940
2941 if (update->dither_option)
2942 stream->dither_option = *update->dither_option;
2943
2944 if (update->pending_test_pattern)
2945 stream->test_pattern = *update->pending_test_pattern;
2946 /* update current stream with writeback info */
2947 if (update->wb_update) {
2948 int i;
2949
2950 stream->num_wb_info = update->wb_update->num_wb_info;
2951 ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
2952 for (i = 0; i < stream->num_wb_info; i++)
2953 stream->writeback_info[i] =
2954 update->wb_update->writeback_info[i];
2955 }
2956 if (update->dsc_config) {
2957 struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
2958 uint32_t old_dsc_enabled = stream->timing.flags.DSC;
2959 uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
2960 update->dsc_config->num_slices_v != 0);
2961
2962 /* Use a temporary context for validating the new DSC config */
2963 struct dc_state *dsc_validate_context = dc_create_state(dc);
2964
2965 if (dsc_validate_context) {
2966 dc_resource_state_copy_construct(dc->current_state, dsc_validate_context);
2967
2968 stream->timing.dsc_cfg = *update->dsc_config;
2969 stream->timing.flags.DSC = enable_dsc;
2970 if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
2971 stream->timing.dsc_cfg = old_dsc_cfg;
2972 stream->timing.flags.DSC = old_dsc_enabled;
2973 update->dsc_config = NULL;
2974 }
2975
2976 dc_release_state(dsc_validate_context);
2977 } else {
2978 DC_ERROR("Failed to allocate new validate context for DSC change\n");
2979 update->dsc_config = NULL;
2980 }
2981 }
2982 }
2983
2984 static bool update_planes_and_stream_state(struct dc *dc,
2985 struct dc_surface_update *srf_updates, int surface_count,
2986 struct dc_stream_state *stream,
2987 struct dc_stream_update *stream_update,
2988 enum surface_update_type *new_update_type,
2989 struct dc_state **new_context)
2990 {
2991 struct dc_state *context;
2992 int i, j;
2993 enum surface_update_type update_type;
2994 const struct dc_stream_status *stream_status;
2995 struct dc_context *dc_ctx = dc->ctx;
2996
2997 stream_status = dc_stream_get_status(stream);
2998
2999 if (!stream_status) {
3000 if (surface_count) /* Only an error condition if surf_count non-zero*/
3001 ASSERT(false);
3002
3003 return false; /* Cannot commit surface to stream that is not committed */
3004 }
3005
3006 context = dc->current_state;
3007
3008 update_type = dc_check_update_surfaces_for_stream(
3009 dc, srf_updates, surface_count, stream_update, stream_status);
3010
3011 /* update current stream with the new updates */
3012 copy_stream_update_to_stream(dc, context, stream, stream_update);
3013
3014 /* do not perform surface update if surface has invalid dimensions
3015 * (all zero) and no scaling_info is provided
3016 */
3017 if (surface_count > 0) {
3018 for (i = 0; i < surface_count; i++) {
3019 if ((srf_updates[i].surface->src_rect.width == 0 ||
3020 srf_updates[i].surface->src_rect.height == 0 ||
3021 srf_updates[i].surface->dst_rect.width == 0 ||
3022 srf_updates[i].surface->dst_rect.height == 0) &&
3023 (!srf_updates[i].scaling_info ||
3024 srf_updates[i].scaling_info->src_rect.width == 0 ||
3025 srf_updates[i].scaling_info->src_rect.height == 0 ||
3026 srf_updates[i].scaling_info->dst_rect.width == 0 ||
3027 srf_updates[i].scaling_info->dst_rect.height == 0)) {
3028 DC_ERROR("Invalid src/dst rects in surface update!\n");
3029 return false;
3030 }
3031 }
3032 }
3033
3034 if (update_type >= update_surface_trace_level)
3035 update_surface_trace(dc, srf_updates, surface_count);
3036
3037 if (update_type >= UPDATE_TYPE_FULL) {
3038 struct dc_plane_state *new_planes[MAX_SURFACES] = {0};
3039
3040 for (i = 0; i < surface_count; i++)
3041 new_planes[i] = srf_updates[i].surface;
3042
3043 /* initialize scratch memory for building context */
3044 context = dc_create_state(dc);
3045 if (context == NULL) {
3046 DC_ERROR("Failed to allocate new validate context!\n");
3047 return false;
3048 }
3049
3050 dc_resource_state_copy_construct(
3051 dc->current_state, context);
3052
3053 /* For each full update, remove all existing phantom pipes first.
3054 * This ensures that we have enough pipes for newly added MPO planes.
3055 */
3056 if (dc->res_pool->funcs->remove_phantom_pipes)
3057 dc->res_pool->funcs->remove_phantom_pipes(dc, context, false);
3058
3059 /*remove old surfaces from context */
3060 if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
3061
3062 BREAK_TO_DEBUGGER();
3063 goto fail;
3064 }
3065
3066 /* add surface to context */
3067 if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
3068
3069 BREAK_TO_DEBUGGER();
3070 goto fail;
3071 }
3072 }
3073
3074 /* save update parameters into surface */
3075 for (i = 0; i < surface_count; i++) {
3076 struct dc_plane_state *surface = srf_updates[i].surface;
3077
3078 copy_surface_update_to_plane(surface, &srf_updates[i]);
3079
3080 if (update_type >= UPDATE_TYPE_MED) {
3081 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3082 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3083
3084 if (pipe_ctx->plane_state != surface)
3085 continue;
3086
3087 resource_build_scaling_params(pipe_ctx);
3088 }
3089 }
3090 }
3091
3092 if (update_type == UPDATE_TYPE_FULL) {
3093 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
3094 /* For phantom pipes we remove and create a new set of phantom pipes
3095 * for each full update (because we don't know if we'll need phantom
3096 * pipes until after the first round of validation). However, if validation
3097 * fails we need to keep the existing phantom pipes (because we don't update
3098 * the dc->current_state).
3099 *
3100 * The phantom stream/plane refcount is decremented for validation because
3101 * we assume it'll be removed (the free comes when the dc_state is freed),
3102 * but if validation fails we have to increment back the refcount so it's
3103 * consistent.
3104 */
3105 if (dc->res_pool->funcs->retain_phantom_pipes)
3106 dc->res_pool->funcs->retain_phantom_pipes(dc, dc->current_state);
3107 BREAK_TO_DEBUGGER();
3108 goto fail;
3109 }
3110 }
3111
3112 *new_context = context;
3113 *new_update_type = update_type;
3114
3115 return true;
3116
3117 fail:
3118 dc_release_state(context);
3119
3120 return false;
3121
3122 }
3123
3124 static void commit_planes_do_stream_update(struct dc *dc,
3125 struct dc_stream_state *stream,
3126 struct dc_stream_update *stream_update,
3127 enum surface_update_type update_type,
3128 struct dc_state *context)
3129 {
3130 int j;
3131
3132 // Stream updates
3133 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3134 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3135
3136 if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_ctx->stream == stream) {
3137
3138 if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt)
3139 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx);
3140
3141 if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
3142 stream_update->vrr_infopacket ||
3143 stream_update->vsc_infopacket ||
3144 stream_update->vsp_infopacket ||
3145 stream_update->hfvsif_infopacket ||
3146 stream_update->adaptive_sync_infopacket ||
3147 stream_update->vtem_infopacket) {
3148 resource_build_info_frame(pipe_ctx);
3149 dc->hwss.update_info_frame(pipe_ctx);
3150
3151 if (dc_is_dp_signal(pipe_ctx->stream->signal))
3152 dc->link_srv->dp_trace_source_sequence(
3153 pipe_ctx->stream->link,
3154 DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
3155 }
3156
3157 if (stream_update->hdr_static_metadata &&
3158 stream->use_dynamic_meta &&
3159 dc->hwss.set_dmdata_attributes &&
3160 pipe_ctx->stream->dmdata_address.quad_part != 0)
3161 dc->hwss.set_dmdata_attributes(pipe_ctx);
3162
3163 if (stream_update->gamut_remap)
3164 dc_stream_set_gamut_remap(dc, stream);
3165
3166 if (stream_update->output_csc_transform)
3167 dc_stream_program_csc_matrix(dc, stream);
3168
3169 if (stream_update->dither_option) {
3170 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
3171 resource_build_bit_depth_reduction_params(pipe_ctx->stream,
3172 &pipe_ctx->stream->bit_depth_params);
3173 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
3174 &stream->bit_depth_params,
3175 &stream->clamping);
3176 while (odm_pipe) {
3177 odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
3178 &stream->bit_depth_params,
3179 &stream->clamping);
3180 odm_pipe = odm_pipe->next_odm_pipe;
3181 }
3182 }
3183
3184
3185 /* Full fe update*/
3186 if (update_type == UPDATE_TYPE_FAST)
3187 continue;
3188
3189 if (stream_update->dsc_config)
3190 dc->link_srv->update_dsc_config(pipe_ctx);
3191
3192 if (stream_update->mst_bw_update) {
3193 if (stream_update->mst_bw_update->is_increase)
3194 dc->link_srv->increase_mst_payload(pipe_ctx,
3195 stream_update->mst_bw_update->mst_stream_bw);
3196 else
3197 dc->link_srv->reduce_mst_payload(pipe_ctx,
3198 stream_update->mst_bw_update->mst_stream_bw);
3199 }
3200
3201 if (stream_update->pending_test_pattern) {
3202 dc_link_dp_set_test_pattern(stream->link,
3203 stream->test_pattern.type,
3204 stream->test_pattern.color_space,
3205 stream->test_pattern.p_link_settings,
3206 stream->test_pattern.p_custom_pattern,
3207 stream->test_pattern.cust_pattern_size);
3208 }
3209
3210 if (stream_update->dpms_off) {
3211 if (*stream_update->dpms_off) {
3212 dc->link_srv->set_dpms_off(pipe_ctx);
3213 /* for dpms, keep acquired resources*/
3214 if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
3215 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
3216
3217 dc->optimized_required = true;
3218
3219 } else {
3220 if (get_seamless_boot_stream_count(context) == 0)
3221 dc->hwss.prepare_bandwidth(dc, dc->current_state);
3222 dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
3223 }
3224 } else if (pipe_ctx->stream->link->wa_flags.blank_stream_on_ocs_change && stream_update->output_color_space
3225 && !stream->dpms_off && dc_is_dp_signal(pipe_ctx->stream->signal)) {
3226 /*
3227 * Workaround for firmware issue in some receivers where they don't pick up
3228 * correct output color space unless DP link is disabled/re-enabled
3229 */
3230 dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
3231 }
3232
3233 if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
3234 bool should_program_abm = true;
3235
3236 // if otg funcs defined check if blanked before programming
3237 if (pipe_ctx->stream_res.tg->funcs->is_blanked)
3238 if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
3239 should_program_abm = false;
3240
3241 if (should_program_abm) {
3242 if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
3243 dc->hwss.set_abm_immediate_disable(pipe_ctx);
3244 } else {
3245 pipe_ctx->stream_res.abm->funcs->set_abm_level(
3246 pipe_ctx->stream_res.abm, stream->abm_level);
3247 }
3248 }
3249 }
3250 }
3251 }
3252 }
3253
3254 static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream)
3255 {
3256 if ((stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1
3257 || stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
3258 && stream->ctx->dce_version >= DCN_VERSION_3_1)
3259 return true;
3260
3261 if (stream->link->replay_settings.config.replay_supported)
3262 return true;
3263
3264 return false;
3265 }
3266
3267 void dc_dmub_update_dirty_rect(struct dc *dc,
3268 int surface_count,
3269 struct dc_stream_state *stream,
3270 struct dc_surface_update *srf_updates,
3271 struct dc_state *context)
3272 {
3273 union dmub_rb_cmd cmd;
3274 struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
3275 unsigned int i, j;
3276 unsigned int panel_inst = 0;
3277
3278 if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
3279 return;
3280
3281 if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
3282 return;
3283
3284 memset(&cmd, 0x0, sizeof(cmd));
3285 cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
3286 cmd.update_dirty_rect.header.sub_type = 0;
3287 cmd.update_dirty_rect.header.payload_bytes =
3288 sizeof(cmd.update_dirty_rect) -
3289 sizeof(cmd.update_dirty_rect.header);
3290 update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
3291 for (i = 0; i < surface_count; i++) {
3292 struct dc_plane_state *plane_state = srf_updates[i].surface;
3293 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;
3294
3295 if (!srf_updates[i].surface || !flip_addr)
3296 continue;
3297 /* Do not send in immediate flip mode */
3298 if (srf_updates[i].surface->flip_immediate)
3299 continue;
3300
3301 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
3302 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
3303 sizeof(flip_addr->dirty_rects));
3304 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3305 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3306
3307 if (pipe_ctx->stream != stream)
3308 continue;
3309 if (pipe_ctx->plane_state != plane_state)
3310 continue;
3311
3312 update_dirty_rect->panel_inst = panel_inst;
3313 update_dirty_rect->pipe_idx = j;
3314 dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
3315 }
3316 }
3317 }
3318
3319 static void build_dmub_update_dirty_rect(
3320 struct dc *dc,
3321 int surface_count,
3322 struct dc_stream_state *stream,
3323 struct dc_surface_update *srf_updates,
3324 struct dc_state *context,
3325 struct dc_dmub_cmd dc_dmub_cmd[],
3326 unsigned int *dmub_cmd_count)
3327 {
3328 union dmub_rb_cmd cmd;
3329 struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
3330 unsigned int i, j;
3331 unsigned int panel_inst = 0;
3332
3333 if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
3334 return;
3335
3336 if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
3337 return;
3338
3339 memset(&cmd, 0x0, sizeof(cmd));
3340 cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
3341 cmd.update_dirty_rect.header.sub_type = 0;
3342 cmd.update_dirty_rect.header.payload_bytes =
3343 sizeof(cmd.update_dirty_rect) -
3344 sizeof(cmd.update_dirty_rect.header);
3345 update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
3346 for (i = 0; i < surface_count; i++) {
3347 struct dc_plane_state *plane_state = srf_updates[i].surface;
3348 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;
3349
3350 if (!srf_updates[i].surface || !flip_addr)
3351 continue;
3352 /* Do not send in immediate flip mode */
3353 if (srf_updates[i].surface->flip_immediate)
3354 continue;
3355 update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
3356 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
3357 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
3358 sizeof(flip_addr->dirty_rects));
3359 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3360 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3361
3362 if (pipe_ctx->stream != stream)
3363 continue;
3364 if (pipe_ctx->plane_state != plane_state)
3365 continue;
3366 update_dirty_rect->panel_inst = panel_inst;
3367 update_dirty_rect->pipe_idx = j;
3368 dc_dmub_cmd[*dmub_cmd_count].dmub_cmd = cmd;
3369 dc_dmub_cmd[*dmub_cmd_count].wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
3370 (*dmub_cmd_count)++;
3371 }
3372 }
3373 }
3374
3375
3376 /**
3377 * build_dmub_cmd_list() - Build an array of DMCUB commands to be sent to DMCUB
3378 *
3379 * @dc: Current DC state
3380 * @srf_updates: Array of surface updates
3381  * @surface_count: Number of surfaces that have an update
3382 * @stream: Corresponding stream to be updated in the current flip
3383 * @context: New DC state to be programmed
3384 *
3385 * @dc_dmub_cmd: Array of DMCUB commands to be sent to DMCUB
3386 * @dmub_cmd_count: Count indicating the number of DMCUB commands in dc_dmub_cmd array
3387 *
3388 * This function builds an array of DMCUB commands to be sent to DMCUB. This function is required
3389 * to build an array of commands and have them sent while the OTG lock is acquired.
3390 *
3391 * Return: void
3392 */
3393 static void build_dmub_cmd_list(struct dc *dc,
3394 struct dc_surface_update *srf_updates,
3395 int surface_count,
3396 struct dc_stream_state *stream,
3397 struct dc_state *context,
3398 struct dc_dmub_cmd dc_dmub_cmd[],
3399 unsigned int *dmub_cmd_count)
3400 {
3401 // Initialize cmd count to 0
3402 *dmub_cmd_count = 0;
3403 build_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context, dc_dmub_cmd, dmub_cmd_count);
3404 }
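
/*
 * Illustrative sketch only (not part of the driver): the command array built
 * above is consumed by the fast commit path, roughly as
 *
 *	build_dmub_cmd_list(dc, srf_updates, surface_count, stream, context,
 *			context->dc_dmub_cmd, &context->dmub_cmd_count);
 *	hwss_build_fast_sequence(dc, context->dc_dmub_cmd, context->dmub_cmd_count,
 *			context->block_sequence, &context->block_sequence_steps,
 *			top_pipe_to_program);
 *	hwss_execute_sequence(dc, context->block_sequence,
 *			context->block_sequence_steps);
 *
 * so that the queued DMCUB commands are sent while the OTG lock is held.
 */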
3405
3406 static void commit_planes_for_stream_fast(struct dc *dc,
3407 struct dc_surface_update *srf_updates,
3408 int surface_count,
3409 struct dc_stream_state *stream,
3410 struct dc_stream_update *stream_update,
3411 enum surface_update_type update_type,
3412 struct dc_state *context)
3413 {
3414 int i, j;
3415 struct pipe_ctx *top_pipe_to_program = NULL;
3416 dc_z10_restore(dc);
3417
3418 top_pipe_to_program = resource_get_otg_master_for_stream(
3419 &context->res_ctx,
3420 stream);
3421
3422 if (dc->debug.visual_confirm) {
3423 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3424 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3425
3426 if (pipe->stream && pipe->plane_state)
3427 dc_update_viusal_confirm_color(dc, context, pipe);
3428 }
3429 }
3430
3431 for (i = 0; i < surface_count; i++) {
3432 struct dc_plane_state *plane_state = srf_updates[i].surface;
3433 /*set logical flag for lock/unlock use*/
3434 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3435 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3436
3437 if (!pipe_ctx->plane_state)
3438 continue;
3439 if (should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3440 continue;
3441 pipe_ctx->plane_state->triplebuffer_flips = false;
3442 if (update_type == UPDATE_TYPE_FAST &&
3443 dc->hwss.program_triplebuffer &&
3444 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
3445 /*triple buffer for VUpdate only*/
3446 pipe_ctx->plane_state->triplebuffer_flips = true;
3447 }
3448 }
3449 }
3450
3451 build_dmub_cmd_list(dc,
3452 srf_updates,
3453 surface_count,
3454 stream,
3455 context,
3456 context->dc_dmub_cmd,
3457 &(context->dmub_cmd_count));
3458 hwss_build_fast_sequence(dc,
3459 context->dc_dmub_cmd,
3460 context->dmub_cmd_count,
3461 context->block_sequence,
3462 &(context->block_sequence_steps),
3463 top_pipe_to_program);
3464 hwss_execute_sequence(dc,
3465 context->block_sequence,
3466 context->block_sequence_steps);
3467 /* Clear update flags so next flip doesn't have redundant programming
3468 * (if there's no stream update, the update flags are not cleared).
3469 * Surface updates are cleared unconditionally at the beginning of each flip,
3470 * so no need to clear here.
3471 */
3472 if (top_pipe_to_program->stream)
3473 top_pipe_to_program->stream->update_flags.raw = 0;
3474 }
3475
3476 static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context)
3477 {
3478 /*
3479 * This function calls HWSS to wait for any potentially double buffered
3480 	 * operations to complete. It should be invoked as a preamble, prior
3481 	 * to full update programming, before asserting any HW locks.
3482 */
3483 int pipe_idx;
3484 int opp_inst;
3485 int opp_count = dc->res_pool->pipe_count;
3486 struct hubp *hubp;
3487 int mpcc_inst;
3488 const struct pipe_ctx *pipe_ctx;
3489
3490 for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
3491 pipe_ctx = &dc_context->res_ctx.pipe_ctx[pipe_idx];
3492
3493 if (!pipe_ctx->stream)
3494 continue;
3495
3496 if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
3497 pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
3498
3499 hubp = pipe_ctx->plane_res.hubp;
3500 if (!hubp)
3501 continue;
3502
3503 mpcc_inst = hubp->inst;
3504 // MPCC inst is equal to pipe index in practice
3505 for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
3506 if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {
3507 dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
3508 dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
3509 break;
3510 }
3511 }
3512 }
3513 }
3514
3515 static void commit_planes_for_stream(struct dc *dc,
3516 struct dc_surface_update *srf_updates,
3517 int surface_count,
3518 struct dc_stream_state *stream,
3519 struct dc_stream_update *stream_update,
3520 enum surface_update_type update_type,
3521 struct dc_state *context)
3522 {
3523 int i, j;
3524 struct pipe_ctx *top_pipe_to_program = NULL;
3525 bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
3526 bool subvp_prev_use = false;
3527 bool subvp_curr_use = false;
3528
3529 // Once we apply the new subvp context to hardware it won't be in the
3530 // dc->current_state anymore, so we have to cache it before we apply
3531 // the new SubVP context
3532 subvp_prev_use = false;
3533 dc_z10_restore(dc);
3534 if (update_type == UPDATE_TYPE_FULL)
3535 wait_for_outstanding_hw_updates(dc, context);
3536
3537 if (update_type == UPDATE_TYPE_FULL) {
3538 dc_allow_idle_optimizations(dc, false);
3539
3540 if (get_seamless_boot_stream_count(context) == 0)
3541 dc->hwss.prepare_bandwidth(dc, context);
3542
3543 if (dc->hwss.update_dsc_pg)
3544 dc->hwss.update_dsc_pg(dc, context, false);
3545
3546 context_clock_trace(dc, context);
3547 }
3548
3549 top_pipe_to_program = resource_get_otg_master_for_stream(
3550 &context->res_ctx,
3551 stream);
3552
3553 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3554 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3555
3556 // Check old context for SubVP
3557 subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
3558 if (subvp_prev_use)
3559 break;
3560 }
3561
3562 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3563 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3564
3565 if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
3566 subvp_curr_use = true;
3567 break;
3568 }
3569 }
3570
3571 if (dc->debug.visual_confirm)
3572 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3573 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3574
3575 if (pipe->stream && pipe->plane_state)
3576 dc_update_viusal_confirm_color(dc, context, pipe);
3577 }
3578
3579 if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
3580 struct pipe_ctx *mpcc_pipe;
3581 struct pipe_ctx *odm_pipe;
3582
3583 for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
3584 for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
3585 odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
3586 }
3587
3588 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
3589 if (top_pipe_to_program &&
3590 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
3591 if (should_use_dmub_lock(stream->link)) {
3592 union dmub_hw_lock_flags hw_locks = { 0 };
3593 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
3594
3595 hw_locks.bits.lock_dig = 1;
3596 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
3597
3598 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
3599 true,
3600 &hw_locks,
3601 &inst_flags);
3602 } else
3603 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
3604 top_pipe_to_program->stream_res.tg);
3605 }
3606
3607 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3608 if (dc->hwss.subvp_pipe_control_lock)
3609 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
3610 dc->hwss.interdependent_update_lock(dc, context, true);
3611
3612 } else {
3613 if (dc->hwss.subvp_pipe_control_lock)
3614 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
3615 /* Lock the top pipe while updating plane addrs, since freesync requires
3616 * plane addr update event triggers to be synchronized.
3617 * top_pipe_to_program is expected to never be NULL
3618 */
3619 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
3620 }
3621
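	/* With the pipe/interdependent locks held, send the dirty rectangle
	 * hints (used by selective-update features such as PSR-SU) before the
	 * stream and surface programming below.
	 */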
3622 dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context);
3623
3624 // Stream updates
3625 if (stream_update)
3626 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
3627
3628 if (surface_count == 0) {
3629 /*
3630 		 * In the case of turning off the screen, there is no need to program the
3631 		 * front end a second time; just return after programming blank.
3632 */
3633 if (dc->hwss.apply_ctx_for_surface)
3634 dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
3635 if (dc->hwss.program_front_end_for_ctx)
3636 dc->hwss.program_front_end_for_ctx(dc, context);
3637
3638 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3639 dc->hwss.interdependent_update_lock(dc, context, false);
3640 } else {
3641 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
3642 }
3643 dc->hwss.post_unlock_program_front_end(dc, context);
3644
3645 if (update_type != UPDATE_TYPE_FAST)
3646 if (dc->hwss.commit_subvp_config)
3647 dc->hwss.commit_subvp_config(dc, context);
3648
3649 /* Since phantom pipe programming is moved to post_unlock_program_front_end,
3650 		 * move the SubVP lock to after the phantom pipes have been set up
3651 */
3652 if (dc->hwss.subvp_pipe_control_lock)
3653 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes,
3654 NULL, subvp_prev_use);
3655 return;
3656 }
3657
3658 if (update_type != UPDATE_TYPE_FAST) {
3659 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3660 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3661
3662 if ((dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP ||
3663 dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) &&
3664 pipe_ctx->stream && pipe_ctx->plane_state) {
3665 /* Only update visual confirm for SUBVP and Mclk switching here.
3666 				 * The bar appears on all pipes, so we need to update the bar on all
3667 				 * displays so that the information doesn't get stale.
3668 */
3669 dc->hwss.update_visual_confirm_color(dc, pipe_ctx,
3670 pipe_ctx->plane_res.hubp->inst);
3671 }
3672 }
3673 }
3674
3675 for (i = 0; i < surface_count; i++) {
3676 struct dc_plane_state *plane_state = srf_updates[i].surface;
3677 /*set logical flag for lock/unlock use*/
3678 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3679 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3680 if (!pipe_ctx->plane_state)
3681 continue;
3682 if (should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3683 continue;
3684 pipe_ctx->plane_state->triplebuffer_flips = false;
3685 if (update_type == UPDATE_TYPE_FAST &&
3686 dc->hwss.program_triplebuffer != NULL &&
3687 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
3688 /*triple buffer for VUpdate only*/
3689 pipe_ctx->plane_state->triplebuffer_flips = true;
3690 }
3691 }
3692 if (update_type == UPDATE_TYPE_FULL) {
3693 /* force vsync flip when reconfiguring pipes to prevent underflow */
3694 plane_state->flip_immediate = false;
3695 }
3696 }
3697
3698 // Update Type FULL, Surface updates
3699 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3700 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3701
3702 if (!pipe_ctx->top_pipe &&
3703 !pipe_ctx->prev_odm_pipe &&
3704 should_update_pipe_for_stream(context, pipe_ctx, stream)) {
3705 struct dc_stream_status *stream_status = NULL;
3706
3707 if (!pipe_ctx->plane_state)
3708 continue;
3709
3710 /* Full fe update*/
3711 if (update_type == UPDATE_TYPE_FAST)
3712 continue;
3713
3714 ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
3715
3716 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
3717 /*turn off triple buffer for full update*/
3718 dc->hwss.program_triplebuffer(
3719 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
3720 }
3721 stream_status =
3722 stream_get_status(context, pipe_ctx->stream);
3723
3724 if (dc->hwss.apply_ctx_for_surface)
3725 dc->hwss.apply_ctx_for_surface(
3726 dc, pipe_ctx->stream, stream_status->plane_count, context);
3727 }
3728 }
3729 if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
3730 dc->hwss.program_front_end_for_ctx(dc, context);
3731 if (dc->debug.validate_dml_output) {
3732 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3733 struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
3734 if (cur_pipe->stream == NULL)
3735 continue;
3736
3737 cur_pipe->plane_res.hubp->funcs->validate_dml_output(
3738 cur_pipe->plane_res.hubp, dc->ctx,
3739 &context->res_ctx.pipe_ctx[i].rq_regs,
3740 &context->res_ctx.pipe_ctx[i].dlg_regs,
3741 &context->res_ctx.pipe_ctx[i].ttu_regs);
3742 }
3743 }
3744 }
3745
3746 // Update Type FAST, Surface updates
3747 if (update_type == UPDATE_TYPE_FAST) {
3748 if (dc->hwss.set_flip_control_gsl)
3749 for (i = 0; i < surface_count; i++) {
3750 struct dc_plane_state *plane_state = srf_updates[i].surface;
3751
3752 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3753 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3754
3755 if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3756 continue;
3757
3758 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3759 continue;
3760
3761 // GSL has to be used for flip immediate
3762 dc->hwss.set_flip_control_gsl(pipe_ctx,
3763 pipe_ctx->plane_state->flip_immediate);
3764 }
3765 }
3766
3767 /* Perform requested Updates */
3768 for (i = 0; i < surface_count; i++) {
3769 struct dc_plane_state *plane_state = srf_updates[i].surface;
3770
3771 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3772 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3773
3774 if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3775 continue;
3776
3777 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3778 continue;
3779
3780 /*program triple buffer after lock based on flip type*/
3781 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
3782 /*only enable triplebuffer for fast_update*/
3783 dc->hwss.program_triplebuffer(
3784 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
3785 }
3786 if (pipe_ctx->plane_state->update_flags.bits.addr_update)
3787 dc->hwss.update_plane_addr(dc, pipe_ctx);
3788 }
3789 }
3790 }
3791
3792 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3793 dc->hwss.interdependent_update_lock(dc, context, false);
3794 } else {
3795 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
3796 }
3797
3798 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
3799 if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
3800 top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3801 top_pipe_to_program->stream_res.tg,
3802 CRTC_STATE_VACTIVE);
3803 top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3804 top_pipe_to_program->stream_res.tg,
3805 CRTC_STATE_VBLANK);
3806 top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3807 top_pipe_to_program->stream_res.tg,
3808 CRTC_STATE_VACTIVE);
3809
3810 if (should_use_dmub_lock(stream->link)) {
3811 union dmub_hw_lock_flags hw_locks = { 0 };
3812 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
3813
3814 hw_locks.bits.lock_dig = 1;
3815 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
3816
3817 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
3818 false,
3819 &hw_locks,
3820 &inst_flags);
3821 } else
3822 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
3823 top_pipe_to_program->stream_res.tg);
3824 }
3825
3826 if (subvp_curr_use) {
3827 /* If enabling subvp or transitioning from subvp->subvp, enable the
3828 * phantom streams before we program front end for the phantom pipes.
3829 */
3830 if (update_type != UPDATE_TYPE_FAST) {
3831 if (dc->hwss.enable_phantom_streams)
3832 dc->hwss.enable_phantom_streams(dc, context);
3833 }
3834 }
3835
3836 if (update_type != UPDATE_TYPE_FAST)
3837 dc->hwss.post_unlock_program_front_end(dc, context);
3838
3839 if (subvp_prev_use && !subvp_curr_use) {
3840 /* If disabling subvp, disable phantom streams after front end
3841 * programming has completed (we turn on phantom OTG in order
3842 * to complete the plane disable for phantom pipes).
3843 */
3844 dc->hwss.apply_ctx_to_hw(dc, context);
3845 }
3846
3847 if (update_type != UPDATE_TYPE_FAST)
3848 if (dc->hwss.commit_subvp_config)
3849 dc->hwss.commit_subvp_config(dc, context);
3850 /* Since phantom pipe programming is moved to post_unlock_program_front_end,
3851 	 * move the SubVP lock to after the phantom pipes have been set up
3852 */
3853 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3854 if (dc->hwss.subvp_pipe_control_lock)
3855 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
3856 } else {
3857 if (dc->hwss.subvp_pipe_control_lock)
3858 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
3859 }
3860
3861 // Fire manual trigger only when bottom plane is flipped
3862 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3863 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3864
3865 if (!pipe_ctx->plane_state)
3866 continue;
3867
3868 if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
3869 !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) ||
3870 !pipe_ctx->plane_state->update_flags.bits.addr_update ||
3871 pipe_ctx->plane_state->skip_manual_trigger)
3872 continue;
3873
3874 if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
3875 pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
3876 }
3877 }
3878
3879 /**
3880 * could_mpcc_tree_change_for_active_pipes - Check if an OPP associated with MPCC might change
3881 *
3882 * @dc: Used to get the current state status
3883  * @stream: Target stream from which we want to remove the attached planes
3884  * @surface_count: Number of surface updates
3885  * @is_plane_addition: [out] Set to true if this is a plane addition case
3886 *
3887 * DCN32x and newer support a feature named Dynamic ODM which can conflict with
3888  * MPO if used simultaneously in some specific configurations (e.g.,
3889  * 4k@144). This function checks whether the incoming context requires a
3890  * transition state with extra pipe splitting and ODM disabled, to work
3891  * around the hardware limitation and avoid this edge case. If the OPP
3892 * associated with an MPCC might change due to plane additions, this function
3893 * returns true.
3894 *
3895 * Return:
3896 * Return true if OPP and MPCC might change, otherwise, return false.
3897 */
3898 static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
3899 struct dc_stream_state *stream,
3900 int surface_count,
3901 bool *is_plane_addition)
3902 {
3903
3904 struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
3905 bool force_minimal_pipe_splitting = false;
3906 bool subvp_active = false;
3907 uint32_t i;
3908
3909 *is_plane_addition = false;
3910
3911 if (cur_stream_status &&
3912 dc->current_state->stream_count > 0 &&
3913 dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
3914 /* determine if minimal transition is required due to MPC*/
3915 if (surface_count > 0) {
3916 if (cur_stream_status->plane_count > surface_count) {
3917 force_minimal_pipe_splitting = true;
3918 } else if (cur_stream_status->plane_count < surface_count) {
3919 force_minimal_pipe_splitting = true;
3920 *is_plane_addition = true;
3921 }
3922 }
3923 }
3924
3925 if (cur_stream_status &&
3926 dc->current_state->stream_count == 1 &&
3927 dc->debug.enable_single_display_2to1_odm_policy) {
3928 /* determine if minimal transition is required due to dynamic ODM*/
3929 if (surface_count > 0) {
3930 if (cur_stream_status->plane_count > 2 && cur_stream_status->plane_count > surface_count) {
3931 force_minimal_pipe_splitting = true;
3932 } else if (surface_count > 2 && cur_stream_status->plane_count < surface_count) {
3933 force_minimal_pipe_splitting = true;
3934 *is_plane_addition = true;
3935 }
3936 }
3937 }
3938
3939 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3940 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3941
3942 if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) {
3943 subvp_active = true;
3944 break;
3945 }
3946 }
3947
3948 /* For SubVP when adding or removing planes we need to add a minimal transition
3949 * (even when disabling all planes). Whenever disabling a phantom pipe, we
3950 * must use the minimal transition path to disable the pipe correctly.
3951 *
3952 * We want to use the minimal transition whenever subvp is active, not only if
3953 * a plane is being added / removed from a subvp stream (MPO plane can be added
3954 * to a DRR pipe of SubVP + DRR config, in which case we still want to run through
3955  * a min transition to disable subvp).
3956 */
3957 if (cur_stream_status && subvp_active) {
3958 /* determine if minimal transition is required due to SubVP*/
3959 if (cur_stream_status->plane_count > surface_count) {
3960 force_minimal_pipe_splitting = true;
3961 } else if (cur_stream_status->plane_count < surface_count) {
3962 force_minimal_pipe_splitting = true;
3963 *is_plane_addition = true;
3964 }
3965 }
3966
3967 return force_minimal_pipe_splitting;
3968 }
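
/*
 * Illustrative example only: with dc->debug.enable_single_display_2to1_odm_policy
 * set on a single-stream config, going from 3 planes to 2 (plane removal) or
 * from 2 to 3 (plane addition) makes this function return true, and the caller
 * (dc_update_planes_and_stream()) then inserts a commit_minimal_transition_state()
 * step around the update.
 */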
3969
3970 /**
3971 * commit_minimal_transition_state - Create a transition pipe split state
3972 *
3973 * @dc: Used to get the current state status
3974 * @transition_base_context: New transition state
3975 *
3976 * In some specific configurations, such as pipe split on multi-display with
3977 * MPO and/or Dynamic ODM, removing a plane may cause unsupported pipe
3978 * programming when moving to new planes. To mitigate those types of problems,
3979 * this function adds a transition state that minimizes pipe usage before
3980 * programming the new configuration. When adding a new plane, the current
3981 * state requires the least pipes, so it is applied without splitting. When
3982 * removing a plane, the new state requires the least pipes, so it is applied
3983 * without splitting.
3984 *
3985 * Return:
3986 * Return false if something is wrong in the transition state.
3987 */
3988 static bool commit_minimal_transition_state(struct dc *dc,
3989 struct dc_state *transition_base_context)
3990 {
3991 struct dc_state *transition_context = dc_create_state(dc);
3992 enum pipe_split_policy tmp_mpc_policy = 0;
3993 bool temp_dynamic_odm_policy = 0;
3994 bool temp_subvp_policy = 0;
3995 enum dc_status ret = DC_ERROR_UNEXPECTED;
3996 unsigned int i, j;
3997 unsigned int pipe_in_use = 0;
3998 bool subvp_in_use = false;
3999 bool odm_in_use = false;
4000
4001 if (!transition_context)
4002 return false;
4003 /* Setup:
4004 * Store the current ODM and MPC config in some temp variables to be
4005 * restored after we commit the transition state.
4006 */
4007
4008 /* check current pipes in use*/
4009 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4010 struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];
4011
4012 if (pipe->plane_state)
4013 pipe_in_use++;
4014 }
4015
4016 /* If SubVP is enabled and we are adding or removing planes from any main subvp
4017 * pipe, we must use the minimal transition.
4018 */
4019 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4020 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4021
4022 if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
4023 subvp_in_use = true;
4024 break;
4025 }
4026 }
4027
4028 /* If ODM is enabled and we are adding or removing planes from any ODM
4029 * pipe, we must use the minimal transition.
4030 */
4031 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4032 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4033
4034 if (pipe->stream && pipe->next_odm_pipe) {
4035 odm_in_use = true;
4036 break;
4037 }
4038 }
4039
4040 	/* When the OS adds a new surface while all of the pipes are already in use for
4041 	 * ODM combine and MPC split, commit_minimal_transition_state is needed to
4042 	 * transition safely. After the OS exits MPO, ODM and MPC split go back to using
4043 	 * all of the pipes, so this must be called again. Otherwise return true to skip.
4044 	 *
4045 	 * This reduces the scenarios where dc_commit_state_no_check is used during the
4046 	 * flip stage, especially when entering/exiting MPO while DCN still has enough resources.
4047 */
4048 if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use) {
4049 dc_release_state(transition_context);
4050 return true;
4051 }
4052
4053 if (!dc->config.is_vmin_only_asic) {
4054 tmp_mpc_policy = dc->debug.pipe_split_policy;
4055 dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
4056 }
4057
4058 temp_dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy;
4059 dc->debug.enable_single_display_2to1_odm_policy = false;
4060
4061 temp_subvp_policy = dc->debug.force_disable_subvp;
4062 dc->debug.force_disable_subvp = true;
4063
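	/* Build the transition state as a copy of the base context; with the
	 * debug policies forced above it should validate without MPC split,
	 * dynamic ODM or SubVP.
	 */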
4064 dc_resource_state_copy_construct(transition_base_context, transition_context);
4065
4066 /* commit minimal state */
4067 if (dc->res_pool->funcs->validate_bandwidth(dc, transition_context, false)) {
4068 for (i = 0; i < transition_context->stream_count; i++) {
4069 struct dc_stream_status *stream_status = &transition_context->stream_status[i];
4070
4071 for (j = 0; j < stream_status->plane_count; j++) {
4072 struct dc_plane_state *plane_state = stream_status->plane_states[j];
4073
4074 /* force vsync flip when reconfiguring pipes to prevent underflow
4075 * and corruption
4076 */
4077 plane_state->flip_immediate = false;
4078 }
4079 }
4080
4081 ret = dc_commit_state_no_check(dc, transition_context);
4082 }
4083
4084 /* always release as dc_commit_state_no_check retains in good case */
4085 dc_release_state(transition_context);
4086
4087 /* TearDown:
4088 * Restore original configuration for ODM and MPO.
4089 */
4090 if (!dc->config.is_vmin_only_asic)
4091 dc->debug.pipe_split_policy = tmp_mpc_policy;
4092
4093 dc->debug.enable_single_display_2to1_odm_policy = temp_dynamic_odm_policy;
4094 dc->debug.force_disable_subvp = temp_subvp_policy;
4095
4096 if (ret != DC_OK) {
4097 /* this should never happen */
4098 BREAK_TO_DEBUGGER();
4099 return false;
4100 }
4101
4102 /* force full surface update */
4103 for (i = 0; i < dc->current_state->stream_count; i++) {
4104 for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
4105 dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF;
4106 }
4107 }
4108
4109 return true;
4110 }
4111
4112 /**
4113 * update_seamless_boot_flags() - Helper function for updating seamless boot flags
4114 *
4115 * @dc: Current DC state
4116 * @context: New DC state to be programmed
4117  * @surface_count: Number of surfaces that have an update
4118 * @stream: Corresponding stream to be updated in the current flip
4119 *
4120  * Updating seamless boot flags does not need to be part of the commit sequence. This
4121 * helper function will update the seamless boot flags on each flip (if required)
4122 * outside of the HW commit sequence (fast or slow).
4123 *
4124 * Return: void
4125 */
4126 static void update_seamless_boot_flags(struct dc *dc,
4127 struct dc_state *context,
4128 int surface_count,
4129 struct dc_stream_state *stream)
4130 {
4131 if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
4132 		/* The seamless boot optimization flag keeps clocks and watermarks high
4133 		 * until the first flip. After the first flip, optimization is required to lower
4134 * bandwidth. Important to note that it is expected UEFI will
4135 * only light up a single display on POST, therefore we only expect
4136 * one stream with seamless boot flag set.
4137 */
4138 if (stream->apply_seamless_boot_optimization) {
4139 stream->apply_seamless_boot_optimization = false;
4140
4141 if (get_seamless_boot_stream_count(context) == 0)
4142 dc->optimized_required = true;
4143 }
4144 }
4145 }
4146
4147 static void populate_fast_updates(struct dc_fast_update *fast_update,
4148 struct dc_surface_update *srf_updates,
4149 int surface_count,
4150 struct dc_stream_update *stream_update)
4151 {
4152 int i = 0;
4153
4154 if (stream_update) {
4155 fast_update[0].out_transfer_func = stream_update->out_transfer_func;
4156 fast_update[0].output_csc_transform = stream_update->output_csc_transform;
4157 }
4158
4159 for (i = 0; i < surface_count; i++) {
4160 fast_update[i].flip_addr = srf_updates[i].flip_addr;
4161 fast_update[i].gamma = srf_updates[i].gamma;
4162 fast_update[i].gamut_remap_matrix = srf_updates[i].gamut_remap_matrix;
4163 fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix;
4164 fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor;
4165 }
4166 }
4167
4168 static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_count)
4169 {
4170 int i;
4171
4172 if (fast_update[0].out_transfer_func ||
4173 fast_update[0].output_csc_transform)
4174 return true;
4175
4176 for (i = 0; i < surface_count; i++) {
4177 if (fast_update[i].flip_addr ||
4178 fast_update[i].gamma ||
4179 fast_update[i].gamut_remap_matrix ||
4180 fast_update[i].input_csc_color_matrix ||
4181 fast_update[i].coeff_reduction_factor)
4182 return true;
4183 }
4184
4185 return false;
4186 }
4187
4188 static bool full_update_required(struct dc *dc,
4189 struct dc_surface_update *srf_updates,
4190 int surface_count,
4191 struct dc_stream_update *stream_update,
4192 struct dc_stream_state *stream)
4193 {
4194
4195 int i;
4196 struct dc_stream_status *stream_status;
4197 const struct dc_state *context = dc->current_state;
4198
4199 for (i = 0; i < surface_count; i++) {
4200 if (srf_updates &&
4201 (srf_updates[i].plane_info ||
4202 srf_updates[i].scaling_info ||
4203 (srf_updates[i].hdr_mult.value &&
4204 srf_updates[i].hdr_mult.value != srf_updates->surface->hdr_mult.value) ||
4205 srf_updates[i].in_transfer_func ||
4206 srf_updates[i].func_shaper ||
4207 srf_updates[i].lut3d_func ||
4208 srf_updates[i].blend_tf ||
4209 srf_updates[i].surface->force_full_update ||
4210 (srf_updates[i].flip_addr &&
4211 srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
4212 !is_surface_in_context(context, srf_updates[i].surface)))
4213 return true;
4214 }
4215
4216 if (stream_update &&
4217 (((stream_update->src.height != 0 && stream_update->src.width != 0) ||
4218 (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
4219 stream_update->integer_scaling_update) ||
4220 stream_update->hdr_static_metadata ||
4221 stream_update->abm_level ||
4222 stream_update->periodic_interrupt ||
4223 stream_update->vrr_infopacket ||
4224 stream_update->vsc_infopacket ||
4225 stream_update->vsp_infopacket ||
4226 stream_update->hfvsif_infopacket ||
4227 stream_update->vtem_infopacket ||
4228 stream_update->adaptive_sync_infopacket ||
4229 stream_update->dpms_off ||
4230 stream_update->allow_freesync ||
4231 stream_update->vrr_active_variable ||
4232 stream_update->vrr_active_fixed ||
4233 stream_update->gamut_remap ||
4234 stream_update->output_color_space ||
4235 stream_update->dither_option ||
4236 stream_update->wb_update ||
4237 stream_update->dsc_config ||
4238 stream_update->mst_bw_update ||
4239 stream_update->func_shaper ||
4240 stream_update->lut3d_func ||
4241 stream_update->pending_test_pattern ||
4242 stream_update->crtc_timing_adjust))
4243 return true;
4244
4245 if (stream) {
4246 stream_status = dc_stream_get_status(stream);
4247 if (stream_status == NULL || stream_status->plane_count != surface_count)
4248 return true;
4249 }
4250 if (dc->idle_optimizations_allowed)
4251 return true;
4252
4253 return false;
4254 }
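
/*
 * Note: together with fast_updates_exist() above, this implements the gate used
 * by dc_update_planes_and_stream() and dc_commit_updates_for_stream(): a commit
 * takes the fast path only when at least one fast-updatable attribute changed
 * and nothing in the update forces a full pipe reprogram.
 */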
4255
4256 static bool fast_update_only(struct dc *dc,
4257 struct dc_fast_update *fast_update,
4258 struct dc_surface_update *srf_updates,
4259 int surface_count,
4260 struct dc_stream_update *stream_update,
4261 struct dc_stream_state *stream)
4262 {
4263 return fast_updates_exist(fast_update, surface_count)
4264 && !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
4265 }
4266
4267 bool dc_update_planes_and_stream(struct dc *dc,
4268 struct dc_surface_update *srf_updates, int surface_count,
4269 struct dc_stream_state *stream,
4270 struct dc_stream_update *stream_update)
4271 {
4272 struct dc_state *context;
4273 enum surface_update_type update_type;
4274 int i;
4275 struct mall_temp_config mall_temp_config;
4276 struct dc_fast_update fast_update[MAX_SURFACES] = {0};
4277
4278 	/* In cases where MPO and split or ODM are used, transitions can
4279 * cause underflow. Apply stream configuration with minimal pipe
4280 * split first to avoid unsupported transitions for active pipes.
4281 */
4282 bool force_minimal_pipe_splitting = 0;
4283 bool is_plane_addition = 0;
4284
4285 populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
4286 force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
4287 dc,
4288 stream,
4289 surface_count,
4290 &is_plane_addition);
4291
4292 /* on plane addition, minimal state is the current one */
4293 if (force_minimal_pipe_splitting && is_plane_addition &&
4294 !commit_minimal_transition_state(dc, dc->current_state))
4295 return false;
4296
4297 if (!update_planes_and_stream_state(
4298 dc,
4299 srf_updates,
4300 surface_count,
4301 stream,
4302 stream_update,
4303 &update_type,
4304 &context))
4305 return false;
4306
4307 /* on plane removal, minimal state is the new one */
4308 if (force_minimal_pipe_splitting && !is_plane_addition) {
4309 /* Since all phantom pipes are removed in full validation,
4310 * we have to save and restore the subvp/mall config when
4311 * we do a minimal transition since the flags marking the
4312 * pipe as subvp/phantom will be cleared (dc copy constructor
4313 * creates a shallow copy).
4314 */
4315 if (dc->res_pool->funcs->save_mall_state)
4316 dc->res_pool->funcs->save_mall_state(dc, context, &mall_temp_config);
4317 if (!commit_minimal_transition_state(dc, context)) {
4318 dc_release_state(context);
4319 return false;
4320 }
4321 if (dc->res_pool->funcs->restore_mall_state)
4322 dc->res_pool->funcs->restore_mall_state(dc, context, &mall_temp_config);
4323
4324 /* If we do a minimal transition with plane removal and the context
4325 * has subvp we also have to retain back the phantom stream / planes
4326 * since the refcount is decremented as part of the min transition
4327 * (we commit a state with no subvp, so the phantom streams / planes
4328 * had to be removed).
4329 */
4330 if (dc->res_pool->funcs->retain_phantom_pipes)
4331 dc->res_pool->funcs->retain_phantom_pipes(dc, context);
4332 update_type = UPDATE_TYPE_FULL;
4333 }
4334
4335 update_seamless_boot_flags(dc, context, surface_count, stream);
4336 if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
4337 !dc->debug.enable_legacy_fast_update) {
4338 commit_planes_for_stream_fast(dc,
4339 srf_updates,
4340 surface_count,
4341 stream,
4342 stream_update,
4343 update_type,
4344 context);
4345 } else {
4346 if (!stream_update &&
4347 dc->hwss.is_pipe_topology_transition_seamless &&
4348 !dc->hwss.is_pipe_topology_transition_seamless(
4349 dc, dc->current_state, context)) {
4350
4351 DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n");
4352 BREAK_TO_DEBUGGER();
4353 }
4354 commit_planes_for_stream(
4355 dc,
4356 srf_updates,
4357 surface_count,
4358 stream,
4359 stream_update,
4360 update_type,
4361 context);
4362 }
4363
4364 if (dc->current_state != context) {
4365
4366 /* Since memory free requires elevated IRQL, an interrupt
4367 * request is generated by mem free. If this happens
4368 * between freeing and reassigning the context, our vsync
4369 * interrupt will call into dc and cause a memory
4370 * corruption BSOD. Hence, we first reassign the context,
4371 * then free the old context.
4372 */
4373
4374 struct dc_state *old = dc->current_state;
4375
4376 dc->current_state = context;
4377 dc_release_state(old);
4378
4379 // clear any forced full updates
4380 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4381 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
4382
4383 if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
4384 pipe_ctx->plane_state->force_full_update = false;
4385 }
4386 }
4387 return true;
4388 }
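
/*
 * Illustrative DM-side usage (sketch only; names other than the dc API are
 * hypothetical): flipping a single plane on an existing stream could look like
 *
 *	struct dc_surface_update srf_upd = {
 *		.surface = plane_state,
 *		.flip_addr = &new_flip_addr,
 *	};
 *
 *	if (!dc_update_planes_and_stream(dc, &srf_upd, 1, stream, NULL))
 *		handle_commit_failure();
 *
 * dc decides internally whether this goes through the fast or the full path.
 */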
4389
4390 void dc_commit_updates_for_stream(struct dc *dc,
4391 struct dc_surface_update *srf_updates,
4392 int surface_count,
4393 struct dc_stream_state *stream,
4394 struct dc_stream_update *stream_update,
4395 struct dc_state *state)
4396 {
4397 const struct dc_stream_status *stream_status;
4398 enum surface_update_type update_type;
4399 struct dc_state *context;
4400 struct dc_context *dc_ctx = dc->ctx;
4401 int i, j;
4402 struct dc_fast_update fast_update[MAX_SURFACES] = {0};
4403
4404 populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
4405 stream_status = dc_stream_get_status(stream);
4406 context = dc->current_state;
4407
4408 update_type = dc_check_update_surfaces_for_stream(
4409 dc, srf_updates, surface_count, stream_update, stream_status);
4410
4411 	/* TODO: Since changing the commit sequence can have a huge impact,
4412 * we decided to only enable it for DCN3x. However, as soon as
4413 * we get more confident about this change we'll need to enable
4414 * the new sequence for all ASICs.
4415 */
4416 if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
4417 /*
4418 * Previous frame finished and HW is ready for optimization.
4419 */
4420 if (update_type == UPDATE_TYPE_FAST)
4421 dc_post_update_surfaces_to_stream(dc);
4422
4423 dc_update_planes_and_stream(dc, srf_updates,
4424 surface_count, stream,
4425 stream_update);
4426 return;
4427 }
4428
4429 if (update_type >= update_surface_trace_level)
4430 update_surface_trace(dc, srf_updates, surface_count);
4431
4432
4433 if (update_type >= UPDATE_TYPE_FULL) {
4434
4435 /* initialize scratch memory for building context */
4436 context = dc_create_state(dc);
4437 if (context == NULL) {
4438 DC_ERROR("Failed to allocate new validate context!\n");
4439 return;
4440 }
4441
4442 dc_resource_state_copy_construct(state, context);
4443
4444 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4445 struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
4446 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4447
4448 if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
4449 new_pipe->plane_state->force_full_update = true;
4450 }
4451 } else if (update_type == UPDATE_TYPE_FAST) {
4452 /*
4453 * Previous frame finished and HW is ready for optimization.
4454 */
4455 dc_post_update_surfaces_to_stream(dc);
4456 }
4457
4458
4459 for (i = 0; i < surface_count; i++) {
4460 struct dc_plane_state *surface = srf_updates[i].surface;
4461
4462 copy_surface_update_to_plane(surface, &srf_updates[i]);
4463
4464 if (update_type >= UPDATE_TYPE_MED) {
4465 for (j = 0; j < dc->res_pool->pipe_count; j++) {
4466 struct pipe_ctx *pipe_ctx =
4467 &context->res_ctx.pipe_ctx[j];
4468
4469 if (pipe_ctx->plane_state != surface)
4470 continue;
4471
4472 resource_build_scaling_params(pipe_ctx);
4473 }
4474 }
4475 }
4476
4477 copy_stream_update_to_stream(dc, context, stream, stream_update);
4478
4479 if (update_type >= UPDATE_TYPE_FULL) {
4480 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
4481 DC_ERROR("Mode validation failed for stream update!\n");
4482 dc_release_state(context);
4483 return;
4484 }
4485 }
4486
4487 TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
4488
4489 update_seamless_boot_flags(dc, context, surface_count, stream);
4490 if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
4491 !dc->debug.enable_legacy_fast_update) {
4492 commit_planes_for_stream_fast(dc,
4493 srf_updates,
4494 surface_count,
4495 stream,
4496 stream_update,
4497 update_type,
4498 context);
4499 } else {
4500 commit_planes_for_stream(
4501 dc,
4502 srf_updates,
4503 surface_count,
4504 stream,
4505 stream_update,
4506 update_type,
4507 context);
4508 }
4509 	/* update current_state */
4510 if (dc->current_state != context) {
4511
4512 struct dc_state *old = dc->current_state;
4513
4514 dc->current_state = context;
4515 dc_release_state(old);
4516
4517 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4518 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
4519
4520 if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
4521 pipe_ctx->plane_state->force_full_update = false;
4522 }
4523 }
4524
4525 /* Legacy optimization path for DCE. */
4526 if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
4527 dc_post_update_surfaces_to_stream(dc);
4528 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
4529 }
4530
4531 return;
4532
4533 }
4534
4535 uint8_t dc_get_current_stream_count(struct dc *dc)
4536 {
4537 return dc->current_state->stream_count;
4538 }
4539
4540 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
4541 {
4542 if (i < dc->current_state->stream_count)
4543 return dc->current_state->streams[i];
4544 return NULL;
4545 }
4546
4547 enum dc_irq_source dc_interrupt_to_irq_source(
4548 struct dc *dc,
4549 uint32_t src_id,
4550 uint32_t ext_id)
4551 {
4552 return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
4553 }
4554
4555 /*
4556 * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
4557 */
4558 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
4559 {
4560
4561 if (dc == NULL)
4562 return false;
4563
4564 return dal_irq_service_set(dc->res_pool->irqs, src, enable);
4565 }
4566
4567 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
4568 {
4569 dal_irq_service_ack(dc->res_pool->irqs, src);
4570 }
4571
4572 void dc_power_down_on_boot(struct dc *dc)
4573 {
4574 if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
4575 dc->hwss.power_down_on_boot)
4576 dc->hwss.power_down_on_boot(dc);
4577 }
4578
4579 void dc_set_power_state(
4580 struct dc *dc,
4581 enum dc_acpi_cm_power_state power_state)
4582 {
4583 struct kref refcount;
4584 struct display_mode_lib *dml;
4585
4586 if (!dc->current_state)
4587 return;
4588
4589 switch (power_state) {
4590 case DC_ACPI_CM_POWER_STATE_D0:
4591 dc_resource_state_construct(dc, dc->current_state);
4592
4593 dc_z10_restore(dc);
4594
4595 dc->hwss.init_hw(dc);
4596
4597 if (dc->hwss.init_sys_ctx != NULL &&
4598 dc->vm_pa_config.valid) {
4599 dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
4600 }
4601
4602 break;
4603 default:
4604 ASSERT(dc->current_state->stream_count == 0);
4605 /* Zero out the current context so that on resume we start with
4606 * clean state, and dc hw programming optimizations will not
4607 * cause any trouble.
4608 */
4609 dml = kzalloc(sizeof(struct display_mode_lib),
4610 GFP_KERNEL);
4611
4612 ASSERT(dml);
4613 if (!dml)
4614 return;
4615
4616 /* Preserve refcount */
4617 refcount = dc->current_state->refcount;
4618 /* Preserve display mode lib */
4619 memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));
4620
4621 dc_resource_state_destruct(dc->current_state);
4622 memset(dc->current_state, 0,
4623 sizeof(*dc->current_state));
4624
4625 dc->current_state->refcount = refcount;
4626 dc->current_state->bw_ctx.dml = *dml;
4627
4628 kfree(dml);
4629
4630 break;
4631 }
4632 }
4633
4634 void dc_resume(struct dc *dc)
4635 {
4636 uint32_t i;
4637
4638 for (i = 0; i < dc->link_count; i++)
4639 dc->link_srv->resume(dc->links[i]);
4640 }
4641
4642 bool dc_is_dmcu_initialized(struct dc *dc)
4643 {
4644 struct dmcu *dmcu = dc->res_pool->dmcu;
4645
4646 if (dmcu)
4647 return dmcu->funcs->is_dmcu_initialized(dmcu);
4648 return false;
4649 }
4650
4651 void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
4652 {
4653 info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
4654 info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
4655 info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
4656 info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
4657 info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
4658 info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
4659 info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
4660 info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
4661 info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
4662 }
4663 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
4664 {
4665 if (dc->hwss.set_clock)
4666 return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
4667 return DC_ERROR_UNEXPECTED;
4668 }
4669 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
4670 {
4671 if (dc->hwss.get_clock)
4672 dc->hwss.get_clock(dc, clock_type, clock_cfg);
4673 }
4674
4675 /* enable/disable eDP PSR without specify stream for eDP */
4676 bool dc_set_psr_allow_active(struct dc *dc, bool enable)
4677 {
4678 int i;
4679 bool allow_active;
4680
4681 for (i = 0; i < dc->current_state->stream_count ; i++) {
4682 struct dc_link *link;
4683 struct dc_stream_state *stream = dc->current_state->streams[i];
4684
4685 link = stream->link;
4686 if (!link)
4687 continue;
4688
4689 if (link->psr_settings.psr_feature_enabled) {
4690 if (enable && !link->psr_settings.psr_allow_active) {
4691 allow_active = true;
4692 if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL))
4693 return false;
4694 } else if (!enable && link->psr_settings.psr_allow_active) {
4695 allow_active = false;
4696 if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL))
4697 return false;
4698 }
4699 }
4700 }
4701
4702 return true;
4703 }
4704
4705 void dc_allow_idle_optimizations(struct dc *dc, bool allow)
4706 {
4707 if (dc->debug.disable_idle_power_optimizations)
4708 return;
4709
4710 if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
4711 if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
4712 return;
4713
4714 if (allow == dc->idle_optimizations_allowed)
4715 return;
4716
4717 if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
4718 dc->idle_optimizations_allowed = allow;
4719 }
4720
4721 /* set min and max memory clock to lowest and highest DPM level, respectively */
4722 void dc_unlock_memory_clock_frequency(struct dc *dc)
4723 {
4724 if (dc->clk_mgr->funcs->set_hard_min_memclk)
4725 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);
4726
4727 if (dc->clk_mgr->funcs->set_hard_max_memclk)
4728 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
4729 }
4730
4731 /* set min memory clock to the min required for current mode, max to maxDPM */
4732 void dc_lock_memory_clock_frequency(struct dc *dc)
4733 {
4734 if (dc->clk_mgr->funcs->get_memclk_states_from_smu)
4735 dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);
4736
4737 if (dc->clk_mgr->funcs->set_hard_min_memclk)
4738 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);
4739
4740 if (dc->clk_mgr->funcs->set_hard_max_memclk)
4741 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
4742 }
4743
4744 static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz)
4745 {
4746 struct dc_state *context = dc->current_state;
4747 struct hubp *hubp;
4748 struct pipe_ctx *pipe;
4749 int i;
4750
4751 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4752 pipe = &context->res_ctx.pipe_ctx[i];
4753
4754 if (pipe->stream != NULL) {
4755 dc->hwss.disable_pixel_data(dc, pipe, true);
4756
4757 // wait for double buffer
4758 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
4759 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
4760 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
4761
4762 hubp = pipe->plane_res.hubp;
4763 hubp->funcs->set_blank_regs(hubp, true);
4764 }
4765 }
4766
4767 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz);
4768 dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz);
4769
4770 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4771 pipe = &context->res_ctx.pipe_ctx[i];
4772
4773 if (pipe->stream != NULL) {
4774 dc->hwss.disable_pixel_data(dc, pipe, false);
4775
4776 hubp = pipe->plane_res.hubp;
4777 hubp->funcs->set_blank_regs(hubp, false);
4778 }
4779 }
4780 }
4781
4782
4783 /**
4784 * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode
4785 * @dc: pointer to dc of the dm calling this
4786 * @enable: True = transition to DC mode, false = transition back to AC mode
4787 *
4788  * Some SoCs define additional clock limits when in DC mode; DM should
4789 * invoke this function when the platform undergoes a power source transition
4790 * so DC can apply/unapply the limit. This interface may be disruptive to
4791 * the onscreen content.
4792 *
4793 * Context: Triggered by OS through DM interface, or manually by escape calls.
4794  * Need to hold a dc lock when doing so.
4795 *
4796 * Return: none (void function)
4797 *
4798 */
4799 void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
4800 {
4801 unsigned int softMax = 0, maxDPM = 0, funcMin = 0, i;
4802 bool p_state_change_support;
4803
4804 if (!dc->config.dc_mode_clk_limit_support)
4805 return;
4806
4807 softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk;
4808 for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries; i++) {
4809 if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz > maxDPM)
4810 maxDPM = dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz;
4811 }
4812 funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000;
4813 p_state_change_support = dc->clk_mgr->clks.p_state_change_support;
4814
4815 if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) {
4816 if (p_state_change_support) {
4817 if (funcMin <= softMax)
4818 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax);
4819 // else: No-Op
4820 } else {
4821 if (funcMin <= softMax)
4822 blank_and_force_memclk(dc, true, softMax);
4823 // else: No-Op
4824 }
4825 } else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) {
4826 if (p_state_change_support) {
4827 if (funcMin <= softMax)
4828 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM);
4829 // else: No-Op
4830 } else {
4831 if (funcMin <= softMax)
4832 blank_and_force_memclk(dc, true, maxDPM);
4833 // else: No-Op
4834 }
4835 }
4836 dc->clk_mgr->dc_mode_softmax_enabled = enable;
4837 }
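/*
 * Illustrative DM-side usage (sketch only; the power-source event and the
 * on_battery flag are assumptions): on an AC/DC power transition the DM could call
 *
 *	dc_enable_dcmode_clk_limit(dc, on_battery);
 *
 * where on_battery is true when the platform enters DC (battery) mode.
 */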
4838 bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
4839 struct dc_cursor_attributes *cursor_attr)
4840 {
4841 if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr))
4842 return true;
4843 return false;
4844 }
4845
4846 /* cleanup on driver unload */
4847 void dc_hardware_release(struct dc *dc)
4848 {
4849 dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(dc);
4850
4851 if (dc->hwss.hardware_release)
4852 dc->hwss.hardware_release(dc);
4853 }
4854
4855 void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
4856 {
4857 if (dc->current_state)
4858 dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true;
4859 }
4860
4861 /**
4862  * dc_is_dmub_outbox_supported - Check if DMUB firmware supports outbox notifications
4863 *
4864 * @dc: [in] dc structure
4865 *
4866  * Checks whether DMUB FW supports outbox notifications; if supported, DM
4867  * should register the outbox interrupt prior to actually enabling interrupts
4868  * via dc_enable_dmub_outbox.
4869 *
4870 * Return:
4871 * True if DMUB FW supports outbox notifications, False otherwise
4872 */
4873 bool dc_is_dmub_outbox_supported(struct dc *dc)
4874 {
4875 switch (dc->ctx->asic_id.chip_family) {
4876
4877 case FAMILY_YELLOW_CARP:
4878 /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
4879 if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
4880 !dc->debug.dpia_debug.bits.disable_dpia)
4881 return true;
4882 break;
4883
4884 case AMDGPU_FAMILY_GC_11_0_1:
4885 case AMDGPU_FAMILY_GC_11_5_0:
4886 if (!dc->debug.dpia_debug.bits.disable_dpia)
4887 return true;
4888 break;
4889
4890 default:
4891 break;
4892 }
4893
4894 /* dmub aux needs dmub notifications to be enabled */
4895 return dc->debug.enable_dmub_aux_for_legacy_ddc;
4896
4897 }

/**
 * dc_enable_dmub_notifications - Check if dmub fw supports outbox notifications
 *
 * @dc: [in] dc structure
 *
 * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox
 * notifications. All DMs shall switch to dc_is_dmub_outbox_supported; this
 * API shall be removed after switching.
 *
 * Return:
 * True if DMUB FW supports outbox notifications, False otherwise
 */
bool dc_enable_dmub_notifications(struct dc *dc)
{
        return dc_is_dmub_outbox_supported(dc);
}

/**
 * dc_enable_dmub_outbox - Enables DMUB unsolicited notification
 *
 * @dc: [in] dc structure
 *
 * Enables DMUB unsolicited notifications to x86 via outbox.
 */
void dc_enable_dmub_outbox(struct dc *dc)
{
        struct dc_context *dc_ctx = dc->ctx;

        dmub_enable_outbox_notification(dc_ctx->dmub_srv);
        DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__);
}
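
/*
 * Illustrative sketch (not part of this driver): the intended ordering for a
 * DM bringing up DMUB outbox notifications, per the kerneldoc above.
 * dm_example_register_outbox_irq() is a hypothetical stand-in for the DM's
 * own interrupt registration; dc_is_dmub_outbox_supported() and
 * dc_enable_dmub_outbox() are the real entry points above.
 *
 *      static void dm_example_init_outbox(struct dc *dc)
 *      {
 *              if (!dc_is_dmub_outbox_supported(dc))
 *                      return;
 *
 *              // Register the outbox interrupt handler first...
 *              dm_example_register_outbox_irq(dc);
 *              // ...then let DMUB start sending unsolicited notifications.
 *              dc_enable_dmub_outbox(dc);
 *      }
 */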

/**
 * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message
 *
 * @dc: [in] dc structure
 * @link_index: [in] link index
 * @payload: [in] aux payload
 *
 * Submits the aux command to dmub via an inbox message and sets the port
 * index appropriately for legacy DDC.
 *
 * Return:
 * True if successful, False if failure
 */
bool dc_process_dmub_aux_transfer_async(struct dc *dc,
                uint32_t link_index,
                struct aux_payload *payload)
{
        uint8_t action;
        union dmub_rb_cmd cmd = {0};

        ASSERT(payload->length <= 16);

        cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
        cmd.dp_aux_access.header.payload_bytes = 0;
        /* For dpia, ddc_pin is set to NULL */
        if (!dc->links[link_index]->ddc->ddc_pin)
                cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA;
        else
                cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;

        cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
        cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
        cmd.dp_aux_access.aux_control.timeout = 0;
        cmd.dp_aux_access.aux_control.dpaux.address = payload->address;
        cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux;
        cmd.dp_aux_access.aux_control.dpaux.length = payload->length;

        /* set aux action */
        if (payload->i2c_over_aux) {
                if (payload->write) {
                        if (payload->mot)
                                action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT;
                        else
                                action = DP_AUX_REQ_ACTION_I2C_WRITE;
                } else {
                        if (payload->mot)
                                action = DP_AUX_REQ_ACTION_I2C_READ_MOT;
                        else
                                action = DP_AUX_REQ_ACTION_I2C_READ;
                }
        } else {
                if (payload->write)
                        action = DP_AUX_REQ_ACTION_DPCD_WRITE;
                else
                        action = DP_AUX_REQ_ACTION_DPCD_READ;
        }

        cmd.dp_aux_access.aux_control.dpaux.action = action;

        if (payload->length && payload->write) {
                memcpy(cmd.dp_aux_access.aux_control.dpaux.data,
                        payload->data,
                        payload->length);
        }

        dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

        return true;
}
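
/*
 * Illustrative sketch (not part of this driver): building an aux_payload for
 * a small native DPCD read and handing it to DMUB asynchronously. The field
 * names follow struct aux_payload as used above; the reply arrives later via
 * the DMUB outbox, not from this call. The address, length, and buffer values
 * are arbitrary examples.
 *
 *      static bool dm_example_dpcd_read_async(struct dc *dc, uint32_t link_index)
 *      {
 *              uint8_t data[16] = {0};
 *              struct aux_payload payload = {
 *                      .i2c_over_aux = false,  // native DPCD access
 *                      .write = false,         // read request
 *                      .mot = false,
 *                      .address = 0x0000,      // example DPCD offset
 *                      .length = 1,
 *                      .data = data,
 *              };
 *
 *              return dc_process_dmub_aux_transfer_async(dc, link_index, &payload);
 *      }
 */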

uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
                uint8_t dpia_port_index)
{
        uint8_t index, link_index = 0xFF;

        for (index = 0; index < dc->link_count; index++) {
                /* ddc_hw_inst has dpia port index for dpia links
                 * and ddc instance for legacy links
                 */
                if (!dc->links[index]->ddc->ddc_pin) {
                        if (dc->links[index]->ddc_hw_inst == dpia_port_index) {
                                link_index = index;
                                break;
                        }
                }
        }
        ASSERT(link_index != 0xFF);
        return link_index;
}

/**
 * dc_process_dmub_set_config_async - Submits set_config command
 *
 * @dc: [in] dc structure
 * @link_index: [in] link index
 * @payload: [in] set_config command payload
 * @notify: [out] set_config immediate reply
 *
 * Submits set_config command to dmub via inbox message.
 *
 * Return:
 * True if successful, False if failure
 */
bool dc_process_dmub_set_config_async(struct dc *dc,
                uint32_t link_index,
                struct set_config_cmd_payload *payload,
                struct dmub_notification *notify)
{
        union dmub_rb_cmd cmd = {0};
        bool is_cmd_complete = true;

        /* prepare SET_CONFIG command */
        cmd.set_config_access.header.type = DMUB_CMD__DPIA;
        cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS;

        cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst;
        cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
        cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;

        if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) {
                /* command is not processed by dmub */
                notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
                return is_cmd_complete;
        }

        /* command processed by dmub, if ret_status is 1, it is completed instantly */
        if (cmd.set_config_access.header.ret_status == 1)
                notify->sc_status = cmd.set_config_access.set_config_control.immed_status;
        else
                /* cmd pending, will receive notification via outbox */
                is_cmd_complete = false;

        return is_cmd_complete;
}
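
/*
 * Illustrative sketch (not part of this driver): issuing a SET_CONFIG request
 * on a DPIA link and distinguishing an immediate reply from a deferred one.
 * The msg_type/msg_data values are placeholders; struct set_config_cmd_payload
 * and struct dmub_notification are the real types used above. When the call
 * returns false, the final status arrives later via an outbox notification.
 *
 *      static void dm_example_set_config(struct dc *dc, uint32_t link_index)
 *      {
 *              struct set_config_cmd_payload payload = {
 *                      .msg_type = 0,  // placeholder message type
 *                      .msg_data = 0,  // placeholder message data
 *              };
 *              struct dmub_notification notify = {0};
 *
 *              if (dc_process_dmub_set_config_async(dc, link_index, &payload, &notify)) {
 *                      // Completed immediately; notify.sc_status holds the result.
 *              } else {
 *                      // Pending; wait for the SET_CONFIG reply on the outbox.
 *              }
 *      }
 */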

/**
 * dc_process_dmub_set_mst_slots - Submits MST slot allocation
 *
 * @dc: [in] dc structure
 * @link_index: [in] link index
 * @mst_alloc_slots: [in] mst slots to be allotted
 * @mst_slots_in_use: [out] mst slots in use returned in failure case
 *
 * Submits mst slot allocation command to dmub via inbox message
 *
 * Return:
 * DC_OK if successful, DC_ERROR if failure
 */
enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
                uint32_t link_index,
                uint8_t mst_alloc_slots,
                uint8_t *mst_slots_in_use)
{
        union dmub_rb_cmd cmd = {0};

        /* prepare MST_ALLOC_SLOTS command */
        cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA;
        cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS;

        cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
        cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;

        if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
                /* command is not processed by dmub */
                return DC_ERROR_UNEXPECTED;

        /* command processed by dmub, if ret_status is 1 */
        if (cmd.set_config_access.header.ret_status != 1)
                /* command processing error */
                return DC_ERROR_UNEXPECTED;

        /* command processed and we have a status of 2, mst not enabled in dpia */
        if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2)
                return DC_FAIL_UNSUPPORTED_1;

        /* previously configured mst alloc and used slots did not match */
        if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) {
                *mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use;
                return DC_NOT_SUPPORTED;
        }

        return DC_OK;
}
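
/*
 * Illustrative sketch (not part of this driver): how a caller might interpret
 * the status codes returned by dc_process_dmub_set_mst_slots(). The slot
 * count used here is an arbitrary example.
 *
 *      static void dm_example_alloc_mst_slots(const struct dc *dc, uint32_t link_index)
 *      {
 *              uint8_t slots_in_use = 0;
 *              enum dc_status status =
 *                      dc_process_dmub_set_mst_slots(dc, link_index, 4, &slots_in_use);
 *
 *              switch (status) {
 *              case DC_OK:                     // allocation accepted
 *                      break;
 *              case DC_FAIL_UNSUPPORTED_1:     // MST not enabled on this DPIA
 *                      break;
 *              case DC_NOT_SUPPORTED:          // mismatch; slots_in_use holds current usage
 *                      break;
 *              default:                        // DC_ERROR_UNEXPECTED: command not processed
 *                      break;
 *              }
 *      }
 */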

/**
 * dc_process_dmub_dpia_hpd_int_enable - Submits DPIA HPD interrupt enable
 *
 * @dc: [in] dc structure
 * @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable
 *
 * Submits dpia hpd int enable command to dmub via inbox message
 */
void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
                uint32_t hpd_int_enable)
{
        union dmub_rb_cmd cmd = {0};

        cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE;
        cmd.dpia_hpd_int_enable.enable = hpd_int_enable;

        dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

        DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable);
}

/**
 * dc_print_dmub_diagnostic_data - Print DMUB diagnostic data for debugging
 *
 * @dc: [in] dc structure
 */
void dc_print_dmub_diagnostic_data(const struct dc *dc)
{
        dc_dmub_srv_log_diagnostic_data(dc->ctx->dmub_srv);
}

/**
 * dc_disable_accelerated_mode - disable accelerated mode
 * @dc: dc structure
 */
void dc_disable_accelerated_mode(struct dc *dc)
{
        bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
}

/**
 * dc_notify_vsync_int_state - notifies vsync enable/disable state
 * @dc: dc structure
 * @stream: stream where vsync int state changed
 * @enable: whether vsync is enabled or disabled
 *
 * Called when vsync is enabled/disabled. Will notify DMUB to start/stop ABM
 * interrupts after steady state is reached.
 */
void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
{
        int i;
        int edp_num;
        struct pipe_ctx *pipe = NULL;
        struct dc_link *link = stream->sink->link;
        struct dc_link *edp_links[MAX_NUM_EDP];

        if (link->psr_settings.psr_feature_enabled)
                return;

        if (link->replay_settings.replay_feature_enabled)
                return;

        /* find primary pipe associated with stream */
        for (i = 0; i < MAX_PIPES; i++) {
                pipe = &dc->current_state->res_ctx.pipe_ctx[i];

                if (pipe->stream == stream && pipe->stream_res.tg)
                        break;
        }

        if (i == MAX_PIPES) {
                ASSERT(0);
                return;
        }

        dc_get_edp_links(dc, edp_links, &edp_num);

        /* Determine panel inst */
        for (i = 0; i < edp_num; i++) {
                if (edp_links[i] == link)
                        break;
        }

        if (i == edp_num)
                return;

        if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
                pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
}
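
/*
 * Illustrative sketch (not part of this driver): a DM would typically call
 * dc_notify_vsync_int_state() from its vblank/vsync interrupt enable and
 * disable paths so that ABM interrupts track vsync usage. The wrapper name
 * below is hypothetical.
 *
 *      static void dm_example_set_vblank_irq(struct dc *dc,
 *                                            struct dc_stream_state *stream,
 *                                            bool irq_enabled)
 *      {
 *              // ... enable or disable the vblank interrupt in the DM ...
 *              dc_notify_vsync_int_state(dc, stream, irq_enabled);
 *      }
 */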

/*****************************************************************************
 * dc_abm_save_restore() - Interface to DC for save+pause and restore+un-pause
 * ABM
 * @dc: dc structure
 * @stream: stream whose ABM hardware state is saved or restored
 * @pData: abm hw states
 *
 ****************************************************************************/
bool dc_abm_save_restore(
                struct dc *dc,
                struct dc_stream_state *stream,
                struct abm_save_restore *pData)
{
        int i;
        int edp_num;
        struct pipe_ctx *pipe = NULL;
        struct dc_link *link = stream->sink->link;
        struct dc_link *edp_links[MAX_NUM_EDP];

        /* find primary pipe associated with stream */
        for (i = 0; i < MAX_PIPES; i++) {
                pipe = &dc->current_state->res_ctx.pipe_ctx[i];

                if (pipe->stream == stream && pipe->stream_res.tg)
                        break;
        }

        if (i == MAX_PIPES) {
                ASSERT(0);
                return false;
        }

        dc_get_edp_links(dc, edp_links, &edp_num);

        /* Determine panel inst */
        for (i = 0; i < edp_num; i++)
                if (edp_links[i] == link)
                        break;

        if (i == edp_num)
                return false;

        if (pipe->stream_res.abm &&
                        pipe->stream_res.abm->funcs->save_restore)
                return pipe->stream_res.abm->funcs->save_restore(
                                pipe->stream_res.abm,
                                i,
                                pData);
        return false;
}
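
/*
 * Illustrative sketch (not part of this driver): pairing a save+pause with a
 * later restore+un-pause around an operation that must not race with ABM.
 * struct abm_save_restore carries the hardware state between the two calls;
 * how the save vs. restore direction is selected inside that struct is
 * hardware/firmware specific, so this only shows the call pairing.
 *
 *      static void dm_example_abm_quiesce(struct dc *dc, struct dc_stream_state *stream)
 *      {
 *              struct abm_save_restore abm_state = {0};
 *
 *              if (!dc_abm_save_restore(dc, stream, &abm_state))
 *                      return;         // no matching eDP/ABM for this stream
 *
 *              // ... perform the operation that requires ABM to be paused ...
 *
 *              dc_abm_save_restore(dc, stream, &abm_state);
 *      }
 */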

void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties)
{
        unsigned int i;
        bool subvp_in_use = false;

        for (i = 0; i < dc->current_state->stream_count; i++) {
                if (dc->current_state->streams[i]->mall_stream_config.type != SUBVP_NONE) {
                        subvp_in_use = true;
                        break;
                }
        }
        properties->cursor_size_limit = subvp_in_use ? 64 : dc->caps.max_cursor_size;
}
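
/*
 * Illustrative sketch (not part of this driver): querying the current cursor
 * size limit before programming a cursor, since SubVP in use caps it at 64
 * as shown above. The helper name is hypothetical.
 *
 *      static bool dm_example_cursor_fits(struct dc *dc, unsigned int cursor_width)
 *      {
 *              struct dc_current_properties props = {0};
 *
 *              dc_query_current_properties(dc, &props);
 *              return cursor_width <= props.cursor_size_limit;
 *      }
 */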

/**
 *****************************************************************************
 *  dc_set_edp_power() - DM controls eDP power to be ON/OFF
 *
 *  Called when DM wants to power on/off eDP.
 *  Only works on links with the skip_implict_edp_power_control flag set.
 *
 *****************************************************************************
 */
void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link,
                bool powerOn)
{
        if (edp_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (edp_link->skip_implict_edp_power_control == false)
                return;

        edp_link->dc->link_srv->edp_set_panel_power(edp_link, powerOn);
}
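
/*
 * Illustrative sketch (not part of this driver): a DM explicitly powering the
 * eDP panel off and back on. As implemented above, the call is a no-op unless
 * the link is eDP and has skip_implict_edp_power_control set, so the caller
 * needs no extra checks. The helper name is hypothetical.
 *
 *      static void dm_example_cycle_edp_power(const struct dc *dc, struct dc_link *edp_link)
 *      {
 *              dc_set_edp_power(dc, edp_link, false);  // panel power off
 *              // ... perform work that requires the panel to be off ...
 *              dc_set_edp_power(dc, edp_link, true);   // panel power back on
 *      }
 */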