// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * Copyright (c) 2021 Rockchip Electronics Co., Ltd.
 * Author: Sandy Huang <hjc@rock-chips.com>
 */
#include <linux/memblock.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/iommu.h>

#include <drm/drm_atomic_uapi.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_fb.h"
#include "rockchip_drm_logo.h"

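/*
 * Connector types that provide hot-plug detection. Used at the end of
 * rockchip_drm_show_logo() to decide whether an extra reference on the
 * fbdev framebuffer should be taken for the output.
 */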
static bool is_support_hotplug(uint32_t output_type)
{
	switch (output_type) {
	case DRM_MODE_CONNECTOR_DVII:
	case DRM_MODE_CONNECTOR_DVID:
	case DRM_MODE_CONNECTOR_DVIA:
	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_HDMIA:
	case DRM_MODE_CONNECTOR_HDMIB:
	case DRM_MODE_CONNECTOR_TV:
		return true;
	default:
		return false;
	}
}

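/*
 * Map an output endpoint (the node referenced by a route node's "connect"
 * property) to its CRTC: the endpoint's parent is the port node that the
 * CRTC registered as crtc->port.
 */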
static struct drm_crtc *
find_crtc_by_node(struct drm_device *drm_dev, struct device_node *node)
{
	struct device_node *np_crtc;
	struct drm_crtc *crtc;

	np_crtc = of_get_parent(node);
	if (!np_crtc || !of_device_is_available(np_crtc))
		return NULL;

	drm_for_each_crtc(crtc, drm_dev) {
		if (crtc->port == np_crtc)
			return crtc;
	}

	return NULL;
}

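/*
 * Resolve the endpoint's remote port parent (the connector device node)
 * and return the rockchip_drm_sub_dev registered for it, if any.
 */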
static struct rockchip_drm_sub_dev *
find_sub_dev_by_node(struct drm_device *drm_dev, struct device_node *node)
{
	struct device_node *np_connector;
	struct rockchip_drm_sub_dev *sub_dev;

	np_connector = of_graph_get_remote_port_parent(node);
	if (!np_connector || !of_device_is_available(np_connector))
		return NULL;

	sub_dev = rockchip_drm_get_sub_dev(np_connector);
	if (!sub_dev)
		return NULL;

	return sub_dev;
}

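/*
 * When the endpoint feeds an encoder/bridge rather than a connector, walk
 * the encoder's output port (port@1) and look up the sub-dev of the first
 * enabled downstream connector node.
 */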
static struct rockchip_drm_sub_dev *
find_sub_dev_by_bridge(struct drm_device *drm_dev, struct device_node *node)
{
	struct device_node *np_encoder, *np_connector = NULL;
	struct rockchip_drm_sub_dev *sub_dev = NULL;
	struct device_node *port, *endpoint;

	np_encoder = of_graph_get_remote_port_parent(node);
	if (!np_encoder || !of_device_is_available(np_encoder))
		goto err_put_encoder;

	port = of_graph_get_port_by_id(np_encoder, 1);
	if (!port) {
		dev_err(drm_dev->dev, "can't find port node!\n");
		goto err_put_encoder;
	}

	for_each_child_of_node(port, endpoint) {
		np_connector = of_graph_get_remote_port_parent(endpoint);
		if (!np_connector) {
			dev_err(drm_dev->dev,
				"can't find connector node!\n");
			goto err_put_port;
		}
		if (!of_device_is_available(np_connector)) {
			of_node_put(np_connector);
			np_connector = NULL;
			continue;
		} else {
			break;
		}
	}
	if (!np_connector) {
		dev_err(drm_dev->dev, "can't find an available connector node!\n");
		goto err_put_port;
	}

	sub_dev = rockchip_drm_get_sub_dev(np_connector);
	if (!sub_dev)
		goto err_put_port;

	of_node_put(np_connector);
err_put_port:
	of_node_put(port);
err_put_encoder:
	of_node_put(np_encoder);

	return sub_dev;
}

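/*
 * Reserve (and later release) a fixed range in the driver's drm_mm, so GEM
 * IOVA allocations do not collide with the 1:1 mapped loader memory.
 */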
static void rockchip_drm_release_reserve_vm(struct drm_device *drm, struct drm_mm_node *node)
{
	struct rockchip_drm_private *private = drm->dev_private;

	mutex_lock(&private->mm_lock);
	if (drm_mm_node_allocated(node))
		drm_mm_remove_node(node);
	mutex_unlock(&private->mm_lock);
}

static int rockchip_drm_reserve_vm(struct drm_device *drm, struct drm_mm *mm,
				   struct drm_mm_node *node, u64 size, u64 offset)
{
	struct rockchip_drm_private *private = drm->dev_private;
	int ret;

	node->size = size;
	node->start = offset;
	node->color = 0;
	mutex_lock(&private->mm_lock);
	ret = drm_mm_reserve_node(mm, node);
	mutex_unlock(&private->mm_lock);

	return ret;
}

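/*
 * Open-coded variant of the core free_reserved_area() helper: poison the
 * pages through the direct map (bypassing KASAN) and hand them back to the
 * page allocator, then report how much memory was freed.
 */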
static unsigned long
rockchip_drm_free_reserved_area(void *start, void *end, int poison, const char *s)
{
	void *pos;
	unsigned long pages = 0;

	start = (void *)PAGE_ALIGN((unsigned long)start);
	end = (void *)((unsigned long)end & PAGE_MASK);
	for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
		struct page *page = virt_to_page(pos);
		void *direct_map_addr;

		/*
		 * 'direct_map_addr' might be different from 'pos'
		 * because some architectures' virt_to_page()
		 * work with aliases. Getting the direct map
		 * address ensures that we get a _writeable_
		 * alias for the memset().
		 */
		direct_map_addr = page_address(page);
		/*
		 * Perform a kasan-unchecked memset() since this memory
		 * has not been initialized.
		 */
		direct_map_addr = kasan_reset_tag(direct_map_addr);
		if ((unsigned int)poison <= 0xFF)
			memset(direct_map_addr, poison, PAGE_SIZE);

		free_reserved_page(page);
	}

	if (pages && s)
		pr_info("Freeing %s memory: %ldK\n", s, pages << (PAGE_SHIFT - 10));

	return pages;
}

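/*
 * Drop one reference on the loader logo. When the last user is gone, tear
 * down the 1:1 IOMMU mapping and the reserved IOVA node, then return the
 * memblock-reserved pages to the page allocator.
 */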
void rockchip_free_loader_memory(struct drm_device *drm)
{
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_logo *logo;
	void *start, *end;

	if (!private || !private->logo || --private->logo->count)
		return;

	logo = private->logo;
	start = phys_to_virt(logo->dma_addr);
	end = phys_to_virt(logo->dma_addr + logo->size);

	if (private->domain) {
		u32 pg_size = 1UL << __ffs(private->domain->pgsize_bitmap);

		iommu_unmap(private->domain, logo->dma_addr, ALIGN(logo->size, pg_size));
		rockchip_drm_release_reserve_vm(drm, &logo->logo_reserved_node);
	}

	memblock_free(logo->start, logo->size);
	rockchip_drm_free_reserved_area(start, end, -1, "drm_logo");
	kfree(logo);
	private->logo = NULL;
	private->loader_protect = false;
}

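/*
 * Locate the "drm-logo" (and optional "drm-cubic-lut") reserved-memory
 * regions handed over by the bootloader, create 1:1 IOMMU mappings for
 * them and reserve the matching IOVA ranges.
 */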
static int init_loader_memory(struct drm_device *drm_dev)
{
	struct rockchip_drm_private *private = drm_dev->dev_private;
	struct rockchip_logo *logo;
	struct device_node *np = drm_dev->dev->of_node;
	struct device_node *node;
	phys_addr_t start, size;
	u32 pg_size = PAGE_SIZE;
	struct resource res;
	int ret, idx;

	idx = of_property_match_string(np, "memory-region-names", "drm-logo");
	if (idx >= 0)
		node = of_parse_phandle(np, "memory-region", idx);
	else
		node = of_parse_phandle(np, "logo-memory-region", 0);
	if (!node)
		return -ENOMEM;

	ret = of_address_to_resource(node, 0, &res);
	if (ret)
		return ret;
	if (private->domain)
		pg_size = 1UL << __ffs(private->domain->pgsize_bitmap);
	start = ALIGN_DOWN(res.start, pg_size);
	size = resource_size(&res);
	if (!size)
		return -ENOMEM;

	logo = kmalloc(sizeof(*logo), GFP_KERNEL);
	if (!logo)
		return -ENOMEM;

	logo->kvaddr = phys_to_virt(start);

	if (private->domain) {
		ret = rockchip_drm_reserve_vm(drm_dev, &private->mm, &logo->logo_reserved_node, size, start);
		if (ret)
			dev_err(drm_dev->dev, "failed to reserve vm for logo memory\n");
		ret = iommu_map(private->domain, start, start, ALIGN(size, pg_size),
				IOMMU_WRITE | IOMMU_READ);
		if (ret) {
			dev_err(drm_dev->dev, "failed to create 1:1 mapping\n");
			goto err_free_logo;
		}
	}

	logo->dma_addr = start;
	logo->size = size;
	logo->count = 1;
	private->logo = logo;

	idx = of_property_match_string(np, "memory-region-names", "drm-cubic-lut");
	if (idx < 0)
		return 0;

	node = of_parse_phandle(np, "memory-region", idx);
	if (!node)
		return -ENOMEM;

	ret = of_address_to_resource(node, 0, &res);
	if (ret)
		return ret;
	start = ALIGN_DOWN(res.start, pg_size);
	size = resource_size(&res);
	if (!size)
		return 0;

	private->cubic_lut_kvaddr = phys_to_virt(start);
	if (private->domain) {
		private->clut_reserved_node = kmalloc(sizeof(struct drm_mm_node), GFP_KERNEL);
		if (!private->clut_reserved_node)
			return -ENOMEM;

		ret = rockchip_drm_reserve_vm(drm_dev, &private->mm, private->clut_reserved_node, size, start);
		if (ret)
			dev_err(drm_dev->dev, "failed to reserve vm for clut memory\n");

		ret = iommu_map(private->domain, start, start, ALIGN(size, pg_size),
				IOMMU_WRITE | IOMMU_READ);
		if (ret) {
			dev_err(drm_dev->dev, "failed to create 1:1 mapping for cubic lut\n");
			goto err_free_clut;
		}
	}
	private->cubic_lut_dma_addr = start;

	return 0;

err_free_clut:
	rockchip_drm_release_reserve_vm(drm_dev, private->clut_reserved_node);
	kfree(private->clut_reserved_node);
	private->clut_reserved_node = NULL;
err_free_logo:
	rockchip_drm_release_reserve_vm(drm_dev, &logo->logo_reserved_node);
	kfree(logo);

	return ret;
}

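/*
 * Wrap the loader logo memory in a drm_framebuffer, using the
 * logo,offset/width/height/bpp properties of the route node to describe
 * the layout the bootloader left behind.
 */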
static struct drm_framebuffer *
get_framebuffer_by_node(struct drm_device *drm_dev, struct device_node *node)
{
	struct rockchip_drm_private *private = drm_dev->dev_private;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	u32 val;
	int bpp;

	if (WARN_ON(!private->logo))
		return NULL;

	if (of_property_read_u32(node, "logo,offset", &val)) {
		pr_err("%s: failed to get logo,offset\n", __func__);
		return NULL;
	}
	mode_cmd.offsets[0] = val;

	if (of_property_read_u32(node, "logo,width", &val)) {
		pr_err("%s: failed to get logo,width\n", __func__);
		return NULL;
	}
	mode_cmd.width = val;

	if (of_property_read_u32(node, "logo,height", &val)) {
		pr_err("%s: failed to get logo,height\n", __func__);
		return NULL;
	}
	mode_cmd.height = val;

	if (of_property_read_u32(node, "logo,bpp", &val)) {
		pr_err("%s: failed to get logo,bpp\n", __func__);
		return NULL;
	}
	bpp = val;

	mode_cmd.pitches[0] = ALIGN(mode_cmd.width * bpp, 32) / 8;

	switch (bpp) {
	case 16:
		mode_cmd.pixel_format = DRM_FORMAT_RGB565;
		break;
	case 24:
		mode_cmd.pixel_format = DRM_FORMAT_RGB888;
		break;
	case 32:
		mode_cmd.pixel_format = DRM_FORMAT_XRGB8888;
		break;
	default:
		pr_err("%s: unsupported logo bpp %d\n", __func__, bpp);
		return NULL;
	}

	return rockchip_drm_logo_fb_alloc(drm_dev, &mode_cmd, private->logo);
}

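/*
 * Parse one child of the /route node: resolve its CRTC and connector
 * sub-dev, wrap the logo memory in a framebuffer and collect the timing,
 * overscan and BCSH values the bootloader used, so the same configuration
 * can be recreated atomically.
 */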
static struct rockchip_drm_mode_set *
of_parse_display_resource(struct drm_device *drm_dev, struct device_node *route)
{
	struct rockchip_drm_private *private = drm_dev->dev_private;
	struct rockchip_drm_mode_set *set;
	struct device_node *connect;
	struct drm_framebuffer *fb;
	struct rockchip_drm_sub_dev *sub_dev;
	struct drm_crtc *crtc;
	const char *string;
	u32 val;

	connect = of_parse_phandle(route, "connect", 0);
	if (!connect)
		return NULL;

	fb = get_framebuffer_by_node(drm_dev, route);
	if (IS_ERR_OR_NULL(fb))
		return NULL;

	crtc = find_crtc_by_node(drm_dev, connect);

	sub_dev = find_sub_dev_by_node(drm_dev, connect);

	if (!sub_dev)
		sub_dev = find_sub_dev_by_bridge(drm_dev, connect);

	if (!crtc || !sub_dev) {
		dev_warn(drm_dev->dev,
			 "no available crtc or connector for display\n");
		drm_framebuffer_put(fb);
		return NULL;
	}

	set = kzalloc(sizeof(*set), GFP_KERNEL);
	if (!set)
		return NULL;

	if (!of_property_read_u32(route, "video,clock", &val))
		set->clock = val;

	if (!of_property_read_u32(route, "video,hdisplay", &val))
		set->hdisplay = val;

	if (!of_property_read_u32(route, "video,vdisplay", &val))
		set->vdisplay = val;

	if (!of_property_read_u32(route, "video,crtc_hsync_end", &val))
		set->crtc_hsync_end = val;

	if (!of_property_read_u32(route, "video,crtc_vsync_end", &val))
		set->crtc_vsync_end = val;

	if (!of_property_read_u32(route, "video,vrefresh", &val))
		set->vrefresh = val;

	if (!of_property_read_u32(route, "video,flags", &val))
		set->flags = val;

	if (!of_property_read_u32(route, "video,aspect_ratio", &val))
		set->picture_aspect_ratio = val;

	if (!of_property_read_u32(route, "overscan,left_margin", &val))
		set->left_margin = val;

	if (!of_property_read_u32(route, "overscan,right_margin", &val))
		set->right_margin = val;

	if (!of_property_read_u32(route, "overscan,top_margin", &val))
		set->top_margin = val;

	if (!of_property_read_u32(route, "overscan,bottom_margin", &val))
		set->bottom_margin = val;

	if (!of_property_read_u32(route, "bcsh,brightness", &val))
		set->brightness = val;
	else
		set->brightness = 50;

	if (!of_property_read_u32(route, "bcsh,contrast", &val))
		set->contrast = val;
	else
		set->contrast = 50;

	if (!of_property_read_u32(route, "bcsh,saturation", &val))
		set->saturation = val;
	else
		set->saturation = 50;

	if (!of_property_read_u32(route, "bcsh,hue", &val))
		set->hue = val;
	else
		set->hue = 50;

	set->force_output = of_property_read_bool(route, "force-output");

	if (!of_property_read_u32(route, "cubic_lut,offset", &val)) {
		private->cubic_lut[crtc->index].enable = true;
		private->cubic_lut[crtc->index].offset = val;
	}

	set->ratio = 1;
	if (!of_property_read_string(route, "logo,mode", &string) &&
	    !strcmp(string, "fullscreen"))
		set->ratio = 0;

	set->fb = fb;
	set->crtc = crtc;
	set->sub_dev = sub_dev;

	return set;
}

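/*
 * Local variant of drm_helper_probe_single_connector_modes(): it can force
 * the connector on (force_output) and fall back to no-EDID modes, so the
 * mode programmed by the bootloader can still be matched even when no EDID
 * is available this early.
 */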
static int rockchip_drm_fill_connector_modes(struct drm_connector *connector,
					     uint32_t maxX, uint32_t maxY,
					     bool force_output)
{
	struct drm_device *dev = connector->dev;
	struct drm_display_mode *mode;
	const struct drm_connector_helper_funcs *connector_funcs =
		connector->helper_private;
	int count = 0;
	bool verbose_prune = true;
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
		      connector->name);
	/* set all modes to the unverified state */
	list_for_each_entry(mode, &connector->modes, head)
		mode->status = MODE_STALE;

	if (force_output)
		connector->force = DRM_FORCE_ON;
	if (connector->force) {
		if (connector->force == DRM_FORCE_ON ||
		    connector->force == DRM_FORCE_ON_DIGITAL)
			connector->status = connector_status_connected;
		else
			connector->status = connector_status_disconnected;
		if (connector->funcs->force)
			connector->funcs->force(connector);
	} else {
		old_status = connector->status;

		if (connector->funcs->detect)
			connector->status = connector->funcs->detect(connector, true);
		else
			connector->status = connector_status_connected;
		/*
		 * Normally either the driver's hpd code or the poll loop should
		 * pick up any changes and fire the hotplug event. But if
		 * userspace sneaks in a probe, we might miss a change. Hence
		 * check here, and if anything changed start the hotplug code.
		 */
		if (old_status != connector->status) {
			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
				      connector->base.id,
				      connector->name,
				      old_status, connector->status);

			/*
			 * The hotplug event code might call into the fb
			 * helpers, and so expects that we do not hold any
			 * locks. Fire up the poll struct instead, it will
			 * disable itself again.
			 */
			dev->mode_config.delayed_event = true;
			if (dev->mode_config.poll_enabled)
				schedule_delayed_work(&dev->mode_config.output_poll_work,
						      0);
		}
	}

	/* Re-enable polling in case the global poll config changed. */
	if (!dev->mode_config.poll_running)
		drm_kms_helper_poll_enable(dev);

	dev->mode_config.poll_running = true;

	if (connector->status == connector_status_disconnected) {
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
			      connector->base.id, connector->name);
		drm_connector_update_edid_property(connector, NULL);
		verbose_prune = false;
		goto prune;
	}

	if (!force_output)
		count = (*connector_funcs->get_modes)(connector);

	if (count == 0 && connector->status == connector_status_connected)
		count = drm_add_modes_noedid(connector, 1024, 768);
	if (force_output)
		count += rockchip_drm_add_modes_noedid(connector);
	if (count == 0)
		goto prune;

	drm_connector_list_update(connector);

	list_for_each_entry(mode, &connector->modes, head) {
		if (mode->status == MODE_OK)
			mode->status = drm_mode_validate_driver(dev, mode);

		if (mode->status == MODE_OK)
			mode->status = drm_mode_validate_size(mode, maxX, maxY);

		/*
		 * if (mode->status == MODE_OK)
		 *	mode->status = drm_mode_validate_flag(mode, mode_flags);
		 */
		if (mode->status == MODE_OK && connector_funcs->mode_valid)
			mode->status = connector_funcs->mode_valid(connector,
								   mode);
		if (mode->status == MODE_OK)
			mode->status = drm_mode_validate_ycbcr420(mode,
								  connector);
	}

prune:
	drm_mode_prune_invalid(dev, &connector->modes, verbose_prune);

	if (list_empty(&connector->modes))
		return 0;

	drm_mode_sort(&connector->modes);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes:\n", connector->base.id,
		      connector->name);
	list_for_each_entry(mode, &connector->modes, head) {
		drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
		drm_mode_debug_printmodeline(mode);
	}

	return count;
}

/*
 * For connectors that support multiple encoders, either the
 * .atomic_best_encoder() or .best_encoder() operation must be implemented.
 */
static struct drm_encoder *
rockchip_drm_connector_get_single_encoder(struct drm_connector *connector)
{
	struct drm_encoder *encoder;

	WARN_ON(hweight32(connector->possible_encoders) > 1);
	drm_connector_for_each_possible_encoder(connector, encoder)
		return encoder;

	return NULL;
}

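/*
 * Build connector/CRTC state that matches what the bootloader already
 * programmed: probe the connector, find the mode described in the route
 * node, and take the loader_protect references so the hardware is not
 * disturbed during the handover.
 */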
static int setup_initial_state(struct drm_device *drm_dev,
			       struct drm_atomic_state *state,
			       struct rockchip_drm_mode_set *set)
{
	struct rockchip_drm_private *priv = drm_dev->dev_private;
	struct drm_connector *connector = set->sub_dev->connector;
	struct drm_crtc *crtc = set->crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_connector_state *conn_state;
	struct drm_plane_state *primary_state;
	struct drm_display_mode *mode = NULL;
	const struct drm_connector_helper_funcs *funcs;
	int pipe = drm_crtc_index(crtc);
	bool is_crtc_enabled = true;
	int hdisplay, vdisplay;
	int fb_width, fb_height;
	int found = 0, match = 0;
	int num_modes;
	int ret = 0;
	struct rockchip_crtc_state *s = NULL;

	if (!set->hdisplay || !set->vdisplay || !set->vrefresh)
		is_crtc_enabled = false;

	conn_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(conn_state))
		return PTR_ERR(conn_state);

	funcs = connector->helper_private;

	if (funcs->best_encoder)
		conn_state->best_encoder = funcs->best_encoder(connector);
	else
		conn_state->best_encoder = rockchip_drm_connector_get_single_encoder(connector);

	if (set->sub_dev->loader_protect)
		set->sub_dev->loader_protect(conn_state->best_encoder, true);
	num_modes = rockchip_drm_fill_connector_modes(connector, 4096, 4096, set->force_output);
	if (!num_modes) {
		dev_err(drm_dev->dev, "connector[%s] can't find any modes\n",
			connector->name);
		ret = -EINVAL;
		goto error_conn;
	}

	list_for_each_entry(mode, &connector->modes, head) {
		if (mode->clock == set->clock &&
		    mode->hdisplay == set->hdisplay &&
		    mode->vdisplay == set->vdisplay &&
		    mode->crtc_hsync_end == set->crtc_hsync_end &&
		    mode->crtc_vsync_end == set->crtc_vsync_end &&
		    drm_mode_vrefresh(mode) == set->vrefresh &&
		    /*
		     * We only care about the DRM_MODE_FLAG_ALL bits, so
		     * compare mode->flags with set->flags & DRM_MODE_FLAG_ALL.
		     */
		    mode->flags == (set->flags & DRM_MODE_FLAG_ALL) &&
		    mode->picture_aspect_ratio == set->picture_aspect_ratio) {
			found = 1;
			match = 1;
			break;
		}
	}

	if (!found) {
		ret = -EINVAL;
		connector->status = connector_status_disconnected;
		dev_err(drm_dev->dev, "connector[%s] can't find any matching mode\n",
			connector->name);
		DRM_INFO("%s supported modes:\n\n", connector->name);
		list_for_each_entry(mode, &connector->modes, head) {
			DRM_INFO(DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
		}
		DRM_INFO("uboot set mode: h/v display[%d,%d] h/v sync_end[%d,%d] vrefresh[%d], flags[0x%x], aspect_ratio[%d]\n",
			 set->hdisplay, set->vdisplay, set->crtc_hsync_end, set->crtc_vsync_end,
			 set->vrefresh, set->flags, set->picture_aspect_ratio);
		goto error_conn;
	}

	conn_state->tv.brightness = set->brightness;
	conn_state->tv.contrast = set->contrast;
	conn_state->tv.saturation = set->saturation;
	conn_state->tv.hue = set->hue;
	set->mode = mode;
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto error_conn;
	}

	drm_mode_copy(&crtc_state->adjusted_mode, mode);
	if (!match || !is_crtc_enabled) {
		set->mode_changed = true;
	} else {
		ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
		if (ret)
			goto error_conn;

		mode->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
		ret = drm_atomic_set_mode_for_crtc(crtc_state, mode);
		if (ret)
			goto error_conn;

		crtc_state->active = true;

		if (priv->crtc_funcs[pipe] &&
		    priv->crtc_funcs[pipe]->loader_protect)
			priv->crtc_funcs[pipe]->loader_protect(crtc, true);
	}

	if (!set->fb) {
		ret = 0;
		goto error_crtc;
	}
	primary_state = drm_atomic_get_plane_state(state, crtc->primary);
	if (IS_ERR(primary_state)) {
		ret = PTR_ERR(primary_state);
		goto error_crtc;
	}

	hdisplay = mode->hdisplay;
	vdisplay = mode->vdisplay;
	fb_width = set->fb->width;
	fb_height = set->fb->height;

	primary_state->crtc = crtc;
	primary_state->src_x = 0;
	primary_state->src_y = 0;
	primary_state->src_w = fb_width << 16;
	primary_state->src_h = fb_height << 16;
	if (set->ratio) {
		if (set->fb->width >= hdisplay) {
			primary_state->crtc_x = 0;
			primary_state->crtc_w = hdisplay;
		} else {
			primary_state->crtc_x = (hdisplay - fb_width) / 2;
			primary_state->crtc_w = set->fb->width;
		}

		if (set->fb->height >= vdisplay) {
			primary_state->crtc_y = 0;
			primary_state->crtc_h = vdisplay;
		} else {
			primary_state->crtc_y = (vdisplay - fb_height) / 2;
			primary_state->crtc_h = fb_height;
		}
	} else {
		primary_state->crtc_x = 0;
		primary_state->crtc_y = 0;
		primary_state->crtc_w = hdisplay;
		primary_state->crtc_h = vdisplay;
	}
	s = to_rockchip_crtc_state(crtc->state);
	s->output_type = connector->connector_type;

	return 0;

error_crtc:
	if (priv->crtc_funcs[pipe] && priv->crtc_funcs[pipe]->loader_protect)
		priv->crtc_funcs[pipe]->loader_protect(crtc, false);
error_conn:
	if (set->sub_dev->loader_protect)
		set->sub_dev->loader_protect(conn_state->best_encoder, false);

	return ret;
}

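/*
 * Attach the parsed mode set to the duplicated atomic state that will be
 * committed: apply the overscan margins, set the mode (or re-run the
 * encoder hooks when the mode is unchanged) and put the logo framebuffer
 * on the primary plane.
 */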
static int update_state(struct drm_device *drm_dev,
			struct drm_atomic_state *state,
			struct rockchip_drm_mode_set *set,
			unsigned int *plane_mask)
{
	struct drm_crtc *crtc = set->crtc;
	struct drm_connector *connector = set->sub_dev->connector;
	struct drm_display_mode *mode = set->mode;
	struct drm_plane_state *primary_state;
	struct drm_crtc_state *crtc_state;
	struct drm_connector_state *conn_state;
	int ret;
	struct rockchip_crtc_state *s;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);
	conn_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(conn_state))
		return PTR_ERR(conn_state);
	s = to_rockchip_crtc_state(crtc_state);
	s->left_margin = set->left_margin;
	s->right_margin = set->right_margin;
	s->top_margin = set->top_margin;
	s->bottom_margin = set->bottom_margin;

	if (set->mode_changed) {
		ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
		if (ret)
			return ret;

		ret = drm_atomic_set_mode_for_crtc(crtc_state, mode);
		if (ret)
			return ret;

		crtc_state->active = true;
	} else {
		const struct drm_encoder_helper_funcs *encoder_helper_funcs;
		const struct drm_connector_helper_funcs *connector_helper_funcs;
		struct drm_encoder *encoder;

		connector_helper_funcs = connector->helper_private;
		if (!connector_helper_funcs)
			return -ENXIO;
		if (connector_helper_funcs->best_encoder)
			encoder = connector_helper_funcs->best_encoder(connector);
		else
			encoder = rockchip_drm_connector_get_single_encoder(connector);
		if (!encoder)
			return -ENXIO;
		encoder_helper_funcs = encoder->helper_private;
		if (!encoder_helper_funcs->atomic_check)
			return -ENXIO;
		ret = encoder_helper_funcs->atomic_check(encoder, crtc->state,
							 conn_state);
		if (ret)
			return ret;

		if (encoder_helper_funcs->atomic_mode_set)
			encoder_helper_funcs->atomic_mode_set(encoder,
							      crtc_state,
							      conn_state);
		else if (encoder_helper_funcs->mode_set)
			encoder_helper_funcs->mode_set(encoder, mode, mode);
	}

	primary_state = drm_atomic_get_plane_state(state, crtc->primary);
	if (IS_ERR(primary_state))
		return PTR_ERR(primary_state);

	crtc_state->plane_mask = 1 << drm_plane_index(crtc->primary);
	*plane_mask |= crtc_state->plane_mask;

	drm_atomic_set_fb_for_plane(primary_state, set->fb);
	drm_framebuffer_put(set->fb);
	ret = drm_atomic_set_crtc_for_plane(primary_state, crtc);

	return ret;
}

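/*
 * Take over the display configuration left by the bootloader: parse the
 * /route node, build an atomic state that matches what is already on
 * screen, install it as the driver's initial state and commit it, then
 * drop the init-time reference on the loader memory.
 */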
void rockchip_drm_show_logo(struct drm_device *drm_dev)
{
	struct drm_atomic_state *state, *old_state;
	struct device_node *np = drm_dev->dev->of_node;
	struct drm_mode_config *mode_config = &drm_dev->mode_config;
	struct rockchip_drm_private *private = drm_dev->dev_private;
	struct device_node *root, *route;
	struct rockchip_drm_mode_set *set, *tmp, *unset;
	struct list_head mode_set_list;
	struct list_head mode_unset_list;
	unsigned int plane_mask = 0;
	struct drm_crtc *crtc;
	int ret, i;

	root = of_get_child_by_name(np, "route");
	if (!root) {
		dev_warn(drm_dev->dev, "failed to parse resources for logo display\n");
		return;
	}

	if (init_loader_memory(drm_dev)) {
		dev_warn(drm_dev->dev, "failed to parse loader memory\n");
		return;
	}

	INIT_LIST_HEAD(&mode_set_list);
	INIT_LIST_HEAD(&mode_unset_list);
	drm_modeset_lock_all(drm_dev);
	state = drm_atomic_state_alloc(drm_dev);
	if (!state) {
		dev_err(drm_dev->dev, "failed to alloc atomic state for logo display\n");
		ret = -ENOMEM;
		goto err_unlock;
	}

	state->acquire_ctx = mode_config->acquire_ctx;

	for_each_child_of_node(root, route) {
		if (!of_device_is_available(route))
			continue;

		set = of_parse_display_resource(drm_dev, route);
		if (!set)
			continue;

		if (setup_initial_state(drm_dev, state, set)) {
			drm_framebuffer_put(set->fb);
			INIT_LIST_HEAD(&set->head);
			list_add_tail(&set->head, &mode_unset_list);
			continue;
		}

		INIT_LIST_HEAD(&set->head);
		list_add_tail(&set->head, &mode_set_list);
	}

	/*
	 * mode_unset_list holds the routes that could not be connected; if a
	 * route's CRTC is not used by any connected route, close it.
	 */
	list_for_each_entry_safe(unset, tmp, &mode_unset_list, head) {
		struct rockchip_drm_mode_set *tmp_set;
		int find_used_crtc = 0;

		list_for_each_entry_safe(set, tmp_set, &mode_set_list, head) {
			if (set->crtc == unset->crtc) {
				find_used_crtc = 1;
				continue;
			}
		}

		if (!find_used_crtc) {
			struct drm_crtc *crtc = unset->crtc;
			int pipe = drm_crtc_index(crtc);
			struct rockchip_drm_private *priv =
				drm_dev->dev_private;

			if (unset->hdisplay && unset->vdisplay) {
				if (priv->crtc_funcs[pipe] &&
				    priv->crtc_funcs[pipe]->loader_protect)
					priv->crtc_funcs[pipe]->loader_protect(crtc, true);
				priv->crtc_funcs[pipe]->crtc_close(crtc);
				if (priv->crtc_funcs[pipe] &&
				    priv->crtc_funcs[pipe]->loader_protect)
					priv->crtc_funcs[pipe]->loader_protect(crtc, false);
			}
		}

		list_del(&unset->head);
		kfree(unset);
	}

	if (list_empty(&mode_set_list)) {
		dev_warn(drm_dev->dev, "can't find any logo display\n");
		ret = -ENXIO;
		goto err_free_state;
	}

	/*
	 * The state holds the initial device status. Swap it into the DRM
	 * device as the old state, so that a later state can be compared
	 * against it to decide what needs to be updated.
	 */
	WARN_ON(drm_atomic_helper_swap_state(state, false));
	drm_atomic_state_put(state);
	old_state = drm_atomic_helper_duplicate_state(drm_dev,
						      mode_config->acquire_ctx);
	if (IS_ERR(old_state)) {
		dev_err(drm_dev->dev, "failed to duplicate atomic state for logo display\n");
		ret = PTR_ERR_OR_ZERO(old_state);
		goto err_free_state;
	}

	state = drm_atomic_helper_duplicate_state(drm_dev,
						  mode_config->acquire_ctx);
	if (IS_ERR(state)) {
		dev_err(drm_dev->dev, "failed to duplicate atomic state for logo display\n");
		ret = PTR_ERR_OR_ZERO(state);
		goto err_free_old_state;
	}
	state->acquire_ctx = mode_config->acquire_ctx;

	list_for_each_entry(set, &mode_set_list, head)
		/*
		 * We don't want to see any failure from update_state().
		 */
		WARN_ON(update_state(drm_dev, state, set, &plane_mask));

	for (i = 0; i < state->num_connector; i++) {
		if (state->connectors[i].new_state->connector->status !=
		    connector_status_connected)
			state->connectors[i].new_state->best_encoder = NULL;
	}

	ret = drm_atomic_commit(state);
	/*
	 * TODO:
	 * drm_atomic_clean_old_fb(drm_dev, plane_mask, ret);
	 */

	list_for_each_entry_safe(set, tmp, &mode_set_list, head) {
		if (set->force_output)
			set->sub_dev->connector->force = DRM_FORCE_UNSPECIFIED;
		list_del(&set->head);
		kfree(set);
	}

	/*
	 * Is it possible to hit a deadlock here?
	 */
	WARN_ON(ret == -EDEADLK);

	if (ret) {
		/*
		 * Restore the display status if the atomic commit failed.
		 */
		WARN_ON(drm_atomic_helper_swap_state(old_state, false));
		goto err_free_state;
	}

	rockchip_free_loader_memory(drm_dev);
	drm_atomic_state_put(old_state);
	drm_atomic_state_put(state);

	private->loader_protect = true;
	drm_modeset_unlock_all(drm_dev);

	drm_for_each_crtc(crtc, drm_dev) {
		struct drm_fb_helper *helper = private->fbdev_helper;
		struct rockchip_crtc_state *s = NULL;

		if (!helper)
			break;

		s = to_rockchip_crtc_state(crtc->state);
		if (is_support_hotplug(s->output_type))
			drm_framebuffer_get(helper->fb);
	}

	return;
err_free_old_state:
	drm_atomic_state_put(old_state);
err_free_state:
	drm_atomic_state_put(state);
err_unlock:
	drm_modeset_unlock_all(drm_dev);
	if (ret)
		dev_err(drm_dev->dev, "failed to show kernel logo\n");
}

#ifndef MODULE
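/*
 * Display clocks looked up by name and kept enabled from arch_initcall_sync
 * until late_initcall_sync, so the pipeline the bootloader set up is not
 * disturbed before the DRM driver has taken over.
 */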
static const char *const loader_protect_clocks[] __initconst = {
	"hclk_vio",
	"hclk_vop",
	"hclk_vopb",
	"hclk_vopl",
	"aclk_vio",
	"aclk_vio0",
	"aclk_vio1",
	"aclk_vop",
	"aclk_vopb",
	"aclk_vopl",
	"aclk_vo_pre",
	"aclk_vio_pre",
	"dclk_vop",
	"dclk_vop0",
	"dclk_vop1",
	"dclk_vopb",
	"dclk_vopl",
};

static struct clk **loader_clocks __initdata;

static int __init rockchip_clocks_loader_protect(void)
{
	int nclocks = ARRAY_SIZE(loader_protect_clocks);
	struct clk *clk;
	int i;

	loader_clocks = kcalloc(nclocks, sizeof(void *), GFP_KERNEL);
	if (!loader_clocks)
		return -ENOMEM;

	for (i = 0; i < nclocks; i++) {
		clk = __clk_lookup(loader_protect_clocks[i]);

		if (clk) {
			loader_clocks[i] = clk;
			clk_prepare_enable(clk);
		}
	}

	return 0;
}
arch_initcall_sync(rockchip_clocks_loader_protect);

static int __init rockchip_clocks_loader_unprotect(void)
{
	int i;

	if (!loader_clocks)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(loader_protect_clocks); i++) {
		struct clk *clk = loader_clocks[i];

		if (clk)
			clk_disable_unprepare(clk);
	}
	kfree(loader_clocks);

	return 0;
}
late_initcall_sync(rockchip_clocks_loader_unprotect);
#endif