1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
4  * Author:Mark Yao <mark.yao@rock-chips.com>
5  *
6  * based on exynos_drm_drv.c
7  */
8 
9 #include <linux/dma-buf-cache.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/dma-iommu.h>
12 #include <linux/genalloc.h>
13 #include <linux/pm_runtime.h>
14 #include <linux/module.h>
15 #include <linux/of_address.h>
16 #include <linux/of_graph.h>
17 #include <linux/of_platform.h>
18 #include <linux/clk.h>
19 #include <linux/component.h>
20 #include <linux/console.h>
21 #include <linux/iommu.h>
22 #include <linux/of_reserved_mem.h>
23 
24 #include <drm/drm_debugfs.h>
25 #include <drm/drm_drv.h>
26 #include <drm/drm_displayid.h>
27 #include <drm/drm_fb_helper.h>
28 #include <drm/drm_gem_cma_helper.h>
29 #include <drm/drm_of.h>
30 #include <drm/drm_probe_helper.h>
31 #include <drm/drm_vblank.h>
32 
33 #include "rockchip_drm_fb.h"
34 #include "rockchip_drm_fbdev.h"
35 #include "rockchip_drm_gem.h"
36 #include "rockchip_drm_logo.h"
37 #include "rockchip_drm_drv.h"
38 
39 #include "../drm_crtc_internal.h"
40 
41 #define DRIVER_NAME "rockchip"
42 #define DRIVER_DESC "RockChip Soc DRM"
43 #define DRIVER_DATE "20140818"
44 #define DRIVER_MAJOR 3
45 #define DRIVER_MINOR 0
46 
47 static bool is_support_iommu = true;
48 static struct drm_driver rockchip_drm_driver;
49 
50 void drm_mode_convert_to_split_mode(struct drm_display_mode *mode)
51 {
52     u16 hactive, hfp, hsync, hbp;
53 
54     hactive = mode->hdisplay;
55     hfp = mode->hsync_start - mode->hdisplay;
56     hsync = mode->hsync_end - mode->hsync_start;
57     hbp = mode->htotal - mode->hsync_end;
58 
59     mode->clock *= 0x2;
60     mode->hdisplay = hactive * 0x2;
61     mode->hsync_start = mode->hdisplay + hfp * 0x2;
62     mode->hsync_end = mode->hsync_start + hsync * 0x2;
63     mode->htotal = mode->hsync_end + hbp * 0x2;
64     drm_mode_set_name(mode);
65 }
66 EXPORT_SYMBOL(drm_mode_convert_to_split_mode);
67 
68 void drm_mode_convert_to_origin_mode(struct drm_display_mode *mode)
69 {
70     u16 hactive, hfp, hsync, hbp;
71 
72     hactive = mode->hdisplay;
73     hfp = mode->hsync_start - mode->hdisplay;
74     hsync = mode->hsync_end - mode->hsync_start;
75     hbp = mode->htotal - mode->hsync_end;
76 
77     mode->clock /= 0x2;
78     mode->hdisplay = hactive / 0x2;
79     mode->hsync_start = mode->hdisplay + hfp / 0x2;
80     mode->hsync_end = mode->hsync_start + hsync / 0x2;
81     mode->htotal = mode->hsync_end + hbp / 0x2;
82 }
83 EXPORT_SYMBOL(drm_mode_convert_to_origin_mode);
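/*
 * Worked example: feeding the CEA 1920x1080@60 mode (clock 148500,
 * hdisplay 1920, hsync_start 2008, hsync_end 2052, htotal 2200) through
 * drm_mode_convert_to_split_mode() doubles the pixel clock and every
 * horizontal timing, giving clock 297000, hdisplay 3840, hsync_start 4016,
 * hsync_end 4104 and htotal 4400, while the vertical timings stay untouched.
 * drm_mode_convert_to_origin_mode() halves the same fields, so the two
 * helpers round-trip for modes whose horizontal timings are all even.
 */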
84 
85 /**
86  * drm_connector_oob_hotplug_event - Report out-of-band hotplug event to connector
87  * @connector: connector to report the event on
88  *
89  * On some hardware a hotplug event notification may come from outside the display
90  * driver / device. An example of this is some USB Type-C setups where the hardware
91  * muxes the DisplayPort data and aux-lines but does not pass the altmode HPD
92  * status bit to the GPU's DP HPD pin.
93  *
94  * This function can be used to report these out-of-band events after obtaining
95  * a drm_connector reference through calling drm_connector_find_by_fwnode().
96  */
97 void drm_connector_oob_hotplug_event(struct fwnode_handle *connector_fwnode)
98 {
99     struct rockchip_drm_sub_dev *sub_dev;
100 
101     if (!connector_fwnode || !connector_fwnode->dev) {
102         return;
103     }
104 
105     sub_dev = rockchip_drm_get_sub_dev(dev_of_node(connector_fwnode->dev));
106     if (sub_dev && sub_dev->connector && sub_dev->oob_hotplug_event) {
107         sub_dev->oob_hotplug_event(sub_dev->connector);
108     }
109 }
110 EXPORT_SYMBOL(drm_connector_oob_hotplug_event);
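/*
 * Hypothetical usage sketch (names invented): a USB Type-C altmode driver
 * that owns the DP mux could forward an HPD status change to the connector
 * registered against the same firmware node, e.g.
 *
 *    static void foo_typec_dp_hpd_changed(struct foo_typec *tc)
 *    {
 *        drm_connector_oob_hotplug_event(tc->dp_connector_fwnode);
 *    }
 *
 * The connector driver only receives the callback if it registered a
 * rockchip_drm_sub_dev with ->oob_hotplug_event and a matching ->of_node.
 */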
111 
112 uint32_t rockchip_drm_get_bpp(const struct drm_format_info *info)
113 {
114     /* use whatever a driver has set */
115     if (info->cpp[0]) {
116         return info->cpp[0] * 0x8;
117     }
118 
119     switch (info->format) {
120         case DRM_FORMAT_YUV420_8BIT:
121             return 0xc; /* 12 bits per pixel */
122         case DRM_FORMAT_YUV420_10BIT:
123             return 0xf; /* 15 bits per pixel */
124         case DRM_FORMAT_VUY101010:
125             return 0x1e; /* 30 bits per pixel */
126         default:
127             break;
128     }
129 
130     /* all attempts failed */
131     return 0;
132 }
133 EXPORT_SYMBOL(rockchip_drm_get_bpp);
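/*
 * Example: DRM_FORMAT_NV12 has cpp[0] == 1, so the helper returns 8 bpp for
 * the luma plane, while DRM_FORMAT_YUV420_8BIT has cpp[0] == 0 and falls
 * back to the table above (12 bpp); a 1920-pixel-wide line of that format
 * therefore occupies 1920 * 12 / 8 = 2880 bytes.
 */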
134 
135 /**
136  * rockchip_drm_of_find_possible_crtcs - find the possible CRTCs for an active
137  * encoder port
138  * @dev: DRM device
139  * @port: encoder port to scan for endpoints
140  *
141  * Scan all active endpoints attached to a port, locate their attached CRTCs,
142  * and generate the DRM mask of CRTCs which may be attached to this
143  * encoder.
144  *
145  * See Documentation/devicetree/bindings/graph.txt for the bindings.
146  */
147 uint32_t rockchip_drm_of_find_possible_crtcs(struct drm_device *dev, struct device_node *port)
148 {
149     struct device_node *remote_port, *ep;
150     uint32_t possible_crtcs = 0;
151 
152     for_each_endpoint_of_node(port, ep)
153     {
154         if (!of_device_is_available(ep)) {
155             continue;
156         }
157 
158         remote_port = of_graph_get_remote_port(ep);
159         if (!remote_port) {
160             of_node_put(ep);
161             return 0;
162         }
163 
164         possible_crtcs |= drm_of_crtc_port_mask(dev, remote_port);
165 
166         of_node_put(remote_port);
167     }
168 
169     return possible_crtcs;
170 }
171 EXPORT_SYMBOL(rockchip_drm_of_find_possible_crtcs);
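/*
 * Sketch of the expected caller, an encoder driver's component bind
 * function (assuming port 0 of its OF node is the input port facing the
 * VOPs):
 *
 *    struct device_node *port = of_graph_get_port_by_id(dev->of_node, 0);
 *
 *    encoder->possible_crtcs = rockchip_drm_of_find_possible_crtcs(drm_dev, port);
 *    of_node_put(port);
 *    if (!encoder->possible_crtcs)
 *        return -EPROBE_DEFER;
 *
 * Deferring probe when no CRTC port resolves yet is one possible policy,
 * not a requirement of this helper.
 */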
172 
173 static DEFINE_MUTEX(rockchip_drm_sub_dev_lock);
174 static LIST_HEAD(rockchip_drm_sub_dev_list);
175 
176 void rockchip_drm_register_sub_dev(struct rockchip_drm_sub_dev *sub_dev)
177 {
178     mutex_lock(&rockchip_drm_sub_dev_lock);
179     list_add_tail(&sub_dev->list, &rockchip_drm_sub_dev_list);
180     mutex_unlock(&rockchip_drm_sub_dev_lock);
181 }
182 EXPORT_SYMBOL(rockchip_drm_register_sub_dev);
183 
184 void rockchip_drm_unregister_sub_dev(struct rockchip_drm_sub_dev *sub_dev)
185 {
186     mutex_lock(&rockchip_drm_sub_dev_lock);
187     list_del(&sub_dev->list);
188     mutex_unlock(&rockchip_drm_sub_dev_lock);
189 }
190 EXPORT_SYMBOL(rockchip_drm_unregister_sub_dev);
191 
192 struct rockchip_drm_sub_dev *rockchip_drm_get_sub_dev(struct device_node *node)
193 {
194     struct rockchip_drm_sub_dev *sub_dev = NULL;
195     bool found = false;
196 
197     mutex_lock(&rockchip_drm_sub_dev_lock);
198     list_for_each_entry(sub_dev, &rockchip_drm_sub_dev_list, list)
199     {
200         if (sub_dev->of_node == node) {
201             found = true;
202             break;
203         }
204     }
205     mutex_unlock(&rockchip_drm_sub_dev_lock);
206 
207     return found ? sub_dev : NULL;
208 }
209 EXPORT_SYMBOL(rockchip_drm_get_sub_dev);
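/*
 * Sketch of how a connector driver plugs into this registry (the sub_dev
 * fields come from rockchip_drm_drv.h, everything else is illustrative):
 *
 *    sub_dev->connector = &foo->connector;
 *    sub_dev->of_node = dev->of_node;
 *    sub_dev->oob_hotplug_event = foo_oob_hotplug_event;
 *    rockchip_drm_register_sub_dev(sub_dev);
 *
 * rockchip_drm_unregister_sub_dev() must run before the connector is
 * destroyed, since lookups only hold rockchip_drm_sub_dev_lock and do not
 * take a reference on the entry.
 */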
210 
211 int rockchip_drm_get_sub_dev_type(void)
212 {
213     int connector_type = DRM_MODE_CONNECTOR_Unknown;
214     struct rockchip_drm_sub_dev *sub_dev = NULL;
215 
216     mutex_lock(&rockchip_drm_sub_dev_lock);
217     list_for_each_entry(sub_dev, &rockchip_drm_sub_dev_list, list)
218     {
219         if (sub_dev->connector->encoder) {
220             connector_type = sub_dev->connector->connector_type;
221             break;
222         }
223     }
224     mutex_unlock(&rockchip_drm_sub_dev_lock);
225 
226     return connector_type;
227 }
228 EXPORT_SYMBOL(rockchip_drm_get_sub_dev_type);
229 
230 void rockchip_drm_te_handle(struct drm_crtc *crtc)
231 {
232     struct rockchip_drm_private *priv = crtc->dev->dev_private;
233     int pipe = drm_crtc_index(crtc);
234     if (priv->crtc_funcs[pipe] && priv->crtc_funcs[pipe]->te_handler) {
235         priv->crtc_funcs[pipe]->te_handler(crtc);
236     }
237 }
238 EXPORT_SYMBOL(rockchip_drm_te_handle);
239 
240 static const struct drm_display_mode rockchip_drm_default_modes[] = {
241     /* 4 - 1280x720@60Hz 16:9 */
242     {
243         DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390, 1430, 1650, 0, 720, 725, 730, 750, 0,
244                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
245         .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9,
246     },
247     /* 16 - 1920x1080@60Hz 16:9 */
248     {
249         DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
250                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
251         .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9,
252     },
253     /* 31 - 1920x1080@50Hz 16:9 */
254     {
255         DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448, 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
256                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
257         .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9,
258     },
259     /* 19 - 1280x720@50Hz 16:9 */
260     {
261         DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720, 1760, 1980, 0, 720, 725, 730, 750, 0,
262                  DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
263         .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9,
264     },
265     /* 0x10 - 1024x768@60Hz */
266     {DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, 1184, 1344, 0, 768, 771, 777, 806, 0,
267               DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
268     /* 17 - 720x576@50Hz 4:3 */
269     {
270         DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732, 796, 864, 0, 576, 581, 586, 625, 0,
271                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
272         .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3,
273     },
274     /* 2 - 720x480@60Hz 4:3 */
275     {
276         DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736, 798, 858, 0, 480, 489, 495, 525, 0,
277                  DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
278         .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3,
279     },
280 };
281 
282 int rockchip_drm_add_modes_noedid(struct drm_connector *connector)
283 {
284     struct drm_device *dev = connector->dev;
285     struct drm_display_mode *mode;
286     int i, count, num_modes = 0;
287 
288     mutex_lock(&rockchip_drm_sub_dev_lock);
289     count = ARRAY_SIZE(rockchip_drm_default_modes);
290 
291     for (i = 0; i < count; i++) {
292         const struct drm_display_mode *ptr = &rockchip_drm_default_modes[i];
293 
294         mode = drm_mode_duplicate(dev, ptr);
295         if (mode) {
296             if (!i) {
297                 mode->type = DRM_MODE_TYPE_PREFERRED;
298             }
299             drm_mode_probed_add(connector, mode);
300             num_modes++;
301         }
302     }
303     mutex_unlock(&rockchip_drm_sub_dev_lock);
304 
305     return num_modes;
306 }
307 EXPORT_SYMBOL(rockchip_drm_add_modes_noedid);
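/*
 * A connector's ->get_modes() hook may fall back to this table when EDID
 * cannot be read, roughly (hypothetical caller):
 *
 *    edid = drm_get_edid(connector, adapter);
 *    if (!edid)
 *        return rockchip_drm_add_modes_noedid(connector);
 *
 * The first entry, 1280x720@60, is marked DRM_MODE_TYPE_PREFERRED.
 */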
308 
309 static int cea_db_tag(const u8 *db)
310 {
311     return db[0] >> 0x5;
312 }
313 
314 static int cea_db_payload_len(const u8 *db)
315 {
316     return db[0] & 0x1f;
317 }
318 
319 #define for_each_cea_db(cea, i, start, end)                                                                            \
320     for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end);                                  \
321          (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
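/*
 * CTA-861 data block headers carry the tag in the top three bits and the
 * payload length in the low five bits of the first byte. A leading byte of
 * 0x72, for example, means tag 3 (vendor-specific) with an 18-byte payload,
 * so for_each_cea_db() advances i by 19 to reach the next block.
 */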
322 
323 #define HDMI_NEXT_HDR_VSDB_OUI 0xd04601
324 
325 static bool cea_db_is_hdmi_next_hdr_block(const u8 *db)
326 {
327     unsigned int oui;
328 
329     if (cea_db_tag(db) != 0x07) {
330         return false;
331     }
332 
333     if (cea_db_payload_len(db) < 0xb) {
334         return false;
335     }
336 
337     oui = (db[0x3] << 0x10) | (db[0x2] << 0x8) | db[1];
338 
339     return oui == HDMI_NEXT_HDR_VSDB_OUI;
340 }
341 
342 static bool cea_db_is_hdmi_forum_vsdb(const u8 *db)
343 {
344     unsigned int oui;
345 
346     if (cea_db_tag(db) != 0x03) {
347         return false;
348     }
349 
350     if (cea_db_payload_len(db) < 0x7) {
351         return false;
352     }
353 
354     oui = (db[0x3] << 0x10) | (db[0x2] << 0x8) | db[1];
355 
356     return oui == HDMI_FORUM_IEEE_OUI;
357 }
358 
359 static int cea_db_offsets(const u8 *cea, int *start, int *end)
360 {
361     /* DisplayID CTA extension blocks and top-level CEA EDID
362      * block header definitions differ in the following bytes:
363      *   1) Byte 2 of the header specifies length differently,
364      *   2) Byte 3 is only present in the CEA top level block.
365      *
366      * The different definitions for byte 2 follow.
367      *
368      * DisplayID CTA extension block defines byte 2 as:
369      *   Number of payload bytes
370      *
371      * CEA EDID block defines byte 2 as:
372      *   Byte number (decimal) within this block where the 18-byte
373      *   DTDs begin. If no non-DTD data is present in this extension
374      *   block, the value should be set to 04h (the byte after next).
375      *   If set to 00h, there are no DTDs present in this block and
376      *   no non-DTD data.
377      */
378     if (cea[0] == 0x81) {
379         /*
380          * for_each_displayid_db() has already verified
381          * that these stay within expected bounds.
382          */
383         *start = 0x3;
384         *end = *start + cea[0x2];
385     } else if (cea[0] == 0x02) {
386         /* Data block offset in CEA extension block */
387         *start = 0x4;
388         *end = cea[0x2];
389         if (*end == 0) {
390             *end = 0x7f;
391         }
392         if (*end < 0x4 || *end > 0x7f) {
393             return -ERANGE;
394         }
395     } else {
396         return -EOPNOTSUPP;
397     }
398 
399     return 0;
400 }
401 
402 static u8 *find_edid_extension(const struct edid *edid, int ext_id, int *ext_index)
403 {
404     u8 *edid_ext = NULL;
405     int i;
406 
407     /* No EDID or EDID extensions */
408     if (edid == NULL || edid->extensions == 0) {
409         return NULL;
410     }
411 
412     /* Find CEA extension */
413     for (i = *ext_index; i < edid->extensions; i++) {
414         edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
415         if (edid_ext[0] == ext_id) {
416             break;
417         }
418     }
419 
420     if (i >= edid->extensions) {
421         return NULL;
422     }
423 
424     *ext_index = i + 1;
425 
426     return edid_ext;
427 }
428 
429 static int validate_displayid(u8 *displayid, int length, int idx)
430 {
431     int i, dispid_length;
432     u8 csum = 0;
433     struct displayid_hdr *base;
434 
435     base = (struct displayid_hdr *)&displayid[idx];
436 
437     DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n", base->rev, base->bytes, base->prod_id, base->ext_count);
438 
439     /* +1 for DispID checksum */
440     dispid_length = sizeof(*base) + base->bytes + 1;
441     if (dispid_length > length - idx) {
442         return -EINVAL;
443     }
444 
445     for (i = 0; i < dispid_length; i++) {
446         csum += displayid[idx + i];
447     }
448     if (csum) {
449         DRM_NOTE("DisplayID checksum invalid, remainder is %d\n", csum);
450         return -EINVAL;
451     }
452 
453     return 0;
454 }
455 
456 static u8 *find_displayid_extension(const struct edid *edid, int *length, int *idx, int *ext_index)
457 {
458     u8 *displayid = find_edid_extension(edid, 0x70, ext_index);
459     struct displayid_hdr *base;
460     int ret;
461 
462     if (!displayid) {
463         return NULL;
464     }
465 
466     /* EDID extensions block checksum isn't for us */
467     *length = EDID_LENGTH - 1;
468     *idx = 1;
469 
470     ret = validate_displayid(displayid, *length, *idx);
471     if (ret) {
472         return NULL;
473     }
474 
475     base = (struct displayid_hdr *)&displayid[*idx];
476     *length = *idx + sizeof(*base) + base->bytes;
477 
478     return displayid;
479 }
480 
481 static u8 *find_cea_extension(const struct edid *edid)
482 {
483     int length, idx;
484     struct displayid_block *block;
485     u8 *cea;
486     u8 *displayid;
487     int ext_index;
488 
489     /* Look for a top level CEA extension block */
490     /* make callers iterate through multiple CEA ext blocks? */
491     ext_index = 0;
492     cea = find_edid_extension(edid, 0x02, &ext_index);
493     if (cea) {
494         return cea;
495     }
496 
497     /* CEA blocks can also be found embedded in a DisplayID block */
498     ext_index = 0;
499     for (;;) {
500         displayid = find_displayid_extension(edid, &length, &idx, &ext_index);
501         if (!displayid) {
502             return NULL;
503         }
504 
505         idx += sizeof(struct displayid_hdr);
506         for_each_displayid_db(displayid, block, idx, length)
507         {
508             if (block->tag == 0x81) {
509                 return (u8 *)block;
510             }
511         }
512     }
513 
514     return NULL;
515 }
516 
517 #define EDID_CEA_YCRCB422 (1 << 0x4)
518 
519 int rockchip_drm_get_yuv422_format(struct drm_connector *connector, struct edid *edid)
520 {
521     struct drm_display_info *info;
522     const u8 *edid_ext;
523 
524     if (!connector || !edid) {
525         return -EINVAL;
526     }
527 
528     info = &connector->display_info;
529 
530     edid_ext = find_cea_extension(edid);
531     if (!edid_ext) {
532         return -EINVAL;
533     }
534 
535     if (edid_ext[0x3] & EDID_CEA_YCRCB422) {
536         info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
537     }
538 
539     return 0;
540 }
541 EXPORT_SYMBOL(rockchip_drm_get_yuv422_format);
542 
543 static void get_max_frl_rate(int max_frl_rate, u8 *max_lanes, u8 *max_rate_per_lane)
544 {
545     switch (max_frl_rate) {
546         case 0x1:
547             *max_lanes = 0x3;
548             *max_rate_per_lane = 0x3;
549             break;
550         case 0x2:
551             *max_lanes = 0x3;
552             *max_rate_per_lane = 0x6;
553             break;
554         case 0x3:
555             *max_lanes = 0x4;
556             *max_rate_per_lane = 0x6;
557             break;
558         case 0x4:
559             *max_lanes = 0x4;
560             *max_rate_per_lane = 0x8;
561             break;
562         case 0x5:
563             *max_lanes = 0x4;
564             *max_rate_per_lane = 0xa;
565             break;
566         case 0x6:
567             *max_lanes = 0x4;
568             *max_rate_per_lane = 0xc;
569             break;
570         case 0:
571         default:
572             *max_lanes = 0;
573             *max_rate_per_lane = 0;
574     }
575 }
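/*
 * Example: an HF-VSDB Max_FRL_Rate field of 5 decodes to 4 lanes at
 * 10 Gbps per lane (the table above follows the HDMI 2.1 FRL rate
 * encoding), i.e. a sink capable of 40 Gbps FRL.
 */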
576 
577 #define EDID_DSC_10BPC (1 << 0)
578 #define EDID_DSC_12BPC (1 << 1)
579 #define EDID_DSC_16BPC (1 << 2)
580 #define EDID_DSC_ALL_BPP (1 << 3)
581 #define EDID_DSC_NATIVE_420 (1 << 6)
582 #define EDID_DSC_1P2 (1 << 7)
583 #define EDID_DSC_MAX_FRL_RATE_MASK 0xf0
584 #define EDID_DSC_MAX_SLICES 0xf
585 #define EDID_DSC_TOTAL_CHUNK_KBYTES 0x3f
586 #define EDID_MAX_FRL_RATE_MASK 0xf0
587 
588 static void parse_edid_forum_vsdb(struct rockchip_drm_dsc_cap *dsc_cap, u8 *max_frl_rate_per_lane, u8 *max_lanes,
589                                   const u8 *hf_vsdb)
590 {
591     u8 max_frl_rate;
592     u8 dsc_max_frl_rate;
593     u8 dsc_max_slices;
594 
595     if (!hf_vsdb[0x7]) {
596         return;
597     }
598 
599     DRM_DEBUG_KMS("hdmi_21 sink detected. parsing edid\n");
600     max_frl_rate = (hf_vsdb[0x7] & EDID_MAX_FRL_RATE_MASK) >> 0x4;
601     get_max_frl_rate(max_frl_rate, max_lanes, max_frl_rate_per_lane);
602 
603     if (cea_db_payload_len(hf_vsdb) < 0xd) {
604         return;
605     }
606 
607     dsc_cap->v_1p2 = hf_vsdb[0xb] & EDID_DSC_1P2;
608 
609     if (!dsc_cap->v_1p2) {
610         return;
611     }
612 
613     dsc_cap->native_420 = hf_vsdb[0xb] & EDID_DSC_NATIVE_420;
614     dsc_cap->all_bpp = hf_vsdb[0xb] & EDID_DSC_ALL_BPP;
615 
616     if (hf_vsdb[0xb] & EDID_DSC_16BPC) {
617         dsc_cap->bpc_supported = 0x10;
618     } else if (hf_vsdb[0xb] & EDID_DSC_12BPC) {
619         dsc_cap->bpc_supported = 0xc;
620     } else if (hf_vsdb[0xb] & EDID_DSC_10BPC) {
621         dsc_cap->bpc_supported = 0xa;
622     } else {
623         dsc_cap->bpc_supported = 0;
624     }
625 
626     dsc_max_frl_rate = (hf_vsdb[0xc] & EDID_DSC_MAX_FRL_RATE_MASK) >> 0x4;
627     get_max_frl_rate(dsc_max_frl_rate, &dsc_cap->max_lanes, &dsc_cap->max_frl_rate_per_lane);
628     dsc_cap->total_chunk_kbytes = hf_vsdb[0xd] & EDID_DSC_TOTAL_CHUNK_KBYTES;
629 
630     dsc_max_slices = hf_vsdb[0xc] & EDID_DSC_MAX_SLICES;
631     switch (dsc_max_slices) {
632         case 0x1:
633             dsc_cap->max_slices = 0x1;
634             dsc_cap->clk_per_slice = 0x154;
635             break;
636         case 0x2:
637             dsc_cap->max_slices = 0x2;
638             dsc_cap->clk_per_slice = 0x154;
639             break;
640         case 0x3:
641             dsc_cap->max_slices = 0x4;
642             dsc_cap->clk_per_slice = 0x154;
643             break;
644         case 0x4:
645             dsc_cap->max_slices = 0x8;
646             dsc_cap->clk_per_slice = 0x154;
647             break;
648         case 0x5:
649             dsc_cap->max_slices = 0x8;
650             dsc_cap->clk_per_slice = 0x190;
651             break;
652         case 0x6:
653             dsc_cap->max_slices = 0xc;
654             dsc_cap->clk_per_slice = 0x190;
655             break;
656         case 0x7:
657             dsc_cap->max_slices = 0x10;
658             dsc_cap->clk_per_slice = 0x190;
659             break;
660         case 0:
661         default:
662             dsc_cap->max_slices = 0;
663             dsc_cap->clk_per_slice = 0;
664     }
665 }
666 
667 enum {
668     VER_26_BYTE_V0,
669     VER_15_BYTE_V1,
670     VER_12_BYTE_V1,
671     VER_12_BYTE_V2,
672 };
673 
674 static int check_next_hdr_version(const u8 *next_hdr_db)
675 {
676     u16 ver;
677 
678     ver = ((next_hdr_db[0x5] & 0xf0) << 0x8) | next_hdr_db[0];
679 
680     switch (ver) {
681         case 0x00f9:
682             return VER_26_BYTE_V0;
683         case 0x20ee:
684             return VER_15_BYTE_V1;
685         case 0x20eb:
686             return VER_12_BYTE_V1;
687         case 0x40eb:
688             return VER_12_BYTE_V2;
689         default:
690             return -ENOENT;
691     }
692 }
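/*
 * The version word combines byte 0 with the high nibble of byte 5: a block
 * with next_hdr_db[0] == 0xee and (next_hdr_db[0x5] & 0xf0) == 0x20 yields
 * ver == 0x20ee and is parsed with the 15-byte v1 layout below.
 */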
693 
694 static void parse_ver_26_v0_data(struct ver_26_v0 *hdr, const u8 *data)
695 {
696     hdr->yuv422_12bit = data[0x5] & BIT(0x0);
697     hdr->support_2160p_60 = (data[0x5] & BIT(0x1)) >> 0x1;
698     hdr->global_dimming = (data[0x5] & BIT(0x2)) >> 0x2;
699 
700     hdr->dm_major_ver = (data[0x15] & 0xf0) >> 0x4;
701     hdr->dm_minor_ver = data[0x15] & 0xf;
702 
703     hdr->t_min_pq = (data[0x13] << 0x4) | ((data[0x12] & 0xf0) >> 0x4);
704     hdr->t_max_pq = (data[0x14] << 0x4) | (data[0x12] & 0xf);
705 
706     hdr->rx = (data[0x7] << 0x4) | ((data[0x6] & 0xf0) >> 0x4);
707     hdr->ry = (data[0x8] << 0x4) | (data[0x6] & 0xf);
708     hdr->gx = (data[0xa] << 0x4) | ((data[0x9] & 0xf0) >> 0x4);
709     hdr->gy = (data[0xb] << 0x4) | (data[0x9] & 0xf);
710     hdr->bx = (data[0xd] << 0x4) | ((data[0xc] & 0xf0) >> 0x4);
711     hdr->by = (data[0xe] << 0x4) | (data[0xc] & 0xf);
712     hdr->wx = (data[0x10] << 0x4) | ((data[0xf] & 0xf0) >> 0x4);
713     hdr->wy = (data[0x11] << 0x4) | (data[0xf] & 0xf);
714 }
715 
716 static void parse_ver_15_v1_data(struct ver_15_v1 *hdr, const u8 *data)
717 {
718     hdr->yuv422_12bit = data[0x5] & BIT(0x0);
719     hdr->support_2160p_60 = (data[0x5] & BIT(0x1)) >> 0x1;
720     hdr->global_dimming = data[0x6] & BIT(0x0);
721 
722     hdr->dm_version = (data[0x5] & 0x1c) >> 0x2;
723 
724     hdr->colorimetry = data[0x7] & BIT(0x0);
725 
726     hdr->t_max_lum = (data[0x6] & 0xfe) >> 0x1;
727     hdr->t_min_lum = (data[0x7] & 0xfe) >> 0x1;
728 
729     hdr->rx = data[0x9];
730     hdr->ry = data[0xa];
731     hdr->gx = data[0xb];
732     hdr->gy = data[0xc];
733     hdr->bx = data[0xd];
734     hdr->by = data[0xe];
735 }
736 
737 static void parse_ver_12_v1_data(struct ver_12_v1 *hdr, const u8 *data)
738 {
739     hdr->yuv422_12bit = data[0x5] & BIT(0);
740     hdr->support_2160p_60 = (data[0x5] & BIT(1)) >> 1;
741     hdr->global_dimming = data[0x6] & BIT(0);
742 
743     hdr->dm_version = (data[0x5] & 0x1c) >> 0x2;
744 
745     hdr->colorimetry = data[0x7] & BIT(0);
746 
747     hdr->t_max_lum = (data[0x6] & 0xfe) >> 1;
748     hdr->t_min_lum = (data[0x7] & 0xfe) >> 1;
749 
750     hdr->low_latency = data[0x8] & 0x3;
751 
752     hdr->unique_rx = (data[0xb] & 0xf8) >> 0x3;
753     hdr->unique_ry = ((data[0xb] & 0x7) << 0x2) | ((data[0xa] & BIT(0)) << 1) | (data[0x9] & BIT(0));
754     hdr->unique_gx = (data[0x9] & 0xfe) >> 1;
755     hdr->unique_gy = (data[0xa] & 0xfe) >> 1;
756     hdr->unique_bx = (data[0x8] & 0xe0) >> 0x5;
757     hdr->unique_by = (data[0x8] & 0x1c) >> 0x2;
758 }
759 
760 static void parse_ver_12_v2_data(struct ver_12_v2 *hdr, const u8 *data)
761 {
762     hdr->yuv422_12bit = data[0x5] & BIT(0);
763     hdr->backlt_ctrl = (data[0x5] & BIT(1)) >> 1;
764     hdr->global_dimming = (data[0x6] & BIT(0x2)) >> 0x2;
765 
766     hdr->dm_version = (data[0x5] & 0x1c) >> 0x2;
767     hdr->backlt_min_luma = data[0x6] & 0x3;
768     hdr->interface = data[0x7] & 0x3;
769     hdr->yuv444_10b_12b = ((data[0x8] & BIT(0)) << 1) | (data[0x9] & BIT(0));
770 
771     hdr->t_min_pq_v2 = (data[0x6] & 0xf8) >> 0x3;
772     hdr->t_max_pq_v2 = (data[0x7] & 0xf8) >> 0x3;
773 
774     hdr->unique_rx = (data[0xa] & 0xf8) >> 0x3;
775     hdr->unique_ry = (data[0xb] & 0xf8) >> 0x3;
776     hdr->unique_gx = (data[0x8] & 0xfe) >> 1;
777     hdr->unique_gy = (data[0x9] & 0xfe) >> 1;
778     hdr->unique_bx = data[0xa] & 0x7;
779     hdr->unique_by = data[0xb] & 0x7;
780 }
781 
782 static void parse_next_hdr_block(struct next_hdr_sink_data *sink_data, const u8 *next_hdr_db)
783 {
784     int version;
785 
786     version = check_next_hdr_version(next_hdr_db);
787     if (version < 0) {
788         return;
789     }
790 
791     sink_data->version = version;
792 
793     switch (version) {
794         case VER_26_BYTE_V0:
795             parse_ver_26_v0_data(&sink_data->ver_26_v0, next_hdr_db);
796             break;
797         case VER_15_BYTE_V1:
798             parse_ver_15_v1_data(&sink_data->ver_15_v1, next_hdr_db);
799             break;
800         case VER_12_BYTE_V1:
801             parse_ver_12_v1_data(&sink_data->ver_12_v1, next_hdr_db);
802             break;
803         case VER_12_BYTE_V2:
804             parse_ver_12_v2_data(&sink_data->ver_12_v2, next_hdr_db);
805             break;
806         default:
807             break;
808     }
809 }
810 
811 int rockchip_drm_parse_cea_ext(struct rockchip_drm_dsc_cap *dsc_cap, u8 *max_frl_rate_per_lane, u8 *max_lanes,
812                                const struct edid *edid)
813 {
814     const u8 *edid_ext;
815     int i, start, end;
816 
817     if (!dsc_cap || !max_frl_rate_per_lane || !max_lanes || !edid) {
818         return -EINVAL;
819     }
820 
821     edid_ext = find_cea_extension(edid);
822     if (!edid_ext) {
823         return -EINVAL;
824     }
825 
826     if (cea_db_offsets(edid_ext, &start, &end)) {
827         return -EINVAL;
828     }
829 
830     for_each_cea_db(edid_ext, i, start, end) {
832         const u8 *db = &edid_ext[i];
833 
834         if (cea_db_is_hdmi_forum_vsdb(db)) {
835             parse_edid_forum_vsdb(dsc_cap, max_frl_rate_per_lane, max_lanes, db);
836         }
837     }
838 
839     return 0;
840 }
841 EXPORT_SYMBOL(rockchip_drm_parse_cea_ext);
842 
843 int rockchip_drm_parse_next_hdr(struct next_hdr_sink_data *sink_data, const struct edid *edid)
844 {
845     const u8 *edid_ext;
846     int i, start, end;
847 
848     if (!sink_data || !edid) {
849         return -EINVAL;
850     }
851 
852     memset(sink_data, 0, sizeof(struct next_hdr_sink_data));
853 
854     edid_ext = find_cea_extension(edid);
855     if (!edid_ext) {
856         return -EINVAL;
857     }
858 
859     if (cea_db_offsets(edid_ext, &start, &end)) {
860         return -EINVAL;
861     }
862 
863     for_each_cea_db(edid_ext, i, start, end) {
865         const u8 *db = &edid_ext[i];
866 
867         if (cea_db_is_hdmi_next_hdr_block(db)) {
868             parse_next_hdr_block(sink_data, db);
869         }
870     }
871 
872     return 0;
873 }
874 EXPORT_SYMBOL(rockchip_drm_parse_next_hdr);
875 
876 /*
877  * Attach a (component) device to the shared drm dma mapping from master drm
878  * device.  This is used by the VOPs to map GEM buffers to a common DMA
879  * mapping.
880  */
881 int rockchip_drm_dma_attach_device(struct drm_device *drm_dev, struct device *dev)
882 {
883     struct rockchip_drm_private *private = drm_dev->dev_private;
884     int ret;
885 
886     if (!is_support_iommu) {
887         return 0;
888     }
889 
890     ret = iommu_attach_device(private->domain, dev);
891     if (ret) {
892         DRM_DEV_ERROR(dev, "Failed to attach iommu device\n");
893         return ret;
894     }
895 
896     return 0;
897 }
898 
899 void rockchip_drm_dma_detach_device(struct drm_device *drm_dev, struct device *dev)
900 {
901     struct rockchip_drm_private *private = drm_dev->dev_private;
902     struct iommu_domain *domain = private->domain;
903 
904     if (!is_support_iommu) {
905         return;
906     }
907 
908     iommu_detach_device(domain, dev);
909 }
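/*
 * Expected calling pattern from a VOP component, sketched (error handling
 * omitted): attach against the shared drm_dev while the VOP is brought up
 * and detach when it is shut down, so every VOP lands in the single IOMMU
 * domain created by rockchip_drm_init_iommu():
 *
 *    ret = rockchip_drm_dma_attach_device(drm_dev, vop->dev);
 *    ...
 *    rockchip_drm_dma_detach_device(drm_dev, vop->dev);
 */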
910 
911 int rockchip_register_crtc_funcs(struct drm_crtc *crtc, const struct rockchip_crtc_funcs *crtc_funcs)
912 {
913     int pipe = drm_crtc_index(crtc);
914     struct rockchip_drm_private *priv = crtc->dev->dev_private;
915 
916     if (pipe >= ROCKCHIP_MAX_CRTC) {
917         return -EINVAL;
918     }
919 
920     priv->crtc_funcs[pipe] = crtc_funcs;
921 
922     return 0;
923 }
924 
925 void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc)
926 {
927     int pipe = drm_crtc_index(crtc);
928     struct rockchip_drm_private *priv = crtc->dev->dev_private;
929 
930     if (pipe >= ROCKCHIP_MAX_CRTC) {
931         return;
932     }
933 
934     priv->crtc_funcs[pipe] = NULL;
935 }
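/*
 * A CRTC implementation is expected to register its private hooks once its
 * drm_crtc exists and to unregister them before teardown, e.g.
 * (hypothetical names):
 *
 *    ret = rockchip_register_crtc_funcs(&vop->crtc, &vop_private_crtc_funcs);
 *
 * rockchip_drm_te_handle() and the IOMMU fault handler below merely check
 * the per-pipe slot for NULL before calling into these hooks.
 */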
936 
937 static int rockchip_drm_fault_handler(struct iommu_domain *iommu, struct device *dev, unsigned long iova, int flags,
938                                       void *arg)
939 {
940     struct drm_device *drm_dev = arg;
941     struct rockchip_drm_private *priv = drm_dev->dev_private;
942     struct drm_crtc *crtc;
943 
944     DRM_ERROR("iommu fault handler flags: 0x%x\n", flags);
945     drm_for_each_crtc(crtc, drm_dev)
946     {
947         int pipe = drm_crtc_index(crtc);
948         if (priv->crtc_funcs[pipe] && priv->crtc_funcs[pipe]->regs_dump) {
949             priv->crtc_funcs[pipe]->regs_dump(crtc, NULL);
950         }
951 
952         if (priv->crtc_funcs[pipe] && priv->crtc_funcs[pipe]->debugfs_dump) {
953             priv->crtc_funcs[pipe]->debugfs_dump(crtc, NULL);
954         }
955     }
956 
957     return 0;
958 }
959 
960 static int rockchip_drm_init_iommu(struct drm_device *drm_dev)
961 {
962     struct rockchip_drm_private *private = drm_dev->dev_private;
963     struct iommu_domain_geometry *geometry;
964     u64 start, end;
965 
966     if (!is_support_iommu) {
967         return 0;
968     }
969 
970     private->domain = iommu_domain_alloc(&platform_bus_type);
971     if (!private->domain) {
972         return -ENOMEM;
973     }
974 
975     geometry = &private->domain->geometry;
976     start = geometry->aperture_start;
977     end = geometry->aperture_end;
978 
979     DRM_DEBUG("IOMMU context initialized (aperture: %#llx-%#llx)\n", start, end);
980     drm_mm_init(&private->mm, start, end - start + 1);
981     mutex_init(&private->mm_lock);
982 
983     iommu_set_fault_handler(private->domain, rockchip_drm_fault_handler, drm_dev);
984 
985     return 0;
986 }
987 
988 static void rockchip_iommu_cleanup(struct drm_device *drm_dev)
989 {
990     struct rockchip_drm_private *private = drm_dev->dev_private;
991 
992     if (!is_support_iommu) {
993         return;
994     }
995 
996     drm_mm_takedown(&private->mm);
997     iommu_domain_free(private->domain);
998 }
999 
1000 #ifdef CONFIG_DEBUG_FS
1001 static int rockchip_drm_mm_dump(struct seq_file *s, void *data)
1002 {
1003     struct drm_info_node *node = s->private;
1004     struct drm_minor *minor = node->minor;
1005     struct drm_device *drm_dev = minor->dev;
1006     struct rockchip_drm_private *priv = drm_dev->dev_private;
1007     struct drm_printer p = drm_seq_file_printer(s);
1008 
1009     if (!priv->domain) {
1010         return 0;
1011     }
1012     mutex_lock(&priv->mm_lock);
1013     drm_mm_print(&priv->mm, &p);
1014     mutex_unlock(&priv->mm_lock);
1015 
1016     return 0;
1017 }
1018 
1019 static int rockchip_drm_summary_show(struct seq_file *s, void *data)
1020 {
1021     struct drm_info_node *node = s->private;
1022     struct drm_minor *minor = node->minor;
1023     struct drm_device *drm_dev = minor->dev;
1024     struct rockchip_drm_private *priv = drm_dev->dev_private;
1025     struct drm_crtc *crtc;
1026 
1027     drm_for_each_crtc(crtc, drm_dev)
1028     {
1029         int pipe = drm_crtc_index(crtc);
1030         if (priv->crtc_funcs[pipe] && priv->crtc_funcs[pipe]->debugfs_dump) {
1031             priv->crtc_funcs[pipe]->debugfs_dump(crtc, s);
1032         }
1033     }
1034 
1035     return 0;
1036 }
1037 
1038 static struct drm_info_list rockchip_debugfs_files[] = {
1039     {"summary", rockchip_drm_summary_show, 0, NULL},
1040     {"mm_dump", rockchip_drm_mm_dump, 0, NULL},
1041 };
1042 
1043 static void rockchip_drm_debugfs_init(struct drm_minor *minor)
1044 {
1045     struct drm_device *dev = minor->dev;
1046     struct rockchip_drm_private *priv = dev->dev_private;
1047     struct drm_crtc *crtc;
1048 
1049     drm_debugfs_create_files(rockchip_debugfs_files, ARRAY_SIZE(rockchip_debugfs_files), minor->debugfs_root, minor);
1050 
1051     drm_for_each_crtc(crtc, dev)
1052     {
1053         int pipe = drm_crtc_index(crtc);
1054         if (priv->crtc_funcs[pipe] && priv->crtc_funcs[pipe]->debugfs_init) {
1055             priv->crtc_funcs[pipe]->debugfs_init(minor, crtc);
1056         }
1057     }
1058 }
1059 #endif
1060 
1061 static int rockchip_drm_create_properties(struct drm_device *dev)
1062 {
1063     struct drm_property *prop;
1064     struct rockchip_drm_private *private = dev->dev_private;
1065 
1066     prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, "EOTF", 0, 0x5);
1067     if (!prop) {
1068         return -ENOMEM;
1069     }
1070     private->eotf_prop = prop;
1071 
1072     prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, "COLOR_SPACE", 0, 0xc);
1073     if (!prop) {
1074         return -ENOMEM;
1075     }
1076     private->color_space_prop = prop;
1077 
1078     prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, "ASYNC_COMMIT", 0, 1);
1079     if (!prop) {
1080         return -ENOMEM;
1081     }
1082     private->async_commit_prop = prop;
1083 
1084     prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, "SHARE_ID", 0, UINT_MAX);
1085     if (!prop) {
1086         return -ENOMEM;
1087     }
1088     private->share_id_prop = prop;
1089 
1090     prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_IMMUTABLE, "CONNECTOR_ID", 0, 0xf);
1091     if (!prop) {
1092         return -ENOMEM;
1093     }
1094     private->connector_id_prop = prop;
1095 
1096     prop =
1097         drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_IMMUTABLE, "SOC_ID", DRM_MODE_OBJECT_CRTC);
1098     private->soc_id_prop = prop;
1099 
1100     prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_IMMUTABLE, "PORT_ID",
1101                                       DRM_MODE_OBJECT_CRTC);
1102     private->port_id_prop = prop;
1103 
1104     private->aclk_prop = drm_property_create_range(dev, 0, "ACLK", 0, UINT_MAX);
1105     private->bg_prop = drm_property_create_range(dev, 0, "BACKGROUND", 0, UINT_MAX);
1106     private->line_flag_prop = drm_property_create_range(dev, 0, "LINE_FLAG1", 0, UINT_MAX);
1107 
1108     return drm_mode_create_tv_properties(dev, 0, NULL);
1109 }
1110 
1111 static void rockchip_attach_connector_property(struct drm_device *drm)
1112 {
1113     struct drm_connector *connector;
1114     struct drm_mode_config *conf = &drm->mode_config;
1115     struct drm_connector_list_iter conn_iter;
1116 
1117     mutex_lock(&drm->mode_config.mutex);
1118 
1119 #define ROCKCHIP_PROP_ATTACH(prop, v) drm_object_attach_property(&connector->base, prop, v)
1120 
1121     drm_connector_list_iter_begin(drm, &conn_iter);
1122     drm_for_each_connector_iter(connector, &conn_iter)
1123     {
1124         ROCKCHIP_PROP_ATTACH(conf->tv_brightness_property, 0x32);
1125         ROCKCHIP_PROP_ATTACH(conf->tv_contrast_property, 0x32);
1126         ROCKCHIP_PROP_ATTACH(conf->tv_saturation_property, 0x32);
1127         ROCKCHIP_PROP_ATTACH(conf->tv_hue_property, 0x32);
1128     }
1129     drm_connector_list_iter_end(&conn_iter);
1130 #undef ROCKCHIP_PROP_ATTACH
1131 
1132     mutex_unlock(&drm->mode_config.mutex);
1133 }
1134 
1135 static void rockchip_drm_set_property_default(struct drm_device *drm)
1136 {
1137     struct drm_connector *connector;
1138     struct drm_mode_config *conf = &drm->mode_config;
1139     struct drm_atomic_state *state;
1140     int ret;
1141     struct drm_connector_list_iter conn_iter;
1142 
1143     drm_modeset_lock_all(drm);
1144 
1145     state = drm_atomic_helper_duplicate_state(drm, conf->acquire_ctx);
1146     if (!state) {
1147         DRM_ERROR("failed to alloc atomic state\n");
1148         goto err_unlock;
1149     }
1150     state->acquire_ctx = conf->acquire_ctx;
1151 
1152     drm_connector_list_iter_begin(drm, &conn_iter);
1153     drm_for_each_connector_iter(connector, &conn_iter)
1154     {
1155         struct drm_connector_state *connector_state;
1156 
1157         connector_state = drm_atomic_get_connector_state(state, connector);
1158         if (IS_ERR(connector_state)) {
1159             DRM_ERROR("Connector[%d]: Failed to get state\n", connector->base.id);
1160             continue;
1161         }
1162 
1163         connector_state->tv.brightness = 0x32;
1164         connector_state->tv.contrast = 0x32;
1165         connector_state->tv.saturation = 0x32;
1166         connector_state->tv.hue = 0x32;
1167     }
1168     drm_connector_list_iter_end(&conn_iter);
1169 
1170     ret = drm_atomic_commit(state);
1171     WARN_ON(ret == -EDEADLK);
1172     if (ret) {
1173         DRM_ERROR("Failed to update properties\n");
1174     }
1175     drm_atomic_state_put(state);
1176 
1177 err_unlock:
1178     drm_modeset_unlock_all(drm);
1179 }
1180 
1181 static int rockchip_gem_pool_init(struct drm_device *drm)
1182 {
1183     struct rockchip_drm_private *private = drm->dev_private;
1184     struct device_node *np = drm->dev->of_node;
1185     struct device_node *node;
1186     phys_addr_t start, size;
1187     struct resource res;
1188     int ret;
1189 
1190     node = of_parse_phandle(np, "secure-memory-region", 0);
1191     if (!node) {
1192         return -ENXIO;
1193     }
1194 
1195     ret = of_address_to_resource(node, 0, &res);
1196     if (ret) {
1197         return ret;
1198     }
1199     start = res.start;
1200     size = resource_size(&res);
1201     if (!size) {
1202         return -ENOMEM;
1203     }
1204 
1205     private->secure_buffer_pool = gen_pool_create(PAGE_SHIFT, -1);
1206     if (!private->secure_buffer_pool) {
1207         return -ENOMEM;
1208     }
1209 
1210     gen_pool_add(private->secure_buffer_pool, start, size, -1);
1211 
1212     return 0;
1213 }
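/*
 * The pool only exists when the display subsystem node points at a
 * reserved-memory region, e.g. (hypothetical devicetree snippet):
 *
 *    display-subsystem {
 *        compatible = "rockchip,display-subsystem";
 *        secure-memory-region = <&drm_secure_reserved>;
 *    };
 *
 * Without that phandle this function returns -ENXIO and secure buffer
 * allocation is simply unavailable.
 */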
1214 
1215 static void rockchip_gem_pool_destroy(struct drm_device *drm)
1216 {
1217     struct rockchip_drm_private *private = drm->dev_private;
1218 
1219     if (!private->secure_buffer_pool) {
1220         return;
1221     }
1222 
1223     gen_pool_destroy(private->secure_buffer_pool);
1224 }
1225 
1226 static int rockchip_drm_bind(struct device *dev)
1227 {
1228     struct drm_device *drm_dev;
1229     struct rockchip_drm_private *private;
1230     int ret;
1231 
1232     drm_dev = drm_dev_alloc(&rockchip_drm_driver, dev);
1233     if (IS_ERR(drm_dev)) {
1234         return PTR_ERR(drm_dev);
1235     }
1236 
1237     dev_set_drvdata(dev, drm_dev);
1238 
1239     private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL);
1240     if (!private) {
1241         ret = -ENOMEM;
1242         goto err_free;
1243     }
1244 
1245     mutex_init(&private->ovl_lock);
1246 
1247     drm_dev->dev_private = private;
1248 
1249     INIT_LIST_HEAD(&private->psr_list);
1250     mutex_init(&private->psr_list_lock);
1251     mutex_init(&private->commit_lock);
1252 
1253     private->hdmi_pll.pll = devm_clk_get_optional(dev, "hdmi-tmds-pll");
1254     if (PTR_ERR(private->hdmi_pll.pll) == -EPROBE_DEFER) {
1255         ret = -EPROBE_DEFER;
1256         goto err_free;
1257     } else if (IS_ERR(private->hdmi_pll.pll)) {
1258         dev_err(dev, "failed to get hdmi-tmds-pll\n");
1259         ret = PTR_ERR(private->hdmi_pll.pll);
1260         goto err_free;
1261     }
1262     private->default_pll.pll = devm_clk_get_optional(dev, "default-vop-pll");
1263     if (PTR_ERR(private->default_pll.pll) == -EPROBE_DEFER) {
1264         ret = -EPROBE_DEFER;
1265         goto err_free;
1266     } else if (IS_ERR(private->default_pll.pll)) {
1267         dev_err(dev, "failed to get default vop pll\n");
1268         ret = PTR_ERR(private->default_pll.pll);
1269         goto err_free;
1270     }
1271 
1272     ret = rockchip_drm_init_iommu(drm_dev);
1273     if (ret) {
1274         goto err_free;
1275     }
1276 
1277     ret = drmm_mode_config_init(drm_dev);
1278     if (ret) {
1279         goto err_iommu_cleanup;
1280     }
1281 
1282     rockchip_drm_mode_config_init(drm_dev);
1283     rockchip_drm_create_properties(drm_dev);
1284     /* Try to bind all sub drivers. */
1285     ret = component_bind_all(dev, drm_dev);
1286     if (ret) {
1287         goto err_mode_config_cleanup;
1288     }
1289 
1290     rockchip_attach_connector_property(drm_dev);
1291     ret = drm_vblank_init(drm_dev, drm_dev->mode_config.num_crtc);
1292     if (ret) {
1293         goto err_unbind_all;
1294     }
1295 
1296     drm_mode_config_reset(drm_dev);
1297     rockchip_drm_set_property_default(drm_dev);
1298 
1299     /*
1300      * enable drm irq mode.
1301      * - with irq_enabled = true, we can use the vblank feature.
1302      */
1303     drm_dev->irq_enabled = true;
1304 
1305     /* init kms poll for handling hpd */
1306     drm_kms_helper_poll_init(drm_dev);
1307 
1308     rockchip_gem_pool_init(drm_dev);
1309     ret = of_reserved_mem_device_init(drm_dev->dev);
1310     if (ret) {
1311         DRM_DEBUG_KMS("No reserved memory region assigned to drm\n");
1312     }
1313 
1314     rockchip_drm_show_logo(drm_dev);
1315 
1316     ret = rockchip_drm_fbdev_init(drm_dev);
1317     if (ret) {
1318         goto err_unbind_all;
1319     }
1320 
1321     drm_dev->mode_config.allow_fb_modifiers = true;
1322 
1323     ret = drm_dev_register(drm_dev, 0);
1324     if (ret) {
1325         goto err_kms_helper_poll_fini;
1326     }
1327 
1328     return 0;
1329 err_kms_helper_poll_fini:
1330     rockchip_gem_pool_destroy(drm_dev);
1331     drm_kms_helper_poll_fini(drm_dev);
1332     rockchip_drm_fbdev_fini(drm_dev);
1333 err_unbind_all:
1334     component_unbind_all(dev, drm_dev);
1335 err_mode_config_cleanup:
1336     drm_mode_config_cleanup(drm_dev);
1337 err_iommu_cleanup:
1338     rockchip_iommu_cleanup(drm_dev);
1339 err_free:
1340     drm_dev->dev_private = NULL;
1341     dev_set_drvdata(dev, NULL);
1342     drm_dev_put(drm_dev);
1343     return ret;
1344 }
1345 
1346 static void rockchip_drm_unbind(struct device *dev)
1347 {
1348     struct drm_device *drm_dev = dev_get_drvdata(dev);
1349 
1350     drm_dev_unregister(drm_dev);
1351 
1352     rockchip_drm_fbdev_fini(drm_dev);
1353     rockchip_gem_pool_destroy(drm_dev);
1354     drm_kms_helper_poll_fini(drm_dev);
1355 
1356     drm_atomic_helper_shutdown(drm_dev);
1357     component_unbind_all(dev, drm_dev);
1358     drm_mode_config_cleanup(drm_dev);
1359     rockchip_iommu_cleanup(drm_dev);
1360 
1361     drm_dev->dev_private = NULL;
1362     dev_set_drvdata(dev, NULL);
1363     drm_dev_put(drm_dev);
1364 }
1365 
1366 static void rockchip_drm_crtc_cancel_pending_vblank(struct drm_crtc *crtc, struct drm_file *file_priv)
1367 {
1368     struct rockchip_drm_private *priv = crtc->dev->dev_private;
1369     int pipe = drm_crtc_index(crtc);
1370     if (pipe < ROCKCHIP_MAX_CRTC && priv->crtc_funcs[pipe] && priv->crtc_funcs[pipe]->cancel_pending_vblank) {
1371         priv->crtc_funcs[pipe]->cancel_pending_vblank(crtc, file_priv);
1372     }
1373 }
1374 
1375 static int rockchip_drm_open(struct drm_device *dev, struct drm_file *file)
1376 {
1377     struct drm_crtc *crtc;
1378 
1379     drm_for_each_crtc(crtc, dev) crtc->primary->fb = NULL;
1380 
1381     return 0;
1382 }
1383 
1384 static void rockchip_drm_postclose(struct drm_device *dev, struct drm_file *file_priv)
1385 {
1386     struct drm_crtc *crtc;
1387 
1388     list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
1389         rockchip_drm_crtc_cancel_pending_vblank(crtc, file_priv);
1390 }
1391 
1392 static void rockchip_drm_lastclose(struct drm_device *dev)
1393 {
1394     struct rockchip_drm_private *priv = dev->dev_private;
1395 
1396     if (!priv->logo) {
1397         drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev_helper);
1398     }
1399 }
1400 
1401 static struct drm_pending_vblank_event *rockchip_drm_add_vcnt_event(struct drm_crtc *crtc, struct drm_file *file_priv)
1402 {
1403     struct drm_pending_vblank_event *e;
1404     struct drm_device *dev = crtc->dev;
1405     unsigned long flags;
1406 
1407     e = kzalloc(sizeof(*e), GFP_KERNEL);
1408     if (!e) {
1409         return NULL;
1410     }
1411 
1412     e->pipe = drm_crtc_index(crtc);
1413     e->event.base.type = DRM_EVENT_ROCKCHIP_CRTC_VCNT;
1414     e->event.base.length = sizeof(e->event.vbl);
1415     e->event.vbl.crtc_id = crtc->base.id;
1416     /* store crtc pipe id */
1417     e->event.vbl.user_data = e->pipe;
1418 
1419     spin_lock_irqsave(&dev->event_lock, flags);
1420     drm_event_reserve_init_locked(dev, file_priv, &e->base, &e->event.base);
1421     spin_unlock_irqrestore(&dev->event_lock, flags);
1422 
1423     return e;
1424 }
1425 
1426 static int rockchip_drm_get_vcnt_event_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1427 {
1428     struct rockchip_drm_private *priv = dev->dev_private;
1429     union drm_wait_vblank *vblwait = data;
1430     struct drm_pending_vblank_event *e;
1431     struct drm_crtc *crtc;
1432     unsigned int flags, pipe;
1433 
1434     flags = vblwait->request.type & (_DRM_VBLANK_FLAGS_MASK | _DRM_ROCKCHIP_VCNT_EVENT);
1435     pipe = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK);
1436     if (pipe) {
1437         pipe = pipe >> _DRM_VBLANK_HIGH_CRTC_SHIFT;
1438     } else {
1439         pipe = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
1440     }
1441 
1442     crtc = drm_crtc_from_index(dev, pipe);
1443 
1444     if (flags & _DRM_ROCKCHIP_VCNT_EVENT) {
1445         e = rockchip_drm_add_vcnt_event(crtc, file_priv);
1446         priv->vcnt[pipe].event = e;
1447     }
1448 
1449     return 0;
1450 }
1451 
1452 static const struct drm_ioctl_desc rockchip_ioctls[] = {
1453     DRM_IOCTL_DEF_DRV(ROCKCHIP_GEM_CREATE, rockchip_gem_create_ioctl, DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
1454     DRM_IOCTL_DEF_DRV(ROCKCHIP_GEM_MAP_OFFSET, rockchip_gem_map_offset_ioctl,
1455                       DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
1456     DRM_IOCTL_DEF_DRV(ROCKCHIP_GEM_GET_PHYS, rockchip_gem_get_phys_ioctl, DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
1457     DRM_IOCTL_DEF_DRV(ROCKCHIP_GET_VCNT_EVENT, rockchip_drm_get_vcnt_event_ioctl, DRM_UNLOCKED),
1458 };
1459 
1460 static const struct file_operations rockchip_drm_driver_fops = {
1461     .owner = THIS_MODULE,
1462     .open = drm_open,
1463     .mmap = rockchip_gem_mmap,
1464     .poll = drm_poll,
1465     .read = drm_read,
1466     .unlocked_ioctl = drm_ioctl,
1467     .compat_ioctl = drm_compat_ioctl,
1468     .release = drm_release,
1469 };
1470 
1471 static int rockchip_drm_gem_dmabuf_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction dir)
1472 {
1473     struct drm_gem_object *obj = dma_buf->priv;
1474 
1475     return rockchip_gem_prime_begin_cpu_access(obj, dir);
1476 }
1477 
1478 static int rockchip_drm_gem_dmabuf_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction dir)
1479 {
1480     struct drm_gem_object *obj = dma_buf->priv;
1481 
1482     return rockchip_gem_prime_end_cpu_access(obj, dir);
1483 }
1484 
1485 static int rockchip_drm_gem_begin_cpu_access_partial(struct dma_buf *dma_buf, enum dma_data_direction dir,
1486                                                      unsigned int offset, unsigned int len)
1487 {
1488     struct drm_gem_object *obj = dma_buf->priv;
1489 
1490     return rockchip_gem_prime_begin_cpu_access_partial(obj, dir, offset, len);
1491 }
1492 
1493 static int rockchip_drm_gem_end_cpu_access_partial(struct dma_buf *dma_buf, enum dma_data_direction dir,
1494                                                    unsigned int offset, unsigned int len)
1495 {
1496     struct drm_gem_object *obj = dma_buf->priv;
1497 
1498     return rockchip_gem_prime_end_cpu_access_partial(obj, dir, offset, len);
1499 }
1500 
1501 static const struct dma_buf_ops rockchip_drm_gem_prime_dmabuf_ops = {
1502     .cache_sgt_mapping = true,
1503     .attach = drm_gem_map_attach,
1504     .detach = drm_gem_map_detach,
1505     .map_dma_buf = drm_gem_map_dma_buf,
1506     .unmap_dma_buf = drm_gem_unmap_dma_buf,
1507     .release = drm_gem_dmabuf_release,
1508     .mmap = drm_gem_dmabuf_mmap,
1509     .vmap = drm_gem_dmabuf_vmap,
1510     .vunmap = drm_gem_dmabuf_vunmap,
1511     .get_uuid = drm_gem_dmabuf_get_uuid,
1512     .begin_cpu_access = rockchip_drm_gem_dmabuf_begin_cpu_access,
1513     .end_cpu_access = rockchip_drm_gem_dmabuf_end_cpu_access,
1514     .begin_cpu_access_partial = rockchip_drm_gem_begin_cpu_access_partial,
1515     .end_cpu_access_partial = rockchip_drm_gem_end_cpu_access_partial,
1516 };
1517 
1518 static struct drm_gem_object *rockchip_drm_gem_prime_import_dev(struct drm_device *dev, struct dma_buf *dma_buf,
1519                                                                 struct device *attach_dev)
1520 {
1521     struct dma_buf_attachment *attach;
1522     struct sg_table *sgt;
1523     struct drm_gem_object *obj;
1524     int ret;
1525 
1526     if (dma_buf->ops == &rockchip_drm_gem_prime_dmabuf_ops) {
1527         obj = dma_buf->priv;
1528         if (obj->dev == dev) {
1529             /*
1530              * Importing a dmabuf exported from our own GEM object increases
1531              * the refcount on the GEM object itself instead of the dmabuf's f_count.
1532              */
1533             drm_gem_object_get(obj);
1534             return obj;
1535         }
1536     }
1537 
1538     if (!dev->driver->gem_prime_import_sg_table) {
1539         return ERR_PTR(-EINVAL);
1540     }
1541 
1542     attach = dma_buf_attach(dma_buf, attach_dev);
1543     if (IS_ERR(attach)) {
1544         return ERR_CAST(attach);
1545     }
1546 
1547     get_dma_buf(dma_buf);
1548 
1549     sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
1550     if (IS_ERR(sgt)) {
1551         ret = PTR_ERR(sgt);
1552         goto fail_detach;
1553     }
1554 
1555     obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
1556     if (IS_ERR(obj)) {
1557         ret = PTR_ERR(obj);
1558         goto fail_unmap;
1559     }
1560 
1561     obj->import_attach = attach;
1562     obj->resv = dma_buf->resv;
1563 
1564     return obj;
1565 
1566 fail_unmap:
1567     dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
1568 fail_detach:
1569     dma_buf_detach(dma_buf, attach);
1570     dma_buf_put(dma_buf);
1571 
1572     return ERR_PTR(ret);
1573 }
1574 
1575 static struct drm_gem_object *rockchip_drm_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
1576 {
1577     return rockchip_drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
1578 }
1579 
1580 static struct dma_buf *rockchip_drm_gem_prime_export(struct drm_gem_object *obj, int flags)
1581 {
1582     struct drm_device *dev = obj->dev;
1583     struct dma_buf_export_info exp_info = {
1584         .exp_name = KBUILD_MODNAME, /* white lie for debug */
1585         .owner = dev->driver->fops->owner,
1586         .ops = &rockchip_drm_gem_prime_dmabuf_ops,
1587         .size = obj->size,
1588         .flags = flags,
1589         .priv = obj,
1590         .resv = obj->resv,
1591     };
1592 
1593     return drm_gem_dmabuf_export(dev, &exp_info);
1594 }
1595 
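/*
 * Top-level DRM driver description: atomic modesetting with GEM/PRIME
 * buffer sharing and render-node support, wired to the Rockchip GEM and
 * dumb-buffer helpers.
 */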
1596 static struct drm_driver rockchip_drm_driver = {
1597     .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC | DRIVER_RENDER,
1598     .postclose = rockchip_drm_postclose,
1599     .lastclose = rockchip_drm_lastclose,
1600     .open = rockchip_drm_open,
1601     .gem_vm_ops = &drm_gem_cma_vm_ops,
1602     .gem_free_object_unlocked = rockchip_gem_free_object,
1603     .dumb_create = rockchip_gem_dumb_create,
1604     .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
1605     .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
1606     .gem_prime_import = rockchip_drm_gem_prime_import,
1607     .gem_prime_export = rockchip_drm_gem_prime_export,
1608     .gem_prime_get_sg_table = rockchip_gem_prime_get_sg_table,
1609     .gem_prime_import_sg_table = rockchip_gem_prime_import_sg_table,
1610     .gem_prime_vmap = rockchip_gem_prime_vmap,
1611     .gem_prime_vunmap = rockchip_gem_prime_vunmap,
1612     .gem_prime_mmap = rockchip_gem_mmap_buf,
1613 #ifdef CONFIG_DEBUG_FS
1614     .debugfs_init = rockchip_drm_debugfs_init,
1615 #endif
1616     .ioctls = rockchip_ioctls,
1617     .num_ioctls = ARRAY_SIZE(rockchip_ioctls),
1618     .fops = &rockchip_drm_driver_fops,
1619     .name = DRIVER_NAME,
1620     .desc = DRIVER_DESC,
1621     .date = DRIVER_DATE,
1622     .major = DRIVER_MAJOR,
1623     .minor = DRIVER_MINOR,
1624 };
1625 
1626 #ifdef CONFIG_PM_SLEEP
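/* System sleep support simply defers to the generic atomic suspend/resume helpers. */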
1627 static int rockchip_drm_sys_suspend(struct device *dev)
1628 {
1629     struct drm_device *drm = dev_get_drvdata(dev);
1630 
1631     return drm_mode_config_helper_suspend(drm);
1632 }
1633 
1634 static int rockchip_drm_sys_resume(struct device *dev)
1635 {
1636     struct drm_device *drm = dev_get_drvdata(dev);
1637 
1638     return drm_mode_config_helper_resume(drm);
1639 }
1640 #endif
1641 
1642 static const struct dev_pm_ops rockchip_drm_pm_ops = {
1643     SET_SYSTEM_SLEEP_PM_OPS(rockchip_drm_sys_suspend, rockchip_drm_sys_resume)};
1644 
1645 #define MAX_ROCKCHIP_SUB_DRIVERS 16
1646 static struct platform_driver *rockchip_sub_drivers[MAX_ROCKCHIP_SUB_DRIVERS];
1647 static int num_rockchip_sub_drivers;
1648 
1649 /*
1650  * Check whether a vop endpoint leads to a rockchip subdriver or an external bridge.
1651  * Should be called from the component bind stage of the drivers
1652  * to ensure that all subdrivers are probed.
1653  *
1654  * @ep: endpoint of a rockchip vop
1655  *
1656  * Returns true for a subdriver, false for an external bridge, and -ENODEV
1657  * if the remote port does not contain a device.
1658  */
1659 int rockchip_drm_endpoint_is_subdriver(struct device_node *ep)
1660 {
1661     struct device_node *node = of_graph_get_remote_port_parent(ep);
1662     struct platform_device *pdev;
1663     struct device_driver *drv;
1664     int i;
1665 
1666     if (!node) {
1667         return -ENODEV;
1668     }
1669 
1670     /* status disabled will prevent creation of platform-devices */
1671     pdev = of_find_device_by_node(node);
1672     of_node_put(node);
1673     if (!pdev) {
1674         return -ENODEV;
1675     }
1676 
1677     /*
1678      * All rockchip subdrivers have probed at this point, so
1679      * any device not having a driver now is an external bridge.
1680      */
1681     drv = pdev->dev.driver;
1682     if (!drv) {
1683         platform_device_put(pdev);
1684         return false;
1685     }
1686 
1687     for (i = 0; i < num_rockchip_sub_drivers; i++) {
1688         if (rockchip_sub_drivers[i] == to_platform_driver(drv)) {
1689             platform_device_put(pdev);
1690             return true;
1691         }
1692     }
1693 
1694     platform_device_put(pdev);
1695     return false;
1696 }
1697 
1698 static int compare_dev(struct device *dev, void *data)
1699 {
1700     return dev == (struct device *)data;
1701 }
1702 
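/* Drop the stateless device links created by rockchip_drm_match_add(). */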
1703 static void rockchip_drm_match_remove(struct device *dev)
1704 {
1705     struct device_link *link;
1706 
1707     list_for_each_entry(link, &dev->links.consumers, s_node) device_link_del(link);
1708 }
1709 
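/*
 * Walk every registered sub-driver and, for each device currently bound
 * to it, add a stateless device link and a component match entry.
 */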
1710 static struct component_match *rockchip_drm_match_add(struct device *dev)
1711 {
1712     struct component_match *match = NULL;
1713     int i;
1714 
1715     for (i = 0; i < num_rockchip_sub_drivers; i++) {
1716         struct platform_driver *drv = rockchip_sub_drivers[i];
1717         struct device *p = NULL, *d;
1718 
1719         do {
1720             d = platform_find_device_by_driver(p, &drv->driver);
1721             put_device(p);
1722             p = d;
1723 
1724             if (!d) {
1725                 break;
1726             }
1727 
1728             device_link_add(dev, d, DL_FLAG_STATELESS);
1729             component_match_add(dev, &match, compare_dev, d);
1730         } while (true);
1731     }
1732 
1733     if (IS_ERR(match)) {
1734         rockchip_drm_match_remove(dev);
1735     }
1736 
1737     return match ?: ERR_PTR(-ENODEV);
1738 }
1739 
1740 static const struct component_master_ops rockchip_drm_ops = {
1741     .bind = rockchip_drm_bind,
1742     .unbind = rockchip_drm_unbind,
1743 };
1744 
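/*
 * Validate the "ports" property of the display-subsystem node and clear
 * is_support_iommu when any enabled vop lacks a usable iommu, so that
 * every crtc falls back to non-iommu buffers.
 */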
1745 static int rockchip_drm_platform_of_probe(struct device *dev)
1746 {
1747     struct device_node *np = dev->of_node;
1748     struct device_node *port;
1749     bool found = false;
1750     int i;
1751 
1752     if (!np) {
1753         return -ENODEV;
1754     }
1755 
1756     for (i = 0;; i++) {
1757         struct device_node *iommu;
1758 
1759         port = of_parse_phandle(np, "ports", i);
1760         if (!port) {
1761             break;
1762         }
1763 
1764         if (!of_device_is_available(port->parent)) {
1765             of_node_put(port);
1766             continue;
1767         }
1768 
1769         iommu = of_parse_phandle(port->parent, "iommus", 0);
1770         if (!iommu || !of_device_is_available(iommu)) {
1771             DRM_DEV_DEBUG(dev, "no iommu attached for %pOF, using non-iommu buffers\n", port->parent);
1772             /*
1773              * If any crtc does not support iommu, force all crtcs
1774              * to use non-iommu buffers.
1775              */
1776             is_support_iommu = false;
1777         }
1778 
1779         found = true;
1780 
1781         of_node_put(iommu);
1782         of_node_put(port);
1783     }
1784 
1785     if (i == 0) {
1786         DRM_DEV_ERROR(dev, "missing 'ports' property\n");
1787         return -ENODEV;
1788     }
1789 
1790     if (!found) {
1791         DRM_DEV_ERROR(dev, "No available vop found for display-subsystem.\n");
1792         return -ENODEV;
1793     }
1794 
1795     return 0;
1796 }
1797 
1798 static int rockchip_drm_platform_probe(struct platform_device *pdev)
1799 {
1800     struct device *dev = &pdev->dev;
1801     struct component_match *match = NULL;
1802     int ret;
1803 
1804     ret = rockchip_drm_platform_of_probe(dev);
1805     if (ret) {
1806         return ret;
1807     }
1808 
1809     match = rockchip_drm_match_add(dev);
1810     if (IS_ERR(match)) {
1811         return PTR_ERR(match);
1812     }
1813 
1814     ret = component_master_add_with_match(dev, &rockchip_drm_ops, match);
1815     if (ret < 0) {
1816         rockchip_drm_match_remove(dev);
1817         return ret;
1818     }
1819 
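    /* Use a full 64-bit DMA mask for allocations done through the display-subsystem device. */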
1820     ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
1821     if (ret) {
1822         return ret;
1823     }
1824 
1825     return 0;
1826 }
1827 
1828 static int rockchip_drm_platform_remove(struct platform_device *pdev)
1829 {
1830     component_master_del(&pdev->dev, &rockchip_drm_ops);
1831 
1832     rockchip_drm_match_remove(&pdev->dev);
1833 
1834     return 0;
1835 }
1836 
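/*
 * Quiesce the display pipeline on shutdown/reboot, likely to avoid
 * leaving CRTCs and panels active across power-off.
 */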
1837 static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
1838 {
1839     struct drm_device *drm = platform_get_drvdata(pdev);
1840 
1841     if (drm) {
1842         drm_atomic_helper_shutdown(drm);
1843     }
1844 }
1845 
1846 static const struct of_device_id rockchip_drm_dt_ids[] = {
1847     {
1848         .compatible = "rockchip,display-subsystem",
1849     },
1850     {},
1851 };
1852 MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
1853 
1854 static struct platform_driver rockchip_drm_platform_driver = {
1855     .probe = rockchip_drm_platform_probe,
1856     .remove = rockchip_drm_platform_remove,
1857     .shutdown = rockchip_drm_platform_shutdown,
1858     .driver =
1859         {
1860             .name = "rockchip-drm",
1861             .of_match_table = rockchip_drm_dt_ids,
1862             .pm = &rockchip_drm_pm_ops,
1863         },
1864 };
1865 
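/*
 * Add @drv to the sub-driver table only when its Kconfig option @cond is
 * enabled and the table still has room.
 */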
1866 #define ADD_ROCKCHIP_SUB_DRIVER(drv, cond)                                                                             \
1867     {                                                                                                                  \
1868         if (IS_ENABLED(cond) && !WARN_ON(num_rockchip_sub_drivers >= MAX_ROCKCHIP_SUB_DRIVERS))                        \
1869             rockchip_sub_drivers[num_rockchip_sub_drivers++] = &(drv);                                                 \
1870     }
1871 
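/*
 * Register every enabled encoder/connector/vop sub-driver first, then the
 * master "rockchip-drm" platform driver that binds them all through the
 * component framework.
 */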
1872 static int __init rockchip_drm_init(void)
1873 {
1874     int ret;
1875 
1876     num_rockchip_sub_drivers = 0;
1877     ADD_ROCKCHIP_SUB_DRIVER(vop_platform_driver, CONFIG_DRM_ROCKCHIP);
1878     ADD_ROCKCHIP_SUB_DRIVER(vop2_platform_driver, CONFIG_DRM_ROCKCHIP);
1879     ADD_ROCKCHIP_SUB_DRIVER(vconn_platform_driver, CONFIG_ROCKCHIP_VCONN);
1880     ADD_ROCKCHIP_SUB_DRIVER(rockchip_lvds_driver, CONFIG_ROCKCHIP_LVDS);
1881     ADD_ROCKCHIP_SUB_DRIVER(rockchip_dp_driver, CONFIG_ROCKCHIP_ANALOGIX_DP);
1882     ADD_ROCKCHIP_SUB_DRIVER(cdn_dp_driver, CONFIG_ROCKCHIP_CDN_DP);
1883     ADD_ROCKCHIP_SUB_DRIVER(dw_hdmi_rockchip_pltfm_driver, CONFIG_ROCKCHIP_DW_HDMI);
1884     ADD_ROCKCHIP_SUB_DRIVER(dw_mipi_dsi_rockchip_driver, CONFIG_ROCKCHIP_DW_MIPI_DSI);
1885     ADD_ROCKCHIP_SUB_DRIVER(dw_mipi_dsi2_rockchip_driver, CONFIG_ROCKCHIP_DW_MIPI_DSI);
1886     ADD_ROCKCHIP_SUB_DRIVER(inno_hdmi_driver, CONFIG_ROCKCHIP_INNO_HDMI);
1887     ADD_ROCKCHIP_SUB_DRIVER(rk3066_hdmi_driver, CONFIG_ROCKCHIP_RK3066_HDMI);
1888     ADD_ROCKCHIP_SUB_DRIVER(rockchip_rgb_driver, CONFIG_ROCKCHIP_RGB);
1889     ADD_ROCKCHIP_SUB_DRIVER(dw_dp_driver, CONFIG_ROCKCHIP_DW_DP);
1890 
1891     ret = platform_register_drivers(rockchip_sub_drivers, num_rockchip_sub_drivers);
1892     if (ret) {
1893         return ret;
1894     }
1895 
1896     ret = platform_driver_register(&rockchip_drm_platform_driver);
1897     if (ret) {
1898         goto err_unreg_drivers;
1899     }
1900 
1901     rockchip_gem_get_ddr_info();
1902 
1903     return 0;
1904 
1905 err_unreg_drivers:
1906     platform_unregister_drivers(rockchip_sub_drivers, num_rockchip_sub_drivers);
1907     return ret;
1908 }
1909 
1910 static void __exit rockchip_drm_fini(void)
1911 {
1912     platform_driver_unregister(&rockchip_drm_platform_driver);
1913 
1914     platform_unregister_drivers(rockchip_sub_drivers, num_rockchip_sub_drivers);
1915 }
1916 
1917 module_init(rockchip_drm_init);
1918 module_exit(rockchip_drm_fini);
1919 
1920 MODULE_AUTHOR("Mark Yao <mark.yao@rock-chips.com>");
1921 MODULE_DESCRIPTION("ROCKCHIP DRM Driver");
1922 MODULE_LICENSE("GPL v2");
1923