/*
 * Allwinner SoCs display driver.
 *
 * Copyright (C) 2018 Allwinner.
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
#include <linux/fence.h>
#else
#include <linux/dma-fence.h>
#define fence dma_fence
#define fence_ops dma_fence_ops
#define fence_init dma_fence_init
#define fence_put dma_fence_put
#define fence_signal_locked dma_fence_signal_locked
#define fence_default_wait dma_fence_default_wait
#define fence_context_alloc dma_fence_context_alloc
#define FENCE_FLAG_ENABLE_SIGNAL_BIT DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT
#endif
#include <linux/file.h>
#include <linux/list.h>
#include <linux/sync_file.h>
#include "dev_disp.h"
#include <video/sunxi_display2.h>
29
/*
 * Sub-commands carried in ubuffer[1] of the DISP_HWC_COMMIT ioctl
 * (dispatched in hwc_ioctl()).  Values are part of the userspace HWC
 * ABI -- presumably mirrored in the Android HWC HAL; do not renumber.
 */
enum {
	HWC_NEW_CLIENT = 1,	/* (re)activate a display's fence timeline */
	HWC_DESTROY_CLIENT,	/* deactivate and retire all pending fences */
	HWC_ACQUIRE_FENCE,	/* create a fence, return fd + seqno to user */
	HWC_SUBMIT_FENCE,	/* advance the submitted-frame count */
};
36
/*
 * Result copied to userspace by hwc_aquire_fence(): the sync_file fd
 * and the fence's sequence number on the per-display timeline.
 * Layout is shared with userspace -- TODO confirm against the HWC HAL.
 */
struct sync_info {
	int fd;			/* installed sync_file file descriptor */
	unsigned int count;	/* fence seqno (timeline position) */
};
41
/*
 * A timeline fence plus its link on display_sync.fence_list.  The node
 * is emptied (list_del_init) when the fence retires in
 * disp_composer_proc(); fence_release uses list_empty() on it to detect
 * an unexpected early put.
 */
struct hwc_fence {
	struct list_head node;	/* entry on display_sync.fence_list */
	struct fence base;	/* embedded (dma_)fence */
};
46
/*
 * Per-display software fence timeline.  Counters use wrap-safe unsigned
 * arithmetic ("a - b > INT_MAX" meaning "a is behind b") when compared.
 */
struct display_sync {
	unsigned timeline_count;	/* seqno of the last fence created */
	/* NOTE: "submmit" is a historic misspelling kept for consistency */
	unsigned submmit_count;		/* last frame count submitted by HWC */
	unsigned current_count;		/* last frame count retired/displayed */
	unsigned skip_count;		/* submitted frames never displayed */
	unsigned free_count;		/* fences freed via fence_release */
	u64 context;			/* fence context from fence_context_alloc */
	spinlock_t fence_lock;		/* protects fence_list; also the fence lock */
	struct list_head fence_list;	/* pending hwc_fence nodes, seqno order */
	char name[7];			/* timeline name "disp_N" + NUL */
	bool active_disp;		/* true between NEW_CLIENT and DESTROY_CLIENT */
};
59
/* Composer-wide state: one fence timeline per display plus output gating. */
struct composer_private_data {
	struct disp_drv_info *dispopt;	/* driver info handed to composer_init */
	bool b_no_output;		/* set while suspended: retire everything */
	struct display_sync display_sync[DISP_SCREEN_NUM];
};

/* Single global instance; initialised in composer_init(). */
static struct composer_private_data composer_priv;
67
get_display_sync(struct fence * fence)68 static inline struct display_sync *get_display_sync(struct fence *fence)
69 {
70 return container_of(fence->lock, struct display_sync, fence_lock);
71 }
72
get_hwc_fence(struct fence * fence)73 static inline struct hwc_fence *get_hwc_fence(struct fence *fence)
74 {
75 return container_of(fence, struct hwc_fence, base);
76 }
77
/* fence_ops.get_driver_name: constant driver identifier string. */
static const char *hwc_timeline_get_driver_name(struct fence *fence)
{
	/* Fixed the stray space before the parameter list (kernel style). */
	return "sunxi_hwc";
}
82
hwc_timeline_get_timeline_name(struct fence * fence)83 static const char *hwc_timeline_get_timeline_name(struct fence *fence)
84 {
85 struct display_sync *parent;
86 parent = get_display_sync(fence);
87 return parent->name;
88 }
89
/*
 * fence_ops.enable_signaling: always succeed.  Fences on this timeline
 * are signalled unconditionally from disp_composer_proc(), so no extra
 * work is needed to arm signalling.
 */
static bool hwc_timeline_enable_signaling(struct fence *fence)
{
	return true;
}
94
hwc_timeline_fence_signaled(struct fence * fence)95 static bool hwc_timeline_fence_signaled(struct fence *fence)
96 {
97 struct hwc_fence *hwc_fence;
98 struct display_sync *display_sync;
99
100 hwc_fence = get_hwc_fence(fence);
101 display_sync = get_display_sync(fence);
102 return hwc_fence->base.seqno - display_sync->current_count > INT_MAX;
103 }
104
/*
 * fence_ops.release: free the hwc_fence when its last reference drops.
 *
 * "seqno - current_count < INT_MAX" is the wrap-safe test for "not yet
 * retired by the timeline".  A fence that is both unretired and still
 * linked on fence_list is being put by someone other than
 * disp_composer_proc(); warn and deliberately leak it rather than
 * corrupt the list.
 *
 * NOTE(review): %llu assumes a 64-bit seqno; on kernels where
 * fence->seqno is unsigned int this format specifier mismatches --
 * verify against the kernel version in use.
 */
static void hwc_timeline_fence_release(struct fence *fence)
{
	struct hwc_fence *hwc_fence;
	struct display_sync *display_sync;

	display_sync = get_display_sync(fence);
	hwc_fence = get_hwc_fence(fence);
	if (fence->seqno - display_sync->current_count < INT_MAX
	    && !list_empty(&hwc_fence->node)) {
		printk(KERN_ERR "Other user put the fence:%llu,check it\n", fence->seqno);
		return;
	}
	kfree(hwc_fence);
	display_sync->free_count++;	/* stats only; reported via hwc_dump() */
}
120
/* fence_ops shared by every fence on every per-display timeline. */
static const struct fence_ops hwc_timeline_fence_ops = {
	.get_driver_name = hwc_timeline_get_driver_name,
	.get_timeline_name = hwc_timeline_get_timeline_name,
	.enable_signaling = hwc_timeline_enable_signaling,
	.signaled = hwc_timeline_fence_signaled,
	.wait = fence_default_wait,
	.release = hwc_timeline_fence_release,
};
129
/*
 * disp_composer_proc - retire fences for display @sel up to the last
 * submitted frame count.
 *
 * Registered as the sync-finish callback in composer_init(); also
 * called directly on suspend/destroy/no-output to flush everything.
 * Every fence whose seqno is no longer ahead of current_count is
 * signalled and its driver reference dropped; when output is disabled
 * or the client is gone, all pending fences are released at once.
 *
 * Fixes: @sel was used to index display_sync[] (and its fields read)
 * before the bounds check; local "all_relaease" typo renamed.
 */
static void disp_composer_proc(u32 sel)
{
	struct hwc_fence *fence, *next;
	unsigned long flags;
	struct display_sync *display_sync;
	bool release_all;

	/* validate @sel before touching display_sync[sel] */
	if (sel >= DISP_SCREEN_NUM)
		return;

	display_sync = &composer_priv.display_sync[sel];
	release_all = composer_priv.b_no_output || !display_sync->active_disp;

	/* frames submitted but never displayed count as skipped */
	if (display_sync->current_count + 1 < display_sync->submmit_count)
		display_sync->skip_count +=
		    display_sync->submmit_count - 1 - display_sync->current_count;

	display_sync->current_count = display_sync->submmit_count;

	spin_lock_irqsave(&display_sync->fence_lock, flags);
	list_for_each_entry_safe(fence, next, &display_sync->fence_list, node) {
		/*
		 * Wrap-safe "seqno <= current_count": the unsigned
		 * difference exceeds INT_MAX exactly when the fence is
		 * at or behind the retired count.
		 */
		if (fence->base.seqno - display_sync->current_count > INT_MAX
		    || release_all) {
			list_del_init(&fence->node);
			fence_signal_locked(&fence->base);
			fence_put(&fence->base);
		} else {
			/* list is in seqno order; nothing newer is ready */
			break;
		}
	}
	spin_unlock_irqrestore(&display_sync->fence_lock, flags);
}
159
hwc_aquire_fence(int disp,void * user_fence)160 static int hwc_aquire_fence(int disp, void *user_fence)
161 {
162 struct display_sync *dispsync = NULL;
163 struct hwc_fence *fence;
164 struct sync_file *sync_file;
165 unsigned long flags;
166 struct sync_info sync;
167
168 dispsync = &composer_priv.display_sync[disp];
169
170 if (!dispsync->active_disp)
171 return -ENODEV;
172
173 fence = kzalloc(sizeof(struct hwc_fence), GFP_KERNEL);
174 if (fence == NULL) {
175 printk(KERN_ERR "kzlloc display pt fail\n");
176 goto err_quire;
177 }
178
179 fence_init(&fence->base, &hwc_timeline_fence_ops, &dispsync->fence_lock,
180 dispsync->context, ++dispsync->timeline_count);
181 sync_file = sync_file_create(&fence->base);
182 if (!sync_file) {
183 kfree(fence);
184 goto err_quire;
185 }
186
187 sync.fd = get_unused_fd_flags(O_CLOEXEC);
188 if (sync.fd < 0) {
189 printk(KERN_ERR "get unused fd fail\n");
190 kfree(fence);
191 goto err_quire;
192 }
193 fd_install(sync.fd, sync_file->file);
194 sync.count = fence->base.seqno;
195 if (copy_to_user((void __user *)user_fence, &sync, sizeof(sync)))
196 printk(KERN_ERR "copy_to_user fail.\n");
197
198 spin_lock_irqsave(&dispsync->fence_lock, flags);
199 list_add_tail(&fence->node, &dispsync->fence_list);
200 spin_unlock_irqrestore(&dispsync->fence_lock, flags);
201
202 set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->base.flags);
203
204 return 0;
205
206 err_quire:
207 return -ENXIO;
208 }
209
hwc_submit(int disp,unsigned int sbcount)210 static inline int hwc_submit(int disp, unsigned int sbcount)
211 {
212 composer_priv.display_sync[disp].submmit_count = sbcount;
213 if (composer_priv.b_no_output)
214 disp_composer_proc(disp);
215 return 0;
216 }
217
get_de_clk_rate(unsigned int disp,int * usr)218 static int get_de_clk_rate(unsigned int disp, int *usr)
219 {
220 struct disp_manager *disp_mgr;
221 int rate = 254000000;
222
223 if (DISP_SCREEN_NUM <= disp) {
224 printk("%s: disp=%d\n", __func__, disp);
225 return -1;
226 }
227
228 disp_mgr = composer_priv.dispopt->mgr[disp];
229 if (disp_mgr && disp_mgr->get_clk_rate)
230 rate = disp_mgr->get_clk_rate(disp_mgr);
231 put_user(rate, usr);
232 return 0;
233 }
234
hwc_new_client(int disp,int * user)235 static int hwc_new_client(int disp, int *user)
236 {
237 if (composer_priv.display_sync[disp].active_disp == true)
238 return 0;
239 composer_priv.display_sync[disp].timeline_count = 0;
240 composer_priv.display_sync[disp].submmit_count = 0;
241 composer_priv.display_sync[disp].free_count = 0;
242 composer_priv.display_sync[disp].current_count = 0;
243 composer_priv.display_sync[disp].skip_count = 0;
244 composer_priv.display_sync[disp].active_disp = true;
245 get_de_clk_rate(disp, (int *)user);
246 return 0;
247 }
248
hwc_destroy_client(int disp)249 static int hwc_destroy_client(int disp)
250 {
251 composer_priv.display_sync[disp].active_disp = false;
252 disp_composer_proc(disp);
253 return 0;
254 }
255
/*
 * hwc_ioctl - handler for the DISP_HWC_COMMIT ioctl.
 *
 * ubuffer[0] = display index, ubuffer[1] = HWC_* sub-command,
 * ubuffer[2] = per-command argument (user pointer or submit count).
 * Returns the sub-command's result; -EFAULT for any other cmd or an
 * unknown sub-command.
 *
 * NOTE(review): @arg is cast and dereferenced directly as a kernel
 * pointer -- this assumes the disp ioctl dispatch layer already copied
 * the userspace argument array into kernel memory; verify against
 * dev_disp.c.  The display index in ubuffer[0] is forwarded to the
 * handlers without validation here.
 */
static int hwc_ioctl(unsigned int cmd, unsigned long arg)
{
	int ret = -EFAULT;

	if (cmd == DISP_HWC_COMMIT) {
		unsigned long *ubuffer;
		ubuffer = (unsigned long *)arg;
		switch (ubuffer[1]) {
		case HWC_NEW_CLIENT:
			ret = hwc_new_client((int)ubuffer[0], (int *)ubuffer[2]);
			break;
		case HWC_DESTROY_CLIENT:
			ret = hwc_destroy_client((int)ubuffer[0]);
			break;
		case HWC_ACQUIRE_FENCE:
			ret = hwc_aquire_fence(ubuffer[0], (void *)ubuffer[2]);
			break;
		case HWC_SUBMIT_FENCE:
			ret = hwc_submit(ubuffer[0], ubuffer[2]);
			break;
		default:
			pr_warn("hwc give a err iotcl.\n");
		}
	}
	return ret;
}
282
hwc_suspend(void)283 static int hwc_suspend(void)
284 {
285 int i;
286
287 composer_priv.b_no_output = 1;
288 for (i = 0; i < DISP_SCREEN_NUM; i++)
289 disp_composer_proc(i);
290 return 0;
291 }
292
/* Standby exit: re-enable output; fences retire normally again. */
static int hwc_resume(void)
{
	composer_priv.b_no_output = 0;
	return 0;
}
298
hwc_dump(char * buf)299 int hwc_dump(char *buf)
300 {
301 int i = 0, count = 0;
302
303 for (i = 0; i < DISP_SCREEN_NUM; i++) {
304 if (composer_priv.display_sync[i].active_disp) {
305 count += sprintf(buf + count,
306 "disp[%1d]all:%u, sub:%u, cur:%u, free:%u, skip:%u\n",
307 i,
308 composer_priv.display_sync[i].timeline_count,
309 composer_priv.display_sync[i].submmit_count,
310 composer_priv.display_sync[i].current_count,
311 composer_priv.display_sync[i].free_count,
312 composer_priv.display_sync[i].skip_count);
313 }
314 }
315 return count;
316 }
317
composer_init(struct disp_drv_info * psg_disp_drv)318 s32 composer_init(struct disp_drv_info *psg_disp_drv)
319 {
320 int i;
321
322 disp_register_ioctl_func(DISP_HWC_COMMIT, hwc_ioctl);
323 #if defined(CONFIG_COMPAT)
324 disp_register_compat_ioctl_func(DISP_HWC_COMMIT, hwc_ioctl);
325 #endif
326 disp_register_sync_finish_proc(disp_composer_proc);
327 disp_register_standby_func(hwc_suspend, hwc_resume);
328 composer_priv.dispopt = psg_disp_drv;
329 for (i = 0; i < DISP_SCREEN_NUM; i++) {
330 INIT_LIST_HEAD(&composer_priv.display_sync[i].fence_list);
331 spin_lock_init(&composer_priv.display_sync[i].fence_lock);
332 composer_priv.display_sync[i].context = fence_context_alloc(1);
333 sprintf(composer_priv.display_sync[i].name, "disp_%d", i);
334 composer_priv.display_sync[i].name[6] = 0;
335 }
336 return 0;
337 }
338