• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2021 HiSilicon (Shanghai) Technologies CO., LIMITED.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
17  */
18 
19 #include "hifb_init.h"
20 #include <linux/poll.h>
21 #include <linux/fb.h>
22 #include <linux/interrupt.h>
23 #include <linux/seq_file.h>
24 #include "securec.h"
25 #include <linux/of_platform.h>
26 #include <linux/memblock.h>
27 #include <linux/highmem.h>
28 #include <linux/version.h>
29 
/* When true, update rectangles for rotated layers are adjusted; exposed as a
 * read-only module parameter. */
bool g_temp_update_rotate_rect = HI_FALSE;
/* Boot-style "video=" argument for the frame buffer, e.g. vram0 size in KB. */
char g_tmp_video[64] = "hifb:vram0_size:8100"; /* 64 The length of the array */

module_param(g_temp_update_rotate_rect, bool, S_IRUGO);
module_param_string(video, g_tmp_video, 64, 0); /* 64 The length of the array */

/* Per-graphics-layer mmz (media memory zone) name; settable via module_param_array. */
char *apsz_layer_mmz_names[HIFB_MAX_LAYER_NUM] = { [0 ... HIFB_MAX_LAYER_NUM - 1] = HI_NULL };

module_param_array(apsz_layer_mmz_names, charp, HI_NULL, S_IRUGO);
MODULE_PARM_DESC(apsz_layer_mmz_names, "The mmz names for the graphics layers.");
40 
/* Return the mmz name configured for graphics layer @layer_id, or HI_NULL
 * when the id is out of range (no name configured maps to HI_NULL too). */
char *hifb_get_layer_mmz_names(hi_u32 layer_id)
{
    return (layer_id < HIFB_MAX_LAYER_NUM) ? apsz_layer_mmz_names[layer_id] : HI_NULL;
}
48 
#define VSYNC_LEN 64 /* size of the formatted timestamp strings */

/* /dev/vsync character device identity (fixed major 288). */
static unsigned int g_vsync_major = 288;
static unsigned int g_vsync_minor = 0;
static dev_t g_vsync_devno;
static struct class *g_vsync_cls;
static struct device *g_vsync_device;

/* /dev/vsync_pts character device identity (fixed major 289). */
static unsigned int g_pts_major = 289;
static unsigned int g_pts_minor = 0;
static dev_t g_pts_devno;
static struct class *g_pts_cls;
static struct device *g_pts_device;

/* Scratch buffers used to format timestamps handed to userspace. */
static char g_vdata[VSYNC_LEN] = {0};
static char g_nowdata[VSYNC_LEN] = {0};

/* g_vsync_lock protects g_timestamp/g_bnewdata; g_wait wakes poll()ers. */
static spinlock_t g_vsync_lock;
static wait_queue_head_t g_wait;

/* Timestamp (microseconds) of the latest vsync and a "new since last read" flag. */
static unsigned long long int g_timestamp;
static bool g_bnewdata = 0;
70 
/* open handler for /dev/vsync: no per-open state to set up. */
static int vsync_open(struct inode *inode, struct file *file)
{
    hi_unused(file);
    hi_unused(inode);
    return 0;
}
77 
/* release handler for /dev/vsync: nothing was allocated at open time. */
static int vsync_release(struct inode *inode, struct file *file)
{
    hi_unused(file);
    hi_unused(inode);
    return 0;
}
84 
/*
 * Read handler for /dev/vsync: returns the timestamp (microseconds) of the
 * most recent vsync as a decimal string, '\n'-terminated and NUL-terminated.
 * Returns the copied length, 0 when buf is NULL / too small, HI_FAILURE on
 * a formatting error.
 *
 * Fix: the original formatted into the shared g_vdata buffer under
 * g_vsync_lock, but then read it (strnlen + copy_to_user) after dropping the
 * lock, so a concurrent reader or hifb_vsync_notify() could tear the string.
 * Snapshot the timestamp under the lock and format into a per-call stack
 * buffer; copy_to_user must not run under a spinlock anyway.
 */
static int vsync_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
{
    char vdata[VSYNC_LEN] = {0};
    unsigned long long int timestamp;
    unsigned int len;
    unsigned long syncflags = 0;

    hi_unused(file);
    hi_unused(offset);

    spin_lock_irqsave(&g_vsync_lock, syncflags);
    timestamp = g_timestamp;
    spin_unlock_irqrestore(&g_vsync_lock, syncflags);

    if (snprintf_s(vdata, VSYNC_LEN, VSYNC_LEN - 1, "%llu\n", timestamp) < 0) {
        osal_printk("%s:%d:snprintf_s failure\n", __FUNCTION__, __LINE__);
        return HI_FAILURE;
    }

    len = strnlen(vdata, (VSYNC_LEN - 1)) + 1; /* include the trailing NUL */
    if ((buf == HI_NULL) || (count < len)) {
        return 0;
    }

    if (osal_copy_to_user(buf, vdata, len)) {
        osal_printk("copy to user err\n");
        len = 0;
    }

    spin_lock_irqsave(&g_vsync_lock, syncflags);
    g_bnewdata = 0; /* consumed: poll() blocks again until the next vsync */
    spin_unlock_irqrestore(&g_vsync_lock, syncflags);

    return len;
}
116 
vsync_poll(struct file * file,struct poll_table_struct * table)117 static unsigned int vsync_poll(struct file *file, struct poll_table_struct *table)
118 {
119     unsigned int mask = 0;
120     unsigned long syncflags = 0;
121 
122     poll_wait(file, &g_wait, table);
123 
124     spin_lock_irqsave(&g_vsync_lock, syncflags);
125     if (g_bnewdata) {
126         mask |= (POLLIN | POLLRDNORM);
127     }
128     spin_unlock_irqrestore(&g_vsync_lock, syncflags);
129 
130     return mask;
131 }
132 
/* File operations for /dev/vsync.
 * NOTE(review): vsync_read returns int but fops .read expects ssize_t; the
 * (hi_void *) cast hides the prototype mismatch — worth aligning the
 * signature instead of casting. */
static struct file_operations g_vsync_ops = {
    .owner = THIS_MODULE,
    .open = vsync_open,
    .read = (hi_void *)vsync_read,
    .release = vsync_release,
    .poll = vsync_poll,
};
140 
/* Current scheduler clock converted to microseconds. */
inline static unsigned long long int get_cur_pts(void)
{
    unsigned long long int now_us = osal_sched_clock();

    do_div(now_us, 1000); /* 1000: nanoseconds per microsecond */
    return now_us;
}
150 
hifb_vsync_notify(void)151 int hifb_vsync_notify(void)
152 {
153     unsigned long syncflags = 0;
154 
155     spin_lock_irqsave(&g_vsync_lock, syncflags);
156     g_timestamp = get_cur_pts();
157     g_bnewdata = 1;
158     spin_unlock_irqrestore(&g_vsync_lock, syncflags);
159 
160     wake_up_interruptible(&g_wait);
161 
162     return 0;
163 }
164 
hifb_vsync_init(void)165 int hifb_vsync_init(void)
166 {
167     int ret;
168 
169     g_vsync_devno = MKDEV(g_vsync_major, g_vsync_minor);
170     ret = register_chrdev(g_vsync_major, "vsync", &g_vsync_ops);
171     if (ret < 0) {
172         osal_printk("Unable to register characterdevice!\n");
173         return ret;
174     }
175     g_vsync_cls = class_create(THIS_MODULE, "vsync");
176     if (IS_ERR(g_vsync_cls)) {
177         unregister_chrdev(g_vsync_major, "vsync");
178         return -EBUSY;
179     }
180 
181     g_vsync_device = device_create(g_vsync_cls, HI_NULL, g_vsync_devno, HI_NULL, "vsync"); /* mknod /dev/vsync */
182     if (IS_ERR(g_vsync_device)) {
183         class_destroy(g_vsync_cls);
184         unregister_chrdev(g_vsync_major, "vsync");
185         return -EBUSY;
186     }
187 
188     init_waitqueue_head(&g_wait);
189     spin_lock_init(&g_vsync_lock);
190 
191     return 0;
192 }
193 
/* Tear down /dev/vsync: destroy the device node, class and chrdev
 * registration in reverse order of hifb_vsync_init(). */
void hifb_vsync_exit(void)
{
    device_destroy(g_vsync_cls, g_vsync_devno);
    class_destroy(g_vsync_cls);
    unregister_chrdev(g_vsync_major, "vsync");
}
200 
/*
 * Read handler for /dev/vsync_pts: return the current scheduler time in
 * microseconds as a decimal string with trailing '\n' and NUL. Returns the
 * copied length, 0 when buf is NULL / too small, HI_FAILURE on a formatting
 * error.
 *
 * Fix: the original formatted into the shared static g_nowdata with no
 * locking, so two concurrent readers could interleave and copy a torn string
 * to userspace; use a per-call stack buffer instead.
 */
static int pts_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
{
    char nowdata[VSYNC_LEN] = {0};
    unsigned long long int time_now;
    unsigned int len;

    hi_unused(file);
    hi_unused(offset);

    time_now = get_cur_pts();
    if (snprintf_s(nowdata, sizeof(nowdata), VSYNC_LEN - 1, "%llu\n", time_now) < 0) {
        osal_printk("%s:%d:snprintf_s failure\n", __FUNCTION__, __LINE__);
        return HI_FAILURE;
    }

    len = strnlen(nowdata, (VSYNC_LEN - 1)) + 1; /* include the trailing NUL */
    if ((buf == HI_NULL) || (count < len)) {
        return 0;
    }

    if (osal_copy_to_user(buf, nowdata, len)) {
        osal_printk("copy to user err\n");
        len = 0;
    }

    return len;
}
226 
/* File operations for /dev/vsync_pts (read-only).
 * NOTE(review): pts_read returns int but fops .read expects ssize_t; the
 * (hi_void *) cast hides the prototype mismatch. */
static struct file_operations g_pts_ops = {
    .owner = THIS_MODULE,
    .read = (hi_void *)pts_read,
};
231 
hifb_pts_init(void)232 int hifb_pts_init(void)
233 {
234     int ret;
235 
236     g_pts_devno = MKDEV(g_pts_major, g_pts_minor);
237     ret = register_chrdev(g_pts_major, "vsync_pts", &g_pts_ops);
238     if (ret < 0) {
239         osal_printk("Unable to register characterdevice!\n");
240         return ret;
241     }
242 
243     g_pts_cls = class_create(THIS_MODULE, "vsync_pts");
244     if (IS_ERR(g_pts_cls)) {
245         unregister_chrdev(g_pts_major, "vsync_pts");
246         return -EBUSY;
247     }
248 
249     g_pts_device = device_create(g_pts_cls, HI_NULL, g_pts_devno, HI_NULL, "vsync_pts"); /* mknod /dev/vsync_pts */
250     if (IS_ERR(g_pts_device)) {
251         class_destroy(g_pts_cls);
252         unregister_chrdev(g_pts_major, "vsync_pts");
253         return -EBUSY;
254     }
255 
256     return 0;
257 }
258 
/* Tear down /dev/vsync_pts: destroy the device node, class and chrdev
 * registration in reverse order of hifb_pts_init(). */
void hifb_pts_exit(void)
{
    device_destroy(g_pts_cls, g_pts_devno);
    class_destroy(g_pts_cls);
    unregister_chrdev(g_pts_major, "vsync_pts");
}
265 
266 #ifdef CONFIG_HI_VO_FB_SEPARATE
267 #define HIFB_INT_NAME_LENGTH 10
268 
hi35xx_hifb_probe(struct platform_device * pdev)269 static int hi35xx_hifb_probe(struct platform_device *pdev)
270 {
271     hi_char hifb_int_name[HIFB_INT_NAME_LENGTH] = "hifb";
272     int temp_hifb_irq;
273 #ifdef CONFIG_HIFB_SOFT_IRQ_SUPPORT
274     unsigned int temp_hifb_soft_irq;
275 #endif
276     set_update_rotate_rect(g_temp_update_rotate_rect);
277     osal_platform_get_modparam_string(pdev, "video", 64, g_tmp_video); /* 64 video length */
278     set_video_name(g_tmp_video);
279     temp_hifb_irq = osal_platform_get_irq_byname(pdev, hifb_int_name);
280     if (temp_hifb_irq <= 0) {
281         dev_err(&pdev->dev, "cannot find hifb IRQ\n");
282         return HI_FAILURE;
283     }
284 
285 #ifdef CONFIG_HIFB_SOFT_IRQ_SUPPORT
286     temp_hifb_soft_irq = osal_platform_get_irq_byname(pdev, "hifb_soft");
287     if (temp_hifb_soft_irq <= 0) {
288         return HI_FAILURE;
289     }
290     set_hifb_soft_irq(temp_hifb_soft_irq);
291 #endif
292     set_hifb_irq((unsigned int)temp_hifb_irq);
293     if (hifb_init() != HI_SUCCESS) {
294         osal_printk("hifb_init HI_FAILURE!\n");
295     }
296 
297     return 0;
298 }
299 
/* Platform-driver remove: undo everything hifb_init() set up. */
static int hi35xx_hifb_remove(struct platform_device *pdev)
{
    hifb_cleanup();
    hi_unused(pdev);
    return 0;
}
306 
/* Device-tree match table: this driver binds to "hisilicon,hisi-hifb" nodes. */
static const struct of_device_id g_hi35xx_hifb_match[] = {
    {.compatible = "hisilicon,hisi-hifb"},
    {}, /* sentinel */
};

MODULE_DEVICE_TABLE(of, g_hi35xx_hifb_match);
313 
/* Platform-driver glue for the hifb frame buffer; registered below via the
 * osal module helper (equivalent of module_platform_driver). */
static struct platform_driver g_hi35xx_hifb_driver = {
    .probe = hi35xx_hifb_probe,
    .remove = hi35xx_hifb_remove,
    .driver =
        {
            .name = "hi35xx_hifb",
            .of_match_table = g_hi35xx_hifb_match,
        },
};

osal_module_platform_driver(g_hi35xx_hifb_driver);

MODULE_LICENSE("GPL");
327 #endif
328 
329 /****************************** dmabuf start **************************/
/* Per-dmabuf private data: physical base address of the exported,
 * physically contiguous framebuffer memory block. */
typedef struct {
    phys_addr_t mem_base;
} hifb_mem_block_pdata;
333 
hifb_memblock_map(struct dma_buf_attachment * buf_attach,enum dma_data_direction data_direction)334 static struct sg_table *hifb_memblock_map(struct dma_buf_attachment *buf_attach,
335                                           enum dma_data_direction data_direction)
336 {
337     hi_s32 i = 0;
338     hi_s32 ret;
339     hi_ulong page_pfn;
340     hifb_mem_block_pdata *mem_block_data = HI_NULL;
341     struct page *fb_page = HI_NULL;
342     struct sg_table *fb_table = HI_NULL;
343     struct scatterlist *sg_list = HI_NULL;
344 
345     hi_unused(data_direction);
346 
347     if ((buf_attach == HI_NULL) || (buf_attach->dmabuf == HI_NULL) || (buf_attach->dmabuf->priv == HI_NULL)) {
348         return HI_NULL;
349     }
350 
351     mem_block_data = buf_attach->dmabuf->priv;
352     if (mem_block_data == HI_NULL) {
353         return HI_NULL;
354     }
355 
356     page_pfn = PFN_DOWN(mem_block_data->mem_base);
357     fb_page = pfn_to_page(page_pfn);
358     if (fb_page == HI_NULL) {
359         return HI_NULL;
360     }
361 
362     fb_table = kzalloc(sizeof(*fb_table), GFP_KERNEL);
363     if (fb_table == HI_NULL) {
364         return ERR_PTR(-ENOMEM);
365     }
366 
367     ret = sg_alloc_table(fb_table, 1, GFP_KERNEL);
368     if (ret < 0) {
369         kfree(fb_table);
370         fb_table = HI_NULL;
371         return ERR_PTR(ret);
372     }
373 
374     sg_set_page(fb_table->sgl, fb_page, buf_attach->dmabuf->size, 0);
375     for_each_sg(fb_table->sgl, sg_list, fb_table->nents, i) {
376         if (sg_list != HI_NULL) {
377             sg_dma_address(sg_list) = sg_phys(sg_list);
378         }
379     }
380     return fb_table;
381 }
382 
hifb_memblock_unmap(struct dma_buf_attachment * buf_attach,struct sg_table * fb_table,enum dma_data_direction data_direction)383 static hi_void hifb_memblock_unmap(struct dma_buf_attachment *buf_attach, struct sg_table *fb_table,
384                                    enum dma_data_direction data_direction)
385 {
386     hi_unused(buf_attach);
387     hi_unused(data_direction);
388     if (fb_table != HI_NULL) {
389         sg_free_table(fb_table);
390     }
391 }
392 
/* dma_buf release callback: intentionally empty — the underlying memory is
 * owned by the framebuffer driver, not the dmabuf.
 * NOTE(review): the pdata allocated at export time is never freed here, so
 * each export leaks one small allocation — confirm whether that is intended.
 * The __init_memblock annotation also looks accidental for a callback that
 * can run long after init. */
static hi_void __init_memblock hifb_memblock_release(struct dma_buf *fb_dma_buf)
{
    hi_unused(fb_dma_buf);
}
397 
hifb_memblock_do_kmap(struct dma_buf * fb_dma_buf,hi_ulong pgoffset,bool atomic)398 static hi_void *hifb_memblock_do_kmap(struct dma_buf *fb_dma_buf, hi_ulong pgoffset, bool atomic)
399 {
400     hi_ulong page_pfn;
401     hifb_mem_block_pdata *mem_block_data = HI_NULL;
402     struct page *fb_page = HI_NULL;
403 
404     if ((fb_dma_buf == HI_NULL) || (fb_dma_buf->priv == HI_NULL)) {
405         return HI_NULL;
406     }
407 
408     mem_block_data = fb_dma_buf->priv;
409     if (mem_block_data == HI_NULL) {
410         return HI_NULL;
411     }
412 
413     page_pfn = PFN_DOWN(mem_block_data->mem_base) + pgoffset;
414     fb_page = pfn_to_page(page_pfn);
415     if (fb_page == HI_NULL) {
416         return HI_NULL;
417     }
418 
419     if (atomic) {
420         return kmap_atomic(fb_page);
421     } else {
422         return kmap(fb_page);
423     }
424 }
425 
hifb_memblock_kmap(struct dma_buf * fb_dma_buf,hi_ulong pgoffset)426 static hi_void *hifb_memblock_kmap(struct dma_buf *fb_dma_buf, hi_ulong pgoffset)
427 {
428     return hifb_memblock_do_kmap(fb_dma_buf, pgoffset, false);
429 }
430 
hifb_memblock_kunmap(struct dma_buf * fb_dma_buf,hi_ulong pgoffset,hi_void * vir_addr)431 static hi_void hifb_memblock_kunmap(struct dma_buf *fb_dma_buf, hi_ulong pgoffset, hi_void *vir_addr)
432 {
433     hi_unused(fb_dma_buf);
434     hi_unused(pgoffset);
435     if (vir_addr != HI_NULL) {
436         kunmap(vir_addr);
437     }
438 }
439 
/* True when [pfn, pfn + size) stays within the addressable physical range. */
static inline int hifb_valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
    unsigned long last_pfn = pfn + (size >> PAGE_SHIFT);

    return last_pfn <= ((PHYS_MASK >> PAGE_SHIFT) + 1);
}
444 
hifb_memblock_mmap(struct dma_buf * fb_dma_buf,struct vm_area_struct * vma_area)445 static hi_s32 hifb_memblock_mmap(struct dma_buf *fb_dma_buf, struct vm_area_struct *vma_area)
446 {
447     hi_ulong size;
448     hifb_mem_block_pdata *mem_block_data = HI_NULL;
449 
450     if ((fb_dma_buf == HI_NULL) || (vma_area == HI_NULL)) {
451         return -1;
452     }
453 
454     mem_block_data = fb_dma_buf->priv;
455     if (mem_block_data == HI_NULL) {
456         return -1;
457     }
458 
459     if (mem_block_data->mem_base == 0) {
460         return -1;
461     }
462 
463     size = vma_area->vm_end - vma_area->vm_start;
464     if (size == 0) {
465         return -1;
466     }
467 
468     if (!hifb_valid_mmap_phys_addr_range(vma_area->vm_pgoff, size)) {
469         return -1;
470     }
471     vma_area->vm_page_prot = pgprot_writecombine(vma_area->vm_page_prot);
472 
473     return remap_pfn_range(vma_area, vma_area->vm_start, PFN_DOWN(mem_block_data->mem_base), size,
474                            vma_area->vm_page_prot);
475 }
476 
/* dma_buf callbacks for the exported framebuffer memory block. Kernels
 * >= 5.6 removed the .map/.unmap (kmap) hooks from struct dma_buf_ops,
 * hence the version guard. */
struct dma_buf_ops g_hifb_mem_block_ops = {
    .map_dma_buf = hifb_memblock_map,
    .unmap_dma_buf = hifb_memblock_unmap,
    .release = hifb_memblock_release,
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0)
    .map = hifb_memblock_kmap,
    .unmap = hifb_memblock_kunmap,
#endif
    .mmap = hifb_memblock_mmap,
};
487 
488 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
/*
 * Export the framebuffer memory block as a dma-buf (kernels < 4.0 API).
 * @mem_base/@mem_size must be page-aligned. Ownership of the pdata wrapper
 * passes to the dma-buf; it is freed only if the export fails.
 *
 * Fix: hifb_mem_block_pdata is a typedef of an anonymous struct, so
 * "struct hifb_mem_block_pdata" named a distinct, incomplete struct type
 * and sizeof(*mem_block_data) could not compile; use the typedef directly.
 */
struct dma_buf *hifb_memblock_export(phys_addr_t mem_base, size_t mem_size, hi_s32 flags)
{
    hifb_mem_block_pdata *mem_block_data = HI_NULL;
    struct dma_buf *fb_dma_buf = HI_NULL;

    if ((mem_base != PAGE_ALIGN(mem_base)) || (mem_size != PAGE_ALIGN(mem_size))) {
        return ERR_PTR(-EINVAL);
    }

    mem_block_data = kzalloc(sizeof(*mem_block_data), GFP_KERNEL);
    if (mem_block_data == HI_NULL) {
        return ERR_PTR(-ENOMEM);
    }

    mem_block_data->mem_base = mem_base;
    fb_dma_buf = dma_buf_export(mem_block_data, &g_hifb_mem_block_ops, mem_size, flags, HI_NULL);
    if (IS_ERR(fb_dma_buf)) {
        kfree(mem_block_data);
        mem_block_data = HI_NULL;
    }
    return fb_dma_buf;
}
510 #else
/*
 * Export the framebuffer memory block as a dma-buf (kernels >= 4.0 API).
 * @mem_base/@mem_size must be page-aligned.
 *
 * Fix: the original stored the raw physical address in export_info.priv,
 * but every g_hifb_mem_block_ops callback dereferences priv as a
 * hifb_mem_block_pdata pointer (reading ->mem_base), which would fault.
 * Allocate a pdata wrapper exactly as the < 4.0 path does; it is freed only
 * if the export fails.
 */
struct dma_buf *hifb_memblock_export(phys_addr_t mem_base, size_t mem_size, hi_s32 flags)
{
    DEFINE_DMA_BUF_EXPORT_INFO(export_info);
    struct dma_buf *fb_dma_buf = HI_NULL;
    hifb_mem_block_pdata *mem_block_data = HI_NULL;

    if ((mem_base != PAGE_ALIGN(mem_base)) || (mem_size != PAGE_ALIGN(mem_size))) {
        return ERR_PTR(-EINVAL);
    }

    mem_block_data = kzalloc(sizeof(*mem_block_data), GFP_KERNEL);
    if (mem_block_data == HI_NULL) {
        return ERR_PTR(-ENOMEM);
    }
    mem_block_data->mem_base = mem_base;

    export_info.ops = &g_hifb_mem_block_ops;
    export_info.size = mem_size;
    export_info.flags = flags;
    export_info.resv = HI_NULL;
    export_info.priv = mem_block_data;

    fb_dma_buf = dma_buf_export(&export_info);
    if (IS_ERR(fb_dma_buf)) {
        kfree(mem_block_data);
    }

    return fb_dma_buf;
}
530 #endif
531 
532 /****************************** dmabuf end **************************/
533 
534