/*
 * Copyright (C) 2022 HiSilicon (Shanghai) Technologies CO., LIMITED.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/mman.h>
#include <linux/proc_fs.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <linux/version.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/list.h>

#include "securec.h"
#include "drv_media_mem.h"
#include "drv_mmz_ioctl.h"
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
#include "drv_tzsmmu.h"
#endif
#include "drv_tee_smmu_agent.h"
#include "himedia.h"

#ifdef CONFIG_COMPAT
#include "drv_mmz_compat.h"
#endif

#if LINUX_VERSION_CODE > KERNEL_VERSION(5, 10, 0)
#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#endif
#include <linux/hisilicon/hisi_iommu.h>

#define HI_ZERO 0

DEFINE_SEMAPHORE(process_lock);
LIST_HEAD(release_list);
struct release_info {
    hil_mmb_t *mmb;
    struct list_head list;
};

int mmz_flush_dcache_mmb(const struct mmb_info *mi)
{
    hil_mmb_t *mmb = NULL;
    struct mmb_udata *udata = NULL;
    struct mmb_udata *p = NULL;
    unsigned int phyaddr;
    unsigned int iommu;

    if (mi == NULL) {
        hi_mmz_warn("err args!\n");
        return -EINVAL;
    }
    /*
     * If mi->phys_addr is not MMB_ADDR_INVALID, mi->phys_addr is used first,
     * because mi->smmu_addr may legitimately be 0 while mi->phys_addr has no
     * such ambiguity. So phys_addr is the stricter selector.
     */
    if (mi->phys_addr != MMB_ADDR_INVALID) {
        phyaddr = mi->phys_addr;
        iommu = 0;
    } else {
        phyaddr = mi->smmu_addr;
        iommu = 1;
    }

    mmb = hil_mmb_getby_phys(phyaddr, iommu);
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
    if (mmb == NULL) {
        hi_mmz_warn("err args!\n");
        return -EINVAL;
    }
#else
    if (mmb == NULL || mmb->handle == NULL) {
        hi_mmz_warn("err args!\n");
        return -EINVAL;
    }
#endif

    spin_lock(&mmb->u_lock);
    if (list_empty(&mmb->ulist)) {
        if (iommu) {
            hi_mmz_warn("mmb smmu:0x%x has not been mapped yet!\n", (unsigned int)phyaddr);
        } else {
            hi_mmz_warn("mmb phy:0x%x has not been mapped yet!\n", (unsigned int)phyaddr);
        }
        spin_unlock(&mmb->u_lock);
        return -EINVAL;
    }

    list_for_each_entry(p, &mmb->ulist, list) {
        if (p->tgid == current->tgid) {
            udata = p;
            break;
        }
    }
    spin_unlock(&mmb->u_lock);

    if (udata == NULL || udata->map_cached == 0 || udata->uvirt == NULL) {
        hi_mmz_debug("error!\n");
        return -EINVAL;
    }

#ifndef DMABUF_FLUSH_CACHE
    flush_inner_cache((void *)udata->uvirt, mmb->length);
    flush_outer_cache_range(phyaddr, mmb->length, iommu);
#else
    dma_buf_end_cpu_access(mmb->handle, DMA_TO_DEVICE); /* clean cache */
    dma_buf_begin_cpu_access(mmb->handle, DMA_FROM_DEVICE); /* invalidate cache */
#endif

    return 0;
}
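
/*
 * Usage sketch (illustrative only, assuming a /dev/mmz_userdev node and the
 * ioctl codes from drv_mmz_ioctl.h): a user-space caller that mapped a buffer
 * cached would flush it roughly like this:
 *
 *     struct mmb_info mi = {0};
 *     mi.phys_addr = MMB_ADDR_INVALID;  // select the SMMU branch
 *     mi.smmu_addr = smmu_addr;         // address returned by IOC_MMB_ALLOC
 *     ioctl(fd, IOC_MMB_FLUSH_DCACHE, &mi);
 */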

int mmz_flush_all_dcache(void)
{
#ifdef CONFIG_SMP
    on_each_cpu((smp_call_func_t)mmz_flush_dcache_all, NULL, 1);
#else
    mmz_flush_dcache_all();
#endif

#ifndef CONFIG_64BIT
    /* just for A9 core */
    outer_flush_all();
#endif
    return 0;
}

static int mmz_userdev_open(struct inode *inode, struct file *file)
{
    struct mmz_userdev_info *pmu = NULL;

    if (file == NULL) {
        hi_mmz_error("err args!\n");
        return -EINVAL;
    }

    pmu = kmalloc(sizeof(*pmu), GFP_KERNEL);
    if (pmu == NULL) {
        hi_mmz_error("alloc mmz_userdev_info failed!\n");
        return -ENOMEM;
    }
    pmu->tpid = current->tgid;
    sema_init(&pmu->sem, 1);
    pmu->mmap_tpid = 0;
    file->private_data = (void *)pmu;

    return HI_SUCCESS;
}

static int ioctl_mmb_alloc(const struct file *file, unsigned int iocmd, struct mmb_info *mi)
{
    hil_mmb_t *mmb = NULL;

    mmb = hil_mmb_alloc(mi->mmb_name, mi->size, mi->align, mi->mmz_name, HI_USER_ALLOC);
    if (mmb == NULL) {
        return -ENOMEM;
    }

    if (mmb->iommu) {
        mi->smmu_addr = mmb->iommu_addr;
    } else {
        mi->phys_addr = mmb->phys_addr;
    }

    return HI_SUCCESS;
}
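
/*
 * Usage sketch (illustrative only; the exact struct mmb_info field layout is
 * defined in drv_mmz_ioctl.h): a user-space allocation and the matching free
 * go through the 'm'-type ioctls dispatched in mmz_userdev_ioctl_m() below:
 *
 *     struct mmb_info mi = {0};
 *     mi.size = 0x100000;
 *     ioctl(fd, IOC_MMB_ALLOC, &mi);  // fills mi.smmu_addr or mi.phys_addr
 *     ...
 *     ioctl(fd, IOC_MMB_FREE, &mi);
 */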

static hil_mmb_t *get_mmb_info(unsigned int addr, unsigned int iommu)
{
    hil_mmb_t *p = NULL;

    p = hil_mmb_getby_phys(addr, iommu);
    if (p == NULL) {
        hi_mmz_warn("mmb(0x%08X) not found!\n", addr);
        return NULL;
    }

    return p;
}

static int ioctl_mmb_user_unmap(const struct file *file, unsigned int iocmd, struct mmb_info *mi);

static int ioctl_mmb_free(const struct file *file, unsigned int iocmd, const struct mmb_info *mi)
{
    int ret;
    unsigned int iommu;
    unsigned int phyaddr;
    hil_mmb_t *mmb = NULL;

    if (mi->phys_addr != MMB_ADDR_INVALID) {
        phyaddr = mi->phys_addr;
        iommu = 0;
    } else {
        phyaddr = mi->smmu_addr;
        iommu = 1;
    }

    mmb = hil_mmb_getby_phys(phyaddr, iommu);
    if (mmb == NULL) {
        hi_mmz_warn("mmb free failed!\n");
        return -EPERM;
    }

    down(&g_mmz_lock);
    ret = hil_mmb_free(mmb);
    up(&g_mmz_lock);

    return ret;
}

#if LINUX_VERSION_CODE > KERNEL_VERSION(5, 10, 0)
static int ioctl_mmb_get(const struct file *file, unsigned int iocmd, const struct mmb_info *mi)
{
    int ret;
    unsigned int iommu;
    unsigned int phyaddr;

    if (mi->phys_addr != MMB_ADDR_INVALID) {
        phyaddr = mi->phys_addr;
        iommu = 0;
    } else {
        phyaddr = mi->smmu_addr;
        iommu = 1;
    }

    ret = mmb_get(phyaddr, iommu);

    return ret;
}

static int ioctl_mmb_put(const struct file *file, unsigned int iocmd, const struct mmb_info *mi)
{
    int ret;
    unsigned int iommu;
    unsigned int phyaddr;

    if (mi->phys_addr != MMB_ADDR_INVALID) {
        phyaddr = mi->phys_addr;
        iommu = 0;
    } else {
        phyaddr = mi->smmu_addr;
        iommu = 1;
    }

    ret = mmb_put(phyaddr, iommu);

    return ret;
}

static int ioctl_mmb_query_ref(const struct file *file, unsigned int iocmd, struct mmb_info *mi)
{
    int ret;
    unsigned int iommu;
    unsigned int phyaddr;
    unsigned int ref;

    if (mi->phys_addr != MMB_ADDR_INVALID) {
        phyaddr = mi->phys_addr;
        iommu = 0;
    } else {
        phyaddr = mi->smmu_addr;
        iommu = 1;
    }

    ret = mmb_ref_query(phyaddr, iommu, &ref);
    if (ret == HI_SUCCESS) {
        mi->ref = ref;
    }

    return ret;
}

static int ioctl_mmb_query_source(const struct file *file, unsigned int iocmd, struct mmb_info *mi)
{
    int ret;
    int source;

    if (mi->smmu_addr == MMB_ADDR_INVALID) {
        return HI_FAILURE;
    }

    ret = mem_source_query(mi->smmu_addr, &source);
    if (ret == HI_SUCCESS) {
        mi->source = source;
    }

    return ret;
}

int dma_buf_export_fd(unsigned int phyaddr, unsigned int iommu)
{
    hil_mmb_t *mmb = NULL;
    mmb = hil_mmb_getby_phys(phyaddr, iommu);
    if (mmb == NULL || mmb->handle == NULL) {
        hi_mmz_warn("export dma buf fd failed!\n");
        return -EPERM;
    }

    return hi_dma_buf_fd(mmb->handle, O_RDWR | O_CLOEXEC);
}

static int ioctl_dma_buf_export_fd(const struct file *file, unsigned int iocmd, struct mmb_info *mi)
{
    int fd;
    unsigned int iommu;
    unsigned int phyaddr;

    if (mi->phys_addr != MMB_ADDR_INVALID) {
        phyaddr = mi->phys_addr;
        iommu = 0;
    } else {
        phyaddr = mi->smmu_addr;
        iommu = 1;
    }

    fd = dma_buf_export_fd(phyaddr, iommu);
    if (fd < 0) {
        return HI_FAILURE;
    }
    mi->fd = fd;

    return HI_SUCCESS;
}
#endif
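
/*
 * Usage sketch (illustrative only, kernels above 5.10): IOC_DMA_BUF_EXPORT_FD
 * wraps the mmb's dma-buf handle in a new fd that can be passed to other
 * subsystems; consume_dma_buf_fd() below is a hypothetical consumer:
 *
 *     struct mmb_info mi = {0};
 *     mi.phys_addr = MMB_ADDR_INVALID;
 *     mi.smmu_addr = smmu_addr;
 *     if (ioctl(fd, IOC_DMA_BUF_EXPORT_FD, &mi) == 0)
 *         consume_dma_buf_fd(mi.fd);
 */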

static int ioctl_mmb_get_pgtable_addr(const struct file *file, unsigned int iocmd, struct mmb_info *mi)
{
    unsigned long pt_addr_long = (unsigned long)MMB_ADDR_INVALID;
    unsigned long err_rd_addr_long = (unsigned long)MMB_ADDR_INVALID;
    unsigned long err_wr_addr_long = (unsigned long)MMB_ADDR_INVALID;

    hisi_get_iommu_ptable_addr(&pt_addr_long, &err_rd_addr_long, &err_wr_addr_long);

    if (pt_addr_long == (unsigned long)MMB_ADDR_INVALID
            || err_rd_addr_long == (unsigned long)MMB_ADDR_INVALID
            || err_wr_addr_long == (unsigned long)MMB_ADDR_INVALID) {
        return HI_FAILURE;
    }

    mi->pt_addr = (unsigned int)pt_addr_long;
    mi->err_rd_addr = (unsigned int)err_rd_addr_long;
    mi->err_wr_addr = (unsigned int)err_wr_addr_long;

    return HI_SUCCESS;
}

static int ioctl_mmb_user_map_cache_attr(hil_mmb_t *mmb, struct mmb_info *mi, int cached, unsigned long *offset)
{
    struct mmb_udata *p = NULL;
    unsigned int phyaddr;
    unsigned int iommu;
    unsigned long offset_tmp;
    int ret = 1; /* 1, cache attr not mapped */

    if (mi->phys_addr != MMB_ADDR_INVALID) {
        phyaddr = mi->phys_addr;
        iommu = 0;
    } else {
        phyaddr = mi->smmu_addr;
        iommu = 1;
    }

    if (iommu) {
        offset_tmp = phyaddr - mmb->iommu_addr;
    } else {
        offset_tmp = phyaddr - mmb->phys_addr;
    }
    *offset = offset_tmp;

    spin_lock(&mmb->u_lock);
    if ((mmb->map_ref > 0) && (!list_empty(&mmb->ulist))) {
        list_for_each_entry(p, &mmb->ulist, list) {
            if (p->tgid == current->tgid) {
                if (p->map_cached == cached) {
                    mmb->phy_ref++;
                    mmb->map_ref++;
                    mi->mapped = (void *)((uintptr_t)p->uvirt + offset_tmp);
                    p->map_ref++;
                    ret = 0; /* 0, cache attr mapped */
                    break;
                } else {
                    hi_mmz_warn("mmb<%s> is already mapped with one cache attr and cannot be remapped with another\n",
                                mmb->name);
                    ret = -EINVAL;
                    break;
                }
            }
        }
    }
    spin_unlock(&mmb->u_lock);

    return ret;
}

static unsigned long ioctl_mmb_user_remap_get_virt_addr(struct file *file, struct mmb_udata *udata, hil_mmb_t *mmb,
                                                        int cached, const struct mmb_info *mi)
{
    struct mmz_userdev_info *pmu = file->private_data;
    unsigned long prot, flags, len, addr;
#if !(LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
    unsigned long round_up_len;
#endif

    /*
     * The ion_map framework is used here, so the rules must match the ION
     * framework: vma->vm_pgoff is set to 0, and pgoff is passed through to
     * vma->vm_pgoff.
     */
    const unsigned long pgoff = 0;
    addr = 0;
    len = PAGE_ALIGN(mmb->length);

    prot = mi->prot;
    if (!prot || !(prot & PROT_READ) || !(prot & PROT_WRITE)) {
        prot = prot | PROT_READ | PROT_WRITE;
    }
    flags = mi->flags;
    if (flags == 0) {
        flags = MAP_SHARED;
    }
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
    down_write(&current->mm->mmap_sem);
#else
    down_write(&current->mm->mmap_lock);
#endif
    udata->tgid = current->tgid;
    udata->map_cached = cached;

    /* The following 3 fields are needed in the mmap function. */
    pmu->mmap_tpid = current->tgid;
    pmu->private_data = udata;
    pmu->tmp = mmb;

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
    addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
#elif LINUX_VERSION_CODE > KERNEL_VERSION(5, 10, 0)
    addr = do_mmap(file, addr, len, prot, flags, pgoff, &round_up_len, NULL);
#elif LINUX_VERSION_CODE > KERNEL_VERSION(4, 14, 0)
    addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff, &round_up_len, NULL);
#else
    addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff, &round_up_len);
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
    up_write(&current->mm->mmap_sem);
#else
    up_write(&current->mm->mmap_lock);
#endif
    if (IS_ERR_VALUE((uintptr_t)addr)) {
        hi_mmz_error("do_mmap_pgoff(file, 0, %lu, 0x%08lX, 0x%08lX, 0x%08lX) returned 0x%08lX\n",
                     len, prot, flags, pgoff, addr);
        pmu->mmap_tpid = 0;
        pmu->tmp = NULL;
        pmu->private_data = NULL;
        return 0;
    }

    /* The following 3 fields need to be cleared for the next remap operation. */
    pmu->mmap_tpid = 0;
    pmu->tmp = NULL;
    pmu->private_data = NULL;

    return addr;
}

static int ioctl_mmb_user_remap(struct file *file, unsigned int iocmd, struct mmb_info *mi, int cached)
{
    hil_mmb_t *mmb = NULL;
    struct mmb_udata *udata = NULL;
    unsigned long addr;
    unsigned int iommu;
    unsigned int phyaddr;
    unsigned long offset = 0;
    int ret;

    /*
     * If mi->phys_addr is not MMB_ADDR_INVALID, mi->phys_addr is used first,
     * because mi->smmu_addr may legitimately be 0 while mi->phys_addr has no
     * such ambiguity. So phys_addr is the stricter selector.
     */
    if (mi->phys_addr != MMB_ADDR_INVALID) {
        phyaddr = mi->phys_addr;
        iommu = 0;
    } else {
        phyaddr = mi->smmu_addr;
        iommu = 1;
    }

    mmb = get_mmb_info(phyaddr, iommu);
    if (mmb == NULL) {
        return -EPERM;
    }

    down(&g_mmz_lock);
    ret = ioctl_mmb_user_map_cache_attr(mmb, mi, cached, &offset);
    if (ret != 1) {
        up(&g_mmz_lock);
        return ret;
    }

    udata = kzalloc(sizeof(struct mmb_udata), GFP_KERNEL);
    if (udata == NULL) {
        hi_mmz_warn("nomem!\n");
        up(&g_mmz_lock);
        return HI_FAILURE;
    }

    addr = ioctl_mmb_user_remap_get_virt_addr(file, udata, mmb, cached, mi);
    if (addr == 0) {
        kfree(udata);
        up(&g_mmz_lock);
        return HI_FAILURE;
    }

    udata->uvirt = (void *)(uintptr_t)addr;
    udata->map_ref++;

    spin_lock(&mmb->u_lock);
    list_add_tail(&udata->list, &mmb->ulist);
    spin_unlock(&mmb->u_lock);

    mmb->phy_ref++;
    mmb->map_ref++;
    mi->mapped = (void *)((uintptr_t)udata->uvirt + offset);
    up(&g_mmz_lock);
    return 0;
}
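
/*
 * Usage sketch (illustrative only): IOC_MMB_USER_REMAP maps uncached and
 * IOC_MMB_USER_REMAP_CACHED maps cached; both return the user virtual address
 * in mi.mapped. Per process, a buffer can only ever hold one cache attribute
 * (enforced by ioctl_mmb_user_map_cache_attr() above):
 *
 *     mi.prot = PROT_READ | PROT_WRITE;
 *     mi.flags = MAP_SHARED;
 *     ioctl(fd, IOC_MMB_USER_REMAP_CACHED, &mi);
 *     memcpy(mi.mapped, src, len);  // CPU access through the cached mapping
 */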

static int ioctl_mmb_user_get_udata(hil_mmb_t *mmb, struct mmb_udata **udata)
{
    struct mmb_udata *p = NULL;

    spin_lock(&mmb->u_lock);
    if (list_empty(&mmb->ulist)) {
        hi_mmz_warn("[%s] addr(0x%08X) hasn't been user-mapped yet!\n",
                    mmb->name, mmb->iommu ? mmb->iommu_addr : mmb->phys_addr);
        spin_unlock(&mmb->u_lock);
        return -EIO;
    }

    list_for_each_entry(p, &mmb->ulist, list) {
        if (current->tgid == p->tgid) {
            *udata = p;
            break;
        }
    }
    spin_unlock(&mmb->u_lock);

    return 0; /* 0, success */
}

static int ioctl_mmb_user_reference_decrease(hil_mmb_t *mmb, struct mmb_udata *udata)
{
    uintptr_t addr;
    unsigned int len;
    int ret = 0;
    int ref;

    addr = (uintptr_t)udata->uvirt;
    len = PAGE_ALIGN(mmb->length);

    ref = udata->map_ref - 1;
    if (!ref) {
        /*
         * A struct vm_operations_struct is registered when mmap is called,
         * so the ref count is decreased in vm_operations_struct->close.
         */
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
        down_write(&current->mm->mmap_sem);
#else
        down_write(&current->mm->mmap_lock);
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)
        ret = do_munmap(current->mm, addr, len);
#else
        ret = do_munmap(current->mm, addr, len, NULL);
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
        up_write(&current->mm->mmap_sem);
#else
        up_write(&current->mm->mmap_lock);
#endif

        if (!ret) {
            /* the memory may be used by the system later, so clean the L2 cache (L1 is handled by do_munmap) */
            udata->uvirt = NULL;
        }
        spin_lock(&mmb->u_lock);
        udata->map_ref--;
        mmb->map_ref--;
        mmb->phy_ref--;
        list_del(&udata->list);
        spin_unlock(&mmb->u_lock);
        kfree(udata);
        udata = NULL;
    } else {
        spin_lock(&mmb->u_lock);
        udata->map_ref--;
        mmb->map_ref--;
        mmb->phy_ref--;
        spin_unlock(&mmb->u_lock);
    }

    return ret;
}

static int ioctl_mmb_user_unmap(const struct file *file, unsigned int iocmd, struct mmb_info *mi)
{
    hil_mmb_t *mmb = NULL;
    struct mmb_udata *udata = NULL;
    unsigned int iommu;
    unsigned int phyaddr;
    int ret;

    /*
     * If mi->phys_addr is not MMB_ADDR_INVALID, mi->phys_addr is used first,
     * because mi->smmu_addr may legitimately be 0 while mi->phys_addr has no
     * such ambiguity. So phys_addr is the stricter selector.
     */
    phyaddr = (mi->phys_addr != MMB_ADDR_INVALID) ? mi->phys_addr : mi->smmu_addr;
    iommu = (mi->phys_addr != MMB_ADDR_INVALID) ? 0 : 1;

    mmb = get_mmb_info(phyaddr, iommu);
    if (mmb == NULL) {
        return -EPERM;
    }

    down(&g_mmz_lock);
    if (!mmb->map_ref) {
        hi_mmz_warn("[%s] addr(0x%08X) hasn't been mapped yet!\n",
                    mmb->name, mmb->iommu ? mmb->iommu_addr : mmb->phys_addr);
        mi->mapped = NULL;
        up(&g_mmz_lock);
        return -EIO;
    }

    ret = ioctl_mmb_user_get_udata(mmb, &udata);
    if (ret) {
        mi->mapped = NULL;
        up(&g_mmz_lock);
        return ret;
    }

    if (udata == NULL) {
        /* mmap() was not called through this driver, or unmap ran while the process was being killed */
        hi_mmz_warn("do not call mmap() yourself!\n");
        up(&g_mmz_lock);
        return 0;
    }

    if (udata->map_cached) {
        up(&g_mmz_lock);
#ifndef DMABUF_FLUSH_CACHE
        mmz_flush_dcache_area((void *)udata->uvirt, (size_t)mmb->length);
        flush_outer_cache_range(phyaddr, mmb->length, iommu);
#else
        dma_buf_end_cpu_access(mmb->handle, DMA_TO_DEVICE); /* clean cache */
        dma_buf_begin_cpu_access(mmb->handle, DMA_FROM_DEVICE); /* invalidate cache */
#endif
        down(&g_mmz_lock);
    }

    udata->unmap_flag = (udata->unmap_flag | NORMAL_FLAG);

    ret = ioctl_mmb_user_reference_decrease(mmb, udata);
    if ((mmb->phy_ref == 0) && (mmb->map_ref == 0) && (mmb->cma_smmu_ref == 0)) {
        hil_mmb_free(mmb);
    }

    mi->mapped = NULL;
    up(&g_mmz_lock);
    return ret;
}
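
/*
 * Usage sketch (illustrative only): the unmap pairs with the remap above; for
 * cached mappings the dcache is cleaned in this handler before the reference
 * drops, so no explicit IOC_MMB_FLUSH_DCACHE is required first:
 *
 *     ioctl(fd, IOC_MMB_USER_UNMAP, &mi);  // mi.mapped is cleared on return
 */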

/* find mmb info by user-space address */
static hil_mmb_t *get_mmbinfo_byusraddr(unsigned long addr, struct mmb_udata *udata)
{
    hil_mmb_t *mmb = NULL;
    struct mmb_udata *p = NULL;
    hil_mmz_t *zone = NULL;
    int ret;

    if (addr == (unsigned long)NULL) {
        return NULL;
    }

    down(&g_mmz_lock);
    list_for_each_entry(zone, &g_mmz_list, list) {
        struct rb_node *n;
        for (n = rb_first(&zone->root); n; n = rb_next(n)) {
            hil_mmb_t *m = NULL;
            if (zone->iommu) {
                m = rb_entry(n, hil_mmb_t, s_node);
            } else {
                m = rb_entry(n, hil_mmb_t, node);
            }

            if ((!m->map_ref) && list_empty(&m->ulist)) {
                continue;
            }
            spin_lock(&m->u_lock);
            list_for_each_entry(p, &m->ulist, list) {
                if ((p->tgid == current->tgid) && ((uintptr_t)p->uvirt <= addr) &&
                    ((uintptr_t)p->uvirt + m->length > addr)) {
                    mmb = m;
                    ret = memcpy_s(udata, sizeof(struct mmb_udata), p, sizeof(struct mmb_udata));
                    if (ret != EOK) {
                        hi_mmz_warn("memcpy failed\n");
                        mmb = NULL;
                    }
                    spin_unlock(&m->u_lock);
                    goto end;
                }
            }
            spin_unlock(&m->u_lock);
        }
    }
end:
    up(&g_mmz_lock);
    return mmb;
}

static int ioctl_mmb_user_getphyaddr(const struct file *file, unsigned int iocmd, struct mmb_info *mi)
{
    hil_mmb_t *p = NULL;
    struct mmb_udata udata;
    int ret;

    ret = memset_s(&udata, sizeof(udata), 0, sizeof(udata));
    if (ret != EOK) {
        hi_mmz_warn("memset failed\n");
        return -EPERM;
    }

    p = get_mmbinfo_byusraddr((uintptr_t)mi->mapped, &udata);
    if (p == NULL) {
        return -EPERM;
    }
    if (p->iommu_addr != MMB_ADDR_INVALID) {
        mi->smmu_addr = p->iommu_addr + ((uintptr_t)mi->mapped - (uintptr_t)udata.uvirt);
    }

    if (p->phys_addr != MMB_ADDR_INVALID) {
        mi->phys_addr = p->phys_addr + ((uintptr_t)mi->mapped - (uintptr_t)udata.uvirt);
    }

    mi->size = p->length - ((uintptr_t)mi->mapped - (uintptr_t)udata.uvirt);
    return 0;
}
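
/*
 * Usage sketch (illustrative only): IOC_MMB_USER_GETPHYADDR translates any
 * address inside one of the calling process's mappings back to its phys/smmu
 * address plus the remaining length:
 *
 *     mi.mapped = (void *)(user_va + offset);
 *     ioctl(fd, IOC_MMB_USER_GETPHYADDR, &mi);  // fills mi.phys_addr/mi.smmu_addr and mi.size
 */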

static int ioctl_mmb_user_cma_mapto_iommu(const struct file *file, unsigned int iocmd, struct mmb_info *mi)
{
    mmb_addr_t addr;

    addr = hil_mmb_cma_mapto_iommu(mi->phys_addr, 0);
    if (addr == MMB_ADDR_INVALID) {
        hi_mmz_warn("phys:0x%x cma mapto smmu failed!\n", (unsigned int)mi->phys_addr);
        return -EPERM;
    }
    mi->smmu_addr = addr;
    return 0;
}

static int ioctl_mmb_user_cma_unmapfrom_iommu(const struct file *file, unsigned int iocmd, const struct mmb_info *mi)
{
    int ret;

    ret = hil_mmb_cma_unmapfrom_iommu(mi->smmu_addr, 1);
    if (ret == HI_FAILURE) {
        hi_mmz_warn("smmu:0x%x cma unmapfrom smmu failed!\n", (unsigned int)mi->smmu_addr);
        return -EPERM;
    }
    return HI_SUCCESS;
}

int mmz_userdev_ioctl_m(struct inode *inode, struct file *file, unsigned int cmd, struct mmb_info *mi)
{
    int ret = 0;
    switch (_IOC_NR(cmd)) {
        case _IOC_NR(IOC_MMB_ALLOC):
            ret = ioctl_mmb_alloc(file, cmd, mi);
            break;
        case _IOC_NR(IOC_MMB_FREE):
            ret = ioctl_mmb_free(file, cmd, mi);
            break;
#if LINUX_VERSION_CODE > KERNEL_VERSION(5, 10, 0)
        case _IOC_NR(IOC_MMB_GET):
            ret = ioctl_mmb_get(file, cmd, mi);
            break;
        case _IOC_NR(IOC_MMB_PUT):
            ret = ioctl_mmb_put(file, cmd, mi);
            break;
#endif
        case _IOC_NR(IOC_MMB_USER_REMAP):
            ret = ioctl_mmb_user_remap(file, cmd, mi, 0);
            break;
        case _IOC_NR(IOC_MMB_USER_REMAP_CACHED):
            ret = ioctl_mmb_user_remap(file, cmd, mi, 1);
            break;
        case _IOC_NR(IOC_MMB_USER_UNMAP):
            ret = ioctl_mmb_user_unmap(file, cmd, mi);
            break;
        case _IOC_NR(IOC_MMB_USER_GETPHYADDR):
            ret = ioctl_mmb_user_getphyaddr(file, cmd, mi);
            break;
        case _IOC_NR(IOC_MMB_USER_CMA_MAPTO_SMMU):
            ret = ioctl_mmb_user_cma_mapto_iommu(file, cmd, mi);
            break;
        case _IOC_NR(IOC_MMB_USER_CMA_UNMAPTO_SMMU):
            ret = ioctl_mmb_user_cma_unmapfrom_iommu(file, cmd, mi);
            break;
#if LINUX_VERSION_CODE > KERNEL_VERSION(5, 10, 0)
        case _IOC_NR(IOC_MMB_USER_QUERY_REF):
            ret = ioctl_mmb_query_ref(file, cmd, mi);
            break;
        case _IOC_NR(IOC_MMB_USER_QUERY_SRC):
            ret = ioctl_mmb_query_source(file, cmd, mi);
            break;
        case _IOC_NR(IOC_DMA_BUF_EXPORT_FD):
            ret = ioctl_dma_buf_export_fd(file, cmd, mi);
            break;
#endif
        case _IOC_NR(IOC_SMMU_GET_PGTABLE_ADDR):
            ret = ioctl_mmb_get_pgtable_addr(file, cmd, mi);
            break;
        default:
            hi_mmz_error("invalid ioctl cmd = %08X\n", cmd);
            ret = -EINVAL;
            break;
    }

    return ret;
}
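
/*
 * All 'm'-type commands carry a struct mmb_info payload: the size bits of the
 * ioctl code are checked in mmz_userdev_ioctl_get_data() and the IOC_OUT
 * direction bit decides whether the result is copied back to user space in
 * mmz_userdev_ioctl() below; the dispatcher above keys on _IOC_NR(cmd) only.
 */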

static long mmz_userdev_ioctl_get_data(unsigned int cmd, unsigned long arg, void *data, size_t size)
{
    int ret;

    if (size != _IOC_SIZE(cmd) || arg == 0) {
        hi_mmz_error("_IOC_SIZE(cmd) = %d, arg = 0x%08lX\n", _IOC_SIZE(cmd), arg);
        return -EINVAL;
    }

    ret = memset_s(data, size, 0, size);
    if (ret != EOK) {
        hi_mmz_warn("memset failed\n");
        return -EINVAL;
    }

    if (copy_from_user(data, (void *)(uintptr_t)arg, _IOC_SIZE(cmd))) {
        hi_mmz_error("copy_from_user error.\n");
        return -EINVAL;
    }

    return 0;
}

static int mmz_userdev_ioctl_c(unsigned int cmd, unsigned long arg)
{
    struct mmb_info mmi;
    int ret;

    if (arg == 0) {
        mmz_flush_all_dcache();
        ret = 0;
        goto __error_exit;
    }

    ret = mmz_userdev_ioctl_get_data(cmd, arg, (void *)&mmi, sizeof(mmi));
    if (ret) {
        hi_mmz_error("get data error.\n");
        goto __error_exit;
    }

    switch (_IOC_NR(cmd)) {
        case _IOC_NR(IOC_MMB_FLUSH_DCACHE): {
            mmz_flush_dcache_mmb(&mmi);
            ret = 0;
            break;
        }
        default:
            ret = -EINVAL;
            break;
    }

__error_exit:
    return ret;
}

static long mmz_userdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
    long ret = 0;
    struct mmz_userdev_info *pmu = NULL;

    if (file == NULL || file->private_data == NULL) {
        hi_mmz_error("invalid params!\n");
        return -EINVAL;
    }

    pmu = file->private_data;
    down(&pmu->sem);

    if (_IOC_TYPE(cmd) == 'm') {
        struct mmb_info mi = {0};

        ret = mmz_userdev_ioctl_get_data(cmd, arg, (void *)&mi, sizeof(mi));
        if (ret) {
            hi_mmz_error("get data error.\n");
            goto __error_exit;
        }

        ret = mmz_userdev_ioctl_m(file->f_path.dentry->d_inode, file, cmd, &mi);
        if (!ret && (cmd & IOC_OUT)) {
            if (copy_to_user((void *)(uintptr_t)arg, &mi, _IOC_SIZE(cmd))) {
                hi_mmz_error("mmz_userdev_ioctl: copy_to_user error.\n");
                ret = -EFAULT;
                goto __error_exit;
            }
        }
    } else if (_IOC_TYPE(cmd) == 'c') {
        ret = mmz_userdev_ioctl_c(cmd, arg);
        if (ret) {
            hi_mmz_error("mmz_userdev_ioctl_c error.\n");
            goto __error_exit;
        }
    } else {
        ret = -EINVAL;
    }

__error_exit:
    up(&pmu->sem);
    return ret;
}

static void mmz_vm_open(struct vm_area_struct *vma)
{
    return;
}

static void mmz_vm_close(struct vm_area_struct *vma)
{
    return;
}

static struct vm_operations_struct g_mmz_vma_ops = {
    .open = mmz_vm_open,
    .close = mmz_vm_close,
};

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
static int mmz_userdev_mmap(struct file *file, struct vm_area_struct *vma)
{
    struct mmz_userdev_info *pmu = NULL;
    hil_mmb_t *mmb = NULL;
    struct mmb_udata *udata = NULL;
    int map_fd;
    struct file *map_file = NULL;
    unsigned int cached;
    int ret;

    if (vma == NULL || file == NULL || file->private_data == NULL) {
        hi_mmz_error("sys err\n");
        return -EPERM;
    }

    pmu = file->private_data;
    mmb = pmu->tmp;
    udata = pmu->private_data;

    if (mmb == NULL || udata == NULL) {
        return -EPERM;
    }

    if (current->tgid != pmu->mmap_tpid) {
        hi_mmz_error("do not call mmap() yourself!\n");
        return -EPERM;
    }

    /*
     * ION_FLAG_CACHED alone means a cached mapping that is built lazily, on
     * the page fault when a page is first touched.
     * ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC means a cached mapping
     * that is built immediately when this function is called.
     */
    if (udata->map_cached) {
        cached = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;
    } else {
        cached = 0;
    }

    if (mmb->client == NULL || mmb->handle == NULL) {
        return -EBADF;
    }
    set_buffer_cached(mmb->client, mmb->handle, cached);
    map_fd = ion_share_dma_buf_fd(mmb->client, mmb->handle);
    map_file = fget(map_fd);
    if (map_file == NULL) {
        sys_close(map_fd);
        return -EBADF;
    }

    if (map_file->f_op == NULL || !map_file->f_op->mmap) {
        goto err;
    }

    /*
     * The ref count is decreased in vma->vm_ops->close. This satisfies:
     * 1. the ref count can be decreased before the release func is called;
     * 2. no effect on the normal operation flow.
     */
    vma->vm_private_data = mmb;
    vma->vm_ops = &g_mmz_vma_ops;
    mmz_vm_open(vma);

    ret = map_file->f_op->mmap(map_file, vma);
    fput(map_file);
    sys_close(map_fd);
    return ret;
err:
    fput(map_file); /* drop the reference taken by fget() before bailing out */
    sys_close(map_fd);
    return -EBADF;
}
#else
int mmz_userdev_mmap(struct file *file, struct vm_area_struct *vma)
{
    struct mmz_userdev_info *pmu = NULL;
    struct mmb_udata *udata = NULL;
    hil_mmb_t *mmb = NULL;
    int ret;

    if (vma == NULL || file == NULL || file->private_data == NULL) {
        hi_mmz_error("sys err\n");
        return -EPERM;
    }

    pmu = file->private_data;
    mmb = pmu->tmp;
    udata = pmu->private_data;

    if (mmb == NULL || mmb->handle == NULL || udata == NULL) {
        return -EPERM;
    }
    if (current->tgid != pmu->mmap_tpid) {
        hi_mmz_error("do not call mmap() yourself!\n");
        return -EPERM;
    }

    /*
     * The ref count is decreased in vma->vm_ops->close. This satisfies:
     * 1. the ref count can be decreased before the release func is called;
     * 2. no effect on the normal operation flow.
     */
    vma->vm_private_data = mmb;
    vma->vm_ops = &g_mmz_vma_ops;
    if (!udata->map_cached) {
        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
    }

    mmz_vm_open(vma);

    ret = dma_buf_mmap(mmb->handle, vma, vma->vm_pgoff);
    if (ret) {
        return -EBADF;
    }

    return ret;
}
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
/* the caller must validate mmb before calling this func */
static int force_secmem_free(const hil_mmb_t *mmb)
{
    int ret;
    unsigned int base_addr;
    while (mmb->sec_smmu_ref) {
        if (mmb->sec_flag) {
            if (mmb->iommu) {
                base_addr = secmem_free(mmb->sec_smmu, 1);
            } else {
                base_addr = secmem_free(mmb->phys_addr, 0);
            }
            if (base_addr == 0) {
                hi_mmz_warn("secmem_free failed, mmb->iommu = %d!\n", mmb->iommu);
            }
        } else {
            ret = secmem_unmap_from_secsmmu(mmb->sec_smmu, mmb->iommu);
            if (ret != 0) {
                hi_mmz_warn("secmem_unmap_from_secsmmu failed!\n");
                return -1;
            }
        }
    }

    return 0;
}
#endif

static void force_mmb_free(hil_mmb_t *mmb)
{
    struct mmb_kdata *p = NULL;
    int ref;
    int ret;

    if (mmb == NULL) {
        hi_mmz_warn("err args, free mmb failed\n");
        return;
    }

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
    ret = force_secmem_free(mmb);
    if (ret) {
        hi_mmz_warn("force_secmem_free failed\n");
        return;
    }
#endif

    if (!mmb->iommu) {
        while (mmb->cma_smmu_ref) {
            ret = hil_mmb_cma_unmapfrom_iommu(mmb->iommu_addr, !mmb->iommu);
            if (ret) {
                hi_mmz_warn("unmap iommu failed!\n");
                return;
            }
        }
    }

    p = mmb->kdata;
    if (p != NULL) {
        ref = p->kmap_ref;
        while (ref) {
            p->kmap_ref--;
            mmb->map_ref--;
            mmb->phy_ref--;
            ref = p->kmap_ref;
            if (!p->kmap_ref) {
                /* needs the same checking as the user-space path */
                vunmap(p->kvirt);
                kfree(p);
                p = NULL;
                mmb->kdata = NULL;
            }
        }
    }

    down(&g_mmz_lock);
    hil_mmb_free(mmb);
    up(&g_mmz_lock);
}

static void force_mem_free(void)
{
    struct release_info *p = NULL, *q = NULL;
    int ret;

    ret = list_empty(&release_list);
    if (ret) {
        /* nothing to release */
        return;
    }

    list_for_each_entry_safe(p, q, &release_list, list) {
        force_mmb_free(p->mmb);
        list_del(&p->list);
        kfree(p);
    }
}

static int add_release_list(hil_mmb_t *m)
{
    int ret = 0;
    struct release_info *info = NULL;

    if (m == NULL) {
        hi_mmz_warn("sys err\n");
        return -1;
    }

    info = kmalloc(sizeof(struct release_info), GFP_KERNEL);
    if (info == NULL) {
        hi_mmz_warn("no mem, release failed\n");
        return -1;
    }

    ret = memset_s(info, sizeof(struct release_info), 0, sizeof(struct release_info));
    if (ret != 0) {
        kfree(info); /* don't leak the node on memset_s failure */
        return -1;
    }

    info->mmb = m;
    list_add_tail(&info->list, &release_list);

    return 0;
}

static int mmz_userdev_add_mmb_to_release_list(hil_mmb_t *m, pid_t tpid)
{
    struct mmb_udata *q = NULL;
    struct mmb_udata *p = NULL;
    int ret;

    if ((m->flag != HI_KERNEL_ALLOC) && (m->owner_id == tpid)) {
        if (add_release_list(m)) {
            return -1;
        }
    }

    spin_lock(&m->u_lock);
    ret = list_empty(&m->ulist);
    if (!ret) {
        list_for_each_entry_safe(q, p, &m->ulist, list) {
            if ((m->flag == HI_KERNEL_ALLOC) && (q->tgid == tpid)) {
                while (q->map_ref) {
                    q->map_ref--;
                    m->map_ref--;
                    m->phy_ref--;
                }
                list_del(&q->list);
                kfree(q);
                q = NULL;
                if (m->phy_ref == 0) {
                    m->phy_ref = 1;
                    spin_unlock(&m->u_lock);
                    add_release_list(m);
                    spin_lock(&m->u_lock);
                }
                continue;
            }
            /* user-mode alloc, then user-mode use */
            if (((m->flag != HI_KERNEL_ALLOC) && (m->owner_id == tpid)) || (q->tgid == tpid)) {
                while (q->map_ref) {
                    q->map_ref--;
                    m->map_ref--;
                    m->phy_ref--;
                }
                list_del(&q->list);
                kfree(q);
                q = NULL;
            }
        }
    }
    spin_unlock(&m->u_lock);

    return 0;
}

static int mmz_userdev_release(struct inode *inode, struct file *file)
{
    struct mmz_userdev_info *pmu = NULL;
    hil_mmz_t *zone = NULL;
    hil_mmz_t *z = NULL;
    int ret;

    if (file == NULL || file->private_data == NULL) {
        hi_mmz_error("err args!\n");
        return -EPERM;
    }

    pmu = file->private_data;

    down(&process_lock);
    down(&g_mmz_lock);
    list_for_each_entry_safe(zone, z, &g_mmz_list, list) {
        struct rb_node *n;
        for (n = rb_first(&zone->root); n; n = rb_next(n)) {
            hil_mmb_t *m = NULL;
            if (zone->iommu) {
                m = rb_entry(n, hil_mmb_t, s_node);
            } else {
                m = rb_entry(n, hil_mmb_t, node);
            }

            ret = mmz_userdev_add_mmb_to_release_list(m, pmu->tpid);
            if (ret) {
                goto out;
            }
        }
    }
out:
    up(&g_mmz_lock);
    force_mem_free();
    up(&process_lock);
    file->private_data = NULL;
    kfree(pmu);

    return 0;
}

static struct file_operations g_mmz_userdev_fops = {
    .owner = THIS_MODULE,
    .open = mmz_userdev_open,
    .release = mmz_userdev_release,
    .unlocked_ioctl = mmz_userdev_ioctl,
#ifdef CONFIG_COMPAT
    .compat_ioctl = compat_mmz_userdev_ioctl,
#endif
    .mmap = mmz_userdev_mmap,
};

#if !(LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
#if (HI_PROC_SUPPORT == 1)
static int proc_mmz_read(struct inode *inode, struct file *file)
{
    return single_open(file, mmz_read_proc, PDE_DATA(inode));
}

#define MAX_BUFFER_LENGTH 10

ssize_t proc_mmz_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
    char   *order_info = NULL;
    size_t len         = MAX_BUFFER_LENGTH;
    int    flag        = -1;

    if ((ppos == NULL) || (*ppos >= MAX_BUFFER_LENGTH) || (count >= MAX_BUFFER_LENGTH)) {
        return -EFBIG;
    }

    order_info = kmalloc(MAX_BUFFER_LENGTH + 1, GFP_KERNEL);
    if (order_info == NULL) {
        return -ENOMEM;
    }

    len = min(len, count);

    if (copy_from_user(order_info, buffer, len)) {
        kfree(order_info);
        order_info = NULL;
        return -EFAULT;
    }
    order_info[len] = '\0'; /* terminate right after the copied bytes; the tail of the buffer is uninitialized */

    if (strstr(order_info, "help")) {
        hi_mmz_fatal("Usage:\n");
        hi_mmz_fatal("      echo help >/proc/media-mem : get help on echo XXX >/proc/media-mem\n");
        hi_mmz_fatal("      echo show >/proc/media-mem : show the current print level value\n");
        hi_mmz_fatal("      echo n    >/proc/media-mem : set the print level, 0 < n < 6\n");
        goto end;
    }

    if (strstr(order_info, "show")) {
        hi_mmz_fatal("The mmz print level now is %d\n", g_mmz_print_level);
        goto end;
    }

    flag = order_info[0];
    if (len > 2 || flag < '1' || flag > '5') {
        hi_mmz_fatal("Input error; enter 'echo help >/proc/media-mem' to get help\n");
        goto end;
    }
    g_mmz_print_level = flag - '0';
    hi_mmz_fatal("Set succeeded, the mmz print level now is %d\n", g_mmz_print_level);

end:
    kfree(order_info);
    order_info = NULL;
    *ppos = len;
    return len;
}
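
/*
 * Example (matching the help text above):
 *     echo show > /proc/media-mem   # print the current mmz print level
 *     echo 3    > /proc/media-mem   # set the print level to 3 (valid: 1..5)
 */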

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
static struct file_operations g_proc_mmz_fops = {
    .owner = THIS_MODULE,
    .open = proc_mmz_read,
    .read = seq_read,
    .llseek = seq_lseek,
    .write = proc_mmz_write,
    .release = single_release,
};
#else
static struct proc_ops g_proc_mmz_fops = {
    .proc_open = proc_mmz_read,
    .proc_read = seq_read,
    .proc_write = proc_mmz_write,
    .proc_lseek = seq_lseek,
    .proc_release = single_release,
};
#endif // KERNEL 5.10

#endif // HI_PROC_SUPPORT
#endif

/* ********************** proc ************************* */
#define MEDIA_MEM_NAME "media-mem"
static int media_mem_proc_init(void)
{
#if !(HI_PROC_SUPPORT == 0)
    struct proc_dir_entry *p;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
    p = create_proc_entry(MEDIA_MEM_NAME, 0444, NULL); /* 0444: access permission */
    if (p == NULL) {
        return -1;
    }
    p->read_proc = mmz_read_proc;
    p->write_proc = NULL;
#else
    p = proc_create(MEDIA_MEM_NAME, 0444, NULL, &g_proc_mmz_fops); /* 0444: access permission */
    if (p == NULL) {
        return -1;
    }
#endif

#endif
    return 0;
}

static void media_mem_proc_exit(void)
{
#if !(HI_PROC_SUPPORT == 0)
    remove_proc_entry(MEDIA_MEM_NAME, NULL);
#endif
}

/* ******************* init ************************* */
static pm_device g_mmz_userdev = {
    .minor = HIMEDIA_DYNAMIC_MINOR,
    .name = "mmz_userdev",
    .owner = THIS_MODULE,
    .app_ops = &g_mmz_userdev_fops
};

int drv_mmz_mod_init(void)
{
    HI_S32 ret;

    hi_drv_mmz_init();
    media_mem_proc_init();
    ret = hi_drv_pm_register(&g_mmz_userdev);
    if (ret != HI_SUCCESS) {
        media_mem_proc_exit();
        hi_drv_mmz_exit();
        return HI_FAILURE;
    }
#ifdef MODULE
    hi_mmz_fatal("Load hi_mmz.ko success.\n");
#endif
    tee_mem_agent_register();

    return 0;
}

void drv_mmz_mod_exit(void)
{
    hi_drv_pm_un_register(&g_mmz_userdev);
    media_mem_proc_exit();
    hi_drv_mmz_exit();
    tee_mem_agent_unregister();

#ifdef MODULE
    hi_mmz_fatal("Remove hi_mmz.ko success.\n");
#endif
}

#if defined(MODULE) || defined(CFG_HI_USER_DRV)
module_init(drv_mmz_mod_init);
module_exit(drv_mmz_mod_exit);
#endif

EXPORT_SYMBOL(drv_mmz_mod_init);
EXPORT_SYMBOL(drv_mmz_mod_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Hisilicon");