/*
 * Copyright (C) 2022 HiSilicon (Shanghai) Technologies CO., LIMITED.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/miscdevice.h>
#include <linux/proc_fs.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/list.h>
#include <asm/cacheflush.h>
#include <linux/time.h>
#include <asm/setup.h>
#include <linux/dma-mapping.h>
#include <asm/memory.h>

#ifndef CONFIG_64BIT
#include <asm/highmem.h>
#endif

#if LINUX_VERSION_CODE > KERNEL_VERSION(5, 10, 0)
#include <linux/cma.h>
#include <linux/dma-buf.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-heap.h>
#endif

#include <linux/hisilicon/hisi_iommu.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include "securec.h"
#include "drv_media_mem.h"

#define DEFAULT_ALLOC  0
#define SLAB_ALLOC     1
#define EQ_BLOCK_ALLOC 2

#define LOW_TO_HIGH 0
#define HIGH_TO_LOW 1

#define mmz_trace(s, params...) \
    do {                                        \
            hi_mmz_debug(s "\n", ##params);     \
    } while (0)
#define mmz_trace_func() mmz_trace("%s", __FILE__)

#define MMZ_GRAIN          PAGE_SIZE
#define mmz_bitmap_size(p) (mmz_align2(mmz_length2grain((p)->nbytes), 8) / 8)

#define mmz_get_bit(p, n) (((p)->bitmap[(n) / 8] >> ((n) & 0x7)) & 0x1)
#define mmz_set_bit(p, n) (p)->bitmap[(n) / 8] |= 1 << ((n) & 0x7)
#define mmz_clr_bit(p, n) (p)->bitmap[(n) / 8] &= ~(1 << ((n) & 0x7))

#define mmz_pos2phy_addr(p, n) ((p)->phys_start + (n) * MMZ_GRAIN)
#define mmz_phy_addr2pos(p, a) (((a) - (p)->phys_start) / MMZ_GRAIN)

#define mmz_align2low(x, g)   (((x) / (g)) * (g))
#define mmz_align2(x, g)      ((((x) + (g)-1) / (g)) * (g))
#define mmz_grain_align(x)    mmz_align2(x, MMZ_GRAIN)
#define mmz_length2grain(len) (mmz_grain_align(len) / MMZ_GRAIN)
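
/*
 * Worked example (assuming PAGE_SIZE == 4096): mmz_align2(0x1001, 4096)
 * rounds up to 0x2000, mmz_align2low(0x1fff, 4096) rounds down to 0x1000,
 * and mmz_length2grain(0x2001) yields 3 grains (pages).
 */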

char g_line[COMMAND_LINE_SIZE];
int g_mmz_print_level = MMZ_WARN_PRINT_LEVEL;

#define NAME_LEN_MAX 64

struct iommu_zone {
    unsigned int iova_start;
    unsigned int iova_end;
    unsigned int iova_align;
};

#define SPLIT_LINE \
        "--------------------------------------------------------------------------------------------------------\n"

#ifdef CONFIG_HISI_IOMMU
extern struct iommu_zone *hisi_get_iommu_zone(void);
#else
struct iommu_zone g_hisi_iommu;
static inline struct iommu_zone *hisi_get_iommu_zone(void)
{
    g_hisi_iommu.iova_start = 0x100000;
    g_hisi_iommu.iova_end = 0xffefffff;
    g_hisi_iommu.iova_align = 4096; /* 4096: the iova alignment length */

    return &g_hisi_iommu;
}
#endif

#define ZONE_MAX 64
struct mmz_iommu g_mmz_iommu;

LIST_HEAD(g_mmz_list);
DEFINE_SEMAPHORE(g_mmz_lock);

static int g_anony = 0;

module_param(g_anony, int, S_IRUGO);

#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
static void __dma_clear_buffer(struct ion_handle *handle);
#endif

static int _mmb_free(hil_mmb_t *mmb);

#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
static struct mmz_iommu *get_mmz_iommu_root(void)
{
    return &g_mmz_iommu;
}
#endif

static void hil_media_memory_zone_free(const void *addr)
{
    kfree(addr);
}
hil_mmz_t *hil_mmz_create(const char *name, HI_U32 gfp, HI_U32 zone_start, HI_U32 nbytes)
{
    hil_mmz_t *p = NULL;
    int ret;

    mmz_trace_func();
    if (name == NULL) {
        hi_mmz_error("'name' cannot be NULL!");
        return NULL;
    }

    p = kmalloc(sizeof(hil_mmz_t) + 1, GFP_KERNEL);
    if (p == NULL) {
        return NULL;
    }

    ret = memset_s(p, sizeof(hil_mmz_t) + 1, 0, sizeof(hil_mmz_t) + 1);
    if (ret != EOK) {
        hi_mmz_error("memset fail\n");
        kfree(p);
        p = NULL;
        return NULL;
    }
    ret = strncpy_s(p->name, HIL_MAX_NAME_LEN, name, (HIL_MAX_NAME_LEN - 1));
    if (ret != EOK) {
        hi_mmz_error("strncpy fail\n");
        kfree(p);
        p = NULL;
        return NULL;
    }

    p->gfp = gfp;
    p->zone_start = zone_start;
    p->nbytes = nbytes;

    INIT_LIST_HEAD(&p->list);

    p->destructor = hil_media_memory_zone_free;
    return p;
}
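
/*
 * Minimal usage sketch (illustrative only, not part of this driver; the
 * zone name and base address are hypothetical): create a zone descriptor
 * and publish it on g_mmz_list via hil_mmz_register() below.
 */
#if 0
static int example_zone_setup(void)
{
    hil_mmz_t *zone = hil_mmz_create("example", 0, 0x40000000, SZ_16M);
    if (zone == NULL) {
        return -ENOMEM;
    }
    if (hil_mmz_register(zone)) {
        hil_mmz_destroy(zone); /* not registered, so free it ourselves */
        return -EINVAL;
    }
    return 0;
}
#endif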

#ifndef DMABUF_FLUSH_CACHE
void flush_inner_cache(const void *viraddr, unsigned int len)
{
    if ((viraddr == NULL) || (len == 0)) {
        hi_mmz_warn("failed, viraddr:%pK len:0x%x!\n", viraddr, len);
        return;
    }

    mmz_flush_dcache_area((void *)viraddr, (size_t)len);
}

#ifndef CONFIG_64BIT
static void flush_outer(hil_mmb_t *mmb)
{
    struct scatterlist *sg = NULL;
    int i = 0;
    unsigned long size;
    struct sg_table *table = NULL;
    struct mmz_iommu *common = &g_mmz_iommu;

#if LINUX_VERSION_CODE > KERNEL_VERSION(5, 10, 0)
    table = hi_dma_buf_sgt(mmb->handle);
#else
    table = get_pages_from_buffer(common->client, mmb->handle, &size);
#endif
    if (table == NULL) {
        hi_mmz_warn("get pages failed!\n");
        return;
    }

    for_each_sg(table->sgl, sg, table->nents, i) {
        struct page *page = sg_page(sg);
        HI_U32 len = PAGE_ALIGN(sg->length);
        HI_U32 phys = __pfn_to_phys(page_to_pfn(page));

        outer_flush_range(phys, phys + len);
    }
}

/* just for A9 */
void flush_outer_cache_range(mmb_addr_t phyaddr, mmb_addr_t len, unsigned int iommu)
{
    hil_mmb_t *mmb = NULL;

    if (!iommu) {
        outer_flush_range(phyaddr, phyaddr + len);
        return;
    }

    mmb = hil_mmb_getby_phys(phyaddr, iommu);
    if (mmb == NULL) {
        hi_mmz_warn("invalid args!\n");
        return;
    }

    if (!mmb->iommu) {
        outer_flush_range(mmb->phys_addr, mmb->phys_addr + mmb->length);
    } else {
        flush_outer(mmb);
    }
}
#else
void flush_outer_cache_range(mmb_addr_t phyaddr, mmb_addr_t len, unsigned int iommu)
{
    (void)phyaddr;
    (void)len;
    (void)iommu;
    return;
}
#endif
#endif // DMABUF_FLUSH_CACHE

int hil_mmz_destroy(const hil_mmz_t *zone)
{
    if (zone == NULL) {
        return -1;
    }

    if (zone->destructor != NULL) {
        zone->destructor(zone);
        zone = NULL;
    }

    return 0;
}

int hil_mmz_register(hil_mmz_t *zone)
{
    if (zone == NULL) {
        return -1;
    }

    mmz_trace(HIL_MMZ_FMT, hil_mmz_fmt_arg(zone));

    down(&g_mmz_lock);
    list_add(&zone->list, &g_mmz_list);
    up(&g_mmz_lock);

    return 0;
}

int hil_mmz_unregister(hil_mmz_t *zone)
{
    int losts = 0;
    hil_mmb_t *p = NULL;
    struct rb_node *n = NULL;

    if (zone == NULL) {
        return -1;
    }

    mmz_trace_func();

    down(&g_mmz_lock);
    for (n = rb_first(&zone->root); n; n = rb_next(n)) {
        if (zone->iommu) {
            p = rb_entry(n, hil_mmb_t, s_node);
        } else {
            p = rb_entry(n, hil_mmb_t, node);
        }

        hi_mmz_warn("Lost: " HIL_MMB_FMT "\n", hil_mmb_fmt_arg(p));
        losts++;
    }

    if (losts) {
        hi_mmz_error("  %d mmbs not freed, mmz<%s> cannot be deregistered!\n", losts, zone->name);
        up(&g_mmz_lock);
        return -1;
    }

    list_del(&zone->list);
    hil_mmz_destroy(zone);
    up(&g_mmz_lock);

    return 0;
}

#if !(HI_PROC_SUPPORT == 0)
static void dump_mem_head(const hil_mmz_t *p, const int zone_cnt)
{
    struct rb_node *n = NULL;
    char *mem_type = NULL;
    char *smmu_name = "SMMU";
    char *phy_name = "DDR";
    unsigned int number = 0;

    if (p->iommu) {
        mem_type = smmu_name;
    } else {
        mem_type = phy_name;
    }

    hi_mmz_debug(SPLIT_LINE);
    hi_mmz_debug("|                   %s           |  ID  | ZONE  |  KVIRT  |  FLAGS  |"
                 "  LENGTH(KB)  |       NAME        |\n", mem_type);
    hi_mmz_debug(SPLIT_LINE);
    for (n = rb_first(&p->root); n; n = rb_next(n)) {
        number++;
    }
    hi_mmz_debug("|ZONE[%d]: (0x%08x, 0x%08x)   %d         %d        0x%08x      %-10u   \"%s%-14s|\n", zone_cnt,
                 p->zone_start, p->zone_start + p->nbytes - 1, number,
                 p->iommu, p->gfp, p->nbytes / SZ_1K, p->name, "\"");
}

static void dump_mem_statistics(const hil_mmz_t *p, const int zone_cnt, int block_number, const unsigned int used_size)
{
    int total_size = p->nbytes / 1024; /* 1024: bytes to KB */
    unsigned int free_size = total_size - used_size;

    hi_mmz_debug(SPLIT_LINE);
    hi_mmz_debug("|%-102s|\n", "Summary:");
    hi_mmz_debug(SPLIT_LINE);
    if (p->iommu) {
        hi_mmz_debug("|  SMMU Total Size  |    Iommu Used     |     Idle     |  Zone Number  |"
                     "   BLock Number                 |\n");
    } else {
        hi_mmz_debug("|  MMZ Total Size  |     CMA Used      |     Idle     |  Zone Number  |"
                     "   BLock Number                 |\n");
    }
    hi_mmz_debug(SPLIT_LINE);
    hi_mmz_debug("|       %d%-8s       %d%-8s          %d%-8s         %d              %d                |\n",
                 total_size / 1024, "MB", used_size / 1024, "MB", /* 1024: KB to MB */
                 free_size / 1024, "MB", zone_cnt, block_number); /* 1024: KB to MB */
    hi_mmz_debug(SPLIT_LINE);
    hi_mmz_debug("\n");
}

static unsigned int dump_mem_zone(const hil_mmz_t *p, int *block_number)
{
    hil_mmb_t *mmb = NULL;
    struct rb_node *n = NULL;
    unsigned int used_size = 0;
    int block_number_tmp = 0;

    for (n = rb_first(&p->root); n; n = rb_next(n)) {
        if (p->iommu) {
            mmb = rb_entry(n, hil_mmb_t, s_node);
            if (!mmb->iommu) {
                continue;
            }
        } else {
            mmb = rb_entry(n, hil_mmb_t, node);
        }
        hi_mmz_debug("|" HIL_MMB_FMT "|\n", hil_mmb_fmt_arg(mmb));
        used_size += mmb->length / 1024; /* 1024: bytes to KB */
        block_number_tmp++;
    }

    *block_number += block_number_tmp;
    return used_size;
}
#endif

static void dump_mem(void)
{
#if !(HI_PROC_SUPPORT == 0)

    int zone_cnt = 0;
    int block_number;
    hil_mmz_t *p;
    unsigned int used_size;

    list_for_each_entry(p, &g_mmz_list, list) {
#ifndef HI_SMMU_SUPPORT
        if (p->iommu) {
            continue;
        }
#endif
        dump_mem_head(p, zone_cnt);
        block_number = 0;
        used_size = dump_mem_zone(p, &block_number);
        dump_mem_statistics(p, zone_cnt, block_number, used_size);
        zone_cnt++;
    }
#endif
}

#ifdef HI_ADVCA_FUNCTION_RELEASE
#define mmz_dump_stack()
#else
#define mmz_dump_stack()  dump_stack()
#endif

hil_mmb_t *hil_mmb_getby_phys(HI_U32 addr, HI_U32 iommu)
{
    hil_mmz_t *zone = NULL;
    hil_mmb_t *mmb = NULL;

    if (addr == 0) {
        hi_mmz_error("err args\n");
        if (g_mmz_print_level > MMZ_WARN_PRINT_LEVEL) {
            mmz_dump_stack();
        }
        return NULL;
    }

    down(&g_mmz_lock);
    list_for_each_entry(zone, &g_mmz_list, list) {
        struct rb_node *n = NULL;

        if (zone->iommu != iommu) {
            continue;
        }

        for (n = rb_first(&zone->root); n; n = rb_next(n)) {
            hil_mmb_t *m = NULL;
            if (!iommu) {
                m = rb_entry(n, hil_mmb_t, node);
                if ((m->phys_addr <= addr) && (addr < (m->phys_addr + m->length))) {
                    mmb = m;
                    goto result;
                }
            } else {
                m = rb_entry(n, hil_mmb_t, s_node);
                if ((m->iommu_addr <= addr) && (addr < (m->iommu_addr + m->length))) {
                    mmb = m;
                    goto result;
                }
            }
        }
    }
result:
    if (mmb == NULL) {
        if (iommu) {
            hi_mmz_warn("smmu:0x%x err args\n", addr);
        } else {
            hi_mmz_warn("phys:0x%x err args\n", addr);
        }

        if (g_mmz_print_level > MMZ_WARN_PRINT_LEVEL) {
            mmz_dump_stack();
            dump_mem();
        }
    }

    up(&g_mmz_lock);
    return mmb;
}

hil_mmb_t *hil_mmb_getby_sec_addr(HI_U32 sec_addr, HI_U32 iommu)
{
    hil_mmz_t *zone = NULL;
    hil_mmb_t *mmb = NULL;

    if (sec_addr == 0) {
        hi_mmz_warn("err args\n");
        return NULL;
    }

    down(&g_mmz_lock);
    list_for_each_entry(zone, &g_mmz_list, list) {
        struct rb_node *n = NULL;

        for (n = rb_first(&zone->root); n; n = rb_next(n)) {
            hil_mmb_t *m = NULL;
            if (!iommu) {
                /* if sec_addr is a phys_addr, it can only be in a DDR zone */
                m = rb_entry(n, hil_mmb_t, node);
                if (m->phys_addr <= sec_addr && sec_addr < (m->phys_addr + m->length)) {
                    mmb = m;
                    goto result;
                }
            } else {
                /* if sec_addr is a sec_smmu address, it may be in either a DDR zone or an SMMU zone */
                if (zone->iommu) {
                    m = rb_entry(n, hil_mmb_t, s_node);
                } else {
                    m = rb_entry(n, hil_mmb_t, node);
                }
                if (m->sec_smmu != 0 && m->sec_smmu <= sec_addr && sec_addr < (m->sec_smmu + m->length)) {
                    mmb = m;
                    goto result;
                }
            }
        }
    }
result:
    up(&g_mmz_lock);
    if (mmb == NULL) {
        if (iommu) {
            hi_mmz_warn("smmu:0x%x err args\n", sec_addr);
        } else {
            hi_mmz_warn("phys:0x%x err args\n", sec_addr);
        }
    }
    return mmb;
}

static int mmb_add_to_rbtree(hil_mmb_t *mmb, hil_mmz_t *mmz, int iommu)
{
    struct rb_node **p = &mmz->root.rb_node;
    struct rb_node *parent = NULL;
    hil_mmb_t *entry = NULL;

    if (iommu) {
        while (*p) {
            parent = *p;
            entry = rb_entry(parent, hil_mmb_t, s_node);
            if (mmb->iommu_addr < entry->iommu_addr) {
                p = &(*p)->rb_left;
            } else if (mmb->iommu_addr > entry->iommu_addr) {
                p = &(*p)->rb_right;
            } else {
                hi_mmz_warn("buffer already found.\n");
                BUG();
                return HI_FAILURE;
            }
        }
        rb_link_node(&mmb->s_node, parent, p);
        rb_insert_color(&mmb->s_node, &mmz->root);
    } else {
        while (*p) {
            parent = *p;
            entry = rb_entry(parent, hil_mmb_t, node);
            if (mmb->phys_addr < entry->phys_addr) {
                p = &(*p)->rb_left;
            } else if (mmb->phys_addr > entry->phys_addr) {
                p = &(*p)->rb_right;
            } else {
                hi_mmz_warn("buffer already found.\n");
                BUG();
                return HI_FAILURE;
            }
        }

        rb_link_node(&mmb->node, parent, p);
        rb_insert_color(&mmb->node, &mmz->root);
    }
    return HI_SUCCESS;
}

#define  MMZ_NODE_FIRST_MATCHED    1
#define  MMZ_NODE_LAST_MATCHED     0

/*
 * first: 1, find the first node which matches the requirement
 *        0, find the last node which matches the requirement
 */
static hil_mmz_t *list_for_each_mmz(HI_U32 gfp, const char *mmz_name, int first)
{
    hil_mmz_t *p = NULL;
    hil_mmz_t *mmz = NULL;

    list_for_each_entry(p, &g_mmz_list, list) {
        if (gfp == 0 ? 0 : p->gfp != gfp) {
            continue;
        }
        if (mmz_name != NULL) {
            if ((*mmz_name != '\0') && strncmp(mmz_name, p->name, HIL_MAX_NAME_LEN)) {
                continue;
            }
        }
        if (mmz_name == NULL && g_anony == 1) {
            if (strncmp("anonymous", p->name, HIL_MAX_NAME_LEN)) {
                continue;
            }
        }

        mmz = p;
        if (first) {
            break;
        }
    }
    return mmz;
}
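
/*
 * Lookup sketch (caller holds g_mmz_lock; "anonymous" is the zone name
 * this driver checks when g_anony is set):
 *
 *     hil_mmz_t *z = list_for_each_mmz(0, "anonymous", MMZ_NODE_FIRST_MATCHED);
 *
 * gfp == 0 matches any zone; a non-NULL, non-empty name must match
 * zone->name (compared with strncmp over HIL_MAX_NAME_LEN).
 */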

#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
static int real_page_buffer_alloc_and_map(hil_mmb_t *mmb, HI_U32 size, HI_U32 align, int iommu,
                                          unsigned int heap_id_mask)
{
    struct mmz_iommu *common = get_mmz_iommu_root();
    struct ion_client *client = common->client;
    struct iommu_map_format *format = NULL;
    const unsigned int flags = 0;
    int ret = 0;

    /* alloc real page buffer via ion interface */
    mmb->handle = ion_alloc(client, size, align, heap_id_mask, flags);
    if (IS_ERR(mmb->handle)) {
        hi_mmz_warn("mem alloc failed !\n");
        return -1;  /* -1, error */
    }
    __dma_clear_buffer(mmb->handle);

    if (iommu) {
        format = kzalloc(sizeof(struct iommu_map_format), GFP_KERNEL);
        if (format == NULL) {
            hi_mmz_warn("no mem!\n");
            goto err;
        }
        /* map real page buffer to smmu space */
        ret = ion_map_iommu(client, mmb->handle, format);
        if (ret) {
            hi_mmz_warn("alloc iommu failed!\n");
            kfree(format);
            goto err;
        }

        mmb->phys_addr = MMB_ADDR_INVALID;
        mmb->iommu_addr = format->iova_start;
        mmb->length = format->iova_size;
        kfree(format);
        format = NULL;
    } else {
        size_t len = 0;
        unsigned long phys = 0;
        /* get cma phy address and len of real page buffer */
        ret = ion_phys(client, mmb->handle, &phys, &len);
        if (ret) {
            hi_mmz_error("cannot get phys_addr!\n");
            goto err;
        }

        mmb->phys_addr = (HI_U32)phys;
        mmb->iommu_addr = MMB_ADDR_INVALID;
        mmb->length = (HI_U32)len;
    }

    mmb->client = client;
    return 0;  /* 0, success */
err:
    ion_free(client, mmb->handle);
    return -1;  /* -1, error */
}
#else
static int real_page_buffer_alloc_and_map(hil_mmb_t *mmb, HI_U32 size, HI_U32 align, int iommu,
                                          struct dma_heap *heap)
{
    dma_addr_t iova_start;
    unsigned int fd_flags = O_RDWR | O_CLOEXEC;
    unsigned int heap_flags = 0; /* 0: Currently no heap flags */

    mmb->handle = hi_dma_buf_alloc(heap, size, fd_flags, heap_flags);

    if (IS_ERR(mmb->handle)) {
        hi_mmz_warn("mem alloc failed !\n");
        return -1;  /* -1, error */
    }

    if (iommu) {
        iova_start = dma_buf_map_iommu(mmb->handle);
        if (!iova_start) {
            hi_mmz_warn("alloc iommu failed!\n");
            goto err;
        }

        mmb->phys_addr = MMB_ADDR_INVALID;
        mmb->iommu_addr = iova_start;
    } else {
        unsigned long phys = 0;
        /* get cma phy address and len of real page buffer */
        phys = dma_buf_phys(mmb->handle);
        if (!phys) {
            hi_mmz_error("cannot get phys_addr!\n");
            goto err;
        }

        mmb->phys_addr = (HI_U32)phys;
        mmb->iommu_addr = MMB_ADDR_INVALID;
    }
    mmb->length = mmb->handle->size;

    return 0;  /* 0, success */
err:
    dma_buf_put(mmb->handle);
    return -1;  /* -1, error */
}
#endif

static int hil_mmb_init(hil_mmb_t *mmb, hil_mmz_t *mmz, unsigned int size, unsigned int align, int flag)
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
    unsigned int heap_id_mask;
#else
    struct dma_heap *heap;
#endif
    int iommu;
    int ret;

    iommu = mmz->iommu;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
    heap_id_mask = mmz->heap_id_mask;
    ret = real_page_buffer_alloc_and_map(mmb, size, align, iommu, heap_id_mask);
#else
    heap = mmz->heap;
    ret = real_page_buffer_alloc_and_map(mmb, size, align, iommu, heap);
#endif
    if (ret) {
        hi_mmz_warn("real_page_buffer_alloc_and_map fail\n");
        return ret;
    }

    mmb->zone = mmz;
    mmb->iommu = iommu;
    mmb->phy_ref++;
    mmb->flag = flag; /* used when release */
    mmb->kdata = NULL;
    mmb->sec_smmu = MMB_ADDR_INVALID; /* for secure smmu */
    spin_lock_init(&mmb->u_lock);

    if (flag == HI_USER_ALLOC) {
        mmb->owner_id = current->tgid;
    } else {
        mmb->owner_id = 0;
    }
    INIT_LIST_HEAD(&mmb->ulist);

    return 0;
}

hil_mmb_t *hil_mmb_alloc(const char *name, HI_U32 size, HI_U32 align, const char *mmz_name, int flag)
{
    hil_mmb_t *mmb = NULL;
    hil_mmz_t *mmz = NULL;
    const char *mmb_name = (name == NULL) ? "<null>" : name;
    int iommu;
    int ret;
    const unsigned int gfp = 0;  /* gfp is always 0 in later versions */

    down(&g_mmz_lock);

    mmz = list_for_each_mmz(gfp, mmz_name, MMZ_NODE_LAST_MATCHED);
    if (mmz == NULL) {
        hi_mmz_warn("can't find zone:%s\n", mmz_name);
        goto err_exit;
    }
    iommu = mmz->iommu;

    mmb = kmalloc(sizeof(hil_mmb_t), GFP_KERNEL);
    if (mmb == NULL) {
        goto err_exit;
    }
    ret = memset_s(mmb, sizeof(hil_mmb_t), 0, sizeof(hil_mmb_t));
    if (ret != 0) {
        hi_mmz_error("memset fail");
        goto handle_err;
    }

    ret = strncpy_s(mmb->name, HIL_MAX_NAME_LEN, mmb_name, HIL_MAX_NAME_LEN - 1);
    if (ret != EOK) {
        hi_mmz_warn("strncpy fail\n");
        goto handle_err;
    }

    ret = hil_mmb_init(mmb, mmz, size, align, flag);
    if (ret != EOK) {
        hi_mmz_warn("hil_mmb_init failed\n");
        goto handle_err;
    }

    ret = mmb_add_to_rbtree(mmb, mmz, iommu);
    if (ret != HI_SUCCESS) {
        hi_mmz_warn("add to rbtree failed!\n");
        goto handle_err;
    }

    up(&g_mmz_lock);
    return mmb;

handle_err:
    kfree(mmb);
    mmb = NULL;
err_exit:
    up(&g_mmz_lock);
    return NULL;
}
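
/*
 * Allocation sketch (illustrative only; the block name, zone name and
 * size are assumptions, and HI_KERNEL_ALLOC stands in for whatever
 * kernel-side 'flag' value callers use instead of HI_USER_ALLOC).
 * hil_mmb_alloc() takes g_mmz_lock itself; hil_mmb_free() is called
 * under the lock, matching how hil_mmb_unmap()/mmb_put() reach it.
 */
#if 0
static void example_alloc_free(void)
{
    hil_mmb_t *mmb = hil_mmb_alloc("demo", SZ_1M, PAGE_SIZE, "iommu", HI_KERNEL_ALLOC);
    if (mmb == NULL) {
        return;
    }
    down(&g_mmz_lock);
    hil_mmb_free(mmb); /* drops the phy_ref taken in hil_mmb_init() */
    up(&g_mmz_lock);
}
#endif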

#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
static void __dma_clear_buffer(struct ion_handle *handle)
{
    struct scatterlist *sg = NULL;
    int i = 0;
    HI_U32 size;
    struct sg_table *table = NULL;
    struct mmz_iommu *common = &g_mmz_iommu;
    unsigned long len;

    table = get_pages_from_buffer(common->client, handle, &len);
    if (table == NULL) {
        hi_mmz_warn("get pages failed!\n");
        return;
    }

    size = (HI_U32)len;
    size = PAGE_ALIGN(size);
    for_each_sg(table->sgl, sg, table->nents, i) {
        struct page *page = sg_page(sg);
        HI_U32 length = PAGE_ALIGN(sg->length);
#ifdef CONFIG_64BIT
        void *ptr = page_address(page);
        mmz_flush_dcache_area(ptr, length);
#else
        HI_U32 phys = __pfn_to_phys(page_to_pfn(page));

        if (PageHighMem(page)) {
            while (length > 0) {
                void *ptr = kmap_atomic(page);

                mmz_flush_dcache_area(ptr, PAGE_SIZE);
                __kunmap_atomic(ptr);
                page++;
                length -= PAGE_SIZE;
            }
        } else {
            void *ptr = page_address(page);
            mmz_flush_dcache_area(ptr, length);
        }
        outer_flush_range(phys, phys + length);
#endif
    }
}
#endif

#ifndef DMABUF_FLUSH_CACHE
static void *_map2kern(const hil_mmb_t *mmb, int cached)
{
    struct scatterlist *sg = NULL;
    int i = 0;
    int j = 0;
    void *vaddr = NULL;
    pgprot_t pgprot;
    int npages;
    HI_U32 size;
    struct sg_table *table = NULL;
    struct page **pages = NULL;
    struct page **tmp = NULL;
    struct mmz_iommu *common = &g_mmz_iommu;
    unsigned long len;

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
    table = get_pages_from_buffer(common->client, mmb->handle, &len);
#else
    table = hi_dma_buf_sgt(mmb->handle);
    len = mmb->length;
#endif
    if (table == NULL) {
        hi_mmz_warn("get pages failed!\n");
        return NULL;
    }
    size = (HI_U32)len;
    npages = PAGE_ALIGN(size) / PAGE_SIZE;
    pages = vmalloc(sizeof(struct page *) * npages);
    tmp = pages;

    if (pages == HI_NULL) {
        hi_mmz_warn("no mem!\n");
        return NULL;
    }
    pgprot = PAGE_KERNEL_EXEC;

    if (!cached) {
        pgprot = pgprot_writecombine(PAGE_KERNEL_EXEC);
    }

    for_each_sg(table->sgl, sg, table->nents, i) {
        int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
        struct page *page = sg_page(sg);
        for (j = 0; j < npages_this_entry; j++) {
            *(tmp++) = page++;
        }
    }
    vaddr = vmap(pages, npages, VM_MAP, pgprot);
    vfree(pages);
    return vaddr;
}
#endif

static void *_mmb_map2kern(hil_mmb_t *mmb, int cached)
{
    struct mmb_kdata *kdata = NULL;

    kdata = mmb->kdata;
    if (kdata != NULL) {
        if (kdata->map_cached != cached) {
            hi_mmz_warn("mmb<%s> is already kernel-mapped with one cache attribute; a re-map with "
                        "a different attribute was requested, but the first mapping is returned\n", mmb->name);
        }
        mmb->phy_ref++;
        mmb->map_ref++;
        kdata->kmap_ref++;
        return kdata->kvirt;
    }
    kdata = kmalloc(sizeof(struct mmb_kdata), GFP_KERNEL | __GFP_ZERO);
    if (kdata == NULL) {
        hi_mmz_warn("remap failed!\n");
        return NULL;
    }
#ifndef DMABUF_FLUSH_CACHE
    kdata->kvirt = _map2kern(mmb, cached);
#else
    kdata->kvirt = dma_buf_vmap(mmb->handle);
#endif
    if (kdata->kvirt == NULL) {
        hi_mmz_warn("remap failed in ion!\n");
        kfree(kdata);
        return NULL;
    }
    kdata->map_cached = cached;
    kdata->kmap_ref++;

    mmb->kdata = kdata;
    mmb->phy_ref++;
    mmb->map_ref++;

    return kdata->kvirt;
}

void *hil_mmb_map2kern(hil_mmb_t *mmb)
{
    void *p = NULL;

    if (mmb == NULL) {
        return NULL;
    }

    down(&g_mmz_lock);
    p = _mmb_map2kern(mmb, 0);
    up(&g_mmz_lock);

    return p;
}

void *hil_mmb_map2kern_cached(hil_mmb_t *mmb)
{
    void *p = NULL;

    if (mmb == NULL) {
        return NULL;
    }

    down(&g_mmz_lock);
    p = _mmb_map2kern(mmb, 1);
    up(&g_mmz_lock);

    return p;
}
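
/*
 * Kernel-mapping sketch (illustrative): map a block uncached
 * (write-combined), use it, then drop the mapping. hil_mmb_unmap()
 * below releases the vmap once kmap_ref reaches zero and frees the
 * block when every ref has been dropped. Both calls take g_mmz_lock
 * internally.
 */
#if 0
static void example_kernel_map(hil_mmb_t *mmb)
{
    void *kvirt = hil_mmb_map2kern(mmb); /* uncached mapping */
    if (kvirt == NULL) {
        return;
    }
    memset(kvirt, 0, mmb->length);
    hil_mmb_unmap(mmb, kvirt);
}
#endif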

#ifndef DMABUF_FLUSH_CACHE
int hil_mmb_unmap(hil_mmb_t *mmb, const void *addr)
{
    struct mmb_kdata *kdata = NULL;
    mmb_addr_t phyaddr;

    if (mmb == NULL) {
        return -1;
    }
    down(&g_mmz_lock);

    kdata = mmb->kdata;
    if (kdata == NULL) {
        hi_mmz_warn("cannot find userdata!\n");
        up(&g_mmz_lock);
        return HI_FAILURE;
    }
    if (mmb->iommu) {
        phyaddr = mmb->iommu_addr;
    } else {
        phyaddr = mmb->phys_addr;
    }

    if (kdata->map_cached) {
        up(&g_mmz_lock);
#ifndef CONFIG_64BIT
        __cpuc_flush_dcache_area((void *)kdata->kvirt, (size_t)mmb->length);
        flush_outer_cache_range(phyaddr, mmb->length, mmb->iommu);
#else
        __flush_dcache_area((void *)kdata->kvirt, (size_t)mmb->length);
#endif
        down(&g_mmz_lock);
    }

    kdata->kmap_ref--;
    if (!kdata->kmap_ref) {
        vunmap(kdata->kvirt);
        kfree(kdata);
        mmb->kdata = NULL;
    }

    mmb->map_ref--;
    mmb->phy_ref--;

    if ((mmb->phy_ref == 0) && (mmb->map_ref == 0) && (mmb->cma_smmu_ref == 0) && (mmb->sec_smmu_ref == 0)) {
        hil_mmb_free(mmb);
        mmb = NULL;
    }

    up(&g_mmz_lock);
    return 0;
}
#endif

static int _mmb_free(hil_mmb_t *mmb)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
    if (mmb->iommu) {
        ion_unmap_iommu(mmb->client, mmb->handle);
    }

    ion_free(mmb->client, mmb->handle);
#else
    if (mmb->iommu) {
        dma_buf_unmap_iommu(mmb->iommu_addr, mmb->handle);
    }

    dma_buf_put(mmb->handle);
#endif

    if (mmb->iommu) {
        rb_erase(&mmb->s_node, &mmb->zone->root);
    } else {
        rb_erase(&mmb->node, &mmb->zone->root);
    }
    kfree(mmb);

    return 0;
}

int hil_mmb_free(hil_mmb_t *mmb)
{
    struct mmb_udata *p = NULL;
    struct mmb_udata *q = NULL;
    hil_mmz_t *zone = NULL;
    int found = 0;

    if (mmb == NULL) {
        return HI_FAILURE;
    }

    list_for_each_entry(zone, &g_mmz_list, list) {
        struct rb_node *n = NULL;

        for (n = rb_first(&zone->root); n; n = rb_next(n)) {
            hil_mmb_t *m = NULL;
            if (zone->iommu) {
                m = rb_entry(n, hil_mmb_t, s_node);
                if (mmb == m) {
                    found = 1;
                    break;
                }
            } else {
                m = rb_entry(n, hil_mmb_t, node);
                if (mmb == m) {
                    found = 1;
                    break;
                }
            }
        }
    }

    if (found == 0) {
        return HI_FAILURE;
    }

    if (mmb->phy_ref > 0) {
        mmb->phy_ref--;
    }

    if ((mmb->map_ref) || (mmb->phy_ref) || (mmb->cma_smmu_ref) || (mmb->sec_smmu_ref)) {
        hi_mmz_debug("name=%s, mmz(0x%x) smmu(0x%x) is still used: "
                     "phy_ref:%d map_ref:%d smmu_ref:%d sec_smmu_ref:%d\n",
                     mmb->name, mmb->phys_addr, mmb->iommu_addr,
                     mmb->phy_ref, mmb->map_ref, mmb->cma_smmu_ref, mmb->sec_smmu_ref);
        return 0;
    }
    spin_lock(&mmb->u_lock);
    if (!list_empty(&mmb->ulist)) {
        /*
         * In an exception path, e.g. the process was killed with ctrl+c,
         * the udata entries in the mmb may not have been freed yet, so
         * free them here. On a normal unmap the udata has already been
         * freed and this branch is not taken.
         */
        list_for_each_entry_safe(p, q, &mmb->ulist, list) {
            list_del(&p->list);
            kfree(p);
        }
    }
    spin_unlock(&mmb->u_lock);
    _mmb_free(mmb);
    mmb = NULL;

    return 0;
}

hil_mmb_t *hil_mmb_getby_kvirt(const void *virt)
{
    hil_mmb_t *mmb = NULL;
    hil_mmz_t *zone = NULL;

    if (virt == NULL) {
        hi_mmz_warn("virt:%pK err args\n", virt);
        return NULL;
    }

    down(&g_mmz_lock);
    list_for_each_entry(zone, &g_mmz_list, list) {
        struct rb_node *n;
        for (n = rb_first(&zone->root); n; n = rb_next(n)) {
            hil_mmb_t *m = NULL;

            if (zone->iommu) {
                m = rb_entry(n, hil_mmb_t, s_node);
            } else {
                m = rb_entry(n, hil_mmb_t, node);
            }
            if (m->kdata == NULL) {
                continue;
            }
            if (((uintptr_t)(m->kdata->kvirt) <= (uintptr_t)virt) &&
                ((uintptr_t)virt < ((uintptr_t)m->kdata->kvirt + m->length))) {
                mmb = m;
                goto end;
            }
        }
    }
end:
    up(&g_mmz_lock);

    if (mmb == NULL) {
        hi_mmz_warn("virt:%pK cannot find mem\n", virt);
    }

    return mmb;
}

hil_mmb_t *hil_mmbinfo_getby_kvirt(const void *virt)
{
    hil_mmb_t *mmb = NULL;
    hil_mmz_t *zone = NULL;

    if (virt == NULL) {
        hi_mmz_warn("virt:%pK err args\n", virt);
        return NULL;
    }

    down(&g_mmz_lock);
    list_for_each_entry(zone, &g_mmz_list, list) {
        struct rb_node *n;
        for (n = rb_first(&zone->root); n; n = rb_next(n)) {
            hil_mmb_t *m = NULL;

            if (zone->iommu) {
                m = rb_entry(n, hil_mmb_t, s_node);
            } else {
                m = rb_entry(n, hil_mmb_t, node);
            }
            if (m->kdata == NULL) {
                continue;
            }
            if (((uintptr_t)(m->kdata->kvirt) <= (uintptr_t)virt) &&
                ((uintptr_t)(m->kdata->kvirt + m->length) >= (uintptr_t)virt)) {
                mmb = m;
                goto end;
            }
        }
    }
end:
    up(&g_mmz_lock);
    if (mmb == NULL) {
        hi_mmz_warn("virt:%pK cannot find mem\n", virt);
    }

    return mmb;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
static int hil_mmb_alloc_iommu(hil_mmb_t *mmb)
{
    struct iommu_map_format *format;
    struct mmz_iommu *common = &g_mmz_iommu;
    int ret;

    format = kmalloc(sizeof(struct iommu_map_format), GFP_KERNEL | __GFP_ZERO);
    if (format == NULL) {
        hi_mmz_warn("no mem!\n");
        return -1;  /* -1, error */
    }
    ret = ion_map_iommu(common->client, mmb->handle, format);
    if (ret) {
        hi_mmz_warn("alloc iommu failed!\n");
        kfree(format);
        return ret;
    }

    mmb->iommu_addr = format->iova_start;
    kfree(format);
    format = NULL;

    return ret;
}
#else
static int hil_mmb_alloc_iommu(hil_mmb_t *mmb)
{
    int ret;

    if (mmb->handle == NULL) {
        hi_mmz_warn("err args!\n");
        return MMB_ADDR_INVALID;
    }

    ret = dma_buf_map_iommu(mmb->handle);
    if (!ret) {
        hi_mmz_warn("dma_buf_map_iommu failed\n");
        return MMB_ADDR_INVALID;
    }

    mmb->iommu_addr = ret;

    return 0;
}
#endif

mmb_addr_t hil_mmb_cma_mapto_iommu(mmb_addr_t addr, int iommu)
{
    hil_mmb_t *mmb = NULL;
    hil_mmz_t *mmz = NULL;
    const HI_U32 gfp = 0; /* gfp is 0 in later versions */
    int ret;
    mmb_addr_t offset;

    if (iommu) {
        hi_mmz_warn("err args, iommu must be 0, and addr must be a cma phy_addr\n");
        return MMB_ADDR_INVALID;
    }

    mmb = hil_mmb_getby_phys((HI_U32)addr, 0);
    if (mmb == NULL) {
        hi_mmz_warn("err args!\n");
        return MMB_ADDR_INVALID;
    }

    down(&g_mmz_lock);
    if (mmb->iommu_addr != MMB_ADDR_INVALID) {
        hi_mmz_warn("It's already mapped to iommu.\n");
        mmb->cma_smmu_ref++;
        mmb->phy_ref++;
        up(&g_mmz_lock);
        return mmb->iommu_addr;
    }
    offset = addr - mmb->phys_addr;

    ret = hil_mmb_alloc_iommu(mmb);
    if (ret) {
        hi_mmz_warn("hil_mmb_alloc_iommu failed!\n");
        goto err;
    }

    mmb->cma_smmu_ref++;
    mmb->phy_ref++;

    mmz = list_for_each_mmz(gfp, "iommu", MMZ_NODE_LAST_MATCHED);
    if (mmz == NULL) {
        hi_mmz_warn("cannot find iommu zone!\n");
        goto err;
    }

    ret = mmb_add_to_rbtree(mmb, mmz, 1);
    if (ret != HI_SUCCESS) {
        hi_mmz_warn("add to rbtree failed!\n");
        goto err;
    }

    up(&g_mmz_lock);
    return (mmb->iommu_addr + offset);

err:
    up(&g_mmz_lock);
    return MMB_ADDR_INVALID;
}

int hil_mmb_cma_unmapfrom_iommu(mmb_addr_t addr, int iommu)
{
    hil_mmb_t *mmb = NULL;
    hil_mmz_t *mmz = NULL;
    const HI_U32 gfp = 0;

    if (!iommu) {
        hi_mmz_warn("err args, iommu must be 1, and addr must be an iommu addr\n");
        return HI_FAILURE;
    }

    mmb = hil_mmb_getby_phys((HI_U32)addr, 1);
    if (mmb == NULL || mmb->handle == NULL) {
        hi_mmz_warn("err args!\n");
        return HI_FAILURE;
    }

    down(&g_mmz_lock);
    if (mmb->iommu_addr != MMB_ADDR_INVALID) {
        mmb->cma_smmu_ref--;
        mmb->phy_ref--;
    }

    if (mmb->cma_smmu_ref) {
        up(&g_mmz_lock);
        return HI_SUCCESS;
    }

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
    ion_unmap_iommu(mmb->client, mmb->handle);
#else
    dma_buf_unmap_iommu(mmb->iommu_addr, mmb->handle);
#endif
    mmz = list_for_each_mmz(gfp, "iommu", MMZ_NODE_LAST_MATCHED);
    if (mmz == NULL) {
        hi_mmz_warn("cannot find iommu zone!\n");
        up(&g_mmz_lock);
        return HI_FAILURE;
    }

    mmb->iommu_addr = MMB_ADDR_INVALID;
    rb_erase(&mmb->s_node, &mmz->root);

    if ((mmb->phy_ref == 0) && (mmb->map_ref == 0) && (mmb->cma_smmu_ref == 0) && (mmb->sec_smmu_ref == 0)) {
        hil_mmb_free(mmb);
    }
    up(&g_mmz_lock);

    return HI_SUCCESS;
}
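
/*
 * CMA-to-SMMU sketch (illustrative): note the asymmetry checked above --
 * hil_mmb_cma_mapto_iommu() takes iommu == 0 with a CMA phys addr, while
 * hil_mmb_cma_unmapfrom_iommu() takes iommu == 1 with the returned iova.
 */
#if 0
static void example_cma_iommu(mmb_addr_t phys)
{
    mmb_addr_t iova = hil_mmb_cma_mapto_iommu(phys, 0);
    if (iova == MMB_ADDR_INVALID) {
        return;
    }
    hil_mmb_cma_unmapfrom_iommu(iova, 1);
}
#endif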

struct sg_table *hil_get_meminfo(const hil_mmb_t *mmb)
{
    unsigned long size;
    struct sg_table *table = NULL;
    struct mmz_iommu *common = &g_mmz_iommu;

    if (mmb == NULL) {
        hi_mmz_error("invalid params!\n");
        return NULL;
    }
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
    table = get_pages_from_buffer(common->client, mmb->handle, &size);
#else
    table = hi_dma_buf_sgt(mmb->handle);
#endif
    if (table == NULL) {
        hi_mmz_warn("get pages failed!\n");
        return NULL;
    }
    return table;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
int sec_mmb_get(u32 addr, int iommu, u32 sec_smmu)
{
    hil_mmb_t *mmb;

    mmb = hil_mmb_getby_phys((HI_U32)addr, (HI_U32)iommu);
    if (mmb == NULL) {
        hi_mmz_warn("err args!\n");
        return HI_FAILURE;
    }
    down(&g_mmz_lock);

    if (!mmb->sec_smmu_ref) {
        mmb->sec_smmu = sec_smmu;
    }

    mmb->sec_smmu_ref++;
    mmb->phy_ref++;

    up(&g_mmz_lock);
    return HI_SUCCESS;
}

int sec_mmb_put(u32 addr, int iommu)
{
    hil_mmb_t *mmb = NULL;
    int ref;

    mmb = hil_mmb_getby_phys((HI_U32)addr, (HI_U32)iommu);
    if (mmb == NULL) {
        hi_mmz_warn("err args, addr:0x%x iommu:0x%x!\n", addr, iommu);
        return HI_FAILURE;
    }
    down(&g_mmz_lock);

    if (!mmb->sec_smmu_ref || !mmb->phy_ref) {
        hi_mmz_warn("wrong operation. mmb->sec_smmu_ref:%d mmb->phy_ref:%d\n",
                    mmb->sec_smmu_ref, mmb->phy_ref);
        up(&g_mmz_lock);
        return HI_FAILURE;
    }

    mmb->sec_smmu_ref--;
    mmb->phy_ref--;
    if (!mmb->sec_smmu_ref) {
        mmb->sec_smmu = MMB_ADDR_INVALID;
    }

    ref = mmb->sec_smmu_ref;
    if ((mmb->phy_ref == 0) && (mmb->map_ref == 0) && (mmb->cma_smmu_ref == 0) && (mmb->sec_smmu_ref == 0)) {
        hil_mmb_free(mmb);
    }

    up(&g_mmz_lock);
    return ref;
}

int sec_mmb_query_ref(HI_U32 sec_addr, int iommu, HI_U32 *ref)
{
    hil_mmb_t *mmb = NULL;

    if (ref == NULL) {
        hi_mmz_warn("ref should not be null!\n");
        return HI_FAILURE;
    }

    mmb = hil_mmb_getby_sec_addr(sec_addr, iommu);
    if (mmb == NULL) {
        hi_mmz_warn("err args!\n");
        return HI_FAILURE;
    }
    down(&g_mmz_lock);
    *ref = mmb->sec_smmu_ref;
    up(&g_mmz_lock);

    return HI_SUCCESS;
}

int set_sec_mmb_flag(u32 addr, int iommu)
{
    hil_mmb_t *mmb;

    mmb = hil_mmb_getby_phys((HI_U32)addr, (HI_U32)iommu);
    if (mmb == NULL) {
        hi_mmz_warn("err args!\n");
        return HI_FAILURE;
    }
    down(&g_mmz_lock);
    mmb->sec_flag = 1;
    up(&g_mmz_lock);
    return HI_SUCCESS;
}

int clr_sec_mmb_flag(u32 addr, int iommu)
{
    hil_mmb_t *mmb;

    mmb = hil_mmb_getby_phys((HI_U32)addr, (HI_U32)iommu);
    if (mmb == NULL) {
        hi_mmz_warn("err args!\n");
        return HI_FAILURE;
    }
    down(&g_mmz_lock);
    mmb->sec_flag = 0;
    up(&g_mmz_lock);
    return HI_SUCCESS;
}

/* return 0: normal mem; return 1: sec mem */
int is_sec_mem(u32 addr, int iommu)
{
    hil_mmb_t *mmb;
    int ret;

    mmb = hil_mmb_getby_phys((HI_U32)addr, (HI_U32)iommu);
    if (mmb == NULL) {
        hi_mmz_warn("err args!\n");
        return HI_FAILURE;
    }

    down(&g_mmz_lock);
    ret = mmb->sec_flag;
    up(&g_mmz_lock);

    return ret;
}

int sec_delay_release_for_mem(u32 addr, int iommu)
{
    hil_mmb_t *mmb;

    mmb = hil_mmb_getby_phys((HI_U32)addr, (HI_U32)iommu);
    if (mmb == NULL) {
        hi_mmz_warn("err args!\n");
        return HI_FAILURE;
    }

    down(&g_mmz_lock);
    if ((mmb->phy_ref == 0) && (mmb->map_ref == 0) && (mmb->cma_smmu_ref == 0) && (mmb->sec_smmu_ref == 0)) {
        hil_mmb_free(mmb);
    }
    up(&g_mmz_lock);

    return 0;
}
#endif

int mmb_get(u32 addr, int iommu)
{
    hil_mmb_t *mmb;

    mmb = hil_mmb_getby_phys((HI_U32)addr, (HI_U32)iommu);
    if (mmb == NULL) {
        hi_mmz_warn("err args!\n");
        return HI_FAILURE;
    }

    down(&g_mmz_lock);
    mmb->phy_ref++;
    up(&g_mmz_lock);

    return HI_SUCCESS;
}

int mmb_put(u32 addr, int iommu)
{
    hil_mmb_t *mmb;

    mmb = hil_mmb_getby_phys((HI_U32)addr, (HI_U32)iommu);
    if (mmb == NULL) {
        hi_mmz_warn("err args!\n");
        return HI_FAILURE;
    }

    down(&g_mmz_lock);
    if (mmb->phy_ref > 0) {
        mmb->phy_ref--;
    } else {
        hi_mmz_warn("phy_ref is wrong:ref:%d\n", mmb->phy_ref);
        up(&g_mmz_lock);
        return HI_FAILURE;
    }

    if (!mmb->phy_ref && !mmb->map_ref && !mmb->cma_smmu_ref && !mmb->sec_smmu_ref) {
        hil_mmb_free(mmb);
    }

    up(&g_mmz_lock);
    return HI_SUCCESS;
}
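
/*
 * Reference-count sketch: mmb_get()/mmb_put() bracket a window in which
 * the block must stay alive; the final mmb_put() frees the block once
 * every ref class (phy, map, cma_smmu, sec_smmu) has dropped to zero.
 */
#if 0
static void example_ref_window(u32 addr, int iommu)
{
    if (mmb_get(addr, iommu) != HI_SUCCESS) {
        return;
    }
    /* ... the buffer is guaranteed to stay allocated here ... */
    mmb_put(addr, iommu);
}
#endif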

int mmb_ref_query(u32 addr, int iommu, u32 *ref)
{
    hil_mmb_t *mmb = NULL;

    if (ref == NULL) {
        hi_mmz_warn("ref should not be null!\n");
        return HI_FAILURE;
    }

    mmb = hil_mmb_getby_phys((HI_U32)addr, (HI_U32)iommu);
    if (mmb == NULL) {
        hi_mmz_warn("err args!\n");
        return HI_FAILURE;
    }
    down(&g_mmz_lock);
    *ref = mmb->phy_ref;
    up(&g_mmz_lock);

    return HI_SUCCESS;
}

int mem_source_query(u32 iommu_addr, int *source)
{
    phys_addr_t phys;
    hil_mmb_t *mmb = NULL;
    hil_mmz_t *zone = NULL;
    hil_mmb_t *m = NULL;

    if (source == NULL || !iommu_addr) {
        hi_mmz_warn("source or iommu_addr should not be null!\n");
        return HI_FAILURE;
    }

    phys = hisi_iommu_domain_iova_to_phys(iommu_addr);
    if (!phys) {
        /* iommu_addr is illegal */
        *source = -1;
        goto out;
    }

    down(&g_mmz_lock);
    list_for_each_entry(zone, &g_mmz_list, list) {
        struct rb_node *n = NULL;

        if (zone->iommu != 1) {
            continue;
        }

        for (n = rb_first(&zone->root); n; n = rb_next(n)) {
            m = NULL;
            m = rb_entry(n, hil_mmb_t, s_node);
            if ((m->iommu_addr <= iommu_addr) && (iommu_addr < (m->iommu_addr + m->length))) {
                mmb = m;
                break;
            }
        }
    }
    if (mmb == NULL) {
        *source = 1; /* the iommu_addr is from another source, such as ion */
    } else {
        *source = 0; /* the iommu_addr is from the mmz driver */
    }

    up(&g_mmz_lock);

out:
    return HI_SUCCESS;
}

int sec_mem_source_query(u32 sec_iommu, int *source)
{
    hil_mmb_t *mmb = NULL;
    hil_mmz_t *zone = NULL;

    if (source == NULL) {
        return HI_FAILURE;
    }
    if (!sec_iommu) {
        hi_mmz_warn("sec_iommu:0x%x err args\n", sec_iommu);
        *source = -1;
        return HI_FAILURE;
    }

    down(&g_mmz_lock);
    list_for_each_entry(zone, &g_mmz_list, list) {
        struct rb_node *n;
        for (n = rb_first(&zone->root); n; n = rb_next(n)) {
            hil_mmb_t *m = NULL;

            if (zone->iommu) {
                m = rb_entry(n, hil_mmb_t, s_node);
            } else {
                m = rb_entry(n, hil_mmb_t, node);
            }

            if ((m->sec_smmu <= sec_iommu) && (sec_iommu < (m->sec_smmu + m->length))) {
                mmb = m;
                goto end;
            } else {
                continue;
            }
        }
    }
end:
    up(&g_mmz_lock);

    if (mmb == NULL) {
        *source = 1; /* the buffer at sec_smmu is not from the mmz driver */
    } else {
        *source = 0; /* the buffer at sec_smmu is from the mmz driver */
    }

    return HI_SUCCESS;
}

static hil_mmz_t *hil_mmz_find(HI_U32 gfp, const char *mmz_name)
{
    hil_mmz_t *mmz = NULL;

    down(&g_mmz_lock);
    mmz = list_for_each_mmz(gfp, mmz_name, MMZ_NODE_FIRST_MATCHED);
    up(&g_mmz_lock);

    return mmz;
}
/*
 * name,gfp,phys_start,nbytes,alloc_type;
 * All params in hex mode, except name.
 */
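/*
 * Illustrative bootarg only (the values are hypothetical): a zone spec
 * such as "anonymous,0,0x40000000,0x10000000" is parsed below into
 * argv[0]..argv[3]; several zone specs may be chained with ':'.
 */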
static int media_mem_parse_cmdline(char *s)
{
    hil_mmz_t *zone = NULL;
    char *line = NULL;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
    struct ion_platform_heap *heap = NULL;
#else
    struct dma_heap *heap;
    struct cma *cma;
#endif
    int ret;

    line = strsep(&s, ":");
    while (line != NULL) {
        int i;
        char *argv[6]; /* 6: max number of argument strings to keep */

        /*
         * "line" holds the comma-separated args "argv[0], argv[1], argv[2], argv[3], argv[4]",
         * e.g. "<mmz_name>, <gfp>, <phys_start_addr>, <size>, <alloc_type>".
         * For convenience they are referenced by "hard-coded" index, i.e. argv[0].
         */
        for (i = 0; (argv[i] = strsep(&line, ",")) != NULL;) {
            if (++i == ARRAY_SIZE(argv)) {
                break;
            }
        }
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
        heap = hisi_get_cma_heap(argv[0]);
        if (heap == NULL) {
            hi_mmz_warn("can't get cma zone info:%s\n", argv[0]);
            continue;
        }
#else
        heap = get_heap_by_name(CMA_HEAP_NAME);
        cma = dev_get_cma_area(NULL);
        if (heap == NULL || cma == NULL) {
            hi_mmz_warn("can't get cma zone info:%s\n", argv[0]);
            continue;
        }
#endif

        if (i == 4) { /* 4: exactly four comma-separated fields were parsed */
            zone = hil_mmz_create("null", 0, 0, 0);
            if (zone == NULL) {
                continue;
            }
            ret = strncpy_s(zone->name, HIL_MAX_NAME_LEN, argv[0], (HIL_MAX_NAME_LEN - 1));
            if (ret != EOK) {
                hi_mmz_error("strncpy_s fail\n");
                hil_mmz_destroy(zone);
                return HI_FAILURE;
            }
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
            zone->gfp = 0;
            zone->zone_start = heap->base;
            zone->nbytes = heap->size;
            zone->heap_id_mask = 1 << (heap->id);
#else
            zone->gfp = 0;
            zone->zone_start = cma_get_base(cma);
            zone->nbytes = cma_get_size(cma);
            zone->heap = heap;
#endif
            /* all cma zones share one root; all mmbs from cma zones are added to this root. */
            zone->root = RB_ROOT;
            zone->iommu = 0;
        } else {
            hi_mmz_error("MMZ: your parameter num is not correct!\n");
            continue;
        }

        if (hil_mmz_register(zone)) {
            hi_mmz_warn("Add MMZ failed: " HIL_MMZ_FMT "\n", hil_mmz_fmt_arg(zone));
            hil_mmz_destroy(zone);
        }

        zone = NULL;
    }
    return 0;
}

#define MAX_MMZ_INFO_LEN (64 * 1024)

#if !(HI_PROC_SUPPORT == 0)
static void mmz_proc_head(struct seq_file *m, const hil_mmz_t *p, const int zone_cnt)
{
    struct rb_node *n = NULL;
    char *mem_type = NULL;
    char *smmu_name = "SMMU";
    char *phy_name = "DDR";
    unsigned int number = 0;

    if (p->iommu) {
        mem_type = smmu_name;
    } else {
        mem_type = phy_name;
    }

    seq_puts(m, SPLIT_LINE);
    seq_printf(m, "|                   %s           |  ID  | ZONE  |  KVIRT  |  FLAGS  |"
               "  LENGTH(KB)  |       NAME        |\n", mem_type);
    seq_puts(m, SPLIT_LINE);

    for (n = rb_first(&p->root); n; n = rb_next(n)) {
        number++;
    }

    seq_printf(m, "|ZONE[%d]: (0x%08x, 0x%08x)   %d         %d        0x%08x      %-10u   \"%s%-14s|\n",
               zone_cnt, p->zone_start, p->zone_start + p->nbytes - 1, number, p->iommu, p->gfp,
               p->nbytes / SZ_1K, p->name, "\"");
}

static int mmz_proc_zone(struct seq_file *m, const hil_mmz_t *p, unsigned int *used_size, int *block_number)
{
    hil_mmb_t *mmb = NULL;
    struct rb_node *n = NULL;
    unsigned int used_size_tmp = 0;
    int block_number_tmp = 0;

    for (n = rb_first(&p->root); n; n = rb_next(n)) {
        if (p->iommu) {
            mmb = rb_entry(n, hil_mmb_t, s_node);
            if (!mmb->iommu) {
                continue;
            }
        } else {
            mmb = rb_entry(n, hil_mmb_t, node);
        }
        seq_printf(m, "|" HIL_MMB_FMT "|\n", hil_mmb_fmt_arg(mmb));
        if (m->count > MAX_MMZ_INFO_LEN) {
            hi_mmz_error("mmz_info_buff overflow(0x%x), more than 64k data!\n", (unsigned int)m->count);
            return -1; /* -1: proc error */
        }
        used_size_tmp += mmb->length / 1024; /* 1024: convert bytes to KB */
        block_number_tmp++;
    }

    *used_size += used_size_tmp;
    *block_number += block_number_tmp;

    return 0; /* 0: proc success */
}

static void mmz_proc_statistics(struct seq_file *m, const hil_mmz_t *p, const int zone_cnt,
                                int block_number, const unsigned int used_size)
{
    int total_size = p->nbytes / 1024; /* 1024: convert bytes to KB */
    unsigned int free_size = total_size - used_size;

    seq_puts(m, SPLIT_LINE);
    seq_printf(m, "|%-102s|\n", "Summary:");
    seq_puts(m, SPLIT_LINE);
    if (p->iommu) {
        seq_puts(m, "|  SMMU Total Size  |    Iommu Used     |     Idle     |  Zone Number  |"
                 "   Block Number                 |\n");
    } else {
        seq_puts(m, "|  MMZ Total Size  |     CMA Used      |     Idle     |  Zone Number  |"
                 "   Block Number                 |\n");
    }

    seq_puts(m, SPLIT_LINE);
    seq_printf(m, "|       %d%-8s       %d%-8s          %d%-8s         %d              %d                |\n",
               total_size / 1024, "MB", used_size / 1024, "MB", /* 1024: convert KB to MB */
               free_size / 1024, "MB", zone_cnt, block_number); /* 1024: convert KB to MB */
    seq_puts(m, SPLIT_LINE);
    seq_puts(m, "\n");
}
#endif

int mmz_read_proc(struct seq_file *m, void *v)
{
#if !(HI_PROC_SUPPORT == 0)
    int zone_cnt = 0;
    int block_number;
    hil_mmz_t *p = NULL;
    unsigned int used_size;
    int ret;

    if (m == NULL) {
        return HI_FAILURE;
    }
    down(&g_mmz_lock);

    /* Collect all mmb info into mmz_info_buff */
    list_for_each_entry(p, &g_mmz_list, list) {
#ifndef HI_SMMU_SUPPORT
        if (p->iommu) {
            continue;
        }
#endif

        mmz_proc_head(m, p, zone_cnt);
        if (m->count > MAX_MMZ_INFO_LEN) {
            hi_mmz_error("mmz_info_buff overflow(0x%x), more than 64k data!\n", (unsigned int)m->count);
            break;
        }

        block_number = 0;
        used_size = 0;
        ret = mmz_proc_zone(m, p, &used_size, &block_number);
        if (ret) {
            hi_mmz_error("mmz_proc_zone failed!\n");
            break;
        }

        mmz_proc_statistics(m, p, zone_cnt, block_number, used_size);
        if (m->count > MAX_MMZ_INFO_LEN) {
            hi_mmz_error("mmz_info_buff overflow(0x%x), more than 64k data!\n", (unsigned int)m->count);
            break;
        }
        zone_cnt++;
    }

    up(&g_mmz_lock);

#endif
    return 0;
}
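
/*
 * The rendered output is one table per zone: the header and ZONE[n] line
 * from mmz_proc_head(), one HIL_MMB_FMT line per allocated block from
 * mmz_proc_zone(), then the totals table from mmz_proc_statistics().
 */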

#define MMZ_SETUP_CMDLINE_LEN 256
static char __initdata setup_zones[MMZ_SETUP_CMDLINE_LEN] = "ddr,0,0,160M";

static void mmz_exit_check(void)
{
    hil_mmz_t *p = NULL;

    mmz_trace_func();
    for (p = hil_mmz_find(0, NULL); p != NULL; p = hil_mmz_find(0, NULL)) {
        hil_mmz_unregister(p);
    }
}

static int mmz_zone_init(void)
{
    char *s = NULL;
    char *p = NULL;
    char *q = NULL;
    int ret;

    ret = strncpy_s(g_line, COMMAND_LINE_SIZE, saved_command_line, COMMAND_LINE_SIZE - 1);
    if (ret != EOK) {
        hi_mmz_error("strncpy_s fail\n");
        return HI_FAILURE;
    }

    q = strstr(g_line, "mmz=");
    if (q != NULL) {
        s = strsep(&q, "=");
        if (s != NULL) {
            p = strsep(&q, " ");
        }
        if (p != NULL) {
            ret = strncpy_s(setup_zones, MMZ_SETUP_CMDLINE_LEN, p, (MMZ_SETUP_CMDLINE_LEN - 1));
            if (ret != EOK) {
                hi_mmz_error("strncpy_s fail\n");
                return HI_FAILURE;
            }
        }
    }

    ret = media_mem_parse_cmdline(setup_zones);

    return ret;
}
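
/*
 * For instance, with bootargs "console=ttyAMA0 mmz=ddr,0,0,256M quiet"
 * (hypothetical values), strstr() locates "mmz=...", the first strsep()
 * drops the "mmz" key and the second cuts at the following space, leaving
 * setup_zones = "ddr,0,0,256M". Without an "mmz=" option the built-in
 * default "ddr,0,0,160M" is parsed instead.
 */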

#ifdef HI_SMMU_SUPPORT
static int iommu_zone_init(void)
{
    hil_mmz_t *zone = NULL;
    struct iommu_zone *iommu_zone = NULL;
#if LINUX_VERSION_CODE > KERNEL_VERSION(5, 10, 0)
    struct dma_heap *heap = NULL;
#endif
    int ret;

    iommu_zone = hisi_get_iommu_zone();
    if (iommu_zone == NULL) {
        hi_mmz_warn("iommu zone can not be used!\n");
        return HI_FAILURE;
    }

    zone = hil_mmz_create("null", 0, 0, 0);
    if (zone == NULL) {
        hi_mmz_warn("iommu zone creation failed; iommu zone cannot be used!\n");
        return HI_FAILURE;
    }
    ret = strcpy_s(zone->name, HIL_MAX_NAME_LEN, "iommu");
    if (ret != EOK) {
        hi_mmz_error("strcpy_s fail\n");
        hil_mmz_destroy(zone);
        return HI_FAILURE;
    }
    zone->gfp = 0;
    zone->zone_start = iommu_zone->iova_start;
    zone->nbytes = iommu_zone->iova_end - iommu_zone->iova_start + 1;
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
    zone->heap_id_mask = ION_HEAP_SYSTEM_MASK;
#else
    heap = get_heap_by_name(SYSTEM_HEAP_NAME);
    if (heap == NULL) {
        hi_mmz_error("get dma heap failed!\n");
        hil_mmz_destroy(zone);
        return HI_FAILURE;
    }

    zone->heap = heap;
#endif
    zone->root = RB_ROOT;
    zone->iommu = 1;
    if (hil_mmz_register(zone)) {
        hi_mmz_warn("Add MMZ failed: " HIL_MMZ_FMT "\n", hil_mmz_fmt_arg(zone));
        hil_mmz_destroy(zone);
    }

    return HI_SUCCESS;
}
#endif /* HI_SMMU_SUPPORT */

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
static int client_init(void)
{
    struct mmz_iommu *common = &g_mmz_iommu;

    common->client = hisi_ion_client_create("cma-iommu");
    if (common->client == NULL) {
        hi_mmz_warn("ion client creation failed!\n");
        return HI_FAILURE;
    }

    return HI_SUCCESS;
}
#endif

static int g_init_done;
int hi_drv_mmz_init(void)
{
    int ret;

    if (g_init_done) {
        return 0;
    }
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
    ret = client_init();
    if (ret) {
        hi_mmz_warn("client init failed!\n");
        return HI_FAILURE;
    }
#endif

    ret = mmz_zone_init();
    if (ret) {
        hi_mmz_warn("mmz zone init failed!\n");
        return HI_FAILURE;
    }

#ifdef HI_SMMU_SUPPORT
    ret = iommu_zone_init();
    if (ret) {
        hi_mmz_warn("iommu zone init failed!\n");
        return HI_FAILURE;
    }
#endif

    g_init_done = 1;
    return HI_SUCCESS;
}
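
/*
 * A minimal caller sketch (hypothetical module glue, not part of this
 * driver): hi_drv_mmz_init() is guarded by g_init_done, so calling it from
 * a module_init() hook and pairing it with hi_drv_mmz_exit() is safe:
 *
 *   static int __init mmz_mod_init(void)  { return hi_drv_mmz_init(); }
 *   static void __exit mmz_mod_exit(void) { hi_drv_mmz_exit(); }
 *   module_init(mmz_mod_init);
 *   module_exit(mmz_mod_exit);
 */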

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
void hi_drv_mmz_exit(void)
{
    struct mmz_iommu *common = &g_mmz_iommu;

    mmz_exit_check();
    g_init_done = 0;
    hisi_ion_client_destroy(common->client);
}
#else
void hi_drv_mmz_exit(void)
{
    mmz_exit_check();
    g_init_done = 0;
}
#endif

EXPORT_SYMBOL(hi_drv_mmz_init);
EXPORT_SYMBOL(hi_drv_mmz_exit);