/*
 * Copyright (C) 2021 HiSilicon (Shanghai) Technologies CO., LIMITED.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <generated/autoconf.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/miscdevice.h>
#include <linux/proc_fs.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/version.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#ifndef CONFIG_64BIT
#include <linux/highmem.h>
#include <asm/pgtable.h>
#endif
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/time.h>
#include <linux/dma-mapping.h>
#include "securec.h"
#include "hi_osal.h"
#include "allocator.h"


OSAL_LIST_HEAD(g_mmz_list);
OSAL_LIST_HEAD(g_map_mmz_list);

int anony = 0;
static DEFINE_SEMAPHORE(g_mmz_lock);

module_param(anony, int, S_IRUGO);
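/* Note: mem_check_module_param()/media_mem_init() below reject any value other than anony=1. */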

#define MMZ_SETUP_CMDLINE_LEN     256
#define MMZ_ALLOCATOR_NAME_LEN    32

#ifndef MODULE

static char __initdata g_setup_zones[MMZ_SETUP_CMDLINE_LEN] = CONFIG_HISILICON_MMZ_DEFAULT;
static int __init parse_kern_cmdline(char *line)
{
    if (strncpy_s(g_setup_zones, sizeof(g_setup_zones), line, sizeof(g_setup_zones) - 1) != EOK) {
        osal_trace("%s - strncpy_s failed!\n", __FUNCTION__);
        return -1;
    }
    return 1;
}
__setup("mmz=", parse_kern_cmdline);

static char __initdata g_setup_allocator[MMZ_ALLOCATOR_NAME_LEN];
static int __init parse_kern_allocator(char *line)
{
    if (strncpy_s(g_setup_allocator, sizeof(g_setup_allocator), line, sizeof(g_setup_allocator) - 1) != EOK) {
        osal_trace("%s - strncpy_s failed!\n", __FUNCTION__);
        return -1;
    }
    return 1;
}
__setup("mmz_allocator=", parse_kern_allocator);

static char __initdata g_mmap_zones[MMZ_SETUP_CMDLINE_LEN] = {'\0'};
__setup("map_mmz=", parse_kern_cmdline);

#else
static char g_setup_zones[MMZ_SETUP_CMDLINE_LEN] = {'\0'};
static char g_mmap_zones[MMZ_SETUP_CMDLINE_LEN] = {'\0'};
static char g_setup_allocator[MMZ_ALLOCATOR_NAME_LEN] = "hisi"; /* default setting */
module_param_string(mmz, g_setup_zones, MMZ_SETUP_CMDLINE_LEN, 0600);
module_param_string(map_mmz, g_mmap_zones, MMZ_SETUP_CMDLINE_LEN, 0600);
module_param_string(mmz_allocator, g_setup_allocator, MMZ_ALLOCATOR_NAME_LEN, 0600);
MODULE_PARM_DESC(mmz, "mmz_allocator=allocator mmz=name,0,start,size,type,eqsize:[others] map_mmz=start,size:[others]");
#endif
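/*
 * Illustrative usage only (module name, addresses and sizes below are made-up
 * examples): the parameters follow the MODULE_PARM_DESC(mmz) format above and
 * can be given either on the kernel command line ("mmz=...", "map_mmz=...",
 * "mmz_allocator=...") or at module load time, e.g.
 *     insmod <this_module>.ko anony=1 mmz_allocator=hisi mmz=anonymous,0,0x88000000,64M map_mmz=0x8c000000,16M
 */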

static struct mmz_allocator g_the_allocator;

hil_mmz_t *hil_mmz_create(const char *name,
                          unsigned long gfp,
                          unsigned long phys_start,
                          unsigned long nbytes)
{
    hil_mmz_t *p = NULL;

    mmz_trace_func();

    if (name == NULL) {
        osal_trace(KERN_ERR "%s: 'name' should not be NULL!", __FUNCTION__);
        return NULL;
    }

    p = kmalloc(sizeof(hil_mmz_t) + 1, GFP_KERNEL);
    if (p == NULL) {
        osal_trace(KERN_ERR "%s: System OOM!\n", __func__);
        return NULL;
    }

    (void)memset_s(p, sizeof(hil_mmz_t) + 1, 0, sizeof(hil_mmz_t) + 1);
    if (strncpy_s(p->name, HIL_MMZ_NAME_LEN, name, HIL_MMZ_NAME_LEN - 1) != EOK) {
        osal_trace("%s - strncpy_s failed!\n", __FUNCTION__);
        kfree(p);
        return NULL;
    }
    p->gfp = gfp;
    p->phys_start = phys_start;
    p->nbytes = nbytes;

    OSAL_INIT_LIST_HEAD(&p->list);
    OSAL_INIT_LIST_HEAD(&p->mmb_list);

    p->destructor = kfree;

    return p;
}
EXPORT_SYMBOL(hil_mmz_create);
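
/*
 * Typical zone lifecycle (illustrative sketch only; the zone name, gfp value
 * and address range below are made up):
 *
 *     hil_mmz_t *zone = hil_mmz_create("example", 0, 0x88000000, 64 * 1024 * 1024);
 *     if ((zone != NULL) && (hil_mmz_register(zone) != 0)) {
 *         hil_mmz_destroy(zone);      // registration failed, free the descriptor
 *     }
 *     ...
 *     hil_mmz_unregister(zone);       // fails with -1 while mmbs are still allocated
 *     hil_mmz_destroy(zone);
 */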

hil_mmz_t *hil_mmz_create_v2(const char *name,
                             unsigned long gfp,
                             unsigned long phys_start,
                             unsigned long nbytes,
                             unsigned int alloc_type,
                             unsigned long block_align)
{
    hil_mmz_t *p = NULL;

    mmz_trace_func();

    if (name == NULL) {
        osal_trace(KERN_ERR "%s: 'name' should not be NULL!", __FUNCTION__);
        return NULL;
    }

    p = kmalloc(sizeof(hil_mmz_t), GFP_KERNEL);
    if (p == NULL) {
        return NULL;
    }

    (void)memset_s(p, sizeof(hil_mmz_t), 0, sizeof(hil_mmz_t));
    if (strncpy_s(p->name, HIL_MMZ_NAME_LEN, name, HIL_MMZ_NAME_LEN - 1) != EOK) {
        osal_trace("%s - strncpy_s failed!\n", __FUNCTION__);
        kfree(p);
        return NULL;
    }
    p->gfp = gfp;
    p->phys_start = phys_start;
    p->nbytes = nbytes;
    p->alloc_type = alloc_type;
    p->block_align = block_align;

    OSAL_INIT_LIST_HEAD(&p->list);
    OSAL_INIT_LIST_HEAD(&p->mmb_list);

    p->destructor = kfree;

    return p;
}

int hil_mmz_destroy(hil_mmz_t *zone)
{
    if (zone == NULL) {
        return -1;
    }

    if (zone->destructor) {
        zone->destructor(zone);
    }
    return 0;
}
EXPORT_SYMBOL(hil_mmz_destroy);

static int _check_mmz(hil_mmz_t *zone)
{
    hil_mmz_t *p = NULL;

    unsigned long new_start = zone->phys_start;
    unsigned long new_end = zone->phys_start + zone->nbytes;

    if (zone->nbytes == 0) {
        return -1;
    }

    if (!((new_start >= __pa((uintptr_t)high_memory)) ||
        ((new_start < (unsigned long)PHYS_OFFSET) && (new_end <= (unsigned long)PHYS_OFFSET)))) {
        osal_trace(KERN_ERR "ERROR: Conflict MMZ:\n");
        osal_trace(KERN_ERR HIL_MMZ_FMT_S "\n", hil_mmz_fmt_arg(zone));
        osal_trace(KERN_ERR "MMZ conflict to kernel memory (0x%08lX, 0x%08lX)\n",
               (long unsigned int)PHYS_OFFSET,
               (long unsigned int)(__pa((uintptr_t)high_memory) - 1));
        return -1;
    }

    osal_list_for_each_entry(p, &g_mmz_list, list) {
        unsigned long start, end;
        start = p->phys_start;
        end   = p->phys_start + p->nbytes;
        if (new_start >= end) {
            continue;
        } else if (new_start < start && new_end <= start) {
            continue;
        } else {
        }

        osal_trace(KERN_ERR "ERROR: Conflict MMZ:\n");
        osal_trace(KERN_ERR "MMZ new:   " HIL_MMZ_FMT_S "\n", hil_mmz_fmt_arg(zone));
        osal_trace(KERN_ERR "MMZ exist: " HIL_MMZ_FMT_S "\n", hil_mmz_fmt_arg(p));
        osal_trace(KERN_ERR "Add new MMZ failed!\n");
        return -1;
    }

    return 0;
}

int hil_mmz_register(hil_mmz_t *zone)
{
    int ret;

    mmz_trace(1, HIL_MMZ_FMT_S, hil_mmz_fmt_arg(zone));

    if (zone == NULL) {
        return -1;
    }

    down(&g_mmz_lock);

    if (strcmp(g_setup_allocator, "hisi") == 0) {
        ret = _check_mmz(zone);
        if (ret) {
            up(&g_mmz_lock);
            return ret;
        }
    }

    OSAL_INIT_LIST_HEAD(&zone->mmb_list);

    osal_list_add(&zone->list, &g_mmz_list);

    up(&g_mmz_lock);

    return 0;
}

int hil_mmz_unregister(hil_mmz_t *zone)
{
    int losts = 0;
    hil_mmb_t *p = NULL;

    if (zone == NULL) {
        return -1;
    }

    mmz_trace_func();

    down(&g_mmz_lock);
    osal_list_for_each_entry(p, &zone->mmb_list, list) {
        osal_trace(KERN_WARNING "MB Lost: " HIL_MMB_FMT_S "\n",
               hil_mmb_fmt_arg(p));
        losts++;
    }

    if (losts) {
        osal_trace(KERN_ERR "%d mmbs not free, mmz<%s> can not be unregistered!\n",
               losts, zone->name);
        up(&g_mmz_lock);
        return -1;
    }

    osal_list_del(&zone->list);
    up(&g_mmz_lock);

    return 0;
}

hil_mmb_t *hil_mmb_alloc(const char *name,
                         unsigned long size,
                         unsigned long align,
                         unsigned long gfp,
                         const char *mmz_name)
{
    hil_mmb_t *mmb = NULL;

    down(&g_mmz_lock);
    if (g_the_allocator.mmb_alloc != NULL) {
        mmb = g_the_allocator.mmb_alloc(name, size, align, gfp, mmz_name, NULL);
    }
    up(&g_mmz_lock);

    return mmb;
}
EXPORT_SYMBOL(hil_mmb_alloc);
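
/*
 * Typical buffer lifecycle (illustrative sketch only; the name, size and flag
 * values are made up and error checks are omitted):
 *
 *     hil_mmb_t *mmb = hil_mmb_alloc("example_buf", 0x1000, 0, 0, NULL);
 *     void *kvirt = hil_mmb_map2kern(mmb);    // or hil_mmb_map2kern_cached()
 *     ...
 *     hil_mmb_flush_dcache_byaddr(kvirt, mmb->phys_addr, 0x1000);  // cached mappings only
 *     hil_mmb_unmap(mmb);
 *     hil_mmb_free(mmb);
 */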

hil_mmb_t *hil_mmb_alloc_v2(const char *name,
                            unsigned long size,
                            unsigned long align,
                            unsigned long gfp,
                            const char *mmz_name,
                            unsigned int order)
{
    hil_mmb_t *mmb = NULL;

    down(&g_mmz_lock);
    if (g_the_allocator.mmb_alloc_v2 != NULL) {
        mmb = g_the_allocator.mmb_alloc_v2(name, size, align,
                                           gfp, mmz_name, NULL, order);
    }
    up(&g_mmz_lock);

    return mmb;
}
EXPORT_SYMBOL(hil_mmb_alloc_v2);

hil_mmb_t *hil_mmb_alloc_in(const char *name,
                            unsigned long size,
                            unsigned long align,
                            hil_mmz_t *_user_mmz)
{
    hil_mmb_t *mmb = NULL;

    if (_user_mmz == NULL) {
        return NULL;
    }

    down(&g_mmz_lock);
    if (g_the_allocator.mmb_alloc != NULL) {
        mmb = g_the_allocator.mmb_alloc(name, size, align,
                                        _user_mmz->gfp, _user_mmz->name, _user_mmz);
    }
    up(&g_mmz_lock);

    return mmb;
}

hil_mmb_t *hil_mmb_alloc_in_v2(const char *name,
                               unsigned long size,
                               unsigned long align,
                               hil_mmz_t *_user_mmz,
                               unsigned int order)
{
    hil_mmb_t *mmb = NULL;

    if (_user_mmz == NULL) {
        return NULL;
    }

    down(&g_mmz_lock);
    if (g_the_allocator.mmb_alloc_v2 != NULL) {
        mmb = g_the_allocator.mmb_alloc_v2(name, size, align, _user_mmz->gfp,
                                           _user_mmz->name, _user_mmz, order);
    }
    up(&g_mmz_lock);

    return mmb;
}

void *hil_mmb_map2kern(hil_mmb_t *mmb)
{
    void *p = NULL;

    if (mmb == NULL) {
        return NULL;
    }

    down(&g_mmz_lock);
    if (g_the_allocator.mmb_map2kern != NULL) {
        p = g_the_allocator.mmb_map2kern(mmb, 0);
    }
    up(&g_mmz_lock);

    return p;
}
EXPORT_SYMBOL(hil_mmb_map2kern);

/* mmf: media-memory fragment */
void *hil_mmf_map2kern_nocache(unsigned long phys, int len)
{
    void *virt = g_the_allocator.mmf_map(phys, len, 0);
    if (virt != NULL) {
        return virt;
    }

    return NULL;
}
EXPORT_SYMBOL(hil_mmf_map2kern_nocache);

void *hil_mmf_map2kern_cache(unsigned long phys, int len)
{
    void *virt = g_the_allocator.mmf_map(phys, len, 1);
    if (virt != NULL) {
        return virt;
    }

    return NULL;
}
EXPORT_SYMBOL(hil_mmf_map2kern_cache);

void hil_mmf_unmap(void *virt)
{
    if (g_the_allocator.mmf_unmap != NULL) {
        g_the_allocator.mmf_unmap(virt);
    }
}
EXPORT_SYMBOL(hil_mmf_unmap);

void *hil_mmb_map2kern_cached(hil_mmb_t *mmb)
{
    void *p = NULL;

    if (mmb == NULL) {
        return NULL;
    }

    down(&g_mmz_lock);
    if (g_the_allocator.mmb_map2kern != NULL) {
        p = g_the_allocator.mmb_map2kern(mmb, 1);
    }
    up(&g_mmz_lock);

    return p;
}
EXPORT_SYMBOL(hil_mmb_map2kern_cached);

int hil_mmb_flush_dcache_byaddr(void *kvirt,
                                unsigned long phys_addr,
                                unsigned long length)
{
    if (kvirt == NULL) {
        return -EINVAL;
    }
    /*
     * Flush by range instead of using flush_cache_all(),
     * because flush_cache_all() only flushes the local CPU,
     * and the on_each_cpu() macro cannot be used to flush
     * all CPUs while IRQs are disabled.
     */
#ifdef CONFIG_64BIT
    __flush_dcache_area(kvirt, length);
#else
    /*
     * dmac_map_area is not available in the hi3518ev200 kernel;
     * ARM9 is not supported yet.
     */
#if (HICHIP == 0x3516A100)
    /* flush without clean */
    dmac_map_area(kvirt, length, DMA_TO_DEVICE);
#else
    __cpuc_flush_dcache_area(kvirt, length);
#endif
#endif

#if defined(CONFIG_CACHE_HIL2V200) || defined(CONFIG_CACHE_L2X0)
    /* flush l2 cache, use paddr */
    /*
     * if length > L2 cache size, then this interface
     * will call <outer_flush_all>
     */
    outer_flush_range(phys_addr, phys_addr + length);
#else
    osal_unused(phys_addr);
#endif

    return 0;
}
EXPORT_SYMBOL(hil_mmb_flush_dcache_byaddr);

int hil_mmb_invalid_cache_byaddr(void *kvirt,
                                 unsigned long phys_addr,
                                 unsigned long length)
{
    osal_unused(phys_addr);
    if (kvirt == NULL) {
        return -EINVAL;
    }

#ifdef CONFIG_64BIT
    __flush_dcache_area(kvirt, length);
#else
    /*
     * dmac_map_area is not available in the hi3518ev200 kernel;
     * ARM9 is not supported yet.
     */
#if (HICHIP == 0x3516A100)
    /* flush without clean */
    dmac_map_area(kvirt, length, DMA_FROM_DEVICE);
#else
    __cpuc_flush_dcache_area(kvirt, length);
#endif
#endif
    return 0;
}
EXPORT_SYMBOL(hil_mmb_invalid_cache_byaddr);

int hil_mmb_unmap(hil_mmb_t *mmb)
{
    int ref;

    if ((mmb == NULL) || (g_the_allocator.mmb_unmap == NULL)) {
        return -1;
    }

    down(&g_mmz_lock);

    ref = g_the_allocator.mmb_unmap(mmb);

    up(&g_mmz_lock);

    return ref;
}
EXPORT_SYMBOL(hil_mmb_unmap);

int hil_mmb_get(hil_mmb_t *mmb)
{
    int ref;

    if (mmb == NULL) {
        return -1;
    }
    down(&g_mmz_lock);

    if (mmb->flags & HIL_MMB_RELEASED) {
        osal_trace(KERN_WARNING "hil_mmb_get: amazing, mmb<%s> is released!\n", mmb->name);
    }
    ref = ++mmb->phy_ref;

    up(&g_mmz_lock);

    return ref;
}

int hil_mmb_put(hil_mmb_t *mmb)
{
    int ref;

    if (mmb == NULL) {
        return -1;
    }

    down(&g_mmz_lock);

    if (mmb->flags & HIL_MMB_RELEASED) {
        osal_trace(KERN_WARNING "hil_mmb_put: amazing, mmb<%s> is released!\n", mmb->name);
    }

    ref = --mmb->phy_ref;

    if ((mmb->flags & HIL_MMB_RELEASED) && (mmb->phy_ref == 0) && (mmb->map_ref == 0)) {
        if (g_the_allocator.mmb_free != NULL) {
            g_the_allocator.mmb_free(mmb);
        }
    }

    up(&g_mmz_lock);

    return ref;
}

int hil_mmb_free(hil_mmb_t *mmb)
{
    mmz_trace_func();

    if (mmb == NULL) {
        return -1;
    }

    mmz_trace(1, HIL_MMB_FMT_S, hil_mmb_fmt_arg(mmb));
    down(&g_mmz_lock);

    if (mmb->flags & HIL_MMB_RELEASED) {
        osal_trace(KERN_WARNING "hil_mmb_free: amazing, mmb<%s> has been released, but is still in use!\n", mmb->name);
        up(&g_mmz_lock);
        return 0;
    }

    if (mmb->phy_ref > 0) {
        osal_trace(KERN_WARNING "hil_mmb_free: free of mmb<%s> delayed because its ref-count is %d!\n",
               mmb->name, mmb->phy_ref);
        mmb->flags |= HIL_MMB_RELEASED;
        up(&g_mmz_lock);
        return 0;
    }

    if (mmb->flags & HIL_MMB_MAP2KERN) {
        osal_trace(KERN_WARNING "free of mmb<%s> delayed because it is kernel-mapped to 0x%pK with map_ref %d!\n",
            mmb->name, mmb->kvirt, mmb->map_ref);
        mmb->flags |= HIL_MMB_RELEASED;
        up(&g_mmz_lock);
        return 0;
    }
    if (g_the_allocator.mmb_free != NULL) {
        g_the_allocator.mmb_free(mmb);
    }
    up(&g_mmz_lock);
    return 0;
}
EXPORT_SYMBOL(hil_mmb_free);

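/*
 * MACH_MMB() walks every registered zone in g_mmz_list and finds the mmb whose
 * 'member' field equals 'val'; on success 'p' points to the matching mmb,
 * otherwise 'p' is left NULL. Callers in this file invoke it with g_mmz_lock held.
 */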
#define MACH_MMB(p, val, member) do { \
    hil_mmz_t *__mach_mmb_zone__ = NULL; \
    (p) = NULL; \
    list_for_each_entry(__mach_mmb_zone__, &g_mmz_list, list) { \
        hil_mmb_t *__mach_mmb__ = NULL; \
        list_for_each_entry(__mach_mmb__, &__mach_mmb_zone__->mmb_list, list) { \
            if (__mach_mmb__->member == (val)) { \
                (p) = __mach_mmb__; \
                break; \
            } \
        } \
        if ((p) != NULL) { \
            break; \
        } \
    } \
} while (0)

hil_mmb_t *hil_mmb_getby_phys(unsigned long addr)
{
    hil_mmb_t *p = NULL;
    down(&g_mmz_lock);
    MACH_MMB(p, addr, phys_addr);
    up(&g_mmz_lock);
    return p;
}
EXPORT_SYMBOL(hil_mmb_getby_phys);

unsigned long usr_virt_to_phys(unsigned long virt)
{
    pgd_t *pgd = NULL;
#if LINUX_VERSION_CODE > KERNEL_VERSION(5,10,0)
    p4d_t *p4d = NULL;
#endif
    pud_t *pud = NULL;
    pmd_t *pmd = NULL;
    pte_t *pte = NULL;
    unsigned int cacheable = 0;
    unsigned long page_addr;
    unsigned long page_offset;
    unsigned long phys_addr;

    if (virt & 0x3) {
        osal_trace("invalid virt addr 0x%08lx[not 4 bytes align]\n", virt);
        return 0;
    }

    if (virt >= PAGE_OFFSET) {
        osal_trace("invalid user space virt addr 0x%08lx\n", virt);
        return 0;
    }

    pgd = pgd_offset(current->mm, virt);
    if (pgd_none(*pgd)) {
        osal_trace("osal_trace: not mapped in pgd!\n");
        return 0;
    }

#if LINUX_VERSION_CODE > KERNEL_VERSION(5,10,0)
    p4d = p4d_offset(pgd, virt);
    pud = pud_offset(p4d, virt);
#else
    pud = pud_offset(pgd, virt);
#endif
    if (pud_none(*pud)) {
        osal_trace("osal_trace: not mapped in pud!\n");
        return 0;
    }

    pmd = pmd_offset(pud, virt);
    if (pmd_none(*pmd)) {
        osal_trace("osal_trace: not mapped in pmd!\n");
        return 0;
    }

    pte = pte_offset_map(pmd, virt);
    if (pte_none(*pte)) {
        osal_trace("osal_trace: not mapped in pte!\n");
        pte_unmap(pte);
        return 0;
    }

    page_addr = (pte_val(*pte) & PHYS_MASK) & PAGE_MASK;
    page_offset = virt & ~PAGE_MASK;
    phys_addr = page_addr | page_offset;
#ifdef CONFIG_64BIT
    if (pte_val(*pte) & (1 << 4)) { /* 4: cacheable flag of 64-bit linux is bit 4 */
#else
    if (pte_val(*pte) & (1 << 3)) { /* 3: cacheable flag of 32-bit linux is bit 3 */
#endif
        cacheable = 1;
    }

    /*
     * phys_addr: the lowest bit indicates its cache attribute
     * 1: cacheable
     * 0: uncacheable
     */
    phys_addr |= cacheable;

    pte_unmap(pte);

    return phys_addr;
}
EXPORT_SYMBOL(usr_virt_to_phys);
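
/*
 * Caller-side sketch (illustrative only, variable names are made up): bit 0 of
 * the returned value carries the cache attribute, so callers split it off
 * before using the address:
 *     unsigned long ret = usr_virt_to_phys(user_vaddr);
 *     unsigned int cached = ret & 0x1;
 *     unsigned long phys = ret & ~0x1UL;
 */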

#define mach_mmb_2(p, val, member, Outoffset) do { \
    hil_mmz_t *__mach_mmb_zone__ = NULL; \
    (p) = NULL; \
    list_for_each_entry(__mach_mmb_zone__, &g_mmz_list, list) { \
        hil_mmb_t *__mach_mmb__ = NULL; \
        list_for_each_entry(__mach_mmb__, &__mach_mmb_zone__->mmb_list, list) { \
            if ((__mach_mmb__->member <= (val)) && ((__mach_mmb__->length + __mach_mmb__->member) > (val))) { \
                (p) = __mach_mmb__; \
                (Outoffset) = (val) - __mach_mmb__->member; \
                break; \
            } \
        } \
        if ((p) != NULL) { \
            break; \
        } \
    } \
} while (0)
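
/*
 * Unlike MACH_MMB(), mach_mmb_2() above matches any 'val' that falls inside the
 * range [member, member + length) of an mmb and also returns the offset of
 * 'val' from the start of that mmb in 'Outoffset'. It is likewise used with
 * g_mmz_lock held.
 */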

hil_mmb_t *hil_mmb_getby_kvirt(void *virt)
{
    hil_mmb_t *p = NULL;
    unsigned long out_offset;

    if (virt == NULL) {
        return NULL;
    }
    down(&g_mmz_lock);
    mach_mmb_2(p, virt, kvirt, out_offset);
    up(&g_mmz_lock);

    mmz_trace(1, "Outoffset %lu \n", out_offset);

    return p;
}
EXPORT_SYMBOL(hil_mmb_getby_kvirt);

hil_mmb_t *hil_mmb_getby_phys_2(unsigned long addr, unsigned long *out_offset)
{
    hil_mmb_t *p = NULL;

    down(&g_mmz_lock);
    mach_mmb_2(p, addr, phys_addr, *out_offset);
    up(&g_mmz_lock);
    return p;
}
EXPORT_SYMBOL(hil_mmb_getby_phys_2);

hil_mmz_t *hil_mmz_find(unsigned long gfp, const char *mmz_name)
{
    hil_mmz_t *p = NULL;

    down(&g_mmz_lock);
    begin_list_for_each_mmz(p, gfp, mmz_name)
    up(&g_mmz_lock);
    return p;
    end_list_for_each_mmz()
    up(&g_mmz_lock);

    return NULL;
}
EXPORT_SYMBOL(hil_mmz_find);

unsigned long hil_mmz_get_phys(const char *zone_name)
{
    hil_mmz_t *zone = NULL;

    zone = hil_mmz_find(0, zone_name);
    if (zone != NULL) {
        return zone->phys_start;
    }

    return 0;
}
EXPORT_SYMBOL(hil_mmz_get_phys);

static unsigned long _strtoul_ex(const char *s, char **ep, unsigned int base)
{
    char *__end_p = NULL;
    unsigned long __value;

    __value = simple_strtoul(s, &__end_p, base);

    switch (*__end_p) {
        case 'm':
        case 'M':
            __value <<= 10; /* 10: 1M=1024k, left shift 10bit */
            /* fall-through */
        case 'k':
        case 'K':
            __value <<= 10; /* 10: 1K=1024Byte, left shift 10bit */
            if (ep != NULL) {
                (*ep) = __end_p + 1;
            }
            /* fall-through */
        default:
            break;
    }

    return __value;
}
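
/*
 * Examples (illustrative only): _strtoul_ex("0x100000", NULL, 0) returns
 * 0x100000, _strtoul_ex("16K", NULL, 0) returns 16 * 1024, and
 * _strtoul_ex("64M", NULL, 0) returns 64 * 1024 * 1024, because the 'M'/'m'
 * case falls through to the 'K'/'k' shift.
 */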

static int _check_map_mmz(hil_mmz_t *zone)
{
    hil_mmz_t *p = NULL;

    unsigned long new_start = zone->phys_start;
    unsigned long new_end = zone->phys_start + zone->nbytes;

    if (zone->nbytes == 0) {
        return -1;
    }

    osal_list_for_each_entry(p, &g_map_mmz_list, list) {
        unsigned long start, end;
        start = p->phys_start;
        end   = p->phys_start + p->nbytes;

        if (new_start >= end) {
            continue;
        } else if ((new_start < start) && (new_end <= start)) {
            continue;
        } else {
        }

        osal_trace(KERN_ERR "ERROR: Conflict MMZ:\n");
        osal_trace(KERN_ERR "MMZ new:   " HIL_MMZ_FMT_S "\n", hil_mmz_fmt_arg(zone));
        osal_trace(KERN_ERR "MMZ exist: " HIL_MMZ_FMT_S "\n", hil_mmz_fmt_arg(p));
        osal_trace(KERN_ERR "Add new MMZ failed!\n");
        return -1;
    }

    return 0;
}

int hil_map_mmz_register(hil_mmz_t *zone)
{
    int ret;

    mmz_trace(1, HIL_MMZ_FMT_S, hil_mmz_fmt_arg(zone));

    if (zone == NULL) {
        return -1;
    }

    down(&g_mmz_lock);

    if (strcmp(g_setup_allocator, "hisi") == 0) {
        ret = _check_mmz(zone);
        if (ret != 0) {
            up(&g_mmz_lock);
            return ret;
        }
    }

    ret = _check_map_mmz(zone);
    if (ret) {
        up(&g_mmz_lock);
        return ret;
    }

    OSAL_INIT_LIST_HEAD(&zone->mmb_list);

    osal_list_add(&zone->list, &g_map_mmz_list);

    up(&g_mmz_lock);

    return 0;
}

int hil_map_mmz_unregister(hil_mmz_t *zone)
{
    int losts = 0;
    hil_mmb_t *p = NULL;

    if (zone == NULL) {
        return -1;
    }

    mmz_trace_func();

    down(&g_mmz_lock);
    osal_list_for_each_entry(p, &zone->mmb_list, list) {
        osal_trace(KERN_WARNING "MB Lost: " HIL_MMB_FMT_S "\n",
               hil_mmb_fmt_arg(p));
        losts++;
    }

    if (losts) {
        osal_trace(KERN_ERR "%d mmbs not free, mmz<%s> can not be unregistered!\n",
               losts, zone->name);
        up(&g_mmz_lock);
        return -1;
    }

    osal_list_del(&zone->list);
    up(&g_mmz_lock);

    return 0;
}

static int map_mmz_init(char *s)
{
    hil_mmz_t *zone = NULL;
    char *line = NULL;

    if (s[0] == '\0') {
        return 0;
    }

    while ((line = strsep(&s, ":")) != NULL) {
        int i;
        char *argv[2]; /* 2: map mmz has two arguments */

        for (i = 0; i < 2; i++) { /* 2: map mmz has two arguments */
            argv[i] = strsep(&line, ",");
            if (argv[i] == NULL) {
                break;
            }
        }

        if (i == 2) { /* 2: both arguments were parsed */
            zone = hil_mmz_create("null", 0, 0, 0);
            if (zone == NULL) {
                continue;
            }
            zone->phys_start = _strtoul_ex(argv[0], NULL, 0);
            zone->nbytes = _strtoul_ex(argv[1], NULL, 0);
        } else {
            osal_trace(KERN_ERR "error parameters\n");
            return -EINVAL;
        }

        if (hil_map_mmz_register(zone)) {
            osal_trace(KERN_WARNING "Add MMZ failed: " HIL_MMZ_FMT_S "\n", hil_mmz_fmt_arg(zone));
            hil_mmz_destroy(zone);
        }
        zone = NULL;
    }

    return 0;
}

static void map_mmz_exit(void)
{
    hil_mmz_t *pmmz = NULL;
    struct osal_list_head *p = NULL;
    struct osal_list_head *n = NULL;

    mmz_trace_func();

    list_for_each_safe(p, n, &g_map_mmz_list) {
        pmmz = list_entry(p, hil_mmz_t, list);
        osal_trace(KERN_WARNING "MMZ force removed: " HIL_MMZ_FMT_S "\n",
               hil_mmz_fmt_arg(pmmz));
        hil_map_mmz_unregister(pmmz);
        hil_mmz_destroy(pmmz);
    }
}

int hil_map_mmz_check_phys(unsigned long addr_start, unsigned long addr_len)
{
    hil_mmz_t *p = NULL;
    unsigned long addr_end = addr_start + addr_len;
    unsigned long temp_start, temp_end;

    if ((addr_len > 0) && (addr_end > addr_start)) {
        osal_list_for_each_entry(p, &g_map_mmz_list, list) {
            temp_start = p->phys_start;
            temp_end   = p->phys_start + p->nbytes;
            if ((addr_start >= temp_start) && (addr_end <= temp_end)) {
                return 0;
            }
        }
    }

    osal_trace(KERN_ERR "ERROR: MMAP ADDR: 0x%lx-0x%lx\n", addr_start, addr_end);

    return -1;
}
EXPORT_SYMBOL(hil_map_mmz_check_phys);

int hil_vma_check(unsigned long vm_start, unsigned long vm_end)
{
    struct vm_area_struct *pvma1 = NULL;
    struct vm_area_struct *pvma2 = NULL;

    pvma1 = find_vma(current->mm, vm_start);
    if (pvma1 == NULL) {
        osal_trace(KERN_ERR "ERROR: pvma1 is null\n");
        return -1;
    }

    pvma2 = find_vma(current->mm, vm_end - 1);
    if (pvma2 == NULL) {
        osal_trace(KERN_ERR "ERROR: pvma2 is null\n");
        return -1;
    }

    if (pvma1 != pvma2) {
        osal_trace(KERN_ERR "ERROR: pvma1:[0x%lx,0x%lx) and pvma2:[0x%lx,0x%lx) are not equal\n",
            pvma1->vm_start, pvma1->vm_end, pvma2->vm_start, pvma2->vm_end);
        return -1;
    }

    if (!(pvma1->vm_flags & VM_WRITE)) {
        osal_trace(KERN_ERR "ERROR vma flag:0x%lx\n", pvma1->vm_flags);
        return -1;
    }

    if (pvma1->vm_start > vm_start) {
        osal_trace("cannot find corresponding vma, vm[%lx, %lx], user range[%lx,%lx]\n",
            pvma1->vm_start, pvma1->vm_end, vm_start, vm_end);
        return -1;
    }

    return 0;
}
EXPORT_SYMBOL(hil_vma_check);

int hil_is_phys_in_mmz(unsigned long addr_start, unsigned long addr_len)
{
    hil_mmz_t *p = NULL;
    unsigned long addr_end = addr_start + addr_len;
    unsigned long temp_start, temp_end;

    if ((addr_len > 0) && (addr_end > addr_start)) {
        osal_list_for_each_entry(p, &g_mmz_list, list) {
            temp_start = p->phys_start;
            temp_end   = p->phys_start + p->nbytes;
            if ((addr_start >= temp_start) && (addr_end <= temp_end)) {
                return 0;
            }
        }
    }

    return -1;
}
EXPORT_SYMBOL(hil_is_phys_in_mmz);

int hil_mmb_flush_dcache_byaddr_safe(void *kvirt,
                                     unsigned long phys_addr,
                                     unsigned long length)
{
    int ret;
    struct mm_struct *mm = current->mm;

    if (kvirt == NULL) {
        return -EINVAL;
    }

#if LINUX_VERSION_CODE > KERNEL_VERSION(5,10,0)
    down_read(&mm->mmap_lock);
#else
    down_read(&mm->mmap_sem);
#endif

    if (hil_vma_check((unsigned long)(uintptr_t)kvirt, (unsigned long)(uintptr_t)kvirt + length)) {
#if LINUX_VERSION_CODE > KERNEL_VERSION(5,10,0)
        up_read(&mm->mmap_lock);
#else
        up_read(&mm->mmap_sem);
#endif
        return -EPERM;
    }

    ret = hil_mmb_flush_dcache_byaddr(kvirt, phys_addr, length);

#if LINUX_VERSION_CODE > KERNEL_VERSION(5,10,0)
    up_read(&mm->mmap_lock);
#else
    up_read(&mm->mmap_sem);
#endif

    return ret;
}
EXPORT_SYMBOL(hil_mmb_flush_dcache_byaddr_safe);

#define MEDIA_MEM_NAME  "media-mem"

#ifdef CONFIG_HI_PROC_SHOW_SUPPORT

int mmz_seq_show(struct osal_proc_dir_entry *sfile)
{
    hil_mmz_t *p = NULL;
    unsigned int zone_number = 0;
    unsigned int block_number = 0;
    unsigned int used_size = 0;
    unsigned int free_size;
    unsigned int mmz_total_size = 0;

    mmz_trace_func();

    down(&g_mmz_lock);
    list_for_each_entry(p, &g_mmz_list, list) {
        hil_mmb_t *mmb = NULL;
        hil_mmb_t *temp_mmb = NULL;
        osal_seq_printf(sfile, "+---ZONE: " HIL_MMZ_FMT_S "\n", hil_mmz_fmt_arg(p));
        mmz_total_size += p->nbytes / 1024; /* 1024: 1KByte = 1024Byte */
        ++zone_number;

        list_for_each_entry(mmb, &p->mmb_list, list) {
            if (temp_mmb != NULL && ((mmb)->phys_addr > mmz_grain_align((temp_mmb)->phys_addr + (temp_mmb)->length))) {
                osal_seq_printf(sfile, "   *-MMB: " HIL_MMB_FMT_S "\n", hil_mmb_fmt_arg(mmb));
            } else {
                osal_seq_printf(sfile, "   |-MMB: " HIL_MMB_FMT_S "\n", hil_mmb_fmt_arg(mmb));
            }
            temp_mmb = mmb;
            used_size += mmb->length / 1024; /* 1024: 1KByte = 1024Byte */
            ++block_number;
        }
    }

    if (mmz_total_size != 0) {
        free_size = mmz_total_size - used_size;
        osal_seq_printf(sfile, "\n---MMZ_USE_INFO:\n total size=%dKB(%dMB),"
            "used=%dKB(%dMB + %dKB),remain=%dKB(%dMB + %dKB),"
            "zone_number=%d,block_number=%d\n",
            mmz_total_size, mmz_total_size / 1024, /* 1024: 1MByte = 1024KByte */
            used_size, used_size / 1024, used_size % 1024, /* 1024: 1MByte = 1024KByte */
            free_size, free_size / 1024, free_size % 1024, /* 1024: 1MByte = 1024KByte */
            zone_number, block_number);
        mmz_total_size = 0;
        zone_number = 0;
        block_number = 0;
    }
    up(&g_mmz_lock);

    return 0;
}

static int media_mem_proc_init(void)
{
    osal_proc_entry_t *proc = NULL;

    proc = osal_create_proc_entry(MEDIA_MEM_NAME, NULL);
    if (proc == NULL) {
        osal_trace(KERN_ERR "Create mmz proc fail!\n");
        return -1;
    }
    proc->read = mmz_seq_show;

    return 0;
}

static void media_mem_proc_exit(void)
{
    osal_remove_proc_entry(MEDIA_MEM_NAME, NULL);
}
#endif /* CONFIG_HI_PROC_SHOW_SUPPORT */

/* this function is used by osal_init.c */
int mem_check_module_param(void)
{
    if (anony != 1) {
        osal_trace("The module param \"anony\" must be 1, but it is %d\n", anony);
        return -1;
    }
    return 0;
}

static void mmz_exit_check(void)
{
    hil_mmz_t *pmmz = NULL;
    struct osal_list_head *p = NULL;
    struct osal_list_head *n = NULL;

    mmz_trace_func();

    list_for_each_safe(p, n, &g_mmz_list) {
        pmmz = list_entry(p, hil_mmz_t, list);
        osal_trace(KERN_WARNING "MMZ force removed: " HIL_MMZ_FMT_S "\n",
               hil_mmz_fmt_arg(pmmz));
        hil_mmz_unregister(pmmz);
        hil_mmz_destroy(pmmz);
    }
}

int media_mem_init(void)
{
    int ret;

    osal_trace(KERN_INFO "Hisilicon Media Memory Zone Manager\n");

    if (anony != 1) {
        osal_trace("The module param \"anony\" must be 1, but it is %d\n", anony);
        return -EINVAL;
    }

    if (strcmp(g_setup_allocator, "cma") == 0) {
#ifdef CONFIG_CMA
        ret = cma_allocator_setopt(&g_the_allocator);
#else
        pr_err("cma is not enabled in kernel, please check!\n");
        return -EINVAL;
#endif
    } else if (strcmp(g_setup_allocator, "hisi") == 0) {
        ret = hisi_allocator_setopt(&g_the_allocator);
    } else {
        osal_trace("The module param \"mmz_allocator\" should be \"cma\" or \"hisi\", but it is \"%s\"\n",
            g_setup_allocator);
        mmz_exit_check();
        return -EINVAL;
    }

    ret = g_the_allocator.init(g_setup_zones);
    if (ret != 0) {
        mmz_exit_check();
        return ret;
    }

#ifdef CONFIG_HI_PROC_SHOW_SUPPORT
    media_mem_proc_init();
#endif

    mmz_userdev_init();

    map_mmz_init(g_mmap_zones);

    return 0;
}

#ifdef MODULE
void media_mem_exit(void)
{
    map_mmz_exit();
    mmz_userdev_exit();
    mmz_exit_check();

#ifdef CONFIG_HI_PROC_SHOW_SUPPORT
    media_mem_proc_exit();
#endif
}
#else
subsys_initcall(media_mem_init);
#endif