/*
 * Copyright (C) 2021 HiSilicon (Shanghai) Technologies CO., LIMITED.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/version.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/list.h>
#include <asm/cacheflush.h>

#include <asm/memory.h>
#if LINUX_VERSION_CODE > KERNEL_VERSION(5,10,0)
#include <linux/dma-map-ops.h>
#else
#include <linux/dma-contiguous.h>
#endif
#include <linux/dma-mapping.h>
#ifndef CONFIG_64BIT
#include <asm/highmem.h>
#endif
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <linux/vmalloc.h>
#include "securec.h"
#include "allocator.h"

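/*
 * Mirror of the platform CMA zone description returned by
 * hisi_get_cma_zone(); the embedded struct device is used as the device
 * argument for dma_alloc_from_contiguous()/dma_release_from_contiguous().
 */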
struct cma_zone {
    struct device pdev;
    char name[NAME_LEN_MAX];
    unsigned long gfp;
    unsigned long phys_start;
    unsigned long nbytes;
    unsigned int alloc_type;
    unsigned long block_align;
};

extern struct osal_list_head g_mmz_list;

extern int anony;

unsigned long max_malloc_size = 0x40000000UL;

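/*
 * 32-bit only: CMA pages stay in the kernel linear mapping, so remapping
 * them with a different cache attribute means rewriting the linear-map
 * PTEs and flushing the TLB. __dma_remap() walks the range with
 * apply_to_page_range() and __dma_update_pte(), then flushes. The 64-bit
 * build keeps this block empty.
 */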
#ifndef CONFIG_64BIT
#if LINUX_VERSION_CODE > KERNEL_VERSION(5,10,0)
static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data)
#else
static int __dma_update_pte(pte_t *pte, pgtable_t token,
                            unsigned long addr, void *data)
#endif
{
    struct page *page = virt_to_page(addr);
    pgprot_t prot = *(pgprot_t *)data;

#if LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0)
    osal_unused(token);
#endif
    set_pte_ext(pte, mk_pte(page, prot), 0);

    return 0;
}

void __dma_clear_buffer(struct page *page, size_t size)
{
    /* intentionally a no-op on 32-bit builds */
    osal_unused(page);
    osal_unused(size);
}

extern void hisi_flush_tlb_kernel_range(unsigned long start, unsigned long end);
static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
    unsigned long start = (unsigned long)page_address(page);
    unsigned long end = start + size;

    apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
    dsb();
    hisi_flush_tlb_kernel_range(start, end);
}
#else /* CONFIG_64BIT */

#endif /* CONFIG_64BIT */

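/* Insert a new mmb into its zone's list, kept ascending by physical address. */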
static int do_mmb_alloc(hil_mmb_t *mmb)
{
    hil_mmb_t *p = NULL;
    mmz_trace_func();

    /* add mmb sorted by physical address */
    osal_list_for_each_entry(p, &mmb->zone->mmb_list, list)
    {
        if (mmb->phys_addr < p->phys_addr) {
            break;
        }
        if (mmb->phys_addr == p->phys_addr) {
            osal_trace(KERN_ERR "ERROR: cma allocator bad in %s(%s, %d)\n",
                       mmb->zone->name, __func__, __LINE__);
        }
    }
    osal_list_add(&mmb->list, p->list.prev);

    mmz_trace(1, HIL_MMB_FMT_S, hil_mmb_fmt_arg(mmb));

    return 0;
}

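/*
 * Allocate a memory block: walk the candidate zones (optionally pinned to
 * _user_mmz), grab 'count' contiguous pages from the zone's CMA area, zero
 * them, and track the block with a freshly allocated hil_mmb_t.
 */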
static hil_mmb_t *__mmb_alloc(const char *name,
                              unsigned long size,
                              unsigned long align,
                              unsigned long gfp,
                              const char *mmz_name,
                              hil_mmz_t *_user_mmz)
{
    hil_mmz_t *mmz = NULL;
    hil_mmb_t *mmb = NULL;
    unsigned long order;
    size_t count;
    struct page *page = NULL;
    unsigned long fixed_start = 0;
    hil_mmz_t *fixed_mmz = NULL;
    errno_t err_value;

    mmz_trace_func();

    /* reject empty requests and anything above max_malloc_size (1GB by default) */
    if (size == 0 || size > max_malloc_size) {
        return NULL;
    }

    if (align == 0) {
        align = MMZ_GRAIN;
    }

    size = mmz_grain_align(size);
    order = get_order(size);
    count = size >> PAGE_SHIFT;

    mmz_trace(1, "anonymous=%s,size=%luKB,align=%lu", mmz_name, size / SZ_1K, align);

    begin_list_for_each_mmz(mmz, gfp, mmz_name)
        if ((_user_mmz != NULL) && (_user_mmz != mmz)) {
            continue;
        }

        page = dma_alloc_from_contiguous(mmz->cma_dev, count, order, 0);
        if (page == NULL) {
            break;
        }
        fixed_mmz = mmz;
        fixed_start = page_to_phys(page);
        break;
    end_list_for_each_mmz()

    if (fixed_mmz == NULL) {
        return NULL;
    }

    if (page != NULL) {
#ifndef CONFIG_64BIT
        __dma_clear_buffer(page, size);
#else
        (void)memset_s(page_address(page), size, 0, size);
        __flush_dcache_area(page_address(page), size);
#endif
    }

    mmb = kmalloc(sizeof(hil_mmb_t), GFP_KERNEL);
    if (mmb == NULL) {
        goto cma_free;
    }
    (void)memset_s(mmb, sizeof(hil_mmb_t), 0, sizeof(hil_mmb_t));

    mmb->zone = fixed_mmz;
    mmb->phys_addr = fixed_start;
    mmb->length = size;

    if (name != NULL) {
        err_value = strncpy_s(mmb->name, HIL_MMB_NAME_LEN, name, HIL_MMB_NAME_LEN - 1);
    } else {
        err_value = strncpy_s(mmb->name, HIL_MMB_NAME_LEN, "<null>", HIL_MMB_NAME_LEN - 1);
    }

    if ((err_value != EOK) || do_mmb_alloc(mmb)) {
        goto mmb_free;
    }

    return mmb;

mmb_free:
    kfree(mmb);
    mmb = NULL;
cma_free:
    dma_release_from_contiguous(mmz->cma_dev, page, count);
    return NULL;
}

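/*
 * v2 of __mmb_alloc: additionally honours the zone's allocation policy
 * (SLAB_ALLOC rounds the size up to a power of two, EQ_BLOCK_ALLOC aligns
 * it to the zone's block size) and records the caller-supplied order.
 */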
static hil_mmb_t *__mmb_alloc_v2(const char *name,
                                 unsigned long size,
                                 unsigned long align,
                                 unsigned long gfp,
                                 const char *mmz_name,
                                 hil_mmz_t *_user_mmz,
                                 unsigned int order)
{
    hil_mmz_t *mmz = NULL;
    hil_mmb_t *mmb = NULL;
    unsigned int i;
    unsigned long cma_order;
    size_t count = 0;
    struct page *page = NULL;
    unsigned long fixed_start = 0;
    hil_mmz_t *fixed_mmz = NULL;
    errno_t err_value;

    mmz_trace_func();

    if ((size == 0) || (size > max_malloc_size)) {
        return NULL;
    }

    if (align == 0) {
        align = 1;
    }

    size = mmz_grain_align(size);
    mmz_trace(1, "size=%luKB, align=%lu", size / SZ_1K, align);
    begin_list_for_each_mmz(mmz, gfp, mmz_name)
        if ((_user_mmz != NULL) && (_user_mmz != mmz)) {
            continue;
        }

        if (mmz->alloc_type == SLAB_ALLOC) {
            /* round a non-power-of-two size up to the next power of two */
            if ((size - 1) & size) {
                for (i = 1; i < 32; i++) { /* 32: the max size is 2^(32-1) */
                    if (!(size >> i)) {
                        size = 1UL << i;
                        break;
                    }
                }
            }
        } else if (mmz->alloc_type == EQ_BLOCK_ALLOC) {
            size = mmz_align2(size, mmz->block_align);
        }

        cma_order = get_order(size);
        /* recompute the page count after the size adjustments above */
        count = size >> PAGE_SHIFT;

        page = dma_alloc_from_contiguous(mmz->cma_dev, count, cma_order, 0);
        if (page == NULL) {
            return NULL;
        }
        fixed_mmz = mmz;
        fixed_start = page_to_phys(page);
        break;
    end_list_for_each_mmz()

    if (fixed_mmz == NULL) {
        return NULL;
    }

    mmb = kmalloc(sizeof(hil_mmb_t), GFP_KERNEL);
    if (mmb == NULL) {
        goto cma_free;
    }
    (void)memset_s(mmb, sizeof(hil_mmb_t), 0, sizeof(hil_mmb_t));

    mmb->zone = fixed_mmz;
    mmb->phys_addr = fixed_start;
    mmb->length = size;
    mmb->order = order;

    if (name != NULL) {
        err_value = strncpy_s(mmb->name, HIL_MMB_NAME_LEN, name, HIL_MMB_NAME_LEN - 1);
    } else {
        err_value = strncpy_s(mmb->name, HIL_MMB_NAME_LEN, "<null>", HIL_MMB_NAME_LEN - 1);
    }
    if ((err_value != EOK) || do_mmb_alloc(mmb)) {
        goto mmb_free;
    }
    return mmb;

mmb_free:
    kfree(mmb);
    mmb = NULL;
cma_free:
    dma_release_from_contiguous(mmz->cma_dev, page, count);

    return NULL;
}

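/* Return a block's pages to CMA and drop it from the zone's mmb list. */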
static void __mmb_free(hil_mmb_t *mmb)
{
    size_t count = mmb->length >> PAGE_SHIFT;
    struct page *page = phys_to_page(mmb->phys_addr);
    hil_mmz_t *mmz = mmb->zone;

    /* a cached kernel mapping must be flushed before the pages go back to CMA */
    if (mmb->flags & HIL_MMB_MAP2KERN_CACHED) {
#ifdef CONFIG_64BIT
        __flush_dcache_area((void *)mmb->kvirt, (size_t)mmb->length);
#else
        __cpuc_flush_dcache_area((void *)mmb->kvirt, (size_t)mmb->length);
        outer_flush_range(mmb->phys_addr, mmb->phys_addr + mmb->length);
#endif
    }

    dma_release_from_contiguous(mmz->cma_dev, page, count);

    osal_list_del(&mmb->list);
    kfree(mmb);
}

static int __mmb_unmap(hil_mmb_t *mmb);
#define MAX_KMALLOC_PAGENR  4
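/*
 * Map a block into the kernel: build a page array and remap it through
 * vmalloc space with the requested cache attribute. Repeated mappings of
 * the same block only bump map_ref and must use the same attribute.
 */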
static void *__mmb_map2kern(hil_mmb_t *mmb, int cached)
{
    pgprot_t prot;
    struct page *page = phys_to_page(mmb->phys_addr);

    if (mmb->flags & HIL_MMB_MAP2KERN) {
        /* an existing mapping can only be reused with the same cache attribute */
        if ((!!cached * HIL_MMB_MAP2KERN_CACHED) != (mmb->flags & HIL_MMB_MAP2KERN_CACHED)) {
            osal_trace(KERN_ERR "mmb<%s> has been kernel-mapped %s, can not be re-mapped as %s.",
                mmb->name, (mmb->flags & HIL_MMB_MAP2KERN_CACHED) ? "cached" : "non-cached",
                (cached) ? "cached" : "non-cached");
            return NULL;
        }
        mmb->map_ref++;
        return mmb->kvirt;
    }

#ifdef CONFIG_64BIT
    if (cached) {
        mmb->flags |= HIL_MMB_MAP2KERN_CACHED;
        prot = PAGE_KERNEL;
    } else {
        mmb->flags &= ~HIL_MMB_MAP2KERN_CACHED;
        prot = __pgprot(PROT_NORMAL_NC);
    }
#else
    if (cached) {
        mmb->flags |= HIL_MMB_MAP2KERN_CACHED;
        prot = pgprot_kernel;
    } else {
        mmb->flags &= ~HIL_MMB_MAP2KERN_CACHED;
        prot = pgprot_noncached(pgprot_kernel);
    }

    /* on 32-bit, also retune the linear-map PTEs to match the new attribute */
    __dma_remap(page, mmb->length, prot);
#endif
    /*
     * Map into vmalloc space.
     */
    {
        unsigned int i;
        struct page **pages = NULL;
        unsigned int pagesnr = mmb->length / PAGE_SIZE;
        struct page *tmp = page;
        int array_size = sizeof(struct page *) * pagesnr;
        struct vm_struct *area = NULL;

        /*
         * Note: mmb->length may be very large in some cases (for example,
         * more than one gigabyte), and array_size would then be very large
         * as well. So don't use kmalloc here.
         */
        pages = vmalloc(array_size);
        if (pages == NULL) {
            osal_trace(KERN_ERR "ptr array(0x%x) vmalloc failed.\n", array_size);
            return NULL;
        }

        for (i = 0; i < pagesnr; i++) {
            *(pages + i) = tmp;
            tmp++;
        }

#if LINUX_VERSION_CODE > KERNEL_VERSION(5,8,0)
        area = __get_vm_area_caller((pagesnr << PAGE_SHIFT), VM_MAP, VMALLOC_START, VMALLOC_END,
                                    __builtin_return_address(0));
#else
        area = __get_vm_area((pagesnr << PAGE_SHIFT), VM_MAP, VMALLOC_START, VMALLOC_END);
#endif
        if (area == NULL) {
            osal_trace(KERN_ERR "get vm area from high failed!\n");
            vfree(pages);
            return NULL;
        }
#if LINUX_VERSION_CODE > KERNEL_VERSION(5,8,0)
        if (map_kernel_range((unsigned long)area->addr, get_vm_area_size(area), prot, pages)) {
#else
        if (map_vm_area(area, prot, pages)) {
#endif
            osal_trace(KERN_ERR "map vm area to mmz pages failed!\n");
            vunmap(area->addr);
            vfree(pages);
            return NULL;
        }
        mmb->kvirt = area->addr;
        vfree(pages);
    }

    if (mmb->kvirt == NULL) {
        /* you should never get here */
        osal_trace(KERN_ERR "mmb[0x%lx, 0x%lx] map to kernel failed\n",
                   mmb->phys_addr, mmb->length);
        return NULL;
    }

    mmb->flags |= HIL_MMB_MAP2KERN;
    mmb->map_ref++;

    return mmb->kvirt;
}

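/*
 * Drop one kernel-mapping reference; the vmalloc mapping is torn down only
 * when the last reference goes away, and a block already marked
 * HIL_MMB_RELEASED is freed here as well.
 */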
static int __mmb_unmap(hil_mmb_t *mmb)
{
    int ref;

    if (mmb->flags & HIL_MMB_MAP2KERN_CACHED) {
#ifdef CONFIG_64BIT
        __flush_dcache_area((void *)mmb->kvirt, (size_t)mmb->length);
#else
        __cpuc_flush_dcache_area((void *)mmb->kvirt, (size_t)mmb->length);
        outer_flush_range(mmb->phys_addr, mmb->phys_addr + mmb->length);
#endif
    }

    if (mmb->flags & HIL_MMB_MAP2KERN) {
        ref = --mmb->map_ref;
        if (mmb->map_ref != 0) {
            return ref;
        }
    }

    /* unmap from vmalloc space */
    vunmap(mmb->kvirt);
    mmb->kvirt = NULL;
    mmb->flags &= ~HIL_MMB_MAP2KERN;
    mmb->flags &= ~HIL_MMB_MAP2KERN_CACHED;

    if ((mmb->flags & HIL_MMB_RELEASED) && (mmb->phy_ref == 0)) {
        __mmb_free(mmb);
    }

    return 0;
}

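/*
 * Map an arbitrary physical range (not necessarily a tracked mmb) into
 * vmalloc space; the counterpart of __mmf_unmap() below.
 */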
static void *__mmf_map(phys_addr_t phys, int len, int cache)
{
    struct page **pages = NULL;
    unsigned int pagesnr = len / PAGE_SIZE;
    unsigned int i;
    void *virt = NULL;
    pgprot_t prot;
    struct page *page = phys_to_page(phys);
    struct page *tmp = page;
    int array_size = sizeof(struct page *) * pagesnr;
    struct vm_struct *area = NULL;

#ifdef CONFIG_64BIT
    if (cache) {
        prot = PAGE_KERNEL;
    } else {
        prot = __pgprot(PROT_NORMAL_NC);
    }
#else
    if (cache) {
        prot = pgprot_kernel;
    } else {
        prot = pgprot_noncached(pgprot_kernel);
    }
#endif

    /*
     * Map into vmalloc space.
     *
     * Note: the region may be very large in some cases (for example,
     * more than one gigabyte), and array_size would then be very large
     * as well. So don't use kmalloc here.
     */
    pages = vmalloc(array_size);
    if (pages == NULL) {
        osal_trace(KERN_ERR "ptr vmalloc %d failed.\n", array_size);
        return NULL;
    }

    for (i = 0; i < pagesnr; i++) {
        *(pages + i) = tmp;
        tmp++;
    }

#if LINUX_VERSION_CODE > KERNEL_VERSION(5,8,0)
    area = __get_vm_area_caller((pagesnr << PAGE_SHIFT), VM_MAP, VMALLOC_START, VMALLOC_END,
                                __builtin_return_address(0));
#else
    area = __get_vm_area((pagesnr << PAGE_SHIFT), VM_MAP, VMALLOC_START, VMALLOC_END);
#endif
    if (area == NULL) {
        osal_trace(KERN_ERR "get vm area from high failed!\n");
        vfree(pages);
        pages = NULL;
        return NULL;
    }

#if LINUX_VERSION_CODE > KERNEL_VERSION(5,8,0)
    if (map_kernel_range((unsigned long)area->addr, get_vm_area_size(area), prot, pages)) {
#else
    if (map_vm_area(area, prot, pages)) {
#endif
        osal_trace(KERN_ERR "map vm area to mmz pages failed!\n");
        vunmap(area->addr);
        vfree(pages);
        pages = NULL;
        return NULL;
    }
    virt = area->addr;
    vfree(pages);
    pages = NULL;

    return virt;
}

/*
 * This function was added to meet extra requirements from some
 * customers; its implementation does not strictly follow our
 * original design.
 */
static void __mmf_unmap(void *virt)
{
    unsigned long vaddr = (unsigned long)(uintptr_t)virt;
    if ((vaddr >= VMALLOC_START) && (vaddr < VMALLOC_END)) {
        vunmap(virt);
    }
}

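/*
 * Parse the zone setup string, e.g. "anonymous,0,0x80000000,0x10000000"
 * (illustrative values), with multiple zone descriptions separated by ':'.
 * Each named zone is matched against the platform CMA zones and registered
 * as an MMZ.
 */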
static int __allocator_init(char *s)
{
#ifdef CONFIG_CMA
    hil_mmz_t *zone = NULL;
    char *line = NULL;
    struct cma_zone *cma_zone = NULL;

    while ((line = strsep(&s, ":")) != NULL) {
        int i;
        char *argv[6];
        extern struct cma_zone *hisi_get_cma_zone(const char *name);
        /*
         * Each "line" is a comma-separated zone description, e.g.
         * "<mmz_name>,<gfp>,<phys_start>,<size>[,<alloc_type>,<block_align>]".
         * Only the zone name (argv[0]) is used to look up the platform CMA
         * zone; the field count (4 or 6) selects the plain or v2 zone
         * constructor below.
         */
        for (i = 0; (argv[i] = strsep(&line, ",")) != NULL;) {
            if (++i == ARRAY_SIZE(argv)) {
                break;
            }
        }

        cma_zone = hisi_get_cma_zone(argv[0]);
        if (cma_zone == NULL) {
            osal_trace(KERN_ERR "can't get cma zone info:%s\n", argv[0]);
            continue;
        }

        if (i == 4) { /* 4: four args parsed */
            zone = hil_mmz_create("null", 0, 0, 0);
            if (zone == NULL) {
                continue;
            }

            if (strncpy_s(zone->name, HIL_MMZ_NAME_LEN, argv[0], HIL_MMZ_NAME_LEN - 1) != EOK) {
                osal_trace("%s - strncpy_s failed!\n", __FUNCTION__);
                hil_mmz_destroy(zone);
                continue;
            }

            osal_trace("cma zone gfp 0x%lx, phys 0x%lx, nbytes 0x%lx\n",
                cma_zone->gfp, cma_zone->phys_start, cma_zone->nbytes);
            zone->gfp = cma_zone->gfp;
            zone->phys_start = cma_zone->phys_start;
            zone->nbytes = cma_zone->nbytes;
            zone->cma_dev = &cma_zone->pdev;
            if (zone->nbytes > max_malloc_size) {
                max_malloc_size = zone->nbytes;
            }
        } else if (i == 6) { /* 6: six args parsed */
            zone = hil_mmz_create_v2("null", 0, 0, 0, 0, 0);
            if (zone == NULL) {
                continue;
            }

            if (strncpy_s(zone->name, HIL_MMZ_NAME_LEN, argv[0], HIL_MMZ_NAME_LEN - 1) != EOK) {
                osal_trace("%s - strncpy_s failed!\n", __FUNCTION__);
                hil_mmz_destroy(zone);
                continue;
            }

            zone->gfp = cma_zone->gfp;
            zone->phys_start = cma_zone->phys_start;
            zone->nbytes = cma_zone->nbytes;
            zone->alloc_type = cma_zone->alloc_type;
            zone->block_align = cma_zone->block_align;
            zone->cma_dev = &cma_zone->pdev;
            if (zone->nbytes > max_malloc_size) {
                max_malloc_size = zone->nbytes;
            }
        } else {
            osal_trace(KERN_ERR "Input parameter num incorrect!\n");
            continue;
        }

        if (hil_mmz_register(zone)) {
            osal_trace(KERN_WARNING "Add MMZ failed: " HIL_MMZ_FMT_S "\n",
                       hil_mmz_fmt_arg(zone));
            hil_mmz_destroy(zone);
        }

        zone = NULL;
    }
#endif
    return 0;
}

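/* Wire the CMA-backed implementations into the generic mmz allocator ops. */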
int cma_allocator_setopt(struct mmz_allocator *allocator)
{
    allocator->init = __allocator_init;
    allocator->mmb_alloc = __mmb_alloc;
    allocator->mmb_alloc_v2 = __mmb_alloc_v2;
    allocator->mmb_map2kern = __mmb_map2kern;
    allocator->mmb_unmap = __mmb_unmap;
    allocator->mmb_free = __mmb_free;
    allocator->mmf_map = __mmf_map;
    allocator->mmf_unmap = __mmf_unmap;
    return 0;
}