/*
 * Copyright (C) 2021 HiSilicon (Shanghai) Technologies CO., LIMITED.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/miscdevice.h>
#include <linux/proc_fs.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/time.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
#include "securec.h"
#include "osal_mmz.h"
#include "hi_osal.h"

#define error_mmz(s...) \
    do { \
        osal_trace(KERN_ERR "mmz_userdev:%s: ", __func__); \
        osal_trace(s); \
    } while (0)
#define warning(s...) \
    do { \
        osal_trace(KERN_WARNING "mmz_userdev:%s: ", __func__); \
        osal_trace(s); \
    } while (0)

struct mmz_userdev_info {
    pid_t pid;
    pid_t mmap_pid;
    struct semaphore sem;
    struct osal_list_head list;
};

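/*
 * Flush one user-dirty region from the D-cache: L1 by virtual address,
 * and (on 32-bit platforms with an outer cache) L2 by physical address.
 */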
static int mmz_flush_dcache_mmb_dirty(struct dirty_area *p_area)
{
    if (p_area == NULL) {
        return -EINVAL;
    }

#ifdef CONFIG_64BIT
    __flush_dcache_area(p_area->dirty_virt_start, p_area->dirty_size);
#else
    /* flush l1 cache, use virt addr */
    __cpuc_flush_dcache_area(p_area->dirty_virt_start, p_area->dirty_size);

#if defined(CONFIG_CACHE_HIL2V200) || defined(CONFIG_CACHE_L2X0)
    /* flush l2 cache, use phys addr */
    outer_flush_range(p_area->dirty_phys_start,
                      p_area->dirty_phys_start + p_area->dirty_size);
#endif
#endif
    return 0;
}

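/*
 * Flush the whole user mapping of an mmb. Only meaningful for cached
 * mappings; non-cached mappings are rejected with -EINVAL.
 */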
static int mmz_flush_dcache_mmb(struct mmb_info *pmi)
{
    hil_mmb_t *mmb = NULL;

    if (pmi == NULL) {
        return -EINVAL;
    }

    mmb = pmi->mmb;
    if ((mmb == NULL) || (pmi->map_cached == 0)) {
        osal_trace("%s->%d,error!\n", __func__, __LINE__);
        return -EINVAL;
    }

#ifdef CONFIG_64BIT
    __flush_dcache_area(pmi->mapped, (size_t)pmi->size);
#else
    /* flush l1 cache, use virt addr */
    __cpuc_flush_dcache_area(pmi->mapped, (size_t)pmi->size);

#if defined(CONFIG_CACHE_HIL2V200) || defined(CONFIG_CACHE_L2X0)
    /* flush l2 cache, use phys addr */
    outer_flush_range(mmb->phys_addr, mmb->phys_addr + mmb->length);
#endif
#endif
    return 0;
}
#ifdef CONFIG_64BIT

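/*
 * Empty stub: the 64-bit kernel provides no flush_cache_all()
 * (set/way maintenance cannot be done safely there), so this only
 * keeps mmz_flush_dcache_all() building. (Assumption: 64-bit here
 * means arm64.)
 */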
void flush_cache_all(void)
{
}

#endif

/*
 * This function must never be called with local IRQs disabled,
 * because the on_each_cpu() macro raises IPI interrupts.
 */
int mmz_flush_dcache_all(void)
{
#ifdef CONFIG_64BIT
    on_each_cpu((smp_call_func_t)flush_cache_all, NULL, 1);
#else
    on_each_cpu((smp_call_func_t)__cpuc_flush_kern_all, NULL, 1);
    outer_flush_all();
#endif /* CONFIG_64BIT */
    return 0;
}

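/*
 * Open handler: allocate a per-fd context. pmu->sem serializes every
 * ioctl issued on this fd, and pmu->list tracks the mmbs it owns.
 */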
static int mmz_userdev_open(struct inode *inode, struct file *file)
{
    struct mmz_userdev_info *pmu = NULL;

    osal_unused(inode);

    pmu = kmalloc(sizeof(*pmu), GFP_KERNEL);
    if (pmu == NULL) {
        error_mmz("alloc mmz_userdev_info failed!\n");
        return -ENOMEM;
    }
    (void)memset_s(pmu, sizeof(*pmu), 0, sizeof(*pmu));
    pmu->pid = current->pid;
    pmu->mmap_pid = 0;
    sema_init(&pmu->sem, 1);
    OSAL_INIT_LIST_HEAD(&pmu->list);

    /* one private context per open */
    file->private_data = (void *)pmu;

    return 0;
}

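/*
 * IOC_MMB_ALLOC: allocate an mmb, record it on the per-fd list and
 * return its physical address. The new entry defaults to
 * PROT_READ/MAP_SHARED; an explicit remap ioctl establishes the
 * actual user mapping.
 */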
static int ioctl_mmb_alloc(struct file *file, struct mmb_info *pmi)
{
    struct mmz_userdev_info *pmu = file->private_data;
    struct mmb_info *new_mmbinfo = NULL;
    hil_mmb_t *mmb = NULL;

    mmb = hil_mmb_alloc(pmi->mmb_name, pmi->size,
                        pmi->align, pmi->gfp, pmi->mmz_name);
    if (mmb == NULL) {
#if defined(KERNEL_BIT_64) && defined(USER_BIT_32)
        error_mmz("hil_mmb_alloc(%s, %llu, 0x%llx, %lu, %s) failed!\n",
                  pmi->mmb_name, pmi->size, pmi->align,
                  pmi->gfp, pmi->mmz_name);
#else
        error_mmz("hil_mmb_alloc(%s, %lu, 0x%lx, %lu, %s) failed!\n",
                  pmi->mmb_name, pmi->size, pmi->align,
                  pmi->gfp, pmi->mmz_name);
#endif
        return -ENOMEM;
    }

    new_mmbinfo = kmalloc(sizeof(*new_mmbinfo), GFP_KERNEL);
    if (new_mmbinfo == NULL) {
        hil_mmb_free(mmb);
        error_mmz("alloc mmb_info failed!\n");
        return -ENOMEM;
    }

    (void)memcpy_s(new_mmbinfo, sizeof(*new_mmbinfo), pmi, sizeof(*new_mmbinfo));
    new_mmbinfo->phys_addr = hil_mmb_phys(mmb);
    new_mmbinfo->mmb = mmb;
    new_mmbinfo->prot = PROT_READ;
    new_mmbinfo->flags = MAP_SHARED;
    osal_list_add_tail(&new_mmbinfo->list, &pmu->list);

    pmi->phys_addr = new_mmbinfo->phys_addr;

    hil_mmb_get(mmb);

    return 0;
}

static int ioctl_mmb_alloc_v2(struct file *file, struct mmb_info *pmi)
{
    struct mmz_userdev_info *pmu = file->private_data;
    struct mmb_info *new_mmbinfo = NULL;
    hil_mmb_t *mmb = NULL;

    mmb = hil_mmb_alloc_v2(pmi->mmb_name, pmi->size, pmi->align,
                           pmi->gfp, pmi->mmz_name, pmi->order);
    if (mmb == NULL) {
#if defined(KERNEL_BIT_64) && defined(USER_BIT_32)
        error_mmz("hil_mmb_alloc_v2(%s, %llu, 0x%llx, %lu, %s) failed!\n",
                  pmi->mmb_name, pmi->size, pmi->align,
                  pmi->gfp, pmi->mmz_name);
#else
        error_mmz("hil_mmb_alloc_v2(%s, %lu, 0x%lx, %lu, %s) failed!\n",
                  pmi->mmb_name, pmi->size, pmi->align,
                  pmi->gfp, pmi->mmz_name);
#endif
        return -ENOMEM;
    }

    new_mmbinfo = kmalloc(sizeof(*new_mmbinfo), GFP_KERNEL);
    if (new_mmbinfo == NULL) {
        hil_mmb_free(mmb);
        error_mmz("alloc mmb_info failed!\n");
        return -ENOMEM;
    }

    (void)memcpy_s(new_mmbinfo, sizeof(*new_mmbinfo), pmi, sizeof(*new_mmbinfo));
    new_mmbinfo->phys_addr = hil_mmb_phys(mmb);
    new_mmbinfo->mmb = mmb;
    new_mmbinfo->prot = PROT_READ;
    new_mmbinfo->flags = MAP_SHARED;
    osal_list_add_tail(&new_mmbinfo->list, &pmu->list);

    pmi->phys_addr = new_mmbinfo->phys_addr;

    hil_mmb_get(mmb);

    return 0;
}

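/*
 * Linear search of the per-fd list for the mmb covering @addr; any
 * address inside [phys_addr, phys_addr + size) matches.
 */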
static struct mmb_info *get_mmbinfo(unsigned long addr,
                                    struct mmz_userdev_info *pmu)
{
    struct mmb_info *p = NULL;

    osal_list_for_each_entry(p, &pmu->list, list) {
        if ((addr >= p->phys_addr) && (addr < (p->phys_addr + p->size))) {
            break;
        }
    }
    if (&p->list == &pmu->list) {
        return NULL;
    }

    return p;
}

static struct mmb_info *get_mmbinfo_safe(unsigned long addr,
                                         struct mmz_userdev_info *pmu)
{
    struct mmb_info *p = NULL;

    p = get_mmbinfo(addr, pmu);
    if (p == NULL) {
        error_mmz("mmb(0x%08lX) not found!\n", addr);
        return NULL;
    }

    return p;
}

static int ioctl_mmb_user_unmap(struct file *file, struct mmb_info *pmi);

static int _usrdev_mmb_free(struct mmb_info *p)
{
    int ret;

    osal_list_del(&p->list);
    hil_mmb_put(p->mmb);
    ret = hil_mmb_free(p->mmb);
    kfree(p);

    return ret;
}

static int ioctl_mmb_free(struct file *file, struct mmb_info *pmi)
{
    int ret;
    struct mmz_userdev_info *pmu = file->private_data;
    struct mmb_info *p = get_mmbinfo_safe(pmi->phys_addr, pmu);

    if (p == NULL) {
        return -EPERM;
    }

    if (p->delayed_free) {
        warning("mmb<%s> is delayed_free, can not free again!\n", p->mmb->name);
        return -EBUSY;
    }

    if ((p->map_ref > 0) || (p->mmb_ref > 0)) {
        warning("mmb<%s> is still in use!\n", p->mmb->name);
        p->delayed_free = 1;
        return -EBUSY;
    }

    ret = _usrdev_mmb_free(p);

    return ret;
}

static int ioctl_mmb_attr(struct file *file, struct mmb_info *pmi)
{
    struct mmz_userdev_info *pmu = file->private_data;
    struct mmb_info *p = NULL;

    if ((p = get_mmbinfo_safe(pmi->phys_addr, pmu)) == NULL) {
        return -EPERM;
    }

    (void)memcpy_s(pmi, sizeof(*pmi), p, sizeof(*pmi));
    return 0;
}

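/*
 * IOC_MMB_USER_REMAP[_CACHED]: map an mmb into the calling process.
 * Repeated remaps only take references; the first one performs the
 * real vm_mmap() with the mmb's physical address as the file offset.
 */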
static int ioctl_mmb_user_remap(struct file *file,
                                struct mmb_info *pmi,
                                int cached)
{
    struct mmz_userdev_info *pmu = file->private_data;
    struct mmb_info *p = NULL;
    unsigned long addr, len, prot, flags, pgoff;

    if ((p = get_mmbinfo_safe(pmi->phys_addr, pmu)) == NULL) {
        return -EPERM;
    }
    /*
     * An mmb may be remapped more than once, but every mapping
     * must use the same cache type.
     */
    if (p->mapped && (p->map_ref > 0)) {
        if (cached != p->map_cached) {
            error_mmz("mmb<%s> already mapped as %s, cannot remap as %s.\n",
                      p->mmb->name,
                      p->map_cached ? "cached" : "non-cached",
                      cached ? "cached" : "non-cached");
            return -EINVAL;
        }

        p->map_ref++;
        p->mmb_ref++;

        hil_mmb_get(p->mmb);

        /*
         * pmi->phys_addr may start at an offset within p's range,
         * so derive the user address from that offset.
         */
        pmi->mapped = p->mapped + (pmi->phys_addr - p->phys_addr);

        return 0;
    }

    if (p->phys_addr & ~PAGE_MASK) {
        return -EINVAL;
    }

    addr = 0;
    len = PAGE_ALIGN(p->size);

    prot = pmi->prot;
    flags = pmi->flags;
    if (prot == 0) {
        prot = p->prot;
    }
    if (flags == 0) {
        flags = p->flags;
    }

    pmu->mmap_pid = current->pid;
    p->map_cached = cached;

    pgoff = p->phys_addr;
    addr = vm_mmap(file, addr, len, prot, flags, pgoff);

    pmu->mmap_pid = 0;

    if (IS_ERR_VALUE((uintptr_t)addr)) {
        error_mmz("vm_mmap(file, 0, %lu, 0x%08lX, 0x%08lX, 0x%08lX) returned 0x%08lX\n",
                  len, prot, flags, pgoff, addr);
        return addr;
    }

    p->mapped = (void *)(uintptr_t)addr;
    p->prot = prot;
    p->flags = flags;

    p->map_ref++;
    p->mmb_ref++;
    hil_mmb_get(p->mmb);

    /*
     * pmi->phys_addr may start at an offset within p's range,
     * so derive the user address from that offset.
     */
    pmi->mapped = p->mapped + (pmi->phys_addr - p->phys_addr);

    return 0;
}

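/*
 * IOC_MMB_USER_UNMAP: drop one mapping reference. The last unmap
 * flushes a cached mapping, vm_munmap()s it, and honours a pending
 * delayed free.
 */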
static int ioctl_mmb_user_unmap(struct file *file, struct mmb_info *pmi)
{
    int ret;
    unsigned long addr, len;
    struct mmb_info *p = NULL;
    struct mmz_userdev_info *pmu = file->private_data;

    p = get_mmbinfo_safe(pmi->phys_addr, pmu);
    if (p == NULL) {
        return -EPERM;
    }

    if (p->mapped == NULL) {
#if defined(KERNEL_BIT_64) && defined(USER_BIT_32)
        warning("mmb(0x%llx) isn't user-mapped!\n", p->phys_addr);
#else
        warning("mmb(0x%lx) isn't user-mapped!\n", p->phys_addr);
#endif
        pmi->mapped = NULL;
        return -EIO;
    }

    if (!((p->map_ref > 0) && (p->mmb_ref > 0))) {
        error_mmz("mmb<%s> has invalid ref count: map_ref=%d, mmb_ref=%d.\n",
                  p->mmb->name, p->map_ref, p->mmb_ref);
        return -EIO;
    }

    p->map_ref--;
    p->mmb_ref--;
    hil_mmb_put(p->mmb);

    if (p->map_ref > 0) {
        return 0;
    }

    addr = (unsigned long)(uintptr_t)p->mapped;
    len = PAGE_ALIGN(p->size);

    /* flush the cache manually before unmapping */
    if (p->map_cached) {
        struct mm_struct *mm = current->mm;
#if LINUX_VERSION_CODE > KERNEL_VERSION(5,10,0)
        down_read(&mm->mmap_lock);
        if (hil_vma_check(addr, addr + len)) {
            error_mmz("mmb<%s> vma is invalid.\n", p->mmb->name);
            up_read(&mm->mmap_lock);
#else
        down_read(&mm->mmap_sem);
        if (hil_vma_check(addr, addr + len)) {
            error_mmz("mmb<%s> vma is invalid.\n", p->mmb->name);
            up_read(&mm->mmap_sem);
#endif
            return -EPERM;
        }
#ifdef CONFIG_64BIT
        __flush_dcache_area((void *)(uintptr_t)addr, (size_t)len);
#else
        __cpuc_flush_dcache_area((void *)(uintptr_t)addr, (size_t)len);
#if defined(CONFIG_CACHE_HIL2V200) || defined(CONFIG_CACHE_L2X0)
        outer_flush_range(p->phys_addr, p->phys_addr + len);
#endif
#endif /* CONFIG_64BIT */
#if LINUX_VERSION_CODE > KERNEL_VERSION(5,10,0)
        up_read(&mm->mmap_lock);
#else
        up_read(&mm->mmap_sem);
#endif
    }

    ret = vm_munmap(addr, len);
    if (!IS_ERR_VALUE((uintptr_t)ret)) {
        p->mapped = NULL;
        pmi->mapped = NULL;
    }

    if (p->delayed_free && (p->map_ref == 0) && (p->mmb_ref == 0)) {
        _usrdev_mmb_free(p);
    }

    return ret;
}

static int ioctl_mmb_virt2phys(struct file *file, struct mmb_info *pmi)
{
    unsigned long virt, phys;
    unsigned long offset = 0;

    osal_unused(file);
    virt = (unsigned long)(uintptr_t)pmi->mapped;
    phys = usr_virt_to_phys(virt);
    if (!phys) {
        return -ENOMEM;
    }

    if (hil_mmb_getby_phys_2(phys, &offset) == NULL) {
        error_mmz("Not mmz alloc memory[0x%lx 0x%lx]! 0x%lx\n", virt, phys, offset);
        return -EINVAL;
    }

    pmi->phys_addr = phys;

    return 0;
}

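/*
 * IOC_MMB_SYS_FLUSH_CACHE: flush an arbitrary user range, after
 * verifying that both its first and last cache lines really translate
 * to the physical range the caller claims.
 */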
int ioctl_mmb_sys_flush_cache(__phys_addr_type__ phy_addr, void *vir_addr, unsigned int size)
{
    unsigned long end_vir_addr;
    unsigned long end_phy_addr;
    unsigned long trans_phy;

    /* check start address */
    trans_phy = usr_virt_to_phys((unsigned long)(uintptr_t)vir_addr);
    if (trans_phy == 0) {
        error_mmz("start virtual address %p is err.\n", vir_addr);
        return -1;
    }

    if ((trans_phy & 0xFFFFFFFFFFFFFFFEULL) != phy_addr) {
        error_mmz("trans_phy 0x%lx and phy_addr 0x%lx are not equal!\n", trans_phy, phy_addr);
        return -1;
    }

    /* check end address */
    end_vir_addr = (unsigned long)(uintptr_t)vir_addr + size - CACHE_LINE_SIZE;
    trans_phy = usr_virt_to_phys(end_vir_addr);
    if (trans_phy == 0) {
        error_mmz("end virtual address 0x%lx is err.\n", end_vir_addr);
        return -1;
    }

    end_phy_addr = phy_addr + size - CACHE_LINE_SIZE;
    if ((trans_phy & 0xFFFFFFFFFFFFFFFEULL) != end_phy_addr) {
        error_mmz("trans_phy 0x%lx and end_phy_addr 0x%lx are not equal!\n", trans_phy, end_phy_addr);
        return -1;
    }

    return hil_mmb_flush_dcache_byaddr_safe(vir_addr, phy_addr, size);
}

int ioctl_mmb_check_mmz_phy_addr(unsigned long long phy_addr, unsigned int len)
{
    return cmpi_check_mmz_phy_addr(phy_addr, len);
}

int ioctl_mmb_invalid_cache_byaddr(void *kvirt, unsigned long phys_addr, unsigned long len)
{
    return hil_mmb_invalid_cache_byaddr(kvirt, phys_addr, len);
}

int ioctl_mmb_check_phy_in_priv(struct file const *file, struct mmb_info const *pmi)
{
    struct mmb_info *p = NULL;
    struct mmz_userdev_info *pmu = file->private_data;

    p = get_mmbinfo_safe(pmi->phys_addr, pmu);
    if (p == NULL) {
        return -1;
    }

    return 0;
}

int ioctl_mmb_user_mmf_map(struct file const *file, struct mmb_info *pmi)
{
    osal_unused(file);
    pmi->mapped = cmpi_remap_nocache(pmi->phys_addr, pmi->size);

    if (pmi->mapped == NULL) {
        return -1;
    }

    return 0;
}

int ioctl_mmb_user_mmf_map_cache(struct file const *file, struct mmb_info *pmi)
{
    osal_unused(file);
    pmi->mapped = cmpi_remap_cached(pmi->phys_addr, pmi->size);

    if (pmi->mapped == NULL) {
        return -1;
    }

    return 0;
}

int ioctl_mmb_user_mmf_unmap(void *virt_addr)
{
    hil_mmf_unmap(virt_addr);
    return 0;
}

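/* Dispatcher for the 'm' ioctl class: alloc/free/map/cache operations on a struct mmb_info. */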
static int mmz_userdev_ioctl_m(struct file *file, unsigned int cmd, struct mmb_info *pmi)
{
    int ret;

    switch (_IOC_NR(cmd)) {
        case _IOC_NR(IOC_MMB_ALLOC):
            ret = ioctl_mmb_alloc(file, pmi);
            break;
        case _IOC_NR(IOC_MMB_ALLOC_V2):
            ret = ioctl_mmb_alloc_v2(file, pmi);
            break;
        case _IOC_NR(IOC_MMB_ATTR):
            ret = ioctl_mmb_attr(file, pmi);
            break;
        case _IOC_NR(IOC_MMB_FREE):
            ret = ioctl_mmb_free(file, pmi);
            break;

        case _IOC_NR(IOC_MMB_USER_REMAP):
            ret = ioctl_mmb_user_remap(file, pmi, 0);
            break;
        case _IOC_NR(IOC_MMB_USER_REMAP_CACHED):
            ret = ioctl_mmb_user_remap(file, pmi, 1);
            break;
        case _IOC_NR(IOC_MMB_USER_UNMAP):
            ret = ioctl_mmb_user_unmap(file, pmi);
            break;
        case _IOC_NR(IOC_MMB_VIRT_GET_PHYS):
            ret = ioctl_mmb_virt2phys(file, pmi);
            break;
        case _IOC_NR(IOC_MMB_SYS_FLUSH_CACHE):
            ret = ioctl_mmb_sys_flush_cache(pmi->phys_addr, pmi->mapped, pmi->size);
            break;
        case _IOC_NR(IOC_MMB_BASE_CHECK_ADDR):
            ret = ioctl_mmb_check_mmz_phy_addr(pmi->phys_addr, pmi->size);
            break;
        case _IOC_NR(IOC_MMB_INVALID_CACHE):
            ret = ioctl_mmb_invalid_cache_byaddr(pmi->mapped, pmi->phys_addr, pmi->size);
            break;
        case _IOC_NR(IOC_MMB_CHECK_PHY_ALLOC):
            ret = ioctl_mmb_check_phy_in_priv(file, pmi);
            break;
        case _IOC_NR(IOC_MMB_MMF_REMAP):
            ret = ioctl_mmb_user_mmf_map(file, pmi);
            break;
        case _IOC_NR(IOC_MMB_MMF_REMAP_CACHED):
            ret = ioctl_mmb_user_mmf_map_cache(file, pmi);
            break;
        case _IOC_NR(IOC_MMB_MMF_UNMAP):
            ret = ioctl_mmb_user_mmf_unmap(pmi->mapped);
            break;

        default:
            error_mmz("invalid ioctl cmd = %08X\n", cmd);
            ret = -EINVAL;
            break;
    }

    return ret;
}

static int mmz_userdev_ioctl_r(struct file *file, unsigned int cmd, struct mmb_info *pmi)
{
    osal_unused(file);
    switch (_IOC_NR(cmd)) {
        case _IOC_NR(IOC_MMB_ADD_REF):
            pmi->mmb_ref++;
            hil_mmb_get(pmi->mmb);
            break;
        case _IOC_NR(IOC_MMB_DEC_REF):
            if (pmi->mmb_ref <= 0) {
                error_mmz("mmb<%s> mmb_ref is %d!\n", pmi->mmb->name, pmi->mmb_ref);
                return -EPERM;
            }
            pmi->mmb_ref--;
            hil_mmb_put(pmi->mmb);
            if (pmi->delayed_free && (pmi->mmb_ref == 0) && (pmi->map_ref == 0)) {
                _usrdev_mmb_free(pmi);
            }
            break;
        default:
            return -EINVAL;
    }

    return 0;
}

/* just for test */
static int mmz_userdev_ioctl_t(struct file *file, unsigned int cmd, struct mmb_info *pmi);

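/*
 * Typical user-space call sequence (a minimal sketch, assuming the
 * IOC_MMB_* command macros and struct mmb_info are exported to user
 * space by the MMZ headers; error handling omitted):
 *
 *   int fd = open("/dev/mmz_userdev", O_RDWR);
 *   struct mmb_info mi = { .size = 4096 };
 *   ioctl(fd, IOC_MMB_ALLOC, &mi);              // sets mi.phys_addr
 *   ioctl(fd, IOC_MMB_USER_REMAP_CACHED, &mi);  // sets mi.mapped
 *   ... use mi.mapped ...
 *   ioctl(fd, IOC_MMB_FLUSH_DCACHE, mi.phys_addr);
 *   ioctl(fd, IOC_MMB_USER_UNMAP, &mi);
 *   ioctl(fd, IOC_MMB_FREE, &mi);
 *   close(fd);
 */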
static long mmz_userdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
    int ret = 0;
    struct mmz_userdev_info *pmu = file->private_data;

    down(&pmu->sem);

    if (_IOC_TYPE(cmd) == 'm') {
        struct mmb_info mi = { 0 };

        if ((_IOC_SIZE(cmd) > sizeof(mi)) || (arg == 0)) {
            error_mmz("_IOC_SIZE(cmd)=%d, arg==0x%08lX\n", _IOC_SIZE(cmd), arg);
            ret = -EINVAL;
            goto __error_exit;
        }
        (void)memset_s(&mi, sizeof(mi), 0, sizeof(mi));
        if (copy_from_user(&mi, (void *)(uintptr_t)arg, _IOC_SIZE(cmd))) {
            osal_trace("\nmmz_userdev_ioctl: copy_from_user error.\n");
            ret = -EFAULT;
            goto __error_exit;
        }
        mi.mmz_name[HIL_MMZ_NAME_LEN - 1] = '\0';
        mi.mmb_name[HIL_MMB_NAME_LEN - 1] = '\0';
        ret = mmz_userdev_ioctl_m(file, cmd, &mi);
        if (!ret && (cmd & IOC_OUT)) {
            if (copy_to_user((void *)(uintptr_t)arg, &mi, _IOC_SIZE(cmd))) {
                osal_trace("\nmmz_userdev_ioctl: copy_to_user error.\n");
                ret = -EFAULT;
                goto __error_exit;
            }
        }
    } else if (_IOC_TYPE(cmd) == 'r') {
        struct mmb_info *pmi = NULL;

        pmi = get_mmbinfo_safe(arg, pmu);
        if (pmi == NULL) {
            ret = -EPERM;
            goto __error_exit;
        }

        ret = mmz_userdev_ioctl_r(file, cmd, pmi);
    } else if (_IOC_TYPE(cmd) == 'c') {
        struct mmb_info *pmi = NULL;
        if (arg == 0) {
            mmz_flush_dcache_all();
            goto __error_exit;
        }

        pmi = get_mmbinfo_safe(arg, pmu);
        if (pmi == NULL) {
            ret = -EPERM;
            goto __error_exit;
        }

        switch (_IOC_NR(cmd)) {
            case _IOC_NR(IOC_MMB_FLUSH_DCACHE):
                mmz_flush_dcache_mmb(pmi);
                break;
            default:
                ret = -EINVAL;
                break;
        }
    } else if (_IOC_TYPE(cmd) == 'd') {
        hil_mmb_t *mmb = NULL;
        struct mmb_info *pmi = NULL;
        struct dirty_area area;
        __phys_addr_type__ orig_addr;
        unsigned long virt_addr, offset;
        struct mm_struct *mm = current->mm;

        if ((_IOC_SIZE(cmd) != sizeof(area)) || (arg == 0)) {
            error_mmz("_IOC_SIZE(cmd)=%d, arg==0x%08lx\n", _IOC_SIZE(cmd), arg);
            ret = -EINVAL;
            goto __error_exit;
        }
        (void)memset_s(&area, sizeof(area), 0, sizeof(area));
        if (copy_from_user(&area, (void *)(uintptr_t)arg, _IOC_SIZE(cmd))) {
            osal_trace(KERN_WARNING "\nmmz_userdev_ioctl: copy_from_user error.\n");
            ret = -EFAULT;
            goto __error_exit;
        }

        mmb = hil_mmb_getby_phys_2(area.dirty_phys_start, &offset);
        if (mmb == NULL) {
#if defined(KERNEL_BIT_64) && defined(USER_BIT_32)
            error_mmz("dirty_phys_addr=0x%llx\n", area.dirty_phys_start);
#else
            error_mmz("dirty_phys_addr=0x%lx\n", area.dirty_phys_start);
#endif
            ret = -EFAULT;
            goto __error_exit;
        }

        pmi = get_mmbinfo_safe(mmb->phys_addr, pmu);
        if (pmi == NULL) {
            ret = -EPERM;
            goto __error_exit;
        }
        if ((uintptr_t)area.dirty_virt_start != (uintptr_t)pmi->mapped + offset) {
            osal_trace(KERN_WARNING "dirty_virt_start addr is not consistent with dirty_phys_start addr!\n");
            ret = -EFAULT;
            goto __error_exit;
        }
        if (area.dirty_phys_start + area.dirty_size > mmb->phys_addr + mmb->length) {
            osal_trace(KERN_WARNING "\ndirty area overflow!\n");
            ret = -EFAULT;
            goto __error_exit;
        }

#if LINUX_VERSION_CODE > KERNEL_VERSION(5,10,0)
        down_read(&mm->mmap_lock);
#else
        down_read(&mm->mmap_sem);
#endif

        if (hil_vma_check((uintptr_t)area.dirty_virt_start, (uintptr_t)area.dirty_virt_start + area.dirty_size)) {
            osal_trace(KERN_WARNING "\ndirty area[0x%lx,0x%lx] overflow!\n",
                       (unsigned long)(uintptr_t)area.dirty_virt_start,
                       (unsigned long)(uintptr_t)area.dirty_virt_start + area.dirty_size);
            ret = -EFAULT;
#if LINUX_VERSION_CODE > KERNEL_VERSION(5,10,0)
            up_read(&mm->mmap_lock);
#else
            up_read(&mm->mmap_sem);
#endif
            goto __error_exit;
        }

        /* align the dirty area to whole cache lines */
        orig_addr = area.dirty_phys_start;
        area.dirty_phys_start &= ~(CACHE_LINE_SIZE - 1);
        virt_addr = (unsigned long)(uintptr_t)area.dirty_virt_start;
        virt_addr &= ~(CACHE_LINE_SIZE - 1);
        area.dirty_virt_start = (void *)(uintptr_t)virt_addr;
        area.dirty_size = (area.dirty_size + (orig_addr - area.dirty_phys_start) +
                           (CACHE_LINE_SIZE - 1)) & ~(CACHE_LINE_SIZE - 1);

        mmz_flush_dcache_mmb_dirty(&area);
#if LINUX_VERSION_CODE > KERNEL_VERSION(5,10,0)
        up_read(&mm->mmap_lock);
#else
        up_read(&mm->mmap_sem);
#endif
    } else if (_IOC_TYPE(cmd) == 't') {
        struct mmb_info mi;

        if ((_IOC_SIZE(cmd) != sizeof(mi)) || (arg == 0)) {
            error_mmz("_IOC_SIZE(cmd)=%d, arg==0x%08lx\n", _IOC_SIZE(cmd), arg);
            ret = -EINVAL;
            goto __error_exit;
        }

        (void)memset_s(&mi, sizeof(mi), 0, sizeof(mi));
        if (copy_from_user(&mi, (void *)(uintptr_t)arg, sizeof(mi))) {
            osal_trace("\nmmz_userdev_ioctl: copy_from_user error.\n");
            ret = -EFAULT;
            goto __error_exit;
        }

        if ((get_mmbinfo_safe(mi.phys_addr, pmu)) == NULL) {
            ret = -EPERM;
            goto __error_exit;
        }
        ret = mmz_userdev_ioctl_t(file, cmd, &mi);
    } else {
        ret = -EINVAL;
    }

__error_exit:

    up(&pmu->sem);

    return ret;
}


#ifdef CONFIG_COMPAT
static long compat_mmz_userdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
    return mmz_userdev_ioctl(file, cmd, (unsigned long)(uintptr_t)compat_ptr(arg));
}
#endif

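/*
 * mmap handler: the file offset encodes a physical address. The range
 * is checked against this fd's mmbs (or, failing that, against MMZ at
 * large), then mapped page-by-page via vm_insert_page() when struct
 * pages exist, or with remap_pfn_range() otherwise.
 */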
int mmz_userdev_mmap(struct file *file, struct vm_area_struct *vma)
{
    struct mmb_info *p = NULL;
    struct mmz_userdev_info *pmu = file->private_data;
    unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
    unsigned long size = vma->vm_end - vma->vm_start;
    hil_mmb_t *mmb = NULL;
    int mmb_cached = 0;

    p = get_mmbinfo(offset, pmu);
    if (p == NULL) {
        unsigned long mmb_offset;
        mmb = hil_mmb_getby_phys_2(offset, &mmb_offset);
        if (mmb == NULL) {
            /* Allow mmap of MMZ memory allocated by another core. */
            if (hil_map_mmz_check_phys(offset, size)) {
                return -EPERM;
            }
        } else {
            mmb_cached = mmb->flags & HIL_MMB_MAP2KERN_CACHED;
        }
    } else {
        mmb = p->mmb;

        if (p->mapped != NULL) {
            if (p->map_cached) {
                error_mmz("mmb(0x%08lX) has already been mapped with cache_type %u?!\n",
                          offset, p->map_cached);
                return -EIO;
            }
        }
        mmb_cached = p->map_cached;
    }

    if (mmb != NULL && mmb->length - (offset - mmb->phys_addr) < size) {
        error_mmz("mmap failed: size %08lX is too large\n", size);
        return -EINVAL;
    }

    if (file->f_flags & O_SYNC) {
#ifdef CONFIG_64BIT
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | PTE_WRITE | PTE_DIRTY);
#endif
        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
    } else {
#ifdef CONFIG_64BIT
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | PTE_WRITE | PTE_DIRTY);
#else
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | L_PTE_PRESENT | L_PTE_YOUNG
                                     | L_PTE_DIRTY | L_PTE_MT_DEV_CACHED);
#endif
        if (mmb_cached == 0) {
            /*
             * Map non-cached memory as <normal non-cacheable>
             * (write-combine) rather than <strongly ordered>,
             * to improve DDR access performance.
             */
            vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        }
    }

    if (pfn_valid(vma->vm_pgoff)) {
        unsigned long start = vma->vm_start;
        unsigned long pfn = vma->vm_pgoff;

        while (size) {
            if (pfn_valid(pfn)) {
                if (vm_insert_page(vma, start, pfn_to_page(pfn))) {
                    error_mmz("insert page failed.\n");
                    break;
                }
            } else {
#ifdef CONFIG_64BIT
                error_mmz("vm map failed for phys address(0x%llx)\n", __pfn_to_phys(pfn));
#else
                error_mmz("vm map failed for phys address(0x%x)\n", __pfn_to_phys(pfn));
#endif
            }

            start += PAGE_SIZE;
            size = (size < PAGE_SIZE) ? 0 : (size - PAGE_SIZE);
            pfn++;
        }
    } else {
        if (size == 0) {
            return -EPERM;
        }
        /* remap_pfn_range() will mark the range as VM_IO */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            vma->vm_end - vma->vm_start,
                            vma->vm_page_prot)) {
            return -EAGAIN;
        }
    }

    return 0;
}

static int mmz_userdev_release(struct inode *inode, struct file *file)
{
    struct mmz_userdev_info *pmu = file->private_data;
    struct mmb_info *p = NULL;
    struct mmb_info *n = NULL;

    osal_unused(inode);
    list_for_each_entry_safe(p, n, &pmu->list, list) {
        error_mmz("MMB LEAK(pid=%d): 0x%lX, %lu bytes, '%s'\n",
                  pmu->pid, hil_mmb_phys(p->mmb),
                  hil_mmb_length(p->mmb),
                  hil_mmb_name(p->mmb));

        /*
         * No need to release the mapped area here;
         * the kernel tears down the address space for us.
         */
        if (p->mapped != NULL) {
#if defined(KERNEL_BIT_64) && defined(USER_BIT_32)
            warning("mmb<0x%llx> mapped to userspace 0x%pK will be unmapped!\n",
                    p->phys_addr, p->mapped);
#else
            warning("mmb<0x%lx> mapped to userspace 0x%pK will be unmapped!\n",
                    p->phys_addr, p->mapped);
#endif
        }
        for (; p->mmb_ref > 0; p->mmb_ref--) {
            hil_mmb_put(p->mmb);
        }
        _usrdev_mmb_free(p);
    }

    file->private_data = NULL;
    kfree(pmu);
    pmu = NULL;

    return 0;
}

static struct file_operations g_mmz_userdev_fops = {
    .owner = THIS_MODULE,
    .open = mmz_userdev_open,
    .release = mmz_userdev_release,
#ifdef CONFIG_COMPAT
    .compat_ioctl = compat_mmz_userdev_ioctl,
#endif
    .unlocked_ioctl = mmz_userdev_ioctl,
    .mmap = mmz_userdev_mmap,
};

static struct miscdevice g_mmz_userdev = {
    .minor = MISC_DYNAMIC_MINOR,
    .fops = &g_mmz_userdev_fops,
    .name = "mmz_userdev"
};

int mmz_userdev_init(void)
{
    int ret;
    ret = misc_register(&g_mmz_userdev);
    if (ret) {
        osal_trace("failed to register mmz_userdev!\n");
        return -1;
    }

    return 0;
}

void mmz_userdev_exit(void)
{
    misc_deregister(&g_mmz_userdev);
}

/* Test func */
static int mmz_userdev_ioctl_t(struct file *file, unsigned int cmd, struct mmb_info *pmi)
{
    osal_unused(file);
    osal_unused(pmi);
    osal_unused(cmd);
    return 0;
}