/*
 * Copyright (C) 2022 HiSilicon (Shanghai) Technologies CO., LIMITED.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/miscdevice.h>
#include <linux/proc_fs.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/time.h>

#include "drv_media_mem.h"
#include "drv_mmz.h"

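/*
 * new_mmb() - allocate a media memory block (MMB) from the named MMZ zone.
 * Returns the IOMMU address when the block is IOMMU-backed, the physical
 * address otherwise, or MMB_ADDR_INVALID on allocation failure.
 */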
mmb_addr_t new_mmb(const char *name, int size, unsigned int align, const char *zone_name)
{
    hil_mmb_t *mmb = NULL;

    mmb = hil_mmb_alloc(name, size, align, zone_name, HI_KERNEL_ALLOC);
    if (mmb == NULL) {
        return MMB_ADDR_INVALID;
    }
    if (mmb->iommu) {
        return mmb->iommu_addr;
    } else {
        return mmb->phys_addr;
    }
}

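/*
 * delete_mmb() - free the MMB that owns @addr. @iommu selects whether @addr
 * is interpreted as an IOMMU address or a physical address.
 */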
void delete_mmb(mmb_addr_t addr, unsigned int iommu)
{
    hil_mmb_t *mmb = NULL;

    mmb = hil_mmb_getby_phys((HI_U32)addr, iommu);
    if (mmb == NULL) {
        return;
    }

    down(&g_mmz_lock);
    hil_mmb_free(mmb);
    up(&g_mmz_lock);
}

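/*
 * remap_mmb() - map the MMB that owns @addr into the kernel as an uncached
 * mapping and return the kernel virtual address corresponding to @addr,
 * or NULL on failure.
 */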
void *remap_mmb(mmb_addr_t addr, unsigned int iommu)
{
    hil_mmb_t *mmb = NULL;
    HI_SIZE_T offset = 0;
    void *virt = NULL;

    mmb = hil_mmb_getby_phys((HI_U32)addr, iommu);
    if (mmb == NULL) {
        return NULL;
    }
    if (iommu) {
        offset = addr - mmb->iommu_addr;
    } else {
        offset = addr - mmb->phys_addr;
    }

    virt = hil_mmb_map2kern(mmb);
    if (virt == NULL) {
        return NULL;
    }
    return (void *)((uintptr_t)virt + offset);
}

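/*
 * remap_mmb_cached() - same as remap_mmb(), but the kernel mapping is
 * created cached.
 */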
void *remap_mmb_cached(mmb_addr_t addr, unsigned int iommu)
{
    hil_mmb_t *mmb = NULL;
    HI_SIZE_T offset = 0;
    void *virt = NULL;

    mmb = hil_mmb_getby_phys((HI_U32)addr, iommu);
    if (mmb == NULL) {
        return NULL;
    }
    if (iommu) {
        offset = addr - mmb->iommu_addr;
    } else {
        offset = addr - mmb->phys_addr;
    }

    virt = hil_mmb_map2kern_cached(mmb);
    if (virt == NULL) {
        return NULL;
    }
    return (void *)((uintptr_t)virt + offset);
}

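/*
 * unmap_mmb() - tear down the kernel mapping created by remap_mmb() or
 * remap_mmb_cached(). Returns -1 if no MMB owns @mapped_addr.
 */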
#ifndef DMABUF_FLUSH_CACHE
int unmap_mmb(const void *mapped_addr)
{
    hil_mmb_t *mmb = NULL;

    mmb = hil_mmb_getby_kvirt(mapped_addr);
    if (mmb == NULL) {
        return -1;
    }

    return hil_mmb_unmap(mmb, mapped_addr);
}
#else
int unmap_mmb(void *mapped_addr)
{
    hil_mmb_t *mmb = NULL;

    mmb = hil_mmb_getby_kvirt(mapped_addr);
    if (mmb == NULL || mmb->handle == NULL) {
        return -1;
    }

    dma_buf_vunmap(mmb->handle, mapped_addr);
    return 0;
}
#endif

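/*
 * flush_cache_kern() - synchronize CPU caches for an MMB region. Without
 * DMABUF_FLUSH_CACHE the inner/outer caches are flushed directly; with it
 * the dma-buf begin/end CPU-access hooks perform the clean and invalidate.
 */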
void flush_cache_kern(mmb_addr_t phyaddr, const void *viraddr, mmb_addr_t len, unsigned int iommu)
{
#ifndef DMABUF_FLUSH_CACHE
    flush_inner_cache((void *)viraddr, len);
    flush_outer_cache_range(phyaddr, len, iommu);
#else
    hil_mmb_t *mmb = NULL;

    mmb = hil_mmb_getby_phys(phyaddr, iommu);
    if (mmb == NULL || mmb->handle == NULL) {
        return;
    }

    /* clean the CPU cache so device reads see the latest CPU writes */
    dma_buf_end_cpu_access(mmb->handle, DMA_TO_DEVICE);
    /* invalidate the CPU cache so CPU reads see the latest device writes */
    dma_buf_begin_cpu_access(mmb->handle, DMA_FROM_DEVICE);
#endif
}

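/* cma_mapto_smmu() - map a CMA-backed MMB into the SMMU/IOMMU address space. */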
mmb_addr_t cma_mapto_smmu(mmb_addr_t addr, int iommu)
{
    return hil_mmb_cma_mapto_iommu(addr, iommu);
}

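/* cma_unmapfrom_smmu() - undo a mapping created by cma_mapto_smmu(). */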
int cma_unmapfrom_smmu(mmb_addr_t addr, int iommu)
{
    return hil_mmb_cma_unmapfrom_iommu(addr, iommu);
}

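/*
 * get_phyaddr_byvirt() - translate a kernel virtual address inside an MMB
 * back to its IOMMU address (@iommu != 0) or physical address (@iommu == 0).
 * Returns MMB_ADDR_INVALID if the address does not belong to any MMB.
 */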
mmb_addr_t get_phyaddr_byvirt(const void *mapped_addr, int iommu)
{
    hil_mmb_t *mmb = NULL;
    mmb_addr_t phyaddr = MMB_ADDR_INVALID;
    mmb_addr_t iommuaddr = MMB_ADDR_INVALID;

    mmb = hil_mmbinfo_getby_kvirt(mapped_addr);
    if (mmb == NULL) {
        return MMB_ADDR_INVALID;
    }

    if (mmb->iommu_addr != MMB_ADDR_INVALID) {
        iommuaddr = mmb->iommu_addr + ((uintptr_t)mapped_addr - (uintptr_t)mmb->kdata->kvirt);
    }

    if (mmb->phys_addr != MMB_ADDR_INVALID) {
        phyaddr = mmb->phys_addr + ((uintptr_t)mapped_addr - (uintptr_t)mmb->kdata->kvirt);
    }

    if (iommu) {
        return iommuaddr;
    } else {
        return phyaddr;
    }
}

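/*
 * get_meminfo() - look up the MMB that owns @addr and report its length and
 * base address through @size and @base. Returns the block's scatter-gather
 * table, or NULL on failure.
 */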
struct sg_table *get_meminfo(u32 addr, u32 iommu, u32 *size, u32 *base)
{
    hil_mmb_t *mmb = NULL;

    if (size == NULL || base == NULL) {
        return NULL;
    }

    mmb = hil_mmb_getby_phys(addr, iommu);
    if (mmb == NULL) {
        return NULL;
    }
    *size = mmb->length;
    if (iommu) {
        *base = mmb->iommu_addr;
    } else {
        *base = mmb->phys_addr;
    }

    return hil_get_meminfo(mmb);
}

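/*
 * Translation helpers between secure SMMU addresses, non-secure SMMU
 * addresses and physical addresses. Only built for kernels older than 5.10.
 */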
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
mmb_addr_t get_nonsecsmmu_by_secsmmu(HI_U32 sec_smmu)
{
    hil_mmb_t *mmb = NULL;
    HI_U32 offset;

    mmb = hil_mmb_getby_sec_addr(sec_smmu, 1);
    if (mmb == NULL) {
        return MMB_ADDR_INVALID;
    }
    offset = sec_smmu - mmb->sec_smmu;
    if (!mmb->iommu_addr) {
        return MMB_ADDR_INVALID;
    } else {
        return (mmb->iommu_addr + offset);
    }
}

mmb_addr_t get_phys_by_secsmmu(HI_U32 sec_smmu)
{
    hil_mmb_t *mmb = NULL;
    HI_U32 offset;

    mmb = hil_mmb_getby_sec_addr(sec_smmu, 1);
    if (mmb == NULL) {
        return MMB_ADDR_INVALID;
    }
    offset = sec_smmu - mmb->sec_smmu;
    if (!mmb->phys_addr) {
        return MMB_ADDR_INVALID;
    } else {
        return (mmb->phys_addr + offset);
    }
}

mmb_addr_t get_sec_smmu_by_phys(HI_U32 phys_addr)
{
    hil_mmb_t *mmb = NULL;
    HI_U32 offset;

    mmb = hil_mmb_getby_phys(phys_addr, 0);
    if (mmb == NULL) {
        return MMB_ADDR_INVALID;
    }
    offset = phys_addr - mmb->phys_addr;
    if (!mmb->sec_smmu) {
        return MMB_ADDR_INVALID;
    } else {
        return (mmb->sec_smmu + offset);
    }
}

mmb_addr_t get_sec_smmu_by_nosmmu(HI_U32 nonsmmu)
{
    hil_mmb_t *mmb = NULL;
    HI_U32 offset;

    mmb = hil_mmb_getby_phys(nonsmmu, 1);
    if (mmb == NULL) {
        return MMB_ADDR_INVALID;
    }
    offset = nonsmmu - mmb->iommu_addr;
    if (!mmb->sec_smmu) {
        return MMB_ADDR_INVALID;
    } else {
        return (mmb->sec_smmu + offset);
    }
}
#endif

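/* mmb_buf_get() - wrapper around mmb_get(); takes a reference on the buffer at @addr. */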
int mmb_buf_get(HI_U32 addr, HI_U32 iommu)
{
    return mmb_get(addr, iommu);
}

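/* mmb_buf_put() - wrapper around mmb_put(); releases a reference taken by mmb_buf_get(). */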
int mmb_buf_put(HI_U32 addr, HI_U32 iommu)
{
    return mmb_put(addr, iommu);
}

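/* mmb_buf_ref_query() - query the current reference count of the buffer at @addr into @ref. */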
int mmb_buf_ref_query(HI_U32 addr, HI_U32 iommu, HI_U32 *ref)
{
    if (ref == HI_NULL) {
        return -1;
    }

    return mmb_ref_query(addr, iommu, ref);
}

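/* query_buffer_source() - report which allocation source the buffer at @iommu_addr came from. */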
int query_buffer_source(HI_U32 iommu_addr, HI_S32 *source)
{
    if (source == HI_NULL) {
        return -1;
    }

    return mem_source_query(iommu_addr, source);
}

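/* query_secure_buffer_source() - secure-SMMU variant of query_buffer_source(). */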
int query_secure_buffer_source(HI_U32 sec_smmu, HI_S32 *source)
{
    if (!sec_smmu) {
        return HI_FAILURE;
    }
    return sec_mem_source_query(sec_smmu, source);
}

EXPORT_SYMBOL(new_mmb);
EXPORT_SYMBOL(delete_mmb);
EXPORT_SYMBOL(remap_mmb);
EXPORT_SYMBOL(remap_mmb_cached);
EXPORT_SYMBOL(unmap_mmb);
EXPORT_SYMBOL(flush_cache_kern);
EXPORT_SYMBOL(get_phyaddr_byvirt);
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
EXPORT_SYMBOL(get_meminfo);
EXPORT_SYMBOL(get_nonsecsmmu_by_secsmmu);
EXPORT_SYMBOL(get_phys_by_secsmmu);
EXPORT_SYMBOL(get_sec_smmu_by_phys);
EXPORT_SYMBOL(get_sec_smmu_by_nosmmu);
#endif
EXPORT_SYMBOL(mmb_buf_get);
EXPORT_SYMBOL(mmb_buf_put);
EXPORT_SYMBOL(mmb_buf_ref_query);
EXPORT_SYMBOL(query_buffer_source);
EXPORT_SYMBOL(query_secure_buffer_source);

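/*
 * A minimal usage sketch for a client kernel module built against this
 * driver, assuming a non-IOMMU MMZ zone named "ddr" (the zone name is an
 * assumption; use a zone that exists on the target platform) so that
 * new_mmb() returns a physical address, and assuming align == 0 selects the
 * default alignment:
 *
 *     mmb_addr_t phys;
 *     void *virt;
 *
 *     phys = new_mmb("my_buf", 4096, 0, "ddr");
 *     if (phys == MMB_ADDR_INVALID)
 *         return -ENOMEM;
 *
 *     virt = remap_mmb_cached(phys, 0);          // cached kernel mapping
 *     if (virt == NULL) {
 *         delete_mmb(phys, 0);
 *         return -ENOMEM;
 *     }
 *
 *     memset(virt, 0, 4096);                     // CPU writes
 *     flush_cache_kern(phys, virt, 4096, 0);     // make them visible to devices
 *
 *     unmap_mmb(virt);
 *     delete_mmb(phys, 0);
 */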