/*
 * Copyright (C) 2021 HiSilicon (Shanghai) Technologies CO., LIMITED.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include "drv_osal_lib.h"
#include <linux/dmapool.h>
#include <asm/cacheflush.h>
#include "securec.h"

/* Under TEE we can only allocate secure MMZ at system setup and then map the
 * MMZ to the SMMU, but the SMMU address cannot be mapped back to a CPU address.
 * We therefore save the CPU address in a static table when we allocate and map
 * the MMZ. crypto_mem_map() first queries this table for the CPU address and
 * only falls back to the system API if the lookup fails.
 */
#define CRYPTO_MEM_MAP_TABLE_DEPTH 32
#define DMA_ALLOC_MAX_SIZE (1024 * 256)
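
/*
 * A minimal lifecycle sketch for the API exported below (hypothetical
 * caller, error handling elided). The crypto_mem_open() call hits the
 * static map table, so no extra system mapping is performed:
 *
 *     crypto_mem mem;
 *     compat_addr dma;
 *
 *     crypto_mem_create(&mem, 0, "example", 0x1000);   // type 0 is assumed
 *     crypto_mem_phys(&mem, &dma);                     // fetch the dma address
 *     crypto_mem_open(&mem, dma, 0x1000);              // map: table hit
 *     (hi_void)memset_s(crypto_mem_virt(&mem), 0x1000, 0, 0x1000);
 *     crypto_mem_close(&mem);
 *     crypto_mem_destroy(&mem);
 */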

typedef struct {
    hi_u32 valid;       /* HI_TRUE when this entry is in use. */
    compat_addr dma;    /* dma address saved at alloc/map time. */
    hi_void *via;       /* cpu virtual address of the mapping. */
} crypto_mem_map_table;

static crypto_mem_map_table g_local_map_table[CRYPTO_MEM_MAP_TABLE_DEPTH];

#ifdef CONFIG_64BIT
#define CRYPTO_FLUSH_DCACHE_AREA __flush_dcache_area
#else
#define CRYPTO_FLUSH_DCACHE_AREA __cpuc_flush_dcache_area
#endif

/* DMA mask width on a 32-bit system. */
#define SYS_32BITS 32

/* DMA mask width on a 64-bit system. */
#define SYS_64BITS 64

/* mmz/mmu api. */
/* Try a 64-bit coherent DMA mask first and fall back to 32-bit. */
static hi_s32 cipher_dma_set_mask(struct device *dev)
{
    hi_s32 ret;

    ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(SYS_64BITS));
    if (ret == HI_SUCCESS) {
        return ret;
    }

    ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(SYS_32BITS));
    if (ret != HI_SUCCESS) {
        hi_log_error("Failed to set DMA mask %d.\n", ret);
        return ret;
    }

    return HI_SUCCESS;
}

static hi_s32 cipher_dma_alloc_coherent(crypto_mem *mem, hi_u32 type, const hi_char *name, hi_u32 size)
{
    struct device *dev = HI_NULL;
    hi_s32 ret;
    hi_s32 i;

    crypto_unused(type);
    crypto_unused(name);

    if (mem == HI_NULL) {
        hi_log_error("mem is null.\n");
        return HI_ERR_CIPHER_INVALID_POINT;
    }

    if (size > DMA_ALLOC_MAX_SIZE) {
        hi_log_error("dma alloc coherent with invalid size(0x%x).\n", size);
        return HI_ERR_CIPHER_INVALID_PARAM;
    }

    dev = (struct device *)cipher_get_device();
    if (dev == HI_NULL) {
        hi_log_error("cipher_get_device error.\n");
        return HI_ERR_CIPHER_INVALID_POINT;
    }

    ret = cipher_dma_set_mask(dev);
    if (ret != HI_SUCCESS) {
        hi_log_error("cipher dma set mask failed.\n");
        return ret;
    }

    mem->dma_size = size;
    mem->dma_virt = dma_alloc_coherent(dev, mem->dma_size, (dma_addr_t *)(&addr_u64(mem->dma_addr)), GFP_ATOMIC);
    if (mem->dma_virt == HI_NULL) {
        hi_log_error("dma_alloc_coherent error.\n");
        return HI_ERR_CIPHER_FAILED_MEM;
    }
    addr_u64(mem->mmz_addr) = addr_u64(mem->dma_addr);

    /* save the map info. */
    for (i = 0; i < CRYPTO_MEM_MAP_TABLE_DEPTH; i++) {
        if (g_local_map_table[i].valid == HI_FALSE) {
            addr_u64(g_local_map_table[i].dma) = addr_u64(mem->dma_addr);
            g_local_map_table[i].via = mem->dma_virt;
            g_local_map_table[i].valid = HI_TRUE;
            hi_log_debug("map local map %d, dma 0x%llx, via 0x%pK\n",
                i, addr_u64(mem->dma_addr), mem->dma_virt);
            break;
        }
    }

    return HI_SUCCESS;
}

static hi_s32 cipher_dma_free_coherent(crypto_mem *mem)
{
    struct device *dev = HI_NULL;
    hi_s32 ret;
    hi_s32 i;
    crypto_mem mem_temp;

    hi_log_chk_param_return(mem == HI_NULL);
    hi_log_chk_param_return(mem->dma_virt == HI_NULL);

    (hi_void)memset_s(&mem_temp, sizeof(mem_temp), 0, sizeof(mem_temp));
    if (memcpy_s(&mem_temp, sizeof(mem_temp), mem, sizeof(crypto_mem)) != EOK) {
        hi_log_print_func_err(memcpy_s, HI_ERR_CIPHER_MEMCPY_S_FAILED);
        return HI_ERR_CIPHER_MEMCPY_S_FAILED;
    }

    dev = (struct device *)cipher_get_device();
    if (dev == HI_NULL) {
        hi_log_error("cipher_get_device error.\n");
        return HI_ERR_CIPHER_INVALID_POINT;
    }

    ret = cipher_dma_set_mask(dev);
    if (ret != HI_SUCCESS) {
        hi_log_error("cipher dma set mask failed.\n");
        return ret;
    }

    dma_free_coherent(dev, mem->dma_size, mem->dma_virt, (dma_addr_t)addr_u64(mem->dma_addr));

    /* remove the map info. */
    for (i = 0; i < CRYPTO_MEM_MAP_TABLE_DEPTH; i++) {
        if (g_local_map_table[i].valid && addr_u64(g_local_map_table[i].dma) == addr_u64(mem_temp.dma_addr)) {
            addr_u64(g_local_map_table[i].dma) = 0x00;
            g_local_map_table[i].via = HI_NULL;
            g_local_map_table[i].valid = HI_FALSE;
            hi_log_debug("unmap local map %d, dma 0x%llx, via 0x%pK\n",
                i, addr_u64(mem_temp.dma_addr), mem_temp.dma_virt);
            break;
        }
    }
    (hi_void)memset_s(mem, sizeof(crypto_mem), 0, sizeof(crypto_mem));

    return HI_SUCCESS;
}

/*
 * brief: allocate and map a MMZ or SMMU memory.
 * We can't allocate SMMU memory directly during the TEE boot period,
 * and the buffer of the cipher node list must be MMZ, so we allocate
 * a MMZ memory here and then map it to SMMU if necessary.
 */
static hi_s32 hash_mem_alloc_remap(crypto_mem *mem, hi_u32 type, const char *name, hi_u32 size)
{
    hi_u32 i;

    crypto_unused(type);
    crypto_unused(name);

    (hi_void)memset_s(mem, sizeof(crypto_mem), 0, sizeof(crypto_mem));

    hi_log_debug("mem_alloc_remap()- name %s, size 0x%x\n", name, size);

    mem->dma_size = size;
    mem->dma_virt = kzalloc(size, GFP_KERNEL);
    if (mem->dma_virt == HI_NULL) {
        return HI_ERR_CIPHER_FAILED_MEM;
    }

    addr_u64(mem->mmz_addr) = (hi_phys_addr_t)virt_to_phys(mem->dma_virt);
    if (addr_u64(mem->mmz_addr) == 0) {
        kfree(mem->dma_virt);
        mem->dma_virt = HI_NULL;
        return HI_ERR_CIPHER_FAILED_MEM;
    }

    addr_u64(mem->dma_addr) = addr_u64(mem->mmz_addr);

    hi_log_debug("MMZ/MMU malloc, MMZ 0x%llx, MMZ/MMU 0x%llx, VIA 0x%pK, SIZE 0x%x\n",
        addr_u64(mem->mmz_addr), addr_u64(mem->dma_addr), mem->dma_virt, size);

    mem->user_buf = HI_NULL;

    /* save the map info. */
    for (i = 0; i < CRYPTO_MEM_MAP_TABLE_DEPTH; i++) {
        if (g_local_map_table[i].valid == HI_FALSE) {
            addr_u64(g_local_map_table[i].dma) = addr_u64(mem->dma_addr);
            g_local_map_table[i].via = mem->dma_virt;
            g_local_map_table[i].valid = HI_TRUE;
            hi_log_debug("map local map %u, dma 0x%llx, via 0x%pK\n",
                i, addr_u64(mem->dma_addr), mem->dma_virt);
            break;
        }
    }

    return HI_SUCCESS;
}

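/*
 * A minimal pairing sketch (hypothetical caller). hash_mem_create() backs
 * the buffer with kzalloc(), so it must be released with hash_mem_destroy()
 * rather than crypto_mem_destroy(), which frees coherent DMA memory:
 *
 *     crypto_mem node_buf;
 *
 *     if (hash_mem_create(&node_buf, 0, "hash_node", 0x1000) == HI_SUCCESS) {
 *         // ... use crypto_mem_virt(&node_buf) ...
 *         hash_mem_destroy(&node_buf);
 *     }
 */
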
/* brief: release and unmap a MMZ or SMMU memory. */
static hi_s32 hash_mem_release_unmap(crypto_mem *mem)
{
    hi_u32 i;

    kfree(mem->dma_virt);
    mem->dma_virt = HI_NULL;

    /* remove the map info. */
    for (i = 0; i < CRYPTO_MEM_MAP_TABLE_DEPTH; i++) {
        if (g_local_map_table[i].valid &&
            addr_u64(g_local_map_table[i].dma) == addr_u64(mem->dma_addr)) {
            addr_u64(g_local_map_table[i].dma) = 0x00;
            g_local_map_table[i].via = HI_NULL;
            g_local_map_table[i].valid = HI_FALSE;
            hi_log_debug("unmap local map %u, dma 0x%llx, via 0x%pK\n",
                i, addr_u64(mem->dma_addr), mem->dma_virt);
            break;
        }
    }
    (hi_void)memset_s(mem, sizeof(crypto_mem), 0, sizeof(crypto_mem));

    return HI_SUCCESS;
}

static hi_s32 crypto_mem_alloc_remap(crypto_mem *mem, hi_u32 type, const hi_char *name, hi_u32 size)
{
    return cipher_dma_alloc_coherent(mem, type, name, size);
}

/* brief: release and unmap a MMZ or SMMU memory. */
static hi_s32 crypto_mem_release_unmap(crypto_mem *mem)
{
    return cipher_dma_free_coherent(mem);
}

/* brief: map a MMZ or SMMU memory. */
static hi_s32 crypto_mem_map(crypto_mem *mem)
{
    hi_u32 i;

    hi_log_debug("crypto_mem_map()- dma 0x%llx, size 0x%x\n",
        addr_u64(mem->dma_addr), mem->dma_size);

    /* query the table for the cpu address first;
     * if it isn't found there, call the system api to map it.
     */
    for (i = 0; i < CRYPTO_MEM_MAP_TABLE_DEPTH; i++) {
        if (g_local_map_table[i].valid && addr_u64(g_local_map_table[i].dma) == addr_u64(mem->dma_addr)) {
            mem->dma_virt = g_local_map_table[i].via;
            hi_log_debug("local map %u, dma 0x%llx, via 0x%pK\n", i, addr_u64(mem->dma_addr), mem->dma_virt);
            return HI_SUCCESS;
        }
    }

    mem->dma_virt = (hi_u8 *)phys_to_virt((phys_addr_t)addr_u64(mem->dma_addr));
    if (mem->dma_virt == HI_NULL) {
        return HI_ERR_CIPHER_FAILED_MEM;
    }

    hi_log_info("crypto_mem_map()- via 0x%pK\n", mem->dma_virt);

    return HI_SUCCESS;
}

/* brief: unmap a MMZ or SMMU memory. */
static hi_s32 crypto_mem_unmap(crypto_mem *mem)
{
    hi_u32 i;

    hi_log_debug("crypto_mem_unmap()- dma 0x%llx, size 0x%x\n",
        addr_u64(mem->dma_addr), mem->dma_size);

    /* query the table for the cpu address first;
     * if it isn't found there, call the system api to unmap it.
     */
    for (i = 0; i < CRYPTO_MEM_MAP_TABLE_DEPTH; i++) {
        if (g_local_map_table[i].valid && (addr_u64(g_local_map_table[i].dma) == addr_u64(mem->dma_addr))) {
            /* addresses kept in the map table must not be unmapped here. */
            hi_log_debug("local unmap %u, dma 0x%llx, via 0x%pK\n", i, addr_u64(mem->dma_addr), mem->dma_virt);
            return HI_SUCCESS;
        }
    }

    return HI_SUCCESS;
}

hi_void crypto_cpuc_flush_dcache_area(hi_void *kvir, hi_u32 length)
{
    CRYPTO_FLUSH_DCACHE_AREA(kvir, length);
}
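
/*
 * A minimal sketch (assumed usage): memory from hash_mem_create() is
 * cacheable kzalloc() memory, so clean the data cache before the engine
 * reads the buffer through its physical address:
 *
 *     crypto_cpuc_flush_dcache_area(crypto_mem_virt(&node_buf), node_buf.dma_size);
 */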

hi_void crypto_mem_init(hi_void)
{
    (hi_void)memset_s(&g_local_map_table, sizeof(g_local_map_table), 0, sizeof(g_local_map_table));
}

hi_void crypto_mem_deinit(hi_void)
{
}

hi_s32 crypto_mem_create(crypto_mem *mem, hi_u32 type, const char *name, hi_u32 size)
{
    hi_log_chk_param_return(mem == HI_NULL);

    return crypto_mem_alloc_remap(mem, type, name, size);
}

hi_s32 crypto_mem_destroy(crypto_mem *mem)
{
    hi_log_chk_param_return(mem == HI_NULL);

    return crypto_mem_release_unmap(mem);
}

hi_s32 hash_mem_create(crypto_mem *mem, hi_u32 type, const char *name, hi_u32 size)
{
    hi_log_chk_param_return(mem == HI_NULL);

    return hash_mem_alloc_remap(mem, type, name, size);
}

hi_s32 hash_mem_destroy(crypto_mem *mem)
{
    hi_log_chk_param_return(mem == HI_NULL);

    return hash_mem_release_unmap(mem);
}

hi_s32 crypto_mem_open(crypto_mem *mem, compat_addr dma_addr, hi_u32 dma_size)
{
    hi_log_chk_param_return(mem == HI_NULL);

    mem->dma_addr = dma_addr;
    mem->dma_size = dma_size;

    return crypto_mem_map(mem);
}

hi_s32 crypto_mem_close(crypto_mem *mem)
{
    hi_log_chk_param_return(mem == HI_NULL);

    return crypto_mem_unmap(mem);
}

hi_s32 crypto_mem_attach(crypto_mem *mem, hi_void *buffer)
{
    hi_log_chk_param_return(mem == HI_NULL);

    mem->user_buf = buffer;

    return HI_SUCCESS;
}

hi_s32 crypto_mem_phys(crypto_mem *mem, compat_addr *dma_addr)
{
    hi_log_chk_param_return(mem == HI_NULL);
    hi_log_chk_param_return(dma_addr == HI_NULL);

    dma_addr->phy = addr_u64(mem->dma_addr);

    return HI_SUCCESS;
}

hi_void *crypto_mem_virt(const crypto_mem *mem)
{
    if (mem == HI_NULL) {
        return HI_NULL;
    }

    return mem->dma_virt;
}

hi_s32 crypto_copy_from_user(hi_void *to, unsigned long to_len,
    const hi_void *from, unsigned long from_len)
{
    if (from_len == 0) {
        return HI_SUCCESS;
    }

    hi_log_chk_param_return(to == HI_NULL);
    hi_log_chk_param_return(from == HI_NULL);
    hi_log_chk_param_return(from_len > MAX_COPY_FROM_USER_SIZE);
    hi_log_chk_param_return(from_len > to_len);

    /* copy_from_user() returns the number of bytes it could not copy,
     * so any nonzero return value indicates failure.
     */
    return (hi_s32)copy_from_user(to, from, from_len);
}
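
/*
 * A hedged usage sketch (hypothetical ioctl handler): the checks above
 * bound the copy by both MAX_COPY_FROM_USER_SIZE and the destination
 * size, so callers only need to test for a nonzero return:
 *
 *     hi_cipher_ctrl ctrl;   // hypothetical argument struct
 *
 *     if (crypto_copy_from_user(&ctrl, sizeof(ctrl), argp, sizeof(ctrl)) != HI_SUCCESS) {
 *         return HI_ERR_CIPHER_INVALID_PARAM;
 *     }
 */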

hi_s32 crypto_copy_to_user(hi_void *to, unsigned long to_len,
    const hi_void *from, unsigned long from_len)
{
    if (from_len == 0) {
        return HI_SUCCESS;
    }

    hi_log_chk_param_return(to == HI_NULL);
    hi_log_chk_param_return(from == HI_NULL);
    hi_log_chk_param_return(from_len > MAX_COPY_FROM_USER_SIZE);
    hi_log_chk_param_return(from_len > to_len);

    /* copy_to_user() also returns the number of bytes not copied. */
    return (hi_s32)copy_to_user(to, from, from_len);
}

hi_u32 crypto_is_sec_cpu(hi_void)
{
    return module_get_secure();
}

hi_void smmu_get_table_addr(hi_phys_addr_t *rdaddr, hi_phys_addr_t *wraddr, hi_u64 *table)
{
#ifdef CRYPTO_SMMU_SUPPORT
    hi_u32 smmu_e_raddr, smmu_e_waddr, mmu_pgtbl;

    HI_DRV_SMMU_GetPageTableAddr(&mmu_pgtbl, &smmu_e_raddr, &smmu_e_waddr);

    *rdaddr = smmu_e_raddr;
    *wraddr = smmu_e_waddr;
    *table = mmu_pgtbl;
#else
    *rdaddr = 0x00;
    *wraddr = 0x00;
    *table = 0x00;
#endif
}

hi_s32 crypto_waitdone_callback(hi_void *param)
{
    hi_u32 *done = param;

    /* nonzero when the interrupt handler has flagged completion. */
    return (*done != HI_FALSE);
}
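
/*
 * A minimal sketch of the intended use (names hypothetical): the interrupt
 * handler sets a done flag, and a waiter polls the flag through this
 * callback:
 *
 *     hi_u32 done = HI_FALSE;
 *
 *     // ... start the hardware; the ISR sets done = HI_TRUE ...
 *     while (crypto_waitdone_callback(&done) == 0) {
 *         // ... sleep or schedule until the flag is set ...
 *     }
 */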

hi_s32 cipher_check_mmz_phy_addr(hi_phys_addr_t phy_addr, hi_u32 length)
{
#ifdef CIPHER_CHECK_MMZ_PHY
    hil_mmb_t *mmb = HI_NULL;
    unsigned long mmb_offset = 0;

    /* Check whether the start address is within the MMZ range of the current system. */
    mmb = hil_mmb_getby_phys_2(phy_addr, &mmb_offset);
    if (mmb != HI_NULL) {
        /* Check whether the end address is within the MMZ range of the current system. */
        mmb = hil_mmb_getby_phys_2(phy_addr + length - 1, &mmb_offset);
        if (mmb == HI_NULL) {
            hi_log_print_func_err(hil_mmb_getby_phys_2, HI_ERR_CIPHER_INVALID_ADDR);
            return HI_ERR_CIPHER_INVALID_ADDR;
        }
    } else { /* Check whether the start address is within the MMZ range of another system. */
        if (hil_map_mmz_check_phys(phy_addr, length)) {
            hi_log_print_func_err(hil_map_mmz_check_phys, HI_ERR_CIPHER_INVALID_ADDR);
            return HI_ERR_CIPHER_INVALID_ADDR;
        }
    }
#endif

#ifdef CIPHER_BUILTIN
    /* Reject physical addresses that fall inside the kernel RAM region;
     * check both the first and the last page frame of the range.
     */
    if (pfn_valid(phy_addr >> PAGE_SHIFT) || pfn_valid((phy_addr + length - 1) >> PAGE_SHIFT)) {
#if defined(CONFIG_CMA) && defined(CONFIG_ARCH_HISI_BVT)
        if (is_hicma_address(phy_addr, length)) {
            return HI_SUCCESS;
        } else {
            hi_log_print_func_err(is_hicma_address, HI_ERR_CIPHER_INVALID_ADDR);
            return HI_ERR_CIPHER_INVALID_ADDR;
        }
#endif
        hi_log_error("physical addr is ram region.\n");
        return HI_ERR_CIPHER_INVALID_ADDR;
    } else {
        return HI_SUCCESS;
    }
#endif

    return HI_SUCCESS;
}

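/*
 * A hedged usage sketch (hypothetical caller): validate a user-supplied
 * physical address range before programming it into a DMA descriptor.
 * pkg->src_phys and pkg->byte_length are assumed fields of a request
 * structure:
 *
 *     if (cipher_check_mmz_phy_addr(pkg->src_phys, pkg->byte_length) != HI_SUCCESS) {
 *         return HI_ERR_CIPHER_INVALID_ADDR;
 *     }
 */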