/*
 * Copyright (C) 2021 HiSilicon (Shanghai) Technologies CO., LIMITED.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include "drv_hash_v200.h"
#include "drv_hash.h"
#include "securec.h"

#ifdef CHIP_HASH_VER_V200

/* ************************** Structure Definition *************************** */
/* hash in entry node buffer size of each channel */
#define HASH_NODE_SIZE 4096

/* total hash in entry list size of all hardware channels */
#define HASH_NODE_LIST_SIZE (HASH_NODE_SIZE * HASH_HARD_CHANNEL_CNT)

/* hash node depth */
#define HASH_MAX_DEPTH 127

#define KLAD_KEY_USE_ERR 0x01
#define ALG_LEN_ERR 0x02
#define SMMU_PAGE_INVALID 0x04
#define OUT_SMMU_PAGE_NOT_VALID 0x08
#define KLAD_KEY_WRITE_ERR 0x10

/* Define the flag of node */
typedef enum {
    HASH_CTRL_NONE = 0x00,          /* middle node */
    HASH_CTRL_HASH_IN_FIRST = 0x01, /* first node */
    HASH_CTRL_HASH_IN_LAST = 0x02,  /* last node */
    HASH_CTRL_COUNT,
} hash_ctrl_en;

/* spacc digest in entry struct, which is defined by hardware and must not be changed */
struct hash_entry_in {
    hi_u32 spacc_cmd : 2;   /* reserved */
    hi_u32 rev1 : 6;        /* reserved */
    hi_u32 hash_ctrl : 6;   /* hash control flag */
    hi_u32 rev2 : 18;       /* reserved */
    hi_u32 hash_start_addr; /* hash message address */
    hi_u32 hash_alg_length; /* hash message length */
    hi_u32 word1;           /* reserved */
};
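
/*
 * Note: each in node is four 32-bit words (16 bytes), so the HASH_MAX_DEPTH of 127 nodes
 * fits comfortably inside one channel's HASH_NODE_SIZE (4096-byte) slice of the node list.
 * For single-shot processing, hash_addbuf() below sets both HASH_CTRL_HASH_IN_FIRST and
 * HASH_CTRL_HASH_IN_LAST on the same node.
 */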

/* Define the context of hash */
typedef struct {
    hash_mode hash_alg;             /* hash mode */
    struct hash_entry_in *entry_in; /* spacc digest in entry struct */
    hi_u32 id_in;                   /* index of the next hash in node to be used */
    hi_u32 done;                    /* calculation finished flag */
    CRYPTO_QUEUE_HEAD queue;        /* queue list */
} hash_hard_context;

/* whether hash has already been initialized */
static hi_u32 g_hash_initialize = HI_FALSE;

/* dma memory of hash node list */
static crypto_mem g_hash_dma;

/* Channel of hash */
static channel_context g_hash_hard_channel[CRYPTO_HARD_CHANNEL_MAX];

/* ****************************** API Declaration **************************** */
static hi_void hash_print_last_node(hi_u32 chn_num)
{
    struct hash_entry_in *in = HI_NULL;
    hash_hard_context *ctx = HI_NULL;

    ctx = (hash_hard_context *)g_hash_hard_channel[chn_num].ctx;

    /* get last in node info. */
    in = &ctx->entry_in[ctx->id_in];

    hi_log_error("chn %u, src addr 0x%x, size 0x%x\n", chn_num, in->hash_start_addr, in->hash_alg_length);
    crypto_unused(in);
}

static hi_s32 drv_hash_get_err_code(hi_u32 chn_num)
{
    hi_u32 code;

    /* check error code
     * bit0: klad_key_use_err
     * bit1: alg_len_err
     * bit2: smmu_page_invalid
     * bit3: out_smmu_page_not_valid
     * bit4: klad_key_write_err
     */
    code = hash_read(CALC_ERR);
    if (code & KLAD_KEY_USE_ERR) {
        hi_log_error("hash error: klad_key_use_err, chn %u !!!\n", chn_num);
    }
    if (code & ALG_LEN_ERR) {
        hi_log_error("hash error: alg_len_err, chn %u !!!\n", chn_num);
    }
    if (code & SMMU_PAGE_INVALID) {
        hi_log_error("hash error: smmu_page_invalid, chn %u !!!\n", chn_num);
    }
    if (code & OUT_SMMU_PAGE_NOT_VALID) {
        hi_log_error("hash error: out_smmu_page_not_valid, chn %u !!!\n", chn_num);
    }
    if (code & KLAD_KEY_WRITE_ERR) {
        hi_log_error("hash error: klad_key_write_err, chn %u !!!\n", chn_num);
    }

    /* print the in/out buffer address. */
    if (code) {
        hash_print_last_node(chn_num);
        return HI_ERR_CIPHER_FAILED_MEM;
    }

    return HI_SUCCESS;
}

#ifdef CRYPTO_OS_INT_SUPPORT
static hi_u32 hash_done_notify(hi_void)
{
    hash_int_raw int_raw;
    hash_int_status int_st;
    hi_u32 chn_mask;

    int_st.u32 = hash_read(HASH_INT_STATUS);
    int_raw.u32 = 0;

    /* just process the valid channel. */
    int_st.bits.hash_chn_oram_int &= HASH_HARD_CHANNEL_MASK;
    chn_mask = int_st.bits.hash_chn_oram_int;
    int_raw.bits.hash_chn_oram_raw = int_st.bits.hash_chn_oram_int;

    hi_log_debug("int_st 0x%x, mask 0x%x\n", int_st.u32, chn_mask);

    /* Clean raw int. */
    hash_write(HASH_INT_RAW, int_raw.u32);

    return chn_mask;
}

static hi_u32 symc_done_test(hi_void)
{
    cipher_int_status status;

    status.u32 = symc_read(CIPHER_INT_STATUS);

    /* just process the valid channel. */
    status.bits.cipher_chn_obuf_int &= CIPHER_HARD_CHANNEL_MASK;

    return status.bits.cipher_chn_obuf_int; /* mask */
}

/* hash interrupt process function */
static CRYPTO_IRQRETURN_T hash_interrupt_isr(hi_s32 irq, hi_void *dev_id)
{
    hi_u32 mask, i;
    hash_hard_context *ctx = HI_NULL;

    crypto_unused(irq);
    crypto_unused(dev_id);

    mask = hash_done_notify();

    for (i = 0; i < CRYPTO_HARD_CHANNEL_MAX; i++) {
        if ((mask >> i) & 0x01) {
            ctx = (hash_hard_context *)g_hash_hard_channel[i].ctx;
            ctx->done = HI_TRUE;
            hi_log_debug("chn %u wake up\n", i);
            crypto_queue_wait_up(&ctx->queue);
        }
    }

    /* symc and hash share the same interrupt number,
     * so if a symc interrupt has occurred we return IRQ_NONE
     * to tell the system to continue processing the symc interrupt.
     */
    if (symc_done_test() != 0) {
        return IRQ_NONE;
    }

    return IRQ_HANDLED;
}

/* hash register interrupt process function */
static hi_s32 drv_hash_register_interrupt(hi_void)
{
    hi_s32 ret;
    hi_u32 int_valid = 0;
    hi_u32 int_num = 0;
    hi_u32 i;
    const char *name = HI_NULL;

    hi_log_func_enter();

    module_get_attr(CRYPTO_MODULE_ID_HASH, &int_valid, &int_num, &name);

    if (int_valid == HI_FALSE) {
        return HI_SUCCESS;
    }

    /* request irq */
    ret = crypto_request_irq(int_num, hash_interrupt_isr, name);
    if (ret != HI_SUCCESS) {
        hi_log_error("Irq request failure, irq = %u", int_num);
        hi_log_print_err_code(HI_ERR_CIPHER_REGISTER_IRQ);
        return HI_ERR_CIPHER_REGISTER_IRQ;
    }

    /* initialize queue list. */
    for (i = 0; i < CRYPTO_HARD_CHANNEL_MAX; i++) {
        crypto_queue_init(&((hash_hard_context *)g_hash_hard_channel[i].ctx)->queue);
    }

    hi_log_func_exit();
    return HI_SUCCESS;
}

/* hash unregister interrupt process function */
static hi_void drv_hash_unregister_interrupt(hi_void)
{
    hi_u32 int_valid = 0;
    hi_u32 int_num = 0;
    const char *name = HI_NULL;

    hi_log_func_enter();

    module_get_attr(CRYPTO_MODULE_ID_HASH, &int_valid, &int_num, &name);

    if (int_valid == HI_FALSE) {
        return;
    }

    /* free irq */
    hi_log_debug("hash free irq, num %u, name %s\n", int_num, name);
    crypto_free_irq(int_num, name);

    hi_log_func_exit();
}

/* set interrupt */
static hi_void hash_set_interrupt(hi_void)
{
    hi_u32 int_valid = 0;
    hi_u32 int_num = 0;
    const char *name = HI_NULL;
    hash_int_en int_en;
    hash_int_raw int_raw;

    hi_log_func_enter();

    module_get_attr(CRYPTO_MODULE_ID_HASH, &int_valid, &int_num, &name);

    int_en.u32 = hash_read(HASH_INT_EN);

    if (int_valid == HI_FALSE) {
        /* The top-level interrupt switch can only be enabled/disabled by the secure CPU. */
        int_en.bits.hash_int_en = 0;
        int_en.bits.hash_sec_int_en = 0;
        int_en.bits.hash_chn_oram_en &= ~HASH_HARD_CHANNEL_MASK;
    } else {
        /* The top-level interrupt switch can only be enabled/disabled by the secure CPU. */
        int_en.bits.hash_int_en = 1;
        int_en.bits.hash_sec_int_en = 1;
        int_en.bits.hash_chn_oram_en |= HASH_HARD_CHANNEL_MASK;
    }

    hash_write(HASH_INT_EN, int_en.u32);
    hi_log_info("HASH_INT_EN: 0x%x\n", int_en.u32);

    /* clear the raw interrupt:
     * a stale interrupt left over from earlier may trigger the system to
     * call the irq function before initialization is complete
     * when the interrupt is registered, which would cause a system abort.
     */
    int_raw.u32 = symc_read(HASH_INT_RAW);
    int_raw.bits.hash_chn_oram_raw &= HASH_HARD_CHANNEL_MASK; /* clear valid channels */
    symc_write(HASH_INT_RAW, int_raw.u32);

    hi_log_func_exit();

    return;
}

static hi_s32 drv_hash_wait_irq(hi_u32 chnnel_id)
{
    hi_s32 ret;
    hi_s32 err_code;
    hash_hard_context *ctx = HI_NULL;

    ctx = (hash_hard_context *)g_hash_hard_channel[chnnel_id].ctx;
    if (ctx == HI_NULL) {
        hi_log_error("hash hard ctx is null.\n");
        hi_log_print_err_code(HI_ERR_CIPHER_TIMEOUT);
        return HI_ERR_CIPHER_TIMEOUT;
    }

    ret = crypto_queue_wait_timeout(ctx->queue, &ctx->done, CRYPTO_TIME_OUT);
    if ((ret <= 0) && (ret != -ERESTARTSYS)) {
        hi_log_print_func_err(crypto_queue_wait_timeout, ret);
        err_code = drv_hash_get_err_code(chnnel_id);
        if (err_code != HI_SUCCESS) {
            hi_log_print_func_err(drv_hash_get_err_code, err_code);
        }
        hi_log_print_err_code(HI_ERR_CIPHER_TIMEOUT);
        return HI_ERR_CIPHER_TIMEOUT;
    }

    return HI_SUCCESS;
}

#endif

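/*
 * Note: hash_done_try() below is the polling fallback used when interrupts are
 * unavailable; it reads the raw interrupt register, masks off everything but the
 * queried channel's bit, and writes the value back to acknowledge it. This assumes
 * the raw register is write-1-to-clear, consistent with how hash_done_notify()
 * acknowledges interrupts above.
 */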
static hi_u32 hash_done_try(hi_u32 chn_num)
{
    hash_int_raw int_raw;
    hi_u32 chn_mask;

    int_raw.u32 = hash_read(HASH_INT_RAW);
    int_raw.bits.hash_chn_oram_raw &= 0x01 << chn_num;
    chn_mask = int_raw.bits.hash_chn_oram_raw;

    /* Clean raw int. */
    hash_write(HASH_INT_RAW, int_raw.u32);

    return chn_mask;
}

/* set hash entry */
static hi_void hash_set_entry(hi_u32 chn, compat_addr dma_addr, const hi_void *cpu_addr)
{
    hash_hard_context *ctx = (hash_hard_context *)g_hash_hard_channel[chn].ctx;
    chann_hash_int_node_cfg hash_in_cfg;

    /* set total num and start addr for hash in node. */
    hash_in_cfg.u32 = hash_read(chann_hash_in_node_cfg(chn));
    hash_in_cfg.bits.hash_in_node_total_num = HASH_MAX_DEPTH;
    hash_write(chann_hash_in_node_cfg(chn), hash_in_cfg.u32);
    hash_write(chann_hash_in_node_start_addr(chn), addr_l32(dma_addr));
    hash_write(chann_hash_in_node_start_high(chn), addr_h32(dma_addr));
    hi_log_info("chann_hash_in_node_cfg[0x%x]: \t0x%x, PHY: 0x%x, VIA %pK\n",
        chann_hash_in_node_cfg(chn), hash_in_cfg.u32, addr_l32(dma_addr), cpu_addr);

    ctx->entry_in = (struct hash_entry_in *)cpu_addr;
    ctx->id_in = hash_in_cfg.bits.hash_in_node_wptr;
}
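
/*
 * Note: hash_set_entry() seeds the software node index (id_in) from the hardware
 * write pointer read back from chann_hash_in_node_cfg, so the driver stays in step
 * with the hardware ring even when the node list is (re)configured after the
 * hardware has already been used, e.g. in drv_hash_resume().
 */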

/* set smmu */
static hi_void hash_smmu_bypass(hi_void)
{
#ifdef CRYPTO_SMMU_SUPPORT
    hash_in_smmu_en hash_in_smmu_en;

    hash_in_smmu_en.u32 = hash_read(HASH_IN_SMMU_EN);
    hash_in_smmu_en.bits.hash_in_chan_rd_dat_smmu_en |= HASH_HARD_CHANNEL_MASK >> 1;
    hash_in_smmu_en.bits.hash_in_chan_rd_node_smmu_en &= ~(HASH_HARD_CHANNEL_MASK >> 1);
    hash_write(HASH_IN_SMMU_EN, hash_in_smmu_en.u32);
    hi_log_info("HASH_IN_SMMU_EN[0x%x] : 0x%x\n", HASH_IN_SMMU_EN, hash_in_smmu_en.u32);
#endif
    return;
}

/* smmu set base address */
static hi_void drv_hash_smmu_base_addr(hi_void)
{
#ifdef CRYPTO_SMMU_SUPPORT
    hi_phys_addr_t err_raddr = 0;
    hi_phys_addr_t err_waddr = 0;
    hi_phys_addr_t table_addr = 0;

    /* get table base addr from system api */
    smmu_get_table_addr(&err_raddr, &err_waddr, &table_addr);

    if (crypto_is_sec_cpu()) {
        /* smmu page secure table addr. */
        hash_write(NORM_SMMU_START_ADDR, (hi_u32)table_addr);
        hi_log_info("NORM_SMMU_START_ADDR[0x%x] : 0x%x\n", NORM_SMMU_START_ADDR, (hi_u32)table_addr);
    } else {
        /* smmu page nonsecure table addr. */
        hash_write(SEC_SMMU_START_ADDR, (hi_u32)table_addr);
        hi_log_info("SEC_SMMU_START_ADDR[0x%x] : 0x%x\n", SEC_SMMU_START_ADDR, (hi_u32)table_addr);
    }
#endif
    return;
}

/* set secure channel;
 * the non-secure CPU can't change the value of SEC_CHN_CFG,
 * so calling this function from the non-secure CPU does nothing.
 */
static hi_void drv_hash_enable_secure(hi_void)
{
    sec_chn_cfg chn_cfg;

    chn_cfg.u32 = hash_read(SEC_CHN_CFG);
    chn_cfg.bits.hash_sec_chn_cfg |= HASH_HARD_CHANNEL_MASK;
    hash_write(SEC_CHN_CFG, chn_cfg.u32);
    hi_log_info("SEC_CHN_CFG[0x%x]: 0x%x\n", SEC_CHN_CFG, chn_cfg.u32);
}

static hi_void hash_entry_init(crypto_mem mem)
{
    hi_u32 i;
    compat_addr mmz_addr;
    hi_void *cpu_addr = HI_NULL;

    hi_log_info("hash entry list configure\n");
    addr_u64(mmz_addr) = addr_u64(mem.mmz_addr);
    cpu_addr = mem.dma_virt;
    for (i = 0; i < CRYPTO_HARD_CHANNEL_MAX; i++) {
        if ((HASH_HARD_CHANNEL_MASK >> i) & 0x01) { /* valid channel. */
            hash_set_entry(i, mmz_addr, cpu_addr);
            addr_u64(mmz_addr) += HASH_NODE_SIZE; /* move to next channel */
            cpu_addr = (hi_u8 *)cpu_addr + HASH_NODE_SIZE; /* move to next channel */
        }
    }
    return;
}
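
/*
 * Note: hash_entry_init() carves the single DMA allocation (g_hash_dma, of
 * HASH_NODE_LIST_SIZE = HASH_NODE_SIZE * HASH_HARD_CHANNEL_CNT bytes) into one
 * HASH_NODE_SIZE slice per valid hardware channel, advancing both the physical
 * and the virtual address by the same stride.
 */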

static hi_s32 drv_hash_cfg_init(hi_void)
{
    hi_s32 ret;

    hi_log_info("alloc memory for nodes list\n");
    ret = hash_mem_create(&g_hash_dma, SEC_MMZ, "hash_node_list", HASH_NODE_LIST_SIZE);
    if (ret != HI_SUCCESS) {
        hi_log_print_func_err(hash_mem_create, ret);
        return ret;
    }
    hi_log_info("HASH DMA buffer, MMU 0x%x, MMZ 0x%x, VIA %pK, size 0x%x\n", addr_l32(g_hash_dma.dma_addr),
        addr_l32(g_hash_dma.mmz_addr), g_hash_dma.dma_virt, g_hash_dma.dma_size);

    hi_log_info("hash entry list configure\n");
    hash_entry_init(g_hash_dma);

    hi_log_info("hash SMMU configure\n");
    hash_smmu_bypass();
    drv_hash_smmu_base_addr();

    hi_log_info("hash secure channel configure\n");
    drv_hash_enable_secure();
#ifdef CRYPTO_OS_INT_SUPPORT
    hi_log_info("hash interrupt configure\n");
    hash_set_interrupt();

    hi_log_info("hash register interrupt function\n");
    ret = drv_hash_register_interrupt();
    if (ret != HI_SUCCESS) {
        hi_s32 ret_error;

        hi_log_print_func_err(drv_hash_register_interrupt, ret);
        ret_error = hash_mem_destroy(&g_hash_dma);
        if (ret_error != HI_SUCCESS) {
            hi_log_print_func_err(hash_mem_destroy, ret_error);
        }
        return ret;
    }
#endif

    return HI_SUCCESS;
}

hi_s32 drv_hash_init(hi_void)
{
    hi_s32 ret;

    hi_log_func_enter();

    if (g_hash_initialize == HI_TRUE) {
        hi_log_func_exit();
        return HI_SUCCESS;
    }

    ret = crypto_channel_init(g_hash_hard_channel, CRYPTO_HARD_CHANNEL_MAX, sizeof(hash_hard_context));
    if (ret != HI_SUCCESS) {
        hi_log_print_func_err(crypto_channel_init, ret);
        return ret;
    }

    hi_log_info("enable hash\n");
    module_enable(CRYPTO_MODULE_ID_HASH);

    ret = drv_hash_cfg_init();
    if (ret != HI_SUCCESS) {
        hi_s32 ret_error;

        hi_log_print_func_err(drv_hash_cfg_init, ret);
        module_disable(CRYPTO_MODULE_ID_HASH);
        ret_error = crypto_channel_deinit(g_hash_hard_channel, CRYPTO_HARD_CHANNEL_MAX);
        if (ret_error != HI_SUCCESS) {
            hi_log_print_func_err(crypto_channel_deinit, ret_error);
        }
        return ret;
    }

    g_hash_initialize = HI_TRUE;
    hi_log_func_exit();
    return HI_SUCCESS;
}

hi_s32 drv_hash_deinit(hi_void)
{
    hi_s32 ret;
    hi_log_func_enter();

    if (g_hash_initialize == HI_FALSE) {
        hi_log_func_exit();
        return HI_SUCCESS;
    }

#ifdef CRYPTO_OS_INT_SUPPORT
    drv_hash_unregister_interrupt();
#endif

    ret = hash_mem_destroy(&g_hash_dma);
    if (ret != HI_SUCCESS) {
        hi_log_print_func_err(hash_mem_destroy, ret);
        return ret;
    }
    module_disable(CRYPTO_MODULE_ID_HASH);
    ret = crypto_channel_deinit(g_hash_hard_channel, CRYPTO_HARD_CHANNEL_MAX);
    if (ret != HI_SUCCESS) {
        hi_log_print_func_err(crypto_channel_deinit, ret);
        return ret;
    }

    g_hash_initialize = HI_FALSE;

    hi_log_func_exit();
    return HI_SUCCESS;
}

hi_void drv_hash_resume(hi_void)
{
    hi_log_func_enter();

    hi_log_info("enable hash\n");
    module_enable(CRYPTO_MODULE_ID_HASH);

    hi_log_info("hash entry list configure\n");
    hash_entry_init(g_hash_dma);

#ifdef CRYPTO_OS_INT_SUPPORT
    hi_log_info("hash interrupt configure\n");
    hash_set_interrupt();
#endif

    hi_log_info("hash SMMU configure\n");
    hash_smmu_bypass();
    drv_hash_smmu_base_addr();

    hi_log_info("hash secure channel configure\n");
    drv_hash_enable_secure();

    hi_log_func_exit();

    return;
}

hi_void drv_hash_suspend(hi_void)
{
    hi_log_func_enter();
    hi_log_func_exit();

    return;
}

static hi_s32 drv_hash_query_raw_interrupt(hi_u32 chnnel_id)
{
    hi_s32 i;

    /* interrupt unsupported, query the raw interrupt flag. */
    for (i = 0; i < CRYPTO_TIME_OUT; i++) {
        if (hash_done_try(chnnel_id)) {
            break;
        }

        if (i <= MS_TO_US) {
            crypto_udelay(1); /* short wait: busy-delay 1 us per iteration */
        } else {
            crypto_msleep(1); /* long wait: sleep 1 ms per iteration */
        }
    }

    if (i >= CRYPTO_TIME_OUT) {
        hi_s32 err_code;

        hi_log_error("hash wait done timeout, chn=%u\n", chnnel_id);

        err_code = drv_hash_get_err_code(chnnel_id);
        if (err_code != HI_SUCCESS) {
            hi_log_print_func_err(drv_hash_get_err_code, err_code);
        }

        hi_log_print_err_code(HI_ERR_CIPHER_TIMEOUT);
        return HI_ERR_CIPHER_TIMEOUT;
    }

    return HI_SUCCESS;
}

/* wait hash ready */
static hi_s32 drv_hash_wait_ready(hi_u32 chn_num)
{
    hi_s32 ret;
    hi_u32 int_valid = 0;
    hi_u32 int_num = 0;

    hi_log_func_enter();

    hi_log_chk_param_return(chn_num >= CRYPTO_HARD_CHANNEL_MAX);

    module_get_attr(CRYPTO_MODULE_ID_HASH, &int_valid, &int_num, HI_NULL);

#ifdef CRYPTO_OS_INT_SUPPORT
    /* interrupt support, wait irq. */
    if (int_valid) {
        ret = drv_hash_wait_irq(chn_num);
        if (ret != HI_SUCCESS) {
            hi_log_print_func_err(drv_hash_wait_irq, ret);
            return ret;
        }
    } else {
        ret = drv_hash_query_raw_interrupt(chn_num);
        if (ret != HI_SUCCESS) {
            hi_log_print_func_err(drv_hash_query_raw_interrupt, ret);
            return ret;
        }
    }
#else
    ret = drv_hash_query_raw_interrupt(chn_num);
    if (ret != HI_SUCCESS) {
        hi_log_print_func_err(drv_hash_query_raw_interrupt, ret);
        return ret;
    }
#endif

    hi_log_func_exit();
    return HI_SUCCESS;
}

static hi_void hash_addbuf(hi_u32 chn_num, compat_addr buf_phy, hi_u32 buf_size)
{
    hash_hard_context *ctx = HI_NULL;
    hi_u32 id, size;
    hi_void *addr = HI_NULL;

    ctx = (hash_hard_context *)g_hash_hard_channel[chn_num].ctx;

    /* clean in entry */
    id = ctx->id_in++;
    addr = &ctx->entry_in[id];
    size = sizeof(struct hash_entry_in);

    (hi_void)memset_s(addr, size, 0, size);

    /* set addr and length */
    ctx->entry_in[id].spacc_cmd = 0x00;
    ctx->entry_in[id].hash_start_addr = addr_l32(buf_phy);
    ctx->entry_in[id].word1 = addr_h32(buf_phy);
    ctx->entry_in[id].hash_alg_length = buf_size;
    ctx->entry_in[id].hash_ctrl = HASH_CTRL_HASH_IN_FIRST | HASH_CTRL_HASH_IN_LAST;
    ctx->id_in %= HASH_MAX_DEPTH;
    hi_log_info("add digest in buf: id %u, addr 0x%x, len 0x%x\n", id, addr_l32(buf_phy), buf_size);
}
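
/*
 * Note: hash_addbuf() fills one complete in node per call: the low 32 bits of the DMA
 * address go into hash_start_addr and the high 32 bits are carried in word1 (marked
 * reserved in the node layout above), the node is flagged as both first and last, and
 * the software index id_in wraps modulo HASH_MAX_DEPTH to follow the hardware ring.
 */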

hi_s32 drv_hash_cfg(hi_u32 chn_num, hash_mode mode, const hi_u32 state[HASH_RESULT_MAX_SIZE_IN_WORD])
{
    hash_hard_context *ctx = HI_NULL;
    chann_hash_ctrl hash_ctrl;
    hi_u32 i;

    hi_log_chk_param_return(g_hash_initialize != HI_TRUE);
    hi_log_chk_param_return(((HASH_HARD_CHANNEL_MASK >> chn_num) & 0x01) == 0);

    ctx = (hash_hard_context *)g_hash_hard_channel[chn_num].ctx;
    ctx->hash_alg = mode;

    /* Control */
    hash_ctrl.u32 = hash_read(chann_hash_ctrl(chn_num));
    hash_ctrl.bits.hash_chn_mode = 0;
    hash_ctrl.bits.hash_chn_agl_sel = mode;
    hash_write(chann_hash_ctrl(chn_num), hash_ctrl.u32);
    hi_log_info("CTRL: 0x%X\n", hash_ctrl.u32);

    /* Write last state. */
    for (i = 0; i < HASH_RESULT_MAX_SIZE_IN_WORD; i++) {
        hash_write(chann_hash_state_val_addr(chn_num), i);
        hash_write(chann_hash_state_val(chn_num), state[i]);
    }
    hi_log_info("state[0]: 0x%x\n", state[0]);

    return HI_SUCCESS;
}
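
/*
 * Note: drv_hash_cfg() loads the channel's starting digest word by word through the
 * indirect chann_hash_state_val_addr/chann_hash_state_val register pair. The caller is
 * expected to pass either the algorithm's standard initial value for a fresh calculation
 * or a previously saved intermediate state; this file does not define those values itself.
 */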

hi_s32 drv_hash_start(hi_u32 chn_num, const crypto_mem *mem, hi_u32 length)
{
    chann_hash_int_node_cfg in_node_cfg;
    hash_hard_context *ctx = HI_NULL;
    hi_u32 ptr;
    crypto_mem *hash_dma_ctx = HI_NULL;

    hi_log_func_enter();

    hi_log_chk_param_return(g_hash_initialize != HI_TRUE);
    hi_log_chk_param_return(((HASH_HARD_CHANNEL_MASK >> chn_num) & 0x01) == 0);

    ctx = (hash_hard_context *)g_hash_hard_channel[chn_num].ctx;

    if (length == 0) {
        return HI_SUCCESS;
    }

    ctx->done = HI_FALSE;

    /* set message addr and length */
    hash_addbuf(chn_num, mem->dma_addr, length);

    /* configure the in node, only compute one node */
    in_node_cfg.u32 = hash_read(chann_hash_in_node_cfg(chn_num));
    ptr = in_node_cfg.bits.hash_in_node_wptr + 1;
    in_node_cfg.bits.hash_in_node_wptr = ptr % HASH_MAX_DEPTH;
    in_node_cfg.bits.hash_in_node_mpackage_int_level = 1;

    /* flush cache of the hash message buffer and the hash node list buffer. */
    crypto_cpuc_flush_dcache_area(mem->dma_virt, length);
    hash_dma_ctx = &g_hash_dma;
    crypto_cpuc_flush_dcache_area(hash_dma_ctx->dma_virt, HASH_NODE_LIST_SIZE);

    /* Start */
    hash_write(chann_hash_in_node_cfg(chn_num), in_node_cfg.u32);
    hi_log_info("chann_hash_in_node_cfg: 0x%x\n", in_node_cfg.u32);

    hi_log_func_exit();
    return HI_SUCCESS;
}

hi_s32 drv_hash_wait_done(hi_u32 chn_num, hi_u32 *state)
{
    hi_u32 i;
    hi_s32 ret;

    hi_log_func_enter();

    hi_log_chk_param_return(state == HI_NULL);
    hi_log_chk_param_return(g_hash_initialize != HI_TRUE);
    hi_log_chk_param_return(((HASH_HARD_CHANNEL_MASK >> chn_num) & 0x01) == 0);

    ret = drv_hash_wait_ready(chn_num);
    if (ret != HI_SUCCESS) {
        hi_log_print_func_err(drv_hash_wait_ready, ret);
        return ret;
    }

    /* read hash result */
    for (i = 0; i < HASH_RESULT_MAX_SIZE_IN_WORD; i++) {
        hash_write(chann_hash_state_val_addr(chn_num), i);
        state[i] = hash_read(chann_hash_state_val(chn_num));
    }
    hi_log_debug("digest[0]: 0x%x\n", state[0]);

    hi_log_func_exit();
    return HI_SUCCESS;
}
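
/*
 * Illustrative calling sequence implied by this file (a sketch only; the channel number,
 * the hash_mode value, and the crypto_mem holding the message are assumed to be prepared
 * by the caller, e.g. a higher-level hash layer):
 *
 *     hi_u32 state[HASH_RESULT_MAX_SIZE_IN_WORD];  // initial value in, digest state out
 *
 *     drv_hash_init();
 *     drv_hash_cfg(chn, mode, state);              // load initial/saved state
 *     drv_hash_start(chn, &msg_mem, msg_len);      // queue one in node and kick off
 *     drv_hash_wait_done(chn, state);              // wait and read back the digest state
 *     drv_hash_deinit();
 */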

hi_void drv_hash_reset(hi_u32 chn_num)
{
    crypto_unused(chn_num);
}

hi_void drv_hash_get_capacity(hash_capacity *capacity)
{
    (hi_void)memset_s(capacity, sizeof(hash_capacity), 0, sizeof(hash_capacity));

    capacity->sha1 = CRYPTO_CAPACITY_SUPPORT;
    capacity->sha224 = CRYPTO_CAPACITY_SUPPORT;
    capacity->sha256 = CRYPTO_CAPACITY_SUPPORT;
    capacity->sha384 = CRYPTO_CAPACITY_SUPPORT;
    capacity->sha512 = CRYPTO_CAPACITY_SUPPORT;
    capacity->sm3 = CRYPTO_CAPACITY_SUPPORT;

    return;
}
#endif /* End of CHIP_HASH_VER_V200 */