1 /*
2 * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
3 * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
4 *
5 * SPDX-License-Identifier: BSD-3-Clause
6 */
7
8 #include <arch_helpers.h>
9 #include <assert.h>
10 #include <common/debug.h>
11 #include <drivers/delay_timer.h>
12 #include <errno.h>
13 #include <lib/mmio.h>
14 #include <lib/psci/psci.h>
15 #include <se_private.h>
16 #include <security_engine.h>
17 #include <tegra_platform.h>
18
19 /*******************************************************************************
20 * Constants and Macros
21 ******************************************************************************/
22
23 #define TIMEOUT_100MS 100U /* Timeout in 100ms */
24 #define RNG_AES_KEY_INDEX 1
25
26 /*******************************************************************************
27 * Data structure and global variables
28 ******************************************************************************/
29
30 /* The security engine contexts are formatted as follows:
31 *
32 * SE1 CONTEXT:
33 * #--------------------------------#
34 * | Random Data 1 Block |
35 * #--------------------------------#
36 * | Sticky Bits 2 Blocks |
37 * #--------------------------------#
38 * | Key Table 64 Blocks |
39 * | For each Key (x16): |
40 * | Key: 2 Blocks |
41 * | Original-IV: 1 Block |
42 * | Updated-IV: 1 Block |
43 * #--------------------------------#
44 * | RSA Keys 64 Blocks |
45 * #--------------------------------#
46 * | Known Pattern 1 Block |
47 * #--------------------------------#
48 *
49 * SE2/PKA1 CONTEXT:
50 * #--------------------------------#
51 * | Random Data 1 Block |
52 * #--------------------------------#
53 * | Sticky Bits 2 Blocks |
54 * #--------------------------------#
55 * | Key Table 64 Blocks |
56 * | For each Key (x16): |
57 * | Key: 2 Blocks |
58 * | Original-IV: 1 Block |
59 * | Updated-IV: 1 Block |
60 * #--------------------------------#
61 * | RSA Keys 64 Blocks |
62 * #--------------------------------#
63 * | PKA sticky bits 1 Block |
64 * #--------------------------------#
65 * | PKA keys 512 Blocks |
66 * #--------------------------------#
67 * | Known Pattern 1 Block |
68 * #--------------------------------#
69 */
70
71 /* Known pattern data for T210 */
72 static const uint8_t se_ctx_known_pattern_data[SE_CTX_KNOWN_PATTERN_SIZE] = {
73 /* 128 bit AES block */
74 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
75 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
76 };
77
78 /* SE input and output linked list buffers */
79 static tegra_se_io_lst_t se1_src_ll_buf;
80 static tegra_se_io_lst_t se1_dst_ll_buf;
81
82 /* SE2 input and output linked list buffers */
83 static tegra_se_io_lst_t se2_src_ll_buf;
84 static tegra_se_io_lst_t se2_dst_ll_buf;
85
86 /* SE1 context buffer, 132 blocks */
87 static __aligned(64) uint8_t se1_ctx_buf[SE_CTX_DRBG_BUFER_SIZE];
88
89 /* SE1 security engine device handle */
90 static tegra_se_dev_t se_dev_1 = {
91 .se_num = 1,
92 /* Setup base address for se */
93 .se_base = TEGRA_SE1_BASE,
94 /* Setup context size in AES blocks */
95 .ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE1,
96 /* Setup SRC buffers for SE operations */
97 .src_ll_buf = &se1_src_ll_buf,
98 /* Setup DST buffers for SE operations */
99 .dst_ll_buf = &se1_dst_ll_buf,
100 /* Setup context save destination */
101 .ctx_save_buf = (uint32_t *)&se1_ctx_buf
102 };
103
104 /* SE2 security engine device handle (T210B01 only) */
105 static tegra_se_dev_t se_dev_2 = {
106 .se_num = 2,
107 /* Setup base address for se */
108 .se_base = TEGRA_SE2_BASE,
109 /* Setup context size in AES blocks */
110 .ctx_size_blks = SE_CTX_SAVE_SIZE_BLOCKS_SE2,
111 /* Setup SRC buffers for SE operations */
112 .src_ll_buf = &se2_src_ll_buf,
113 /* Setup DST buffers for SE operations */
114 .dst_ll_buf = &se2_dst_ll_buf,
115 /* Setup context save destination */
116 .ctx_save_buf = (uint32_t *)(TEGRA_TZRAM_CARVEOUT_BASE + 0x1000)
117 };
118
119 static bool ecid_valid;
120
121 /*******************************************************************************
122 * Functions Definition
123 ******************************************************************************/
124
tegra_se_make_data_coherent(const tegra_se_dev_t * se_dev)125 static void tegra_se_make_data_coherent(const tegra_se_dev_t *se_dev)
126 {
127 flush_dcache_range(((uint64_t)(se_dev->src_ll_buf)),
128 sizeof(tegra_se_io_lst_t));
129 flush_dcache_range(((uint64_t)(se_dev->dst_ll_buf)),
130 sizeof(tegra_se_io_lst_t));
131 }
132
133 /*
134 * Check that SE operation has completed after kickoff
135 * This function is invoked after an SE operation has been started,
136 * and it checks the following conditions:
137 * 1. SE_INT_STATUS = SE_OP_DONE
138 * 2. SE_STATUS = IDLE
139 * 3. AHB bus data transfer complete.
140 * 4. SE_ERR_STATUS is clean.
141 */
static int32_t tegra_se_operation_complete(const tegra_se_dev_t *se_dev)
{
	uint32_t val = 0;
	int32_t ret = 0;
	uint32_t timeout;

	/* Poll the SE interrupt register to ensure H/W operation complete.
	 * Each failed poll waits 1ms, so TIMEOUT_100MS iterations ~= 100ms.
	 */
	val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
	for (timeout = 0; (SE_INT_OP_DONE(val) == SE_INT_OP_DONE_CLEAR) &&
			(timeout < TIMEOUT_100MS); timeout++) {
		mdelay(1);
		val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
	}

	if (timeout == TIMEOUT_100MS) {
		ERROR("%s: ERR: Atomic context save operation timeout!\n",
				__func__);
		ret = -ETIMEDOUT;
	}

	/* Poll the SE status idle to ensure H/W operation complete
	 * (SE_STATUS reads 0 when the engine is idle).
	 */
	if (ret == 0) {
		val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
		for (timeout = 0; (val != 0U) && (timeout < TIMEOUT_100MS);
				timeout++) {
			mdelay(1);
			val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
		}

		if (timeout == TIMEOUT_100MS) {
			ERROR("%s: ERR: MEM_INTERFACE and SE state "
					"idle state timeout.\n", __func__);
			ret = -ETIMEDOUT;
		}
	}

	/* Check AHB bus transfer complete: wait until neither SE master
	 * still has writes pending in the AHB arbiter write queue.
	 */
	if (ret == 0) {
		val = mmio_read_32(TEGRA_AHB_ARB_BASE + ARAHB_MEM_WRQUE_MST_ID_OFFSET);
		for (timeout = 0; ((val & (ARAHB_MST_ID_SE_MASK | ARAHB_MST_ID_SE2_MASK)) != 0U) &&
				(timeout < TIMEOUT_100MS); timeout++) {
			mdelay(1);
			val = mmio_read_32(TEGRA_AHB_ARB_BASE + ARAHB_MEM_WRQUE_MST_ID_OFFSET);
		}

		if (timeout == TIMEOUT_100MS) {
			ERROR("%s: SE write over AHB timeout.\n", __func__);
			ret = -ETIMEDOUT;
		}
	}

	/* Ensure that no errors are thrown during operation */
	if (ret == 0) {
		val = tegra_se_read_32(se_dev, SE_ERR_STATUS_REG_OFFSET);
		if (val != 0U) {
			ERROR("%s: error during SE operation! 0x%x", __func__, val);
			ret = -ENOTSUP;
		}
	}

	return ret;
}
204
205 /*
206 * Wait for SE engine to be idle and clear pending interrupts before
207 * starting the next SE operation.
208 */
tegra_se_operation_prepare(const tegra_se_dev_t * se_dev)209 static int32_t tegra_se_operation_prepare(const tegra_se_dev_t *se_dev)
210 {
211 int32_t ret = 0;
212 uint32_t val = 0;
213 uint32_t timeout;
214
215 /* disable SE interrupt to prevent interrupt issued by SE operation */
216 tegra_se_write_32(se_dev, SE_INT_ENABLE_REG_OFFSET, 0U);
217
218 /* Wait for previous operation to finish */
219 val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
220 for (timeout = 0; (val != 0U) && (timeout < TIMEOUT_100MS); timeout++) {
221 mdelay(1);
222 val = tegra_se_read_32(se_dev, SE_STATUS_OFFSET);
223 }
224
225 if (timeout == TIMEOUT_100MS) {
226 ERROR("%s: ERR: SE status is not idle!\n", __func__);
227 ret = -ETIMEDOUT;
228 }
229
230 /* Clear any pending interrupts from previous operation */
231 val = tegra_se_read_32(se_dev, SE_INT_STATUS_REG_OFFSET);
232 tegra_se_write_32(se_dev, SE_INT_STATUS_REG_OFFSET, val);
233 return ret;
234 }
235
/*
 * SE atomic context save. At SC7 entry, the SE driver triggers the
 * hardware to perform the context save operation automatically.
 */
tegra_se_context_save_atomic(const tegra_se_dev_t * se_dev)240 static int32_t tegra_se_context_save_atomic(const tegra_se_dev_t *se_dev)
241 {
242 int32_t ret = 0;
243 uint32_t val = 0;
244 uint32_t blk_count_limit = 0;
245 uint32_t block_count;
246
247 /* Check that previous operation is finalized */
248 ret = tegra_se_operation_prepare(se_dev);
249
250 /* Read the context save progress counter: block_count
251 * Ensure no previous context save has been triggered
252 * SE_CTX_SAVE_AUTO.CURR_CNT == 0
253 */
254 if (ret == 0) {
255 val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
256 block_count = SE_CTX_SAVE_GET_BLK_COUNT(val);
257 if (block_count != 0U) {
258 ERROR("%s: ctx_save triggered multiple times\n",
259 __func__);
260 ret = -EALREADY;
261 }
262 }
263
264 /* Set the destination block count when the context save complete */
265 if (ret == 0) {
266 blk_count_limit = block_count + se_dev->ctx_size_blks;
267 }
268
269 /* Program SE_CONFIG register as for RNG operation
270 * SE_CONFIG.ENC_ALG = RNG
271 * SE_CONFIG.DEC_ALG = NOP
272 * SE_CONFIG.ENC_MODE is ignored
273 * SE_CONFIG.DEC_MODE is ignored
274 * SE_CONFIG.DST = MEMORY
275 */
276 if (ret == 0) {
277 val = (SE_CONFIG_ENC_ALG_RNG |
278 SE_CONFIG_DEC_ALG_NOP |
279 SE_CONFIG_DST_MEMORY);
280 tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);
281
282 tegra_se_make_data_coherent(se_dev);
283
284 /* SE_CTX_SAVE operation */
285 tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET,
286 SE_OP_CTX_SAVE);
287
288 ret = tegra_se_operation_complete(se_dev);
289 }
290
291 /* Check that context has written the correct number of blocks */
292 if (ret == 0) {
293 val = tegra_se_read_32(se_dev, SE_CTX_SAVE_AUTO_REG_OFFSET);
294 if (SE_CTX_SAVE_GET_BLK_COUNT(val) != blk_count_limit) {
295 ERROR("%s: expected %d blocks but %d were written\n",
296 __func__, blk_count_limit, val);
297 ret = -ECANCELED;
298 }
299 }
300
301 return ret;
302 }
303
304 /*
305 * Security engine primitive operations, including normal operation
306 * and the context save operation.
307 */
tegra_se_perform_operation(const tegra_se_dev_t * se_dev,uint32_t nbytes,bool context_save)308 static int tegra_se_perform_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes,
309 bool context_save)
310 {
311 uint32_t nblocks = nbytes / TEGRA_SE_AES_BLOCK_SIZE;
312 int ret = 0;
313
314 assert(se_dev);
315
316 /* Use device buffers for in and out */
317 tegra_se_write_32(se_dev, SE_OUT_LL_ADDR_REG_OFFSET, ((uint64_t)(se_dev->dst_ll_buf)));
318 tegra_se_write_32(se_dev, SE_IN_LL_ADDR_REG_OFFSET, ((uint64_t)(se_dev->src_ll_buf)));
319
320 /* Check that previous operation is finalized */
321 ret = tegra_se_operation_prepare(se_dev);
322 if (ret != 0) {
323 goto op_error;
324 }
325
326 /* Program SE operation size */
327 if (nblocks) {
328 tegra_se_write_32(se_dev, SE_BLOCK_COUNT_REG_OFFSET, nblocks - 1);
329 }
330
331 /* Make SE LL data coherent before the SE operation */
332 tegra_se_make_data_coherent(se_dev);
333
334 /* Start hardware operation */
335 if (context_save)
336 tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET, SE_OP_CTX_SAVE);
337 else
338 tegra_se_write_32(se_dev, SE_OPERATION_REG_OFFSET, SE_OP_START);
339
340 /* Wait for operation to finish */
341 ret = tegra_se_operation_complete(se_dev);
342
343 op_error:
344 return ret;
345 }
346
347 /*
348 * Normal security engine operations other than the context save
349 */
int tegra_se_start_normal_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes)
{
	/* Non-context-save variant: SE_OPERATION.OP = START */
	return tegra_se_perform_operation(se_dev, nbytes, false);
}
354
355 /*
356 * Security engine context save operation
357 */
int tegra_se_start_ctx_save_operation(const tegra_se_dev_t *se_dev, uint32_t nbytes)
{
	/* Context-save variant: SE_OPERATION.OP = CTX_SAVE */
	return tegra_se_perform_operation(se_dev, nbytes, true);
}
362
/*
 * Security Engine sequence to generate the SRK.
 * SE and SE2 generate different SRKs because they are seeded
 * from different entropy sources.
 */
tegra_se_generate_srk(const tegra_se_dev_t * se_dev)368 static int tegra_se_generate_srk(const tegra_se_dev_t *se_dev)
369 {
370 int ret = PSCI_E_INTERN_FAIL;
371 uint32_t val;
372
373 /* Confgure the following hardware register settings:
374 * SE_CONFIG.DEC_ALG = NOP
375 * SE_CONFIG.ENC_ALG = RNG
376 * SE_CONFIG.DST = SRK
377 * SE_OPERATION.OP = START
378 * SE_CRYPTO_LAST_BLOCK = 0
379 */
380 se_dev->src_ll_buf->last_buff_num = 0;
381 se_dev->dst_ll_buf->last_buff_num = 0;
382
383 /* Configure random number generator */
384 if (ecid_valid)
385 val = (DRBG_MODE_FORCE_INSTANTION | DRBG_SRC_ENTROPY);
386 else
387 val = (DRBG_MODE_FORCE_RESEED | DRBG_SRC_ENTROPY);
388 tegra_se_write_32(se_dev, SE_RNG_CONFIG_REG_OFFSET, val);
389
390 /* Configure output destination = SRK */
391 val = (SE_CONFIG_ENC_ALG_RNG |
392 SE_CONFIG_DEC_ALG_NOP |
393 SE_CONFIG_DST_SRK);
394 tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);
395
396 /* Perform hardware operation */
397 ret = tegra_se_start_normal_operation(se_dev, 0);
398
399 return ret;
400 }
401
402 /*
403 * Generate plain text random data to some memory location using
404 * SE/SE2's SP800-90 random number generator. The random data size
405 * must be some multiple of the AES block size (16 bytes).
406 */
static int tegra_se_lp_generate_random_data(tegra_se_dev_t *se_dev)
{
	int ret = 0;
	uint32_t val;

	/* Destination: the rand_data field of the context save buffer */
	se_dev->dst_ll_buf->last_buff_num = 0;
	if (!se_dev->ctx_save_buf) {
		ERROR("%s: ERR: context save buffer NULL pointer!\n", __func__);
		return PSCI_E_NOT_PRESENT;
	}
	se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(((tegra_se_context_t *)
					se_dev->ctx_save_buf)->rand_data)));
	se_dev->dst_ll_buf->buffer[0].data_len = SE_CTX_SAVE_RANDOM_DATA_SIZE;

	/* Configure the following hardware register settings:
	 * SE_CONFIG.DEC_ALG = NOP
	 * SE_CONFIG.ENC_ALG = RNG
	 * SE_CONFIG.ENC_MODE = KEY192
	 * SE_CONFIG.DST = MEMORY
	 */
	val = (SE_CONFIG_ENC_ALG_RNG |
		SE_CONFIG_DEC_ALG_NOP |
		SE_CONFIG_ENC_MODE_KEY192 |
		SE_CONFIG_DST_MEMORY);
	tegra_se_write_32(se_dev, SE_CONFIG_REG_OFFSET, val);

	/* Program the RNG options in SE_CRYPTO_CONFIG as follows:
	 * XOR_POS = BYPASS
	 * INPUT_SEL = RANDOM (Entropy or LFSR)
	 * HASH_ENB = DISABLE
	 */
	val = (SE_CRYPTO_INPUT_RANDOM |
		SE_CRYPTO_XOR_BYPASS |
		SE_CRYPTO_CORE_ENCRYPT |
		SE_CRYPTO_HASH_DISABLE |
		SE_CRYPTO_KEY_INDEX(RNG_AES_KEY_INDEX) |
		SE_CRYPTO_IV_ORIGINAL);
	tegra_se_write_32(se_dev, SE_CRYPTO_REG_OFFSET, val);

	/* Configure RNG mode: force DRBG instantiation when ECID is
	 * valid, force a reseed otherwise; LFSR is the DRBG source here
	 * (the SRK path above uses hardware entropy instead).
	 */
	if (ecid_valid)
		val = (DRBG_MODE_FORCE_INSTANTION | DRBG_SRC_LFSR);
	else
		val = (DRBG_MODE_FORCE_RESEED | DRBG_SRC_LFSR);
	tegra_se_write_32(se_dev, SE_RNG_CONFIG_REG_OFFSET, val);

	/* SE normal operation */
	ret = tegra_se_start_normal_operation(se_dev, SE_CTX_SAVE_RANDOM_DATA_SIZE);

	return ret;
}
460
461 /*
462 * Encrypt memory blocks with SRK as part of the security engine context.
463 * The data blocks include: random data and the known pattern data, where
464 * the random data is the first block and known pattern is the last block.
465 */
static int tegra_se_lp_data_context_save(tegra_se_dev_t *se_dev,
		uint64_t src_addr, uint64_t dst_addr, uint32_t data_size)
{
	tegra_se_io_lst_t *src = se_dev->src_ll_buf;
	tegra_se_io_lst_t *dst = se_dev->dst_ll_buf;

	/* Single-entry linked lists covering the source and destination */
	src->last_buff_num = 0;
	src->buffer[0].addr = src_addr;
	src->buffer[0].data_len = data_size;
	dst->last_buff_num = 0;
	dst->buffer[0].addr = dst_addr;
	dst->buffer[0].data_len = data_size;

	/*
	 * With the context-save source routed to MEMORY, the CTX_SAVE
	 * operation encrypts the source memory data with the SRK.
	 */
	tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, SE_CTX_SAVE_SRC_MEM);

	return tegra_se_start_ctx_save_operation(se_dev, data_size);
}
487
488 /*
489 * Context save the key table access control sticky bits and
490 * security status of each key-slot. The encrypted sticky-bits are
491 * 32 bytes (2 AES blocks) and formatted as the following structure:
492 * { bit in registers bit in context save
493 * SECURITY_0[4] 158
 * SE_RSA_KEYTABLE_ACCESS_1[2:0]		157:155
 * SE_RSA_KEYTABLE_ACCESS_0[2:0]		154:152
496 * SE_RSA_SECURITY_PERKEY_0[1:0] 151:150
497 * SE_CRYPTO_KEYTABLE_ACCESS_15[7:0] 149:142
498 * ...,
499 * SE_CRYPTO_KEYTABLE_ACCESS_0[7:0] 29:22
500 * SE_CRYPTO_SECURITY_PERKEY_0[15:0] 21:6
501 * SE_TZRAM_SECURITY_0[1:0] 5:4
502 * SE_SECURITY_0[16] 3:3
503 * SE_SECURITY_0[2:0] } 2:0
504 */
static int tegra_se_lp_sticky_bits_context_save(tegra_se_dev_t *se_dev)
{
	int ret = PSCI_E_INTERN_FAIL;
	int quad;

	se_dev->dst_ll_buf->last_buff_num = 0;
	if (se_dev->ctx_save_buf == NULL) {
		ERROR("%s: ERR: context save buffer NULL pointer!\n", __func__);
		return PSCI_E_NOT_PRESENT;
	}
	se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(((tegra_se_context_t *)
					se_dev->ctx_save_buf)->sticky_bits)));
	se_dev->dst_ll_buf->buffer[0].data_len = SE_CTX_SAVE_STICKY_BITS_SIZE;

	/*
	 * The sticky bits occupy two AES blocks: word-quad 0 carries
	 * context bytes 1-16 (words 0-3), word-quad 1 carries bytes
	 * 17-32 (words 4-7). Save them one block at a time.
	 */
	for (quad = 0; quad < 2; quad++) {
		tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET,
				SE_CTX_SAVE_SRC_STICKY_BITS |
				SE_CTX_SAVE_STICKY_WORD_QUAD(quad));

		/* SE context save operation */
		ret = tegra_se_start_ctx_save_operation(se_dev,
				SE_CTX_SAVE_STICKY_BITS_SIZE);
		if (ret != 0) {
			break;
		}
		se_dev->dst_ll_buf->buffer[0].addr += SE_CTX_SAVE_STICKY_BITS_SIZE;
	}

	return ret;
}
538
static int tegra_se_aeskeytable_context_save(tegra_se_dev_t *se_dev)
{
	uint32_t val = 0;
	int ret = 0;

	se_dev->dst_ll_buf->last_buff_num = 0;
	if (!se_dev->ctx_save_buf) {
		ERROR("%s: ERR: context save buffer NULL pointer!\n", __func__);
		ret = -EINVAL;
		goto aes_keytable_save_err;
	}

	/* AES key context save: for each slot, the key (2 word quads),
	 * then the original IV, then the updated IV — matching the
	 * per-key layout described in the context diagram above.
	 */
	for (int slot = 0; slot < TEGRA_SE_AES_KEYSLOT_COUNT; slot++) {
		se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
						((tegra_se_context_t *)se_dev->
						ctx_save_buf)->key_slots[slot].key)));
		se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_KEY_128_SIZE;
		/* Key material is saved 128 bits (one word quad) at a time */
		for (int i = 0; i < 2; i++) {
			val = SE_CTX_SAVE_SRC_AES_KEYTABLE |
				SE_CTX_SAVE_KEY_INDEX(slot) |
				SE_CTX_SAVE_WORD_QUAD(i);
			tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);

			/* SE context save operation */
			ret = tegra_se_start_ctx_save_operation(se_dev,
					TEGRA_SE_KEY_128_SIZE);
			if (ret) {
				ERROR("%s: ERR: AES key CTX_SAVE OP failed, "
						"slot=%d, word_quad=%d.\n",
						__func__, slot, i);
				goto aes_keytable_save_err;
			}
			/* Advance destination to the next 128-bit chunk */
			se_dev->dst_ll_buf->buffer[0].addr += TEGRA_SE_KEY_128_SIZE;
		}

		/* OIV (original IV) context save, 1 AES block */
		se_dev->dst_ll_buf->last_buff_num = 0;
		se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
						((tegra_se_context_t *)se_dev->
						ctx_save_buf)->key_slots[slot].oiv)));
		se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_AES_IV_SIZE;

		val = SE_CTX_SAVE_SRC_AES_KEYTABLE |
			SE_CTX_SAVE_KEY_INDEX(slot) |
			SE_CTX_SAVE_WORD_QUAD_ORIG_IV;
		tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);

		/* SE context save operation */
		ret = tegra_se_start_ctx_save_operation(se_dev, TEGRA_SE_AES_IV_SIZE);
		if (ret) {
			ERROR("%s: ERR: OIV CTX_SAVE OP failed, slot=%d.\n",
					__func__, slot);
			goto aes_keytable_save_err;
		}

		/* UIV (updated IV) context save, 1 AES block */
		se_dev->dst_ll_buf->last_buff_num = 0;
		se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
						((tegra_se_context_t *)se_dev->
						ctx_save_buf)->key_slots[slot].uiv)));
		se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_AES_IV_SIZE;

		val = SE_CTX_SAVE_SRC_AES_KEYTABLE |
			SE_CTX_SAVE_KEY_INDEX(slot) |
			SE_CTX_SAVE_WORD_QUAD_UPD_IV;
		tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET, val);

		/* SE context save operation */
		ret = tegra_se_start_ctx_save_operation(se_dev, TEGRA_SE_AES_IV_SIZE);
		if (ret) {
			ERROR("%s: ERR: UIV CTX_SAVE OP failed, slot=%d\n",
					__func__, slot);
			goto aes_keytable_save_err;
		}
	}

aes_keytable_save_err:
	return ret;
}
619
static int tegra_se_lp_rsakeytable_context_save(tegra_se_dev_t *se_dev)
{
	uint32_t val = 0;
	int ret = 0;
	/* For T210, first the modulus and then the exponent must be
	 * encrypted and saved. This is repeated for SLOT 0
	 * and SLOT 1. Hence the order:
	 * SLOT 0 modulus : RSA_KEY_INDEX : 1
	 * SLOT 0 exponent : RSA_KEY_INDEX : 0
	 * SLOT 1 modulus : RSA_KEY_INDEX : 3
	 * SLOT 1 exponent : RSA_KEY_INDEX : 2
	 */
	const unsigned int key_index_mod[TEGRA_SE_RSA_KEYSLOT_COUNT][2] = {
		/* RSA key slot 0 */
		{SE_RSA_KEY_INDEX_SLOT0_MOD, SE_RSA_KEY_INDEX_SLOT0_EXP},
		/* RSA key slot 1 */
		{SE_RSA_KEY_INDEX_SLOT1_MOD, SE_RSA_KEY_INDEX_SLOT1_EXP},
	};

	/* Destination: the rsa_keys area of the context save buffer,
	 * written 128 bits (one word quad) per operation.
	 */
	se_dev->dst_ll_buf->last_buff_num = 0;
	se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
					((tegra_se_context_t *)se_dev->
					ctx_save_buf)->rsa_keys)));
	se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_KEY_128_SIZE;

	for (int slot = 0; slot < TEGRA_SE_RSA_KEYSLOT_COUNT; slot++) {
		/* loop for modulus and exponent */
		for (int index = 0; index < 2; index++) {
			/* 16 word quads of 128 bits each per operand */
			for (int word_quad = 0; word_quad < 16; word_quad++) {
				val = SE_CTX_SAVE_SRC_RSA_KEYTABLE |
					SE_CTX_SAVE_RSA_KEY_INDEX(
						key_index_mod[slot][index]) |
					SE_CTX_RSA_WORD_QUAD(word_quad);
				tegra_se_write_32(se_dev,
					SE_CTX_SAVE_CONFIG_REG_OFFSET, val);

				/* SE context save operation */
				ret = tegra_se_start_ctx_save_operation(se_dev,
						TEGRA_SE_KEY_128_SIZE);
				if (ret) {
					ERROR("%s: ERR: slot=%d.\n",
						__func__, slot);
					goto rsa_keytable_save_err;
				}

				/* Update the pointer to the next word quad */
				se_dev->dst_ll_buf->buffer[0].addr +=
					TEGRA_SE_KEY_128_SIZE;
			}
		}
	}

rsa_keytable_save_err:
	return ret;
}
675
static int tegra_se_pkakeytable_sticky_bits_save(tegra_se_dev_t *se_dev)
{
	int ret;

	/* Destination: PKA sticky-bits field inside the SE2 context blob */
	se_dev->dst_ll_buf->last_buff_num = 0;
	se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
					((tegra_se2_context_blob_t *)se_dev->
					ctx_save_buf)->pka_ctx.sticky_bits)));
	se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_AES_BLOCK_SIZE;

	/* PKA1 sticky bits are 1 AES block (16 bytes) */
	tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET,
			SE_CTX_SAVE_SRC_PKA1_STICKY_BITS |
			SE_CTX_STICKY_WORD_QUAD_WORDS_0_3);

	/* SE context save operation */
	ret = tegra_se_start_ctx_save_operation(se_dev, 0);
	if (ret != 0) {
		ERROR("%s: ERR: PKA1 sticky bits CTX_SAVE OP failed\n",
				__func__);
	}

	return ret;
}
702
static int tegra_se_pkakeytable_context_save(tegra_se_dev_t *se_dev)
{
	uint32_t val = 0;
	int ret = 0;

	/* Destination: PKA key area inside the SE2 context blob,
	 * written 128 bits (one word quad) per operation.
	 */
	se_dev->dst_ll_buf->last_buff_num = 0;
	se_dev->dst_ll_buf->buffer[0].addr = ((uint64_t)(&(
					((tegra_se2_context_blob_t *)se_dev->
					ctx_save_buf)->pka_ctx.pka_keys)));
	se_dev->dst_ll_buf->buffer[0].data_len = TEGRA_SE_KEY_128_SIZE;

	/* for each slot, save word quad 0-127 */
	for (int slot = 0; slot < TEGRA_SE_PKA1_KEYSLOT_COUNT; slot++) {
		/* 512 words per slot = 128 word quads of 4 words each */
		for (int word_quad = 0; word_quad < 512/4; word_quad++) {
			val = SE_CTX_SAVE_SRC_PKA1_KEYTABLE |
				SE_CTX_PKA1_WORD_QUAD_L((slot * 128) +
							word_quad) |
				SE_CTX_PKA1_WORD_QUAD_H((slot * 128) +
							word_quad);
			tegra_se_write_32(se_dev,
				SE_CTX_SAVE_CONFIG_REG_OFFSET, val);

			/* SE context save operation, one word quad at a time */
			ret = tegra_se_start_ctx_save_operation(se_dev,
					TEGRA_SE_KEY_128_SIZE);
			if (ret) {
				ERROR("%s: ERR: pka1 keytable ctx save error\n",
					__func__);
				goto pka_keytable_save_err;
			}

			/* Update the pointer to the next word quad */
			se_dev->dst_ll_buf->buffer[0].addr +=
				TEGRA_SE_KEY_128_SIZE;
		}
	}

pka_keytable_save_err:
	return ret;
}
743
static int tegra_se_save_SRK(tegra_se_dev_t *se_dev)
{
	/*
	 * Route the context-save source to the SRK, then run a
	 * zero-length CTX_SAVE operation to commit it.
	 */
	tegra_se_write_32(se_dev, SE_CTX_SAVE_CONFIG_REG_OFFSET,
			SE_CTX_SAVE_SRC_SRK);

	return tegra_se_start_ctx_save_operation(se_dev, 0);
}
752
753 /*
754 * Lock both SE from non-TZ clients.
755 */
static inline void tegra_se_lock(tegra_se_dev_t *se_dev)
{
	uint32_t security;

	assert(se_dev);

	/* Set the soft TZ lock bit so only secure clients may access SE */
	security = tegra_se_read_32(se_dev, SE_SECURITY_REG_OFFSET);
	tegra_se_write_32(se_dev, SE_SECURITY_REG_OFFSET,
			security | SE_SECURITY_TZ_LOCK_SOFT(SE_SECURE));
}
765
766 /*
767 * Use SRK to encrypt SE state and save to TZRAM carveout
768 */
static int tegra_se_context_save_sw(tegra_se_dev_t *se_dev)
{
	int err = 0;

	assert(se_dev);

	/* Lock entire SE/SE2 as TZ protected */
	tegra_se_lock(se_dev);

	INFO("%s: generate SRK\n", __func__);
	/* Generate SRK */
	err = tegra_se_generate_srk(se_dev);
	if (err) {
		ERROR("%s: ERR: SRK generation failed\n", __func__);
		return err;
	}

	INFO("%s: generate random data\n", __func__);
	/* Generate random data */
	err = tegra_se_lp_generate_random_data(se_dev);
	if (err) {
		ERROR("%s: ERR: LP random pattern generation failed\n", __func__);
		return err;
	}

	INFO("%s: encrypt random data\n", __func__);
	/* Encrypt the random data block in place (src == dst) with SRK */
	err = tegra_se_lp_data_context_save(se_dev,
		((uint64_t)(&(((tegra_se_context_t *)se_dev->
					ctx_save_buf)->rand_data))),
		((uint64_t)(&(((tegra_se_context_t *)se_dev->
					ctx_save_buf)->rand_data))),
		SE_CTX_SAVE_RANDOM_DATA_SIZE);
	if (err) {
		ERROR("%s: ERR: random pattern encryption failed\n", __func__);
		return err;
	}

	INFO("%s: save SE sticky bits\n", __func__);
	/* Save AES sticky bits context */
	err = tegra_se_lp_sticky_bits_context_save(se_dev);
	if (err) {
		ERROR("%s: ERR: sticky bits context save failed\n", __func__);
		return err;
	}

	INFO("%s: save AES keytables\n", __func__);
	/* Save AES key table context */
	err = tegra_se_aeskeytable_context_save(se_dev);
	if (err) {
		ERROR("%s: ERR: LP keytable save failed\n", __func__);
		return err;
	}

	/* RSA key slot table context save */
	INFO("%s: save RSA keytables\n", __func__);
	err = tegra_se_lp_rsakeytable_context_save(se_dev);
	if (err) {
		ERROR("%s: ERR: rsa key table context save failed\n", __func__);
		return err;
	}

	/* Only SE2 has an interface with PKA1; thus, PKA1's context is saved
	 * via SE2.
	 */
	if (se_dev->se_num == 2) {
		/* Encrypt PKA1 sticky bits on SE2 only */
		INFO("%s: save PKA sticky bits\n", __func__);
		err = tegra_se_pkakeytable_sticky_bits_save(se_dev);
		if (err) {
			ERROR("%s: ERR: PKA sticky bits context save failed\n", __func__);
			return err;
		}

		/* Encrypt PKA1 keyslots on SE2 only */
		INFO("%s: save PKA keytables\n", __func__);
		err = tegra_se_pkakeytable_context_save(se_dev);
		if (err) {
			ERROR("%s: ERR: PKA key table context save failed\n", __func__);
			return err;
		}
	}

	/* Encrypt the known pattern: the final block the BootROM uses to
	 * verify the context decrypted correctly on warm boot. Its offset
	 * differs between the SE1 and SE2/PKA1 context blob layouts.
	 */
	if (se_dev->se_num == 1) {
		err = tegra_se_lp_data_context_save(se_dev,
			((uint64_t)(&se_ctx_known_pattern_data)),
			((uint64_t)(&(((tegra_se_context_blob_t *)se_dev->ctx_save_buf)->known_pattern))),
			SE_CTX_KNOWN_PATTERN_SIZE);
	} else if (se_dev->se_num == 2) {
		err = tegra_se_lp_data_context_save(se_dev,
			((uint64_t)(&se_ctx_known_pattern_data)),
			((uint64_t)(&(((tegra_se2_context_blob_t *)se_dev->ctx_save_buf)->known_pattern))),
			SE_CTX_KNOWN_PATTERN_SIZE);
	}
	if (err) {
		ERROR("%s: ERR: save LP known pattern failure\n", __func__);
		return err;
	}

	/* Write lp context buffer address into PMC scratch register.
	 * NOTE(review): the buffer address is written into a 32-bit
	 * scratch register; assumes the context buffer lies below 4GB —
	 * confirm against the platform memory map.
	 */
	if (se_dev->se_num == 1) {
		/* SE context address, support T210 only */
		mmio_write_32((uint64_t)TEGRA_PMC_BASE + PMC_SCRATCH43_REG_OFFSET,
				((uint64_t)(se_dev->ctx_save_buf)));
	} else if (se_dev->se_num == 2) {
		/* SE2 & PKA1 context address */
		mmio_write_32((uint64_t)TEGRA_PMC_BASE + PMC_SECURE_SCRATCH116_OFFSET,
				((uint64_t)(se_dev->ctx_save_buf)));
	}

	/* Saves SRK to PMC secure scratch registers for BootROM, which
	 * verifies and restores the security engine context on warm boot.
	 */
	err = tegra_se_save_SRK(se_dev);
	if (err < 0) {
		ERROR("%s: ERR: LP SRK save failure\n", __func__);
		return err;
	}

	INFO("%s: SE context save done \n", __func__);

	return err;
}
893
894 /*
895 * Initialize the SE engine handle
896 */
tegra_se_init(void)897 void tegra_se_init(void)
898 {
899 uint32_t val = 0;
900 INFO("%s: start SE init\n", __func__);
901
902 /* Generate random SRK to initialize DRBG */
903 tegra_se_generate_srk(&se_dev_1);
904
905 if (tegra_chipid_is_t210_b01()) {
906 tegra_se_generate_srk(&se_dev_2);
907 }
908
909 /* determine if ECID is valid */
910 val = mmio_read_32(TEGRA_FUSE_BASE + FUSE_JTAG_SECUREID_VALID);
911 ecid_valid = (val == ECID_VALID);
912
913 INFO("%s: SE init done\n", __func__);
914 }
915
/* Ungate the entropy and SE clocks and take both blocks out of reset. */
static void tegra_se_enable_clocks(void)
{
	uint32_t val = 0;

	/* Enable entropy clock */
	val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W);
	val |= ENTROPY_CLK_ENB_BIT;
	mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W, val);

	/* De-Assert Entropy Reset */
	val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_W);
	val &= ~ENTROPY_RESET_BIT;
	mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_W, val);

	/*
	 * Switch SE clock source to CLK_M, to make sure SE clock
	 * is on when saving SE context
	 */
	mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_RST_CTL_CLK_SRC_SE,
		SE_CLK_SRC_CLK_M);

	/* Enable SE clock */
	val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V);
	val |= SE_CLK_ENB_BIT;
	mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V, val);

	/* De-Assert SE Reset */
	val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_V);
	val &= ~SE_RESET_BIT;
	mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_RST_DEVICES_V, val);
}
947
tegra_se_disable_clocks(void)948 static void tegra_se_disable_clocks(void)
949 {
950 uint32_t val = 0;
951
952 /* Disable entropy clock */
953 val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W);
954 val &= ~ENTROPY_CLK_ENB_BIT;
955 mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_W, val);
956
957 /* Disable SE clock */
958 val = mmio_read_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V);
959 val &= ~SE_CLK_ENB_BIT;
960 mmio_write_32(TEGRA_CAR_RESET_BASE + TEGRA_CLK_OUT_ENB_V, val);
961 }
962
963 /*
964 * Security engine power suspend entry point.
965 * This function is invoked from PSCI power domain suspend handler.
966 */
tegra_se_suspend(void)967 int32_t tegra_se_suspend(void)
968 {
969 int32_t ret = 0;
970 uint32_t val = 0;
971
972 /* SE does not use SMMU in EL3, disable SMMU.
973 * This will be re-enabled by kernel on resume */
974 val = mmio_read_32(TEGRA_MC_BASE + MC_SMMU_PPCS_ASID_0);
975 val &= ~PPCS_SMMU_ENABLE;
976 mmio_write_32(TEGRA_MC_BASE + MC_SMMU_PPCS_ASID_0, val);
977
978 tegra_se_enable_clocks();
979
980 if (tegra_chipid_is_t210_b01()) {
981 /* It is T210 B01, Atomic context save se2 and pka1 */
982 INFO("%s: SE2/PKA1 atomic context save\n", __func__);
983 ret = tegra_se_context_save_atomic(&se_dev_2);
984 if (ret != 0) {
985 ERROR("%s: SE2 ctx save failed (%d)\n", __func__, ret);
986 }
987
988 ret = tegra_se_context_save_atomic(&se_dev_1);
989 if (ret != 0) {
990 ERROR("%s: SE1 ctx save failed (%d)\n", __func__, ret);
991 }
992 } else {
993 /* It is T210, SW context save se */
994 INFO("%s: SE1 legacy(SW) context save\n", __func__);
995 ret = tegra_se_context_save_sw(&se_dev_1);
996 if (ret != 0) {
997 ERROR("%s: SE1 ctx save failed (%d)\n", __func__, ret);
998 }
999 }
1000
1001 tegra_se_disable_clocks();
1002
1003 return ret;
1004 }
1005
1006 /*
1007 * Save TZRAM to shadow TZRAM in AON
1008 */
int32_t tegra_se_save_tzram(void)
{
	uint32_t val = 0;
	int32_t ret = 0;
	uint32_t timeout;

	INFO("%s: SE TZRAM save start\n", __func__);
	tegra_se_enable_clocks();

	/* Request a TZRAM "save" operation through SE1 */
	val = (SE_TZRAM_OP_REQ_INIT | SE_TZRAM_OP_MODE_SAVE);
	tegra_se_write_32(&se_dev_1, SE_TZRAM_OPERATION, val);

	/* Poll (1ms steps, up to ~100ms) until the engine clears BUSY */
	val = tegra_se_read_32(&se_dev_1, SE_TZRAM_OPERATION);
	for (timeout = 0; (SE_TZRAM_OP_BUSY(val) == SE_TZRAM_OP_BUSY_ON) &&
			(timeout < TIMEOUT_100MS); timeout++) {
		mdelay(1);
		val = tegra_se_read_32(&se_dev_1, SE_TZRAM_OPERATION);
	}

	if (timeout == TIMEOUT_100MS) {
		ERROR("%s: ERR: TZRAM save timeout!\n", __func__);
		ret = -ETIMEDOUT;
	}

	if (ret == 0) {
		INFO("%s: SE TZRAM save done!\n", __func__);
	}

	tegra_se_disable_clocks();

	return ret;
}
1041
1042 /*
1043 * The function is invoked by SE resume
1044 */
tegra_se_warm_boot_resume(const tegra_se_dev_t * se_dev)1045 static void tegra_se_warm_boot_resume(const tegra_se_dev_t *se_dev)
1046 {
1047 uint32_t val;
1048
1049 assert(se_dev);
1050
1051 /* Lock RNG source to ENTROPY on resume */
1052 val = DRBG_RO_ENT_IGNORE_MEM_ENABLE |
1053 DRBG_RO_ENT_SRC_LOCK_ENABLE |
1054 DRBG_RO_ENT_SRC_ENABLE;
1055 tegra_se_write_32(se_dev, SE_RNG_SRC_CONFIG_REG_OFFSET, val);
1056
1057 /* Set a random value to SRK to initialize DRBG */
1058 tegra_se_generate_srk(se_dev);
1059 }
1060
1061 /*
1062 * The function is invoked on SC7 resume
1063 */
tegra_se_resume(void)1064 void tegra_se_resume(void)
1065 {
1066 tegra_se_warm_boot_resume(&se_dev_1);
1067
1068 if (tegra_chipid_is_t210_b01()) {
1069 tegra_se_warm_boot_resume(&se_dev_2);
1070 }
1071 }
1072