1 /*
2 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, see <http://www.gnu.org/licenses/>.
15 */
16
17 #include <linux/crypto.h>
18 #include <linux/version.h>
19 #include <crypto/algapi.h>
20 #include <crypto/internal/aead.h>
21 #include <crypto/hash.h>
22 #include <crypto/authenc.h>
23 #include <crypto/scatterwalk.h>
24 #include <linux/dmapool.h>
25 #include <linux/dma-mapping.h>
27 #include <linux/module.h>
28 #include <linux/platform_device.h>
29
30 #include "ssi_buffer_mgr.h"
31 #include "cc_lli_defs.h"
32 #include "ssi_cipher.h"
33 #include "ssi_hash.h"
34 #include "ssi_aead.h"
35
36 #ifdef CC_DEBUG
37 #define GET_DMA_BUFFER_TYPE(buff_type) ( \
38 ((buff_type) == SSI_DMA_BUF_NULL) ? "BUF_NULL" : \
39 ((buff_type) == SSI_DMA_BUF_DLLI) ? "BUF_DLLI" : \
40 ((buff_type) == SSI_DMA_BUF_MLLI) ? "BUF_MLLI" : "BUF_INVALID")
41 #else
42 #define GET_DMA_BUFFER_TYPE(buff_type)
43 #endif
44
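/*
 * A DMA "buffer" fed into an MLLI table is either an already-mapped
 * scatterlist or a single contiguous DMA buffer; the enum below tags which
 * of the two union members in buffer_array_entry is valid.
 */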
45 enum dma_buffer_type {
46 DMA_NULL_TYPE = -1,
47 DMA_SGL_TYPE = 1,
48 DMA_BUFF_TYPE = 2,
49 };
50
51 struct buff_mgr_handle {
52 struct dma_pool *mlli_buffs_pool;
53 };
54
55 union buffer_array_entry {
56 struct scatterlist *sgl;
57 dma_addr_t buffer_dma;
58 };
59
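/*
 * Accumulates up to MAX_NUM_OF_BUFFERS_IN_MLLI mapped buffers/SGLs, together
 * with their lengths and offsets, before ssi_buffer_mgr_generate_mlli()
 * renders them into a single MLLI table.
 */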
60 struct buffer_array {
61 unsigned int num_of_buffers;
62 union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
63 unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
64 int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
65 int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
66 enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
67 bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
68 u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
69 };
70
71 /**
72 * ssi_buffer_mgr_get_sgl_nents() - Get scatterlist number of entries.
73 *
74 * @sg_list: SG list
75 * @nbytes: [IN] Total SGL data bytes.
 * @lbytes: [OUT] Returns the amount of bytes at the last entry
 * @is_chained: [OUT] Set to true if the SGL contains chained entries (may be NULL)
 */
static unsigned int ssi_buffer_mgr_get_sgl_nents(
79 struct scatterlist *sg_list, unsigned int nbytes, u32 *lbytes, bool *is_chained)
80 {
81 unsigned int nents = 0;
82
83 while (nbytes != 0) {
84 if (sg_is_chain(sg_list)) {
85 SSI_LOG_ERR("Unexpected chained entry "
86 "in sg (entry =0x%X)\n", nents);
87 BUG();
88 }
89 if (sg_list->length != 0) {
90 nents++;
91 /* get the number of bytes in the last entry */
92 *lbytes = nbytes;
93 nbytes -= (sg_list->length > nbytes) ? nbytes : sg_list->length;
94 sg_list = sg_next(sg_list);
95 } else {
96 sg_list = (struct scatterlist *)sg_page(sg_list);
97 if (is_chained)
98 *is_chained = true;
99 }
100 }
101 SSI_LOG_DEBUG("nents %d last bytes %d\n", nents, *lbytes);
102 return nents;
103 }
104
/**
 * ssi_buffer_mgr_zero_sgl() - Zero scatterlist data.
 *
 * @sgl: SG list to zero
 * @data_len: Number of bytes of SG data to zero
 */
void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, u32 data_len)
111 {
112 struct scatterlist *current_sg = sgl;
113 int sg_index = 0;
114
115 while (sg_index <= data_len) {
116 if (!current_sg) {
117 /* reached the end of the sgl --> just return back */
118 return;
119 }
120 memset(sg_virt(current_sg), 0, current_sg->length);
121 sg_index += current_sg->length;
122 current_sg = sg_next(current_sg);
123 }
124 }
125
/**
 * ssi_buffer_mgr_copy_scatterlist_portion() - Copy scatterlist data,
 * from to_skip to end, to dest and vice versa
 *
 * @dest: Linear buffer to copy to or from
 * @sg: SG list holding the data
 * @to_skip: Number of bytes of SG data to skip before copying
 * @end: Offset of the last byte of SG data to copy
 * @direct: SSI_SG_TO_BUF copies SG data into dest, SSI_SG_FROM_BUF copies dest into the SG list
 */
void ssi_buffer_mgr_copy_scatterlist_portion(
137 u8 *dest, struct scatterlist *sg,
138 u32 to_skip, u32 end,
139 enum ssi_sg_cpy_direct direct)
140 {
141 u32 nents, lbytes;
142
143 nents = ssi_buffer_mgr_get_sgl_nents(sg, end, &lbytes, NULL);
144 sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
145 (direct == SSI_SG_TO_BUF));
146 }
147
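/*
 * Render one contiguous DMA buffer into MLLI entries. Each LLI entry is a
 * pair of 32-bit words (address and size, written via cc_lli_set_addr() and
 * cc_lli_set_size()); buffers larger than CC_MAX_MLLI_ENTRY_SIZE are split
 * across consecutive entries.
 */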
static inline int ssi_buffer_mgr_render_buff_to_mlli(
149 dma_addr_t buff_dma, u32 buff_size, u32 *curr_nents,
150 u32 **mlli_entry_pp)
151 {
152 u32 *mlli_entry_p = *mlli_entry_pp;
153 u32 new_nents;
154
155 /* Verify there is no memory overflow*/
156 new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
157 if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES)
158 return -ENOMEM;
159
160 /*handle buffer longer than 64 kbytes */
161 while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
162 cc_lli_set_addr(mlli_entry_p, buff_dma);
163 cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
164 SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n", *curr_nents,
165 mlli_entry_p[LLI_WORD0_OFFSET],
166 mlli_entry_p[LLI_WORD1_OFFSET]);
167 buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
168 buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
169 mlli_entry_p = mlli_entry_p + 2;
170 (*curr_nents)++;
171 }
172 /*Last entry */
173 cc_lli_set_addr(mlli_entry_p, buff_dma);
174 cc_lli_set_size(mlli_entry_p, buff_size);
175 SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n", *curr_nents,
176 mlli_entry_p[LLI_WORD0_OFFSET],
177 mlli_entry_p[LLI_WORD1_OFFSET]);
178 mlli_entry_p = mlli_entry_p + 2;
179 *mlli_entry_pp = mlli_entry_p;
180 (*curr_nents)++;
181 return 0;
182 }
183
static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
185 struct scatterlist *sgl, u32 sgl_data_len, u32 sgl_offset,
186 u32 *curr_nents, u32 **mlli_entry_pp)
187 {
188 struct scatterlist *curr_sgl = sgl;
189 u32 *mlli_entry_p = *mlli_entry_pp;
190 s32 rc = 0;
191
192 for ( ; (curr_sgl) && (sgl_data_len != 0);
193 curr_sgl = sg_next(curr_sgl)) {
194 u32 entry_data_len =
195 (sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
196 sg_dma_len(curr_sgl) - sgl_offset :
197 sgl_data_len;
198 sgl_data_len -= entry_data_len;
199 rc = ssi_buffer_mgr_render_buff_to_mlli(
200 sg_dma_address(curr_sgl) + sgl_offset, entry_data_len,
201 curr_nents, &mlli_entry_p);
202 if (rc != 0)
203 return rc;
204
205 sgl_offset = 0;
206 }
207 *mlli_entry_pp = mlli_entry_p;
208 return 0;
209 }
210
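/*
 * Build the MLLI table for a request: allocate it from the MLLI DMA pool and
 * render every buffer/SGL collected in sg_data into consecutive LLI entries,
 * updating the per-buffer mlli_nents counters as we go.
 */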
static int ssi_buffer_mgr_generate_mlli(
212 struct device *dev,
213 struct buffer_array *sg_data,
214 struct mlli_params *mlli_params)
215 {
216 u32 *mlli_p;
217 u32 total_nents = 0, prev_total_nents = 0;
218 int rc = 0, i;
219
220 SSI_LOG_DEBUG("NUM of SG's = %d\n", sg_data->num_of_buffers);
221
222 /* Allocate memory from the pointed pool */
223 mlli_params->mlli_virt_addr = dma_pool_alloc(
224 mlli_params->curr_pool, GFP_KERNEL,
225 &mlli_params->mlli_dma_addr);
226 if (unlikely(!mlli_params->mlli_virt_addr)) {
227 SSI_LOG_ERR("dma_pool_alloc() failed\n");
228 rc = -ENOMEM;
229 goto build_mlli_exit;
230 }
231 /* Point to start of MLLI */
232 mlli_p = (u32 *)mlli_params->mlli_virt_addr;
233 /* go over all SG's and link it to one MLLI table */
234 for (i = 0; i < sg_data->num_of_buffers; i++) {
235 if (sg_data->type[i] == DMA_SGL_TYPE)
236 rc = ssi_buffer_mgr_render_scatterlist_to_mlli(
237 sg_data->entry[i].sgl,
238 sg_data->total_data_len[i], sg_data->offset[i], &total_nents,
239 &mlli_p);
240 else /*DMA_BUFF_TYPE*/
241 rc = ssi_buffer_mgr_render_buff_to_mlli(
242 sg_data->entry[i].buffer_dma,
243 sg_data->total_data_len[i], &total_nents,
244 &mlli_p);
245 if (rc != 0)
246 return rc;
247
248 /* set last bit in the current table */
249 if (sg_data->mlli_nents[i]) {
250 /*Calculate the current MLLI table length for the
251 *length field in the descriptor
252 */
253 *sg_data->mlli_nents[i] +=
254 (total_nents - prev_total_nents);
255 prev_total_nents = total_nents;
256 }
257 }
258
259 /* Set MLLI size for the bypass operation */
260 mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
261
262 SSI_LOG_DEBUG("MLLI params: "
263 "virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
264 mlli_params->mlli_virt_addr,
265 mlli_params->mlli_dma_addr,
266 mlli_params->mlli_len);
267
268 build_mlli_exit:
269 return rc;
270 }
271
static inline void ssi_buffer_mgr_add_buffer_entry(
273 struct buffer_array *sgl_data,
274 dma_addr_t buffer_dma, unsigned int buffer_len,
275 bool is_last_entry, u32 *mlli_nents)
276 {
277 unsigned int index = sgl_data->num_of_buffers;
278
279 SSI_LOG_DEBUG("index=%u single_buff=%pad "
280 "buffer_len=0x%08X is_last=%d\n",
281 index, buffer_dma, buffer_len, is_last_entry);
282 sgl_data->nents[index] = 1;
283 sgl_data->entry[index].buffer_dma = buffer_dma;
284 sgl_data->offset[index] = 0;
285 sgl_data->total_data_len[index] = buffer_len;
286 sgl_data->type[index] = DMA_BUFF_TYPE;
287 sgl_data->is_last[index] = is_last_entry;
288 sgl_data->mlli_nents[index] = mlli_nents;
289 if (sgl_data->mlli_nents[index])
290 *sgl_data->mlli_nents[index] = 0;
291 sgl_data->num_of_buffers++;
292 }
293
static inline void ssi_buffer_mgr_add_scatterlist_entry(
295 struct buffer_array *sgl_data,
296 unsigned int nents,
297 struct scatterlist *sgl,
298 unsigned int data_len,
299 unsigned int data_offset,
300 bool is_last_table,
301 u32 *mlli_nents)
302 {
303 unsigned int index = sgl_data->num_of_buffers;
304
305 SSI_LOG_DEBUG("index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
306 index, nents, sgl, data_len, is_last_table);
307 sgl_data->nents[index] = nents;
308 sgl_data->entry[index].sgl = sgl;
309 sgl_data->offset[index] = data_offset;
310 sgl_data->total_data_len[index] = data_len;
311 sgl_data->type[index] = DMA_SGL_TYPE;
312 sgl_data->is_last[index] = is_last_table;
313 sgl_data->mlli_nents[index] = mlli_nents;
314 if (sgl_data->mlli_nents[index])
315 *sgl_data->mlli_nents[index] = 0;
316 sgl_data->num_of_buffers++;
317 }
318
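/*
 * Map a (possibly chained) SGL entry by entry; used when the list cannot be
 * handed to dma_map_sg() in one call. On failure the entries mapped so far
 * are unmapped and 0 is returned.
 */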
319 static int
ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
321 enum dma_data_direction direction)
322 {
323 u32 i, j;
324 struct scatterlist *l_sg = sg;
325
326 for (i = 0; i < nents; i++) {
327 if (!l_sg)
328 break;
329 if (unlikely(dma_map_sg(dev, l_sg, 1, direction) != 1)) {
			SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
331 goto err;
332 }
333 l_sg = sg_next(l_sg);
334 }
335 return nents;
336
337 err:
338 /* Restore mapped parts */
339 for (j = 0; j < i; j++) {
340 if (!sg)
341 break;
342 dma_unmap_sg(dev, sg, 1, direction);
343 sg = sg_next(sg);
344 }
345 return 0;
346 }
347
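/*
 * DMA-map an SGL holding nbytes of data. A single-entry list is mapped
 * directly (DLLI candidate); otherwise the whole list is mapped in one call,
 * unless it is chained, in which case it is mapped entry by entry.
 */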
static int ssi_buffer_mgr_map_scatterlist(
349 struct device *dev, struct scatterlist *sg,
350 unsigned int nbytes, int direction,
351 u32 *nents, u32 max_sg_nents,
352 u32 *lbytes, u32 *mapped_nents)
353 {
354 bool is_chained = false;
355
356 if (sg_is_last(sg)) {
357 /* One entry only case -set to DLLI */
358 if (unlikely(dma_map_sg(dev, sg, 1, direction) != 1)) {
359 SSI_LOG_ERR("dma_map_sg() single buffer failed\n");
360 return -ENOMEM;
361 }
362 SSI_LOG_DEBUG("Mapped sg: dma_address=%pad "
363 "page=%p addr=%pK offset=%u "
364 "length=%u\n",
365 sg_dma_address(sg),
366 sg_page(sg),
367 sg_virt(sg),
368 sg->offset, sg->length);
369 *lbytes = nbytes;
370 *nents = 1;
371 *mapped_nents = 1;
	} else { /* !sg_is_last */
373 *nents = ssi_buffer_mgr_get_sgl_nents(sg, nbytes, lbytes,
374 &is_chained);
375 if (*nents > max_sg_nents) {
376 *nents = 0;
377 SSI_LOG_ERR("Too many fragments. current %d max %d\n",
378 *nents, max_sg_nents);
379 return -ENOMEM;
380 }
381 if (!is_chained) {
382 /* In case of mmu the number of mapped nents might
383 * be changed from the original sgl nents
384 */
385 *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
386 if (unlikely(*mapped_nents == 0)) {
387 *nents = 0;
388 SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
389 return -ENOMEM;
390 }
391 } else {
392 /*In this case the driver maps entry by entry so it
393 * must have the same nents before and after map
394 */
395 *mapped_nents = ssi_buffer_mgr_dma_map_sg(dev,
396 sg,
397 *nents,
398 direction);
399 if (unlikely(*mapped_nents != *nents)) {
400 *nents = *mapped_nents;
401 SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
402 return -ENOMEM;
403 }
404 }
405 }
406
407 return 0;
408 }
409
410 static inline int
ssi_aead_handle_config_buf(struct device *dev,
412 struct aead_req_ctx *areq_ctx,
413 u8 *config_data,
414 struct buffer_array *sg_data,
415 unsigned int assoclen)
416 {
417 SSI_LOG_DEBUG(" handle additional data config set to DLLI\n");
418 /* create sg for the current buffer */
419 sg_init_one(&areq_ctx->ccm_adata_sg, config_data, AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
420 if (unlikely(dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1,
421 DMA_TO_DEVICE) != 1)) {
422 SSI_LOG_ERR("dma_map_sg() "
423 "config buffer failed\n");
424 return -ENOMEM;
425 }
426 SSI_LOG_DEBUG("Mapped curr_buff: dma_address=%pad "
427 "page=%p addr=%pK "
428 "offset=%u length=%u\n",
429 sg_dma_address(&areq_ctx->ccm_adata_sg),
430 sg_page(&areq_ctx->ccm_adata_sg),
431 sg_virt(&areq_ctx->ccm_adata_sg),
432 areq_ctx->ccm_adata_sg.offset,
433 areq_ctx->ccm_adata_sg.length);
434 /* prepare for case of MLLI */
435 if (assoclen > 0) {
436 ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1,
437 &areq_ctx->ccm_adata_sg,
438 (AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
439 0, false, NULL);
440 }
441 return 0;
442 }
443
static inline int ssi_ahash_handle_curr_buf(struct device *dev,
445 struct ahash_req_ctx *areq_ctx,
446 u8 *curr_buff,
447 u32 curr_buff_cnt,
448 struct buffer_array *sg_data)
449 {
450 SSI_LOG_DEBUG(" handle curr buff %x set to DLLI\n", curr_buff_cnt);
451 /* create sg for the current buffer */
452 sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
453 if (unlikely(dma_map_sg(dev, areq_ctx->buff_sg, 1,
454 DMA_TO_DEVICE) != 1)) {
455 SSI_LOG_ERR("dma_map_sg() "
456 "src buffer failed\n");
457 return -ENOMEM;
458 }
459 SSI_LOG_DEBUG("Mapped curr_buff: dma_address=%pad "
460 "page=%p addr=%pK "
461 "offset=%u length=%u\n",
462 sg_dma_address(areq_ctx->buff_sg),
463 sg_page(areq_ctx->buff_sg),
464 sg_virt(areq_ctx->buff_sg),
465 areq_ctx->buff_sg->offset,
466 areq_ctx->buff_sg->length);
467 areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
468 areq_ctx->curr_sg = areq_ctx->buff_sg;
469 areq_ctx->in_nents = 0;
470 /* prepare for case of MLLI */
471 ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1, areq_ctx->buff_sg,
472 curr_buff_cnt, 0, false, NULL);
473 return 0;
474 }
475
void ssi_buffer_mgr_unmap_blkcipher_request(
477 struct device *dev,
478 void *ctx,
479 unsigned int ivsize,
480 struct scatterlist *src,
481 struct scatterlist *dst)
482 {
483 struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
484
485 if (likely(req_ctx->gen_ctx.iv_dma_addr != 0)) {
486 SSI_LOG_DEBUG("Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
487 req_ctx->gen_ctx.iv_dma_addr,
488 ivsize);
489 dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
490 ivsize,
491 req_ctx->is_giv ? DMA_BIDIRECTIONAL :
492 DMA_TO_DEVICE);
493 }
494 /* Release pool */
495 if (req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI &&
496 req_ctx->mlli_params.mlli_virt_addr) {
497 dma_pool_free(req_ctx->mlli_params.curr_pool,
498 req_ctx->mlli_params.mlli_virt_addr,
499 req_ctx->mlli_params.mlli_dma_addr);
500 }
501
502 dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
503 SSI_LOG_DEBUG("Unmapped req->src=%pK\n", sg_virt(src));
504
505 if (src != dst) {
506 dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
507 SSI_LOG_DEBUG("Unmapped req->dst=%pK\n", sg_virt(dst));
508 }
509 }
510
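/*
 * Map a blkcipher request for DMA: map the IV (if any) and the src/dst SGLs.
 * A single-entry mapping is kept as DLLI; anything longer is collected into
 * sg_data and rendered into an MLLI table below.
 */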
int ssi_buffer_mgr_map_blkcipher_request(
512 struct ssi_drvdata *drvdata,
513 void *ctx,
514 unsigned int ivsize,
515 unsigned int nbytes,
516 void *info,
517 struct scatterlist *src,
518 struct scatterlist *dst)
519 {
520 struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
521 struct mlli_params *mlli_params = &req_ctx->mlli_params;
522 struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
523 struct device *dev = &drvdata->plat_dev->dev;
524 struct buffer_array sg_data;
525 u32 dummy = 0;
526 int rc = 0;
527 u32 mapped_nents = 0;
528
529 req_ctx->dma_buf_type = SSI_DMA_BUF_DLLI;
530 mlli_params->curr_pool = NULL;
531 sg_data.num_of_buffers = 0;
532
533 /* Map IV buffer */
534 if (likely(ivsize != 0)) {
535 dump_byte_array("iv", (u8 *)info, ivsize);
536 req_ctx->gen_ctx.iv_dma_addr =
537 dma_map_single(dev, (void *)info,
538 ivsize,
539 req_ctx->is_giv ? DMA_BIDIRECTIONAL :
540 DMA_TO_DEVICE);
541 if (unlikely(dma_mapping_error(dev,
542 req_ctx->gen_ctx.iv_dma_addr))) {
543 SSI_LOG_ERR("Mapping iv %u B at va=%pK "
544 "for DMA failed\n", ivsize, info);
545 return -ENOMEM;
546 }
547 SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=%pad\n",
548 ivsize, info,
549 req_ctx->gen_ctx.iv_dma_addr);
550 } else {
551 req_ctx->gen_ctx.iv_dma_addr = 0;
552 }
553
554 /* Map the src SGL */
555 rc = ssi_buffer_mgr_map_scatterlist(dev, src,
556 nbytes, DMA_BIDIRECTIONAL,
557 &req_ctx->in_nents,
558 LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
559 &mapped_nents);
560 if (unlikely(rc != 0)) {
561 rc = -ENOMEM;
562 goto ablkcipher_exit;
563 }
564 if (mapped_nents > 1)
565 req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;
566
567 if (unlikely(src == dst)) {
568 /* Handle inplace operation */
569 if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
570 req_ctx->out_nents = 0;
571 ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
572 req_ctx->in_nents,
573 src, nbytes, 0,
574 true,
575 &req_ctx->in_mlli_nents);
576 }
577 } else {
578 /* Map the dst sg */
579 if (unlikely(ssi_buffer_mgr_map_scatterlist(
580 dev, dst, nbytes,
581 DMA_BIDIRECTIONAL, &req_ctx->out_nents,
582 LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
583 &mapped_nents))){
584 rc = -ENOMEM;
585 goto ablkcipher_exit;
586 }
587 if (mapped_nents > 1)
588 req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;
589
590 if (unlikely((req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI))) {
591 ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
592 req_ctx->in_nents,
593 src, nbytes, 0,
594 true,
595 &req_ctx->in_mlli_nents);
596 ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
597 req_ctx->out_nents,
598 dst, nbytes, 0,
599 true,
600 &req_ctx->out_mlli_nents);
601 }
602 }
603
604 if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
605 mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
606 rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
607 if (unlikely(rc != 0))
608 goto ablkcipher_exit;
609 }
610
611 SSI_LOG_DEBUG("areq_ctx->dma_buf_type = %s\n",
612 GET_DMA_BUFFER_TYPE(req_ctx->dma_buf_type));
613
614 return 0;
615
616 ablkcipher_exit:
617 ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
618 return rc;
619 }
620
void ssi_buffer_mgr_unmap_aead_request(
622 struct device *dev, struct aead_request *req)
623 {
624 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
625 unsigned int hw_iv_size = areq_ctx->hw_iv_size;
626 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
627 struct ssi_drvdata *drvdata = dev_get_drvdata(dev);
628 u32 dummy;
629 bool chained;
630 u32 size_to_unmap = 0;
631
632 if (areq_ctx->mac_buf_dma_addr != 0) {
633 dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
634 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
635 }
636
637 #if SSI_CC_HAS_AES_GCM
638 if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
639 if (areq_ctx->hkey_dma_addr != 0) {
640 dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
641 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
642 }
643
644 if (areq_ctx->gcm_block_len_dma_addr != 0) {
645 dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
646 AES_BLOCK_SIZE, DMA_TO_DEVICE);
647 }
648
649 if (areq_ctx->gcm_iv_inc1_dma_addr != 0) {
650 dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
651 AES_BLOCK_SIZE, DMA_TO_DEVICE);
652 }
653
654 if (areq_ctx->gcm_iv_inc2_dma_addr != 0) {
655 dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
656 AES_BLOCK_SIZE, DMA_TO_DEVICE);
657 }
658 }
659 #endif
660
661 if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
662 if (areq_ctx->ccm_iv0_dma_addr != 0) {
663 dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
664 AES_BLOCK_SIZE, DMA_TO_DEVICE);
665 }
666
667 dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
668 }
669 if (areq_ctx->gen_ctx.iv_dma_addr != 0) {
670 dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
671 hw_iv_size, DMA_BIDIRECTIONAL);
672 }
673
674 /*In case a pool was set, a table was
675 *allocated and should be released
676 */
677 if (areq_ctx->mlli_params.curr_pool) {
678 SSI_LOG_DEBUG("free MLLI buffer: dma=%pad virt=%pK\n",
679 areq_ctx->mlli_params.mlli_dma_addr,
680 areq_ctx->mlli_params.mlli_virt_addr);
681 dma_pool_free(areq_ctx->mlli_params.curr_pool,
682 areq_ctx->mlli_params.mlli_virt_addr,
683 areq_ctx->mlli_params.mlli_dma_addr);
684 }
685
686 SSI_LOG_DEBUG("Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n", sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents, req->assoclen, req->cryptlen);
687 size_to_unmap = req->assoclen + req->cryptlen;
688 if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
689 size_to_unmap += areq_ctx->req_authsize;
690 if (areq_ctx->is_gcm4543)
691 size_to_unmap += crypto_aead_ivsize(tfm);
692
693 dma_unmap_sg(dev, req->src, ssi_buffer_mgr_get_sgl_nents(req->src, size_to_unmap, &dummy, &chained), DMA_BIDIRECTIONAL);
694 if (unlikely(req->src != req->dst)) {
695 SSI_LOG_DEBUG("Unmapping dst sgl: req->dst=%pK\n",
696 sg_virt(req->dst));
697 dma_unmap_sg(dev, req->dst,
698 ssi_buffer_mgr_get_sgl_nents(req->dst,
699 size_to_unmap,
700 &dummy,
701 &chained),
702 DMA_BIDIRECTIONAL);
703 }
704 if (drvdata->coherent &&
705 (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
706 likely(req->src == req->dst)) {
707 u32 size_to_skip = req->assoclen;
708
709 if (areq_ctx->is_gcm4543)
710 size_to_skip += crypto_aead_ivsize(tfm);
711
		/* copy mac to a temporary location to deal with a possible
		 * data memory overwrite caused by cache coherence problems.
		 */
715 ssi_buffer_mgr_copy_scatterlist_portion(
716 areq_ctx->backup_mac, req->src,
717 size_to_skip + req->cryptlen - areq_ctx->req_authsize,
718 size_to_skip + req->cryptlen, SSI_SG_FROM_BUF);
719 }
720 }
721
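/*
 * Determine how many SGL entries hold the ICV (authentication tag) and
 * whether it is fragmented across entries. A fragmented ICV is handled by a
 * CPU-side copy/compare rather than by pointing the engine directly at the
 * tag location.
 */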
static inline int ssi_buffer_mgr_get_aead_icv_nents(
723 struct scatterlist *sgl,
724 unsigned int sgl_nents,
725 unsigned int authsize,
726 u32 last_entry_data_size,
727 bool *is_icv_fragmented)
728 {
729 unsigned int icv_max_size = 0;
730 unsigned int icv_required_size = authsize > last_entry_data_size ? (authsize - last_entry_data_size) : authsize;
731 unsigned int nents;
732 unsigned int i;
733
734 if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
735 *is_icv_fragmented = false;
736 return 0;
737 }
738
739 for (i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
740 if (!sgl)
741 break;
742 sgl = sg_next(sgl);
743 }
744
745 if (sgl)
746 icv_max_size = sgl->length;
747
748 if (last_entry_data_size > authsize) {
749 nents = 0; /* ICV attached to data in last entry (not fragmented!) */
750 *is_icv_fragmented = false;
751 } else if (last_entry_data_size == authsize) {
752 nents = 1; /* ICV placed in whole last entry (not fragmented!) */
753 *is_icv_fragmented = false;
754 } else if (icv_max_size > icv_required_size) {
755 nents = 1;
756 *is_icv_fragmented = true;
757 } else if (icv_max_size == icv_required_size) {
758 nents = 2;
759 *is_icv_fragmented = true;
760 } else {
761 SSI_LOG_ERR("Unsupported num. of ICV fragments (> %d)\n",
762 MAX_ICV_NENTS_SUPPORTED);
763 nents = -1; /*unsupported*/
764 }
765 SSI_LOG_DEBUG("is_frag=%s icv_nents=%u\n",
766 (*is_icv_fragmented ? "true" : "false"), nents);
767
768 return nents;
769 }
770
static inline int ssi_buffer_mgr_aead_chain_iv(
772 struct ssi_drvdata *drvdata,
773 struct aead_request *req,
774 struct buffer_array *sg_data,
775 bool is_last, bool do_chain)
776 {
777 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
778 unsigned int hw_iv_size = areq_ctx->hw_iv_size;
779 struct device *dev = &drvdata->plat_dev->dev;
780 int rc = 0;
781
782 if (unlikely(!req->iv)) {
783 areq_ctx->gen_ctx.iv_dma_addr = 0;
784 goto chain_iv_exit;
785 }
786
787 areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv, hw_iv_size,
788 DMA_BIDIRECTIONAL);
789 if (unlikely(dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr))) {
790 SSI_LOG_ERR("Mapping iv %u B at va=%pK for DMA failed\n",
791 hw_iv_size, req->iv);
792 rc = -ENOMEM;
793 goto chain_iv_exit;
794 }
795
796 SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=%pad\n",
797 hw_iv_size, req->iv,
798 areq_ctx->gen_ctx.iv_dma_addr);
	if (do_chain && areq_ctx->plaintext_authenticate_only) { /* TODO: what about CTR mode? */
800 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
801 unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
802 unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
803 /* Chain to given list */
804 ssi_buffer_mgr_add_buffer_entry(
805 sg_data, areq_ctx->gen_ctx.iv_dma_addr + iv_ofs,
806 iv_size_to_authenc, is_last,
807 &areq_ctx->assoc.mlli_nents);
808 areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
809 }
810
811 chain_iv_exit:
812 return rc;
813 }
814
static inline int ssi_buffer_mgr_aead_chain_assoc(
816 struct ssi_drvdata *drvdata,
817 struct aead_request *req,
818 struct buffer_array *sg_data,
819 bool is_last, bool do_chain)
820 {
821 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
822 int rc = 0;
823 u32 mapped_nents = 0;
824 struct scatterlist *current_sg = req->src;
825 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
826 unsigned int sg_index = 0;
827 u32 size_of_assoc = req->assoclen;
828
829 if (areq_ctx->is_gcm4543)
830 size_of_assoc += crypto_aead_ivsize(tfm);
831
832 if (!sg_data) {
833 rc = -EINVAL;
834 goto chain_assoc_exit;
835 }
836
837 if (unlikely(req->assoclen == 0)) {
838 areq_ctx->assoc_buff_type = SSI_DMA_BUF_NULL;
839 areq_ctx->assoc.nents = 0;
840 areq_ctx->assoc.mlli_nents = 0;
841 SSI_LOG_DEBUG("Chain assoc of length 0: buff_type=%s nents=%u\n",
842 GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
843 areq_ctx->assoc.nents);
844 goto chain_assoc_exit;
845 }
846
	/* Iterate over the sgl to see how many entries hold associated data;
	 * it is assumed that if we reach here, the sgl is already mapped.
	 */
	sg_index = current_sg->length;
	/* Check whether the first entry already covers all the associated data */
	if (sg_index > size_of_assoc) {
851 mapped_nents++;
852 } else {
853 while (sg_index <= size_of_assoc) {
854 current_sg = sg_next(current_sg);
			/* Reaching the end of the sgl here is unexpected */
856 if (!current_sg) {
857 SSI_LOG_ERR("reached end of sg list. unexpected\n");
858 BUG();
859 }
860 sg_index += current_sg->length;
861 mapped_nents++;
862 }
863 }
864 if (unlikely(mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
865 SSI_LOG_ERR("Too many fragments. current %d max %d\n",
866 mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
867 return -ENOMEM;
868 }
869 areq_ctx->assoc.nents = mapped_nents;
870
871 /* in CCM case we have additional entry for
872 * ccm header configurations
873 */
874 if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
875 if (unlikely((mapped_nents + 1) >
876 LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
			SSI_LOG_ERR("CCM case. Too many fragments. Current %d max %d\n",
878 (areq_ctx->assoc.nents + 1),
879 LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
880 rc = -ENOMEM;
881 goto chain_assoc_exit;
882 }
883 }
884
885 if (likely(mapped_nents == 1) &&
886 (areq_ctx->ccm_hdr_size == ccm_header_size_null))
887 areq_ctx->assoc_buff_type = SSI_DMA_BUF_DLLI;
888 else
889 areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
890
891 if (unlikely((do_chain) ||
892 (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI))) {
893 SSI_LOG_DEBUG("Chain assoc: buff_type=%s nents=%u\n",
894 GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
895 areq_ctx->assoc.nents);
896 ssi_buffer_mgr_add_scatterlist_entry(
897 sg_data, areq_ctx->assoc.nents,
898 req->src, req->assoclen, 0, is_last,
899 &areq_ctx->assoc.mlli_nents);
900 areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
901 }
902
903 chain_assoc_exit:
904 return rc;
905 }
906
static inline void ssi_buffer_mgr_prepare_aead_data_dlli(
908 struct aead_request *req,
909 u32 *src_last_bytes, u32 *dst_last_bytes)
910 {
911 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
912 enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
913 unsigned int authsize = areq_ctx->req_authsize;
914
915 areq_ctx->is_icv_fragmented = false;
916 if (likely(req->src == req->dst)) {
917 /*INPLACE*/
918 areq_ctx->icv_dma_addr = sg_dma_address(
919 areq_ctx->src_sgl) +
920 (*src_last_bytes - authsize);
921 areq_ctx->icv_virt_addr = sg_virt(
922 areq_ctx->src_sgl) +
923 (*src_last_bytes - authsize);
924 } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
925 /*NON-INPLACE and DECRYPT*/
926 areq_ctx->icv_dma_addr = sg_dma_address(
927 areq_ctx->src_sgl) +
928 (*src_last_bytes - authsize);
929 areq_ctx->icv_virt_addr = sg_virt(
930 areq_ctx->src_sgl) +
931 (*src_last_bytes - authsize);
932 } else {
933 /*NON-INPLACE and ENCRYPT*/
934 areq_ctx->icv_dma_addr = sg_dma_address(
935 areq_ctx->dst_sgl) +
936 (*dst_last_bytes - authsize);
937 areq_ctx->icv_virt_addr = sg_virt(
938 areq_ctx->dst_sgl) +
939 (*dst_last_bytes - authsize);
940 }
941 }
942
static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
944 struct ssi_drvdata *drvdata,
945 struct aead_request *req,
946 struct buffer_array *sg_data,
947 u32 *src_last_bytes, u32 *dst_last_bytes,
948 bool is_last_table)
949 {
950 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
951 enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
952 unsigned int authsize = areq_ctx->req_authsize;
953 int rc = 0, icv_nents;
954 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
955
956 if (likely(req->src == req->dst)) {
957 /*INPLACE*/
958 ssi_buffer_mgr_add_scatterlist_entry(sg_data,
959 areq_ctx->src.nents,
960 areq_ctx->src_sgl,
961 areq_ctx->cryptlen,
962 areq_ctx->src_offset,
963 is_last_table,
964 &areq_ctx->src.mlli_nents);
965
966 icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->src_sgl,
967 areq_ctx->src.nents,
968 authsize,
969 *src_last_bytes,
970 &areq_ctx->is_icv_fragmented);
971 if (unlikely(icv_nents < 0)) {
972 rc = -ENOTSUPP;
973 goto prepare_data_mlli_exit;
974 }
975
976 if (unlikely(areq_ctx->is_icv_fragmented)) {
977 /* Backup happens only when ICV is fragmented, ICV
978 * verification is made by CPU compare in order to simplify
979 * MAC verification upon request completion
980 */
981 if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
				if (!drvdata->coherent) {
					/* On coherent platforms (e.g. ACP)
					 * the ICV has already been copied for
					 * any INPLACE-DECRYPT operation, so
					 * this copy is skipped there.
					 */
988 u32 skip = req->assoclen;
989
990 if (areq_ctx->is_gcm4543)
991 skip += crypto_aead_ivsize(tfm);
992
993 ssi_buffer_mgr_copy_scatterlist_portion(
994 areq_ctx->backup_mac, req->src,
995 (skip + req->cryptlen -
996 areq_ctx->req_authsize),
997 skip + req->cryptlen,
998 SSI_SG_TO_BUF);
999 }
1000 areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
1001 } else {
1002 areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
1003 areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
1004 }
1005 } else { /* Contig. ICV */
			/* Should handle the case of a non-contiguous sg */
1007 areq_ctx->icv_dma_addr = sg_dma_address(
1008 &areq_ctx->src_sgl[areq_ctx->src.nents - 1]) +
1009 (*src_last_bytes - authsize);
1010 areq_ctx->icv_virt_addr = sg_virt(
1011 &areq_ctx->src_sgl[areq_ctx->src.nents - 1]) +
1012 (*src_last_bytes - authsize);
1013 }
1014
1015 } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
1016 /*NON-INPLACE and DECRYPT*/
1017 ssi_buffer_mgr_add_scatterlist_entry(sg_data,
1018 areq_ctx->src.nents,
1019 areq_ctx->src_sgl,
1020 areq_ctx->cryptlen,
1021 areq_ctx->src_offset,
1022 is_last_table,
1023 &areq_ctx->src.mlli_nents);
1024 ssi_buffer_mgr_add_scatterlist_entry(sg_data,
1025 areq_ctx->dst.nents,
1026 areq_ctx->dst_sgl,
1027 areq_ctx->cryptlen,
1028 areq_ctx->dst_offset,
1029 is_last_table,
1030 &areq_ctx->dst.mlli_nents);
1031
1032 icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->src_sgl,
1033 areq_ctx->src.nents,
1034 authsize,
1035 *src_last_bytes,
1036 &areq_ctx->is_icv_fragmented);
1037 if (unlikely(icv_nents < 0)) {
1038 rc = -ENOTSUPP;
1039 goto prepare_data_mlli_exit;
1040 }
1041
1042 if (unlikely(areq_ctx->is_icv_fragmented)) {
1043 /* Backup happens only when ICV is fragmented, ICV
1044 * verification is made by CPU compare in order to simplify
1045 * MAC verification upon request completion
1046 */
1047 u32 size_to_skip = req->assoclen;
1048
1049 if (areq_ctx->is_gcm4543)
1050 size_to_skip += crypto_aead_ivsize(tfm);
1051
1052 ssi_buffer_mgr_copy_scatterlist_portion(
1053 areq_ctx->backup_mac, req->src,
1054 size_to_skip + req->cryptlen - areq_ctx->req_authsize,
1055 size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
1056 areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
1057 } else { /* Contig. ICV */
			/* Should handle the case of a non-contiguous sg */
1059 areq_ctx->icv_dma_addr = sg_dma_address(
1060 &areq_ctx->src_sgl[areq_ctx->src.nents - 1]) +
1061 (*src_last_bytes - authsize);
1062 areq_ctx->icv_virt_addr = sg_virt(
1063 &areq_ctx->src_sgl[areq_ctx->src.nents - 1]) +
1064 (*src_last_bytes - authsize);
1065 }
1066
1067 } else {
1068 /*NON-INPLACE and ENCRYPT*/
1069 ssi_buffer_mgr_add_scatterlist_entry(sg_data,
1070 areq_ctx->dst.nents,
1071 areq_ctx->dst_sgl,
1072 areq_ctx->cryptlen,
1073 areq_ctx->dst_offset,
1074 is_last_table,
1075 &areq_ctx->dst.mlli_nents);
1076 ssi_buffer_mgr_add_scatterlist_entry(sg_data,
1077 areq_ctx->src.nents,
1078 areq_ctx->src_sgl,
1079 areq_ctx->cryptlen,
1080 areq_ctx->src_offset,
1081 is_last_table,
1082 &areq_ctx->src.mlli_nents);
1083
1084 icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->dst_sgl,
1085 areq_ctx->dst.nents,
1086 authsize,
1087 *dst_last_bytes,
1088 &areq_ctx->is_icv_fragmented);
1089 if (unlikely(icv_nents < 0)) {
1090 rc = -ENOTSUPP;
1091 goto prepare_data_mlli_exit;
1092 }
1093
1094 if (likely(!areq_ctx->is_icv_fragmented)) {
1095 /* Contig. ICV */
1096 areq_ctx->icv_dma_addr = sg_dma_address(
1097 &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1]) +
1098 (*dst_last_bytes - authsize);
1099 areq_ctx->icv_virt_addr = sg_virt(
1100 &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1]) +
1101 (*dst_last_bytes - authsize);
1102 } else {
1103 areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
1104 areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
1105 }
1106 }
1107
1108 prepare_data_mlli_exit:
1109 return rc;
1110 }
1111
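/*
 * Chain the AEAD payload: skip over the associated data (and the inline IV
 * for RFC4543/GCM) in the src/dst SGLs to locate where the cipher data
 * starts, then prepare either DLLI (single entry) or MLLI descriptors for it.
 */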
static inline int ssi_buffer_mgr_aead_chain_data(
1113 struct ssi_drvdata *drvdata,
1114 struct aead_request *req,
1115 struct buffer_array *sg_data,
1116 bool is_last_table, bool do_chain)
1117 {
1118 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1119 struct device *dev = &drvdata->plat_dev->dev;
1120 enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
1121 unsigned int authsize = areq_ctx->req_authsize;
1122 int src_last_bytes = 0, dst_last_bytes = 0;
1123 int rc = 0;
1124 u32 src_mapped_nents = 0, dst_mapped_nents = 0;
1125 u32 offset = 0;
1126 unsigned int size_for_map = req->assoclen + req->cryptlen; /*non-inplace mode*/
1127 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1128 u32 sg_index = 0;
1129 bool chained = false;
1130 bool is_gcm4543 = areq_ctx->is_gcm4543;
1131 u32 size_to_skip = req->assoclen;
1132
1133 if (is_gcm4543)
1134 size_to_skip += crypto_aead_ivsize(tfm);
1135
1136 offset = size_to_skip;
1137
1138 if (!sg_data) {
1139 rc = -EINVAL;
1140 goto chain_data_exit;
1141 }
1142 areq_ctx->src_sgl = req->src;
1143 areq_ctx->dst_sgl = req->dst;
1144
1145 if (is_gcm4543)
1146 size_for_map += crypto_aead_ivsize(tfm);
1147
1148 size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0;
1149 src_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->src, size_for_map, &src_last_bytes, &chained);
1150 sg_index = areq_ctx->src_sgl->length;
	/* Check where the data starts */
1152 while (sg_index <= size_to_skip) {
1153 offset -= areq_ctx->src_sgl->length;
1154 areq_ctx->src_sgl = sg_next(areq_ctx->src_sgl);
		/* Reaching the end of the sgl here is unexpected */
1156 if (!areq_ctx->src_sgl) {
1157 SSI_LOG_ERR("reached end of sg list. unexpected\n");
1158 BUG();
1159 }
1160 sg_index += areq_ctx->src_sgl->length;
1161 src_mapped_nents--;
1162 }
1163 if (unlikely(src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) {
1164 SSI_LOG_ERR("Too many fragments. current %d max %d\n",
1165 src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
1166 return -ENOMEM;
1167 }
1168
1169 areq_ctx->src.nents = src_mapped_nents;
1170
1171 areq_ctx->src_offset = offset;
1172
1173 if (req->src != req->dst) {
1174 size_for_map = req->assoclen + req->cryptlen;
1175 size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0;
1176 if (is_gcm4543)
1177 size_for_map += crypto_aead_ivsize(tfm);
1178
1179 rc = ssi_buffer_mgr_map_scatterlist(dev, req->dst, size_for_map,
1180 DMA_BIDIRECTIONAL,
1181 &areq_ctx->dst.nents,
1182 LLI_MAX_NUM_OF_DATA_ENTRIES,
1183 &dst_last_bytes,
1184 &dst_mapped_nents);
1185 if (unlikely(rc != 0)) {
1186 rc = -ENOMEM;
1187 goto chain_data_exit;
1188 }
1189 }
1190
1191 dst_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->dst, size_for_map, &dst_last_bytes, &chained);
1192 sg_index = areq_ctx->dst_sgl->length;
1193 offset = size_to_skip;
1194
	/* Check where the data starts */
1196 while (sg_index <= size_to_skip) {
1197 offset -= areq_ctx->dst_sgl->length;
1198 areq_ctx->dst_sgl = sg_next(areq_ctx->dst_sgl);
		/* Reaching the end of the sgl here is unexpected */
1200 if (!areq_ctx->dst_sgl) {
1201 SSI_LOG_ERR("reached end of sg list. unexpected\n");
1202 BUG();
1203 }
1204 sg_index += areq_ctx->dst_sgl->length;
1205 dst_mapped_nents--;
1206 }
1207 if (unlikely(dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) {
1208 SSI_LOG_ERR("Too many fragments. current %d max %d\n",
1209 dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
1210 return -ENOMEM;
1211 }
1212 areq_ctx->dst.nents = dst_mapped_nents;
1213 areq_ctx->dst_offset = offset;
1214 if ((src_mapped_nents > 1) ||
1215 (dst_mapped_nents > 1) ||
1216 do_chain) {
1217 areq_ctx->data_buff_type = SSI_DMA_BUF_MLLI;
1218 rc = ssi_buffer_mgr_prepare_aead_data_mlli(drvdata, req,
1219 sg_data,
1220 &src_last_bytes,
1221 &dst_last_bytes,
1222 is_last_table);
1223 } else {
1224 areq_ctx->data_buff_type = SSI_DMA_BUF_DLLI;
1225 ssi_buffer_mgr_prepare_aead_data_dlli(
1226 req, &src_last_bytes, &dst_last_bytes);
1227 }
1228
1229 chain_data_exit:
1230 return rc;
1231 }
1232
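/*
 * Lay out the assoc/src/dst MLLI sub-tables back to back in MLLI SRAM and,
 * for double-pass flows, fold the data table entries into the assoc table
 * length.
 */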
static void ssi_buffer_mgr_update_aead_mlli_nents(struct ssi_drvdata *drvdata,
1234 struct aead_request *req)
1235 {
1236 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1237 u32 curr_mlli_size = 0;
1238
1239 if (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) {
1240 areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
1241 curr_mlli_size = areq_ctx->assoc.mlli_nents *
1242 LLI_ENTRY_BYTE_SIZE;
1243 }
1244
1245 if (areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI) {
1246 /*Inplace case dst nents equal to src nents*/
1247 if (req->src == req->dst) {
1248 areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
1249 areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
1250 curr_mlli_size;
1251 areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
1252 if (!areq_ctx->is_single_pass)
1253 areq_ctx->assoc.mlli_nents +=
1254 areq_ctx->src.mlli_nents;
1255 } else {
1256 if (areq_ctx->gen_ctx.op_type ==
1257 DRV_CRYPTO_DIRECTION_DECRYPT) {
1258 areq_ctx->src.sram_addr =
1259 drvdata->mlli_sram_addr +
1260 curr_mlli_size;
1261 areq_ctx->dst.sram_addr =
1262 areq_ctx->src.sram_addr +
1263 areq_ctx->src.mlli_nents *
1264 LLI_ENTRY_BYTE_SIZE;
1265 if (!areq_ctx->is_single_pass)
1266 areq_ctx->assoc.mlli_nents +=
1267 areq_ctx->src.mlli_nents;
1268 } else {
1269 areq_ctx->dst.sram_addr =
1270 drvdata->mlli_sram_addr +
1271 curr_mlli_size;
1272 areq_ctx->src.sram_addr =
1273 areq_ctx->dst.sram_addr +
1274 areq_ctx->dst.mlli_nents *
1275 LLI_ENTRY_BYTE_SIZE;
1276 if (!areq_ctx->is_single_pass)
1277 areq_ctx->assoc.mlli_nents +=
1278 areq_ctx->dst.mlli_nents;
1279 }
1280 }
1281 }
1282 }
1283
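/*
 * Map an AEAD request for DMA: back up the MAC when required (coherent,
 * in-place decrypt), map the MAC/IV/CCM/GCM helper buffers and the src/dst
 * SGLs, then chain assoc, IV and data into MLLI table(s) according to the
 * single-pass or double-pass flow.
 */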
int ssi_buffer_mgr_map_aead_request(
1285 struct ssi_drvdata *drvdata, struct aead_request *req)
1286 {
1287 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1288 struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1289 struct device *dev = &drvdata->plat_dev->dev;
1290 struct buffer_array sg_data;
1291 unsigned int authsize = areq_ctx->req_authsize;
1292 struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1293 int rc = 0;
1294 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1295 bool is_gcm4543 = areq_ctx->is_gcm4543;
1296
1297 u32 mapped_nents = 0;
1298 u32 dummy = 0; /*used for the assoc data fragments */
1299 u32 size_to_map = 0;
1300
1301 mlli_params->curr_pool = NULL;
1302 sg_data.num_of_buffers = 0;
1303
1304 if (drvdata->coherent &&
1305 (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
1306 likely(req->src == req->dst)) {
1307 u32 size_to_skip = req->assoclen;
1308
1309 if (is_gcm4543)
1310 size_to_skip += crypto_aead_ivsize(tfm);
1311
		/* copy mac to a temporary location to deal with a possible
		 * data memory overwrite caused by cache coherence problems.
		 */
1315 ssi_buffer_mgr_copy_scatterlist_portion(
1316 areq_ctx->backup_mac, req->src,
1317 size_to_skip + req->cryptlen - areq_ctx->req_authsize,
1318 size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
1319 }
1320
	/* Calculate the size for the cipher; the ICV is removed on decrypt */
1322 areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
1323 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1324 req->cryptlen :
1325 (req->cryptlen - authsize);
1326
1327 areq_ctx->mac_buf_dma_addr = dma_map_single(dev, areq_ctx->mac_buf,
1328 MAX_MAC_SIZE,
1329 DMA_BIDIRECTIONAL);
1330 if (unlikely(dma_mapping_error(dev, areq_ctx->mac_buf_dma_addr))) {
1331 SSI_LOG_ERR("Mapping mac_buf %u B at va=%pK for DMA failed\n",
1332 MAX_MAC_SIZE, areq_ctx->mac_buf);
1333 rc = -ENOMEM;
1334 goto aead_map_failure;
1335 }
1336
1337 if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
1338 areq_ctx->ccm_iv0_dma_addr = dma_map_single(dev,
1339 (areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET),
1340 AES_BLOCK_SIZE,
1341 DMA_TO_DEVICE);
1342
1343 if (unlikely(dma_mapping_error(dev, areq_ctx->ccm_iv0_dma_addr))) {
			SSI_LOG_ERR("Mapping ccm_iv0 %u B at va=%pK "
				    "for DMA failed\n", AES_BLOCK_SIZE,
				    (areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET));
1347 areq_ctx->ccm_iv0_dma_addr = 0;
1348 rc = -ENOMEM;
1349 goto aead_map_failure;
1350 }
1351 if (ssi_aead_handle_config_buf(dev, areq_ctx,
1352 areq_ctx->ccm_config, &sg_data,
1353 req->assoclen) != 0) {
1354 rc = -ENOMEM;
1355 goto aead_map_failure;
1356 }
1357 }
1358
1359 #if SSI_CC_HAS_AES_GCM
1360 if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
1361 areq_ctx->hkey_dma_addr = dma_map_single(dev,
1362 areq_ctx->hkey,
1363 AES_BLOCK_SIZE,
1364 DMA_BIDIRECTIONAL);
1365 if (unlikely(dma_mapping_error(dev, areq_ctx->hkey_dma_addr))) {
1366 SSI_LOG_ERR("Mapping hkey %u B at va=%pK for DMA failed\n",
1367 AES_BLOCK_SIZE, areq_ctx->hkey);
1368 rc = -ENOMEM;
1369 goto aead_map_failure;
1370 }
1371
1372 areq_ctx->gcm_block_len_dma_addr = dma_map_single(dev,
1373 &areq_ctx->gcm_len_block,
1374 AES_BLOCK_SIZE,
1375 DMA_TO_DEVICE);
1376 if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_block_len_dma_addr))) {
1377 SSI_LOG_ERR("Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
1378 AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
1379 rc = -ENOMEM;
1380 goto aead_map_failure;
1381 }
1382
1383 areq_ctx->gcm_iv_inc1_dma_addr = dma_map_single(dev,
1384 areq_ctx->gcm_iv_inc1,
1385 AES_BLOCK_SIZE,
1386 DMA_TO_DEVICE);
1387
1388 if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc1_dma_addr))) {
1389 SSI_LOG_ERR("Mapping gcm_iv_inc1 %u B at va=%pK "
1390 "for DMA failed\n", AES_BLOCK_SIZE,
1391 (areq_ctx->gcm_iv_inc1));
1392 areq_ctx->gcm_iv_inc1_dma_addr = 0;
1393 rc = -ENOMEM;
1394 goto aead_map_failure;
1395 }
1396
1397 areq_ctx->gcm_iv_inc2_dma_addr = dma_map_single(dev,
1398 areq_ctx->gcm_iv_inc2,
1399 AES_BLOCK_SIZE,
1400 DMA_TO_DEVICE);
1401
1402 if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc2_dma_addr))) {
1403 SSI_LOG_ERR("Mapping gcm_iv_inc2 %u B at va=%pK "
1404 "for DMA failed\n", AES_BLOCK_SIZE,
1405 (areq_ctx->gcm_iv_inc2));
1406 areq_ctx->gcm_iv_inc2_dma_addr = 0;
1407 rc = -ENOMEM;
1408 goto aead_map_failure;
1409 }
1410 }
1411 #endif /*SSI_CC_HAS_AES_GCM*/
1412
1413 size_to_map = req->cryptlen + req->assoclen;
1414 if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
1415 size_to_map += authsize;
1416
1417 if (is_gcm4543)
1418 size_to_map += crypto_aead_ivsize(tfm);
1419 rc = ssi_buffer_mgr_map_scatterlist(dev, req->src,
1420 size_to_map, DMA_BIDIRECTIONAL, &areq_ctx->src.nents,
1421 LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES + LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
1422 if (unlikely(rc != 0)) {
1423 rc = -ENOMEM;
1424 goto aead_map_failure;
1425 }
1426
1427 if (likely(areq_ctx->is_single_pass)) {
1428 /*
1429 * Create MLLI table for:
1430 * (1) Assoc. data
1431 * (2) Src/Dst SGLs
		 * Note: IV is a contiguous buffer (not an SGL)
1433 */
1434 rc = ssi_buffer_mgr_aead_chain_assoc(drvdata, req, &sg_data, true, false);
1435 if (unlikely(rc != 0))
1436 goto aead_map_failure;
1437 rc = ssi_buffer_mgr_aead_chain_iv(drvdata, req, &sg_data, true, false);
1438 if (unlikely(rc != 0))
1439 goto aead_map_failure;
1440 rc = ssi_buffer_mgr_aead_chain_data(drvdata, req, &sg_data, true, false);
1441 if (unlikely(rc != 0))
1442 goto aead_map_failure;
1443 } else { /* DOUBLE-PASS flow */
1444 /*
1445 * Prepare MLLI table(s) in this order:
1446 *
1447 * If ENCRYPT/DECRYPT (inplace):
1448 * (1) MLLI table for assoc
1449 * (2) IV entry (chained right after end of assoc)
1450 * (3) MLLI for src/dst (inplace operation)
1451 *
1452 * If ENCRYPT (non-inplace)
1453 * (1) MLLI table for assoc
1454 * (2) IV entry (chained right after end of assoc)
1455 * (3) MLLI for dst
1456 * (4) MLLI for src
1457 *
1458 * If DECRYPT (non-inplace)
1459 * (1) MLLI table for assoc
1460 * (2) IV entry (chained right after end of assoc)
1461 * (3) MLLI for src
1462 * (4) MLLI for dst
1463 */
1464 rc = ssi_buffer_mgr_aead_chain_assoc(drvdata, req, &sg_data, false, true);
1465 if (unlikely(rc != 0))
1466 goto aead_map_failure;
1467 rc = ssi_buffer_mgr_aead_chain_iv(drvdata, req, &sg_data, false, true);
1468 if (unlikely(rc != 0))
1469 goto aead_map_failure;
1470 rc = ssi_buffer_mgr_aead_chain_data(drvdata, req, &sg_data, true, true);
1471 if (unlikely(rc != 0))
1472 goto aead_map_failure;
1473 }
1474
	/* MLLI support - start building the MLLI according to the above results */
1476 if (unlikely(
1477 (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) ||
1478 (areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI))) {
1479 mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1480 rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
1481 if (unlikely(rc != 0))
1482 goto aead_map_failure;
1483
1484 ssi_buffer_mgr_update_aead_mlli_nents(drvdata, req);
1485 SSI_LOG_DEBUG("assoc params mn %d\n", areq_ctx->assoc.mlli_nents);
1486 SSI_LOG_DEBUG("src params mn %d\n", areq_ctx->src.mlli_nents);
1487 SSI_LOG_DEBUG("dst params mn %d\n", areq_ctx->dst.mlli_nents);
1488 }
1489 return 0;
1490
1491 aead_map_failure:
1492 ssi_buffer_mgr_unmap_aead_request(dev, req);
1493 return rc;
1494 }
1495
int ssi_buffer_mgr_map_hash_request_final(
1497 struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, bool do_update)
1498 {
1499 struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1500 struct device *dev = &drvdata->plat_dev->dev;
1501 u8 *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
1502 areq_ctx->buff0;
1503 u32 *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
1504 &areq_ctx->buff0_cnt;
1505 struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1506 struct buffer_array sg_data;
1507 struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1508 u32 dummy = 0;
1509 u32 mapped_nents = 0;
1510
1511 SSI_LOG_DEBUG(" final params : curr_buff=%pK "
1512 "curr_buff_cnt=0x%X nbytes = 0x%X "
1513 "src=%pK curr_index=%u\n",
1514 curr_buff, *curr_buff_cnt, nbytes,
1515 src, areq_ctx->buff_index);
1516 /* Init the type of the dma buffer */
1517 areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
1518 mlli_params->curr_pool = NULL;
1519 sg_data.num_of_buffers = 0;
1520 areq_ctx->in_nents = 0;
1521
1522 if (unlikely(nbytes == 0 && *curr_buff_cnt == 0)) {
1523 /* nothing to do */
1524 return 0;
1525 }
1526
	/* TODO: copy the data into the staging buffer when it is large enough for the whole operation */
1528 /* map the previous buffer */
1529 if (*curr_buff_cnt != 0) {
1530 if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
1531 *curr_buff_cnt, &sg_data) != 0) {
1532 return -ENOMEM;
1533 }
1534 }
1535
1536 if (src && (nbytes > 0) && do_update) {
1537 if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src, nbytes,
1538 DMA_TO_DEVICE,
1539 &areq_ctx->in_nents,
1540 LLI_MAX_NUM_OF_DATA_ENTRIES,
1541 &dummy,
1542 &mapped_nents))){
1543 goto unmap_curr_buff;
1544 }
1545 if (src && (mapped_nents == 1)
1546 && (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
1547 memcpy(areq_ctx->buff_sg, src,
1548 sizeof(struct scatterlist));
1549 areq_ctx->buff_sg->length = nbytes;
1550 areq_ctx->curr_sg = areq_ctx->buff_sg;
1551 areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
1552 } else {
1553 areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
1554 }
1555 }
1556
1557 /*build mlli */
1558 if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
1559 mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1560 /* add the src data to the sg_data */
1561 ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
1562 areq_ctx->in_nents,
1563 src, nbytes, 0, true,
1564 &areq_ctx->mlli_nents);
1565 if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
1566 mlli_params) != 0)) {
1567 goto fail_unmap_din;
1568 }
1569 }
1570 /* change the buffer index for the unmap function */
1571 areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
1572 SSI_LOG_DEBUG("areq_ctx->data_dma_buf_type = %s\n",
1573 GET_DMA_BUFFER_TYPE(areq_ctx->data_dma_buf_type));
1574 return 0;
1575
1576 fail_unmap_din:
1577 dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1578
1579 unmap_curr_buff:
1580 if (*curr_buff_cnt != 0)
1581 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1582
1583 return -ENOMEM;
1584 }
1585
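/*
 * Map an ahash update: input smaller than one block is only accumulated in
 * the current staging buffer; otherwise the block-aligned part is mapped for
 * DMA and the residue is copied into the next staging buffer (buff0/buff1
 * are swapped between updates).
 */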
int ssi_buffer_mgr_map_hash_request_update(
1587 struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, unsigned int block_size)
1588 {
1589 struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1590 struct device *dev = &drvdata->plat_dev->dev;
1591 u8 *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
1592 areq_ctx->buff0;
1593 u32 *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
1594 &areq_ctx->buff0_cnt;
1595 u8 *next_buff = areq_ctx->buff_index ? areq_ctx->buff0 :
1596 areq_ctx->buff1;
1597 u32 *next_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
1598 &areq_ctx->buff1_cnt;
1599 struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1600 unsigned int update_data_len;
1601 u32 total_in_len = nbytes + *curr_buff_cnt;
1602 struct buffer_array sg_data;
1603 struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1604 unsigned int swap_index = 0;
1605 u32 dummy = 0;
1606 u32 mapped_nents = 0;
1607
1608 SSI_LOG_DEBUG(" update params : curr_buff=%pK "
1609 "curr_buff_cnt=0x%X nbytes=0x%X "
1610 "src=%pK curr_index=%u\n",
1611 curr_buff, *curr_buff_cnt, nbytes,
1612 src, areq_ctx->buff_index);
1613 /* Init the type of the dma buffer */
1614 areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
1615 mlli_params->curr_pool = NULL;
1616 areq_ctx->curr_sg = NULL;
1617 sg_data.num_of_buffers = 0;
1618 areq_ctx->in_nents = 0;
1619
1620 if (unlikely(total_in_len < block_size)) {
1621 SSI_LOG_DEBUG(" less than one block: curr_buff=%pK "
1622 "*curr_buff_cnt=0x%X copy_to=%pK\n",
1623 curr_buff, *curr_buff_cnt,
1624 &curr_buff[*curr_buff_cnt]);
1625 areq_ctx->in_nents =
1626 ssi_buffer_mgr_get_sgl_nents(src,
1627 nbytes,
1628 &dummy, NULL);
1629 sg_copy_to_buffer(src, areq_ctx->in_nents,
1630 &curr_buff[*curr_buff_cnt], nbytes);
1631 *curr_buff_cnt += nbytes;
1632 return 1;
1633 }
1634
1635 /* Calculate the residue size*/
1636 *next_buff_cnt = total_in_len & (block_size - 1);
1637 /* update data len */
1638 update_data_len = total_in_len - *next_buff_cnt;
1639
1640 SSI_LOG_DEBUG(" temp length : *next_buff_cnt=0x%X "
1641 "update_data_len=0x%X\n",
1642 *next_buff_cnt, update_data_len);
1643
1644 /* Copy the new residue to next buffer */
1645 if (*next_buff_cnt != 0) {
1646 SSI_LOG_DEBUG(" handle residue: next buff %pK skip data %u"
1647 " residue %u\n", next_buff,
1648 (update_data_len - *curr_buff_cnt),
1649 *next_buff_cnt);
1650 ssi_buffer_mgr_copy_scatterlist_portion(next_buff, src,
1651 (update_data_len - *curr_buff_cnt),
1652 nbytes, SSI_SG_TO_BUF);
1653 /* change the buffer index for next operation */
1654 swap_index = 1;
1655 }
1656
1657 if (*curr_buff_cnt != 0) {
1658 if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
1659 *curr_buff_cnt, &sg_data) != 0) {
1660 return -ENOMEM;
1661 }
1662 /* change the buffer index for next operation */
1663 swap_index = 1;
1664 }
1665
1666 if (update_data_len > *curr_buff_cnt) {
1667 if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src,
1668 (update_data_len - *curr_buff_cnt),
1669 DMA_TO_DEVICE,
1670 &areq_ctx->in_nents,
1671 LLI_MAX_NUM_OF_DATA_ENTRIES,
1672 &dummy,
1673 &mapped_nents))){
1674 goto unmap_curr_buff;
1675 }
1676 if ((mapped_nents == 1)
1677 && (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
1678 /* only one entry in the SG and no previous data */
1679 memcpy(areq_ctx->buff_sg, src,
1680 sizeof(struct scatterlist));
1681 areq_ctx->buff_sg->length = update_data_len;
1682 areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
1683 areq_ctx->curr_sg = areq_ctx->buff_sg;
1684 } else {
1685 areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
1686 }
1687 }
1688
1689 if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
1690 mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1691 /* add the src data to the sg_data */
1692 ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
1693 areq_ctx->in_nents,
1694 src,
1695 (update_data_len - *curr_buff_cnt),
1696 0,
1697 true,
1698 &areq_ctx->mlli_nents);
1699 if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
1700 mlli_params) != 0)) {
1701 goto fail_unmap_din;
1702 }
1703 }
1704 areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
1705
1706 return 0;
1707
1708 fail_unmap_din:
1709 dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1710
1711 unmap_curr_buff:
1712 if (*curr_buff_cnt != 0)
1713 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1714
1715 return -ENOMEM;
1716 }
1717
void ssi_buffer_mgr_unmap_hash_request(
1719 struct device *dev, void *ctx, struct scatterlist *src, bool do_revert)
1720 {
1721 struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1722 u32 *prev_len = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
1723 &areq_ctx->buff1_cnt;
1724
1725 /*In case a pool was set, a table was
1726 *allocated and should be released
1727 */
1728 if (areq_ctx->mlli_params.curr_pool) {
1729 SSI_LOG_DEBUG("free MLLI buffer: dma=%pad virt=%pK\n",
1730 areq_ctx->mlli_params.mlli_dma_addr,
1731 areq_ctx->mlli_params.mlli_virt_addr);
1732 dma_pool_free(areq_ctx->mlli_params.curr_pool,
1733 areq_ctx->mlli_params.mlli_virt_addr,
1734 areq_ctx->mlli_params.mlli_dma_addr);
1735 }
1736
1737 if ((src) && likely(areq_ctx->in_nents != 0)) {
1738 SSI_LOG_DEBUG("Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
1739 sg_virt(src),
1740 sg_dma_address(src),
1741 sg_dma_len(src));
1742 dma_unmap_sg(dev, src,
1743 areq_ctx->in_nents, DMA_TO_DEVICE);
1744 }
1745
1746 if (*prev_len != 0) {
1747 SSI_LOG_DEBUG("Unmapped buffer: areq_ctx->buff_sg=%pK"
1748 " dma=%pad len 0x%X\n",
1749 sg_virt(areq_ctx->buff_sg),
1750 sg_dma_address(areq_ctx->buff_sg),
1751 sg_dma_len(areq_ctx->buff_sg));
1752 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1753 if (!do_revert) {
1754 /* clean the previous data length for update operation */
1755 *prev_len = 0;
1756 } else {
1757 areq_ctx->buff_index ^= 1;
1758 }
1759 }
1760 }
1761
int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata)
1763 {
1764 struct buff_mgr_handle *buff_mgr_handle;
1765 struct device *dev = &drvdata->plat_dev->dev;
1766
1767 buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
1768 if (!buff_mgr_handle)
1769 return -ENOMEM;
1770
1771 drvdata->buff_mgr_handle = buff_mgr_handle;
1772
1773 buff_mgr_handle->mlli_buffs_pool = dma_pool_create(
1774 "dx_single_mlli_tables", dev,
1775 MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
1776 LLI_ENTRY_BYTE_SIZE,
1777 MLLI_TABLE_MIN_ALIGNMENT, 0);
1778
1779 if (unlikely(!buff_mgr_handle->mlli_buffs_pool))
1780 goto error;
1781
1782 return 0;
1783
1784 error:
1785 ssi_buffer_mgr_fini(drvdata);
1786 return -ENOMEM;
1787 }
1788
int ssi_buffer_mgr_fini(struct ssi_drvdata *drvdata)
1790 {
1791 struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;
1792
1793 if (buff_mgr_handle) {
1794 dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
1795 kfree(drvdata->buff_mgr_handle);
1796 drvdata->buff_mgr_handle = NULL;
1797 }
1798 return 0;
1799 }
1800