1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/gfp.h>
3 #include <linux/workqueue.h>
4 #include <crypto/internal/skcipher.h>
5 
6 #include "nitrox_dev.h"
7 #include "nitrox_req.h"
8 #include "nitrox_csr.h"
10 
11 /* SLC_STORE_INFO */
12 #define MIN_UDD_LEN 16
13 /* PKT_IN_HDR + SLC_STORE_INFO */
14 #define FDATA_SIZE 32
15 /* Base destination port for the solicited requests */
16 #define SOLICIT_BASE_DPORT 256
17 #define PENDING_SIG	0xFFFFFFFFFFFFFFFFUL
18 
19 #define REQ_NOT_POSTED 1
20 #define REQ_BACKLOG    2
21 #define REQ_POSTED     3
22 
23 /**
24  * Response codes from SE microcode
25  * 0x00 - Success
26  *   Completion with no error
27  * 0x43 - ERR_GC_DATA_LEN_INVALID
28  *   Invalid Data length if Encryption Data length is
29  *   less than 16 bytes for AES-XTS and AES-CTS.
30  * 0x45 - ERR_GC_CTX_LEN_INVALID
31  *   Invalid context length: CTXL != 23 words.
32  * 0x4F - ERR_GC_DOCSIS_CIPHER_INVALID
33  *   DOCSIS support is enabled with other than
34  *   AES/DES-CBC mode encryption.
35  * 0x50 - ERR_GC_DOCSIS_OFFSET_INVALID
36  *   Authentication offset is other than 0 with
37  *   Encryption IV source = 0.
38  *   Authentication offset is other than 8 (DES)/16 (AES)
39  *   with Encryption IV source = 1
40  * 0x51 - ERR_GC_CRC32_INVALID_SELECTION
41  *   CRC32 is enabled for other than DOCSIS encryption.
42  * 0x52 - ERR_GC_AES_CCM_FLAG_INVALID
43  *   Invalid flag options in AES-CCM IV.
44  */
45 
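As a reading aid (not part of the driver), the codes listed above arrive in the low byte of the ORH, which is how process_response_list() later extracts the error. A minimal decoding sketch, with an assumed helper name and string literals used purely for illustration:

/* Illustrative sketch: map the SE completion code (ORH bits 7:0) to a name. */
static const char *se_completion_str(u64 orh)
{
	switch (orh & 0xff) {
	case 0x00: return "success";
	case 0x43: return "ERR_GC_DATA_LEN_INVALID";
	case 0x45: return "ERR_GC_CTX_LEN_INVALID";
	case 0x4f: return "ERR_GC_DOCSIS_CIPHER_INVALID";
	case 0x50: return "ERR_GC_DOCSIS_OFFSET_INVALID";
	case 0x51: return "ERR_GC_CRC32_INVALID_SELECTION";
	case 0x52: return "ERR_GC_AES_CCM_FLAG_INVALID";
	default:   return "unknown completion code";
	}
}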
46 static inline int incr_index(int index, int count, int max)
47 {
48 	if ((index + count) >= max)
49 		index = index + count - max;
50 	else
51 		index += count;
52 
53 	return index;
54 }
55 
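A quick worked example of the ring-index helper above, using hypothetical values; post_se_instr() below calls it with count 1 and max = ndev->qlen.

/* Hypothetical values: the index wraps like a ring-buffer write pointer. */
static void incr_index_example(void)
{
	int qlen = 8;	/* assumed ring size */
	int idx = 7;

	idx = incr_index(idx, 1, qlen);	/* 7 + 1 >= 8 -> wraps to 0 */
	idx = incr_index(idx, 3, qlen);	/* 0 + 3 <  8 -> becomes 3  */
	(void)idx;
}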
56 /**
57  * softreq_unmap_sgbufs - unmap and free the input and output
58  *                        sg lists of a soft request.
59  * @sr: soft request structure
60  */
61 static void softreq_unmap_sgbufs(struct nitrox_softreq *sr)
62 {
63 	struct nitrox_device *ndev = sr->ndev;
64 	struct device *dev = DEV(ndev);
65 	struct nitrox_sglist *sglist;
66 
67 	/* unmap in sgbuf */
68 	sglist = sr->in.sglist;
69 	if (!sglist)
70 		goto out_unmap;
71 
72 	/* unmap iv */
73 	dma_unmap_single(dev, sglist->dma, sglist->len, DMA_BIDIRECTIONAL);
74 	/* unmap src sglist */
75 	dma_unmap_sg(dev, sr->in.buf, (sr->in.map_bufs_cnt - 1), sr->in.dir);
76 	/* unmap gather component */
77 	dma_unmap_single(dev, sr->in.dma, sr->in.len, DMA_TO_DEVICE);
78 	kfree(sr->in.sglist);
79 	kfree(sr->in.sgcomp);
80 	sr->in.sglist = NULL;
81 	sr->in.buf = NULL;
82 	sr->in.map_bufs_cnt = 0;
83 
84 out_unmap:
85 	/* unmap out sgbuf */
86 	sglist = sr->out.sglist;
87 	if (!sglist)
88 		return;
89 
90 	/* unmap orh */
91 	dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir);
92 
93 	/* unmap dst sglist */
94 	if (!sr->inplace) {
95 		dma_unmap_sg(dev, sr->out.buf, (sr->out.map_bufs_cnt - 3),
96 			     sr->out.dir);
97 	}
98 	/* unmap completion */
99 	dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir);
100 
101 	/* unmap scatter component */
102 	dma_unmap_single(dev, sr->out.dma, sr->out.len, DMA_TO_DEVICE);
103 	kfree(sr->out.sglist);
104 	kfree(sr->out.sgcomp);
105 	sr->out.sglist = NULL;
106 	sr->out.buf = NULL;
107 	sr->out.map_bufs_cnt = 0;
108 }
109 
110 static void softreq_destroy(struct nitrox_softreq *sr)
111 {
112 	softreq_unmap_sgbufs(sr);
113 	kfree(sr);
114 }
115 
116 /**
117  * create_sg_component - create SG components for N5 device.
118  * @sr: Request structure
119  * @sgtbl: SG table
120  * @map_nents: number of mapped SG entries
121  *
122  * Component structure
123  *
124  *   63     48 47     32 31    16 15      0
125  *   --------------------------------------
126  *   |   LEN0  |  LEN1  |  LEN2  |  LEN3  |
127  *   |-------------------------------------
128  *   |               PTR0                 |
129  *   --------------------------------------
130  *   |               PTR1                 |
131  *   --------------------------------------
132  *   |               PTR2                 |
133  *   --------------------------------------
134  *   |               PTR3                 |
135  *   --------------------------------------
136  *
137  *   Returns 0 on success, or a negative errno code on error.
138  */
139 static int create_sg_component(struct nitrox_softreq *sr,
140 			       struct nitrox_sgtable *sgtbl, int map_nents)
141 {
142 	struct nitrox_device *ndev = sr->ndev;
143 	struct nitrox_sgcomp *sgcomp;
144 	struct nitrox_sglist *sglist;
145 	dma_addr_t dma;
146 	size_t sz_comp;
147 	int i, j, nr_sgcomp;
148 
149 	nr_sgcomp = roundup(map_nents, 4) / 4;
150 
151 	/* each component holds 4 dma pointers */
152 	sz_comp = nr_sgcomp * sizeof(*sgcomp);
153 	sgcomp = kzalloc(sz_comp, sr->gfp);
154 	if (!sgcomp)
155 		return -ENOMEM;
156 
157 	sgtbl->sgcomp = sgcomp;
158 	sgtbl->nr_sgcomp = nr_sgcomp;
159 
160 	sglist = sgtbl->sglist;
161 	/* populate device sg component */
162 	for (i = 0; i < nr_sgcomp; i++) {
163 		for (j = 0; j < 4; j++) {
164 			sgcomp->len[j] = cpu_to_be16(sglist->len);
165 			sgcomp->dma[j] = cpu_to_be64(sglist->dma);
166 			sglist++;
167 		}
168 		sgcomp++;
169 	}
170 	/* map the device sg component */
171 	dma = dma_map_single(DEV(ndev), sgtbl->sgcomp, sz_comp, DMA_TO_DEVICE);
172 	if (dma_mapping_error(DEV(ndev), dma)) {
173 		kfree(sgtbl->sgcomp);
174 		sgtbl->sgcomp = NULL;
175 		return -ENOMEM;
176 	}
177 
178 	sgtbl->dma = dma;
179 	sgtbl->len = sz_comp;
180 
181 	return 0;
182 }
183 
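The component layout pictured in the comment above amounts to four big-endian lengths followed by four big-endian pointers, which is how create_sg_component() fills it. The struct below is only a sketch; the driver's actual struct nitrox_sgcomp is defined in nitrox_req.h and its naming may differ.

/* Sketch of one 40-byte SG component as filled by create_sg_component(). */
struct sgcomp_sketch {
	__be16 len[4];	/* LEN0..LEN3: byte counts of up to four buffers */
	__be64 dma[4];	/* PTR0..PTR3: DMA addresses of those buffers    */
};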
184 /**
185  * dma_map_inbufs - DMA map input sglist and create the sglist component
186  *                  for N5 device.
187  * @sr: Request structure
188  * @req: Crypto request structure
189  *
190  * Returns 0 if successful or a negative errno code on error.
191  */
192 static int dma_map_inbufs(struct nitrox_softreq *sr,
193 			  struct se_crypto_request *req)
194 {
195 	struct device *dev = DEV(sr->ndev);
196 	struct scatterlist *sg = req->src;
197 	struct nitrox_sglist *glist;
198 	int i, nents, ret = 0;
199 	dma_addr_t dma;
200 	size_t sz;
201 
202 	nents = sg_nents(req->src);
203 
204 	/* create gather list for IV and src entries */
205 	sz = roundup((1 + nents), 4) * sizeof(*glist);
206 	glist = kzalloc(sz, sr->gfp);
207 	if (!glist)
208 		return -ENOMEM;
209 
210 	sr->in.sglist = glist;
211 	/* map IV */
212 	dma = dma_map_single(dev, &req->iv, req->ivsize, DMA_BIDIRECTIONAL);
213 	if (dma_mapping_error(dev, dma)) {
214 		ret = -EINVAL;
215 		goto iv_map_err;
216 	}
217 
218 	sr->in.dir = (req->src == req->dst) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
219 	/* map src entries */
220 	nents = dma_map_sg(dev, req->src, nents, sr->in.dir);
221 	if (!nents) {
222 		ret = -EINVAL;
223 		goto src_map_err;
224 	}
225 	sr->in.buf = req->src;
226 
227 	/* store the mappings */
228 	glist->len = req->ivsize;
229 	glist->dma = dma;
230 	glist++;
231 	sr->in.total_bytes += req->ivsize;
232 
233 	for_each_sg(req->src, sg, nents, i) {
234 		glist->len = sg_dma_len(sg);
235 		glist->dma = sg_dma_address(sg);
236 		sr->in.total_bytes += glist->len;
237 		glist++;
238 	}
239 	/* roundup map count to align with entries in sg component */
240 	sr->in.map_bufs_cnt = (1 + nents);
241 
242 	/* create NITROX gather component */
243 	ret = create_sg_component(sr, &sr->in, sr->in.map_bufs_cnt);
244 	if (ret)
245 		goto incomp_err;
246 
247 	return 0;
248 
249 incomp_err:
250 	dma_unmap_sg(dev, req->src, nents, sr->in.dir);
251 	sr->in.map_bufs_cnt = 0;
252 src_map_err:
253 	dma_unmap_single(dev, dma, req->ivsize, DMA_BIDIRECTIONAL);
254 iv_map_err:
255 	kfree(sr->in.sglist);
256 	sr->in.sglist = NULL;
257 	return ret;
258 }
259 
260 static int dma_map_outbufs(struct nitrox_softreq *sr,
261 			   struct se_crypto_request *req)
262 {
263 	struct device *dev = DEV(sr->ndev);
264 	struct nitrox_sglist *glist = sr->in.sglist;
265 	struct nitrox_sglist *slist;
266 	struct scatterlist *sg;
267 	int i, nents, map_bufs_cnt, ret = 0;
268 	size_t sz;
269 
270 	nents = sg_nents(req->dst);
271 
272 	/* create scatter list ORH, IV, dst entries and Completion header */
273 	sz = roundup((3 + nents), 4) * sizeof(*slist);
274 	slist = kzalloc(sz, sr->gfp);
275 	if (!slist)
276 		return -ENOMEM;
277 
278 	sr->out.sglist = slist;
279 	sr->out.dir = DMA_BIDIRECTIONAL;
280 	/* map ORH */
281 	sr->resp.orh_dma = dma_map_single(dev, &sr->resp.orh, ORH_HLEN,
282 					  sr->out.dir);
283 	if (dma_mapping_error(dev, sr->resp.orh_dma)) {
284 		ret = -EINVAL;
285 		goto orh_map_err;
286 	}
287 
288 	/* map completion */
289 	sr->resp.completion_dma = dma_map_single(dev, &sr->resp.completion,
290 						 COMP_HLEN, sr->out.dir);
291 	if (dma_mapping_error(dev, sr->resp.completion_dma)) {
292 		ret = -EINVAL;
293 		goto compl_map_err;
294 	}
295 
296 	sr->inplace = (req->src == req->dst);
297 	/* out of place */
298 	if (!sr->inplace) {
299 		nents = dma_map_sg(dev, req->dst, nents, sr->out.dir);
300 		if (!nents) {
301 			ret = -EINVAL;
302 			goto dst_map_err;
303 		}
304 	}
305 	sr->out.buf = req->dst;
306 
307 	/* store the mappings */
308 	/* orh */
309 	slist->len = ORH_HLEN;
310 	slist->dma = sr->resp.orh_dma;
311 	slist++;
312 
313 	/* copy the glist mappings */
314 	if (sr->inplace) {
315 		nents = sr->in.map_bufs_cnt - 1;
316 		map_bufs_cnt = sr->in.map_bufs_cnt;
317 		while (map_bufs_cnt--) {
318 			slist->len = glist->len;
319 			slist->dma = glist->dma;
320 			slist++;
321 			glist++;
322 		}
323 	} else {
324 		/* copy iv mapping */
325 		slist->len = glist->len;
326 		slist->dma = glist->dma;
327 		slist++;
328 		/* copy remaining maps */
329 		for_each_sg(req->dst, sg, nents, i) {
330 			slist->len = sg_dma_len(sg);
331 			slist->dma = sg_dma_address(sg);
332 			slist++;
333 		}
334 	}
335 
336 	/* completion */
337 	slist->len = COMP_HLEN;
338 	slist->dma = sr->resp.completion_dma;
339 
340 	sr->out.map_bufs_cnt = (3 + nents);
341 
342 	ret = create_sg_component(sr, &sr->out, sr->out.map_bufs_cnt);
343 	if (ret)
344 		goto outcomp_map_err;
345 
346 	return 0;
347 
348 outcomp_map_err:
349 	if (!sr->inplace)
350 		dma_unmap_sg(dev, req->dst, nents, sr->out.dir);
351 	sr->out.map_bufs_cnt = 0;
352 	sr->out.buf = NULL;
353 dst_map_err:
354 	dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir);
355 	sr->resp.completion_dma = 0;
356 compl_map_err:
357 	dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir);
358 	sr->resp.orh_dma = 0;
359 orh_map_err:
360 	kfree(sr->out.sglist);
361 	sr->out.sglist = NULL;
362 	return ret;
363 }
364 
365 static inline int softreq_map_iobuf(struct nitrox_softreq *sr,
366 				    struct se_crypto_request *creq)
367 {
368 	int ret;
369 
370 	ret = dma_map_inbufs(sr, creq);
371 	if (ret)
372 		return ret;
373 
374 	ret = dma_map_outbufs(sr, creq);
375 	if (ret)
376 		softreq_unmap_sgbufs(sr);
377 
378 	return ret;
379 }
380 
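To make the bookkeeping in dma_map_inbufs()/dma_map_outbufs() concrete, here is a worked example with hypothetical entry counts (5 source and 2 destination sg entries, not in-place). The gather side holds IV + src, the scatter side holds ORH + IV + dst + completion, and each component carries four entries, with the kzalloc'ed padding slots left as zero.

/* Hypothetical request: 5 src sg entries, 2 dst sg entries, src != dst. */
static void map_counts_example(void)
{
	int src_nents = 5, dst_nents = 2;

	int in_cnt  = 1 + src_nents;	/* IV + src entries            = 6 */
	int out_cnt = 3 + dst_nents;	/* ORH + IV + dst + completion = 5 */

	/* four pointers per component; unused trailing slots stay zeroed */
	int in_comps  = roundup(in_cnt, 4) / 4;		/* 2 gather components  */
	int out_comps = roundup(out_cnt, 4) / 4;	/* 2 scatter components */

	(void)in_comps;
	(void)out_comps;
}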
381 static inline void backlog_list_add(struct nitrox_softreq *sr,
382 				    struct nitrox_cmdq *cmdq)
383 {
384 	INIT_LIST_HEAD(&sr->backlog);
385 
386 	spin_lock_bh(&cmdq->backlog_lock);
387 	list_add_tail(&sr->backlog, &cmdq->backlog_head);
388 	atomic_inc(&cmdq->backlog_count);
389 	atomic_set(&sr->status, REQ_BACKLOG);
390 	spin_unlock_bh(&cmdq->backlog_lock);
391 }
392 
393 static inline void response_list_add(struct nitrox_softreq *sr,
394 				     struct nitrox_cmdq *cmdq)
395 {
396 	INIT_LIST_HEAD(&sr->response);
397 
398 	spin_lock_bh(&cmdq->response_lock);
399 	list_add_tail(&sr->response, &cmdq->response_head);
400 	spin_unlock_bh(&cmdq->response_lock);
401 }
402 
403 static inline void response_list_del(struct nitrox_softreq *sr,
404 				     struct nitrox_cmdq *cmdq)
405 {
406 	spin_lock_bh(&cmdq->response_lock);
407 	list_del(&sr->response);
408 	spin_unlock_bh(&cmdq->response_lock);
409 }
410 
411 static struct nitrox_softreq *
412 get_first_response_entry(struct nitrox_cmdq *cmdq)
413 {
414 	return list_first_entry_or_null(&cmdq->response_head,
415 					struct nitrox_softreq, response);
416 }
417 
418 static inline bool cmdq_full(struct nitrox_cmdq *cmdq, int qlen)
419 {
420 	if (atomic_inc_return(&cmdq->pending_count) > qlen) {
421 		atomic_dec(&cmdq->pending_count);
422 		/* sync with other cpus */
423 		smp_mb__after_atomic();
424 		return true;
425 	}
426 	return false;
427 }
428 
429 /**
430  * post_se_instr - Post SE instruction to Packet Input ring
431  * @sr: Request structure
432  * @cmdq: Command queue structure
433  *
434  * Copies the instruction into the ring and rings the doorbell.
435  */
436 static void post_se_instr(struct nitrox_softreq *sr,
437 			  struct nitrox_cmdq *cmdq)
438 {
439 	struct nitrox_device *ndev = sr->ndev;
440 	int idx;
441 	u8 *ent;
442 
443 	spin_lock_bh(&cmdq->cmdq_lock);
444 
445 	idx = cmdq->write_idx;
446 	/* copy the instruction */
447 	ent = cmdq->head + (idx * cmdq->instr_size);
448 	memcpy(ent, &sr->instr, cmdq->instr_size);
449 
450 	atomic_set(&sr->status, REQ_POSTED);
451 	response_list_add(sr, cmdq);
452 	sr->tstamp = jiffies;
453 	/* flush the command queue updates */
454 	dma_wmb();
455 
456 	/* Ring doorbell with count 1 */
457 	writeq(1, cmdq->dbell_csr_addr);
458 	/* orders the doorbell rings */
459 	mmiowb();
460 
461 	cmdq->write_idx = incr_index(idx, 1, ndev->qlen);
462 
463 	spin_unlock_bh(&cmdq->cmdq_lock);
464 }
465 
466 static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
467 {
468 	struct nitrox_device *ndev = cmdq->ndev;
469 	struct nitrox_softreq *sr, *tmp;
470 	int ret = 0;
471 
472 	if (!atomic_read(&cmdq->backlog_count))
473 		return 0;
474 
475 	spin_lock_bh(&cmdq->backlog_lock);
476 
477 	list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
478 		struct skcipher_request *skreq;
479 
480 		/* submit while there is space in the ring */
481 		if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
482 			ret = -ENOSPC;
483 			break;
484 		}
485 		/* delete from backlog list */
486 		list_del(&sr->backlog);
487 		atomic_dec(&cmdq->backlog_count);
488 		/* sync with other cpus */
489 		smp_mb__after_atomic();
490 
491 		skreq = sr->skreq;
492 		/* post the command */
493 		post_se_instr(sr, cmdq);
494 
495 		/* backlog requests are posted, wakeup with -EINPROGRESS */
496 		skcipher_request_complete(skreq, -EINPROGRESS);
497 	}
498 	spin_unlock_bh(&cmdq->backlog_lock);
499 
500 	return ret;
501 }
502 
503 static int nitrox_enqueue_request(struct nitrox_softreq *sr)
504 {
505 	struct nitrox_cmdq *cmdq = sr->cmdq;
506 	struct nitrox_device *ndev = sr->ndev;
507 
508 	/* try to post backlog requests */
509 	post_backlog_cmds(cmdq);
510 
511 	if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
512 		if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
513 			return -ENOSPC;
514 		/* add to backlog list */
515 		backlog_list_add(sr, cmdq);
516 		return -EBUSY;
517 	}
518 	post_se_instr(sr, cmdq);
519 
520 	return -EINPROGRESS;
521 }
522 
523 /**
524  * nitrox_process_se_request - Send request to SE core
525  * @ndev: NITROX device
526  * @req: Crypto request
527  * @callback: Completion callback
528  * @skreq: skcipher request
529  */
530 int nitrox_process_se_request(struct nitrox_device *ndev,
531 			      struct se_crypto_request *req,
532 			      completion_t callback,
533 			      struct skcipher_request *skreq)
534 {
535 	struct nitrox_softreq *sr;
536 	dma_addr_t ctx_handle = 0;
537 	int qno, ret = 0;
538 
539 	if (!nitrox_ready(ndev))
540 		return -ENODEV;
541 
542 	sr = kzalloc(sizeof(*sr), req->gfp);
543 	if (!sr)
544 		return -ENOMEM;
545 
546 	sr->ndev = ndev;
547 	sr->flags = req->flags;
548 	sr->gfp = req->gfp;
549 	sr->callback = callback;
550 	sr->skreq = skreq;
551 
552 	atomic_set(&sr->status, REQ_NOT_POSTED);
553 
554 	WRITE_ONCE(sr->resp.orh, PENDING_SIG);
555 	WRITE_ONCE(sr->resp.completion, PENDING_SIG);
556 
557 	ret = softreq_map_iobuf(sr, req);
558 	if (ret) {
559 		kfree(sr);
560 		return ret;
561 	}
562 
563 	/* get the context handle */
564 	if (req->ctx_handle) {
565 		struct ctx_hdr *hdr;
566 		u8 *ctx_ptr;
567 
568 		ctx_ptr = (u8 *)(uintptr_t)req->ctx_handle;
569 		hdr = (struct ctx_hdr *)(ctx_ptr - sizeof(struct ctx_hdr));
570 		ctx_handle = hdr->ctx_dma;
571 	}
572 
573 	/* select the queue */
574 	qno = smp_processor_id() % ndev->nr_queues;
575 
576 	sr->cmdq = &ndev->pkt_cmdqs[qno];
577 
578 	/*
579 	 * 64-Byte Instruction Format
580 	 *
581 	 *  ----------------------
582 	 *  |      DPTR0         | 8 bytes
583 	 *  ----------------------
584 	 *  |  PKT_IN_INSTR_HDR  | 8 bytes
585 	 *  ----------------------
586 	 *  |    PKT_IN_HDR      | 16 bytes
587 	 *  ----------------------
588 	 *  |    SLC_INFO        | 16 bytes
589 	 *  ----------------------
590 	 *  |   Front data       | 16 bytes
591 	 *  ----------------------
592 	 */
593 
594 	/* fill the packet instruction */
595 	/* word 0 */
596 	sr->instr.dptr0 = cpu_to_be64(sr->in.dma);
597 
598 	/* word 1 */
599 	sr->instr.ih.value = 0;
600 	sr->instr.ih.s.g = 1;
601 	sr->instr.ih.s.gsz = sr->in.map_bufs_cnt;
602 	sr->instr.ih.s.ssz = sr->out.map_bufs_cnt;
603 	sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr);
604 	sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes;
605 	sr->instr.ih.value = cpu_to_be64(sr->instr.ih.value);
606 
607 	/* word 2 */
608 	sr->instr.irh.value[0] = 0;
609 	sr->instr.irh.s.uddl = MIN_UDD_LEN;
610 	/* context length in 64-bit words */
611 	sr->instr.irh.s.ctxl = (req->ctrl.s.ctxl / 8);
612 	/* offset from solicit base port 256 */
613 	sr->instr.irh.s.destport = SOLICIT_BASE_DPORT + qno;
614 	sr->instr.irh.s.ctxc = req->ctrl.s.ctxc;
615 	sr->instr.irh.s.arg = req->ctrl.s.arg;
616 	sr->instr.irh.s.opcode = req->opcode;
617 	sr->instr.irh.value[0] = cpu_to_be64(sr->instr.irh.value[0]);
618 
619 	/* word 3 */
620 	sr->instr.irh.s.ctxp = cpu_to_be64(ctx_handle);
621 
622 	/* word 4 */
623 	sr->instr.slc.value[0] = 0;
624 	sr->instr.slc.s.ssz = sr->out.map_bufs_cnt;
625 	sr->instr.slc.value[0] = cpu_to_be64(sr->instr.slc.value[0]);
626 
627 	/* word 5 */
628 	sr->instr.slc.s.rptr = cpu_to_be64(sr->out.dma);
629 
630 	/*
631 	 * No byte-order conversion for front data;
632 	 * it goes into the payload as-is.
633 	 * Put the GP header in the front data.
634 	 */
635 	sr->instr.fdata[0] = *((u64 *)&req->gph);
636 	sr->instr.fdata[1] = 0;
637 
638 	ret = nitrox_enqueue_request(sr);
639 	if (ret == -ENOSPC)
640 		goto send_fail;
641 
642 	return ret;
643 
644 send_fail:
645 	softreq_destroy(sr);
646 	return ret;
647 }
648 
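The 64-byte instruction format commented inside nitrox_process_se_request() above can be pictured as the layout sketch below. Field and type names are illustrative only; the driver's real instruction structure lives in nitrox_req.h.

/* Sketch of the 64-byte SE instruction described in the comment above. */
struct se_instr_sketch {
	u64 dptr0;	/* word 0:    DMA address of the gather component list */
	u64 ih;		/* word 1:    PKT_IN_INSTR_HDR (gather/scatter sizes)  */
	u64 irh[2];	/* words 2-3: PKT_IN_HDR (opcode, context length/ptr)  */
	u64 slc[2];	/* words 4-5: SLC_STORE_INFO (scatter size, rptr)      */
	u64 fdata[2];	/* words 6-7: front data (GP header)                   */
};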
649 static inline int cmd_timeout(unsigned long tstamp, unsigned long timeout)
650 {
651 	return time_after_eq(jiffies, (tstamp + timeout));
652 }
653 
654 void backlog_qflush_work(struct work_struct *work)
655 {
656 	struct nitrox_cmdq *cmdq;
657 
658 	cmdq = container_of(work, struct nitrox_cmdq, backlog_qflush);
659 	post_backlog_cmds(cmdq);
660 }
661 
662 /**
663  * process_response_list - process completed requests
664  * @cmdq: Command queue structure
665  *
666  * Completes requests whose ORH and completion words have been
667  * updated by hardware, or which have timed out.
668  */
669 static void process_response_list(struct nitrox_cmdq *cmdq)
670 {
671 	struct nitrox_device *ndev = cmdq->ndev;
672 	struct nitrox_softreq *sr;
673 	struct skcipher_request *skreq;
674 	completion_t callback;
675 	int req_completed = 0, err = 0, budget;
676 
677 	/* check all pending requests */
678 	budget = atomic_read(&cmdq->pending_count);
679 
680 	while (req_completed < budget) {
681 		sr = get_first_response_entry(cmdq);
682 		if (!sr)
683 			break;
684 
685 		if (atomic_read(&sr->status) != REQ_POSTED)
686 			break;
687 
688 		/* check orh and completion bytes updates */
689 		if (READ_ONCE(sr->resp.orh) == READ_ONCE(sr->resp.completion)) {
690 			/* request not completed, check for timeout */
691 			if (!cmd_timeout(sr->tstamp, ndev->timeout))
692 				break;
693 			dev_err_ratelimited(DEV(ndev),
694 					    "Request timeout, orh 0x%016llx\n",
695 					    READ_ONCE(sr->resp.orh));
696 		}
697 		atomic_dec(&cmdq->pending_count);
698 		/* sync with other cpus */
699 		smp_mb__after_atomic();
700 		/* remove from response list */
701 		response_list_del(sr, cmdq);
702 
703 		callback = sr->callback;
704 		skreq = sr->skreq;
705 
706 		/* ORH error code */
707 		err = READ_ONCE(sr->resp.orh) & 0xff;
708 		softreq_destroy(sr);
709 
710 		if (callback)
711 			callback(skreq, err);
712 
713 		req_completed++;
714 	}
715 }
716 
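Both the ORH and the completion word are seeded with PENDING_SIG before posting (see nitrox_process_se_request() above), so the loop above treats a request as finished once the two words differ. Expressed as a standalone predicate, the test is roughly:

/* Sketch: hardware has completed the request once ORH != completion. */
static bool se_request_done(struct nitrox_softreq *sr)
{
	return READ_ONCE(sr->resp.orh) != READ_ONCE(sr->resp.completion);
}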
717 /**
718  * pkt_slc_resp_handler - post processing of SE responses
719  */
720 void pkt_slc_resp_handler(unsigned long data)
721 {
722 	struct bh_data *bh = (void *)(uintptr_t)(data);
723 	struct nitrox_cmdq *cmdq = bh->cmdq;
724 	union nps_pkt_slc_cnts pkt_slc_cnts;
725 
726 	/* read completion count */
727 	pkt_slc_cnts.value = readq(bh->completion_cnt_csr_addr);
728 	/* resend the interrupt if more work to do */
729 	pkt_slc_cnts.s.resend = 1;
730 
731 	process_response_list(cmdq);
732 
733 	/*
734 	 * clear the interrupt with resend bit enabled,
735 	 * MSI-X interrupt generates if Completion count > Threshold
736 	 */
737 	writeq(pkt_slc_cnts.value, bh->completion_cnt_csr_addr);
738 	/* order the writes */
739 	mmiowb();
740 
741 	if (atomic_read(&cmdq->backlog_count))
742 		schedule_work(&cmdq->backlog_qflush);
743 }
744