/*
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mmu_context.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include "hfi.h"
#include "sdma.h"
#include "user_sdma.h"
#include "verbs.h"  /* for the headers */
#include "common.h" /* for struct hfi1_tid_info */
#include "trace.h"

static uint hfi1_sdma_comp_ring_size = 128;
module_param_named(sdma_comp_size, hfi1_sdma_comp_ring_size, uint, S_IRUGO);
MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 128");

/* The maximum number of data io vectors per message/request */
#define MAX_VECTORS_PER_REQ 8
/*
 * Maximum number of packets to send from each message/request
 * before moving to the next one.
 */
#define MAX_PKTS_PER_QUEUE 16

#define num_pages(x) (1 + ((((x) - 1) & PAGE_MASK) >> PAGE_SHIFT))
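
/*
 * Worked example of num_pages() (assuming the common 4KiB PAGE_SIZE,
 * i.e. PAGE_SHIFT == 12 and PAGE_MASK == ~0xfff):
 *
 *	num_pages(1)    == 1 + ((0    & PAGE_MASK) >> 12) == 1
 *	num_pages(4096) == 1 + ((4095 & PAGE_MASK) >> 12) == 1
 *	num_pages(4097) == 1 + ((4096 & PAGE_MASK) >> 12) == 2
 *
 * i.e. a byte count is rounded up to the number of pages it spans,
 * for a buffer starting on a page boundary.
 */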

#define req_opcode(x) \
	(((x) >> HFI1_SDMA_REQ_OPCODE_SHIFT) & HFI1_SDMA_REQ_OPCODE_MASK)
#define req_version(x) \
	(((x) >> HFI1_SDMA_REQ_VERSION_SHIFT) & HFI1_SDMA_REQ_OPCODE_MASK)
#define req_iovcnt(x) \
	(((x) >> HFI1_SDMA_REQ_IOVCNT_SHIFT) & HFI1_SDMA_REQ_IOVCNT_MASK)

/* Number of BTH.PSN bits used for sequence number in expected rcvs */
#define BTH_SEQ_MASK 0x7ffull

/*
 * Define fields in the KDETH header so we can update the header
 * template.
 */
#define KDETH_OFFSET_SHIFT        0
#define KDETH_OFFSET_MASK         0x7fff
#define KDETH_OM_SHIFT            15
#define KDETH_OM_MASK             0x1
#define KDETH_TID_SHIFT           16
#define KDETH_TID_MASK            0x3ff
#define KDETH_TIDCTRL_SHIFT       26
#define KDETH_TIDCTRL_MASK        0x3
#define KDETH_INTR_SHIFT          28
#define KDETH_INTR_MASK           0x1
#define KDETH_SH_SHIFT            29
#define KDETH_SH_MASK             0x1
#define KDETH_HCRC_UPPER_SHIFT    16
#define KDETH_HCRC_UPPER_MASK     0xff
#define KDETH_HCRC_LOWER_SHIFT    24
#define KDETH_HCRC_LOWER_MASK     0xff

#define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
#define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)
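
/*
 * PBC2LRH()/LRH2PBC() convert between the PBC.PbcLengthDWs field
 * (in dwords) and the LRH packet length (in bytes); for 4-byte
 * aligned lengths they are inverses of each other, e.g.:
 *
 *	LRH2PBC(132) == (132 >> 2) + 1 == 34
 *	PBC2LRH(34)  == (34 << 2) - 4  == 132
 */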

#define KDETH_GET(val, field)						\
	(((le32_to_cpu((val))) >> KDETH_##field##_SHIFT) & KDETH_##field##_MASK)
#define KDETH_SET(dw, field, val) do {					\
		u32 dwval = le32_to_cpu(dw);				\
		dwval &= ~(KDETH_##field##_MASK << KDETH_##field##_SHIFT); \
		dwval |= (((val) & KDETH_##field##_MASK) << \
			  KDETH_##field##_SHIFT);			\
		dw = cpu_to_le32(dwval);				\
	} while (0)
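
/*
 * Example usage, mirroring how these accessors are used later in
 * this file: the field name is token-pasted with the _SHIFT/_MASK
 * definitions above, and the little-endian conversion is done inside
 * the macro, so callers operate on named fields directly:
 *
 *	u32 tid = KDETH_GET(hdr->kdeth.ver_tid_offset, TID);
 *	KDETH_SET(hdr->kdeth.ver_tid_offset, SH, 0);
 */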

#define AHG_HEADER_SET(arr, idx, dw, bit, width, value)			\
	do {								\
		if ((idx) < ARRAY_SIZE((arr)))				\
			(arr)[(idx++)] = sdma_build_ahg_descriptor(	\
				(__force u16)(value), (dw), (bit),	\
							(width));	\
		else							\
			return -ERANGE;					\
	} while (0)
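
/*
 * Example from set_txreq_header_ahg() below: write the high 16 bits
 * of BTH.PSN into header dword 6 at bit offset 0, width 16:
 *
 *	AHG_HEADER_SET(req->ahg, diff, 6, 0, 16, cpu_to_be16(val32 >> 16));
 *
 * Each invocation appends one AHG descriptor and post-increments the
 * index; if the array is full, the macro returns -ERANGE from the
 * *enclosing* function, so it may only be used where that is safe.
 */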

/* KDETH OM multipliers and switch over point */
#define KDETH_OM_SMALL     4
#define KDETH_OM_LARGE     64
#define KDETH_OM_MAX_SIZE  (1 << ((KDETH_OM_LARGE / KDETH_OM_SMALL) + 1))
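
/*
 * With KDETH_OM_SMALL == 4 and KDETH_OM_LARGE == 64:
 *
 *	KDETH_OM_MAX_SIZE == 1 << ((64 / 4) + 1) == 1 << 17 == 128KiB
 *
 * The small (x4) multiplier is used while the TID entry size is below
 * 128KiB, which is as far as the 15-bit KDETH.OFFSET field reaches in
 * units of 4 bytes (0x7fff * 4 ~= 128KiB); larger TID entries switch
 * to the x64 multiplier (see set_txreq_header()).
 */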

/* Last packet of the whole request (used in tx->flags) */
#define TXREQ_FLAGS_REQ_LAST_PKT   (1 << 0)
/* Last packet to reference an io vector (used in tx->iovecs[].flags) */
#define TXREQ_FLAGS_IOVEC_LAST_PKT (1 << 0)

#define SDMA_REQ_IN_USE     0
#define SDMA_REQ_FOR_THREAD 1
#define SDMA_REQ_SEND_DONE  2
#define SDMA_REQ_HAVE_AHG   3
#define SDMA_REQ_HAS_ERROR  4
#define SDMA_REQ_DONE_ERROR 5

#define SDMA_PKT_Q_INACTIVE (1 << 0)
#define SDMA_PKT_Q_ACTIVE   (1 << 1)
#define SDMA_PKT_Q_DEFERRED (1 << 2)

/*
 * Maximum retry attempts to submit a TX request
 * before putting the process to sleep.
 */
#define MAX_DEFER_RETRY_COUNT 1

static unsigned initial_pkt_count = 8;

#define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */

struct user_sdma_iovec {
	struct iovec iov;
	/* number of pages in this vector */
	unsigned npages;
	/* array of pinned pages for this vector */
	struct page **pages;
	/*
	 * offset into the virtual address space of the vector at
	 * which we last left off.
	 */
	u64 offset;
};

struct user_sdma_request {
	struct sdma_req_info info;
	struct hfi1_user_sdma_pkt_q *pq;
	struct hfi1_user_sdma_comp_q *cq;
	/* This is the original header from user space */
	struct hfi1_pkt_header hdr;
	/*
	 * Pointer to the SDMA engine for this request.
	 * Since different requests could be on different VLs,
	 * each request needs its own engine pointer.
	 */
	struct sdma_engine *sde;
	u8 ahg_idx;
	u32 ahg[9];
	/*
	 * KDETH.Offset (Eager) field
	 * We need to remember the initial value so the headers
	 * can be updated properly.
	 */
	u32 koffset;
	/*
	 * KDETH.OFFSET (TID) field
	 * The offset can cover multiple packets, depending on the
	 * size of the TID entry.
	 */
	u32 tidoffset;
	/*
	 * KDETH.OM
	 * Remember this because the header template always sets it
	 * to 0.
	 */
	u8 omfactor;
	/*
	 * Pointer to the user's task_struct. We take a reference
	 * to it so we can process io vectors at a later time.
	 */
	struct task_struct *user_proc;
	/*
	 * Pointer to the user's mm_struct. We take a reference
	 * to it so it doesn't get freed, since we might not be
	 * in process context when we are processing the iovs.
	 * Using this mm_struct, we can look up the vma for an
	 * iov's address (find_vma()).
	 */
	struct mm_struct *user_mm;
	/*
	 * We copy the iovs for this request (based on
	 * info.iovcnt). These are only the data vectors.
	 */
	unsigned data_iovs;
	/* total length of the data in the request */
	u32 data_len;
	/* progress index moving along the iovs array */
	unsigned iov_idx;
	struct user_sdma_iovec iovs[MAX_VECTORS_PER_REQ];
	/* number of elements copied to the tids array */
	u16 n_tids;
	/* TID array values copied from the tid_iov vector */
	u32 *tids;
	u16 tididx;
	u32 sent;
	u64 seqnum;
	spinlock_t list_lock;
	struct list_head txps;
	unsigned long flags;
};

/*
 * A single txreq could span up to 3 physical pages when the MTU
 * is sufficiently large (> 4K). Each of the IOV pointers also
 * needs its own set of flags so that each vector can be handled
 * independently of the others.
 */
struct user_sdma_txreq {
	/* Packet header for the txreq */
	struct hfi1_pkt_header hdr;
	struct sdma_txreq txreq;
	struct user_sdma_request *req;
	struct {
		struct user_sdma_iovec *vec;
		u8 flags;
	} iovecs[3];
	int idx;
	u16 flags;
	unsigned busycount;
	u64 seqnum;
};

#define SDMA_DBG(req, fmt, ...)				     \
	hfi1_cdbg(SDMA, "[%u:%u:%u:%u] " fmt, (req)->pq->dd->unit, \
		 (req)->pq->ctxt, (req)->pq->subctxt, (req)->info.comp_idx, \
		 ##__VA_ARGS__)
#define SDMA_Q_DBG(pq, fmt, ...)			 \
	hfi1_cdbg(SDMA, "[%u:%u:%u] " fmt, (pq)->dd->unit, (pq)->ctxt, \
		 (pq)->subctxt, ##__VA_ARGS__)

static int user_sdma_send_pkts(struct user_sdma_request *, unsigned);
static int num_user_pages(const struct iovec *);
static void user_sdma_txreq_cb(struct sdma_txreq *, int, int);
static void user_sdma_free_request(struct user_sdma_request *);
static int pin_vector_pages(struct user_sdma_request *,
			    struct user_sdma_iovec *);
static void unpin_vector_pages(struct user_sdma_iovec *);
static int check_header_template(struct user_sdma_request *,
				 struct hfi1_pkt_header *, u32, u32);
static int set_txreq_header(struct user_sdma_request *,
			    struct user_sdma_txreq *, u32);
static int set_txreq_header_ahg(struct user_sdma_request *,
				struct user_sdma_txreq *, u32);
static inline void set_comp_state(struct user_sdma_request *,
					enum hfi1_sdma_comp_state, int);
static inline u32 set_pkt_bth_psn(__be32, u8, u32);
static inline u32 get_lrh_len(struct hfi1_pkt_header, u32 len);

static int defer_packet_queue(
	struct sdma_engine *,
	struct iowait *,
	struct sdma_txreq *,
	unsigned seq);
static void activate_packet_queue(struct iowait *, int);

static int defer_packet_queue(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *txreq,
	unsigned seq)
{
	struct hfi1_user_sdma_pkt_q *pq =
		container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
	struct hfi1_ibdev *dev = &pq->dd->verbs_dev;
	struct user_sdma_txreq *tx =
		container_of(txreq, struct user_sdma_txreq, txreq);

	if (sdma_progress(sde, seq, txreq)) {
		if (tx->busycount++ < MAX_DEFER_RETRY_COUNT)
			goto eagain;
	}
	/*
	 * We are assuming that if the list is enqueued somewhere, it
	 * is to the dmawait list since that is the only place where
	 * it is supposed to be enqueued.
	 */
	xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
	write_seqlock(&dev->iowait_lock);
	if (list_empty(&pq->busy.list))
		list_add_tail(&pq->busy.list, &sde->dmawait);
	write_sequnlock(&dev->iowait_lock);
	return -EBUSY;
eagain:
	return -EAGAIN;
}

static void activate_packet_queue(struct iowait *wait, int reason)
{
	struct hfi1_user_sdma_pkt_q *pq =
		container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
	xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
	wake_up(&wait->wait_dma);
}

static void sdma_kmem_cache_ctor(void *obj)
{
	struct user_sdma_txreq *tx = (struct user_sdma_txreq *)obj;

	memset(tx, 0, sizeof(*tx));
}

int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
{
	int ret = 0;
	unsigned memsize;
	char buf[64];
	struct hfi1_devdata *dd;
	struct hfi1_user_sdma_comp_q *cq;
	struct hfi1_user_sdma_pkt_q *pq;
	unsigned long flags;

	if (!uctxt || !fp) {
		ret = -EBADF;
		goto done;
	}

	if (!hfi1_sdma_comp_ring_size) {
		ret = -EINVAL;
		goto done;
	}

	dd = uctxt->dd;

	pq = kzalloc(sizeof(*pq), GFP_KERNEL);
	if (!pq)
		goto pq_nomem;

	memsize = sizeof(*pq->reqs) * hfi1_sdma_comp_ring_size;
	pq->reqs = kmalloc(memsize, GFP_KERNEL);
	if (!pq->reqs)
		goto pq_reqs_nomem;

	INIT_LIST_HEAD(&pq->list);
	pq->dd = dd;
	pq->ctxt = uctxt->ctxt;
	pq->subctxt = subctxt_fp(fp);
	pq->n_max_reqs = hfi1_sdma_comp_ring_size;
	pq->state = SDMA_PKT_Q_INACTIVE;
	atomic_set(&pq->n_reqs, 0);

	iowait_init(&pq->busy, 0, NULL, defer_packet_queue,
		    activate_packet_queue);
	pq->reqidx = 0;
	snprintf(buf, sizeof(buf), "txreq-kmem-cache-%u-%u-%u", dd->unit,
		 uctxt->ctxt, subctxt_fp(fp));
	pq->txreq_cache = kmem_cache_create(buf,
					    sizeof(struct user_sdma_txreq),
					    L1_CACHE_BYTES,
					    SLAB_HWCACHE_ALIGN,
					    sdma_kmem_cache_ctor);
	if (!pq->txreq_cache) {
		dd_dev_err(dd, "[%u] Failed to allocate TxReq cache\n",
			   uctxt->ctxt);
		goto pq_txreq_nomem;
	}
	user_sdma_pkt_fp(fp) = pq;
	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		goto cq_nomem;

	memsize = ALIGN(sizeof(*cq->comps) * hfi1_sdma_comp_ring_size,
			PAGE_SIZE);
	cq->comps = vmalloc_user(memsize);
	if (!cq->comps)
		goto cq_comps_nomem;

	cq->nentries = hfi1_sdma_comp_ring_size;
	user_sdma_comp_fp(fp) = cq;

	spin_lock_irqsave(&uctxt->sdma_qlock, flags);
	list_add(&pq->list, &uctxt->sdma_queues);
	spin_unlock_irqrestore(&uctxt->sdma_qlock, flags);
	goto done;

cq_comps_nomem:
	kfree(cq);
cq_nomem:
	kmem_cache_destroy(pq->txreq_cache);
pq_txreq_nomem:
	kfree(pq->reqs);
pq_reqs_nomem:
	kfree(pq);
	user_sdma_pkt_fp(fp) = NULL;
pq_nomem:
	ret = -ENOMEM;
done:
	return ret;
}

int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_user_sdma_pkt_q *pq;
	unsigned long flags;

	hfi1_cdbg(SDMA, "[%u:%u:%u] Freeing user SDMA queues", uctxt->dd->unit,
		  uctxt->ctxt, fd->subctxt);
	pq = fd->pq;
	if (pq) {
		u16 i, j;

		spin_lock_irqsave(&uctxt->sdma_qlock, flags);
		if (!list_empty(&pq->list))
			list_del_init(&pq->list);
		spin_unlock_irqrestore(&uctxt->sdma_qlock, flags);
		iowait_sdma_drain(&pq->busy);
		if (pq->reqs) {
			for (i = 0, j = 0; i < atomic_read(&pq->n_reqs) &&
				     j < pq->n_max_reqs; j++) {
				struct user_sdma_request *req = &pq->reqs[j];

				if (test_bit(SDMA_REQ_IN_USE, &req->flags)) {
					set_comp_state(req, ERROR, -ECOMM);
					user_sdma_free_request(req);
					i++;
				}
			}
			kfree(pq->reqs);
		}
		kmem_cache_destroy(pq->txreq_cache);
		kfree(pq);
		fd->pq = NULL;
	}
	if (fd->cq) {
		vfree(fd->cq->comps);
		kfree(fd->cq);
		fd->cq = NULL;
	}
	return 0;
}

int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
				   unsigned long dim, unsigned long *count)
{
	int ret = 0, i = 0, sent;
	struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
	struct hfi1_user_sdma_pkt_q *pq = user_sdma_pkt_fp(fp);
	struct hfi1_user_sdma_comp_q *cq = user_sdma_comp_fp(fp);
	struct hfi1_devdata *dd = pq->dd;
	unsigned long idx = 0;
	u8 pcount = initial_pkt_count;
	struct sdma_req_info info;
	struct user_sdma_request *req;
	u8 opcode, sc, vl;

	if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {
		hfi1_cdbg(
		   SDMA,
		   "[%u:%u:%u] First vector not big enough for header %lu/%lu",
		   dd->unit, uctxt->ctxt, subctxt_fp(fp),
		   iovec[idx].iov_len, sizeof(info) + sizeof(req->hdr));
		ret = -EINVAL;
		goto done;
	}
	ret = copy_from_user(&info, iovec[idx].iov_base, sizeof(info));
	if (ret) {
		hfi1_cdbg(SDMA, "[%u:%u:%u] Failed to copy info QW (%d)",
			  dd->unit, uctxt->ctxt, subctxt_fp(fp), ret);
		ret = -EFAULT;
		goto done;
	}
	trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, subctxt_fp(fp),
				     (u16 *)&info);
	if (cq->comps[info.comp_idx].status == QUEUED) {
		hfi1_cdbg(SDMA, "[%u:%u:%u] Entry %u is in QUEUED state",
			  dd->unit, uctxt->ctxt, subctxt_fp(fp),
			  info.comp_idx);
		ret = -EBADSLT;
		goto done;
	}
	if (!info.fragsize) {
		hfi1_cdbg(SDMA,
			  "[%u:%u:%u:%u] Request does not specify fragsize",
			  dd->unit, uctxt->ctxt, subctxt_fp(fp), info.comp_idx);
		ret = -EINVAL;
		goto done;
	}
	/*
	 * We've done all the safety checks that we can up to this point,
	 * so "allocate" the request entry.
	 */
	hfi1_cdbg(SDMA, "[%u:%u:%u] Using req/comp entry %u\n", dd->unit,
		  uctxt->ctxt, subctxt_fp(fp), info.comp_idx);
	req = pq->reqs + info.comp_idx;
	memset(req, 0, sizeof(*req));
	/* Mark the request as IN_USE before we start filling it in. */
	set_bit(SDMA_REQ_IN_USE, &req->flags);
	req->data_iovs = req_iovcnt(info.ctrl) - 1;
	req->pq = pq;
	req->cq = cq;
	INIT_LIST_HEAD(&req->txps);
	spin_lock_init(&req->list_lock);
	memcpy(&req->info, &info, sizeof(info));

	if (req_opcode(info.ctrl) == EXPECTED)
		req->data_iovs--;

	if (!info.npkts || req->data_iovs > MAX_VECTORS_PER_REQ) {
		SDMA_DBG(req, "Too many vectors (%u/%u)", req->data_iovs,
			 MAX_VECTORS_PER_REQ);
		ret = -EINVAL;
		goto done;
	}
	/* Copy the header from the user buffer */
	ret = copy_from_user(&req->hdr, iovec[idx].iov_base + sizeof(info),
			     sizeof(req->hdr));
	if (ret) {
		SDMA_DBG(req, "Failed to copy header template (%d)", ret);
		ret = -EFAULT;
		goto free_req;
	}

	/* If static rate control is not enabled, sanitize the header. */
	if (!HFI1_CAP_IS_USET(STATIC_RATE_CTRL))
		req->hdr.pbc[2] = 0;

	/* Validate the opcode. Do not trust packets from user space blindly. */
	opcode = (be32_to_cpu(req->hdr.bth[0]) >> 24) & 0xff;
	if ((opcode & USER_OPCODE_CHECK_MASK) !=
	     USER_OPCODE_CHECK_VAL) {
		SDMA_DBG(req, "Invalid opcode (%d)", opcode);
		ret = -EINVAL;
		goto free_req;
	}
	/*
	 * Validate the vl. Do not trust packets from user space blindly.
	 * VL comes from PBC, SC comes from LRH, and the VL needs to
	 * match the SC look up.
	 */
	vl = (le16_to_cpu(req->hdr.pbc[0]) >> 12) & 0xF;
	sc = (((be16_to_cpu(req->hdr.lrh[0]) >> 12) & 0xF) |
	      (((le16_to_cpu(req->hdr.pbc[1]) >> 14) & 0x1) << 4));
	if (vl >= dd->pport->vls_operational ||
	    vl != sc_to_vlt(dd, sc)) {
		SDMA_DBG(req, "Invalid SC(%u)/VL(%u)", sc, vl);
		ret = -EINVAL;
		goto free_req;
	}

	/*
	 * We should also check BTH.lnh. If it says the next header is a GRH,
	 * the RXE parsing will be off and will land in the middle of the
	 * KDETH or miss it entirely.
	 */
	if ((be16_to_cpu(req->hdr.lrh[0]) & 0x3) == HFI1_LRH_GRH) {
		SDMA_DBG(req, "User tried to pass in a GRH");
		ret = -EINVAL;
		goto free_req;
	}

	req->koffset = le32_to_cpu(req->hdr.kdeth.swdata[6]);
	/*
	 * Calculate the initial TID offset based on the values of
	 * KDETH.OFFSET and KDETH.OM that are passed in.
	 */
	req->tidoffset = KDETH_GET(req->hdr.kdeth.ver_tid_offset, OFFSET) *
		(KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
		 KDETH_OM_LARGE : KDETH_OM_SMALL);
	SDMA_DBG(req, "Initial TID offset %u", req->tidoffset);
	idx++;

	/* Save all the IO vector structures */
	while (i < req->data_iovs) {
		memcpy(&req->iovs[i].iov, iovec + idx++, sizeof(struct iovec));
		req->iovs[i].offset = 0;
		req->data_len += req->iovs[i++].iov.iov_len;
	}
	SDMA_DBG(req, "total data length %u", req->data_len);

	if (pcount > req->info.npkts)
		pcount = req->info.npkts;
	/*
	 * Copy any TID info.
	 * User space will provide the TID info only when the
	 * request type is EXPECTED. This is true even if there is
	 * only one packet in the request and the header is already
	 * set up. The reason for the singular TID case is that the
	 * driver needs to perform safety checks.
	 */
	if (req_opcode(req->info.ctrl) == EXPECTED) {
		u16 ntids = iovec[idx].iov_len / sizeof(*req->tids);

		if (!ntids || ntids > MAX_TID_PAIR_ENTRIES) {
			ret = -EINVAL;
			goto free_req;
		}
		req->tids = kcalloc(ntids, sizeof(*req->tids), GFP_KERNEL);
		if (!req->tids) {
			ret = -ENOMEM;
			goto free_req;
		}
		/*
		 * We have to copy all of the tids because they may vary
		 * in size and, therefore, the TID count might not be
		 * equal to the pkt count. However, there is no way to
		 * tell at this point.
		 */
		ret = copy_from_user(req->tids, iovec[idx].iov_base,
				     ntids * sizeof(*req->tids));
		if (ret) {
			SDMA_DBG(req, "Failed to copy %d TIDs (%d)",
				 ntids, ret);
			ret = -EFAULT;
			goto free_req;
		}
		req->n_tids = ntids;
		idx++;
	}

	/* Have to select the engine */
	req->sde = sdma_select_engine_vl(dd,
					 (u32)(uctxt->ctxt + subctxt_fp(fp)),
					 vl);
	if (!req->sde || !sdma_running(req->sde)) {
		ret = -ECOMM;
		goto free_req;
	}

	/* We don't need an AHG entry if the request contains only one packet */
	if (req->info.npkts > 1 && HFI1_CAP_IS_USET(SDMA_AHG)) {
		int ahg = sdma_ahg_alloc(req->sde);

		if (likely(ahg >= 0)) {
			req->ahg_idx = (u8)ahg;
			set_bit(SDMA_REQ_HAVE_AHG, &req->flags);
		}
	}

	set_comp_state(req, QUEUED, 0);
	/* Send the first N packets in the request to buy us some time */
	sent = user_sdma_send_pkts(req, pcount);
	if (unlikely(sent < 0)) {
		if (sent != -EBUSY) {
			ret = sent;
			goto send_err;
		} else
			sent = 0;
	}
	atomic_inc(&pq->n_reqs);

	if (sent < req->info.npkts) {
		/* Take a reference to the user's task */
		get_task_struct(current);
		req->user_proc = current;

		/*
		 * This is a somewhat blocking send implementation.
		 * The driver will block the caller until all packets of the
		 * request have been submitted to the SDMA engine. However, it
		 * will not wait for send completions.
		 */
		while (!test_bit(SDMA_REQ_SEND_DONE, &req->flags)) {
			ret = user_sdma_send_pkts(req, pcount);
			if (ret < 0) {
				if (ret != -EBUSY)
					goto send_err;
				wait_event_interruptible_timeout(
					pq->busy.wait_dma,
					(pq->state == SDMA_PKT_Q_ACTIVE),
					msecs_to_jiffies(
						SDMA_IOWAIT_TIMEOUT));
			}
		}
	}
	ret = 0;
	*count += idx;
	goto done;
send_err:
	set_comp_state(req, ERROR, ret);
free_req:
	user_sdma_free_request(req);
done:
	return ret;
}

static inline u32 compute_data_length(struct user_sdma_request *req,
				      struct user_sdma_txreq *tx)
{
	/*
	 * Determine the proper size of the packet data.
	 * The size of the data for the first packet is in the header
	 * template. However, it includes the header and ICRC, which need
	 * to be subtracted.
	 * The size of the remaining packets is the minimum of the frag
	 * size (MTU) and the remaining data in the request.
	 */
	u32 len;

	if (!req->seqnum) {
		len = ((be16_to_cpu(req->hdr.lrh[2]) << 2) -
		       (sizeof(tx->hdr) - 4));
	} else if (req_opcode(req->info.ctrl) == EXPECTED) {
		u32 tidlen = EXP_TID_GET(req->tids[req->tididx], LEN) *
			PAGE_SIZE;
		/*
		 * Get the data length based on the remaining space in the
		 * TID pair.
		 */
		len = min(tidlen - req->tidoffset, (u32)req->info.fragsize);
		/* If we've filled up the TID pair, move to the next one. */
		if (unlikely(!len) && ++req->tididx < req->n_tids &&
		    req->tids[req->tididx]) {
			tidlen = EXP_TID_GET(req->tids[req->tididx],
					     LEN) * PAGE_SIZE;
			req->tidoffset = 0;
			len = min_t(u32, tidlen, req->info.fragsize);
		}
		/*
		 * Since the TID pairs map entire pages, make sure that we
		 * are not going to try to send more data than we have
		 * remaining.
		 */
		len = min(len, req->data_len - req->sent);
	} else
		len = min(req->data_len - req->sent, (u32)req->info.fragsize);
	SDMA_DBG(req, "Data Length = %u", len);
	return len;
}
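
/*
 * Worked example for the EXPECTED branch above: if the current TID
 * pair maps 8 pages (tidlen == 32KiB with 4KiB pages), tidoffset is
 * 28KiB and fragsize is 8KiB, then
 *
 *	len = min(32KiB - 28KiB, 8KiB) = 4KiB
 *
 * i.e. the packet is clipped so it does not run past the end of the
 * TID pair; the next packet starts in the following TID entry.
 */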

static inline u32 get_lrh_len(struct hfi1_pkt_header hdr, u32 len)
{
	/* (Size of complete header - size of PBC) + 4B ICRC + data length */
	return ((sizeof(hdr) - sizeof(hdr.pbc)) + 4 + len);
}
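
/*
 * Consistency note: compute_data_length() above recovers the first
 * packet's payload as (LRH length in bytes) - (sizeof(hdr) - 4),
 * which is this formula solved for len given the 8-byte PBC.
 */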

static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
{
	int ret = 0;
	unsigned npkts = 0;
	struct user_sdma_txreq *tx = NULL;
	struct hfi1_user_sdma_pkt_q *pq = NULL;
	struct user_sdma_iovec *iovec = NULL;

	if (!req->pq) {
		ret = -EINVAL;
		goto done;
	}

	pq = req->pq;

	/*
	 * Check if we might have sent the entire request already.
	 */
	if (unlikely(req->seqnum == req->info.npkts)) {
		if (!list_empty(&req->txps))
			goto dosend;
		goto done;
	}

	if (!maxpkts || maxpkts > req->info.npkts - req->seqnum)
		maxpkts = req->info.npkts - req->seqnum;

	while (npkts < maxpkts) {
		u32 datalen = 0, queued = 0, data_sent = 0;
		u64 iov_offset = 0;

		/*
		 * Check whether any of the completions have come back
		 * with errors. If so, we are not going to process any
		 * more packets from this request.
		 */
		if (test_bit(SDMA_REQ_HAS_ERROR, &req->flags)) {
			set_bit(SDMA_REQ_DONE_ERROR, &req->flags);
			ret = -EFAULT;
			goto done;
		}

		tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL);
		if (!tx) {
			ret = -ENOMEM;
			goto done;
		}
		tx->flags = 0;
		tx->req = req;
		tx->busycount = 0;
		tx->idx = -1;
		memset(tx->iovecs, 0, sizeof(tx->iovecs));

		if (req->seqnum == req->info.npkts - 1)
			tx->flags |= TXREQ_FLAGS_REQ_LAST_PKT;

		/*
		 * Calculate the payload size - this is min of the fragment
		 * (MTU) size or the remaining bytes in the request but only
		 * if we have payload data.
		 */
		if (req->data_len) {
			iovec = &req->iovs[req->iov_idx];
			if (ACCESS_ONCE(iovec->offset) == iovec->iov.iov_len) {
				if (++req->iov_idx == req->data_iovs) {
					ret = -EFAULT;
					goto free_txreq;
				}
				iovec = &req->iovs[req->iov_idx];
				WARN_ON(iovec->offset);
			}

			/*
			 * This request might include only a header and no user
			 * data, so pin pages only if there is data and the
			 * pages have not been pinned already.
			 */
			if (unlikely(!iovec->pages && iovec->iov.iov_len)) {
				ret = pin_vector_pages(req, iovec);
				if (ret)
					goto free_tx;
			}

			tx->iovecs[++tx->idx].vec = iovec;
			datalen = compute_data_length(req, tx);
			if (!datalen) {
				SDMA_DBG(req,
					 "Request has data but pkt len is 0");
				ret = -EFAULT;
				goto free_tx;
			}
		}

		if (test_bit(SDMA_REQ_HAVE_AHG, &req->flags)) {
			if (!req->seqnum) {
				u16 pbclen = le16_to_cpu(req->hdr.pbc[0]);
				u32 lrhlen = get_lrh_len(req->hdr, datalen);
				/*
				 * Copy the request header into the tx header
				 * because the HW needs a cacheline-aligned
				 * address.
				 * This copy can be optimized out if the hdr
				 * member of user_sdma_request were also
				 * cacheline aligned.
				 */
				memcpy(&tx->hdr, &req->hdr, sizeof(tx->hdr));
				if (PBC2LRH(pbclen) != lrhlen) {
					pbclen = (pbclen & 0xf000) |
						LRH2PBC(lrhlen);
					tx->hdr.pbc[0] = cpu_to_le16(pbclen);
				}
				ret = sdma_txinit_ahg(&tx->txreq,
						      SDMA_TXREQ_F_AHG_COPY,
						      sizeof(tx->hdr) + datalen,
						      req->ahg_idx, 0, NULL, 0,
						      user_sdma_txreq_cb);
				if (ret)
					goto free_tx;
				ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq,
							&tx->hdr,
							sizeof(tx->hdr));
				if (ret)
					goto free_txreq;
			} else {
				int changes;

				changes = set_txreq_header_ahg(req, tx,
							       datalen);
				if (changes < 0)
					goto free_tx;
				sdma_txinit_ahg(&tx->txreq,
						SDMA_TXREQ_F_USE_AHG,
						datalen, req->ahg_idx, changes,
						req->ahg, sizeof(req->hdr),
						user_sdma_txreq_cb);
			}
		} else {
			ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) +
					  datalen, user_sdma_txreq_cb);
			if (ret)
				goto free_tx;
			/*
			 * Modify the header for this packet. This only needs
			 * to be done if we are not going to use AHG. Otherwise,
			 * the HW will do it based on the changes we gave it
			 * during sdma_txinit_ahg().
			 */
			ret = set_txreq_header(req, tx, datalen);
			if (ret)
				goto free_txreq;
		}

		/*
		 * If the request contains any data vectors, add up to
		 * fragsize bytes to the descriptor.
		 */
		while (queued < datalen &&
		       (req->sent + data_sent) < req->data_len) {
			unsigned long base, offset;
			unsigned pageidx, len;

			base = (unsigned long)iovec->iov.iov_base;
			offset = ((base + iovec->offset + iov_offset) &
				  ~PAGE_MASK);
			pageidx = (((iovec->offset + iov_offset +
				     base) - (base & PAGE_MASK)) >> PAGE_SHIFT);
			len = offset + req->info.fragsize > PAGE_SIZE ?
				PAGE_SIZE - offset : req->info.fragsize;
			len = min((datalen - queued), len);
			ret = sdma_txadd_page(pq->dd, &tx->txreq,
					      iovec->pages[pageidx],
					      offset, len);
			if (ret) {
				int i;

				dd_dev_err(pq->dd,
					   "SDMA txreq add page failed %d\n",
					   ret);
				/*
				 * Mark all assigned vectors as complete so
				 * they are unpinned in the callback.
				 */
				for (i = tx->idx; i >= 0; i--) {
					tx->iovecs[i].flags |=
						TXREQ_FLAGS_IOVEC_LAST_PKT;
				}
				goto free_txreq;
			}
			iov_offset += len;
			queued += len;
			data_sent += len;
			if (unlikely(queued < datalen &&
				     pageidx == iovec->npages &&
				     req->iov_idx < req->data_iovs - 1 &&
				     tx->idx < ARRAY_SIZE(tx->iovecs))) {
				iovec->offset += iov_offset;
				tx->iovecs[tx->idx].flags |=
					TXREQ_FLAGS_IOVEC_LAST_PKT;
				iovec = &req->iovs[++req->iov_idx];
				if (!iovec->pages) {
					ret = pin_vector_pages(req, iovec);
					if (ret)
						goto free_txreq;
				}
				iov_offset = 0;
				tx->iovecs[++tx->idx].vec = iovec;
			}
		}
		/*
		 * The txreq was submitted successfully so we can update
		 * the counters.
		 */
		req->koffset += datalen;
		if (req_opcode(req->info.ctrl) == EXPECTED)
			req->tidoffset += datalen;
		req->sent += data_sent;
		if (req->data_len) {
			tx->iovecs[tx->idx].vec->offset += iov_offset;
			/*
			 * If we've reached the end of the io vector, mark it
			 * so the callback can unpin the pages and free it.
			 */
			if (tx->iovecs[tx->idx].vec->offset ==
			    tx->iovecs[tx->idx].vec->iov.iov_len)
				tx->iovecs[tx->idx].flags |=
					TXREQ_FLAGS_IOVEC_LAST_PKT;
		}

		/*
		 * It is important to increment this here as it is used to
		 * generate the BTH.PSN and, therefore, can't be bulk-updated
		 * outside of the loop.
		 */
		tx->seqnum = req->seqnum++;
		list_add_tail(&tx->txreq.list, &req->txps);
		npkts++;
	}
dosend:
	ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps);
	if (list_empty(&req->txps))
		if (req->seqnum == req->info.npkts) {
			set_bit(SDMA_REQ_SEND_DONE, &req->flags);
			/*
			 * The txreq has already been submitted to the HW queue
			 * so we can free the AHG entry now. Corruption will not
			 * happen due to the sequential manner in which
			 * descriptors are processed.
			 */
			if (test_bit(SDMA_REQ_HAVE_AHG, &req->flags))
				sdma_ahg_free(req->sde, req->ahg_idx);
		}
	goto done;
free_txreq:
	sdma_txclean(pq->dd, &tx->txreq);
free_tx:
	kmem_cache_free(pq->txreq_cache, tx);
done:
	return ret;
}

/*
 * How many pages in this iovec element?
 */
static inline int num_user_pages(const struct iovec *iov)
{
	const unsigned long addr  = (unsigned long) iov->iov_base;
	const unsigned long len   = iov->iov_len;
	const unsigned long spage = addr & PAGE_MASK;
	const unsigned long epage = (addr + len - 1) & PAGE_MASK;

	return 1 + ((epage - spage) >> PAGE_SHIFT);
}
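
/*
 * Example with 4KiB pages: iov_base == 0x1fff and iov_len == 2 spans
 * a page boundary, so:
 *
 *	spage == 0x1000, epage == 0x2000
 *	1 + ((0x2000 - 0x1000) >> 12) == 2
 *
 * Unlike num_pages() above, this accounts for the offset of the
 * buffer within its first page.
 */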

static int pin_vector_pages(struct user_sdma_request *req,
			    struct user_sdma_iovec *iovec)
{
	int ret = 0;
	unsigned pinned;

	iovec->npages = num_user_pages(&iovec->iov);
	iovec->pages = kcalloc(iovec->npages, sizeof(*iovec->pages),
			       GFP_KERNEL);
	if (!iovec->pages) {
		SDMA_DBG(req, "Failed page array alloc");
		ret = -ENOMEM;
		goto done;
	}
	/* If called by the kernel thread, use the user's mm */
	if (current->flags & PF_KTHREAD)
		use_mm(req->user_proc->mm);
	pinned = get_user_pages_fast(
		(unsigned long)iovec->iov.iov_base,
		iovec->npages, 0, iovec->pages);
	/* If called by the kernel thread, unuse the user's mm */
	if (current->flags & PF_KTHREAD)
		unuse_mm(req->user_proc->mm);
	if (pinned != iovec->npages) {
		SDMA_DBG(req, "Failed to pin pages (%u/%u)", pinned,
			 iovec->npages);
		ret = -EFAULT;
		goto pfree;
	}
	goto done;
pfree:
	unpin_vector_pages(iovec);
done:
	return ret;
}

static void unpin_vector_pages(struct user_sdma_iovec *iovec)
{
	unsigned i;

	if (ACCESS_ONCE(iovec->offset) != iovec->iov.iov_len) {
		hfi1_cdbg(SDMA,
			  "the complete vector has not been sent yet %llu %zu",
			  iovec->offset, iovec->iov.iov_len);
		return;
	}
	for (i = 0; i < iovec->npages; i++)
		if (iovec->pages[i])
			put_page(iovec->pages[i]);
	kfree(iovec->pages);
	iovec->pages = NULL;
	iovec->npages = 0;
	iovec->offset = 0;
}

static int check_header_template(struct user_sdma_request *req,
				 struct hfi1_pkt_header *hdr, u32 lrhlen,
				 u32 datalen)
{
	/*
	 * Perform safety checks for any type of packet:
	 *    - transfer size is a multiple of 64 bytes
	 *    - packet length is a multiple of 4 bytes
	 *    - entire request length is a multiple of 4 bytes
	 *    - packet length is not larger than MTU size
	 *
	 * These checks are only done for the first packet of the
	 * transfer since the header is "given" to us by user space.
	 * For the remainder of the packets we compute the values.
	 */
	if (req->info.fragsize % PIO_BLOCK_SIZE ||
	    lrhlen & 0x3 || req->data_len & 0x3  ||
	    lrhlen > get_lrh_len(*hdr, req->info.fragsize))
		return -EINVAL;

	if (req_opcode(req->info.ctrl) == EXPECTED) {
		/*
		 * The header is checked only on the first packet. Furthermore,
		 * we ensure that at least one TID entry is copied when the
		 * request is submitted. Therefore, we don't have to verify that
		 * tididx points to something sane.
		 */
		u32 tidval = req->tids[req->tididx],
			tidlen = EXP_TID_GET(tidval, LEN) * PAGE_SIZE,
			tididx = EXP_TID_GET(tidval, IDX),
			tidctrl = EXP_TID_GET(tidval, CTRL),
			tidoff;
		__le32 kval = hdr->kdeth.ver_tid_offset;

		tidoff = KDETH_GET(kval, OFFSET) *
			  (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
			   KDETH_OM_LARGE : KDETH_OM_SMALL);
		/*
		 * Expected receive packets have the following
		 * additional checks:
		 *     - offset is not larger than the TID size
		 *     - TIDCtrl values match between header and TID array
		 *     - TID indexes match between header and TID array
		 */
		if ((tidoff + datalen > tidlen) ||
		    KDETH_GET(kval, TIDCTRL) != tidctrl ||
		    KDETH_GET(kval, TID) != tididx)
			return -EINVAL;
	}
	return 0;
}

/*
 * Correctly set the BTH.PSN field based on the type of transfer.
 * Eager packets can just increment the PSN, but expected packets
 * encode generation and sequence in the BTH.PSN field, so just
 * incrementing will result in errors.
 */
static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags)
{
	u32 val = be32_to_cpu(bthpsn),
		mask = (HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffffull :
			0xffffffull),
		psn = val & mask;
	if (expct)
		psn = (psn & ~BTH_SEQ_MASK) | ((psn + frags) & BTH_SEQ_MASK);
	else
		psn = psn + frags;
	return psn & mask;
}
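
/*
 * Example with the 24-bit PSN mask, bthpsn == 0x123456 and frags == 3:
 *
 *	eager:    psn = 0x123456 + 3 == 0x123459
 *	expected: psn = (0x123456 & ~0x7ff) | ((0x123456 + 3) & 0x7ff)
 *	              == 0x123459
 *
 * The results only diverge once the low 11-bit sequence wraps: an
 * eager increment carries into the generation bits, while an expected
 * PSN wraps within BTH_SEQ_MASK and leaves the generation untouched.
 */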

static int set_txreq_header(struct user_sdma_request *req,
			    struct user_sdma_txreq *tx, u32 datalen)
{
	struct hfi1_user_sdma_pkt_q *pq = req->pq;
	struct hfi1_pkt_header *hdr = &tx->hdr;
	u16 pbclen;
	int ret;
	u32 tidval = 0, lrhlen = get_lrh_len(*hdr, datalen);

	/* Copy the header template to the request before modification */
	memcpy(hdr, &req->hdr, sizeof(*hdr));

	/*
	 * Check if the PBC and LRH length are mismatched. If so
	 * adjust both in the header.
	 */
	pbclen = le16_to_cpu(hdr->pbc[0]);
	if (PBC2LRH(pbclen) != lrhlen) {
		pbclen = (pbclen & 0xf000) | LRH2PBC(lrhlen);
		hdr->pbc[0] = cpu_to_le16(pbclen);
		hdr->lrh[2] = cpu_to_be16(lrhlen >> 2);
		/*
		 * Third packet
		 * This is the first packet in the sequence that has
		 * a "static" size that can be used for the rest of
		 * the packets (besides the last one).
		 */
		if (unlikely(req->seqnum == 2)) {
			/*
			 * From this point on the lengths in both the
			 * PBC and LRH are the same until the last
			 * packet.
			 * Adjust the template so we don't have to update
			 * every packet.
			 */
			req->hdr.pbc[0] = hdr->pbc[0];
			req->hdr.lrh[2] = hdr->lrh[2];
		}
	}
	/*
	 * We only have to modify the header if this is not the
	 * first packet in the request. Otherwise, we use the
	 * header given to us.
	 */
	if (unlikely(!req->seqnum)) {
		ret = check_header_template(req, hdr, lrhlen, datalen);
		if (ret)
			return ret;
		goto done;
	}

	hdr->bth[2] = cpu_to_be32(
		set_pkt_bth_psn(hdr->bth[2],
				(req_opcode(req->info.ctrl) == EXPECTED),
				req->seqnum));

	/* Set ACK request on last packet */
	if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT))
		hdr->bth[2] |= cpu_to_be32(1UL << 31);

	/* Set the new offset */
	hdr->kdeth.swdata[6] = cpu_to_le32(req->koffset);
	/* Expected packets have to fill in the new TID information */
	if (req_opcode(req->info.ctrl) == EXPECTED) {
		tidval = req->tids[req->tididx];
		/*
		 * If the offset puts us at the end of the current TID,
		 * advance everything.
		 */
		if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
					 PAGE_SIZE)) {
			req->tidoffset = 0;
			/*
			 * Since we don't copy all the TIDs all at once,
			 * we have to check again.
			 */
			if (++req->tididx > req->n_tids - 1 ||
			    !req->tids[req->tididx]) {
				return -EINVAL;
			}
			tidval = req->tids[req->tididx];
		}
		req->omfactor = EXP_TID_GET(tidval, LEN) * PAGE_SIZE >=
			KDETH_OM_MAX_SIZE ? KDETH_OM_LARGE : KDETH_OM_SMALL;
		/* Set KDETH.TIDCtrl based on value for this TID. */
		KDETH_SET(hdr->kdeth.ver_tid_offset, TIDCTRL,
			  EXP_TID_GET(tidval, CTRL));
		/* Set KDETH.TID based on value for this TID */
		KDETH_SET(hdr->kdeth.ver_tid_offset, TID,
			  EXP_TID_GET(tidval, IDX));
		/* Clear KDETH.SH only on the last packet */
		if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT))
			KDETH_SET(hdr->kdeth.ver_tid_offset, SH, 0);
		/*
		 * Set the KDETH.OFFSET and KDETH.OM based on size of
		 * transfer.
		 */
		SDMA_DBG(req, "TID offset %ubytes %uunits om%u",
			 req->tidoffset, req->tidoffset / req->omfactor,
			 !!(req->omfactor - KDETH_OM_SMALL));
		KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET,
			  req->tidoffset / req->omfactor);
		KDETH_SET(hdr->kdeth.ver_tid_offset, OM,
			  !!(req->omfactor - KDETH_OM_SMALL));
	}
done:
	trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
				    req->info.comp_idx, hdr, tidval);
	return sdma_txadd_kvaddr(pq->dd, &tx->txreq, hdr, sizeof(*hdr));
}

static int set_txreq_header_ahg(struct user_sdma_request *req,
				struct user_sdma_txreq *tx, u32 len)
{
	int diff = 0;
	struct hfi1_user_sdma_pkt_q *pq = req->pq;
	struct hfi1_pkt_header *hdr = &req->hdr;
	u16 pbclen = le16_to_cpu(hdr->pbc[0]);
	u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, len);

	if (PBC2LRH(pbclen) != lrhlen) {
		/* PBC.PbcLengthDWs */
		AHG_HEADER_SET(req->ahg, diff, 0, 0, 12,
			       cpu_to_le16(LRH2PBC(lrhlen)));
		/* LRH.PktLen (we need the full 16 bits due to byte swap) */
		AHG_HEADER_SET(req->ahg, diff, 3, 0, 16,
			       cpu_to_be16(lrhlen >> 2));
	}

	/*
	 * Do the common updates.
	 */
	/* BTH.PSN and BTH.A */
	val32 = (be32_to_cpu(hdr->bth[2]) + req->seqnum) &
		(HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffff : 0xffffff);
	if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT))
		val32 |= 1UL << 31;
	AHG_HEADER_SET(req->ahg, diff, 6, 0, 16, cpu_to_be16(val32 >> 16));
	AHG_HEADER_SET(req->ahg, diff, 6, 16, 16, cpu_to_be16(val32 & 0xffff));
	/* KDETH.Offset */
	AHG_HEADER_SET(req->ahg, diff, 15, 0, 16,
		       cpu_to_le16(req->koffset & 0xffff));
	AHG_HEADER_SET(req->ahg, diff, 15, 16, 16,
		       cpu_to_le16(req->koffset >> 16));
	if (req_opcode(req->info.ctrl) == EXPECTED) {
		__le16 val;

		tidval = req->tids[req->tididx];

		/*
		 * If the offset puts us at the end of the current TID,
		 * advance everything.
		 */
		if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
					 PAGE_SIZE)) {
			req->tidoffset = 0;
			/*
			 * Since we don't copy all the TIDs all at once,
			 * we have to check again.
			 */
			if (++req->tididx > req->n_tids - 1 ||
			    !req->tids[req->tididx]) {
				return -EINVAL;
			}
			tidval = req->tids[req->tididx];
		}
		req->omfactor = ((EXP_TID_GET(tidval, LEN) *
				  PAGE_SIZE) >=
				 KDETH_OM_MAX_SIZE) ? KDETH_OM_LARGE :
			KDETH_OM_SMALL;
		/* KDETH.OM and KDETH.OFFSET (TID) */
		AHG_HEADER_SET(req->ahg, diff, 7, 0, 16,
			       ((!!(req->omfactor - KDETH_OM_SMALL)) << 15 |
				((req->tidoffset / req->omfactor) & 0x7fff)));
		/* KDETH.TIDCtrl, KDETH.TID */
		val = cpu_to_le16(((EXP_TID_GET(tidval, CTRL) & 0x3) << 10) |
					(EXP_TID_GET(tidval, IDX) & 0x3ff));
		/* Clear KDETH.SH on last packet */
		if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT)) {
			val |= cpu_to_le16(KDETH_GET(hdr->kdeth.ver_tid_offset,
								INTR) >> 16);
			val &= cpu_to_le16(~(1U << 13));
			AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val);
		} else
			AHG_HEADER_SET(req->ahg, diff, 7, 16, 12, val);
	}

	trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
					req->info.comp_idx, req->sde->this_idx,
					req->ahg_idx, req->ahg, diff, tidval);
	return diff;
}

static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status,
			       int drain)
{
	struct user_sdma_txreq *tx =
		container_of(txreq, struct user_sdma_txreq, txreq);
	struct user_sdma_request *req = tx->req;
	struct hfi1_user_sdma_pkt_q *pq = req ? req->pq : NULL;
	u64 tx_seqnum;

	if (unlikely(!req || !pq))
		return;

	/*
	 * If we have any io vectors associated with this txreq,
	 * check whether they need to be 'freed'.
	 */
	if (tx->idx != -1) {
		int i;

		for (i = tx->idx; i >= 0; i--) {
			if (tx->iovecs[i].flags & TXREQ_FLAGS_IOVEC_LAST_PKT)
				unpin_vector_pages(tx->iovecs[i].vec);
		}
	}

	tx_seqnum = tx->seqnum;
	kmem_cache_free(pq->txreq_cache, tx);

	if (status != SDMA_TXREQ_S_OK) {
		dd_dev_err(pq->dd, "SDMA completion with error %d", status);
		set_comp_state(req, ERROR, status);
		set_bit(SDMA_REQ_HAS_ERROR, &req->flags);
		/*
		 * Do not free the request until the sender loop has acked
		 * the error and we've seen all txreqs.
		 */
		if (tx_seqnum == ACCESS_ONCE(req->seqnum) &&
		    test_bit(SDMA_REQ_DONE_ERROR, &req->flags)) {
			atomic_dec(&pq->n_reqs);
			user_sdma_free_request(req);
		}
	} else {
		if (tx_seqnum == req->info.npkts - 1) {
			/*
			 * We've sent and completed all packets in this
			 * request. Signal completion to the user.
			 */
			atomic_dec(&pq->n_reqs);
			set_comp_state(req, COMPLETE, 0);
			user_sdma_free_request(req);
		}
	}
	if (!atomic_read(&pq->n_reqs))
		xchg(&pq->state, SDMA_PKT_Q_INACTIVE);
}

static void user_sdma_free_request(struct user_sdma_request *req)
{
	if (!list_empty(&req->txps)) {
		struct sdma_txreq *t, *p;

		list_for_each_entry_safe(t, p, &req->txps, list) {
			struct user_sdma_txreq *tx =
				container_of(t, struct user_sdma_txreq, txreq);
			list_del_init(&t->list);
			sdma_txclean(req->pq->dd, t);
			kmem_cache_free(req->pq->txreq_cache, tx);
		}
	}
	if (req->data_iovs) {
		int i;

		for (i = 0; i < req->data_iovs; i++)
			if (req->iovs[i].npages && req->iovs[i].pages)
				unpin_vector_pages(&req->iovs[i]);
	}
	if (req->user_proc)
		put_task_struct(req->user_proc);
	kfree(req->tids);
	clear_bit(SDMA_REQ_IN_USE, &req->flags);
}

static inline void set_comp_state(struct user_sdma_request *req,
					enum hfi1_sdma_comp_state state,
					int ret)
{
	SDMA_DBG(req, "Setting completion status %u %d", state, ret);
	req->cq->comps[req->info.comp_idx].status = state;
	if (state == ERROR)
		req->cq->comps[req->info.comp_idx].errcode = -ret;
	trace_hfi1_sdma_user_completion(req->pq->dd, req->pq->ctxt,
					req->pq->subctxt, req->info.comp_idx,
					state, ret);
}