1 /*
2  * Copyright(c) 2015 - 2017 Intel Corporation.
3  *
4  * This file is provided under a dual BSD/GPLv2 license.  When using or
5  * redistributing this file, you may do so under either license.
6  *
7  * GPL LICENSE SUMMARY
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * BSD LICENSE
19  *
20  * Redistribution and use in source and binary forms, with or without
21  * modification, are permitted provided that the following conditions
22  * are met:
23  *
24  *  - Redistributions of source code must retain the above copyright
25  *    notice, this list of conditions and the following disclaimer.
26  *  - Redistributions in binary form must reproduce the above copyright
27  *    notice, this list of conditions and the following disclaimer in
28  *    the documentation and/or other materials provided with the
29  *    distribution.
30  *  - Neither the name of Intel Corporation nor the names of its
31  *    contributors may be used to endorse or promote products derived
32  *    from this software without specific prior written permission.
33  *
34  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45  *
46  */
47 #include <linux/mm.h>
48 #include <linux/types.h>
49 #include <linux/device.h>
50 #include <linux/dmapool.h>
51 #include <linux/slab.h>
52 #include <linux/list.h>
53 #include <linux/highmem.h>
54 #include <linux/io.h>
55 #include <linux/uio.h>
56 #include <linux/rbtree.h>
57 #include <linux/spinlock.h>
58 #include <linux/delay.h>
59 #include <linux/kthread.h>
60 #include <linux/mmu_context.h>
61 #include <linux/module.h>
62 #include <linux/vmalloc.h>
63 #include <linux/string.h>
64 
65 #include "hfi.h"
66 #include "sdma.h"
67 #include "mmu_rb.h"
68 #include "user_sdma.h"
69 #include "verbs.h"  /* for the headers */
70 #include "common.h" /* for struct hfi1_tid_info */
71 #include "trace.h"
72 
73 static uint hfi1_sdma_comp_ring_size = 128;
74 module_param_named(sdma_comp_size, hfi1_sdma_comp_ring_size, uint, S_IRUGO);
75 MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 128");
76 
77 static unsigned initial_pkt_count = 8;
78 
79 static int user_sdma_send_pkts(struct user_sdma_request *req,
80 			       unsigned maxpkts);
81 static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status);
82 static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq);
83 static void user_sdma_free_request(struct user_sdma_request *req, bool unpin);
84 static int pin_vector_pages(struct user_sdma_request *req,
85 			    struct user_sdma_iovec *iovec);
86 static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
87 			       unsigned start, unsigned npages);
88 static int check_header_template(struct user_sdma_request *req,
89 				 struct hfi1_pkt_header *hdr, u32 lrhlen,
90 				 u32 datalen);
91 static int set_txreq_header(struct user_sdma_request *req,
92 			    struct user_sdma_txreq *tx, u32 datalen);
93 static int set_txreq_header_ahg(struct user_sdma_request *req,
94 				struct user_sdma_txreq *tx, u32 len);
95 static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
96 				  struct hfi1_user_sdma_comp_q *cq,
97 				  u16 idx, enum hfi1_sdma_comp_state state,
98 				  int ret);
99 static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags);
100 static inline u32 get_lrh_len(struct hfi1_pkt_header, u32 len);
101 
102 static int defer_packet_queue(
103 	struct sdma_engine *sde,
104 	struct iowait *wait,
105 	struct sdma_txreq *txreq,
106 	uint seq,
107 	bool pkts_sent);
108 static void activate_packet_queue(struct iowait *wait, int reason);
109 static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
110 			   unsigned long len);
111 static int sdma_rb_insert(void *arg, struct mmu_rb_node *mnode);
112 static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
113 			 void *arg2, bool *stop);
114 static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode);
115 static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode);
116 
117 static struct mmu_rb_ops sdma_rb_ops = {
118 	.filter = sdma_rb_filter,
119 	.insert = sdma_rb_insert,
120 	.evict = sdma_rb_evict,
121 	.remove = sdma_rb_remove,
122 	.invalidate = sdma_rb_invalidate
123 };
124 
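/*
 * Sleep callback for the packet queue's iowait. Invoked when an SDMA
 * engine runs out of descriptors for this queue: mark the queue as
 * deferred and park it on the engine's dmawait list (returning -EBUSY),
 * unless the engine has since made progress, in which case -EAGAIN
 * tells the caller to retry.
 */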
125 static int defer_packet_queue(
126 	struct sdma_engine *sde,
127 	struct iowait *wait,
128 	struct sdma_txreq *txreq,
129 	uint seq,
130 	bool pkts_sent)
131 {
132 	struct hfi1_user_sdma_pkt_q *pq =
133 		container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
134 	struct hfi1_ibdev *dev = &pq->dd->verbs_dev;
135 
136 	write_seqlock(&dev->iowait_lock);
137 	if (sdma_progress(sde, seq, txreq))
138 		goto eagain;
139 	/*
140 	 * We are assuming that if the list is enqueued somewhere, it
141 	 * is to the dmawait list since that is the only place where
142 	 * it is supposed to be enqueued.
143 	 */
144 	xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
145 	if (list_empty(&pq->busy.list))
146 		iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
147 	write_sequnlock(&dev->iowait_lock);
148 	return -EBUSY;
149 eagain:
150 	write_sequnlock(&dev->iowait_lock);
151 	return -EAGAIN;
152 }
153 
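/*
 * Wakeup callback for the packet queue's iowait: mark the queue as
 * active again and wake any caller waiting on wait_dma.
 */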
154 static void activate_packet_queue(struct iowait *wait, int reason)
155 {
156 	struct hfi1_user_sdma_pkt_q *pq =
157 		container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
158 	xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
159 	wake_up(&wait->wait_dma);
160 };
161 
162 int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
163 				struct hfi1_filedata *fd)
164 {
165 	int ret = -ENOMEM;
166 	char buf[64];
167 	struct hfi1_devdata *dd;
168 	struct hfi1_user_sdma_comp_q *cq;
169 	struct hfi1_user_sdma_pkt_q *pq;
170 
171 	if (!uctxt || !fd)
172 		return -EBADF;
173 
174 	if (!hfi1_sdma_comp_ring_size)
175 		return -EINVAL;
176 
177 	dd = uctxt->dd;
178 
179 	pq = kzalloc(sizeof(*pq), GFP_KERNEL);
180 	if (!pq)
181 		return -ENOMEM;
182 	pq->dd = dd;
183 	pq->ctxt = uctxt->ctxt;
184 	pq->subctxt = fd->subctxt;
185 	pq->n_max_reqs = hfi1_sdma_comp_ring_size;
186 	atomic_set(&pq->n_reqs, 0);
187 	init_waitqueue_head(&pq->wait);
188 	atomic_set(&pq->n_locked, 0);
189 	pq->mm = fd->mm;
190 
191 	iowait_init(&pq->busy, 0, NULL, defer_packet_queue,
192 		    activate_packet_queue, NULL);
193 	pq->reqidx = 0;
194 
195 	pq->reqs = kcalloc(hfi1_sdma_comp_ring_size,
196 			   sizeof(*pq->reqs),
197 			   GFP_KERNEL);
198 	if (!pq->reqs)
199 		goto pq_reqs_nomem;
200 
201 	pq->req_in_use = kcalloc(BITS_TO_LONGS(hfi1_sdma_comp_ring_size),
202 				 sizeof(*pq->req_in_use),
203 				 GFP_KERNEL);
204 	if (!pq->req_in_use)
205 		goto pq_reqs_no_in_use;
206 
207 	snprintf(buf, 64, "txreq-kmem-cache-%u-%u-%u", dd->unit, uctxt->ctxt,
208 		 fd->subctxt);
209 	pq->txreq_cache = kmem_cache_create(buf,
210 					    sizeof(struct user_sdma_txreq),
211 					    L1_CACHE_BYTES,
212 					    SLAB_HWCACHE_ALIGN,
213 					    NULL);
214 	if (!pq->txreq_cache) {
215 		dd_dev_err(dd, "[%u] Failed to allocate TxReq cache\n",
216 			   uctxt->ctxt);
217 		goto pq_txreq_nomem;
218 	}
219 
220 	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
221 	if (!cq)
222 		goto cq_nomem;
223 
224 	cq->comps = vmalloc_user(PAGE_ALIGN(sizeof(*cq->comps)
225 				 * hfi1_sdma_comp_ring_size));
226 	if (!cq->comps)
227 		goto cq_comps_nomem;
228 
229 	cq->nentries = hfi1_sdma_comp_ring_size;
230 
231 	ret = hfi1_mmu_rb_register(pq, pq->mm, &sdma_rb_ops, dd->pport->hfi1_wq,
232 				   &pq->handler);
233 	if (ret) {
234 		dd_dev_err(dd, "Failed to register with MMU %d", ret);
235 		goto pq_mmu_fail;
236 	}
237 
238 	rcu_assign_pointer(fd->pq, pq);
239 	fd->cq = cq;
240 
241 	return 0;
242 
243 pq_mmu_fail:
244 	vfree(cq->comps);
245 cq_comps_nomem:
246 	kfree(cq);
247 cq_nomem:
248 	kmem_cache_destroy(pq->txreq_cache);
249 pq_txreq_nomem:
250 	kfree(pq->req_in_use);
251 pq_reqs_no_in_use:
252 	kfree(pq->reqs);
253 pq_reqs_nomem:
254 	kfree(pq);
255 
256 	return ret;
257 }
258 
259 int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
260 			       struct hfi1_ctxtdata *uctxt)
261 {
262 	struct hfi1_user_sdma_pkt_q *pq;
263 
264 	trace_hfi1_sdma_user_free_queues(uctxt->dd, uctxt->ctxt, fd->subctxt);
265 
266 	spin_lock(&fd->pq_rcu_lock);
267 	pq = srcu_dereference_check(fd->pq, &fd->pq_srcu,
268 				    lockdep_is_held(&fd->pq_rcu_lock));
269 	if (pq) {
270 		rcu_assign_pointer(fd->pq, NULL);
271 		spin_unlock(&fd->pq_rcu_lock);
272 		synchronize_srcu(&fd->pq_srcu);
273 		/* at this point there can be no more new requests */
274 		if (pq->handler)
275 			hfi1_mmu_rb_unregister(pq->handler);
276 		iowait_sdma_drain(&pq->busy);
277 		/* Wait until all requests have been freed. */
278 		wait_event_interruptible(
279 			pq->wait,
280 			!atomic_read(&pq->n_reqs));
281 		kfree(pq->reqs);
282 		kfree(pq->req_in_use);
283 		kmem_cache_destroy(pq->txreq_cache);
284 		kfree(pq);
285 	} else {
286 		spin_unlock(&fd->pq_rcu_lock);
287 	}
288 	if (fd->cq) {
289 		vfree(fd->cq->comps);
290 		kfree(fd->cq);
291 		fd->cq = NULL;
292 	}
293 	return 0;
294 }
295 
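/*
 * Hash a DLID down to a small selector value. Selector numbers are
 * handed out lazily, so packets to the same destination LID map to the
 * same selector (and therefore tend to use the same SDMA engine).
 */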
296 static u8 dlid_to_selector(u16 dlid)
297 {
298 	static u8 mapping[256];
299 	static int initialized;
300 	static u8 next;
301 	int hash;
302 
303 	if (!initialized) {
304 		memset(mapping, 0xFF, 256);
305 		initialized = 1;
306 	}
307 
308 	hash = ((dlid >> 8) ^ dlid) & 0xFF;
309 	if (mapping[hash] == 0xFF) {
310 		mapping[hash] = next;
311 		next = (next + 1) & 0x7F;
312 	}
313 
314 	return mapping[hash];
315 }
316 
317 /**
318  * hfi1_user_sdma_process_request() - Process and start a user sdma request
319  * @fd: valid file descriptor
320  * @iovec: array of io vectors to process
321  * @dim: overall iovec array size
322  * @count: number of io vector array entries processed
323  */
324 int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
325 				   struct iovec *iovec, unsigned long dim,
326 				   unsigned long *count)
327 {
328 	int ret = 0, i;
329 	struct hfi1_ctxtdata *uctxt = fd->uctxt;
330 	struct hfi1_user_sdma_pkt_q *pq =
331 		srcu_dereference(fd->pq, &fd->pq_srcu);
332 	struct hfi1_user_sdma_comp_q *cq = fd->cq;
333 	struct hfi1_devdata *dd = pq->dd;
334 	unsigned long idx = 0;
335 	u8 pcount = initial_pkt_count;
336 	struct sdma_req_info info;
337 	struct user_sdma_request *req;
338 	u8 opcode, sc, vl;
339 	u16 pkey;
340 	u32 slid;
341 	u16 dlid;
342 	u32 selector;
343 
344 	if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {
345 		hfi1_cdbg(
346 		   SDMA,
347 		   "[%u:%u:%u] First vector not big enough for header %lu/%lu",
348 		   dd->unit, uctxt->ctxt, fd->subctxt,
349 		   iovec[idx].iov_len, sizeof(info) + sizeof(req->hdr));
350 		return -EINVAL;
351 	}
352 	ret = copy_from_user(&info, iovec[idx].iov_base, sizeof(info));
353 	if (ret) {
354 		hfi1_cdbg(SDMA, "[%u:%u:%u] Failed to copy info QW (%d)",
355 			  dd->unit, uctxt->ctxt, fd->subctxt, ret);
356 		return -EFAULT;
357 	}
358 
359 	trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, fd->subctxt,
360 				     (u16 *)&info);
361 	if (info.comp_idx >= hfi1_sdma_comp_ring_size) {
362 		hfi1_cdbg(SDMA,
363 			  "[%u:%u:%u:%u] Invalid comp index",
364 			  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
365 		return -EINVAL;
366 	}
367 
368 	/*
369 	 * Sanity check the header io vector count.  Need at least 1 vector
370 	 * (header) and cannot be larger than the actual io vector count.
371 	 */
372 	if (req_iovcnt(info.ctrl) < 1 || req_iovcnt(info.ctrl) > dim) {
373 		hfi1_cdbg(SDMA,
374 			  "[%u:%u:%u:%u] Invalid iov count %d, dim %ld",
375 			  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx,
376 			  req_iovcnt(info.ctrl), dim);
377 		return -EINVAL;
378 	}
379 
380 	if (!info.fragsize) {
381 		hfi1_cdbg(SDMA,
382 			  "[%u:%u:%u:%u] Request does not specify fragsize",
383 			  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
384 		return -EINVAL;
385 	}
386 
387 	/* Try to claim the request. */
388 	if (test_and_set_bit(info.comp_idx, pq->req_in_use)) {
389 		hfi1_cdbg(SDMA, "[%u:%u:%u] Entry %u is in use",
390 			  dd->unit, uctxt->ctxt, fd->subctxt,
391 			  info.comp_idx);
392 		return -EBADSLT;
393 	}
394 	/*
395 	 * All safety checks have been done and this request has been claimed.
396 	 */
397 	trace_hfi1_sdma_user_process_request(dd, uctxt->ctxt, fd->subctxt,
398 					     info.comp_idx);
399 	req = pq->reqs + info.comp_idx;
400 	req->data_iovs = req_iovcnt(info.ctrl) - 1; /* subtract header vector */
401 	req->data_len  = 0;
402 	req->pq = pq;
403 	req->cq = cq;
404 	req->ahg_idx = -1;
405 	req->iov_idx = 0;
406 	req->sent = 0;
407 	req->seqnum = 0;
408 	req->seqcomp = 0;
409 	req->seqsubmitted = 0;
410 	req->tids = NULL;
411 	req->has_error = 0;
412 	INIT_LIST_HEAD(&req->txps);
413 
414 	memcpy(&req->info, &info, sizeof(info));
415 
416 	/* The request is initialized, count it */
417 	atomic_inc(&pq->n_reqs);
418 
419 	if (req_opcode(info.ctrl) == EXPECTED) {
420 		/* An expected request must have TID info and at least one data vector. */
421 		if (req->data_iovs < 2) {
422 			SDMA_DBG(req,
423 				 "Not enough vectors for expected request");
424 			ret = -EINVAL;
425 			goto free_req;
426 		}
427 		req->data_iovs--;
428 	}
429 
430 	if (!info.npkts || req->data_iovs > MAX_VECTORS_PER_REQ) {
431 		SDMA_DBG(req, "Too many vectors (%u/%u)", req->data_iovs,
432 			 MAX_VECTORS_PER_REQ);
433 		ret = -EINVAL;
434 		goto free_req;
435 	}
436 	/* Copy the header from the user buffer */
437 	ret = copy_from_user(&req->hdr, iovec[idx].iov_base + sizeof(info),
438 			     sizeof(req->hdr));
439 	if (ret) {
440 		SDMA_DBG(req, "Failed to copy header template (%d)", ret);
441 		ret = -EFAULT;
442 		goto free_req;
443 	}
444 
445 	/* If Static rate control is not enabled, sanitize the header. */
446 	if (!HFI1_CAP_IS_USET(STATIC_RATE_CTRL))
447 		req->hdr.pbc[2] = 0;
448 
449 	/* Validate the opcode. Do not trust packets from user space blindly. */
450 	opcode = (be32_to_cpu(req->hdr.bth[0]) >> 24) & 0xff;
451 	if ((opcode & USER_OPCODE_CHECK_MASK) !=
452 	     USER_OPCODE_CHECK_VAL) {
453 		SDMA_DBG(req, "Invalid opcode (%d)", opcode);
454 		ret = -EINVAL;
455 		goto free_req;
456 	}
457 	/*
458 	 * Validate the vl. Do not trust packets from user space blindly.
459 	 * VL comes from PBC, SC comes from LRH, and the VL needs to
460 	 * match the SC look up.
461 	 */
462 	vl = (le16_to_cpu(req->hdr.pbc[0]) >> 12) & 0xF;
463 	sc = (((be16_to_cpu(req->hdr.lrh[0]) >> 12) & 0xF) |
464 	      (((le16_to_cpu(req->hdr.pbc[1]) >> 14) & 0x1) << 4));
465 	if (vl >= dd->pport->vls_operational ||
466 	    vl != sc_to_vlt(dd, sc)) {
467 		SDMA_DBG(req, "Invalid SC(%u)/VL(%u)", sc, vl);
468 		ret = -EINVAL;
469 		goto free_req;
470 	}
471 
472 	/* Checking P_KEY for requests from user-space */
473 	pkey = (u16)be32_to_cpu(req->hdr.bth[0]);
474 	slid = be16_to_cpu(req->hdr.lrh[3]);
475 	if (egress_pkey_check(dd->pport, slid, pkey, sc, PKEY_CHECK_INVALID)) {
476 		ret = -EINVAL;
477 		goto free_req;
478 	}
479 
480 	/*
481 	 * We should also check the BTH.lnh. If it indicates that the next header
482 	 * is a GRH, the RXE parsing will be off and will land in the middle of
483 	 * the KDETH or miss it entirely.
484 	 */
485 	if ((be16_to_cpu(req->hdr.lrh[0]) & 0x3) == HFI1_LRH_GRH) {
486 		SDMA_DBG(req, "User tried to pass in a GRH");
487 		ret = -EINVAL;
488 		goto free_req;
489 	}
490 
491 	req->koffset = le32_to_cpu(req->hdr.kdeth.swdata[6]);
492 	/*
493 	 * Calculate the initial TID offset based on the values of
494 	 * KDETH.OFFSET and KDETH.OM that are passed in.
495 	 */
496 	req->tidoffset = KDETH_GET(req->hdr.kdeth.ver_tid_offset, OFFSET) *
497 		(KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
498 		 KDETH_OM_LARGE : KDETH_OM_SMALL);
499 	trace_hfi1_sdma_user_initial_tidoffset(dd, uctxt->ctxt, fd->subctxt,
500 					       info.comp_idx, req->tidoffset);
501 	idx++;
502 
503 	/* Save all the IO vector structures */
504 	for (i = 0; i < req->data_iovs; i++) {
505 		req->iovs[i].offset = 0;
506 		INIT_LIST_HEAD(&req->iovs[i].list);
507 		memcpy(&req->iovs[i].iov,
508 		       iovec + idx++,
509 		       sizeof(req->iovs[i].iov));
510 		ret = pin_vector_pages(req, &req->iovs[i]);
511 		if (ret) {
512 			req->data_iovs = i;
513 			goto free_req;
514 		}
515 		req->data_len += req->iovs[i].iov.iov_len;
516 	}
517 	trace_hfi1_sdma_user_data_length(dd, uctxt->ctxt, fd->subctxt,
518 					 info.comp_idx, req->data_len);
519 	if (pcount > req->info.npkts)
520 		pcount = req->info.npkts;
521 	/*
522 	 * Copy any TID info
523 	 * User space will provide the TID info only when the
524 	 * request type is EXPECTED. This is true even if there is
525 	 * only one packet in the request and the header is already
526 	 * setup. The reason for the singular TID case is that the
527 	 * set up. The reason for the singular TID case is that the
528 	 */
529 	if (req_opcode(req->info.ctrl) == EXPECTED) {
530 		u16 ntids = iovec[idx].iov_len / sizeof(*req->tids);
531 		u32 *tmp;
532 
533 		if (!ntids || ntids > MAX_TID_PAIR_ENTRIES) {
534 			ret = -EINVAL;
535 			goto free_req;
536 		}
537 
538 		/*
539 		 * We have to copy all of the tids because they may vary
540 		 * in size and, therefore, the TID count might not be
541 		 * equal to the pkt count. However, there is no way to
542 		 * tell at this point.
543 		 */
544 		tmp = memdup_user(iovec[idx].iov_base,
545 				  ntids * sizeof(*req->tids));
546 		if (IS_ERR(tmp)) {
547 			ret = PTR_ERR(tmp);
548 			SDMA_DBG(req, "Failed to copy %d TIDs (%d)",
549 				 ntids, ret);
550 			goto free_req;
551 		}
552 		req->tids = tmp;
553 		req->n_tids = ntids;
554 		req->tididx = 0;
555 		idx++;
556 	}
557 
558 	dlid = be16_to_cpu(req->hdr.lrh[1]);
559 	selector = dlid_to_selector(dlid);
560 	selector += uctxt->ctxt + fd->subctxt;
561 	req->sde = sdma_select_user_engine(dd, selector, vl);
562 
563 	if (!req->sde || !sdma_running(req->sde)) {
564 		ret = -ECOMM;
565 		goto free_req;
566 	}
567 
568 	/* We don't need an AHG entry if the request contains only one packet */
569 	if (req->info.npkts > 1 && HFI1_CAP_IS_USET(SDMA_AHG))
570 		req->ahg_idx = sdma_ahg_alloc(req->sde);
571 
572 	set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
573 	pq->state = SDMA_PKT_Q_ACTIVE;
574 	/* Send the first N packets in the request to buy us some time */
575 	ret = user_sdma_send_pkts(req, pcount);
576 	if (unlikely(ret < 0 && ret != -EBUSY))
577 		goto free_req;
578 
579 	/*
580 	 * This is a somewhat blocking send implementation.
581 	 * The driver will block the caller until all packets of the
582 	 * request have been submitted to the SDMA engine. However, it
583 	 * will not wait for send completions.
584 	 */
585 	while (req->seqsubmitted != req->info.npkts) {
586 		ret = user_sdma_send_pkts(req, pcount);
587 		if (ret < 0) {
588 			if (ret != -EBUSY)
589 				goto free_req;
590 			wait_event_interruptible_timeout(
591 				pq->busy.wait_dma,
592 				(pq->state == SDMA_PKT_Q_ACTIVE),
593 				msecs_to_jiffies(
594 					SDMA_IOWAIT_TIMEOUT));
595 		}
596 	}
597 	*count += idx;
598 	return 0;
599 free_req:
600 	/*
601 	 * If seqsubmitted == npkts, the completion routine controls the
602 	 * final state.  If seqsubmitted < npkts, wait for any
603 	 * outstanding packets to finish before cleaning up.
604 	 */
605 	if (req->seqsubmitted < req->info.npkts) {
606 		if (req->seqsubmitted)
607 			wait_event(pq->busy.wait_dma,
608 				   (req->seqcomp == req->seqsubmitted - 1));
609 		user_sdma_free_request(req, true);
610 		pq_update(pq);
611 		set_comp_state(pq, cq, info.comp_idx, ERROR, ret);
612 	}
613 	return ret;
614 }
615 
616 static inline u32 compute_data_length(struct user_sdma_request *req,
617 				      struct user_sdma_txreq *tx)
618 {
619 	/*
620 	 * Determine the proper size of the packet data.
621 	 * The size of the data of the first packet is in the header
622 	 * template. However, it includes the header and ICRC, which need
623 	 * to be subtracted.
624 	 * The minimum representable packet data length in a header is 4 bytes,
625 	 * therefore, when the request's data length is less than 4 bytes, there
626 	 * is only one packet, and its data length is equal to the request's
627 	 * data length.
628 	 * The size of the remaining packets is the minimum of the frag
629 	 * size (MTU) or remaining data in the request.
630 	 */
631 	u32 len;
632 
633 	if (!req->seqnum) {
634 		if (req->data_len < sizeof(u32))
635 			len = req->data_len;
636 		else
637 			len = ((be16_to_cpu(req->hdr.lrh[2]) << 2) -
638 			       (sizeof(tx->hdr) - 4));
639 	} else if (req_opcode(req->info.ctrl) == EXPECTED) {
640 		u32 tidlen = EXP_TID_GET(req->tids[req->tididx], LEN) *
641 			PAGE_SIZE;
642 		/*
643 		 * Get the data length based on the remaining space in the
644 		 * TID pair.
645 		 */
646 		len = min(tidlen - req->tidoffset, (u32)req->info.fragsize);
647 		/* If we've filled up the TID pair, move to the next one. */
648 		if (unlikely(!len) && ++req->tididx < req->n_tids &&
649 		    req->tids[req->tididx]) {
650 			tidlen = EXP_TID_GET(req->tids[req->tididx],
651 					     LEN) * PAGE_SIZE;
652 			req->tidoffset = 0;
653 			len = min_t(u32, tidlen, req->info.fragsize);
654 		}
655 		/*
656 		 * Since the TID pairs map entire pages, make sure that we
657 		 * are not going to try to send more data than we have
658 		 * remaining.
659 		 */
660 		len = min(len, req->data_len - req->sent);
661 	} else {
662 		len = min(req->data_len - req->sent, (u32)req->info.fragsize);
663 	}
664 	trace_hfi1_sdma_user_compute_length(req->pq->dd,
665 					    req->pq->ctxt,
666 					    req->pq->subctxt,
667 					    req->info.comp_idx,
668 					    len);
669 	return len;
670 }
671 
672 static inline u32 pad_len(u32 len)
673 {
674 	if (len & (sizeof(u32) - 1))
675 		len += sizeof(u32) - (len & (sizeof(u32) - 1));
676 	return len;
677 }
678 
679 static inline u32 get_lrh_len(struct hfi1_pkt_header hdr, u32 len)
680 {
681 	/* (Size of complete header - size of PBC) + 4B ICRC + data length */
682 	return ((sizeof(hdr) - sizeof(hdr.pbc)) + 4 + len);
683 }
684 
685 static int user_sdma_txadd_ahg(struct user_sdma_request *req,
686 			       struct user_sdma_txreq *tx,
687 			       u32 datalen)
688 {
689 	int ret;
690 	u16 pbclen = le16_to_cpu(req->hdr.pbc[0]);
691 	u32 lrhlen = get_lrh_len(req->hdr, pad_len(datalen));
692 	struct hfi1_user_sdma_pkt_q *pq = req->pq;
693 
694 	/*
695 	 * Copy the request header into the tx header
696 	 * because the HW needs a cacheline-aligned
697 	 * address.
698 	 * This copy can be optimized out if the hdr
699 	 * member of user_sdma_request were also
700 	 * cacheline aligned.
701 	 */
702 	memcpy(&tx->hdr, &req->hdr, sizeof(tx->hdr));
703 	if (PBC2LRH(pbclen) != lrhlen) {
704 		pbclen = (pbclen & 0xf000) | LRH2PBC(lrhlen);
705 		tx->hdr.pbc[0] = cpu_to_le16(pbclen);
706 	}
707 	ret = check_header_template(req, &tx->hdr, lrhlen, datalen);
708 	if (ret)
709 		return ret;
710 	ret = sdma_txinit_ahg(&tx->txreq, SDMA_TXREQ_F_AHG_COPY,
711 			      sizeof(tx->hdr) + datalen, req->ahg_idx,
712 			      0, NULL, 0, user_sdma_txreq_cb);
713 	if (ret)
714 		return ret;
715 	ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq, &tx->hdr, sizeof(tx->hdr));
716 	if (ret)
717 		sdma_txclean(pq->dd, &tx->txreq);
718 	return ret;
719 }
720 
721 static int user_sdma_txadd(struct user_sdma_request *req,
722 			   struct user_sdma_txreq *tx,
723 			   struct user_sdma_iovec *iovec, u32 datalen,
724 			   u32 *queued_ptr, u32 *data_sent_ptr,
725 			   u64 *iov_offset_ptr)
726 {
727 	int ret;
728 	unsigned int pageidx, len;
729 	unsigned long base, offset;
730 	u64 iov_offset = *iov_offset_ptr;
731 	u32 queued = *queued_ptr, data_sent = *data_sent_ptr;
732 	struct hfi1_user_sdma_pkt_q *pq = req->pq;
733 
734 	base = (unsigned long)iovec->iov.iov_base;
735 	offset = offset_in_page(base + iovec->offset + iov_offset);
736 	pageidx = (((iovec->offset + iov_offset + base) - (base & PAGE_MASK)) >>
737 		   PAGE_SHIFT);
738 	len = offset + req->info.fragsize > PAGE_SIZE ?
739 		PAGE_SIZE - offset : req->info.fragsize;
740 	len = min((datalen - queued), len);
741 	ret = sdma_txadd_page(pq->dd, &tx->txreq, iovec->pages[pageidx],
742 			      offset, len);
743 	if (ret) {
744 		SDMA_DBG(req, "SDMA txreq add page failed %d\n", ret);
745 		return ret;
746 	}
747 	iov_offset += len;
748 	queued += len;
749 	data_sent += len;
750 	if (unlikely(queued < datalen && pageidx == iovec->npages &&
751 		     req->iov_idx < req->data_iovs - 1)) {
752 		iovec->offset += iov_offset;
753 		iovec = &req->iovs[++req->iov_idx];
754 		iov_offset = 0;
755 	}
756 
757 	*queued_ptr = queued;
758 	*data_sent_ptr = data_sent;
759 	*iov_offset_ptr = iov_offset;
760 	return ret;
761 }
762 
763 static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
764 {
765 	int ret = 0, count;
766 	unsigned npkts = 0;
767 	struct user_sdma_txreq *tx = NULL;
768 	struct hfi1_user_sdma_pkt_q *pq = NULL;
769 	struct user_sdma_iovec *iovec = NULL;
770 
771 	if (!req->pq)
772 		return -EINVAL;
773 
774 	pq = req->pq;
775 
776 	/* If tx completion has reported an error, we are done. */
777 	if (READ_ONCE(req->has_error))
778 		return -EFAULT;
779 
780 	/*
781 	 * Check if we might have sent the entire request already
782 	 */
783 	if (unlikely(req->seqnum == req->info.npkts)) {
784 		if (!list_empty(&req->txps))
785 			goto dosend;
786 		return ret;
787 	}
788 
789 	if (!maxpkts || maxpkts > req->info.npkts - req->seqnum)
790 		maxpkts = req->info.npkts - req->seqnum;
791 
792 	while (npkts < maxpkts) {
793 		u32 datalen = 0, queued = 0, data_sent = 0;
794 		u64 iov_offset = 0;
795 
796 		/*
797 		 * Check whether any of the completions have come back
798 		 * with errors. If so, we are not going to process any
799 		 * more packets from this request.
800 		 */
801 		if (READ_ONCE(req->has_error))
802 			return -EFAULT;
803 
804 		tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL);
805 		if (!tx)
806 			return -ENOMEM;
807 
808 		tx->flags = 0;
809 		tx->req = req;
810 		INIT_LIST_HEAD(&tx->list);
811 
812 		/*
813 		 * For the last packet set the ACK request
814 		 * and disable header suppression.
815 		 */
816 		if (req->seqnum == req->info.npkts - 1)
817 			tx->flags |= (TXREQ_FLAGS_REQ_ACK |
818 				      TXREQ_FLAGS_REQ_DISABLE_SH);
819 
820 		/*
821 		 * Calculate the payload size - this is min of the fragment
822 		 * (MTU) size or the remaining bytes in the request but only
823 		 * if we have payload data.
824 		 */
825 		if (req->data_len) {
826 			iovec = &req->iovs[req->iov_idx];
827 			if (ACCESS_ONCE(iovec->offset) == iovec->iov.iov_len) {
828 				if (++req->iov_idx == req->data_iovs) {
829 					ret = -EFAULT;
830 					goto free_tx;
831 				}
832 				iovec = &req->iovs[req->iov_idx];
833 				WARN_ON(iovec->offset);
834 			}
835 
836 			datalen = compute_data_length(req, tx);
837 
838 			/*
839 			 * Disable header suppression for the payload <= 8DWS.
840 			 * If there is an uncorrectable error in the receive
841 			 * data FIFO when the received payload size is less than
842 			 * or equal to 8DWS then the RxDmaDataFifoRdUncErr is
843 			 * not reported. Instead, RHF.EccErr is set if the header
844 			 * is not suppressed.
845 			 */
846 			if (!datalen) {
847 				SDMA_DBG(req,
848 					 "Request has data but pkt len is 0");
849 				ret = -EFAULT;
850 				goto free_tx;
851 			} else if (datalen <= 32) {
852 				tx->flags |= TXREQ_FLAGS_REQ_DISABLE_SH;
853 			}
854 		}
855 
856 		if (req->ahg_idx >= 0) {
857 			if (!req->seqnum) {
858 				ret = user_sdma_txadd_ahg(req, tx, datalen);
859 				if (ret)
860 					goto free_tx;
861 			} else {
862 				int changes;
863 
864 				changes = set_txreq_header_ahg(req, tx,
865 							       datalen);
866 				if (changes < 0) {
867 					ret = changes;
868 					goto free_tx;
869 				}
870 			}
871 		} else {
872 			ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) +
873 					  datalen, user_sdma_txreq_cb);
874 			if (ret)
875 				goto free_tx;
876 			/*
877 			 * Modify the header for this packet. This only needs
878 			 * to be done if we are not going to use AHG. Otherwise,
879 			 * the HW will do it based on the changes we gave it
880 			 * during sdma_txinit_ahg().
881 			 */
882 			ret = set_txreq_header(req, tx, datalen);
883 			if (ret)
884 				goto free_txreq;
885 		}
886 
887 		/*
888 		 * If the request contains any data vectors, add up to
889 		 * fragsize bytes to the descriptor.
890 		 */
891 		while (queued < datalen &&
892 		       (req->sent + data_sent) < req->data_len) {
893 			ret = user_sdma_txadd(req, tx, iovec, datalen,
894 					      &queued, &data_sent, &iov_offset);
895 			if (ret)
896 				goto free_txreq;
897 		}
898 		/*
899 		 * The txreq was submitted successfully so we can update
900 		 * the counters.
901 		 */
902 		req->koffset += datalen;
903 		if (req_opcode(req->info.ctrl) == EXPECTED)
904 			req->tidoffset += datalen;
905 		req->sent += data_sent;
906 		if (req->data_len)
907 			iovec->offset += iov_offset;
908 		list_add_tail(&tx->txreq.list, &req->txps);
909 		/*
910 		 * It is important to increment this here as it is used to
911 		 * generate the BTH.PSN and, therefore, can't be bulk-updated
912 		 * outside of the loop.
913 		 */
914 		tx->seqnum = req->seqnum++;
915 		npkts++;
916 	}
917 dosend:
918 	ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps, &count);
919 	req->seqsubmitted += count;
920 	if (req->seqsubmitted == req->info.npkts) {
921 		/*
922 		 * The txreq has already been submitted to the HW queue
923 		 * so we can free the AHG entry now. Corruption will not
924 		 * happen due to the sequential manner in which
925 		 * descriptors are processed.
926 		 */
927 		if (req->ahg_idx >= 0)
928 			sdma_ahg_free(req->sde, req->ahg_idx);
929 	}
930 	return ret;
931 
932 free_txreq:
933 	sdma_txclean(pq->dd, &tx->txreq);
934 free_tx:
935 	kmem_cache_free(pq->txreq_cache, tx);
936 	return ret;
937 }
938 
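/*
 * Ask the MMU rb-tree handler to evict unused cached pinnings until at
 * least npages pages have been cleared; return how many were actually
 * cleared.
 */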
939 static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
940 {
941 	struct evict_data evict_data;
942 
943 	evict_data.cleared = 0;
944 	evict_data.target = npages;
945 	hfi1_mmu_rb_evict(pq->handler, &evict_data);
946 	return evict_data.cleared;
947 }
948 
949 static int pin_sdma_pages(struct user_sdma_request *req,
950 			  struct user_sdma_iovec *iovec,
951 			  struct sdma_mmu_node *node,
952 			  int npages)
953 {
954 	int pinned, cleared;
955 	struct page **pages;
956 	struct hfi1_user_sdma_pkt_q *pq = req->pq;
957 
958 	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
959 	if (!pages) {
960 		SDMA_DBG(req, "Failed page array alloc");
961 		return -ENOMEM;
962 	}
963 	memcpy(pages, node->pages, node->npages * sizeof(*pages));
964 
965 	npages -= node->npages;
966 retry:
967 	if (!hfi1_can_pin_pages(pq->dd, pq->mm,
968 				atomic_read(&pq->n_locked), npages)) {
969 		cleared = sdma_cache_evict(pq, npages);
970 		if (cleared >= npages)
971 			goto retry;
972 	}
973 	pinned = hfi1_acquire_user_pages(pq->mm,
974 					 ((unsigned long)iovec->iov.iov_base +
975 					 (node->npages * PAGE_SIZE)), npages, 0,
976 					 pages + node->npages);
977 	if (pinned < 0) {
978 		kfree(pages);
979 		return pinned;
980 	}
981 	if (pinned != npages) {
982 		unpin_vector_pages(pq->mm, pages, node->npages, pinned);
983 		return -EFAULT;
984 	}
985 	kfree(node->pages);
986 	node->rb.len = iovec->iov.iov_len;
987 	node->pages = pages;
988 	atomic_add(pinned, &pq->n_locked);
989 	return pinned;
990 }
991 
992 static void unpin_sdma_pages(struct sdma_mmu_node *node)
993 {
994 	if (node->npages) {
995 		unpin_vector_pages(node->pq->mm, node->pages, 0, node->npages);
996 		atomic_sub(node->npages, &node->pq->n_locked);
997 	}
998 }
999 
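/*
 * Pin the user pages backing an I/O vector. A previously cached node
 * covering the same base address is reused (and extended if the new
 * vector is longer); otherwise a new node is created, its pages are
 * pinned, and the node is inserted into the MMU rb-tree cache.
 */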
1000 static int pin_vector_pages(struct user_sdma_request *req,
1001 			    struct user_sdma_iovec *iovec)
1002 {
1003 	int ret = 0, pinned, npages;
1004 	struct hfi1_user_sdma_pkt_q *pq = req->pq;
1005 	struct sdma_mmu_node *node = NULL;
1006 	struct mmu_rb_node *rb_node;
1007 	struct iovec *iov;
1008 	bool extracted;
1009 
1010 	extracted =
1011 		hfi1_mmu_rb_remove_unless_exact(pq->handler,
1012 						(unsigned long)
1013 						iovec->iov.iov_base,
1014 						iovec->iov.iov_len, &rb_node);
1015 	if (rb_node) {
1016 		node = container_of(rb_node, struct sdma_mmu_node, rb);
1017 		if (!extracted) {
1018 			atomic_inc(&node->refcount);
1019 			iovec->pages = node->pages;
1020 			iovec->npages = node->npages;
1021 			iovec->node = node;
1022 			return 0;
1023 		}
1024 	}
1025 
1026 	if (!node) {
1027 		node = kzalloc(sizeof(*node), GFP_KERNEL);
1028 		if (!node)
1029 			return -ENOMEM;
1030 
1031 		node->rb.addr = (unsigned long)iovec->iov.iov_base;
1032 		node->pq = pq;
1033 		atomic_set(&node->refcount, 0);
1034 	}
1035 
1036 	iov = &iovec->iov;
1037 	npages = num_user_pages((unsigned long)iov->iov_base, iov->iov_len);
1038 	if (node->npages < npages) {
1039 		pinned = pin_sdma_pages(req, iovec, node, npages);
1040 		if (pinned < 0) {
1041 			ret = pinned;
1042 			goto bail;
1043 		}
1044 		node->npages += pinned;
1045 		npages = node->npages;
1046 	}
1047 	iovec->pages = node->pages;
1048 	iovec->npages = npages;
1049 	iovec->node = node;
1050 
1051 	ret = hfi1_mmu_rb_insert(req->pq->handler, &node->rb);
1052 	if (ret) {
1053 		iovec->node = NULL;
1054 		goto bail;
1055 	}
1056 	return 0;
1057 bail:
1058 	unpin_sdma_pages(node);
1059 	kfree(node);
1060 	return ret;
1061 }
1062 
1063 static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
1064 			       unsigned start, unsigned npages)
1065 {
1066 	hfi1_release_user_pages(mm, pages + start, npages, false);
1067 	kfree(pages);
1068 }
1069 
1070 static int check_header_template(struct user_sdma_request *req,
1071 				 struct hfi1_pkt_header *hdr, u32 lrhlen,
1072 				 u32 datalen)
1073 {
1074 	/*
1075 	 * Perform safety checks for any type of packet:
1076 	 *    - transfer size is a multiple of 64 bytes
1077 	 *    - packet length is a multiple of 4 bytes
1078 	 *    - packet length is not larger than MTU size
1079 	 *
1080 	 * These checks are only done for the first packet of the
1081 	 * transfer since the header is "given" to us by user space.
1082 	 * For the remainder of the packets we compute the values.
1083 	 */
1084 	if (req->info.fragsize % PIO_BLOCK_SIZE || lrhlen & 0x3 ||
1085 	    lrhlen > get_lrh_len(*hdr, req->info.fragsize))
1086 		return -EINVAL;
1087 
1088 	if (req_opcode(req->info.ctrl) == EXPECTED) {
1089 		/*
1090 		 * The header is checked only on the first packet. Furthermore,
1091 		 * we ensure that at least one TID entry is copied when the
1092 		 * request is submitted. Therefore, we don't have to verify that
1093 		 * tididx points to something sane.
1094 		 */
1095 		u32 tidval = req->tids[req->tididx],
1096 			tidlen = EXP_TID_GET(tidval, LEN) * PAGE_SIZE,
1097 			tididx = EXP_TID_GET(tidval, IDX),
1098 			tidctrl = EXP_TID_GET(tidval, CTRL),
1099 			tidoff;
1100 		__le32 kval = hdr->kdeth.ver_tid_offset;
1101 
1102 		tidoff = KDETH_GET(kval, OFFSET) *
1103 			  (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
1104 			   KDETH_OM_LARGE : KDETH_OM_SMALL);
1105 		/*
1106 		 * Expected receive packets have the following
1107 		 * additional checks:
1108 		 *     - offset is not larger than the TID size
1109 		 *     - TIDCtrl values match between header and TID array
1110 		 *     - TID indexes match between header and TID array
1111 		 */
1112 		if ((tidoff + datalen > tidlen) ||
1113 		    KDETH_GET(kval, TIDCTRL) != tidctrl ||
1114 		    KDETH_GET(kval, TID) != tididx)
1115 			return -EINVAL;
1116 	}
1117 	return 0;
1118 }
1119 
1120 /*
1121  * Correctly set the BTH.PSN field based on type of
1122  * transfer - eager packets can just increment the PSN but
1123  * expected packets encode generation and sequence in the
1124  * BTH.PSN field so just incrementing will result in errors.
1125  */
1126 static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags)
1127 {
1128 	u32 val = be32_to_cpu(bthpsn),
1129 		mask = (HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffffull :
1130 			0xffffffull),
1131 		psn = val & mask;
1132 	if (expct)
1133 		psn = (psn & ~BTH_SEQ_MASK) | ((psn + frags) & BTH_SEQ_MASK);
1134 	else
1135 		psn = psn + frags;
1136 	return psn & mask;
1137 }
1138 
1139 static int set_txreq_header(struct user_sdma_request *req,
1140 			    struct user_sdma_txreq *tx, u32 datalen)
1141 {
1142 	struct hfi1_user_sdma_pkt_q *pq = req->pq;
1143 	struct hfi1_pkt_header *hdr = &tx->hdr;
1144 	u8 omfactor; /* KDETH.OM */
1145 	u16 pbclen;
1146 	int ret;
1147 	u32 tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen));
1148 
1149 	/* Copy the header template to the request before modification */
1150 	memcpy(hdr, &req->hdr, sizeof(*hdr));
1151 
1152 	/*
1153 	 * Check if the PBC and LRH length are mismatched. If so
1154 	 * adjust both in the header.
1155 	 */
1156 	pbclen = le16_to_cpu(hdr->pbc[0]);
1157 	if (PBC2LRH(pbclen) != lrhlen) {
1158 		pbclen = (pbclen & 0xf000) | LRH2PBC(lrhlen);
1159 		hdr->pbc[0] = cpu_to_le16(pbclen);
1160 		hdr->lrh[2] = cpu_to_be16(lrhlen >> 2);
1161 		/*
1162 		 * Third packet
1163 		 * This is the first packet in the sequence that has
1164 		 * a "static" size that can be used for the rest of
1165 		 * the packets (besides the last one).
1166 		 */
1167 		if (unlikely(req->seqnum == 2)) {
1168 			/*
1169 			 * From this point on the lengths in both the
1170 			 * PBC and LRH are the same until the last
1171 			 * packet.
1172 			 * Adjust the template so we don't have to update
1173 			 * every packet
1174 			 */
1175 			req->hdr.pbc[0] = hdr->pbc[0];
1176 			req->hdr.lrh[2] = hdr->lrh[2];
1177 		}
1178 	}
1179 	/*
1180 	 * We only have to modify the header if this is not the
1181 	 * first packet in the request. Otherwise, we use the
1182 	 * header given to us.
1183 	 */
1184 	if (unlikely(!req->seqnum)) {
1185 		ret = check_header_template(req, hdr, lrhlen, datalen);
1186 		if (ret)
1187 			return ret;
1188 		goto done;
1189 	}
1190 
1191 	hdr->bth[2] = cpu_to_be32(
1192 		set_pkt_bth_psn(hdr->bth[2],
1193 				(req_opcode(req->info.ctrl) == EXPECTED),
1194 				req->seqnum));
1195 
1196 	/* Set ACK request on last packet */
1197 	if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK))
1198 		hdr->bth[2] |= cpu_to_be32(1UL << 31);
1199 
1200 	/* Set the new offset */
1201 	hdr->kdeth.swdata[6] = cpu_to_le32(req->koffset);
1202 	/* Expected packets have to fill in the new TID information */
1203 	if (req_opcode(req->info.ctrl) == EXPECTED) {
1204 		tidval = req->tids[req->tididx];
1205 		/*
1206 		 * If the offset puts us at the end of the current TID,
1207 		 * advance everything.
1208 		 */
1209 		if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
1210 					 PAGE_SIZE)) {
1211 			req->tidoffset = 0;
1212 			/*
1213 			 * Since we don't copy all the TIDs all at once,
1214 			 * we have to check again.
1215 			 */
1216 			if (++req->tididx > req->n_tids - 1 ||
1217 			    !req->tids[req->tididx]) {
1218 				return -EINVAL;
1219 			}
1220 			tidval = req->tids[req->tididx];
1221 		}
1222 		omfactor = EXP_TID_GET(tidval, LEN) * PAGE_SIZE >=
1223 			KDETH_OM_MAX_SIZE ? KDETH_OM_LARGE_SHIFT :
1224 			KDETH_OM_SMALL_SHIFT;
1225 		/* Set KDETH.TIDCtrl based on value for this TID. */
1226 		KDETH_SET(hdr->kdeth.ver_tid_offset, TIDCTRL,
1227 			  EXP_TID_GET(tidval, CTRL));
1228 		/* Set KDETH.TID based on value for this TID */
1229 		KDETH_SET(hdr->kdeth.ver_tid_offset, TID,
1230 			  EXP_TID_GET(tidval, IDX));
1231 		/* Clear KDETH.SH when DISABLE_SH flag is set */
1232 		if (unlikely(tx->flags & TXREQ_FLAGS_REQ_DISABLE_SH))
1233 			KDETH_SET(hdr->kdeth.ver_tid_offset, SH, 0);
1234 		/*
1235 		 * Set the KDETH.OFFSET and KDETH.OM based on size of
1236 		 * transfer.
1237 		 */
1238 		trace_hfi1_sdma_user_tid_info(
1239 			pq->dd, pq->ctxt, pq->subctxt, req->info.comp_idx,
1240 			req->tidoffset, req->tidoffset >> omfactor,
1241 			omfactor != KDETH_OM_SMALL_SHIFT);
1242 		KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET,
1243 			  req->tidoffset >> omfactor);
1244 		KDETH_SET(hdr->kdeth.ver_tid_offset, OM,
1245 			  omfactor != KDETH_OM_SMALL_SHIFT);
1246 	}
1247 done:
1248 	trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
1249 				    req->info.comp_idx, hdr, tidval);
1250 	return sdma_txadd_kvaddr(pq->dd, &tx->txreq, hdr, sizeof(*hdr));
1251 }
1252 
1253 static int set_txreq_header_ahg(struct user_sdma_request *req,
1254 				struct user_sdma_txreq *tx, u32 datalen)
1255 {
1256 	u32 ahg[AHG_KDETH_ARRAY_SIZE];
1257 	int diff = 0;
1258 	u8 omfactor; /* KDETH.OM */
1259 	struct hfi1_user_sdma_pkt_q *pq = req->pq;
1260 	struct hfi1_pkt_header *hdr = &req->hdr;
1261 	u16 pbclen = le16_to_cpu(hdr->pbc[0]);
1262 	u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen));
1263 
1264 	if (PBC2LRH(pbclen) != lrhlen) {
1265 		/* PBC.PbcLengthDWs */
1266 		AHG_HEADER_SET(ahg, diff, 0, 0, 12,
1267 			       cpu_to_le16(LRH2PBC(lrhlen)));
1268 		/* LRH.PktLen (we need the full 16 bits due to byte swap) */
1269 		AHG_HEADER_SET(ahg, diff, 3, 0, 16,
1270 			       cpu_to_be16(lrhlen >> 2));
1271 	}
1272 
1273 	/*
1274 	 * Do the common updates
1275 	 */
1276 	/* BTH.PSN and BTH.A */
1277 	val32 = (be32_to_cpu(hdr->bth[2]) + req->seqnum) &
1278 		(HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffff : 0xffffff);
1279 	if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK))
1280 		val32 |= 1UL << 31;
1281 	AHG_HEADER_SET(ahg, diff, 6, 0, 16, cpu_to_be16(val32 >> 16));
1282 	AHG_HEADER_SET(ahg, diff, 6, 16, 16, cpu_to_be16(val32 & 0xffff));
1283 	/* KDETH.Offset */
1284 	AHG_HEADER_SET(ahg, diff, 15, 0, 16,
1285 		       cpu_to_le16(req->koffset & 0xffff));
1286 	AHG_HEADER_SET(ahg, diff, 15, 16, 16, cpu_to_le16(req->koffset >> 16));
1287 	if (req_opcode(req->info.ctrl) == EXPECTED) {
1288 		__le16 val;
1289 
1290 		tidval = req->tids[req->tididx];
1291 
1292 		/*
1293 		 * If the offset puts us at the end of the current TID,
1294 		 * advance everything.
1295 		 */
1296 		if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
1297 					 PAGE_SIZE)) {
1298 			req->tidoffset = 0;
1299 			/*
1300 			 * Since we don't copy all the TIDs all at once,
1301 			 * we have to check again.
1302 			 */
1303 			if (++req->tididx > req->n_tids - 1 ||
1304 			    !req->tids[req->tididx])
1305 				return -EINVAL;
1306 			tidval = req->tids[req->tididx];
1307 		}
1308 		omfactor = ((EXP_TID_GET(tidval, LEN) *
1309 				  PAGE_SIZE) >=
1310 				 KDETH_OM_MAX_SIZE) ? KDETH_OM_LARGE_SHIFT :
1311 				 KDETH_OM_SMALL_SHIFT;
1312 		/* KDETH.OM and KDETH.OFFSET (TID) */
1313 		AHG_HEADER_SET(ahg, diff, 7, 0, 16,
1314 			       ((!!(omfactor - KDETH_OM_SMALL_SHIFT)) << 15 |
1315 				((req->tidoffset >> omfactor)
1316 				 & 0x7fff)));
1317 		/* KDETH.TIDCtrl, KDETH.TID, KDETH.Intr, KDETH.SH */
1318 		val = cpu_to_le16(((EXP_TID_GET(tidval, CTRL) & 0x3) << 10) |
1319 				   (EXP_TID_GET(tidval, IDX) & 0x3ff));
1320 
1321 		if (unlikely(tx->flags & TXREQ_FLAGS_REQ_DISABLE_SH)) {
1322 			val |= cpu_to_le16((KDETH_GET(hdr->kdeth.ver_tid_offset,
1323 						      INTR) <<
1324 					    AHG_KDETH_INTR_SHIFT));
1325 		} else {
1326 			val |= KDETH_GET(hdr->kdeth.ver_tid_offset, SH) ?
1327 			       cpu_to_le16(0x1 << AHG_KDETH_SH_SHIFT) :
1328 			       cpu_to_le16((KDETH_GET(hdr->kdeth.ver_tid_offset,
1329 						      INTR) <<
1330 					     AHG_KDETH_INTR_SHIFT));
1331 		}
1332 
1333 		AHG_HEADER_SET(ahg, diff, 7, 16, 14, val);
1334 	}
1335 	if (diff < 0)
1336 		return diff;
1337 
1338 	trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
1339 					req->info.comp_idx, req->sde->this_idx,
1340 					req->ahg_idx, ahg, diff, tidval);
1341 	sdma_txinit_ahg(&tx->txreq,
1342 			SDMA_TXREQ_F_USE_AHG,
1343 			datalen, req->ahg_idx, diff,
1344 			ahg, sizeof(req->hdr),
1345 			user_sdma_txreq_cb);
1346 
1347 	return diff;
1348 }
1349 
1350 /**
1351  * user_sdma_txreq_cb() - SDMA tx request completion callback.
1352  * @txreq: valid sdma tx request
1353  * @status: success/failure of request
1354  *
1355  * Called when the SDMA progress state machine gets notification that
1356  * the SDMA descriptors for this tx request have been processed by the
1357  * DMA engine. Called in interrupt context.
1358  * Only do work on completed sequences.
1359  */
1360 static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
1361 {
1362 	struct user_sdma_txreq *tx =
1363 		container_of(txreq, struct user_sdma_txreq, txreq);
1364 	struct user_sdma_request *req;
1365 	struct hfi1_user_sdma_pkt_q *pq;
1366 	struct hfi1_user_sdma_comp_q *cq;
1367 	enum hfi1_sdma_comp_state state = COMPLETE;
1368 
1369 	if (!tx->req)
1370 		return;
1371 
1372 	req = tx->req;
1373 	pq = req->pq;
1374 	cq = req->cq;
1375 
1376 	if (status != SDMA_TXREQ_S_OK) {
1377 		SDMA_DBG(req, "SDMA completion with error %d",
1378 			 status);
1379 		WRITE_ONCE(req->has_error, 1);
1380 		state = ERROR;
1381 	}
1382 
1383 	req->seqcomp = tx->seqnum;
1384 	kmem_cache_free(pq->txreq_cache, tx);
1385 
1386 	/* sequence isn't complete?  We are done */
1387 	if (req->seqcomp != req->info.npkts - 1)
1388 		return;
1389 
1390 	user_sdma_free_request(req, false);
1391 	set_comp_state(pq, cq, req->info.comp_idx, state, status);
1392 	pq_update(pq);
1393 }
1394 
1395 static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
1396 {
1397 	if (atomic_dec_and_test(&pq->n_reqs))
1398 		wake_up(&pq->wait);
1399 }
1400 
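/*
 * Release all resources still held by a request: clean and free any
 * unsubmitted txreqs, drop (or remove for unpinning, if @unpin is true)
 * the cached page nodes for each data iovec, free the TID array, and
 * give the completion-ring slot back.
 */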
1401 static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
1402 {
1403 	if (!list_empty(&req->txps)) {
1404 		struct sdma_txreq *t, *p;
1405 
1406 		list_for_each_entry_safe(t, p, &req->txps, list) {
1407 			struct user_sdma_txreq *tx =
1408 				container_of(t, struct user_sdma_txreq, txreq);
1409 			list_del_init(&t->list);
1410 			sdma_txclean(req->pq->dd, t);
1411 			kmem_cache_free(req->pq->txreq_cache, tx);
1412 		}
1413 	}
1414 	if (req->data_iovs) {
1415 		struct sdma_mmu_node *node;
1416 		int i;
1417 
1418 		for (i = 0; i < req->data_iovs; i++) {
1419 			node = req->iovs[i].node;
1420 			if (!node)
1421 				continue;
1422 
1423 			req->iovs[i].node = NULL;
1424 
1425 			if (unpin)
1426 				hfi1_mmu_rb_remove(req->pq->handler,
1427 						   &node->rb);
1428 			else
1429 				atomic_dec(&node->refcount);
1430 		}
1431 	}
1432 	kfree(req->tids);
1433 	clear_bit(req->info.comp_idx, req->pq->req_in_use);
1434 }
1435 
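/*
 * Publish the final state of a request in the completion ring shared
 * with user space. The error code is written before the status, with a
 * write barrier in between, so user space never sees a stale errcode.
 */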
1436 static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
1437 				  struct hfi1_user_sdma_comp_q *cq,
1438 				  u16 idx, enum hfi1_sdma_comp_state state,
1439 				  int ret)
1440 {
1441 	if (state == ERROR)
1442 		cq->comps[idx].errcode = -ret;
1443 	smp_wmb(); /* make sure errcode is visible first */
1444 	cq->comps[idx].status = state;
1445 	trace_hfi1_sdma_user_completion(pq->dd, pq->ctxt, pq->subctxt,
1446 					idx, state, ret);
1447 }
1448 
1449 static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
1450 			   unsigned long len)
1451 {
1452 	return (bool)(node->addr == addr);
1453 }
1454 
1455 static int sdma_rb_insert(void *arg, struct mmu_rb_node *mnode)
1456 {
1457 	struct sdma_mmu_node *node =
1458 		container_of(mnode, struct sdma_mmu_node, rb);
1459 
1460 	atomic_inc(&node->refcount);
1461 	return 0;
1462 }
1463 
1464 /*
1465  * Return 1 to remove the node from the rb tree and call the remove op.
1466  *
1467  * Called with the rb tree lock held.
1468  */
1469 static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
1470 			 void *evict_arg, bool *stop)
1471 {
1472 	struct sdma_mmu_node *node =
1473 		container_of(mnode, struct sdma_mmu_node, rb);
1474 	struct evict_data *evict_data = evict_arg;
1475 
1476 	/* is this node still being used? */
1477 	if (atomic_read(&node->refcount))
1478 		return 0; /* keep this node */
1479 
1480 	/* this node will be evicted, add its pages to our count */
1481 	evict_data->cleared += node->npages;
1482 
1483 	/* have enough pages been cleared? */
1484 	if (evict_data->cleared >= evict_data->target)
1485 		*stop = true;
1486 
1487 	return 1; /* remove this node */
1488 }
1489 
1490 static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode)
1491 {
1492 	struct sdma_mmu_node *node =
1493 		container_of(mnode, struct sdma_mmu_node, rb);
1494 
1495 	unpin_sdma_pages(node);
1496 	kfree(node);
1497 }
1498 
1499 static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
1500 {
1501 	struct sdma_mmu_node *node =
1502 		container_of(mnode, struct sdma_mmu_node, rb);
1503 
1504 	if (!atomic_read(&node->refcount))
1505 		return 1;
1506 	return 0;
1507 }
1508