/*
 * Copyright(c) 2016 - 2020 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/hash.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_hdrs.h>
#include <rdma/opa_addr.h>
#include <rdma/uverbs_ioctl.h>
#include "qp.h"
#include "vt.h"
#include "trace.h"

#define RVT_RWQ_COUNT_THRESHOLD 16

static void rvt_rc_timeout(struct timer_list *t);
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			 enum ib_qp_type type);

/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 */
static const u32 ib_rvt_rnr_table[32] = {
	655360, /* 00: 655.36 */
	10,     /* 01:    .01 */
	20,     /* 02:    .02 */
	30,     /* 03:    .03 */
	40,     /* 04:    .04 */
	60,     /* 05:    .06 */
	80,     /* 06:    .08 */
	120,    /* 07:    .12 */
	160,    /* 08:    .16 */
	240,    /* 09:    .24 */
	320,    /* 0A:    .32 */
	480,    /* 0B:    .48 */
	640,    /* 0C:    .64 */
	960,    /* 0D:    .96 */
	1280,   /* 0E:   1.28 */
	1920,   /* 0F:   1.92 */
	2560,   /* 10:   2.56 */
	3840,   /* 11:   3.84 */
	5120,   /* 12:   5.12 */
	7680,   /* 13:   7.68 */
	10240,  /* 14:  10.24 */
	15360,  /* 15:  15.36 */
	20480,  /* 16:  20.48 */
	30720,  /* 17:  30.72 */
	40960,  /* 18:  40.96 */
	61440,  /* 19:  61.44 */
	81920,  /* 1A:  81.92 */
	122880, /* 1B: 122.88 */
	163840, /* 1C: 163.84 */
	245760, /* 1D: 245.76 */
	327680, /* 1E: 327.68 */
	491520  /* 1F: 491.52 */
};

/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; rvt_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = 0,
	[IB_QPS_INIT] = RVT_POST_RECV_OK,
	[IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
	[IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
	    RVT_PROCESS_NEXT_SEND_OK,
	[IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
	[IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
	[IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
};
EXPORT_SYMBOL(ib_rvt_state_ops);

/* platform specific: return the last level cache (llc) size, in KiB */
static int rvt_wss_llc_size(void)
{
	/* assume that the boot CPU value is universal for all CPUs */
	return boot_cpu_data.x86_cache_size;
}

/* platform specific: cacheless copy */
static void cacheless_memcpy(void *dst, void *src, size_t n)
{
	/*
	 * Use the only available X64 cacheless copy.  Add a __user cast
	 * to quiet sparse.  The src argument is already in the kernel so
	 * there are no security issues.  The extra fault recovery machinery
	 * is not invoked.
	 */
	__copy_user_nocache(dst, (void __user *)src, n, 0);
}

void rvt_wss_exit(struct rvt_dev_info *rdi)
{
	struct rvt_wss *wss = rdi->wss;

	if (!wss)
		return;

	/* coded to handle partially initialized and repeat callers */
	kfree(wss->entries);
	wss->entries = NULL;
	kfree(rdi->wss);
	rdi->wss = NULL;
}

/**
 * rvt_wss_init - Init wss data structures
 * @rdi: rvt dev struct
 *
 * Return: 0 on success
 */
int rvt_wss_init(struct rvt_dev_info *rdi)
{
	unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
	unsigned int wss_threshold = rdi->dparms.wss_threshold;
	unsigned int wss_clean_period = rdi->dparms.wss_clean_period;
	long llc_size;
	long llc_bits;
	long table_size;
	long table_bits;
	struct rvt_wss *wss;
	int node = rdi->dparms.node;

	if (sge_copy_mode != RVT_SGE_COPY_ADAPTIVE) {
		rdi->wss = NULL;
		return 0;
	}

	rdi->wss = kzalloc_node(sizeof(*rdi->wss), GFP_KERNEL, node);
	if (!rdi->wss)
		return -ENOMEM;
	wss = rdi->wss;

	/* check for a valid percent range - default to 80 if none or invalid */
	if (wss_threshold < 1 || wss_threshold > 100)
		wss_threshold = 80;

	/* reject a wildly large period */
	if (wss_clean_period > 1000000)
		wss_clean_period = 256;

	/* reject a zero period */
	if (wss_clean_period == 0)
		wss_clean_period = 1;

	/*
	 * Calculate the table size - the next power of 2 larger than the
	 * LLC size.  LLC size is in KiB.
	 */
	llc_size = rvt_wss_llc_size() * 1024;
	table_size = roundup_pow_of_two(llc_size);

	/* one bit per page in rounded up table */
	llc_bits = llc_size / PAGE_SIZE;
	table_bits = table_size / PAGE_SIZE;
	wss->pages_mask = table_bits - 1;
	wss->num_entries = table_bits / BITS_PER_LONG;
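	/*
	 * Example (assuming 4 KiB pages): a 30 MiB LLC rounds up to a
	 * 32 MiB table, i.e. 8192 table bits, 128 longs of bitmap, and
	 * a pages_mask of 0x1fff.
	 */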

	wss->threshold = (llc_bits * wss_threshold) / 100;
	if (wss->threshold == 0)
		wss->threshold = 1;

	wss->clean_period = wss_clean_period;
	atomic_set(&wss->clean_counter, wss_clean_period);

	wss->entries = kcalloc_node(wss->num_entries, sizeof(*wss->entries),
				    GFP_KERNEL, node);
	if (!wss->entries) {
		rvt_wss_exit(rdi);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Advance the clean counter.  When the clean period has expired,
 * clean an entry.
 *
 * This is implemented in atomics to avoid locking.  Because multiple
 * variables are involved, it can be racy which can lead to slightly
 * inaccurate information.  Since this is only a heuristic, this is
 * OK.  Any inaccuracies will clean themselves out as the counter
 * advances.  That said, it is unlikely the entry clean operation will
 * race - the next possible racer will not start until the next clean
 * period.
 *
 * The clean counter is implemented as a decrement to zero.  When zero
 * is reached an entry is cleaned.
 */
static void wss_advance_clean_counter(struct rvt_wss *wss)
{
	int entry;
	int weight;
	unsigned long bits;

	/* become the cleaner if we decrement the counter to zero */
	if (atomic_dec_and_test(&wss->clean_counter)) {
		/*
		 * Set, not add, the clean period.  This avoids an issue
		 * where the counter could decrement below the clean period.
		 * Doing a set can result in lost decrements, slowing the
		 * clean advance.  Since this is a heuristic, this possible
		 * slowdown is OK.
		 *
		 * An alternative is to loop, advancing the counter by a
		 * clean period until the result is > 0. However, this could
		 * lead to several threads keeping another in the clean loop.
		 * This could be mitigated by limiting the number of times
		 * we stay in the loop.
		 */
		atomic_set(&wss->clean_counter, wss->clean_period);

		/*
		 * Uniquely grab the entry to clean and move to next.
		 * The current entry is always the lower bits of
		 * wss.clean_entry.  The table size, wss.num_entries,
		 * is always a power-of-2.
		 */
		entry = (atomic_inc_return(&wss->clean_entry) - 1)
			& (wss->num_entries - 1);

		/* clear the entry and count the bits */
		bits = xchg(&wss->entries[entry], 0);
		weight = hweight64((u64)bits);
		/* only adjust the contended total count if needed */
		if (weight)
			atomic_sub(weight, &wss->total_count);
	}
}

/*
 * Insert the given address into the working set array.
 */
static void wss_insert(struct rvt_wss *wss, void *address)
{
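	/*
	 * Map the address to a page number, then to a (word, bit) pair
	 * in the bitmap; e.g. (assuming 64-bit longs) page 130 lands in
	 * entries[2], bit 2.
	 */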
	u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss->pages_mask;
	u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
	u32 nr = page & (BITS_PER_LONG - 1);

	if (!test_and_set_bit(nr, &wss->entries[entry]))
		atomic_inc(&wss->total_count);

	wss_advance_clean_counter(wss);
}

/*
 * Is the working set larger than the threshold?
 */
static inline bool wss_exceeds_threshold(struct rvt_wss *wss)
{
	return atomic_read(&wss->total_count) >= wss->threshold;
}

static void get_map_page(struct rvt_qpn_table *qpt,
			 struct rvt_qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/**
 * init_qpn_table - initialize the QP number table for a device
 * @rdi: rvt dev struct
 * @qpt: the QPN table
 */
static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
{
	u32 offset, i;
	struct rvt_qpn_map *map;
	int ret = 0;

	if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start))
		return -EINVAL;

	spin_lock_init(&qpt->lock);

	qpt->last = rdi->dparms.qpn_start;
	qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;

	/*
	 * Drivers may want some QPs beyond what we need for verbs; let them
	 * use our qpn table, no need for two.  Go ahead and mark the bitmaps
	 * for those.  The reserved range must be *after* the range which
	 * verbs will pick from.
	 */

	/* Figure out number of bit maps needed before reserved range */
	qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;

	/* This should always be zero */
	offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;

	/* Starting with the first reserved bit map */
	map = &qpt->map[qpt->nmaps];

	rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
		    rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
	for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
		if (!map->page) {
			get_map_page(qpt, map);
			if (!map->page) {
				ret = -ENOMEM;
				break;
			}
		}
		set_bit(offset, map->page);
		offset++;
		if (offset == RVT_BITS_PER_PAGE) {
			/* next page */
			qpt->nmaps++;
			map++;
			offset = 0;
		}
	}
	return ret;
}

/**
 * free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
static void free_qpn_table(struct rvt_qpn_table *qpt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		free_page((unsigned long)qpt->map[i].page);
}

/**
 * rvt_driver_qp_init - Init driver qp resources
 * @rdi: rvt dev structure
 *
 * Return: 0 on success
 */
int rvt_driver_qp_init(struct rvt_dev_info *rdi)
{
	int i;
	int ret = -ENOMEM;

	if (!rdi->dparms.qp_table_size)
		return -EINVAL;

	/*
	 * If driver is not doing any QP allocation then make sure it is
	 * providing the necessary QP functions.
	 */
	if (!rdi->driver_f.free_all_qps ||
	    !rdi->driver_f.qp_priv_alloc ||
	    !rdi->driver_f.qp_priv_free ||
	    !rdi->driver_f.notify_qp_reset ||
	    !rdi->driver_f.notify_restart_rc)
		return -EINVAL;

	/* allocate parent object */
	rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
				   rdi->dparms.node);
	if (!rdi->qp_dev)
		return -ENOMEM;

	/* allocate hash table */
	rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
	rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
	rdi->qp_dev->qp_table =
		kmalloc_array_node(rdi->qp_dev->qp_table_size,
			     sizeof(*rdi->qp_dev->qp_table),
			     GFP_KERNEL, rdi->dparms.node);
	if (!rdi->qp_dev->qp_table)
		goto no_qp_table;

	for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
		RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);

	spin_lock_init(&rdi->qp_dev->qpt_lock);

	/* initialize qpn map */
	if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
		goto fail_table;

	spin_lock_init(&rdi->n_qps_lock);

	return 0;

fail_table:
	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);

no_qp_table:
	kfree(rdi->qp_dev);

	return ret;
}

/**
 * rvt_free_qp_cb - callback function to reset a qp
 * @qp: the qp to reset
 * @v: a 64-bit value
 *
 * This function resets the qp and removes it from the
 * qp hash table.
 */
static void rvt_free_qp_cb(struct rvt_qp *qp, u64 v)
{
	unsigned int *qp_inuse = (unsigned int *)v;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	/* Reset the qp and remove it from the qp hash list */
	rvt_reset_qp(rdi, qp, qp->ibqp.qp_type);

	/* Increment the qp_inuse count */
	(*qp_inuse)++;
}

/**
 * rvt_free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 * Return the number of QPs still in use.
 */
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
{
	unsigned int qp_inuse = 0;

	qp_inuse += rvt_mcast_tree_empty(rdi);

	rvt_qp_iter(rdi, (u64)&qp_inuse, rvt_free_qp_cb);

	return qp_inuse;
}

/**
 * rvt_qp_exit - clean up qps on device exit
 * @rdi: rvt dev structure
 *
 * Check for qp leaks and free resources.
 */
void rvt_qp_exit(struct rvt_dev_info *rdi)
{
	u32 qps_inuse = rvt_free_all_qps(rdi);

	if (qps_inuse)
		rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
			   qps_inuse);

	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);
	kfree(rdi->qp_dev);
}

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
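	/*
	 * e.g. with 4 KiB pages (32768 bits per map page), bit 5 of
	 * map[1] yields QPN 32773.
	 */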
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

/**
 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
 *	       IB_QPT_SMI/IB_QPT_GSI
 * @rdi: rvt device info structure
 * @qpt: queue pair number table pointer
 * @type: the QP type
 * @port_num: IB port number, 1 based, comes from core
 * @exclude_prefix: prefix of special queue pair number being allocated
 *
 * Return: The queue pair number
 */
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		     enum ib_qp_type type, u8 port_num, u8 exclude_prefix)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;
	u32 max_qpn = exclude_prefix == RVT_AIP_QP_PREFIX ?
		RVT_AIP_QPN_MAX : RVT_QPN_MAX;

	if (rdi->driver_f.alloc_qpn)
		return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port_num - 1));
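		/*
		 * Two flag bits per port: the even bit of the pair marks
		 * the SMI QP (QP0), the odd bit the GSI QP (QP1); e.g.
		 * port 2's GSI QP maps to bit 3.
		 */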
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

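	/*
	 * Advance past the last QPN handed out; on overflow wrap back to
	 * the low QPNs with bit 0 inverted.
	 */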
	qpn = qpt->last + qpt->incr;
	if (qpn >= max_qpn)
		qpn = qpt->incr | ((qpt->last & 1) ^ 1);
	/* offset carries bit 0 */
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset += qpt->incr;
			/*
			 * This qpn might be bogus if offset >= BITS_PER_PAGE.
			 * That is OK.  It gets re-assigned below.
			 */
			qpn = mk_qpn(qpt, map, offset);
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else {
			map = &qpt->map[0];
			/* wrap to first map page, invert bit 0 */
			offset = qpt->incr | ((offset & 1) ^ 1);
		}
		/* there can be no set bits in low-order QoS bits */
		WARN_ON(rdi->dparms.qos_shift > 1 &&
			offset & ((BIT(rdi->dparms.qos_shift - 1) - 1) << 1));
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

/**
 * rvt_clear_mr_refs - Drop held mr refs
 * @qp: rvt qp data structure
 * @clr_sends: Whether to clear the send side or not
 */
static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
	unsigned n;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
		rvt_put_ss(&qp->s_rdma_read_sge);

	rvt_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);

			rvt_put_qp_swqe(qp, wqe);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
			smp_wmb(); /* see qp_set_savail */
		}
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	for (n = 0; qp->s_ack_queue && n < rvt_max_atomic(rdi); n++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[n];

		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}

/**
 * rvt_swqe_has_lkey - return true if lkey is used by swqe
 * @wqe - the send wqe
 * @lkey - the lkey
 *
 * Test the swqe for using lkey
 */
static bool rvt_swqe_has_lkey(struct rvt_swqe *wqe, u32 lkey)
{
	int i;

	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct rvt_sge *sge = &wqe->sg_list[i];

		if (rvt_mr_has_lkey(sge->mr, lkey))
			return true;
	}
	return false;
}

/**
 * rvt_qp_sends_has_lkey - return true if qp sends use lkey
 * @qp - the rvt_qp
 * @lkey - the lkey
 */
static bool rvt_qp_sends_has_lkey(struct rvt_qp *qp, u32 lkey)
{
	u32 s_last = qp->s_last;

	while (s_last != qp->s_head) {
		struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, s_last);

		if (rvt_swqe_has_lkey(wqe, lkey))
			return true;

		if (++s_last >= qp->s_size)
			s_last = 0;
	}
	if (qp->s_rdma_mr)
		if (rvt_mr_has_lkey(qp->s_rdma_mr, lkey))
			return true;
	return false;
}

/**
 * rvt_qp_acks_has_lkey - return true if acks have lkey
 * @qp - the qp
 * @lkey - the lkey
 */
static bool rvt_qp_acks_has_lkey(struct rvt_qp *qp, u32 lkey)
{
	int i;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	for (i = 0; qp->s_ack_queue && i < rvt_max_atomic(rdi); i++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[i];

		if (rvt_mr_has_lkey(e->rdma_sge.mr, lkey))
			return true;
	}
	return false;
}

/*
 * rvt_qp_mr_clean - clean up remote ops for lkey
 * @qp - the qp
 * @lkey - the lkey that is being de-registered
 *
 * This routine checks if the lkey is being used by
 * the qp.
 *
 * If so, the qp is put into an error state to eliminate
 * any references from the qp.
 */
void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey)
{
	bool lastwqe = false;

	if (qp->ibqp.qp_type == IB_QPT_SMI ||
	    qp->ibqp.qp_type == IB_QPT_GSI)
		/* avoid special QPs */
		return;
	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto check_lwqe;

	if (rvt_ss_has_lkey(&qp->r_sge, lkey) ||
	    rvt_qp_sends_has_lkey(qp, lkey) ||
	    rvt_qp_acks_has_lkey(qp, lkey))
		lastwqe = rvt_error_qp(qp, IB_WC_LOC_PROT_ERR);
check_lwqe:
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}

/**
 * rvt_remove_qp - remove qp from table
 * @rdi: rvt dev struct
 * @qp: qp to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive routine.
 */
static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
	unsigned long flags;
	int removed = 1;

	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

	if (rcu_dereference_protected(rvp->qp[0],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[0], NULL);
	} else if (rcu_dereference_protected(rvp->qp[1],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[1], NULL);
	} else {
		struct rvt_qp *q;
		struct rvt_qp __rcu **qpp;

		removed = 0;
		qpp = &rdi->qp_dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
			lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
			qpp = &q->next) {
			if (q == qp) {
				RCU_INIT_POINTER(*qpp,
				     rcu_dereference_protected(qp->next,
				     lockdep_is_held(&rdi->qp_dev->qpt_lock)));
				removed = 1;
				trace_rvt_qpremove(qp, n);
				break;
			}
		}
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
	if (removed) {
		synchronize_rcu();
		rvt_put_qp(qp);
	}
}

/**
 * rvt_alloc_rq - allocate memory for user or kernel buffer
 * @rq: receive queue data structure
 * @size: number of request queue entries
 * @node: The NUMA node
 * @udata: True if user data is available, false otherwise
 *
 * Return: -ENOMEM if memory allocation failed, 0 otherwise.
 * This function is used by both shared receive
 * queues and non-shared receive queues to allocate
 * memory.
 */
int rvt_alloc_rq(struct rvt_rq *rq, u32 size, int node,
		 struct ib_udata *udata)
{
	if (udata) {
		rq->wq = vmalloc_user(sizeof(struct rvt_rwq) + size);
		if (!rq->wq)
			goto bail;
		/* need kwq with no buffers */
		rq->kwq = kzalloc_node(sizeof(*rq->kwq), GFP_KERNEL, node);
		if (!rq->kwq)
			goto bail;
		rq->kwq->curr_wq = rq->wq->wq;
	} else {
		/* need kwq with buffers */
		rq->kwq =
			vzalloc_node(sizeof(struct rvt_krwq) + size, node);
		if (!rq->kwq)
			goto bail;
		rq->kwq->curr_wq = rq->kwq->wq;
	}

	spin_lock_init(&rq->kwq->p_lock);
	spin_lock_init(&rq->kwq->c_lock);
	return 0;
bail:
	rvt_free_rq(rq);
	return -ENOMEM;
}

/**
 * rvt_init_qp - initialize the QP state to the reset state
 * @rdi: rvt dev struct
 * @qp: the QP to init or reinit
 * @type: the QP type
 *
 * This function is called from both rvt_create_qp() and
 * rvt_reset_qp().  The difference is that the reset path takes
 * the necessary locks to protect against concurrent access.
 */
static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			enum ib_qp_type type)
{
	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_draining = 0;
	qp->s_next_psn = 0;
	qp->s_last_psn = 0;
	qp->s_sending_psn = 0;
	qp->s_sending_hpsn = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_aflags = 0;
	qp->r_flags = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_acked = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_acked_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	qp->r_sge.num_sge = 0;
	atomic_set(&qp->s_reserved_used, 0);
}

/**
 * _rvt_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 * @type: the QP type
 *
 * r_lock, s_hlock, and s_lock are required to be held by the caller
 */
static void _rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			  enum ib_qp_type type)
	__must_hold(&qp->s_lock)
	__must_hold(&qp->s_hlock)
	__must_hold(&qp->r_lock)
{
	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_hlock);
	lockdep_assert_held(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;

		/* Let drivers flush their waitlist */
		rdi->driver_f.flush_qp_waiters(qp);
		rvt_stop_rc_timers(qp);
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
		spin_unlock(&qp->s_lock);
		spin_unlock(&qp->s_hlock);
		spin_unlock_irq(&qp->r_lock);

		/* Stop the send queue and the retry timer */
		rdi->driver_f.stop_send_queue(qp);
		rvt_del_timers_sync(qp);
		/* Wait for things to stop */
		rdi->driver_f.quiesce_qp(qp);

		/* take the qp out of the hash and wait for it to be unused */
		rvt_remove_qp(rdi, qp);

		/* grab the lock b/c it was locked at call time */
		spin_lock_irq(&qp->r_lock);
		spin_lock(&qp->s_hlock);
		spin_lock(&qp->s_lock);

		rvt_clear_mr_refs(qp, 1);
		/*
		 * Let the driver do any tear down or re-init it needs to for
		 * a qp that has been reset
		 */
		rdi->driver_f.notify_qp_reset(qp);
	}
	rvt_init_qp(rdi, qp, type);
	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_hlock);
	lockdep_assert_held(&qp->s_lock);
}

/**
 * rvt_reset_qp - initialize the QP state to the reset state
 * @rdi: the device info
 * @qp: the QP to reset
 * @type: the QP type
 *
 * This is the wrapper function to acquire the r_lock, s_hlock, and s_lock
 * before calling _rvt_reset_qp().
 */
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			 enum ib_qp_type type)
{
	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);
	_rvt_reset_qp(rdi, qp, type);
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
}

/**
 * rvt_free_qpn - Free a qpn from the bit map
 * @qpt: QP table
 * @qpn: queue pair number to free
 */
static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
	struct rvt_qpn_map *map;

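	/* AIP QPNs carry a prefix; strip it to index the plain QPN bitmap */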
	if ((qpn & RVT_AIP_QP_PREFIX_MASK) == RVT_AIP_QP_BASE)
		qpn &= RVT_AIP_QP_SUFFIX;

	map = qpt->map + (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}

/**
 * get_allowed_ops - Given a QP type return the appropriate allowed OP
 * @type: valid, supported, QP type
 */
static u8 get_allowed_ops(enum ib_qp_type type)
{
	return type == IB_QPT_RC ? IB_OPCODE_RC : type == IB_QPT_UC ?
		IB_OPCODE_UC : IB_OPCODE_UD;
}

/**
 * free_ud_wq_attr - Clean up AH attribute cache for UD QPs
 * @qp: Valid QP with allowed_ops set
 *
 * The rvt_swqe data structure being used is a union, so this is
 * only valid for UD QPs.
 */
static void free_ud_wq_attr(struct rvt_qp *qp)
{
	struct rvt_swqe *wqe;
	int i;

	for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) {
		wqe = rvt_get_swqe_ptr(qp, i);
		kfree(wqe->ud_wr.attr);
		wqe->ud_wr.attr = NULL;
	}
}

/**
 * alloc_ud_wq_attr - AH attribute cache for UD QPs
 * @qp: Valid QP with allowed_ops set
 * @node: Numa node for allocation
 *
 * The rvt_swqe data structure being used is a union, so this is
 * only valid for UD QPs.
 */
static int alloc_ud_wq_attr(struct rvt_qp *qp, int node)
{
	struct rvt_swqe *wqe;
	int i;

	for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) {
		wqe = rvt_get_swqe_ptr(qp, i);
		wqe->ud_wr.attr = kzalloc_node(sizeof(*wqe->ud_wr.attr),
					       GFP_KERNEL, node);
		if (!wqe->ud_wr.attr) {
			free_ud_wq_attr(qp);
			return -ENOMEM;
		}
	}

	return 0;
}

/**
 * rvt_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Queue pair creation is mostly an rvt issue. However, drivers have their own
 * unique idea of what queue pair numbers mean. For instance there is a reserved
 * range for PSM.
 *
 * Return: the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct rvt_qp *qp;
	int err;
	struct rvt_swqe *swq = NULL;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret = ERR_PTR(-ENOMEM);
	struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
	void *priv = NULL;
	size_t sqsize;
	u8 exclude_prefix = 0;

	if (!rdi)
		return ERR_PTR(-EINVAL);

	if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
	    init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
	    (init_attr->create_flags &&
	     init_attr->create_flags != IB_QP_CREATE_NETDEV_USE))
		return ERR_PTR(-EINVAL);

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge >
		    rdi->dparms.props.max_recv_sge ||
		    init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
			return ERR_PTR(-EINVAL);

		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0)
			return ERR_PTR(-EINVAL);
	}
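	/*
	 * The extra slot (+1) keeps a full send ring distinguishable
	 * from an empty one; reserved_operations adds driver-reserved
	 * entries on top of the requested WRs.
	 */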
	sqsize =
		init_attr->cap.max_send_wr + 1 +
		rdi->dparms.reserved_operations;
	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt)
			return ERR_PTR(-EINVAL);
		fallthrough;
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
		sz = struct_size(swq, sg_list, init_attr->cap.max_send_sge);
		swq = vzalloc_node(array_size(sz, sqsize), rdi->dparms.node);
		if (!swq)
			return ERR_PTR(-ENOMEM);

		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc_node(sz + sg_list_sz, GFP_KERNEL,
				  rdi->dparms.node);
		if (!qp)
			goto bail_swq;
		qp->allowed_ops = get_allowed_ops(init_attr->qp_type);

		RCU_INIT_POINTER(qp->next, NULL);
		if (init_attr->qp_type == IB_QPT_RC) {
			qp->s_ack_queue =
				kcalloc_node(rvt_max_atomic(rdi),
					     sizeof(*qp->s_ack_queue),
					     GFP_KERNEL,
					     rdi->dparms.node);
			if (!qp->s_ack_queue)
				goto bail_qp;
		}
		/* initialize timers needed for rc qp */
		timer_setup(&qp->s_timer, rvt_rc_timeout, 0);
		hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		qp->s_rnr_timer.function = rvt_rc_rnr_retry;

		/*
		 * Driver needs to set up its private QP structure and do any
		 * initialization that is needed.
		 */
		priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
		if (IS_ERR(priv)) {
			ret = priv;
			goto bail_qp;
		}
		qp->priv = priv;
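		/*
		 * IBTA local ACK timeout is 4.096 us * 2^timeout; the
		 * expression below computes it in whole microseconds
		 * before converting to jiffies.
		 */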
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
		if (init_attr->srq) {
			sz = 0;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct rvt_rwqe);
			err = rvt_alloc_rq(&qp->r_rq, qp->r_rq.size * sz,
					   rdi->dparms.node, udata);
			if (err) {
				ret = ERR_PTR(err);
				goto bail_driver_priv;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_hlock);
		spin_lock_init(&qp->s_lock);
		atomic_set(&qp->refcount, 0);
		atomic_set(&qp->local_ops_pending, 0);
		init_waitqueue_head(&qp->wait);
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = sqsize;
		qp->s_avail = init_attr->cap.max_send_wr;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = RVT_S_SIGNAL_REQ_WR;
		err = alloc_ud_wq_attr(qp, rdi->dparms.node);
		if (err) {
			ret = (ERR_PTR(err));
			goto bail_rq_rvt;
		}

		if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
			exclude_prefix = RVT_AIP_QP_PREFIX;

		err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
				init_attr->qp_type,
				init_attr->port_num,
				exclude_prefix);
		if (err < 0) {
			ret = ERR_PTR(err);
			goto bail_rq_wq;
		}
		qp->ibqp.qp_num = err;
		if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
			qp->ibqp.qp_num |= RVT_AIP_QP_BASE;
		qp->port_num = init_attr->port_num;
		rvt_init_qp(rdi, qp, init_attr->qp_type);
		if (rdi->driver_f.qp_priv_init) {
			err = rdi->driver_f.qp_priv_init(rdi, qp, init_attr);
			if (err) {
				ret = ERR_PTR(err);
				goto bail_rq_wq;
			}
		}
		break;

	default:
		/* Don't support raw QPs */
		return ERR_PTR(-EOPNOTSUPP);
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_qpn;
			}
		} else {
			u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;

			qp->ip = rvt_create_mmap_info(rdi, s, udata,
						      qp->r_rq.wq);
			if (IS_ERR(qp->ip)) {
				ret = ERR_CAST(qp->ip);
				goto bail_qpn;
			}

			err = ib_copy_to_udata(udata, &qp->ip->offset,
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
		qp->pid = current->pid;
	}

	spin_lock(&rdi->n_qps_lock);
	if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
		spin_unlock(&rdi->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	rdi->n_qps_allocated++;
	/*
	 * Maintain a busy_jiffies variable that will be added to the timeout
	 * period in mod_retry_timer and add_retry_timer. This busy jiffies
	 * is scaled by the number of rc qps created for the device to reduce
	 * the number of timeouts occurring when there is a large number of
	 * qps. busy_jiffies is incremented every rc qp scaling interval.
	 * The scaling interval is selected based on extensive performance
	 * evaluation of targeted workloads.
	 */
	if (init_attr->qp_type == IB_QPT_RC) {
		rdi->n_rc_qps++;
		rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
	}
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&rdi->pending_lock);
		list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	ret = &qp->ibqp;

	return ret;

bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, rvt_release_mmap_info);

bail_qpn:
	rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

bail_rq_wq:
	free_ud_wq_attr(qp);

bail_rq_rvt:
	rvt_free_rq(&qp->r_rq);

bail_driver_priv:
	rdi->driver_f.qp_priv_free(rdi, qp);

bail_qp:
	kfree(qp->s_ack_queue);
	kfree(qp);

bail_swq:
	vfree(swq);

	return ret;
}

/**
 * rvt_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 *
 * Return: true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
{
	struct ib_wc wc;
	int ret = 0;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_lock);
	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
		qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;

	rdi->driver_f.notify_error_qp(qp);

	/* Schedule the sending tasklet to drain the send work queue. */
	if (READ_ONCE(qp->s_last) != qp->s_head)
		rdi->driver_f.schedule_send(qp);

	rvt_clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.kwq) {
		u32 head;
		u32 tail;
		struct rvt_rwq *wq = NULL;
		struct rvt_krwq *kwq = NULL;

		spin_lock(&qp->r_rq.kwq->c_lock);
		/* qp->ip is used to validate whether there is a user buffer mmapped */
		if (qp->ip) {
			wq = qp->r_rq.wq;
			head = RDMA_READ_UAPI_ATOMIC(wq->head);
			tail = RDMA_READ_UAPI_ATOMIC(wq->tail);
		} else {
			kwq = qp->r_rq.kwq;
			head = kwq->head;
			tail = kwq->tail;
		}
		/* sanity check pointers before trusting them */
		if (head >= qp->r_rq.size)
			head = 0;
		if (tail >= qp->r_rq.size)
			tail = 0;
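		/* flush every outstanding RWQE to the CQ with FLUSH_ERR status */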
		while (tail != head) {
			wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
		}
		if (qp->ip)
			RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail);
		else
			kwq->tail = tail;
		spin_unlock(&qp->r_rq.kwq->c_lock);
	} else if (qp->ibqp.event_handler) {
		ret = 1;
	}

bail:
	return ret;
}
EXPORT_SYMBOL(rvt_error_qp);

/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	unsigned long flags;

	rvt_get_qp(qp);
	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

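	/* QP0/QP1 live in dedicated per-port slots; all other QPNs are hashed */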
	if (qp->ibqp.qp_num <= 1) {
		rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
	} else {
		u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);

		qp->next = rdi->qp_dev->qp_table[n];
		rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
		trace_rvt_qpinsert(qp, n);
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
}

/**
 * rvt_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Return: 0 on success, otherwise returns an errno.
 */
int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int pmtu = 0; /* for gcc warning only */
	int opa_ah;

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
	opa_ah = rdma_cap_opa_ah(ibqp->device, qp->port_num);

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask))
		goto inval;

	if (rdi->driver_f.check_modify_qp &&
	    rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (opa_ah) {
			if (rdma_ah_get_dlid(&attr->ah_attr) >=
				opa_get_mcast_base(OPA_MCAST_NR))
				goto inval;
		} else {
			if (rdma_ah_get_dlid(&attr->ah_attr) >=
				be16_to_cpu(IB_MULTICAST_LID_BASE))
				goto inval;
		}

		if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (opa_ah) {
			if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
				opa_get_mcast_base(OPA_MCAST_NR))
				goto inval;
		} else {
			if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
				be16_to_cpu(IB_MULTICAST_LID_BASE))
				goto inval;
		}

		if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= rvt_get_npkeys(rdi))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > RVT_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values.  OK to set greater
	 * than the active mtu (or even the max_cap, if we have tuned
	 * that to a small mtu).  We'll set qp->path_mtu
	 * to the lesser of requested attribute mtu and active,
	 * for packetizing messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
		if (pmtu < 0)
			goto inval;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else {
			goto inval;
		}
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET)
			_rvt_reset_qp(rdi, qp, ibqp->qp_type);
		break;

	case IB_QPS_RTR:
		/* Allow event to re-trigger if QP set to RTR more than once */
		qp->r_flags &= ~RVT_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		rdma_replace_ah_attr(&qp->remote_ah_attr, &attr->ah_attr);
		qp->s_srate = rdma_ah_get_static_rate(&attr->ah_attr);
		qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		rdma_replace_ah_attr(&qp->alt_ah_attr, &attr->alt_ah_attr);
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
			qp->s_pkey_index = qp->s_alt_pkey_index;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
		qp->log_pmtu = ilog2(qp->pmtu);
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
		qp->timeout_jiffies = rvt_timeout_to_jiffies(qp->timeout);
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	if (rdi->driver_f.modify_qp)
		rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);

	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		rvt_insert_qp(rdi, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	return 0;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
	return -EINVAL;
}

/**
 * rvt_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 *
 * Return: 0 on success.
 */
int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	rvt_reset_qp(rdi, qp, ibqp->qp_type);

	wait_event(qp->wait, !atomic_read(&qp->refcount));
	/* qpn is now available for use again */
	rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

	spin_lock(&rdi->n_qps_lock);
	rdi->n_qps_allocated--;
	if (qp->ibqp.qp_type == IB_QPT_RC) {
		rdi->n_rc_qps--;
		rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
	}
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, rvt_release_mmap_info);
	kvfree(qp->r_rq.kwq);
	rdi->driver_f.qp_priv_free(rdi, qp);
	kfree(qp->s_ack_queue);
	rdma_destroy_ah_attr(&qp->remote_ah_attr);
	rdma_destroy_ah_attr(&qp->alt_ah_attr);
	free_ud_wq_attr(qp);
	vfree(qp->s_wq);
	kfree(qp);
	return 0;
}

/**
 * rvt_query_qp - query an ibqp
 * @ibqp: IB qp to query
 * @attr: attr struct to fill in
 * @attr_mask: attr mask ignored
 * @init_attr: struct to fill in
 *
 * Return: always 0
 */
int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
	attr->path_mig_state = qp->s_mig_state;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
	attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1 -
		rdi->dparms.reserved_operations;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	attr->alt_ah_attr = qp->alt_ah_attr;
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = qp->s_alt_pkey_index;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = qp->port_num;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num =
		rdma_ah_get_port_num(&qp->alt_ah_attr);
	attr->alt_timeout = qp->alt_timeout;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = qp->port_num;
	return 0;
}
1822 
1823 /**
1824  * rvt_post_recv - post a receive on a QP
1825  * @ibqp: the QP to post the receive on
1826  * @wr: the WR to post
1827  * @bad_wr: the first bad WR is put here
1828  *
1829  * This may be called from interrupt context.
1830  *
1831  * Return: 0 on success otherwise errno
1832  */
1833 int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
1834 		  const struct ib_recv_wr **bad_wr)
1835 {
1836 	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1837 	struct rvt_krwq *wq = qp->r_rq.kwq;
1838 	unsigned long flags;
1839 	int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
1840 				!qp->ibqp.srq;
1841 
1842 	/* Check that state is OK to post receive. */
1843 	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
1844 		*bad_wr = wr;
1845 		return -EINVAL;
1846 	}
1847 
1848 	for (; wr; wr = wr->next) {
1849 		struct rvt_rwqe *wqe;
1850 		u32 next;
1851 		int i;
1852 
1853 		if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
1854 			*bad_wr = wr;
1855 			return -EINVAL;
1856 		}
1857 
1858 		spin_lock_irqsave(&qp->r_rq.kwq->p_lock, flags);
1859 		next = wq->head + 1;
1860 		if (next >= qp->r_rq.size)
1861 			next = 0;
1862 		if (next == READ_ONCE(wq->tail)) {
1863 			spin_unlock_irqrestore(&qp->r_rq.kwq->p_lock, flags);
1864 			*bad_wr = wr;
1865 			return -ENOMEM;
1866 		}
1867 		if (unlikely(qp_err_flush)) {
1868 			struct ib_wc wc;
1869 
1870 			memset(&wc, 0, sizeof(wc));
1871 			wc.qp = &qp->ibqp;
1872 			wc.opcode = IB_WC_RECV;
1873 			wc.wr_id = wr->wr_id;
1874 			wc.status = IB_WC_WR_FLUSH_ERR;
1875 			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
1876 		} else {
1877 			wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
1878 			wqe->wr_id = wr->wr_id;
1879 			wqe->num_sge = wr->num_sge;
1880 			for (i = 0; i < wr->num_sge; i++) {
1881 				wqe->sg_list[i].addr = wr->sg_list[i].addr;
1882 				wqe->sg_list[i].length = wr->sg_list[i].length;
1883 				wqe->sg_list[i].lkey = wr->sg_list[i].lkey;
1884 			}
1885 			/*
1886 			 * Make sure queue entry is written
1887 			 * before the head index.
1888 			 */
1889 			smp_store_release(&wq->head, next);
1890 		}
1891 		spin_unlock_irqrestore(&qp->r_rq.kwq->p_lock, flags);
1892 	}
1893 	return 0;
1894 }
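
/*
 * Example (an illustrative sketch, not rdmavt code): the receive queue
 * above is a single-producer ring.  The producer publishes an entry with
 * a release store to head; a consumer pairs that with an acquire load so
 * the entry contents are visible before the index moves.  A hypothetical
 * kernel-side consumer (consume() is a made-up helper) would look like:
 *
 *	u32 tail = wq->tail;
 *	u32 head = smp_load_acquire(&wq->head);
 *
 *	while (tail != head) {
 *		consume(rvt_get_rwqe_ptr(&qp->r_rq, tail));
 *		if (++tail >= qp->r_rq.size)
 *			tail = 0;
 *	}
 *	smp_store_release(&wq->tail, tail);
 */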
1895 
1896 /**
1897  * rvt_qp_valid_operation - validate post send wr request
1898  * @qp: the qp
1899  * @post_parms: the post send table for the driver
1900  * @wr: the work request
1901  *
1902  * The routine validates the operation based on the
1903  * validation table and returns the length of the operation,
1904  * which can extend beyond struct ib_send_wr.  Operation-
1905  * dependent flags key atomic operation validation.
1906  *
1907  * There is an exception for UD qps that validates the pd and
1908  * overrides the length to include the additional UD specific
1909  * length.
1910  *
1911  * Returns a negative error or the length of the work request
1912  * for building the swqe.
1913  */
1914 static inline int rvt_qp_valid_operation(
1915 	struct rvt_qp *qp,
1916 	const struct rvt_operation_params *post_parms,
1917 	const struct ib_send_wr *wr)
1918 {
1919 	int len;
1920 
1921 	if (wr->opcode >= RVT_OPERATION_MAX || !post_parms[wr->opcode].length)
1922 		return -EINVAL;
1923 	if (!(post_parms[wr->opcode].qpt_support & BIT(qp->ibqp.qp_type)))
1924 		return -EINVAL;
1925 	if ((post_parms[wr->opcode].flags & RVT_OPERATION_PRIV) &&
1926 	    ibpd_to_rvtpd(qp->ibqp.pd)->user)
1927 		return -EINVAL;
1928 	if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC_SGE &&
1929 	    (wr->num_sge == 0 ||
1930 	     wr->sg_list[0].length < sizeof(u64) ||
1931 	     wr->sg_list[0].addr & (sizeof(u64) - 1)))
1932 		return -EINVAL;
1933 	if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC &&
1934 	    !qp->s_max_rd_atomic)
1935 		return -EINVAL;
1936 	len = post_parms[wr->opcode].length;
1937 	/* UD specific */
1938 	if (qp->ibqp.qp_type != IB_QPT_UC &&
1939 	    qp->ibqp.qp_type != IB_QPT_RC) {
1940 		if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
1941 			return -EINVAL;
1942 		len = sizeof(struct ib_ud_wr);
1943 	}
1944 	return len;
1945 }
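
/*
 * Example (a sketch of a driver-supplied post_parms table, modeled on
 * what hfi1/qib register; the exact entries here are illustrative):
 *
 *	static const struct rvt_operation_params example_post_parms[] = {
 *	[IB_WR_RDMA_WRITE] = {
 *		.length = sizeof(struct ib_rdma_wr),
 *		.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
 *	},
 *	[IB_WR_ATOMIC_CMP_AND_SWP] = {
 *		.length = sizeof(struct ib_atomic_wr),
 *		.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
 *		.qpt_support = BIT(IB_QPT_RC),
 *	},
 *	};
 *
 * rvt_qp_valid_operation() indexes the table by wr->opcode: a zero
 * .length rejects the opcode outright, .qpt_support gates it by QP
 * type, and the ATOMIC flags key the alignment/SGE checks above.
 */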
1946 
1947 /**
1948  * rvt_qp_is_avail - determine queue capacity
1949  * @qp: the qp
1950  * @rdi: the rdmavt device
1951  * @reserved_op: is reserved operation
1952  *
1953  * This assumes the s_hlock is held but the s_last
1954  * qp variable is uncontrolled.
1955  *
1956  * For non reserved operations, the qp->s_avail
1957  * may be changed.
1958  *
1959  * The return value is zero or -ENOMEM.
1960  */
1961 static inline int rvt_qp_is_avail(
1962 	struct rvt_qp *qp,
1963 	struct rvt_dev_info *rdi,
1964 	bool reserved_op)
1965 {
1966 	u32 slast;
1967 	u32 avail;
1968 	u32 reserved_used;
1969 
1970 	/* see rvt_qp_wqe_unreserve() */
1971 	smp_mb__before_atomic();
1972 	if (unlikely(reserved_op)) {
1973 		/* see rvt_qp_wqe_unreserve() */
1974 		reserved_used = atomic_read(&qp->s_reserved_used);
1975 		if (reserved_used >= rdi->dparms.reserved_operations)
1976 			return -ENOMEM;
1977 		return 0;
1978 	}
1979 	/* non-reserved operations */
1980 	if (likely(qp->s_avail))
1981 		return 0;
1982 	/* See rvt_qp_complete_swqe() */
1983 	slast = smp_load_acquire(&qp->s_last);
1984 	if (qp->s_head >= slast)
1985 		avail = qp->s_size - (qp->s_head - slast);
1986 	else
1987 		avail = slast - qp->s_head;
1988 
1989 	reserved_used = atomic_read(&qp->s_reserved_used);
1990 	avail =  avail - 1 -
1991 		(rdi->dparms.reserved_operations - reserved_used);
1992 	/* ensure we don't assign a negative s_avail */
1993 	if ((s32)avail <= 0)
1994 		return -ENOMEM;
1995 	qp->s_avail = avail;
1996 	if (WARN_ON(qp->s_avail >
1997 		    (qp->s_size - 1 - rdi->dparms.reserved_operations)))
1998 		rvt_pr_err(rdi,
1999 			   "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
2000 			   qp->ibqp.qp_num, qp->s_size, qp->s_avail,
2001 			   qp->s_head, qp->s_tail, qp->s_cur,
2002 			   qp->s_acked, qp->s_last);
2003 	return 0;
2004 }
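
/*
 * Worked example for the math above (illustrative numbers): with
 * s_size = 16, reserved_operations = 2, s_head = 10, slast = 4 and one
 * reserved slot in use (reserved_used = 1):
 *
 *	avail = 16 - (10 - 4) = 10;
 *	avail = 10 - 1 - (2 - 1) = 8;
 *
 * so eight more non-reserved WQEs may be posted before -ENOMEM.
 */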
2005 
2006 /**
2007  * rvt_post_one_wr - post one RC, UC, or UD send work request
2008  * @qp: the QP to post on
2009  * @wr: the work request to send
 * @call_send: in/out flag; the driver's setup_wqe() may update it
2010  */
2011 static int rvt_post_one_wr(struct rvt_qp *qp,
2012 			   const struct ib_send_wr *wr,
2013 			   bool *call_send)
2014 {
2015 	struct rvt_swqe *wqe;
2016 	u32 next;
2017 	int i;
2018 	int j;
2019 	int acc;
2020 	struct rvt_lkey_table *rkt;
2021 	struct rvt_pd *pd;
2022 	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2023 	u8 log_pmtu;
2024 	int ret;
2025 	size_t cplen;
2026 	bool reserved_op;
2027 	int local_ops_delayed = 0;
2028 
2029 	BUILD_BUG_ON(IB_QPT_MAX >= (sizeof(u32) * BITS_PER_BYTE));
2030 
2031 	/* IB spec says that num_sge == 0 is OK. */
2032 	if (unlikely(wr->num_sge > qp->s_max_sge))
2033 		return -EINVAL;
2034 
2035 	ret = rvt_qp_valid_operation(qp, rdi->post_parms, wr);
2036 	if (ret < 0)
2037 		return ret;
2038 	cplen = ret;
2039 
2040 	/*
2041 	 * Local operations include fast register and local invalidate.
2042 	 * Fast register needs to be processed immediately because the
2043 	 * registered lkey may be used by following work requests and the
2044 	 * lkey needs to be valid at the time those requests are posted.
2045 	 * Local invalidate can be processed immediately if fencing is
2046 	 * not required and no previous local invalidate ops are pending.
2047 	 * Signaled local operations that have been processed immediately
2048 	 * need to have requests with "completion only" flags set posted
2049 	 * to the send queue in order to generate completions.
2050 	 */
2051 	if ((rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL)) {
2052 		switch (wr->opcode) {
2053 		case IB_WR_REG_MR:
2054 			ret = rvt_fast_reg_mr(qp,
2055 					      reg_wr(wr)->mr,
2056 					      reg_wr(wr)->key,
2057 					      reg_wr(wr)->access);
2058 			if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
2059 				return ret;
2060 			break;
2061 		case IB_WR_LOCAL_INV:
2062 			if ((wr->send_flags & IB_SEND_FENCE) ||
2063 			    atomic_read(&qp->local_ops_pending)) {
2064 				local_ops_delayed = 1;
2065 			} else {
2066 				ret = rvt_invalidate_rkey(
2067 					qp, wr->ex.invalidate_rkey);
2068 				if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
2069 					return ret;
2070 			}
2071 			break;
2072 		default:
2073 			return -EINVAL;
2074 		}
2075 	}
2076 
2077 	reserved_op = rdi->post_parms[wr->opcode].flags &
2078 			RVT_OPERATION_USE_RESERVE;
2079 	/* check for avail */
2080 	ret = rvt_qp_is_avail(qp, rdi, reserved_op);
2081 	if (ret)
2082 		return ret;
2083 	next = qp->s_head + 1;
2084 	if (next >= qp->s_size)
2085 		next = 0;
2086 
2087 	rkt = &rdi->lkey_table;
2088 	pd = ibpd_to_rvtpd(qp->ibqp.pd);
2089 	wqe = rvt_get_swqe_ptr(qp, qp->s_head);
2090 
2091 	/* cplen has length from above */
2092 	memcpy(&wqe->wr, wr, cplen);
2093 
2094 	wqe->length = 0;
2095 	j = 0;
2096 	if (wr->num_sge) {
2097 		struct rvt_sge *last_sge = NULL;
2098 
2099 		acc = wr->opcode >= IB_WR_RDMA_READ ?
2100 			IB_ACCESS_LOCAL_WRITE : 0;
2101 		for (i = 0; i < wr->num_sge; i++) {
2102 			u32 length = wr->sg_list[i].length;
2103 
2104 			if (length == 0)
2105 				continue;
2106 			ret = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j], last_sge,
2107 					  &wr->sg_list[i], acc);
2108 			if (unlikely(ret < 0))
2109 				goto bail_inval_free;
2110 			wqe->length += length;
2111 			if (ret)
2112 				last_sge = &wqe->sg_list[j];
2113 			j += ret;
2114 		}
2115 		wqe->wr.num_sge = j;
2116 	}
2117 
2118 	/*
2119 	 * Calculate and set SWQE PSN values prior to handing it off
2120 	 * to the driver's check routine. This gives the driver the
2121 	 * opportunity to adjust PSN values based on internal checks.
2122 	 */
2123 	log_pmtu = qp->log_pmtu;
2124 	if (qp->allowed_ops == IB_OPCODE_UD) {
2125 		struct rvt_ah *ah = rvt_get_swqe_ah(wqe);
2126 
2127 		log_pmtu = ah->log_pmtu;
2128 		rdma_copy_ah_attr(wqe->ud_wr.attr, &ah->attr);
2129 	}
2130 
2131 	if (rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL) {
2132 		if (local_ops_delayed)
2133 			atomic_inc(&qp->local_ops_pending);
2134 		else
2135 			wqe->wr.send_flags |= RVT_SEND_COMPLETION_ONLY;
2136 		wqe->ssn = 0;
2137 		wqe->psn = 0;
2138 		wqe->lpsn = 0;
2139 	} else {
2140 		wqe->ssn = qp->s_ssn++;
2141 		wqe->psn = qp->s_next_psn;
2142 		wqe->lpsn = wqe->psn +
2143 				(wqe->length ?
2144 					((wqe->length - 1) >> log_pmtu) :
2145 					0);
2146 	}
2147 
2148 	/* general part of wqe valid - allow for driver checks */
2149 	if (rdi->driver_f.setup_wqe) {
2150 		ret = rdi->driver_f.setup_wqe(qp, wqe, call_send);
2151 		if (ret < 0)
2152 			goto bail_inval_free_ref;
2153 	}
2154 
2155 	if (!(rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL))
2156 		qp->s_next_psn = wqe->lpsn + 1;
2157 
2158 	if (unlikely(reserved_op)) {
2159 		wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
2160 		rvt_qp_wqe_reserve(qp, wqe);
2161 	} else {
2162 		wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
2163 		qp->s_avail--;
2164 	}
2165 	trace_rvt_post_one_wr(qp, wqe, wr->num_sge);
2166 	smp_wmb(); /* see request builders */
2167 	qp->s_head = next;
2168 
2169 	return 0;
2170 
2171 bail_inval_free_ref:
2172 	if (qp->allowed_ops == IB_OPCODE_UD)
2173 		rdma_destroy_ah_attr(wqe->ud_wr.attr);
2174 bail_inval_free:
2175 	/* release mr holds */
2176 	while (j) {
2177 		struct rvt_sge *sge = &wqe->sg_list[--j];
2178 
2179 		rvt_put_mr(sge->mr);
2180 	}
2181 	return ret;
2182 }
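
/*
 * Worked PSN example for the lpsn computation above (illustrative
 * numbers): a 10000-byte send on a QP whose path MTU is 4096 bytes
 * (log_pmtu = 12) spans three packets, so
 *
 *	wqe->lpsn = wqe->psn + ((10000 - 1) >> 12) = wqe->psn + 2;
 *
 * and s_next_psn then advances to lpsn + 1 for the next request.
 */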
2183 
2184 /**
2185  * rvt_post_send - post a send on a QP
2186  * @ibqp: the QP to post the send on
2187  * @wr: the list of work requests to post
2188  * @bad_wr: the first bad WR is put here
2189  *
2190  * This may be called from interrupt context.
2191  *
2192  * Return: 0 on success else errno
2193  */
2194 int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
2195 		  const struct ib_send_wr **bad_wr)
2196 {
2197 	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
2198 	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
2199 	unsigned long flags = 0;
2200 	bool call_send;
2201 	unsigned nreq = 0;
2202 	int err = 0;
2203 
2204 	spin_lock_irqsave(&qp->s_hlock, flags);
2205 
2206 	/*
2207 	 * Ensure the QP state allows posting sends. If not, bail out early;
2208 	 * there is no need to re-check this for every WR we post.
2209 	 */
2210 	if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
2211 		spin_unlock_irqrestore(&qp->s_hlock, flags);
2212 		return -EINVAL;
2213 	}
2214 
2215 	/*
2216 	 * If the send queue is empty and we only have a single WR, just go
2217 	 * ahead and kick the send engine into gear. Otherwise we will always
2218 	 * just schedule the send to happen later.
2219 	 */
2220 	call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next;
2221 
2222 	for (; wr; wr = wr->next) {
2223 		err = rvt_post_one_wr(qp, wr, &call_send);
2224 		if (unlikely(err)) {
2225 			*bad_wr = wr;
2226 			goto bail;
2227 		}
2228 		nreq++;
2229 	}
2230 bail:
2231 	spin_unlock_irqrestore(&qp->s_hlock, flags);
2232 	if (nreq) {
2233 		/*
2234 		 * Only call do_send if there is exactly one packet, and the
2235 		 * driver said it was ok.
2236 		 */
2237 		if (nreq == 1 && call_send)
2238 			rdi->driver_f.do_send(qp);
2239 		else
2240 			rdi->driver_f.schedule_send_no_lock(qp);
2241 	}
2242 	return err;
2243 }
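
/*
 * Example (the caller's view; a minimal sketch with error handling
 * elided, and dma_addr/MY_COOKIE as stand-ins): rvt_post_send() is what
 * a ULP reaches through ib_post_send() on an rdmavt device:
 *
 *	struct ib_sge sge = {
 *		.addr = dma_addr,
 *		.length = len,
 *		.lkey = mr->lkey,
 *	};
 *	struct ib_send_wr swr = {
 *		.wr_id = MY_COOKIE,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *		.opcode = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *	const struct ib_send_wr *bad_wr;
 *	int ret = ib_post_send(ibqp, &swr, &bad_wr);
 *
 * With an empty queue and a single WR, call_send stays true and the
 * send engine runs right away; otherwise the send is scheduled.
 */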
2244 
2245 /**
2246  * rvt_post_srq_receive - post a receive on a shared receive queue
2247  * @ibsrq: the SRQ to post the receive on
2248  * @wr: the list of work requests to post
2249  * @bad_wr: A pointer to the first WR to cause a problem is put here
2250  *
2251  * This may be called from interrupt context.
2252  *
2253  * Return: 0 on success else errno
2254  */
2255 int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
2256 		      const struct ib_recv_wr **bad_wr)
2257 {
2258 	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
2259 	struct rvt_krwq *wq;
2260 	unsigned long flags;
2261 
2262 	for (; wr; wr = wr->next) {
2263 		struct rvt_rwqe *wqe;
2264 		u32 next;
2265 		int i;
2266 
2267 		if ((unsigned)wr->num_sge > srq->rq.max_sge) {
2268 			*bad_wr = wr;
2269 			return -EINVAL;
2270 		}
2271 
2272 		spin_lock_irqsave(&srq->rq.kwq->p_lock, flags);
2273 		wq = srq->rq.kwq;
2274 		next = wq->head + 1;
2275 		if (next >= srq->rq.size)
2276 			next = 0;
2277 		if (next == READ_ONCE(wq->tail)) {
2278 			spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags);
2279 			*bad_wr = wr;
2280 			return -ENOMEM;
2281 		}
2282 
2283 		wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
2284 		wqe->wr_id = wr->wr_id;
2285 		wqe->num_sge = wr->num_sge;
2286 		for (i = 0; i < wr->num_sge; i++) {
2287 			wqe->sg_list[i].addr = wr->sg_list[i].addr;
2288 			wqe->sg_list[i].length = wr->sg_list[i].length;
2289 			wqe->sg_list[i].lkey = wr->sg_list[i].lkey;
2290 		}
2291 		/* Make sure queue entry is written before the head index. */
2292 		smp_store_release(&wq->head, next);
2293 		spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags);
2294 	}
2295 	return 0;
2296 }
2297 
2298 /*
2299  * rvt uses the internal kernel struct as part of its ABI; for now, make
2300  * sure the kernel struct does not change layout. FIXME: rvt should never
2301  * cast the user struct to a kernel struct.
2302  */
2303 static struct ib_sge *rvt_cast_sge(struct rvt_wqe_sge *sge)
2304 {
2305 	BUILD_BUG_ON(offsetof(struct ib_sge, addr) !=
2306 		     offsetof(struct rvt_wqe_sge, addr));
2307 	BUILD_BUG_ON(offsetof(struct ib_sge, length) !=
2308 		     offsetof(struct rvt_wqe_sge, length));
2309 	BUILD_BUG_ON(offsetof(struct ib_sge, lkey) !=
2310 		     offsetof(struct rvt_wqe_sge, lkey));
2311 	return (struct ib_sge *)sge;
2312 }
2313 
2314 /*
2315  * Validate an RWQE and fill in the SGE state.
2316  * Return 1 if OK.
2317  */
2318 static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
2319 {
2320 	int i, j, ret;
2321 	struct ib_wc wc;
2322 	struct rvt_lkey_table *rkt;
2323 	struct rvt_pd *pd;
2324 	struct rvt_sge_state *ss;
2325 	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2326 
2327 	rkt = &rdi->lkey_table;
2328 	pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
2329 	ss = &qp->r_sge;
2330 	ss->sg_list = qp->r_sg_list;
2331 	qp->r_len = 0;
2332 	for (i = j = 0; i < wqe->num_sge; i++) {
2333 		if (wqe->sg_list[i].length == 0)
2334 			continue;
2335 		/* Check LKEY */
2336 		ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
2337 				  NULL, rvt_cast_sge(&wqe->sg_list[i]),
2338 				  IB_ACCESS_LOCAL_WRITE);
2339 		if (unlikely(ret <= 0))
2340 			goto bad_lkey;
2341 		qp->r_len += wqe->sg_list[i].length;
2342 		j++;
2343 	}
2344 	ss->num_sge = j;
2345 	ss->total_len = qp->r_len;
2346 	return 1;
2347 
2348 bad_lkey:
2349 	while (j) {
2350 		struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
2351 
2352 		rvt_put_mr(sge->mr);
2353 	}
2354 	ss->num_sge = 0;
2355 	memset(&wc, 0, sizeof(wc));
2356 	wc.wr_id = wqe->wr_id;
2357 	wc.status = IB_WC_LOC_PROT_ERR;
2358 	wc.opcode = IB_WC_RECV;
2359 	wc.qp = &qp->ibqp;
2360 	/* Signal solicited completion event. */
2361 	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
2362 	return 0;
2363 }
2364 
2365 /**
2366  * get_rvt_head - get the head index of the circular buffer
2367  * @rq: data structure for request queue entry
2368  * @ip: the mmap info pointer; non-NULL for user-mapped queues
2369  *
2370  * Return: head index value
2371  */
2372 static inline u32 get_rvt_head(struct rvt_rq *rq, void *ip)
2373 {
2374 	u32 head;
2375 
2376 	if (ip)
2377 		head = RDMA_READ_UAPI_ATOMIC(rq->wq->head);
2378 	else
2379 		head = rq->kwq->head;
2380 
2381 	return head;
2382 }
2383 
2384 /**
2385  * rvt_get_rwqe - copy the next RWQE into the QP's RWQE
2386  * @qp: the QP
2387  * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
2388  *
2389  * Return -1 if there is a local error, 0 if no RWQE is available,
2390  * otherwise return 1.
2391  *
2392  * Can be called from interrupt level.
2393  */
2394 int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
2395 {
2396 	unsigned long flags;
2397 	struct rvt_rq *rq;
2398 	struct rvt_krwq *kwq = NULL;
2399 	struct rvt_rwq *wq;
2400 	struct rvt_srq *srq;
2401 	struct rvt_rwqe *wqe;
2402 	void (*handler)(struct ib_event *, void *);
2403 	u32 tail;
2404 	u32 head;
2405 	int ret;
2406 	void *ip = NULL;
2407 
2408 	if (qp->ibqp.srq) {
2409 		srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
2410 		handler = srq->ibsrq.event_handler;
2411 		rq = &srq->rq;
2412 		ip = srq->ip;
2413 	} else {
2414 		srq = NULL;
2415 		handler = NULL;
2416 		rq = &qp->r_rq;
2417 		ip = qp->ip;
2418 	}
2419 
2420 	spin_lock_irqsave(&rq->kwq->c_lock, flags);
2421 	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
2422 		ret = 0;
2423 		goto unlock;
2424 	}
2425 	kwq = rq->kwq;
2426 	if (ip) {
2427 		wq = rq->wq;
2428 		tail = RDMA_READ_UAPI_ATOMIC(wq->tail);
2429 	} else {
2430 		tail = kwq->tail;
2431 	}
2432 
2433 	/* Validate tail before using it since it is user writable. */
2434 	if (tail >= rq->size)
2435 		tail = 0;
2436 
2437 	if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) {
2438 		head = get_rvt_head(rq, ip);
2439 		kwq->count = rvt_get_rq_count(rq, head, tail);
2440 	}
2441 	if (unlikely(kwq->count == 0)) {
2442 		ret = 0;
2443 		goto unlock;
2444 	}
2445 	/* Make sure entry is read after the count is read. */
2446 	smp_rmb();
2447 	wqe = rvt_get_rwqe_ptr(rq, tail);
2448 	/*
2449 	 * Even though we update the tail index in memory, the verbs
2450 	 * consumer is not supposed to post more entries until a
2451 	 * completion is generated.
2452 	 */
2453 	if (++tail >= rq->size)
2454 		tail = 0;
2455 	if (ip)
2456 		RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail);
2457 	else
2458 		kwq->tail = tail;
2459 	if (!wr_id_only && !init_sge(qp, wqe)) {
2460 		ret = -1;
2461 		goto unlock;
2462 	}
2463 	qp->r_wr_id = wqe->wr_id;
2464 
2465 	kwq->count--;
2466 	ret = 1;
2467 	set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
2468 	if (handler) {
2469 		/*
2470 		 * Validate head pointer value and compute
2471 		 * the number of remaining WQEs.
2472 		 */
2473 		if (kwq->count < srq->limit) {
2474 			kwq->count =
2475 				rvt_get_rq_count(rq,
2476 						 get_rvt_head(rq, ip), tail);
2477 			if (kwq->count < srq->limit) {
2478 				struct ib_event ev;
2479 
2480 				srq->limit = 0;
2481 				spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
2482 				ev.device = qp->ibqp.device;
2483 				ev.element.srq = qp->ibqp.srq;
2484 				ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
2485 				handler(&ev, srq->ibsrq.srq_context);
2486 				goto bail;
2487 			}
2488 		}
2489 	}
2490 unlock:
2491 	spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
2492 bail:
2493 	return ret;
2494 }
2495 EXPORT_SYMBOL(rvt_get_rwqe);
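
/*
 * Example of the count cache above (illustrative numbers): with 64
 * RWQEs posted, the first call computes kwq->count = 64 from the
 * (possibly user-writable) head, and roughly the next 48 calls only
 * decrement it; head is not re-read until count drops below
 * RVT_RWQ_COUNT_THRESHOLD, which keeps the hot path off a shared,
 * potentially user-mapped cacheline on most receives.
 */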
2496 
2497 /**
2498  * rvt_comm_est - handle trap with QP established
2499  * @qp: the QP
2500  */
2501 void rvt_comm_est(struct rvt_qp *qp)
2502 {
2503 	qp->r_flags |= RVT_R_COMM_EST;
2504 	if (qp->ibqp.event_handler) {
2505 		struct ib_event ev;
2506 
2507 		ev.device = qp->ibqp.device;
2508 		ev.element.qp = &qp->ibqp;
2509 		ev.event = IB_EVENT_COMM_EST;
2510 		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
2511 	}
2512 }
2513 EXPORT_SYMBOL(rvt_comm_est);
2514 
2515 void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
2516 {
2517 	unsigned long flags;
2518 	int lastwqe;
2519 
2520 	spin_lock_irqsave(&qp->s_lock, flags);
2521 	lastwqe = rvt_error_qp(qp, err);
2522 	spin_unlock_irqrestore(&qp->s_lock, flags);
2523 
2524 	if (lastwqe) {
2525 		struct ib_event ev;
2526 
2527 		ev.device = qp->ibqp.device;
2528 		ev.element.qp = &qp->ibqp;
2529 		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
2530 		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
2531 	}
2532 }
2533 EXPORT_SYMBOL(rvt_rc_error);
2534 
2535 /*
2536  *  rvt_rnr_tbl_to_usec - convert an ib_rvt_rnr_table index to usec
2537  *  @index: the index
2538  *  Return: the RNR timeout in usec for the given index
2539  */
2540 unsigned long rvt_rnr_tbl_to_usec(u32 index)
2541 {
2542 	return ib_rvt_rnr_table[(index & IB_AETH_CREDIT_MASK)];
2543 }
2544 EXPORT_SYMBOL(rvt_rnr_tbl_to_usec);
2545 
2546 static inline unsigned long rvt_aeth_to_usec(u32 aeth)
2547 {
2548 	return ib_rvt_rnr_table[(aeth >> IB_AETH_CREDIT_SHIFT) &
2549 				  IB_AETH_CREDIT_MASK];
2550 }
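
/*
 * Example (illustrative): the 5-bit RNR timeout code travels in the
 * AETH credit field, so the table index for a received aeth is
 *
 *	idx = (aeth >> IB_AETH_CREDIT_SHIFT) & IB_AETH_CREDIT_MASK;
 *
 * and rvt_aeth_to_usec() is simply ib_rvt_rnr_table[idx].
 * rvt_add_rnr_timer() below feeds that value, scaled to nanoseconds,
 * straight into an hrtimer.
 */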
2551 
2552 /*
2553  *  rvt_add_retry_timer_ext - add/start a retry timer
2554  *  @qp: the QP
2555  *  @shift: timeout shift to wait for multiple packets
2556  *  add a retry timer on the QP
2557  */
2558 void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift)
2559 {
2560 	struct ib_qp *ibqp = &qp->ibqp;
2561 	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
2562 
2563 	lockdep_assert_held(&qp->s_lock);
2564 	qp->s_flags |= RVT_S_TIMER;
2565 	/* 4.096 usec. * (1 << qp->timeout) */
2566 	qp->s_timer.expires = jiffies + rdi->busy_jiffies +
2567 			      (qp->timeout_jiffies << shift);
2568 	add_timer(&qp->s_timer);
2569 }
2570 EXPORT_SYMBOL(rvt_add_retry_timer_ext);
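
/*
 * Worked example (illustrative): with qp->timeout = 14 the base local
 * ACK timeout is 4.096 usec * (1 << 14), roughly 67 ms.  A driver
 * passing shift = 1 doubles the wait to cover two outstanding packets:
 *
 *	expires = jiffies + rdi->busy_jiffies + (timeout_jiffies << 1);
 *
 * where busy_jiffies adds slack that scales with the RC QP count.
 */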
2571 
2572 /**
2573  * rvt_add_rnr_timer - add/start an rnr timer on the QP
2574  * @qp: the QP
2575  * @aeth: aeth of RNR timeout, simulated aeth for loopback
2576  */
2577 void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
2578 {
2579 	u32 to;
2580 
2581 	lockdep_assert_held(&qp->s_lock);
2582 	qp->s_flags |= RVT_S_WAIT_RNR;
2583 	to = rvt_aeth_to_usec(aeth);
2584 	trace_rvt_rnrnak_add(qp, to);
2585 	hrtimer_start(&qp->s_rnr_timer,
2586 		      ns_to_ktime(1000 * to), HRTIMER_MODE_REL_PINNED);
2587 }
2588 EXPORT_SYMBOL(rvt_add_rnr_timer);
2589 
2590 /**
2591  * rvt_stop_rc_timers - stop all timers
2592  * @qp: the QP
2593  * stop any pending timers
2594  */
2595 void rvt_stop_rc_timers(struct rvt_qp *qp)
2596 {
2597 	lockdep_assert_held(&qp->s_lock);
2598 	/* Remove QP from all timers */
2599 	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
2600 		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
2601 		del_timer(&qp->s_timer);
2602 		hrtimer_try_to_cancel(&qp->s_rnr_timer);
2603 	}
2604 }
2605 EXPORT_SYMBOL(rvt_stop_rc_timers);
2606 
2607 /**
2608  * rvt_stop_rnr_timer - stop an rnr timer
2609  * @qp: the QP
2610  *
2611  * Stop an rnr timer, clearing the RVT_S_WAIT_RNR flag
2612  * if one was pending.
2613  */
2614 static void rvt_stop_rnr_timer(struct rvt_qp *qp)
2615 {
2616 	lockdep_assert_held(&qp->s_lock);
2617 	/* Remove QP from rnr timer */
2618 	if (qp->s_flags & RVT_S_WAIT_RNR) {
2619 		qp->s_flags &= ~RVT_S_WAIT_RNR;
2620 		trace_rvt_rnrnak_stop(qp, 0);
2621 	}
2622 }
2623 
2624 /**
2625  * rvt_del_timers_sync - wait for any timeout routines to exit
2626  * @qp: the QP
2627  */
2628 void rvt_del_timers_sync(struct rvt_qp *qp)
2629 {
2630 	del_timer_sync(&qp->s_timer);
2631 	hrtimer_cancel(&qp->s_rnr_timer);
2632 }
2633 EXPORT_SYMBOL(rvt_del_timers_sync);
2634 
2635 /*
2636  * This is called from s_timer for missing responses.
2637  */
2638 static void rvt_rc_timeout(struct timer_list *t)
2639 {
2640 	struct rvt_qp *qp = from_timer(qp, t, s_timer);
2641 	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2642 	unsigned long flags;
2643 
2644 	spin_lock_irqsave(&qp->r_lock, flags);
2645 	spin_lock(&qp->s_lock);
2646 	if (qp->s_flags & RVT_S_TIMER) {
2647 		struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
2648 
2649 		qp->s_flags &= ~RVT_S_TIMER;
2650 		rvp->n_rc_timeouts++;
2651 		del_timer(&qp->s_timer);
2652 		trace_rvt_rc_timeout(qp, qp->s_last_psn + 1);
2653 		if (rdi->driver_f.notify_restart_rc)
2654 			rdi->driver_f.notify_restart_rc(qp,
2655 							qp->s_last_psn + 1,
2656 							1);
2657 		rdi->driver_f.schedule_send(qp);
2658 	}
2659 	spin_unlock(&qp->s_lock);
2660 	spin_unlock_irqrestore(&qp->r_lock, flags);
2661 }
2662 
2663 /*
2664  * This is called from s_timer for RNR timeouts.
2665  */
2666 enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t)
2667 {
2668 	struct rvt_qp *qp = container_of(t, struct rvt_qp, s_rnr_timer);
2669 	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2670 	unsigned long flags;
2671 
2672 	spin_lock_irqsave(&qp->s_lock, flags);
2673 	rvt_stop_rnr_timer(qp);
2674 	trace_rvt_rnrnak_timeout(qp, 0);
2675 	rdi->driver_f.schedule_send(qp);
2676 	spin_unlock_irqrestore(&qp->s_lock, flags);
2677 	return HRTIMER_NORESTART;
2678 }
2679 EXPORT_SYMBOL(rvt_rc_rnr_retry);
2680 
2681 /**
2682  * rvt_qp_iter_init - initialize an iterator for QP iteration
2683  * @rdi: rvt devinfo
2684  * @v: u64 value
2685  * @cb: user-defined callback
2686  *
2687  * This returns an iterator suitable for iterating QPs
2688  * in the system.
2689  *
2690  * The @cb is a user-defined callback and @v is a 64-bit
2691  * value passed to and relevant for processing in the
2692  * @cb.  An example use case would be to alter QP processing
2693  * based on criteria not part of the rvt_qp.
2694  *
2695  * Use cases that require memory allocation to succeed
2696  * must preallocate appropriately.
2697  *
2698  * Return: a pointer to an rvt_qp_iter or NULL
2699  */
2700 struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
2701 				     u64 v,
2702 				     void (*cb)(struct rvt_qp *qp, u64 v))
2703 {
2704 	struct rvt_qp_iter *i;
2705 
2706 	i = kzalloc(sizeof(*i), GFP_KERNEL);
2707 	if (!i)
2708 		return NULL;
2709 
2710 	i->rdi = rdi;
2711 	/* number of special QPs (SMI/GSI) for device */
2712 	i->specials = rdi->ibdev.phys_port_cnt * 2;
2713 	i->v = v;
2714 	i->cb = cb;
2715 
2716 	return i;
2717 }
2718 EXPORT_SYMBOL(rvt_qp_iter_init);
2719 
2720 /**
2721  * rvt_qp_iter_next - return the next QP in iter
2722  * @iter: the iterator
2723  *
2724  * Fine grained QP iterator suitable for use
2725  * with debugfs seq_file mechanisms.
2726  *
2727  * Updates iter->qp with the current QP when the return
2728  * value is 0.
2729  *
2730  * Return: 0 - iter->qp is valid; 1 - no more QPs
2731  */
2732 int rvt_qp_iter_next(struct rvt_qp_iter *iter)
2733 	__must_hold(RCU)
2734 {
2735 	int n = iter->n;
2736 	int ret = 1;
2737 	struct rvt_qp *pqp = iter->qp;
2738 	struct rvt_qp *qp;
2739 	struct rvt_dev_info *rdi = iter->rdi;
2740 
2741 	/*
2742 	 * The approach is to consider the special qps
2743 	 * as additional table entries before the
2744 	 * real hash table.  Since the qp code sets
2745 	 * the qp->next hash link to NULL, this works just fine.
2746 	 *
2747 	 * iter->specials is 2 * # ports
2748 	 *
2749 	 * n = 0..iter->specials is the special qp indices
2750 	 *
2751 	 * n = iter->specials..rdi->qp_dev->qp_table_size+iter->specials are
2752 	 * the potential hash bucket entries
2753 	 *
2754 	 */
2755 	for (; n <  rdi->qp_dev->qp_table_size + iter->specials; n++) {
2756 		if (pqp) {
2757 			qp = rcu_dereference(pqp->next);
2758 		} else {
2759 			if (n < iter->specials) {
2760 				struct rvt_ibport *rvp;
2761 				int pidx;
2762 
2763 				pidx = n % rdi->ibdev.phys_port_cnt;
2764 				rvp = rdi->ports[pidx];
2765 				qp = rcu_dereference(rvp->qp[n & 1]);
2766 			} else {
2767 				qp = rcu_dereference(
2768 					rdi->qp_dev->qp_table[
2769 						(n - iter->specials)]);
2770 			}
2771 		}
2772 		pqp = qp;
2773 		if (qp) {
2774 			iter->qp = qp;
2775 			iter->n = n;
2776 			return 0;
2777 		}
2778 	}
2779 	return ret;
2780 }
2781 EXPORT_SYMBOL(rvt_qp_iter_next);
2782 
2783 /**
2784  * rvt_qp_iter - iterate all QPs
2785  * @rdi: rvt devinfo
2786  * @v: a 64-bit value
2787  * @cb: a callback
2788  *
2789  * This provides a way for iterating all QPs.
2790  *
2791  * The @cb is a user-defined callback and @v is a 64-bit
2792  * value passed to and relevant for processing in the
2793  * cb.  An example use case would be to alter QP processing
2794  * based on criteria not part of the rvt_qp.
2795  *
2796  * The code has an internal iterator to simplify
2797  * non seq_file use cases.
2798  */
2799 void rvt_qp_iter(struct rvt_dev_info *rdi,
2800 		 u64 v,
2801 		 void (*cb)(struct rvt_qp *qp, u64 v))
2802 {
2803 	int ret;
2804 	struct rvt_qp_iter i = {
2805 		.rdi = rdi,
2806 		.specials = rdi->ibdev.phys_port_cnt * 2,
2807 		.v = v,
2808 		.cb = cb
2809 	};
2810 
2811 	rcu_read_lock();
2812 	do {
2813 		ret = rvt_qp_iter_next(&i);
2814 		if (!ret) {
2815 			rvt_get_qp(i.qp);
2816 			rcu_read_unlock();
2817 			i.cb(i.qp, i.v);
2818 			rcu_read_lock();
2819 			rvt_put_qp(i.qp);
2820 		}
2821 	} while (!ret);
2822 	rcu_read_unlock();
2823 }
2824 EXPORT_SYMBOL(rvt_qp_iter);
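
/*
 * Usage example (a minimal sketch; dump_qp_cb and the printed fields
 * are made up for illustration) from sleepable context:
 *
 *	static void dump_qp_cb(struct rvt_qp *qp, u64 v)
 *	{
 *		pr_info("qp %u state %u\n", qp->ibqp.qp_num, qp->state);
 *	}
 *
 *	rvt_qp_iter(rdi, 0, dump_qp_cb);
 *
 * The iterator drops the RCU lock around each callback while holding a
 * QP reference, so the callback itself is allowed to sleep.
 */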
2825 
2826 /*
2827  * This should be called with s_lock and r_lock held.
2828  */
2829 void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
2830 		       enum ib_wc_status status)
2831 {
2832 	u32 old_last, last;
2833 	struct rvt_dev_info *rdi;
2834 
2835 	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
2836 		return;
2837 	rdi = ib_to_rvt(qp->ibqp.device);
2838 
2839 	old_last = qp->s_last;
2840 	trace_rvt_qp_send_completion(qp, wqe, old_last);
2841 	last = rvt_qp_complete_swqe(qp, wqe, rdi->wc_opcode[wqe->wr.opcode],
2842 				    status);
2843 	if (qp->s_acked == old_last)
2844 		qp->s_acked = last;
2845 	if (qp->s_cur == old_last)
2846 		qp->s_cur = last;
2847 	if (qp->s_tail == old_last)
2848 		qp->s_tail = last;
2849 	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
2850 		qp->s_draining = 0;
2851 }
2852 EXPORT_SYMBOL(rvt_send_complete);
2853 
2854 /**
2855  * rvt_copy_sge - copy data to SGE memory
2856  * @qp: associated QP
2857  * @ss: the SGE state
2858  * @data: the data to copy
2859  * @length: the length of the data
2860  * @release: boolean to release MR
2861  * @copy_last: do a separate copy of the last 8 bytes
2862  */
2863 void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
2864 		  void *data, u32 length,
2865 		  bool release, bool copy_last)
2866 {
2867 	struct rvt_sge *sge = &ss->sge;
2868 	int i;
2869 	bool in_last = false;
2870 	bool cacheless_copy = false;
2871 	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2872 	struct rvt_wss *wss = rdi->wss;
2873 	unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
2874 
2875 	if (sge_copy_mode == RVT_SGE_COPY_CACHELESS) {
2876 		cacheless_copy = length >= PAGE_SIZE;
2877 	} else if (sge_copy_mode == RVT_SGE_COPY_ADAPTIVE) {
2878 		if (length >= PAGE_SIZE) {
2879 			/*
2880 			 * NOTE: this *assumes*:
2881 			 * o The first vaddr is the dest.
2882 			 * o If multiple pages, then vaddr is sequential.
2883 			 */
2884 			wss_insert(wss, sge->vaddr);
2885 			if (length >= (2 * PAGE_SIZE))
2886 				wss_insert(wss, (sge->vaddr + PAGE_SIZE));
2887 
2888 			cacheless_copy = wss_exceeds_threshold(wss);
2889 		} else {
2890 			wss_advance_clean_counter(wss);
2891 		}
2892 	}
2893 
2894 	if (copy_last) {
2895 		if (length > 8) {
2896 			length -= 8;
2897 		} else {
2898 			copy_last = false;
2899 			in_last = true;
2900 		}
2901 	}
2902 
2903 again:
2904 	while (length) {
2905 		u32 len = rvt_get_sge_length(sge, length);
2906 
2907 		WARN_ON_ONCE(len == 0);
2908 		if (unlikely(in_last)) {
2909 			/* enforce byte transfer ordering */
2910 			for (i = 0; i < len; i++)
2911 				((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
2912 		} else if (cacheless_copy) {
2913 			cacheless_memcpy(sge->vaddr, data, len);
2914 		} else {
2915 			memcpy(sge->vaddr, data, len);
2916 		}
2917 		rvt_update_sge(ss, len, release);
2918 		data += len;
2919 		length -= len;
2920 	}
2921 
2922 	if (copy_last) {
2923 		copy_last = false;
2924 		in_last = true;
2925 		length = 8;
2926 		goto again;
2927 	}
2928 }
2929 EXPORT_SYMBOL(rvt_copy_sge);
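
/*
 * Example of the copy_last flow above (illustrative): for a 4096-byte
 * user RDMA WRITE, the first 4088 bytes take the bulk memcpy (or
 * cacheless) path, then the loop re-runs with length = 8 and in_last
 * set so the final qword lands byte by byte, last.  The intent is that
 * a consumer polling the tail of the buffer never sees it before the
 * body of the payload.
 */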
2930 
2931 static enum ib_wc_status loopback_qp_drop(struct rvt_ibport *rvp,
2932 					  struct rvt_qp *sqp)
2933 {
2934 	rvp->n_pkt_drops++;
2935 	/*
2936 	 * For RC, the requester would time out and retry, so
2937 	 * shortcut the timeouts and just signal too many retries.
2938 	 */
2939 	return sqp->ibqp.qp_type == IB_QPT_RC ?
2940 		IB_WC_RETRY_EXC_ERR : IB_WC_SUCCESS;
2941 }
2942 
2943 /**
2944  * rvt_ruc_loopback - handle UC and RC loopback requests
2945  * @sqp: the sending QP
2946  *
2947  * This is called from rvt_do_send() to forward a WQE addressed to the same HFI.
2948  * Note that although we are single threaded due to the send engine, we still
2949  * have to protect against post_send().  We don't have to worry about
2950  * receive interrupts since this is a connected protocol and all packets
2951  * will pass through here.
2952  */
2953 void rvt_ruc_loopback(struct rvt_qp *sqp)
2954 {
2955 	struct rvt_ibport *rvp =  NULL;
2956 	struct rvt_dev_info *rdi = ib_to_rvt(sqp->ibqp.device);
2957 	struct rvt_qp *qp;
2958 	struct rvt_swqe *wqe;
2959 	struct rvt_sge *sge;
2960 	unsigned long flags;
2961 	struct ib_wc wc;
2962 	u64 sdata;
2963 	atomic64_t *maddr;
2964 	enum ib_wc_status send_status;
2965 	bool release;
2966 	int ret;
2967 	bool copy_last = false;
2968 	int local_ops = 0;
2969 
2970 	rcu_read_lock();
2971 	rvp = rdi->ports[sqp->port_num - 1];
2972 
2973 	/*
2974 	 * Note that we check the responder QP state after
2975 	 * checking the requester's state.
2976 	 */
2977 
2978 	qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), rvp,
2979 			    sqp->remote_qpn);
2980 
2981 	spin_lock_irqsave(&sqp->s_lock, flags);
2982 
2983 	/* Return if we are already busy processing a work request. */
2984 	if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
2985 	    !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
2986 		goto unlock;
2987 
2988 	sqp->s_flags |= RVT_S_BUSY;
2989 
2990 again:
2991 	if (sqp->s_last == READ_ONCE(sqp->s_head))
2992 		goto clr_busy;
2993 	wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
2994 
2995 	/* Return if it is not OK to start a new work request. */
2996 	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
2997 		if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
2998 			goto clr_busy;
2999 		/* We are in the error state, flush the work request. */
3000 		send_status = IB_WC_WR_FLUSH_ERR;
3001 		goto flush_send;
3002 	}
3003 
3004 	/*
3005 	 * We can rely on the entry not changing without the s_lock
3006 	 * being held until we update s_last.
3007 	 * We increment s_cur to indicate s_last is in progress.
3008 	 */
3009 	if (sqp->s_last == sqp->s_cur) {
3010 		if (++sqp->s_cur >= sqp->s_size)
3011 			sqp->s_cur = 0;
3012 	}
3013 	spin_unlock_irqrestore(&sqp->s_lock, flags);
3014 
3015 	if (!qp) {
3016 		send_status = loopback_qp_drop(rvp, sqp);
3017 		goto serr_no_r_lock;
3018 	}
3019 	spin_lock_irqsave(&qp->r_lock, flags);
3020 	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
3021 	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
3022 		send_status = loopback_qp_drop(rvp, sqp);
3023 		goto serr;
3024 	}
3025 
3026 	memset(&wc, 0, sizeof(wc));
3027 	send_status = IB_WC_SUCCESS;
3028 
3029 	release = true;
3030 	sqp->s_sge.sge = wqe->sg_list[0];
3031 	sqp->s_sge.sg_list = wqe->sg_list + 1;
3032 	sqp->s_sge.num_sge = wqe->wr.num_sge;
3033 	sqp->s_len = wqe->length;
3034 	switch (wqe->wr.opcode) {
3035 	case IB_WR_REG_MR:
3036 		goto send_comp;
3037 
3038 	case IB_WR_LOCAL_INV:
3039 		if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
3040 			if (rvt_invalidate_rkey(sqp,
3041 						wqe->wr.ex.invalidate_rkey))
3042 				send_status = IB_WC_LOC_PROT_ERR;
3043 			local_ops = 1;
3044 		}
3045 		goto send_comp;
3046 
3047 	case IB_WR_SEND_WITH_INV:
3048 	case IB_WR_SEND_WITH_IMM:
3049 	case IB_WR_SEND:
3050 		ret = rvt_get_rwqe(qp, false);
3051 		if (ret < 0)
3052 			goto op_err;
3053 		if (!ret)
3054 			goto rnr_nak;
3055 		if (wqe->length > qp->r_len)
3056 			goto inv_err;
3057 		switch (wqe->wr.opcode) {
3058 		case IB_WR_SEND_WITH_INV:
3059 			if (!rvt_invalidate_rkey(qp,
3060 						 wqe->wr.ex.invalidate_rkey)) {
3061 				wc.wc_flags = IB_WC_WITH_INVALIDATE;
3062 				wc.ex.invalidate_rkey =
3063 					wqe->wr.ex.invalidate_rkey;
3064 			}
3065 			break;
3066 		case IB_WR_SEND_WITH_IMM:
3067 			wc.wc_flags = IB_WC_WITH_IMM;
3068 			wc.ex.imm_data = wqe->wr.ex.imm_data;
3069 			break;
3070 		default:
3071 			break;
3072 		}
3073 		break;
3074 
3075 	case IB_WR_RDMA_WRITE_WITH_IMM:
3076 		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
3077 			goto inv_err;
3078 		wc.wc_flags = IB_WC_WITH_IMM;
3079 		wc.ex.imm_data = wqe->wr.ex.imm_data;
3080 		ret = rvt_get_rwqe(qp, true);
3081 		if (ret < 0)
3082 			goto op_err;
3083 		if (!ret)
3084 			goto rnr_nak;
3085 		/* skip copy_last set and qp_access_flags recheck */
3086 		goto do_write;
3087 	case IB_WR_RDMA_WRITE:
3088 		copy_last = rvt_is_user_qp(qp);
3089 		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
3090 			goto inv_err;
3091 do_write:
3092 		if (wqe->length == 0)
3093 			break;
3094 		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
3095 					  wqe->rdma_wr.remote_addr,
3096 					  wqe->rdma_wr.rkey,
3097 					  IB_ACCESS_REMOTE_WRITE)))
3098 			goto acc_err;
3099 		qp->r_sge.sg_list = NULL;
3100 		qp->r_sge.num_sge = 1;
3101 		qp->r_sge.total_len = wqe->length;
3102 		break;
3103 
3104 	case IB_WR_RDMA_READ:
3105 		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
3106 			goto inv_err;
3107 		if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
3108 					  wqe->rdma_wr.remote_addr,
3109 					  wqe->rdma_wr.rkey,
3110 					  IB_ACCESS_REMOTE_READ)))
3111 			goto acc_err;
3112 		release = false;
3113 		sqp->s_sge.sg_list = NULL;
3114 		sqp->s_sge.num_sge = 1;
3115 		qp->r_sge.sge = wqe->sg_list[0];
3116 		qp->r_sge.sg_list = wqe->sg_list + 1;
3117 		qp->r_sge.num_sge = wqe->wr.num_sge;
3118 		qp->r_sge.total_len = wqe->length;
3119 		break;
3120 
3121 	case IB_WR_ATOMIC_CMP_AND_SWP:
3122 	case IB_WR_ATOMIC_FETCH_AND_ADD:
3123 		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
3124 			goto inv_err;
3125 		if (unlikely(wqe->atomic_wr.remote_addr & (sizeof(u64) - 1)))
3126 			goto inv_err;
3127 		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
3128 					  wqe->atomic_wr.remote_addr,
3129 					  wqe->atomic_wr.rkey,
3130 					  IB_ACCESS_REMOTE_ATOMIC)))
3131 			goto acc_err;
3132 		/* Perform atomic OP and save result. */
3133 		maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
3134 		sdata = wqe->atomic_wr.compare_add;
3135 		*(u64 *)sqp->s_sge.sge.vaddr =
3136 			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
3137 			(u64)atomic64_add_return(sdata, maddr) - sdata :
3138 			(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
3139 				      sdata, wqe->atomic_wr.swap);
3140 		rvt_put_mr(qp->r_sge.sge.mr);
3141 		qp->r_sge.num_sge = 0;
3142 		goto send_comp;
3143 
3144 	default:
3145 		send_status = IB_WC_LOC_QP_OP_ERR;
3146 		goto serr;
3147 	}
3148 
3149 	sge = &sqp->s_sge.sge;
3150 	while (sqp->s_len) {
3151 		u32 len = rvt_get_sge_length(sge, sqp->s_len);
3152 
3153 		WARN_ON_ONCE(len == 0);
3154 		rvt_copy_sge(qp, &qp->r_sge, sge->vaddr,
3155 			     len, release, copy_last);
3156 		rvt_update_sge(&sqp->s_sge, len, !release);
3157 		sqp->s_len -= len;
3158 	}
3159 	if (release)
3160 		rvt_put_ss(&qp->r_sge);
3161 
3162 	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
3163 		goto send_comp;
3164 
3165 	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
3166 		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
3167 	else
3168 		wc.opcode = IB_WC_RECV;
3169 	wc.wr_id = qp->r_wr_id;
3170 	wc.status = IB_WC_SUCCESS;
3171 	wc.byte_len = wqe->length;
3172 	wc.qp = &qp->ibqp;
3173 	wc.src_qp = qp->remote_qpn;
3174 	wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
3175 	wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
3176 	wc.port_num = 1;
3177 	/* Signal completion event if the solicited bit is set. */
3178 	rvt_recv_cq(qp, &wc, wqe->wr.send_flags & IB_SEND_SOLICITED);
3179 
3180 send_comp:
3181 	spin_unlock_irqrestore(&qp->r_lock, flags);
3182 	spin_lock_irqsave(&sqp->s_lock, flags);
3183 	rvp->n_loop_pkts++;
3184 flush_send:
3185 	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
3186 	spin_lock(&sqp->r_lock);
3187 	rvt_send_complete(sqp, wqe, send_status);
3188 	spin_unlock(&sqp->r_lock);
3189 	if (local_ops) {
3190 		atomic_dec(&sqp->local_ops_pending);
3191 		local_ops = 0;
3192 	}
3193 	goto again;
3194 
3195 rnr_nak:
3196 	/* Handle RNR NAK */
3197 	if (qp->ibqp.qp_type == IB_QPT_UC)
3198 		goto send_comp;
3199 	rvp->n_rnr_naks++;
3200 	/*
3201 	 * Note: we don't need the s_lock held since the BUSY flag
3202 	 * makes this single threaded.
3203 	 */
3204 	if (sqp->s_rnr_retry == 0) {
3205 		send_status = IB_WC_RNR_RETRY_EXC_ERR;
3206 		goto serr;
3207 	}
3208 	if (sqp->s_rnr_retry_cnt < 7)
3209 		sqp->s_rnr_retry--;
3210 	spin_unlock_irqrestore(&qp->r_lock, flags);
3211 	spin_lock_irqsave(&sqp->s_lock, flags);
3212 	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
3213 		goto clr_busy;
3214 	rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
3215 				IB_AETH_CREDIT_SHIFT);
3216 	goto clr_busy;
3217 
3218 op_err:
3219 	send_status = IB_WC_REM_OP_ERR;
3220 	wc.status = IB_WC_LOC_QP_OP_ERR;
3221 	goto err;
3222 
3223 inv_err:
3224 	send_status =
3225 		sqp->ibqp.qp_type == IB_QPT_RC ?
3226 			IB_WC_REM_INV_REQ_ERR :
3227 			IB_WC_SUCCESS;
3228 	wc.status = IB_WC_LOC_QP_OP_ERR;
3229 	goto err;
3230 
3231 acc_err:
3232 	send_status = IB_WC_REM_ACCESS_ERR;
3233 	wc.status = IB_WC_LOC_PROT_ERR;
3234 err:
3235 	/* responder goes to error state */
3236 	rvt_rc_error(qp, wc.status);
3237 
3238 serr:
3239 	spin_unlock_irqrestore(&qp->r_lock, flags);
3240 serr_no_r_lock:
3241 	spin_lock_irqsave(&sqp->s_lock, flags);
3242 	spin_lock(&sqp->r_lock);
3243 	rvt_send_complete(sqp, wqe, send_status);
3244 	spin_unlock(&sqp->r_lock);
3245 	if (sqp->ibqp.qp_type == IB_QPT_RC) {
3246 		int lastwqe;
3247 
3248 		spin_lock(&sqp->r_lock);
3249 		lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
3250 		spin_unlock(&sqp->r_lock);
3251 
3252 		sqp->s_flags &= ~RVT_S_BUSY;
3253 		spin_unlock_irqrestore(&sqp->s_lock, flags);
3254 		if (lastwqe) {
3255 			struct ib_event ev;
3256 
3257 			ev.device = sqp->ibqp.device;
3258 			ev.element.qp = &sqp->ibqp;
3259 			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
3260 			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
3261 		}
3262 		goto done;
3263 	}
3264 clr_busy:
3265 	sqp->s_flags &= ~RVT_S_BUSY;
3266 unlock:
3267 	spin_unlock_irqrestore(&sqp->s_lock, flags);
3268 done:
3269 	rcu_read_unlock();
3270 }
3271 EXPORT_SYMBOL(rvt_ruc_loopback);
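
/*
 * Example of the atomic emulation above (illustrative): a loopback
 * IB_WR_ATOMIC_CMP_AND_SWP with compare_add = OLD and swap = NEW maps
 * onto
 *
 *	orig = cmpxchg((u64 *)qp->r_sge.sge.vaddr, OLD, NEW);
 *
 * with orig written into the requester's first SGE, while
 * FETCH_AND_ADD uses atomic64_add_return() minus the addend to recover
 * the pre-add value.  Either way exactly eight bytes are touched, so
 * the responder's MR reference is dropped immediately afterwards.
 */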
3272