/*
 * Copyright(c) 2015 - 2020 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/xarray.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <linux/bitmap.h>
#include <linux/numa.h>
#include <rdma/rdma_vt.h>

#include "hfi.h"
#include "device.h"
#include "common.h"
#include "trace.h"
#include "mad.h"
#include "sdma.h"
#include "debugfs.h"
#include "verbs.h"
#include "aspm.h"
#include "affinity.h"
#include "vnic.h"
#include "exp_rcv.h"
#include "netdev.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

/*
 * Minimum number of buffers we want per user context, after those
 * taken by the driver.
 */
#define HFI1_MIN_USER_CTXT_BUFCNT 7

#define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
#define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */

#define NUM_IB_PORTS 1

/*
 * Number of user receive contexts we are configured to use (to allow for more
 * pio buffers per ctxt, etc.)  Zero means use one user context per CPU.
 */
int num_user_contexts = -1;
module_param_named(num_user_contexts, num_user_contexts, int, 0444);
MODULE_PARM_DESC(
	num_user_contexts, "Set max number of user contexts to use (default: -1 will use the real (non-HT) CPU count)");

uint krcvqs[RXE_NUM_DATA_VL];
int krcvqsset;
module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");
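
/*
 * Illustrative usage (hypothetical values): to request two kernel
 * receive queues on each of the first three data VLs, load the module
 * with:
 *
 *	modprobe hfi1 krcvqs=2,2,2
 *
 * n_krcvqs below is then the sum of these entries (6 in this example).
 */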

/* computed based on above array */
unsigned long n_krcvqs;

static unsigned hfi1_rcvarr_split = 25;
module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers");

static uint eager_buffer_size = (8 << 20); /* 8MB */
module_param(eager_buffer_size, uint, S_IRUGO);
MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 8MB");

static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */
module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)");

static uint hfi1_hdrq_entsize = 32;
module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, 0444);
MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B, 32 - 128B (default)");
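
/*
 * Illustrative usage (hypothetical value): hdrq_entsize=16 selects
 * 64-byte header queue entries.  Only the encodings listed above
 * (2, 16, 32) are accepted; init_one() rejects anything that
 * encode_rcv_header_entry_size() cannot encode.
 */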

unsigned int user_credit_return_threshold = 33;	/* default is 33% */
module_param(user_credit_return_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits pass this many blocks (in percent of allocated blocks, 0 is off)");

DEFINE_XARRAY_FLAGS(hfi1_dev_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);

static int hfi1_create_kctxt(struct hfi1_devdata *dd,
			     struct hfi1_pportdata *ppd)
{
	struct hfi1_ctxtdata *rcd;
	int ret;

	/* Control context has to be always 0 */
	BUILD_BUG_ON(HFI1_CTRL_CTXT != 0);

	ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd);
	if (ret < 0) {
		dd_dev_err(dd, "Kernel receive context allocation failed\n");
		return ret;
	}

	/*
	 * Set up the kernel context flags here and now because they use
	 * default values for all receive side memories.  User contexts will
	 * be handled as they are created.
	 */
	rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
		HFI1_CAP_KGET(NODROP_RHQ_FULL) |
		HFI1_CAP_KGET(NODROP_EGR_FULL) |
		HFI1_CAP_KGET(DMA_RTAIL);

	/* Control context must use DMA_RTAIL */
	if (rcd->ctxt == HFI1_CTRL_CTXT)
		rcd->flags |= HFI1_CAP_DMA_RTAIL;
	rcd->fast_handler = get_dma_rtail_setting(rcd) ?
				handle_receive_interrupt_dma_rtail :
				handle_receive_interrupt_nodma_rtail;

	hfi1_set_seq_cnt(rcd, 1);

	rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
	if (!rcd->sc) {
		dd_dev_err(dd, "Kernel send context allocation failed\n");
		return -ENOMEM;
	}
	hfi1_init_ctxt(rcd->sc);

	return 0;
}

/*
 * Create the receive context array and one or more kernel contexts
 */
int hfi1_create_kctxts(struct hfi1_devdata *dd)
{
	u16 i;
	int ret;

	dd->rcd = kcalloc_node(dd->num_rcv_contexts, sizeof(*dd->rcd),
			       GFP_KERNEL, dd->node);
	if (!dd->rcd)
		return -ENOMEM;

	for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
		ret = hfi1_create_kctxt(dd, dd->pport);
		if (ret)
			goto bail;
	}

	return 0;
bail:
	for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i)
		hfi1_free_ctxt(dd->rcd[i]);

	/* All the contexts should be freed, free the array */
	kfree(dd->rcd);
	dd->rcd = NULL;
	return ret;
}

/*
 * Helper routines for the receive context reference count (rcd and uctxt).
 */
static void hfi1_rcd_init(struct hfi1_ctxtdata *rcd)
{
	kref_init(&rcd->kref);
}

/**
 * hfi1_rcd_free - When reference is zero clean up.
 * @kref: pointer to an initialized rcd data structure
 *
 */
static void hfi1_rcd_free(struct kref *kref)
{
	unsigned long flags;
	struct hfi1_ctxtdata *rcd =
		container_of(kref, struct hfi1_ctxtdata, kref);

	spin_lock_irqsave(&rcd->dd->uctxt_lock, flags);
	rcd->dd->rcd[rcd->ctxt] = NULL;
	spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags);

	hfi1_free_ctxtdata(rcd->dd, rcd);

	kfree(rcd);
}

/**
 * hfi1_rcd_put - decrement reference for rcd
 * @rcd: pointer to an initialized rcd data structure
 *
 * Use this to put a reference after the init.
 */
int hfi1_rcd_put(struct hfi1_ctxtdata *rcd)
{
	if (rcd)
		return kref_put(&rcd->kref, hfi1_rcd_free);

	return 0;
}

/**
 * hfi1_rcd_get - increment reference for rcd
 * @rcd: pointer to an initialized rcd data structure
 *
 * Use this to get a reference after the init.
 *
 * Return: reflects kref_get_unless_zero(), which returns non-zero on
 * increment, otherwise 0.
 */
int hfi1_rcd_get(struct hfi1_ctxtdata *rcd)
{
	return kref_get_unless_zero(&rcd->kref);
}

/**
 * allocate_rcd_index - allocate an rcd index from the rcd array
 * @dd: pointer to a valid devdata structure
 * @rcd: rcd data structure to assign
 * @index: pointer to index that is allocated
 *
 * Find an empty index in the rcd array, and assign the given rcd to it.
 * If the array is full, return -EBUSY.
 *
 */
static int allocate_rcd_index(struct hfi1_devdata *dd,
			      struct hfi1_ctxtdata *rcd, u16 *index)
{
	unsigned long flags;
	u16 ctxt;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++)
		if (!dd->rcd[ctxt])
			break;

	if (ctxt < dd->num_rcv_contexts) {
		rcd->ctxt = ctxt;
		dd->rcd[ctxt] = rcd;
		hfi1_rcd_init(rcd);
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	if (ctxt >= dd->num_rcv_contexts)
		return -EBUSY;

	*index = ctxt;

	return 0;
}

/**
 * hfi1_rcd_get_by_index_safe - validate the ctxt index before accessing the
 * array
 * @dd: pointer to a valid devdata structure
 * @ctxt: the index of a possible rcd
 *
 * This is a wrapper for hfi1_rcd_get_by_index() to validate that the given
 * ctxt index is valid.
 *
 * The caller is responsible for making the _put().
 *
 */
struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
						 u16 ctxt)
{
	if (ctxt < dd->num_rcv_contexts)
		return hfi1_rcd_get_by_index(dd, ctxt);

	return NULL;
}

/**
 * hfi1_rcd_get_by_index
 * @dd: pointer to a valid devdata structure
 * @ctxt: the index of a possible rcd
 *
 * We need to protect access to the rcd array.  If access is needed to
 * one or more indices, take the protecting spinlock and then increment
 * the kref.
 *
 * The caller is responsible for making the _put().
 *
 */
struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt)
{
	unsigned long flags;
	struct hfi1_ctxtdata *rcd = NULL;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	if (dd->rcd[ctxt]) {
		rcd = dd->rcd[ctxt];
		if (!hfi1_rcd_get(rcd))
			rcd = NULL;
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	return rcd;
}
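
/*
 * Illustrative lookup pattern (a sketch, not part of the driver):
 * hold the reference returned by the lookup only for the duration of
 * use, then drop it:
 *
 *	struct hfi1_ctxtdata *rcd;
 *
 *	rcd = hfi1_rcd_get_by_index_safe(dd, ctxt);
 *	if (rcd) {
 *		... use rcd ...
 *		hfi1_rcd_put(rcd);
 *	}
 */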

/*
 * Common code for user and kernel context create and setup.
 * NOTE: the initial kref is done here (hfi1_rcd_init()).
 */
int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
			 struct hfi1_ctxtdata **context)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ctxtdata *rcd;
	unsigned kctxt_ngroups = 0;
	u32 base;

	if (dd->rcv_entries.nctxt_extra >
	    dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt)
		kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
			 (dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt));
	rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa);
	if (rcd) {
		u32 rcvtids, max_entries;
		u16 ctxt;
		int ret;

		ret = allocate_rcd_index(dd, rcd, &ctxt);
		if (ret) {
			*context = NULL;
			kfree(rcd);
			return ret;
		}

		INIT_LIST_HEAD(&rcd->qp_wait_list);
		hfi1_exp_tid_group_init(rcd);
		rcd->ppd = ppd;
		rcd->dd = dd;
		rcd->numa_id = numa;
		rcd->rcv_array_groups = dd->rcv_entries.ngroups;
		rcd->rhf_rcv_function_map = normal_rhf_rcv_functions;
		rcd->slow_handler = handle_receive_interrupt;
		rcd->do_interrupt = rcd->slow_handler;
		rcd->msix_intr = CCE_NUM_MSIX_VECTORS;

		mutex_init(&rcd->exp_mutex);
		spin_lock_init(&rcd->exp_lock);
		INIT_LIST_HEAD(&rcd->flow_queue.queue_head);
		INIT_LIST_HEAD(&rcd->rarr_queue.queue_head);

		hfi1_cdbg(PROC, "setting up context %u\n", rcd->ctxt);

		/*
		 * Calculate the context's RcvArray entry starting point.
		 * We do this here because we have to take into account all
		 * the RcvArray entries that previous contexts would have
		 * taken and we have to account for any extra groups assigned
		 * to the static (kernel) or dynamic (vnic/user) contexts.
		 */
		if (ctxt < dd->first_dyn_alloc_ctxt) {
			if (ctxt < kctxt_ngroups) {
				base = ctxt * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else {
				base = kctxt_ngroups +
					(ctxt * dd->rcv_entries.ngroups);
			}
		} else {
			u16 ct = ctxt - dd->first_dyn_alloc_ctxt;

			base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) +
				kctxt_ngroups);
			if (ct < dd->rcv_entries.nctxt_extra) {
				base += ct * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else {
				base += dd->rcv_entries.nctxt_extra +
					(ct * dd->rcv_entries.ngroups);
			}
		}
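		/*
		 * Worked example (illustrative numbers only): with
		 * ngroups = 64, group_size = 8, and kctxt_ngroups = 2,
		 * kernel ctxt 0 gets base 0 with 65 groups, kernel
		 * ctxt 1 gets base 65 with 65 groups, and a third
		 * kernel ctxt would get base 2 + 2 * 64 = 130 with the
		 * default 64 groups.
		 */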
		rcd->eager_base = base * dd->rcv_entries.group_size;

		rcd->rcvhdrq_cnt = rcvhdrcnt;
		rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
		rcd->rhf_offset =
			rcd->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
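		/*
		 * sizeof(u64) / sizeof(u32) == 2, so rhf_offset is the
		 * entry size minus two 32-bit words: the 64-bit RHF
		 * occupies the last 8 bytes of each header queue entry.
		 */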
		/*
		 * Simple Eager buffer allocation: we have already pre-allocated
		 * the number of RcvArray entry groups. Each ctxtdata structure
		 * holds the number of groups for that context.
		 *
		 * To follow CSR requirements and maintain cacheline alignment,
		 * make sure all sizes and bases are multiples of group_size.
		 *
		 * The expected (TID) entry count is what is left after
		 * assigning eager.
		 */
		max_entries = rcd->rcv_array_groups *
			dd->rcv_entries.group_size;
		rcvtids = ((max_entries * hfi1_rcvarr_split) / 100);
		rcd->egrbufs.count = round_down(rcvtids,
						dd->rcv_entries.group_size);
		if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) {
			dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n",
				   rcd->ctxt);
			rcd->egrbufs.count = MAX_EAGER_ENTRIES;
		}
		hfi1_cdbg(PROC,
			  "ctxt%u: max Eager buffer RcvArray entries: %u\n",
			  rcd->ctxt, rcd->egrbufs.count);
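
		/*
		 * Worked example (illustrative numbers only): with
		 * rcv_array_groups = 64, group_size = 8, and the default
		 * rcvarr_split of 25, max_entries = 64 * 8 = 512,
		 * rcvtids = 512 * 25 / 100 = 128, and egrbufs.count =
		 * round_down(128, 8) = 128.
		 */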

		/*
		 * Allocate array that will hold the eager buffer accounting
		 * data.
		 * This will allocate the maximum possible buffer count based
		 * on the value of the RcvArray split parameter.
		 * The resulting value will be rounded down to the closest
		 * multiple of dd->rcv_entries.group_size.
		 */
		rcd->egrbufs.buffers =
			kcalloc_node(rcd->egrbufs.count,
				     sizeof(*rcd->egrbufs.buffers),
				     GFP_KERNEL, numa);
		if (!rcd->egrbufs.buffers)
			goto bail;
		rcd->egrbufs.rcvtids =
			kcalloc_node(rcd->egrbufs.count,
				     sizeof(*rcd->egrbufs.rcvtids),
				     GFP_KERNEL, numa);
		if (!rcd->egrbufs.rcvtids)
			goto bail;
		rcd->egrbufs.size = eager_buffer_size;
		/*
		 * The size of the buffers programmed into the RcvArray
		 * entries needs to be big enough to handle the highest
		 * MTU supported.
		 */
		if (rcd->egrbufs.size < hfi1_max_mtu) {
			rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
			hfi1_cdbg(PROC,
				  "ctxt%u: eager bufs size too small. Adjusting to %u\n",
				  rcd->ctxt, rcd->egrbufs.size);
		}
		rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;

		/* Applicable only for statically created kernel contexts */
		if (ctxt < dd->first_dyn_alloc_ctxt) {
			rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
						    GFP_KERNEL, numa);
			if (!rcd->opstats)
				goto bail;

			/* Initialize TID flow generations for the context */
			hfi1_kern_init_ctxt_generations(rcd);
		}

		*context = rcd;
		return 0;
	}

bail:
	*context = NULL;
	hfi1_free_ctxt(rcd);
	return -ENOMEM;
}

/**
 * hfi1_free_ctxt
 * @rcd: pointer to an initialized rcd data structure
 *
 * This wrapper is the free function that matches hfi1_create_ctxtdata().
 * When a context is done being used (kernel or user), this function is called
 * for the "final" put to match the kref init from hfi1_create_ctxtdata().
 * Other users of the context do a get/put sequence to make sure that the
 * structure isn't removed while in use.
 */
void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd)
{
	hfi1_rcd_put(rcd);
}

/*
 * Select the largest ccti value over all SLs to determine the intra-
 * packet gap for the link.
 *
 * called with cca_timer_lock held (to protect access to cca_timer
 * array), and rcu_read_lock() (to protect access to cc_state).
 */
void set_link_ipg(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct cc_state *cc_state;
	int i;
	u16 cce, ccti_limit, max_ccti = 0;
	u16 shift, mult;
	u64 src;
	u32 current_egress_rate; /* Mbits/sec */
	u64 max_pkt_time;
	/*
	 * max_pkt_time is the maximum packet egress time in units
	 * of the fabric clock period 1/(805 MHz).
	 */

	cc_state = get_cc_state(ppd);

	if (!cc_state)
		/*
		 * This should _never_ happen - rcu_read_lock() is held,
		 * and set_link_ipg() should not be called if cc_state
		 * is NULL.
		 */
		return;

	for (i = 0; i < OPA_MAX_SLS; i++) {
		u16 ccti = ppd->cca_timer[i].ccti;

		if (ccti > max_ccti)
			max_ccti = ccti;
	}

	ccti_limit = cc_state->cct.ccti_limit;
	if (max_ccti > ccti_limit)
		max_ccti = ccti_limit;

	cce = cc_state->cct.entries[max_ccti].entry;
	shift = (cce & 0xc000) >> 14;
	mult = (cce & 0x3fff);
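
	/*
	 * Illustrative decode (hypothetical entry value): cce = 0x8005
	 * gives shift = 2 (top two bits) and mult = 5 (low 14 bits), so
	 * the reload value computed below is (max_pkt_time >> 2) * 5.
	 */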

	current_egress_rate = active_egress_rate(ppd);

	max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate);

	src = (max_pkt_time >> shift) * mult;

	src &= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK;
	src <<= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT;

	write_csr(dd, SEND_STATIC_RATE_CONTROL, src);
}

static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
{
	struct cca_timer *cca_timer;
	struct hfi1_pportdata *ppd;
	int sl;
	u16 ccti_timer, ccti_min;
	struct cc_state *cc_state;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	cca_timer = container_of(t, struct cca_timer, hrtimer);
	ppd = cca_timer->ppd;
	sl = cca_timer->sl;

	rcu_read_lock();

	cc_state = get_cc_state(ppd);

	if (!cc_state) {
		rcu_read_unlock();
		return HRTIMER_NORESTART;
	}

	/*
	 * 1) decrement ccti for SL
	 * 2) calculate IPG for link (set_link_ipg())
	 * 3) restart timer, unless ccti is at min value
	 */

	ccti_min = cc_state->cong_setting.entries[sl].ccti_min;
	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;

	spin_lock_irqsave(&ppd->cca_timer_lock, flags);

	if (cca_timer->ccti > ccti_min) {
		cca_timer->ccti--;
		set_link_ipg(ppd);
	}

	if (cca_timer->ccti > ccti_min) {
		unsigned long nsec = 1024 * ccti_timer;
		/* ccti_timer is in units of 1.024 usec */
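		/*
		 * e.g. (illustrative) ccti_timer = 977 re-arms the timer
		 * roughly 1 ms later (977 * 1.024 usec).
		 */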
		hrtimer_forward_now(t, ns_to_ktime(nsec));
		ret = HRTIMER_RESTART;
	}

	spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
	rcu_read_unlock();
	return ret;
}

/*
 * Common code for initializing the physical port structure.
 */
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
			 struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
{
	int i;
	uint default_pkey_idx;
	struct cc_state *cc_state;

	ppd->dd = dd;
	ppd->hw_pidx = hw_pidx;
	ppd->port = port; /* IB port number, not index */
	ppd->prev_link_width = LINK_WIDTH_DEFAULT;
	/*
	 * There are C_VL_COUNT PortVLXmitWait counters.
	 * Adding 1 to C_VL_COUNT to include the PortXmitWait counter.
	 */
	for (i = 0; i < C_VL_COUNT + 1; i++) {
		ppd->port_vl_xmit_wait_last[i] = 0;
		ppd->vl_xmit_flit_cnt[i] = 0;
	}

	default_pkey_idx = 1;

	ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
	ppd->part_enforce |= HFI1_PART_ENFORCE_IN;
	ppd->pkeys[0] = 0x8001;

	INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
	INIT_WORK(&ppd->link_up_work, handle_link_up);
	INIT_WORK(&ppd->link_down_work, handle_link_down);
	INIT_WORK(&ppd->freeze_work, handle_freeze);
	INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
	INIT_WORK(&ppd->sma_message_work, handle_sma_message);
	INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
	INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link);
	INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
	INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);

	mutex_init(&ppd->hls_lock);
	spin_lock_init(&ppd->qsfp_info.qsfp_lock);

	ppd->qsfp_info.ppd = ppd;
	ppd->sm_trap_qp = 0x0;
	ppd->sa_qp = 0x1;

	ppd->hfi1_wq = NULL;

	spin_lock_init(&ppd->cca_timer_lock);

	for (i = 0; i < OPA_MAX_SLS; i++) {
		hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		ppd->cca_timer[i].ppd = ppd;
		ppd->cca_timer[i].sl = i;
		ppd->cca_timer[i].ccti = 0;
		ppd->cca_timer[i].hrtimer.function = cca_timer_fn;
	}

	ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;

	spin_lock_init(&ppd->cc_state_lock);
	spin_lock_init(&ppd->cc_log_lock);
	cc_state = kzalloc(sizeof(*cc_state), GFP_KERNEL);
	RCU_INIT_POINTER(ppd->cc_state, cc_state);
	if (!cc_state)
		goto bail;
	return;

bail:
	dd_dev_err(dd, "Congestion Control Agent disabled for port %d\n", port);
}

/*
 * Do initialization for device that is only needed on
 * first detect, not on resets.
 */
static int loadtime_init(struct hfi1_devdata *dd)
{
	return 0;
}

/**
 * init_after_reset - re-initialize after a reset
 * @dd: the hfi1_ib device
 *
 * sanity check at least some of the values after reset, and
 * ensure no receive or transmit (explicitly, in case reset
 * failed)
 */
static int init_after_reset(struct hfi1_devdata *dd)
{
	int i;
	struct hfi1_ctxtdata *rcd;
	/*
	 * Ensure chip does no sends or receives, tail updates, or
	 * pioavail updates while we re-initialize.  This is mostly
	 * for the driver data structures, not chip registers.
	 */
	for (i = 0; i < dd->num_rcv_contexts; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
			     HFI1_RCVCTRL_INTRAVAIL_DIS |
			     HFI1_RCVCTRL_TAILUPD_DIS, rcd);
		hfi1_rcd_put(rcd);
	}
	pio_send_control(dd, PSC_GLOBAL_DISABLE);
	for (i = 0; i < dd->num_send_contexts; i++)
		sc_disable(dd->send_contexts[i].sc);

	return 0;
}

static void enable_chip(struct hfi1_devdata *dd)
{
	struct hfi1_ctxtdata *rcd;
	u32 rcvmask;
	u16 i;

	/* enable PIO send */
	pio_send_control(dd, PSC_GLOBAL_ENABLE);

	/*
	 * Enable kernel ctxts' receive and receive interrupt.
	 * Other ctxts done as user opens and initializes them.
	 */
	for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;
		rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB;
		rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
			HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
		if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
			rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
		if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_RHQ_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
		if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_EGR_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
		if (HFI1_CAP_IS_KSET(TID_RDMA))
			rcvmask |= HFI1_RCVCTRL_TIDFLOW_ENB;
		hfi1_rcvctrl(dd, rcvmask, rcd);
		sc_enable(rcd->sc);
		hfi1_rcd_put(rcd);
	}
}

/**
 * create_workqueues - create per port workqueues
 * @dd: the hfi1_ib device
 */
static int create_workqueues(struct hfi1_devdata *dd)
{
	int pidx;
	struct hfi1_pportdata *ppd;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (!ppd->hfi1_wq) {
			ppd->hfi1_wq =
				alloc_workqueue(
				    "hfi%d_%d",
				    WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE |
				    WQ_MEM_RECLAIM,
				    HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES,
				    dd->unit, pidx);
			if (!ppd->hfi1_wq)
				goto wq_error;
		}
		if (!ppd->link_wq) {
			/*
			 * Make the link workqueue single-threaded to enforce
			 * serialization.
			 */
			ppd->link_wq =
				alloc_workqueue(
				    "hfi_link_%d_%d",
				    WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND,
				    1, /* max_active */
				    dd->unit, pidx);
			if (!ppd->link_wq)
				goto wq_error;
		}
	}
	return 0;
wq_error:
	pr_err("alloc_workqueue failed for port %d\n", pidx + 1);
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
		if (ppd->link_wq) {
			destroy_workqueue(ppd->link_wq);
			ppd->link_wq = NULL;
		}
	}
	return -ENOMEM;
}

/**
 * destroy_workqueues - destroy per port workqueues
 * @dd: the hfi1_ib device
 */
static void destroy_workqueues(struct hfi1_devdata *dd)
{
	int pidx;
	struct hfi1_pportdata *ppd;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
		if (ppd->link_wq) {
			destroy_workqueue(ppd->link_wq);
			ppd->link_wq = NULL;
		}
	}
}

/**
 * enable_general_intr() - Enable the IRQs that will be handled by the
 * general interrupt handler.
 * @dd: valid devdata
 *
 */
static void enable_general_intr(struct hfi1_devdata *dd)
{
	set_intr_bits(dd, CCE_ERR_INT, MISC_ERR_INT, true);
	set_intr_bits(dd, PIO_ERR_INT, TXE_ERR_INT, true);
	set_intr_bits(dd, IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END, true);
	set_intr_bits(dd, PBC_INT, GPIO_ASSERT_INT, true);
	set_intr_bits(dd, TCRIT_INT, TCRIT_INT, true);
	set_intr_bits(dd, IS_DC_START, IS_DC_END, true);
	set_intr_bits(dd, IS_SENDCREDIT_START, IS_SENDCREDIT_END, true);
}

/**
 * hfi1_init - do the actual initialization sequence on the chip
 * @dd: the hfi1_ib device
 * @reinit: re-initializing, so don't allocate new memory
 *
 * Do the actual initialization sequence on the chip.  This is done
 * both from the init routine called from the PCI infrastructure, and
 * when we reset the chip, or detect that it was reset internally,
 * or it's administratively re-enabled.
 *
 * Memory allocation here and in called routines is only done in
 * the first case (reinit == 0).  We have to be careful, because even
 * without memory allocation, we need to re-write all the chip registers
 * TIDs, etc. after the reset or enable has completed.
 */
int hfi1_init(struct hfi1_devdata *dd, int reinit)
{
	int ret = 0, pidx, lastfail = 0;
	unsigned long len;
	u16 i;
	struct hfi1_ctxtdata *rcd;
	struct hfi1_pportdata *ppd;

	/* Set up send low level handlers */
	dd->process_pio_send = hfi1_verbs_send_pio;
	dd->process_dma_send = hfi1_verbs_send_dma;
	dd->pio_inline_send = pio_copy;
	dd->process_vnic_dma_send = hfi1_vnic_send_dma;

	if (is_ax(dd)) {
		atomic_set(&dd->drop_packet, DROP_PACKET_ON);
		dd->do_drop = true;
	} else {
		atomic_set(&dd->drop_packet, DROP_PACKET_OFF);
		dd->do_drop = false;
	}

	/* make sure the link is not "up" */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		ppd->linkup = 0;
	}

	if (reinit)
		ret = init_after_reset(dd);
	else
		ret = loadtime_init(dd);
	if (ret)
		goto done;

	/* dd->rcd can be NULL if early initialization failed */
	for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) {
		/*
		 * Set up the (kernel) rcvhdr queue and egr TIDs.  If doing
		 * re-init, the simplest way to handle this is to free
		 * existing, and re-allocate.
		 * Need to re-create rest of ctxt 0 ctxtdata as well.
		 */
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;

		lastfail = hfi1_create_rcvhdrq(dd, rcd);
		if (!lastfail)
			lastfail = hfi1_setup_eagerbufs(rcd);
		if (!lastfail)
			lastfail = hfi1_kern_exp_rcv_init(rcd, reinit);
		if (lastfail) {
			dd_dev_err(dd,
				   "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
			ret = lastfail;
		}
		/* enable IRQ */
		hfi1_rcd_put(rcd);
	}

	/* Allocate enough memory for user event notification. */
	len = PAGE_ALIGN(chip_rcv_contexts(dd) * HFI1_MAX_SHARED_CTXTS *
			 sizeof(*dd->events));
	dd->events = vmalloc_user(len);
	if (!dd->events)
		dd_dev_err(dd, "Failed to allocate user events page\n");
	/*
	 * Allocate a page for device and port status.
	 * Page will be shared amongst all user processes.
	 */
	dd->status = vmalloc_user(PAGE_SIZE);
	if (!dd->status)
		dd_dev_err(dd, "Failed to allocate dev status page\n");
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (dd->status)
			/* Currently, we only have one port */
			ppd->statusp = &dd->status->port;

		set_mtu(ppd);
	}

	/* enable chip even if we have an error, so we can debug cause */
	enable_chip(dd);

done:
	/*
	 * Set status even if port serdes is not initialized
	 * so that diags will work.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_CHIP_PRESENT |
			HFI1_STATUS_INITTED;
	if (!ret) {
		/* enable all interrupts from the chip */
		enable_general_intr(dd);
		init_qsfp_int(dd);

		/* chip is OK for user apps; mark it as initialized */
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;

			/*
			 * start the serdes - must be after interrupts are
			 * enabled so we are notified when the link goes up
			 */
			lastfail = bringup_serdes(ppd);
			if (lastfail)
				dd_dev_info(dd,
					    "Failed to bring up port %u\n",
					    ppd->port);

			/*
			 * Set status even if port serdes is not initialized
			 * so that diags will work.
			 */
			if (ppd->statusp)
				*ppd->statusp |= HFI1_STATUS_CHIP_PRESENT |
							HFI1_STATUS_INITTED;
			if (!ppd->link_speed_enabled)
				continue;
		}
	}

	/* if ret is non-zero, we probably should do some cleanup here... */
	return ret;
}

struct hfi1_devdata *hfi1_lookup(int unit)
{
	return xa_load(&hfi1_dev_table, unit);
}

/*
 * Stop the timers during unit shutdown, or after an error late
 * in initialization.
 */
static void stop_timers(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	int pidx;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->led_override_timer.function) {
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}
	}
}

/**
 * shutdown_device - shut down a device
 * @dd: the hfi1_ib device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.  It does not free any data structures.
 * Everything it does has to be set up again by hfi1_init(dd, 1)
 */
static void shutdown_device(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	struct hfi1_ctxtdata *rcd;
	unsigned pidx;
	int i;

	if (dd->flags & HFI1_SHUTDOWN)
		return;
	dd->flags |= HFI1_SHUTDOWN;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		ppd->linkup = 0;
		if (ppd->statusp)
			*ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
					   HFI1_STATUS_IB_READY);
	}
	dd->flags &= ~HFI1_INITTED;

	/* mask and clean up interrupts */
	set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
	msix_clean_up_interrupts(dd);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		for (i = 0; i < dd->num_rcv_contexts; i++) {
			rcd = hfi1_rcd_get_by_index(dd, i);
			hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS |
				     HFI1_RCVCTRL_CTXT_DIS |
				     HFI1_RCVCTRL_INTRAVAIL_DIS |
				     HFI1_RCVCTRL_PKEY_DIS |
				     HFI1_RCVCTRL_ONE_PKT_EGR_DIS, rcd);
			hfi1_rcd_put(rcd);
		}
		/*
		 * Gracefully stop all sends allowing any in progress to
		 * trickle out first.
		 */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_flush(dd->send_contexts[i].sc);
	}

	/*
	 * Enough for anything that's going to trickle out to have actually
	 * done so.
	 */
	udelay(20);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		/* disable all contexts */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_disable(dd->send_contexts[i].sc);
		/* disable the send device */
		pio_send_control(dd, PSC_GLOBAL_DISABLE);

		shutdown_led_override(ppd);

		/*
		 * Clear SerdesEnable.
		 * We can't count on interrupts since we are stopping.
		 */
		hfi1_quiet_serdes(ppd);
		if (ppd->hfi1_wq)
			flush_workqueue(ppd->hfi1_wq);
		if (ppd->link_wq)
			flush_workqueue(ppd->link_wq);
	}
	sdma_exit(dd);
}

/**
 * hfi1_free_ctxtdata - free a context's allocated data
 * @dd: the hfi1_ib device
 * @rcd: the ctxtdata structure
 *
 * free up any allocated data for a context
 * It should never change any chip state, or global driver state.
 */
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	u32 e;

	if (!rcd)
		return;

	if (rcd->rcvhdrq) {
		dma_free_coherent(&dd->pcidev->dev, rcvhdrq_size(rcd),
				  rcd->rcvhdrq, rcd->rcvhdrq_dma);
		rcd->rcvhdrq = NULL;
		if (hfi1_rcvhdrtail_kvaddr(rcd)) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					  (void *)hfi1_rcvhdrtail_kvaddr(rcd),
					  rcd->rcvhdrqtailaddr_dma);
			rcd->rcvhdrtail_kvaddr = NULL;
		}
	}

	/* all the RcvArray entries should have been cleared by now */
	kfree(rcd->egrbufs.rcvtids);
	rcd->egrbufs.rcvtids = NULL;

	for (e = 0; e < rcd->egrbufs.alloced; e++) {
		if (rcd->egrbufs.buffers[e].addr)
			dma_free_coherent(&dd->pcidev->dev,
					  rcd->egrbufs.buffers[e].len,
					  rcd->egrbufs.buffers[e].addr,
					  rcd->egrbufs.buffers[e].dma);
	}
	kfree(rcd->egrbufs.buffers);
	rcd->egrbufs.alloced = 0;
	rcd->egrbufs.buffers = NULL;

	sc_free(rcd->sc);
	rcd->sc = NULL;

	vfree(rcd->subctxt_uregbase);
	vfree(rcd->subctxt_rcvegrbuf);
	vfree(rcd->subctxt_rcvhdr_base);
	kfree(rcd->opstats);

	rcd->subctxt_uregbase = NULL;
	rcd->subctxt_rcvegrbuf = NULL;
	rcd->subctxt_rcvhdr_base = NULL;
	rcd->opstats = NULL;
}

/*
 * Release our hold on the shared asic data.  If we are the last one,
 * return the structure to be finalized outside the lock.  Must be
 * holding hfi1_dev_table lock.
 */
static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd)
{
	struct hfi1_asic_data *ad;
	int other;

	if (!dd->asic_data)
		return NULL;
	dd->asic_data->dds[dd->hfi1_id] = NULL;
	other = dd->hfi1_id ? 0 : 1;
	ad = dd->asic_data;
	dd->asic_data = NULL;
	/* return NULL if the other dd still has a link */
	return ad->dds[other] ? NULL : ad;
}

static void finalize_asic_data(struct hfi1_devdata *dd,
			       struct hfi1_asic_data *ad)
{
	clean_up_i2c(dd, ad);
	kfree(ad);
}

/**
 * hfi1_free_devdata - cleans up and frees per-unit data structure
 * @dd: pointer to a valid devdata structure
 *
 * It cleans up and frees all data structures set up by
 * hfi1_alloc_devdata().
 */
void hfi1_free_devdata(struct hfi1_devdata *dd)
{
	struct hfi1_asic_data *ad;
	unsigned long flags;

	xa_lock_irqsave(&hfi1_dev_table, flags);
	__xa_erase(&hfi1_dev_table, dd->unit);
	ad = release_asic_data(dd);
	xa_unlock_irqrestore(&hfi1_dev_table, flags);

	finalize_asic_data(dd, ad);
	free_platform_config(dd);
	rcu_barrier(); /* wait for rcu callbacks to complete */
	free_percpu(dd->int_counter);
	free_percpu(dd->rcv_limit);
	free_percpu(dd->send_schedule);
	free_percpu(dd->tx_opstats);
	dd->int_counter   = NULL;
	dd->rcv_limit     = NULL;
	dd->send_schedule = NULL;
	dd->tx_opstats    = NULL;
	kfree(dd->comp_vect);
	dd->comp_vect = NULL;
	if (dd->rcvhdrtail_dummy_kvaddr)
		dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
				  (void *)dd->rcvhdrtail_dummy_kvaddr,
				  dd->rcvhdrtail_dummy_dma);
	dd->rcvhdrtail_dummy_kvaddr = NULL;
	sdma_clean(dd, dd->num_sdma);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
}

/**
 * hfi1_alloc_devdata - Allocate our primary per-unit data structure.
 * @pdev: Valid PCI device
 * @extra: How many bytes to alloc past the default
 *
 * Must be done via verbs allocator, because the verbs cleanup process
 * both does cleanup and free of the data structure.
 * "extra" is for chip-specific data.
 */
static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
					       size_t extra)
{
	struct hfi1_devdata *dd;
	int ret, nports;

	/* extra is sizeof(struct hfi1_pportdata) * number of ports */
	nports = extra / sizeof(struct hfi1_pportdata);

	dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
						     nports);
	if (!dd)
		return ERR_PTR(-ENOMEM);
	dd->num_pports = nports;
	dd->pport = (struct hfi1_pportdata *)(dd + 1);
	dd->pcidev = pdev;
	pci_set_drvdata(pdev, dd);

	ret = xa_alloc_irq(&hfi1_dev_table, &dd->unit, dd, xa_limit_32b,
			GFP_KERNEL);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Could not allocate unit ID: error %d\n", -ret);
		goto bail;
	}
	rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit);
	/*
	 * If the BIOS does not have the NUMA node information set, select
	 * NUMA 0 so we get consistent performance.
	 */
	dd->node = pcibus_to_node(pdev->bus);
	if (dd->node == NUMA_NO_NODE) {
		dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
		dd->node = 0;
	}

	/*
	 * Initialize all locks for the device. This needs to be as early as
	 * possible so locks are usable.
	 */
	spin_lock_init(&dd->sc_lock);
	spin_lock_init(&dd->sendctrl_lock);
	spin_lock_init(&dd->rcvctrl_lock);
	spin_lock_init(&dd->uctxt_lock);
	spin_lock_init(&dd->hfi1_diag_trans_lock);
	spin_lock_init(&dd->sc_init_lock);
	spin_lock_init(&dd->dc8051_memlock);
	seqlock_init(&dd->sc2vl_lock);
	spin_lock_init(&dd->sde_map_lock);
	spin_lock_init(&dd->pio_map_lock);
	mutex_init(&dd->dc8051_lock);
	init_waitqueue_head(&dd->event_queue);
	spin_lock_init(&dd->irq_src_lock);

	dd->int_counter = alloc_percpu(u64);
	if (!dd->int_counter) {
		ret = -ENOMEM;
		goto bail;
	}

	dd->rcv_limit = alloc_percpu(u64);
	if (!dd->rcv_limit) {
		ret = -ENOMEM;
		goto bail;
	}

	dd->send_schedule = alloc_percpu(u64);
	if (!dd->send_schedule) {
		ret = -ENOMEM;
		goto bail;
	}

	dd->tx_opstats = alloc_percpu(struct hfi1_opcode_stats_perctx);
	if (!dd->tx_opstats) {
		ret = -ENOMEM;
		goto bail;
	}

	dd->comp_vect = kzalloc(sizeof(*dd->comp_vect), GFP_KERNEL);
	if (!dd->comp_vect) {
		ret = -ENOMEM;
		goto bail;
	}

	/* allocate dummy tail memory for all receive contexts */
	dd->rcvhdrtail_dummy_kvaddr =
		dma_alloc_coherent(&dd->pcidev->dev, sizeof(u64),
				   &dd->rcvhdrtail_dummy_dma, GFP_KERNEL);
	if (!dd->rcvhdrtail_dummy_kvaddr) {
		ret = -ENOMEM;
		goto bail;
	}

	atomic_set(&dd->ipoib_rsm_usr_num, 0);
	return dd;

bail:
	hfi1_free_devdata(dd);
	return ERR_PTR(ret);
}

/*
 * Called from freeze mode handlers, and from PCI error
 * reporting code.  Should be paranoid about state of
 * system and data structures.
 */
void hfi1_disable_after_error(struct hfi1_devdata *dd)
{
	if (dd->flags & HFI1_INITTED) {
		u32 pidx;

		dd->flags &= ~HFI1_INITTED;
		if (dd->pport)
			for (pidx = 0; pidx < dd->num_pports; ++pidx) {
				struct hfi1_pportdata *ppd;

				ppd = dd->pport + pidx;
				if (dd->flags & HFI1_PRESENT)
					set_link_state(ppd, HLS_DN_DISABLE);

				if (ppd->statusp)
					*ppd->statusp &= ~HFI1_STATUS_IB_READY;
			}
	}

	/*
	 * Mark as having had an error for driver, and also
	 * for /sys and status word mapped to user programs.
	 * This marks unit as not usable, until reset.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_HWERROR;
}

static void remove_one(struct pci_dev *);
static int init_one(struct pci_dev *, const struct pci_device_id *);
static void shutdown_one(struct pci_dev *);

#define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
#define PFX DRIVER_NAME ": "

const struct pci_device_id hfi1_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, hfi1_pci_tbl);

static struct pci_driver hfi1_pci_driver = {
	.name = DRIVER_NAME,
	.probe = init_one,
	.remove = remove_one,
	.shutdown = shutdown_one,
	.id_table = hfi1_pci_tbl,
	.err_handler = &hfi1_pci_err_handler,
};

static void __init compute_krcvqs(void)
{
	int i;

	for (i = 0; i < krcvqsset; i++)
		n_krcvqs += krcvqs[i];
}

/*
 * Do all the generic driver unit- and chip-independent memory
 * allocation and initialization.
 */
static int __init hfi1_mod_init(void)
{
	int ret;

	ret = dev_init();
	if (ret)
		goto bail;

	ret = node_affinity_init();
	if (ret)
		goto bail;

	/* validate max MTU before any devices start */
	if (!valid_opa_max_mtu(hfi1_max_mtu)) {
		pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n",
		       hfi1_max_mtu, HFI1_DEFAULT_MAX_MTU);
		hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
	}
	/* valid CUs run from 1-128 in powers of 2 */
	if (hfi1_cu > 128 || !is_power_of_2(hfi1_cu))
		hfi1_cu = 1;
	/* valid credit return threshold is 0-100, variable is unsigned */
	if (user_credit_return_threshold > 100)
		user_credit_return_threshold = 100;

	compute_krcvqs();
	/*
	 * Sanitize the receive interrupt count; sanitizing the timeout
	 * must wait until after the hardware type is known.
	 */
	if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK)
		rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK;
	/* reject invalid combinations */
	if (rcv_intr_count == 0 && rcv_intr_timeout == 0) {
		pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n");
		rcv_intr_count = 1;
	}
	if (rcv_intr_count > 1 && rcv_intr_timeout == 0) {
		/*
		 * Avoid indefinite packet delivery by requiring a timeout
		 * if count is > 1.
		 */
		pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n");
		rcv_intr_timeout = 1;
	}
	if (rcv_intr_dynamic && !(rcv_intr_count > 1 && rcv_intr_timeout > 0)) {
		/*
		 * The dynamic algorithm expects a non-zero timeout
		 * and a count > 1.
		 */
		pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n");
		rcv_intr_dynamic = 0;
	}

	/* sanitize link CRC options */
	link_crc_mask &= SUPPORTED_CRCS;

	ret = opfn_init();
	if (ret < 0) {
		pr_err("Failed to allocate opfn_wq\n");
		goto bail_dev;
	}

	/*
	 * These must be called before the driver is registered with
	 * the PCI subsystem.
	 */
	hfi1_dbg_init();
	ret = pci_register_driver(&hfi1_pci_driver);
	if (ret < 0) {
		pr_err("Unable to register driver: error %d\n", -ret);
		goto bail_dev;
	}
	goto bail; /* all OK */

bail_dev:
	hfi1_dbg_exit();
	dev_cleanup();
bail:
	return ret;
}

module_init(hfi1_mod_init);

/*
 * Do the non-unit driver cleanup, memory free, etc. at unload.
 */
static void __exit hfi1_mod_cleanup(void)
{
	pci_unregister_driver(&hfi1_pci_driver);
	opfn_exit();
	node_affinity_destroy_all();
	hfi1_dbg_exit();

	WARN_ON(!xa_empty(&hfi1_dev_table));
	dispose_firmware();	/* asymmetric with obtain_firmware() */
	dev_cleanup();
}

module_exit(hfi1_mod_cleanup);

/* this can only be called after a successful initialization */
static void cleanup_device_data(struct hfi1_devdata *dd)
{
	int ctxt;
	int pidx;

	/* users can't do anything more with chip */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		struct hfi1_pportdata *ppd = &dd->pport[pidx];
		struct cc_state *cc_state;
		int i;

		if (ppd->statusp)
			*ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT;

		for (i = 0; i < OPA_MAX_SLS; i++)
			hrtimer_cancel(&ppd->cca_timer[i].hrtimer);

		spin_lock(&ppd->cc_state_lock);
		cc_state = get_cc_state_protected(ppd);
		RCU_INIT_POINTER(ppd->cc_state, NULL);
		spin_unlock(&ppd->cc_state_lock);

		if (cc_state)
			kfree_rcu(cc_state, rcu);
	}

	free_credit_return(dd);

	/*
	 * Free any resources still in use (usually just kernel contexts)
	 * at unload; we iterate over ctxtcnt because that's what we
	 * allocate.
	 */
	for (ctxt = 0; dd->rcd && ctxt < dd->num_rcv_contexts; ctxt++) {
		struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];

		if (rcd) {
			hfi1_free_ctxt_rcv_groups(rcd);
			hfi1_free_ctxt(rcd);
		}
	}

	kfree(dd->rcd);
	dd->rcd = NULL;

	free_pio_map(dd);
	/* must follow rcv context free - need to remove rcv's hooks */
	for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++)
		sc_free(dd->send_contexts[ctxt].sc);
	dd->num_send_contexts = 0;
	kfree(dd->send_contexts);
	dd->send_contexts = NULL;
	kfree(dd->hw_to_sw);
	dd->hw_to_sw = NULL;
	kfree(dd->boardname);
	vfree(dd->events);
	vfree(dd->status);
}

/*
 * Clean up on unit shutdown, or error during unit load after
 * successful initialization.
 */
static void postinit_cleanup(struct hfi1_devdata *dd)
{
	hfi1_start_cleanup(dd);
	hfi1_comp_vectors_clean_up(dd);
	hfi1_dev_affinity_clean_up(dd);

	hfi1_pcie_ddcleanup(dd);
	hfi1_pcie_cleanup(dd->pcidev);

	cleanup_device_data(dd);

	hfi1_free_devdata(dd);
}

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret = 0, j, pidx, initfail;
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;

	/* First, lock the non-writable module parameters */
	HFI1_CAP_LOCK();

	/* Validate dev ids */
	if (!(ent->device == PCI_DEVICE_ID_INTEL0 ||
	      ent->device == PCI_DEVICE_ID_INTEL1)) {
		dev_err(&pdev->dev, "Failing on unknown Intel deviceid 0x%x\n",
			ent->device);
		ret = -ENODEV;
		goto bail;
	}

	/* Allocate the dd so we can get to work */
	dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
				sizeof(struct hfi1_pportdata));
	if (IS_ERR(dd)) {
		ret = PTR_ERR(dd);
		goto bail;
	}

	/* Validate some global module parameters */
	ret = hfi1_validate_rcvhdrcnt(dd, rcvhdrcnt);
	if (ret)
		goto bail;

	/* use the encoding function as a sanitization check */
	if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
		dd_dev_err(dd, "Invalid HdrQ Entry size %u\n",
			   hfi1_hdrq_entsize);
		ret = -EINVAL;
		goto bail;
	}

	/* The receive eager buffer size must be set before the receive
	 * contexts are created.
	 *
	 * Set the eager buffer size.  Validate that it falls in a range
	 * allowed by the hardware - all powers of 2 between the min and
	 * max.  The maximum valid MTU is within the eager buffer range
	 * so we do not need to cap the max_mtu by an eager buffer size
	 * setting.
	 */
	if (eager_buffer_size) {
		if (!is_power_of_2(eager_buffer_size))
			eager_buffer_size =
				roundup_pow_of_two(eager_buffer_size);
		eager_buffer_size =
			clamp_val(eager_buffer_size,
				  MIN_EAGER_BUFFER * 8,
				  MAX_EAGER_BUFFER_TOTAL);
		dd_dev_info(dd, "Eager buffer size %u\n",
			    eager_buffer_size);
	} else {
		dd_dev_err(dd, "Invalid Eager buffer size of 0\n");
		ret = -EINVAL;
		goto bail;
	}
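
	/*
	 * e.g. (illustrative): a requested eager_buffer_size of 5MB is
	 * rounded up to the next power of two (8MB), then clamped to
	 * [MIN_EAGER_BUFFER * 8, MAX_EAGER_BUFFER_TOTAL].
	 */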

	/* restrict value of hfi1_rcvarr_split */
	hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100);

	ret = hfi1_pcie_init(dd);
	if (ret)
		goto bail;

	/*
	 * Do device-specific initialization, function table setup, dd
	 * allocation, etc.
	 */
	ret = hfi1_init_dd(dd);
	if (ret)
		goto clean_bail; /* error already printed */

	ret = create_workqueues(dd);
	if (ret)
		goto clean_bail;

	/* do the generic initialization */
	initfail = hfi1_init(dd, 0);

	ret = hfi1_register_ib_device(dd);

	/*
	 * Now ready for use.  This should be cleared whenever we
	 * detect a reset, or initiate one.  If earlier failure,
	 * we still create devices, so diags, etc. can be used
	 * to determine cause of problem.
	 */
	if (!initfail && !ret) {
		dd->flags |= HFI1_INITTED;
		/* create debugfs files after init and ib register */
		hfi1_dbg_ibdev_init(&dd->verbs_dev);
	}

	j = hfi1_device_create(dd);
	if (j)
		dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);

	if (initfail || ret) {
		msix_clean_up_interrupts(dd);
		stop_timers(dd);
		flush_workqueue(ib_wq);
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			hfi1_quiet_serdes(dd->pport + pidx);
			ppd = dd->pport + pidx;
			if (ppd->hfi1_wq) {
				destroy_workqueue(ppd->hfi1_wq);
				ppd->hfi1_wq = NULL;
			}
			if (ppd->link_wq) {
				destroy_workqueue(ppd->link_wq);
				ppd->link_wq = NULL;
			}
		}
		if (!j)
			hfi1_device_remove(dd);
		if (!ret)
			hfi1_unregister_ib_device(dd);
		postinit_cleanup(dd);
		if (initfail)
			ret = initfail;
		goto bail;	/* everything already cleaned */
	}

	sdma_start(dd);

	return 0;

clean_bail:
	hfi1_pcie_cleanup(pdev);
bail:
	return ret;
}
1737 
wait_for_clients(struct hfi1_devdata * dd)1738 static void wait_for_clients(struct hfi1_devdata *dd)
1739 {
1740 	/*
1741 	 * Remove the device init value and complete the device if there is
1742 	 * no clients or wait for active clients to finish.
1743 	 */
1744 	if (atomic_dec_and_test(&dd->user_refcount))
1745 		complete(&dd->user_comp);
1746 
1747 	wait_for_completion(&dd->user_comp);
1748 }

static void remove_one(struct pci_dev *pdev)
{
	struct hfi1_devdata *dd = pci_get_drvdata(pdev);

	/* close debugfs files before ib unregister */
	hfi1_dbg_ibdev_exit(&dd->verbs_dev);

	/* remove the /dev hfi1 interface */
	hfi1_device_remove(dd);

	/* wait for existing user space clients to finish */
	wait_for_clients(dd);

	/* unregister from IB core */
	hfi1_unregister_ib_device(dd);

	/* free netdev data */
	hfi1_netdev_free(dd);

	/*
	 * Disable the IB link, disable interrupts on the device,
	 * clear DMA engines, etc.
	 */
	shutdown_device(dd);
	destroy_workqueues(dd);

	stop_timers(dd);

	/* wait until all of our (qsfp) queue_work() calls complete */
	flush_workqueue(ib_wq);

	postinit_cleanup(dd);
}

static void shutdown_one(struct pci_dev *pdev)
{
	struct hfi1_devdata *dd = pci_get_drvdata(pdev);

	shutdown_device(dd);
}

/**
 * hfi1_create_rcvhdrq - create a receive header queue
 * @dd: the hfi1_ib device
 * @rcd: the context data
 *
 * This must be contiguous memory (from an I/O perspective), and must be
 * DMA'able (which means for some systems, it will go through an IOMMU,
 * or be forced into a low address range).
 */
int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	unsigned int amt;

	if (!rcd->rcvhdrq) {
		gfp_t gfp_flags;

		amt = rcvhdrq_size(rcd);

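		/*
		 * Kernel and VNIC contexts get GFP_KERNEL; user contexts
		 * get GFP_USER since the header queue is mapped into the
		 * user's address space.
		 */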
		if (rcd->ctxt < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
			gfp_flags = GFP_KERNEL;
		else
			gfp_flags = GFP_USER;
		rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
						  &rcd->rcvhdrq_dma,
						  gfp_flags | __GFP_COMP);

		if (!rcd->rcvhdrq) {
			dd_dev_err(dd,
				   "attempt to allocate %u bytes for ctxt %u rcvhdrq failed\n",
				   amt, rcd->ctxt);
			goto bail;
		}

		if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
		    HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) {
			rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
								    PAGE_SIZE,
								    &rcd->rcvhdrqtailaddr_dma,
								    gfp_flags);
			if (!rcd->rcvhdrtail_kvaddr)
				goto bail_free;
		}
	}

	set_hdrq_regs(rcd->dd, rcd->ctxt, rcd->rcvhdrqentsize,
		      rcd->rcvhdrq_cnt);

	return 0;

bail_free:
	dd_dev_err(dd,
		   "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
		   rcd->ctxt);
	dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
			  rcd->rcvhdrq_dma);
	rcd->rcvhdrq = NULL;
bail:
	return -ENOMEM;
}

/**
 * hfi1_setup_eagerbufs - allocate eager buffers, both kernel and user contexts.
 * @rcd: the context we are setting up.
 *
 * Allocate the eager TID buffers and program them into the chip.
 * They are no longer completely contiguous; we do multiple allocation
 * calls.  Otherwise we get the OOM code involved, by asking for too
 * much per call, with disastrous results on some kernels.
 */
int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 max_entries, egrtop, alloced_bytes = 0;
	gfp_t gfp_flags;
	u16 order, idx = 0;
	int ret = 0;
	u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);

	/*
	 * GFP_USER, but without GFP_FS, so buffer cache can be
	 * coalesced (we hope); otherwise, even at order 4,
	 * heavy filesystem activity makes these fail, and we can
	 * use compound pages.
	 */
	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;

	/*
	 * The minimum size of the eager buffers is a group of MTU-sized
	 * buffers.
	 * The global eager_buffer_size parameter is checked against the
	 * theoretical lower limit of the value.  Here, we check against the
	 * MTU.
	 */
	if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size))
		rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size;
	/*
	 * If using one-pkt-per-egr-buffer, lower the eager buffer
	 * size to the max MTU (page-aligned).
	 */
	if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
		rcd->egrbufs.rcvtid_size = round_mtu;

	/*
	 * Eager buffer sizes of 1MB or less require smaller TID sizes
	 * to satisfy the "multiple of 8 RcvArray entries" requirement.
	 */
	if (rcd->egrbufs.size <= (1 << 20))
		rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu,
			rounddown_pow_of_two(rcd->egrbufs.size / 8));
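	/*
	 * e.g., (illustrative numbers) a 512KB egrbufs.size gives
	 * rounddown_pow_of_two(512KB / 8) = 64KB; with a 16KB round_mtu
	 * the TID size becomes 64KB, i.e. 8 RcvArray entries.
	 */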

	while (alloced_bytes < rcd->egrbufs.size &&
	       rcd->egrbufs.alloced < rcd->egrbufs.count) {
		rcd->egrbufs.buffers[idx].addr =
			dma_alloc_coherent(&dd->pcidev->dev,
					   rcd->egrbufs.rcvtid_size,
					   &rcd->egrbufs.buffers[idx].dma,
					   gfp_flags);
		if (rcd->egrbufs.buffers[idx].addr) {
			rcd->egrbufs.buffers[idx].len =
				rcd->egrbufs.rcvtid_size;
			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr =
				rcd->egrbufs.buffers[idx].addr;
			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].dma =
				rcd->egrbufs.buffers[idx].dma;
			rcd->egrbufs.alloced++;
			alloced_bytes += rcd->egrbufs.rcvtid_size;
			idx++;
		} else {
			u32 new_size, i, j;
			u64 offset = 0;

			/*
			 * Fail the eager buffer allocation if:
			 *   - we are already using the lowest acceptable size
			 *   - we are using one-pkt-per-egr-buffer (this implies
			 *     that we are accepting only one size)
			 */
			if (rcd->egrbufs.rcvtid_size == round_mtu ||
			    !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
				dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
					   rcd->ctxt);
				ret = -ENOMEM;
				goto bail_rcvegrbuf_phys;
			}

			new_size = rcd->egrbufs.rcvtid_size / 2;
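			/*
			 * e.g., (illustrative) a failed 256KB allocation is
			 * retried at 128KB, then 64KB, and so on, down to
			 * round_mtu at the smallest.
			 */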

			/*
			 * If the first attempt to allocate memory failed,
			 * don't fail everything; continue with the next
			 * lower size.
			 */
			if (idx == 0) {
				rcd->egrbufs.rcvtid_size = new_size;
				continue;
			}

			/*
			 * Re-partition already allocated buffers to a smaller
			 * size.
			 */
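			/*
			 * e.g., (illustrative) two already allocated 64KB
			 * buffers re-partitioned at new_size = 32KB produce
			 * four rcvtid entries: buf0+0, buf0+32KB, buf1+0,
			 * buf1+32KB.
			 */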
			rcd->egrbufs.alloced = 0;
			for (i = 0, j = 0, offset = 0; j < idx; i++) {
				if (i >= rcd->egrbufs.count)
					break;
				rcd->egrbufs.rcvtids[i].dma =
					rcd->egrbufs.buffers[j].dma + offset;
				rcd->egrbufs.rcvtids[i].addr =
					rcd->egrbufs.buffers[j].addr + offset;
				rcd->egrbufs.alloced++;
				if ((rcd->egrbufs.buffers[j].dma + offset +
				     new_size) ==
				    (rcd->egrbufs.buffers[j].dma +
				     rcd->egrbufs.buffers[j].len)) {
					j++;
					offset = 0;
				} else {
					offset += new_size;
				}
			}
			rcd->egrbufs.rcvtid_size = new_size;
		}
	}
	rcd->egrbufs.numbufs = idx;
	rcd->egrbufs.size = alloced_bytes;

	hfi1_cdbg(PROC,
		  "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %uKB\n",
		  rcd->ctxt, rcd->egrbufs.alloced,
		  rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024);

	/*
	 * Set the context's rcv array head update threshold to the closest
	 * power of 2 (so we can use a mask instead of modulo) below half
	 * the allocated entries.
	 */
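	/* e.g., (illustrative) alloced = 48 -> rounddown_pow_of_two(24) = 16 */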
	rcd->egrbufs.threshold =
		rounddown_pow_of_two(rcd->egrbufs.alloced / 2);
	/*
	 * Compute the expected RcvArray entry base.  This is done after
	 * allocating the eager buffers in order to maximize the
	 * expected RcvArray entries for the context.
	 */
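	/*
	 * e.g., (illustrative, assuming group_size = 8) rcv_array_groups = 16
	 * gives max_entries = 128; with alloced = 50, egrtop =
	 * roundup(50, 8) = 56 and expected_count = 128 - 56 = 72 (subject
	 * to the MAX_TID_PAIR_ENTRIES * 2 cap below).
	 */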
	max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size;
	egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size);
	rcd->expected_count = max_entries - egrtop;
	if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2)
		rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2;

	rcd->expected_base = rcd->eager_base + egrtop;
	hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n",
		  rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
		  rcd->eager_base, rcd->expected_base);

	if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
		hfi1_cdbg(PROC,
			  "ctxt%u: current Eager buffer size is invalid %u\n",
			  rcd->ctxt, rcd->egrbufs.rcvtid_size);
		ret = -EINVAL;
		goto bail_rcvegrbuf_phys;
	}

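	/*
	 * Program each eager buffer's DMA address into its RcvArray entry;
	 * cond_resched() keeps this loop preemptible when a context has
	 * many entries.
	 */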
	for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
		hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
			     rcd->egrbufs.rcvtids[idx].dma, order);
		cond_resched();
	}

	return 0;

bail_rcvegrbuf_phys:
	for (idx = 0; idx < rcd->egrbufs.alloced &&
	     rcd->egrbufs.buffers[idx].addr;
	     idx++) {
		dma_free_coherent(&dd->pcidev->dev,
				  rcd->egrbufs.buffers[idx].len,
				  rcd->egrbufs.buffers[idx].addr,
				  rcd->egrbufs.buffers[idx].dma);
		rcd->egrbufs.buffers[idx].addr = NULL;
		rcd->egrbufs.buffers[idx].dma = 0;
		rcd->egrbufs.buffers[idx].len = 0;
	}

	return ret;
}