/*
 * Copyright(c) 2015-2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <linux/bitmap.h>
#include <rdma/rdma_vt.h>

#include "hfi.h"
#include "device.h"
#include "common.h"
#include "trace.h"
#include "mad.h"
#include "sdma.h"
#include "debugfs.h"
#include "verbs.h"
#include "aspm.h"
#include "affinity.h"
#include "vnic.h"
#include "exp_rcv.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

#define HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES 5
/*
 * Minimum number of buffers we want to have per user context, after the
 * driver's own use.
 */
#define HFI1_MIN_USER_CTXT_BUFCNT 7

#define HFI1_MIN_HDRQ_EGRBUF_CNT 2
#define HFI1_MAX_HDRQ_EGRBUF_CNT 16352
#define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
#define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */

/*
 * Number of user receive contexts we are configured to use (to allow for more
 * pio buffers per ctxt, etc.)  A value of -1 (the default) means use the
 * real (non-HT) CPU count.
 */
int num_user_contexts = -1;
module_param_named(num_user_contexts, num_user_contexts, int, 0444);
MODULE_PARM_DESC(
	num_user_contexts, "Set max number of user contexts to use (default: -1 will use the real (non-HT) CPU count)");

uint krcvqs[RXE_NUM_DATA_VL];
int krcvqsset;
module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");

/* computed based on above array */
unsigned long n_krcvqs;

static unsigned hfi1_rcvarr_split = 25;
module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers");

static uint eager_buffer_size = (8 << 20); /* 8MB */
module_param(eager_buffer_size, uint, S_IRUGO);
MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 8MB");

static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */
module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)");

static uint hfi1_hdrq_entsize = 32;
module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, S_IRUGO);
MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B, 32 - 128B (default)");

unsigned int user_credit_return_threshold = 33;	/* default is 33% */
module_param(user_credit_return_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits pass this many blocks (in percent of allocated blocks, 0 is off)");

static inline u64 encode_rcv_header_entry_size(u16 size);

static struct idr hfi1_unit_table;
u32 hfi1_cpulist_count;
unsigned long *hfi1_cpulist;

static int hfi1_create_kctxt(struct hfi1_devdata *dd,
			     struct hfi1_pportdata *ppd)
{
	struct hfi1_ctxtdata *rcd;
	int ret;

	/* The control context must always be context 0 */
	BUILD_BUG_ON(HFI1_CTRL_CTXT != 0);

	ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd);
	if (ret < 0) {
		dd_dev_err(dd, "Kernel receive context allocation failed\n");
		return ret;
	}

	/*
	 * Set up the kernel context flags here and now because they use
	 * default values for all receive side memories.  User contexts will
	 * be handled as they are created.
	 */
	rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
		HFI1_CAP_KGET(NODROP_RHQ_FULL) |
		HFI1_CAP_KGET(NODROP_EGR_FULL) |
		HFI1_CAP_KGET(DMA_RTAIL);

	/* Control context must use DMA_RTAIL */
	if (rcd->ctxt == HFI1_CTRL_CTXT)
		rcd->flags |= HFI1_CAP_DMA_RTAIL;
	rcd->seq_cnt = 1;

	rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
	if (!rcd->sc) {
		dd_dev_err(dd, "Kernel send context allocation failed\n");
		return -ENOMEM;
	}
	hfi1_init_ctxt(rcd->sc);

	return 0;
}

/*
 * Create the receive context array and one or more kernel contexts
 */
int hfi1_create_kctxts(struct hfi1_devdata *dd)
{
	u16 i;
	int ret;

	dd->rcd = kzalloc_node(dd->num_rcv_contexts * sizeof(*dd->rcd),
			       GFP_KERNEL, dd->node);
	if (!dd->rcd)
		return -ENOMEM;

	for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
		ret = hfi1_create_kctxt(dd, dd->pport);
		if (ret)
			goto bail;
	}

	return 0;
bail:
	for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i)
		hfi1_free_ctxt(dd->rcd[i]);

	/* All the contexts should be freed, free the array */
	kfree(dd->rcd);
	dd->rcd = NULL;
	return ret;
}

/*
 * Helper routines for the receive context reference count (rcd and uctxt).
 */
static void hfi1_rcd_init(struct hfi1_ctxtdata *rcd)
{
	kref_init(&rcd->kref);
}

/**
 * hfi1_rcd_free - clean up when the reference count reaches zero
 * @kref: pointer to an initialized rcd data structure
 *
 */
static void hfi1_rcd_free(struct kref *kref)
{
	unsigned long flags;
	struct hfi1_ctxtdata *rcd =
		container_of(kref, struct hfi1_ctxtdata, kref);

	spin_lock_irqsave(&rcd->dd->uctxt_lock, flags);
	rcd->dd->rcd[rcd->ctxt] = NULL;
	spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags);

	hfi1_free_ctxtdata(rcd->dd, rcd);

	kfree(rcd);
}

/**
 * hfi1_rcd_put - decrement the reference count for an rcd
 * @rcd: pointer to an initialized rcd data structure
 *
 * Use this to put a reference after the init.
 */
int hfi1_rcd_put(struct hfi1_ctxtdata *rcd)
{
	if (rcd)
		return kref_put(&rcd->kref, hfi1_rcd_free);

	return 0;
}

/**
 * hfi1_rcd_get - increment the reference count for an rcd
 * @rcd: pointer to an initialized rcd data structure
 *
 * Use this to get a reference after the init.
 *
 * Return: reflects kref_get_unless_zero(), which returns non-zero on
 * increment, otherwise 0.
 */
int hfi1_rcd_get(struct hfi1_ctxtdata *rcd)
{
	return kref_get_unless_zero(&rcd->kref);
}

/**
 * allocate_rcd_index - allocate an rcd index from the rcd array
 * @dd: pointer to a valid devdata structure
 * @rcd: rcd data structure to assign
 * @index: pointer to index that is allocated
 *
 * Find an empty index in the rcd array, and assign the given rcd to it.
 * If the array is full, return -EBUSY.
 *
 */
static int allocate_rcd_index(struct hfi1_devdata *dd,
			      struct hfi1_ctxtdata *rcd, u16 *index)
{
	unsigned long flags;
	u16 ctxt;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++)
		if (!dd->rcd[ctxt])
			break;

	if (ctxt < dd->num_rcv_contexts) {
		rcd->ctxt = ctxt;
		dd->rcd[ctxt] = rcd;
		hfi1_rcd_init(rcd);
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	if (ctxt >= dd->num_rcv_contexts)
		return -EBUSY;

	*index = ctxt;

	return 0;
}

/**
 * hfi1_rcd_get_by_index - look up an rcd by index and take a reference
 * @dd: pointer to a valid devdata structure
 * @ctxt: the index of a possible rcd
 *
 * We need to protect access to the rcd array.  If access is needed to
 * one or more indices, take the protecting spinlock and then increment
 * the kref.
 *
 * The caller is responsible for making the matching _put().
 *
 */
struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt)
{
	unsigned long flags;
	struct hfi1_ctxtdata *rcd = NULL;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	if (dd->rcd[ctxt]) {
		rcd = dd->rcd[ctxt];
		if (!hfi1_rcd_get(rcd))
			rcd = NULL;
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	return rcd;
}
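
/*
 * Usage sketch (illustrative only, not a call site): a caller that
 * needs temporary access to a context looks it up, which takes a kref,
 * and drops that reference when done:
 *
 *	struct hfi1_ctxtdata *rcd = hfi1_rcd_get_by_index(dd, ctxt);
 *
 *	if (rcd) {
 *		... use rcd; the kref keeps it from being freed ...
 *		hfi1_rcd_put(rcd);
 *	}
 */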

/*
 * Common code for user and kernel context create and setup.
 * NOTE: the initial kref is done here (hfi1_rcd_init()).
 */
int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
			 struct hfi1_ctxtdata **context)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ctxtdata *rcd;
	unsigned kctxt_ngroups = 0;
	u32 base;

	if (dd->rcv_entries.nctxt_extra >
	    dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt)
		kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
			 (dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt));
	rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa);
	if (rcd) {
		u32 rcvtids, max_entries;
		u16 ctxt;
		int ret;

		ret = allocate_rcd_index(dd, rcd, &ctxt);
		if (ret) {
			*context = NULL;
			kfree(rcd);
			return ret;
		}

		INIT_LIST_HEAD(&rcd->qp_wait_list);
		hfi1_exp_tid_group_init(&rcd->tid_group_list);
		hfi1_exp_tid_group_init(&rcd->tid_used_list);
		hfi1_exp_tid_group_init(&rcd->tid_full_list);
		rcd->ppd = ppd;
		rcd->dd = dd;
		__set_bit(0, rcd->in_use_ctxts);
		rcd->numa_id = numa;
		rcd->rcv_array_groups = dd->rcv_entries.ngroups;

		mutex_init(&rcd->exp_lock);

		hfi1_cdbg(PROC, "setting up context %u\n", rcd->ctxt);

		/*
		 * Calculate the context's RcvArray entry starting point.
		 * We do this here because we have to take into account all
		 * the RcvArray entries that previous contexts would have
		 * taken, and we have to account for any extra groups assigned
		 * to the static (kernel) or dynamic (vnic/user) contexts.
		 */
		if (ctxt < dd->first_dyn_alloc_ctxt) {
			if (ctxt < kctxt_ngroups) {
				base = ctxt * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else {
				base = kctxt_ngroups +
					(ctxt * dd->rcv_entries.ngroups);
			}
		} else {
			u16 ct = ctxt - dd->first_dyn_alloc_ctxt;

			base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) +
				kctxt_ngroups);
			if (ct < dd->rcv_entries.nctxt_extra) {
				base += ct * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else {
				base += dd->rcv_entries.nctxt_extra +
					(ct * dd->rcv_entries.ngroups);
			}
		}
		rcd->eager_base = base * dd->rcv_entries.group_size;

		rcd->rcvhdrq_cnt = rcvhdrcnt;
		rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
		/*
		 * Simple Eager buffer allocation: we have already pre-allocated
		 * the number of RcvArray entry groups. Each ctxtdata structure
		 * holds the number of groups for that context.
		 *
		 * To follow CSR requirements and maintain cacheline alignment,
		 * make sure all sizes and bases are multiples of group_size.
		 *
		 * The expected entry count is what is left after assigning
		 * eager.
		 */
		max_entries = rcd->rcv_array_groups *
			dd->rcv_entries.group_size;
		rcvtids = ((max_entries * hfi1_rcvarr_split) / 100);
		rcd->egrbufs.count = round_down(rcvtids,
						dd->rcv_entries.group_size);
		if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) {
			dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n",
				   rcd->ctxt);
			rcd->egrbufs.count = MAX_EAGER_ENTRIES;
		}
		hfi1_cdbg(PROC,
			  "ctxt%u: max Eager buffer RcvArray entries: %u\n",
			  rcd->ctxt, rcd->egrbufs.count);

		/*
		 * Allocate array that will hold the eager buffer accounting
		 * data.
		 * This will allocate the maximum possible buffer count based
		 * on the value of the RcvArray split parameter.
		 * The resulting value will be rounded down to the closest
		 * multiple of dd->rcv_entries.group_size.
		 */
		rcd->egrbufs.buffers = kzalloc_node(
			rcd->egrbufs.count * sizeof(*rcd->egrbufs.buffers),
			GFP_KERNEL, numa);
		if (!rcd->egrbufs.buffers)
			goto bail;
		rcd->egrbufs.rcvtids = kzalloc_node(
				rcd->egrbufs.count *
				sizeof(*rcd->egrbufs.rcvtids),
				GFP_KERNEL, numa);
		if (!rcd->egrbufs.rcvtids)
			goto bail;
		rcd->egrbufs.size = eager_buffer_size;
		/*
		 * The size of the buffers programmed into the RcvArray
		 * entries needs to be big enough to handle the highest
		 * MTU supported.
		 */
		if (rcd->egrbufs.size < hfi1_max_mtu) {
			rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
			hfi1_cdbg(PROC,
				  "ctxt%u: eager bufs size too small. Adjusting to %zu\n",
				    rcd->ctxt, rcd->egrbufs.size);
		}
		rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;

		/* Applicable only for statically created kernel contexts */
		if (ctxt < dd->first_dyn_alloc_ctxt) {
			rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
						    GFP_KERNEL, numa);
			if (!rcd->opstats)
				goto bail;
		}

		*context = rcd;
		return 0;
	}

bail:
	*context = NULL;
	hfi1_free_ctxt(rcd);
	return -ENOMEM;
}
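
/*
 * Worked example of the eager RcvArray accounting above (the numbers
 * are illustrative, not taken from real hardware): with
 * rcv_array_groups = 80, group_size = 8, and the default
 * rcvarr_split = 25,
 *
 *	max_entries   = 80 * 8 = 640
 *	rcvtids       = 640 * 25 / 100 = 160
 *	egrbufs.count = round_down(160, 8) = 160
 *
 * i.e. a quarter of the context's RcvArray entries are reserved for
 * eager buffers, rounded down to a whole number of groups.
 */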

/**
 * hfi1_free_ctxt - free a context
 * @rcd: pointer to an initialized rcd data structure
 *
 * This wrapper is the free function that matches hfi1_create_ctxtdata().
 * When a context is done being used (kernel or user), this function is called
 * for the "final" put to match the kref init from hfi1_create_ctxtdata().
 * Other users of the context do a get/put sequence to make sure that the
 * structure isn't removed while in use.
 */
void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd)
{
	hfi1_rcd_put(rcd);
}
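
/*
 * Lifecycle sketch for the context reference counting (illustrative
 * only): the initial kref taken in hfi1_create_ctxtdata() is released
 * by the matching "final" hfi1_free_ctxt():
 *
 *	struct hfi1_ctxtdata *rcd;
 *
 *	ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd); (kref = 1)
 *	...
 *	hfi1_free_ctxt(rcd);  (final put; hfi1_rcd_free() runs at 0)
 */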

/*
 * Convert a receive header entry size to the encoding used in the CSR.
 *
 * Returns zero if the given size is invalid.
 */
static inline u64 encode_rcv_header_entry_size(u16 size)
{
	/* there are only 3 valid receive header entry sizes */
	if (size == 2)
		return 1;
	if (size == 16)
		return 2;
	if (size == 32)
		return 4;
	return 0; /* invalid */
}
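
/*
 * For reference, the mapping implemented above (entry size in DWs, the
 * byte size it represents, and the CSR encoding):
 *
 *	 2 DWs ->   8B -> encoding 1
 *	16 DWs ->  64B -> encoding 2
 *	32 DWs -> 128B -> encoding 4
 *
 * e.g. the default hdrq_entsize of 32 is programmed as 4.
 */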

/*
 * Select the largest ccti value over all SLs to determine the intra-
 * packet gap for the link.
 *
 * called with cca_timer_lock held (to protect access to cca_timer
 * array), and rcu_read_lock() (to protect access to cc_state).
 */
void set_link_ipg(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct cc_state *cc_state;
	int i;
	u16 cce, ccti_limit, max_ccti = 0;
	u16 shift, mult;
	u64 src;
	u32 current_egress_rate; /* Mbits/sec */
	u32 max_pkt_time;
	/*
	 * max_pkt_time is the maximum packet egress time in units
	 * of the fabric clock period 1/(805 MHz).
	 */

	cc_state = get_cc_state(ppd);

	if (!cc_state)
		/*
		 * This should _never_ happen - rcu_read_lock() is held,
		 * and set_link_ipg() should not be called if cc_state
		 * is NULL.
		 */
		return;

	for (i = 0; i < OPA_MAX_SLS; i++) {
		u16 ccti = ppd->cca_timer[i].ccti;

		if (ccti > max_ccti)
			max_ccti = ccti;
	}

	ccti_limit = cc_state->cct.ccti_limit;
	if (max_ccti > ccti_limit)
		max_ccti = ccti_limit;

	cce = cc_state->cct.entries[max_ccti].entry;
	shift = (cce & 0xc000) >> 14;
	mult = (cce & 0x3fff);

	current_egress_rate = active_egress_rate(ppd);

	max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate);

	src = (max_pkt_time >> shift) * mult;

	src &= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK;
	src <<= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT;

	write_csr(dd, SEND_STATIC_RATE_CONTROL, src);
}
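
/*
 * Worked example of the CCT entry decode above (the entry value is
 * made up for illustration): for cce = 0x8010,
 *
 *	shift = (0x8010 & 0xc000) >> 14 = 2
 *	mult  = (0x8010 & 0x3fff) = 16
 *	src   = (max_pkt_time >> 2) * 16, i.e. 4 * max_pkt_time
 *
 * The CCT is typically populated so that a larger congestion index
 * yields a larger reload value, and therefore a longer inter-packet
 * gap.
 */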

static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
{
	struct cca_timer *cca_timer;
	struct hfi1_pportdata *ppd;
	int sl;
	u16 ccti_timer, ccti_min;
	struct cc_state *cc_state;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	cca_timer = container_of(t, struct cca_timer, hrtimer);
	ppd = cca_timer->ppd;
	sl = cca_timer->sl;

	rcu_read_lock();

	cc_state = get_cc_state(ppd);

	if (!cc_state) {
		rcu_read_unlock();
		return HRTIMER_NORESTART;
	}

	/*
	 * 1) decrement ccti for SL
	 * 2) calculate IPG for link (set_link_ipg())
	 * 3) restart timer, unless ccti is at min value
	 */

	ccti_min = cc_state->cong_setting.entries[sl].ccti_min;
	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;

	spin_lock_irqsave(&ppd->cca_timer_lock, flags);

	if (cca_timer->ccti > ccti_min) {
		cca_timer->ccti--;
		set_link_ipg(ppd);
	}

	if (cca_timer->ccti > ccti_min) {
		unsigned long nsec = 1024 * ccti_timer;
		/* ccti_timer is in units of 1.024 usec */
		hrtimer_forward_now(t, ns_to_ktime(nsec));
		ret = HRTIMER_RESTART;
	}

	spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
	rcu_read_unlock();
	return ret;
}

/*
 * Common code for initializing the physical port structure.
 */
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
			 struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
{
	int i;
	uint default_pkey_idx;
	struct cc_state *cc_state;

	ppd->dd = dd;
	ppd->hw_pidx = hw_pidx;
	ppd->port = port; /* IB port number, not index */

	default_pkey_idx = 1;

	ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
	ppd->part_enforce |= HFI1_PART_ENFORCE_IN;

	if (loopback) {
		hfi1_early_err(&pdev->dev,
			       "Faking data partition 0x8001 in idx %u\n",
			       !default_pkey_idx);
		ppd->pkeys[!default_pkey_idx] = 0x8001;
	}

	INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
	INIT_WORK(&ppd->link_up_work, handle_link_up);
	INIT_WORK(&ppd->link_down_work, handle_link_down);
	INIT_WORK(&ppd->freeze_work, handle_freeze);
	INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
	INIT_WORK(&ppd->sma_message_work, handle_sma_message);
	INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
	INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link);
	INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
	INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);

	mutex_init(&ppd->hls_lock);
	spin_lock_init(&ppd->qsfp_info.qsfp_lock);

	ppd->qsfp_info.ppd = ppd;
	ppd->sm_trap_qp = 0x0;
	ppd->sa_qp = 0x1;

	ppd->hfi1_wq = NULL;

	spin_lock_init(&ppd->cca_timer_lock);

	for (i = 0; i < OPA_MAX_SLS; i++) {
		hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		ppd->cca_timer[i].ppd = ppd;
		ppd->cca_timer[i].sl = i;
		ppd->cca_timer[i].ccti = 0;
		ppd->cca_timer[i].hrtimer.function = cca_timer_fn;
	}

	ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;

	spin_lock_init(&ppd->cc_state_lock);
	spin_lock_init(&ppd->cc_log_lock);
	cc_state = kzalloc(sizeof(*cc_state), GFP_KERNEL);
	RCU_INIT_POINTER(ppd->cc_state, cc_state);
	if (!cc_state)
		goto bail;
	return;

bail:

	hfi1_early_err(&pdev->dev,
		       "Congestion Control Agent disabled for port %d\n", port);
}

/*
 * Do initialization for device that is only needed on
 * first detect, not on resets.
 */
static int loadtime_init(struct hfi1_devdata *dd)
{
	return 0;
}

/**
 * init_after_reset - re-initialize after a reset
 * @dd: the hfi1_ib device
 *
 * Sanity check at least some of the values after reset, and
 * ensure no receive or transmit is occurring (explicitly, in case
 * the reset failed).
 */
static int init_after_reset(struct hfi1_devdata *dd)
{
	int i;
	struct hfi1_ctxtdata *rcd;
	/*
	 * Ensure chip does no sends or receives, tail updates, or
	 * pioavail updates while we re-initialize.  This is mostly
	 * for the driver data structures, not chip registers.
	 */
	for (i = 0; i < dd->num_rcv_contexts; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
			     HFI1_RCVCTRL_INTRAVAIL_DIS |
			     HFI1_RCVCTRL_TAILUPD_DIS, rcd);
		hfi1_rcd_put(rcd);
	}
	pio_send_control(dd, PSC_GLOBAL_DISABLE);
	for (i = 0; i < dd->num_send_contexts; i++)
		sc_disable(dd->send_contexts[i].sc);

	return 0;
}

static void enable_chip(struct hfi1_devdata *dd)
{
	struct hfi1_ctxtdata *rcd;
	u32 rcvmask;
	u16 i;

	/* enable PIO send */
	pio_send_control(dd, PSC_GLOBAL_ENABLE);

	/*
	 * Enable kernel ctxts' receive and receive interrupt.
	 * Other ctxts done as user opens and initializes them.
	 */
	for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;
		rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB;
		rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
			HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
		if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
			rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
		if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_RHQ_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
		if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_EGR_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
		hfi1_rcvctrl(dd, rcvmask, rcd);
		sc_enable(rcd->sc);
		hfi1_rcd_put(rcd);
	}
}

/**
 * create_workqueues - create per port workqueues
 * @dd: the hfi1_ib device
 */
static int create_workqueues(struct hfi1_devdata *dd)
{
	int pidx;
	struct hfi1_pportdata *ppd;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (!ppd->hfi1_wq) {
			ppd->hfi1_wq =
				alloc_workqueue(
				    "hfi%d_%d",
				    WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE |
				    WQ_MEM_RECLAIM,
				    HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES,
				    dd->unit, pidx);
			if (!ppd->hfi1_wq)
				goto wq_error;
		}
		if (!ppd->link_wq) {
			/*
			 * Make the link workqueue single-threaded to enforce
			 * serialization.
			 */
			ppd->link_wq =
				alloc_workqueue(
				    "hfi_link_%d_%d",
				    WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND,
				    1, /* max_active */
				    dd->unit, pidx);
			if (!ppd->link_wq)
				goto wq_error;
		}
	}
	return 0;
wq_error:
	pr_err("alloc_workqueue failed for port %d\n", pidx + 1);
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
		if (ppd->link_wq) {
			destroy_workqueue(ppd->link_wq);
			ppd->link_wq = NULL;
		}
	}
	return -ENOMEM;
}

/**
 * hfi1_init - do the actual initialization sequence on the chip
 * @dd: the hfi1_ib device
 * @reinit: re-initializing, so don't allocate new memory
 *
 * Do the actual initialization sequence on the chip.  This is done
 * both from the init routine called from the PCI infrastructure, and
 * when we reset the chip, or detect that it was reset internally,
 * or it's administratively re-enabled.
 *
 * Memory allocation here and in called routines is only done in
 * the first case (reinit == 0).  We have to be careful, because even
 * without memory allocation, we need to re-write all the chip registers
 * TIDs, etc. after the reset or enable has completed.
 */
int hfi1_init(struct hfi1_devdata *dd, int reinit)
{
	int ret = 0, pidx, lastfail = 0;
	unsigned long len;
	u16 i;
	struct hfi1_ctxtdata *rcd;
	struct hfi1_pportdata *ppd;

	/* Set up recv low level handlers */
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EXPECTED] =
						kdeth_process_expected;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EAGER] =
						kdeth_process_eager;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_IB] = process_receive_ib;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_ERROR] =
						process_receive_error;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_BYPASS] =
						process_receive_bypass;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID5] =
						process_receive_invalid;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID6] =
						process_receive_invalid;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID7] =
						process_receive_invalid;
	dd->rhf_rcv_function_map = dd->normal_rhf_rcv_functions;

	/* Set up send low level handlers */
	dd->process_pio_send = hfi1_verbs_send_pio;
	dd->process_dma_send = hfi1_verbs_send_dma;
	dd->pio_inline_send = pio_copy;
	dd->process_vnic_dma_send = hfi1_vnic_send_dma;

	if (is_ax(dd)) {
		atomic_set(&dd->drop_packet, DROP_PACKET_ON);
		dd->do_drop = 1;
	} else {
		atomic_set(&dd->drop_packet, DROP_PACKET_OFF);
		dd->do_drop = 0;
	}

	/* make sure the link is not "up" */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		ppd->linkup = 0;
	}

	if (reinit)
		ret = init_after_reset(dd);
	else
		ret = loadtime_init(dd);
	if (ret)
		goto done;

	/* allocate dummy tail memory for all receive contexts */
	dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent(
		&dd->pcidev->dev, sizeof(u64),
		&dd->rcvhdrtail_dummy_dma,
		GFP_KERNEL);

	if (!dd->rcvhdrtail_dummy_kvaddr) {
		dd_dev_err(dd, "cannot allocate dummy tail memory\n");
		ret = -ENOMEM;
		goto done;
	}

	/* dd->rcd can be NULL if early initialization failed */
	for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) {
		/*
		 * Set up the (kernel) rcvhdr queue and egr TIDs.  If doing
		 * re-init, the simplest way to handle this is to free
		 * existing, and re-allocate.
		 * Need to re-create rest of ctxt 0 ctxtdata as well.
		 */
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;

		rcd->do_interrupt = &handle_receive_interrupt;

		lastfail = hfi1_create_rcvhdrq(dd, rcd);
		if (!lastfail)
			lastfail = hfi1_setup_eagerbufs(rcd);
		if (lastfail) {
			dd_dev_err(dd,
				   "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
			ret = lastfail;
		}
		hfi1_rcd_put(rcd);
	}

	/* Allocate enough memory for user event notification. */
	len = PAGE_ALIGN(dd->chip_rcv_contexts * HFI1_MAX_SHARED_CTXTS *
			 sizeof(*dd->events));
	dd->events = vmalloc_user(len);
	if (!dd->events)
		dd_dev_err(dd, "Failed to allocate user events page\n");
	/*
	 * Allocate a page for device and port status.
	 * Page will be shared amongst all user processes.
	 */
	dd->status = vmalloc_user(PAGE_SIZE);
	if (!dd->status)
		dd_dev_err(dd, "Failed to allocate dev status page\n");
	else
		dd->freezelen = PAGE_SIZE - (sizeof(*dd->status) -
					     sizeof(dd->status->freezemsg));
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (dd->status)
			/* Currently, we only have one port */
			ppd->statusp = &dd->status->port;

		set_mtu(ppd);
	}

	/* enable chip even if we have an error, so we can debug cause */
	enable_chip(dd);

done:
	/*
	 * Set status even if port serdes is not initialized
	 * so that diags will work.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_CHIP_PRESENT |
			HFI1_STATUS_INITTED;
	if (!ret) {
		/* enable all interrupts from the chip */
		set_intr_state(dd, 1);

		/* chip is OK for user apps; mark it as initialized */
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;

			/*
			 * start the serdes - must be after interrupts are
			 * enabled so we are notified when the link goes up
			 */
			lastfail = bringup_serdes(ppd);
			if (lastfail)
				dd_dev_info(dd,
					    "Failed to bring up port %u\n",
					    ppd->port);

			/*
			 * Set status even if port serdes is not initialized
			 * so that diags will work.
			 */
			if (ppd->statusp)
				*ppd->statusp |= HFI1_STATUS_CHIP_PRESENT |
							HFI1_STATUS_INITTED;
			if (!ppd->link_speed_enabled)
				continue;
		}
	}

	/* if ret is non-zero, we probably should do some cleanup here... */
	return ret;
}

static inline struct hfi1_devdata *__hfi1_lookup(int unit)
{
	return idr_find(&hfi1_unit_table, unit);
}

struct hfi1_devdata *hfi1_lookup(int unit)
{
	struct hfi1_devdata *dd;
	unsigned long flags;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	dd = __hfi1_lookup(unit);
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);

	return dd;
}

/*
 * Stop the timers during unit shutdown, or after an error late
 * in initialization.
 */
static void stop_timers(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	int pidx;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->led_override_timer.data) {
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}
	}
}

/**
 * shutdown_device - shut down a device
 * @dd: the hfi1_ib device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.  It does not free any data structures.
 * Everything it does has to be setup again by hfi1_init(dd, 1)
 */
static void shutdown_device(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	struct hfi1_ctxtdata *rcd;
	unsigned pidx;
	int i;

	if (dd->flags & HFI1_SHUTDOWN)
		return;
	dd->flags |= HFI1_SHUTDOWN;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		ppd->linkup = 0;
		if (ppd->statusp)
			*ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
					   HFI1_STATUS_IB_READY);
	}
	dd->flags &= ~HFI1_INITTED;

	/* mask and clean up interrupts, but not errors */
	set_intr_state(dd, 0);
	hfi1_clean_up_interrupts(dd);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		for (i = 0; i < dd->num_rcv_contexts; i++) {
			rcd = hfi1_rcd_get_by_index(dd, i);
			hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS |
				     HFI1_RCVCTRL_CTXT_DIS |
				     HFI1_RCVCTRL_INTRAVAIL_DIS |
				     HFI1_RCVCTRL_PKEY_DIS |
				     HFI1_RCVCTRL_ONE_PKT_EGR_DIS, rcd);
			hfi1_rcd_put(rcd);
		}
		/*
		 * Gracefully stop all sends allowing any in progress to
		 * trickle out first.
		 */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_flush(dd->send_contexts[i].sc);
	}

	/*
	 * Enough for anything that's going to trickle out to have actually
	 * done so.
	 */
	udelay(20);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		/* disable all contexts */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_disable(dd->send_contexts[i].sc);
		/* disable the send device */
		pio_send_control(dd, PSC_GLOBAL_DISABLE);

		shutdown_led_override(ppd);

		/*
		 * Clear SerdesEnable.
		 * We can't count on interrupts since we are stopping.
		 */
		hfi1_quiet_serdes(ppd);

		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
		if (ppd->link_wq) {
			destroy_workqueue(ppd->link_wq);
			ppd->link_wq = NULL;
		}
	}
	sdma_exit(dd);
}

/**
 * hfi1_free_ctxtdata - free a context's allocated data
 * @dd: the hfi1_ib device
 * @rcd: the ctxtdata structure
 *
 * free up any allocated data for a context
 * It should never change any chip state, or global driver state.
 */
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	u32 e;

	if (!rcd)
		return;

	if (rcd->rcvhdrq) {
		dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
				  rcd->rcvhdrq, rcd->rcvhdrq_dma);
		rcd->rcvhdrq = NULL;
		if (rcd->rcvhdrtail_kvaddr) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					  (void *)rcd->rcvhdrtail_kvaddr,
					  rcd->rcvhdrqtailaddr_dma);
			rcd->rcvhdrtail_kvaddr = NULL;
		}
	}

	/* all the RcvArray entries should have been cleared by now */
	kfree(rcd->egrbufs.rcvtids);
	rcd->egrbufs.rcvtids = NULL;

	for (e = 0; e < rcd->egrbufs.alloced; e++) {
		if (rcd->egrbufs.buffers[e].dma)
			dma_free_coherent(&dd->pcidev->dev,
					  rcd->egrbufs.buffers[e].len,
					  rcd->egrbufs.buffers[e].addr,
					  rcd->egrbufs.buffers[e].dma);
	}
	kfree(rcd->egrbufs.buffers);
	rcd->egrbufs.alloced = 0;
	rcd->egrbufs.buffers = NULL;

	sc_free(rcd->sc);
	rcd->sc = NULL;

	vfree(rcd->subctxt_uregbase);
	vfree(rcd->subctxt_rcvegrbuf);
	vfree(rcd->subctxt_rcvhdr_base);
	kfree(rcd->opstats);

	rcd->subctxt_uregbase = NULL;
	rcd->subctxt_rcvegrbuf = NULL;
	rcd->subctxt_rcvhdr_base = NULL;
	rcd->opstats = NULL;
}

/*
 * Release our hold on the shared asic data.  If we are the last one,
 * return the structure to be finalized outside the lock.  Must be
 * holding hfi1_devs_lock.
 */
static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd)
{
	struct hfi1_asic_data *ad;
	int other;

	if (!dd->asic_data)
		return NULL;
	dd->asic_data->dds[dd->hfi1_id] = NULL;
	other = dd->hfi1_id ? 0 : 1;
	ad = dd->asic_data;
	dd->asic_data = NULL;
	/* return NULL if the other dd still has a link */
	return ad->dds[other] ? NULL : ad;
}

static void finalize_asic_data(struct hfi1_devdata *dd,
			       struct hfi1_asic_data *ad)
{
	clean_up_i2c(dd, ad);
	kfree(ad);
}

static void __hfi1_free_devdata(struct kobject *kobj)
{
	struct hfi1_devdata *dd =
		container_of(kobj, struct hfi1_devdata, kobj);
	struct hfi1_asic_data *ad;
	unsigned long flags;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	idr_remove(&hfi1_unit_table, dd->unit);
	list_del(&dd->list);
	ad = release_asic_data(dd);
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
	if (ad)
		finalize_asic_data(dd, ad);
	free_platform_config(dd);
	rcu_barrier(); /* wait for rcu callbacks to complete */
	free_percpu(dd->int_counter);
	free_percpu(dd->rcv_limit);
	free_percpu(dd->send_schedule);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
}

static struct kobj_type hfi1_devdata_type = {
	.release = __hfi1_free_devdata,
};

void hfi1_free_devdata(struct hfi1_devdata *dd)
{
	kobject_put(&dd->kobj);
}
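
/*
 * Note on the pattern above (a sketch of the intended pairing, not new
 * logic): the devdata is freed through its embedded kobject, so the
 * final kobject_put() invokes __hfi1_free_devdata() via
 * hfi1_devdata_type:
 *
 *	kobject_init(&dd->kobj, &hfi1_devdata_type); (hfi1_alloc_devdata())
 *	...
 *	hfi1_free_devdata(dd);  (kobject_put(); release runs at refcount 0)
 */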

/*
 * Allocate our primary per-unit data structure.  Must be done via verbs
 * allocator, because the verbs cleanup process both does cleanup and
 * free of the data structure.
 * "extra" is for chip-specific data.
 *
 * Use the idr mechanism to get a unit number for this unit.
 */
struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
{
	unsigned long flags;
	struct hfi1_devdata *dd;
	int ret, nports;

	/* extra is * number of ports */
	nports = extra / sizeof(struct hfi1_pportdata);

	dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
						     nports);
	if (!dd)
		return ERR_PTR(-ENOMEM);
	dd->num_pports = nports;
	dd->pport = (struct hfi1_pportdata *)(dd + 1);
	dd->pcidev = pdev;
	pci_set_drvdata(pdev, dd);

	INIT_LIST_HEAD(&dd->list);
	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&hfi1_devs_lock, flags);

	ret = idr_alloc(&hfi1_unit_table, dd, 0, 0, GFP_NOWAIT);
	if (ret >= 0) {
		dd->unit = ret;
		list_add(&dd->list, &hfi1_dev_list);
	}

	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
	idr_preload_end();

	if (ret < 0) {
		hfi1_early_err(&pdev->dev,
			       "Could not allocate unit ID: error %d\n", -ret);
		goto bail;
	}
	/*
	 * Initialize all locks for the device. This needs to be as early as
	 * possible so locks are usable.
	 */
	spin_lock_init(&dd->sc_lock);
	spin_lock_init(&dd->sendctrl_lock);
	spin_lock_init(&dd->rcvctrl_lock);
	spin_lock_init(&dd->uctxt_lock);
	spin_lock_init(&dd->hfi1_diag_trans_lock);
	spin_lock_init(&dd->sc_init_lock);
	spin_lock_init(&dd->dc8051_memlock);
	seqlock_init(&dd->sc2vl_lock);
	spin_lock_init(&dd->sde_map_lock);
	spin_lock_init(&dd->pio_map_lock);
	mutex_init(&dd->dc8051_lock);
	init_waitqueue_head(&dd->event_queue);

	dd->int_counter = alloc_percpu(u64);
	if (!dd->int_counter) {
		ret = -ENOMEM;
		hfi1_early_err(&pdev->dev,
			       "Could not allocate per-cpu int_counter\n");
		goto bail;
	}

	dd->rcv_limit = alloc_percpu(u64);
	if (!dd->rcv_limit) {
		ret = -ENOMEM;
		hfi1_early_err(&pdev->dev,
			       "Could not allocate per-cpu rcv_limit\n");
		goto bail;
	}

	dd->send_schedule = alloc_percpu(u64);
	if (!dd->send_schedule) {
		ret = -ENOMEM;
		hfi1_early_err(&pdev->dev,
			       "Could not allocate per-cpu send_schedule\n");
		goto bail;
	}

	if (!hfi1_cpulist_count) {
		u32 count = num_online_cpus();

		hfi1_cpulist = kcalloc(BITS_TO_LONGS(count), sizeof(long),
				       GFP_KERNEL);
		if (hfi1_cpulist)
			hfi1_cpulist_count = count;
		else
			hfi1_early_err(
			&pdev->dev,
			"Could not alloc cpulist info, cpu affinity might be wrong\n");
	}
	kobject_init(&dd->kobj, &hfi1_devdata_type);
	return dd;

bail:
	if (!list_empty(&dd->list))
		list_del_init(&dd->list);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
	return ERR_PTR(ret);
}

/*
 * Called from freeze mode handlers, and from PCI error
 * reporting code.  Should be paranoid about state of
 * system and data structures.
 */
void hfi1_disable_after_error(struct hfi1_devdata *dd)
{
	if (dd->flags & HFI1_INITTED) {
		u32 pidx;

		dd->flags &= ~HFI1_INITTED;
		if (dd->pport)
			for (pidx = 0; pidx < dd->num_pports; ++pidx) {
				struct hfi1_pportdata *ppd;

				ppd = dd->pport + pidx;
				if (dd->flags & HFI1_PRESENT)
					set_link_state(ppd, HLS_DN_DISABLE);

				if (ppd->statusp)
					*ppd->statusp &= ~HFI1_STATUS_IB_READY;
			}
	}

	/*
	 * Mark as having had an error for driver, and also
	 * for /sys and status word mapped to user programs.
	 * This marks unit as not usable, until reset.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_HWERROR;
}

static void remove_one(struct pci_dev *);
static int init_one(struct pci_dev *, const struct pci_device_id *);
static void shutdown_one(struct pci_dev *);

#define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
#define PFX DRIVER_NAME ": "

const struct pci_device_id hfi1_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, hfi1_pci_tbl);

static struct pci_driver hfi1_pci_driver = {
	.name = DRIVER_NAME,
	.probe = init_one,
	.remove = remove_one,
	.shutdown = shutdown_one,
	.id_table = hfi1_pci_tbl,
	.err_handler = &hfi1_pci_err_handler,
};

static void __init compute_krcvqs(void)
{
	int i;

	for (i = 0; i < krcvqsset; i++)
		n_krcvqs += krcvqs[i];
}
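
/*
 * Example (illustrative): loading with krcvqs=2,2,1 sets krcvqsset to 3
 * and leaves n_krcvqs = 2 + 2 + 1 = 5, the total number of non-control
 * kernel receive queues summed over the first three data VLs.
 */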

/*
 * Do all the generic driver unit- and chip-independent memory
 * allocation and initialization.
 */
static int __init hfi1_mod_init(void)
{
	int ret;

	ret = dev_init();
	if (ret)
		goto bail;

	ret = node_affinity_init();
	if (ret)
		goto bail;

	/* validate max MTU before any devices start */
	if (!valid_opa_max_mtu(hfi1_max_mtu)) {
		pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n",
		       hfi1_max_mtu, HFI1_DEFAULT_MAX_MTU);
		hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
	}
	/* valid CUs run from 1-128 in powers of 2 */
	if (hfi1_cu > 128 || !is_power_of_2(hfi1_cu))
		hfi1_cu = 1;
	/* valid credit return threshold is 0-100, variable is unsigned */
	if (user_credit_return_threshold > 100)
		user_credit_return_threshold = 100;

	compute_krcvqs();
	/*
	 * Sanitize the receive interrupt count; the timeout must wait
	 * until after the hardware type is known.
	 */
	if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK)
		rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK;
	/* reject invalid combinations */
	if (rcv_intr_count == 0 && rcv_intr_timeout == 0) {
		pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n");
		rcv_intr_count = 1;
	}
	if (rcv_intr_count > 1 && rcv_intr_timeout == 0) {
		/*
		 * Avoid indefinite packet delivery by requiring a timeout
		 * if count is > 1.
		 */
		pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n");
		rcv_intr_timeout = 1;
	}
	if (rcv_intr_dynamic && !(rcv_intr_count > 1 && rcv_intr_timeout > 0)) {
		/*
		 * The dynamic algorithm expects a non-zero timeout
		 * and a count > 1.
		 */
		pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n");
		rcv_intr_dynamic = 0;
	}

	/* sanitize link CRC options */
	link_crc_mask &= SUPPORTED_CRCS;

	/*
	 * These must be called before the driver is registered with
	 * the PCI subsystem.
	 */
	idr_init(&hfi1_unit_table);

	hfi1_dbg_init();
	ret = hfi1_wss_init();
	if (ret < 0)
		goto bail_wss;
	ret = pci_register_driver(&hfi1_pci_driver);
	if (ret < 0) {
		pr_err("Unable to register driver: error %d\n", -ret);
		goto bail_dev;
	}
	goto bail; /* all OK */

bail_dev:
	hfi1_wss_exit();
bail_wss:
	hfi1_dbg_exit();
	idr_destroy(&hfi1_unit_table);
	dev_cleanup();
bail:
	return ret;
}

module_init(hfi1_mod_init);

/*
 * Do the non-unit driver cleanup, memory free, etc. at unload.
 */
static void __exit hfi1_mod_cleanup(void)
{
	pci_unregister_driver(&hfi1_pci_driver);
	node_affinity_destroy();
	hfi1_wss_exit();
	hfi1_dbg_exit();
	hfi1_cpulist_count = 0;
	kfree(hfi1_cpulist);

	idr_destroy(&hfi1_unit_table);
	dispose_firmware();	/* asymmetric with obtain_firmware() */
	dev_cleanup();
}

module_exit(hfi1_mod_cleanup);

/* this can only be called after a successful initialization */
static void cleanup_device_data(struct hfi1_devdata *dd)
{
	int ctxt;
	int pidx;

	/* users can't do anything more with chip */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		struct hfi1_pportdata *ppd = &dd->pport[pidx];
		struct cc_state *cc_state;
		int i;

		if (ppd->statusp)
			*ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT;

		for (i = 0; i < OPA_MAX_SLS; i++)
			hrtimer_cancel(&ppd->cca_timer[i].hrtimer);

		spin_lock(&ppd->cc_state_lock);
		cc_state = get_cc_state_protected(ppd);
		RCU_INIT_POINTER(ppd->cc_state, NULL);
		spin_unlock(&ppd->cc_state_lock);

		if (cc_state)
			kfree_rcu(cc_state, rcu);
	}

	free_credit_return(dd);

	if (dd->rcvhdrtail_dummy_kvaddr) {
		dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
				  (void *)dd->rcvhdrtail_dummy_kvaddr,
				  dd->rcvhdrtail_dummy_dma);
		dd->rcvhdrtail_dummy_kvaddr = NULL;
	}

	/*
	 * Free any resources still in use (usually just kernel contexts)
	 * at unload; we loop over the full context count, because that's
	 * what we allocated.
	 */
	for (ctxt = 0; dd->rcd && ctxt < dd->num_rcv_contexts; ctxt++) {
		struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];

		if (rcd) {
			hfi1_clear_tids(rcd);
			hfi1_free_ctxt(rcd);
		}
	}

	kfree(dd->rcd);
	dd->rcd = NULL;

	free_pio_map(dd);
	/* must follow rcv context free - need to remove rcv's hooks */
	for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++)
		sc_free(dd->send_contexts[ctxt].sc);
	dd->num_send_contexts = 0;
	kfree(dd->send_contexts);
	dd->send_contexts = NULL;
	kfree(dd->hw_to_sw);
	dd->hw_to_sw = NULL;
	kfree(dd->boardname);
	vfree(dd->events);
	vfree(dd->status);
}

/*
 * Clean up on unit shutdown, or error during unit load after
 * successful initialization.
 */
static void postinit_cleanup(struct hfi1_devdata *dd)
{
	hfi1_start_cleanup(dd);

	hfi1_pcie_ddcleanup(dd);
	hfi1_pcie_cleanup(dd->pcidev);

	cleanup_device_data(dd);

	hfi1_free_devdata(dd);
}

static int init_validate_rcvhdrcnt(struct device *dev, uint thecnt)
{
	if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
		hfi1_early_err(dev, "Receive header queue count too small\n");
		return -EINVAL;
	}

	if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
		hfi1_early_err(dev,
			       "Receive header queue count cannot be greater than %u\n",
			       HFI1_MAX_HDRQ_EGRBUF_CNT);
		return -EINVAL;
	}

	if (thecnt % HDRQ_INCREMENT) {
		hfi1_early_err(dev, "Receive header queue count %d must be divisible by %lu\n",
			       thecnt, HDRQ_INCREMENT);
		return -EINVAL;
	}

	return 0;
}

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret = 0, j, pidx, initfail;
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;

	/* First, lock the non-writable module parameters */
	HFI1_CAP_LOCK();

	/* Validate dev ids */
	if (!(ent->device == PCI_DEVICE_ID_INTEL0 ||
	      ent->device == PCI_DEVICE_ID_INTEL1)) {
		hfi1_early_err(&pdev->dev,
			       "Failing on unknown Intel deviceid 0x%x\n",
			       ent->device);
		ret = -ENODEV;
		goto bail;
	}

	/* Validate some global module parameters */
	ret = init_validate_rcvhdrcnt(&pdev->dev, rcvhdrcnt);
	if (ret)
		goto bail;

	/* use the encoding function as a sanitization check */
	if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
		hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n",
			       hfi1_hdrq_entsize);
		ret = -EINVAL;
		goto bail;
	}

	/* The receive eager buffer size must be set before the receive
	 * contexts are created.
	 *
	 * Set the eager buffer size.  Validate that it falls in a range
	 * allowed by the hardware - all powers of 2 between the min and
	 * max.  The maximum valid MTU is within the eager buffer range
	 * so we do not need to cap the max_mtu by an eager buffer size
	 * setting.
	 */
	if (eager_buffer_size) {
		if (!is_power_of_2(eager_buffer_size))
			eager_buffer_size =
				roundup_pow_of_two(eager_buffer_size);
		eager_buffer_size =
			clamp_val(eager_buffer_size,
				  MIN_EAGER_BUFFER * 8,
				  MAX_EAGER_BUFFER_TOTAL);
		hfi1_early_info(&pdev->dev, "Eager buffer size %u\n",
				eager_buffer_size);
	} else {
		hfi1_early_err(&pdev->dev, "Invalid Eager buffer size of 0\n");
		ret = -EINVAL;
		goto bail;
	}

	/* restrict value of hfi1_rcvarr_split */
	hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100);

	ret = hfi1_pcie_init(pdev, ent);
	if (ret)
		goto bail;

	/*
	 * Do device-specific initialization, function table setup, dd
	 * allocation, etc.
	 */
	dd = hfi1_init_dd(pdev, ent);

	if (IS_ERR(dd)) {
		ret = PTR_ERR(dd);
		goto clean_bail; /* error already printed */
	}

	ret = create_workqueues(dd);
	if (ret)
		goto clean_bail;

	/* do the generic initialization */
	initfail = hfi1_init(dd, 0);

	/* setup vnic */
	hfi1_vnic_setup(dd);

	ret = hfi1_register_ib_device(dd);

	/*
	 * Now ready for use.  This should be cleared whenever we
	 * detect a reset, or initiate one.  If earlier failure,
	 * we still create devices, so diags, etc. can be used
	 * to determine cause of problem.
	 */
	if (!initfail && !ret) {
		dd->flags |= HFI1_INITTED;
		/* create debugfs files after init and ib register */
		hfi1_dbg_ibdev_init(&dd->verbs_dev);
	}

	j = hfi1_device_create(dd);
	if (j)
		dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);

	if (initfail || ret) {
		hfi1_clean_up_interrupts(dd);
		stop_timers(dd);
		flush_workqueue(ib_wq);
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			hfi1_quiet_serdes(dd->pport + pidx);
			ppd = dd->pport + pidx;
			if (ppd->hfi1_wq) {
				destroy_workqueue(ppd->hfi1_wq);
				ppd->hfi1_wq = NULL;
			}
			if (ppd->link_wq) {
				destroy_workqueue(ppd->link_wq);
				ppd->link_wq = NULL;
			}
		}
		if (!j)
			hfi1_device_remove(dd);
		if (!ret)
			hfi1_unregister_ib_device(dd);
		hfi1_vnic_cleanup(dd);
		postinit_cleanup(dd);
		if (initfail)
			ret = initfail;
		goto bail;	/* everything already cleaned */
	}

	sdma_start(dd);

	return 0;

clean_bail:
	hfi1_pcie_cleanup(pdev);
bail:
	return ret;
}
1748 
wait_for_clients(struct hfi1_devdata * dd)1749 static void wait_for_clients(struct hfi1_devdata *dd)
1750 {
1751 	/*
1752 	 * Remove the device init value and complete the device if there is
1753 	 * no clients or wait for active clients to finish.
1754 	 */
1755 	if (atomic_dec_and_test(&dd->user_refcount))
1756 		complete(&dd->user_comp);
1757 
1758 	wait_for_completion(&dd->user_comp);
1759 }
1760 
remove_one(struct pci_dev * pdev)1761 static void remove_one(struct pci_dev *pdev)
1762 {
1763 	struct hfi1_devdata *dd = pci_get_drvdata(pdev);
1764 
1765 	/* close debugfs files before ib unregister */
1766 	hfi1_dbg_ibdev_exit(&dd->verbs_dev);
1767 
1768 	/* remove the /dev hfi1 interface */
1769 	hfi1_device_remove(dd);
1770 
1771 	/* wait for existing user space clients to finish */
1772 	wait_for_clients(dd);
1773 
1774 	/* unregister from IB core */
1775 	hfi1_unregister_ib_device(dd);
1776 
1777 	/* cleanup vnic */
1778 	hfi1_vnic_cleanup(dd);
1779 
1780 	/*
1781 	 * Disable the IB link, disable interrupts on the device,
1782 	 * clear dma engines, etc.
1783 	 */
1784 	shutdown_device(dd);
1785 
1786 	stop_timers(dd);
1787 
1788 	/* wait until all of our (qsfp) queue_work() calls complete */
1789 	flush_workqueue(ib_wq);
1790 
1791 	postinit_cleanup(dd);
1792 }
1793 
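/*
 * PCI shutdown hook: only quiesce the hardware (stop the link, DMA
 * engines, and interrupts via shutdown_device()); unlike remove_one(),
 * no resources are freed since the system is rebooting or kexec'ing.
 */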
1794 static void shutdown_one(struct pci_dev *pdev)
1795 {
1796 	struct hfi1_devdata *dd = pci_get_drvdata(pdev);
1797 
1798 	shutdown_device(dd);
1799 }
1800 
1801 /**
1802  * hfi1_create_rcvhdrq - create a receive header queue
1803  * @dd: the hfi1_ib device
1804  * @rcd: the context data
1805  *
1806  * This must be contiguous memory (from an I/O perspective), and must be
1807  * DMA'able (which means for some systems, it will go through an IOMMU,
1808  * or be forced into a low address range).
1809  */
1810 int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
1811 {
1812 	unsigned amt;
1813 	u64 reg;
1814 
1815 	if (!rcd->rcvhdrq) {
1816 		gfp_t gfp_flags;
1817 
1818 		/*
1819 		 * rcvhdrqentsize is in DWs, so we have to convert to bytes
1820 		 * (* sizeof(u32)).
1821 		 */
1822 		amt = PAGE_ALIGN(rcd->rcvhdrq_cnt * rcd->rcvhdrqentsize *
1823 				 sizeof(u32));
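		/*
		 * Worked example (illustrative values): 2048 entries of
		 * 32 DWs each give PAGE_ALIGN(2048 * 32 * 4) = 256 KiB.
		 */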
1824 
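		/*
		 * Kernel contexts use GFP_KERNEL; user contexts use GFP_USER,
		 * which additionally honors cpuset memory placement
		 * (__GFP_HARDWALL) for memory exposed to userspace.
		 */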
1825 		if ((rcd->ctxt < dd->first_dyn_alloc_ctxt) ||
1826 		    (rcd->sc && (rcd->sc->type == SC_KERNEL)))
1827 			gfp_flags = GFP_KERNEL;
1828 		else
1829 			gfp_flags = GFP_USER;
1830 		rcd->rcvhdrq = dma_zalloc_coherent(
1831 			&dd->pcidev->dev, amt, &rcd->rcvhdrq_dma,
1832 			gfp_flags | __GFP_COMP);
1833 
1834 		if (!rcd->rcvhdrq) {
1835 			dd_dev_err(dd,
1836 				   "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
1837 				   amt, rcd->ctxt);
1838 			goto bail;
1839 		}
1840 
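		/*
		 * With DMA_RTAIL, the chip DMAs the current header queue
		 * tail index into this page, so software can poll host
		 * memory for new packets instead of reading a chip CSR.
		 */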
1841 		if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
1842 		    HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) {
1843 			rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
1844 				&dd->pcidev->dev, PAGE_SIZE,
1845 				&rcd->rcvhdrqtailaddr_dma, gfp_flags);
1846 			if (!rcd->rcvhdrtail_kvaddr)
1847 				goto bail_free;
1848 		}
1849 
1850 		rcd->rcvhdrq_size = amt;
1851 	}
1852 	/*
1853 	 * These values are per-context:
1854 	 *	RcvHdrCnt
1855 	 *	RcvHdrEntSize
1856 	 *	RcvHdrSize
1857 	 */
1858 	reg = ((u64)(rcd->rcvhdrq_cnt >> HDRQ_SIZE_SHIFT)
1859 			& RCV_HDR_CNT_CNT_MASK)
1860 		<< RCV_HDR_CNT_CNT_SHIFT;
1861 	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_CNT, reg);
1862 	reg = (encode_rcv_header_entry_size(rcd->rcvhdrqentsize)
1863 			& RCV_HDR_ENT_SIZE_ENT_SIZE_MASK)
1864 		<< RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
1865 	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg);
1866 	reg = (dd->rcvhdrsize & RCV_HDR_SIZE_HDR_SIZE_MASK)
1867 		<< RCV_HDR_SIZE_HDR_SIZE_SHIFT;
1868 	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg);
1869 
1870 	/*
1871 	 * Program dummy tail address for every receive context
1872 	 * before enabling any receive context
1873 	 */
1874 	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_TAIL_ADDR,
1875 			dd->rcvhdrtail_dummy_dma);
1876 
1877 	return 0;
1878 
1879 bail_free:
1880 	dd_dev_err(dd,
1881 		   "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
1882 		   rcd->ctxt);
1883 	dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
1884 			  rcd->rcvhdrq_dma);
1885 	rcd->rcvhdrq = NULL;
1886 bail:
1887 	return -ENOMEM;
1888 }
1889 
1890 /**
1891  * hfi1_setup_eagerbufs - allocate eager buffers, both kernel and user contexts.
1892  * @rcd: the context we are setting up.
1893  *
1894  * Allocate the eager TID buffers and program them into the chip.
1895  * They are no longer completely contiguous; we do multiple allocation
1896  * calls.  Otherwise we would get the OOM killer involved by asking for
1897  * too much per call, with disastrous results on some kernels.
1898  */
1899 int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
1900 {
1901 	struct hfi1_devdata *dd = rcd->dd;
1902 	u32 max_entries, egrtop, alloced_bytes = 0, idx = 0;
1903 	gfp_t gfp_flags;
1904 	u16 order;
1905 	int ret = 0;
1906 	u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);
1907 
1908 	/*
1909 	 * GFP_USER, but without GFP_FS, so the buffer cache can be
1910 	 * coalesced (we hope); otherwise, even at order 4, heavy
1911 	 * filesystem activity makes these allocations fail.  __GFP_COMP
1912 	 * lets us use compound pages.
1913 	 */
1914 	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
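	/*
	 * Assuming the usual GFP_USER composition (__GFP_RECLAIM |
	 * __GFP_IO | __GFP_FS | __GFP_HARDWALL), this is GFP_USER minus
	 * __GFP_FS and __GFP_HARDWALL, plus __GFP_COMP for compound pages.
	 */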
1915 
1916 	/*
1917 	 * The minimum size of the eager buffers is one group of MTU-sized
1918 	 * buffers.
1919 	 * The global eager_buffer_size parameter is checked against the
1920 	 * theoretical lower limit of the value. Here, we check against the
1921 	 * MTU.
1922 	 */
1923 	if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size))
1924 		rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size;
1925 	/*
1926 	 * If using one-pkt-per-egr-buffer, lower the per-buffer (rcvtid)
1927 	 * size to the max MTU (page-aligned).
1928 	 */
1929 	if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
1930 		rcd->egrbufs.rcvtid_size = round_mtu;
1931 
1932 	/*
1933 	 * Eager buffer sizes of 1MB or less require smaller TID sizes
1934 	 * to satisfy the "multiple of 8 RcvArray entries" requirement.
1935 	 */
1936 	if (rcd->egrbufs.size <= (1 << 20))
1937 		rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu,
1938 			rounddown_pow_of_two(rcd->egrbufs.size / 8));
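	/*
	 * Example: with egrbufs.size = 1 MB, rcvtid_size becomes
	 * max(round_mtu, rounddown_pow_of_two(1 MB / 8)) = max(round_mtu,
	 * 128 KB), keeping the buffer count at a multiple of 8.
	 */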
1939 
1940 	while (alloced_bytes < rcd->egrbufs.size &&
1941 	       rcd->egrbufs.alloced < rcd->egrbufs.count) {
1942 		rcd->egrbufs.buffers[idx].addr =
1943 			dma_zalloc_coherent(&dd->pcidev->dev,
1944 					    rcd->egrbufs.rcvtid_size,
1945 					    &rcd->egrbufs.buffers[idx].dma,
1946 					    gfp_flags);
1947 		if (rcd->egrbufs.buffers[idx].addr) {
1948 			rcd->egrbufs.buffers[idx].len =
1949 				rcd->egrbufs.rcvtid_size;
1950 			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr =
1951 				rcd->egrbufs.buffers[idx].addr;
1952 			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].dma =
1953 				rcd->egrbufs.buffers[idx].dma;
1954 			rcd->egrbufs.alloced++;
1955 			alloced_bytes += rcd->egrbufs.rcvtid_size;
1956 			idx++;
1957 		} else {
1958 			u32 new_size, i, j;
1959 			u64 offset = 0;
1960 
1961 			/*
1962 			 * Fail the eager buffer allocation if:
1963 			 *   - we are already using the lowest acceptable size
1964 			 *   - we are using one-pkt-per-egr-buffer (this implies
1965 			 *     that we are accepting only one size)
1966 			 */
1967 			if (rcd->egrbufs.rcvtid_size == round_mtu ||
1968 			    !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
1969 				dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
1970 					   rcd->ctxt);
1971 				ret = -ENOMEM;
1972 				goto bail_rcvegrbuf_phys;
1973 			}
1974 
1975 			new_size = rcd->egrbufs.rcvtid_size / 2;
1976 
1977 			/*
1978 			 * If the first attempt to allocate memory failed,
1979 			 * nothing has been allocated yet, so simply retry
1980 			 * with the next lower size.
1981 			 */
1982 			if (idx == 0) {
1983 				rcd->egrbufs.rcvtid_size = new_size;
1984 				continue;
1985 			}
1986 
1987 			/*
1988 			 * Re-partition already allocated buffers to a smaller
1989 			 * size.
1990 			 */
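			/*
			 * Illustrative example: two 64 KB buffers already
			 * allocated and new_size = 32 KB yield four rcvtid
			 * entries, two per buffer (offsets 0 and 32 KB).
			 */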
1991 			rcd->egrbufs.alloced = 0;
1992 			for (i = 0, j = 0, offset = 0; j < idx; i++) {
1993 				if (i >= rcd->egrbufs.count)
1994 					break;
1995 				rcd->egrbufs.rcvtids[i].dma =
1996 					rcd->egrbufs.buffers[j].dma + offset;
1997 				rcd->egrbufs.rcvtids[i].addr =
1998 					rcd->egrbufs.buffers[j].addr + offset;
1999 				rcd->egrbufs.alloced++;
2000 				if ((rcd->egrbufs.buffers[j].dma + offset +
2001 				     new_size) ==
2002 				    (rcd->egrbufs.buffers[j].dma +
2003 				     rcd->egrbufs.buffers[j].len)) {
2004 					j++;
2005 					offset = 0;
2006 				} else {
2007 					offset += new_size;
2008 				}
2009 			}
2010 			rcd->egrbufs.rcvtid_size = new_size;
2011 		}
2012 	}
2013 	rcd->egrbufs.numbufs = idx;
2014 	rcd->egrbufs.size = alloced_bytes;
2015 
2016 	hfi1_cdbg(PROC,
2017 		  "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %zuKB\n",
2018 		  rcd->ctxt, rcd->egrbufs.alloced,
2019 		  rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024);
2020 
2021 	/*
2022 	 * Set the context's rcv array head update threshold to the closest
2023 	 * power of 2 (so we can use a mask instead of modulo) below half
2024 	 * the allocated entries.
2025 	 */
2026 	rcd->egrbufs.threshold =
2027 		rounddown_pow_of_two(rcd->egrbufs.alloced / 2);
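	/*
	 * Example: 1000 allocated entries give a threshold of
	 * rounddown_pow_of_two(1000 / 2) = 256.
	 */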
2028 	/*
2029 	 * Compute the expected RcvArray entry base. This is done after
2030 	 * allocating the eager buffers in order to maximize the
2031 	 * expected RcvArray entries for the context.
2032 	 */
2033 	max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size;
2034 	egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size);
2035 	rcd->expected_count = max_entries - egrtop;
2036 	if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2)
2037 		rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2;
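	/*
	 * Illustrative example: 2048 RcvArray entries for the context with
	 * 1024 consumed by eager buffers (rounded up to a full group)
	 * leaves expected_count = 1024, subject to the
	 * MAX_TID_PAIR_ENTRIES * 2 cap above.
	 */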
2038 
2039 	rcd->expected_base = rcd->eager_base + egrtop;
2040 	hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n",
2041 		  rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
2042 		  rcd->eager_base, rcd->expected_base);
2043 
2044 	if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
2045 		hfi1_cdbg(PROC,
2046 			  "ctxt%u: current Eager buffer size is invalid %u\n",
2047 			  rcd->ctxt, rcd->egrbufs.rcvtid_size);
2048 		ret = -EINVAL;
2049 		goto bail_rcvegrbuf_phys;
2050 	}
2051 
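	/*
	 * Program each allocated eager buffer into its RcvArray entry on
	 * the chip; cond_resched() keeps this potentially long loop
	 * preemption-friendly.
	 */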
2052 	for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
2053 		hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
2054 			     rcd->egrbufs.rcvtids[idx].dma, order);
2055 		cond_resched();
2056 	}
2057 
2058 	return 0;
2059 
2060 bail_rcvegrbuf_phys:
2061 	for (idx = 0; idx < rcd->egrbufs.alloced &&
2062 	     rcd->egrbufs.buffers[idx].addr;
2063 	     idx++) {
2064 		dma_free_coherent(&dd->pcidev->dev,
2065 				  rcd->egrbufs.buffers[idx].len,
2066 				  rcd->egrbufs.buffers[idx].addr,
2067 				  rcd->egrbufs.buffers[idx].dma);
2068 		rcd->egrbufs.buffers[idx].addr = NULL;
2069 		rcd->egrbufs.buffers[idx].dma = 0;
2070 		rcd->egrbufs.buffers[idx].len = 0;
2071 	}
2072 
2073 	return ret;
2074 }
2075