/*
 * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>

#include "qib.h"

static unsigned qib_hol_timeout_ms = 3000;
module_param_named(hol_timeout_ms, qib_hol_timeout_ms, uint, S_IRUGO);
MODULE_PARM_DESC(hol_timeout_ms,
                 "duration of user app suspension after link failure");

unsigned qib_sdma_fetch_arb = 1;
module_param_named(fetch_arb, qib_sdma_fetch_arb, uint, S_IRUGO);
MODULE_PARM_DESC(fetch_arb, "IBA7220: change SDMA descriptor arbitration");
/**
 * qib_disarm_piobufs - cancel a range of PIO buffers
 * @dd: the qlogic_ib device
 * @first: the first PIO buffer to cancel
 * @cnt: the number of PIO buffers to cancel
 *
 * Cancel a range of PIO buffers.  Used at user process close,
 * in case it died while writing to a PIO buffer.
 */
void qib_disarm_piobufs(struct qib_devdata *dd, unsigned first, unsigned cnt)
{
        unsigned long flags;
        unsigned i;
        unsigned last;

        last = first + cnt;
        spin_lock_irqsave(&dd->pioavail_lock, flags);
        for (i = first; i < last; i++) {
                __clear_bit(i, dd->pio_need_disarm);
                dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
        }
        spin_unlock_irqrestore(&dd->pioavail_lock, flags);
}

/*
 * This is called by a user process when it sees the DISARM_BUFS event
 * bit is set.
 */
int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *rcd)
{
        struct qib_devdata *dd = rcd->dd;
        unsigned i;
        unsigned last;
        unsigned n = 0;

        last = rcd->pio_base + rcd->piocnt;
        /*
         * Don't need uctxt_lock here, since user has called in to us.
         * Clear at start in case more interrupts set bits while we
         * are disarming.
         */
        if (rcd->user_event_mask) {
                /*
                 * subctxt_cnt is 0 if not shared, so do base
                 * separately, first, then remaining subctxt, if any
                 */
                clear_bit(_QIB_EVENT_DISARM_BUFS_BIT, &rcd->user_event_mask[0]);
                for (i = 1; i < rcd->subctxt_cnt; i++)
                        clear_bit(_QIB_EVENT_DISARM_BUFS_BIT,
                                  &rcd->user_event_mask[i]);
        }
        spin_lock_irq(&dd->pioavail_lock);
        for (i = rcd->pio_base; i < last; i++) {
                if (__test_and_clear_bit(i, dd->pio_need_disarm)) {
                        n++;
                        dd->f_sendctrl(rcd->ppd, QIB_SENDCTRL_DISARM_BUF(i));
                }
        }
        spin_unlock_irq(&dd->pioavail_lock);
        return 0;
}

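/*
 * Return the port whose SDMA engine currently owns send buffer @i,
 * or NULL if the buffer is not in any port's SDMA send buffer range.
 */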
static struct qib_pportdata *is_sdma_buf(struct qib_devdata *dd, unsigned i)
{
        struct qib_pportdata *ppd;
        unsigned pidx;

        for (pidx = 0; pidx < dd->num_pports; pidx++) {
                ppd = dd->pport + pidx;
                if (i >= ppd->sdma_state.first_sendbuf &&
                    i < ppd->sdma_state.last_sendbuf)
                        return ppd;
        }
        return NULL;
}

/*
 * Return true if send buffer is being used by a user context.
 * Sets _QIB_EVENT_DISARM_BUFS_BIT in user_event_mask as a side effect.
 */
static int find_ctxt(struct qib_devdata *dd, unsigned bufn)
{
        struct qib_ctxtdata *rcd;
        unsigned ctxt;
        int ret = 0;

        spin_lock(&dd->uctxt_lock);
        for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
                rcd = dd->rcd[ctxt];
                if (!rcd || bufn < rcd->pio_base ||
                    bufn >= rcd->pio_base + rcd->piocnt)
                        continue;
                if (rcd->user_event_mask) {
                        int i;

                        /*
                         * subctxt_cnt is 0 if not shared, so do base
                         * separately, first, then remaining subctxt, if any
                         */
                        set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
                                &rcd->user_event_mask[0]);
                        for (i = 1; i < rcd->subctxt_cnt; i++)
                                set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
                                        &rcd->user_event_mask[i]);
                }
                ret = 1;
                break;
        }
        spin_unlock(&dd->uctxt_lock);

        return ret;
}

/*
 * Disarm a set of send buffers.  If the buffer might be actively being
 * written to, mark the buffer to be disarmed later when it is not being
 * written to.
 *
 * This should only be called from the IRQ error handler.
 */
void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
                            unsigned cnt)
{
        struct qib_pportdata *ppd, *pppd[QIB_MAX_IB_PORTS];
        unsigned i;
        unsigned long flags;

        for (i = 0; i < dd->num_pports; i++)
                pppd[i] = NULL;

        for (i = 0; i < cnt; i++) {
                int which;

                if (!test_bit(i, mask))
                        continue;
                /*
                 * If the buffer is owned by the DMA hardware,
                 * reset the DMA engine.
                 */
                ppd = is_sdma_buf(dd, i);
                if (ppd) {
                        pppd[ppd->port] = ppd;
                        continue;
                }
                /*
                 * If the kernel is writing the buffer or the buffer is
                 * owned by a user process, we can't clear it yet.
                 */
                spin_lock_irqsave(&dd->pioavail_lock, flags);
                if (test_bit(i, dd->pio_writing) ||
                    (!test_bit(i << 1, dd->pioavailkernel) &&
                     find_ctxt(dd, i))) {
                        __set_bit(i, dd->pio_need_disarm);
                        which = 0;
                } else {
                        which = 1;
                        dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
                }
                spin_unlock_irqrestore(&dd->pioavail_lock, flags);
        }

        /* do cancel_sends once per port that had sdma piobufs in error */
        for (i = 0; i < dd->num_pports; i++)
                if (pppd[i])
                        qib_cancel_sends(pppd[i]);
}

/**
 * update_send_bufs - update shadow copy of the PIO availability map
 * @dd: the qlogic_ib device
 *
 * Called whenever our local copy indicates we have run out of send buffers.
 */
static void update_send_bufs(struct qib_devdata *dd)
{
        unsigned long flags;
        unsigned i;
        const unsigned piobregs = dd->pioavregs;

        /*
         * If the generation (check) bits have changed, then we update the
         * busy bit for the corresponding PIO buffer.  This algorithm will
         * modify positions to the value they already have in some cases
         * (i.e., no change), but it's faster than changing only the bits
         * that have changed.
         *
         * We would like to do this atomically, to avoid spinlocks in the
         * critical send path, but that's not really possible, given the
         * type of changes, and that this routine could be called on
         * multiple cpus simultaneously, so we lock in this routine only,
         * to avoid conflicting updates; all we change is the shadow, and
         * it's a single 64 bit memory location, so by definition the update
         * is atomic in terms of what other cpus can see in testing the
         * bits.  The spin_lock overhead isn't too bad, since it only
         * happens when all buffers are in use, so only cpu overhead, not
         * latency or bandwidth is affected.
         */
        if (!dd->pioavailregs_dma)
                return;
        spin_lock_irqsave(&dd->pioavail_lock, flags);
        for (i = 0; i < piobregs; i++) {
                u64 pchbusy, pchg, piov, pnew;

                piov = le64_to_cpu(dd->pioavailregs_dma[i]);
                pchg = dd->pioavailkernel[i] &
                        ~(dd->pioavailshadow[i] ^ piov);
                pchbusy = pchg << QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT;
                if (pchg && (pchbusy & dd->pioavailshadow[i])) {
                        pnew = dd->pioavailshadow[i] & ~pchbusy;
                        pnew |= piov & pchbusy;
                        dd->pioavailshadow[i] = pnew;
                }
        }
        spin_unlock_irqrestore(&dd->pioavail_lock, flags);
}

/*
 * Debugging code and stats updates if no pio buffers available.
 */
static noinline void no_send_bufs(struct qib_devdata *dd)
{
        dd->upd_pio_shadow = 1;

        /* not atomic, but if we lose a stat count in a while, that's OK */
        qib_stats.sps_nopiobufs++;
}

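/*
 * Note on the availability bitmaps: each send buffer consumes two bits.
 * For buffer n, bit 2*n + QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT is the
 * generation ("check") bit that the chip flips on each use, and bit
 * 2*n + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT is the busy bit.
 * pioavailkernel marks, at the same even bit positions, the buffers
 * currently owned by the kernel rather than a user context.
 */
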
/*
 * Common code for normal driver send buffer allocation, and reserved
 * allocation.
 *
 * Do appropriate marking as busy, etc.
 * Returns buffer pointer if one is found, otherwise NULL.
 */
u32 __iomem *qib_getsendbuf_range(struct qib_devdata *dd, u32 *pbufnum,
                                  u32 first, u32 last)
{
        unsigned i, j, updated = 0;
        unsigned nbufs;
        unsigned long flags;
        unsigned long *shadow = dd->pioavailshadow;
        u32 __iomem *buf;

        if (!(dd->flags & QIB_PRESENT))
                return NULL;

        nbufs = last - first + 1; /* number in range to check */
        if (dd->upd_pio_shadow) {
update_shadow:
                /*
                 * Minor optimization.  If we had no buffers on last call,
                 * start out by doing the update; continue and do scan even
                 * if no buffers were updated, to be paranoid.
                 */
                update_send_bufs(dd);
                updated++;
        }
        i = first;
        /*
         * While test_and_set_bit() is atomic, we do that and then the
         * change_bit(), and the pair is not.  See if this is the cause
         * of the remaining armlaunch errors.
         */
        spin_lock_irqsave(&dd->pioavail_lock, flags);
        if (dd->last_pio >= first && dd->last_pio <= last)
                i = dd->last_pio + 1;
        if (!first)
                /* adjust to min possible */
                nbufs = last - dd->min_kernel_pio + 1;
        for (j = 0; j < nbufs; j++, i++) {
                if (i > last)
                        i = !first ? dd->min_kernel_pio : first;
                if (__test_and_set_bit((2 * i) + 1, shadow))
                        continue;
                /* flip generation bit */
                __change_bit(2 * i, shadow);
                /* remember that the buffer can be written to now */
                __set_bit(i, dd->pio_writing);
                if (!first && first != last) /* first == last on VL15, avoid */
                        dd->last_pio = i;
                break;
        }
        spin_unlock_irqrestore(&dd->pioavail_lock, flags);

        if (j == nbufs) {
                if (!updated)
                        /*
                         * First time through; shadow exhausted, but may be
                         * buffers available, try an update and then rescan.
                         */
                        goto update_shadow;
                no_send_bufs(dd);
                buf = NULL;
        } else {
                if (i < dd->piobcnt2k)
                        buf = (u32 __iomem *)(dd->pio2kbase +
                                i * dd->palign);
                else if (i < dd->piobcnt2k + dd->piobcnt4k || !dd->piovl15base)
                        buf = (u32 __iomem *)(dd->pio4kbase +
                                (i - dd->piobcnt2k) * dd->align4k);
                else
                        buf = (u32 __iomem *)(dd->piovl15base +
                                (i - (dd->piobcnt2k + dd->piobcnt4k)) *
                                dd->align4k);
                if (pbufnum)
                        *pbufnum = i;
                dd->upd_pio_shadow = 0;
        }

        return buf;
}

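/*
 * Illustrative caller pattern (a sketch only, not verbatim driver code):
 *
 *      u32 bufn;
 *      u32 __iomem *piobuf = qib_getsendbuf_range(dd, &bufn, first, last);
 *
 *      if (piobuf) {
 *              ... write PBC word(s) and payload to piobuf ...
 *              qib_sendbuf_done(dd, bufn);
 *      }
 *
 * Pairing each successful allocation with qib_sendbuf_done() lets a
 * disarm that was deferred while the buffer was being written proceed.
 */
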
/*
 * Record that the caller is finished writing to the buffer so we don't
 * disarm it while it is being written and disarm it now if needed.
 */
void qib_sendbuf_done(struct qib_devdata *dd, unsigned n)
{
        unsigned long flags;

        spin_lock_irqsave(&dd->pioavail_lock, flags);
        __clear_bit(n, dd->pio_writing);
        if (__test_and_clear_bit(n, dd->pio_need_disarm))
                dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(n));
        spin_unlock_irqrestore(&dd->pioavail_lock, flags);
}

/**
 * qib_chg_pioavailkernel - change which send buffers are available for kernel
 * @dd: the qlogic_ib device
 * @start: the starting send buffer number
 * @len: the number of send buffers
 * @avail: true if the buffers are available for kernel use, false otherwise
 * @rcd: the context data for the buffers being changed
 */
void qib_chg_pioavailkernel(struct qib_devdata *dd, unsigned start,
                            unsigned len, u32 avail, struct qib_ctxtdata *rcd)
{
        unsigned long flags;
        unsigned end;
        unsigned ostart = start;

        /* There are two bits per send buffer (busy and generation) */
        start *= 2;
        end = start + len * 2;

        spin_lock_irqsave(&dd->pioavail_lock, flags);
        /* Set or clear the busy bit in the shadow. */
        while (start < end) {
                if (avail) {
                        unsigned long dma;
                        int i;

                        /*
                         * The BUSY bit will never be set, because we disarm
                         * the user buffers before we hand them back to the
                         * kernel.  We do have to make sure the generation
                         * bit is set correctly in shadow, since it could
                         * have changed many times while allocated to user.
                         * We can't use the bitmap functions on the full
                         * dma array because it is always little-endian, so
                         * we have to flip to host-order first.
                         * BITS_PER_LONG is slightly wrong, since it's
                         * always 64 bits per register in chip...
                         * We only work on 64 bit kernels, so that's OK.
                         */
                        i = start / BITS_PER_LONG;
                        __clear_bit(QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT + start,
                                    dd->pioavailshadow);
                        dma = (unsigned long)
                                le64_to_cpu(dd->pioavailregs_dma[i]);
                        if (test_bit((QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
                                      start) % BITS_PER_LONG, &dma))
                                __set_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
                                          start, dd->pioavailshadow);
                        else
                                __clear_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT
                                            + start, dd->pioavailshadow);
                        __set_bit(start, dd->pioavailkernel);
                        if ((start >> 1) < dd->min_kernel_pio)
                                dd->min_kernel_pio = start >> 1;
                } else {
                        __set_bit(start + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT,
                                  dd->pioavailshadow);
                        __clear_bit(start, dd->pioavailkernel);
                        if ((start >> 1) > dd->min_kernel_pio)
                                dd->min_kernel_pio = start >> 1;
                }
                start += 2;
        }

        if (dd->min_kernel_pio > 0 && dd->last_pio < dd->min_kernel_pio - 1)
                dd->last_pio = dd->min_kernel_pio - 1;
        spin_unlock_irqrestore(&dd->pioavail_lock, flags);

        dd->f_txchk_change(dd, ostart, len, avail, rcd);
}

/*
 * Flush all sends that might be in the ready to send state, as well as any
 * that are in the process of being sent.  Used whenever we need to be
 * sure the send side is idle.  Cleans up all buffer state by canceling
 * all pio buffers, and issuing an abort, which cleans up anything in the
 * launch fifo.  The cancel is superfluous on some chip versions, but
 * it's safer to always do it.
 * PIOAvail bits are updated by the chip as if a normal send had happened.
 */
void qib_cancel_sends(struct qib_pportdata *ppd)
{
        struct qib_devdata *dd = ppd->dd;
        struct qib_ctxtdata *rcd;
        unsigned long flags;
        unsigned ctxt;
        unsigned i;
        unsigned last;

        /*
         * Tell PSM to disarm buffers again before trying to reuse them.
         * We need to be sure the rcd doesn't change out from under us
         * while we do so.  We hold the two locks sequentially.  We might
         * needlessly set some need_disarm bits as a result, if the
         * context is closed after we release the uctxt_lock, but that's
         * fairly benign, and safer than nesting the locks.
         */
        for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
                spin_lock_irqsave(&dd->uctxt_lock, flags);
                rcd = dd->rcd[ctxt];
                if (rcd && rcd->ppd == ppd) {
                        last = rcd->pio_base + rcd->piocnt;
                        if (rcd->user_event_mask) {
                                /*
                                 * subctxt_cnt is 0 if not shared, so do base
                                 * separately, first, then remaining subctxt,
                                 * if any
                                 */
                                set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
                                        &rcd->user_event_mask[0]);
                                for (i = 1; i < rcd->subctxt_cnt; i++)
                                        set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
                                                &rcd->user_event_mask[i]);
                        }
                        i = rcd->pio_base;
                        spin_unlock_irqrestore(&dd->uctxt_lock, flags);
                        spin_lock_irqsave(&dd->pioavail_lock, flags);
                        for (; i < last; i++)
                                __set_bit(i, dd->pio_need_disarm);
                        spin_unlock_irqrestore(&dd->pioavail_lock, flags);
                } else
                        spin_unlock_irqrestore(&dd->uctxt_lock, flags);
        }

        if (!(dd->flags & QIB_HAS_SEND_DMA))
                dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_ALL |
                                    QIB_SENDCTRL_FLUSH);
}

/*
 * Force an update of in-memory copy of the pioavail registers, when
 * needed for any of a variety of reasons.
 * If already off, this routine is a nop, on the assumption that the
 * caller (or set of callers) will "do the right thing".
 * This is a per-device operation, so just the first port.
 */
void qib_force_pio_avail_update(struct qib_devdata *dd)
{
        dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
}

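/*
 * Head-of-line (HoL) blocking handling.  The HoL state follows the link:
 * when the link drops we cancel sends (qib_hol_down), at INIT we start a
 * timer to detect stuck packets blocking SMP replies (qib_hol_init), and
 * once the link is up the timer becomes a nop (qib_hol_up).
 */
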
void qib_hol_down(struct qib_pportdata *ppd)
{
        /*
         * Cancel sends when the link goes DOWN so that we aren't doing it
         * at INIT when we might be trying to send SMI packets.
         */
        if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
                qib_cancel_sends(ppd);
}

/*
 * Link is at INIT.
 * We start the HoL timer so we can detect stuck packets blocking SMP replies.
 * Timer may already be running, so use mod_timer, not add_timer.
 */
void qib_hol_init(struct qib_pportdata *ppd)
{
        if (ppd->hol_state != QIB_HOL_INIT) {
                ppd->hol_state = QIB_HOL_INIT;
                mod_timer(&ppd->hol_timer,
                          jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
        }
}

/*
 * Link is up, continue any user processes, and ensure timer
 * is a nop, if running.  Let timer keep running, if set; it
 * will nop when it sees the link is up.
 */
void qib_hol_up(struct qib_pportdata *ppd)
{
        ppd->hol_state = QIB_HOL_UP;
}

/*
 * This is only called via the timer.
 */
void qib_hol_event(unsigned long opaque)
{
        struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;

        /* If hardware error, etc, skip. */
        if (!(ppd->dd->flags & QIB_INITTED))
                return;

        if (ppd->hol_state != QIB_HOL_UP) {
                /*
                 * Try to flush sends in case a stuck packet is blocking
                 * SMP replies.
                 */
                qib_hol_down(ppd);
                mod_timer(&ppd->hol_timer,
                          jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
        }
}