1 /*
2 * Copyright(c) 2015-2018 Intel Corporation.
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * BSD LICENSE
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
22 * are met:
23 *
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
29 * distribution.
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
33 *
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45 *
46 */
47
48 #include <linux/delay.h>
49 #include "hfi.h"
50 #include "qp.h"
51 #include "trace.h"
52
53 #define SC(name) SEND_CTXT_##name
54 /*
55 * Send Context functions
56 */
57 static void sc_wait_for_packet_egress(struct send_context *sc, int pause);
58
59 /*
60 * Set the CM reset bit and wait for it to clear. Use the provided
61 * sendctrl register. This routine has no locking.
62 */
63 void __cm_reset(struct hfi1_devdata *dd, u64 sendctrl)
64 {
65 write_csr(dd, SEND_CTRL, sendctrl | SEND_CTRL_CM_RESET_SMASK);
66 while (1) {
67 udelay(1);
68 sendctrl = read_csr(dd, SEND_CTRL);
69 if ((sendctrl & SEND_CTRL_CM_RESET_SMASK) == 0)
70 break;
71 }
72 }
73
74 /* global control of PIO send */
75 void pio_send_control(struct hfi1_devdata *dd, int op)
76 {
77 u64 reg, mask;
78 unsigned long flags;
79 int write = 1; /* write sendctrl back */
80 int flush = 0; /* re-read sendctrl to make sure it is flushed */
81 int i;
82
83 spin_lock_irqsave(&dd->sendctrl_lock, flags);
84
85 reg = read_csr(dd, SEND_CTRL);
86 switch (op) {
87 case PSC_GLOBAL_ENABLE:
88 reg |= SEND_CTRL_SEND_ENABLE_SMASK;
89 fallthrough;
90 case PSC_DATA_VL_ENABLE:
91 mask = 0;
92 for (i = 0; i < ARRAY_SIZE(dd->vld); i++)
93 if (!dd->vld[i].mtu)
94 mask |= BIT_ULL(i);
95 /* Disallow sending on VLs not enabled */
96 mask = (mask & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
97 SEND_CTRL_UNSUPPORTED_VL_SHIFT;
98 reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask;
99 break;
100 case PSC_GLOBAL_DISABLE:
101 reg &= ~SEND_CTRL_SEND_ENABLE_SMASK;
102 break;
103 case PSC_GLOBAL_VLARB_ENABLE:
104 reg |= SEND_CTRL_VL_ARBITER_ENABLE_SMASK;
105 break;
106 case PSC_GLOBAL_VLARB_DISABLE:
107 reg &= ~SEND_CTRL_VL_ARBITER_ENABLE_SMASK;
108 break;
109 case PSC_CM_RESET:
110 __cm_reset(dd, reg);
111 write = 0; /* CSR already written (and flushed) */
112 break;
113 case PSC_DATA_VL_DISABLE:
114 reg |= SEND_CTRL_UNSUPPORTED_VL_SMASK;
115 flush = 1;
116 break;
117 default:
118 dd_dev_err(dd, "%s: invalid control %d\n", __func__, op);
119 break;
120 }
121
122 if (write) {
123 write_csr(dd, SEND_CTRL, reg);
124 if (flush)
125 (void)read_csr(dd, SEND_CTRL); /* flush write */
126 }
127
128 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
129 }
130
131 /* number of send context memory pools */
132 #define NUM_SC_POOLS 2
133
134 /* Send Context Size (SCS) wildcards */
135 #define SCS_POOL_0 -1
136 #define SCS_POOL_1 -2
137
138 /* Send Context Count (SCC) wildcards */
139 #define SCC_PER_VL -1
140 #define SCC_PER_CPU -2
141 #define SCC_PER_KRCVQ -3
142
143 /* Send Context Size (SCS) constants */
144 #define SCS_ACK_CREDITS 32
145 #define SCS_VL15_CREDITS 102 /* 3 pkts of 2048B data + 128B header */
146
147 #define PIO_THRESHOLD_CEILING 4096
148
149 #define PIO_WAIT_BATCH_SIZE 5
150
151 /* default send context sizes */
152 static struct sc_config_sizes sc_config_sizes[SC_MAX] = {
153 [SC_KERNEL] = { .size = SCS_POOL_0, /* even divide, pool 0 */
154 .count = SCC_PER_VL }, /* one per NUMA */
155 [SC_ACK] = { .size = SCS_ACK_CREDITS,
156 .count = SCC_PER_KRCVQ },
157 [SC_USER] = { .size = SCS_POOL_0, /* even divide, pool 0 */
158 .count = SCC_PER_CPU }, /* one per CPU */
159 [SC_VL15] = { .size = SCS_VL15_CREDITS,
160 .count = 1 },
161
162 };
163
164 /* send context memory pool configuration */
165 struct mem_pool_config {
166 int centipercent; /* % of memory, in 100ths of 1% */
167 int absolute_blocks; /* absolute block count */
168 };
169
170 /* default memory pool configuration: 100% in pool 0 */
171 static struct mem_pool_config sc_mem_pool_config[NUM_SC_POOLS] = {
172 /* centi%, abs blocks */
173 { 10000, -1 }, /* pool 0 */
174 { 0, -1 }, /* pool 1 */
175 };
176
177 /* memory pool information, used when calculating final sizes */
178 struct mem_pool_info {
179 int centipercent; /*
180 * 100th of 1% of memory to use, -1 if blocks
181 * already set
182 */
183 int count; /* count of contexts in the pool */
184 int blocks; /* block size of the pool */
185 int size; /* context size, in blocks */
186 };
187
188 /*
189 * Convert a pool wildcard to a valid pool index. The wildcards
190 * start at -1 and increase negatively. Map them as:
191 * -1 => 0
192 * -2 => 1
193 * etc.
194 *
195 * Return -1 on non-wildcard input, otherwise convert to a pool number.
196 */
197 static int wildcard_to_pool(int wc)
198 {
199 if (wc >= 0)
200 return -1; /* non-wildcard */
201 return -wc - 1;
202 }
203
204 static const char *sc_type_names[SC_MAX] = {
205 "kernel",
206 "ack",
207 "user",
208 "vl15"
209 };
210
211 static const char *sc_type_name(int index)
212 {
213 if (index < 0 || index >= SC_MAX)
214 return "unknown";
215 return sc_type_names[index];
216 }
217
218 /*
219 * Read the send context memory pool configuration and send context
220 * size configuration. Replace any wildcards and come up with final
221 * counts and sizes for the send context types.
222 */
223 int init_sc_pools_and_sizes(struct hfi1_devdata *dd)
224 {
225 struct mem_pool_info mem_pool_info[NUM_SC_POOLS] = { { 0 } };
226 int total_blocks = (chip_pio_mem_size(dd) / PIO_BLOCK_SIZE) - 1;
227 int total_contexts = 0;
228 int fixed_blocks;
229 int pool_blocks;
230 int used_blocks;
231 int cp_total; /* centipercent total */
232 int ab_total; /* absolute block total */
233 int extra;
234 int i;
235
236 /*
237 * When SDMA is enabled, kernel context pio packet size is capped by
238 * "piothreshold". Reduce pio buffer allocation for kernel context by
239 * setting it to a fixed size. The allocation allows 3-deep buffering
240 * of the largest pio packets plus up to 128 bytes header, sufficient
241 * to maintain verbs performance.
242 *
243 * When SDMA is disabled, keep the default pooling allocation.
244 */
245 if (HFI1_CAP_IS_KSET(SDMA)) {
246 u16 max_pkt_size = (piothreshold < PIO_THRESHOLD_CEILING) ?
247 piothreshold : PIO_THRESHOLD_CEILING;
248 sc_config_sizes[SC_KERNEL].size =
249 3 * (max_pkt_size + 128) / PIO_BLOCK_SIZE;
250 }
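	/*
	 * Example of the sizing above (a sketch assuming the 64-byte
	 * PIO_BLOCK_SIZE): with piothreshold at its 4096-byte ceiling,
	 * 3 * (4096 + 128) / 64 = 198 blocks per kernel send context.
	 */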
251
252 /*
253 * Step 0:
254 * - copy the centipercents/absolute sizes from the pool config
255 * - sanity check these values
256 * - add up centipercents, then later check for full value
257 * - add up absolute blocks, then later check for over-commit
258 */
259 cp_total = 0;
260 ab_total = 0;
261 for (i = 0; i < NUM_SC_POOLS; i++) {
262 int cp = sc_mem_pool_config[i].centipercent;
263 int ab = sc_mem_pool_config[i].absolute_blocks;
264
265 /*
266 * A negative value is "unused" or "invalid". Both *can*
267 * be valid, but centipercent wins, so check that first
268 */
269 if (cp >= 0) { /* centipercent valid */
270 cp_total += cp;
271 } else if (ab >= 0) { /* absolute blocks valid */
272 ab_total += ab;
273 } else { /* neither valid */
274 dd_dev_err(
275 dd,
276 "Send context memory pool %d: both the block count and centipercent are invalid\n",
277 i);
278 return -EINVAL;
279 }
280
281 mem_pool_info[i].centipercent = cp;
282 mem_pool_info[i].blocks = ab;
283 }
284
285 /* do not use both % and absolute blocks for different pools */
286 if (cp_total != 0 && ab_total != 0) {
287 dd_dev_err(
288 dd,
289 "All send context memory pools must be described as either centipercent or blocks, no mixing between pools\n");
290 return -EINVAL;
291 }
292
293 /* if any percentages are present, they must add up to 100% x 100 */
294 if (cp_total != 0 && cp_total != 10000) {
295 dd_dev_err(
296 dd,
297 "Send context memory pool centipercent is %d, expecting 10000\n",
298 cp_total);
299 return -EINVAL;
300 }
301
302 /* the absolute pool total cannot be more than the mem total */
303 if (ab_total > total_blocks) {
304 dd_dev_err(
305 dd,
306 "Send context memory pool absolute block count %d is larger than the memory size %d\n",
307 ab_total, total_blocks);
308 return -EINVAL;
309 }
310
311 /*
312 * Step 2:
313 * - copy from the context size config
314 * - replace context type wildcard counts with real values
315 * - add up non-memory pool block sizes
316 * - add up memory pool user counts
317 */
318 fixed_blocks = 0;
319 for (i = 0; i < SC_MAX; i++) {
320 int count = sc_config_sizes[i].count;
321 int size = sc_config_sizes[i].size;
322 int pool;
323
324 /*
325 * Sanity check count: Either a positive value or
326 * one of the expected wildcards is valid. The positive
327 * value is checked later when we compare against total
328 * memory available.
329 */
330 if (i == SC_ACK) {
331 count = dd->n_krcv_queues;
332 } else if (i == SC_KERNEL) {
333 count = INIT_SC_PER_VL * num_vls;
334 } else if (count == SCC_PER_CPU) {
335 count = dd->num_rcv_contexts - dd->n_krcv_queues;
336 } else if (count < 0) {
337 dd_dev_err(
338 dd,
339 "%s send context invalid count wildcard %d\n",
340 sc_type_name(i), count);
341 return -EINVAL;
342 }
343 if (total_contexts + count > chip_send_contexts(dd))
344 count = chip_send_contexts(dd) - total_contexts;
345
346 total_contexts += count;
347
348 /*
349 * Sanity check pool: The conversion will return a pool
350 * number or -1 if a fixed (non-negative) value. The fixed
351 * value is checked later when we compare against
352 * total memory available.
353 */
354 pool = wildcard_to_pool(size);
355 if (pool == -1) { /* non-wildcard */
356 fixed_blocks += size * count;
357 } else if (pool < NUM_SC_POOLS) { /* valid wildcard */
358 mem_pool_info[pool].count += count;
359 } else { /* invalid wildcard */
360 dd_dev_err(
361 dd,
362 "%s send context invalid pool wildcard %d\n",
363 sc_type_name(i), size);
364 return -EINVAL;
365 }
366
367 dd->sc_sizes[i].count = count;
368 dd->sc_sizes[i].size = size;
369 }
370 if (fixed_blocks > total_blocks) {
371 dd_dev_err(
372 dd,
373 "Send context fixed block count, %u, larger than total block count %u\n",
374 fixed_blocks, total_blocks);
375 return -EINVAL;
376 }
377
378 /* step 3: calculate the blocks in the pools, and pool context sizes */
379 pool_blocks = total_blocks - fixed_blocks;
380 if (ab_total > pool_blocks) {
381 dd_dev_err(
382 dd,
383 "Send context fixed pool sizes, %u, larger than pool block count %u\n",
384 ab_total, pool_blocks);
385 return -EINVAL;
386 }
387 /* subtract off the fixed pool blocks */
388 pool_blocks -= ab_total;
389
390 for (i = 0; i < NUM_SC_POOLS; i++) {
391 struct mem_pool_info *pi = &mem_pool_info[i];
392
393 /* % beats absolute blocks */
394 if (pi->centipercent >= 0)
395 pi->blocks = (pool_blocks * pi->centipercent) / 10000;
396
397 if (pi->blocks == 0 && pi->count != 0) {
398 dd_dev_err(
399 dd,
400 "Send context memory pool %d has %u contexts, but no blocks\n",
401 i, pi->count);
402 return -EINVAL;
403 }
404 if (pi->count == 0) {
405 /* warn about wasted blocks */
406 if (pi->blocks != 0)
407 dd_dev_err(
408 dd,
409 "Send context memory pool %d has %u blocks, but zero contexts\n",
410 i, pi->blocks);
411 pi->size = 0;
412 } else {
413 pi->size = pi->blocks / pi->count;
414 }
415 }
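	/*
	 * Illustrative numbers (not from the default config above): a pool
	 * holding 40000 blocks shared by 160 pooled contexts yields
	 * pi->size = 250 blocks per context; step 4 below then caps the
	 * per-context size at PIO_MAX_BLOCKS.
	 */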
416
417 /* step 4: fill in the context type sizes from the pool sizes */
418 used_blocks = 0;
419 for (i = 0; i < SC_MAX; i++) {
420 if (dd->sc_sizes[i].size < 0) {
421 unsigned pool = wildcard_to_pool(dd->sc_sizes[i].size);
422
423 WARN_ON_ONCE(pool >= NUM_SC_POOLS);
424 dd->sc_sizes[i].size = mem_pool_info[pool].size;
425 }
426 /* make sure we are not larger than what is allowed by the HW */
427 #define PIO_MAX_BLOCKS 1024
428 if (dd->sc_sizes[i].size > PIO_MAX_BLOCKS)
429 dd->sc_sizes[i].size = PIO_MAX_BLOCKS;
430
431 /* calculate our total usage */
432 used_blocks += dd->sc_sizes[i].size * dd->sc_sizes[i].count;
433 }
434 extra = total_blocks - used_blocks;
435 if (extra != 0)
436 dd_dev_info(dd, "unused send context blocks: %d\n", extra);
437
438 return total_contexts;
439 }
440
441 int init_send_contexts(struct hfi1_devdata *dd)
442 {
443 u16 base;
444 int ret, i, j, context;
445
446 ret = init_credit_return(dd);
447 if (ret)
448 return ret;
449
450 dd->hw_to_sw = kmalloc_array(TXE_NUM_CONTEXTS, sizeof(u8),
451 GFP_KERNEL);
452 dd->send_contexts = kcalloc(dd->num_send_contexts,
453 sizeof(struct send_context_info),
454 GFP_KERNEL);
455 if (!dd->send_contexts || !dd->hw_to_sw) {
456 kfree(dd->hw_to_sw);
457 kfree(dd->send_contexts);
458 free_credit_return(dd);
459 return -ENOMEM;
460 }
461
462 /* hardware context map starts with invalid send context indices */
463 for (i = 0; i < TXE_NUM_CONTEXTS; i++)
464 dd->hw_to_sw[i] = INVALID_SCI;
465
466 /*
467 * All send contexts have their credit sizes. Allocate credits
468 * for each context one after another from the global space.
469 */
470 context = 0;
471 base = 1;
472 for (i = 0; i < SC_MAX; i++) {
473 struct sc_config_sizes *scs = &dd->sc_sizes[i];
474
475 for (j = 0; j < scs->count; j++) {
476 struct send_context_info *sci =
477 &dd->send_contexts[context];
478 sci->type = i;
479 sci->base = base;
480 sci->credits = scs->size;
481
482 context++;
483 base += scs->size;
484 }
485 }
486
487 return 0;
488 }
489
490 /*
491 * Allocate a software index and hardware context of the given type.
492 *
493 * Must be called with dd->sc_lock held.
494 */
495 static int sc_hw_alloc(struct hfi1_devdata *dd, int type, u32 *sw_index,
496 u32 *hw_context)
497 {
498 struct send_context_info *sci;
499 u32 index;
500 u32 context;
501
502 for (index = 0, sci = &dd->send_contexts[0];
503 index < dd->num_send_contexts; index++, sci++) {
504 if (sci->type == type && sci->allocated == 0) {
505 sci->allocated = 1;
506 /* use a 1:1 mapping, but make them non-equal */
507 context = chip_send_contexts(dd) - index - 1;
508 dd->hw_to_sw[context] = index;
509 *sw_index = index;
510 *hw_context = context;
511 return 0; /* success */
512 }
513 }
514 dd_dev_err(dd, "Unable to locate a free type %d send context\n", type);
515 return -ENOSPC;
516 }
517
518 /*
519 * Free the send context given by its software index.
520 *
521 * Must be called with dd->sc_lock held.
522 */
523 static void sc_hw_free(struct hfi1_devdata *dd, u32 sw_index, u32 hw_context)
524 {
525 struct send_context_info *sci;
526
527 sci = &dd->send_contexts[sw_index];
528 if (!sci->allocated) {
529 dd_dev_err(dd, "%s: sw_index %u not allocated? hw_context %u\n",
530 __func__, sw_index, hw_context);
531 }
532 sci->allocated = 0;
533 dd->hw_to_sw[hw_context] = INVALID_SCI;
534 }
535
536 /* return the base context of a context in a group */
537 static inline u32 group_context(u32 context, u32 group)
538 {
539 return (context >> group) << group;
540 }
541
542 /* return the size of a group */
543 static inline u32 group_size(u32 group)
544 {
545 return 1 << group;
546 }
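/*
 * Example: with group == 2, group_size() is 4 and hardware contexts 8..11
 * all share base context group_context(ctxt, 2) == 8.
 */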
547
548 /*
549 * Obtain the credit return addresses, kernel virtual and bus, for the
550 * given sc.
551 *
552 * To understand this routine:
553 * o va and dma are arrays of struct credit_return. One for each physical
554 * send context, per NUMA.
555 * o Each send context always looks in its relative location in a struct
556 * credit_return for its credit return.
557 * o Each send context in a group must have its return address CSR programmed
558 * with the same value. Use the address of the first send context in the
559 * group.
560 */
561 static void cr_group_addresses(struct send_context *sc, dma_addr_t *dma)
562 {
563 u32 gc = group_context(sc->hw_context, sc->group);
564 u32 index = sc->hw_context & 0x7;
565
566 sc->hw_free = &sc->dd->cr_base[sc->node].va[gc].cr[index];
567 *dma = (unsigned long)
568 &((struct credit_return *)sc->dd->cr_base[sc->node].dma)[gc];
569 }
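/*
 * With the current single-context groups (sc->group == 0, set in
 * sc_alloc()), gc is just sc->hw_context, and the low three bits of the
 * context number select the cr[] slot within that credit_return entry.
 */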
570
571 /*
572 * Work queue function triggered in error interrupt routine for
573 * kernel contexts.
574 */
575 static void sc_halted(struct work_struct *work)
576 {
577 struct send_context *sc;
578
579 sc = container_of(work, struct send_context, halt_work);
580 sc_restart(sc);
581 }
582
583 /*
584 * Calculate PIO block threshold for this send context using the given MTU.
585  * Trigger a return when credits for one MTU plus an optional header remain.
586 *
587 * Parameter mtu is in bytes.
588 * Parameter hdrqentsize is in DWORDs.
589 *
590 * Return value is what to write into the CSR: trigger return when
591 * unreturned credits pass this count.
592 */
593 u32 sc_mtu_to_threshold(struct send_context *sc, u32 mtu, u32 hdrqentsize)
594 {
595 u32 release_credits;
596 u32 threshold;
597
598 /* add in the header size, then divide by the PIO block size */
599 mtu += hdrqentsize << 2;
600 release_credits = DIV_ROUND_UP(mtu, PIO_BLOCK_SIZE);
601
602 /* check against this context's credits */
603 if (sc->credits <= release_credits)
604 threshold = 1;
605 else
606 threshold = sc->credits - release_credits;
607
608 return threshold;
609 }
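/*
 * Worked example (assumes the 64-byte PIO_BLOCK_SIZE): an mtu of 2048 bytes
 * and an hdrqentsize of 32 DWORDs (128 bytes) give DIV_ROUND_UP(2176, 64)
 * == 34 release credits, so a context with 128 credits gets a threshold
 * of 94.
 */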
610
611 /*
612 * Calculate credit threshold in terms of percent of the allocated credits.
613 * Trigger when unreturned credits equal or exceed the percentage of the whole.
614 *
615 * Return value is what to write into the CSR: trigger return when
616 * unreturned credits pass this count.
617 */
618 u32 sc_percent_to_threshold(struct send_context *sc, u32 percent)
619 {
620 return (sc->credits * percent) / 100;
621 }
622
623 /*
624 * Set the credit return threshold.
625 */
626 void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold)
627 {
628 unsigned long flags;
629 u32 old_threshold;
630 int force_return = 0;
631
632 spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
633
634 old_threshold = (sc->credit_ctrl >>
635 SC(CREDIT_CTRL_THRESHOLD_SHIFT))
636 & SC(CREDIT_CTRL_THRESHOLD_MASK);
637
638 if (new_threshold != old_threshold) {
639 sc->credit_ctrl =
640 (sc->credit_ctrl
641 & ~SC(CREDIT_CTRL_THRESHOLD_SMASK))
642 | ((new_threshold
643 & SC(CREDIT_CTRL_THRESHOLD_MASK))
644 << SC(CREDIT_CTRL_THRESHOLD_SHIFT));
645 write_kctxt_csr(sc->dd, sc->hw_context,
646 SC(CREDIT_CTRL), sc->credit_ctrl);
647
648 /* force a credit return on change to avoid a possible stall */
649 force_return = 1;
650 }
651
652 spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
653
654 if (force_return)
655 sc_return_credits(sc);
656 }
657
658 /*
659 * set_pio_integrity
660 *
661 * Set the CHECK_ENABLE register for the send context 'sc'.
662 */
663 void set_pio_integrity(struct send_context *sc)
664 {
665 struct hfi1_devdata *dd = sc->dd;
666 u32 hw_context = sc->hw_context;
667 int type = sc->type;
668
669 write_kctxt_csr(dd, hw_context,
670 SC(CHECK_ENABLE),
671 hfi1_pkt_default_send_ctxt_mask(dd, type));
672 }
673
674 static u32 get_buffers_allocated(struct send_context *sc)
675 {
676 int cpu;
677 u32 ret = 0;
678
679 for_each_possible_cpu(cpu)
680 ret += *per_cpu_ptr(sc->buffers_allocated, cpu);
681 return ret;
682 }
683
684 static void reset_buffers_allocated(struct send_context *sc)
685 {
686 int cpu;
687
688 for_each_possible_cpu(cpu)
689 (*per_cpu_ptr(sc->buffers_allocated, cpu)) = 0;
690 }
691
692 /*
693 * Allocate a NUMA relative send context structure of the given type along
694 * with a HW context.
695 */
696 struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
697 uint hdrqentsize, int numa)
698 {
699 struct send_context_info *sci;
700 struct send_context *sc = NULL;
701 dma_addr_t dma;
702 unsigned long flags;
703 u64 reg;
704 u32 thresh;
705 u32 sw_index;
706 u32 hw_context;
707 int ret;
708 u8 opval, opmask;
709
710 /* do not allocate while frozen */
711 if (dd->flags & HFI1_FROZEN)
712 return NULL;
713
714 sc = kzalloc_node(sizeof(*sc), GFP_KERNEL, numa);
715 if (!sc)
716 return NULL;
717
718 sc->buffers_allocated = alloc_percpu(u32);
719 if (!sc->buffers_allocated) {
720 kfree(sc);
721 dd_dev_err(dd,
722 "Cannot allocate buffers_allocated per cpu counters\n"
723 );
724 return NULL;
725 }
726
727 spin_lock_irqsave(&dd->sc_lock, flags);
728 ret = sc_hw_alloc(dd, type, &sw_index, &hw_context);
729 if (ret) {
730 spin_unlock_irqrestore(&dd->sc_lock, flags);
731 free_percpu(sc->buffers_allocated);
732 kfree(sc);
733 return NULL;
734 }
735
736 sci = &dd->send_contexts[sw_index];
737 sci->sc = sc;
738
739 sc->dd = dd;
740 sc->node = numa;
741 sc->type = type;
742 spin_lock_init(&sc->alloc_lock);
743 spin_lock_init(&sc->release_lock);
744 spin_lock_init(&sc->credit_ctrl_lock);
745 seqlock_init(&sc->waitlock);
746 INIT_LIST_HEAD(&sc->piowait);
747 INIT_WORK(&sc->halt_work, sc_halted);
748 init_waitqueue_head(&sc->halt_wait);
749
750 /* grouping is always single context for now */
751 sc->group = 0;
752
753 sc->sw_index = sw_index;
754 sc->hw_context = hw_context;
755 cr_group_addresses(sc, &dma);
756 sc->credits = sci->credits;
757 sc->size = sc->credits * PIO_BLOCK_SIZE;
758
759 /* PIO Send Memory Address details */
760 #define PIO_ADDR_CONTEXT_MASK 0xfful
761 #define PIO_ADDR_CONTEXT_SHIFT 16
762 sc->base_addr = dd->piobase + ((hw_context & PIO_ADDR_CONTEXT_MASK)
763 << PIO_ADDR_CONTEXT_SHIFT);
764
765 /* set base and credits */
766 reg = ((sci->credits & SC(CTRL_CTXT_DEPTH_MASK))
767 << SC(CTRL_CTXT_DEPTH_SHIFT))
768 | ((sci->base & SC(CTRL_CTXT_BASE_MASK))
769 << SC(CTRL_CTXT_BASE_SHIFT));
770 write_kctxt_csr(dd, hw_context, SC(CTRL), reg);
771
772 set_pio_integrity(sc);
773
774 /* unmask all errors */
775 write_kctxt_csr(dd, hw_context, SC(ERR_MASK), (u64)-1);
776
777 /* set the default partition key */
778 write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY),
779 (SC(CHECK_PARTITION_KEY_VALUE_MASK) &
780 DEFAULT_PKEY) <<
781 SC(CHECK_PARTITION_KEY_VALUE_SHIFT));
782
783 /* per context type checks */
784 if (type == SC_USER) {
785 opval = USER_OPCODE_CHECK_VAL;
786 opmask = USER_OPCODE_CHECK_MASK;
787 } else {
788 opval = OPCODE_CHECK_VAL_DISABLED;
789 opmask = OPCODE_CHECK_MASK_DISABLED;
790 }
791
792 /* set the send context check opcode mask and value */
793 write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE),
794 ((u64)opmask << SC(CHECK_OPCODE_MASK_SHIFT)) |
795 ((u64)opval << SC(CHECK_OPCODE_VALUE_SHIFT)));
796
797 /* set up credit return */
798 reg = dma & SC(CREDIT_RETURN_ADDR_ADDRESS_SMASK);
799 write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), reg);
800
801 /*
802 * Calculate the initial credit return threshold.
803 *
804 * For Ack contexts, set a threshold for half the credits.
805 * For User contexts use the given percentage. This has been
806 * sanitized on driver start-up.
807 * For Kernel contexts, use the default MTU plus a header
808 * or half the credits, whichever is smaller. This should
809 * work for both the 3-deep buffering allocation and the
810 * pooling allocation.
811 */
812 if (type == SC_ACK) {
813 thresh = sc_percent_to_threshold(sc, 50);
814 } else if (type == SC_USER) {
815 thresh = sc_percent_to_threshold(sc,
816 user_credit_return_threshold);
817 } else { /* kernel */
818 thresh = min(sc_percent_to_threshold(sc, 50),
819 sc_mtu_to_threshold(sc, hfi1_max_mtu,
820 hdrqentsize));
821 }
822 reg = thresh << SC(CREDIT_CTRL_THRESHOLD_SHIFT);
823 /* add in early return */
824 if (type == SC_USER && HFI1_CAP_IS_USET(EARLY_CREDIT_RETURN))
825 reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK);
826 else if (HFI1_CAP_IS_KSET(EARLY_CREDIT_RETURN)) /* kernel, ack */
827 reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK);
828
829 /* set up write-through credit_ctrl */
830 sc->credit_ctrl = reg;
831 write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), reg);
832
833 /* User send contexts should not allow sending on VL15 */
834 if (type == SC_USER) {
835 reg = 1ULL << 15;
836 write_kctxt_csr(dd, hw_context, SC(CHECK_VL), reg);
837 }
838
839 spin_unlock_irqrestore(&dd->sc_lock, flags);
840
841 /*
842 * Allocate shadow ring to track outstanding PIO buffers _after_
843 * unlocking. We don't know the size until the lock is held and
844 * we can't allocate while the lock is held. No one is using
845 * the context yet, so allocate it now.
846 *
847 * User contexts do not get a shadow ring.
848 */
849 if (type != SC_USER) {
850 /*
851 * Size the shadow ring 1 larger than the number of credits
852 * so head == tail can mean empty.
853 */
854 sc->sr_size = sci->credits + 1;
855 sc->sr = kcalloc_node(sc->sr_size,
856 sizeof(union pio_shadow_ring),
857 GFP_KERNEL, numa);
858 if (!sc->sr) {
859 sc_free(sc);
860 return NULL;
861 }
862 }
863
864 hfi1_cdbg(PIO,
865 "Send context %u(%u) %s group %u credits %u credit_ctrl 0x%llx threshold %u\n",
866 sw_index,
867 hw_context,
868 sc_type_name(type),
869 sc->group,
870 sc->credits,
871 sc->credit_ctrl,
872 thresh);
873
874 return sc;
875 }
876
877 /* free a per-NUMA send context structure */
878 void sc_free(struct send_context *sc)
879 {
880 struct hfi1_devdata *dd;
881 unsigned long flags;
882 u32 sw_index;
883 u32 hw_context;
884
885 if (!sc)
886 return;
887
888 sc->flags |= SCF_IN_FREE; /* ensure no restarts */
889 dd = sc->dd;
890 if (!list_empty(&sc->piowait))
891 dd_dev_err(dd, "piowait list not empty!\n");
892 sw_index = sc->sw_index;
893 hw_context = sc->hw_context;
894 sc_disable(sc); /* make sure the HW is disabled */
895 flush_work(&sc->halt_work);
896
897 spin_lock_irqsave(&dd->sc_lock, flags);
898 dd->send_contexts[sw_index].sc = NULL;
899
900 /* clear/disable all registers set in sc_alloc */
901 write_kctxt_csr(dd, hw_context, SC(CTRL), 0);
902 write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), 0);
903 write_kctxt_csr(dd, hw_context, SC(ERR_MASK), 0);
904 write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY), 0);
905 write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE), 0);
906 write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), 0);
907 write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), 0);
908
909 /* release the index and context for re-use */
910 sc_hw_free(dd, sw_index, hw_context);
911 spin_unlock_irqrestore(&dd->sc_lock, flags);
912
913 kfree(sc->sr);
914 free_percpu(sc->buffers_allocated);
915 kfree(sc);
916 }
917
918 /* disable the context */
919 void sc_disable(struct send_context *sc)
920 {
921 u64 reg;
922 struct pio_buf *pbuf;
923 LIST_HEAD(wake_list);
924
925 if (!sc)
926 return;
927
928 /* do all steps, even if already disabled */
929 spin_lock_irq(&sc->alloc_lock);
930 reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL));
931 reg &= ~SC(CTRL_CTXT_ENABLE_SMASK);
932 sc->flags &= ~SCF_ENABLED;
933 sc_wait_for_packet_egress(sc, 1);
934 write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg);
935
936 /*
937 * Flush any waiters. Once the context is disabled,
938 * credit return interrupts are stopped (although there
939 * could be one in-process when the context is disabled).
940 * Wait one microsecond for any lingering interrupts, then
941 * proceed with the flush.
942 */
943 udelay(1);
944 spin_lock(&sc->release_lock);
945 if (sc->sr) { /* this context has a shadow ring */
946 while (sc->sr_tail != sc->sr_head) {
947 pbuf = &sc->sr[sc->sr_tail].pbuf;
948 if (pbuf->cb)
949 (*pbuf->cb)(pbuf->arg, PRC_SC_DISABLE);
950 sc->sr_tail++;
951 if (sc->sr_tail >= sc->sr_size)
952 sc->sr_tail = 0;
953 }
954 }
955 spin_unlock(&sc->release_lock);
956
957 write_seqlock(&sc->waitlock);
958 list_splice_init(&sc->piowait, &wake_list);
959 write_sequnlock(&sc->waitlock);
960 while (!list_empty(&wake_list)) {
961 struct iowait *wait;
962 struct rvt_qp *qp;
963 struct hfi1_qp_priv *priv;
964
965 wait = list_first_entry(&wake_list, struct iowait, list);
966 qp = iowait_to_qp(wait);
967 priv = qp->priv;
968 list_del_init(&priv->s_iowait.list);
969 priv->s_iowait.lock = NULL;
970 hfi1_qp_wakeup(qp, RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
971 }
972
973 spin_unlock_irq(&sc->alloc_lock);
974 }
975
976 /* return SendEgressCtxtStatus.PacketOccupancy */
977 static u64 packet_occupancy(u64 reg)
978 {
979 return (reg &
980 SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK)
981 >> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT;
982 }
983
984 /* is egress halted on the context? */
985 static bool egress_halted(u64 reg)
986 {
987 return !!(reg & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK);
988 }
989
990 /* is the send context halted? */
991 static bool is_sc_halted(struct hfi1_devdata *dd, u32 hw_context)
992 {
993 return !!(read_kctxt_csr(dd, hw_context, SC(STATUS)) &
994 SC(STATUS_CTXT_HALTED_SMASK));
995 }
996
997 /**
998  * sc_wait_for_packet_egress - wait for packets to drain from a send context
999 * @sc: valid send context
1000 * @pause: wait for credit return
1001 *
1002 * Wait for packet egress, optionally pause for credit return
1003 *
1004 * Egress halt and Context halt are not necessarily the same thing, so
1005 * check for both.
1006 *
1007 * NOTE: The context halt bit may not be set immediately. Because of this,
1008  * it is necessary to check the SW SCF_HALTED bit (set in the IRQ) and the HW
1009 * context bit to determine if the context is halted.
1010 */
1011 static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
1012 {
1013 struct hfi1_devdata *dd = sc->dd;
1014 u64 reg = 0;
1015 u64 reg_prev;
1016 u32 loop = 0;
1017
1018 while (1) {
1019 reg_prev = reg;
1020 reg = read_csr(dd, sc->hw_context * 8 +
1021 SEND_EGRESS_CTXT_STATUS);
1022 /* done if any halt bits, SW or HW are set */
1023 if (sc->flags & SCF_HALTED ||
1024 is_sc_halted(dd, sc->hw_context) || egress_halted(reg))
1025 break;
1026 reg = packet_occupancy(reg);
1027 if (reg == 0)
1028 break;
1029 /* counter is reset if occupancy count changes */
1030 if (reg != reg_prev)
1031 loop = 0;
1032 if (loop > 50000) {
1033 /* timed out - bounce the link */
1034 dd_dev_err(dd,
1035 "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n",
1036 __func__, sc->sw_index,
1037 sc->hw_context, (u32)reg);
1038 queue_work(dd->pport->link_wq,
1039 &dd->pport->link_bounce_work);
1040 break;
1041 }
1042 loop++;
1043 udelay(1);
1044 }
1045
1046 if (pause)
1047 /* Add additional delay to ensure chip returns all credits */
1048 pause_for_credit_return(dd);
1049 }
1050
1051 void sc_wait(struct hfi1_devdata *dd)
1052 {
1053 int i;
1054
1055 for (i = 0; i < dd->num_send_contexts; i++) {
1056 struct send_context *sc = dd->send_contexts[i].sc;
1057
1058 if (!sc)
1059 continue;
1060 sc_wait_for_packet_egress(sc, 0);
1061 }
1062 }
1063
1064 /*
1065 * Restart a context after it has been halted due to error.
1066 *
1067  * If the first step (waiting for the halt to be asserted) fails, return
1068  * early. Otherwise complain about timeouts but keep going.
1069 *
1070 * It is expected that allocations (enabled flag bit) have been shut off
1071 * already (only applies to kernel contexts).
1072 */
1073 int sc_restart(struct send_context *sc)
1074 {
1075 struct hfi1_devdata *dd = sc->dd;
1076 u64 reg;
1077 u32 loop;
1078 int count;
1079
1080 /* bounce off if not halted, or being free'd */
1081 if (!(sc->flags & SCF_HALTED) || (sc->flags & SCF_IN_FREE))
1082 return -EINVAL;
1083
1084 dd_dev_info(dd, "restarting send context %u(%u)\n", sc->sw_index,
1085 sc->hw_context);
1086
1087 /*
1088 * Step 1: Wait for the context to actually halt.
1089 *
1090 * The error interrupt is asynchronous to actually setting halt
1091 * on the context.
1092 */
1093 loop = 0;
1094 while (1) {
1095 reg = read_kctxt_csr(dd, sc->hw_context, SC(STATUS));
1096 if (reg & SC(STATUS_CTXT_HALTED_SMASK))
1097 break;
1098 if (loop > 100) {
1099 dd_dev_err(dd, "%s: context %u(%u) not halting, skipping\n",
1100 __func__, sc->sw_index, sc->hw_context);
1101 return -ETIME;
1102 }
1103 loop++;
1104 udelay(1);
1105 }
1106
1107 /*
1108 * Step 2: Ensure no users are still trying to write to PIO.
1109 *
1110 * For kernel contexts, we have already turned off buffer allocation.
1111 * Now wait for the buffer count to go to zero.
1112 *
1113 * For user contexts, the user handling code has cut off write access
1114 * to the context's PIO pages before calling this routine and will
1115 * restore write access after this routine returns.
1116 */
1117 if (sc->type != SC_USER) {
1118 /* kernel context */
1119 loop = 0;
1120 while (1) {
1121 count = get_buffers_allocated(sc);
1122 if (count == 0)
1123 break;
1124 if (loop > 100) {
1125 dd_dev_err(dd,
1126 "%s: context %u(%u) timeout waiting for PIO buffers to zero, remaining %d\n",
1127 __func__, sc->sw_index,
1128 sc->hw_context, count);
1129 }
1130 loop++;
1131 udelay(1);
1132 }
1133 }
1134
1135 /*
1136 * Step 3: Wait for all packets to egress.
1137 * This is done while disabling the send context
1138 *
1139 * Step 4: Disable the context
1140 *
1141 * This is a superset of the halt. After the disable, the
1142 * errors can be cleared.
1143 */
1144 sc_disable(sc);
1145
1146 /*
1147 * Step 5: Enable the context
1148 *
1149 * This enable will clear the halted flag and per-send context
1150 * error flags.
1151 */
1152 return sc_enable(sc);
1153 }
1154
1155 /*
1156 * PIO freeze processing. To be called after the TXE block is fully frozen.
1157 * Go through all frozen send contexts and disable them. The contexts are
1158 * already stopped by the freeze.
1159 */
1160 void pio_freeze(struct hfi1_devdata *dd)
1161 {
1162 struct send_context *sc;
1163 int i;
1164
1165 for (i = 0; i < dd->num_send_contexts; i++) {
1166 sc = dd->send_contexts[i].sc;
1167 /*
1168 * Don't disable unallocated, unfrozen, or user send contexts.
1169 * User send contexts will be disabled when the process
1170 * calls into the driver to reset its context.
1171 */
1172 if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
1173 continue;
1174
1175 /* only need to disable, the context is already stopped */
1176 sc_disable(sc);
1177 }
1178 }
1179
1180 /*
1181 * Unfreeze PIO for kernel send contexts. The precondition for calling this
1182 * is that all PIO send contexts have been disabled and the SPC freeze has
1183 * been cleared. Now perform the last step and re-enable each kernel context.
1184 * User (PSM) processing will occur when PSM calls into the kernel to
1185 * acknowledge the freeze.
1186 */
1187 void pio_kernel_unfreeze(struct hfi1_devdata *dd)
1188 {
1189 struct send_context *sc;
1190 int i;
1191
1192 for (i = 0; i < dd->num_send_contexts; i++) {
1193 sc = dd->send_contexts[i].sc;
1194 if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
1195 continue;
1196 if (sc->flags & SCF_LINK_DOWN)
1197 continue;
1198
1199 sc_enable(sc); /* will clear the sc frozen flag */
1200 }
1201 }
1202
1203 /**
1204 * pio_kernel_linkup() - Re-enable send contexts after linkup event
1205  * @dd: valid device data
1206 *
1207 * When the link goes down, the freeze path is taken. However, a link down
1208  * event is different from a freeze because, if the send context is re-enabled,
1209  * whoever was sending data will start sending again, which will hang
1210  * any QP that is sending data.
1211 *
1212 * The freeze path now looks at the type of event that occurs and takes this
1213  * path for a link down event.
1214 */
1215 void pio_kernel_linkup(struct hfi1_devdata *dd)
1216 {
1217 struct send_context *sc;
1218 int i;
1219
1220 for (i = 0; i < dd->num_send_contexts; i++) {
1221 sc = dd->send_contexts[i].sc;
1222 if (!sc || !(sc->flags & SCF_LINK_DOWN) || sc->type == SC_USER)
1223 continue;
1224
1225 sc_enable(sc); /* will clear the sc link down flag */
1226 }
1227 }
1228
1229 /*
1230 * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear.
1231 * Returns:
1232 * -ETIMEDOUT - if we wait too long
1233 * -EIO - if there was an error
1234 */
1235 static int pio_init_wait_progress(struct hfi1_devdata *dd)
1236 {
1237 u64 reg;
1238 int max, count = 0;
1239
1240 /* max is the longest possible HW init time / delay */
1241 max = (dd->icode == ICODE_FPGA_EMULATION) ? 120 : 5;
1242 while (1) {
1243 reg = read_csr(dd, SEND_PIO_INIT_CTXT);
1244 if (!(reg & SEND_PIO_INIT_CTXT_PIO_INIT_IN_PROGRESS_SMASK))
1245 break;
1246 if (count >= max)
1247 return -ETIMEDOUT;
1248 udelay(5);
1249 count++;
1250 }
1251
1252 return reg & SEND_PIO_INIT_CTXT_PIO_INIT_ERR_SMASK ? -EIO : 0;
1253 }
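/*
 * Timing note derived from the loop above: the wait is bounded at roughly
 * max * 5 usec, i.e. about 25 usec on real hardware and 600 usec under FPGA
 * emulation, before -ETIMEDOUT is returned.
 */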
1254
1255 /*
1256 * Reset all of the send contexts to their power-on state. Used
1257 * only during manual init - no lock against sc_enable needed.
1258 */
1259 void pio_reset_all(struct hfi1_devdata *dd)
1260 {
1261 int ret;
1262
1263 /* make sure the init engine is not busy */
1264 ret = pio_init_wait_progress(dd);
1265 /* ignore any timeout */
1266 if (ret == -EIO) {
1267 /* clear the error */
1268 write_csr(dd, SEND_PIO_ERR_CLEAR,
1269 SEND_PIO_ERR_CLEAR_PIO_INIT_SM_IN_ERR_SMASK);
1270 }
1271
1272 /* reset init all */
1273 write_csr(dd, SEND_PIO_INIT_CTXT,
1274 SEND_PIO_INIT_CTXT_PIO_ALL_CTXT_INIT_SMASK);
1275 udelay(2);
1276 ret = pio_init_wait_progress(dd);
1277 if (ret < 0) {
1278 dd_dev_err(dd,
1279 "PIO send context init %s while initializing all PIO blocks\n",
1280 ret == -ETIMEDOUT ? "is stuck" : "had an error");
1281 }
1282 }
1283
1284 /* enable the context */
1285 int sc_enable(struct send_context *sc)
1286 {
1287 u64 sc_ctrl, reg, pio;
1288 struct hfi1_devdata *dd;
1289 unsigned long flags;
1290 int ret = 0;
1291
1292 if (!sc)
1293 return -EINVAL;
1294 dd = sc->dd;
1295
1296 /*
1297 * Obtain the allocator lock to guard against any allocation
1298 * attempts (which should not happen prior to context being
1299 * enabled). On the release/disable side we don't need to
1300 * worry about locking since the releaser will not do anything
1301 * if the context accounting values have not changed.
1302 */
1303 spin_lock_irqsave(&sc->alloc_lock, flags);
1304 sc_ctrl = read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
1305 if ((sc_ctrl & SC(CTRL_CTXT_ENABLE_SMASK)))
1306 goto unlock; /* already enabled */
1307
1308 /* IMPORTANT: only clear free and fill if transitioning 0 -> 1 */
1309
1310 *sc->hw_free = 0;
1311 sc->free = 0;
1312 sc->alloc_free = 0;
1313 sc->fill = 0;
1314 sc->fill_wrap = 0;
1315 sc->sr_head = 0;
1316 sc->sr_tail = 0;
1317 sc->flags = 0;
1318 	/* the alloc lock ensures no fast path allocation */
1319 reset_buffers_allocated(sc);
1320
1321 /*
1322 * Clear all per-context errors. Some of these will be set when
1323 * we are re-enabling after a context halt. Now that the context
1324 * is disabled, the halt will not clear until after the PIO init
1325 * engine runs below.
1326 */
1327 reg = read_kctxt_csr(dd, sc->hw_context, SC(ERR_STATUS));
1328 if (reg)
1329 write_kctxt_csr(dd, sc->hw_context, SC(ERR_CLEAR), reg);
1330
1331 /*
1332 * The HW PIO initialization engine can handle only one init
1333 * request at a time. Serialize access to each device's engine.
1334 */
1335 spin_lock(&dd->sc_init_lock);
1336 /*
1337 * Since access to this code block is serialized and
1338 * each access waits for the initialization to complete
1339 * before releasing the lock, the PIO initialization engine
1340 * should not be in use, so we don't have to wait for the
1341 * InProgress bit to go down.
1342 */
1343 pio = ((sc->hw_context & SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_MASK) <<
1344 SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_SHIFT) |
1345 SEND_PIO_INIT_CTXT_PIO_SINGLE_CTXT_INIT_SMASK;
1346 write_csr(dd, SEND_PIO_INIT_CTXT, pio);
1347 /*
1348 * Wait until the engine is done. Give the chip the required time
1349 * so, hopefully, we read the register just once.
1350 */
1351 udelay(2);
1352 ret = pio_init_wait_progress(dd);
1353 spin_unlock(&dd->sc_init_lock);
1354 if (ret) {
1355 dd_dev_err(dd,
1356 "sctxt%u(%u): Context not enabled due to init failure %d\n",
1357 sc->sw_index, sc->hw_context, ret);
1358 goto unlock;
1359 }
1360
1361 /*
1362 * All is well. Enable the context.
1363 */
1364 sc_ctrl |= SC(CTRL_CTXT_ENABLE_SMASK);
1365 write_kctxt_csr(dd, sc->hw_context, SC(CTRL), sc_ctrl);
1366 /*
1367 * Read SendCtxtCtrl to force the write out and prevent a timing
1368 * hazard where a PIO write may reach the context before the enable.
1369 */
1370 read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
1371 sc->flags |= SCF_ENABLED;
1372
1373 unlock:
1374 spin_unlock_irqrestore(&sc->alloc_lock, flags);
1375
1376 return ret;
1377 }
1378
1379 /* force a credit return on the context */
1380 void sc_return_credits(struct send_context *sc)
1381 {
1382 if (!sc)
1383 return;
1384
1385 /* a 0->1 transition schedules a credit return */
1386 write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE),
1387 SC(CREDIT_FORCE_FORCE_RETURN_SMASK));
1388 /*
1389 * Ensure that the write is flushed and the credit return is
1390 * scheduled. We care more about the 0 -> 1 transition.
1391 */
1392 read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE));
1393 /* set back to 0 for next time */
1394 write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), 0);
1395 }
1396
1397 /* allow all in-flight packets to drain on the context */
1398 void sc_flush(struct send_context *sc)
1399 {
1400 if (!sc)
1401 return;
1402
1403 sc_wait_for_packet_egress(sc, 1);
1404 }
1405
1406 /* drop all packets on the context, no waiting until they are sent */
1407 void sc_drop(struct send_context *sc)
1408 {
1409 if (!sc)
1410 return;
1411
1412 dd_dev_info(sc->dd, "%s: context %u(%u) - not implemented\n",
1413 __func__, sc->sw_index, sc->hw_context);
1414 }
1415
1416 /*
1417 * Start the software reaction to a context halt or SPC freeze:
1418 * - mark the context as halted or frozen
1419 * - stop buffer allocations
1420 *
1421 * Called from the error interrupt. Other work is deferred until
1422 * out of the interrupt.
1423 */
1424 void sc_stop(struct send_context *sc, int flag)
1425 {
1426 unsigned long flags;
1427
1428 /* stop buffer allocations */
1429 spin_lock_irqsave(&sc->alloc_lock, flags);
1430 /* mark the context */
1431 sc->flags |= flag;
1432 sc->flags &= ~SCF_ENABLED;
1433 spin_unlock_irqrestore(&sc->alloc_lock, flags);
1434 wake_up(&sc->halt_wait);
1435 }
1436
1437 #define BLOCK_DWORDS (PIO_BLOCK_SIZE / sizeof(u32))
1438 #define dwords_to_blocks(x) DIV_ROUND_UP(x, BLOCK_DWORDS)
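/*
 * Example (with a 64-byte PIO block, i.e. BLOCK_DWORDS == 16): a 20-DWORD
 * packet, PBC included, needs dwords_to_blocks(20) == 2 send blocks.
 */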
1439
1440 /*
1441 * The send context buffer "allocator".
1442 *
1443 * @sc: the PIO send context we are allocating from
1444 * @len: length of whole packet - including PBC - in dwords
1445 * @cb: optional callback to call when the buffer is finished sending
1446 * @arg: argument for cb
1447 *
1448  * Return a pointer to a PIO buffer, NULL if there is not enough room, or
1449  * ERR_PTR(-ECOMM) when the link is down.
1450 */
1451 struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
1452 pio_release_cb cb, void *arg)
1453 {
1454 struct pio_buf *pbuf = NULL;
1455 unsigned long flags;
1456 unsigned long avail;
1457 unsigned long blocks = dwords_to_blocks(dw_len);
1458 u32 fill_wrap;
1459 int trycount = 0;
1460 u32 head, next;
1461
1462 spin_lock_irqsave(&sc->alloc_lock, flags);
1463 if (!(sc->flags & SCF_ENABLED)) {
1464 spin_unlock_irqrestore(&sc->alloc_lock, flags);
1465 return ERR_PTR(-ECOMM);
1466 }
1467
1468 retry:
1469 avail = (unsigned long)sc->credits - (sc->fill - sc->alloc_free);
1470 if (blocks > avail) {
1471 /* not enough room */
1472 if (unlikely(trycount)) { /* already tried to get more room */
1473 spin_unlock_irqrestore(&sc->alloc_lock, flags);
1474 goto done;
1475 }
1476 /* copy from receiver cache line and recalculate */
1477 sc->alloc_free = READ_ONCE(sc->free);
1478 avail =
1479 (unsigned long)sc->credits -
1480 (sc->fill - sc->alloc_free);
1481 if (blocks > avail) {
1482 /* still no room, actively update */
1483 sc_release_update(sc);
1484 sc->alloc_free = READ_ONCE(sc->free);
1485 trycount++;
1486 goto retry;
1487 }
1488 }
1489
1490 /* there is enough room */
1491
1492 preempt_disable();
1493 this_cpu_inc(*sc->buffers_allocated);
1494
1495 /* read this once */
1496 head = sc->sr_head;
1497
1498 /* "allocate" the buffer */
1499 sc->fill += blocks;
1500 fill_wrap = sc->fill_wrap;
1501 sc->fill_wrap += blocks;
1502 if (sc->fill_wrap >= sc->credits)
1503 sc->fill_wrap = sc->fill_wrap - sc->credits;
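	/*
	 * sc->fill is a free-running count of allocated blocks: it is compared
	 * against sc->free/alloc_free for available credits and recorded in
	 * sent_at below. fill_wrap is the same count wrapped to the context
	 * size and positions the buffer within the PIO memory.
	 */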
1504
1505 /*
1506 * Fill the parts that the releaser looks at before moving the head.
1507 * The only necessary piece is the sent_at field. The credits
1508 * we have just allocated cannot have been returned yet, so the
1509 * cb and arg will not be looked at for a "while". Put them
1510 * on this side of the memory barrier anyway.
1511 */
1512 pbuf = &sc->sr[head].pbuf;
1513 pbuf->sent_at = sc->fill;
1514 pbuf->cb = cb;
1515 pbuf->arg = arg;
1516 pbuf->sc = sc; /* could be filled in at sc->sr init time */
1517 /* make sure this is in memory before updating the head */
1518
1519 /* calculate next head index, do not store */
1520 next = head + 1;
1521 if (next >= sc->sr_size)
1522 next = 0;
1523 /*
1524 * update the head - must be last! - the releaser can look at fields
1525 * in pbuf once we move the head
1526 */
1527 smp_wmb();
1528 sc->sr_head = next;
1529 spin_unlock_irqrestore(&sc->alloc_lock, flags);
1530
1531 /* finish filling in the buffer outside the lock */
1532 pbuf->start = sc->base_addr + fill_wrap * PIO_BLOCK_SIZE;
1533 pbuf->end = sc->base_addr + sc->size;
1534 pbuf->qw_written = 0;
1535 pbuf->carry_bytes = 0;
1536 pbuf->carry.val64 = 0;
1537 done:
1538 return pbuf;
1539 }
1540
1541 /*
1542 * There are at least two entities that can turn on credit return
1543 * interrupts and they can overlap. Avoid problems by implementing
1544 * a count scheme that is enforced by a lock. The lock is needed because
1545 * the count and CSR write must be paired.
1546 */
1547
1548 /*
1549 * Start credit return interrupts. This is managed by a count. If already
1550 * on, just increment the count.
1551 */
1552 void sc_add_credit_return_intr(struct send_context *sc)
1553 {
1554 unsigned long flags;
1555
1556 /* lock must surround both the count change and the CSR update */
1557 spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
1558 if (sc->credit_intr_count == 0) {
1559 sc->credit_ctrl |= SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
1560 write_kctxt_csr(sc->dd, sc->hw_context,
1561 SC(CREDIT_CTRL), sc->credit_ctrl);
1562 }
1563 sc->credit_intr_count++;
1564 spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
1565 }
1566
1567 /*
1568 * Stop credit return interrupts. This is managed by a count. Decrement the
1569 * count, if the last user, then turn the credit interrupts off.
1570 */
1571 void sc_del_credit_return_intr(struct send_context *sc)
1572 {
1573 unsigned long flags;
1574
1575 WARN_ON(sc->credit_intr_count == 0);
1576
1577 /* lock must surround both the count change and the CSR update */
1578 spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
1579 sc->credit_intr_count--;
1580 if (sc->credit_intr_count == 0) {
1581 sc->credit_ctrl &= ~SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
1582 write_kctxt_csr(sc->dd, sc->hw_context,
1583 SC(CREDIT_CTRL), sc->credit_ctrl);
1584 }
1585 spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
1586 }
1587
1588 /*
1589  * The caller must be careful when calling this: every call with needint
1590  * set must be paired with a later call with needint clear.
1591 */
1592 void hfi1_sc_wantpiobuf_intr(struct send_context *sc, u32 needint)
1593 {
1594 if (needint)
1595 sc_add_credit_return_intr(sc);
1596 else
1597 sc_del_credit_return_intr(sc);
1598 trace_hfi1_wantpiointr(sc, needint, sc->credit_ctrl);
1599 if (needint)
1600 sc_return_credits(sc);
1601 }
1602
1603 /**
1604 * sc_piobufavail - callback when a PIO buffer is available
1605 * @sc: the send context
1606 *
1607 * This is called from the interrupt handler when a PIO buffer is
1608 * available after hfi1_verbs_send() returned an error that no buffers were
1609 * available. Disable the interrupt if there are no more QPs waiting.
1610 */
1611 static void sc_piobufavail(struct send_context *sc)
1612 {
1613 struct hfi1_devdata *dd = sc->dd;
1614 struct list_head *list;
1615 struct rvt_qp *qps[PIO_WAIT_BATCH_SIZE];
1616 struct rvt_qp *qp;
1617 struct hfi1_qp_priv *priv;
1618 unsigned long flags;
1619 uint i, n = 0, top_idx = 0;
1620
1621 if (dd->send_contexts[sc->sw_index].type != SC_KERNEL &&
1622 dd->send_contexts[sc->sw_index].type != SC_VL15)
1623 return;
1624 list = &sc->piowait;
1625 /*
1626 * Note: checking that the piowait list is empty and clearing
1627 * the buffer available interrupt needs to be atomic or we
1628 * could end up with QPs on the wait list with the interrupt
1629 * disabled.
1630 */
1631 write_seqlock_irqsave(&sc->waitlock, flags);
1632 while (!list_empty(list)) {
1633 struct iowait *wait;
1634
1635 if (n == ARRAY_SIZE(qps))
1636 break;
1637 wait = list_first_entry(list, struct iowait, list);
1638 iowait_get_priority(wait);
1639 qp = iowait_to_qp(wait);
1640 priv = qp->priv;
1641 list_del_init(&priv->s_iowait.list);
1642 priv->s_iowait.lock = NULL;
1643 if (n) {
1644 priv = qps[top_idx]->priv;
1645 top_idx = iowait_priority_update_top(wait,
1646 &priv->s_iowait,
1647 n, top_idx);
1648 }
1649
1650 /* refcount held until actual wake up */
1651 qps[n++] = qp;
1652 }
1653 /*
1654 	 * If there had been waiters and there are more,
1655 	 * ensure that we redo the force to avoid a potential hang.
1656 */
1657 if (n) {
1658 hfi1_sc_wantpiobuf_intr(sc, 0);
1659 if (!list_empty(list))
1660 hfi1_sc_wantpiobuf_intr(sc, 1);
1661 }
1662 write_sequnlock_irqrestore(&sc->waitlock, flags);
1663
1664 /* Wake up the top-priority one first */
1665 if (n)
1666 hfi1_qp_wakeup(qps[top_idx],
1667 RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
1668 for (i = 0; i < n; i++)
1669 if (i != top_idx)
1670 hfi1_qp_wakeup(qps[i],
1671 RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
1672 }
1673
1674 /* translate a send credit update to a bit code of reasons */
1675 static inline int fill_code(u64 hw_free)
1676 {
1677 int code = 0;
1678
1679 if (hw_free & CR_STATUS_SMASK)
1680 code |= PRC_STATUS_ERR;
1681 if (hw_free & CR_CREDIT_RETURN_DUE_TO_PBC_SMASK)
1682 code |= PRC_PBC;
1683 if (hw_free & CR_CREDIT_RETURN_DUE_TO_THRESHOLD_SMASK)
1684 code |= PRC_THRESHOLD;
1685 if (hw_free & CR_CREDIT_RETURN_DUE_TO_ERR_SMASK)
1686 code |= PRC_FILL_ERR;
1687 if (hw_free & CR_CREDIT_RETURN_DUE_TO_FORCE_SMASK)
1688 code |= PRC_SC_DISABLE;
1689 return code;
1690 }
1691
1692 /* use the jiffies compare to get the wrap right */
1693 #define sent_before(a, b) time_before(a, b) /* a < b */
1694
1695 /*
1696 * The send context buffer "releaser".
1697 */
1698 void sc_release_update(struct send_context *sc)
1699 {
1700 struct pio_buf *pbuf;
1701 u64 hw_free;
1702 u32 head, tail;
1703 unsigned long old_free;
1704 unsigned long free;
1705 unsigned long extra;
1706 unsigned long flags;
1707 int code;
1708
1709 if (!sc)
1710 return;
1711
1712 spin_lock_irqsave(&sc->release_lock, flags);
1713 /* update free */
1714 hw_free = le64_to_cpu(*sc->hw_free); /* volatile read */
1715 old_free = sc->free;
1716 extra = (((hw_free & CR_COUNTER_SMASK) >> CR_COUNTER_SHIFT)
1717 - (old_free & CR_COUNTER_MASK))
1718 & CR_COUNTER_MASK;
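	/*
	 * The hardware counter in hw_free is narrower than sc->free, so the
	 * new-credit delta is computed modulo CR_COUNTER_MASK and then added
	 * to the driver's free-running free count.
	 */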
1719 free = old_free + extra;
1720 trace_hfi1_piofree(sc, extra);
1721
1722 /* call sent buffer callbacks */
1723 code = -1; /* code not yet set */
1724 head = READ_ONCE(sc->sr_head); /* snapshot the head */
1725 tail = sc->sr_tail;
1726 while (head != tail) {
1727 pbuf = &sc->sr[tail].pbuf;
1728
1729 if (sent_before(free, pbuf->sent_at)) {
1730 /* not sent yet */
1731 break;
1732 }
1733 if (pbuf->cb) {
1734 if (code < 0) /* fill in code on first user */
1735 code = fill_code(hw_free);
1736 (*pbuf->cb)(pbuf->arg, code);
1737 }
1738
1739 tail++;
1740 if (tail >= sc->sr_size)
1741 tail = 0;
1742 }
1743 sc->sr_tail = tail;
1744 /* make sure tail is updated before free */
1745 smp_wmb();
1746 sc->free = free;
1747 spin_unlock_irqrestore(&sc->release_lock, flags);
1748 sc_piobufavail(sc);
1749 }
1750
1751 /*
1752 * Send context group releaser. Argument is the send context that caused
1753 * the interrupt. Called from the send context interrupt handler.
1754 *
1755 * Call release on all contexts in the group.
1756 *
1757 * This routine takes the sc_lock without an irqsave because it is only
1758 * called from an interrupt handler. Adjust if that changes.
1759 */
1760 void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context)
1761 {
1762 struct send_context *sc;
1763 u32 sw_index;
1764 u32 gc, gc_end;
1765
1766 spin_lock(&dd->sc_lock);
1767 sw_index = dd->hw_to_sw[hw_context];
1768 if (unlikely(sw_index >= dd->num_send_contexts)) {
1769 dd_dev_err(dd, "%s: invalid hw (%u) to sw (%u) mapping\n",
1770 __func__, hw_context, sw_index);
1771 goto done;
1772 }
1773 sc = dd->send_contexts[sw_index].sc;
1774 if (unlikely(!sc))
1775 goto done;
1776
1777 gc = group_context(hw_context, sc->group);
1778 gc_end = gc + group_size(sc->group);
1779 for (; gc < gc_end; gc++) {
1780 sw_index = dd->hw_to_sw[gc];
1781 if (unlikely(sw_index >= dd->num_send_contexts)) {
1782 dd_dev_err(dd,
1783 "%s: invalid hw (%u) to sw (%u) mapping\n",
1784 __func__, hw_context, sw_index);
1785 continue;
1786 }
1787 sc_release_update(dd->send_contexts[sw_index].sc);
1788 }
1789 done:
1790 spin_unlock(&dd->sc_lock);
1791 }
1792
1793 /*
1794 * pio_select_send_context_vl() - select send context
1795 * @dd: devdata
1796 * @selector: a spreading factor
1797 * @vl: this vl
1798 *
1799 * This function returns a send context based on the selector and a vl.
1800 * The mapping fields are protected by RCU
1801 */
1802 struct send_context *pio_select_send_context_vl(struct hfi1_devdata *dd,
1803 u32 selector, u8 vl)
1804 {
1805 struct pio_vl_map *m;
1806 struct pio_map_elem *e;
1807 struct send_context *rval;
1808
1809 /*
1810 	 * NOTE: This should only happen if SC->VL changed after the initial
1811 	 * checks on the QP/AH.
1812 	 * The default below returns VL0's send context.
1813 */
1814 if (unlikely(vl >= num_vls)) {
1815 rval = NULL;
1816 goto done;
1817 }
1818
1819 rcu_read_lock();
1820 m = rcu_dereference(dd->pio_map);
1821 if (unlikely(!m)) {
1822 rcu_read_unlock();
1823 return dd->vld[0].sc;
1824 }
1825 e = m->map[vl & m->mask];
1826 rval = e->ksc[selector & e->mask];
1827 rcu_read_unlock();
1828
1829 done:
1830 rval = !rval ? dd->vld[0].sc : rval;
1831 return rval;
1832 }
1833
1834 /*
1835 * pio_select_send_context_sc() - select send context
1836 * @dd: devdata
1837 * @selector: a spreading factor
1838 * @sc5: the 5 bit sc
1839 *
1840  * This function returns a send context based on the selector and an sc.
1841 */
1842 struct send_context *pio_select_send_context_sc(struct hfi1_devdata *dd,
1843 u32 selector, u8 sc5)
1844 {
1845 u8 vl = sc_to_vlt(dd, sc5);
1846
1847 return pio_select_send_context_vl(dd, selector, vl);
1848 }
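
/*
 * Illustrative usage sketch (not driver code): how a caller holding a
 * devdata pointer, a per-flow selector and a 5-bit SC might pick a send
 * context.  The function name is hypothetical; the fallback to VL0's
 * context on a bad VL or missing map happens inside
 * pio_select_send_context_vl().
 */
static struct send_context *__maybe_unused
example_pick_context(struct hfi1_devdata *dd, u32 flow_hash, u8 sc5)
{
	/* any reasonably well distributed selector (e.g. a flow hash) works */
	return pio_select_send_context_sc(dd, flow_hash, sc5);
}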
1849
1850 /*
1851 * Free the indicated map struct
1852 */
1853 static void pio_map_free(struct pio_vl_map *m)
1854 {
1855 int i;
1856
1857 for (i = 0; m && i < m->actual_vls; i++)
1858 kfree(m->map[i]);
1859 kfree(m);
1860 }
1861
1862 /*
1863 * Handle RCU callback
1864 */
1865 static void pio_map_rcu_callback(struct rcu_head *list)
1866 {
1867 struct pio_vl_map *m = container_of(list, struct pio_vl_map, list);
1868
1869 pio_map_free(m);
1870 }
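
/*
 * Illustrative sketch (not driver code): the generic pattern pio_map_init()
 * uses below to swap an RCU-protected map -- publish the new pointer under
 * the writer lock, then defer freeing the old one until a grace period has
 * elapsed.  This sketch uses kfree_rcu(); the driver needs call_rcu() with
 * pio_map_rcu_callback() because the map owns per-VL sub-allocations that a
 * plain kfree() would leak.  Names here are hypothetical.
 */
struct example_map {
	struct rcu_head rcu;
	u32 mask;
};

static void __maybe_unused example_swap_map(struct example_map __rcu **slot,
					    struct example_map *newmap,
					    spinlock_t *lock)
{
	struct example_map *oldmap;

	spin_lock_irq(lock);
	oldmap = rcu_dereference_protected(*slot, lockdep_is_held(lock));
	rcu_assign_pointer(*slot, newmap);
	spin_unlock_irq(lock);
	if (oldmap)
		kfree_rcu(oldmap, rcu);
}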
1871
1872 /*
1873 * Set credit return threshold for the kernel send context
1874 */
1875 static void set_threshold(struct hfi1_devdata *dd, int scontext, int i)
1876 {
1877 u32 thres;
1878
1879 thres = min(sc_percent_to_threshold(dd->kernel_send_context[scontext],
1880 50),
1881 sc_mtu_to_threshold(dd->kernel_send_context[scontext],
1882 dd->vld[i].mtu,
1883 dd->rcd[0]->rcvhdrqentsize));
1884 sc_set_cr_threshold(dd->kernel_send_context[scontext], thres);
1885 }
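
/*
 * Worked example for the threshold choice above (assumed numbers): for a
 * kernel send context with 128 credits, the 50% rule would yield 64 credits,
 * assuming sc_percent_to_threshold() simply scales the context's credit
 * count.  The MTU-based value is derived from the credits needed for a
 * full-sized packet on this VL; the smaller of the two is what gets
 * programmed via sc_set_cr_threshold().
 */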
1886
1887 /*
1888 * pio_map_init - called when #vls change
1889 * @dd: hfi1_devdata
1890 * @port: port number
1891 * @num_vls: number of vls
1892 * @vl_scontexts: per vl send context mapping (optional)
1893 *
1894 * This routine changes the mapping based on the number of vls.
1895 *
1896 * vl_scontexts is used to specify a non-uniform vl/send context
1897 * loading. NULL implies auto computing the loading and giving each
1898  * VL a uniform distribution of send contexts per VL.
1899 *
1900  * The auto algorithm computes the sc_per_vl and the number of extra
1901  * send contexts. Any extra send contexts are added from the last VL
1902  * on down.
1903 *
1904 * rcu locking is used here to control access to the mapping fields.
1905 *
1906  * If either num_vls or num_send_contexts is not a power of 2, the
1907 * array sizes in the struct pio_vl_map and the struct pio_map_elem are
1908 * rounded up to the next highest power of 2 and the first entry is
1909 * reused in a round robin fashion.
1910 *
1911  * If an error occurs, the mapping is left unchanged.
1913 *
1914 */
1915 int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts)
1916 {
1917 int i, j;
1918 int extra, sc_per_vl;
1919 int scontext = 1;
1920 int num_kernel_send_contexts = 0;
1921 u8 lvl_scontexts[OPA_MAX_VLS];
1922 struct pio_vl_map *oldmap, *newmap;
1923
1924 if (!vl_scontexts) {
1925 for (i = 0; i < dd->num_send_contexts; i++)
1926 if (dd->send_contexts[i].type == SC_KERNEL)
1927 num_kernel_send_contexts++;
1928 /* truncate divide */
1929 sc_per_vl = num_kernel_send_contexts / num_vls;
1930 /* extras */
1931 extra = num_kernel_send_contexts % num_vls;
1932 vl_scontexts = lvl_scontexts;
1933 /* add extras from last vl down */
1934 for (i = num_vls - 1; i >= 0; i--, extra--)
1935 vl_scontexts[i] = sc_per_vl + (extra > 0 ? 1 : 0);
1936 }
1937 /* build new map */
1938 newmap = kzalloc(sizeof(*newmap) +
1939 roundup_pow_of_two(num_vls) *
1940 sizeof(struct pio_map_elem *),
1941 GFP_KERNEL);
1942 if (!newmap)
1943 goto bail;
1944 newmap->actual_vls = num_vls;
1945 newmap->vls = roundup_pow_of_two(num_vls);
1946 newmap->mask = (1 << ilog2(newmap->vls)) - 1;
1947 for (i = 0; i < newmap->vls; i++) {
1948 /* save for wrap around */
1949 int first_scontext = scontext;
1950
1951 if (i < newmap->actual_vls) {
1952 int sz = roundup_pow_of_two(vl_scontexts[i]);
1953
1954 /* only allocate once */
1955 newmap->map[i] = kzalloc(sizeof(*newmap->map[i]) +
1956 sz * sizeof(struct
1957 send_context *),
1958 GFP_KERNEL);
1959 if (!newmap->map[i])
1960 goto bail;
1961 newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
1962 /*
1963 * assign send contexts and
1964 * adjust credit return threshold
1965 */
1966 for (j = 0; j < sz; j++) {
1967 if (dd->kernel_send_context[scontext]) {
1968 newmap->map[i]->ksc[j] =
1969 dd->kernel_send_context[scontext];
1970 set_threshold(dd, scontext, i);
1971 }
1972 if (++scontext >= first_scontext +
1973 vl_scontexts[i])
1974 /* wrap back to first send context */
1975 scontext = first_scontext;
1976 }
1977 } else {
1978 /* just re-use entry without allocating */
1979 newmap->map[i] = newmap->map[i % num_vls];
1980 }
1981 scontext = first_scontext + vl_scontexts[i];
1982 }
1983 /* newmap in hand, save old map */
1984 spin_lock_irq(&dd->pio_map_lock);
1985 oldmap = rcu_dereference_protected(dd->pio_map,
1986 lockdep_is_held(&dd->pio_map_lock));
1987
1988 /* publish newmap */
1989 rcu_assign_pointer(dd->pio_map, newmap);
1990
1991 spin_unlock_irq(&dd->pio_map_lock);
1992 /* success, free any old map after grace period */
1993 if (oldmap)
1994 call_rcu(&oldmap->list, pio_map_rcu_callback);
1995 return 0;
1996 bail:
1997 /* free any partial allocation */
1998 pio_map_free(newmap);
1999 return -ENOMEM;
2000 }
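
/*
 * Worked example of the auto distribution above (assumed numbers): with 18
 * kernel send contexts and 4 data VLs, sc_per_vl = 4 and extra = 2, so
 * vl_scontexts[] ends up {4, 4, 5, 5} -- the extras are handed out starting
 * from the last VL.  Each per-VL element is then sized up to a power of two
 * (5 -> 8) and the assigned contexts are repeated round robin into the
 * spare slots, which keeps the fast-path lookup a simple "selector & mask"
 * with no modulo.
 */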
2001
2002 void free_pio_map(struct hfi1_devdata *dd)
2003 {
2004 /* Free PIO map if allocated */
2005 if (rcu_access_pointer(dd->pio_map)) {
2006 spin_lock_irq(&dd->pio_map_lock);
2007 pio_map_free(rcu_access_pointer(dd->pio_map));
2008 RCU_INIT_POINTER(dd->pio_map, NULL);
2009 spin_unlock_irq(&dd->pio_map_lock);
2010 synchronize_rcu();
2011 }
2012 kfree(dd->kernel_send_context);
2013 dd->kernel_send_context = NULL;
2014 }
2015
2016 int init_pervl_scs(struct hfi1_devdata *dd)
2017 {
2018 int i;
2019 u64 mask, all_vl_mask = (u64)0x80ff; /* VLs 0-7, 15 */
2020 u64 data_vls_mask = (u64)0x00ff; /* VLs 0-7 */
2021 u32 ctxt;
2022 struct hfi1_pportdata *ppd = dd->pport;
2023
2024 dd->vld[15].sc = sc_alloc(dd, SC_VL15,
2025 dd->rcd[0]->rcvhdrqentsize, dd->node);
2026 if (!dd->vld[15].sc)
2027 return -ENOMEM;
2028
2029 hfi1_init_ctxt(dd->vld[15].sc);
2030 dd->vld[15].mtu = enum_to_mtu(OPA_MTU_2048);
2031
2032 dd->kernel_send_context = kcalloc_node(dd->num_send_contexts,
2033 sizeof(struct send_context *),
2034 GFP_KERNEL, dd->node);
2035 if (!dd->kernel_send_context)
2036 goto freesc15;
2037
2038 dd->kernel_send_context[0] = dd->vld[15].sc;
2039
2040 for (i = 0; i < num_vls; i++) {
2041 /*
2042 * Since this function does not deal with a specific
2043 * receive context but we need the RcvHdrQ entry size,
2044 * use the size from rcd[0]. It is guaranteed to be
2045 * valid at this point and will remain the same for all
2046 * receive contexts.
2047 */
2048 dd->vld[i].sc = sc_alloc(dd, SC_KERNEL,
2049 dd->rcd[0]->rcvhdrqentsize, dd->node);
2050 if (!dd->vld[i].sc)
2051 goto nomem;
2052 dd->kernel_send_context[i + 1] = dd->vld[i].sc;
2053 hfi1_init_ctxt(dd->vld[i].sc);
2054 		/* non-VL15 contexts start with the max MTU */
2055 dd->vld[i].mtu = hfi1_max_mtu;
2056 }
2057 for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) {
2058 dd->kernel_send_context[i + 1] =
2059 sc_alloc(dd, SC_KERNEL, dd->rcd[0]->rcvhdrqentsize, dd->node);
2060 if (!dd->kernel_send_context[i + 1])
2061 goto nomem;
2062 hfi1_init_ctxt(dd->kernel_send_context[i + 1]);
2063 }
2064
2065 sc_enable(dd->vld[15].sc);
2066 ctxt = dd->vld[15].sc->hw_context;
2067 mask = all_vl_mask & ~(1LL << 15);
2068 write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
2069 dd_dev_info(dd,
2070 "Using send context %u(%u) for VL15\n",
2071 dd->vld[15].sc->sw_index, ctxt);
2072
2073 for (i = 0; i < num_vls; i++) {
2074 sc_enable(dd->vld[i].sc);
2075 ctxt = dd->vld[i].sc->hw_context;
2076 mask = all_vl_mask & ~(data_vls_mask);
2077 write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
2078 }
2079 for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) {
2080 sc_enable(dd->kernel_send_context[i + 1]);
2081 ctxt = dd->kernel_send_context[i + 1]->hw_context;
2082 mask = all_vl_mask & ~(data_vls_mask);
2083 write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
2084 }
2085
2086 if (pio_map_init(dd, ppd->port - 1, num_vls, NULL))
2087 goto nomem;
2088 return 0;
2089
2090 nomem:
2091 for (i = 0; i < num_vls; i++) {
2092 sc_free(dd->vld[i].sc);
2093 dd->vld[i].sc = NULL;
2094 }
2095
2096 for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++)
2097 sc_free(dd->kernel_send_context[i + 1]);
2098
2099 kfree(dd->kernel_send_context);
2100 dd->kernel_send_context = NULL;
2101
2102 freesc15:
2103 sc_free(dd->vld[15].sc);
2104 return -ENOMEM;
2105 }
2106
2107 int init_credit_return(struct hfi1_devdata *dd)
2108 {
2109 int ret;
2110 int i;
2111
2112 dd->cr_base = kcalloc(
2113 node_affinity.num_possible_nodes,
2114 sizeof(struct credit_return_base),
2115 GFP_KERNEL);
2116 if (!dd->cr_base) {
2117 ret = -ENOMEM;
2118 goto done;
2119 }
2120 for_each_node_with_cpus(i) {
2121 int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return);
2122
2123 set_dev_node(&dd->pcidev->dev, i);
2124 dd->cr_base[i].va = dma_alloc_coherent(&dd->pcidev->dev,
2125 bytes,
2126 &dd->cr_base[i].dma,
2127 GFP_KERNEL);
2128 if (!dd->cr_base[i].va) {
2129 set_dev_node(&dd->pcidev->dev, dd->node);
2130 dd_dev_err(dd,
2131 "Unable to allocate credit return DMA range for NUMA %d\n",
2132 i);
2133 ret = -ENOMEM;
2134 goto done;
2135 }
2136 }
2137 set_dev_node(&dd->pcidev->dev, dd->node);
2138
2139 ret = 0;
2140 done:
2141 return ret;
2142 }
2143
2144 void free_credit_return(struct hfi1_devdata *dd)
2145 {
2146 int i;
2147
2148 if (!dd->cr_base)
2149 return;
2150 for (i = 0; i < node_affinity.num_possible_nodes; i++) {
2151 if (dd->cr_base[i].va) {
2152 dma_free_coherent(&dd->pcidev->dev,
2153 TXE_NUM_CONTEXTS *
2154 sizeof(struct credit_return),
2155 dd->cr_base[i].va,
2156 dd->cr_base[i].dma);
2157 }
2158 }
2159 kfree(dd->cr_base);
2160 dd->cr_base = NULL;
2161 }
2162
2163 void seqfile_dump_sci(struct seq_file *s, u32 i,
2164 struct send_context_info *sci)
2165 {
2166 struct send_context *sc = sci->sc;
2167 u64 reg;
2168
2169 seq_printf(s, "SCI %u: type %u base %u credits %u\n",
2170 i, sci->type, sci->base, sci->credits);
2171 seq_printf(s, " flags 0x%x sw_inx %u hw_ctxt %u grp %u\n",
2172 sc->flags, sc->sw_index, sc->hw_context, sc->group);
2173 seq_printf(s, " sr_size %u credits %u sr_head %u sr_tail %u\n",
2174 sc->sr_size, sc->credits, sc->sr_head, sc->sr_tail);
2175 seq_printf(s, " fill %lu free %lu fill_wrap %u alloc_free %lu\n",
2176 sc->fill, sc->free, sc->fill_wrap, sc->alloc_free);
2177 seq_printf(s, " credit_intr_count %u credit_ctrl 0x%llx\n",
2178 sc->credit_intr_count, sc->credit_ctrl);
2179 reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_STATUS));
2180 seq_printf(s, " *hw_free %llu CurrentFree %llu LastReturned %llu\n",
2181 (le64_to_cpu(*sc->hw_free) & CR_COUNTER_SMASK) >>
2182 CR_COUNTER_SHIFT,
2183 (reg >> SC(CREDIT_STATUS_CURRENT_FREE_COUNTER_SHIFT)) &
2184 SC(CREDIT_STATUS_CURRENT_FREE_COUNTER_MASK),
2185 reg & SC(CREDIT_STATUS_LAST_RETURNED_COUNTER_SMASK));
2186 }
2187