
Lines Matching +full:precondition +full:- +full:timeout

2  * Copyright(c) 2015-2018 Intel Corporation.
24 * - Redistributions of source code must retain the above copyright
26 * - Redistributions in binary form must reproduce the above copyright
30 * - Neither the name of Intel Corporation nor the names of its
80 int flush = 0; /* re-read sendctrl to make sure it is flushed */ in pio_send_control()
83 spin_lock_irqsave(&dd->sendctrl_lock, flags); in pio_send_control()
92 for (i = 0; i < ARRAY_SIZE(dd->vld); i++) in pio_send_control()
93 if (!dd->vld[i].mtu) in pio_send_control()
128 spin_unlock_irqrestore(&dd->sendctrl_lock, flags); in pio_send_control()
135 #define SCS_POOL_0 -1
136 #define SCS_POOL_1 -2
139 #define SCC_PER_VL -1
140 #define SCC_PER_CPU -2
141 #define SCC_PER_KRCVQ -3
173 { 10000, -1 }, /* pool 0 */
174 { 0, -1 }, /* pool 1 */
180 * 100th of 1% of memory to use, -1 if blocks
190 * start at -1 and increase negatively. Map them as:
191 * -1 => 0
192 * -2 => 1
195 * Return -1 on non-wildcard input, otherwise convert to a pool number.
200 return -1; /* non-wildcard */ in wildcard_to_pool()
201 return -wc - 1; in wildcard_to_pool()
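The wildcard decoding above is small enough to restate as a self-contained sketch; the two return statements come straight from the listing, the rest is plain C:

	/* Map a size wildcard onto a pool number: -1 => pool 0, -2 => pool 1, ... */
	static int wildcard_to_pool(int wc)
	{
		if (wc >= 0)
			return -1;	/* non-wildcard: a fixed, non-negative size */
		return -wc - 1;
	}

With the defines above, wildcard_to_pool(SCS_POOL_0) yields 0 and wildcard_to_pool(SCS_POOL_1) yields 1, while any fixed size yields -1.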
226 int total_blocks = (chip_pio_mem_size(dd) / PIO_BLOCK_SIZE) - 1; in init_sc_pools_and_sizes()
239 * setting it to a fixed size. The allocation allows 3-deep buffering in init_sc_pools_and_sizes()
254 * - copy the centipercents/absolute sizes from the pool config in init_sc_pools_and_sizes()
255 * - sanity check these values in init_sc_pools_and_sizes()
256 * - add up centipercents, then later check for full value in init_sc_pools_and_sizes()
257 * - add up absolute blocks, then later check for over-commit in init_sc_pools_and_sizes()
278 return -EINVAL; in init_sc_pools_and_sizes()
290 return -EINVAL; in init_sc_pools_and_sizes()
299 return -EINVAL; in init_sc_pools_and_sizes()
308 return -EINVAL; in init_sc_pools_and_sizes()
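A rough sketch of the first pass described in the comment above, written as a hypothetical helper (only centipercent is visible in the listing; the absolute_blocks field, the NUM_SC_POOLS bound, and the helper name are illustrative):

	static int sum_pool_config(int *cp_total, int *ab_total)
	{
		int i;

		*cp_total = 0;			/* running centipercent total */
		*ab_total = 0;			/* running absolute-block total */
		for (i = 0; i < NUM_SC_POOLS; i++) {
			int cp = sc_mem_pool_config[i].centipercent;
			int ab = sc_mem_pool_config[i].absolute_blocks;

			if (cp >= 0)
				*cp_total += cp;	/* pool sized by percentage */
			else if (ab >= 0)
				*ab_total += ab;	/* pool sized by block count */
			else
				return -EINVAL;		/* neither was configured */
		}
		return 0;
	}

The later checks then require the centipercent total to be either 0 or exactly 10000 (100%), and the absolute-block total not to over-commit the available blocks, which is what the -EINVAL returns around here guard.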
313 * - copy from the context size config in init_sc_pools_and_sizes()
314 * - replace context type wildcard counts with real values in init_sc_pools_and_sizes()
315 * - add up non-memory pool block sizes in init_sc_pools_and_sizes()
316 * - add up memory pool user counts in init_sc_pools_and_sizes()
331 count = dd->n_krcv_queues; in init_sc_pools_and_sizes()
335 count = dd->num_rcv_contexts - dd->n_krcv_queues; in init_sc_pools_and_sizes()
341 return -EINVAL; in init_sc_pools_and_sizes()
344 count = chip_send_contexts(dd) - total_contexts; in init_sc_pools_and_sizes()
350 * number or -1 if a fixed (non-negative) value. The fixed in init_sc_pools_and_sizes()
355 if (pool == -1) { /* non-wildcard */ in init_sc_pools_and_sizes()
364 return -EINVAL; in init_sc_pools_and_sizes()
367 dd->sc_sizes[i].count = count; in init_sc_pools_and_sizes()
368 dd->sc_sizes[i].size = size; in init_sc_pools_and_sizes()
375 return -EINVAL; in init_sc_pools_and_sizes()
379 pool_blocks = total_blocks - fixed_blocks; in init_sc_pools_and_sizes()
385 return -EINVAL; in init_sc_pools_and_sizes()
388 pool_blocks -= ab_total; in init_sc_pools_and_sizes()
394 if (pi->centipercent >= 0) in init_sc_pools_and_sizes()
395 pi->blocks = (pool_blocks * pi->centipercent) / 10000; in init_sc_pools_and_sizes()
397 if (pi->blocks == 0 && pi->count != 0) { in init_sc_pools_and_sizes()
401 i, pi->count); in init_sc_pools_and_sizes()
402 return -EINVAL; in init_sc_pools_and_sizes()
404 if (pi->count == 0) { in init_sc_pools_and_sizes()
406 if (pi->blocks != 0) in init_sc_pools_and_sizes()
410 i, pi->blocks); in init_sc_pools_and_sizes()
411 pi->size = 0; in init_sc_pools_and_sizes()
413 pi->size = pi->blocks / pi->count; in init_sc_pools_and_sizes()
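For a concrete feel with made-up numbers: if 1,000 blocks remain for the pools after the fixed and absolute allocations, a pool configured at 10000 centipercent (100%) receives blocks = (1000 * 10000) / 10000 = 1000, and if 16 contexts draw from it, each gets size = 1000 / 16 = 62 blocks (integer division). Of the two degenerate cases handled above, a pool with users but no blocks is a hard -EINVAL, while a pool with blocks but no users only draws a warning and has its size forced to 0.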
420 if (dd->sc_sizes[i].size < 0) { in init_sc_pools_and_sizes()
421 unsigned pool = wildcard_to_pool(dd->sc_sizes[i].size); in init_sc_pools_and_sizes()
424 dd->sc_sizes[i].size = mem_pool_info[pool].size; in init_sc_pools_and_sizes()
428 if (dd->sc_sizes[i].size > PIO_MAX_BLOCKS) in init_sc_pools_and_sizes()
429 dd->sc_sizes[i].size = PIO_MAX_BLOCKS; in init_sc_pools_and_sizes()
432 used_blocks += dd->sc_sizes[i].size * dd->sc_sizes[i].count; in init_sc_pools_and_sizes()
434 extra = total_blocks - used_blocks; in init_sc_pools_and_sizes()
450 dd->hw_to_sw = kmalloc_array(TXE_NUM_CONTEXTS, sizeof(u8), in init_send_contexts()
452 dd->send_contexts = kcalloc(dd->num_send_contexts, in init_send_contexts()
455 if (!dd->send_contexts || !dd->hw_to_sw) { in init_send_contexts()
456 kfree(dd->hw_to_sw); in init_send_contexts()
457 kfree(dd->send_contexts); in init_send_contexts()
459 return -ENOMEM; in init_send_contexts()
464 dd->hw_to_sw[i] = INVALID_SCI; in init_send_contexts()
473 struct sc_config_sizes *scs = &dd->sc_sizes[i]; in init_send_contexts()
475 for (j = 0; j < scs->count; j++) { in init_send_contexts()
477 &dd->send_contexts[context]; in init_send_contexts()
478 sci->type = i; in init_send_contexts()
479 sci->base = base; in init_send_contexts()
480 sci->credits = scs->size; in init_send_contexts()
483 base += scs->size; in init_send_contexts()
493 * Must be called with dd->sc_lock held.
502 for (index = 0, sci = &dd->send_contexts[0]; in sc_hw_alloc()
503 index < dd->num_send_contexts; index++, sci++) { in sc_hw_alloc()
504 if (sci->type == type && sci->allocated == 0) { in sc_hw_alloc()
505 sci->allocated = 1; in sc_hw_alloc()
506 /* use a 1:1 mapping, but make them non-equal */ in sc_hw_alloc()
507 context = chip_send_contexts(dd) - index - 1; in sc_hw_alloc()
508 dd->hw_to_sw[context] = index; in sc_hw_alloc()
515 return -ENOSPC; in sc_hw_alloc()
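To illustrate the reverse mapping above: software index 0 is paired with hardware context chip_send_contexts(dd) - 1, index 1 with the context below that, and so on, so the two numberings are never equal; dd->hw_to_sw[] records the inverse so that code handed a hardware context number (for example the credit-return path in sc_group_release_update()) can get back to the software index.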
521 * Must be called with dd->sc_lock held.
527 sci = &dd->send_contexts[sw_index]; in sc_hw_free()
528 if (!sci->allocated) { in sc_hw_free()
532 sci->allocated = 0; in sc_hw_free()
533 dd->hw_to_sw[hw_context] = INVALID_SCI; in sc_hw_free()
563 u32 gc = group_context(sc->hw_context, sc->group); in cr_group_addresses()
564 u32 index = sc->hw_context & 0x7; in cr_group_addresses()
566 sc->hw_free = &sc->dd->cr_base[sc->node].va[gc].cr[index]; in cr_group_addresses()
568 &((struct credit_return *)sc->dd->cr_base[sc->node].dma)[gc]; in cr_group_addresses()
603 if (sc->credits <= release_credits) in sc_mtu_to_threshold()
606 threshold = sc->credits - release_credits; in sc_mtu_to_threshold()
620 return (sc->credits * percent) / 100; in sc_percent_to_threshold()
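Combining the fragments above, a hedged sketch of the MTU-based helper (the exact header adjustment and the floor of one credit are assumptions; the percentage-based helper is exactly the one-liner shown):

	static u32 sc_mtu_to_threshold(struct send_context *sc, u32 mtu, u32 hdrqentsize)
	{
		/* assumed: add the header size, then convert bytes to PIO blocks */
		u32 release_credits = DIV_ROUND_UP(mtu + (hdrqentsize << 2), PIO_BLOCK_SIZE);

		if (sc->credits <= release_credits)
			return 1;		/* assumed floor */
		return sc->credits - release_credits;
	}

set_threshold() further down then programs the smaller of this value and sc_percent_to_threshold() via sc_set_cr_threshold().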
632 spin_lock_irqsave(&sc->credit_ctrl_lock, flags); in sc_set_cr_threshold()
634 old_threshold = (sc->credit_ctrl >> in sc_set_cr_threshold()
639 sc->credit_ctrl = in sc_set_cr_threshold()
640 (sc->credit_ctrl in sc_set_cr_threshold()
645 write_kctxt_csr(sc->dd, sc->hw_context, in sc_set_cr_threshold()
646 SC(CREDIT_CTRL), sc->credit_ctrl); in sc_set_cr_threshold()
652 spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags); in sc_set_cr_threshold()
665 struct hfi1_devdata *dd = sc->dd; in set_pio_integrity()
666 u32 hw_context = sc->hw_context; in set_pio_integrity()
667 int type = sc->type; in set_pio_integrity()
680 ret += *per_cpu_ptr(sc->buffers_allocated, cpu); in get_buffers_allocated()
689 (*per_cpu_ptr(sc->buffers_allocated, cpu)) = 0; in reset_buffers_allocated()
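The per-CPU counters above keep the hot allocation path (this_cpu_inc() in sc_buffer_alloc()) free of shared-cacheline traffic; summing them is the slow path. A sketch of the reader, built around the accumulation line shown:

	static u32 get_buffers_allocated(struct send_context *sc)
	{
		int cpu;
		u32 ret = 0;

		for_each_possible_cpu(cpu)
			ret += *per_cpu_ptr(sc->buffers_allocated, cpu);
		return ret;
	}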
711 if (dd->flags & HFI1_FROZEN) in sc_alloc()
718 sc->buffers_allocated = alloc_percpu(u32); in sc_alloc()
719 if (!sc->buffers_allocated) { in sc_alloc()
727 spin_lock_irqsave(&dd->sc_lock, flags); in sc_alloc()
730 spin_unlock_irqrestore(&dd->sc_lock, flags); in sc_alloc()
731 free_percpu(sc->buffers_allocated); in sc_alloc()
736 sci = &dd->send_contexts[sw_index]; in sc_alloc()
737 sci->sc = sc; in sc_alloc()
739 sc->dd = dd; in sc_alloc()
740 sc->node = numa; in sc_alloc()
741 sc->type = type; in sc_alloc()
742 spin_lock_init(&sc->alloc_lock); in sc_alloc()
743 spin_lock_init(&sc->release_lock); in sc_alloc()
744 spin_lock_init(&sc->credit_ctrl_lock); in sc_alloc()
745 seqlock_init(&sc->waitlock); in sc_alloc()
746 INIT_LIST_HEAD(&sc->piowait); in sc_alloc()
747 INIT_WORK(&sc->halt_work, sc_halted); in sc_alloc()
748 init_waitqueue_head(&sc->halt_wait); in sc_alloc()
751 sc->group = 0; in sc_alloc()
753 sc->sw_index = sw_index; in sc_alloc()
754 sc->hw_context = hw_context; in sc_alloc()
756 sc->credits = sci->credits; in sc_alloc()
757 sc->size = sc->credits * PIO_BLOCK_SIZE; in sc_alloc()
762 sc->base_addr = dd->piobase + ((hw_context & PIO_ADDR_CONTEXT_MASK) in sc_alloc()
766 reg = ((sci->credits & SC(CTRL_CTXT_DEPTH_MASK)) in sc_alloc()
768 | ((sci->base & SC(CTRL_CTXT_BASE_MASK)) in sc_alloc()
775 write_kctxt_csr(dd, hw_context, SC(ERR_MASK), (u64)-1); in sc_alloc()
806 * sanitized on driver start-up. in sc_alloc()
809 * work for both the 3-deep buffering allocation and the in sc_alloc()
829 /* set up write-through credit_ctrl */ in sc_alloc()
830 sc->credit_ctrl = reg; in sc_alloc()
839 spin_unlock_irqrestore(&dd->sc_lock, flags); in sc_alloc()
854 sc->sr_size = sci->credits + 1; in sc_alloc()
855 sc->sr = kcalloc_node(sc->sr_size, in sc_alloc()
858 if (!sc->sr) { in sc_alloc()
869 sc->group, in sc_alloc()
870 sc->credits, in sc_alloc()
871 sc->credit_ctrl, in sc_alloc()
877 /* free a per-NUMA send context structure */
888 sc->flags |= SCF_IN_FREE; /* ensure no restarts */ in sc_free()
889 dd = sc->dd; in sc_free()
890 if (!list_empty(&sc->piowait)) in sc_free()
892 sw_index = sc->sw_index; in sc_free()
893 hw_context = sc->hw_context; in sc_free()
895 flush_work(&sc->halt_work); in sc_free()
897 spin_lock_irqsave(&dd->sc_lock, flags); in sc_free()
898 dd->send_contexts[sw_index].sc = NULL; in sc_free()
909 /* release the index and context for re-use */ in sc_free()
911 spin_unlock_irqrestore(&dd->sc_lock, flags); in sc_free()
913 kfree(sc->sr); in sc_free()
914 free_percpu(sc->buffers_allocated); in sc_free()
929 spin_lock_irq(&sc->alloc_lock); in sc_disable()
930 reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL)); in sc_disable()
932 sc->flags &= ~SCF_ENABLED; in sc_disable()
934 write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg); in sc_disable()
939 * could be one in-process when the context is disabled). in sc_disable()
944 spin_lock(&sc->release_lock); in sc_disable()
945 if (sc->sr) { /* this context has a shadow ring */ in sc_disable()
946 while (sc->sr_tail != sc->sr_head) { in sc_disable()
947 pbuf = &sc->sr[sc->sr_tail].pbuf; in sc_disable()
948 if (pbuf->cb) in sc_disable()
949 (*pbuf->cb)(pbuf->arg, PRC_SC_DISABLE); in sc_disable()
950 sc->sr_tail++; in sc_disable()
951 if (sc->sr_tail >= sc->sr_size) in sc_disable()
952 sc->sr_tail = 0; in sc_disable()
955 spin_unlock(&sc->release_lock); in sc_disable()
957 write_seqlock(&sc->waitlock); in sc_disable()
958 list_splice_init(&sc->piowait, &wake_list); in sc_disable()
959 write_sequnlock(&sc->waitlock); in sc_disable()
967 priv = qp->priv; in sc_disable()
968 list_del_init(&priv->s_iowait.list); in sc_disable()
969 priv->s_iowait.lock = NULL; in sc_disable()
973 spin_unlock_irq(&sc->alloc_lock); in sc_disable()
1013 struct hfi1_devdata *dd = sc->dd; in sc_wait_for_packet_egress()
1020 reg = read_csr(dd, sc->hw_context * 8 + in sc_wait_for_packet_egress()
1023 if (sc->flags & SCF_HALTED || in sc_wait_for_packet_egress()
1024 is_sc_halted(dd, sc->hw_context) || egress_halted(reg)) in sc_wait_for_packet_egress()
1033 /* timed out - bounce the link */ in sc_wait_for_packet_egress()
1035 … "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n", in sc_wait_for_packet_egress()
1036 __func__, sc->sw_index, in sc_wait_for_packet_egress()
1037 sc->hw_context, (u32)reg); in sc_wait_for_packet_egress()
1038 queue_work(dd->pport->link_wq, in sc_wait_for_packet_egress()
1039 &dd->pport->link_bounce_work); in sc_wait_for_packet_egress()
1055 for (i = 0; i < dd->num_send_contexts; i++) { in sc_wait()
1056 struct send_context *sc = dd->send_contexts[i].sc; in sc_wait()
1067 * If the first step (waiting for the halt to be asserted) fails, return early.
1075 struct hfi1_devdata *dd = sc->dd; in sc_restart()
1081 if (!(sc->flags & SCF_HALTED) || (sc->flags & SCF_IN_FREE)) in sc_restart()
1082 return -EINVAL; in sc_restart()
1084 dd_dev_info(dd, "restarting send context %u(%u)\n", sc->sw_index, in sc_restart()
1085 sc->hw_context); in sc_restart()
1095 reg = read_kctxt_csr(dd, sc->hw_context, SC(STATUS)); in sc_restart()
1100 __func__, sc->sw_index, sc->hw_context); in sc_restart()
1101 return -ETIME; in sc_restart()
1117 if (sc->type != SC_USER) { in sc_restart()
1126 "%s: context %u(%u) timeout waiting for PIO buffers to zero, remaining %d\n", in sc_restart()
1127 __func__, sc->sw_index, in sc_restart()
1128 sc->hw_context, count); in sc_restart()
1149 * This enable will clear the halted flag and per-send context in sc_restart()
1165 for (i = 0; i < dd->num_send_contexts; i++) { in pio_freeze()
1166 sc = dd->send_contexts[i].sc; in pio_freeze()
1172 if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER) in pio_freeze()
1181 * Unfreeze PIO for kernel send contexts. The precondition for calling this
1183 * been cleared. Now perform the last step and re-enable each kernel context.
1192 for (i = 0; i < dd->num_send_contexts; i++) { in pio_kernel_unfreeze()
1193 sc = dd->send_contexts[i].sc; in pio_kernel_unfreeze()
1194 if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER) in pio_kernel_unfreeze()
1196 if (sc->flags & SCF_LINK_DOWN) in pio_kernel_unfreeze()
1204 * pio_kernel_linkup() - Re-enable send contexts after linkup event
1208 * event is different from a freeze because if the send context is re-enabled
1220 for (i = 0; i < dd->num_send_contexts; i++) { in pio_kernel_linkup()
1221 sc = dd->send_contexts[i].sc; in pio_kernel_linkup()
1222 if (!sc || !(sc->flags & SCF_LINK_DOWN) || sc->type == SC_USER) in pio_kernel_linkup()
1232 * -ETIMEDOUT - if we wait too long
1233 * -EIO - if there was an error
1241 max = (dd->icode == ICODE_FPGA_EMULATION) ? 120 : 5; in pio_init_wait_progress()
1247 return -ETIMEDOUT; in pio_init_wait_progress()
1252 return reg & SEND_PIO_INIT_CTXT_PIO_INIT_ERR_SMASK ? -EIO : 0; in pio_init_wait_progress()
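A sketch of the polling loop implied by those lines (the in-progress bit name and the per-iteration pacing are assumptions; the emulation-aware retry count and the final error check are taken from the listing):

	static int pio_init_wait_progress(struct hfi1_devdata *dd)
	{
		u64 reg;
		int max, count = 0;

		/* emulation platforms are much slower, allow more retries */
		max = (dd->icode == ICODE_FPGA_EMULATION) ? 120 : 5;
		while (1) {
			reg = read_csr(dd, SEND_PIO_INIT_CTXT);
			if (!(reg & SEND_PIO_INIT_CTXT_PIO_INIT_IN_PROGRESS_SMASK))	/* assumed bit */
				break;
			if (count++ >= max)
				return -ETIMEDOUT;
			udelay(5);	/* assumed pacing */
		}

		return reg & SEND_PIO_INIT_CTXT_PIO_INIT_ERR_SMASK ? -EIO : 0;
	}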
1256 * Reset all of the send contexts to their power-on state. Used
1257 * only during manual init - no lock against sc_enable needed.
1265 /* ignore any timeout */ in pio_reset_all()
1266 if (ret == -EIO) { in pio_reset_all()
1280 ret == -ETIMEDOUT ? "is stuck" : "had an error"); in pio_reset_all()
1293 return -EINVAL; in sc_enable()
1294 dd = sc->dd; in sc_enable()
1303 spin_lock_irqsave(&sc->alloc_lock, flags); in sc_enable()
1304 sc_ctrl = read_kctxt_csr(dd, sc->hw_context, SC(CTRL)); in sc_enable()
1308 /* IMPORTANT: only clear free and fill if transitioning 0 -> 1 */ in sc_enable()
1310 *sc->hw_free = 0; in sc_enable()
1311 sc->free = 0; in sc_enable()
1312 sc->alloc_free = 0; in sc_enable()
1313 sc->fill = 0; in sc_enable()
1314 sc->fill_wrap = 0; in sc_enable()
1315 sc->sr_head = 0; in sc_enable()
1316 sc->sr_tail = 0; in sc_enable()
1317 sc->flags = 0; in sc_enable()
1322 * Clear all per-context errors. Some of these will be set when in sc_enable()
1323 * we are re-enabling after a context halt. Now that the context in sc_enable()
1327 reg = read_kctxt_csr(dd, sc->hw_context, SC(ERR_STATUS)); in sc_enable()
1329 write_kctxt_csr(dd, sc->hw_context, SC(ERR_CLEAR), reg); in sc_enable()
1335 spin_lock(&dd->sc_init_lock); in sc_enable()
1343 pio = ((sc->hw_context & SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_MASK) << in sc_enable()
1353 spin_unlock(&dd->sc_init_lock); in sc_enable()
1357 sc->sw_index, sc->hw_context, ret); in sc_enable()
1365 write_kctxt_csr(dd, sc->hw_context, SC(CTRL), sc_ctrl); in sc_enable()
1370 read_kctxt_csr(dd, sc->hw_context, SC(CTRL)); in sc_enable()
1371 sc->flags |= SCF_ENABLED; in sc_enable()
1374 spin_unlock_irqrestore(&sc->alloc_lock, flags); in sc_enable()
1385 /* a 0->1 transition schedules a credit return */ in sc_return_credits()
1386 write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), in sc_return_credits()
1390 * scheduled. We care more about the 0 -> 1 transition. in sc_return_credits()
1392 read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE)); in sc_return_credits()
1394 write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), 0); in sc_return_credits()
1397 /* allow all in-flight packets to drain on the context */
1412 dd_dev_info(sc->dd, "%s: context %u(%u) - not implemented\n", in sc_drop()
1413 __func__, sc->sw_index, sc->hw_context); in sc_drop()
1418 * - mark the context as halted or frozen
1419 * - stop buffer allocations
1429 spin_lock_irqsave(&sc->alloc_lock, flags); in sc_stop()
1431 sc->flags |= flag; in sc_stop()
1432 sc->flags &= ~SCF_ENABLED; in sc_stop()
1433 spin_unlock_irqrestore(&sc->alloc_lock, flags); in sc_stop()
1434 wake_up(&sc->halt_wait); in sc_stop()
1444 * @len: length of whole packet - including PBC - in dwords
1448 * Return a pointer to a PIO buffer, NULL if not enough room, -ECOMM
1462 spin_lock_irqsave(&sc->alloc_lock, flags); in sc_buffer_alloc()
1463 if (!(sc->flags & SCF_ENABLED)) { in sc_buffer_alloc()
1464 spin_unlock_irqrestore(&sc->alloc_lock, flags); in sc_buffer_alloc()
1465 return ERR_PTR(-ECOMM); in sc_buffer_alloc()
1469 avail = (unsigned long)sc->credits - (sc->fill - sc->alloc_free); in sc_buffer_alloc()
1473 spin_unlock_irqrestore(&sc->alloc_lock, flags); in sc_buffer_alloc()
1477 sc->alloc_free = READ_ONCE(sc->free); in sc_buffer_alloc()
1479 (unsigned long)sc->credits - in sc_buffer_alloc()
1480 (sc->fill - sc->alloc_free); in sc_buffer_alloc()
1484 sc->alloc_free = READ_ONCE(sc->free); in sc_buffer_alloc()
1493 this_cpu_inc(*sc->buffers_allocated); in sc_buffer_alloc()
1496 head = sc->sr_head; in sc_buffer_alloc()
1499 sc->fill += blocks; in sc_buffer_alloc()
1500 fill_wrap = sc->fill_wrap; in sc_buffer_alloc()
1501 sc->fill_wrap += blocks; in sc_buffer_alloc()
1502 if (sc->fill_wrap >= sc->credits) in sc_buffer_alloc()
1503 sc->fill_wrap = sc->fill_wrap - sc->credits; in sc_buffer_alloc()
1512 pbuf = &sc->sr[head].pbuf; in sc_buffer_alloc()
1513 pbuf->sent_at = sc->fill; in sc_buffer_alloc()
1514 pbuf->cb = cb; in sc_buffer_alloc()
1515 pbuf->arg = arg; in sc_buffer_alloc()
1516 pbuf->sc = sc; /* could be filled in at sc->sr init time */ in sc_buffer_alloc()
1521 if (next >= sc->sr_size) in sc_buffer_alloc()
1524 * update the head - must be last! - the releaser can look at fields in sc_buffer_alloc()
1528 sc->sr_head = next; in sc_buffer_alloc()
1529 spin_unlock_irqrestore(&sc->alloc_lock, flags); in sc_buffer_alloc()
1532 pbuf->start = sc->base_addr + fill_wrap * PIO_BLOCK_SIZE; in sc_buffer_alloc()
1533 pbuf->end = sc->base_addr + sc->size; in sc_buffer_alloc()
1534 pbuf->qw_written = 0; in sc_buffer_alloc()
1535 pbuf->carry_bytes = 0; in sc_buffer_alloc()
1536 pbuf->carry.val64 = 0; in sc_buffer_alloc()
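Callers of sc_buffer_alloc() have to tell the two failure modes documented above apart; a minimal caller-side sketch (the wrapper name and the -EBUSY mapping are illustrative):

	static int send_one_packet(struct send_context *sc, u32 plen,
				   void (*cb)(void *arg, int code), void *arg)
	{
		struct pio_buf *pbuf;

		pbuf = sc_buffer_alloc(sc, plen, cb, arg);	/* plen: whole packet, PBC included, in dwords */
		if (IS_ERR(pbuf))
			return PTR_ERR(pbuf);	/* -ECOMM: the context is not enabled */
		if (!pbuf)
			return -EBUSY;		/* out of credits: queue on sc->piowait and
						 * retry from the sc_piobufavail() callback */
		/* ... copy the packet into pbuf and kick off the send ... */
		return 0;
	}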
1557 spin_lock_irqsave(&sc->credit_ctrl_lock, flags); in sc_add_credit_return_intr()
1558 if (sc->credit_intr_count == 0) { in sc_add_credit_return_intr()
1559 sc->credit_ctrl |= SC(CREDIT_CTRL_CREDIT_INTR_SMASK); in sc_add_credit_return_intr()
1560 write_kctxt_csr(sc->dd, sc->hw_context, in sc_add_credit_return_intr()
1561 SC(CREDIT_CTRL), sc->credit_ctrl); in sc_add_credit_return_intr()
1563 sc->credit_intr_count++; in sc_add_credit_return_intr()
1564 spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags); in sc_add_credit_return_intr()
1575 WARN_ON(sc->credit_intr_count == 0); in sc_del_credit_return_intr()
1578 spin_lock_irqsave(&sc->credit_ctrl_lock, flags); in sc_del_credit_return_intr()
1579 sc->credit_intr_count--; in sc_del_credit_return_intr()
1580 if (sc->credit_intr_count == 0) { in sc_del_credit_return_intr()
1581 sc->credit_ctrl &= ~SC(CREDIT_CTRL_CREDIT_INTR_SMASK); in sc_del_credit_return_intr()
1582 write_kctxt_csr(sc->dd, sc->hw_context, in sc_del_credit_return_intr()
1583 SC(CREDIT_CTRL), sc->credit_ctrl); in sc_del_credit_return_intr()
1585 spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags); in sc_del_credit_return_intr()
1598 trace_hfi1_wantpiointr(sc, needint, sc->credit_ctrl); in hfi1_sc_wantpiobuf_intr()
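For context, the refcounted enable/disable above is driven from hfi1_sc_wantpiobuf_intr(), whose trace call appears in the listing; a hedged sketch of that wrapper (the sc_return_credits() kick on enable is an assumption drawn from the surrounding code):

	void hfi1_sc_wantpiobuf_intr(struct send_context *sc, u32 needint)
	{
		if (needint)
			sc_add_credit_return_intr(sc);
		else
			sc_del_credit_return_intr(sc);
		trace_hfi1_wantpiointr(sc, needint, sc->credit_ctrl);
		if (needint)
			sc_return_credits(sc);	/* assumed: force an immediate credit return */
	}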
1604 * sc_piobufavail - callback when a PIO buffer is available
1613 struct hfi1_devdata *dd = sc->dd; in sc_piobufavail()
1621 if (dd->send_contexts[sc->sw_index].type != SC_KERNEL && in sc_piobufavail()
1622 dd->send_contexts[sc->sw_index].type != SC_VL15) in sc_piobufavail()
1624 list = &sc->piowait; in sc_piobufavail()
1631 write_seqlock_irqsave(&sc->waitlock, flags); in sc_piobufavail()
1640 priv = qp->priv; in sc_piobufavail()
1641 list_del_init(&priv->s_iowait.list); in sc_piobufavail()
1642 priv->s_iowait.lock = NULL; in sc_piobufavail()
1644 priv = qps[top_idx]->priv; in sc_piobufavail()
1646 &priv->s_iowait, in sc_piobufavail()
1662 write_sequnlock_irqrestore(&sc->waitlock, flags); in sc_piobufavail()
1664 /* Wake up the top-priority one first */ in sc_piobufavail()
1712 spin_lock_irqsave(&sc->release_lock, flags); in sc_release_update()
1714 hw_free = le64_to_cpu(*sc->hw_free); /* volatile read */ in sc_release_update()
1715 old_free = sc->free; in sc_release_update()
1717 - (old_free & CR_COUNTER_MASK)) in sc_release_update()
1723 code = -1; /* code not yet set */ in sc_release_update()
1724 head = READ_ONCE(sc->sr_head); /* snapshot the head */ in sc_release_update()
1725 tail = sc->sr_tail; in sc_release_update()
1727 pbuf = &sc->sr[tail].pbuf; in sc_release_update()
1729 if (sent_before(free, pbuf->sent_at)) { in sc_release_update()
1733 if (pbuf->cb) { in sc_release_update()
1736 (*pbuf->cb)(pbuf->arg, code); in sc_release_update()
1740 if (tail >= sc->sr_size) in sc_release_update()
1743 sc->sr_tail = tail; in sc_release_update()
1746 sc->free = free; in sc_release_update()
1747 spin_unlock_irqrestore(&sc->release_lock, flags); in sc_release_update()
1766 spin_lock(&dd->sc_lock); in sc_group_release_update()
1767 sw_index = dd->hw_to_sw[hw_context]; in sc_group_release_update()
1768 if (unlikely(sw_index >= dd->num_send_contexts)) { in sc_group_release_update()
1773 sc = dd->send_contexts[sw_index].sc; in sc_group_release_update()
1777 gc = group_context(hw_context, sc->group); in sc_group_release_update()
1778 gc_end = gc + group_size(sc->group); in sc_group_release_update()
1780 sw_index = dd->hw_to_sw[gc]; in sc_group_release_update()
1781 if (unlikely(sw_index >= dd->num_send_contexts)) { in sc_group_release_update()
1787 sc_release_update(dd->send_contexts[sw_index].sc); in sc_group_release_update()
1790 spin_unlock(&dd->sc_lock); in sc_group_release_update()
1794 * pio_select_send_context_vl() - select send context
1810 * NOTE This should only happen if SC->VL changed after the initial in pio_select_send_context_vl()
1820 m = rcu_dereference(dd->pio_map); in pio_select_send_context_vl()
1823 return dd->vld[0].sc; in pio_select_send_context_vl()
1825 e = m->map[vl & m->mask]; in pio_select_send_context_vl()
1826 rval = e->ksc[selector & e->mask]; in pio_select_send_context_vl()
1830 rval = !rval ? dd->vld[0].sc : rval; in pio_select_send_context_vl()
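Read together, this is a two-level, RCU-protected table walk; a sketch assembled from the lines above, written as a hypothetical helper (the lock placement and the struct names pio_vl_map/pio_map_elem are assumptions):

	static struct send_context *pick_kernel_sc(struct hfi1_devdata *dd,
						   u8 vl, u32 selector)
	{
		struct pio_vl_map *m;
		struct pio_map_elem *e;
		struct send_context *rval;

		rcu_read_lock();
		m = rcu_dereference(dd->pio_map);
		if (unlikely(!m)) {
			rcu_read_unlock();
			return dd->vld[0].sc;		/* map not built yet: fall back to VL0 */
		}
		e = m->map[vl & m->mask];		/* pick the per-VL entry */
		rval = e->ksc[selector & e->mask];	/* spread senders across that entry */
		rcu_read_unlock();

		return rval ? rval : dd->vld[0].sc;	/* empty slot also falls back to VL0 */
	}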
1835 * pio_select_send_context_sc() - select send context
1857 for (i = 0; m && i < m->actual_vls; i++) in pio_map_free()
1858 kfree(m->map[i]); in pio_map_free()
1879 thres = min(sc_percent_to_threshold(dd->kernel_send_context[scontext], in set_threshold()
1881 sc_mtu_to_threshold(dd->kernel_send_context[scontext], in set_threshold()
1882 dd->vld[i].mtu, in set_threshold()
1883 dd->rcd[0]->rcvhdrqentsize)); in set_threshold()
1884 sc_set_cr_threshold(dd->kernel_send_context[scontext], thres); in set_threshold()
1888 * pio_map_init - called when #vls change
1896 * vl_scontexts is used to specify a non-uniform vl/send context
1906 * If either num_vls or num_send_contexts is not a power of 2, the
1925 for (i = 0; i < dd->num_send_contexts; i++) in pio_map_init()
1926 if (dd->send_contexts[i].type == SC_KERNEL) in pio_map_init()
1934 for (i = num_vls - 1; i >= 0; i--, extra--) in pio_map_init()
1944 newmap->actual_vls = num_vls; in pio_map_init()
1945 newmap->vls = roundup_pow_of_two(num_vls); in pio_map_init()
1946 newmap->mask = (1 << ilog2(newmap->vls)) - 1; in pio_map_init()
1947 for (i = 0; i < newmap->vls; i++) { in pio_map_init()
1951 if (i < newmap->actual_vls) { in pio_map_init()
1955 newmap->map[i] = kzalloc(sizeof(*newmap->map[i]) + in pio_map_init()
1959 if (!newmap->map[i]) in pio_map_init()
1961 newmap->map[i]->mask = (1 << ilog2(sz)) - 1; in pio_map_init()
1967 if (dd->kernel_send_context[scontext]) { in pio_map_init()
1968 newmap->map[i]->ksc[j] = in pio_map_init()
1969 dd->kernel_send_context[scontext]; in pio_map_init()
1978 /* just re-use entry without allocating */ in pio_map_init()
1979 newmap->map[i] = newmap->map[i % num_vls]; in pio_map_init()
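A concrete illustration of the rounding (made-up numbers): with num_vls = 5, newmap->vls = roundup_pow_of_two(5) = 8 and newmap->mask = 0x7, so vl & m->mask can never index out of range, and entries 5 through 7 simply re-use entries 0 through 2 via the i % num_vls branch above. The same trick applies inside each entry: a VL given 3 send contexts gets an array rounded up to 4 slots with mask 0x3, the extra slot repeating one of the 3 contexts, so selector & e->mask always lands on a valid context.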
1984 spin_lock_irq(&dd->pio_map_lock); in pio_map_init()
1985 oldmap = rcu_dereference_protected(dd->pio_map, in pio_map_init()
1986 lockdep_is_held(&dd->pio_map_lock)); in pio_map_init()
1989 rcu_assign_pointer(dd->pio_map, newmap); in pio_map_init()
1991 spin_unlock_irq(&dd->pio_map_lock); in pio_map_init()
1994 call_rcu(&oldmap->list, pio_map_rcu_callback); in pio_map_init()
1999 return -ENOMEM; in pio_map_init()
2005 if (rcu_access_pointer(dd->pio_map)) { in free_pio_map()
2006 spin_lock_irq(&dd->pio_map_lock); in free_pio_map()
2007 pio_map_free(rcu_access_pointer(dd->pio_map)); in free_pio_map()
2008 RCU_INIT_POINTER(dd->pio_map, NULL); in free_pio_map()
2009 spin_unlock_irq(&dd->pio_map_lock); in free_pio_map()
2012 kfree(dd->kernel_send_context); in free_pio_map()
2013 dd->kernel_send_context = NULL; in free_pio_map()
2019 u64 mask, all_vl_mask = (u64)0x80ff; /* VLs 0-7, 15 */ in init_pervl_scs()
2020 u64 data_vls_mask = (u64)0x00ff; /* VLs 0-7 */ in init_pervl_scs()
2022 struct hfi1_pportdata *ppd = dd->pport; in init_pervl_scs()
2024 dd->vld[15].sc = sc_alloc(dd, SC_VL15, in init_pervl_scs()
2025 dd->rcd[0]->rcvhdrqentsize, dd->node); in init_pervl_scs()
2026 if (!dd->vld[15].sc) in init_pervl_scs()
2027 return -ENOMEM; in init_pervl_scs()
2029 hfi1_init_ctxt(dd->vld[15].sc); in init_pervl_scs()
2030 dd->vld[15].mtu = enum_to_mtu(OPA_MTU_2048); in init_pervl_scs()
2032 dd->kernel_send_context = kcalloc_node(dd->num_send_contexts, in init_pervl_scs()
2034 GFP_KERNEL, dd->node); in init_pervl_scs()
2035 if (!dd->kernel_send_context) in init_pervl_scs()
2038 dd->kernel_send_context[0] = dd->vld[15].sc; in init_pervl_scs()
2048 dd->vld[i].sc = sc_alloc(dd, SC_KERNEL, in init_pervl_scs()
2049 dd->rcd[0]->rcvhdrqentsize, dd->node); in init_pervl_scs()
2050 if (!dd->vld[i].sc) in init_pervl_scs()
2052 dd->kernel_send_context[i + 1] = dd->vld[i].sc; in init_pervl_scs()
2053 hfi1_init_ctxt(dd->vld[i].sc); in init_pervl_scs()
2055 dd->vld[i].mtu = hfi1_max_mtu; in init_pervl_scs()
2058 dd->kernel_send_context[i + 1] = in init_pervl_scs()
2059 sc_alloc(dd, SC_KERNEL, dd->rcd[0]->rcvhdrqentsize, dd->node); in init_pervl_scs()
2060 if (!dd->kernel_send_context[i + 1]) in init_pervl_scs()
2062 hfi1_init_ctxt(dd->kernel_send_context[i + 1]); in init_pervl_scs()
2065 sc_enable(dd->vld[15].sc); in init_pervl_scs()
2066 ctxt = dd->vld[15].sc->hw_context; in init_pervl_scs()
2071 dd->vld[15].sc->sw_index, ctxt); in init_pervl_scs()
2074 sc_enable(dd->vld[i].sc); in init_pervl_scs()
2075 ctxt = dd->vld[i].sc->hw_context; in init_pervl_scs()
2080 sc_enable(dd->kernel_send_context[i + 1]); in init_pervl_scs()
2081 ctxt = dd->kernel_send_context[i + 1]->hw_context; in init_pervl_scs()
2086 if (pio_map_init(dd, ppd->port - 1, num_vls, NULL)) in init_pervl_scs()
2092 sc_free(dd->vld[i].sc); in init_pervl_scs()
2093 dd->vld[i].sc = NULL; in init_pervl_scs()
2097 sc_free(dd->kernel_send_context[i + 1]); in init_pervl_scs()
2099 kfree(dd->kernel_send_context); in init_pervl_scs()
2100 dd->kernel_send_context = NULL; in init_pervl_scs()
2103 sc_free(dd->vld[15].sc); in init_pervl_scs()
2104 return -ENOMEM; in init_pervl_scs()
2112 dd->cr_base = kcalloc( in init_credit_return()
2116 if (!dd->cr_base) { in init_credit_return()
2117 ret = -ENOMEM; in init_credit_return()
2123 set_dev_node(&dd->pcidev->dev, i); in init_credit_return()
2124 dd->cr_base[i].va = dma_alloc_coherent(&dd->pcidev->dev, in init_credit_return()
2126 &dd->cr_base[i].dma, in init_credit_return()
2128 if (!dd->cr_base[i].va) { in init_credit_return()
2129 set_dev_node(&dd->pcidev->dev, dd->node); in init_credit_return()
2133 ret = -ENOMEM; in init_credit_return()
2137 set_dev_node(&dd->pcidev->dev, dd->node); in init_credit_return()
2148 if (!dd->cr_base) in free_credit_return()
2151 if (dd->cr_base[i].va) { in free_credit_return()
2152 dma_free_coherent(&dd->pcidev->dev, in free_credit_return()
2155 dd->cr_base[i].va, in free_credit_return()
2156 dd->cr_base[i].dma); in free_credit_return()
2159 kfree(dd->cr_base); in free_credit_return()
2160 dd->cr_base = NULL; in free_credit_return()
2166 struct send_context *sc = sci->sc; in seqfile_dump_sci()
2170 i, sci->type, sci->base, sci->credits); in seqfile_dump_sci()
2172 sc->flags, sc->sw_index, sc->hw_context, sc->group); in seqfile_dump_sci()
2174 sc->sr_size, sc->credits, sc->sr_head, sc->sr_tail); in seqfile_dump_sci()
2176 sc->fill, sc->free, sc->fill_wrap, sc->alloc_free); in seqfile_dump_sci()
2178 sc->credit_intr_count, sc->credit_ctrl); in seqfile_dump_sci()
2179 reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_STATUS)); in seqfile_dump_sci()
2181 (le64_to_cpu(*sc->hw_free) & CR_COUNTER_SMASK) >> in seqfile_dump_sci()