Lines matching full:sc
53 #define SC(name) SEND_CTXT_##name macro
57 static void sc_wait_for_packet_egress(struct send_context *sc, int pause);
550 * given sc.
561 static void cr_group_addresses(struct send_context *sc, dma_addr_t *dma) in cr_group_addresses() argument
563 u32 gc = group_context(sc->hw_context, sc->group); in cr_group_addresses()
564 u32 index = sc->hw_context & 0x7; in cr_group_addresses()
566 sc->hw_free = &sc->dd->cr_base[sc->node].va[gc].cr[index]; in cr_group_addresses()
568 &((struct credit_return *)sc->dd->cr_base[sc->node].dma)[gc]; in cr_group_addresses()
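
The DMA assignment above is shown without its left-hand side. A minimal reconstruction of the whole helper, assuming the elided lines follow the visible pattern (both the hw_free virtual address and the returned DMA address are indexed by the group context gc):

static void cr_group_addresses(struct send_context *sc, dma_addr_t *dma)
{
	u32 gc = group_context(sc->hw_context, sc->group);
	u32 index = sc->hw_context & 0x7;

	/* this context's slot inside the group's credit_return block */
	sc->hw_free = &sc->dd->cr_base[sc->node].va[gc].cr[index];
	/* matching DMA address of the same block (assumed reconstruction) */
	*dma = (unsigned long)
	       &((struct credit_return *)sc->dd->cr_base[sc->node].dma)[gc];
}
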
577 struct send_context *sc; in sc_halted() local
579 sc = container_of(work, struct send_context, halt_work); in sc_halted()
580 sc_restart(sc); in sc_halted()
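
The two lines above are effectively the whole work handler; a sketch of the assumed full body, showing the container_of() recovery of the context from its embedded work_struct:

static void sc_halted(struct work_struct *work)
{
	struct send_context *sc;

	/* recover the owning context from the embedded halt_work */
	sc = container_of(work, struct send_context, halt_work);
	sc_restart(sc);
}
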
593 u32 sc_mtu_to_threshold(struct send_context *sc, u32 mtu, u32 hdrqentsize) in sc_mtu_to_threshold() argument
603 if (sc->credits <= release_credits) in sc_mtu_to_threshold()
606 threshold = sc->credits - release_credits; in sc_mtu_to_threshold()
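
Only the clamp against sc->credits is visible; a sketch of the assumed full conversion, where the MTU is padded by the header queue entry size (in dwords) and rounded up to PIO blocks before being subtracted from the context's credits:

u32 sc_mtu_to_threshold(struct send_context *sc, u32 mtu, u32 hdrqentsize)
{
	u32 release_credits;
	u32 threshold;

	/* add in the header size, then convert to PIO blocks (assumed) */
	mtu += hdrqentsize << 2;
	release_credits = DIV_ROUND_UP(mtu, PIO_BLOCK_SIZE);

	/* keep the threshold sane for small contexts */
	if (sc->credits <= release_credits)
		threshold = 1;
	else
		threshold = sc->credits - release_credits;

	return threshold;
}
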
618 u32 sc_percent_to_threshold(struct send_context *sc, u32 percent) in sc_percent_to_threshold() argument
620 return (sc->credits * percent) / 100; in sc_percent_to_threshold()
626 void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold) in sc_set_cr_threshold() argument
632 spin_lock_irqsave(&sc->credit_ctrl_lock, flags); in sc_set_cr_threshold()
634 old_threshold = (sc->credit_ctrl >> in sc_set_cr_threshold()
635 SC(CREDIT_CTRL_THRESHOLD_SHIFT)) in sc_set_cr_threshold()
636 & SC(CREDIT_CTRL_THRESHOLD_MASK); in sc_set_cr_threshold()
639 sc->credit_ctrl = in sc_set_cr_threshold()
640 (sc->credit_ctrl in sc_set_cr_threshold()
641 & ~SC(CREDIT_CTRL_THRESHOLD_SMASK)) in sc_set_cr_threshold()
643 & SC(CREDIT_CTRL_THRESHOLD_MASK)) in sc_set_cr_threshold()
644 << SC(CREDIT_CTRL_THRESHOLD_SHIFT)); in sc_set_cr_threshold()
645 write_kctxt_csr(sc->dd, sc->hw_context, in sc_set_cr_threshold()
646 SC(CREDIT_CTRL), sc->credit_ctrl); in sc_set_cr_threshold()
652 spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags); in sc_set_cr_threshold()
655 sc_return_credits(sc); in sc_set_cr_threshold()
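
Pieced together, the lines above form a read-modify-write of the threshold field under credit_ctrl_lock, with a forced credit return when the value actually changes; a sketch, with the force_return flag assumed:

void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold)
{
	unsigned long flags;
	u32 old_threshold;
	int force_return = 0;	/* assumed: drives the final return */

	spin_lock_irqsave(&sc->credit_ctrl_lock, flags);

	old_threshold = (sc->credit_ctrl >> SC(CREDIT_CTRL_THRESHOLD_SHIFT))
			& SC(CREDIT_CTRL_THRESHOLD_MASK);

	if (new_threshold != old_threshold) {
		/* clear the old field, insert the new value, push to HW */
		sc->credit_ctrl =
			(sc->credit_ctrl & ~SC(CREDIT_CTRL_THRESHOLD_SMASK))
			| ((new_threshold & SC(CREDIT_CTRL_THRESHOLD_MASK))
			   << SC(CREDIT_CTRL_THRESHOLD_SHIFT));
		write_kctxt_csr(sc->dd, sc->hw_context,
				SC(CREDIT_CTRL), sc->credit_ctrl);
		force_return = 1;
	}

	spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);

	/* return credits outside the lock so the HW sees the new threshold */
	if (force_return)
		sc_return_credits(sc);
}
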
661 * Set the CHECK_ENABLE register for the send context 'sc'.
663 void set_pio_integrity(struct send_context *sc) in set_pio_integrity() argument
665 struct hfi1_devdata *dd = sc->dd; in set_pio_integrity()
666 u32 hw_context = sc->hw_context; in set_pio_integrity()
667 int type = sc->type; in set_pio_integrity()
670 SC(CHECK_ENABLE), in set_pio_integrity()
674 static u32 get_buffers_allocated(struct send_context *sc) in get_buffers_allocated() argument
680 ret += *per_cpu_ptr(sc->buffers_allocated, cpu); in get_buffers_allocated()
684 static void reset_buffers_allocated(struct send_context *sc) in reset_buffers_allocated() argument
689 (*per_cpu_ptr(sc->buffers_allocated, cpu)) = 0; in reset_buffers_allocated()
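
Both helpers walk the per-cpu counter; a sketch of the assumed pair, using for_each_possible_cpu() so CPUs that have gone offline still contribute their counts:

/* sum every CPU's allocation count into one total */
static u32 get_buffers_allocated(struct send_context *sc)
{
	int cpu;
	u32 ret = 0;

	for_each_possible_cpu(cpu)
		ret += *per_cpu_ptr(sc->buffers_allocated, cpu);
	return ret;
}

/* zero every CPU's slot; only safe while the context is quiesced */
static void reset_buffers_allocated(struct send_context *sc)
{
	int cpu;

	for_each_possible_cpu(cpu)
		(*per_cpu_ptr(sc->buffers_allocated, cpu)) = 0;
}
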
700 struct send_context *sc = NULL; in sc_alloc() local
714 sc = kzalloc_node(sizeof(*sc), GFP_KERNEL, numa); in sc_alloc()
715 if (!sc) in sc_alloc()
718 sc->buffers_allocated = alloc_percpu(u32); in sc_alloc()
719 if (!sc->buffers_allocated) { in sc_alloc()
720 kfree(sc); in sc_alloc()
731 free_percpu(sc->buffers_allocated); in sc_alloc()
732 kfree(sc); in sc_alloc()
737 sci->sc = sc; in sc_alloc()
739 sc->dd = dd; in sc_alloc()
740 sc->node = numa; in sc_alloc()
741 sc->type = type; in sc_alloc()
742 spin_lock_init(&sc->alloc_lock); in sc_alloc()
743 spin_lock_init(&sc->release_lock); in sc_alloc()
744 spin_lock_init(&sc->credit_ctrl_lock); in sc_alloc()
745 seqlock_init(&sc->waitlock); in sc_alloc()
746 INIT_LIST_HEAD(&sc->piowait); in sc_alloc()
747 INIT_WORK(&sc->halt_work, sc_halted); in sc_alloc()
748 init_waitqueue_head(&sc->halt_wait); in sc_alloc()
751 sc->group = 0; in sc_alloc()
753 sc->sw_index = sw_index; in sc_alloc()
754 sc->hw_context = hw_context; in sc_alloc()
755 cr_group_addresses(sc, &dma); in sc_alloc()
756 sc->credits = sci->credits; in sc_alloc()
757 sc->size = sc->credits * PIO_BLOCK_SIZE; in sc_alloc()
762 sc->base_addr = dd->piobase + ((hw_context & PIO_ADDR_CONTEXT_MASK) in sc_alloc()
766 reg = ((sci->credits & SC(CTRL_CTXT_DEPTH_MASK)) in sc_alloc()
767 << SC(CTRL_CTXT_DEPTH_SHIFT)) in sc_alloc()
768 | ((sci->base & SC(CTRL_CTXT_BASE_MASK)) in sc_alloc()
769 << SC(CTRL_CTXT_BASE_SHIFT)); in sc_alloc()
770 write_kctxt_csr(dd, hw_context, SC(CTRL), reg); in sc_alloc()
772 set_pio_integrity(sc); in sc_alloc()
775 write_kctxt_csr(dd, hw_context, SC(ERR_MASK), (u64)-1); in sc_alloc()
778 write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY), in sc_alloc()
779 (SC(CHECK_PARTITION_KEY_VALUE_MASK) & in sc_alloc()
781 SC(CHECK_PARTITION_KEY_VALUE_SHIFT)); in sc_alloc()
793 write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE), in sc_alloc()
794 ((u64)opmask << SC(CHECK_OPCODE_MASK_SHIFT)) | in sc_alloc()
795 ((u64)opval << SC(CHECK_OPCODE_VALUE_SHIFT))); in sc_alloc()
798 reg = dma & SC(CREDIT_RETURN_ADDR_ADDRESS_SMASK); in sc_alloc()
799 write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), reg); in sc_alloc()
813 thresh = sc_percent_to_threshold(sc, 50); in sc_alloc()
815 thresh = sc_percent_to_threshold(sc, in sc_alloc()
818 thresh = min(sc_percent_to_threshold(sc, 50), in sc_alloc()
819 sc_mtu_to_threshold(sc, hfi1_max_mtu, in sc_alloc()
822 reg = thresh << SC(CREDIT_CTRL_THRESHOLD_SHIFT); in sc_alloc()
825 reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK); in sc_alloc()
827 reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK); in sc_alloc()
830 sc->credit_ctrl = reg; in sc_alloc()
831 write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), reg); in sc_alloc()
836 write_kctxt_csr(dd, hw_context, SC(CHECK_VL), reg); in sc_alloc()
854 sc->sr_size = sci->credits + 1; in sc_alloc()
855 sc->sr = kcalloc_node(sc->sr_size, in sc_alloc()
858 if (!sc->sr) { in sc_alloc()
859 sc_free(sc); in sc_alloc()
869 sc->group, in sc_alloc()
870 sc->credits, in sc_alloc()
871 sc->credit_ctrl, in sc_alloc()
874 return sc; in sc_alloc()
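
The sc_percent_to_threshold()/sc_mtu_to_threshold() calls inside sc_alloc() above branch on context type; a sketch of the assumed selection, where user_credit_return_threshold and hdrqentsize stand in for an elided module parameter and function argument:

	/* credit-return threshold policy by context type (assumed) */
	if (type == SC_ACK)
		thresh = sc_percent_to_threshold(sc, 50);
	else if (type == SC_USER)
		thresh = sc_percent_to_threshold(sc,
						 user_credit_return_threshold);
	else	/* kernel: whichever bound forces a return sooner */
		thresh = min(sc_percent_to_threshold(sc, 50),
			     sc_mtu_to_threshold(sc, hfi1_max_mtu,
						 hdrqentsize));
	reg = thresh << SC(CREDIT_CTRL_THRESHOLD_SHIFT);
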
878 void sc_free(struct send_context *sc) in sc_free() argument
885 if (!sc) in sc_free()
888 sc->flags |= SCF_IN_FREE; /* ensure no restarts */ in sc_free()
889 dd = sc->dd; in sc_free()
890 if (!list_empty(&sc->piowait)) in sc_free()
892 sw_index = sc->sw_index; in sc_free()
893 hw_context = sc->hw_context; in sc_free()
894 sc_disable(sc); /* make sure the HW is disabled */ in sc_free()
895 flush_work(&sc->halt_work); in sc_free()
898 dd->send_contexts[sw_index].sc = NULL; in sc_free()
901 write_kctxt_csr(dd, hw_context, SC(CTRL), 0); in sc_free()
902 write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), 0); in sc_free()
903 write_kctxt_csr(dd, hw_context, SC(ERR_MASK), 0); in sc_free()
904 write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY), 0); in sc_free()
905 write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE), 0); in sc_free()
906 write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), 0); in sc_free()
907 write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), 0); in sc_free()
913 kfree(sc->sr); in sc_free()
914 free_percpu(sc->buffers_allocated); in sc_free()
915 kfree(sc); in sc_free()
919 void sc_disable(struct send_context *sc) in sc_disable() argument
925 if (!sc) in sc_disable()
929 spin_lock_irq(&sc->alloc_lock); in sc_disable()
930 reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL)); in sc_disable()
931 reg &= ~SC(CTRL_CTXT_ENABLE_SMASK); in sc_disable()
932 sc->flags &= ~SCF_ENABLED; in sc_disable()
933 sc_wait_for_packet_egress(sc, 1); in sc_disable()
934 write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg); in sc_disable()
944 spin_lock(&sc->release_lock); in sc_disable()
945 if (sc->sr) { /* this context has a shadow ring */ in sc_disable()
946 while (sc->sr_tail != sc->sr_head) { in sc_disable()
947 pbuf = &sc->sr[sc->sr_tail].pbuf; in sc_disable()
950 sc->sr_tail++; in sc_disable()
951 if (sc->sr_tail >= sc->sr_size) in sc_disable()
952 sc->sr_tail = 0; in sc_disable()
955 spin_unlock(&sc->release_lock); in sc_disable()
957 write_seqlock(&sc->waitlock); in sc_disable()
958 if (!list_empty(&sc->piowait)) in sc_disable()
959 list_move(&sc->piowait, &wake_list); in sc_disable()
960 write_sequnlock(&sc->waitlock); in sc_disable()
974 spin_unlock_irq(&sc->alloc_lock); in sc_disable()
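
Between moving waiters onto wake_list and the unlock above, the elided code presumably wakes each queued QP; a sketch assuming the iowait/rvt_qp plumbing this driver uses elsewhere (iowait_to_qp(), hfi1_qp_wakeup(), and the wait-flag names are assumptions):

	/* wake everyone that was parked on this now-disabled context */
	while (!list_empty(&wake_list)) {
		struct iowait *wait;
		struct rvt_qp *qp;
		struct hfi1_qp_priv *priv;

		wait = list_first_entry(&wake_list, struct iowait, list);
		qp = iowait_to_qp(wait);
		priv = qp->priv;
		list_del_init(&priv->s_iowait.list);
		priv->s_iowait.lock = NULL;
		hfi1_qp_wakeup(qp, RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
	}
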
994 return !!(read_kctxt_csr(dd, hw_context, SC(STATUS)) & in is_sc_halted()
995 SC(STATUS_CTXT_HALTED_SMASK)); in is_sc_halted()
1000 * @sc: valid send context
1012 static void sc_wait_for_packet_egress(struct send_context *sc, int pause) in sc_wait_for_packet_egress() argument
1014 struct hfi1_devdata *dd = sc->dd; in sc_wait_for_packet_egress()
1021 reg = read_csr(dd, sc->hw_context * 8 + in sc_wait_for_packet_egress()
1024 if (sc->flags & SCF_HALTED || in sc_wait_for_packet_egress()
1025 is_sc_halted(dd, sc->hw_context) || egress_halted(reg)) in sc_wait_for_packet_egress()
1037 __func__, sc->sw_index, in sc_wait_for_packet_egress()
1038 sc->hw_context, (u32)reg); in sc_wait_for_packet_egress()
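
The fragments above suggest a poll loop that exits when the context halts or the egress cycle counter stops moving; a sketch under those assumptions (the loop bound, udelay(), and the egress_cycles() accessor are assumed):

	u64 reg = 0, reg_prev;
	u32 loop = 0;

	while (1) {
		reg_prev = reg;
		reg = read_csr(dd, sc->hw_context * 8 +
			       SEND_EGRESS_CTXT_STATUS);
		/* done if the context halted in SW or HW */
		if (sc->flags & SCF_HALTED ||
		    is_sc_halted(dd, sc->hw_context) || egress_halted(reg))
			break;
		reg = egress_cycles(reg);
		if (reg == reg_prev)
			break;	/* counter idle: nothing left in flight */
		if (loop > 500) {
			dd_dev_err(dd,
				   "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u\n",
				   __func__, sc->sw_index,
				   sc->hw_context, (u32)reg);
			break;
		}
		loop++;
		udelay(1);
	}
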
1057 struct send_context *sc = dd->send_contexts[i].sc; in sc_wait() local
1059 if (!sc) in sc_wait()
1061 sc_wait_for_packet_egress(sc, 0); in sc_wait()
1074 int sc_restart(struct send_context *sc) in sc_restart() argument
1076 struct hfi1_devdata *dd = sc->dd; in sc_restart()
1082 if (!(sc->flags & SCF_HALTED) || (sc->flags & SCF_IN_FREE)) in sc_restart()
1085 dd_dev_info(dd, "restarting send context %u(%u)\n", sc->sw_index, in sc_restart()
1086 sc->hw_context); in sc_restart()
1096 reg = read_kctxt_csr(dd, sc->hw_context, SC(STATUS)); in sc_restart()
1097 if (reg & SC(STATUS_CTXT_HALTED_SMASK)) in sc_restart()
1101 __func__, sc->sw_index, sc->hw_context); in sc_restart()
1118 if (sc->type != SC_USER) { in sc_restart()
1122 count = get_buffers_allocated(sc); in sc_restart()
1128 __func__, sc->sw_index, in sc_restart()
1129 sc->hw_context, count); in sc_restart()
1145 sc_disable(sc); in sc_restart()
1153 return sc_enable(sc); in sc_restart()
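
For non-user contexts, the get_buffers_allocated() call above presumably sits in a drain loop that waits for in-flight PIO buffers to be released before the disable/enable cycle; a sketch with the bound and delay assumed:

	if (sc->type != SC_USER) {
		/* spin until every allocated PIO buffer has been freed */
		loop = 0;
		while (1) {
			count = get_buffers_allocated(sc);
			if (count == 0)
				break;
			if (loop > 100) {
				dd_dev_err(dd,
					   "%s: context %u(%u) timeout waiting for PIO buffers to zero, remaining %d\n",
					   __func__, sc->sw_index,
					   sc->hw_context, count);
				break;
			}
			loop++;
			udelay(1);
		}
	}
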
1163 struct send_context *sc; in pio_freeze() local
1167 sc = dd->send_contexts[i].sc; in pio_freeze()
1173 if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER) in pio_freeze()
1177 sc_disable(sc); in pio_freeze()
1190 struct send_context *sc; in pio_kernel_unfreeze() local
1194 sc = dd->send_contexts[i].sc; in pio_kernel_unfreeze()
1195 if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER) in pio_kernel_unfreeze()
1197 if (sc->flags & SCF_LINK_DOWN) in pio_kernel_unfreeze()
1200 sc_enable(sc); /* will clear the sc frozen flag */ in pio_kernel_unfreeze()
1218 struct send_context *sc; in pio_kernel_linkup() local
1222 sc = dd->send_contexts[i].sc; in pio_kernel_linkup()
1223 if (!sc || !(sc->flags & SCF_LINK_DOWN) || sc->type == SC_USER) in pio_kernel_linkup()
1226 sc_enable(sc); /* will clear the sc link down flag */ in pio_kernel_linkup()
1286 int sc_enable(struct send_context *sc) in sc_enable() argument
1293 if (!sc) in sc_enable()
1295 dd = sc->dd; in sc_enable()
1304 spin_lock_irqsave(&sc->alloc_lock, flags); in sc_enable()
1305 sc_ctrl = read_kctxt_csr(dd, sc->hw_context, SC(CTRL)); in sc_enable()
1306 if ((sc_ctrl & SC(CTRL_CTXT_ENABLE_SMASK))) in sc_enable()
1311 *sc->hw_free = 0; in sc_enable()
1312 sc->free = 0; in sc_enable()
1313 sc->alloc_free = 0; in sc_enable()
1314 sc->fill = 0; in sc_enable()
1315 sc->fill_wrap = 0; in sc_enable()
1316 sc->sr_head = 0; in sc_enable()
1317 sc->sr_tail = 0; in sc_enable()
1318 sc->flags = 0; in sc_enable()
1320 reset_buffers_allocated(sc); in sc_enable()
1328 reg = read_kctxt_csr(dd, sc->hw_context, SC(ERR_STATUS)); in sc_enable()
1330 write_kctxt_csr(dd, sc->hw_context, SC(ERR_CLEAR), reg); in sc_enable()
1344 pio = ((sc->hw_context & SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_MASK) << in sc_enable()
1358 sc->sw_index, sc->hw_context, ret); in sc_enable()
1365 sc_ctrl |= SC(CTRL_CTXT_ENABLE_SMASK); in sc_enable()
1366 write_kctxt_csr(dd, sc->hw_context, SC(CTRL), sc_ctrl); in sc_enable()
1371 read_kctxt_csr(dd, sc->hw_context, SC(CTRL)); in sc_enable()
1372 sc->flags |= SCF_ENABLED; in sc_enable()
1375 spin_unlock_irqrestore(&sc->alloc_lock, flags); in sc_enable()
1381 void sc_return_credits(struct send_context *sc) in sc_return_credits() argument
1383 if (!sc) in sc_return_credits()
1387 write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), in sc_return_credits()
1388 SC(CREDIT_FORCE_FORCE_RETURN_SMASK)); in sc_return_credits()
1393 read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE)); in sc_return_credits()
1395 write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), 0); in sc_return_credits()
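
The three register accesses above are essentially the whole function; a sketch of the assumed full body, where the read-back flushes the write so the hardware observes a clean 0-to-1 transition before the bit is cleared again:

void sc_return_credits(struct send_context *sc)
{
	if (!sc)
		return;

	/* a 0->1 transition on FORCE_RETURN schedules a credit return */
	write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE),
			SC(CREDIT_FORCE_FORCE_RETURN_SMASK));
	/* read back to flush the write before clearing */
	read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE));
	/* set back to 0 so the next force is another 0->1 edge */
	write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), 0);
}
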
1399 void sc_flush(struct send_context *sc) in sc_flush() argument
1401 if (!sc) in sc_flush()
1404 sc_wait_for_packet_egress(sc, 1); in sc_flush()
1408 void sc_drop(struct send_context *sc) in sc_drop() argument
1410 if (!sc) in sc_drop()
1413 dd_dev_info(sc->dd, "%s: context %u(%u) - not implemented\n", in sc_drop()
1414 __func__, sc->sw_index, sc->hw_context); in sc_drop()
1425 void sc_stop(struct send_context *sc, int flag) in sc_stop() argument
1430 spin_lock_irqsave(&sc->alloc_lock, flags); in sc_stop()
1432 sc->flags |= flag; in sc_stop()
1433 sc->flags &= ~SCF_ENABLED; in sc_stop()
1434 spin_unlock_irqrestore(&sc->alloc_lock, flags); in sc_stop()
1435 wake_up(&sc->halt_wait); in sc_stop()
1444 * @sc: the PIO send context we are allocating from
1452 struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len, in sc_buffer_alloc() argument
1463 spin_lock_irqsave(&sc->alloc_lock, flags); in sc_buffer_alloc()
1464 if (!(sc->flags & SCF_ENABLED)) { in sc_buffer_alloc()
1465 spin_unlock_irqrestore(&sc->alloc_lock, flags); in sc_buffer_alloc()
1470 avail = (unsigned long)sc->credits - (sc->fill - sc->alloc_free); in sc_buffer_alloc()
1474 spin_unlock_irqrestore(&sc->alloc_lock, flags); in sc_buffer_alloc()
1478 sc->alloc_free = READ_ONCE(sc->free); in sc_buffer_alloc()
1480 (unsigned long)sc->credits - in sc_buffer_alloc()
1481 (sc->fill - sc->alloc_free); in sc_buffer_alloc()
1484 sc_release_update(sc); in sc_buffer_alloc()
1485 sc->alloc_free = READ_ONCE(sc->free); in sc_buffer_alloc()
1494 this_cpu_inc(*sc->buffers_allocated); in sc_buffer_alloc()
1497 head = sc->sr_head; in sc_buffer_alloc()
1500 sc->fill += blocks; in sc_buffer_alloc()
1501 fill_wrap = sc->fill_wrap; in sc_buffer_alloc()
1502 sc->fill_wrap += blocks; in sc_buffer_alloc()
1503 if (sc->fill_wrap >= sc->credits) in sc_buffer_alloc()
1504 sc->fill_wrap = sc->fill_wrap - sc->credits; in sc_buffer_alloc()
1513 pbuf = &sc->sr[head].pbuf; in sc_buffer_alloc()
1514 pbuf->sent_at = sc->fill; in sc_buffer_alloc()
1517 pbuf->sc = sc; /* could be filled in at sc->sr init time */ in sc_buffer_alloc()
1522 if (next >= sc->sr_size) in sc_buffer_alloc()
1529 sc->sr_head = next; in sc_buffer_alloc()
1530 spin_unlock_irqrestore(&sc->alloc_lock, flags); in sc_buffer_alloc()
1533 pbuf->start = sc->base_addr + fill_wrap * PIO_BLOCK_SIZE; in sc_buffer_alloc()
1534 pbuf->end = sc->base_addr + sc->size; in sc_buffer_alloc()
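
The allocator's availability test is plain counter arithmetic: fill counts blocks ever allocated, free counts blocks ever returned, and alloc_free is the allocator's cached copy of free, so in-flight usage is fill - alloc_free. A hypothetical helper (sc_avail() is not in the source; the numbers in the comment are invented for illustration):

/*
 * Example with invented numbers: credits = 64, fill = 100,
 * alloc_free = 40 -> avail = 64 - (100 - 40) = 4 free blocks,
 * so a 3-block request fits without a release update.
 */
static unsigned long sc_avail(struct send_context *sc)
{
	return (unsigned long)sc->credits - (sc->fill - sc->alloc_free);
}
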
1553 void sc_add_credit_return_intr(struct send_context *sc) in sc_add_credit_return_intr() argument
1558 spin_lock_irqsave(&sc->credit_ctrl_lock, flags); in sc_add_credit_return_intr()
1559 if (sc->credit_intr_count == 0) { in sc_add_credit_return_intr()
1560 sc->credit_ctrl |= SC(CREDIT_CTRL_CREDIT_INTR_SMASK); in sc_add_credit_return_intr()
1561 write_kctxt_csr(sc->dd, sc->hw_context, in sc_add_credit_return_intr()
1562 SC(CREDIT_CTRL), sc->credit_ctrl); in sc_add_credit_return_intr()
1564 sc->credit_intr_count++; in sc_add_credit_return_intr()
1565 spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags); in sc_add_credit_return_intr()
1572 void sc_del_credit_return_intr(struct send_context *sc) in sc_del_credit_return_intr() argument
1576 WARN_ON(sc->credit_intr_count == 0); in sc_del_credit_return_intr()
1579 spin_lock_irqsave(&sc->credit_ctrl_lock, flags); in sc_del_credit_return_intr()
1580 sc->credit_intr_count--; in sc_del_credit_return_intr()
1581 if (sc->credit_intr_count == 0) { in sc_del_credit_return_intr()
1582 sc->credit_ctrl &= ~SC(CREDIT_CTRL_CREDIT_INTR_SMASK); in sc_del_credit_return_intr()
1583 write_kctxt_csr(sc->dd, sc->hw_context, in sc_del_credit_return_intr()
1584 SC(CREDIT_CTRL), sc->credit_ctrl); in sc_del_credit_return_intr()
1586 spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags); in sc_del_credit_return_intr()
1593 void hfi1_sc_wantpiobuf_intr(struct send_context *sc, u32 needint) in hfi1_sc_wantpiobuf_intr() argument
1596 sc_add_credit_return_intr(sc); in hfi1_sc_wantpiobuf_intr()
1598 sc_del_credit_return_intr(sc); in hfi1_sc_wantpiobuf_intr()
1599 trace_hfi1_wantpiointr(sc, needint, sc->credit_ctrl); in hfi1_sc_wantpiobuf_intr()
1601 sc_return_credits(sc); in hfi1_sc_wantpiobuf_intr()
1606 * @sc: the send context
1612 static void sc_piobufavail(struct send_context *sc) in sc_piobufavail() argument
1614 struct hfi1_devdata *dd = sc->dd; in sc_piobufavail()
1622 if (dd->send_contexts[sc->sw_index].type != SC_KERNEL && in sc_piobufavail()
1623 dd->send_contexts[sc->sw_index].type != SC_VL15) in sc_piobufavail()
1625 list = &sc->piowait; in sc_piobufavail()
1632 write_seqlock_irqsave(&sc->waitlock, flags); in sc_piobufavail()
1659 hfi1_sc_wantpiobuf_intr(sc, 0); in sc_piobufavail()
1661 hfi1_sc_wantpiobuf_intr(sc, 1); in sc_piobufavail()
1663 write_sequnlock_irqrestore(&sc->waitlock, flags); in sc_piobufavail()
1699 void sc_release_update(struct send_context *sc) in sc_release_update() argument
1710 if (!sc) in sc_release_update()
1713 spin_lock_irqsave(&sc->release_lock, flags); in sc_release_update()
1715 hw_free = le64_to_cpu(*sc->hw_free); /* volatile read */ in sc_release_update()
1716 old_free = sc->free; in sc_release_update()
1721 trace_hfi1_piofree(sc, extra); in sc_release_update()
1725 head = READ_ONCE(sc->sr_head); /* snapshot the head */ in sc_release_update()
1726 tail = sc->sr_tail; in sc_release_update()
1728 pbuf = &sc->sr[tail].pbuf; in sc_release_update()
1741 if (tail >= sc->sr_size) in sc_release_update()
1744 sc->sr_tail = tail; in sc_release_update()
1747 sc->free = free; in sc_release_update()
1748 spin_unlock_irqrestore(&sc->release_lock, flags); in sc_release_update()
1749 sc_piobufavail(sc); in sc_release_update()
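
The release side has to widen the hardware's truncated free counter into the full software counter; a sketch of the assumed arithmetic between the volatile read and the trace call (CR_COUNTER_SHIFT/CR_COUNTER_MASK are assumed to match the CR_COUNTER_SMASK usage visible near the end of this listing):

	/* credits returned since the last update, modulo the HW counter */
	extra = (((hw_free & CR_COUNTER_SMASK) >> CR_COUNTER_SHIFT)
		 - (old_free & CR_COUNTER_MASK))
		& CR_COUNTER_MASK;
	free = old_free + extra;	/* extend to the full-width count */
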
1763 struct send_context *sc; in sc_group_release_update() local
1774 sc = dd->send_contexts[sw_index].sc; in sc_group_release_update()
1775 if (unlikely(!sc)) in sc_group_release_update()
1778 gc = group_context(hw_context, sc->group); in sc_group_release_update()
1779 gc_end = gc + group_size(sc->group); in sc_group_release_update()
1788 sc_release_update(dd->send_contexts[sw_index].sc); in sc_group_release_update()
1811 * NOTE: This should only happen if SC->VL changed after the initial in pio_select_send_context_vl()
1824 return dd->vld[0].sc; in pio_select_send_context_vl()
1831 rval = !rval ? dd->vld[0].sc : rval; in pio_select_send_context_vl()
1839 * @sc5: the 5 bit sc
1841 * This function returns a send context based on the selector and an sc
2025 dd->vld[15].sc = sc_alloc(dd, SC_VL15, in init_pervl_scs()
2027 if (!dd->vld[15].sc) in init_pervl_scs()
2030 hfi1_init_ctxt(dd->vld[15].sc); in init_pervl_scs()
2039 dd->kernel_send_context[0] = dd->vld[15].sc; in init_pervl_scs()
2049 dd->vld[i].sc = sc_alloc(dd, SC_KERNEL, in init_pervl_scs()
2051 if (!dd->vld[i].sc) in init_pervl_scs()
2053 dd->kernel_send_context[i + 1] = dd->vld[i].sc; in init_pervl_scs()
2054 hfi1_init_ctxt(dd->vld[i].sc); in init_pervl_scs()
2066 sc_enable(dd->vld[15].sc); in init_pervl_scs()
2067 ctxt = dd->vld[15].sc->hw_context; in init_pervl_scs()
2069 write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask); in init_pervl_scs()
2072 dd->vld[15].sc->sw_index, ctxt); in init_pervl_scs()
2075 sc_enable(dd->vld[i].sc); in init_pervl_scs()
2076 ctxt = dd->vld[i].sc->hw_context; in init_pervl_scs()
2078 write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask); in init_pervl_scs()
2084 write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask); in init_pervl_scs()
2093 sc_free(dd->vld[i].sc); in init_pervl_scs()
2094 dd->vld[i].sc = NULL; in init_pervl_scs()
2104 sc_free(dd->vld[15].sc); in init_pervl_scs()
2167 struct send_context *sc = sci->sc; in seqfile_dump_sci() local
2173 sc->flags, sc->sw_index, sc->hw_context, sc->group); in seqfile_dump_sci()
2175 sc->sr_size, sc->credits, sc->sr_head, sc->sr_tail); in seqfile_dump_sci()
2177 sc->fill, sc->free, sc->fill_wrap, sc->alloc_free); in seqfile_dump_sci()
2179 sc->credit_intr_count, sc->credit_ctrl); in seqfile_dump_sci()
2180 reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_STATUS)); in seqfile_dump_sci()
2182 (le64_to_cpu(*sc->hw_free) & CR_COUNTER_SMASK) >> in seqfile_dump_sci()
2184 (reg >> SC(CREDIT_STATUS_CURRENT_FREE_COUNTER_SHIFT)) & in seqfile_dump_sci()
2185 SC(CREDIT_STATUS_CURRENT_FREE_COUNTER_MASK), in seqfile_dump_sci()
2186 reg & SC(CREDIT_STATUS_LAST_RETURNED_COUNTER_SMASK)); in seqfile_dump_sci()