
Searched refs:rcd (Results 1 – 25 of 55) sorted by relevance


/drivers/infiniband/hw/hfi1/
init.c
87 struct hfi1_ctxtdata *rcd; in hfi1_create_kctxt() local
93 ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd); in hfi1_create_kctxt()
104 rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) | in hfi1_create_kctxt()
110 if (rcd->ctxt == HFI1_CTRL_CTXT) in hfi1_create_kctxt()
111 rcd->flags |= HFI1_CAP_DMA_RTAIL; in hfi1_create_kctxt()
112 rcd->fast_handler = get_dma_rtail_setting(rcd) ? in hfi1_create_kctxt()
116 hfi1_set_seq_cnt(rcd, 1); in hfi1_create_kctxt()
118 rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node); in hfi1_create_kctxt()
119 if (!rcd->sc) { in hfi1_create_kctxt()
123 hfi1_init_ctxt(rcd->sc); in hfi1_create_kctxt()
[all …]
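
The init.c hits above show hfi1_create_kctxt() building a kernel receive context step by step and unwinding whatever was already allocated when a later step fails. A minimal sketch of that allocate-configure-unwind shape, using hypothetical stand-in types rather than the driver's real API:

    #include <stdio.h>
    #include <stdlib.h>

    struct kctxt {
            unsigned int flags;
            void *sc;               /* stand-in for the SC_ACK send context */
    };

    static struct kctxt *create_kctxt(void)
    {
            struct kctxt *rcd = calloc(1, sizeof(*rcd));

            if (!rcd)
                    return NULL;
            rcd->flags |= 0x1;      /* stand-in for the HFI1_CAP_* flag setup */
            rcd->sc = malloc(64);   /* stand-in for sc_alloc() */
            if (!rcd->sc) {
                    free(rcd);      /* unwind the partially built context */
                    return NULL;
            }
            return rcd;
    }

    int main(void)
    {
            struct kctxt *rcd = create_kctxt();

            if (!rcd)
                    return 1;
            printf("context created, flags=%#x\n", rcd->flags);
            free(rcd->sc);
            free(rcd);
            return 0;
    }
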
aspm.c
129 void __aspm_ctx_disable(struct hfi1_ctxtdata *rcd) in __aspm_ctx_disable() argument
136 spin_lock_irqsave(&rcd->aspm_lock, flags); in __aspm_ctx_disable()
138 if (!rcd->aspm_intr_enable) in __aspm_ctx_disable()
141 prev = rcd->aspm_ts_last_intr; in __aspm_ctx_disable()
143 rcd->aspm_ts_last_intr = now; in __aspm_ctx_disable()
149 restart_timer = ktime_to_ns(ktime_sub(now, rcd->aspm_ts_timer_sched)) > in __aspm_ctx_disable()
154 if (rcd->aspm_enabled && close_interrupts) { in __aspm_ctx_disable()
155 aspm_disable_inc(rcd->dd); in __aspm_ctx_disable()
156 rcd->aspm_enabled = false; in __aspm_ctx_disable()
161 mod_timer(&rcd->aspm_timer, in __aspm_ctx_disable()
[all …]
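
The aspm.c hit shows the heart of __aspm_ctx_disable(): timestamp each receive interrupt, and if two interrupts arrive close together, turn ASPM (PCIe link power saving) off, because L1 exit latency would then add to receive latency. A condensed sketch of that decision under stated assumptions: plain nanosecond counters instead of ktime_t, and an illustrative 1 ms cutoff rather than the driver's actual constant:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CLOSE_INTR_NS 1000000ULL        /* illustrative 1 ms cutoff */

    struct ctx {
            uint64_t ts_last_intr;
            bool aspm_enabled;
    };

    /* call on every receive interrupt */
    static void on_intr(struct ctx *c, uint64_t now_ns)
    {
            bool close = (now_ns - c->ts_last_intr) < CLOSE_INTR_NS;

            c->ts_last_intr = now_ns;
            if (c->aspm_enabled && close) {
                    c->aspm_enabled = false;   /* aspm_disable_inc() in the driver */
                    printf("ASPM disabled at t=%llu ns\n",
                           (unsigned long long)now_ns);
            }
    }

    int main(void)
    {
            struct ctx c = { 0, true };

            on_intr(&c, 5000000);   /* 5 ms gap: interrupts far apart, keep ASPM */
            on_intr(&c, 5200000);   /* 0.2 ms gap: close interrupts, disable ASPM */
            return 0;
    }
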
driver.c
161 static inline void *get_egrbuf(const struct hfi1_ctxtdata *rcd, u64 rhf, in get_egrbuf() argument
166 *update |= !(idx & (rcd->egrbufs.threshold - 1)) && !offset; in get_egrbuf()
167 return (void *)(((u64)(rcd->egrbufs.rcvtids[idx].addr)) + in get_egrbuf()
171 static inline void *hfi1_get_header(struct hfi1_ctxtdata *rcd, in hfi1_get_header() argument
176 return (void *)(rhf_addr - rcd->rhf_offset + offset); in hfi1_get_header()
179 static inline struct ib_header *hfi1_get_msgheader(struct hfi1_ctxtdata *rcd, in hfi1_get_msgheader() argument
182 return (struct ib_header *)hfi1_get_header(rcd, rhf_addr); in hfi1_get_msgheader()
186 *hfi1_get_16B_header(struct hfi1_ctxtdata *rcd, in hfi1_get_16B_header() argument
189 return (struct hfi1_16b_header *)hfi1_get_header(rcd, rhf_addr); in hfi1_get_16B_header()
212 static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd, in rcv_hdrerr() argument
[all …]
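
In get_egrbuf() above, `*update |= !(idx & (rcd->egrbufs.threshold - 1)) && !offset;` requests a hardware head update only once every `threshold` eager buffers; the mask test is equivalent to `idx % threshold == 0` only when the threshold is a power of two. Illustrated standalone:

    #include <stdio.h>

    int main(void)
    {
            unsigned int threshold = 8;     /* assumed power of two */

            for (unsigned int idx = 0; idx < 20; idx++)
                    if (!(idx & (threshold - 1)))
                            printf("head update at idx %u\n", idx);  /* 0, 8, 16 */
            return 0;
    }
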
exp_rcv.c
23 void hfi1_exp_tid_group_init(struct hfi1_ctxtdata *rcd) in hfi1_exp_tid_group_init() argument
25 hfi1_exp_tid_set_init(&rcd->tid_group_list); in hfi1_exp_tid_group_init()
26 hfi1_exp_tid_set_init(&rcd->tid_used_list); in hfi1_exp_tid_group_init()
27 hfi1_exp_tid_set_init(&rcd->tid_full_list); in hfi1_exp_tid_group_init()
34 int hfi1_alloc_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd) in hfi1_alloc_ctxt_rcv_groups() argument
36 struct hfi1_devdata *dd = rcd->dd; in hfi1_alloc_ctxt_rcv_groups()
42 ngroups = rcd->expected_count / dd->rcv_entries.group_size; in hfi1_alloc_ctxt_rcv_groups()
43 rcd->groups = in hfi1_alloc_ctxt_rcv_groups()
44 kcalloc_node(ngroups, sizeof(*rcd->groups), in hfi1_alloc_ctxt_rcv_groups()
45 GFP_KERNEL, rcd->numa_id); in hfi1_alloc_ctxt_rcv_groups()
[all …]
msix.c
126 static int msix_request_rcd_irq_common(struct hfi1_ctxtdata *rcd, in msix_request_rcd_irq_common() argument
131 int nr = msix_request_irq(rcd->dd, rcd, handler, thread, in msix_request_rcd_irq_common()
132 rcd->is_vnic ? IRQ_NETDEVCTXT : IRQ_RCVCTXT, in msix_request_rcd_irq_common()
141 rcd->ireg = (IS_RCVAVAIL_START + rcd->ctxt) / 64; in msix_request_rcd_irq_common()
142 rcd->imask = ((u64)1) << ((IS_RCVAVAIL_START + rcd->ctxt) % 64); in msix_request_rcd_irq_common()
143 rcd->msix_intr = nr; in msix_request_rcd_irq_common()
144 remap_intr(rcd->dd, IS_RCVAVAIL_START + rcd->ctxt, nr); in msix_request_rcd_irq_common()
154 int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd) in msix_request_rcd_irq() argument
159 rcd->dd->unit, rcd->ctxt); in msix_request_rcd_irq()
161 return msix_request_rcd_irq_common(rcd, receive_context_interrupt, in msix_request_rcd_irq()
[all …]
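
msix.c lines 141-142 convert a flat interrupt-source number into a (64-bit CSR index, bit mask) pair. The split is plain div/mod by 64, as this standalone sketch shows; the IS_RCVAVAIL_START value here is made up for illustration, not the chip's real offset:

    #include <stdio.h>

    #define IS_RCVAVAIL_START 64U   /* hypothetical value, for illustration only */

    int main(void)
    {
            unsigned int ctxt = 3;
            unsigned int bit = IS_RCVAVAIL_START + ctxt;

            unsigned int ireg = bit / 64;                   /* which 64-bit register */
            unsigned long long imask = 1ULL << (bit % 64);  /* bit within that register */

            printf("ireg=%u imask=%#llx\n", ireg, imask);
            return 0;
    }
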
hfi.h
161 typedef int (*intr_handler)(struct hfi1_ctxtdata *rcd, int data);
320 static inline u32 rcvhdrq_size(struct hfi1_ctxtdata *rcd) in rcvhdrq_size() argument
322 return PAGE_ALIGN(rcd->rcvhdrq_cnt * in rcvhdrq_size()
323 rcd->rcvhdrqentsize * sizeof(u32)); in rcvhdrq_size()
337 struct hfi1_ctxtdata *rcd; member
1309 struct hfi1_ctxtdata **rcd; member
1424 void handle_user_interrupt(struct hfi1_ctxtdata *rcd);
1426 int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
1427 int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd);
1430 struct hfi1_ctxtdata **rcd);
[all …]
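
rcvhdrq_size() in hfi.h computes the receive header queue footprint (rcvhdrq_cnt * rcvhdrqentsize * sizeof(u32)) and rounds it up to a whole page, since the queue is mapped in page units. The same rounding spelled out, assuming a 4 KiB page; the geometry numbers are examples:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096U
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
            uint32_t rcvhdrq_cnt = 500, rcvhdrqentsize = 32;   /* example geometry */
            uint32_t bytes = rcvhdrq_cnt * rcvhdrqentsize * (uint32_t)sizeof(uint32_t);

            printf("raw=%u aligned=%u\n", bytes, (unsigned int)PAGE_ALIGN(bytes));
            return 0;   /* raw=64000 aligned=65536 */
    }
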
tid_rdma.c
133 struct hfi1_ctxtdata *rcd,
197 p->qp = (RVT_KDETH_QP_PREFIX << 16) | priv->rcd->ctxt; in tid_rdma_opfn_init()
199 p->jkey = priv->rcd->jkey; in tid_rdma_opfn_init()
203 p->urg = is_urg_masked(priv->rcd); in tid_rdma_opfn_init()
298 int hfi1_kern_exp_rcv_init(struct hfi1_ctxtdata *rcd, int reinit) in hfi1_kern_exp_rcv_init() argument
305 rcd->jkey = TID_RDMA_JKEY; in hfi1_kern_exp_rcv_init()
306 hfi1_set_ctxt_jkey(rcd->dd, rcd, rcd->jkey); in hfi1_kern_exp_rcv_init()
307 return hfi1_alloc_ctxt_rcv_groups(rcd); in hfi1_kern_exp_rcv_init()
335 return dd->rcd[ctxt]; in qp_to_rcd()
344 qpriv->rcd = qp_to_rcd(rdi, qp); in hfi1_qp_priv_init()
[all …]
netdev_rx.c
205 rc = hfi1_netdev_allot_ctxt(rx, &rxq->rcd); in hfi1_netdev_rxq_init()
209 hfi1_rcd_get(rxq->rcd); in hfi1_netdev_rxq_init()
211 rxq->rcd->napi = &rxq->napi; in hfi1_netdev_rxq_init()
213 i, rxq->rcd->ctxt); in hfi1_netdev_rxq_init()
220 rc = msix_netdev_request_rcd_irq(rxq->rcd); in hfi1_netdev_rxq_init()
232 if (rxq->rcd) { in hfi1_netdev_rxq_init()
233 hfi1_netdev_deallocate_ctxt(dd, rxq->rcd); in hfi1_netdev_rxq_init()
234 hfi1_rcd_put(rxq->rcd); in hfi1_netdev_rxq_init()
235 rxq->rcd = NULL; in hfi1_netdev_rxq_init()
253 hfi1_netdev_deallocate_ctxt(dd, rxq->rcd); in hfi1_netdev_rxq_deinit()
[all …]
intr.c
202 void handle_user_interrupt(struct hfi1_ctxtdata *rcd) in handle_user_interrupt() argument
204 struct hfi1_devdata *dd = rcd->dd; in handle_user_interrupt()
208 if (bitmap_empty(rcd->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) in handle_user_interrupt()
211 if (test_and_clear_bit(HFI1_CTXT_WAITING_RCV, &rcd->event_flags)) { in handle_user_interrupt()
212 wake_up_interruptible(&rcd->wait); in handle_user_interrupt()
213 hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_DIS, rcd); in handle_user_interrupt()
215 &rcd->event_flags)) { in handle_user_interrupt()
216 rcd->urgent++; in handle_user_interrupt()
217 wake_up_interruptible(&rcd->wait); in handle_user_interrupt()
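
handle_user_interrupt() uses the arm-then-wake handshake: the sleeper sets a WAITING bit before blocking, and the interrupt path atomically tests and clears it so each arming produces exactly one wakeup. A user-space sketch of the same handshake, with C11 atomics standing in for the kernel's test_and_clear_bit()/wake_up_interruptible():

    #include <stdatomic.h>
    #include <stdio.h>

    #define CTXT_WAITING_RCV 0x1U

    static atomic_uint event_flags;

    /* sleeper side: arm the flag before blocking on the wait queue */
    static void arm_wait(void)
    {
            atomic_fetch_or(&event_flags, CTXT_WAITING_RCV);
    }

    /* interrupt side: wake only if a sleeper is actually armed */
    static int maybe_wake(void)
    {
            unsigned int old = atomic_fetch_and(&event_flags, ~CTXT_WAITING_RCV);

            return (old & CTXT_WAITING_RCV) != 0;   /* like test_and_clear_bit() */
    }

    int main(void)
    {
            arm_wait();
            printf("first wake: %d, second: %d\n", maybe_wake(), maybe_wake());
            return 0;   /* prints "first wake: 1, second: 0" */
    }
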
trace_rx.h
27 TP_STRUCT__entry(DD_DEV_ENTRY(packet->rcd->dd)
36 TP_fast_assign(DD_DEV_ASSIGN(packet->rcd->dd);
38 __entry->ctxt = packet->rcd->ctxt;
59 TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd),
60 TP_ARGS(dd, rcd),
67 __entry->ctxt = rcd->ctxt;
68 __entry->slow_path = hfi1_is_slowpath(rcd);
69 __entry->dma_rtail = get_dma_rtail_setting(rcd);
exp_rcv.h
150 hfi1_tid_group_to_idx(struct hfi1_ctxtdata *rcd, struct tid_group *grp) in hfi1_tid_group_to_idx() argument
152 return grp - &rcd->groups[0]; in hfi1_tid_group_to_idx()
161 hfi1_idx_to_tid_group(struct hfi1_ctxtdata *rcd, u16 idx) in hfi1_idx_to_tid_group() argument
163 return &rcd->groups[idx]; in hfi1_idx_to_tid_group()
166 int hfi1_alloc_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd);
167 void hfi1_free_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd);
168 void hfi1_exp_tid_group_init(struct hfi1_ctxtdata *rcd);
aspm.h
22 void __aspm_ctx_disable(struct hfi1_ctxtdata *rcd);
26 static inline void aspm_ctx_disable(struct hfi1_ctxtdata *rcd) in aspm_ctx_disable() argument
29 if (likely(!rcd->aspm_intr_supported)) in aspm_ctx_disable()
32 __aspm_ctx_disable(rcd); in aspm_ctx_disable()
debugfs.c
98 struct hfi1_ctxtdata *rcd; in _opcode_stats_seq_show() local
101 rcd = hfi1_rcd_get_by_index(dd, j); in _opcode_stats_seq_show()
102 if (rcd) { in _opcode_stats_seq_show()
103 n_packets += rcd->opstats->stats[i].n_packets; in _opcode_stats_seq_show()
104 n_bytes += rcd->opstats->stats[i].n_bytes; in _opcode_stats_seq_show()
106 hfi1_rcd_put(rcd); in _opcode_stats_seq_show()
189 struct hfi1_ctxtdata *rcd; in _ctx_stats_seq_show() local
199 rcd = hfi1_rcd_get_by_index_safe(dd, i); in _ctx_stats_seq_show()
200 if (!rcd) in _ctx_stats_seq_show()
203 for (j = 0; j < ARRAY_SIZE(rcd->opstats->stats); j++) in _ctx_stats_seq_show()
[all …]
fault.c
51 struct hfi1_ctxtdata *rcd; in _fault_stats_seq_show() local
54 rcd = hfi1_rcd_get_by_index(dd, j); in _fault_stats_seq_show()
55 if (rcd) { in _fault_stats_seq_show()
56 n_packets += rcd->opstats->stats[i].n_packets; in _fault_stats_seq_show()
57 n_bytes += rcd->opstats->stats[i].n_bytes; in _fault_stats_seq_show()
59 hfi1_rcd_put(rcd); in _fault_stats_seq_show()
322 struct hfi1_ibdev *ibd = &packet->rcd->dd->verbs_dev; in hfi1_dbg_should_fault_rx()
tid_rdma.h
99 struct hfi1_ctxtdata *rcd; member
209 int hfi1_kern_exp_rcv_init(struct hfi1_ctxtdata *rcd, int reinit);
236 int hfi1_kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp);
237 void hfi1_kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp);
238 void hfi1_kern_init_ctxt_generations(struct hfi1_ctxtdata *rcd);
255 bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
rc.h
25 static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd, in rc_defered_ack() argument
31 list_add_tail(&qp->rspwait, &rcd->qp_wait_list); in rc_defered_ack()
55 struct hfi1_ctxtdata *rcd);
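
rc_defered_ack() parks the QP on the receive context's qp_wait_list so the acknowledgment is generated later, outside the hot receive path. A condensed sketch of that defer-to-a-list idea, with a plain singly linked tail queue standing in for the kernel's list_head machinery:

    #include <stdio.h>

    struct qp {
            int num;
            struct qp *next;        /* stand-in for the rspwait list_head */
    };

    struct rcv_ctx {
            struct qp *wait_head;
            struct qp **wait_tail;
    };

    static void defer_ack(struct rcv_ctx *rcd, struct qp *qp)
    {
            qp->next = NULL;
            *rcd->wait_tail = qp;           /* list_add_tail() equivalent */
            rcd->wait_tail = &qp->next;
    }

    int main(void)
    {
            struct rcv_ctx rcd = { NULL, &rcd.wait_head };
            struct qp a = { 1, NULL }, b = { 2, NULL };

            defer_ack(&rcd, &a);
            defer_ack(&rcd, &b);
            for (struct qp *q = rcd.wait_head; q; q = q->next)
                    printf("deferred ack for qp %d\n", q->num);
            return 0;
    }
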
/drivers/infiniband/hw/qib/
qib_file_ops.c
101 struct qib_ctxtdata *rcd = ctxt_fp(fp); in qib_get_base_info() local
104 struct qib_devdata *dd = rcd->dd; in qib_get_base_info()
105 struct qib_pportdata *ppd = rcd->ppd; in qib_get_base_info()
110 subctxt_cnt = rcd->subctxt_cnt; in qib_get_base_info()
135 ret = dd->f_get_base_info(rcd, kinfo); in qib_get_base_info()
141 kinfo->spi_tidegrcnt = rcd->rcvegrcnt; in qib_get_base_info()
147 rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size; in qib_get_base_info()
148 kinfo->spi_rcv_egrperchunk = rcd->rcvegrbufs_perchunk; in qib_get_base_info()
150 rcd->rcvegrbuf_chunks; in qib_get_base_info()
184 kinfo->spi_rcvhdr_base = (u64) rcd->rcvhdrq_phys; in qib_get_base_info()
[all …]
qib_init.c
134 dd->rcd = kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), GFP_KERNEL); in qib_create_ctxts()
135 if (!dd->rcd) in qib_create_ctxts()
141 struct qib_ctxtdata *rcd; in qib_create_ctxts() local
148 rcd = qib_create_ctxtdata(ppd, i, dd->assigned_node_id); in qib_create_ctxts()
149 if (!rcd) { in qib_create_ctxts()
152 kfree(dd->rcd); in qib_create_ctxts()
153 dd->rcd = NULL; in qib_create_ctxts()
156 rcd->pkeys[0] = QIB_DEFAULT_P_KEY; in qib_create_ctxts()
157 rcd->seq_cnt = 1; in qib_create_ctxts()
169 struct qib_ctxtdata *rcd; in qib_create_ctxtdata() local
[all …]
qib_tx.c
80 int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *rcd) in qib_disarm_piobufs_ifneeded() argument
82 struct qib_devdata *dd = rcd->dd; in qib_disarm_piobufs_ifneeded()
87 last = rcd->pio_base + rcd->piocnt; in qib_disarm_piobufs_ifneeded()
93 if (rcd->user_event_mask) { in qib_disarm_piobufs_ifneeded()
98 clear_bit(_QIB_EVENT_DISARM_BUFS_BIT, &rcd->user_event_mask[0]); in qib_disarm_piobufs_ifneeded()
99 for (i = 1; i < rcd->subctxt_cnt; i++) in qib_disarm_piobufs_ifneeded()
101 &rcd->user_event_mask[i]); in qib_disarm_piobufs_ifneeded()
104 for (i = rcd->pio_base; i < last; i++) { in qib_disarm_piobufs_ifneeded()
107 dd->f_sendctrl(rcd->ppd, QIB_SENDCTRL_DISARM_BUF(i)); in qib_disarm_piobufs_ifneeded()
134 struct qib_ctxtdata *rcd; in find_ctxt() local
[all …]
qib_driver.c
279 static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail) in qib_get_egrbuf() argument
281 const u32 chunk = etail >> rcd->rcvegrbufs_perchunk_shift; in qib_get_egrbuf()
282 const u32 idx = etail & ((u32)rcd->rcvegrbufs_perchunk - 1); in qib_get_egrbuf()
284 return rcd->rcvegrbuf[chunk] + (idx << rcd->dd->rcvegrbufsize_shift); in qib_get_egrbuf()
291 static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd, in qib_rcv_hdrerr() argument
405 &rcd->qp_wait_list); in qib_rcv_hdrerr()
440 u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts) in qib_kreceive() argument
442 struct qib_devdata *dd = rcd->dd; in qib_kreceive()
443 struct qib_pportdata *ppd = rcd->ppd; in qib_kreceive()
455 l = rcd->head; in qib_kreceive()
[all …]
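
qib_get_egrbuf() splits the eager tail index into a chunk number and an offset within the chunk using a shift and a mask, the usual power-of-two div/mod trick. A standalone check of the identity:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int perchunk_shift = 5;                /* 32 buffers per chunk */
            unsigned int perchunk = 1U << perchunk_shift;
            unsigned int etail = 137;

            unsigned int chunk = etail >> perchunk_shift;   /* etail / perchunk */
            unsigned int idx = etail & (perchunk - 1);      /* etail % perchunk */

            assert(chunk * perchunk + idx == etail);
            printf("etail=%u -> chunk=%u idx=%u\n", etail, chunk, idx);
            return 0;
    }
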
qib_intr.c
191 struct qib_ctxtdata *rcd; in qib_handle_urcv() local
196 for (i = dd->first_user_ctxt; dd->rcd && i < dd->cfgctxts; i++) { in qib_handle_urcv()
199 rcd = dd->rcd[i]; in qib_handle_urcv()
200 if (!rcd || !rcd->cnt) in qib_handle_urcv()
203 if (test_and_clear_bit(QIB_CTXT_WAITING_RCV, &rcd->flag)) { in qib_handle_urcv()
204 wake_up_interruptible(&rcd->wait); in qib_handle_urcv()
205 dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_DIS, in qib_handle_urcv()
206 rcd->ctxt); in qib_handle_urcv()
208 &rcd->flag)) { in qib_handle_urcv()
209 rcd->urgent++; in qib_handle_urcv()
[all …]
qib_debugfs.c
103 if (!dd->rcd[j]) in _opcode_stats_seq_show()
105 n_packets += dd->rcd[j]->opstats->stats[i].n_packets; in _opcode_stats_seq_show()
106 n_bytes += dd->rcd[j]->opstats->stats[i].n_bytes; in _opcode_stats_seq_show()
166 if (!dd->rcd[i]) in _ctx_stats_seq_show()
169 for (j = 0; j < ARRAY_SIZE(dd->rcd[i]->opstats->stats); j++) in _ctx_stats_seq_show()
170 n_packets += dd->rcd[i]->opstats->stats[j].n_packets; in _ctx_stats_seq_show()
/drivers/acpi/apei/
erst.c
977 struct cper_pstore_record *rcd; in erst_reader() local
978 size_t rcd_len = sizeof(*rcd) + erst_info.bufsize; in erst_reader()
983 rcd = kmalloc(rcd_len, GFP_KERNEL); in erst_reader()
984 if (!rcd) { in erst_reader()
999 len = erst_read(record_id, &rcd->hdr, rcd_len); in erst_reader()
1003 else if (len < 0 || len < sizeof(*rcd)) { in erst_reader()
1007 if (!guid_equal(&rcd->hdr.creator_id, &CPER_CREATOR_PSTORE)) in erst_reader()
1015 memcpy(record->buf, rcd->data, len - sizeof(*rcd)); in erst_reader()
1019 if (guid_equal(&rcd->sec_hdr.section_type, &CPER_SECTION_TYPE_DMESG_Z)) { in erst_reader()
1022 } else if (guid_equal(&rcd->sec_hdr.section_type, &CPER_SECTION_TYPE_DMESG)) in erst_reader()
[all …]
/drivers/net/vmxnet3/
vmxnet3_drv.c
302 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \ argument
304 vmxnet3_RxCompToCPU((rcd), (tmp)); \
317 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd) argument
665 vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd, in vmxnet3_append_frag() argument
674 skb_frag_size_set(frag, rcd->len); in vmxnet3_append_frag()
675 skb->data_len += rcd->len; in vmxnet3_append_frag()
1240 if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) { in vmxnet3_rx_csum()
1241 if (gdesc->rcd.v4 && in vmxnet3_rx_csum()
1249 WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) && in vmxnet3_rx_csum()
1252 WARN_ON_ONCE(gdesc->rcd.frg && in vmxnet3_rx_csum()
[all …]
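
vmxnet3_rx_csum() trusts the receive checksum only when the completion descriptor says the hardware actually computed one (cnc clear) and the packet is a non-fragmented TCP/UDP datagram whose checksums validated. A loose, hypothetical condensation of that idea; the bitfield names mirror the rcd fields above, but this is not the driver's exact rule:

    #include <stdio.h>

    struct rx_comp {
            unsigned int cnc:1, v4:1, v6:1, tcp:1, udp:1, frg:1, ipc:1, tuc:1;
    };

    /* return 1 when the skb could be marked CHECKSUM_UNNECESSARY */
    static int rx_csum_ok(const struct rx_comp *rcd, int rxcsum_enabled)
    {
            if (rcd->cnc || !rxcsum_enabled)
                    return 0;       /* hardware did not checksum this packet */
            if (rcd->frg)
                    return 0;       /* fragment: transport checksum not checkable */
            if (rcd->v4 && (rcd->tcp || rcd->udp))
                    return rcd->ipc && rcd->tuc;
            if (rcd->v6 && (rcd->tcp || rcd->udp))
                    return rcd->tuc;        /* IPv6 has no header checksum */
            return 0;
    }

    int main(void)
    {
            struct rx_comp good = { .v4 = 1, .tcp = 1, .ipc = 1, .tuc = 1 };
            struct rx_comp frag = { .v4 = 1, .tcp = 1, .frg = 1, .ipc = 1 };

            printf("good=%d frag=%d\n", rx_csum_ok(&good, 1), rx_csum_ok(&frag, 1));
            return 0;   /* prints "good=1 frag=0" */
    }
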
/drivers/cpufreq/
sa1110-cpufreq.c
130 static inline void set_mdcas(u_int *mdcas, int delayed, u_int rcd) in set_mdcas() argument
134 rcd = 2 * rcd - 1; in set_mdcas()
135 shift = delayed + 1 + rcd; in set_mdcas()
137 mdcas[0] = (1 << rcd) - 1; in set_mdcas()
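
set_mdcas() converts the SDRAM tRCD from whole memory-clock cycles into half cycles (2 * rcd - 1) before building the MDCAS waveform. A worked-arithmetic sketch of just the lines shown above, assuming rcd arrives as 2 cycles; reading the mask as the initial waveform bits is an interpretation, since only these lines of set_mdcas() appear in the hit:

    #include <stdio.h>

    int main(void)
    {
            unsigned int rcd = 2, delayed = 0;

            unsigned int half = 2 * rcd - 1;                /* 3 half cycles */
            unsigned int shift = delayed + 1 + half;        /* waveform resumes at bit 4 */
            unsigned int mask0 = (1U << half) - 1;          /* 0b0111 */

            printf("half=%u shift=%u mdcas[0]=%#x\n", half, shift, mask0);
            return 0;
    }
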
