/drivers/net/wireless/intel/iwlwifi/mvm/ |
D | phy-ctxt.c |
     69  static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt *ctxt,  in iwl_mvm_phy_ctxt_cmd_hdr() argument
     73  cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(ctxt->id,  in iwl_mvm_phy_ctxt_cmd_hdr()
     74  ctxt->color));  in iwl_mvm_phy_ctxt_cmd_hdr()
     79  struct iwl_mvm_phy_ctxt *ctxt,  in iwl_mvm_phy_ctxt_set_rxchain() argument
     97  if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm, ctxt)) {  in iwl_mvm_phy_ctxt_set_rxchain()
    117  struct iwl_mvm_phy_ctxt *ctxt,  in iwl_mvm_phy_ctxt_cmd_data_v1() argument
    128  iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &tail->rxchain_info,  in iwl_mvm_phy_ctxt_cmd_data_v1()
    138  struct iwl_mvm_phy_ctxt *ctxt,  in iwl_mvm_phy_ctxt_cmd_data() argument
    149  iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd->rxchain_info,  in iwl_mvm_phy_ctxt_cmd_data()
    160  struct iwl_mvm_phy_ctxt *ctxt,  in iwl_mvm_phy_ctxt_apply() argument
    [all …]
|
D | Makefile | 3 iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
|
/drivers/infiniband/hw/hfi1/ |
D | trace_ctxts.h |
     25  __field(unsigned int, ctxt)
     37  __entry->ctxt = uctxt->ctxt;
     50  __entry->ctxt,
     66  TP_PROTO(struct hfi1_devdata *dd, unsigned int ctxt,
     69  TP_ARGS(dd, ctxt, subctxt, cinfo),
     71  __field(unsigned int, ctxt)
     80  __entry->ctxt = ctxt;
     90  __entry->ctxt,
    100  const char *hfi1_trace_print_rsm_hist(struct trace_seq *p, unsigned int ctxt);
    102  TP_PROTO(unsigned int ctxt),
    [all …]
|
D | trace_rx.h |
     29  __field(u32, ctxt)
     38  __entry->ctxt = packet->rcd->ctxt;
     48  __entry->ctxt,
     62  __field(u32, ctxt)
     67  __entry->ctxt = rcd->ctxt;
     73  __entry->ctxt,
     80  TP_PROTO(unsigned int ctxt, u16 subctxt, const char *type,
     82  TP_ARGS(ctxt, subctxt, type, start, end),
     84  __field(unsigned int, ctxt)
     91  __entry->ctxt = ctxt;
    [all …]
|
D | trace_tx.h |
    170  TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt),
    171  TP_ARGS(dd, ctxt, subctxt),
    173  __field(u16, ctxt)
    177  __entry->ctxt = ctxt;
    182  __entry->ctxt,
    188  TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
    190  TP_ARGS(dd, ctxt, subctxt, comp_idx),
    192  __field(u16, ctxt)
    197  __entry->ctxt = ctxt;
    203  __entry->ctxt,
    [all …]
|
D | init.c |
    110  if (rcd->ctxt == HFI1_CTRL_CTXT)  in hfi1_create_kctxt()
    178  rcd->dd->rcd[rcd->ctxt] = NULL;  in hfi1_rcd_free()
    228  u16 ctxt;  in allocate_rcd_index() local
    231  for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++)  in allocate_rcd_index()
    232  if (!dd->rcd[ctxt])  in allocate_rcd_index()
    235  if (ctxt < dd->num_rcv_contexts) {  in allocate_rcd_index()
    236  rcd->ctxt = ctxt;  in allocate_rcd_index()
    237  dd->rcd[ctxt] = rcd;  in allocate_rcd_index()
    242  if (ctxt >= dd->num_rcv_contexts)  in allocate_rcd_index()
    245  *index = ctxt;  in allocate_rcd_index()
    [all …]
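The allocate_rcd_index() hits above show the usual free-slot scan: walk the dd->rcd[] array until an empty slot is found, claim it for the new receive context, and hand the index back to the caller. A minimal stand-alone sketch of that pattern, with simplified types and names of my own rather than the driver's:

    #include <stdio.h>

    #define NUM_RCV_CONTEXTS 8

    /* Stand-in for struct hfi1_ctxtdata; the real one is far larger. */
    struct ctxtdata {
        unsigned int ctxt;      /* index of the slot this context occupies */
    };

    static struct ctxtdata *rcd_table[NUM_RCV_CONTEXTS];

    /* Scan for the first free slot, claim it, and report its index.
     * Returns 0 on success, -1 when every slot is already in use. */
    static int allocate_rcd_index(struct ctxtdata *rcd, unsigned int *index)
    {
        unsigned int ctxt;

        for (ctxt = 0; ctxt < NUM_RCV_CONTEXTS; ctxt++)
            if (!rcd_table[ctxt])
                break;

        if (ctxt >= NUM_RCV_CONTEXTS)
            return -1;          /* no free receive context */

        rcd->ctxt = ctxt;
        rcd_table[ctxt] = rcd;
        *index = ctxt;
        return 0;
    }

    int main(void)
    {
        struct ctxtdata a = { 0 }, b = { 0 };
        unsigned int idx;

        if (!allocate_rcd_index(&a, &idx))
            printf("first context got slot %u\n", idx);   /* slot 0 */
        if (!allocate_rcd_index(&b, &idx))
            printf("second context got slot %u\n", idx);  /* slot 1 */
        return 0;
    }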
|
D | file_ops.c |
    131  #define HFI1_MMAP_TOKEN(type, ctxt, subctxt, addr) \  argument
    134  HFI1_MMAP_TOKEN_SET(CTXT, ctxt) | \
    282  trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim);  in hfi1_write_iter()
    321  u16 ctxt;  in hfi1_file_mmap() local
    329  ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token);  in hfi1_file_mmap()
    332  if (ctxt != uctxt->ctxt || subctxt != fd->subctxt) {  in hfi1_file_mmap()
    435  + (uctxt->ctxt * RXE_PER_CONTEXT_SIZE);  in hfi1_file_mmap()
    525  uctxt->ctxt, fd->subctxt,  in hfi1_file_mmap()
    534  ctxt, subctxt, type, mapio, vmf, memaddr, memlen,  in hfi1_file_mmap()
    610  hfi1_cdbg(PROC, "closing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);  in hfi1_file_close()
    [all …]
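The file_ops.c hits sketch how per-context mappings are multiplexed through a single mmap(): HFI1_MMAP_TOKEN packs the mapping type, context and subcontext into the offset token, and hfi1_file_mmap() later pulls them back out with HFI1_MMAP_TOKEN_GET and rejects tokens that do not match the caller's own context. The real field widths live in the driver's headers; the sketch below uses invented widths (and drops the addr field) purely to illustrate the pack/unpack idiom.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical layout: type above bit 24, 12-bit ctxt, 8-bit subctxt.
     * The real HFI1_MMAP_TOKEN layout differs; this only shows the idiom. */
    #define TOKEN_TYPE_SHIFT    24
    #define TOKEN_CTXT_SHIFT    12
    #define TOKEN_SUBCTXT_SHIFT  4

    static uint32_t mmap_token(uint32_t type, uint32_t ctxt, uint32_t subctxt)
    {
        return (type << TOKEN_TYPE_SHIFT) |
               (ctxt << TOKEN_CTXT_SHIFT) |
               (subctxt << TOKEN_SUBCTXT_SHIFT);
    }

    static uint32_t token_ctxt(uint32_t token)
    {
        return (token >> TOKEN_CTXT_SHIFT) & 0xfff;
    }

    static uint32_t token_subctxt(uint32_t token)
    {
        return (token >> TOKEN_SUBCTXT_SHIFT) & 0xff;
    }

    int main(void)
    {
        uint32_t token = mmap_token(2, 5, 1);

        /* mmap() handler side: decode and validate against the caller. */
        if (token_ctxt(token) != 5 || token_subctxt(token) != 1)
            fprintf(stderr, "token does not belong to this context\n");
        else
            printf("token ok: ctxt %u subctxt %u\n",
                   token_ctxt(token), token_subctxt(token));
        return 0;
    }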
|
D | netdev_rx.c |
     59  struct hfi1_ctxtdata **ctxt)  in hfi1_netdev_allocate_ctxt() argument
     85  dd_dev_info(dd, "created netdev context %d\n", uctxt->ctxt);  in hfi1_netdev_allocate_ctxt()
     86  *ctxt = uctxt;  in hfi1_netdev_allocate_ctxt()
    122  struct hfi1_ctxtdata **ctxt)  in hfi1_netdev_allot_ctxt() argument
    127  rc = hfi1_netdev_allocate_ctxt(dd, ctxt);  in hfi1_netdev_allot_ctxt()
    133  rc = hfi1_netdev_setup_ctxt(rx, *ctxt);  in hfi1_netdev_allot_ctxt()
    136  hfi1_netdev_deallocate_ctxt(dd, *ctxt);  in hfi1_netdev_allot_ctxt()
    137  *ctxt = NULL;  in hfi1_netdev_allot_ctxt()
    213  i, rxq->rcd->ctxt);  in hfi1_netdev_rxq_init()
    271  rxq->rcd->ctxt);  in enable_queues()
    [all …]
|
D | trace_misc.h |
     75  __field(u32, ctxt)
     83  __entry->ctxt = packet->rcd->ctxt;
     92  __entry->ctxt,
|
D | chip.h |
    583  static inline u64 read_kctxt_csr(const struct hfi1_devdata *dd, int ctxt,  in read_kctxt_csr() argument
    587  return read_csr(dd, offset0 + (0x100 * ctxt));  in read_kctxt_csr()
    590  static inline void write_kctxt_csr(struct hfi1_devdata *dd, int ctxt,  in write_kctxt_csr() argument
    594  write_csr(dd, offset0 + (0x100 * ctxt), value);  in write_kctxt_csr()
    606  int ctxt,  in get_kctxt_csr_addr() argument
    609  return get_csr_addr(dd, offset0 + (0x100 * ctxt));  in get_kctxt_csr_addr()
    618  static inline u64 read_uctxt_csr(const struct hfi1_devdata *dd, int ctxt,  in read_uctxt_csr() argument
    622  return read_csr(dd, offset0 + (0x1000 * ctxt));  in read_uctxt_csr()
    625  static inline void write_uctxt_csr(struct hfi1_devdata *dd, int ctxt,  in write_uctxt_csr() argument
    629  write_csr(dd, offset0 + (0x1000 * ctxt), value);  in write_uctxt_csr()
    [all …]
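The chip.h accessors make the per-context register layout explicit: a context's copy of a CSR is reached by adding a fixed stride (0x100 for kernel contexts, 0x1000 for user contexts) times the context number to the register's base offset. A small sketch of that address arithmetic; the base offsets used in main() are invented for the example, not real HFI1 registers:

    #include <stdint.h>
    #include <stdio.h>

    #define KCTXT_STRIDE  0x100ULL   /* spacing between kernel-context CSR copies */
    #define UCTXT_STRIDE  0x1000ULL  /* spacing between user-context CSR copies */

    /* Per-context CSR address = register base offset + stride * context number. */
    static uint64_t kctxt_csr_addr(uint64_t offset0, unsigned int ctxt)
    {
        return offset0 + KCTXT_STRIDE * ctxt;
    }

    static uint64_t uctxt_csr_addr(uint64_t offset0, unsigned int ctxt)
    {
        return offset0 + UCTXT_STRIDE * ctxt;
    }

    int main(void)
    {
        printf("kctxt 3: %#llx\n",
               (unsigned long long)kctxt_csr_addr(0x2000, 3));  /* 0x2300 */
        printf("uctxt 3: %#llx\n",
               (unsigned long long)uctxt_csr_addr(0x3000, 3));  /* 0x6000 */
        return 0;
    }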
|
D | user_sdma.c |
    143  pq->ctxt = uctxt->ctxt;  in hfi1_user_sdma_alloc_queues()
    164  snprintf(buf, 64, "txreq-kmem-cache-%u-%u-%u", dd->unit, uctxt->ctxt,  in hfi1_user_sdma_alloc_queues()
    173  uctxt->ctxt);  in hfi1_user_sdma_alloc_queues()
    236  trace_hfi1_sdma_user_free_queues(uctxt->dd, uctxt->ctxt, fd->subctxt);  in hfi1_user_sdma_free_queues()
    321  dd->unit, uctxt->ctxt, fd->subctxt,  in hfi1_user_sdma_process_request()
    328  dd->unit, uctxt->ctxt, fd->subctxt, ret);  in hfi1_user_sdma_process_request()
    332  trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, fd->subctxt,  in hfi1_user_sdma_process_request()
    337  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);  in hfi1_user_sdma_process_request()
    348  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx,  in hfi1_user_sdma_process_request()
    356  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);  in hfi1_user_sdma_process_request()
    [all …]
|
D | msix.c |
    141  rcd->ireg = (IS_RCVAVAIL_START + rcd->ctxt) / 64;  in msix_request_rcd_irq_common()
    142  rcd->imask = ((u64)1) << ((IS_RCVAVAIL_START + rcd->ctxt) % 64);  in msix_request_rcd_irq_common()
    144  remap_intr(rcd->dd, IS_RCVAVAIL_START + rcd->ctxt, nr);  in msix_request_rcd_irq_common()
    159  rcd->dd->unit, rcd->ctxt);  in msix_request_rcd_irq()
    175  rcd->dd->unit, rcd->ctxt);  in msix_netdev_request_rcd_irq()
|
D | netdev.h |
     71  struct hfi1_ctxtdata *hfi1_netdev_get_ctxt(struct hfi1_devdata *dd, int ctxt)  in hfi1_netdev_get_ctxt() argument
     73  return dd->netdev_rx->rxq[ctxt].rcd;  in hfi1_netdev_get_ctxt()
|
/drivers/net/ethernet/intel/ice/ |
D | ice_lib.c |
    274  struct ice_vsi_ctx *ctxt;  in ice_vsi_delete() local
    277  ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);  in ice_vsi_delete()
    278  if (!ctxt)  in ice_vsi_delete()
    282  ctxt->vf_num = vsi->vf_id;  in ice_vsi_delete()
    283  ctxt->vsi_num = vsi->vsi_num;  in ice_vsi_delete()
    285  memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));  in ice_vsi_delete()
    287  status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);  in ice_vsi_delete()
    292  kfree(ctxt);  in ice_vsi_delete()
    738  static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)  in ice_set_dflt_vsi_ctx() argument
    742  memset(&ctxt->info, 0, sizeof(ctxt->info));  in ice_set_dflt_vsi_ctx()
    [all …]
|
/drivers/scsi/be2iscsi/ |
D | be_cmds.c |
    784  void *ctxt = &req->context;  in beiscsi_cmd_cq_create() local
    798  ctxt, coalesce_wm);  in beiscsi_cmd_cq_create()
    799  AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);  in beiscsi_cmd_cq_create()
    800  AMAP_SET_BITS(struct amap_cq_context, count, ctxt,  in beiscsi_cmd_cq_create()
    802  AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);  in beiscsi_cmd_cq_create()
    803  AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);  in beiscsi_cmd_cq_create()
    804  AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);  in beiscsi_cmd_cq_create()
    805  AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);  in beiscsi_cmd_cq_create()
    806  AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);  in beiscsi_cmd_cq_create()
    807  AMAP_SET_BITS(struct amap_cq_context, func, ctxt,  in beiscsi_cmd_cq_create()
    [all …]
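The be2iscsi hits show the CQ-create command being built by poking individual fields (valid, eventable, eqid, armed, …) into an opaque context blob with AMAP_SET_BITS before the mailbox command is issued; the real macros take the field position from the amap_cq_context layout. The helper below is only my simplified illustration of that idea, writing a field of a given width at a given bit offset in a little-endian dword buffer; the offsets and widths in main() are invented:

    #include <stdint.h>
    #include <stdio.h>

    /* Write 'width' bits of 'value' at absolute bit offset 'bit' inside a
     * dword array.  Assumes the field does not straddle a dword boundary. */
    static void set_bits_le32(uint32_t *ctxt, unsigned int bit,
                              unsigned int width, uint32_t value)
    {
        unsigned int dword = bit / 32;
        unsigned int shift = bit % 32;
        uint32_t mask = (width == 32) ? 0xffffffffu : ((1u << width) - 1);

        ctxt[dword] &= ~(mask << shift);
        ctxt[dword] |= (value & mask) << shift;
    }

    int main(void)
    {
        uint32_t ctxt[4] = { 0 };

        /* Invented positions: "valid" at bit 31, "eqid" in bits 0-9. */
        set_bits_le32(ctxt, 31, 1, 1);
        set_bits_le32(ctxt, 0, 10, 0x42);

        printf("dword0 = %#010x\n", ctxt[0]);   /* 0x80000042 */
        return 0;
    }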
|
/drivers/infiniband/hw/qib/ |
D | qib_file_ops.c |
    193  kinfo->spi_uregbase = (u64) dd->uregbase + dd->ureg_align * rcd->ctxt;  in qib_get_base_info()
    242  kinfo->spi_ctxt = rcd->ctxt;  in qib_get_base_info()
    308  ctxttid = rcd->ctxt * dd->rcvtidcnt;  in qib_tid_update()
    503  ctxttid = rcd->ctxt * dd->rcvtidcnt;  in qib_tid_free()
    675  dd->f_rcvctrl(rcd->ppd, rcvctrl_op, rcd->ctxt);  in qib_manage_rcvq()
    745  what, rcd->ctxt, pfn, len, ret);  in qib_mmap_mem()
   1022  ureg = dd->uregbase + dd->ureg_align * rcd->ctxt;  in qib_mmapf()
   1115  dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt);  in qib_poll_next()
   1284  static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,  in setup_ctxt() argument
   1300  rcd = qib_create_ctxtdata(ppd, ctxt, numa_id);  in setup_ctxt()
    [all …]
|
D | qib_tx.c |
    135  unsigned ctxt;  in find_ctxt() local
    139  for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {  in find_ctxt()
    140  rcd = dd->rcd[ctxt];  in find_ctxt()
    459  unsigned ctxt;  in qib_cancel_sends() local
    471  for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {  in qib_cancel_sends()
    473  rcd = dd->rcd[ctxt];  in qib_cancel_sends()
|
D | qib_init.c |
    165  struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt,  in qib_create_ctxtdata() argument
    178  rcd->ctxt = ctxt;  in qib_create_ctxtdata()
    179  dd->rcd[ctxt] = rcd;  in qib_create_ctxtdata()
    181  if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */  in qib_create_ctxtdata()
   1293  int ctxt;  in cleanup_device_data() local
   1331  for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) {  in cleanup_device_data()
   1332  int ctxt_tidbase = ctxt * dd->rcvtidcnt;  in cleanup_device_data()
   1362  for (ctxt = 0; tmp && ctxt < dd->ctxtcnt; ctxt++) {  in cleanup_device_data()
   1363  struct qib_ctxtdata *rcd = tmp[ctxt];  in cleanup_device_data()
   1365  tmp[ctxt] = NULL; /* debugging paranoia */  in cleanup_device_data()
    [all …]
|
D | qib_iba6120.c |
    307  enum qib_ureg regno, int ctxt)  in qib_read_ureg32() argument
    315  dd->ureg_align * ctxt));  in qib_read_ureg32()
    320  dd->ureg_align * ctxt));  in qib_read_ureg32()
    333  enum qib_ureg regno, u64 value, int ctxt)  in qib_write_ureg() argument
    340  dd->ureg_align * ctxt);  in qib_write_ureg()
    345  dd->ureg_align * ctxt);  in qib_write_ureg()
    383  const u16 regno, unsigned ctxt,  in qib_write_kreg_ctxt() argument
    386  qib_write_kreg(dd, regno + ctxt, value);  in qib_write_kreg_ctxt()
   1947  u32 ctxt;  in qib_6120_clear_tids() local
   1953  ctxt = rcd->ctxt;  in qib_6120_clear_tids()
    [all …]
|
D | qib_iba7220.c |
    231  enum qib_ureg regno, int ctxt)  in qib_read_ureg32() argument
    239  dd->ureg_align * ctxt));  in qib_read_ureg32()
    244  dd->ureg_align * ctxt));  in qib_read_ureg32()
    257  enum qib_ureg regno, u64 value, int ctxt)  in qib_write_ureg() argument
    264  dd->ureg_align * ctxt);  in qib_write_ureg()
    269  dd->ureg_align * ctxt);  in qib_write_ureg()
    283  const u16 regno, unsigned ctxt,  in qib_write_kreg_ctxt() argument
    286  qib_write_kreg(dd, regno + ctxt, value);  in qib_write_kreg_ctxt()
   2195  u32 ctxt;  in qib_7220_clear_tids() local
   2201  ctxt = rcd->ctxt;  in qib_7220_clear_tids()
    [all …]
|
/drivers/net/ethernet/intel/i40e/ |
D | i40e_main.c |
   1913  struct i40e_vsi_context *ctxt,  in i40e_vsi_setup_queue_map_mqprio() argument
   1962  ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);  in i40e_vsi_setup_queue_map_mqprio()
   1963  ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);  in i40e_vsi_setup_queue_map_mqprio()
   1964  ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);  in i40e_vsi_setup_queue_map_mqprio()
   1965  ctxt->info.valid_sections |= cpu_to_le16(sections);  in i40e_vsi_setup_queue_map_mqprio()
   2001  struct i40e_vsi_context *ctxt,  in i40e_vsi_setup_queue_map() argument
   2018  memset(ctxt->info.queue_mapping, 0, sizeof(ctxt->info.queue_mapping));  in i40e_vsi_setup_queue_map()
   2120  ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);  in i40e_vsi_setup_queue_map()
   2132  ctxt->info.up_enable_bits = enabled_tc;  in i40e_vsi_setup_queue_map()
   2135  ctxt->info.mapping_flags |=  in i40e_vsi_setup_queue_map()
    [all …]
|
D | i40e_client.c |
    675  struct i40e_vsi_context ctxt;  in i40e_client_update_vsi_ctxt() local
    683  ctxt.seid = pf->main_vsi_seid;  in i40e_client_update_vsi_ctxt()
    684  ctxt.pf_num = pf->hw.pf_id;  in i40e_client_update_vsi_ctxt()
    685  err = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);  in i40e_client_update_vsi_ctxt()
    686  ctxt.flags = I40E_AQ_VSI_TYPE_PF;  in i40e_client_update_vsi_ctxt()
    698  ctxt.info.valid_sections =  in i40e_client_update_vsi_ctxt()
    700  ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;  in i40e_client_update_vsi_ctxt()
    703  ctxt.info.valid_sections =  in i40e_client_update_vsi_ctxt()
    705  ctxt.info.queueing_opt_flags &= ~I40E_AQ_VSI_QUE_OPT_TCP_ENA;  in i40e_client_update_vsi_ctxt()
    714  err = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);  in i40e_client_update_vsi_ctxt()
|
/drivers/hwtracing/coresight/ |
D | coresight-etm-perf.c |
    429  struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt);  in etm_event_start() local
    430  struct perf_output_handle *handle = &ctxt->handle;  in etm_event_start()
    438  if (WARN_ON(ctxt->event_data))  in etm_event_start()
    482  ctxt->event_data = event_data;  in etm_event_start()
    507  struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt);  in etm_event_stop() local
    508  struct perf_output_handle *handle = &ctxt->handle;  in etm_event_stop()
    517  WARN_ON(perf_get_aux(handle) != ctxt->event_data))  in etm_event_stop()
    520  event_data = ctxt->event_data;  in etm_event_stop()
    522  ctxt->event_data = NULL;  in etm_event_stop()
|
/drivers/net/ethernet/emulex/benet/ |
D | be_cmds.c |
   1164  void *ctxt;  in be_cmd_cq_create() local
   1172  ctxt = &req->context;  in be_cmd_cq_create()
   1181  AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,  in be_cmd_cq_create()
   1184  ctxt, no_delay);  in be_cmd_cq_create()
   1185  AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,  in be_cmd_cq_create()
   1187  AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);  in be_cmd_cq_create()
   1188  AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);  in be_cmd_cq_create()
   1189  AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);  in be_cmd_cq_create()
   1199  ctxt, coalesce_wm);  in be_cmd_cq_create()
   1200  AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,  in be_cmd_cq_create()
    [all …]
|
/drivers/net/wwan/iosm/ |
D | iosm_ipc_wwan.c |
    176  static int ipc_wwan_newlink(void *ctxt, struct net_device *dev,  in ipc_wwan_newlink() argument
    179  struct iosm_wwan *ipc_wwan = ctxt;  in ipc_wwan_newlink()
    214  static void ipc_wwan_dellink(void *ctxt, struct net_device *dev,  in ipc_wwan_dellink() argument
    218  struct iosm_wwan *ipc_wwan = ctxt;  in ipc_wwan_dellink()
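The iosm hits show the opaque-cookie idiom: the newlink/dellink callbacks receive the driver's private pointer back as a bare void *ctxt and immediately cast it to the owning struct iosm_wwan. A minimal self-contained illustration of that callback pattern; the struct, callback and function names here are invented for the example:

    #include <stdio.h>

    /* Stand-in for the driver's private state (e.g. struct iosm_wwan). */
    struct my_wwan {
        const char *name;
        int links;
    };

    /* The framework stores only a void * and passes it back verbatim. */
    typedef int (*newlink_cb)(void *ctxt, int if_id);

    static int my_newlink(void *ctxt, int if_id)
    {
        /* First step in the callback: recover the typed context. */
        struct my_wwan *wwan = ctxt;

        wwan->links++;
        printf("%s: created link for interface %d (total %d)\n",
               wwan->name, if_id, wwan->links);
        return 0;
    }

    /* Simplified "framework" side: it never looks inside ctxt. */
    static int framework_create_link(newlink_cb cb, void *ctxt, int if_id)
    {
        return cb(ctxt, if_id);
    }

    int main(void)
    {
        struct my_wwan wwan = { .name = "wwan0", .links = 0 };

        return framework_create_link(my_newlink, &wwan, 1);
    }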
|