
Searched refs:qe (Results 1 – 25 of 45) sorted by relevance

/drivers/soc/fsl/qe/
qe.c
71 struct device_node *qe; in get_qe_base() local
78 qe = of_find_compatible_node(NULL, NULL, "fsl,qe"); in get_qe_base()
79 if (!qe) { in get_qe_base()
80 qe = of_find_node_by_type(NULL, "qe"); in get_qe_base()
81 if (!qe) in get_qe_base()
85 ret = of_address_to_resource(qe, 0, &res); in get_qe_base()
88 of_node_put(qe); in get_qe_base()
169 struct device_node *qe; in qe_get_brg_clk() local
177 qe = of_find_compatible_node(NULL, NULL, "fsl,qe"); in qe_get_brg_clk()
178 if (!qe) { in qe_get_brg_clk()
[all …]
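
Read together, the qe.c hits show the usual device-tree probe shape: look the node up by its compatible string, fall back to the legacy device_type for older trees, translate the first reg entry to a resource, and drop the node reference. A minimal sketch of that pattern, not the driver's exact code (the _sketch name and the 0 error return are illustrative):

#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_address.h>

static phys_addr_t get_qe_base_sketch(void)
{
        struct device_node *qe;
        struct resource res;
        int ret;

        /* preferred binding first ... */
        qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
        if (!qe) {
                /* ... then the legacy device_type fallback */
                qe = of_find_node_by_type(NULL, "qe");
                if (!qe)
                        return 0;
        }

        ret = of_address_to_resource(qe, 0, &res);
        of_node_put(qe);        /* balance the reference the find took */

        return ret ? 0 : res.start;
}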
Makefile
5 obj-$(CONFIG_QUICC_ENGINE) += qe.o qe_common.o qe_ic.o qe_io.o
/drivers/net/ethernet/chelsio/cxgb4/
sched.c
82 struct sched_queue_entry *qe; in t4_sched_bind_unbind_op() local
84 qe = (struct sched_queue_entry *)arg; in t4_sched_bind_unbind_op()
92 fw_class = bind ? qe->param.class : FW_SCHED_CLS_NONE; in t4_sched_bind_unbind_op()
93 fw_param = (fw_mnem | FW_PARAMS_PARAM_YZ_V(qe->cntxt_id)); in t4_sched_bind_unbind_op()
122 struct sched_queue_entry *qe; in t4_sched_queue_lookup() local
128 list_for_each_entry(qe, &e->queue_list, list) { in t4_sched_queue_lookup()
129 if (qe->cntxt_id == qid) { in t4_sched_queue_lookup()
149 struct sched_queue_entry *qe = NULL; in t4_sched_queue_unbind() local
167 list_for_each_entry(qe, &e->queue_list, list) { in t4_sched_queue_unbind()
172 err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, in t4_sched_queue_unbind()
[all …]
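
The sched.c hits outline cxgb4's queue-to-class binding: each bound queue is a sched_queue_entry hung off a class's queue_list, and lookup walks that list keyed on the context id. A hedged reconstruction, with both structs trimmed to the fields the hits show and the _sketch name invented here:

#include <linux/list.h>

struct sched_queue_entry {
        struct list_head list;          /* linked into sched_class::queue_list */
        unsigned int cntxt_id;          /* queue context id, the lookup key */
};

struct sched_class {
        struct list_head queue_list;
};

static struct sched_queue_entry *
sched_queue_lookup_sketch(struct sched_class *e, unsigned int qid)
{
        struct sched_queue_entry *qe;

        list_for_each_entry(qe, &e->queue_list, list)
                if (qe->cntxt_id == qid)
                        return qe;      /* found the bound queue */

        return NULL;
}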
/drivers/atm/
firestream.c
598 static void submit_qentry (struct fs_dev *dev, struct queue *q, struct FS_QENTRY *qe) in submit_qentry() argument
614 if (qe != cqe) { in submit_qentry()
615 fs_dprintk (FS_DEBUG_TXQ, "q mismatch! %p %p\n", qe, cqe); in submit_qentry()
645 struct FS_QENTRY *qe; in submit_queue() local
647 qe = get_qentry (dev, q); in submit_queue()
648 qe->cmd = cmd; in submit_queue()
649 qe->p0 = p1; in submit_queue()
650 qe->p1 = p2; in submit_queue()
651 qe->p2 = p3; in submit_queue()
652 submit_qentry (dev, q, qe); in submit_queue()
[all …]
firestream.h
65 #define STATUS_CODE(qe) ((qe->cmd >> 22) & 0x3f) argument
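
firestream uses one four-word command-queue entry throughout: submit_queue() grabs the next FS_QENTRY, fills cmd and three parameters, and pushes it, while STATUS_CODE() later extracts the 6-bit status field from bits 27:22 of the command word. A sketch of the fill half (struct trimmed to the fields the hits show; the _sketch names are illustrative):

#include <linux/types.h>

struct FS_QENTRY_sketch {
        u32 cmd;
        u32 p0, p1, p2;
};

static void submit_queue_sketch(struct FS_QENTRY_sketch *qe,
                                u32 cmd, u32 p1, u32 p2, u32 p3)
{
        qe->cmd = cmd;
        qe->p0 = p1;    /* note the shifted naming: parameter pN lands */
        qe->p1 = p2;    /* in field p(N-1), exactly as the hits show */
        qe->p2 = p3;
}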
/drivers/net/ethernet/sun/
sunqe.c
836 struct sunqe *qe; in qec_ether_init() local
848 qe = netdev_priv(dev); in qec_ether_init()
855 qe->channel = i; in qec_ether_init()
856 spin_lock_init(&qe->lock); in qec_ether_init()
862 qecp->qes[qe->channel] = qe; in qec_ether_init()
863 qe->dev = dev; in qec_ether_init()
864 qe->parent = qecp; in qec_ether_init()
865 qe->op = op; in qec_ether_init()
868 qe->qcregs = of_ioremap(&op->resource[0], 0, in qec_ether_init()
870 if (!qe->qcregs) { in qec_ether_init()
[all …]
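
sunqe.c creates one net_device per QEC channel; the per-channel sunqe private struct lives in the netdev's private area and is cross-linked with its owning netdev and parent controller. A minimal sketch of that wiring (struct trimmed to the fields the hits show; parent/QEC bookkeeping and register mapping omitted):

#include <linux/etherdevice.h>
#include <linux/spinlock.h>

struct sunqe_sketch {
        spinlock_t lock;
        int channel;
        struct net_device *dev;         /* back-pointer to the owning netdev */
};

static struct net_device *qe_channel_init_sketch(int channel)
{
        struct net_device *dev;
        struct sunqe_sketch *qe;

        dev = alloc_etherdev(sizeof(*qe));      /* private area follows the netdev */
        if (!dev)
                return NULL;

        qe = netdev_priv(dev);
        qe->channel = channel;
        spin_lock_init(&qe->lock);
        qe->dev = dev;

        return dev;
}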
/drivers/net/ethernet/brocade/bna/
bna_tx_rx.c
360 list_for_each_entry(mac, &rxf->mcast_active_q, qe) in bna_rxf_mcmac_get()
364 list_for_each_entry(mac, &rxf->mcast_pending_del_q, qe) in bna_rxf_mcmac_get()
376 list_for_each_entry(mchandle, &rxf->mcast_handle_q, qe) in bna_rxf_mchandle_get()
395 list_add_tail(&mchandle->qe, &rxf->mcast_handle_q); in bna_rxf_mchandle_attach()
418 list_del(&mchandle->qe); in bna_rxf_mcast_del()
435 struct bna_mac, qe); in bna_rxf_mcast_cfg_apply()
437 list_move_tail(&mac->qe, bna_mcam_mod_del_q(rxf->rx->bna)); in bna_rxf_mcast_cfg_apply()
445 struct bna_mac, qe); in bna_rxf_mcast_cfg_apply()
446 list_move_tail(&mac->qe, &rxf->mcast_active_q); in bna_rxf_mcast_cfg_apply()
483 struct bna_mac, qe); in bna_rxf_mcast_cfg_reset()
[all …]
bna_types.h
264 struct list_head qe; member
448 struct list_head qe; member
467 struct list_head qe; member
571 struct list_head qe; member
695 struct list_head qe; member
782 struct list_head qe; member
874 struct list_head qe; member
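
bna_types.h explains most of the brocade hits: nearly every object embeds a struct list_head member named qe, and the driver moves objects between per-state queues (pending, active, del, free) through that member. A sketch of the idiom, with the struct trimmed to what the hits show and the _sketch names invented here:

#include <linux/types.h>
#include <linux/list.h>

struct bna_mac_sketch {
        struct list_head qe;    /* queue element: links this MAC into a queue */
        u8 addr[6];
};

static void mac_apply_sketch(struct list_head *pending_q,
                             struct list_head *active_q)
{
        struct bna_mac_sketch *mac;

        mac = list_first_entry_or_null(pending_q, struct bna_mac_sketch, qe);
        if (mac)
                list_move_tail(&mac->qe, active_q);     /* pending -> active */
}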
bfa_msgq.c
70 struct bfa_msgq_cmd_entry, qe); in cmdq_sm_stopped_entry()
71 list_del(&cmdq_ent->qe); in cmdq_sm_stopped_entry()
247 struct bfa_msgq_cmd_entry, qe); in bfa_msgq_cmdq_ci_update()
250 list_del(&cmd->qe); in bfa_msgq_cmdq_ci_update()
640 list_add_tail(&cmd->qe, &msgq->cmdq.pending_q); in bfa_msgq_cmd_post()
bna_enet.c
1796 list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q); in bna_ucam_mod_init()
1801 list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->del_q); in bna_ucam_mod_init()
1823 list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q); in bna_mcam_mod_init()
1830 list_add_tail(&mcam_mod->mchandle[i].qe, in bna_mcam_mod_init()
1836 list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->del_q); in bna_mcam_mod_init()
2062 mac = list_first_entry_or_null(head, struct bna_mac, qe); in bna_cam_mod_mac_get()
2064 list_del(&mac->qe); in bna_cam_mod_mac_get()
2075 struct bna_mcam_handle, qe); in bna_mcam_mod_handle_get()
2077 list_del(&handle->qe); in bna_mcam_mod_handle_get()
2086 list_add_tail(&handle->qe, &mcam_mod->free_handle_q); in bna_mcam_mod_handle_put()
bna.h
215 list_for_each_entry(__tx, &__tx_mod->tx_active_q, qe) { \
228 list_for_each_entry(__rx, &__rx_mod->rx_active_q, qe) { \
250 list_for_each_entry(mac, q, qe) in bna_mac_find()
/drivers/scsi/arm/
msgqueue.c
58 msgq->qe = NULL; in msgqueue_initialise()
85 struct msgqueue_entry *mq = msgq->qe; in msgqueue_msglength()
88 for (mq = msgq->qe; mq; mq = mq->next) in msgqueue_msglength()
105 for (mq = msgq->qe; mq && msgno; mq = mq->next, msgno--); in msgqueue_getmsg()
136 mqp = &msgq->qe; in msgqueue_addmsg()
155 for (mq = msgq->qe; mq; mq = mqnext) { in msgqueue_flush()
159 msgq->qe = NULL; in msgqueue_flush()
msgqueue.h
29 struct msgqueue_entry *qe; member
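
msgqueue is older and hand-rolled: here msgq->qe is the head of a singly linked chain of msgqueue_entry, walked with explicit next pointers rather than list_head. A sketch of the length walk from msgqueue_msglength() (the entry layout beyond next/length is assumed, and the _sketch names are illustrative):

struct msgqueue_entry_sketch {
        struct msgqueue_entry_sketch *next;
        int length;
};

static int msg_length_sketch(struct msgqueue_entry_sketch *head)
{
        int length = 0;

        for (; head; head = head->next)         /* walk the chain to its end */
                length += head->length;

        return length;
}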
/drivers/scsi/bfa/
bfa_fcpim.c
33 list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)
37 list_del(&(__itnim)->qe); \
94 list_del(&(__ioim)->qe); \
95 list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q); \
346 struct list_head *qe, *qen; in bfa_fcpim_iocdisable() local
351 list_for_each_safe(qe, qen, &fcpim->itnim_q) { in bfa_fcpim_iocdisable()
352 itnim = (struct bfa_itnim_s *) qe; in bfa_fcpim_iocdisable()
433 struct list_head *qe, *qen; in bfa_fcpim_port_iostats() local
438 list_for_each_safe(qe, qen, &fcpim->itnim_q) { in bfa_fcpim_port_iostats()
439 itnim = (struct bfa_itnim_s *) qe; in bfa_fcpim_port_iostats()
[all …]
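
The bfa files iterate with list_for_each_safe() and then cast the raw list_head pointer straight to the containing object, as in itnim = (struct bfa_itnim_s *) qe. That cast is only valid because qe is the first member of the struct; the modern idiom, list_for_each_entry_safe(), uses container_of() and has no such layout requirement. A sketch of the pattern as the hits show it (_sketch names invented here):

#include <linux/list.h>

struct bfa_itnim_sketch {
        struct list_head qe;    /* must stay first for the cast to be valid */
        /* ... */
};

static void iocdisable_sketch(struct list_head *itnim_q)
{
        struct list_head *qe, *qen;
        struct bfa_itnim_sketch *itnim;

        list_for_each_safe(qe, qen, itnim_q) {  /* safe: entries may be unlinked */
                itnim = (struct bfa_itnim_sketch *) qe;
                /* ... tear the itnim down, possibly list_del(&itnim->qe) ... */
        }
}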
bfa_fcs.c
958 struct list_head *qe, *qen; in bfa_fcs_fabric_notify_online() local
970 list_for_each_safe(qe, qen, &fabric->vport_q) { in bfa_fcs_fabric_notify_online()
971 vport = (struct bfa_fcs_vport_s *) qe; in bfa_fcs_fabric_notify_online()
980 struct list_head *qe, *qen; in bfa_fcs_fabric_notify_offline() local
988 list_for_each_safe(qe, qen, &fabric->vport_q) { in bfa_fcs_fabric_notify_offline()
989 vport = (struct bfa_fcs_vport_s *) qe; in bfa_fcs_fabric_notify_offline()
1014 struct list_head *qe, *qen; in bfa_fcs_fabric_stop() local
1018 list_for_each_safe(qe, qen, &fabric->vport_q) { in bfa_fcs_fabric_stop()
1019 vport = (struct bfa_fcs_vport_s *) qe; in bfa_fcs_fabric_stop()
1036 struct list_head *qe, *qen; in bfa_fcs_fabric_delete() local
[all …]
bfa_svc.c
455 list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q); in claim_fcxps_mem()
458 list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q); in claim_fcxps_mem()
536 struct list_head *qe, *qen; in bfa_fcxp_iocdisable() local
542 list_for_each_safe(qe, qen, &mod->fcxp_active_q) { in bfa_fcxp_iocdisable()
543 fcxp = (struct bfa_fcxp_s *) qe; in bfa_fcxp_iocdisable()
567 list_add_tail(&fcxp->qe, &fm->fcxp_active_q); in bfa_fcxp_get()
661 list_del(&fcxp->qe); in bfa_fcxp_put()
664 list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q); in bfa_fcxp_put()
666 list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q); in bfa_fcxp_put()
1109 list_add_tail(&wqe->qe, &mod->req_wait_q); in bfa_fcxp_req_rsp_alloc_wait()
[all …]
bfa_fcs_lport.c
198 struct list_head *qe, *qen; in bfa_fcs_lport_sm_online() local
221 list_for_each_safe(qe, qen, &port->rport_q) { in bfa_fcs_lport_sm_online()
222 rport = (struct bfa_fcs_rport_s *) qe; in bfa_fcs_lport_sm_online()
237 list_for_each_safe(qe, qen, &port->rport_q) { in bfa_fcs_lport_sm_online()
238 rport = (struct bfa_fcs_rport_s *) qe; in bfa_fcs_lport_sm_online()
258 struct list_head *qe, *qen; in bfa_fcs_lport_sm_offline() local
279 list_for_each_safe(qe, qen, &port->rport_q) { in bfa_fcs_lport_sm_offline()
280 rport = (struct bfa_fcs_rport_s *) qe; in bfa_fcs_lport_sm_offline()
292 list_for_each_safe(qe, qen, &port->rport_q) { in bfa_fcs_lport_sm_offline()
293 rport = (struct bfa_fcs_rport_s *) qe; in bfa_fcs_lport_sm_offline()
[all …]
bfa.h
118 list_add_tail(&(__wqe)->qe, waitq); \
121 #define bfa_reqq_wcancel(__wqe) list_del(&(__wqe)->qe)
127 list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \
130 #define bfa_cb_dequeue(__hcb_qe) list_del(&(__hcb_qe)->qe)
136 list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \
143 list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \
442 bfa_q_qe_init(&((__qe)->hcb_qe.qe)); \
bfa_ioc.h
40 struct list_head qe; member
92 struct list_head qe; /* Queue of DMA elements */ member
102 struct list_head qe; /* Queue of KVA elements */ member
121 list_add_tail(&dm_ptr->qe, &meminfo->dma_info.qe); in bfa_mem_dma_setup()
130 list_add_tail(&kva_ptr->qe, &meminfo->kva_info.qe); in bfa_mem_kva_setup()
237 struct list_head qe; member
280 struct list_head qe; member
367 struct list_head qe; member
378 struct list_head qe; member
bfa_svc.h
39 struct list_head qe; /* queue sg page element */ member
48 struct list_head qe; /* queue sg page element */ member
164 struct list_head qe; /* fcxp queue element */ member
208 struct list_head qe; member
299 struct list_head qe; /* queue element */ member
327 struct list_head qe; /* queue element */ member
392 struct list_head qe; /* queue element */ member
bfa_fcpim.h
34 struct list_head qe; /* queue element */ member
169 struct list_head qe; /* queue elememt */ member
201 struct list_head qe; member
223 struct list_head qe; /* queue element */ member
bfa_core.c
702 struct list_head *waitq, *qe, *qen; in bfa_reqq_resume() local
706 list_for_each_safe(qe, qen, waitq) { in bfa_reqq_resume()
713 list_del(qe); in bfa_reqq_resume()
714 wqe = (struct bfa_reqq_wait_s *) qe; in bfa_reqq_resume()
1782 INIT_LIST_HEAD(&meminfo->dma_info.qe); in bfa_cfg_get_meminfo()
1783 INIT_LIST_HEAD(&meminfo->kva_info.qe); in bfa_cfg_get_meminfo()
1855 list_for_each(dm_qe, &dma_info->qe) { in bfa_attach()
1861 list_for_each(km_qe, &kva_info->qe) { in bfa_attach()
1915 struct list_head *qe; in bfa_comp_process() local
1920 list_for_each_safe(qe, qen, comp_q) { in bfa_comp_process()
[all …]
bfa_cs.h
165 bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe) in bfa_q_is_on_q_func() argument
171 if (tqe == qe) in bfa_q_is_on_q_func()
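
bfa_cs.h's bfa_q_is_on_q_func() is a linear membership test: walk the queue and compare element pointers. A hedged equivalent using the standard list walker (the original uses bfa's own queue macros, and the _sketch name is invented here):

#include <linux/list.h>

static int q_is_on_q_sketch(struct list_head *q, struct list_head *qe)
{
        struct list_head *tqe;

        list_for_each(tqe, q)
                if (tqe == qe)
                        return 1;       /* qe is linked into q */

        return 0;
}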
/drivers/soc/fsl/
Makefile
6 obj-$(CONFIG_QUICC_ENGINE) += qe/
7 obj-$(CONFIG_CPM) += qe/
/drivers/nvme/host/
rdma.c
176 static void nvme_rdma_free_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe, in nvme_rdma_free_qe() argument
179 ib_dma_unmap_single(ibdev, qe->dma, capsule_size, dir); in nvme_rdma_free_qe()
180 kfree(qe->data); in nvme_rdma_free_qe()
183 static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe, in nvme_rdma_alloc_qe() argument
186 qe->data = kzalloc(capsule_size, GFP_KERNEL); in nvme_rdma_alloc_qe()
187 if (!qe->data) in nvme_rdma_alloc_qe()
190 qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir); in nvme_rdma_alloc_qe()
191 if (ib_dma_mapping_error(ibdev, qe->dma)) { in nvme_rdma_alloc_qe()
192 kfree(qe->data); in nvme_rdma_alloc_qe()
1220 struct nvme_rdma_qe *qe = in nvme_rdma_send_done() local
[all …]
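
In nvme-rdma a qe is a queue entry that owns a DMA-mapped capsule buffer: alloc pairs kzalloc() with ib_dma_map_single() and unwinds on mapping failure, and free reverses both steps. A sketch of the alloc half, with the struct trimmed to the two fields the hits show (the real one also carries completion state) and the _sketch names invented here:

#include <linux/slab.h>
#include <rdma/ib_verbs.h>

struct nvme_rdma_qe_sketch {
        void *data;     /* capsule buffer */
        u64 dma;        /* its DMA address */
};

static int nvme_rdma_alloc_qe_sketch(struct ib_device *ibdev,
                                     struct nvme_rdma_qe_sketch *qe,
                                     size_t capsule_size,
                                     enum dma_data_direction dir)
{
        qe->data = kzalloc(capsule_size, GFP_KERNEL);
        if (!qe->data)
                return -ENOMEM;

        qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir);
        if (ib_dma_mapping_error(ibdev, qe->dma)) {
                kfree(qe->data);        /* unwind: mapping failed */
                qe->data = NULL;
                return -ENOMEM;
        }

        return 0;
}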
