
Searched refs:eqn (Results 1 – 25 of 27) sorted by relevance

/drivers/infiniband/hw/mthca/
mthca_eq.c
184 mthca_write64(MTHCA_EQ_DB_SET_CI | eq->eqn, ci & (eq->nent - 1), in tavor_set_eq_ci()
194 dev->eq_regs.arbel.eq_set_ci_base + eq->eqn * 8); in arbel_set_eq_ci()
207 static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn) in tavor_eq_req_not() argument
209 mthca_write64(MTHCA_EQ_DB_REQ_NOT | eqn, 0, in tavor_eq_req_not()
219 static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn) in disarm_cq() argument
222 mthca_write64(MTHCA_EQ_DB_DISARM_CQ | eqn, cqn, in disarm_cq()
277 disarm_cq(dev, eq->eqn, disarm_cqn); in mthca_eq_int()
349 mthca_warn(dev, "EQ overrun on EQN %d\n", eq->eqn); in mthca_eq_int()
358 eqe->type, eqe->subtype, eq->eqn); in mthca_eq_int()
412 tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn); in mthca_tavor_interrupt()
[all …]
mthca_provider.h
89 int eqn; member
mthca_cq.c
833 cq_context->error_eqn = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn); in mthca_init_cq()
834 cq_context->comp_eqn = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_COMP].eqn); in mthca_init_cq()
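
A side note on the mthca_eq.c hits above: tavor_set_eq_ci() writes the consumer index to the doorbell as ci & (eq->nent - 1), the usual power-of-two ring trick where the mask stands in for a modulo. A minimal standalone sketch of that arithmetic (the entry count and index are made-up example values, not mthca code):

#include <stdio.h>

int main(void)
{
    /* Hypothetical EQ with 256 entries; the mask trick in tavor_set_eq_ci()
     * relies on the entry count being a power of two. */
    unsigned int nent = 256;
    unsigned int ci = 259;                        /* consumer index past the end */

    unsigned int wrapped_mask = ci & (nent - 1);  /* form used in mthca_eq.c */
    unsigned int wrapped_mod  = ci % nent;        /* equivalent modulo form */

    printf("ci=%u nent=%u -> mask=%u mod=%u\n", ci, nent, wrapped_mask, wrapped_mod);
    return 0;
}

Both forms print 3 here; the mask form is only valid because nent is a power of two.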
/drivers/net/ethernet/mellanox/mlx5/core/
eq.c
79 static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn) in mlx5_cmd_destroy_eq() argument
84 MLX5_SET(destroy_eq_in, in, eq_number, eqn); in mlx5_cmd_destroy_eq()
330 eq->eqn = MLX5_GET(create_eq_out, out, eq_number); in create_map_eq()
343 mlx5_cmd_destroy_eq(dev, eq->eqn); in create_map_eq()
399 err = mlx5_cmd_destroy_eq(dev, eq->eqn); in destroy_unmap_eq()
402 eq->eqn); in destroy_unmap_eq()
432 eq->eqn, cq->cqn); in mlx5_eq_del_cq()
438 eq->eqn, cq->cqn); in mlx5_eq_del_cq()
772 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24); in mlx5_eq_update_ci()
790 eq->core.eqn); in destroy_comp_eqs()
[all …]
cq.c
92 int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), in mlx5_core_create_cq() local
98 eq = mlx5_eqn2comp_eq(dev, eqn); in mlx5_core_create_cq()
debugfs.c
297 MLX5_SET(query_eq_in, in, eq_number, eq->eqn); in eq_read_field()
472 &eq->dbg, eq->eqn, eq_fields, in mlx5_debug_eq_add()
/drivers/net/ethernet/mellanox/mlx4/
eq.c
543 eq->eqn, eq->cons_index, ret); in mlx4_eq_int()
560 eq->eqn); in mlx4_eq_int()
573 eq->eqn, eq->cons_index, ret); in mlx4_eq_int()
697 eq->eqn, eq->cons_index, ret); in mlx4_eq_int()
713 mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn); in mlx4_eq_int()
781 eqe->type, eqe->subtype, eq->eqn, in mlx4_eq_int()
806 eqe->type, eqe->subtype, eq->eqn, in mlx4_eq_int()
818 eqe->type, eqe->subtype, eq->eqn, in mlx4_eq_int()
884 u32 eqn = in_modifier & 0x3FF; in mlx4_MAP_EQ_wrapper() local
890 err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn, in mlx4_MAP_EQ_wrapper()
[all …]
resource_tracker.c
3100 int eqn = vhcr->in_modifier; in mlx4_SW2HW_EQ_wrapper() local
3101 int res_id = (slave << 10) | eqn; in mlx4_SW2HW_EQ_wrapper()
3307 int eqn = vhcr->in_modifier; in mlx4_HW2SW_EQ_wrapper() local
3308 int res_id = eqn | (slave << 10); in mlx4_HW2SW_EQ_wrapper()
3361 if (event_eq->eqn < 0) in mlx4_GEN_EQE()
3365 res_id = (slave << 10) | event_eq->eqn; in mlx4_GEN_EQE()
3388 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16); in mlx4_GEN_EQE()
3413 int eqn = vhcr->in_modifier; in mlx4_QUERY_EQ_wrapper() local
3414 int res_id = eqn | (slave << 10); in mlx4_QUERY_EQ_wrapper()
5126 int eqn; in rem_slave_eqs() local
[all …]
mlx4.h
394 int eqn; member
414 int eqn; member
566 u32 eqn; member
cq.c
383 cq_context->comp_eqn = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn; in mlx4_cq_alloc()
main.c
2905 static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn) in mlx4_init_affinity_hint() argument
2913 if (eqn > dev->caps.num_comp_vectors) in mlx4_init_affinity_hint()
2919 requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC); in mlx4_init_affinity_hint()
2925 eq = &priv->eq_table.eq[eqn]; in mlx4_init_affinity_hint()
cmd.c
2119 slave_state[slave].event_eq[i].eqn = -1; in mlx4_master_do_cmd()
2411 s_state->event_eq[j].eqn = -1; in mlx4_multi_func_init()
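
Worth noting across the mlx4 hits: eq.c extracts the EQ number from a command modifier with in_modifier & 0x3FF, and resource_tracker.c keys EQ resources as res_id = (slave << 10) | eqn, so the EQ number lives in the low 10 bits with the slave index above it. A small standalone sketch of that packing (the slave and eqn values are arbitrary examples):

#include <stdio.h>

int main(void)
{
    /* Arbitrary example values for the slave (VF index) and EQ number. */
    unsigned int slave = 5;
    unsigned int eqn   = 7;

    /* Pack as in mlx4_SW2HW_EQ_wrapper(): slave above the 10-bit eqn field. */
    unsigned int res_id = (slave << 10) | eqn;

    /* Unpack: low 10 bits are the EQ number (cf. in_modifier & 0x3FF in eq.c). */
    unsigned int eqn_back   = res_id & 0x3FF;
    unsigned int slave_back = res_id >> 10;

    printf("res_id=0x%x eqn=%u slave=%u\n", res_id, eqn_back, slave_back);
    return 0;
}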
/drivers/net/ethernet/mellanox/mlx5/core/lib/
eq.h
33 u8 eqn; member
71 u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24); in eq_update_ci()
85 struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn);
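
Both copies of the mlx5 consumer-index update above (mlx5_eq_update_ci() in eq.c and eq_update_ci() in lib/eq.h) build the doorbell word as (cons_index & 0xffffff) | (eqn << 24): the low 24 bits hold the consumer index and the top byte holds the EQ number. A standalone sketch of that layout (sample values are arbitrary):

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
    /* Arbitrary example values: 24-bit consumer index, 8-bit EQ number. */
    uint32_t cons_index = 0x123456;
    uint32_t eqn        = 0x1f;

    /* Same packing as eq_update_ci()/mlx5_eq_update_ci(). */
    uint32_t val = (cons_index & 0xffffff) | (eqn << 24);

    printf("doorbell word = 0x%08" PRIx32 " (eqn=%" PRIu32 ", ci=0x%06" PRIx32 ")\n",
           val, val >> 24, val & 0xffffff);
    return 0;
}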
/drivers/clk/spear/
clk-aux-synth.c
69 unsigned int num = 1, den = 1, val, eqn; in clk_aux_recalc_rate() local
80 eqn = (val >> aux->masks->eq_sel_shift) & aux->masks->eq_sel_mask; in clk_aux_recalc_rate()
81 if (eqn == aux->masks->eq1_mask) in clk_aux_recalc_rate()
/drivers/net/ethernet/mellanox/mlx5/core/en/
health.c
115 err = devlink_fmsg_u8_pair_put(fmsg, "eqn", eq->core.eqn); in mlx5e_health_eq_diag_fmsg()
212 eq->core.eqn, eq->core.cons_index, eq->core.irqn); in mlx5e_health_channel_eq_recover()
219 eqe_count, eq->core.eqn); in mlx5e_health_channel_eq_recover()
/drivers/net/ethernet/mellanox/mlx5/core/steering/
dr_send.c
764 int inlen, err, eqn; in dr_create_cq() local
797 err = mlx5_vector2eqn(mdev, vector, &eqn); in dr_create_cq()
805 MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn); in dr_create_cq()
/drivers/net/ethernet/mellanox/mlx5/core/fpga/
conn.c
419 int inlen, err, eqn; in mlx5_fpga_conn_create_cq() local
448 err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn); in mlx5_fpga_conn_create_cq()
456 MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn); in mlx5_fpga_conn_create_cq()
/drivers/infiniband/hw/mlx5/
cq.c
948 int eqn; in mlx5_ib_create_cq() local
986 err = mlx5_vector2eqn(dev->mdev, vector, &eqn); in mlx5_ib_create_cq()
999 MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn); in mlx5_ib_create_cq()
/drivers/infiniband/hw/ocrdma/
ocrdma.h
331 u16 eqn; member
ocrdma_hw.c
541 cmd->eqn = eq->id; in ocrdma_mbx_mq_cq_create()
1830 cq->eqn = ocrdma_bind_eq(dev); in ocrdma_mbx_create_cq()
1855 cmd->cmd.eqn = cq->eqn; in ocrdma_mbx_create_cq()
1881 ocrdma_unbind_eq(dev, cq->eqn); in ocrdma_mbx_create_cq()
1903 ocrdma_unbind_eq(dev, cq->eqn); in ocrdma_mbx_destroy_cq()
ocrdma_sli.h
794 u32 eqn; member
/drivers/infiniband/hw/hns/
hns_roce_hw_v2.c
5647 hr_reg_write(&eq_db, EQ_DB_TAG, eq->eqn); in update_eq_db()
5730 event_type, eq->eqn, eq->cons_index); in hns_roce_v2_aeq_int()
5868 static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn) in hns_roce_v2_destroy_eqc() argument
5873 if (eqn < hr_dev->caps.num_comp_vectors) in hns_roce_v2_destroy_eqc()
5874 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M, in hns_roce_v2_destroy_eqc()
5878 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M, in hns_roce_v2_destroy_eqc()
5882 dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn); in hns_roce_v2_destroy_eqc()
5926 hr_reg_write(eqc, EQC_EQN, eq->eqn); in config_eqc()
6005 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0, in hns_roce_v2_create_eq()
6141 eq->eqn = i; in hns_roce_v2_init_eq_table()
hns_roce_device.h
715 int eqn; member
hns_roce_hw_v1.c
3925 event_type, eq->eqn, eq->cons_index); in hns_roce_v1_aeq_int()
4189 void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn]; in hns_roce_v1_create_eq()
4298 eq->eqn = i; in hns_roce_v1_init_eq_table()
/drivers/vdpa/mlx5/net/
mlx5_vnet.c
583 int eqn; in cq_create() local
617 err = mlx5_vector2eqn(mdev, 0, &eqn); in cq_create()
624 MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn); in cq_create()
