/kernel/linux/linux-5.10/drivers/net/ethernet/huawei/hinic/ |
D | hinic_hw_eqs.c |
    27 #define GET_EQ_NUM_PAGES(eq, pg_size) \ argument
    28 (ALIGN((eq)->q_len * (eq)->elem_size, pg_size) / (pg_size))
    30 #define GET_EQ_NUM_ELEMS_IN_PG(eq, pg_size) ((pg_size) / (eq)->elem_size) argument
    32 #define EQ_CONS_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \ argument
    33 HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) : \
    34 HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id))
    36 #define EQ_PROD_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \ argument
    37 HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) : \
    38 HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id))
    40 #define EQ_HI_PHYS_ADDR_REG(eq, pg_num) (((eq)->type == HINIC_AEQ) ? \ argument
    [all …]
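The two macros at lines 27–30 size the event queue: GET_EQ_NUM_PAGES rounds the total queue footprint (q_len * elem_size) up to whole pages, and GET_EQ_NUM_ELEMS_IN_PG gives how many elements fit in one page. A minimal user-space sketch of the same arithmetic, assuming a stand-in ALIGN_UP macro and made-up queue sizes (struct toy_eq is not the driver's type):

    #include <stdio.h>

    /* Same rounding the kernel's ALIGN() performs (a must be a power of two). */
    #define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))

    struct toy_eq {                   /* hypothetical stand-in for struct hinic_eq */
        unsigned int q_len;           /* number of entries */
        unsigned int elem_size;       /* bytes per entry   */
    };

    int main(void)
    {
        struct toy_eq eq = { .q_len = 256, .elem_size = 32 };
        unsigned int pg_size = 4096;

        unsigned int num_pages = ALIGN_UP(eq.q_len * eq.elem_size, pg_size) / pg_size;
        unsigned int elems_per_pg = pg_size / eq.elem_size;

        /* 256 * 32 = 8192 bytes -> 2 pages, 128 elements per 4 KiB page */
        printf("pages=%u elems/page=%u\n", num_pages, elems_per_pg);
        return 0;
    }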
|
/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/ |
D | eq.c |
    112 static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn) in mlx5_eq_cq_get() argument
    114 struct mlx5_cq_table *table = &eq->cq_table; in mlx5_eq_cq_get()
    132 struct mlx5_eq *eq = &eq_comp->core; in mlx5_eq_comp_int() local
    137 eqe = next_eqe_sw(eq); in mlx5_eq_comp_int()
    151 cq = mlx5_eq_cq_get(eq, cqn); in mlx5_eq_comp_int()
    157 dev_dbg_ratelimited(eq->dev->device, in mlx5_eq_comp_int()
    161 ++eq->cons_index; in mlx5_eq_comp_int()
    163 } while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq))); in mlx5_eq_comp_int()
    166 eq_update_ci(eq, 1); in mlx5_eq_comp_int()
    179 u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq) in mlx5_eq_poll_irq_disabled() argument
    [all …]
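mlx5_eq_comp_int() drains the completion EQ in a bounded loop: each software-owned EQE is handled, cons_index advances, and the do/while stops once MLX5_EQ_POLLING_BUDGET entries have been consumed, after which eq_update_ci(eq, 1) publishes the consumer index and re-arms the EQ. A hedged stand-alone sketch of that budgeted-drain pattern; the ring, its "valid" flag, and the handler are invented for illustration and simpler than the real ownership scheme:

    #include <stdint.h>
    #include <stdio.h>

    #define POLL_BUDGET 8                    /* stands in for MLX5_EQ_POLLING_BUDGET */

    struct toy_eqe { int valid; uint32_t cqn; };

    struct toy_eq {
        struct toy_eqe *ring;
        uint32_t nent;                       /* power of two */
        uint32_t cons_index;
    };

    /* Next software-owned entry, or NULL when the queue is drained. */
    static struct toy_eqe *next_eqe_sw(struct toy_eq *eq)
    {
        struct toy_eqe *eqe = &eq->ring[eq->cons_index & (eq->nent - 1)];

        return eqe->valid ? eqe : NULL;
    }

    /* Budgeted drain: handle at most POLL_BUDGET entries per "interrupt". */
    static int toy_eq_comp_int(struct toy_eq *eq)
    {
        struct toy_eqe *eqe = next_eqe_sw(eq);
        int num_eqes = 0;

        if (!eqe)
            return 0;
        do {
            printf("completion on cqn %u\n", eqe->cqn);
            eqe->valid = 0;                  /* hand the slot back to "hardware" */
            ++eq->cons_index;
        } while (++num_eqes < POLL_BUDGET && (eqe = next_eqe_sw(eq)));

        /* here the driver would write the consumer-index doorbell and re-arm */
        return num_eqes;
    }

    int main(void)
    {
        struct toy_eqe ring[16] = { { 1, 7 }, { 1, 9 } };
        struct toy_eq eq = { .ring = ring, .nent = 16, .cons_index = 0 };

        printf("drained %d entries\n", toy_eq_comp_int(&eq));
        return 0;
    }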
|
D | cq.c |
    94 struct mlx5_eq_comp *eq; in mlx5_core_create_cq() local
    97 eq = mlx5_eqn2comp_eq(dev, eqn); in mlx5_core_create_cq()
    98 if (IS_ERR(eq)) in mlx5_core_create_cq()
    99 return PTR_ERR(eq); in mlx5_core_create_cq()
    110 cq->eq = eq; in mlx5_core_create_cq()
    117 cq->tasklet_ctx.priv = &eq->tasklet_ctx; in mlx5_core_create_cq()
    121 err = mlx5_eq_add_cq(&eq->core, cq); in mlx5_core_create_cq()
    137 cq->irqn = eq->core.irqn; in mlx5_core_create_cq()
    142 mlx5_eq_del_cq(&eq->core, cq); in mlx5_core_create_cq()
    160 mlx5_eq_del_cq(&cq->eq->core, cq); in mlx5_core_destroy_cq()
|
/kernel/linux/linux-5.10/sound/pci/au88x0/ |
D | au88x0_eq.c |
    56 eqhw_t *eqhw = &(vortex->eq.this04); in vortex_EqHw_SetLeftCoefs()
    78 eqhw_t *eqhw = &(vortex->eq.this04); in vortex_EqHw_SetRightCoefs()
    101 eqhw_t *eqhw = &(vortex->eq.this04); in vortex_EqHw_SetLeftStates()
    118 eqhw_t *eqhw = &(vortex->eq.this04); in vortex_EqHw_SetRightStates()
    164 eqhw_t *eqhw = &(vortex->eq.this04); in vortex_EqHw_SetBypassGain()
    211 eqhw_t *eqhw = &(vortex->eq.this04); in vortex_EqHw_SetLeftGainsTarget()
    221 eqhw_t *eqhw = &(vortex->eq.this04); in vortex_EqHw_SetRightGainsTarget()
    231 eqhw_t *eqhw = &(vortex->eq.this04); in vortex_EqHw_SetLeftGainsCurrent()
    241 eqhw_t *eqhw = &(vortex->eq.this04); in vortex_EqHw_SetRightGainsCurrent()
    252 eqhw_t *eqhw = &(vortex->eq.this04);
    [all …]
|
/kernel/linux/linux-5.10/drivers/infiniband/hw/mthca/ |
D | mthca_eq.c |
    173 static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci) in tavor_set_eq_ci() argument
    184 mthca_write64(MTHCA_EQ_DB_SET_CI | eq->eqn, ci & (eq->nent - 1), in tavor_set_eq_ci()
    189 static inline void arbel_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci) in arbel_set_eq_ci() argument
    194 dev->eq_regs.arbel.eq_set_ci_base + eq->eqn * 8); in arbel_set_eq_ci()
    199 static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci) in set_eq_ci() argument
    202 arbel_set_eq_ci(dev, eq, ci); in set_eq_ci()
    204 tavor_set_eq_ci(dev, eq, ci); in set_eq_ci()
    228 static inline struct mthca_eqe *get_eqe(struct mthca_eq *eq, u32 entry) in get_eqe() argument
    230 unsigned long off = (entry & (eq->nent - 1)) * MTHCA_EQ_ENTRY_SIZE; in get_eqe()
    231 return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE; in get_eqe()
    [all …]
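get_eqe() at lines 228–231 shows how an entry is located when the queue is backed by a list of page-sized buffers rather than one contiguous allocation: mask the entry index by (nent - 1), scale by the entry size, then split the byte offset into a page index and an offset inside that page. A small sketch of that indexing under assumed sizes (struct toy_page, TOY_EQE_SIZE and the 4 KiB page are placeholders, not the mthca definitions):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define TOY_PAGE_SIZE  4096u
    #define TOY_EQE_SIZE   32u           /* stands in for MTHCA_EQ_ENTRY_SIZE */

    struct toy_page { uint8_t *buf; };

    struct toy_eq {
        uint32_t nent;                   /* power of two */
        struct toy_page *page_list;
    };

    /* Translate a logical entry number into a pointer inside the paged buffer. */
    static void *get_eqe(struct toy_eq *eq, uint32_t entry)
    {
        unsigned long off = (entry & (eq->nent - 1)) * TOY_EQE_SIZE;

        return eq->page_list[off / TOY_PAGE_SIZE].buf + off % TOY_PAGE_SIZE;
    }

    int main(void)
    {
        struct toy_page pages[2] = {
            { malloc(TOY_PAGE_SIZE) }, { malloc(TOY_PAGE_SIZE) }
        };
        struct toy_eq eq = { .nent = 256, .page_list = pages };

        /* entry 130 -> byte offset 4160 -> page 1, offset 64 within that page */
        printf("entry 130 lives in page 1 at offset %ld\n",
               (long)((uint8_t *)get_eqe(&eq, 130) - pages[1].buf));

        free(pages[0].buf);
        free(pages[1].buf);
        return 0;
    }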
|
/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx4/ |
D | eq.c |
    97 static void eq_set_ci(struct mlx4_eq *eq, int req_not) in eq_set_ci() argument
    99 __raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) | in eq_set_ci()
    101 eq->doorbell); in eq_set_ci()
    106 static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor, in get_eqe() argument
    110 unsigned long offset = (entry & (eq->nent - 1)) * eqe_size; in get_eqe()
    118 …return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % … in get_eqe()
    121 static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor, u8 size) in next_eqe_sw() argument
    123 struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor, size); in next_eqe_sw()
    124 return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe; in next_eqe_sw()
    241 struct mlx4_eq *eq = &priv->eq_table.eq[vec]; in mlx4_set_eq_affinity_hint() local
    [all …]
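The next_eqe_sw() hit at line 124 is the classic ownership-bit test: hardware toggles the owner bit each time it wraps the ring, so an entry belongs to software only while the bit's value matches the wrap parity of cons_index (cons_index & nent, with nent a power of two). A hedged sketch of just that test; struct toy_eqe and its field name are made up, only the parity expression mirrors the driver:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct toy_eqe { uint8_t owner; };       /* bit 0x80 flipped by "hardware" */

    /* Valid when the owner-bit parity matches the consumer wrap parity. */
    static bool eqe_is_sw_owned(const struct toy_eqe *eqe,
                                uint32_t cons_index, uint32_t nent)
    {
        return !(!!(eqe->owner & 0x80) ^ !!(cons_index & nent));
    }

    int main(void)
    {
        struct toy_eqe e = { .owner = 0x00 };

        /* first pass around a 16-entry ring: a clear owner bit means valid... */
        printf("%d\n", eqe_is_sw_owned(&e, 3, 16));    /* prints 1 */
        /* ...after one wrap (cons_index has the nent bit set) it means stale */
        printf("%d\n", eqe_is_sw_owned(&e, 19, 16));   /* prints 0 */
        return 0;
    }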
|
/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/lib/ |
D | eq.h |
    50 static inline struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry) in get_eqe() argument
    52 return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE); in get_eqe()
    55 static inline struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq) in next_eqe_sw() argument
    57 struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1)); in next_eqe_sw()
    59 return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe; in next_eqe_sw()
    62 static inline void eq_update_ci(struct mlx5_eq *eq, int arm) in eq_update_ci() argument
    64 __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2); in eq_update_ci()
    65 u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24); in eq_update_ci()
    77 int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
    78 void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
    [all …]
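eq_update_ci() at lines 62–65 packs the 24-bit consumer index and the 8-bit EQ number into a single doorbell word and selects the arm/no-arm register purely by offset from eq->doorbell. A minimal sketch of the bit packing only, with placeholder names; the endianness conversion and the MMIO write are omitted:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack consumer index (low 24 bits) and EQ number (high 8 bits),
     * the way eq_update_ci() builds its doorbell value. */
    static uint32_t eq_doorbell_val(uint32_t cons_index, uint8_t eqn)
    {
        return (cons_index & 0xffffff) | ((uint32_t)eqn << 24);
    }

    int main(void)
    {
        /* eqn 5, consumer index 0x01020304 -> index truncated to 24 bits */
        printf("0x%08x\n", eq_doorbell_val(0x01020304, 5));   /* 0x05020304 */
        return 0;
    }

In the driver the packed value is converted to big-endian before the write, and the target is either eq->doorbell (arming) or eq->doorbell + 2, i.e. eight bytes further on for a __be32 pointer, when the EQ should stay un-armed.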
|
/kernel/linux/linux-5.10/arch/powerpc/kernel/ |
D | cpu_setup_6xx.S |
    191 cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
    192 cror 4*cr0+eq,4*cr0+eq,4*cr2+eq
    342 cror 4*cr1+eq,4*cr1+eq,4*cr2+eq
    344 cror 4*cr0+eq,4*cr0+eq,4*cr3+eq
    345 cror 4*cr0+eq,4*cr0+eq,4*cr4+eq
    346 cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
    347 cror 4*cr0+eq,4*cr0+eq,4*cr5+eq
    348 cror 4*cr0+eq,4*cr0+eq,4*cr7+eq
    413 cror 4*cr1+eq,4*cr1+eq,4*cr2+eq
    415 cror 4*cr0+eq,4*cr0+eq,4*cr3+eq
    [all …]
|
/kernel/linux/linux-5.10/drivers/pci/controller/ |
D | pcie-iproc-msi.c |
    64 unsigned int eq; member
    130 unsigned int eq) in iproc_msi_read_reg() argument
    134 return readl_relaxed(pcie->base + msi->reg_offsets[eq][reg]); in iproc_msi_read_reg()
    139 int eq, u32 val) in iproc_msi_write_reg() argument
    143 writel_relaxed(val, pcie->base + msi->reg_offsets[eq][reg]); in iproc_msi_write_reg()
    160 static inline unsigned int iproc_msi_eq_offset(struct iproc_msi *msi, u32 eq) in iproc_msi_eq_offset() argument
    163 return eq * EQ_MEM_REGION_SIZE; in iproc_msi_eq_offset()
    165 return eq * EQ_LEN * sizeof(u32); in iproc_msi_eq_offset()
    303 static inline u32 decode_msi_hwirq(struct iproc_msi *msi, u32 eq, u32 head) in decode_msi_hwirq() argument
    309 offs = iproc_msi_eq_offset(msi, eq) + head * sizeof(u32); in decode_msi_hwirq()
    [all …]
|
/kernel/linux/linux-5.10/include/linux/mlx5/ |
D | eq.h |
    24 mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
    25 int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
    27 void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
    30 struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc);
    31 void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm);
    41 static inline u32 mlx5_eq_update_cc(struct mlx5_eq *eq, u32 cc) in mlx5_eq_update_cc() argument
    44 mlx5_eq_update_ci(eq, cc, 0); in mlx5_eq_update_cc()
|
/kernel/linux/linux-5.10/drivers/net/ethernet/ibm/ehea/ |
D | ehea_qmr.c |
    236 struct ehea_eq *eq; in ehea_create_eq() local
    238 eq = kzalloc(sizeof(*eq), GFP_KERNEL); in ehea_create_eq()
    239 if (!eq) in ehea_create_eq()
    242 eq->adapter = adapter; in ehea_create_eq()
    243 eq->attr.type = type; in ehea_create_eq()
    244 eq->attr.max_nr_of_eqes = max_nr_of_eqes; in ehea_create_eq()
    245 eq->attr.eqe_gen = eqe_gen; in ehea_create_eq()
    246 spin_lock_init(&eq->spinlock); in ehea_create_eq()
    249 &eq->attr, &eq->fw_handle); in ehea_create_eq()
    255 ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages, in ehea_create_eq()
    [all …]
|
/kernel/linux/linux-5.10/arch/hexagon/lib/ |
D | memset.S |
    29 p0 = cmp.eq(r2, #0)
    59 p1 = cmp.eq(r2, #1)
    72 p1 = cmp.eq(r2, #2)
    85 p1 = cmp.eq(r2, #4)
    98 p1 = cmp.eq(r3, #1)
    114 p1 = cmp.eq(r2, #8)
    125 p1 = cmp.eq(r2, #4)
    136 p1 = cmp.eq(r2, #2)
    180 p1 = cmp.eq(r2, #1)
    196 p0 = cmp.eq(r2, #2)
    [all …]
|
/kernel/linux/linux-5.10/arch/powerpc/kernel/vdso64/ |
D | gettimeofday.S |
    63 cror cr0*4+eq,cr0*4+eq,cr1*4+eq
    67 cror cr5*4+eq,cr5*4+eq,cr6*4+eq
    69 cror cr0*4+eq,cr0*4+eq,cr5*4+eq
    187 cror cr0*4+eq,cr0*4+eq,cr1*4+eq
|
/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/en/ |
D | health.c |
    108 int mlx5e_health_eq_diag_fmsg(struct mlx5_eq_comp *eq, struct devlink_fmsg *fmsg) in mlx5e_health_eq_diag_fmsg() argument
    116 err = devlink_fmsg_u8_pair_put(fmsg, "eqn", eq->core.eqn); in mlx5e_health_eq_diag_fmsg()
    120 err = devlink_fmsg_u32_pair_put(fmsg, "irqn", eq->core.irqn); in mlx5e_health_eq_diag_fmsg()
    124 err = devlink_fmsg_u32_pair_put(fmsg, "vecidx", eq->core.vecidx); in mlx5e_health_eq_diag_fmsg()
    128 err = devlink_fmsg_u32_pair_put(fmsg, "ci", eq->core.cons_index); in mlx5e_health_eq_diag_fmsg()
    132 err = devlink_fmsg_u32_pair_put(fmsg, "size", eq->core.nent); in mlx5e_health_eq_diag_fmsg()
    209 int mlx5e_health_channel_eq_recover(struct mlx5_eq_comp *eq, struct mlx5e_channel *channel) in mlx5e_health_channel_eq_recover() argument
    214 eq->core.eqn, eq->core.cons_index, eq->core.irqn); in mlx5e_health_channel_eq_recover()
    216 eqe_count = mlx5_eq_poll_irq_disabled(eq); in mlx5e_health_channel_eq_recover()
    221 eqe_count, eq->core.eqn); in mlx5e_health_channel_eq_recover()
|
/kernel/linux/linux-5.10/drivers/clk/spear/ |
D | spear1340_clock.c |
    267 {.xscale = 5, .yscale = 122, .eq = 0},
    269 {.xscale = 10, .yscale = 204, .eq = 0},
    271 {.xscale = 4, .yscale = 25, .eq = 0},
    273 {.xscale = 4, .yscale = 21, .eq = 0},
    275 {.xscale = 5, .yscale = 18, .eq = 0},
    277 {.xscale = 2, .yscale = 6, .eq = 0},
    279 {.xscale = 5, .yscale = 12, .eq = 0},
    281 {.xscale = 2, .yscale = 4, .eq = 0},
    283 {.xscale = 5, .yscale = 18, .eq = 1},
    285 {.xscale = 1, .yscale = 3, .eq = 1},
    [all …]
|
/kernel/linux/linux-5.10/arch/arm64/lib/ |
D | crc32.S |
    34 csel x3, x3, x4, eq
    35 csel w0, w0, w8, eq
    39 csel x3, x3, x4, eq
    40 csel w0, w0, w8, eq
    44 csel w3, w3, w4, eq
    45 csel w0, w0, w8, eq
    48 csel w0, w0, w8, eq
    52 csel w0, w0, w8, eq
|
D | strncmp.S |
    86 ccmp endloop, #0, #0, eq
    87 b.eq .Lloop_aligned
    94 b.eq .Lnot_limit
    165 b.eq .Ltinycmp
    169 b.eq .Lstart_align /*the last bytes are equal....*/
    179 b.eq .Lrecal_offset
    194 ccmp endloop, #0, #0, eq /*has_null is ZERO: no null byte*/
    216 csinv endloop, diff, xzr, eq
    228 ccmp endloop, #0, #0, eq /*has_null is ZERO: no null byte*/
    229 b.eq .Lloopcmp_proc
    [all …]
|
/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx5/ |
D | odp.c |
    77 struct mlx5_ib_pf_eq *eq; member
    1454 struct mlx5_ib_pf_eq *eq = pfault->eq; in mlx5_ib_eqe_pf_action() local
    1456 mlx5_ib_pfault(eq->dev, pfault); in mlx5_ib_eqe_pf_action()
    1457 mempool_free(pfault, eq->pool); in mlx5_ib_eqe_pf_action()
    1460 static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq) in mlx5_ib_eq_pf_process() argument
    1467 while ((eqe = mlx5_eq_get_eqe(eq->core, cc))) { in mlx5_ib_eq_pf_process()
    1468 pfault = mempool_alloc(eq->pool, GFP_ATOMIC); in mlx5_ib_eq_pf_process()
    1470 schedule_work(&eq->work); in mlx5_ib_eq_pf_process()
    1478 mlx5_ib_dbg(eq->dev, in mlx5_ib_eq_pf_process()
    1498 mlx5_ib_dbg(eq->dev, in mlx5_ib_eq_pf_process()
    [all …]
|
/kernel/linux/linux-5.10/arch/ia64/lib/ |
D | strlen.S |
    104 cmp.eq p6,p0=r0,r0 // sets p6 to true for cmp.and
    119 cmp.eq.and p6,p0=8,val1 // p6 = p6 and val1==8
    120 cmp.eq.and p6,p0=8,val2 // p6 = p6 and mask==8
    130 cmp.eq p8,p9=8,val1 // p6 = val1 had zero (disambiguate)
    137 cmp.eq.and p7,p0=8,val1 // val1==8?
    174 cmp.eq p0,p6=r0,r0 // nullify first ld8 in loop
    184 cmp.eq p6,p0=8,val1 // val1==8 ?
|
/kernel/linux/linux-5.10/drivers/misc/habanalabs/common/ |
D | irq.c |
    149 struct hl_eq *eq = arg; in hl_irq_handler_eq() local
    150 struct hl_device *hdev = eq->hdev; in hl_irq_handler_eq()
    155 eq_base = eq->kernel_address; in hl_irq_handler_eq()
    159 ((le32_to_cpu(eq_base[eq->ci].hdr.ctl) & in hl_irq_handler_eq()
    165 eq_entry = &eq_base[eq->ci]; in hl_irq_handler_eq()
    196 eq->ci = hl_eq_inc_ptr(eq->ci); in hl_irq_handler_eq()
    198 hdev->asic_funcs->update_eq_ci(hdev, eq->ci); in hl_irq_handler_eq()
|
/kernel/linux/linux-5.10/arch/hexagon/mm/ |
D | strnlen_user.S |
    39 P0 = cmp.eq(mod8,#0);
    50 P0 = cmp.eq(tmp1,#0);
    57 P0 = cmp.eq(mod8,#0);
    71 P0 = vcmpb.eq(dbuf,dcmp);
    83 P0 = cmp.eq(tmp1,#32);
|
/kernel/linux/linux-5.10/net/dns_resolver/ |
D | dns_key.c |
    150 const char *eq; in dns_resolver_preparse() local
    161 eq = memchr(opt, '=', opt_len); in dns_resolver_preparse()
    162 if (eq) { in dns_resolver_preparse()
    163 opt_nlen = eq - opt; in dns_resolver_preparse()
    164 eq++; in dns_resolver_preparse()
    165 memcpy(optval, eq, next_opt - eq); in dns_resolver_preparse()
    166 optval[next_opt - eq] = '\0'; in dns_resolver_preparse()
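The hits above are dns_resolver_preparse() splitting a "name=value" option with memchr(): the name length is eq - opt and the value runs from eq + 1 up to next_opt. A stand-alone sketch of that split; the buffer size, the sample "dnserror=6" string and the toy function name are assumptions:

    #include <stdio.h>
    #include <string.h>

    /* Split one "name=value" option the way dns_resolver_preparse() uses memchr(). */
    static void parse_option(const char *opt, size_t opt_len)
    {
        char optval[128];                        /* assumes the value fits */
        const char *next_opt = opt + opt_len;    /* one past the option's end */
        const char *eq = memchr(opt, '=', opt_len);

        if (eq) {
            size_t opt_nlen = eq - opt;          /* length of the name part */

            eq++;                                /* value starts after '=' */
            memcpy(optval, eq, next_opt - eq);
            optval[next_opt - eq] = '\0';
            printf("name=%.*s value=%s\n", (int)opt_nlen, opt, optval);
        } else {
            printf("option \"%.*s\" has no value\n", (int)opt_len, opt);
        }
    }

    int main(void)
    {
        const char *opt = "dnserror=6";

        parse_option(opt, strlen(opt));          /* prints: name=dnserror value=6 */
        return 0;
    }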
|
/kernel/linux/linux-5.10/arch/powerpc/kernel/vdso32/ |
D | gettimeofday.S |
    70 cror cr0*4+eq,cr0*4+eq,cr1*4+eq
    74 cror cr5*4+eq,cr5*4+eq,cr6*4+eq
    76 cror cr0*4+eq,cr0*4+eq,cr5*4+eq
|
/kernel/linux/linux-5.10/drivers/firmware/broadcom/ |
D | bcm47xx_nvram.c |
    167 char *var, *value, *end, *eq; in bcm47xx_nvram_getenv() local
    183 eq = strchr(var, '='); in bcm47xx_nvram_getenv()
    184 if (!eq) in bcm47xx_nvram_getenv()
    186 value = eq + 1; in bcm47xx_nvram_getenv()
    187 if (eq - var == strlen(name) && in bcm47xx_nvram_getenv()
    188 strncmp(var, name, eq - var) == 0) in bcm47xx_nvram_getenv()
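bcm47xx_nvram_getenv() scans "var=value" strings and accepts a variable only on an exact name match: the eq - var == strlen(name) check rules out prefix collisions before strncmp() compares the bytes. A small sketch of that lookup over a NULL-terminated array of entries; the entry list and variable names are invented:

    #include <stdio.h>
    #include <string.h>

    /* Exact "name=value" lookup in the spirit of bcm47xx_nvram_getenv(). */
    static const char *toy_getenv(const char *const *vars, const char *name)
    {
        for (; *vars; vars++) {
            const char *var = *vars;
            const char *eq = strchr(var, '=');

            if (!eq)
                continue;
            /* length check stops "board" from matching "boardtype=..." */
            if ((size_t)(eq - var) == strlen(name) &&
                strncmp(var, name, eq - var) == 0)
                return eq + 1;
        }
        return NULL;
    }

    int main(void)
    {
        const char *const nvram[] = { "boardtype=0x048e", "boardrev=0x1102", NULL };
        const char *v = toy_getenv(nvram, "board");

        printf("boardrev = %s\n", toy_getenv(nvram, "boardrev"));   /* 0x1102 */
        printf("board    = %s\n", v ? v : "not found");             /* not found */
        return 0;
    }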
|
/kernel/linux/linux-5.10/arch/arc/lib/ |
D | strlen.S |
    21 mov.eq r7,r4
    24 or.eq r12,r12,r1
    38 or.eq r12,r12,r1
    57 mov.eq r1,r12
    69 mov.eq r2,r6
|