
Searched refs:imm (Results 1 – 25 of 26) sorted by relevance


/drivers/net/ethernet/netronome/nfp/bpf/
jit.c:294 emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm, in emit_immed() argument
305 err = swreg_to_unrestricted(dst, dst, reg_imm(imm & 0xff), &reg); in emit_immed()
314 reg.breg, imm >> 8, width, invert, shift, in emit_immed()
590 static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift) in pack_immed() argument
592 if (!(imm & 0xffff0000)) { in pack_immed()
593 *val = imm; in pack_immed()
595 } else if (!(imm & 0xff0000ff)) { in pack_immed()
596 *val = imm >> 8; in pack_immed()
598 } else if (!(imm & 0x0000ffff)) { in pack_immed()
599 *val = imm >> 16; in pack_immed()
[all …]
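
The pack_immed() excerpt above tests which 16-bit window of a 32-bit immediate is populated, so the value can be emitted as a 16-bit load plus a byte shift. A minimal user-space sketch of that check, assuming nothing beyond the masks visible in the excerpt (the function name and shift encoding here are illustrative, not the driver's own):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Can imm be expressed as a 16-bit value shifted left by 0, 1 or 2 bytes? */
static bool pack_imm16(uint32_t imm, uint16_t *val, unsigned int *shift_bytes)
{
	if (!(imm & 0xffff0000)) {		/* only bits 15..0 are set */
		*val = (uint16_t)imm;
		*shift_bytes = 0;
	} else if (!(imm & 0xff0000ff)) {	/* only bits 23..8 are set */
		*val = (uint16_t)(imm >> 8);
		*shift_bytes = 1;
	} else if (!(imm & 0x0000ffff)) {	/* only bits 31..16 are set */
		*val = (uint16_t)(imm >> 16);
		*shift_bytes = 2;
	} else {
		return false;			/* spans windows: needs two loads */
	}
	return true;
}

int main(void)
{
	uint16_t val;
	unsigned int shift;

	if (pack_imm16(0x00abcd00, &val, &shift))
		printf("val=0x%04x shift=%u bytes\n", (unsigned)val, shift);
	return 0;
}
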
verifier.c:53 int imm; in nfp_record_adjust_head() local
61 imm = reg2->var_off.value; in nfp_record_adjust_head()
63 if (imm > ETH_ZLEN - ETH_HLEN) in nfp_record_adjust_head()
65 if (imm > (int)bpf->adjust_head.guaranteed_add || in nfp_record_adjust_head()
66 imm < -bpf->adjust_head.guaranteed_sub) in nfp_record_adjust_head()
74 if (meta->arg2.reg.var_off.value != imm) in nfp_record_adjust_head()
178 u32 func_id = meta->insn.imm; in nfp_bpf_check_helper_call()
315 u64 imm; in nfp_bpf_check_exit() local
329 imm = reg0->var_off.value; in nfp_bpf_check_exit()
331 imm <= TC_ACT_REDIRECT && in nfp_bpf_check_exit()
[all …]
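
The verifier.c excerpt above reads the constant (immediate) adjustment passed to the adjust-head helper and rejects it unless it stays inside the headroom the firmware guarantees. A simplified sketch of that bounds check, assuming the kernel's ETH_HLEN/ETH_ZLEN values and a made-up limits structure:

#include <stdbool.h>

#define ETH_HLEN 14	/* kernel values, repeated here for a standalone build */
#define ETH_ZLEN 60

struct adjust_head_limits {
	int guaranteed_add;	/* headroom the firmware can always consume */
	int guaranteed_sub;	/* headroom it can always hand back */
};

/* mirrors the checks visible in nfp_record_adjust_head() above */
static bool adjust_head_imm_ok(int imm, const struct adjust_head_limits *l)
{
	if (imm > ETH_ZLEN - ETH_HLEN)
		return false;
	if (imm > l->guaranteed_add || imm < -l->guaranteed_sub)
		return false;
	return true;
}

int main(void)
{
	const struct adjust_head_limits lim = { .guaranteed_add = 64, .guaranteed_sub = 32 };

	return adjust_head_imm_ok(12, &lim) ? 0 : 1;	/* 12 bytes is within bounds */
}
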
/drivers/infiniband/sw/rdmavt/
trace_cq.h:123 __field(u32, imm)
134 __entry->imm = be32_to_cpu(wc->ex.imm_data);
146 __entry->imm
/drivers/crypto/chelsio/
chcr_crypto.h:188 u16 imm; member
269 u8 imm; member
297 u16 imm; member
chcr_algo.c:721 unsigned int imm, in create_wreq() argument
746 chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm); in create_wreq()
783 temp = reqctx->imm ? roundup(wrparam->bytes, 16) : in create_cipher_wr()
837 + (reqctx->imm ? (wrparam->bytes) : 0); in create_cipher_wr()
838 create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0, in create_cipher_wr()
1133 if (!reqctx->imm) { in chcr_handle_cipher_resp()
1227 reqctx->imm = (transhdr_len + IV + req->nbytes) <= in process_cipher()
1232 reqctx->imm = 0; in process_cipher()
1235 if (!reqctx->imm) { in process_cipher()
1509 req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len + in create_hash_wr()
[all …]
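
Throughout the chcr excerpts above, reqctx->imm is a flag: when the transport header, IV and payload fit under the hardware's maximum inline work-request size (the exact limit is elided in the excerpt), the data is carried as immediate data inside the WR instead of via a scatter/gather list. A rough model of that decision, with a made-up limit:

#include <stdbool.h>
#include <stddef.h>

#define MAX_INLINE_WR_LEN 512	/* hypothetical limit; the real one is elided above */

static bool use_immediate_data(size_t transhdr_len, size_t iv_len,
			       size_t payload_len)
{
	/* everything must fit inside one inline work request */
	return transhdr_len + iv_len + payload_len <= MAX_INLINE_WR_LEN;
}

int main(void)
{
	/* a small cipher request: 80-byte header + 16-byte IV + 64-byte payload */
	return use_immediate_data(80, 16, 64) ? 0 : 1;
}
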
/drivers/infiniband/sw/rxe/
rxe_hdr.h:869 __be32 imm; member
876 return immdt->imm; in __immdt_imm()
879 static inline void __immdt_set_imm(void *arg, __be32 imm) in __immdt_set_imm() argument
883 immdt->imm = imm; in __immdt_set_imm()
892 static inline void immdt_set_imm(struct rxe_pkt_info *pkt, __be32 imm) in immdt_set_imm() argument
895 + rxe_opcode[pkt->opcode].offset[RXE_IMMDT], imm); in immdt_set_imm()
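
The rxe_hdr.h accessors above keep the RDMA immediate data as a 32-bit big-endian field and convert only at the get/set boundary (the rdmavt trace entry earlier does the same with be32_to_cpu()). A self-contained user-space sketch of the pattern, using htonl()/ntohl() in place of the kernel's byte-order helpers:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

struct immdt_hdr {
	uint32_t imm;			/* stored big-endian, as on the wire */
};

static void immdt_set_imm(struct immdt_hdr *h, uint32_t host_imm)
{
	h->imm = htonl(host_imm);	/* convert once, at the boundary */
}

static uint32_t immdt_get_imm(const struct immdt_hdr *h)
{
	return ntohl(h->imm);
}

int main(void)
{
	struct immdt_hdr h;

	immdt_set_imm(&h, 0x12345678);
	printf("imm = 0x%08x\n", (unsigned)immdt_get_imm(&h));
	return 0;
}
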
/drivers/net/ethernet/qlogic/qed/
qed_debug.c:84 static u32 cond5(const u32 *r, const u32 *imm) in cond5() argument
86 return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]); in cond5()
89 static u32 cond7(const u32 *r, const u32 *imm) in cond7() argument
91 return ((r[0] >> imm[0]) & imm[1]) != imm[2]; in cond7()
94 static u32 cond6(const u32 *r, const u32 *imm) in cond6() argument
96 return (r[0] & imm[0]) != imm[1]; in cond6()
99 static u32 cond9(const u32 *r, const u32 *imm) in cond9() argument
101 return ((r[0] & imm[0]) >> imm[1]) != in cond9()
102 (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5])); in cond9()
105 static u32 cond10(const u32 *r, const u32 *imm) in cond10() argument
[all …]
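
The qed_debug.c cond*() excerpts above are small predicates that compare captured register values r[] against immediate operands imm[] taken from debug rules. A sketch of the table-driven dispatch such predicates lend themselves to; the table and operand layout here are illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

/* mirrors cond6 above: masked register value differs from an expected immediate */
static u32 cond_eq_masked(const u32 *r, const u32 *imm)
{
	return (r[0] & imm[0]) != imm[1];
}

/* mirrors cond7 above: extracted bit-field differs from an expected immediate */
static u32 cond_shifted_field(const u32 *r, const u32 *imm)
{
	return ((r[0] >> imm[0]) & imm[1]) != imm[2];
}

/* debug rules would index into a table like this by condition id */
static u32 (*const cond_table[])(const u32 *, const u32 *) = {
	cond_eq_masked,
	cond_shifted_field,
};

int main(void)
{
	u32 regs[] = { 0x00ff1234 };
	u32 imm[]  = { 16, 0xff, 0xff };	/* operands for cond_shifted_field */

	printf("fired=%u\n", (unsigned)cond_table[1](regs, imm));
	return 0;
}
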
/drivers/scsi/csiostor/
csio_scsi.c:207 uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len; in csio_scsi_init_cmd_wr() local
210 FW_SCSI_CMD_WR_IMMDLEN(imm)); in csio_scsi_init_cmd_wr()
367 uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len; in csio_scsi_init_read_wr() local
371 FW_SCSI_READ_WR_IMMDLEN(imm)); in csio_scsi_init_read_wr()
398 sizeof(struct fw_scsi_read_wr) + ALIGN(imm, 16)); in csio_scsi_init_read_wr()
420 uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len; in csio_scsi_init_write_wr() local
424 FW_SCSI_WRITE_WR_IMMDLEN(imm)); in csio_scsi_init_write_wr()
451 sizeof(struct fw_scsi_write_wr) + ALIGN(imm, 16)); in csio_scsi_init_write_wr()
458 #define CSIO_SCSI_DATA_WRSZ(req, oper, sz, imm) \ argument
461 ALIGN((imm), 16) + /* Immed data */ \
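
The csio_scsi.c lines above size each firmware work request as a fixed header plus the immediate SCSI command padded to a 16-byte boundary (ALIGN(imm, 16)). A small model of that arithmetic, with a placeholder header size:

#include <stddef.h>
#include <stdio.h>

/* round x up to the next multiple of a (a must be a power of two) */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

static size_t scsi_wr_size(size_t wr_hdr_len, size_t imm_cmd_len)
{
	return wr_hdr_len + ALIGN_UP(imm_cmd_len, 16);	/* header + padded immediate */
}

int main(void)
{
	/* e.g. a 48-byte header plus a 10-byte CDB rounds up to 48 + 16 = 64 */
	printf("%zu\n", scsi_wr_size(48, 10));
	return 0;
}
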
/drivers/infiniband/hw/mthca/
mthca_wqe.h:61 __be32 imm; /* immediate data */ member
mthca_cmd.c:1958 u64 imm; in mthca_MGID_HASH() local
1961 err = mthca_cmd_imm(dev, mailbox->dma, &imm, 0, 0, CMD_MGID_HASH, in mthca_MGID_HASH()
1964 *hash = imm; in mthca_MGID_HASH()
mthca_srq.c:94 return (int *) (wqe + offsetof(struct mthca_next_seg, imm)); in wqe_to_link()
mthca_qp.c:1678 ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data; in mthca_tavor_post_send()
2008 ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data; in mthca_arbel_post_send()
/drivers/net/ethernet/mellanox/mlx4/
mcg.c:58 u64 imm; in mlx4_QP_FLOW_STEERING_ATTACH() local
61 err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0, in mlx4_QP_FLOW_STEERING_ATTACH()
66 *reg_id = imm; in mlx4_QP_FLOW_STEERING_ATTACH()
110 u64 imm; in mlx4_GID_HASH() local
113 err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod, in mlx4_GID_HASH()
118 *hash = imm; in mlx4_GID_HASH()
en_tx.c:969 tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2)); in mlx4_en_xmit()
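
The mthca_cmd.c and mcg.c excerpts above share one calling convention: the firmware command hands a small result (a GID hash, a flow-steering registration id) back through the command interface's 64-bit immediate output parameter rather than through a DMA mailbox. A stubbed sketch of that convention; the command function here is invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* stand-in for the real firmware command; it just fabricates a result */
static int fake_cmd_imm(uint64_t in_param, uint64_t *out_imm)
{
	*out_imm = in_param ^ 0xdeadbeefULL;
	return 0;
}

static int query_gid_hash(uint64_t mailbox_dma, uint64_t *hash)
{
	uint64_t imm;
	int err;

	err = fake_cmd_imm(mailbox_dma, &imm);
	if (err)
		return err;

	*hash = imm;	/* the result arrives as the command's immediate output */
	return 0;
}

int main(void)
{
	uint64_t hash;

	if (!query_gid_hash(0x1000, &hash))
		printf("hash = 0x%llx\n", (unsigned long long)hash);
	return 0;
}
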
/drivers/net/ethernet/cavium/thunder/
nicvf_queues.c:1441 struct sq_imm_subdesc *imm; in nicvf_sq_add_cqe_subdesc() local
1459 imm = (struct sq_imm_subdesc *)GET_SQ_DESC(sq, qentry); in nicvf_sq_add_cqe_subdesc()
1460 memset(imm, 0, SND_QUEUE_DESC_SIZE); in nicvf_sq_add_cqe_subdesc()
1461 imm->subdesc_type = SQ_DESC_TYPE_IMMEDIATE; in nicvf_sq_add_cqe_subdesc()
1462 imm->len = 1; in nicvf_sq_add_cqe_subdesc()
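
The nicvf excerpt above reserves a send-queue slot and initialises it as an immediate subdescriptor: clear the slot, mark the type, set the length. A simplified model of that fill sequence; the structure layout and constants below are illustrative only:

#include <stdint.h>
#include <string.h>

enum subdesc_type { SUBDESC_GATHER = 0, SUBDESC_IMMEDIATE = 1 };

struct imm_subdesc {
	uint8_t  subdesc_type;
	uint16_t len;
	uint8_t  data[16];	/* inline payload lives inside the descriptor */
};

static void fill_imm_subdesc(struct imm_subdesc *d, uint16_t len)
{
	memset(d, 0, sizeof(*d));		/* start from a clean slot */
	d->subdesc_type = SUBDESC_IMMEDIATE;	/* payload is inline, not DMA'd */
	d->len = len;
}

int main(void)
{
	struct imm_subdesc d;

	fill_imm_subdesc(&d, 1);	/* the excerpt above also sets len = 1 */
	return d.subdesc_type == SUBDESC_IMMEDIATE ? 0 : 1;
}
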
/drivers/scsi/
Makefile:118 obj-$(CONFIG_SCSI_IMM) += imm.o
Kconfig:811 then you should say N here and Y to "IOMEGA parallel port (imm -
826 tristate "IOMEGA parallel port (imm - newer drives)"
850 module will be called imm.
853 bool "ppa/imm option - Use slow (but safe) EPP-16"
868 bool "ppa/imm option - Assume slow parport control register"
/drivers/net/ethernet/chelsio/cxgb4/
cxgb4_uld.c:129 rxq->stats.imm++; in uldrx_handler()
cxgb4.h:719 unsigned long imm; /* # of immediate-data packets */ member
/drivers/infiniband/hw/mlx5/
qp.c:4911 (*ctrl)->imm = send_ieth(wr); in __begin_wqe()
5071 ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey); in _mlx5_ib_post_send()
5078 ctrl->imm = cpu_to_be32(reg_wr(wr)->key); in _mlx5_ib_post_send()
5102 ctrl->imm = cpu_to_be32(reg_pi_wr.key); in _mlx5_ib_post_send()
5146 ctrl->imm = cpu_to_be32(mr->ibmr.rkey); in _mlx5_ib_post_send()
5272 ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey); in _mlx5_ib_post_send()
/drivers/net/ethernet/mellanox/mlx5/core/steering/
dr_send.c:240 wq_ctrl->imm = 0; in dr_rdma_segments()
/drivers/scsi/cxgbi/cxgb4i/
cxgb4i.c:676 bool imm = is_ofld_imm(skb); in make_tx_data_wr() local
680 if (imm) { in make_tx_data_wr()
/drivers/net/ethernet/mellanox/mlx5/core/fpga/
conn.c:164 ctrl->imm = 0; in mlx5_fpga_conn_post_send()
/drivers/infiniband/hw/mlx4/
qp.c:3191 memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4); in build_mlx_header()
3604 ctrl->imm = send_ieth(wr); in _mlx4_ib_post_send()
/drivers/scsi/qla2xxx/
qla_target.c:127 struct imm_ntfy_from_isp *imm, int ha_locked);
3588 struct imm_ntfy_from_isp *imm, int ha_locked) in qlt_send_term_imm_notif() argument
3593 rc = __qlt_send_term_imm_notif(vha, imm); in qlt_send_term_imm_notif()
