/kernel/linux/linux-5.10/net/sunrpc/xprtrdma/ |
D | backchannel.c
    rpcrdma_bc_marshal_reply():
         60  static int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
         62          struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
         63          struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
         68                            rdmab_data(req->rl_rdmabuf), rqst);
         73          *p++ = rqst->rq_xid;
         82                          &rqst->rq_snd_buf, rpcrdma_noch_pullup))
         85          trace_xprtrdma_cb_reply(rqst);
    xprt_rdma_bc_send_reply():
        101  int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst)
        103          struct rpc_xprt *xprt = rqst->rq_xprt;
        105          struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
    [all …]
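The `*p++ = rqst->rq_xid;` hit shows the usual XDR encode idiom: the RPC-over-RDMA header is written one 32-bit big-endian word at a time through an advancing `__be32` cursor (`rq_xid` is already in wire order, so it is stored without conversion). A minimal userspace sketch of that idiom, with an illustrative three-word header rather than the real RPC/RDMA layout:

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>   /* htonl() stands in for the kernel's cpu_to_be32() */

    /* Illustrative: encode a three-word header into buf, returning the cursor. */
    static uint32_t *encode_header(uint32_t *p, uint32_t xid, uint32_t vers,
                                   uint32_t credits)
    {
            *p++ = htonl(xid);      /* kernel stores rq_xid directly: already __be32 */
            *p++ = htonl(vers);
            *p++ = htonl(credits);
            return p;
    }

    int main(void)
    {
            uint32_t buf[3];
            uint32_t *end = encode_header(buf, 0x1234, 1, 64);

            printf("encoded %ld bytes\n", (long)((char *)end - (char *)buf));
            return 0;
    }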
|
D | svc_rdma_backchannel.c
    svc_rdma_bc_sendto():
         76                            struct rpc_rqst *rqst,
         81          ret = svc_rdma_map_reply_msg(rdma, ctxt, NULL, &rqst->rq_snd_buf);
         88          get_page(virt_to_page(rqst->rq_buffer));
    xprt_rdma_bc_allocate():
        100          struct rpc_rqst *rqst = task->tk_rqstp;
        101          size_t size = rqst->rq_callsize;
        113          rqst->rq_buffer = page_address(page);
        115          rqst->rq_rbuffer = kmalloc(rqst->rq_rcvsize, RPCRDMA_DEF_GFP);
        116          if (!rqst->rq_rbuffer) {
    xprt_rdma_bc_free():
        126          struct rpc_rqst *rqst = task->tk_rqstp;
        128          put_page(virt_to_page(rqst->rq_buffer));
    [all …]
|
D | rpc_rdma.c
    rpcrdma_args_inline():
        131                          struct rpc_rqst *rqst)
        133          struct xdr_buf *xdr = &rqst->rq_snd_buf;
    rpcrdma_results_inline():
        163                             struct rpc_rqst *rqst)
        165          return rqst->rq_rcv_buf.buflen <= r_xprt->rx_ep->re_max_inline_recv;
    rpcrdma_nonpayload_inline():
        174                                const struct rpc_rqst *rqst)
        176          const struct xdr_buf *buf = &rqst->rq_rcv_buf;
    rpcrdma_encode_read_list():
        364                                struct rpc_rqst *rqst,
        376          pos = rqst->rq_snd_buf.head[0].iov_len;
        380          nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
        393          trace_xprtrdma_chunk_read(rqst->rq_task, pos, mr, nsegs);
    [all …]
|
D | transport.c
    xprt_rdma_free_slot():
        533  xprt_rdma_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *rqst)
        538          rpcrdma_reply_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
        539          if (!xprt_wake_up_backlog(xprt, rqst)) {
        540                  memset(rqst, 0, sizeof(*rqst));
        541                  rpcrdma_buffer_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
    xprt_rdma_allocate():
        569          struct rpc_rqst *rqst = task->tk_rqstp;
        570          struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
        571          struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
        580          if (!rpcrdma_check_regbuf(r_xprt, req->rl_sendbuf, rqst->rq_callsize,
        583          if (!rpcrdma_check_regbuf(r_xprt, req->rl_recvbuf, rqst->rq_rcvsize,
    [all …]
|
D | xprt_rdma.h
    rpcr_to_rdmar():
        322  rpcr_to_rdmar(const struct rpc_rqst *rqst)
        324          return container_of(rqst, struct rpcrdma_req, rl_slot);
        552  int rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst);
        583  int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst);
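rpcr_to_rdmar() relies on struct rpc_rqst being embedded inside struct rpcrdma_req as the rl_slot member, so container_of() can recover the outer object by pointer arithmetic alone. A standalone sketch of the same pattern, with a locally defined container_of() (simplified: the kernel macro also type-checks) and illustrative struct contents:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rpc_rqst { int rq_xid; };

    struct rpcrdma_req {
            int             rl_flags;
            struct rpc_rqst rl_slot;        /* embedded, as in xprt_rdma.h */
    };

    /* Given a pointer to the embedded slot, recover the enclosing request. */
    static struct rpcrdma_req *rpcr_to_rdmar(struct rpc_rqst *rqst)
    {
            return container_of(rqst, struct rpcrdma_req, rl_slot);
    }

    int main(void)
    {
            struct rpcrdma_req req = { .rl_flags = 7 };
            struct rpc_rqst *slot = &req.rl_slot;

            printf("%d\n", rpcr_to_rdmar(slot)->rl_flags);  /* prints 7 */
            return 0;
    }

This is why generic sunrpc code can hand xprtrdma a plain rpc_rqst pointer and the transport can still reach its own per-request state without any lookup table.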
|
/kernel/linux/linux-5.10/fs/cifs/ |
D | smb2inode.c
    free_set_inf_compound():
         41  free_set_inf_compound(struct smb_rqst *rqst)
         43          if (rqst[1].rq_iov)
         44                  SMB2_set_info_free(&rqst[1]);
         45          if (rqst[2].rq_iov)
         46                  SMB2_close_free(&rqst[2]);
         53          struct smb_rqst rqst[3];        /* struct member */
    smb2_compound_op():
         71          struct smb_rqst *rqst;
         90          rqst = &vars->rqst[0];
        120          rqst[num_rqst].rq_iov = &vars->open_iov[0];
        121          rqst[num_rqst].rq_nvec = SMB2_CREATE_IOV_SIZE;
    [all …]
|
D | transport.c
    smb_rqst_len():
        268  smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
        276              rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
        277                  iov = &rqst->rq_iov[1];
        278                  nvec = rqst->rq_nvec - 1;
        280                  iov = rqst->rq_iov;
        281                  nvec = rqst->rq_nvec;
        294          if (rqst->rq_npages) {
        295                  if (rqst->rq_npages == 1)
        296                          buflen += rqst->rq_tailsz;
        302                  buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
    [all …]
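The visible lines give the two halves of the length computation: the kvec lengths (skipping iov[0] when it is the 4-byte RFC1001 length field), then the page payload, where the first page starts at rq_offset and the last page carries only rq_tailsz bytes. Line 302 is truncated in the excerpt; the sketch below assumes it completes with "- rqst->rq_offset", which is the only completion consistent with the first-page/last-page layout. A runnable userspace rendering of that arithmetic over a mock struct (the real struct smb_rqst has more members):

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified mock of only the fields smb_rqst_len() reads. */
    struct smb_rqst_mock {
            size_t       *iov_len;    /* lengths of the rq_iov[] entries */
            int           rq_nvec;
            unsigned int  rq_npages;
            unsigned int  rq_pagesz;
            unsigned int  rq_offset;  /* offset into the first page */
            unsigned int  rq_tailsz;  /* bytes used in the last page */
    };

    /* Assumption: truncated line 302 ends in "- rqst->rq_offset". */
    static unsigned long rqst_len(const struct smb_rqst_mock *r)
    {
            unsigned long buflen = 0;
            int i;

            for (i = 0; i < r->rq_nvec; i++)
                    buflen += r->iov_len[i];

            if (r->rq_npages) {
                    if (r->rq_npages == 1)
                            buflen += r->rq_tailsz;
                    else
                            buflen += (unsigned long)r->rq_pagesz * (r->rq_npages - 1)
                                      - r->rq_offset + r->rq_tailsz;
            }
            return buflen;
    }

    int main(void)
    {
            size_t iovs[2] = { 64, 120 };
            struct smb_rqst_mock r = {
                    .iov_len = iovs, .rq_nvec = 2,
                    .rq_npages = 3, .rq_pagesz = 4096,
                    .rq_offset = 100, .rq_tailsz = 500,
            };

            /* 64 + 120 + (4096*2 - 100 + 500) = 8776 */
            printf("%lu\n", rqst_len(&r));
            return 0;
    }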
|
D | smb2pdu.c
    SMB2_negotiate():
        786          struct smb_rqst rqst;
        863          memset(&rqst, 0, sizeof(struct smb_rqst));
        864          rqst.rq_iov = iov;
        865          rqst.rq_nvec = 1;
        868                              &rqst, &resp_buftype, flags, &rsp_iov);
    SMB2_sess_sendreceive():
       1253          struct smb_rqst rqst;
       1262          memset(&rqst, 0, sizeof(struct smb_rqst));
       1263          rqst.rq_iov = sess_data->iov;
       1264          rqst.rq_nvec = 2;
       1269                              &rqst,
    [all …]
|
D | smb2proto.h
         50                                  struct smb_rqst *rqst);
         52                          struct TCP_Server_Info *server, struct smb_rqst *rqst);
         57  extern int smb2_calc_signature(struct smb_rqst *rqst,
         60  extern int smb3_calc_signature(struct smb_rqst *rqst,
        128                                  struct smb_rqst *rqst);
        130                                  struct smb_rqst *rqst);
        131  extern void smb2_set_related(struct smb_rqst *rqst);
        152                          struct smb_rqst *rqst,
        155  extern void SMB2_open_free(struct smb_rqst *rqst);
        162                          struct smb_rqst *rqst,
    [all …]
|
D | smb2ops.c
    open_shroot():
        682          struct smb_rqst rqst[2];
        718          memset(rqst, 0, sizeof(rqst));
        724          rqst[0].rq_iov = open_iov;
        725          rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
        735                                      &rqst[0], &oplock, &oparms, &utf16_path);
        738          smb2_set_next_command(tcon, &rqst[0]);
        741          rqst[1].rq_iov = qi_iov;
        742          rqst[1].rq_nvec = 1;
        745                                      &rqst[1], COMPOUND_FID,
        753          smb2_set_related(&rqst[1]);
    [all …]
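open_shroot() shows the SMB2 compounding idiom in full: zero an array of smb_rqst, point each element's rq_iov/rq_nvec at per-command kvecs, then mark the chain with smb2_set_next_command() on every request except the last and smb2_set_related() on each follow-up, so later commands are resolved against COMPOUND_FID, the placeholder file id the server replaces with the handle produced earlier in the chain. A reduced sketch of just that setup shape, over a mock struct (the real struct smb_rqst lives in fs/cifs/cifsglob.h and the iov-size constant in the CIFS headers; the value used here is a placeholder):

    #include <string.h>

    struct kvec { void *iov_base; size_t iov_len; };

    /* Mock: carries only the two fields the setup below touches. */
    struct smb_rqst_mock {
            struct kvec *rq_iov;
            int          rq_nvec;
    };

    #define CREATE_IOV_SIZE 5   /* placeholder for SMB2_CREATE_IOV_SIZE */

    void build_compound(struct smb_rqst_mock rqst[2],
                        struct kvec open_iov[CREATE_IOV_SIZE],
                        struct kvec qi_iov[1])
    {
            memset(rqst, 0, sizeof(struct smb_rqst_mock) * 2);

            /* request 0: SMB2_CREATE, several iovs for create contexts */
            rqst[0].rq_iov = open_iov;
            rqst[0].rq_nvec = CREATE_IOV_SIZE;
            /* kernel: smb2_set_next_command(tcon, &rqst[0]); */

            /* request 1: SMB2_QUERY_INFO against the compound file id,
             * filled in server-side from request 0's create */
            rqst[1].rq_iov = qi_iov;
            rqst[1].rq_nvec = 1;
            /* kernel: smb2_set_related(&rqst[1]); */
    }

Both requests then go to the server in a single round trip, which is the point of the exercise: open, query, and close can be compounded instead of costing three network turnarounds.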
|
D | cifsencrypt.c
    __cifs_calc_signature():
         40  int __cifs_calc_signature(struct smb_rqst *rqst,
         46          struct kvec *iov = rqst->rq_iov;
         47          int n_vec = rqst->rq_nvec;
         79          for (i = 0; i < rqst->rq_npages; i++) {
         83                  rqst_page_get_length(rqst, i, &len, &offset);
         85                  kaddr = (char *) kmap(rqst->rq_pages[i]) + offset;
         91                          kunmap(rqst->rq_pages[i]);
         95                  kunmap(rqst->rq_pages[i]);
    cifs_calc_signature():
        112  static int cifs_calc_signature(struct smb_rqst *rqst,
        117          if (!rqst->rq_iov || !signature || !server)
    [all …]
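__cifs_calc_signature() feeds the hash in two passes: first every kvec in rq_iov, then every page, using rqst_page_get_length() for the per-page (len, offset) pair and kmap()/kunmap() around each page access. The traversal shape is the interesting part; the sketch below reproduces it in userspace with a toy FNV-1a hash standing in for the real HMAC (the kernel uses the crypto shash API, and page mapping is elided here since userspace buffers are always addressable):

    #include <stdint.h>
    #include <stddef.h>

    struct kvec { const void *iov_base; size_t iov_len; };

    static void fnv1a(uint64_t *h, const uint8_t *p, size_t len)
    {
            while (len--) { *h ^= *p++; *h *= 0x100000001b3ULL; }
    }

    /* Two-pass traversal mirroring __cifs_calc_signature(): all kvecs,
     * then all page fragments at their (offset, length). */
    uint64_t sign_rqst(const struct kvec *iov, int n_vec,
                       const uint8_t **pages, const size_t *page_len,
                       const size_t *page_off, unsigned int npages)
    {
            uint64_t h = 0xcbf29ce484222325ULL;   /* FNV offset basis */
            unsigned int i;

            for (i = 0; i < (unsigned int)n_vec; i++)
                    fnv1a(&h, iov[i].iov_base, iov[i].iov_len);

            for (i = 0; i < npages; i++)
                    fnv1a(&h, pages[i] + page_off[i], page_len[i]);

            return h;
    }

The split matters because an smb_rqst's payload is deliberately not contiguous: headers sit in kvecs while bulk data stays in page form, and the signature must cover both without forcing a copy into one flat buffer.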
|
D | smb2transport.c
    smb2_calc_signature():
        214  smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
        220          struct kvec *iov = rqst->rq_iov;
        272          drqst = *rqst;
    smb3_calc_signature():
        533  smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
        539          struct kvec *iov = rqst->rq_iov;
        589          drqst = *rqst;
    smb2_sign_rqst():
        614  smb2_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server)
        622          shdr = (struct smb2_sync_hdr *)rqst->rq_iov[0].iov_base;
        638          rc = server->ops->calc_signature(rqst, server, false);
    smb2_verify_signature():
        644  smb2_verify_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
    [all …]
|
D | misc.c
    rqst_page_get_length():
        977  void rqst_page_get_length(const struct smb_rqst *rqst, unsigned int page,
        980          *len = rqst->rq_pagesz;
        981          *offset = (page == 0) ? rqst->rq_offset : 0;
        983          if (rqst->rq_npages == 1 || page == rqst->rq_npages-1)
        984                  *len = rqst->rq_tailsz;
        986                  *len = rqst->rq_pagesz - rqst->rq_offset;
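This helper is shown nearly whole: every page spans rq_pagesz bytes, except that the first page starts at rq_offset and the last page holds only rq_tailsz bytes. Line 985 is elided, but given line 986 its guard can only be an "else if" on the first page; the reconstruction below makes that assumption explicit and walks a three-page example:

    #include <stdio.h>

    /* Mock carrying only the fields rqst_page_get_length() reads. */
    struct smb_rqst_mock {
            unsigned int rq_npages, rq_pagesz, rq_offset, rq_tailsz;
    };

    /* Reconstruction of the excerpt above; the elided line 985 is
     * assumed to be "else if (page == 0)", the only guard consistent
     * with line 986. */
    static void page_get_length(const struct smb_rqst_mock *rqst,
                                unsigned int page,
                                unsigned int *len, unsigned int *offset)
    {
            *len = rqst->rq_pagesz;
            *offset = (page == 0) ? rqst->rq_offset : 0;

            if (rqst->rq_npages == 1 || page == rqst->rq_npages - 1)
                    *len = rqst->rq_tailsz;                   /* last page */
            else if (page == 0)
                    *len = rqst->rq_pagesz - rqst->rq_offset; /* short first page */
    }

    int main(void)
    {
            struct smb_rqst_mock r = { .rq_npages = 3, .rq_pagesz = 4096,
                                       .rq_offset = 100, .rq_tailsz = 500 };
            unsigned int i, len, off;

            for (i = 0; i < r.rq_npages; i++) {
                    page_get_length(&r, i, &len, &off);
                    printf("page %u: offset %u, len %u\n", i, off, len);
            }
            /* page 0: offset 100, len 3996
             * page 1: offset 0,   len 4096
             * page 2: offset 0,   len 500  -- totals 8592, matching the
             * page term of smb_rqst_len() for the same geometry */
            return 0;
    }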
|
D | cifsproto.h
         95                          struct smb_rqst *rqst,
        102                          struct smb_rqst *rqst, int *resp_buf_type,
        107                          struct smb_rqst *rqst, int *resp_buf_type,
        488  extern int cifs_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server,
        493  extern int cifs_verify_signature(struct smb_rqst *rqst,
        591  int __cifs_calc_signature(struct smb_rqst *rqst,
        605  void rqst_page_get_length(const struct smb_rqst *rqst, unsigned int page,
|
D | smbdirect.h
        273                  int num_rqst, struct smb_rqst *rqst);
    smbd_send():
        313  static inline int smbd_send(struct TCP_Server_Info *server, int num_rqst, struct smb_rqst *rqst) {r…
|
D | smbdirect.c
    smbd_send():
       1987          struct smb_rqst *rqst;
       2016                  rqst = &rqst_array[rqst_idx];
       2017                  iov = rqst->rq_iov;
       2020                          rqst_idx, smb_rqst_len(server, rqst));
       2021                  for (i = 0; i < rqst->rq_nvec; i++)
       2026                  rqst_idx, rqst->rq_nvec, rqst->rq_npages, rqst->rq_pagesz,
       2027                  rqst->rq_tailsz, smb_rqst_len(server, rqst));
       2072                          if (i == rqst->rq_nvec)
       2079                          if (i == rqst->rq_nvec) {
       2096                  for (i = 0; i < rqst->rq_npages; i++) {
    [all …]
|
/kernel/linux/linux-5.10/drivers/nvme/host/ |
D | fc.h
    nvme_fc_format_rsp_hdr():
         42          acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
         43          acc->rqst.desc_len =
         45          acc->rqst.w0.ls_cmd = rqst_ls_cmd;
    nvmefc_vldt_lsreq_discon_assoc():
        194                          struct fcnvme_ls_disconnect_assoc_rqst *rqst)
        200          else if (rqst->desc_list_len !=
        204          else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
        206          else if (rqst->associd.desc_len !=
        210          else if (rqst->discon_cmd.desc_tag !=
        213          else if (rqst->discon_cmd.desc_len !=
        221          else if (rqst->discon_cmd.rsvd8[0])
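nvmefc_vldt_lsreq_discon_assoc() is a pure validation cascade: each else-if checks one field of the big-endian wire structure (descriptor tag, descriptor length, reserved bytes) and the first mismatch selects the reject reason. A compact userspace sketch of the same shape, with illustrative field names and reason codes (ntohl() plays the role of be32_to_cpu()):

    #include <stdint.h>
    #include <arpa/inet.h>

    struct lsdesc_mock {
            uint32_t desc_tag;      /* big-endian on the wire */
            uint32_t desc_len;
            uint8_t  rsvd8[4];
    };

    enum { VERR_NONE, VERR_BAD_TAG, VERR_BAD_LEN, VERR_RSVD_SET };

    /* First failed check wins; success falls through to VERR_NONE. */
    static int validate_desc(const struct lsdesc_mock *d,
                             uint32_t want_tag, uint32_t want_len)
    {
            int ret = VERR_NONE;

            if (ntohl(d->desc_tag) != want_tag)
                    ret = VERR_BAD_TAG;
            else if (ntohl(d->desc_len) != want_len)
                    ret = VERR_BAD_LEN;
            else if (d->rsvd8[0])
                    ret = VERR_RSVD_SET;
            return ret;
    }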
|
/kernel/linux/linux-5.10/include/trace/events/ |
D | sunrpc.h
        659                  __string(progname, xdr->rqst ?
        660                          xdr->rqst->rq_task->tk_client->cl_program->name : "unknown")
        661                  __string(procedure, xdr->rqst ?
        662                          xdr->rqst->rq_task->tk_msg.rpc_proc->p_name : "unknown")
        666                  if (xdr->rqst) {
        667                          const struct rpc_task *task = xdr->rqst->rq_task;
        727                          xdr->rqst->rq_task->tk_client->cl_program->name)
        729                          xdr->rqst->rq_task->tk_msg.rpc_proc->p_name)
        733                  const struct rpc_task *task = xdr->rqst->rq_task;
        922          const struct rpc_rqst *rqst,
    [all …]
|
D | rpcrdma.h
        352                  const struct rpc_rqst *rqst
        355          TP_ARGS(rqst),
        358                  __field(const void *, rqst)
        365                  __entry->rqst = rqst;
        366                  __entry->req = rpcr_to_rdmar(rqst);
        367                  __entry->rep = rpcr_to_rdmar(rqst)->rl_reply;
        368                  __entry->xid = be32_to_cpu(rqst->rq_xid);
        372                  __entry->xid, __entry->rqst, __entry->req, __entry->rep
        379                  const struct rpc_rqst *rqst \
        381                  TP_ARGS(rqst)) \
    [all …]
|
/kernel/linux/linux-5.10/include/linux/sunrpc/ |
D | svc.h
        306  #define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net)
    svc_addr_in():
        311  static inline struct sockaddr_in *svc_addr_in(const struct svc_rqst *rqst)
        313          return (struct sockaddr_in *) &rqst->rq_addr;
    svc_addr_in6():
        316  static inline struct sockaddr_in6 *svc_addr_in6(const struct svc_rqst *rqst)
        318          return (struct sockaddr_in6 *) &rqst->rq_addr;
    svc_addr():
        321  static inline struct sockaddr *svc_addr(const struct svc_rqst *rqst)
        323          return (struct sockaddr *) &rqst->rq_addr;
    svc_daddr_in():
        326  static inline struct sockaddr_in *svc_daddr_in(const struct svc_rqst *rqst)
        328          return (struct sockaddr_in *) &rqst->rq_daddr;
    svc_daddr_in6():
        331  static inline struct sockaddr_in6 *svc_daddr_in6(const struct svc_rqst *rqst)
    [all …]
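All of these helpers cast the same generic storage (rq_addr / rq_daddr) to the sockaddr view matching the address family; sockaddr_storage is sized and aligned so these casts are safe, and callers are expected to check sa_family before picking a view. In passing, note that SVC_NET() expands its argument without parentheses, so it composes safely only with a plain pointer identifier, which is how its callers use it. A userspace sketch of the same cast-per-family pattern:

    #include <stdio.h>
    #include <string.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>
    #include <sys/socket.h>

    /* One sockaddr_storage, reinterpreted per address family, just as
     * svc_addr()/svc_addr_in()/svc_addr_in6() do with rq_addr. */
    static void print_addr(const struct sockaddr_storage *ss)
    {
            char buf[INET6_ADDRSTRLEN];

            if (ss->ss_family == AF_INET) {
                    const struct sockaddr_in *sin = (const struct sockaddr_in *)ss;
                    inet_ntop(AF_INET, &sin->sin_addr, buf, sizeof(buf));
            } else {
                    const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)ss;
                    inet_ntop(AF_INET6, &sin6->sin6_addr, buf, sizeof(buf));
            }
            printf("%s\n", buf);
    }

    int main(void)
    {
            struct sockaddr_storage ss;
            struct sockaddr_in *sin = (struct sockaddr_in *)&ss;

            memset(&ss, 0, sizeof(ss));
            sin->sin_family = AF_INET;
            inet_pton(AF_INET, "192.0.2.1", &sin->sin_addr);
            print_addr(&ss);        /* prints 192.0.2.1 */
            return 0;
    }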
|
D | xdr.h
        222          struct rpc_rqst *rqst;          /* For debugging */
        234                          __be32 *p, struct rpc_rqst *rqst);
        246                          __be32 *p, struct rpc_rqst *rqst);
|
/kernel/linux/linux-5.10/drivers/nvme/target/ |
D | fc.c
    nvmet_fc_ls_create_association():
       1638          struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc;
       1655          else if (be32_to_cpu(rqst->desc_list_len) <
       1658          else if (rqst->assoc_cmd.desc_tag !=
       1661          else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
       1664          else if (!rqst->assoc_cmd.ersp_ratio ||
       1665                   (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
       1666                    be16_to_cpu(rqst->assoc_cmd.sqsize)))
       1677                          be16_to_cpu(rqst->assoc_cmd.sqsize));
       1688                          sizeof(*acc), rqst->w0.ls_cmd,
       1694          queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
    [all …]
|
/kernel/linux/linux-5.10/drivers/gpu/drm/msm/dsi/phy/ |
D | dsi_phy_14nm.c
    dsi_14nm_dphy_set_timing():
         22          u32 rqst = clk_ln ? timing->hs_rqst_ckln : timing->hs_rqst;
         36                  DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST(rqst));
|
/kernel/linux/linux-5.10/net/sunrpc/ |
D | sched.c
    rpc_malloc():
       1003          struct rpc_rqst *rqst = task->tk_rqstp;
       1004          size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
       1023          rqst->rq_buffer = buf->data;
       1024          rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
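rpc_malloc() makes one allocation of rq_callsize + rq_rcvsize bytes and carves it in two: rq_buffer gets the front for the marshalled call, and rq_rbuffer points rq_callsize bytes in for the reply, presumably so the RPC hot path pays for a single allocation and a single free. A minimal sketch of that carve (the surrounding rpc_buffer bookkeeping is omitted):

    #include <stdlib.h>
    #include <stdio.h>

    struct rqst_mock {
            size_t rq_callsize, rq_rcvsize;
            void  *rq_buffer;       /* marshalled call goes here */
            void  *rq_rbuffer;      /* reply is decoded from here */
    };

    static int rpc_malloc_sketch(struct rqst_mock *rqst)
    {
            char *buf = malloc(rqst->rq_callsize + rqst->rq_rcvsize);

            if (!buf)
                    return -1;
            rqst->rq_buffer = buf;
            rqst->rq_rbuffer = buf + rqst->rq_callsize;  /* one free() later */
            return 0;
    }

    int main(void)
    {
            struct rqst_mock r = { .rq_callsize = 1024, .rq_rcvsize = 4096 };

            if (rpc_malloc_sketch(&r))
                    return 1;
            printf("call %p, reply %p\n", r.rq_buffer, r.rq_rbuffer);
            free(r.rq_buffer);      /* rq_rbuffer is inside the same block */
            return 0;
    }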
|
/kernel/linux/linux-5.10/include/linux/ |
D | nvme-fc.h
        333          struct fcnvme_lsdesc_rqst rqst;         /* struct member */
        341          struct fcnvme_lsdesc_rqst rqst;         /* struct member */
|