Searched refs:rqstp (Results 1 – 16 of 16) sorted by relevance

/net/sunrpc/xprtrdma/
svc_rdma_recvfrom.c
57 static void rdma_build_arg_xdr(struct svc_rqst *rqstp, in rdma_build_arg_xdr() argument
67 put_page(rqstp->rq_pages[0]); in rdma_build_arg_xdr()
68 rqstp->rq_pages[0] = page; in rdma_build_arg_xdr()
71 rqstp->rq_arg.head[0].iov_base = page_address(page); in rdma_build_arg_xdr()
72 rqstp->rq_arg.head[0].iov_len = min(byte_count, ctxt->sge[0].length); in rdma_build_arg_xdr()
73 rqstp->rq_arg.len = byte_count; in rdma_build_arg_xdr()
74 rqstp->rq_arg.buflen = byte_count; in rdma_build_arg_xdr()
77 bc = byte_count - rqstp->rq_arg.head[0].iov_len; in rdma_build_arg_xdr()
80 rqstp->rq_arg.page_len = bc; in rdma_build_arg_xdr()
81 rqstp->rq_arg.page_base = 0; in rdma_build_arg_xdr()
[all …]
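
The rdma_build_arg_xdr() hits above show the incoming RDMA receive buffer being wired into rqstp->rq_arg: the head iovec points at the first receive page, and whatever does not fit in the head is accounted to the page list. Below is a minimal user-space sketch of that head/page split; the types are hypothetical stand-ins, not the kernel's struct svc_rqst or struct xdr_buf.

#include <stddef.h>
#include <stdio.h>

struct kvec_s  { void *iov_base; size_t iov_len; };   /* stand-in for struct kvec */
struct xdr_buf_s {                                    /* stand-in for struct xdr_buf */
    struct kvec_s head;
    size_t page_len, page_base, len, buflen;
};

/* Split byte_count between the head iovec and the page list,
 * mirroring the head/page accounting seen in rdma_build_arg_xdr(). */
static void build_arg(struct xdr_buf_s *arg, void *first_page,
                      size_t first_sge_len, size_t byte_count)
{
    arg->head.iov_base = first_page;
    arg->head.iov_len  = byte_count < first_sge_len ? byte_count : first_sge_len;
    arg->len    = byte_count;
    arg->buflen = byte_count;

    arg->page_len  = byte_count - arg->head.iov_len;  /* remainder lives in pages */
    arg->page_base = 0;
}

int main(void)
{
    char page[4096];
    struct xdr_buf_s arg;
    build_arg(&arg, page, sizeof(page), 6000);
    printf("head=%zu pages=%zu\n", arg.head.iov_len, arg.page_len);
    return 0;
}
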
svc_rdma_marshal.c
157 struct svc_rqst *rqstp) in svc_rdma_xdr_decode_req() argument
164 rmsgp = (struct rpcrdma_msg *)rqstp->rq_arg.head[0].iov_base; in svc_rdma_xdr_decode_req()
167 if (rqstp->rq_arg.len <= RPCRDMA_HDRLEN_MIN) { in svc_rdma_xdr_decode_req()
169 rqstp->rq_arg.len); in svc_rdma_xdr_decode_req()
191 rqstp->rq_arg.head[0].iov_base = va; in svc_rdma_xdr_decode_req()
193 rqstp->rq_arg.head[0].iov_len -= hdrlen; in svc_rdma_xdr_decode_req()
194 if (hdrlen > rqstp->rq_arg.len) in svc_rdma_xdr_decode_req()
203 vaend = (u32 *)((unsigned long)rmsgp + rqstp->rq_arg.len); in svc_rdma_xdr_decode_req()
214 rqstp->rq_arg.head[0].iov_base = va; in svc_rdma_xdr_decode_req()
216 rqstp->rq_arg.head[0].iov_len -= hdr_len; in svc_rdma_xdr_decode_req()
[all …]
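
svc_rdma_xdr_decode_req() above parses the RPC-over-RDMA header at the front of rq_arg.head, rejects messages shorter than the minimum header, and then advances the head iovec past what it consumed. A hedged sketch of that "validate then consume" step follows; the types and the HDRLEN_MIN constant are illustrative stand-ins, not RPCRDMA_HDRLEN_MIN or the kernel's structures.

#include <stddef.h>
#include <stdio.h>

struct kvec_s { char *iov_base; size_t iov_len; };   /* stand-in for struct kvec */

#define HDRLEN_MIN 28        /* illustrative stand-in for RPCRDMA_HDRLEN_MIN */

/* Validate and consume a transport header sitting at the front of the head
 * iovec, the way svc_rdma_xdr_decode_req() steps rq_arg past it. */
static long decode_transport_header(struct kvec_s *head, size_t msg_len,
                                    size_t hdrlen)
{
    if (msg_len <= HDRLEN_MIN)    /* message too short to hold any header */
        return -1;
    if (hdrlen > msg_len)         /* claimed header longer than the message */
        return -1;

    head->iov_base += hdrlen;     /* advance past the header ... */
    head->iov_len  -= hdrlen;     /* ... and shrink what is left to decode */
    return (long)hdrlen;
}

int main(void)
{
    char msg[100];
    struct kvec_s head = { msg, sizeof(msg) };
    printf("consumed %ld bytes\n", decode_transport_header(&head, sizeof(msg), 40));
    return 0;
}
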
svc_rdma_sendto.c
288 static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp, in send_write() argument
331 dma_map_xdr(xprt, &rqstp->rq_res, xdr_off, in send_write()
380 struct svc_rqst *rqstp, in send_write_chunks() argument
383 u32 xfer_len = rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len; in send_write_chunks()
405 for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0; in send_write_chunks()
425 ret = send_write(xprt, rqstp, in send_write_chunks()
445 return rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len; in send_write_chunks()
451 struct svc_rqst *rqstp, in send_reply_chunks() argument
454 u32 xfer_len = rqstp->rq_res.len; in send_reply_chunks()
499 ret = send_write(xprt, rqstp, in send_reply_chunks()
[all …]
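
send_write_chunks() above derives the number of bytes to push via RDMA Write from rq_res: everything in the page list plus the tail iovec, starting at an XDR offset just past the head. The sketch below approximates that accounting loop; the chunk size, types, and helper are invented for the example, not the kernel's write-list handling.

#include <stdio.h>
#include <stddef.h>

struct kvec_s    { size_t iov_len; };
struct xdr_buf_s { struct kvec_s head, tail; size_t page_len; };

/* Walk the reply in fixed-size chunks, as send_write_chunks() does with
 * the client-provided write list; returns the number of bytes "sent". */
static size_t send_write_chunks(const struct xdr_buf_s *res, size_t max_chunk)
{
    size_t xfer_len = res->page_len + res->tail.iov_len; /* bytes after the head */
    size_t xdr_off  = res->head.iov_len;                 /* skip the RPC header */
    size_t sent = 0;

    while (xfer_len) {
        size_t this_write = xfer_len < max_chunk ? xfer_len : max_chunk;
        /* a real implementation posts an RDMA Write of rq_res at xdr_off here */
        xdr_off  += this_write;
        xfer_len -= this_write;
        sent     += this_write;
    }
    return sent;
}

int main(void)
{
    struct xdr_buf_s res = { .head = { 128 }, .tail = { 200 }, .page_len = 8192 };
    printf("wrote %zu bytes\n", send_write_chunks(&res, 4096));
    return 0;
}
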
svc_rdma_transport.c
1091 static void svc_rdma_release_rqst(struct svc_rqst *rqstp) in svc_rdma_release_rqst() argument
/net/sunrpc/
svc.c
568 svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node) in svc_init_buffer() argument
573 if (svc_is_backchannel(rqstp)) in svc_init_buffer()
587 rqstp->rq_pages[arghi++] = p; in svc_init_buffer()
597 svc_release_buffer(struct svc_rqst *rqstp) in svc_release_buffer() argument
601 for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++) in svc_release_buffer()
602 if (rqstp->rq_pages[i]) in svc_release_buffer()
603 put_page(rqstp->rq_pages[i]); in svc_release_buffer()
609 struct svc_rqst *rqstp; in svc_prepare_thread() local
611 rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node); in svc_prepare_thread()
612 if (!rqstp) in svc_prepare_thread()
[all …]
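
svc_init_buffer() and svc_release_buffer() above show the lifetime of rq_pages: a fixed-size array is filled with pages when the service thread is set up, and every non-NULL slot is dropped on teardown. Below is a user-space approximation using malloc/free in place of page allocation; the array size, struct, and helper names are made up for the sketch.

#include <stdlib.h>

#define RQ_PAGES 20               /* illustrative, not the kernel's sizing */

struct rqst_s { void *rq_pages[RQ_PAGES]; };   /* stand-in for struct svc_rqst */

/* Fill the page array up front so request processing never allocates. */
static int init_buffer(struct rqst_s *rqstp, size_t pages_needed)
{
    size_t arghi = 0;
    while (pages_needed && arghi < RQ_PAGES) {
        void *p = malloc(4096);
        if (!p)
            return -1;
        rqstp->rq_pages[arghi++] = p;
        pages_needed--;
    }
    return pages_needed ? -1 : 0;
}

/* Mirror of svc_release_buffer(): drop every slot that is still populated. */
static void release_buffer(struct rqst_s *rqstp)
{
    for (size_t i = 0; i < RQ_PAGES; i++)
        if (rqstp->rq_pages[i])
            free(rqstp->rq_pages[i]);
}

int main(void)
{
    struct rqst_s r = { 0 };
    int ok = init_buffer(&r, 8);
    release_buffer(&r);           /* safe on partial failure: NULL slots skipped */
    return ok ? 1 : 0;
}
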
svc_xprt.c
22 static int svc_deferred_recv(struct svc_rqst *rqstp);
283 void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt) in svc_xprt_copy_addrs() argument
285 memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen); in svc_xprt_copy_addrs()
286 rqstp->rq_addrlen = xprt->xpt_remotelen; in svc_xprt_copy_addrs()
292 memcpy(&rqstp->rq_daddr, &xprt->xpt_local, xprt->xpt_locallen); in svc_xprt_copy_addrs()
293 rqstp->rq_daddrlen = xprt->xpt_locallen; in svc_xprt_copy_addrs()
304 char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len) in svc_print_addr() argument
306 return __svc_print_addr(svc_addr(rqstp), buf, len); in svc_print_addr()
316 static void svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp) in svc_thread_enqueue() argument
318 list_add(&rqstp->rq_list, &pool->sp_threads); in svc_thread_enqueue()
[all …]
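
svc_xprt_copy_addrs() above copies the transport's cached remote and local endpoints into the request, carrying the sockaddr bytes and their lengths together. A hedged stand-alone version of the same copy is sketched below; the field names approximate the kernel's but the structs are stand-ins.

#include <string.h>
#include <sys/socket.h>

struct xprt_s {                       /* stand-in for struct svc_xprt */
    struct sockaddr_storage xpt_remote, xpt_local;
    socklen_t xpt_remotelen, xpt_locallen;
};

struct rqst_s {                       /* stand-in for struct svc_rqst */
    struct sockaddr_storage rq_addr, rq_daddr;
    socklen_t rq_addrlen, rq_daddrlen;
};

/* Copy both endpoints; only the valid prefix of each sockaddr is copied,
 * which is why the lengths travel with the addresses. */
static void copy_addrs(struct rqst_s *rqstp, const struct xprt_s *xprt)
{
    memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen);
    rqstp->rq_addrlen = xprt->xpt_remotelen;

    memcpy(&rqstp->rq_daddr, &xprt->xpt_local, xprt->xpt_locallen);
    rqstp->rq_daddrlen = xprt->xpt_locallen;
}

int main(void)
{
    struct xprt_s x = { 0 };
    struct rqst_s r = { 0 };
    copy_addrs(&r, &x);
    return 0;
}
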
svcsock.c
120 static void svc_release_skb(struct svc_rqst *rqstp) in svc_release_skb() argument
122 struct sk_buff *skb = rqstp->rq_xprt_ctxt; in svc_release_skb()
126 container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt); in svc_release_skb()
127 rqstp->rq_xprt_ctxt = NULL; in svc_release_skb()
129 dprintk("svc: service %p, releasing skb %p\n", rqstp, skb); in svc_release_skb()
141 static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh) in svc_set_cmsg_data() argument
144 container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt); in svc_set_cmsg_data()
153 svc_daddr_in(rqstp)->sin_addr.s_addr; in svc_set_cmsg_data()
160 struct sockaddr_in6 *daddr = svc_daddr_in6(rqstp); in svc_set_cmsg_data()
234 static int svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr) in svc_sendto() argument
[all …]
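
svc_release_skb() above shows the take-and-clear idiom for rq_xprt_ctxt: the stashed socket buffer pointer is fetched, the field is cleared so it cannot be released twice, and only then is the buffer freed. A small sketch of the same idiom with a generic heap pointer standing in for the sk_buff:

#include <stdio.h>
#include <stdlib.h>

struct rqst_s { void *rq_xprt_ctxt; };   /* stand-in for struct svc_rqst */

/* Detach-then-free: clearing the field before freeing keeps a repeated
 * call from seeing (and double-freeing) a stale pointer. */
static void release_ctxt(struct rqst_s *rqstp)
{
    void *ctxt = rqstp->rq_xprt_ctxt;

    if (!ctxt)
        return;
    rqstp->rq_xprt_ctxt = NULL;
    printf("releasing ctxt %p\n", ctxt);
    free(ctxt);
}

int main(void)
{
    struct rqst_s r = { .rq_xprt_ctxt = malloc(64) };
    release_ctxt(&r);
    release_ctxt(&r);   /* second call is a no-op */
    return 0;
}
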
svcauth_unix.c
630 static struct group_info *unix_gid_find(kuid_t uid, struct svc_rqst *rqstp) in unix_gid_find() argument
635 struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, in unix_gid_find()
641 ret = cache_check(sn->unix_gid_cache, &ug->h, &rqstp->rq_chandle); in unix_gid_find()
657 svcauth_unix_set_client(struct svc_rqst *rqstp) in svcauth_unix_set_client() argument
663 struct svc_cred *cred = &rqstp->rq_cred; in svcauth_unix_set_client()
664 struct svc_xprt *xprt = rqstp->rq_xprt; in svcauth_unix_set_client()
668 switch (rqstp->rq_addr.ss_family) { in svcauth_unix_set_client()
670 sin = svc_addr_in(rqstp); in svcauth_unix_set_client()
675 sin6 = svc_addr_in6(rqstp); in svcauth_unix_set_client()
681 rqstp->rq_client = NULL; in svcauth_unix_set_client()
[all …]
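
svcauth_unix_set_client() above branches on rq_addr.ss_family to view the stored sockaddr_storage as either IPv4 or IPv6 before deciding which client the request belongs to. The sketch below shows only that family switch; the helper name and return values are invented for the example.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

/* Print the peer address by interpreting the sockaddr_storage according
 * to ss_family, as the set_client path does before its cache lookups. */
static int print_peer(const struct sockaddr_storage *ss)
{
    char buf[INET6_ADDRSTRLEN];

    switch (ss->ss_family) {
    case AF_INET: {
        const struct sockaddr_in *sin = (const struct sockaddr_in *)ss;
        inet_ntop(AF_INET, &sin->sin_addr, buf, sizeof(buf));
        break;
    }
    case AF_INET6: {
        const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)ss;
        inet_ntop(AF_INET6, &sin6->sin6_addr, buf, sizeof(buf));
        break;
    }
    default:
        return -1;       /* unknown family: reject, as the kernel path does */
    }
    printf("peer: %s\n", buf);
    return 0;
}

int main(void)
{
    struct sockaddr_storage ss;
    struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
    memset(&ss, 0, sizeof(ss));
    sin->sin_family = AF_INET;
    inet_pton(AF_INET, "192.0.2.1", &sin->sin_addr);
    return print_peer(&ss);
}
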
svcauth.c
37 svc_authenticate(struct svc_rqst *rqstp, __be32 *authp) in svc_authenticate() argument
44 flavor = svc_getnl(&rqstp->rq_arg.head[0]); in svc_authenticate()
57 rqstp->rq_authop = aops; in svc_authenticate()
58 return aops->accept(rqstp, authp); in svc_authenticate()
62 int svc_set_client(struct svc_rqst *rqstp) in svc_set_client() argument
64 return rqstp->rq_authop->set_client(rqstp); in svc_set_client()
72 int svc_authorise(struct svc_rqst *rqstp) in svc_authorise() argument
74 struct auth_ops *aops = rqstp->rq_authop; in svc_authorise()
77 rqstp->rq_authop = NULL; in svc_authorise()
80 rv = aops->release(rqstp); in svc_authorise()
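
svc_authenticate() and svc_authorise() above read the flavor from the head of rq_arg, look up a matching auth_ops table, stash it in rq_authop, and call its accept and release hooks. Below is a hedged sketch of that flavor-to-ops dispatch with a tiny hand-rolled ops table; the names mimic the kernel's shape but everything here is local to the example.

#include <stdio.h>

struct rqst_s;                                   /* stand-in request */

struct auth_ops_s {                              /* stand-in for struct auth_ops */
    unsigned int flavor;
    int (*accept)(struct rqst_s *rqstp);
    int (*release)(struct rqst_s *rqstp);
};

struct rqst_s { const struct auth_ops_s *rq_authop; };

static int null_accept(struct rqst_s *r)  { (void)r; return 0; }
static int null_release(struct rqst_s *r) { (void)r; return 0; }

static const struct auth_ops_s auth_table[] = {
    { .flavor = 0 /* AUTH_NULL */, .accept = null_accept, .release = null_release },
};

/* Pick the ops for the wire flavor, remember it on the request, run accept. */
static int authenticate(struct rqst_s *rqstp, unsigned int flavor)
{
    for (size_t i = 0; i < sizeof(auth_table) / sizeof(auth_table[0]); i++) {
        if (auth_table[i].flavor == flavor) {
            rqstp->rq_authop = &auth_table[i];
            return auth_table[i].accept(rqstp);
        }
    }
    return -1;                                   /* unknown flavor */
}

/* The release hook runs later; the ops pointer is dropped first. */
static int authorise(struct rqst_s *rqstp)
{
    const struct auth_ops_s *aops = rqstp->rq_authop;

    rqstp->rq_authop = NULL;
    return aops ? aops->release(rqstp) : 0;
}

int main(void)
{
    struct rqst_s r = { 0 };
    if (authenticate(&r, 0) == 0)
        printf("accepted, release -> %d\n", authorise(&r));
    return 0;
}
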
auth.c
694 static void rpcauth_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp, in rpcauth_wrap_req_encode() argument
699 xdr_init_encode(&xdr, &rqstp->rq_snd_buf, data); in rpcauth_wrap_req_encode()
700 encode(rqstp, &xdr, obj); in rpcauth_wrap_req_encode()
704 rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp, in rpcauth_wrap_req() argument
712 return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj); in rpcauth_wrap_req()
714 rpcauth_wrap_req_encode(encode, rqstp, data, obj); in rpcauth_wrap_req()
719 rpcauth_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp, in rpcauth_unwrap_req_decode() argument
724 xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, data); in rpcauth_unwrap_req_decode()
725 return decode(rqstp, &xdr, obj); in rpcauth_unwrap_req_decode()
729 rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp, in rpcauth_unwrap_resp() argument
[all …]
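
rpcauth_wrap_req() and rpcauth_unwrap_resp() above are thin trampolines: if the credential has its own wrap or unwrap operation it runs, otherwise the plain encode or decode callback is applied directly to the send or receive buffer. A simplified sketch of that callback indirection follows; the buffer type and function pointers are stand-ins, not kxdreproc_t/kxdrdproc_t or the kernel's cred ops.

#include <stdio.h>
#include <string.h>

struct buf_s { char data[256]; size_t len; };    /* stand-in for struct xdr_buf */

typedef void (*encode_fn)(struct buf_s *buf, const void *obj);

struct cred_s {                                  /* stand-in credential */
    /* optional security wrapping; NULL means "encode directly" */
    void (*wrap_req)(encode_fn encode, struct buf_s *buf, const void *obj);
};

static void plain_encode(struct buf_s *buf, const void *obj)
{
    buf->len = strlen((const char *)obj);
    memcpy(buf->data, obj, buf->len);
}

/* Mirrors rpcauth_wrap_req(): defer to the credential when it wraps,
 * otherwise call the encoder straight onto the send buffer. */
static void auth_wrap_req(const struct cred_s *cred, encode_fn encode,
                          struct buf_s *snd_buf, const void *obj)
{
    if (cred->wrap_req)
        cred->wrap_req(encode, snd_buf, obj);
    else
        encode(snd_buf, obj);
}

int main(void)
{
    struct cred_s cred = { 0 };                  /* no wrapping: AUTH_NULL-like */
    struct buf_s snd = { 0 };

    auth_wrap_req(&cred, plain_encode, &snd, "ping");
    printf("encoded %zu bytes\n", snd.len);
    return 0;
}
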
cache.c
257 struct cache_head *h, struct cache_req *rqstp) in cache_check() argument
269 if (rqstp == NULL) { in cache_check()
291 if (!cache_defer_req(rqstp, h)) { in cache_check()
clnt.c
2214 static void rpcproc_encode_null(void *rqstp, struct xdr_stream *xdr, void *obj) in rpcproc_encode_null() argument
2218 static int rpcproc_decode_null(void *rqstp, struct xdr_stream *xdr, void *obj) in rpcproc_decode_null() argument
/net/sunrpc/auth_gss/
svcauth_gss.c
661 gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci, in gss_verify_header() argument
668 struct kvec *argv = &rqstp->rq_arg.head[0]; in gss_verify_header()
685 if (rqstp->rq_deferred) /* skip verification of revisited request */ in gss_verify_header()
707 gss_write_null_verf(struct svc_rqst *rqstp) in gss_write_null_verf() argument
711 svc_putnl(rqstp->rq_res.head, RPC_AUTH_NULL); in gss_write_null_verf()
712 p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len; in gss_write_null_verf()
715 if (!xdr_ressize_check(rqstp, p)) in gss_write_null_verf()
721 gss_write_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq) in gss_write_verf() argument
730 svc_putnl(rqstp->rq_res.head, RPC_AUTH_GSS); in gss_write_verf()
736 p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len; in gss_write_verf()
[all …]
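
gss_write_null_verf() and gss_write_verf() above append an RPC verifier to the reply by writing the flavor at the current end of rq_res.head and then checking that the reply still fits. Below is a rough user-space sketch of appending a (flavor, length, body) triple with a bounds check; XDR padding and the real svc_putnl/xdr_ressize_check helpers are glossed over, and the types are stand-ins.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct kvec_s { unsigned char *iov_base; size_t iov_len; };  /* stand-in kvec */

/* Append a verifier of the given flavor to the head iovec, refusing to
 * overrun the buffer capacity, roughly like the gss_write_*_verf() paths. */
static int write_verf(struct kvec_s *head, size_t capacity,
                      uint32_t flavor, const void *body, uint32_t body_len)
{
    size_t need = 8 + body_len;            /* flavor word + length word + body */
    unsigned char *p = head->iov_base + head->iov_len;
    uint32_t word;

    if (head->iov_len + need > capacity)
        return -1;                         /* reply would not fit */

    word = htonl(flavor);   memcpy(p, &word, 4); p += 4;
    word = htonl(body_len); memcpy(p, &word, 4); p += 4;
    memcpy(p, body, body_len);

    head->iov_len += need;
    return 0;
}

int main(void)
{
    unsigned char reply[128] = { 0 };
    struct kvec_s head = { .iov_base = reply, .iov_len = 24 };

    if (write_verf(&head, sizeof(reply), 6 /* RPC_AUTH_GSS */, "mic", 3) == 0)
        printf("head now %zu bytes\n", head.iov_len);
    return 0;
}
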
auth_gss.c
1331 static void gss_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp, in gss_wrap_req_encode() argument
1336 xdr_init_encode(&xdr, &rqstp->rq_snd_buf, p); in gss_wrap_req_encode()
1337 encode(rqstp, &xdr, obj); in gss_wrap_req_encode()
1342 kxdreproc_t encode, struct rpc_rqst *rqstp, in gss_wrap_req_integ() argument
1345 struct xdr_buf *snd_buf = &rqstp->rq_snd_buf; in gss_wrap_req_integ()
1357 *p++ = htonl(rqstp->rq_seqno); in gss_wrap_req_integ()
1359 gss_wrap_req_encode(encode, rqstp, p, obj); in gss_wrap_req_integ()
1389 priv_release_snd_buf(struct rpc_rqst *rqstp) in priv_release_snd_buf() argument
1393 for (i=0; i < rqstp->rq_enc_pages_num; i++) in priv_release_snd_buf()
1394 __free_page(rqstp->rq_enc_pages[i]); in priv_release_snd_buf()
[all …]
gss_rpc_xdr.h
181 int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
gss_rpc_xdr.c
791 int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp, in gssx_dec_accept_sec_context() argument