/*
 * linux/include/linux/sunrpc/svc.h
 *
 * RPC server declarations.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */


#ifndef SUNRPC_SVC_H
#define SUNRPC_SVC_H

#include <linux/in.h>
#include <linux/in6.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/wait.h>
#include <linux/mm.h>

/* statistics for svc_pool structures */
struct svc_pool_stats {
	atomic_long_t	packets;
	unsigned long	sockets_queued;
	atomic_long_t	threads_woken;
	atomic_long_t	threads_timedout;
};

/*
 * RPC service thread pool.
 *
 * Pool of threads and temporary sockets.  Generally there is only
 * a single one of these per RPC service, but on NUMA machines those
 * services that can benefit from it (i.e. nfs but not lockd) will
 * have one pool per NUMA node.  This optimisation reduces cross-
 * node traffic on multi-node NUMA NFS servers.
 */
struct svc_pool {
	unsigned int		sp_id;		/* pool id; also node id on NUMA */
	spinlock_t		sp_lock;	/* protects all fields */
	struct list_head	sp_sockets;	/* pending sockets */
	unsigned int		sp_nrthreads;	/* # of threads in pool */
	struct list_head	sp_all_threads;	/* all server threads */
	struct svc_pool_stats	sp_stats;	/* statistics on pool operation */
#define	SP_TASK_PENDING		(0)		/* still work to do even if no
						 * xprt is queued. */
	unsigned long		sp_flags;
} ____cacheline_aligned_in_smp;
51 
52 struct svc_serv;
53 
54 struct svc_serv_ops {
55 	/* Callback to use when last thread exits. */
56 	void		(*svo_shutdown)(struct svc_serv *, struct net *);
57 
58 	/* function for service threads to run */
59 	int		(*svo_function)(void *);
60 
61 	/* queue up a transport for servicing */
62 	void		(*svo_enqueue_xprt)(struct svc_xprt *);
63 
64 	/* set up thread (or whatever) execution context */
65 	int		(*svo_setup)(struct svc_serv *, struct svc_pool *, int);
66 
67 	/* optional module to count when adding threads (pooled svcs only) */
68 	struct module	*svo_module;
69 };
70 
/*
 * RPC service.
 *
 * An RPC service is a "daemon," possibly multithreaded, which
 * receives and processes incoming RPC messages.
 * It has one or more transport sockets associated with it, and maintains
 * a list of idle threads waiting for input.
 *
 * We currently do not support more than one RPC program per daemon.
 */
struct svc_serv {
	struct svc_program *	sv_program;	/* RPC program */
	struct svc_stat *	sv_stats;	/* RPC statistics */
	spinlock_t		sv_lock;
	unsigned int		sv_nrthreads;	/* # of server threads */
	unsigned int		sv_maxconn;	/* max connections allowed or
						 * '0' causing max to be based
						 * on number of threads. */

	unsigned int		sv_max_payload;	/* datagram payload size */
	unsigned int		sv_max_mesg;	/* max_payload + 1 page for overheads */
	unsigned int		sv_xdrsize;	/* XDR buffer size */
	struct list_head	sv_permsocks;	/* all permanent sockets */
	struct list_head	sv_tempsocks;	/* all temporary sockets */
	int			sv_tmpcnt;	/* count of temporary sockets */
	struct timer_list	sv_temptimer;	/* timer for aging temporary sockets */

	char *			sv_name;	/* service name */

	unsigned int		sv_nrpools;	/* number of thread pools */
	struct svc_pool *	sv_pools;	/* array of thread pools */
	struct svc_serv_ops	*sv_ops;	/* server operations */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	struct list_head	sv_cb_list;	/* queue for callback requests
						 * that arrive over the same
						 * connection */
	spinlock_t		sv_cb_lock;	/* protects the svc_cb_list */
	wait_queue_head_t	sv_cb_waitq;	/* sleep here if there are no
						 * entries in the svc_cb_list */
	struct svc_xprt		*sv_bc_xprt;	/* callback on fore channel */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
};

/*
 * We use sv_nrthreads as a reference count.  svc_destroy() drops
 * this refcount, so we need to bump it up around operations that
 * change the number of threads.  Horrible, but there it is.
 * Should be called with the "service mutex" held.
 */
static inline void svc_get(struct svc_serv *serv)
{
	serv->sv_nrthreads++;
}
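
/*
 * Illustrative sketch (not part of the original header): a caller that
 * changes the thread count takes a reference first and drops it with
 * svc_destroy() afterwards.  The "nfsd_mutex" name below stands in for
 * whatever service mutex the caller actually holds.
 *
 *	mutex_lock(&nfsd_mutex);
 *	svc_get(serv);			// hold serv across the change
 *	error = svc_set_num_threads(serv, NULL, nrservs);
 *	svc_destroy(serv);		// drops the ref svc_get() took
 *	mutex_unlock(&nfsd_mutex);
 */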

/*
 * Maximum payload size supported by a kernel RPC server.
 * This is used to determine the max number of pages nfsd is
 * willing to return in a single READ operation.
 *
 * These happen to all be powers of 2, which is not strictly
 * necessary but helps enforce the real limitation, which is
 * that they should be multiples of PAGE_SIZE.
 *
 * For UDP transports, a block plus NFS, RPC, and UDP headers
 * has to fit into the IP datagram limit of 64K.  The largest
 * feasible number for all known page sizes is probably 48K,
 * but we choose 32K here.  This is the same as the historical
 * Linux limit; someone who cares more about NFS/UDP performance
 * can test a larger number.
 *
 * For TCP transports we have more freedom.  A size of 1MB is
 * chosen to match the client limit.  Other OSes are known to
 * have larger limits, but those numbers are probably beyond
 * the point of diminishing returns.
 */
#define RPCSVC_MAXPAYLOAD	(1*1024*1024u)
#define RPCSVC_MAXPAYLOAD_TCP	RPCSVC_MAXPAYLOAD
#define RPCSVC_MAXPAYLOAD_UDP	(32*1024u)

extern u32 svc_max_payload(const struct svc_rqst *rqstp);

/*
 * RPC Requests and replies are stored in one or more pages.
 * We maintain an array of pages for each server thread.
 * Requests are copied into these pages as they arrive.  Remaining
 * pages are available to write the reply into.
 *
 * Pages are sent using ->sendpage so each server thread needs to
 * allocate more to replace those used in sending.  To help keep track
 * of these pages we have a receive list where all pages initially live,
 * and a send list where pages are moved to when they are to be part
 * of a reply.
 *
 * We use xdr_buf for holding responses as it fits well with NFS
 * read responses (that have a header, and some data pages, and possibly
 * a tail) and means we can share some client side routines.
 *
 * The xdr_buf.head kvec always points to the first page in the rq_*pages
 * list.  The xdr_buf.pages pointer points to the second page on that
 * list.  xdr_buf.tail points to the end of the first page.
 * This assumes that the non-page part of an rpc reply will fit
 * in a page - NFSd ensures this.  lockd also has no trouble.
 *
 * Each request/reply pair can have at most one "payload", plus two pages,
 * one for the request, and one for the reply.
 * When using ->sendfile to return read data, we might need one extra page
 * if the request is not page-aligned.  So add another '1'.
 */
#define RPCSVC_MAXPAGES		((RPCSVC_MAXPAYLOAD+PAGE_SIZE-1)/PAGE_SIZE \
				+ 2 + 1)
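
/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096): the 1MB
 * maximum payload needs 1048576/4096 = 256 pages, plus two pages for
 * the request and reply headers and one more for a non-page-aligned
 * ->sendfile read, giving RPCSVC_MAXPAGES = 259.
 */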

/* Consume one word from the front of @iov and return it in host order. */
static inline u32 svc_getnl(struct kvec *iov)
{
	__be32 val, *vp;
	vp = iov->iov_base;
	val = *vp++;
	iov->iov_base = (void*)vp;
	iov->iov_len -= sizeof(__be32);
	return ntohl(val);
}

/* Append @val to @iov in network byte order. */
static inline void svc_putnl(struct kvec *iov, u32 val)
{
	__be32 *vp = iov->iov_base + iov->iov_len;
	*vp = htonl(val);
	iov->iov_len += sizeof(__be32);
}

/* Like svc_getnl(), but return the raw network-order word unswapped. */
static inline __be32 svc_getu32(struct kvec *iov)
{
	__be32 val, *vp;
	vp = iov->iov_base;
	val = *vp++;
	iov->iov_base = (void*)vp;
	iov->iov_len -= sizeof(__be32);
	return val;
}

/* Push the most recently consumed word back onto the front of @iov. */
static inline void svc_ungetu32(struct kvec *iov)
{
	__be32 *vp = (__be32 *)iov->iov_base;
	iov->iov_base = (void *)(vp - 1);
	iov->iov_len += sizeof(*vp);
}

/* Append an already network-order word to @iov. */
static inline void svc_putu32(struct kvec *iov, __be32 val)
{
	__be32 *vp = iov->iov_base + iov->iov_len;
	*vp = val;
	iov->iov_len += sizeof(__be32);
}
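
/*
 * Illustrative sketch (not part of the original header): pulling a word
 * off the decoded request and appending a status word to the reply with
 * the helpers above.
 *
 *	struct kvec *argv = &rqstp->rq_arg.head[0];
 *	struct kvec *resv = &rqstp->rq_res.head[0];
 *	u32 vers = svc_getnl(argv);		// host-order word from the request
 *	svc_putnl(resv, RPC_SUCCESS);		// network-order word onto the reply
 */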

/*
 * The context of a single thread, including the request currently being
 * processed.
 */
struct svc_rqst {
	struct list_head	rq_all;		/* all threads list */
	struct rcu_head		rq_rcu_head;	/* for RCU deferred kfree */
	struct svc_xprt *	rq_xprt;	/* transport ptr */

	struct sockaddr_storage	rq_addr;	/* peer address */
	size_t			rq_addrlen;
	struct sockaddr_storage	rq_daddr;	/* dest addr of request
						 *  - reply from here */
	size_t			rq_daddrlen;

	struct svc_serv *	rq_server;	/* RPC service definition */
	struct svc_pool *	rq_pool;	/* thread pool */
	struct svc_procedure *	rq_procinfo;	/* procedure info */
	struct auth_ops *	rq_authop;	/* authentication flavour */
	struct svc_cred		rq_cred;	/* auth info */
	void *			rq_xprt_ctxt;	/* transport specific context ptr */
	struct svc_deferred_req*rq_deferred;	/* deferred request we are replaying */

	size_t			rq_xprt_hlen;	/* xprt header len */
	struct xdr_buf		rq_arg;
	struct xdr_buf		rq_res;
	struct page *		rq_pages[RPCSVC_MAXPAGES];
	struct page *		*rq_respages;	/* points into rq_pages */
	struct page *		*rq_next_page;	/* next reply page to use */
	struct page *		*rq_page_end;	/* one past the last page */

	struct kvec		rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */

	__be32			rq_xid;		/* transmission id */
	u32			rq_prog;	/* program number */
	u32			rq_vers;	/* program version */
	u32			rq_proc;	/* procedure number */
	u32			rq_prot;	/* IP protocol */
	int			rq_cachetype;	/* catering to nfsd */
#define	RQ_SECURE	(0)			/* secure port */
#define	RQ_LOCAL	(1)			/* local request */
#define	RQ_USEDEFERRAL	(2)			/* use deferral */
#define	RQ_DROPME	(3)			/* drop current reply */
#define	RQ_SPLICE_OK	(4)			/* turned off in gss privacy
						 * to prevent encrypting page
						 * cache pages */
#define	RQ_VICTIM	(5)			/* about to be shut down */
#define	RQ_BUSY		(6)			/* request is busy */
#define	RQ_DATA		(7)			/* request has data */
	unsigned long		rq_flags;	/* flags field */

	void *			rq_argp;	/* decoded arguments */
	void *			rq_resp;	/* xdr'd results */
	void *			rq_auth_data;	/* flavor-specific data */
	int			rq_auth_slack;	/* extra space xdr code
						 * should leave in head
						 * for krb5i, krb5p.
						 */
	int			rq_reserved;	/* space on socket outq
						 * reserved for this request
						 */

	struct cache_req	rq_chandle;	/* handle passed to caches for
						 * request delaying
						 */
	/* Catering to nfsd */
	struct auth_domain *	rq_client;	/* RPC peer info */
	struct auth_domain *	rq_gssclient;	/* "gss/"-style peer info */
	struct svc_cacherep *	rq_cacherep;	/* cache info */
	struct task_struct	*rq_task;	/* service thread */
	spinlock_t		rq_lock;	/* per-request lock */
};

#define SVC_NET(svc_rqst)	(svc_rqst->rq_xprt->xpt_net)

/*
 * Rigorous type checking on sockaddr type conversions
 */
static inline struct sockaddr_in *svc_addr_in(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in *) &rqst->rq_addr;
}

static inline struct sockaddr_in6 *svc_addr_in6(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in6 *) &rqst->rq_addr;
}

static inline struct sockaddr *svc_addr(const struct svc_rqst *rqst)
{
	return (struct sockaddr *) &rqst->rq_addr;
}

static inline struct sockaddr_in *svc_daddr_in(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in *) &rqst->rq_daddr;
}

static inline struct sockaddr_in6 *svc_daddr_in6(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in6 *) &rqst->rq_daddr;
}

static inline struct sockaddr *svc_daddr(const struct svc_rqst *rqst)
{
	return (struct sockaddr *) &rqst->rq_daddr;
}
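
/*
 * Illustrative sketch (not part of the original header): callers switch
 * on the address family before picking the typed accessor.
 *
 *	switch (svc_addr(rqstp)->sa_family) {
 *	case AF_INET:
 *		port = ntohs(svc_addr_in(rqstp)->sin_port);
 *		break;
 *	case AF_INET6:
 *		port = ntohs(svc_addr_in6(rqstp)->sin6_port);
 *		break;
 *	}
 */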

/*
 * Check buffer bounds after decoding arguments
 */
static inline int
xdr_argsize_check(struct svc_rqst *rqstp, __be32 *p)
{
	char *cp = (char *)p;
	struct kvec *vec = &rqstp->rq_arg.head[0];
	return cp >= (char*)vec->iov_base
		&& cp <= (char*)vec->iov_base + vec->iov_len;
}

static inline int
xdr_ressize_check(struct svc_rqst *rqstp, __be32 *p)
{
	struct kvec *vec = &rqstp->rq_res.head[0];
	char *cp = (char*)p;

	vec->iov_len = cp - (char*)vec->iov_base;

	return vec->iov_len <= PAGE_SIZE;
}
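
/*
 * Illustrative sketch (not part of the original header): an XDR encode
 * routine typically finishes by recording how far it wrote.  Here @p
 * points just past the last encoded word, so xdr_ressize_check() both
 * sets the head kvec length and confirms the reply fits in one page.
 * The function and variable names below are hypothetical.
 *
 *	static int hypothetical_encoderes(struct svc_rqst *rqstp, __be32 *p)
 *	{
 *		*p++ = htonl(status);		// encode one result word
 *		return xdr_ressize_check(rqstp, p);
 *	}
 */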

/* Release reply pages, walking rq_next_page back down to rq_respages. */
static inline void svc_free_res_pages(struct svc_rqst *rqstp)
{
	while (rqstp->rq_next_page != rqstp->rq_respages) {
		struct page **pp = --rqstp->rq_next_page;
		if (*pp) {
			put_page(*pp);
			*pp = NULL;
		}
	}
}

struct svc_deferred_req {
	u32			prot;	/* protocol (UDP or TCP) */
	struct svc_xprt		*xprt;
	struct sockaddr_storage	addr;	/* where reply must go */
	size_t			addrlen;
	struct sockaddr_storage	daddr;	/* where reply must come from */
	size_t			daddrlen;
	struct cache_deferred_req handle;
	size_t			xprt_hlen;
	int			argslen;
	__be32			args[0];
};

/*
 * List of RPC programs on the same transport endpoint
 */
struct svc_program {
	struct svc_program *	pg_next;	/* other programs (same xprt) */
	u32			pg_prog;	/* program number */
	unsigned int		pg_lovers;	/* lowest version */
	unsigned int		pg_hivers;	/* highest version */
	unsigned int		pg_nvers;	/* number of versions */
	struct svc_version **	pg_vers;	/* version array */
	char *			pg_name;	/* service name */
	char *			pg_class;	/* class name: services sharing authentication */
	struct svc_stat *	pg_stats;	/* rpc statistics */
	int			(*pg_authenticate)(struct svc_rqst *);
};

/*
 * RPC program version
 */
struct svc_version {
	u32			vs_vers;	/* version number */
	u32			vs_nproc;	/* number of procedures */
	struct svc_procedure *	vs_proc;	/* per-procedure info */
	u32			vs_xdrsize;	/* xdrsize needed for this version */

	unsigned int		vs_hidden : 1,	/* Don't register with portmapper.
						 * Only used for nfsacl so far. */
				vs_rpcb_optnl:1;/* Ignore the result of rpcbind
						 * registration.  Only used for
						 * NFSv4. */

	/* Override dispatch function (e.g. when caching replies).
	 * A return value of 0 means drop the request.
	 * vs_dispatch == NULL means use default dispatcher.
	 */
	int			(*vs_dispatch)(struct svc_rqst *, __be32 *);
};

/*
 * RPC procedure info
 */
typedef __be32	(*svc_procfunc)(struct svc_rqst *, void *argp, void *resp);
struct svc_procedure {
	svc_procfunc		pc_func;	/* process the request */
	kxdrproc_t		pc_decode;	/* XDR decode args */
	kxdrproc_t		pc_encode;	/* XDR encode result */
	kxdrproc_t		pc_release;	/* XDR free result */
	unsigned int		pc_argsize;	/* argument struct size */
	unsigned int		pc_ressize;	/* result struct size */
	unsigned int		pc_count;	/* call count */
	unsigned int		pc_cachetype;	/* cache info (NFS) */
	unsigned int		pc_xdrressize;	/* maximum size of XDR reply */
};
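
/*
 * Illustrative sketch (not part of the original header): a service wires
 * these structures together as a procedure table referenced by a
 * version.  All "example_*" names below are hypothetical; real users
 * (nfsd, lockd, the NFSv4 callback service) follow the same shape.
 *
 *	static struct svc_procedure example_procedures[] = {
 *		[0] = {
 *			.pc_func	= example_proc_null,
 *			.pc_decode	= example_decode_void,
 *			.pc_encode	= example_encode_void,
 *			.pc_argsize	= sizeof(struct example_voidargs),
 *			.pc_ressize	= sizeof(struct example_voidres),
 *			.pc_xdrressize	= 1,
 *		},
 *	};
 *
 *	static struct svc_version example_version1 = {
 *		.vs_vers	= 1,
 *		.vs_nproc	= ARRAY_SIZE(example_procedures),
 *		.vs_proc	= example_procedures,
 *		.vs_xdrsize	= EXAMPLE_XDRSIZE,
 *	};
 */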

/*
 * Mode for mapping cpus to pools.
 */
enum {
	SVC_POOL_AUTO = -1,	/* choose one of the others */
	SVC_POOL_GLOBAL,	/* no mapping, just a single global pool
				 * (legacy & UP mode) */
	SVC_POOL_PERCPU,	/* one pool per cpu */
	SVC_POOL_PERNODE	/* one pool per numa node */
};

struct svc_pool_map {
	int count;			/* How many svc_servs use us */
	int mode;			/* Note: int not enum to avoid
					 * warnings about "enumeration value
					 * not handled in switch" */
	unsigned int npools;
	unsigned int *pool_to;		/* maps pool id to cpu or node */
	unsigned int *to_pool;		/* maps cpu or node to pool id */
};
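
/*
 * Illustrative example (not part of the original header): in
 * SVC_POOL_PERNODE mode on a two-node machine, npools == 2 and the two
 * arrays are inverses of each other:
 *
 *	to_pool[node 0] == 0,  to_pool[node 1] == 1
 *	pool_to[pool 0] == 0,  pool_to[pool 1] == 1
 *
 * so a request arriving on a CPU in node 1 is served by pool 1.
 */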

extern struct svc_pool_map svc_pool_map;

/*
 * Function prototypes.
 */
int svc_rpcb_setup(struct svc_serv *serv, struct net *net);
void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net);
int svc_bind(struct svc_serv *serv, struct net *net);
struct svc_serv *svc_create(struct svc_program *, unsigned int,
			    struct svc_serv_ops *);
struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv,
					struct svc_pool *pool, int node);
struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
					struct svc_pool *pool, int node);
void		   svc_rqst_free(struct svc_rqst *);
void		   svc_exit_thread(struct svc_rqst *);
unsigned int	   svc_pool_map_get(void);
void		   svc_pool_map_put(void);
struct svc_serv *  svc_create_pooled(struct svc_program *, unsigned int,
			struct svc_serv_ops *);
int		   svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
int		   svc_set_num_threads_sync(struct svc_serv *, struct svc_pool *, int);
int		   svc_pool_stats_open(struct svc_serv *serv, struct file *file);
void		   svc_destroy(struct svc_serv *);
void		   svc_shutdown_net(struct svc_serv *, struct net *);
int		   svc_process(struct svc_rqst *);
int		   bc_svc_process(struct svc_serv *, struct rpc_rqst *,
			struct svc_rqst *);
int		   svc_register(const struct svc_serv *, struct net *, const int,
				const unsigned short, const unsigned short);

void		   svc_wake_up(struct svc_serv *);
void		   svc_reserve(struct svc_rqst *rqstp, int space);
struct svc_pool *  svc_pool_for_cpu(struct svc_serv *serv, int cpu);
char *		   svc_print_addr(struct svc_rqst *, char *, size_t);

#define	RPC_MAX_ADDRBUFLEN	(63U)

/*
 * When we want to reduce the size of the reserved space in the response
 * buffer, we need to take into account the size of any checksum data that
 * may be at the end of the packet. This is difficult to determine exactly
 * for all cases without actually generating the checksum, so we just use a
 * static value.
 */
static inline void svc_reserve_auth(struct svc_rqst *rqstp, int space)
{
	svc_reserve(rqstp, space + rqstp->rq_auth_slack);
}
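
/*
 * Illustrative sketch (not part of the original header): once a server
 * routine knows roughly how large its reply will be, it can shrink the
 * reservation while keeping rq_auth_slack for e.g. a krb5i/krb5p
 * checksum.  The variable name below is hypothetical.
 *
 *	svc_reserve_auth(rqstp, expected_reply_bytes);
 */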

#endif /* SUNRPC_SVC_H */