1 /*
2 * linux/net/sunrpc/svc.c
3 *
4 * High-level RPC service routines
5 *
6 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
7 *
8 * Multiple thread pools and NUMAisation
9 * Copyright (c) 2006 Silicon Graphics, Inc.
10 * by Greg Banks <gnb@melbourne.sgi.com>
11 */
12
13 #include <linux/linkage.h>
14 #include <linux/sched.h>
15 #include <linux/errno.h>
16 #include <linux/net.h>
17 #include <linux/in.h>
18 #include <linux/mm.h>
19 #include <linux/interrupt.h>
20 #include <linux/module.h>
21 #include <linux/kthread.h>
22
23 #include <linux/sunrpc/types.h>
24 #include <linux/sunrpc/xdr.h>
25 #include <linux/sunrpc/stats.h>
26 #include <linux/sunrpc/svcsock.h>
27 #include <linux/sunrpc/clnt.h>
28
29 #define RPCDBG_FACILITY RPCDBG_SVCDSP
30
31 static void svc_unregister(const struct svc_serv *serv);
32
33 #define svc_serv_is_pooled(serv) ((serv)->sv_function)
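/*
 * sv_function is only filled in by svc_create_pooled(), so its presence
 * is what marks a service as "pooled" throughout this file.
 */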
34
35 /*
36 * Mode for mapping cpus to pools.
37 */
38 enum {
39 SVC_POOL_AUTO = -1, /* choose one of the others */
40 SVC_POOL_GLOBAL, /* no mapping, just a single global pool
41 * (legacy & UP mode) */
42 SVC_POOL_PERCPU, /* one pool per cpu */
43 SVC_POOL_PERNODE /* one pool per numa node */
44 };
45 #define SVC_POOL_DEFAULT SVC_POOL_GLOBAL
46
47 /*
48 * Structure for mapping cpus to pools and vice versa.
49 * Setup once during sunrpc initialisation.
50 */
51 static struct svc_pool_map {
52 int count; /* How many svc_servs use us */
53 int mode; /* Note: int not enum to avoid
54 * warnings about "enumeration value
55 * not handled in switch" */
56 unsigned int npools;
57 unsigned int *pool_to; /* maps pool id to cpu or node */
58 unsigned int *to_pool; /* maps cpu or node to pool id */
59 } svc_pool_map = {
60 .count = 0,
61 .mode = SVC_POOL_DEFAULT
62 };
63 static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */
64
65 static int
66 param_set_pool_mode(const char *val, struct kernel_param *kp)
67 {
68 int *ip = (int *)kp->arg;
69 struct svc_pool_map *m = &svc_pool_map;
70 int err;
71
72 mutex_lock(&svc_pool_map_mutex);
73
74 err = -EBUSY;
75 if (m->count)
76 goto out;
77
78 err = 0;
79 if (!strncmp(val, "auto", 4))
80 *ip = SVC_POOL_AUTO;
81 else if (!strncmp(val, "global", 6))
82 *ip = SVC_POOL_GLOBAL;
83 else if (!strncmp(val, "percpu", 6))
84 *ip = SVC_POOL_PERCPU;
85 else if (!strncmp(val, "pernode", 7))
86 *ip = SVC_POOL_PERNODE;
87 else
88 err = -EINVAL;
89
90 out:
91 mutex_unlock(&svc_pool_map_mutex);
92 return err;
93 }
94
95 static int
96 param_get_pool_mode(char *buf, struct kernel_param *kp)
97 {
98 int *ip = (int *)kp->arg;
99
100 switch (*ip)
101 {
102 case SVC_POOL_AUTO:
103 return strlcpy(buf, "auto", 20);
104 case SVC_POOL_GLOBAL:
105 return strlcpy(buf, "global", 20);
106 case SVC_POOL_PERCPU:
107 return strlcpy(buf, "percpu", 20);
108 case SVC_POOL_PERNODE:
109 return strlcpy(buf, "pernode", 20);
110 default:
111 return sprintf(buf, "%d", *ip);
112 }
113 }
114
115 module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
116 &svc_pool_map.mode, 0644);
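/*
 * The 0644 permission above exposes the parameter read-write, so the
 * pool mode can be chosen at module load time (pool_mode=...) or later
 * via sysfs (typically /sys/module/sunrpc/parameters/pool_mode), but
 * only while no pooled service holds a reference to the map;
 * param_set_pool_mode() returns -EBUSY otherwise.
 */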
117
118 /*
119 * Detect best pool mapping mode heuristically,
120 * according to the machine's topology.
121 */
122 static int
123 svc_pool_map_choose_mode(void)
124 {
125 unsigned int node;
126
127 if (num_online_nodes() > 1) {
128 /*
129 * Actually have multiple NUMA nodes,
130 * so split pools on NUMA node boundaries
131 */
132 return SVC_POOL_PERNODE;
133 }
134
135 node = any_online_node(node_online_map);
136 if (nr_cpus_node(node) > 2) {
137 /*
138 * Non-trivial SMP, or CONFIG_NUMA on
139 * non-NUMA hardware, e.g. with a generic
140 * x86_64 kernel on Xeons. In this case we
141 * want to divide the pools on cpu boundaries.
142 */
143 return SVC_POOL_PERCPU;
144 }
145
146 /* default: one global pool */
147 return SVC_POOL_GLOBAL;
148 }
149
150 /*
151 * Allocate the to_pool[] and pool_to[] arrays.
152 * Returns 0 on success or an errno.
153 */
154 static int
155 svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
156 {
157 m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
158 if (!m->to_pool)
159 goto fail;
160 m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
161 if (!m->pool_to)
162 goto fail_free;
163
164 return 0;
165
166 fail_free:
167 kfree(m->to_pool);
168 fail:
169 return -ENOMEM;
170 }
171
172 /*
173 * Initialise the pool map for SVC_POOL_PERCPU mode.
174 * Returns number of pools or <0 on error.
175 */
176 static int
177 svc_pool_map_init_percpu(struct svc_pool_map *m)
178 {
179 unsigned int maxpools = nr_cpu_ids;
180 unsigned int pidx = 0;
181 unsigned int cpu;
182 int err;
183
184 err = svc_pool_map_alloc_arrays(m, maxpools);
185 if (err)
186 return err;
187
188 for_each_online_cpu(cpu) {
189 BUG_ON(pidx > maxpools);
190 m->to_pool[cpu] = pidx;
191 m->pool_to[pidx] = cpu;
192 pidx++;
193 }
194 /* cpus brought online later all get mapped to pool0, sorry */
195
196 return pidx;
197 }
198
199
200 /*
201 * Initialise the pool map for SVC_POOL_PERNODE mode.
202 * Returns number of pools or <0 on error.
203 */
204 static int
205 svc_pool_map_init_pernode(struct svc_pool_map *m)
206 {
207 unsigned int maxpools = nr_node_ids;
208 unsigned int pidx = 0;
209 unsigned int node;
210 int err;
211
212 err = svc_pool_map_alloc_arrays(m, maxpools);
213 if (err)
214 return err;
215
216 for_each_node_with_cpus(node) {
217 /* some architectures (e.g. SN2) have cpuless nodes */
218 BUG_ON(pidx > maxpools);
219 m->to_pool[node] = pidx;
220 m->pool_to[pidx] = node;
221 pidx++;
222 }
223 /* nodes brought online later all get mapped to pool0, sorry */
224
225 return pidx;
226 }
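/*
 * Both initialisers above return the number of pools actually populated
 * (pidx), which may be smaller than maxpools when some cpus are offline
 * or some nodes have no cpus.
 */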
227
228
229 /*
230 * Add a reference to the global map of cpus to pools (and
231 * vice versa). Initialise the map if we're the first user.
232 * Returns the number of pools.
233 */
234 static unsigned int
235 svc_pool_map_get(void)
236 {
237 struct svc_pool_map *m = &svc_pool_map;
238 int npools = -1;
239
240 mutex_lock(&svc_pool_map_mutex);
241
242 if (m->count++) {
243 mutex_unlock(&svc_pool_map_mutex);
244 return m->npools;
245 }
246
247 if (m->mode == SVC_POOL_AUTO)
248 m->mode = svc_pool_map_choose_mode();
249
250 switch (m->mode) {
251 case SVC_POOL_PERCPU:
252 npools = svc_pool_map_init_percpu(m);
253 break;
254 case SVC_POOL_PERNODE:
255 npools = svc_pool_map_init_pernode(m);
256 break;
257 }
258
259 if (npools < 0) {
260 /* default, or memory allocation failure */
261 npools = 1;
262 m->mode = SVC_POOL_GLOBAL;
263 }
264 m->npools = npools;
265
266 mutex_unlock(&svc_pool_map_mutex);
267 return m->npools;
268 }
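/*
 * Note that svc_pool_map_get() never fails: if per-cpu or per-node setup
 * cannot be completed (including a memory allocation failure), it quietly
 * falls back to a single global pool. Callers must balance it with
 * svc_pool_map_put().
 */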
269
270
271 /*
272 * Drop a reference to the global map of cpus to pools.
273 * When the last reference is dropped, the map data is
274 * freed; this allows the sysadmin to change the pool
275 * mode using the pool_mode module option without
276 * rebooting or re-loading sunrpc.ko.
277 */
278 static void
279 svc_pool_map_put(void)
280 {
281 struct svc_pool_map *m = &svc_pool_map;
282
283 mutex_lock(&svc_pool_map_mutex);
284
285 if (!--m->count) {
286 m->mode = SVC_POOL_DEFAULT;
287 kfree(m->to_pool);
288 kfree(m->pool_to);
289 m->npools = 0;
290 }
291
292 mutex_unlock(&svc_pool_map_mutex);
293 }
294
295
296 /*
297 * Set the given thread's cpus_allowed mask so that it
298 * will only run on cpus in the given pool.
299 */
300 static inline void
301 svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
302 {
303 struct svc_pool_map *m = &svc_pool_map;
304 unsigned int node = m->pool_to[pidx];
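/* In SVC_POOL_PERCPU mode pool_to[] holds a cpu number, so "node" is
 * really a cpu here; in SVC_POOL_PERNODE mode it is a NUMA node id. */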
305
306 /*
307 * The caller checks for sv_nrpools > 1, which
308 * implies that we've been initialized.
309 */
310 BUG_ON(m->count == 0);
311
312 switch (m->mode) {
313 case SVC_POOL_PERCPU:
314 {
315 set_cpus_allowed_ptr(task, &cpumask_of_cpu(node));
316 break;
317 }
318 case SVC_POOL_PERNODE:
319 {
320 node_to_cpumask_ptr(nodecpumask, node);
321 set_cpus_allowed_ptr(task, nodecpumask);
322 break;
323 }
324 }
325 }
326
327 /*
328 * Use the mapping mode to choose a pool for a given CPU.
329 * Used when enqueueing an incoming RPC. Always returns
330 * a non-NULL pool pointer.
331 */
332 struct svc_pool *
333 svc_pool_for_cpu(struct svc_serv *serv, int cpu)
334 {
335 struct svc_pool_map *m = &svc_pool_map;
336 unsigned int pidx = 0;
337
338 /*
339 * An uninitialised map happens in a pure client when
340 * lockd is brought up, so silently treat it the
341 * same as SVC_POOL_GLOBAL.
342 */
343 if (svc_serv_is_pooled(serv)) {
344 switch (m->mode) {
345 case SVC_POOL_PERCPU:
346 pidx = m->to_pool[cpu];
347 break;
348 case SVC_POOL_PERNODE:
349 pidx = m->to_pool[cpu_to_node(cpu)];
350 break;
351 }
352 }
353 return &serv->sv_pools[pidx % serv->sv_nrpools];
354 }
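/*
 * The modulo above keeps the returned pool within this service's array
 * even if the global map disagrees with sv_nrpools, so the function
 * always returns a valid pool pointer.
 */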
355
356
357 /*
358 * Create an RPC service
359 */
360 static struct svc_serv *
361 __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
362 sa_family_t family, void (*shutdown)(struct svc_serv *serv))
363 {
364 struct svc_serv *serv;
365 unsigned int vers;
366 unsigned int xdrsize;
367 unsigned int i;
368
369 if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
370 return NULL;
371 serv->sv_family = family;
372 serv->sv_name = prog->pg_name;
373 serv->sv_program = prog;
374 serv->sv_nrthreads = 1;
375 serv->sv_stats = prog->pg_stats;
376 if (bufsize > RPCSVC_MAXPAYLOAD)
377 bufsize = RPCSVC_MAXPAYLOAD;
378 serv->sv_max_payload = bufsize? bufsize : 4096;
379 serv->sv_max_mesg = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
380 serv->sv_shutdown = shutdown;
381 xdrsize = 0;
382 while (prog) {
383 prog->pg_lovers = prog->pg_nvers-1;
384 for (vers=0; vers<prog->pg_nvers ; vers++)
385 if (prog->pg_vers[vers]) {
386 prog->pg_hivers = vers;
387 if (prog->pg_lovers > vers)
388 prog->pg_lovers = vers;
389 if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
390 xdrsize = prog->pg_vers[vers]->vs_xdrsize;
391 }
392 prog = prog->pg_next;
393 }
394 serv->sv_xdrsize = xdrsize;
395 INIT_LIST_HEAD(&serv->sv_tempsocks);
396 INIT_LIST_HEAD(&serv->sv_permsocks);
397 init_timer(&serv->sv_temptimer);
398 spin_lock_init(&serv->sv_lock);
399
400 serv->sv_nrpools = npools;
401 serv->sv_pools =
402 kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
403 GFP_KERNEL);
404 if (!serv->sv_pools) {
405 kfree(serv);
406 return NULL;
407 }
408
409 for (i = 0; i < serv->sv_nrpools; i++) {
410 struct svc_pool *pool = &serv->sv_pools[i];
411
412 dprintk("svc: initialising pool %u for %s\n",
413 i, serv->sv_name);
414
415 pool->sp_id = i;
416 INIT_LIST_HEAD(&pool->sp_threads);
417 INIT_LIST_HEAD(&pool->sp_sockets);
418 INIT_LIST_HEAD(&pool->sp_all_threads);
419 spin_lock_init(&pool->sp_lock);
420 }
421
422 /* Remove any stale portmap registrations */
423 svc_unregister(serv);
424
425 return serv;
426 }
427
428 struct svc_serv *
429 svc_create(struct svc_program *prog, unsigned int bufsize,
430 sa_family_t family, void (*shutdown)(struct svc_serv *serv))
431 {
432 return __svc_create(prog, bufsize, /*npools*/1, family, shutdown);
433 }
434 EXPORT_SYMBOL_GPL(svc_create);
435
436 struct svc_serv *
437 svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
438 sa_family_t family, void (*shutdown)(struct svc_serv *serv),
439 svc_thread_fn func, struct module *mod)
440 {
441 struct svc_serv *serv;
442 unsigned int npools = svc_pool_map_get();
443
444 serv = __svc_create(prog, bufsize, npools, family, shutdown);
445
446 if (serv != NULL) {
447 serv->sv_function = func;
448 serv->sv_module = mod;
449 }
450
451 return serv;
452 }
453 EXPORT_SYMBOL_GPL(svc_create_pooled);
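/*
 * A sketch of the usual calling sequence (see fs/nfsd and fs/lockd for
 * the authoritative versions): nfsd creates its service with
 * svc_create_pooled() and then sizes it with svc_set_num_threads(),
 * while simpler services such as lockd use svc_create() and start their
 * single thread directly with svc_prepare_thread() + kthread_run().
 */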
454
455 /*
456 * Destroy an RPC service. Should be called with appropriate locking to
457 * protect the sv_nrthreads, sv_permsocks and sv_tempsocks.
458 */
459 void
460 svc_destroy(struct svc_serv *serv)
461 {
462 dprintk("svc: svc_destroy(%s, %d)\n",
463 serv->sv_program->pg_name,
464 serv->sv_nrthreads);
465
466 if (serv->sv_nrthreads) {
467 if (--(serv->sv_nrthreads) != 0) {
468 svc_sock_update_bufs(serv);
469 return;
470 }
471 } else
472 printk("svc_destroy: no threads for serv=%p!\n", serv);
473
474 del_timer_sync(&serv->sv_temptimer);
475
476 svc_close_all(&serv->sv_tempsocks);
477
478 if (serv->sv_shutdown)
479 serv->sv_shutdown(serv);
480
481 svc_close_all(&serv->sv_permsocks);
482
483 BUG_ON(!list_empty(&serv->sv_permsocks));
484 BUG_ON(!list_empty(&serv->sv_tempsocks));
485
486 cache_clean_deferred(serv);
487
488 if (svc_serv_is_pooled(serv))
489 svc_pool_map_put();
490
491 svc_unregister(serv);
492 kfree(serv->sv_pools);
493 kfree(serv);
494 }
495 EXPORT_SYMBOL_GPL(svc_destroy);
496
497 /*
498 * Allocate an RPC server's buffer space.
499 * We allocate pages and place them in rq_argpages.
500 */
501 static int
502 svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
503 {
504 unsigned int pages, arghi;
505
506 pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
507 * We assume one of them is at most one page
508 */
509 arghi = 0;
510 BUG_ON(pages > RPCSVC_MAXPAGES);
511 while (pages) {
512 struct page *p = alloc_page(GFP_KERNEL);
513 if (!p)
514 break;
515 rqstp->rq_pages[arghi++] = p;
516 pages--;
517 }
518 return pages == 0;
519 }
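/*
 * svc_init_buffer() returns non-zero only if every page was allocated;
 * after a partial failure the pages already stored in rq_pages are freed
 * by svc_release_buffer() when the caller unwinds via svc_exit_thread().
 */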
520
521 /*
522 * Release an RPC server buffer
523 */
524 static void
525 svc_release_buffer(struct svc_rqst *rqstp)
526 {
527 unsigned int i;
528
529 for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
530 if (rqstp->rq_pages[i])
531 put_page(rqstp->rq_pages[i]);
532 }
533
534 struct svc_rqst *
535 svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool)
536 {
537 struct svc_rqst *rqstp;
538
539 rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL);
540 if (!rqstp)
541 goto out_enomem;
542
543 init_waitqueue_head(&rqstp->rq_wait);
544
545 serv->sv_nrthreads++;
546 spin_lock_bh(&pool->sp_lock);
547 pool->sp_nrthreads++;
548 list_add(&rqstp->rq_all, &pool->sp_all_threads);
549 spin_unlock_bh(&pool->sp_lock);
550 rqstp->rq_server = serv;
551 rqstp->rq_pool = pool;
552
553 rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL);
554 if (!rqstp->rq_argp)
555 goto out_thread;
556
557 rqstp->rq_resp = kmalloc(serv->sv_xdrsize, GFP_KERNEL);
558 if (!rqstp->rq_resp)
559 goto out_thread;
560
561 if (!svc_init_buffer(rqstp, serv->sv_max_mesg))
562 goto out_thread;
563
564 return rqstp;
565 out_thread:
566 svc_exit_thread(rqstp);
567 out_enomem:
568 return ERR_PTR(-ENOMEM);
569 }
570 EXPORT_SYMBOL_GPL(svc_prepare_thread);
571
572 /*
573 * Choose a pool in which to create a new thread, for svc_set_num_threads
574 */
575 static inline struct svc_pool *
576 choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
577 {
578 if (pool != NULL)
579 return pool;
580
581 return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
582 }
583
584 /*
585 * Choose a thread to kill, for svc_set_num_threads
586 */
587 static inline struct task_struct *
588 choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
589 {
590 unsigned int i;
591 struct task_struct *task = NULL;
592
593 if (pool != NULL) {
594 spin_lock_bh(&pool->sp_lock);
595 } else {
596 /* choose a pool in round-robin fashion */
597 for (i = 0; i < serv->sv_nrpools; i++) {
598 pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
599 spin_lock_bh(&pool->sp_lock);
600 if (!list_empty(&pool->sp_all_threads))
601 goto found_pool;
602 spin_unlock_bh(&pool->sp_lock);
603 }
604 return NULL;
605 }
606
607 found_pool:
608 if (!list_empty(&pool->sp_all_threads)) {
609 struct svc_rqst *rqstp;
610
611 /*
612 * Remove from the pool->sp_all_threads list
613 * so we don't try to kill it again.
614 */
615 rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
616 list_del_init(&rqstp->rq_all);
617 task = rqstp->rq_task;
618 }
619 spin_unlock_bh(&pool->sp_lock);
620
621 return task;
622 }
623
624 /*
625 * Create or destroy enough new threads to make the number
626 * of threads the given number. If `pool' is non-NULL, applies
627 * only to threads in that pool, otherwise round-robins between
628 * all pools. Must be called with a svc_get() reference and
629 * the BKL or another lock to protect access to svc_serv fields.
630 *
631 * Destroying threads relies on the service threads filling in
632 * rqstp->rq_task, which only the nfs ones do. Assumes the serv
633 * has been created using svc_create_pooled().
634 *
635 * Based on code that used to be in nfsd_svc() but tweaked
636 * to be pool-aware.
637 */
638 int
639 svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
640 {
641 struct svc_rqst *rqstp;
642 struct task_struct *task;
643 struct svc_pool *chosen_pool;
644 int error = 0;
645 unsigned int state = serv->sv_nrthreads-1;
646
647 if (pool == NULL) {
648 /* The -1 assumes caller has done a svc_get() */
649 nrservs -= (serv->sv_nrthreads-1);
650 } else {
651 spin_lock_bh(&pool->sp_lock);
652 nrservs -= pool->sp_nrthreads;
653 spin_unlock_bh(&pool->sp_lock);
654 }
655
656 /* create new threads */
657 while (nrservs > 0) {
658 nrservs--;
659 chosen_pool = choose_pool(serv, pool, &state);
660
661 rqstp = svc_prepare_thread(serv, chosen_pool);
662 if (IS_ERR(rqstp)) {
663 error = PTR_ERR(rqstp);
664 break;
665 }
666
667 __module_get(serv->sv_module);
668 task = kthread_create(serv->sv_function, rqstp, serv->sv_name);
669 if (IS_ERR(task)) {
670 error = PTR_ERR(task);
671 module_put(serv->sv_module);
672 svc_exit_thread(rqstp);
673 break;
674 }
675
676 rqstp->rq_task = task;
677 if (serv->sv_nrpools > 1)
678 svc_pool_map_set_cpumask(task, chosen_pool->sp_id);
679
680 svc_sock_update_bufs(serv);
681 wake_up_process(task);
682 }
683 /* destroy old threads */
684 while (nrservs < 0 &&
685 (task = choose_victim(serv, pool, &state)) != NULL) {
686 send_sig(SIGINT, task, 1);
687 nrservs++;
688 }
689
690 return error;
691 }
692 EXPORT_SYMBOL_GPL(svc_set_num_threads);
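/*
 * The SIGINT sent by svc_set_num_threads() is only a request to exit:
 * the service's thread function (nfsd(), for example) is expected to
 * notice the signal, clean up, and call svc_exit_thread() itself.
 */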
693
694 /*
695 * Called from a server thread as it's exiting. Caller must hold the BKL or
696 * the "service mutex", whichever is appropriate for the service.
697 */
698 void
699 svc_exit_thread(struct svc_rqst *rqstp)
700 {
701 struct svc_serv *serv = rqstp->rq_server;
702 struct svc_pool *pool = rqstp->rq_pool;
703
704 svc_release_buffer(rqstp);
705 kfree(rqstp->rq_resp);
706 kfree(rqstp->rq_argp);
707 kfree(rqstp->rq_auth_data);
708
709 spin_lock_bh(&pool->sp_lock);
710 pool->sp_nrthreads--;
711 list_del(&rqstp->rq_all);
712 spin_unlock_bh(&pool->sp_lock);
713
714 kfree(rqstp);
715
716 /* Release the server */
717 if (serv)
718 svc_destroy(serv);
719 }
720 EXPORT_SYMBOL_GPL(svc_exit_thread);
721
722 #ifdef CONFIG_SUNRPC_REGISTER_V4
723
724 /*
725 * Register an "inet" protocol family netid with the local
726 * rpcbind daemon via an rpcbind v4 SET request.
727 *
728 * No netconfig infrastructure is available in the kernel, so
729 * we map IP_ protocol numbers to netids by hand.
730 *
731 * Returns zero on success; a negative errno value is returned
732 * if any error occurs.
733 */
734 static int __svc_rpcb_register4(const u32 program, const u32 version,
735 const unsigned short protocol,
736 const unsigned short port)
737 {
738 struct sockaddr_in sin = {
739 .sin_family = AF_INET,
740 .sin_addr.s_addr = htonl(INADDR_ANY),
741 .sin_port = htons(port),
742 };
743 char *netid;
744
745 switch (protocol) {
746 case IPPROTO_UDP:
747 netid = RPCBIND_NETID_UDP;
748 break;
749 case IPPROTO_TCP:
750 netid = RPCBIND_NETID_TCP;
751 break;
752 default:
753 return -EPROTONOSUPPORT;
754 }
755
756 return rpcb_v4_register(program, version,
757 (struct sockaddr *)&sin, netid);
758 }
759
760 /*
761 * Register an "inet6" protocol family netid with the local
762 * rpcbind daemon via an rpcbind v4 SET request.
763 *
764 * No netconfig infrastructure is available in the kernel, so
765 * we map IP_ protocol numbers to netids by hand.
766 *
767 * Returns zero on success; a negative errno value is returned
768 * if any error occurs.
769 */
770 static int __svc_rpcb_register6(const u32 program, const u32 version,
771 const unsigned short protocol,
772 const unsigned short port)
773 {
774 struct sockaddr_in6 sin6 = {
775 .sin6_family = AF_INET6,
776 .sin6_addr = IN6ADDR_ANY_INIT,
777 .sin6_port = htons(port),
778 };
779 char *netid;
780
781 switch (protocol) {
782 case IPPROTO_UDP:
783 netid = RPCBIND_NETID_UDP6;
784 break;
785 case IPPROTO_TCP:
786 netid = RPCBIND_NETID_TCP6;
787 break;
788 default:
789 return -EPROTONOSUPPORT;
790 }
791
792 return rpcb_v4_register(program, version,
793 (struct sockaddr *)&sin6, netid);
794 }
795
796 /*
797 * Register a kernel RPC service via rpcbind version 4.
798 *
799 * Returns zero on success; a negative errno value is returned
800 * if any error occurs.
801 */
802 static int __svc_register(const u32 program, const u32 version,
803 const sa_family_t family,
804 const unsigned short protocol,
805 const unsigned short port)
806 {
807 int error;
808
809 switch (family) {
810 case AF_INET:
811 return __svc_rpcb_register4(program, version,
812 protocol, port);
813 case AF_INET6:
814 error = __svc_rpcb_register6(program, version,
815 protocol, port);
816 if (error < 0)
817 return error;
818
819 /*
820 * Work around bug in some versions of Linux rpcbind
821 * which don't allow registration of both inet and
822 * inet6 netids.
823 *
824 * Error return ignored for now.
825 */
826 __svc_rpcb_register4(program, version,
827 protocol, port);
828 return 0;
829 }
830
831 return -EAFNOSUPPORT;
832 }
833
834 #else /* CONFIG_SUNRPC_REGISTER_V4 */
835
836 /*
837 * Register a kernel RPC service via rpcbind version 2.
838 *
839 * Returns zero on success; a negative errno value is returned
840 * if any error occurs.
841 */
842 static int __svc_register(const u32 program, const u32 version,
843 sa_family_t family,
844 const unsigned short protocol,
845 const unsigned short port)
846 {
847 if (family != AF_INET)
848 return -EAFNOSUPPORT;
849
850 return rpcb_register(program, version, protocol, port);
851 }
852
853 #endif /* CONFIG_SUNRPC_REGISTER_V4 */
854
855 /**
856 * svc_register - register an RPC service with the local portmapper
857 * @serv: svc_serv struct for the service to register
858 * @proto: transport protocol number to advertise
859 * @port: port to advertise
860 *
861 * Service is registered for any address in serv's address family
862 */
863 int svc_register(const struct svc_serv *serv, const unsigned short proto,
864 const unsigned short port)
865 {
866 struct svc_program *progp;
867 unsigned int i;
868 int error = 0;
869
870 BUG_ON(proto == 0 && port == 0);
871
872 for (progp = serv->sv_program; progp; progp = progp->pg_next) {
873 for (i = 0; i < progp->pg_nvers; i++) {
874 if (progp->pg_vers[i] == NULL)
875 continue;
876
877 dprintk("svc: svc_register(%sv%d, %s, %u, %u)%s\n",
878 progp->pg_name,
879 i,
880 proto == IPPROTO_UDP? "udp" : "tcp",
881 port,
882 serv->sv_family,
883 progp->pg_vers[i]->vs_hidden?
884 " (but not telling portmap)" : "");
885
886 if (progp->pg_vers[i]->vs_hidden)
887 continue;
888
889 error = __svc_register(progp->pg_prog, i,
890 serv->sv_family, proto, port);
891 if (error < 0)
892 break;
893 }
894 }
895
896 return error;
897 }
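/*
 * A registration failure stops further versions of that program from
 * being registered, but the remaining programs are still attempted; the
 * value returned reflects the last registration tried.
 */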
898
899 #ifdef CONFIG_SUNRPC_REGISTER_V4
900
901 static void __svc_unregister(const u32 program, const u32 version,
902 const char *progname)
903 {
904 struct sockaddr_in6 sin6 = {
905 .sin6_family = AF_INET6,
906 .sin6_addr = IN6ADDR_ANY_INIT,
907 .sin6_port = 0,
908 };
909 int error;
910
911 error = rpcb_v4_register(program, version,
912 (struct sockaddr *)&sin6, "");
913 dprintk("svc: %s(%sv%u), error %d\n",
914 __func__, progname, version, error);
915 }
916
917 #else /* CONFIG_SUNRPC_REGISTER_V4 */
918
919 static void __svc_unregister(const u32 program, const u32 version,
920 const char *progname)
921 {
922 int error;
923
924 error = rpcb_register(program, version, 0, 0);
925 dprintk("svc: %s(%sv%u), error %d\n",
926 __func__, progname, version, error);
927 }
928
929 #endif /* CONFIG_SUNRPC_REGISTER_V4 */
930
931 /*
932 * All netids, bind addresses and ports registered for [program, version]
933 * are removed from the local rpcbind database (if the service is not
934 * hidden) to make way for a new instance of the service.
935 *
936 * The result of unregistration is reported via dprintk for those who want
937 * verification of the result, but is otherwise not important.
938 */
939 static void svc_unregister(const struct svc_serv *serv)
940 {
941 struct svc_program *progp;
942 unsigned long flags;
943 unsigned int i;
944
945 clear_thread_flag(TIF_SIGPENDING);
946
947 for (progp = serv->sv_program; progp; progp = progp->pg_next) {
948 for (i = 0; i < progp->pg_nvers; i++) {
949 if (progp->pg_vers[i] == NULL)
950 continue;
951 if (progp->pg_vers[i]->vs_hidden)
952 continue;
953
954 __svc_unregister(progp->pg_prog, i, progp->pg_name);
955 }
956 }
957
958 spin_lock_irqsave(&current->sighand->siglock, flags);
959 recalc_sigpending();
960 spin_unlock_irqrestore(&current->sighand->siglock, flags);
961 }
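/*
 * svc_unregister() clears TIF_SIGPENDING for the duration because the
 * rpcbind calls it makes are synchronous RPCs; a signal left pending
 * (for instance the SIGINT used to stop server threads) would make them
 * bail out early. recalc_sigpending() restores the flag state afterwards.
 */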
962
963 /*
964 * Printk the given error with the address of the client that caused it.
965 */
966 static int
967 __attribute__ ((format (printf, 2, 3)))
968 svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
969 {
970 va_list args;
971 int r;
972 char buf[RPC_MAX_ADDRBUFLEN];
973
974 if (!net_ratelimit())
975 return 0;
976
977 printk(KERN_WARNING "svc: %s: ",
978 svc_print_addr(rqstp, buf, sizeof(buf)));
979
980 va_start(args, fmt);
981 r = vprintk(fmt, args);
982 va_end(args);
983
984 return r;
985 }
986
987 /*
988 * Process the RPC request.
989 */
990 int
991 svc_process(struct svc_rqst *rqstp)
992 {
993 struct svc_program *progp;
994 struct svc_version *versp = NULL; /* compiler food */
995 struct svc_procedure *procp = NULL;
996 struct kvec * argv = &rqstp->rq_arg.head[0];
997 struct kvec * resv = &rqstp->rq_res.head[0];
998 struct svc_serv *serv = rqstp->rq_server;
999 kxdrproc_t xdr;
1000 __be32 *statp;
1001 u32 dir, prog, vers, proc;
1002 __be32 auth_stat, rpc_stat;
1003 int auth_res;
1004 __be32 *reply_statp;
1005
1006 rpc_stat = rpc_success;
1007
1008 if (argv->iov_len < 6*4)
1009 goto err_short_len;
1010
1011 /* setup response xdr_buf.
1012 * Initially it has just one page
1013 */
1014 rqstp->rq_resused = 1;
1015 resv->iov_base = page_address(rqstp->rq_respages[0]);
1016 resv->iov_len = 0;
1017 rqstp->rq_res.pages = rqstp->rq_respages + 1;
1018 rqstp->rq_res.len = 0;
1019 rqstp->rq_res.page_base = 0;
1020 rqstp->rq_res.page_len = 0;
1021 rqstp->rq_res.buflen = PAGE_SIZE;
1022 rqstp->rq_res.tail[0].iov_base = NULL;
1023 rqstp->rq_res.tail[0].iov_len = 0;
1024 /* Will be turned off only in gss privacy case: */
1025 rqstp->rq_splice_ok = 1;
1026
1027 /* Setup reply header */
1028 rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);
1029
1030 rqstp->rq_xid = svc_getu32(argv);
1031 svc_putu32(resv, rqstp->rq_xid);
1032
1033 dir = svc_getnl(argv);
1034 vers = svc_getnl(argv);
1035
1036 /* First words of reply: */
1037 svc_putnl(resv, 1); /* REPLY */
1038
1039 if (dir != 0) /* direction != CALL */
1040 goto err_bad_dir;
1041 if (vers != 2) /* RPC version number */
1042 goto err_bad_rpc;
1043
1044 /* Save position in case we later decide to reject: */
1045 reply_statp = resv->iov_base + resv->iov_len;
1046
1047 svc_putnl(resv, 0); /* ACCEPT */
1048
1049 rqstp->rq_prog = prog = svc_getnl(argv); /* program number */
1050 rqstp->rq_vers = vers = svc_getnl(argv); /* version number */
1051 rqstp->rq_proc = proc = svc_getnl(argv); /* procedure number */
1052
1053 progp = serv->sv_program;
1054
1055 for (progp = serv->sv_program; progp; progp = progp->pg_next)
1056 if (prog == progp->pg_prog)
1057 break;
1058
1059 /*
1060 * Decode auth data, and add verifier to reply buffer.
1061 * We do this before anything else in order to get a decent
1062 * auth verifier.
1063 */
1064 auth_res = svc_authenticate(rqstp, &auth_stat);
1065 /* Also give the program a chance to reject this call: */
1066 if (auth_res == SVC_OK && progp) {
1067 auth_stat = rpc_autherr_badcred;
1068 auth_res = progp->pg_authenticate(rqstp);
1069 }
1070 switch (auth_res) {
1071 case SVC_OK:
1072 break;
1073 case SVC_GARBAGE:
1074 goto err_garbage;
1075 case SVC_SYSERR:
1076 rpc_stat = rpc_system_err;
1077 goto err_bad;
1078 case SVC_DENIED:
1079 goto err_bad_auth;
1080 case SVC_DROP:
1081 goto dropit;
1082 case SVC_COMPLETE:
1083 goto sendit;
1084 }
1085
1086 if (progp == NULL)
1087 goto err_bad_prog;
1088
1089 if (vers >= progp->pg_nvers ||
1090 !(versp = progp->pg_vers[vers]))
1091 goto err_bad_vers;
1092
1093 procp = versp->vs_proc + proc;
1094 if (proc >= versp->vs_nproc || !procp->pc_func)
1095 goto err_bad_proc;
1096 rqstp->rq_server = serv;
1097 rqstp->rq_procinfo = procp;
1098
1099 /* Syntactic check complete */
1100 serv->sv_stats->rpccnt++;
1101
1102 /* Build the reply header. */
1103 statp = resv->iov_base + resv->iov_len;
1104 svc_putnl(resv, RPC_SUCCESS);
1105
1106 /* Bump per-procedure stats counter */
1107 procp->pc_count++;
1108
1109 /* Initialize storage for argp and resp */
1110 memset(rqstp->rq_argp, 0, procp->pc_argsize);
1111 memset(rqstp->rq_resp, 0, procp->pc_ressize);
1112
1113 /* un-reserve some of the out-queue now that we have a
1114 * better idea of reply size
1115 */
1116 if (procp->pc_xdrressize)
1117 svc_reserve_auth(rqstp, procp->pc_xdrressize<<2);
1118
1119 /* Call the function that processes the request. */
1120 if (!versp->vs_dispatch) {
1121 /* Decode arguments */
1122 xdr = procp->pc_decode;
1123 if (xdr && !xdr(rqstp, argv->iov_base, rqstp->rq_argp))
1124 goto err_garbage;
1125
1126 *statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
1127
1128 /* Encode reply */
1129 if (*statp == rpc_drop_reply) {
1130 if (procp->pc_release)
1131 procp->pc_release(rqstp, NULL, rqstp->rq_resp);
1132 goto dropit;
1133 }
1134 if (*statp == rpc_success && (xdr = procp->pc_encode)
1135 && !xdr(rqstp, resv->iov_base+resv->iov_len, rqstp->rq_resp)) {
1136 dprintk("svc: failed to encode reply\n");
1137 /* serv->sv_stats->rpcsystemerr++; */
1138 *statp = rpc_system_err;
1139 }
1140 } else {
1141 dprintk("svc: calling dispatcher\n");
1142 if (!versp->vs_dispatch(rqstp, statp)) {
1143 /* Release reply info */
1144 if (procp->pc_release)
1145 procp->pc_release(rqstp, NULL, rqstp->rq_resp);
1146 goto dropit;
1147 }
1148 }
1149
1150 /* Check RPC status result */
1151 if (*statp != rpc_success)
1152 resv->iov_len = ((void*)statp) - resv->iov_base + 4;
1153
1154 /* Release reply info */
1155 if (procp->pc_release)
1156 procp->pc_release(rqstp, NULL, rqstp->rq_resp);
1157
1158 if (procp->pc_encode == NULL)
1159 goto dropit;
1160
1161 sendit:
1162 if (svc_authorise(rqstp))
1163 goto dropit;
1164 return svc_send(rqstp);
1165
1166 dropit:
1167 svc_authorise(rqstp); /* doesn't hurt to call this twice */
1168 dprintk("svc: svc_process dropit\n");
1169 svc_drop(rqstp);
1170 return 0;
1171
1172 err_short_len:
1173 svc_printk(rqstp, "short len %Zd, dropping request\n",
1174 argv->iov_len);
1175
1176 goto dropit; /* drop request */
1177
1178 err_bad_dir:
1179 svc_printk(rqstp, "bad direction %d, dropping request\n", dir);
1180
1181 serv->sv_stats->rpcbadfmt++;
1182 goto dropit; /* drop request */
1183
1184 err_bad_rpc:
1185 serv->sv_stats->rpcbadfmt++;
1186 svc_putnl(resv, 1); /* REJECT */
1187 svc_putnl(resv, 0); /* RPC_MISMATCH */
1188 svc_putnl(resv, 2); /* Only RPCv2 supported */
1189 svc_putnl(resv, 2);
1190 goto sendit;
1191
1192 err_bad_auth:
1193 dprintk("svc: authentication failed (%d)\n", ntohl(auth_stat));
1194 serv->sv_stats->rpcbadauth++;
1195 /* Restore write pointer to location of accept status: */
1196 xdr_ressize_check(rqstp, reply_statp);
1197 svc_putnl(resv, 1); /* REJECT */
1198 svc_putnl(resv, 1); /* AUTH_ERROR */
1199 svc_putnl(resv, ntohl(auth_stat)); /* status */
1200 goto sendit;
1201
1202 err_bad_prog:
1203 dprintk("svc: unknown program %d\n", prog);
1204 serv->sv_stats->rpcbadfmt++;
1205 svc_putnl(resv, RPC_PROG_UNAVAIL);
1206 goto sendit;
1207
1208 err_bad_vers:
1209 svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
1210 vers, prog, progp->pg_name);
1211
1212 serv->sv_stats->rpcbadfmt++;
1213 svc_putnl(resv, RPC_PROG_MISMATCH);
1214 svc_putnl(resv, progp->pg_lovers);
1215 svc_putnl(resv, progp->pg_hivers);
1216 goto sendit;
1217
1218 err_bad_proc:
1219 svc_printk(rqstp, "unknown procedure (%d)\n", proc);
1220
1221 serv->sv_stats->rpcbadfmt++;
1222 svc_putnl(resv, RPC_PROC_UNAVAIL);
1223 goto sendit;
1224
1225 err_garbage:
1226 svc_printk(rqstp, "failed to decode args\n");
1227
1228 rpc_stat = rpc_garbage_args;
1229 err_bad:
1230 serv->sv_stats->rpcbadfmt++;
1231 svc_putnl(resv, ntohl(rpc_stat));
1232 goto sendit;
1233 }
1234 EXPORT_SYMBOL_GPL(svc_process);
1235
1236 /*
1237 * Return (transport-specific) limit on the rpc payload.
1238 */
1239 u32 svc_max_payload(const struct svc_rqst *rqstp)
1240 {
1241 u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload;
1242
1243 if (rqstp->rq_server->sv_max_payload < max)
1244 max = rqstp->rq_server->sv_max_payload;
1245 return max;
1246 }
1247 EXPORT_SYMBOL_GPL(svc_max_payload);
1248