// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 *
 * Multiple thread pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * by Greg Banks <gnb@melbourne.sgi.com>
 */

#include <linux/linkage.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/bc_xprt.h>

#include <trace/events/sunrpc.h>

#define RPCDBG_FACILITY	RPCDBG_SVCDSP

static void svc_unregister(const struct svc_serv *serv, struct net *net);

#define svc_serv_is_pooled(serv)	((serv)->sv_ops->svo_function)

#define SVC_POOL_DEFAULT	SVC_POOL_GLOBAL

/*
 * Structure for mapping cpus to pools and vice versa.
 * Setup once during sunrpc initialisation.
 */
struct svc_pool_map svc_pool_map = {
	.mode = SVC_POOL_DEFAULT
};
EXPORT_SYMBOL_GPL(svc_pool_map);

static DEFINE_MUTEX(svc_pool_map_mutex);	/* protects svc_pool_map.count only */

static int
param_set_pool_mode(const char *val, const struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;
	struct svc_pool_map *m = &svc_pool_map;
	int err;

	mutex_lock(&svc_pool_map_mutex);

	err = -EBUSY;
	if (m->count)
		goto out;

	err = 0;
	if (!strncmp(val, "auto", 4))
		*ip = SVC_POOL_AUTO;
	else if (!strncmp(val, "global", 6))
		*ip = SVC_POOL_GLOBAL;
	else if (!strncmp(val, "percpu", 6))
		*ip = SVC_POOL_PERCPU;
	else if (!strncmp(val, "pernode", 7))
		*ip = SVC_POOL_PERNODE;
	else
		err = -EINVAL;

out:
	mutex_unlock(&svc_pool_map_mutex);
	return err;
}

static int
param_get_pool_mode(char *buf, const struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;

	switch (*ip) {
	case SVC_POOL_AUTO:
		return strlcpy(buf, "auto\n", 20);
	case SVC_POOL_GLOBAL:
		return strlcpy(buf, "global\n", 20);
	case SVC_POOL_PERCPU:
		return strlcpy(buf, "percpu\n", 20);
	case SVC_POOL_PERNODE:
		return strlcpy(buf, "pernode\n", 20);
	default:
		return sprintf(buf, "%d\n", *ip);
	}
}

module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
		  &svc_pool_map.mode, 0644);
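
/*
 * For example, because pool_mode is a writable parameter of sunrpc.ko,
 * an administrator can pick a mapping mode before any pooled service is
 * started (a usage sketch, not code from this file):
 *
 *	modprobe sunrpc pool_mode=pernode
 * or
 *	echo pernode > /sys/module/sunrpc/parameters/pool_mode
 *
 * Once svc_pool_map.count is non-zero, param_set_pool_mode() above
 * refuses the change with -EBUSY.
 */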

/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
	unsigned int node;

	if (nr_online_nodes > 1) {
		/*
		 * Actually have multiple NUMA nodes,
		 * so split pools on NUMA node boundaries
		 */
		return SVC_POOL_PERNODE;
	}

	node = first_online_node;
	if (nr_cpus_node(node) > 2) {
		/*
		 * Non-trivial SMP, or CONFIG_NUMA on
		 * non-NUMA hardware, e.g. with a generic
		 * x86_64 kernel on Xeons.  In this case we
		 * want to divide the pools on cpu boundaries.
		 */
		return SVC_POOL_PERCPU;
	}

	/* default: one global pool */
	return SVC_POOL_GLOBAL;
}

/*
 * Allocate the to_pool[] and pool_to[] arrays.
 * Returns 0 on success or an errno.
 */
static int
svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
{
	m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->to_pool)
		goto fail;
	m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->pool_to)
		goto fail_free;

	return 0;

fail_free:
	kfree(m->to_pool);
	m->to_pool = NULL;
fail:
	return -ENOMEM;
}

/*
 * Initialise the pool map for SVC_POOL_PERCPU mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_cpu_ids;
	unsigned int pidx = 0;
	unsigned int cpu;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_online_cpu(cpu) {
		BUG_ON(pidx >= maxpools);
		m->to_pool[cpu] = pidx;
		m->pool_to[pidx] = cpu;
		pidx++;
	}
	/* cpus brought online later all get mapped to pool0, sorry */

	return pidx;
}

/*
 * Initialise the pool map for SVC_POOL_PERNODE mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_pernode(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_node_ids;
	unsigned int pidx = 0;
	unsigned int node;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_node_with_cpus(node) {
		/* some architectures (e.g. SN2) have cpuless nodes */
		BUG_ON(pidx >= maxpools);
		m->to_pool[node] = pidx;
		m->pool_to[pidx] = node;
		pidx++;
	}
	/* nodes brought online later all get mapped to pool0, sorry */

	return pidx;
}

/*
 * Add a reference to the global map of cpus to pools (and
 * vice versa).  Initialise the map if we're the first user.
 * Returns the number of pools.
 */
unsigned int
svc_pool_map_get(void)
{
	struct svc_pool_map *m = &svc_pool_map;
	int npools = -1;

	mutex_lock(&svc_pool_map_mutex);

	if (m->count++) {
		mutex_unlock(&svc_pool_map_mutex);
		return m->npools;
	}

	if (m->mode == SVC_POOL_AUTO)
		m->mode = svc_pool_map_choose_mode();

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		npools = svc_pool_map_init_percpu(m);
		break;
	case SVC_POOL_PERNODE:
		npools = svc_pool_map_init_pernode(m);
		break;
	}

	if (npools < 0) {
		/* default, or memory allocation failure */
		npools = 1;
		m->mode = SVC_POOL_GLOBAL;
	}
	m->npools = npools;

	mutex_unlock(&svc_pool_map_mutex);
	return m->npools;
}
EXPORT_SYMBOL_GPL(svc_pool_map_get);

/*
 * Drop a reference to the global map of cpus to pools.
 * When the last reference is dropped, the map data is
 * freed; this allows the sysadmin to change the pool
 * mode using the pool_mode module option without
 * rebooting or re-loading sunrpc.ko.
 */
void
svc_pool_map_put(void)
{
	struct svc_pool_map *m = &svc_pool_map;

	mutex_lock(&svc_pool_map_mutex);

	if (!--m->count) {
		kfree(m->to_pool);
		m->to_pool = NULL;
		kfree(m->pool_to);
		m->pool_to = NULL;
		m->npools = 0;
	}

	mutex_unlock(&svc_pool_map_mutex);
}
EXPORT_SYMBOL_GPL(svc_pool_map_put);
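
/*
 * A minimal sketch of the get/put pairing described above, mirroring
 * what svc_create_pooled() and svc_destroy() below actually do:
 *
 *	unsigned int npools = svc_pool_map_get();
 *	serv = __svc_create(prog, bufsize, npools, ops);
 *	if (!serv)
 *		svc_pool_map_put();	// drop our reference on failure
 */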

static int svc_pool_map_get_node(unsigned int pidx)
{
	const struct svc_pool_map *m = &svc_pool_map;

	if (m->count) {
		if (m->mode == SVC_POOL_PERCPU)
			return cpu_to_node(m->pool_to[pidx]);
		if (m->mode == SVC_POOL_PERNODE)
			return m->pool_to[pidx];
	}
	return NUMA_NO_NODE;
}

/*
 * Set the given thread's cpus_allowed mask so that it
 * will only run on cpus in the given pool.
 */
static inline void
svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int node = m->pool_to[pidx];

	/*
	 * The caller checks for sv_nrpools > 1, which
	 * implies that we've been initialized.
	 */
	WARN_ON_ONCE(m->count == 0);
	if (m->count == 0)
		return;

	switch (m->mode) {
	case SVC_POOL_PERCPU:
	{
		set_cpus_allowed_ptr(task, cpumask_of(node));
		break;
	}
	case SVC_POOL_PERNODE:
	{
		set_cpus_allowed_ptr(task, cpumask_of_node(node));
		break;
	}
	}
}

/*
 * Use the mapping mode to choose a pool for a given CPU.
 * Used when enqueueing an incoming RPC.  Always returns
 * a non-NULL pool pointer.
 */
struct svc_pool *
svc_pool_for_cpu(struct svc_serv *serv, int cpu)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int pidx = 0;

	/*
	 * An uninitialised map happens in a pure client when
	 * lockd is brought up, so silently treat it the
	 * same as SVC_POOL_GLOBAL.
	 */
	if (svc_serv_is_pooled(serv)) {
		switch (m->mode) {
		case SVC_POOL_PERCPU:
			pidx = m->to_pool[cpu];
			break;
		case SVC_POOL_PERNODE:
			pidx = m->to_pool[cpu_to_node(cpu)];
			break;
		}
	}
	return &serv->sv_pools[pidx % serv->sv_nrpools];
}
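
/*
 * A sketch of the enqueue-side usage (the transport enqueue path in
 * svc_xprt.c does roughly this; shown here for illustration only):
 *
 *	int cpu = get_cpu();
 *	struct svc_pool *pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
 *	// queue the transport on pool->sp_sockets, wake a pool thread
 *	put_cpu();
 */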

int svc_rpcb_setup(struct svc_serv *serv, struct net *net)
{
	int err;

	err = rpcb_create_local(net);
	if (err)
		return err;

	/* Remove any stale portmap registrations */
	svc_unregister(serv, net);
	return 0;
}
EXPORT_SYMBOL_GPL(svc_rpcb_setup);

void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net)
{
	svc_unregister(serv, net);
	rpcb_put_local(net);
}
EXPORT_SYMBOL_GPL(svc_rpcb_cleanup);

static int svc_uses_rpcbind(struct svc_serv *serv)
{
	struct svc_program *progp;
	unsigned int i;

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (!progp->pg_vers[i]->vs_hidden)
				return 1;
		}
	}

	return 0;
}

int svc_bind(struct svc_serv *serv, struct net *net)
{
	if (!svc_uses_rpcbind(serv))
		return 0;
	return svc_rpcb_setup(serv, net);
}
EXPORT_SYMBOL_GPL(svc_bind);

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static void
__svc_init_bc(struct svc_serv *serv)
{
	INIT_LIST_HEAD(&serv->sv_cb_list);
	spin_lock_init(&serv->sv_cb_lock);
	init_waitqueue_head(&serv->sv_cb_waitq);
}
#else
static void
__svc_init_bc(struct svc_serv *serv)
{
}
#endif

/*
 * Create an RPC service
 */
static struct svc_serv *
__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
	     const struct svc_serv_ops *ops)
{
	struct svc_serv *serv;
	unsigned int vers;
	unsigned int xdrsize;
	unsigned int i;

	serv = kzalloc(sizeof(*serv), GFP_KERNEL);
	if (!serv)
		return NULL;
	serv->sv_name = prog->pg_name;
	serv->sv_program = prog;
	serv->sv_nrthreads = 1;
	serv->sv_stats = prog->pg_stats;
	if (bufsize > RPCSVC_MAXPAYLOAD)
		bufsize = RPCSVC_MAXPAYLOAD;
	serv->sv_max_payload = bufsize ? bufsize : 4096;
	serv->sv_max_mesg = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
	serv->sv_ops = ops;
	xdrsize = 0;
	while (prog) {
		prog->pg_lovers = prog->pg_nvers-1;
		for (vers = 0; vers < prog->pg_nvers; vers++)
			if (prog->pg_vers[vers]) {
				prog->pg_hivers = vers;
				if (prog->pg_lovers > vers)
					prog->pg_lovers = vers;
				if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
					xdrsize = prog->pg_vers[vers]->vs_xdrsize;
			}
		prog = prog->pg_next;
	}
	serv->sv_xdrsize = xdrsize;
	INIT_LIST_HEAD(&serv->sv_tempsocks);
	INIT_LIST_HEAD(&serv->sv_permsocks);
	timer_setup(&serv->sv_temptimer, NULL, 0);
	spin_lock_init(&serv->sv_lock);

	__svc_init_bc(serv);

	serv->sv_nrpools = npools;
	serv->sv_pools =
		kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
			GFP_KERNEL);
	if (!serv->sv_pools) {
		kfree(serv);
		return NULL;
	}

	for (i = 0; i < serv->sv_nrpools; i++) {
		struct svc_pool *pool = &serv->sv_pools[i];

		dprintk("svc: initialising pool %u for %s\n",
			i, serv->sv_name);

		pool->sp_id = i;
		INIT_LIST_HEAD(&pool->sp_sockets);
		INIT_LIST_HEAD(&pool->sp_all_threads);
		spin_lock_init(&pool->sp_lock);
	}

	return serv;
}

struct svc_serv *
svc_create(struct svc_program *prog, unsigned int bufsize,
	   const struct svc_serv_ops *ops)
{
	return __svc_create(prog, bufsize, /*npools*/1, ops);
}
EXPORT_SYMBOL_GPL(svc_create);

struct svc_serv *
svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
		  const struct svc_serv_ops *ops)
{
	struct svc_serv *serv;
	unsigned int npools = svc_pool_map_get();

	serv = __svc_create(prog, bufsize, npools, ops);
	if (!serv)
		goto out_err;
	return serv;
out_err:
	svc_pool_map_put();
	return NULL;
}
EXPORT_SYMBOL_GPL(svc_create_pooled);
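
/*
 * A hypothetical sketch of how a service might bring up a pooled
 * server; my_program, my_shutdown and my_thread_fn are illustrative
 * placeholders, not symbols defined in this file:
 *
 *	static const struct svc_serv_ops my_ops = {
 *		.svo_shutdown	= my_shutdown,
 *		.svo_function	= my_thread_fn,	// non-NULL marks serv pooled
 *		.svo_module	= THIS_MODULE,
 *	};
 *
 *	serv = svc_create_pooled(&my_program, 64 * 1024, &my_ops);
 *	if (!serv)
 *		return -ENOMEM;
 */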

void svc_shutdown_net(struct svc_serv *serv, struct net *net)
{
	svc_close_net(serv, net);

	if (serv->sv_ops->svo_shutdown)
		serv->sv_ops->svo_shutdown(serv, net);
}
EXPORT_SYMBOL_GPL(svc_shutdown_net);

/*
 * Destroy an RPC service.  Should be called with appropriate locking to
 * protect sv_nrthreads, sv_permsocks and sv_tempsocks.
 */
void
svc_destroy(struct svc_serv *serv)
{
	dprintk("svc: svc_destroy(%s, %d)\n",
		serv->sv_program->pg_name,
		serv->sv_nrthreads);

	if (serv->sv_nrthreads) {
		if (--(serv->sv_nrthreads) != 0) {
			svc_sock_update_bufs(serv);
			return;
		}
	} else
		printk("svc_destroy: no threads for serv=%p!\n", serv);

	del_timer_sync(&serv->sv_temptimer);

	/*
	 * The last user is gone and thus all sockets have to be destroyed
	 * by this point.  Check this.
	 */
	BUG_ON(!list_empty(&serv->sv_permsocks));
	BUG_ON(!list_empty(&serv->sv_tempsocks));

	cache_clean_deferred(serv);

	if (svc_serv_is_pooled(serv))
		svc_pool_map_put();

	kfree(serv->sv_pools);
	kfree(serv);
}
EXPORT_SYMBOL_GPL(svc_destroy);
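
/*
 * A sketch of the sv_nrthreads reference pattern svc_destroy() relies
 * on; svc_get() is the inline helper from <linux/sunrpc/svc.h> that
 * bumps sv_nrthreads:
 *
 *	svc_get(serv);			// take a temporary reference
 *	error = svc_set_num_threads(serv, NULL, nrservs);
 *	svc_destroy(serv);		// drop it; frees serv at zero
 */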

/*
 * Allocate an RPC server's buffer space.
 * We allocate pages and place them in rq_argpages.
 */
static int
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
{
	unsigned int pages, arghi;

	/* bc_xprt uses fore channel allocated buffers */
	if (svc_is_backchannel(rqstp))
		return 1;

	pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
				       * We assume each is at most one page.
				       */
	arghi = 0;
	WARN_ON_ONCE(pages > RPCSVC_MAXPAGES);
	if (pages > RPCSVC_MAXPAGES)
		pages = RPCSVC_MAXPAGES;
	while (pages) {
		struct page *p = alloc_pages_node(node, GFP_KERNEL, 0);
		if (!p)
			break;
		rqstp->rq_pages[arghi++] = p;
		pages--;
	}
	return pages == 0;
}

/*
 * Release an RPC server buffer
 */
static void
svc_release_buffer(struct svc_rqst *rqstp)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
		if (rqstp->rq_pages[i])
			put_page(rqstp->rq_pages[i]);
}

struct svc_rqst *
svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)
{
	struct svc_rqst *rqstp;

	rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
	if (!rqstp)
		return rqstp;

	__set_bit(RQ_BUSY, &rqstp->rq_flags);
	spin_lock_init(&rqstp->rq_lock);
	rqstp->rq_server = serv;
	rqstp->rq_pool = pool;

	rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
	if (!rqstp->rq_argp)
		goto out_enomem;

	rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
	if (!rqstp->rq_resp)
		goto out_enomem;

	if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
		goto out_enomem;

	return rqstp;
out_enomem:
	svc_rqst_free(rqstp);
	return NULL;
}
EXPORT_SYMBOL_GPL(svc_rqst_alloc);

struct svc_rqst *
svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
{
	struct svc_rqst *rqstp;

	rqstp = svc_rqst_alloc(serv, pool, node);
	if (!rqstp)
		return ERR_PTR(-ENOMEM);

	serv->sv_nrthreads++;
	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads++;
	list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
	spin_unlock_bh(&pool->sp_lock);
	return rqstp;
}
EXPORT_SYMBOL_GPL(svc_prepare_thread);

/*
 * Choose a pool in which to create a new thread, for svc_set_num_threads
 */
static inline struct svc_pool *
choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	if (pool != NULL)
		return pool;

	return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}

/*
 * Choose a thread to kill, for svc_set_num_threads
 */
static inline struct task_struct *
choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	unsigned int i;
	struct task_struct *task = NULL;

	if (pool != NULL) {
		spin_lock_bh(&pool->sp_lock);
	} else {
		/* choose a pool in round-robin fashion */
		for (i = 0; i < serv->sv_nrpools; i++) {
			pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
			spin_lock_bh(&pool->sp_lock);
			if (!list_empty(&pool->sp_all_threads))
				goto found_pool;
			spin_unlock_bh(&pool->sp_lock);
		}
		return NULL;
	}

found_pool:
	if (!list_empty(&pool->sp_all_threads)) {
		struct svc_rqst *rqstp;

		/*
		 * Remove from the pool->sp_all_threads list
		 * so we don't try to kill it again.
		 */
		rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
		set_bit(RQ_VICTIM, &rqstp->rq_flags);
		list_del_rcu(&rqstp->rq_all);
		task = rqstp->rq_task;
	}
	spin_unlock_bh(&pool->sp_lock);

	return task;
}

/* create new threads */
static int
svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct svc_rqst *rqstp;
	struct task_struct *task;
	struct svc_pool *chosen_pool;
	unsigned int state = serv->sv_nrthreads-1;
	int node;

	do {
		nrservs--;
		chosen_pool = choose_pool(serv, pool, &state);

		node = svc_pool_map_get_node(chosen_pool->sp_id);
		rqstp = svc_prepare_thread(serv, chosen_pool, node);
		if (IS_ERR(rqstp))
			return PTR_ERR(rqstp);

		__module_get(serv->sv_ops->svo_module);
		task = kthread_create_on_node(serv->sv_ops->svo_function, rqstp,
					      node, "%s", serv->sv_name);
		if (IS_ERR(task)) {
			module_put(serv->sv_ops->svo_module);
			svc_exit_thread(rqstp);
			return PTR_ERR(task);
		}

		rqstp->rq_task = task;
		if (serv->sv_nrpools > 1)
			svc_pool_map_set_cpumask(task, chosen_pool->sp_id);

		svc_sock_update_bufs(serv);
		wake_up_process(task);
	} while (nrservs > 0);

	return 0;
}

/* destroy old threads */
static int
svc_signal_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct task_struct *task;
	unsigned int state = serv->sv_nrthreads-1;

	/* destroy old threads */
	do {
		task = choose_victim(serv, pool, &state);
		if (task == NULL)
			break;
		send_sig(SIGINT, task, 1);
		nrservs++;
	} while (nrservs < 0);

	return 0;
}

/*
 * Create or destroy enough new threads to make the number
 * of threads the given number.  If `pool' is non-NULL, applies
 * only to threads in that pool, otherwise round-robins between
 * all pools.  Caller must ensure mutual exclusion between this and
 * server startup or shutdown.
 *
 * Destroying threads relies on the service threads filling in
 * rqstp->rq_task, which only the nfs ones do.  Assumes the serv
 * has been created using svc_create_pooled().
 *
 * Based on code that used to be in nfsd_svc() but tweaked
 * to be pool-aware.
 */
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	if (pool == NULL) {
		/* The -1 assumes caller has done a svc_get() */
		nrservs -= (serv->sv_nrthreads-1);
	} else {
		spin_lock_bh(&pool->sp_lock);
		nrservs -= pool->sp_nrthreads;
		spin_unlock_bh(&pool->sp_lock);
	}

	if (nrservs > 0)
		return svc_start_kthreads(serv, pool, nrservs);
	if (nrservs < 0)
		return svc_signal_kthreads(serv, pool, nrservs);
	return 0;
}
EXPORT_SYMBOL_GPL(svc_set_num_threads);
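
/*
 * For example (hypothetical caller), a "number of threads" knob that
 * spreads threads over all pools would do:
 *
 *	// sv_nrthreads was bumped by svc_get() beforehand, hence the -1
 *	error = svc_set_num_threads(serv, NULL, nrservs);
 *
 * while a per-pool knob passes the specific pool instead of NULL.
 */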

/* destroy old threads */
static int
svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct task_struct *task;
	unsigned int state = serv->sv_nrthreads-1;

	/* destroy old threads */
	do {
		task = choose_victim(serv, pool, &state);
		if (task == NULL)
			break;
		kthread_stop(task);
		nrservs++;
	} while (nrservs < 0);
	return 0;
}

int
svc_set_num_threads_sync(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	if (pool == NULL) {
		/* The -1 assumes caller has done a svc_get() */
		nrservs -= (serv->sv_nrthreads-1);
	} else {
		spin_lock_bh(&pool->sp_lock);
		nrservs -= pool->sp_nrthreads;
		spin_unlock_bh(&pool->sp_lock);
	}

	if (nrservs > 0)
		return svc_start_kthreads(serv, pool, nrservs);
	if (nrservs < 0)
		return svc_stop_kthreads(serv, pool, nrservs);
	return 0;
}
EXPORT_SYMBOL_GPL(svc_set_num_threads_sync);

/*
 * Called from a server thread as it's exiting.  Caller must hold the "service
 * mutex" for the service.
 */
void
svc_rqst_free(struct svc_rqst *rqstp)
{
	svc_release_buffer(rqstp);
	kfree(rqstp->rq_resp);
	kfree(rqstp->rq_argp);
	kfree(rqstp->rq_auth_data);
	kfree_rcu(rqstp, rq_rcu_head);
}
EXPORT_SYMBOL_GPL(svc_rqst_free);

void
svc_exit_thread(struct svc_rqst *rqstp)
{
	struct svc_serv *serv = rqstp->rq_server;
	struct svc_pool *pool = rqstp->rq_pool;

	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads--;
	if (!test_and_set_bit(RQ_VICTIM, &rqstp->rq_flags))
		list_del_rcu(&rqstp->rq_all);
	spin_unlock_bh(&pool->sp_lock);

	svc_rqst_free(rqstp);

	/* Release the server */
	if (serv)
		svc_destroy(serv);
}
EXPORT_SYMBOL_GPL(svc_exit_thread);

/*
 * Register an "inet" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register4(struct net *net, const u32 program,
				const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	const struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};
	const char *netid;
	int error;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP;
		break;
	default:
		return -ENOPROTOOPT;
	}

	error = rpcb_v4_register(net, program, version,
				 (const struct sockaddr *)&sin, netid);

	/*
	 * User space didn't support rpcbind v4, so retry this
	 * registration request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(net, program, version, protocol, port);

	return error;
}

#if IS_ENABLED(CONFIG_IPV6)
/*
 * Register an "inet6" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register6(struct net *net, const u32 program,
				const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	const struct sockaddr_in6 sin6 = {
		.sin6_family		= AF_INET6,
		.sin6_addr		= IN6ADDR_ANY_INIT,
		.sin6_port		= htons(port),
	};
	const char *netid;
	int error;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP6;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP6;
		break;
	default:
		return -ENOPROTOOPT;
	}

	error = rpcb_v4_register(net, program, version,
				 (const struct sockaddr *)&sin6, netid);

	/*
	 * User space didn't support rpcbind version 4, so we won't
	 * use a PF_INET6 listener.
	 */
	if (error == -EPROTONOSUPPORT)
		error = -EAFNOSUPPORT;

	return error;
}
#endif	/* IS_ENABLED(CONFIG_IPV6) */

/*
 * Register a kernel RPC service via rpcbind version 4.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_register(struct net *net, const char *progname,
			  const u32 program, const u32 version,
			  const int family,
			  const unsigned short protocol,
			  const unsigned short port)
{
	int error = -EAFNOSUPPORT;

	switch (family) {
	case PF_INET:
		error = __svc_rpcb_register4(net, program, version,
						protocol, port);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case PF_INET6:
		error = __svc_rpcb_register6(net, program, version,
						protocol, port);
#endif
	}

	trace_svc_register(progname, version, protocol, port, family, error);
	return error;
}

int svc_rpcbind_set_version(struct net *net,
			    const struct svc_program *progp,
			    u32 version, int family,
			    unsigned short proto,
			    unsigned short port)
{
	return __svc_register(net, progp->pg_name, progp->pg_prog,
				version, family, proto, port);
}
EXPORT_SYMBOL_GPL(svc_rpcbind_set_version);

int svc_generic_rpcbind_set(struct net *net,
			    const struct svc_program *progp,
			    u32 version, int family,
			    unsigned short proto,
			    unsigned short port)
{
	const struct svc_version *vers = progp->pg_vers[version];
	int error;

	if (vers == NULL)
		return 0;

	if (vers->vs_hidden) {
		trace_svc_noregister(progp->pg_name, version, proto,
				     port, family, 0);
		return 0;
	}

	/*
	 * Don't register a UDP port if we need congestion
	 * control.
	 */
	if (vers->vs_need_cong_ctrl && proto == IPPROTO_UDP)
		return 0;

	error = svc_rpcbind_set_version(net, progp, version,
					family, proto, port);

	return (vers->vs_rpcb_optnl) ? 0 : error;
}
EXPORT_SYMBOL_GPL(svc_generic_rpcbind_set);

/**
 * svc_register - register an RPC service with the local portmapper
 * @serv: svc_serv struct for the service to register
 * @net: net namespace for the service to register
 * @family: protocol family of service's listener socket
 * @proto: transport protocol number to advertise
 * @port: port to advertise
 *
 * Service is registered for any address in the passed-in protocol family
 */
int svc_register(const struct svc_serv *serv, struct net *net,
		 const int family, const unsigned short proto,
		 const unsigned short port)
{
	struct svc_program *progp;
	unsigned int i;
	int error = 0;

	WARN_ON_ONCE(proto == 0 && port == 0);
	if (proto == 0 && port == 0)
		return -EINVAL;

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {

			error = progp->pg_rpcbind_set(net, progp, i,
					family, proto, port);
			if (error < 0) {
				printk(KERN_WARNING "svc: failed to register "
					"%sv%u RPC service (errno %d).\n",
					progp->pg_name, i, -error);
				break;
			}
		}
	}

	return error;
}

/*
 * If user space is running rpcbind, it should take the v4 UNSET
 * and clear everything for this [program, version].  If user space
 * is running portmap, it will reject the v4 UNSET, but won't have
 * any "inet6" entries anyway.  So a PMAP_UNSET should be sufficient
 * in this case to clear all existing entries for [program, version].
 */
static void __svc_unregister(struct net *net, const u32 program, const u32 version,
			     const char *progname)
{
	int error;

	error = rpcb_v4_register(net, program, version, NULL, "");

	/*
	 * User space didn't support rpcbind v4, so retry this
	 * request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(net, program, version, 0, 0);

	trace_svc_unregister(progname, version, error);
}

/*
 * All netids, bind addresses and ports registered for [program, version]
 * are removed from the local rpcbind database (if the service is not
 * hidden) to make way for a new instance of the service.
 *
 * The result of unregistration is reported via dprintk for those who want
 * verification of the result, but is otherwise not important.
 */
static void svc_unregister(const struct svc_serv *serv, struct net *net)
{
	struct svc_program *progp;
	unsigned long flags;
	unsigned int i;

	clear_thread_flag(TIF_SIGPENDING);

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (progp->pg_vers[i]->vs_hidden)
				continue;
			__svc_unregister(net, progp->pg_prog, i, progp->pg_name);
		}
	}

	spin_lock_irqsave(&current->sighand->siglock, flags);
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/*
 * dprintk the given error with the address of the client that caused it.
 */
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static __printf(2, 3)
void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	char buf[RPC_MAX_ADDRBUFLEN];

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	dprintk("svc: %s: %pV", svc_print_addr(rqstp, buf, sizeof(buf)), &vaf);

	va_end(args);
}
#else
static __printf(2, 3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {}
#endif

__be32
svc_return_autherr(struct svc_rqst *rqstp, __be32 auth_err)
{
	set_bit(RQ_AUTHERR, &rqstp->rq_flags);
	return auth_err;
}
EXPORT_SYMBOL_GPL(svc_return_autherr);

static __be32
svc_get_autherr(struct svc_rqst *rqstp, __be32 *statp)
{
	if (test_and_clear_bit(RQ_AUTHERR, &rqstp->rq_flags))
		return *statp;
	return rpc_auth_ok;
}

static int
svc_generic_dispatch(struct svc_rqst *rqstp, __be32 *statp)
{
	struct kvec *argv = &rqstp->rq_arg.head[0];
	struct kvec *resv = &rqstp->rq_res.head[0];
	const struct svc_procedure *procp = rqstp->rq_procinfo;

	/*
	 * Decode arguments
	 * XXX: why do we ignore the return value?
	 */
	if (procp->pc_decode &&
	    !procp->pc_decode(rqstp, argv->iov_base)) {
		*statp = rpc_garbage_args;
		return 1;
	}

	*statp = procp->pc_func(rqstp);

	if (*statp == rpc_drop_reply ||
	    test_bit(RQ_DROPME, &rqstp->rq_flags))
		return 0;

	if (test_bit(RQ_AUTHERR, &rqstp->rq_flags))
		return 1;

	if (*statp != rpc_success)
		return 1;

	/* Encode reply */
	if (procp->pc_encode &&
	    !procp->pc_encode(rqstp, resv->iov_base + resv->iov_len)) {
		dprintk("svc: failed to encode reply\n");
		/* serv->sv_stats->rpcsystemerr++; */
		*statp = rpc_system_err;
	}
	return 1;
}
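
/*
 * The dispatch logic above assumes procedure table entries of roughly
 * this shape (names are illustrative; real tables live in the
 * individual services such as nfsd and lockd):
 *
 *	static const struct svc_procedure my_proc = {
 *		.pc_func	= my_handler,	// returns an accept status
 *		.pc_decode	= my_decode,	// XDR-decodes rq_argp
 *		.pc_encode	= my_encode,	// XDR-encodes rq_resp
 *		.pc_release	= my_release,	// optional cleanup
 *		.pc_argsize	= sizeof(struct my_args),
 *		.pc_ressize	= sizeof(struct my_res),
 *		.pc_xdrressize	= 1,
 *	};
 *
 * pc_func returns an __be32 such as rpc_success or rpc_drop_reply.
 */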

__be32
svc_generic_init_request(struct svc_rqst *rqstp,
			 const struct svc_program *progp,
			 struct svc_process_info *ret)
{
	const struct svc_version *versp = NULL;	/* compiler food */
	const struct svc_procedure *procp = NULL;

	if (rqstp->rq_vers >= progp->pg_nvers)
		goto err_bad_vers;
	versp = progp->pg_vers[rqstp->rq_vers];
	if (!versp)
		goto err_bad_vers;

	/*
	 * Some protocol versions (namely NFSv4) require some form of
	 * congestion control.  (See RFC 7530 section 3.1 paragraph 2)
	 * In other words, UDP is not allowed. We mark those when setting
	 * up the svc_xprt, and verify that here.
	 *
	 * The spec is not very clear about what error should be returned
	 * when someone tries to access a server that is listening on UDP
	 * for lower versions.  RPC_PROG_MISMATCH seems to be the closest
	 * fit.
	 */
	if (versp->vs_need_cong_ctrl && rqstp->rq_xprt &&
	    !test_bit(XPT_CONG_CTRL, &rqstp->rq_xprt->xpt_flags))
		goto err_bad_vers;

	if (rqstp->rq_proc >= versp->vs_nproc)
		goto err_bad_proc;
	rqstp->rq_procinfo = procp = &versp->vs_proc[rqstp->rq_proc];
	if (!procp)
		goto err_bad_proc;

	/* Initialize storage for argp and resp */
	memset(rqstp->rq_argp, 0, procp->pc_argsize);
	memset(rqstp->rq_resp, 0, procp->pc_ressize);

	/* Bump per-procedure stats counter */
	versp->vs_count[rqstp->rq_proc]++;

	ret->dispatch = versp->vs_dispatch;
	return rpc_success;
err_bad_vers:
	ret->mismatch.lovers = progp->pg_lovers;
	ret->mismatch.hivers = progp->pg_hivers;
	return rpc_prog_mismatch;
err_bad_proc:
	return rpc_proc_unavail;
}
EXPORT_SYMBOL_GPL(svc_generic_init_request);

/*
 * Common routine for processing the RPC request.
 */
static int
svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
{
	struct svc_program *progp;
	const struct svc_procedure *procp = NULL;
	struct svc_serv *serv = rqstp->rq_server;
	struct svc_process_info process;
	__be32 *statp;
	u32 prog, vers;
	__be32 auth_stat, rpc_stat;
	int auth_res;
	__be32 *reply_statp;

	rpc_stat = rpc_success;

	if (argv->iov_len < 6*4)
		goto err_short_len;

	/* Will be turned off by GSS integrity and privacy services */
	set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
	/* Will be turned off only when NFSv4 Sessions are used */
	set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
	clear_bit(RQ_DROPME, &rqstp->rq_flags);

	svc_putu32(resv, rqstp->rq_xid);

	vers = svc_getnl(argv);

	/* First words of reply: */
	svc_putnl(resv, 1);		/* REPLY */

	if (vers != 2)		/* RPC version number */
		goto err_bad_rpc;

	/* Save position in case we later decide to reject: */
	reply_statp = resv->iov_base + resv->iov_len;

	svc_putnl(resv, 0);		/* ACCEPT */

	rqstp->rq_prog = prog = svc_getnl(argv);	/* program number */
	rqstp->rq_vers = svc_getnl(argv);	/* version number */
	rqstp->rq_proc = svc_getnl(argv);	/* procedure number */

	for (progp = serv->sv_program; progp; progp = progp->pg_next)
		if (prog == progp->pg_prog)
			break;

	/*
	 * Decode auth data, and add verifier to reply buffer.
	 * We do this before anything else in order to get a decent
	 * auth verifier.
	 */
	auth_res = svc_authenticate(rqstp, &auth_stat);
	/* Also give the program a chance to reject this call: */
	if (auth_res == SVC_OK && progp) {
		auth_stat = rpc_autherr_badcred;
		auth_res = progp->pg_authenticate(rqstp);
	}
	if (auth_res != SVC_OK)
		trace_svc_authenticate(rqstp, auth_res, auth_stat);
	switch (auth_res) {
	case SVC_OK:
		break;
	case SVC_GARBAGE:
		goto err_garbage;
	case SVC_SYSERR:
		rpc_stat = rpc_system_err;
		goto err_bad;
	case SVC_DENIED:
		goto err_bad_auth;
	case SVC_CLOSE:
		goto close;
	case SVC_DROP:
		goto dropit;
	case SVC_COMPLETE:
		goto sendit;
	}

	if (progp == NULL)
		goto err_bad_prog;

	rpc_stat = progp->pg_init_request(rqstp, progp, &process);
	switch (rpc_stat) {
	case rpc_success:
		break;
	case rpc_prog_unavail:
		goto err_bad_prog;
	case rpc_prog_mismatch:
		goto err_bad_vers;
	case rpc_proc_unavail:
		goto err_bad_proc;
	}

	procp = rqstp->rq_procinfo;
	/* Should this check go into the dispatcher? */
	if (!procp || !procp->pc_func)
		goto err_bad_proc;

	/* Syntactic check complete */
	serv->sv_stats->rpccnt++;
	trace_svc_process(rqstp, progp->pg_name);

	/* Build the reply header. */
	statp = resv->iov_base + resv->iov_len;
	svc_putnl(resv, RPC_SUCCESS);

	/* un-reserve some of the out-queue now that we have a
	 * better idea of reply size
	 */
	if (procp->pc_xdrressize)
		svc_reserve_auth(rqstp, procp->pc_xdrressize<<2);

	/* Call the function that processes the request. */
	if (!process.dispatch) {
		if (!svc_generic_dispatch(rqstp, statp))
			goto release_dropit;
		if (*statp == rpc_garbage_args)
			goto err_garbage;
		auth_stat = svc_get_autherr(rqstp, statp);
		if (auth_stat != rpc_auth_ok)
			goto err_release_bad_auth;
	} else {
		dprintk("svc: calling dispatcher\n");
		if (!process.dispatch(rqstp, statp))
			goto release_dropit; /* Release reply info */
	}

	/* Check RPC status result */
	if (*statp != rpc_success)
		resv->iov_len = ((void *)statp) - resv->iov_base + 4;

	/* Release reply info */
	if (procp->pc_release)
		procp->pc_release(rqstp);

	if (procp->pc_encode == NULL)
		goto dropit;

sendit:
	if (svc_authorise(rqstp))
		goto close_xprt;
	return 1;		/* Caller can now send it */

release_dropit:
	if (procp->pc_release)
		procp->pc_release(rqstp);
dropit:
	svc_authorise(rqstp);	/* doesn't hurt to call this twice */
	dprintk("svc: svc_process dropit\n");
	return 0;

close:
	svc_authorise(rqstp);
close_xprt:
	if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
		svc_close_xprt(rqstp->rq_xprt);
	dprintk("svc: svc_process close\n");
	return 0;

err_short_len:
	svc_printk(rqstp, "short len %zd, dropping request\n",
		   argv->iov_len);
	goto close_xprt;

err_bad_rpc:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 0);	/* RPC_MISMATCH */
	svc_putnl(resv, 2);	/* Only RPCv2 supported */
	svc_putnl(resv, 2);
	goto sendit;

err_release_bad_auth:
	if (procp->pc_release)
		procp->pc_release(rqstp);
err_bad_auth:
	dprintk("svc: authentication failed (%d)\n", ntohl(auth_stat));
	serv->sv_stats->rpcbadauth++;
	/* Restore write pointer to location of accept status: */
	xdr_ressize_check(rqstp, reply_statp);
	svc_putnl(resv, 1);	/* REJECT */
	svc_putnl(resv, 1);	/* AUTH_ERROR */
	svc_putnl(resv, ntohl(auth_stat));	/* status */
	goto sendit;

err_bad_prog:
	dprintk("svc: unknown program %d\n", prog);
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_UNAVAIL);
	goto sendit;

err_bad_vers:
	svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
		   rqstp->rq_vers, rqstp->rq_prog, progp->pg_name);

	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROG_MISMATCH);
	svc_putnl(resv, process.mismatch.lovers);
	svc_putnl(resv, process.mismatch.hivers);
	goto sendit;

err_bad_proc:
	svc_printk(rqstp, "unknown procedure (%d)\n", rqstp->rq_proc);

	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, RPC_PROC_UNAVAIL);
	goto sendit;

err_garbage:
	svc_printk(rqstp, "failed to decode args\n");

	rpc_stat = rpc_garbage_args;
err_bad:
	serv->sv_stats->rpcbadfmt++;
	svc_putnl(resv, ntohl(rpc_stat));
	goto sendit;
}

/*
 * Process the RPC request.
 */
int
svc_process(struct svc_rqst *rqstp)
{
	struct kvec *argv = &rqstp->rq_arg.head[0];
	struct kvec *resv = &rqstp->rq_res.head[0];
	struct svc_serv *serv = rqstp->rq_server;
	u32 dir;

	/*
	 * Setup response xdr_buf.
	 * Initially it has just one page
	 */
	rqstp->rq_next_page = &rqstp->rq_respages[1];
	resv->iov_base = page_address(rqstp->rq_respages[0]);
	resv->iov_len = 0;
	rqstp->rq_res.pages = rqstp->rq_respages + 1;
	rqstp->rq_res.len = 0;
	rqstp->rq_res.page_base = 0;
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.buflen = PAGE_SIZE;
	rqstp->rq_res.tail[0].iov_base = NULL;
	rqstp->rq_res.tail[0].iov_len = 0;

	dir = svc_getnl(argv);
	if (dir != 0) {
		/* direction != CALL */
		svc_printk(rqstp, "bad direction %d, dropping request\n", dir);
		serv->sv_stats->rpcbadfmt++;
		goto out_drop;
	}

	/* Returns 1 for send, 0 for drop */
	if (likely(svc_process_common(rqstp, argv, resv)))
		return svc_send(rqstp);

out_drop:
	svc_drop(rqstp);
	return 0;
}
EXPORT_SYMBOL_GPL(svc_process);
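
/*
 * A minimal sketch of the server-thread loop that drives svc_process();
 * this is modelled on callers such as lockd, and the timeout value is
 * illustrative:
 *
 *	while (!kthread_should_stop()) {
 *		err = svc_recv(rqstp, 60 * HZ);
 *		if (err == -EAGAIN || err == -EINTR)
 *			continue;
 *		if (err < 0)
 *			break;
 *		svc_process(rqstp);	// sends or drops the reply
 *	}
 */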

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/*
 * Process a backchannel RPC request that arrived over an existing
 * outbound connection
 */
int
bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
	       struct svc_rqst *rqstp)
{
	struct kvec *argv = &rqstp->rq_arg.head[0];
	struct kvec *resv = &rqstp->rq_res.head[0];
	struct rpc_task *task;
	int proc_error;
	int error;

	dprintk("svc: %s(%p)\n", __func__, req);

	/* Build the svc_rqst used by the common processing routine */
	rqstp->rq_xid = req->rq_xid;
	rqstp->rq_prot = req->rq_xprt->prot;
	rqstp->rq_server = serv;
	rqstp->rq_bc_net = req->rq_xprt->xprt_net;

	rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
	memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
	memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
	memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));

	/* Adjust the argument buffer length */
	rqstp->rq_arg.len = req->rq_private_buf.len;
	if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;
		rqstp->rq_arg.page_len = 0;
	} else if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len +
			rqstp->rq_arg.page_len)
		rqstp->rq_arg.page_len = rqstp->rq_arg.len -
			rqstp->rq_arg.head[0].iov_len;
	else
		rqstp->rq_arg.len = rqstp->rq_arg.head[0].iov_len +
			rqstp->rq_arg.page_len;

	/* reset result send buffer "put" position */
	resv->iov_len = 0;

	/*
	 * Skip the next two words because they've already been
	 * processed in the transport
	 */
	svc_getu32(argv);	/* XID */
	svc_getnl(argv);	/* CALLDIR */

	/* Parse and execute the bc call */
	proc_error = svc_process_common(rqstp, argv, resv);

	atomic_dec(&req->rq_xprt->bc_slot_count);
	if (!proc_error) {
		/* Processing error: drop the request */
		xprt_free_bc_request(req);
		error = -EINVAL;
		goto out;
	}
	/* Finally, send the reply synchronously */
	memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
	task = rpc_run_bc_task(req);
	if (IS_ERR(task)) {
		error = PTR_ERR(task);
		goto out;
	}

	WARN_ON_ONCE(atomic_read(&task->tk_count) != 1);
	error = task->tk_status;
	rpc_put_task(task);

out:
	dprintk("svc: %s(), error=%d\n", __func__, error);
	return error;
}
EXPORT_SYMBOL_GPL(bc_svc_process);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

/*
 * Return (transport-specific) limit on the rpc payload.
 */
u32 svc_max_payload(const struct svc_rqst *rqstp)
{
	u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload;

	if (rqstp->rq_server->sv_max_payload < max)
		max = rqstp->rq_server->sv_max_payload;
	return max;
}
EXPORT_SYMBOL_GPL(svc_max_payload);

/**
 * svc_encode_read_payload - mark a range of bytes as a READ payload
 * @rqstp: svc_rqst to operate on
 * @offset: payload's byte offset in rqstp->rq_res
 * @length: size of payload, in bytes
 *
 * Returns zero on success, or a negative errno if a permanent
 * error occurred.
 */
int svc_encode_read_payload(struct svc_rqst *rqstp, unsigned int offset,
			    unsigned int length)
{
	return rqstp->rq_xprt->xpt_ops->xpo_read_payload(rqstp, offset, length);
}
EXPORT_SYMBOL_GPL(svc_encode_read_payload);

/**
 * svc_fill_write_vector - Construct data argument for VFS write call
 * @rqstp: svc_rqst to operate on
 * @pages: list of pages containing data payload
 * @first: buffer containing first section of write payload
 * @total: total number of bytes of write payload
 *
 * Fills in rqstp::rq_vec, and returns the number of elements.
 */
unsigned int svc_fill_write_vector(struct svc_rqst *rqstp, struct page **pages,
				   struct kvec *first, size_t total)
{
	struct kvec *vec = rqstp->rq_vec;
	unsigned int i;

	/* Some types of transport can present the write payload
	 * entirely in rq_arg.pages.  In this case, @first is empty.
	 */
	i = 0;
	if (first->iov_len) {
		vec[i].iov_base = first->iov_base;
		vec[i].iov_len = min_t(size_t, total, first->iov_len);
		total -= vec[i].iov_len;
		++i;
	}

	while (total) {
		vec[i].iov_base = page_address(*pages);
		vec[i].iov_len = min_t(size_t, total, PAGE_SIZE);
		total -= vec[i].iov_len;
		++i;
		++pages;
	}

	WARN_ON_ONCE(i > ARRAY_SIZE(rqstp->rq_vec));
	return i;
}
EXPORT_SYMBOL_GPL(svc_fill_write_vector);
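
/*
 * For example (a hedged sketch of a WRITE handler; argp and cnt are
 * illustrative names), a caller builds rq_vec and hands it to the VFS
 * layer:
 *
 *	nvecs = svc_fill_write_vector(rqstp, rqstp->rq_arg.pages,
 *				      &argp->first, cnt);
 *	// write the payload from rqstp->rq_vec[0..nvecs-1]
 */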

/**
 * svc_fill_symlink_pathname - Construct pathname argument for VFS symlink call
 * @rqstp: svc_rqst to operate on
 * @first: buffer containing first section of pathname
 * @p: buffer containing remaining section of pathname
 * @total: total length of the pathname argument
 *
 * The VFS symlink API demands a NUL-terminated pathname in mapped memory.
 * Returns pointer to a NUL-terminated string, or an ERR_PTR.  Caller must free
 * the returned string.
 */
char *svc_fill_symlink_pathname(struct svc_rqst *rqstp, struct kvec *first,
				void *p, size_t total)
{
	size_t len, remaining;
	char *result, *dst;

	result = kmalloc(total + 1, GFP_KERNEL);
	if (!result)
		return ERR_PTR(-ESERVERFAULT);

	dst = result;
	remaining = total;

	len = min_t(size_t, total, first->iov_len);
	if (len) {
		memcpy(dst, first->iov_base, len);
		dst += len;
		remaining -= len;
	}

	if (remaining) {
		len = min_t(size_t, remaining, PAGE_SIZE);
		memcpy(dst, p, len);
		dst += len;
	}

	*dst = '\0';

	/* Sanity check: Linux doesn't allow the pathname argument to
	 * contain a NUL byte.
	 */
	if (strlen(result) != total) {
		kfree(result);
		return ERR_PTR(-EINVAL);
	}
	return result;
}
EXPORT_SYMBOL_GPL(svc_fill_symlink_pathname);
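
/*
 * For example (sketch only; argp field names are illustrative), a
 * SYMLINK handler would use it as:
 *
 *	char *path = svc_fill_symlink_pathname(rqstp, &argp->first,
 *					       argp->tname, argp->tlen);
 *	if (IS_ERR(path))
 *		return ...;	// map PTR_ERR(path) to a protocol status
 *	// hand path to the VFS, then kfree(path)
 */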