// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 *
 * Multiple thread pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * by Greg Banks <gnb@melbourne.sgi.com>
 */

#include <linux/linkage.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/bc_xprt.h>

#include <trace/events/sunrpc.h>

#include "fail.h"

#define RPCDBG_FACILITY	RPCDBG_SVCDSP

static void svc_unregister(const struct svc_serv *serv, struct net *net);

#define SVC_POOL_DEFAULT	SVC_POOL_GLOBAL

/*
 * Mode for mapping cpus to pools.
 */
enum {
	SVC_POOL_AUTO = -1,	/* choose one of the others */
	SVC_POOL_GLOBAL,	/* no mapping, just a single global pool
				 * (legacy & UP mode) */
	SVC_POOL_PERCPU,	/* one pool per cpu */
	SVC_POOL_PERNODE	/* one pool per numa node */
};

/*
 * Structure for mapping cpus to pools and vice versa.
 * Setup once during sunrpc initialisation.
 */

struct svc_pool_map {
	int count;			/* How many svc_servs use us */
	int mode;			/* Note: int not enum to avoid
					 * warnings about "enumeration value
					 * not handled in switch" */
	unsigned int npools;
	unsigned int *pool_to;		/* maps pool id to cpu or node */
	unsigned int *to_pool;		/* maps cpu or node to pool id */
};
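
/*
 * Worked example (hypothetical topology): in SVC_POOL_PERNODE mode on
 * a machine whose CPU-bearing nodes are 0 and 2,
 * svc_pool_map_init_pernode() fills in
 *
 *	to_pool[0] = 0, to_pool[2] = 1		(node -> pool id)
 *	pool_to[0] = 0, pool_to[1] = 2		(pool id -> node)
 *
 * SVC_POOL_PERCPU builds the same pair of arrays keyed by cpu instead.
 */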

static struct svc_pool_map svc_pool_map = {
	.mode = SVC_POOL_DEFAULT
};

static DEFINE_MUTEX(svc_pool_map_mutex);	/* protects svc_pool_map.count only */

static int
param_set_pool_mode(const char *val, const struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;
	struct svc_pool_map *m = &svc_pool_map;
	int err;

	mutex_lock(&svc_pool_map_mutex);

	err = -EBUSY;
	if (m->count)
		goto out;

	err = 0;
	if (!strncmp(val, "auto", 4))
		*ip = SVC_POOL_AUTO;
	else if (!strncmp(val, "global", 6))
		*ip = SVC_POOL_GLOBAL;
	else if (!strncmp(val, "percpu", 6))
		*ip = SVC_POOL_PERCPU;
	else if (!strncmp(val, "pernode", 7))
		*ip = SVC_POOL_PERNODE;
	else
		err = -EINVAL;

out:
	mutex_unlock(&svc_pool_map_mutex);
	return err;
}

static int
param_get_pool_mode(char *buf, const struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;

	switch (*ip) {
	case SVC_POOL_AUTO:
		return strlcpy(buf, "auto\n", 20);
	case SVC_POOL_GLOBAL:
		return strlcpy(buf, "global\n", 20);
	case SVC_POOL_PERCPU:
		return strlcpy(buf, "percpu\n", 20);
	case SVC_POOL_PERNODE:
		return strlcpy(buf, "pernode\n", 20);
	default:
		return sprintf(buf, "%d\n", *ip);
	}
}

module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
		  &svc_pool_map.mode, 0644);
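
/*
 * Example (hypothetical admin session): because the parameter above is
 * created with mode 0644, the pool mode can be chosen at runtime before
 * the first pooled service is created, e.g.
 *
 *	# echo pernode > /sys/module/sunrpc/parameters/pool_mode
 *
 * Once a pooled service holds a reference (svc_pool_map.count != 0),
 * param_set_pool_mode() above rejects the write with -EBUSY.
 */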

/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
	unsigned int node;

	if (nr_online_nodes > 1) {
		/*
		 * Actually have multiple NUMA nodes,
		 * so split pools on NUMA node boundaries
		 */
		return SVC_POOL_PERNODE;
	}

	node = first_online_node;
	if (nr_cpus_node(node) > 2) {
		/*
		 * Non-trivial SMP, or CONFIG_NUMA on
		 * non-NUMA hardware, e.g. with a generic
		 * x86_64 kernel on Xeons.  In this case we
		 * want to divide the pools on cpu boundaries.
		 */
		return SVC_POOL_PERCPU;
	}

	/* default: one global pool */
	return SVC_POOL_GLOBAL;
}

/*
 * Allocate the to_pool[] and pool_to[] arrays.
 * Returns 0 on success or an errno.
 */
static int
svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
{
	m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->to_pool)
		goto fail;
	m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->pool_to)
		goto fail_free;

	return 0;

fail_free:
	kfree(m->to_pool);
	m->to_pool = NULL;
fail:
	return -ENOMEM;
}

/*
 * Initialise the pool map for SVC_POOL_PERCPU mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_cpu_ids;
	unsigned int pidx = 0;
	unsigned int cpu;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_online_cpu(cpu) {
		BUG_ON(pidx >= maxpools);
		m->to_pool[cpu] = pidx;
		m->pool_to[pidx] = cpu;
		pidx++;
	}
	/* cpus brought online later all get mapped to pool0, sorry */

	return pidx;
}


/*
 * Initialise the pool map for SVC_POOL_PERNODE mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_pernode(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_node_ids;
	unsigned int pidx = 0;
	unsigned int node;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_node_with_cpus(node) {
		/* some architectures (e.g. SN2) have cpuless nodes */
		BUG_ON(pidx > maxpools);
		m->to_pool[node] = pidx;
		m->pool_to[pidx] = node;
		pidx++;
	}
	/* nodes brought online later all get mapped to pool0, sorry */

	return pidx;
}


/*
 * Add a reference to the global map of cpus to pools (and
 * vice versa) if pools are in use.
 * Initialise the map if we're the first user.
 * Returns the number of pools. If this is '1', no reference
 * was taken.
 */
static unsigned int
svc_pool_map_get(void)
{
	struct svc_pool_map *m = &svc_pool_map;
	int npools = -1;

	mutex_lock(&svc_pool_map_mutex);

	if (m->count++) {
		mutex_unlock(&svc_pool_map_mutex);
		WARN_ON_ONCE(m->npools <= 1);
		return m->npools;
	}

	if (m->mode == SVC_POOL_AUTO)
		m->mode = svc_pool_map_choose_mode();

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		npools = svc_pool_map_init_percpu(m);
		break;
	case SVC_POOL_PERNODE:
		npools = svc_pool_map_init_pernode(m);
		break;
	}

	if (npools <= 0) {
		/* default, or memory allocation failure */
		npools = 1;
		m->mode = SVC_POOL_GLOBAL;
	}
	m->npools = npools;

	if (npools == 1)
		/* service is unpooled, so doesn't hold a reference */
		m->count--;

	mutex_unlock(&svc_pool_map_mutex);
	return npools;
}

/*
 * Drop a reference to the global map of cpus to pools, if
 * pools were in use, i.e. if npools > 1.
 * When the last reference is dropped, the map data is
 * freed; this allows the sysadmin to change the pool
 * mode using the pool_mode module option without
 * rebooting or re-loading sunrpc.ko.
 */
static void
svc_pool_map_put(int npools)
{
	struct svc_pool_map *m = &svc_pool_map;

	if (npools <= 1)
		return;
	mutex_lock(&svc_pool_map_mutex);

	if (!--m->count) {
		kfree(m->to_pool);
		m->to_pool = NULL;
		kfree(m->pool_to);
		m->pool_to = NULL;
		m->npools = 0;
	}

	mutex_unlock(&svc_pool_map_mutex);
}
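
/*
 * Note: svc_pool_map_get() and svc_pool_map_put() pair up across the
 * lifetime of a pooled service: svc_create_pooled() takes the
 * reference and svc_destroy() drops it by passing serv->sv_nrpools
 * to svc_pool_map_put(), so unpooled (npools == 1) services never
 * touch the refcount.
 */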

static int svc_pool_map_get_node(unsigned int pidx)
{
	const struct svc_pool_map *m = &svc_pool_map;

	if (m->count) {
		if (m->mode == SVC_POOL_PERCPU)
			return cpu_to_node(m->pool_to[pidx]);
		if (m->mode == SVC_POOL_PERNODE)
			return m->pool_to[pidx];
	}
	return NUMA_NO_NODE;
}

/*
 * Set the given thread's cpus_allowed mask so that it
 * will only run on cpus in the given pool.
 */
static inline void
svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int node = m->pool_to[pidx];

	/*
	 * The caller checks for sv_nrpools > 1, which
	 * implies that we've been initialized.
	 */
	WARN_ON_ONCE(m->count == 0);
	if (m->count == 0)
		return;

	switch (m->mode) {
	case SVC_POOL_PERCPU:
	{
		set_cpus_allowed_ptr(task, cpumask_of(node));
		break;
	}
	case SVC_POOL_PERNODE:
	{
		set_cpus_allowed_ptr(task, cpumask_of_node(node));
		break;
	}
	}
}

/**
 * svc_pool_for_cpu - Select pool to run a thread on this cpu
 * @serv: An RPC service
 *
 * Use the active CPU and the svc_pool_map's mode setting to
 * select the svc thread pool to use. Once initialized, the
 * svc_pool_map does not change.
 *
 * Return value:
 *   A pointer to an svc_pool
 */
struct svc_pool *svc_pool_for_cpu(struct svc_serv *serv)
{
	struct svc_pool_map *m = &svc_pool_map;
	int cpu = raw_smp_processor_id();
	unsigned int pidx = 0;

	if (serv->sv_nrpools <= 1)
		return serv->sv_pools;

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		pidx = m->to_pool[cpu];
		break;
	case SVC_POOL_PERNODE:
		pidx = m->to_pool[cpu_to_node(cpu)];
		break;
	}

	return &serv->sv_pools[pidx % serv->sv_nrpools];
}

int svc_rpcb_setup(struct svc_serv *serv, struct net *net)
{
	int err;

	err = rpcb_create_local(net);
	if (err)
		return err;

	/* Remove any stale portmap registrations */
	svc_unregister(serv, net);
	return 0;
}
EXPORT_SYMBOL_GPL(svc_rpcb_setup);

void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net)
{
	svc_unregister(serv, net);
	rpcb_put_local(net);
}
EXPORT_SYMBOL_GPL(svc_rpcb_cleanup);

static int svc_uses_rpcbind(struct svc_serv *serv)
{
	struct svc_program *progp;
	unsigned int i;

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (!progp->pg_vers[i]->vs_hidden)
				return 1;
		}
	}

	return 0;
}

int svc_bind(struct svc_serv *serv, struct net *net)
{
	if (!svc_uses_rpcbind(serv))
		return 0;
	return svc_rpcb_setup(serv, net);
}
EXPORT_SYMBOL_GPL(svc_bind);

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static void
__svc_init_bc(struct svc_serv *serv)
{
	INIT_LIST_HEAD(&serv->sv_cb_list);
	spin_lock_init(&serv->sv_cb_lock);
	init_waitqueue_head(&serv->sv_cb_waitq);
}
#else
static void
__svc_init_bc(struct svc_serv *serv)
{
}
#endif

/*
 * Create an RPC service
 */
static struct svc_serv *
__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
	     int (*threadfn)(void *data))
{
	struct svc_serv *serv;
	unsigned int vers;
	unsigned int xdrsize;
	unsigned int i;

	serv = kzalloc(sizeof(*serv), GFP_KERNEL);
	if (!serv)
		return NULL;
	serv->sv_name = prog->pg_name;
	serv->sv_program = prog;
	kref_init(&serv->sv_refcnt);
	serv->sv_stats = prog->pg_stats;
	if (bufsize > RPCSVC_MAXPAYLOAD)
		bufsize = RPCSVC_MAXPAYLOAD;
	serv->sv_max_payload = bufsize ? bufsize : 4096;
	serv->sv_max_mesg = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
	serv->sv_threadfn = threadfn;
	xdrsize = 0;
	while (prog) {
		prog->pg_lovers = prog->pg_nvers - 1;
		for (vers = 0; vers < prog->pg_nvers; vers++)
			if (prog->pg_vers[vers]) {
				prog->pg_hivers = vers;
				if (prog->pg_lovers > vers)
					prog->pg_lovers = vers;
				if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
					xdrsize = prog->pg_vers[vers]->vs_xdrsize;
			}
		prog = prog->pg_next;
	}
	serv->sv_xdrsize = xdrsize;
	INIT_LIST_HEAD(&serv->sv_tempsocks);
	INIT_LIST_HEAD(&serv->sv_permsocks);
	timer_setup(&serv->sv_temptimer, NULL, 0);
	spin_lock_init(&serv->sv_lock);

	__svc_init_bc(serv);

	serv->sv_nrpools = npools;
	serv->sv_pools =
		kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
			GFP_KERNEL);
	if (!serv->sv_pools) {
		kfree(serv);
		return NULL;
	}

	for (i = 0; i < serv->sv_nrpools; i++) {
		struct svc_pool *pool = &serv->sv_pools[i];

		dprintk("svc: initialising pool %u for %s\n",
			i, serv->sv_name);

		pool->sp_id = i;
		INIT_LIST_HEAD(&pool->sp_sockets);
		INIT_LIST_HEAD(&pool->sp_all_threads);
		spin_lock_init(&pool->sp_lock);

		percpu_counter_init(&pool->sp_sockets_queued, 0, GFP_KERNEL);
		percpu_counter_init(&pool->sp_threads_woken, 0, GFP_KERNEL);
		percpu_counter_init(&pool->sp_threads_timedout, 0, GFP_KERNEL);
	}

	return serv;
}

/**
 * svc_create - Create an RPC service
 * @prog: the RPC program the new service will handle
 * @bufsize: maximum message size for @prog
 * @threadfn: a function to service RPC requests for @prog
 *
 * Returns an instantiated struct svc_serv object or NULL.
 */
struct svc_serv *svc_create(struct svc_program *prog, unsigned int bufsize,
			    int (*threadfn)(void *data))
{
	return __svc_create(prog, bufsize, 1, threadfn);
}
EXPORT_SYMBOL_GPL(svc_create);

/**
 * svc_create_pooled - Create an RPC service with pooled threads
 * @prog: the RPC program the new service will handle
 * @bufsize: maximum message size for @prog
 * @threadfn: a function to service RPC requests for @prog
 *
 * Returns an instantiated struct svc_serv object or NULL.
 */
struct svc_serv *svc_create_pooled(struct svc_program *prog,
				   unsigned int bufsize,
				   int (*threadfn)(void *data))
{
	struct svc_serv *serv;
	unsigned int npools = svc_pool_map_get();

	serv = __svc_create(prog, bufsize, npools, threadfn);
	if (!serv)
		goto out_err;
	return serv;
out_err:
	svc_pool_map_put(npools);
	return NULL;
}
EXPORT_SYMBOL_GPL(svc_create_pooled);
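
/*
 * Minimal usage sketch (hypothetical caller; "my_program" and
 * "my_threadfn" are stand-ins, not part of this API):
 *
 *	serv = svc_create_pooled(&my_program, 64 * 1024, my_threadfn);
 *	if (!serv)
 *		return -ENOMEM;
 *	error = svc_set_num_threads(serv, NULL, 8);
 *
 * Real services (e.g. nfsd) follow this pattern, then tear down with
 * svc_set_num_threads(serv, NULL, 0) and svc_put().
 */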

/*
 * Destroy an RPC service. Should be called with appropriate locking to
 * protect sv_permsocks and sv_tempsocks.
 */
void
svc_destroy(struct kref *ref)
{
	struct svc_serv *serv = container_of(ref, struct svc_serv, sv_refcnt);
	unsigned int i;

	dprintk("svc: svc_destroy(%s)\n", serv->sv_program->pg_name);
	timer_shutdown_sync(&serv->sv_temptimer);

	/*
	 * The last user is gone, so all sockets must already have been
	 * destroyed. Verify that here.
	 */
	BUG_ON(!list_empty(&serv->sv_permsocks));
	BUG_ON(!list_empty(&serv->sv_tempsocks));

	cache_clean_deferred(serv);

	svc_pool_map_put(serv->sv_nrpools);

	for (i = 0; i < serv->sv_nrpools; i++) {
		struct svc_pool *pool = &serv->sv_pools[i];

		percpu_counter_destroy(&pool->sp_sockets_queued);
		percpu_counter_destroy(&pool->sp_threads_woken);
		percpu_counter_destroy(&pool->sp_threads_timedout);
	}
	kfree(serv->sv_pools);
	kfree(serv);
}
EXPORT_SYMBOL_GPL(svc_destroy);

/*
 * Allocate an RPC server's buffer space.
 * We allocate pages and place them in rq_pages.
 */
static int
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
{
	unsigned int pages, arghi;

	/* bc_xprt uses fore channel allocated buffers */
	if (svc_is_backchannel(rqstp))
		return 1;

	pages = size / PAGE_SIZE + 1; /* extra page as we hold both request
				       * and reply; we assume one is at most
				       * one page
				       */
	arghi = 0;
	WARN_ON_ONCE(pages > RPCSVC_MAXPAGES);
	if (pages > RPCSVC_MAXPAGES)
		pages = RPCSVC_MAXPAGES;
	while (pages) {
		struct page *p = alloc_pages_node(node, GFP_KERNEL, 0);

		if (!p)
			break;
		rqstp->rq_pages[arghi++] = p;
		pages--;
	}
	return pages == 0;
}

/*
 * Release an RPC server buffer
 */
static void
svc_release_buffer(struct svc_rqst *rqstp)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
		if (rqstp->rq_pages[i])
			put_page(rqstp->rq_pages[i]);
}

struct svc_rqst *
svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)
{
	struct svc_rqst *rqstp;

	rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
	if (!rqstp)
		return rqstp;

	__set_bit(RQ_BUSY, &rqstp->rq_flags);
	rqstp->rq_server = serv;
	rqstp->rq_pool = pool;

	rqstp->rq_scratch_page = alloc_pages_node(node, GFP_KERNEL, 0);
	if (!rqstp->rq_scratch_page)
		goto out_enomem;

	rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
	if (!rqstp->rq_argp)
		goto out_enomem;

	rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
	if (!rqstp->rq_resp)
		goto out_enomem;

	if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
		goto out_enomem;

	return rqstp;
out_enomem:
	svc_rqst_free(rqstp);
	return NULL;
}
EXPORT_SYMBOL_GPL(svc_rqst_alloc);

static struct svc_rqst *
svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
{
	struct svc_rqst *rqstp;

	rqstp = svc_rqst_alloc(serv, pool, node);
	if (!rqstp)
		return ERR_PTR(-ENOMEM);

	svc_get(serv);
	spin_lock_bh(&serv->sv_lock);
	serv->sv_nrthreads += 1;
	spin_unlock_bh(&serv->sv_lock);

	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads++;
	list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
	spin_unlock_bh(&pool->sp_lock);
	return rqstp;
}

/*
 * Choose a pool in which to create a new thread, for svc_set_num_threads
 */
static inline struct svc_pool *
choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	if (pool != NULL)
		return pool;

	return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}

/*
 * Choose a thread to kill, for svc_set_num_threads
 */
static inline struct task_struct *
choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	unsigned int i;
	struct task_struct *task = NULL;

	if (pool != NULL) {
		spin_lock_bh(&pool->sp_lock);
	} else {
		/* choose a pool in round-robin fashion */
		for (i = 0; i < serv->sv_nrpools; i++) {
			pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
			spin_lock_bh(&pool->sp_lock);
			if (!list_empty(&pool->sp_all_threads))
				goto found_pool;
			spin_unlock_bh(&pool->sp_lock);
		}
		return NULL;
	}

found_pool:
	if (!list_empty(&pool->sp_all_threads)) {
		struct svc_rqst *rqstp;

		/*
		 * Remove from the pool->sp_all_threads list
		 * so we don't try to kill it again.
		 */
		rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
		set_bit(RQ_VICTIM, &rqstp->rq_flags);
		list_del_rcu(&rqstp->rq_all);
		task = rqstp->rq_task;
	}
	spin_unlock_bh(&pool->sp_lock);

	return task;
}

/* create new threads */
static int
svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct svc_rqst *rqstp;
	struct task_struct *task;
	struct svc_pool *chosen_pool;
	unsigned int state = serv->sv_nrthreads - 1;
	int node;

	do {
		nrservs--;
		chosen_pool = choose_pool(serv, pool, &state);

		node = svc_pool_map_get_node(chosen_pool->sp_id);
		rqstp = svc_prepare_thread(serv, chosen_pool, node);
		if (IS_ERR(rqstp))
			return PTR_ERR(rqstp);

		task = kthread_create_on_node(serv->sv_threadfn, rqstp,
					      node, "%s", serv->sv_name);
		if (IS_ERR(task)) {
			svc_exit_thread(rqstp);
			return PTR_ERR(task);
		}

		rqstp->rq_task = task;
		if (serv->sv_nrpools > 1)
			svc_pool_map_set_cpumask(task, chosen_pool->sp_id);

		svc_sock_update_bufs(serv);
		wake_up_process(task);
	} while (nrservs > 0);

	return 0;
}

/*
 * Create or destroy enough new threads to make the number
 * of threads the given number. If `pool' is non-NULL, applies
 * only to threads in that pool, otherwise round-robins between
 * all pools. Caller must ensure mutual exclusion between this and
 * server startup or shutdown.
 */

/* destroy old threads */
static int
svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct svc_rqst *rqstp;
	struct task_struct *task;
	unsigned int state = serv->sv_nrthreads - 1;

	/* destroy old threads */
	do {
		task = choose_victim(serv, pool, &state);
		if (task == NULL)
			break;
		rqstp = kthread_data(task);
		/* Did we lose a race to svo_function threadfn? */
		if (kthread_stop(task) == -EINTR)
			svc_exit_thread(rqstp);
		nrservs++;
	} while (nrservs < 0);
	return 0;
}

int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	if (pool == NULL) {
		nrservs -= serv->sv_nrthreads;
	} else {
		spin_lock_bh(&pool->sp_lock);
		nrservs -= pool->sp_nrthreads;
		spin_unlock_bh(&pool->sp_lock);
	}

	if (nrservs > 0)
		return svc_start_kthreads(serv, pool, nrservs);
	if (nrservs < 0)
		return svc_stop_kthreads(serv, pool, nrservs);
	return 0;
}
EXPORT_SYMBOL_GPL(svc_set_num_threads);
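
/*
 * Note that the nrservs argument is an absolute target, not a delta:
 * with 4 threads running, svc_set_num_threads(serv, NULL, 6) starts
 * two more, and svc_set_num_threads(serv, NULL, 0) stops them all.
 */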

/**
 * svc_rqst_replace_page - Replace one page in rq_pages[]
 * @rqstp: svc_rqst with pages to replace
 * @page: replacement page
 *
 * When replacing a page in rq_pages, batch the release of the
 * replaced pages to avoid hammering the page allocator.
 */
void svc_rqst_replace_page(struct svc_rqst *rqstp, struct page *page)
{
	if (*rqstp->rq_next_page) {
		if (!pagevec_space(&rqstp->rq_pvec))
			__pagevec_release(&rqstp->rq_pvec);
		pagevec_add(&rqstp->rq_pvec, *rqstp->rq_next_page);
	}

	get_page(page);
	*(rqstp->rq_next_page++) = page;
}
EXPORT_SYMBOL_GPL(svc_rqst_replace_page);

/*
 * Called from a server thread as it's exiting. Caller must hold the "service
 * mutex" for the service.
 */
void
svc_rqst_free(struct svc_rqst *rqstp)
{
	svc_release_buffer(rqstp);
	if (rqstp->rq_scratch_page)
		put_page(rqstp->rq_scratch_page);
	kfree(rqstp->rq_resp);
	kfree(rqstp->rq_argp);
	kfree(rqstp->rq_auth_data);
	kfree_rcu(rqstp, rq_rcu_head);
}
EXPORT_SYMBOL_GPL(svc_rqst_free);

void
svc_exit_thread(struct svc_rqst *rqstp)
{
	struct svc_serv *serv = rqstp->rq_server;
	struct svc_pool *pool = rqstp->rq_pool;

	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads--;
	if (!test_and_set_bit(RQ_VICTIM, &rqstp->rq_flags))
		list_del_rcu(&rqstp->rq_all);
	spin_unlock_bh(&pool->sp_lock);

	spin_lock_bh(&serv->sv_lock);
	serv->sv_nrthreads -= 1;
	spin_unlock_bh(&serv->sv_lock);
	svc_sock_update_bufs(serv);

	svc_rqst_free(rqstp);

	svc_put(serv);
}
EXPORT_SYMBOL_GPL(svc_exit_thread);

/*
 * Register an "inet" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register4(struct net *net, const u32 program,
				const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	const struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};
	const char *netid;
	int error;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP;
		break;
	default:
		return -ENOPROTOOPT;
	}

	error = rpcb_v4_register(net, program, version,
				 (const struct sockaddr *)&sin, netid);

	/*
	 * User space didn't support rpcbind v4, so retry this
	 * registration request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(net, program, version, protocol, port);

	return error;
}

#if IS_ENABLED(CONFIG_IPV6)
/*
 * Register an "inet6" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register6(struct net *net, const u32 program,
				const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	const struct sockaddr_in6 sin6 = {
		.sin6_family		= AF_INET6,
		.sin6_addr		= IN6ADDR_ANY_INIT,
		.sin6_port		= htons(port),
	};
	const char *netid;
	int error;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP6;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP6;
		break;
	default:
		return -ENOPROTOOPT;
	}

	error = rpcb_v4_register(net, program, version,
				 (const struct sockaddr *)&sin6, netid);

	/*
	 * User space didn't support rpcbind version 4, so we won't
	 * use a PF_INET6 listener.
	 */
	if (error == -EPROTONOSUPPORT)
		error = -EAFNOSUPPORT;

	return error;
}
#endif	/* IS_ENABLED(CONFIG_IPV6) */

/*
 * Register a kernel RPC service via rpcbind version 4.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_register(struct net *net, const char *progname,
			  const u32 program, const u32 version,
			  const int family,
			  const unsigned short protocol,
			  const unsigned short port)
{
	int error = -EAFNOSUPPORT;

	switch (family) {
	case PF_INET:
		error = __svc_rpcb_register4(net, program, version,
					     protocol, port);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case PF_INET6:
		error = __svc_rpcb_register6(net, program, version,
					     protocol, port);
#endif
	}

	trace_svc_register(progname, version, protocol, port, family, error);
	return error;
}

int svc_rpcbind_set_version(struct net *net,
			    const struct svc_program *progp,
			    u32 version, int family,
			    unsigned short proto,
			    unsigned short port)
{
	return __svc_register(net, progp->pg_name, progp->pg_prog,
			      version, family, proto, port);
}
EXPORT_SYMBOL_GPL(svc_rpcbind_set_version);

int svc_generic_rpcbind_set(struct net *net,
			    const struct svc_program *progp,
			    u32 version, int family,
			    unsigned short proto,
			    unsigned short port)
{
	const struct svc_version *vers = progp->pg_vers[version];
	int error;

	if (vers == NULL)
		return 0;

	if (vers->vs_hidden) {
		trace_svc_noregister(progp->pg_name, version, proto,
				     port, family, 0);
		return 0;
	}

	/*
	 * Don't register a UDP port if we need congestion
	 * control.
	 */
	if (vers->vs_need_cong_ctrl && proto == IPPROTO_UDP)
		return 0;

	error = svc_rpcbind_set_version(net, progp, version,
					family, proto, port);

	return (vers->vs_rpcb_optnl) ? 0 : error;
}
EXPORT_SYMBOL_GPL(svc_generic_rpcbind_set);

/**
 * svc_register - register an RPC service with the local portmapper
 * @serv: svc_serv struct for the service to register
 * @net: net namespace for the service to register
 * @family: protocol family of service's listener socket
 * @proto: transport protocol number to advertise
 * @port: port to advertise
 *
 * Service is registered for any address in the passed-in protocol family
 */
int svc_register(const struct svc_serv *serv, struct net *net,
		 const int family, const unsigned short proto,
		 const unsigned short port)
{
	struct svc_program *progp;
	unsigned int i;
	int error = 0;

	WARN_ON_ONCE(proto == 0 && port == 0);
	if (proto == 0 && port == 0)
		return -EINVAL;

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {

			error = progp->pg_rpcbind_set(net, progp, i,
						      family, proto, port);
			if (error < 0) {
				printk(KERN_WARNING "svc: failed to register "
					"%sv%u RPC service (errno %d).\n",
					progp->pg_name, i, -error);
				break;
			}
		}
	}

	return error;
}

/*
 * If user space is running rpcbind, it should take the v4 UNSET
 * and clear everything for this [program, version]. If user space
 * is running portmap, it will reject the v4 UNSET, but won't have
 * any "inet6" entries anyway. So a PMAP_UNSET should be sufficient
 * in this case to clear all existing entries for [program, version].
 */
static void __svc_unregister(struct net *net, const u32 program, const u32 version,
			     const char *progname)
{
	int error;

	error = rpcb_v4_register(net, program, version, NULL, "");

	/*
	 * User space didn't support rpcbind v4, so retry this
	 * request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(net, program, version, 0, 0);

	trace_svc_unregister(progname, version, error);
}

/*
 * All netids, bind addresses and ports registered for [program, version]
 * are removed from the local rpcbind database (if the service is not
 * hidden) to make way for a new instance of the service.
 *
 * The result of unregistration is reported via dprintk for those who want
 * verification of the result, but is otherwise not important.
 */
static void svc_unregister(const struct svc_serv *serv, struct net *net)
{
	struct svc_program *progp;
	unsigned long flags;
	unsigned int i;

	clear_thread_flag(TIF_SIGPENDING);

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (progp->pg_vers[i]->vs_hidden)
				continue;
			__svc_unregister(net, progp->pg_prog, i, progp->pg_name);
		}
	}

	spin_lock_irqsave(&current->sighand->siglock, flags);
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/*
 * dprintk the given error with the address of the client that caused it.
 */
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static __printf(2, 3)
void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	char buf[RPC_MAX_ADDRBUFLEN];

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	dprintk("svc: %s: %pV", svc_print_addr(rqstp, buf, sizeof(buf)), &vaf);

	va_end(args);
}
#else
static __printf(2, 3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {}
#endif

__be32
svc_generic_init_request(struct svc_rqst *rqstp,
			 const struct svc_program *progp,
			 struct svc_process_info *ret)
{
	const struct svc_version *versp = NULL;	/* compiler food */
	const struct svc_procedure *procp = NULL;

	if (rqstp->rq_vers >= progp->pg_nvers)
		goto err_bad_vers;
	versp = progp->pg_vers[rqstp->rq_vers];
	if (!versp)
		goto err_bad_vers;

	/*
	 * Some protocol versions (namely NFSv4) require some form of
	 * congestion control.  (See RFC 7530 section 3.1 paragraph 2)
	 * In other words, UDP is not allowed. We mark those when setting
	 * up the svc_xprt, and verify that here.
	 *
	 * The spec is not very clear about what error should be returned
	 * when someone tries to access a server that is listening on UDP
	 * for lower versions. RPC_PROG_MISMATCH seems to be the closest
	 * fit.
	 */
	if (versp->vs_need_cong_ctrl && rqstp->rq_xprt &&
	    !test_bit(XPT_CONG_CTRL, &rqstp->rq_xprt->xpt_flags))
		goto err_bad_vers;

	if (rqstp->rq_proc >= versp->vs_nproc)
		goto err_bad_proc;
	rqstp->rq_procinfo = procp = &versp->vs_proc[rqstp->rq_proc];
	if (!procp)
		goto err_bad_proc;

	/* Initialize storage for argp and resp */
	memset(rqstp->rq_argp, 0, procp->pc_argzero);
	memset(rqstp->rq_resp, 0, procp->pc_ressize);

	/* Bump per-procedure stats counter */
	this_cpu_inc(versp->vs_count[rqstp->rq_proc]);

	ret->dispatch = versp->vs_dispatch;
	return rpc_success;
err_bad_vers:
	ret->mismatch.lovers = progp->pg_lovers;
	ret->mismatch.hivers = progp->pg_hivers;
	return rpc_prog_mismatch;
err_bad_proc:
	return rpc_proc_unavail;
}
EXPORT_SYMBOL_GPL(svc_generic_init_request);

/*
 * Common routine for processing the RPC request.
 */
static int
svc_process_common(struct svc_rqst *rqstp)
{
	struct xdr_stream *xdr = &rqstp->rq_res_stream;
	struct svc_program *progp;
	const struct svc_procedure *procp = NULL;
	struct svc_serv *serv = rqstp->rq_server;
	struct svc_process_info process;
	int auth_res, rc;
	unsigned int aoffset;
	__be32 *p;

	/* Will be turned off by GSS integrity and privacy services */
	set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
	/* Will be turned off only when NFSv4 Sessions are used */
	set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
	clear_bit(RQ_DROPME, &rqstp->rq_flags);

	/* Construct the first words of the reply: */
	svcxdr_init_encode(rqstp);
	xdr_stream_encode_be32(xdr, rqstp->rq_xid);
	xdr_stream_encode_be32(xdr, rpc_reply);

	p = xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 4);
	if (unlikely(!p))
		goto err_short_len;
	if (*p++ != cpu_to_be32(RPC_VERSION))
		goto err_bad_rpc;

	xdr_stream_encode_be32(xdr, rpc_msg_accepted);

	rqstp->rq_prog = be32_to_cpup(p++);
	rqstp->rq_vers = be32_to_cpup(p++);
	rqstp->rq_proc = be32_to_cpup(p);

	for (progp = serv->sv_program; progp; progp = progp->pg_next)
		if (rqstp->rq_prog == progp->pg_prog)
			break;

	/*
	 * Decode auth data, and add verifier to reply buffer.
	 * We do this before anything else in order to get a decent
	 * auth verifier.
	 */
	auth_res = svc_authenticate(rqstp);
	/* Also give the program a chance to reject this call: */
	if (auth_res == SVC_OK && progp)
		auth_res = progp->pg_authenticate(rqstp);
	trace_svc_authenticate(rqstp, auth_res);
	switch (auth_res) {
	case SVC_OK:
		break;
	case SVC_GARBAGE:
		goto err_garbage_args;
	case SVC_SYSERR:
		goto err_system_err;
	case SVC_DENIED:
		goto err_bad_auth;
	case SVC_CLOSE:
		goto close;
	case SVC_DROP:
		goto dropit;
	case SVC_COMPLETE:
		goto sendit;
	}

	if (progp == NULL)
		goto err_bad_prog;

	switch (progp->pg_init_request(rqstp, progp, &process)) {
	case rpc_success:
		break;
	case rpc_prog_unavail:
		goto err_bad_prog;
	case rpc_prog_mismatch:
		goto err_bad_vers;
	case rpc_proc_unavail:
		goto err_bad_proc;
	}

	procp = rqstp->rq_procinfo;
	/* Should this check go into the dispatcher? */
	if (!procp || !procp->pc_func)
		goto err_bad_proc;

	/* Syntactic check complete */
	serv->sv_stats->rpccnt++;
	trace_svc_process(rqstp, progp->pg_name);

	aoffset = xdr_stream_pos(xdr);

	/* un-reserve some of the out-queue now that we have a
	 * better idea of reply size
	 */
	if (procp->pc_xdrressize)
		svc_reserve_auth(rqstp, procp->pc_xdrressize << 2);

	/* Call the function that processes the request. */
	rc = process.dispatch(rqstp);
	if (procp->pc_release)
		procp->pc_release(rqstp);
	if (!rc)
		goto dropit;
	if (rqstp->rq_auth_stat != rpc_auth_ok)
		goto err_bad_auth;

	if (*rqstp->rq_accept_statp != rpc_success)
		xdr_truncate_encode(xdr, aoffset);

	if (procp->pc_encode == NULL)
		goto dropit;

sendit:
	if (svc_authorise(rqstp))
		goto close_xprt;
	return 1;		/* Caller can now send it */

dropit:
	svc_authorise(rqstp);	/* doesn't hurt to call this twice */
	dprintk("svc: svc_process dropit\n");
	return 0;

close:
	svc_authorise(rqstp);
close_xprt:
	if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
		svc_xprt_close(rqstp->rq_xprt);
	dprintk("svc: svc_process close\n");
	return 0;

err_short_len:
	svc_printk(rqstp, "short len %u, dropping request\n",
		   rqstp->rq_arg.len);
	goto close_xprt;

err_bad_rpc:
	serv->sv_stats->rpcbadfmt++;
	xdr_stream_encode_u32(xdr, RPC_MSG_DENIED);
	xdr_stream_encode_u32(xdr, RPC_MISMATCH);
	/* Only RPCv2 supported */
	xdr_stream_encode_u32(xdr, RPC_VERSION);
	xdr_stream_encode_u32(xdr, RPC_VERSION);
	goto sendit;

err_bad_auth:
	dprintk("svc: authentication failed (%d)\n",
		be32_to_cpu(rqstp->rq_auth_stat));
	serv->sv_stats->rpcbadauth++;
	/* Restore write pointer to location of reply status: */
	xdr_truncate_encode(xdr, XDR_UNIT * 2);
	xdr_stream_encode_u32(xdr, RPC_MSG_DENIED);
	xdr_stream_encode_u32(xdr, RPC_AUTH_ERROR);
	xdr_stream_encode_be32(xdr, rqstp->rq_auth_stat);
	goto sendit;

err_bad_prog:
	dprintk("svc: unknown program %d\n", rqstp->rq_prog);
	serv->sv_stats->rpcbadfmt++;
	xdr_stream_encode_u32(xdr, RPC_PROG_UNAVAIL);
	goto sendit;

err_bad_vers:
	svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
		   rqstp->rq_vers, rqstp->rq_prog, progp->pg_name);

	serv->sv_stats->rpcbadfmt++;
	xdr_stream_encode_u32(xdr, RPC_PROG_MISMATCH);
	xdr_stream_encode_u32(xdr, process.mismatch.lovers);
	xdr_stream_encode_u32(xdr, process.mismatch.hivers);
	goto sendit;

err_bad_proc:
	svc_printk(rqstp, "unknown procedure (%d)\n", rqstp->rq_proc);

	serv->sv_stats->rpcbadfmt++;
	xdr_stream_encode_u32(xdr, RPC_PROC_UNAVAIL);
	goto sendit;

err_garbage_args:
	svc_printk(rqstp, "failed to decode RPC header\n");

	serv->sv_stats->rpcbadfmt++;
	xdr_stream_encode_u32(xdr, RPC_GARBAGE_ARGS);
	goto sendit;

err_system_err:
	serv->sv_stats->rpcbadfmt++;
	xdr_stream_encode_u32(xdr, RPC_SYSTEM_ERR);
	goto sendit;
}

/*
 * Process the RPC request.
 */
int
svc_process(struct svc_rqst *rqstp)
{
	struct kvec *resv = &rqstp->rq_res.head[0];
	__be32 *p;

#if IS_ENABLED(CONFIG_FAIL_SUNRPC)
	if (!fail_sunrpc.ignore_server_disconnect &&
	    should_fail(&fail_sunrpc.attr, 1))
		svc_xprt_deferred_close(rqstp->rq_xprt);
#endif

	/*
	 * Setup response xdr_buf.
	 * Initially it has just one page
	 */
	rqstp->rq_next_page = &rqstp->rq_respages[1];
	resv->iov_base = page_address(rqstp->rq_respages[0]);
	resv->iov_len = 0;
	rqstp->rq_res.pages = rqstp->rq_next_page;
	rqstp->rq_res.len = 0;
	rqstp->rq_res.page_base = 0;
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.buflen = PAGE_SIZE;
	rqstp->rq_res.tail[0].iov_base = NULL;
	rqstp->rq_res.tail[0].iov_len = 0;

	svcxdr_init_decode(rqstp);
	p = xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 2);
	if (unlikely(!p))
		goto out_drop;
	rqstp->rq_xid = *p++;
	if (unlikely(*p != rpc_call))
		goto out_baddir;

	if (!svc_process_common(rqstp))
		goto out_drop;
	return svc_send(rqstp);

out_baddir:
	svc_printk(rqstp, "bad direction 0x%08x, dropping request\n",
		   be32_to_cpu(*p));
	rqstp->rq_server->sv_stats->rpcbadfmt++;
out_drop:
	svc_drop(rqstp);
	return 0;
}
EXPORT_SYMBOL_GPL(svc_process);
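
/*
 * Sketch of a typical caller (hypothetical; the real loops in nfsd and
 * lockd differ in details such as timeouts and signal handling). A
 * serv->sv_threadfn usually receives a request and then hands it here:
 *
 *	while (!kthread_should_stop()) {
 *		err = svc_recv(rqstp, 60 * 60 * HZ);
 *		if (err == -EAGAIN || err == -EINTR)
 *			continue;
 *		svc_process(rqstp);
 *	}
 */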

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/*
 * Process a backchannel RPC request that arrived over an existing
 * outbound connection
 */
int
bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
	       struct svc_rqst *rqstp)
{
	struct rpc_task *task;
	int proc_error;
	int error;

	dprintk("svc: %s(%p)\n", __func__, req);

	/* Build the svc_rqst used by the common processing routine */
	rqstp->rq_xid = req->rq_xid;
	rqstp->rq_prot = req->rq_xprt->prot;
	rqstp->rq_server = serv;
	rqstp->rq_bc_net = req->rq_xprt->xprt_net;

	rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
	memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
	memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
	memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));

	/* Adjust the argument buffer length */
	rqstp->rq_arg.len = req->rq_private_buf.len;
	if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;
		rqstp->rq_arg.page_len = 0;
	} else if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len +
			rqstp->rq_arg.page_len)
		rqstp->rq_arg.page_len = rqstp->rq_arg.len -
			rqstp->rq_arg.head[0].iov_len;
	else
		rqstp->rq_arg.len = rqstp->rq_arg.head[0].iov_len +
			rqstp->rq_arg.page_len;

	/* Reset the response buffer */
	rqstp->rq_res.head[0].iov_len = 0;

	/*
	 * Skip the XID and calldir fields because they've already
	 * been processed by the caller.
	 */
	svcxdr_init_decode(rqstp);
	if (!xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 2)) {
		error = -EINVAL;
		goto out;
	}

	/* Parse and execute the bc call */
	proc_error = svc_process_common(rqstp);

	atomic_dec(&req->rq_xprt->bc_slot_count);
	if (!proc_error) {
		/* Processing error: drop the request */
		xprt_free_bc_request(req);
		error = -EINVAL;
		goto out;
	}
	/* Finally, send the reply synchronously */
	memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
	task = rpc_run_bc_task(req);
	if (IS_ERR(task)) {
		error = PTR_ERR(task);
		goto out;
	}

	WARN_ON_ONCE(atomic_read(&task->tk_count) != 1);
	error = task->tk_status;
	rpc_put_task(task);

out:
	dprintk("svc: %s(), error=%d\n", __func__, error);
	return error;
}
EXPORT_SYMBOL_GPL(bc_svc_process);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

/**
 * svc_max_payload - Return transport-specific limit on the RPC payload
 * @rqstp: RPC transaction context
 *
 * Returns the maximum number of payload bytes the current transport
 * allows.
 */
u32 svc_max_payload(const struct svc_rqst *rqstp)
{
	u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload;

	if (rqstp->rq_server->sv_max_payload < max)
		max = rqstp->rq_server->sv_max_payload;
	return max;
}
EXPORT_SYMBOL_GPL(svc_max_payload);

/**
 * svc_proc_name - Return RPC procedure name in string form
 * @rqstp: svc_rqst to operate on
 *
 * Return value:
 *   Pointer to a NUL-terminated string
 */
const char *svc_proc_name(const struct svc_rqst *rqstp)
{
	if (rqstp && rqstp->rq_procinfo)
		return rqstp->rq_procinfo->pc_name;
	return "unknown";
}

/**
 * svc_encode_result_payload - mark a range of bytes as a result payload
 * @rqstp: svc_rqst to operate on
 * @offset: payload's byte offset in rqstp->rq_res
 * @length: size of payload, in bytes
 *
 * Returns zero on success, or a negative errno if a permanent
 * error occurred.
 */
int svc_encode_result_payload(struct svc_rqst *rqstp, unsigned int offset,
			      unsigned int length)
{
	return rqstp->rq_xprt->xpt_ops->xpo_result_payload(rqstp, offset,
							   length);
}
EXPORT_SYMBOL_GPL(svc_encode_result_payload);

/**
 * svc_fill_write_vector - Construct data argument for VFS write call
 * @rqstp: svc_rqst to operate on
 * @payload: xdr_buf containing only the write data payload
 *
 * Fills in rqstp::rq_vec, and returns the number of elements.
 */
unsigned int svc_fill_write_vector(struct svc_rqst *rqstp,
				   struct xdr_buf *payload)
{
	struct page **pages = payload->pages;
	struct kvec *first = payload->head;
	struct kvec *vec = rqstp->rq_vec;
	size_t total = payload->len;
	unsigned int i;

	/* Some types of transport can present the write payload
	 * entirely in rq_arg.pages. In this case, @first is empty.
	 */
	i = 0;
	if (first->iov_len) {
		vec[i].iov_base = first->iov_base;
		vec[i].iov_len = min_t(size_t, total, first->iov_len);
		total -= vec[i].iov_len;
		++i;
	}

	while (total) {
		vec[i].iov_base = page_address(*pages);
		vec[i].iov_len = min_t(size_t, total, PAGE_SIZE);
		total -= vec[i].iov_len;
		++i;
		++pages;
	}

	WARN_ON_ONCE(i > ARRAY_SIZE(rqstp->rq_vec));
	return i;
}
EXPORT_SYMBOL_GPL(svc_fill_write_vector);

/**
 * svc_fill_symlink_pathname - Construct pathname argument for VFS symlink call
 * @rqstp: svc_rqst to operate on
 * @first: buffer containing first section of pathname
 * @p: buffer containing remaining section of pathname
 * @total: total length of the pathname argument
 *
 * The VFS symlink API demands a NUL-terminated pathname in mapped memory.
 * Returns pointer to a NUL-terminated string, or an ERR_PTR. Caller must free
 * the returned string.
 */
char *svc_fill_symlink_pathname(struct svc_rqst *rqstp, struct kvec *first,
				void *p, size_t total)
{
	size_t len, remaining;
	char *result, *dst;

	result = kmalloc(total + 1, GFP_KERNEL);
	if (!result)
		return ERR_PTR(-ESERVERFAULT);

	dst = result;
	remaining = total;

	len = min_t(size_t, total, first->iov_len);
	if (len) {
		memcpy(dst, first->iov_base, len);
		dst += len;
		remaining -= len;
	}

	if (remaining) {
		len = min_t(size_t, remaining, PAGE_SIZE);
		memcpy(dst, p, len);
		dst += len;
	}

	*dst = '\0';

	/* Sanity check: Linux doesn't allow the pathname argument to
	 * contain a NUL byte.
	 */
	if (strlen(result) != total) {
		kfree(result);
		return ERR_PTR(-EINVAL);
	}
	return result;
}
EXPORT_SYMBOL_GPL(svc_fill_symlink_pathname);