// SPDX-License-Identifier: GPL-2.0
/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"
#include "trace.h"

/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, then this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE	64

struct nfsd_drc_bucket {
        struct rb_root rb_head;
        struct list_head lru_head;
        spinlock_t cache_lock;
};

static struct kmem_cache *drc_slab;

static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
                                            struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
                                           struct shrink_control *sc);

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:    8192
 * 128MB:   11585
 * 256MB:   16384
 * 512MB:   23170
 *   1GB:   32768
 *   2GB:   46340
 *   4GB:   65536
 *   8GB:   92681
 *  16GB:  131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used in k.
 *
 * XXX: these limits are per-container, so memory used will increase
 * linearly with number of containers. Maybe that's OK.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
        unsigned int limit;
        unsigned long low_pages = totalram_pages() - totalhigh_pages();

        limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
        return min_t(unsigned int, limit, 256*1024);
}

/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to next power of two.
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{
        return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}
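
/*
 * Hash an RPC XID into an index into the per-netns DRC bucket array.
 */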
static u32
nfsd_cache_hash(__be32 xid, struct nfsd_net *nn)
{
        return hash_32(be32_to_cpu(xid), nn->maskbits);
}
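
/*
 * Allocate a new cache entry and fill in its lookup key from the request:
 * XID, procedure, client address and port, transport protocol, NFS version,
 * argument length and payload checksum. Returns NULL if the slab allocation
 * fails.
 */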
static struct svc_cacherep *
nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum,
                       struct nfsd_net *nn)
{
        struct svc_cacherep *rp;

        rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
        if (rp) {
                rp->c_state = RC_UNUSED;
                rp->c_type = RC_NOCACHE;
                RB_CLEAR_NODE(&rp->c_node);
                INIT_LIST_HEAD(&rp->c_lru);

                memset(&rp->c_key, 0, sizeof(rp->c_key));
                rp->c_key.k_xid = rqstp->rq_xid;
                rp->c_key.k_proc = rqstp->rq_proc;
                rpc_copy_addr((struct sockaddr *)&rp->c_key.k_addr, svc_addr(rqstp));
                rpc_set_port((struct sockaddr *)&rp->c_key.k_addr, rpc_get_port(svc_addr(rqstp)));
                rp->c_key.k_prot = rqstp->rq_prot;
                rp->c_key.k_vers = rqstp->rq_vers;
                rp->c_key.k_len = rqstp->rq_arg.len;
                rp->c_key.k_csum = csum;
        }
        return rp;
}
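
/*
 * Free a cache entry: release any cached reply buffer, unlink the entry
 * from its bucket's rbtree and LRU list if it was ever inserted, and update
 * the DRC accounting. Callers must hold the bucket's cache_lock, except
 * when freeing an RC_UNUSED entry that was never linked into a bucket.
 */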
static void
nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
                             struct nfsd_net *nn)
{
        if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
                nn->drc_mem_usage -= rp->c_replvec.iov_len;
                kfree(rp->c_replvec.iov_base);
        }
        if (rp->c_state != RC_UNUSED) {
                rb_erase(&rp->c_node, &b->rb_head);
                list_del(&rp->c_lru);
                atomic_dec(&nn->num_drc_entries);
                nn->drc_mem_usage -= sizeof(*rp);
        }
        kmem_cache_free(drc_slab, rp);
}
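
/*
 * Wrapper around nfsd_reply_cache_free_locked() that takes and releases
 * the bucket's cache_lock.
 */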
static void
nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
                      struct nfsd_net *nn)
{
        spin_lock(&b->cache_lock);
        nfsd_reply_cache_free_locked(b, rp, nn);
        spin_unlock(&b->cache_lock);
}
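
/*
 * Create the slab cache from which all DRC entries are allocated.
 */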
int nfsd_drc_slab_create(void)
{
        drc_slab = kmem_cache_create("nfsd_drc",
                                     sizeof(struct svc_cacherep), 0, 0, NULL);
        return drc_slab ? 0: -ENOMEM;
}

void nfsd_drc_slab_free(void)
{
        kmem_cache_destroy(drc_slab);
}
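
/*
 * Per-netns DRC setup: size the cache from available low memory, register
 * the shrinker, and allocate and initialize the bucket hash table.
 * Returns 0 on success or -ENOMEM on failure.
 */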
int nfsd_reply_cache_init(struct nfsd_net *nn)
{
        unsigned int hashsize;
        unsigned int i;
        int status = 0;

        nn->max_drc_entries = nfsd_cache_size_limit();
        atomic_set(&nn->num_drc_entries, 0);
        hashsize = nfsd_hashsize(nn->max_drc_entries);
        nn->maskbits = ilog2(hashsize);

        nn->nfsd_reply_cache_shrinker.scan_objects = nfsd_reply_cache_scan;
        nn->nfsd_reply_cache_shrinker.count_objects = nfsd_reply_cache_count;
        nn->nfsd_reply_cache_shrinker.seeks = 1;
        status = register_shrinker(&nn->nfsd_reply_cache_shrinker);
        if (status)
                goto out_nomem;

        nn->drc_hashtbl = kvzalloc(array_size(hashsize,
                                sizeof(*nn->drc_hashtbl)), GFP_KERNEL);
        if (!nn->drc_hashtbl)
                goto out_shrinker;

        for (i = 0; i < hashsize; i++) {
                INIT_LIST_HEAD(&nn->drc_hashtbl[i].lru_head);
                spin_lock_init(&nn->drc_hashtbl[i].cache_lock);
        }
        nn->drc_hashsize = hashsize;

        return 0;
out_shrinker:
        unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
out_nomem:
        printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
        return -ENOMEM;
}
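
/*
 * Per-netns DRC teardown: unregister the shrinker, free every remaining
 * cache entry, and release the bucket hash table.
 */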
void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
{
        struct svc_cacherep *rp;
        unsigned int i;

        unregister_shrinker(&nn->nfsd_reply_cache_shrinker);

        for (i = 0; i < nn->drc_hashsize; i++) {
                struct list_head *head = &nn->drc_hashtbl[i].lru_head;
                while (!list_empty(head)) {
                        rp = list_first_entry(head, struct svc_cacherep, c_lru);
                        nfsd_reply_cache_free_locked(&nn->drc_hashtbl[i],
                                                     rp, nn);
                }
        }

        kvfree(nn->drc_hashtbl);
        nn->drc_hashtbl = NULL;
        nn->drc_hashsize = 0;
}

/*
 * Move a cache entry to the end of its bucket's LRU list and refresh its
 * timestamp.
 */
static void
lru_put_end(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
        rp->c_timestamp = jiffies;
        list_move_tail(&rp->c_lru, &b->lru_head);
}
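
/*
 * Walk one bucket's LRU list from oldest to newest and free entries that
 * have expired, or any completed entry while the cache is over
 * max_drc_entries. RC_INPROG entries are skipped; the walk stops at the
 * first unexpired entry once the cache is back under its limit. Caller
 * must hold the bucket's cache_lock. Returns the number of entries freed.
 */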
static long
prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn)
{
        struct svc_cacherep *rp, *tmp;
        long freed = 0;

        list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
                /*
                 * Don't free entries attached to calls that are still
                 * in-progress, but do keep scanning the list.
                 */
                if (rp->c_state == RC_INPROG)
                        continue;
                if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries &&
                    time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
                        break;
                nfsd_reply_cache_free_locked(b, rp, nn);
                freed++;
        }
        return freed;
}

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static long
prune_cache_entries(struct nfsd_net *nn)
{
        unsigned int i;
        long freed = 0;

        for (i = 0; i < nn->drc_hashsize; i++) {
                struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i];

                if (list_empty(&b->lru_head))
                        continue;
                spin_lock(&b->cache_lock);
                freed += prune_bucket(b, nn);
                spin_unlock(&b->cache_lock);
        }
        return freed;
}
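
/*
 * Shrinker "count" callback: report how many entries this netns's DRC
 * currently holds.
 */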
static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
        struct nfsd_net *nn = container_of(shrink,
                                struct nfsd_net, nfsd_reply_cache_shrinker);

        return atomic_read(&nn->num_drc_entries);
}
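
/*
 * Shrinker "scan" callback: prune expired and surplus entries and return
 * the number freed.
 */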
static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        struct nfsd_net *nn = container_of(shrink,
                                struct nfsd_net, nfsd_reply_cache_shrinker);

        return prune_cache_entries(nn);
}

/*
 * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
        int idx;
        unsigned int base;
        __wsum csum;
        struct xdr_buf *buf = &rqstp->rq_arg;
        const unsigned char *p = buf->head[0].iov_base;
        size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
                                RC_CSUMLEN);
        size_t len = min(buf->head[0].iov_len, csum_len);

        /* rq_arg.head first */
        csum = csum_partial(p, len, 0);
        csum_len -= len;

        /* Continue into page array */
        idx = buf->page_base / PAGE_SIZE;
        base = buf->page_base & ~PAGE_MASK;
        while (csum_len) {
                p = page_address(buf->pages[idx]) + base;
                len = min_t(size_t, PAGE_SIZE - base, csum_len);
                csum = csum_partial(p, len, csum);
                csum_len -= len;
                base = 0;
                ++idx;
        }
        return csum;
}
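
/*
 * Compare a lookup key against an existing cache entry. A matching XID
 * with a differing payload checksum is counted and traced as a payload
 * miss before the full key comparison decides the result.
 */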
static int
nfsd_cache_key_cmp(const struct svc_cacherep *key,
                   const struct svc_cacherep *rp, struct nfsd_net *nn)
{
        if (key->c_key.k_xid == rp->c_key.k_xid &&
            key->c_key.k_csum != rp->c_key.k_csum) {
                ++nn->payload_misses;
                trace_nfsd_drc_mismatch(nn, key, rp);
        }

        return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key));
}

/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the found entry or
 * inserts an empty key on failure.
 */
static struct svc_cacherep *
nfsd_cache_insert(struct nfsd_drc_bucket *b, struct svc_cacherep *key,
                  struct nfsd_net *nn)
{
        struct svc_cacherep *rp, *ret = key;
        struct rb_node **p = &b->rb_head.rb_node,
                       *parent = NULL;
        unsigned int entries = 0;
        int cmp;

        while (*p != NULL) {
                ++entries;
                parent = *p;
                rp = rb_entry(parent, struct svc_cacherep, c_node);

                cmp = nfsd_cache_key_cmp(key, rp, nn);
                if (cmp < 0)
                        p = &parent->rb_left;
                else if (cmp > 0)
                        p = &parent->rb_right;
                else {
                        ret = rp;
                        goto out;
                }
        }
        rb_link_node(&key->c_node, parent, p);
        rb_insert_color(&key->c_node, &b->rb_head);
out:
        /* tally hash chain length stats */
        if (entries > nn->longest_chain) {
                nn->longest_chain = entries;
                nn->longest_chain_cachesize = atomic_read(&nn->num_drc_entries);
        } else if (entries == nn->longest_chain) {
                /* prefer to keep the smallest cachesize possible here */
                nn->longest_chain_cachesize = min_t(unsigned int,
                                nn->longest_chain_cachesize,
                                atomic_read(&nn->num_drc_entries));
        }

        lru_put_end(b, ret);
        return ret;
}

/**
 * nfsd_cache_lookup - Find an entry in the duplicate reply cache
 * @rqstp: Incoming Call to find
 *
 * Try to find an entry matching the current call in the cache. Since the
 * common case is a cache miss followed by an insert, a new entry is
 * preallocated before the bucket lock is taken and then inserted. If a
 * matching entry is already present, the preallocated one is freed and the
 * cached entry is used instead.
 *
 * Return values:
 * %RC_DOIT: Process the request normally
 * %RC_REPLY: Reply from cache
 * %RC_DROPIT: Do not process the request further
 */
int nfsd_cache_lookup(struct svc_rqst *rqstp)
{
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
        struct svc_cacherep *rp, *found;
        __be32 xid = rqstp->rq_xid;
        __wsum csum;
        u32 hash = nfsd_cache_hash(xid, nn);
        struct nfsd_drc_bucket *b = &nn->drc_hashtbl[hash];
        int type = rqstp->rq_cachetype;
        int rtn = RC_DOIT;

        rqstp->rq_cacherep = NULL;
        if (type == RC_NOCACHE) {
                nfsdstats.rcnocache++;
                goto out;
        }

        csum = nfsd_cache_csum(rqstp);

        /*
         * Since the common case is a cache miss followed by an insert,
         * preallocate an entry.
         */
        rp = nfsd_reply_cache_alloc(rqstp, csum, nn);
        if (!rp)
                goto out;

        spin_lock(&b->cache_lock);
        found = nfsd_cache_insert(b, rp, nn);
        if (found != rp) {
                nfsd_reply_cache_free_locked(NULL, rp, nn);
                rp = found;
                goto found_entry;
        }

        nfsdstats.rcmisses++;
        rqstp->rq_cacherep = rp;
        rp->c_state = RC_INPROG;

        atomic_inc(&nn->num_drc_entries);
        nn->drc_mem_usage += sizeof(*rp);

        /* go ahead and prune the cache */
        prune_bucket(b, nn);

out_unlock:
        spin_unlock(&b->cache_lock);
out:
        return rtn;

found_entry:
        /* We found a matching entry which is either in progress or done. */
        nfsdstats.rchits++;
        rtn = RC_DROPIT;

        /* Request being processed */
        if (rp->c_state == RC_INPROG)
                goto out_trace;

        /* From the hall of fame of impractical attacks:
         * Is this a user who tries to snoop on the cache? */
        rtn = RC_DOIT;
        if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
                goto out_trace;

        /* Compose RPC reply header */
        switch (rp->c_type) {
        case RC_NOCACHE:
                break;
        case RC_REPLSTAT:
                svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
                rtn = RC_REPLY;
                break;
        case RC_REPLBUFF:
                if (!nfsd_cache_append(rqstp, &rp->c_replvec))
                        goto out_unlock; /* should not happen */
                rtn = RC_REPLY;
                break;
        default:
                WARN_ONCE(1, "nfsd: bad repcache type %d\n", rp->c_type);
        }

out_trace:
        trace_nfsd_drc_found(nn, rqstp, rtn);
        goto out_unlock;
}

/**
 * nfsd_cache_update - Update an entry in the duplicate reply cache.
 * @rqstp: svc_rqst with a finished Reply
 * @cachetype: which cache to update
 * @statp: Reply's status code
 *
 * This is called from nfsd_dispatch when the procedure has been
 * executed and the complete reply is in rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
        struct svc_cacherep *rp = rqstp->rq_cacherep;
        struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
        u32 hash;
        struct nfsd_drc_bucket *b;
        int len;
        size_t bufsize = 0;

        if (!rp)
                return;

        hash = nfsd_cache_hash(rp->c_key.k_xid, nn);
        b = &nn->drc_hashtbl[hash];

        len = resv->iov_len - ((char *)statp - (char *)resv->iov_base);
        len >>= 2;

        /* Don't cache excessive amounts of data and XDR failures */
        if (!statp || len > (256 >> 2)) {
                nfsd_reply_cache_free(b, rp, nn);
                return;
        }

        switch (cachetype) {
        case RC_REPLSTAT:
                if (len != 1)
                        printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
                rp->c_replstat = *statp;
                break;
        case RC_REPLBUFF:
                cachv = &rp->c_replvec;
                bufsize = len << 2;
                cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
                if (!cachv->iov_base) {
                        nfsd_reply_cache_free(b, rp, nn);
                        return;
                }
                cachv->iov_len = bufsize;
                memcpy(cachv->iov_base, statp, bufsize);
                break;
        case RC_NOCACHE:
                nfsd_reply_cache_free(b, rp, nn);
                return;
        }
        spin_lock(&b->cache_lock);
        nn->drc_mem_usage += bufsize;
        lru_put_end(b, rp);
        rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
        rp->c_type = cachetype;
        rp->c_state = RC_DONE;
        spin_unlock(&b->cache_lock);
        return;
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
        struct kvec *vec = &rqstp->rq_res.head[0];

        if (vec->iov_len + data->iov_len > PAGE_SIZE) {
                printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
                       data->iov_len);
                return 0;
        }
        memcpy((char *)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
        vec->iov_len += data->iov_len;
        return 1;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
        struct nfsd_net *nn = m->private;

        seq_printf(m, "max entries: %u\n", nn->max_drc_entries);
        seq_printf(m, "num entries: %u\n",
                   atomic_read(&nn->num_drc_entries));
        seq_printf(m, "hash buckets: %u\n", 1 << nn->maskbits);
        seq_printf(m, "mem usage: %u\n", nn->drc_mem_usage);
        seq_printf(m, "cache hits: %u\n", nfsdstats.rchits);
        seq_printf(m, "cache misses: %u\n", nfsdstats.rcmisses);
        seq_printf(m, "not cached: %u\n", nfsdstats.rcnocache);
        seq_printf(m, "payload misses: %u\n", nn->payload_misses);
        seq_printf(m, "longest chain len: %u\n", nn->longest_chain);
        seq_printf(m, "cachesize at longest: %u\n", nn->longest_chain_cachesize);
        return 0;
}
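
/*
 * Open the reply cache stats file, handing the per-netns nfsd_net to
 * nfsd_reply_cache_stats_show() via single_open().
 */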
int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
{
        struct nfsd_net *nn = net_generic(file_inode(file)->i_sb->s_fs_info,
                                          nfsd_net_id);

        return single_open(file, nfsd_reply_cache_stats_show, nn);
}