/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"

#define NFSDDBG_FACILITY	NFSDDBG_REPCACHE

/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE	64

struct nfsd_drc_bucket {
	struct list_head lru_head;
	spinlock_t cache_lock;
};

static struct nfsd_drc_bucket	*drc_hashtbl;
static struct kmem_cache	*drc_slab;

/* max number of entries allowed in the cache */
static unsigned int		max_drc_entries;

/* number of significant bits in the hash value */
static unsigned int		maskbits;
static unsigned int		drc_hashsize;

/*
 * Stats and other tracking of the duplicate reply cache. All of these and
 * the "rc" fields in nfsdstats are protected by the cache_lock.
 */

/* total number of entries */
static atomic_t			num_drc_entries;

/* cache misses due only to checksum comparison failures */
static unsigned int		payload_misses;

/* amount of memory (in bytes) currently consumed by the DRC */
static unsigned int		drc_mem_usage;

/* longest hash chain seen */
static unsigned int		longest_chain;

/* size of cache when we saw the longest hash chain */
static unsigned int		longest_chain_cachesize;

static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static void	cache_cleaner_func(struct work_struct *unused);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc);

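/*
 * Hook into the VM's shrinker infrastructure so the DRC gives memory
 * back under pressure: ->count_objects reports how many cache entries
 * are potentially freeable, and ->scan_objects prunes entries and
 * returns the number actually freed.
 */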
static struct shrinker nfsd_reply_cache_shrinker = {
	.scan_objects = nfsd_reply_cache_scan,
	.count_objects = nfsd_reply_cache_count,
	.seeks	= 1,
};

/*
 * locking for the reply cache:
 * A cache entry is "single use" if c_state == RC_INPROG
 * Otherwise, the lock must be held when accessing _prev or _next.
 */
static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:    8192
 * 128MB:   11585
 * 256MB:   16384
 * 512MB:   23170
 *   1GB:   32768
 *   2GB:   46340
 *   4GB:   65536
 *   8GB:   92681
 *  16GB:  131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used in k.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
	unsigned int limit;
	unsigned long low_pages = totalram_pages - totalhigh_pages;

	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
	return min_t(unsigned int, limit, 256*1024);
}
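
/*
 * Worked example for the table above (assuming 4K pages, so
 * PAGE_SHIFT - 10 == 2): with 1GB of low memory, low_pages == 262144,
 * int_sqrt(262144) == 512, and (16 * 512) << 2 == 32768 entries,
 * matching the 1GB row.
 */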

/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to next power of two.
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{
	return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}
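
/*
 * For example, with max_drc_entries == 32768 and TARGET_BUCKET_SIZE == 64,
 * this yields 32768 / 64 == 512 buckets (already a power of two), and
 * nfsd_reply_cache_init() then sets maskbits to ilog2(512) == 9.
 */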

static u32
nfsd_cache_hash(__be32 xid)
{
	return hash_32(be32_to_cpu(xid), maskbits);
}
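
/*
 * hash_32() multiplies the value by a golden-ratio constant and keeps
 * only the top maskbits bits, so nearby XIDs are spread across buckets
 * instead of clustering.
 */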

static struct svc_cacherep *
nfsd_reply_cache_alloc(void)
{
	struct svc_cacherep	*rp;

	rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
	if (rp) {
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		INIT_LIST_HEAD(&rp->c_lru);
	}
	return rp;
}

static void
nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
{
	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
		drc_mem_usage -= rp->c_replvec.iov_len;
		kfree(rp->c_replvec.iov_base);
	}
	list_del(&rp->c_lru);
	atomic_dec(&num_drc_entries);
	drc_mem_usage -= sizeof(*rp);
	kmem_cache_free(drc_slab, rp);
}

static void
nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
	spin_lock(&b->cache_lock);
	nfsd_reply_cache_free_locked(rp);
	spin_unlock(&b->cache_lock);
}

int nfsd_reply_cache_init(void)
{
	unsigned int hashsize;
	unsigned int i;

	max_drc_entries = nfsd_cache_size_limit();
	atomic_set(&num_drc_entries, 0);
	hashsize = nfsd_hashsize(max_drc_entries);
	maskbits = ilog2(hashsize);

	register_shrinker(&nfsd_reply_cache_shrinker);
	drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
					0, 0, NULL);
	if (!drc_slab)
		goto out_nomem;

	drc_hashtbl = kcalloc(hashsize, sizeof(*drc_hashtbl), GFP_KERNEL);
	if (!drc_hashtbl)
		goto out_nomem;
	for (i = 0; i < hashsize; i++) {
		INIT_LIST_HEAD(&drc_hashtbl[i].lru_head);
		spin_lock_init(&drc_hashtbl[i].cache_lock);
	}
	drc_hashsize = hashsize;

	return 0;
out_nomem:
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	nfsd_reply_cache_shutdown();
	return -ENOMEM;
}

void nfsd_reply_cache_shutdown(void)
{
	struct svc_cacherep	*rp;
	unsigned int i;

	unregister_shrinker(&nfsd_reply_cache_shrinker);
	cancel_delayed_work_sync(&cache_cleaner);

	for (i = 0; i < drc_hashsize; i++) {
		struct list_head *head = &drc_hashtbl[i].lru_head;
		while (!list_empty(head)) {
			rp = list_first_entry(head, struct svc_cacherep, c_lru);
			nfsd_reply_cache_free_locked(rp);
		}
	}

	kfree(drc_hashtbl);
	drc_hashtbl = NULL;
	drc_hashsize = 0;

	if (drc_slab) {
		kmem_cache_destroy(drc_slab);
		drc_slab = NULL;
	}
}

/*
 * Move cache entry to end of LRU list, and queue the cleaner to run if it's
 * not already scheduled.
 */
static void
lru_put_end(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
	rp->c_timestamp = jiffies;
	list_move_tail(&rp->c_lru, &b->lru_head);
	schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
}

static long
prune_bucket(struct nfsd_drc_bucket *b)
{
	struct svc_cacherep *rp, *tmp;
	long freed = 0;

	list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
		/*
		 * Don't free entries attached to calls that are still
		 * in-progress, but do keep scanning the list.
		 */
		if (rp->c_state == RC_INPROG)
			continue;
		if (atomic_read(&num_drc_entries) <= max_drc_entries &&
		    time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
			break;
		nfsd_reply_cache_free_locked(rp);
		freed++;
	}
	return freed;
}

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static long
prune_cache_entries(void)
{
	unsigned int i;
	long freed = 0;
	bool cancel = true;

	for (i = 0; i < drc_hashsize; i++) {
		struct nfsd_drc_bucket *b = &drc_hashtbl[i];

		if (list_empty(&b->lru_head))
			continue;
		spin_lock(&b->cache_lock);
		freed += prune_bucket(b);
		if (!list_empty(&b->lru_head))
			cancel = false;
		spin_unlock(&b->cache_lock);
	}

	/*
	 * Conditionally rearm the job to run in RC_EXPIRE since we just
	 * ran the pruner.
	 */
	if (!cancel)
		mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
	return freed;
}

static void
cache_cleaner_func(struct work_struct *unused)
{
	prune_cache_entries();
}

static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return atomic_read(&num_drc_entries);
}

static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	return prune_cache_entries();
}

/*
 * Walk an xdr_buf and compute a checksum over at most the first
 * RC_CSUMLEN bytes.
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
	int idx;
	unsigned int base;
	__wsum csum;
	struct xdr_buf *buf = &rqstp->rq_arg;
	const unsigned char *p = buf->head[0].iov_base;
	size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
				RC_CSUMLEN);
	size_t len = min(buf->head[0].iov_len, csum_len);

	/* rq_arg.head first */
	csum = csum_partial(p, len, 0);
	csum_len -= len;

	/* Continue into page array */
	idx = buf->page_base / PAGE_SIZE;
	base = buf->page_base & ~PAGE_MASK;
	while (csum_len) {
		p = page_address(buf->pages[idx]) + base;
		len = min_t(size_t, PAGE_SIZE - base, csum_len);
		csum = csum_partial(p, len, csum);
		csum_len -= len;
		base = 0;
		++idx;
	}
	return csum;
}
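
/*
 * Example of the page-walk arithmetic above (hypothetical numbers,
 * assuming PAGE_SIZE == 4096): with buf->page_base == 5000, idx starts
 * at 5000 / 4096 == 1 and base at 5000 & ~PAGE_MASK == 904, so the
 * first pass checksums at most 4096 - 904 == 3192 bytes of pages[1]
 * before continuing from offset 0 of pages[2].
 */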

static bool
nfsd_cache_match(struct svc_rqst *rqstp, __wsum csum, struct svc_cacherep *rp)
{
	/* Check RPC XID first */
	if (rqstp->rq_xid != rp->c_xid)
		return false;
	/* compare checksum of NFS data */
	if (csum != rp->c_csum) {
		++payload_misses;
		return false;
	}

	/* Other discriminators */
	if (rqstp->rq_proc != rp->c_proc ||
	    rqstp->rq_prot != rp->c_prot ||
	    rqstp->rq_vers != rp->c_vers ||
	    rqstp->rq_arg.len != rp->c_len ||
	    !rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) ||
	    rpc_get_port(svc_addr(rqstp)) != rpc_get_port((struct sockaddr *)&rp->c_addr))
		return false;

	return true;
}

/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the found entry or
 * NULL on failure.
 */
static struct svc_cacherep *
nfsd_cache_search(struct nfsd_drc_bucket *b, struct svc_rqst *rqstp,
		__wsum csum)
{
	struct svc_cacherep	*rp, *ret = NULL;
	struct list_head	*rh = &b->lru_head;
	unsigned int		entries = 0;

	list_for_each_entry(rp, rh, c_lru) {
		++entries;
		if (nfsd_cache_match(rqstp, csum, rp)) {
			ret = rp;
			break;
		}
	}

	/* tally hash chain length stats */
	if (entries > longest_chain) {
		longest_chain = entries;
		longest_chain_cachesize = atomic_read(&num_drc_entries);
	} else if (entries == longest_chain) {
		/* prefer to keep the smallest cachesize possible here */
		longest_chain_cachesize = min_t(unsigned int,
				longest_chain_cachesize,
				atomic_read(&num_drc_entries));
	}

	return ret;
}

/*
 * Try to find an entry matching the current call in the cache. Since
 * the common case is a miss followed by an insert, we preallocate an
 * entry before taking the bucket lock, prune the bucket, and then
 * search it; if a matching entry is found, the preallocated one is
 * freed and the cached entry is used instead.
 */
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
	struct svc_cacherep	*rp, *found;
	__be32			xid = rqstp->rq_xid;
	u32			proto = rqstp->rq_prot,
				vers = rqstp->rq_vers,
				proc = rqstp->rq_proc;
	__wsum			csum;
	u32 hash = nfsd_cache_hash(xid);
	struct nfsd_drc_bucket *b = &drc_hashtbl[hash];
	unsigned long		age;
	int type = rqstp->rq_cachetype;
	int rtn = RC_DOIT;

	rqstp->rq_cacherep = NULL;
	if (type == RC_NOCACHE) {
		nfsdstats.rcnocache++;
		return rtn;
	}

	csum = nfsd_cache_csum(rqstp);

	/*
	 * Since the common case is a cache miss followed by an insert,
	 * preallocate an entry.
	 */
	rp = nfsd_reply_cache_alloc();
	spin_lock(&b->cache_lock);
	if (likely(rp)) {
		atomic_inc(&num_drc_entries);
		drc_mem_usage += sizeof(*rp);
	}

	/* go ahead and prune the cache */
	prune_bucket(b);

	found = nfsd_cache_search(b, rqstp, csum);
	if (found) {
		if (likely(rp))
			nfsd_reply_cache_free_locked(rp);
		rp = found;
		goto found_entry;
	}

	if (!rp) {
		dprintk("nfsd: unable to allocate DRC entry!\n");
		goto out;
	}

	nfsdstats.rcmisses++;
	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;
	rp->c_xid = xid;
	rp->c_proc = proc;
	rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
	rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
	rp->c_prot = proto;
	rp->c_vers = vers;
	rp->c_len = rqstp->rq_arg.len;
	rp->c_csum = csum;

	lru_put_end(b, rp);

	/* release any buffer */
	if (rp->c_type == RC_REPLBUFF) {
		drc_mem_usage -= rp->c_replvec.iov_len;
		kfree(rp->c_replvec.iov_base);
		rp->c_replvec.iov_base = NULL;
	}
	rp->c_type = RC_NOCACHE;
 out:
	spin_unlock(&b->cache_lock);
	return rtn;

found_entry:
	nfsdstats.rchits++;
	/* We found a matching entry which is either in progress or done. */
	age = jiffies - rp->c_timestamp;
	lru_put_end(b, rp);

	rtn = RC_DROPIT;
	/* Request being processed or excessive rexmits */
	if (rp->c_state == RC_INPROG || age < RC_DELAY)
		goto out;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!rqstp->rq_secure && rp->c_secure)
		goto out;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out;	/* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
		nfsd_reply_cache_free_locked(rp);
	}

	goto out;
}
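
/*
 * Summary of the return values above: RC_DOIT tells the caller to
 * process the call normally, RC_REPLY means a cached reply has been
 * composed into rq_res, and RC_DROPIT means the request should be
 * dropped, e.g. a retransmission of a call that is still in progress.
 */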

/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
	struct svc_cacherep *rp = rqstp->rq_cacherep;
	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
	u32		hash;
	struct nfsd_drc_bucket *b;
	int		len;
	size_t		bufsize = 0;

	if (!rp)
		return;

	hash = nfsd_cache_hash(rp->c_xid);
	b = &drc_hashtbl[hash];

	/* length of the reply data following *statp, in 32-bit XDR words */
	len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
	len >>= 2;

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		nfsd_reply_cache_free(b, rp);
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		bufsize = len << 2;
		cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
		if (!cachv->iov_base) {
			nfsd_reply_cache_free(b, rp);
			return;
		}
		cachv->iov_len = bufsize;
		memcpy(cachv->iov_base, statp, bufsize);
		break;
	case RC_NOCACHE:
		nfsd_reply_cache_free(b, rp);
		return;
	}
	spin_lock(&b->cache_lock);
	drc_mem_usage += bufsize;
	lru_put_end(b, rp);
	rp->c_secure = rqstp->rq_secure;
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	spin_unlock(&b->cache_lock);
	return;
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec	*vec = &rqstp->rq_res.head[0];

	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
				data->iov_len);
		return 0;
	}
	memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max entries:           %u\n", max_drc_entries);
	seq_printf(m, "num entries:           %u\n",
			atomic_read(&num_drc_entries));
	seq_printf(m, "hash buckets:          %u\n", 1 << maskbits);
	seq_printf(m, "mem usage:             %u\n", drc_mem_usage);
	seq_printf(m, "cache hits:            %u\n", nfsdstats.rchits);
	seq_printf(m, "cache misses:          %u\n", nfsdstats.rcmisses);
	seq_printf(m, "not cached:            %u\n", nfsdstats.rcnocache);
	seq_printf(m, "payload misses:        %u\n", payload_misses);
	seq_printf(m, "longest chain len:     %u\n", longest_chain);
	seq_printf(m, "cachesize at longest:  %u\n", longest_chain_cachesize);
	return 0;
}

int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, nfsd_reply_cache_stats_show, NULL);
}