1 /*
2 * net/sunrpc/cache.c
3 *
4 * Generic code for various authentication-related caches
5 * used by sunrpc clients and servers.
6 *
7 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
8 *
9 * Released under terms in GPL version 2. See COPYING.
10 *
11 */
12
13 #include <linux/types.h>
14 #include <linux/fs.h>
15 #include <linux/file.h>
16 #include <linux/slab.h>
17 #include <linux/signal.h>
18 #include <linux/sched.h>
19 #include <linux/kmod.h>
20 #include <linux/list.h>
21 #include <linux/module.h>
22 #include <linux/ctype.h>
23 #include <linux/string_helpers.h>
24 #include <asm/uaccess.h>
25 #include <linux/poll.h>
26 #include <linux/seq_file.h>
27 #include <linux/proc_fs.h>
28 #include <linux/net.h>
29 #include <linux/workqueue.h>
30 #include <linux/mutex.h>
31 #include <linux/pagemap.h>
32 #include <asm/ioctls.h>
33 #include <linux/sunrpc/types.h>
34 #include <linux/sunrpc/cache.h>
35 #include <linux/sunrpc/stats.h>
36 #include <linux/sunrpc/rpc_pipe_fs.h>
37 #include "netns.h"
38
39 #define RPCDBG_FACILITY RPCDBG_CACHE
40
41 static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
42 static void cache_revisit_request(struct cache_head *item);
43
44 static void cache_init(struct cache_head *h, struct cache_detail *detail)
45 {
46 time_t now = seconds_since_boot();
47 INIT_HLIST_NODE(&h->cache_list);
48 h->flags = 0;
49 kref_init(&h->ref);
50 h->expiry_time = now + CACHE_NEW_EXPIRY;
51 if (now <= detail->flush_time)
52 /* ensure it isn't already expired */
53 now = detail->flush_time + 1;
54 h->last_refresh = now;
55 }
56
57 static void cache_fresh_unlocked(struct cache_head *head,
58 struct cache_detail *detail);
59
60 struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
61 struct cache_head *key, int hash)
62 {
63 struct cache_head *new = NULL, *freeme = NULL, *tmp = NULL;
64 struct hlist_head *head;
65
66 head = &detail->hash_table[hash];
67
68 read_lock(&detail->hash_lock);
69
70 hlist_for_each_entry(tmp, head, cache_list) {
71 if (detail->match(tmp, key)) {
72 if (cache_is_expired(detail, tmp))
73 /* This entry is expired, we will discard it. */
74 break;
75 cache_get(tmp);
76 read_unlock(&detail->hash_lock);
77 return tmp;
78 }
79 }
80 read_unlock(&detail->hash_lock);
81 /* Didn't find anything, insert an empty entry */
82
83 new = detail->alloc();
84 if (!new)
85 return NULL;
86 /* must fully initialise 'new', else
87 * cache_put() on it soon afterwards
88 * could misbehave.
89 */
90 cache_init(new, detail);
91 detail->init(new, key);
92
93 write_lock(&detail->hash_lock);
94
95 /* check if entry appeared while we slept */
96 hlist_for_each_entry(tmp, head, cache_list) {
97 if (detail->match(tmp, key)) {
98 if (cache_is_expired(detail, tmp)) {
99 hlist_del_init(&tmp->cache_list);
100 detail->entries--;
101 freeme = tmp;
102 break;
103 }
104 cache_get(tmp);
105 write_unlock(&detail->hash_lock);
106 cache_put(new, detail);
107 return tmp;
108 }
109 }
110
111 hlist_add_head(&new->cache_list, head);
112 detail->entries++;
113 cache_get(new);
114 write_unlock(&detail->hash_lock);
115
116 if (freeme) {
117 cache_fresh_unlocked(freeme, detail);
118 cache_put(freeme, detail);
119 }
120 return new;
121 }
122 EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
123
124
125 static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);
126
127 static void cache_fresh_locked(struct cache_head *head, time_t expiry,
128 struct cache_detail *detail)
129 {
130 time_t now = seconds_since_boot();
131 if (now <= detail->flush_time)
132 /* ensure it isn't immediately treated as expired */
133 now = detail->flush_time + 1;
134 head->expiry_time = expiry;
135 head->last_refresh = now;
136 smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
137 set_bit(CACHE_VALID, &head->flags);
138 }
139
140 static void cache_fresh_unlocked(struct cache_head *head,
141 struct cache_detail *detail)
142 {
143 if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
144 cache_revisit_request(head);
145 cache_dequeue(detail, head);
146 }
147 }
148
149 struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
150 struct cache_head *new, struct cache_head *old, int hash)
151 {
152 /* The 'old' entry is to be replaced by 'new'.
153 * If 'old' is not VALID, we update it directly,
154 * otherwise we need to replace it
155 */
156 struct cache_head *tmp;
157
158 if (!test_bit(CACHE_VALID, &old->flags)) {
159 write_lock(&detail->hash_lock);
160 if (!test_bit(CACHE_VALID, &old->flags)) {
161 if (test_bit(CACHE_NEGATIVE, &new->flags))
162 set_bit(CACHE_NEGATIVE, &old->flags);
163 else
164 detail->update(old, new);
165 cache_fresh_locked(old, new->expiry_time, detail);
166 write_unlock(&detail->hash_lock);
167 cache_fresh_unlocked(old, detail);
168 return old;
169 }
170 write_unlock(&detail->hash_lock);
171 }
172 /* We need to insert a new entry */
173 tmp = detail->alloc();
174 if (!tmp) {
175 cache_put(old, detail);
176 return NULL;
177 }
178 cache_init(tmp, detail);
179 detail->init(tmp, old);
180
181 write_lock(&detail->hash_lock);
182 if (test_bit(CACHE_NEGATIVE, &new->flags))
183 set_bit(CACHE_NEGATIVE, &tmp->flags);
184 else
185 detail->update(tmp, new);
186 hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
187 detail->entries++;
188 cache_get(tmp);
189 cache_fresh_locked(tmp, new->expiry_time, detail);
190 cache_fresh_locked(old, 0, detail);
191 write_unlock(&detail->hash_lock);
192 cache_fresh_unlocked(tmp, detail);
193 cache_fresh_unlocked(old, detail);
194 cache_put(old, detail);
195 return tmp;
196 }
197 EXPORT_SYMBOL_GPL(sunrpc_cache_update);
198
199 static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
200 {
201 if (cd->cache_upcall)
202 return cd->cache_upcall(cd, h);
203 return sunrpc_cache_pipe_upcall(cd, h);
204 }
205
206 static inline int cache_is_valid(struct cache_head *h)
207 {
208 if (!test_bit(CACHE_VALID, &h->flags))
209 return -EAGAIN;
210 else {
211 /* entry is valid */
212 if (test_bit(CACHE_NEGATIVE, &h->flags))
213 return -ENOENT;
214 else {
215 /*
216 * In combination with write barrier in
217 * sunrpc_cache_update, ensures that anyone
218 * using the cache entry after this sees the
219 * updated contents:
220 */
221 smp_rmb();
222 return 0;
223 }
224 }
225 }
226
227 static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
228 {
229 int rv;
230
231 write_lock(&detail->hash_lock);
232 rv = cache_is_valid(h);
233 if (rv == -EAGAIN) {
234 set_bit(CACHE_NEGATIVE, &h->flags);
235 cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY,
236 detail);
237 rv = -ENOENT;
238 }
239 write_unlock(&detail->hash_lock);
240 cache_fresh_unlocked(h, detail);
241 return rv;
242 }
243
244 /*
245 * This is the generic cache management routine for all
246 * the authentication caches.
247 * It checks the currency of a cache item and will (later)
248 * initiate an upcall to fill it if needed.
249 *
250 *
251 * Returns 0 if the cache_head can be used, or cache_puts it and returns
252 * -EAGAIN if upcall is pending and request has been queued
253 * -ETIMEDOUT if upcall failed or request could not be queued or
254 * upcall completed but item is still invalid (implying that
255 * the cache item has been replaced with a newer one).
256 * -ENOENT if cache entry was negative
257 */
258 int cache_check(struct cache_detail *detail,
259 struct cache_head *h, struct cache_req *rqstp)
260 {
261 int rv;
262 long refresh_age, age;
263
264 /* First decide return status as best we can */
265 rv = cache_is_valid(h);
266
267 /* now see if we want to start an upcall */
268 refresh_age = (h->expiry_time - h->last_refresh);
269 age = seconds_since_boot() - h->last_refresh;
270
271 if (rqstp == NULL) {
272 if (rv == -EAGAIN)
273 rv = -ENOENT;
274 } else if (rv == -EAGAIN ||
275 (h->expiry_time != 0 && age > refresh_age/2)) {
276 dprintk("RPC: Want update, refage=%ld, age=%ld\n",
277 refresh_age, age);
278 if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
279 switch (cache_make_upcall(detail, h)) {
280 case -EINVAL:
281 rv = try_to_negate_entry(detail, h);
282 break;
283 case -EAGAIN:
284 cache_fresh_unlocked(h, detail);
285 break;
286 }
287 }
288 }
289
290 if (rv == -EAGAIN) {
291 if (!cache_defer_req(rqstp, h)) {
292 /*
293 * Request was not deferred; handle it as best
294 * we can ourselves:
295 */
296 rv = cache_is_valid(h);
297 if (rv == -EAGAIN)
298 rv = -ETIMEDOUT;
299 }
300 }
301 if (rv)
302 cache_put(h, detail);
303 return rv;
304 }
305 EXPORT_SYMBOL_GPL(cache_check);
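
/*
 * Purely illustrative sketch (the "mycache_*" helpers, "key" and the
 * svc_rqst plumbing are assumptions, not defined in this file) of how
 * a cache consumer normally drives the API above: look the key up,
 * then let cache_check() decide whether the entry is usable.
 *
 *	struct cache_head *h;
 *
 *	h = sunrpc_cache_lookup(cd, &key->h, mycache_hash(key));
 *	if (h == NULL)
 *		return -ENOMEM;
 *	switch (cache_check(cd, h, &rqstp->rq_chandle)) {
 *	case 0:
 *		use the entry; we still hold the lookup reference and
 *		must cache_put(h, cd) when finished;
 *		break;
 *	case -EAGAIN:
 *	case -ETIMEDOUT:
 *	case -ENOENT:
 *		cache_check() has already dropped the reference, so just
 *		defer, drop or fail the request as appropriate;
 *	}
 */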
306
307 /*
308 * caches need to be periodically cleaned.
309 * For this we maintain a list of cache_detail and
310 * a current pointer into that list and into the table
311 * for that entry.
312 *
313 * Each time cache_clean is called it finds the next non-empty entry
314 * in the current table and walks the list in that entry
315 * looking for entries that can be removed.
316 *
317 * An entry gets removed if:
318 * - The expiry is before current time
319 * - The last_refresh time is before the flush_time for that cache
320 *
321 * later we might drop old entries with non-NEVER expiry if that table
322 * is getting 'full' for some definition of 'full'
323 *
324 * The question of "how often to scan a table" is an interesting one
325 * and is answered in part by the use of the "nextcheck" field in the
326 * cache_detail.
327 * When a scan of a table begins, the nextcheck field is set to a time
328 * that is well into the future.
329 * While scanning, if an expiry time is found that is earlier than the
330 * current nextcheck time, nextcheck is set to that expiry time.
331 * If the flush_time is ever set to a time earlier than the nextcheck
332 * time, the nextcheck time is then set to that flush_time.
333 *
334 * A table is then only scanned if the current time is at least
335 * the nextcheck time.
336 *
337 */
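
/*
 * Worked example with illustrative numbers: a scan starting at t=1000
 * first sets nextcheck to t=2800 (30 minutes ahead).  If the walk then
 * meets entries expiring at t=1005 and t=1100, nextcheck is pulled
 * down to 1006, so the table becomes eligible for another scan as soon
 * as seconds_since_boot() reaches 1006 rather than after the full
 * half hour.
 */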
338
339 static LIST_HEAD(cache_list);
340 static DEFINE_SPINLOCK(cache_list_lock);
341 static struct cache_detail *current_detail;
342 static int current_index;
343
344 static void do_cache_clean(struct work_struct *work);
345 static struct delayed_work cache_cleaner;
346
347 void sunrpc_init_cache_detail(struct cache_detail *cd)
348 {
349 rwlock_init(&cd->hash_lock);
350 INIT_LIST_HEAD(&cd->queue);
351 spin_lock(&cache_list_lock);
352 cd->nextcheck = 0;
353 cd->entries = 0;
354 atomic_set(&cd->readers, 0);
355 cd->last_close = 0;
356 cd->last_warn = -1;
357 list_add(&cd->others, &cache_list);
358 spin_unlock(&cache_list_lock);
359
360 /* start the cleaning process */
361 schedule_delayed_work(&cache_cleaner, 0);
362 }
363 EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);
364
365 void sunrpc_destroy_cache_detail(struct cache_detail *cd)
366 {
367 cache_purge(cd);
368 spin_lock(&cache_list_lock);
369 write_lock(&cd->hash_lock);
370 if (cd->entries || atomic_read(&cd->inuse)) {
371 write_unlock(&cd->hash_lock);
372 spin_unlock(&cache_list_lock);
373 goto out;
374 }
375 if (current_detail == cd)
376 current_detail = NULL;
377 list_del_init(&cd->others);
378 write_unlock(&cd->hash_lock);
379 spin_unlock(&cache_list_lock);
380 if (list_empty(&cache_list)) {
381 /* module must be being unloaded, so it's safe to kill the worker */
382 cancel_delayed_work_sync(&cache_cleaner);
383 }
384 return;
385 out:
386 printk(KERN_ERR "RPC: failed to unregister %s cache\n", cd->name);
387 }
388 EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);
389
390 /* clean cache tries to find something to clean
391 * and cleans it.
392 * It returns 1 if it cleaned something,
393 * 0 if it didn't find anything this time
394 * -1 if it fell off the end of the list.
395 */
396 static int cache_clean(void)
397 {
398 int rv = 0;
399 struct list_head *next;
400
401 spin_lock(&cache_list_lock);
402
403 /* find a suitable table if we don't already have one */
404 while (current_detail == NULL ||
405 current_index >= current_detail->hash_size) {
406 if (current_detail)
407 next = current_detail->others.next;
408 else
409 next = cache_list.next;
410 if (next == &cache_list) {
411 current_detail = NULL;
412 spin_unlock(&cache_list_lock);
413 return -1;
414 }
415 current_detail = list_entry(next, struct cache_detail, others);
416 if (current_detail->nextcheck > seconds_since_boot())
417 current_index = current_detail->hash_size;
418 else {
419 current_index = 0;
420 current_detail->nextcheck = seconds_since_boot()+30*60;
421 }
422 }
423
424 /* find a non-empty bucket in the table */
425 while (current_detail &&
426 current_index < current_detail->hash_size &&
427 hlist_empty(&current_detail->hash_table[current_index]))
428 current_index++;
429
430 /* find a cleanable entry in the bucket and clean it, or set to next bucket */
431
432 if (current_detail && current_index < current_detail->hash_size) {
433 struct cache_head *ch = NULL;
434 struct cache_detail *d;
435 struct hlist_head *head;
436 struct hlist_node *tmp;
437
438 write_lock(&current_detail->hash_lock);
439
440 /* Ok, now to clean this strand */
441
442 head = &current_detail->hash_table[current_index];
443 hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
444 if (current_detail->nextcheck > ch->expiry_time)
445 current_detail->nextcheck = ch->expiry_time+1;
446 if (!cache_is_expired(current_detail, ch))
447 continue;
448
449 hlist_del_init(&ch->cache_list);
450 current_detail->entries--;
451 rv = 1;
452 break;
453 }
454
455 write_unlock(&current_detail->hash_lock);
456 d = current_detail;
457 if (!ch)
458 current_index++;
459 spin_unlock(&cache_list_lock);
460 if (ch) {
461 set_bit(CACHE_CLEANED, &ch->flags);
462 cache_fresh_unlocked(ch, d);
463 cache_put(ch, d);
464 }
465 } else
466 spin_unlock(&cache_list_lock);
467
468 return rv;
469 }
470
471 /*
472 * We want to regularly clean the cache, so we need to schedule some work ...
473 */
474 static void do_cache_clean(struct work_struct *work)
475 {
476 int delay = 5;
477 if (cache_clean() == -1)
478 delay = round_jiffies_relative(30*HZ);
479
480 if (list_empty(&cache_list))
481 delay = 0;
482
483 if (delay)
484 schedule_delayed_work(&cache_cleaner, delay);
485 }
486
487
488 /*
489 * Clean all caches promptly. This just calls cache_clean
490 * repeatedly until we are sure that every cache has had a chance to
491 * be fully cleaned
492 */
493 void cache_flush(void)
494 {
495 while (cache_clean() != -1)
496 cond_resched();
497 while (cache_clean() != -1)
498 cond_resched();
499 }
500 EXPORT_SYMBOL_GPL(cache_flush);
501
502 void cache_purge(struct cache_detail *detail)
503 {
504 time_t now = seconds_since_boot();
505 if (detail->flush_time >= now)
506 now = detail->flush_time + 1;
507 /* 'now' is the maximum value any 'last_refresh' can have */
508 detail->flush_time = now;
509 detail->nextcheck = seconds_since_boot();
510 cache_flush();
511 }
512 EXPORT_SYMBOL_GPL(cache_purge);
513
514
515 /*
516 * Deferral and Revisiting of Requests.
517 *
518 * If a cache lookup finds a pending entry, we
519 * need to defer the request and revisit it later.
520 * All deferred requests are stored in a hash table,
521 * indexed by "struct cache_head *".
522 * As it may be wasteful to store a whole request
523 * structure, we allow the request to provide a
524 * deferred form, which must contain a
525 * 'struct cache_deferred_req'
526 * This cache_deferred_req contains a method to allow
527 * it to be revisited when cache info is available
528 */
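
/*
 * Minimal sketch of a ->defer() provider (all "my_*" names are
 * hypothetical; the real server-side implementation lives with the
 * svc request code).  It saves enough of the request to replay it
 * later and hands back a cache_deferred_req whose ->revisit method
 * either requeues or drops the saved request:
 *
 *	static void my_revisit(struct cache_deferred_req *dreq, int too_many)
 *	{
 *		struct my_deferred *dr =
 *			container_of(dreq, struct my_deferred, handle);
 *
 *		if (too_many)
 *			my_drop_request(dr);
 *		else
 *			my_requeue_request(dr);
 *	}
 *
 *	static struct cache_deferred_req *my_defer(struct cache_req *req)
 *	{
 *		struct my_deferred *dr = my_save_request(req);
 *
 *		if (!dr)
 *			return NULL;
 *		dr->handle.revisit = my_revisit;
 *		return &dr->handle;
 *	}
 */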
529
530 #define DFR_HASHSIZE (PAGE_SIZE/sizeof(struct list_head))
531 #define DFR_HASH(item) ((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)
532
533 #define DFR_MAX 300 /* ??? */
534
535 static DEFINE_SPINLOCK(cache_defer_lock);
536 static LIST_HEAD(cache_defer_list);
537 static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
538 static int cache_defer_cnt;
539
540 static void __unhash_deferred_req(struct cache_deferred_req *dreq)
541 {
542 hlist_del_init(&dreq->hash);
543 if (!list_empty(&dreq->recent)) {
544 list_del_init(&dreq->recent);
545 cache_defer_cnt--;
546 }
547 }
548
549 static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
550 {
551 int hash = DFR_HASH(item);
552
553 INIT_LIST_HEAD(&dreq->recent);
554 hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
555 }
556
557 static void setup_deferral(struct cache_deferred_req *dreq,
558 struct cache_head *item,
559 int count_me)
560 {
561
562 dreq->item = item;
563
564 spin_lock(&cache_defer_lock);
565
566 __hash_deferred_req(dreq, item);
567
568 if (count_me) {
569 cache_defer_cnt++;
570 list_add(&dreq->recent, &cache_defer_list);
571 }
572
573 spin_unlock(&cache_defer_lock);
574
575 }
576
577 struct thread_deferred_req {
578 struct cache_deferred_req handle;
579 struct completion completion;
580 };
581
582 static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
583 {
584 struct thread_deferred_req *dr =
585 container_of(dreq, struct thread_deferred_req, handle);
586 complete(&dr->completion);
587 }
588
589 static void cache_wait_req(struct cache_req *req, struct cache_head *item)
590 {
591 struct thread_deferred_req sleeper;
592 struct cache_deferred_req *dreq = &sleeper.handle;
593
594 sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
595 dreq->revisit = cache_restart_thread;
596
597 setup_deferral(dreq, item, 0);
598
599 if (!test_bit(CACHE_PENDING, &item->flags) ||
600 wait_for_completion_interruptible_timeout(
601 &sleeper.completion, req->thread_wait) <= 0) {
602 /* The completion wasn't completed, so we need
603 * to clean up
604 */
605 spin_lock(&cache_defer_lock);
606 if (!hlist_unhashed(&sleeper.handle.hash)) {
607 __unhash_deferred_req(&sleeper.handle);
608 spin_unlock(&cache_defer_lock);
609 } else {
610 /* cache_revisit_request already removed
611 * this from the hash table, but hasn't
612 * called ->revisit yet. It will very soon
613 * and we need to wait for it.
614 */
615 spin_unlock(&cache_defer_lock);
616 wait_for_completion(&sleeper.completion);
617 }
618 }
619 }
620
621 static void cache_limit_defers(void)
622 {
623 /* Make sure we haven't exceeded the limit of allowed deferred
624 * requests.
625 */
626 struct cache_deferred_req *discard = NULL;
627
628 if (cache_defer_cnt <= DFR_MAX)
629 return;
630
631 spin_lock(&cache_defer_lock);
632
633 /* Consider removing either the first or the last */
634 if (cache_defer_cnt > DFR_MAX) {
635 if (prandom_u32() & 1)
636 discard = list_entry(cache_defer_list.next,
637 struct cache_deferred_req, recent);
638 else
639 discard = list_entry(cache_defer_list.prev,
640 struct cache_deferred_req, recent);
641 __unhash_deferred_req(discard);
642 }
643 spin_unlock(&cache_defer_lock);
644 if (discard)
645 discard->revisit(discard, 1);
646 }
647
648 /* Return true if and only if a deferred request is queued. */
649 static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
650 {
651 struct cache_deferred_req *dreq;
652
653 if (req->thread_wait) {
654 cache_wait_req(req, item);
655 if (!test_bit(CACHE_PENDING, &item->flags))
656 return false;
657 }
658 dreq = req->defer(req);
659 if (dreq == NULL)
660 return false;
661 setup_deferral(dreq, item, 1);
662 if (!test_bit(CACHE_PENDING, &item->flags))
663 /* Bit could have been cleared before we managed to
664 * set up the deferral, so need to revisit just in case
665 */
666 cache_revisit_request(item);
667
668 cache_limit_defers();
669 return true;
670 }
671
672 static void cache_revisit_request(struct cache_head *item)
673 {
674 struct cache_deferred_req *dreq;
675 struct list_head pending;
676 struct hlist_node *tmp;
677 int hash = DFR_HASH(item);
678
679 INIT_LIST_HEAD(&pending);
680 spin_lock(&cache_defer_lock);
681
682 hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
683 if (dreq->item == item) {
684 __unhash_deferred_req(dreq);
685 list_add(&dreq->recent, &pending);
686 }
687
688 spin_unlock(&cache_defer_lock);
689
690 while (!list_empty(&pending)) {
691 dreq = list_entry(pending.next, struct cache_deferred_req, recent);
692 list_del_init(&dreq->recent);
693 dreq->revisit(dreq, 0);
694 }
695 }
696
697 void cache_clean_deferred(void *owner)
698 {
699 struct cache_deferred_req *dreq, *tmp;
700 struct list_head pending;
701
702
703 INIT_LIST_HEAD(&pending);
704 spin_lock(&cache_defer_lock);
705
706 list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
707 if (dreq->owner == owner) {
708 __unhash_deferred_req(dreq);
709 list_add(&dreq->recent, &pending);
710 }
711 }
712 spin_unlock(&cache_defer_lock);
713
714 while (!list_empty(&pending)) {
715 dreq = list_entry(pending.next, struct cache_deferred_req, recent);
716 list_del_init(&dreq->recent);
717 dreq->revisit(dreq, 1);
718 }
719 }
720
721 /*
722 * communicate with user-space
723 *
724 * We have a magic /proc file - /proc/sunrpc/<cachename>/channel.
725 * On read, you get a full request, or block.
726 * On write, an update request is processed.
727 * Poll works if anything to read, and always allows write.
728 *
729 * Implemented by linked list of requests. Each open file has
730 * a ->private that also exists in this list. New requests are added
731 * to the end and may wake up any preceding readers.
732 * New readers are added to the head. If, on read, an item is found with
733 * CACHE_UPCALLING clear, we free it from the list.
734 *
735 */
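
/*
 * Roughly what the exchange looks like from a user-space daemon
 * (illustrative only; the exact line format is defined by each cache's
 * ->cache_request and ->cache_parse handlers):
 *
 *	fd = open(channel_path, O_RDWR);    the channel file described above
 *	n = read(fd, buf, sizeof(buf));     blocks, then returns one request
 *	... resolve the request ...
 *	write(fd, reply, reply_len);        one complete reply line,
 *	                                    terminated by a newline
 */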
736
737 static DEFINE_SPINLOCK(queue_lock);
738 static DEFINE_MUTEX(queue_io_mutex);
739
740 struct cache_queue {
741 struct list_head list;
742 int reader; /* if 0, then request */
743 };
744 struct cache_request {
745 struct cache_queue q;
746 struct cache_head *item;
747 char * buf;
748 int len;
749 int readers;
750 };
751 struct cache_reader {
752 struct cache_queue q;
753 int offset; /* if non-0, we have a refcnt on next request */
754 };
755
756 static int cache_request(struct cache_detail *detail,
757 struct cache_request *crq)
758 {
759 char *bp = crq->buf;
760 int len = PAGE_SIZE;
761
762 detail->cache_request(detail, crq->item, &bp, &len);
763 if (len < 0)
764 return -EAGAIN;
765 return PAGE_SIZE - len;
766 }
767
768 static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
769 loff_t *ppos, struct cache_detail *cd)
770 {
771 struct cache_reader *rp = filp->private_data;
772 struct cache_request *rq;
773 struct inode *inode = file_inode(filp);
774 int err;
775
776 if (count == 0)
777 return 0;
778
779 mutex_lock(&inode->i_mutex); /* protect against multiple concurrent
780 * readers on this file */
781 again:
782 spin_lock(&queue_lock);
783 /* need to find next request */
784 while (rp->q.list.next != &cd->queue &&
785 list_entry(rp->q.list.next, struct cache_queue, list)
786 ->reader) {
787 struct list_head *next = rp->q.list.next;
788 list_move(&rp->q.list, next);
789 }
790 if (rp->q.list.next == &cd->queue) {
791 spin_unlock(&queue_lock);
792 mutex_unlock(&inode->i_mutex);
793 WARN_ON_ONCE(rp->offset);
794 return 0;
795 }
796 rq = container_of(rp->q.list.next, struct cache_request, q.list);
797 WARN_ON_ONCE(rq->q.reader);
798 if (rp->offset == 0)
799 rq->readers++;
800 spin_unlock(&queue_lock);
801
802 if (rq->len == 0) {
803 err = cache_request(cd, rq);
804 if (err < 0)
805 goto out;
806 rq->len = err;
807 }
808
809 if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
810 err = -EAGAIN;
811 spin_lock(&queue_lock);
812 list_move(&rp->q.list, &rq->q.list);
813 spin_unlock(&queue_lock);
814 } else {
815 if (rp->offset + count > rq->len)
816 count = rq->len - rp->offset;
817 err = -EFAULT;
818 if (copy_to_user(buf, rq->buf + rp->offset, count))
819 goto out;
820 rp->offset += count;
821 if (rp->offset >= rq->len) {
822 rp->offset = 0;
823 spin_lock(&queue_lock);
824 list_move(&rp->q.list, &rq->q.list);
825 spin_unlock(&queue_lock);
826 }
827 err = 0;
828 }
829 out:
830 if (rp->offset == 0) {
831 /* need to release rq */
832 spin_lock(&queue_lock);
833 rq->readers--;
834 if (rq->readers == 0 &&
835 !test_bit(CACHE_PENDING, &rq->item->flags)) {
836 list_del(&rq->q.list);
837 spin_unlock(&queue_lock);
838 cache_put(rq->item, cd);
839 kfree(rq->buf);
840 kfree(rq);
841 } else
842 spin_unlock(&queue_lock);
843 }
844 if (err == -EAGAIN)
845 goto again;
846 mutex_unlock(&inode->i_mutex);
847 return err ? err : count;
848 }
849
850 static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
851 size_t count, struct cache_detail *cd)
852 {
853 ssize_t ret;
854
855 if (count == 0)
856 return -EINVAL;
857 if (copy_from_user(kaddr, buf, count))
858 return -EFAULT;
859 kaddr[count] = '\0';
860 ret = cd->cache_parse(cd, kaddr, count);
861 if (!ret)
862 ret = count;
863 return ret;
864 }
865
866 static ssize_t cache_slow_downcall(const char __user *buf,
867 size_t count, struct cache_detail *cd)
868 {
869 static char write_buf[8192]; /* protected by queue_io_mutex */
870 ssize_t ret = -EINVAL;
871
872 if (count >= sizeof(write_buf))
873 goto out;
874 mutex_lock(&queue_io_mutex);
875 ret = cache_do_downcall(write_buf, buf, count, cd);
876 mutex_unlock(&queue_io_mutex);
877 out:
878 return ret;
879 }
880
881 static ssize_t cache_downcall(struct address_space *mapping,
882 const char __user *buf,
883 size_t count, struct cache_detail *cd)
884 {
885 struct page *page;
886 char *kaddr;
887 ssize_t ret = -ENOMEM;
888
889 if (count >= PAGE_CACHE_SIZE)
890 goto out_slow;
891
892 page = find_or_create_page(mapping, 0, GFP_KERNEL);
893 if (!page)
894 goto out_slow;
895
896 kaddr = kmap(page);
897 ret = cache_do_downcall(kaddr, buf, count, cd);
898 kunmap(page);
899 unlock_page(page);
900 page_cache_release(page);
901 return ret;
902 out_slow:
903 return cache_slow_downcall(buf, count, cd);
904 }
905
906 static ssize_t cache_write(struct file *filp, const char __user *buf,
907 size_t count, loff_t *ppos,
908 struct cache_detail *cd)
909 {
910 struct address_space *mapping = filp->f_mapping;
911 struct inode *inode = file_inode(filp);
912 ssize_t ret = -EINVAL;
913
914 if (!cd->cache_parse)
915 goto out;
916
917 mutex_lock(&inode->i_mutex);
918 ret = cache_downcall(mapping, buf, count, cd);
919 mutex_unlock(&inode->i_mutex);
920 out:
921 return ret;
922 }
923
924 static DECLARE_WAIT_QUEUE_HEAD(queue_wait);
925
926 static unsigned int cache_poll(struct file *filp, poll_table *wait,
927 struct cache_detail *cd)
928 {
929 unsigned int mask;
930 struct cache_reader *rp = filp->private_data;
931 struct cache_queue *cq;
932
933 poll_wait(filp, &queue_wait, wait);
934
935 /* always allow write */
936 mask = POLLOUT | POLLWRNORM;
937
938 if (!rp)
939 return mask;
940
941 spin_lock(&queue_lock);
942
943 for (cq = &rp->q; &cq->list != &cd->queue;
944 cq = list_entry(cq->list.next, struct cache_queue, list))
945 if (!cq->reader) {
946 mask |= POLLIN | POLLRDNORM;
947 break;
948 }
949 spin_unlock(&queue_lock);
950 return mask;
951 }
952
953 static int cache_ioctl(struct inode *ino, struct file *filp,
954 unsigned int cmd, unsigned long arg,
955 struct cache_detail *cd)
956 {
957 int len = 0;
958 struct cache_reader *rp = filp->private_data;
959 struct cache_queue *cq;
960
961 if (cmd != FIONREAD || !rp)
962 return -EINVAL;
963
964 spin_lock(&queue_lock);
965
966 /* only find the length remaining in current request,
967 * or the length of the next request
968 */
969 for (cq = &rp->q; &cq->list != &cd->queue;
970 cq = list_entry(cq->list.next, struct cache_queue, list))
971 if (!cq->reader) {
972 struct cache_request *cr =
973 container_of(cq, struct cache_request, q);
974 len = cr->len - rp->offset;
975 break;
976 }
977 spin_unlock(&queue_lock);
978
979 return put_user(len, (int __user *)arg);
980 }
981
982 static int cache_open(struct inode *inode, struct file *filp,
983 struct cache_detail *cd)
984 {
985 struct cache_reader *rp = NULL;
986
987 if (!cd || !try_module_get(cd->owner))
988 return -EACCES;
989 nonseekable_open(inode, filp);
990 if (filp->f_mode & FMODE_READ) {
991 rp = kmalloc(sizeof(*rp), GFP_KERNEL);
992 if (!rp) {
993 module_put(cd->owner);
994 return -ENOMEM;
995 }
996 rp->offset = 0;
997 rp->q.reader = 1;
998 atomic_inc(&cd->readers);
999 spin_lock(&queue_lock);
1000 list_add(&rp->q.list, &cd->queue);
1001 spin_unlock(&queue_lock);
1002 }
1003 filp->private_data = rp;
1004 return 0;
1005 }
1006
1007 static int cache_release(struct inode *inode, struct file *filp,
1008 struct cache_detail *cd)
1009 {
1010 struct cache_reader *rp = filp->private_data;
1011
1012 if (rp) {
1013 spin_lock(&queue_lock);
1014 if (rp->offset) {
1015 struct cache_queue *cq;
1016 for (cq = &rp->q; &cq->list != &cd->queue;
1017 cq = list_entry(cq->list.next, struct cache_queue, list))
1018 if (!cq->reader) {
1019 container_of(cq, struct cache_request, q)
1020 ->readers--;
1021 break;
1022 }
1023 rp->offset = 0;
1024 }
1025 list_del(&rp->q.list);
1026 spin_unlock(&queue_lock);
1027
1028 filp->private_data = NULL;
1029 kfree(rp);
1030
1031 cd->last_close = seconds_since_boot();
1032 atomic_dec(&cd->readers);
1033 }
1034 module_put(cd->owner);
1035 return 0;
1036 }
1037
1038
1039
1040 static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
1041 {
1042 struct cache_queue *cq, *tmp;
1043 struct cache_request *cr;
1044 struct list_head dequeued;
1045
1046 INIT_LIST_HEAD(&dequeued);
1047 spin_lock(&queue_lock);
1048 list_for_each_entry_safe(cq, tmp, &detail->queue, list)
1049 if (!cq->reader) {
1050 cr = container_of(cq, struct cache_request, q);
1051 if (cr->item != ch)
1052 continue;
1053 if (test_bit(CACHE_PENDING, &ch->flags))
1054 /* Lost a race and it is pending again */
1055 break;
1056 if (cr->readers != 0)
1057 continue;
1058 list_move(&cr->q.list, &dequeued);
1059 }
1060 spin_unlock(&queue_lock);
1061 while (!list_empty(&dequeued)) {
1062 cr = list_entry(dequeued.next, struct cache_request, q.list);
1063 list_del(&cr->q.list);
1064 cache_put(cr->item, detail);
1065 kfree(cr->buf);
1066 kfree(cr);
1067 }
1068 }
1069
1070 /*
1071 * Support routines for text-based upcalls.
1072 * Fields are separated by spaces.
1073 * Fields are either mangled to quote space tab newline slosh with slosh
1074 * or hexified with a leading \x
1075 * Record is terminated with newline.
1076 *
1077 */
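
/*
 * For example (illustrative values): qword_add() with the string "a b"
 * followed by qword_addhex() with the two bytes { 0x0a, 0x1b } leaves
 * the buffer holding
 *
 *	a\040b \x0a1b
 *
 * i.e. the embedded space is escaped as octal \040, the binary field
 * becomes \x followed by hex digits, and each field is terminated by a
 * single space.
 */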
1078
1079 void qword_add(char **bpp, int *lp, char *str)
1080 {
1081 char *bp = *bpp;
1082 int len = *lp;
1083 int ret;
1084
1085 if (len < 0) return;
1086
1087 ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t");
1088 if (ret >= len) {
1089 bp += len;
1090 len = -1;
1091 } else {
1092 bp += ret;
1093 len -= ret;
1094 *bp++ = ' ';
1095 len--;
1096 }
1097 *bpp = bp;
1098 *lp = len;
1099 }
1100 EXPORT_SYMBOL_GPL(qword_add);
1101
1102 void qword_addhex(char **bpp, int *lp, char *buf, int blen)
1103 {
1104 char *bp = *bpp;
1105 int len = *lp;
1106
1107 if (len < 0) return;
1108
1109 if (len > 2) {
1110 *bp++ = '\\';
1111 *bp++ = 'x';
1112 len -= 2;
1113 while (blen && len >= 2) {
1114 bp = hex_byte_pack(bp, *buf++);
1115 len -= 2;
1116 blen--;
1117 }
1118 }
1119 if (blen || len<1) len = -1;
1120 else {
1121 *bp++ = ' ';
1122 len--;
1123 }
1124 *bpp = bp;
1125 *lp = len;
1126 }
1127 EXPORT_SYMBOL_GPL(qword_addhex);
1128
1129 static void warn_no_listener(struct cache_detail *detail)
1130 {
1131 if (detail->last_warn != detail->last_close) {
1132 detail->last_warn = detail->last_close;
1133 if (detail->warn_no_listener)
1134 detail->warn_no_listener(detail, detail->last_close != 0);
1135 }
1136 }
1137
1138 static bool cache_listeners_exist(struct cache_detail *detail)
1139 {
1140 if (atomic_read(&detail->readers))
1141 return true;
1142 if (detail->last_close == 0)
1143 /* This cache was never opened */
1144 return false;
1145 if (detail->last_close < seconds_since_boot() - 30)
1146 /*
1147 * We allow for the possibility that someone might
1148 * restart a userspace daemon without restarting the
1149 * server; but after 30 seconds, we give up.
1150 */
1151 return false;
1152 return true;
1153 }
1154
1155 /*
1156 * register an upcall request to user-space and queue it up for read() by the
1157 * upcall daemon.
1158 *
1159 * Each request is at most one page long.
1160 */
1161 int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
1162 {
1163
1164 char *buf;
1165 struct cache_request *crq;
1166 int ret = 0;
1167
1168 if (!detail->cache_request)
1169 return -EINVAL;
1170
1171 if (!cache_listeners_exist(detail)) {
1172 warn_no_listener(detail);
1173 return -EINVAL;
1174 }
1175 if (test_bit(CACHE_CLEANED, &h->flags))
1176 /* Too late to make an upcall */
1177 return -EAGAIN;
1178
1179 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1180 if (!buf)
1181 return -EAGAIN;
1182
1183 crq = kmalloc(sizeof (*crq), GFP_KERNEL);
1184 if (!crq) {
1185 kfree(buf);
1186 return -EAGAIN;
1187 }
1188
1189 crq->q.reader = 0;
1190 crq->buf = buf;
1191 crq->len = 0;
1192 crq->readers = 0;
1193 spin_lock(&queue_lock);
1194 if (test_bit(CACHE_PENDING, &h->flags)) {
1195 crq->item = cache_get(h);
1196 list_add_tail(&crq->q.list, &detail->queue);
1197 } else
1198 /* Lost a race, no longer PENDING, so don't enqueue */
1199 ret = -EAGAIN;
1200 spin_unlock(&queue_lock);
1201 wake_up(&queue_wait);
1202 if (ret == -EAGAIN) {
1203 kfree(buf);
1204 kfree(crq);
1205 }
1206 return ret;
1207 }
1208 EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
1209
1210 /*
1211 * parse a message from user-space and pass it
1212 * to an appropriate cache
1213 * Messages are, like requests, separated into fields by
1214 * spaces and dequoted as \xHEXSTRING or embedded \nnn octal
1215 *
1216 * Message is
1217 * reply cachename expiry key ... content....
1218 *
1219 * key and content are both parsed by cache
1220 */
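
/*
 * For example (illustrative input only), given the buffer
 *
 *	\x0a0b0c0d server1\040local 12345
 *
 * successive qword_get() calls return the four raw bytes 0a 0b 0c 0d,
 * then the string "server1 local" (the \040 decodes back to a space),
 * then "12345", with *bpp advanced past each field in turn.
 */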
1221
1222 int qword_get(char **bpp, char *dest, int bufsize)
1223 {
1224 /* return bytes copied, or -1 on error */
1225 char *bp = *bpp;
1226 int len = 0;
1227
1228 while (*bp == ' ') bp++;
1229
1230 if (bp[0] == '\\' && bp[1] == 'x') {
1231 /* HEX STRING */
1232 bp += 2;
1233 while (len < bufsize - 1) {
1234 int h, l;
1235
1236 h = hex_to_bin(bp[0]);
1237 if (h < 0)
1238 break;
1239
1240 l = hex_to_bin(bp[1]);
1241 if (l < 0)
1242 break;
1243
1244 *dest++ = (h << 4) | l;
1245 bp += 2;
1246 len++;
1247 }
1248 } else {
1249 /* text with \nnn octal quoting */
1250 while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
1251 if (*bp == '\\' &&
1252 isodigit(bp[1]) && (bp[1] <= '3') &&
1253 isodigit(bp[2]) &&
1254 isodigit(bp[3])) {
1255 int byte = (*++bp -'0');
1256 bp++;
1257 byte = (byte << 3) | (*bp++ - '0');
1258 byte = (byte << 3) | (*bp++ - '0');
1259 *dest++ = byte;
1260 len++;
1261 } else {
1262 *dest++ = *bp++;
1263 len++;
1264 }
1265 }
1266 }
1267
1268 if (*bp != ' ' && *bp != '\n' && *bp != '\0')
1269 return -1;
1270 while (*bp == ' ') bp++;
1271 *bpp = bp;
1272 *dest = '\0';
1273 return len;
1274 }
1275 EXPORT_SYMBOL_GPL(qword_get);
1276
1277
1278 /*
1279 * support /proc/sunrpc/cache/$CACHENAME/content
1280 * as a seqfile.
1281 * We call ->cache_show passing NULL for the item to
1282 * get a header, then pass each real item in the cache
1283 */
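
/*
 * Reading the content file therefore yields one header line from
 * ->cache_show(m, cd, NULL) followed by one line per cached entry,
 * e.g. for a hypothetical two-field cache (illustrative output only):
 *
 *	#domain address
 *	example.org 192.0.2.1
 *
 * plus an extra "# expiry=... refcnt=... flags=..." line per entry
 * when RPC cache debugging is enabled, as c_show() below shows.
 */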
1284
1285 void *cache_seq_start(struct seq_file *m, loff_t *pos)
1286 __acquires(cd->hash_lock)
1287 {
1288 loff_t n = *pos;
1289 unsigned int hash, entry;
1290 struct cache_head *ch;
1291 struct cache_detail *cd = m->private;
1292
1293 read_lock(&cd->hash_lock);
1294 if (!n--)
1295 return SEQ_START_TOKEN;
1296 hash = n >> 32;
1297 entry = n & ((1LL<<32) - 1);
1298
1299 hlist_for_each_entry(ch, &cd->hash_table[hash], cache_list)
1300 if (!entry--)
1301 return ch;
1302 n &= ~((1LL<<32) - 1);
1303 do {
1304 hash++;
1305 n += 1LL<<32;
1306 } while(hash < cd->hash_size &&
1307 hlist_empty(&cd->hash_table[hash]));
1308 if (hash >= cd->hash_size)
1309 return NULL;
1310 *pos = n+1;
1311 return hlist_entry_safe(cd->hash_table[hash].first,
1312 struct cache_head, cache_list);
1313 }
1314 EXPORT_SYMBOL_GPL(cache_seq_start);
1315
1316 void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
1317 {
1318 struct cache_head *ch = p;
1319 int hash = (*pos >> 32);
1320 struct cache_detail *cd = m->private;
1321
1322 if (p == SEQ_START_TOKEN)
1323 hash = 0;
1324 else if (ch->cache_list.next == NULL) {
1325 hash++;
1326 *pos += 1LL<<32;
1327 } else {
1328 ++*pos;
1329 return hlist_entry_safe(ch->cache_list.next,
1330 struct cache_head, cache_list);
1331 }
1332 *pos &= ~((1LL<<32) - 1);
1333 while (hash < cd->hash_size &&
1334 hlist_empty(&cd->hash_table[hash])) {
1335 hash++;
1336 *pos += 1LL<<32;
1337 }
1338 if (hash >= cd->hash_size)
1339 return NULL;
1340 ++*pos;
1341 return hlist_entry_safe(cd->hash_table[hash].first,
1342 struct cache_head, cache_list);
1343 }
1344 EXPORT_SYMBOL_GPL(cache_seq_next);
1345
1346 void cache_seq_stop(struct seq_file *m, void *p)
1347 __releases(cd->hash_lock)
1348 {
1349 struct cache_detail *cd = m->private;
1350 read_unlock(&cd->hash_lock);
1351 }
1352 EXPORT_SYMBOL_GPL(cache_seq_stop);
1353
1354 static int c_show(struct seq_file *m, void *p)
1355 {
1356 struct cache_head *cp = p;
1357 struct cache_detail *cd = m->private;
1358
1359 if (p == SEQ_START_TOKEN)
1360 return cd->cache_show(m, cd, NULL);
1361
1362 ifdebug(CACHE)
1363 seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
1364 convert_to_wallclock(cp->expiry_time),
1365 atomic_read(&cp->ref.refcount), cp->flags);
1366 cache_get(cp);
1367 if (cache_check(cd, cp, NULL))
1368 /* cache_check does a cache_put on failure */
1369 seq_printf(m, "# ");
1370 else {
1371 if (cache_is_expired(cd, cp))
1372 seq_printf(m, "# ");
1373 cache_put(cp, cd);
1374 }
1375
1376 return cd->cache_show(m, cd, cp);
1377 }
1378
1379 static const struct seq_operations cache_content_op = {
1380 .start = cache_seq_start,
1381 .next = cache_seq_next,
1382 .stop = cache_seq_stop,
1383 .show = c_show,
1384 };
1385
1386 static int content_open(struct inode *inode, struct file *file,
1387 struct cache_detail *cd)
1388 {
1389 struct seq_file *seq;
1390 int err;
1391
1392 if (!cd || !try_module_get(cd->owner))
1393 return -EACCES;
1394
1395 err = seq_open(file, &cache_content_op);
1396 if (err) {
1397 module_put(cd->owner);
1398 return err;
1399 }
1400
1401 seq = file->private_data;
1402 seq->private = cd;
1403 return 0;
1404 }
1405
1406 static int content_release(struct inode *inode, struct file *file,
1407 struct cache_detail *cd)
1408 {
1409 int ret = seq_release(inode, file);
1410 module_put(cd->owner);
1411 return ret;
1412 }
1413
1414 static int open_flush(struct inode *inode, struct file *file,
1415 struct cache_detail *cd)
1416 {
1417 if (!cd || !try_module_get(cd->owner))
1418 return -EACCES;
1419 return nonseekable_open(inode, file);
1420 }
1421
1422 static int release_flush(struct inode *inode, struct file *file,
1423 struct cache_detail *cd)
1424 {
1425 module_put(cd->owner);
1426 return 0;
1427 }
1428
1429 static ssize_t read_flush(struct file *file, char __user *buf,
1430 size_t count, loff_t *ppos,
1431 struct cache_detail *cd)
1432 {
1433 char tbuf[22];
1434 unsigned long p = *ppos;
1435 size_t len;
1436
1437 snprintf(tbuf, sizeof(tbuf), "%lu\n", convert_to_wallclock(cd->flush_time));
1438 len = strlen(tbuf);
1439 if (p >= len)
1440 return 0;
1441 len -= p;
1442 if (len > count)
1443 len = count;
1444 if (copy_to_user(buf, (void*)(tbuf+p), len))
1445 return -EFAULT;
1446 *ppos += len;
1447 return len;
1448 }
1449
1450 static ssize_t write_flush(struct file *file, const char __user *buf,
1451 size_t count, loff_t *ppos,
1452 struct cache_detail *cd)
1453 {
1454 char tbuf[20];
1455 char *bp, *ep;
1456 time_t then, now;
1457
1458 if (*ppos || count > sizeof(tbuf)-1)
1459 return -EINVAL;
1460 if (copy_from_user(tbuf, buf, count))
1461 return -EFAULT;
1462 tbuf[count] = 0;
1463 simple_strtoul(tbuf, &ep, 0);
1464 if (*ep && *ep != '\n')
1465 return -EINVAL;
1466
1467 bp = tbuf;
1468 then = get_expiry(&bp);
1469 now = seconds_since_boot();
1470 cd->nextcheck = now;
1471 /* Can only set flush_time to 1 second beyond "now", or
1472 * possibly 1 second beyond flushtime. This is because
1473 * flush_time never goes backwards so it mustn't get too far
1474 * ahead of time.
1475 */
1476 if (then >= now) {
1477 /* Want to flush everything, so behave like cache_purge() */
1478 if (cd->flush_time >= now)
1479 now = cd->flush_time + 1;
1480 then = now;
1481 }
1482
1483 cd->flush_time = then;
1484 cache_flush();
1485
1486 *ppos += count;
1487 return count;
1488 }
1489
1490 static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
1491 size_t count, loff_t *ppos)
1492 {
1493 struct cache_detail *cd = PDE_DATA(file_inode(filp));
1494
1495 return cache_read(filp, buf, count, ppos, cd);
1496 }
1497
1498 static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
1499 size_t count, loff_t *ppos)
1500 {
1501 struct cache_detail *cd = PDE_DATA(file_inode(filp));
1502
1503 return cache_write(filp, buf, count, ppos, cd);
1504 }
1505
1506 static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait)
1507 {
1508 struct cache_detail *cd = PDE_DATA(file_inode(filp));
1509
1510 return cache_poll(filp, wait, cd);
1511 }
1512
1513 static long cache_ioctl_procfs(struct file *filp,
1514 unsigned int cmd, unsigned long arg)
1515 {
1516 struct inode *inode = file_inode(filp);
1517 struct cache_detail *cd = PDE_DATA(inode);
1518
1519 return cache_ioctl(inode, filp, cmd, arg, cd);
1520 }
1521
1522 static int cache_open_procfs(struct inode *inode, struct file *filp)
1523 {
1524 struct cache_detail *cd = PDE_DATA(inode);
1525
1526 return cache_open(inode, filp, cd);
1527 }
1528
1529 static int cache_release_procfs(struct inode *inode, struct file *filp)
1530 {
1531 struct cache_detail *cd = PDE_DATA(inode);
1532
1533 return cache_release(inode, filp, cd);
1534 }
1535
1536 static const struct file_operations cache_file_operations_procfs = {
1537 .owner = THIS_MODULE,
1538 .llseek = no_llseek,
1539 .read = cache_read_procfs,
1540 .write = cache_write_procfs,
1541 .poll = cache_poll_procfs,
1542 .unlocked_ioctl = cache_ioctl_procfs, /* for FIONREAD */
1543 .open = cache_open_procfs,
1544 .release = cache_release_procfs,
1545 };
1546
1547 static int content_open_procfs(struct inode *inode, struct file *filp)
1548 {
1549 struct cache_detail *cd = PDE_DATA(inode);
1550
1551 return content_open(inode, filp, cd);
1552 }
1553
1554 static int content_release_procfs(struct inode *inode, struct file *filp)
1555 {
1556 struct cache_detail *cd = PDE_DATA(inode);
1557
1558 return content_release(inode, filp, cd);
1559 }
1560
1561 static const struct file_operations content_file_operations_procfs = {
1562 .open = content_open_procfs,
1563 .read = seq_read,
1564 .llseek = seq_lseek,
1565 .release = content_release_procfs,
1566 };
1567
1568 static int open_flush_procfs(struct inode *inode, struct file *filp)
1569 {
1570 struct cache_detail *cd = PDE_DATA(inode);
1571
1572 return open_flush(inode, filp, cd);
1573 }
1574
1575 static int release_flush_procfs(struct inode *inode, struct file *filp)
1576 {
1577 struct cache_detail *cd = PDE_DATA(inode);
1578
1579 return release_flush(inode, filp, cd);
1580 }
1581
1582 static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
1583 size_t count, loff_t *ppos)
1584 {
1585 struct cache_detail *cd = PDE_DATA(file_inode(filp));
1586
1587 return read_flush(filp, buf, count, ppos, cd);
1588 }
1589
1590 static ssize_t write_flush_procfs(struct file *filp,
1591 const char __user *buf,
1592 size_t count, loff_t *ppos)
1593 {
1594 struct cache_detail *cd = PDE_DATA(file_inode(filp));
1595
1596 return write_flush(filp, buf, count, ppos, cd);
1597 }
1598
1599 static const struct file_operations cache_flush_operations_procfs = {
1600 .open = open_flush_procfs,
1601 .read = read_flush_procfs,
1602 .write = write_flush_procfs,
1603 .release = release_flush_procfs,
1604 .llseek = no_llseek,
1605 };
1606
1607 static void remove_cache_proc_entries(struct cache_detail *cd, struct net *net)
1608 {
1609 struct sunrpc_net *sn;
1610
1611 if (cd->u.procfs.proc_ent == NULL)
1612 return;
1613 if (cd->u.procfs.flush_ent)
1614 remove_proc_entry("flush", cd->u.procfs.proc_ent);
1615 if (cd->u.procfs.channel_ent)
1616 remove_proc_entry("channel", cd->u.procfs.proc_ent);
1617 if (cd->u.procfs.content_ent)
1618 remove_proc_entry("content", cd->u.procfs.proc_ent);
1619 cd->u.procfs.proc_ent = NULL;
1620 sn = net_generic(net, sunrpc_net_id);
1621 remove_proc_entry(cd->name, sn->proc_net_rpc);
1622 }
1623
1624 #ifdef CONFIG_PROC_FS
1625 static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1626 {
1627 struct proc_dir_entry *p;
1628 struct sunrpc_net *sn;
1629
1630 sn = net_generic(net, sunrpc_net_id);
1631 cd->u.procfs.proc_ent = proc_mkdir(cd->name, sn->proc_net_rpc);
1632 if (cd->u.procfs.proc_ent == NULL)
1633 goto out_nomem;
1634 cd->u.procfs.channel_ent = NULL;
1635 cd->u.procfs.content_ent = NULL;
1636
1637 p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR,
1638 cd->u.procfs.proc_ent,
1639 &cache_flush_operations_procfs, cd);
1640 cd->u.procfs.flush_ent = p;
1641 if (p == NULL)
1642 goto out_nomem;
1643
1644 if (cd->cache_request || cd->cache_parse) {
1645 p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR,
1646 cd->u.procfs.proc_ent,
1647 &cache_file_operations_procfs, cd);
1648 cd->u.procfs.channel_ent = p;
1649 if (p == NULL)
1650 goto out_nomem;
1651 }
1652 if (cd->cache_show) {
1653 p = proc_create_data("content", S_IFREG|S_IRUSR,
1654 cd->u.procfs.proc_ent,
1655 &content_file_operations_procfs, cd);
1656 cd->u.procfs.content_ent = p;
1657 if (p == NULL)
1658 goto out_nomem;
1659 }
1660 return 0;
1661 out_nomem:
1662 remove_cache_proc_entries(cd, net);
1663 return -ENOMEM;
1664 }
1665 #else /* CONFIG_PROC_FS */
1666 static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1667 {
1668 return 0;
1669 }
1670 #endif
1671
1672 void __init cache_initialize(void)
1673 {
1674 INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
1675 }
1676
1677 int cache_register_net(struct cache_detail *cd, struct net *net)
1678 {
1679 int ret;
1680
1681 sunrpc_init_cache_detail(cd);
1682 ret = create_cache_proc_entries(cd, net);
1683 if (ret)
1684 sunrpc_destroy_cache_detail(cd);
1685 return ret;
1686 }
1687 EXPORT_SYMBOL_GPL(cache_register_net);
1688
1689 void cache_unregister_net(struct cache_detail *cd, struct net *net)
1690 {
1691 remove_cache_proc_entries(cd, net);
1692 sunrpc_destroy_cache_detail(cd);
1693 }
1694 EXPORT_SYMBOL_GPL(cache_unregister_net);
1695
1696 struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net)
1697 {
1698 struct cache_detail *cd;
1699 int i;
1700
1701 cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
1702 if (cd == NULL)
1703 return ERR_PTR(-ENOMEM);
1704
1705 cd->hash_table = kzalloc(cd->hash_size * sizeof(struct hlist_head),
1706 GFP_KERNEL);
1707 if (cd->hash_table == NULL) {
1708 kfree(cd);
1709 return ERR_PTR(-ENOMEM);
1710 }
1711
1712 for (i = 0; i < cd->hash_size; i++)
1713 INIT_HLIST_HEAD(&cd->hash_table[i]);
1714 cd->net = net;
1715 return cd;
1716 }
1717 EXPORT_SYMBOL_GPL(cache_create_net);
1718
1719 void cache_destroy_net(struct cache_detail *cd, struct net *net)
1720 {
1721 kfree(cd->hash_table);
1722 kfree(cd);
1723 }
1724 EXPORT_SYMBOL_GPL(cache_destroy_net);
1725
1726 static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
1727 size_t count, loff_t *ppos)
1728 {
1729 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1730
1731 return cache_read(filp, buf, count, ppos, cd);
1732 }
1733
1734 static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
1735 size_t count, loff_t *ppos)
1736 {
1737 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1738
1739 return cache_write(filp, buf, count, ppos, cd);
1740 }
1741
1742 static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait)
1743 {
1744 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1745
1746 return cache_poll(filp, wait, cd);
1747 }
1748
1749 static long cache_ioctl_pipefs(struct file *filp,
1750 unsigned int cmd, unsigned long arg)
1751 {
1752 struct inode *inode = file_inode(filp);
1753 struct cache_detail *cd = RPC_I(inode)->private;
1754
1755 return cache_ioctl(inode, filp, cmd, arg, cd);
1756 }
1757
1758 static int cache_open_pipefs(struct inode *inode, struct file *filp)
1759 {
1760 struct cache_detail *cd = RPC_I(inode)->private;
1761
1762 return cache_open(inode, filp, cd);
1763 }
1764
1765 static int cache_release_pipefs(struct inode *inode, struct file *filp)
1766 {
1767 struct cache_detail *cd = RPC_I(inode)->private;
1768
1769 return cache_release(inode, filp, cd);
1770 }
1771
1772 const struct file_operations cache_file_operations_pipefs = {
1773 .owner = THIS_MODULE,
1774 .llseek = no_llseek,
1775 .read = cache_read_pipefs,
1776 .write = cache_write_pipefs,
1777 .poll = cache_poll_pipefs,
1778 .unlocked_ioctl = cache_ioctl_pipefs, /* for FIONREAD */
1779 .open = cache_open_pipefs,
1780 .release = cache_release_pipefs,
1781 };
1782
1783 static int content_open_pipefs(struct inode *inode, struct file *filp)
1784 {
1785 struct cache_detail *cd = RPC_I(inode)->private;
1786
1787 return content_open(inode, filp, cd);
1788 }
1789
1790 static int content_release_pipefs(struct inode *inode, struct file *filp)
1791 {
1792 struct cache_detail *cd = RPC_I(inode)->private;
1793
1794 return content_release(inode, filp, cd);
1795 }
1796
1797 const struct file_operations content_file_operations_pipefs = {
1798 .open = content_open_pipefs,
1799 .read = seq_read,
1800 .llseek = seq_lseek,
1801 .release = content_release_pipefs,
1802 };
1803
1804 static int open_flush_pipefs(struct inode *inode, struct file *filp)
1805 {
1806 struct cache_detail *cd = RPC_I(inode)->private;
1807
1808 return open_flush(inode, filp, cd);
1809 }
1810
1811 static int release_flush_pipefs(struct inode *inode, struct file *filp)
1812 {
1813 struct cache_detail *cd = RPC_I(inode)->private;
1814
1815 return release_flush(inode, filp, cd);
1816 }
1817
1818 static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
1819 size_t count, loff_t *ppos)
1820 {
1821 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1822
1823 return read_flush(filp, buf, count, ppos, cd);
1824 }
1825
1826 static ssize_t write_flush_pipefs(struct file *filp,
1827 const char __user *buf,
1828 size_t count, loff_t *ppos)
1829 {
1830 struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1831
1832 return write_flush(filp, buf, count, ppos, cd);
1833 }
1834
1835 const struct file_operations cache_flush_operations_pipefs = {
1836 .open = open_flush_pipefs,
1837 .read = read_flush_pipefs,
1838 .write = write_flush_pipefs,
1839 .release = release_flush_pipefs,
1840 .llseek = no_llseek,
1841 };
1842
1843 int sunrpc_cache_register_pipefs(struct dentry *parent,
1844 const char *name, umode_t umode,
1845 struct cache_detail *cd)
1846 {
1847 struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd);
1848 if (IS_ERR(dir))
1849 return PTR_ERR(dir);
1850 cd->u.pipefs.dir = dir;
1851 return 0;
1852 }
1853 EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);
1854
1855 void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
1856 {
1857 rpc_remove_cache_dir(cd->u.pipefs.dir);
1858 cd->u.pipefs.dir = NULL;
1859 }
1860 EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);
1861
1862