/*
 * Open file cache.
 *
 * (c) 2015 - Jeff Layton <jeff.layton@primarydata.com>
 */

#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/fsnotify_backend.h>
#include <linux/fsnotify.h>
#include <linux/seq_file.h>

#include "vfs.h"
#include "nfsd.h"
#include "nfsfh.h"
#include "netns.h"
#include "filecache.h"
#include "trace.h"

#define NFSDDBG_FACILITY NFSDDBG_FH

/* FIXME: dynamically size this for the machine somehow? */
#define NFSD_FILE_HASH_BITS 12
#define NFSD_FILE_HASH_SIZE (1 << NFSD_FILE_HASH_BITS)
#define NFSD_LAUNDRETTE_DELAY (2 * HZ)

#define NFSD_FILE_LRU_RESCAN (0)
#define NFSD_FILE_SHUTDOWN (1)
#define NFSD_FILE_LRU_THRESHOLD (4096UL)
#define NFSD_FILE_LRU_LIMIT (NFSD_FILE_LRU_THRESHOLD << 2)

/* We only care about NFSD_MAY_READ/WRITE for this cache */
#define NFSD_FILE_MAY_MASK (NFSD_MAY_READ|NFSD_MAY_WRITE)

struct nfsd_fcache_bucket {
        struct hlist_head nfb_head;
        spinlock_t nfb_lock;
        unsigned int nfb_count;
        unsigned int nfb_maxcount;
};

static DEFINE_PER_CPU(unsigned long, nfsd_file_cache_hits);

struct nfsd_fcache_disposal {
        struct list_head list;
        struct work_struct work;
        struct net *net;
        spinlock_t lock;
        struct list_head freeme;
        struct rcu_head rcu;
};

struct workqueue_struct *nfsd_filecache_wq __read_mostly;

static struct kmem_cache *nfsd_file_slab;
static struct kmem_cache *nfsd_file_mark_slab;
static struct nfsd_fcache_bucket *nfsd_file_hashtbl;
static struct list_lru nfsd_file_lru;
static long nfsd_file_lru_flags;
static struct fsnotify_group *nfsd_file_fsnotify_group;
static atomic_long_t nfsd_filecache_count;
static struct delayed_work nfsd_filecache_laundrette;
static DEFINE_SPINLOCK(laundrette_lock);
static LIST_HEAD(laundrettes);

static void nfsd_file_gc(void);

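/*
 * Schedule a delayed run of the file cache laundrette, unless the cache is
 * empty or we are shutting down.
 */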
static void
nfsd_file_schedule_laundrette(void)
{
        long count = atomic_long_read(&nfsd_filecache_count);

        if (count == 0 || test_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags))
                return;

        queue_delayed_work(system_wq, &nfsd_filecache_laundrette,
                        NFSD_LAUNDRETTE_DELAY);
}

static void
nfsd_file_slab_free(struct rcu_head *rcu)
{
        struct nfsd_file *nf = container_of(rcu, struct nfsd_file, nf_rcu);

        put_cred(nf->nf_cred);
        kmem_cache_free(nfsd_file_slab, nf);
}

static void
nfsd_file_mark_free(struct fsnotify_mark *mark)
{
        struct nfsd_file_mark *nfm = container_of(mark, struct nfsd_file_mark,
                                                  nfm_mark);

        kmem_cache_free(nfsd_file_mark_slab, nfm);
}

static struct nfsd_file_mark *
nfsd_file_mark_get(struct nfsd_file_mark *nfm)
{
        if (!atomic_inc_not_zero(&nfm->nfm_ref))
                return NULL;
        return nfm;
}

static void
nfsd_file_mark_put(struct nfsd_file_mark *nfm)
{
        if (atomic_dec_and_test(&nfm->nfm_ref)) {

                fsnotify_destroy_mark(&nfm->nfm_mark, nfsd_file_fsnotify_group);
                fsnotify_put_mark(&nfm->nfm_mark);
        }
}

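/*
 * Find the fsnotify mark for this inode, or create and attach a new one.
 * Returns NULL on allocation failure or if a mark could not be attached.
 */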
static struct nfsd_file_mark *
nfsd_file_mark_find_or_create(struct nfsd_file *nf)
{
        int err;
        struct fsnotify_mark *mark;
        struct nfsd_file_mark *nfm = NULL, *new;
        struct inode *inode = nf->nf_inode;

        do {
                mutex_lock(&nfsd_file_fsnotify_group->mark_mutex);
                mark = fsnotify_find_mark(&inode->i_fsnotify_marks,
                                          nfsd_file_fsnotify_group);
                if (mark) {
                        nfm = nfsd_file_mark_get(container_of(mark,
                                                 struct nfsd_file_mark,
                                                 nfm_mark));
                        mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex);
                        if (nfm) {
                                fsnotify_put_mark(mark);
                                break;
                        }
                        /* Avoid soft lockup race with nfsd_file_mark_put() */
                        fsnotify_destroy_mark(mark, nfsd_file_fsnotify_group);
                        fsnotify_put_mark(mark);
                } else
                        mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex);

                /* allocate a new nfm */
                new = kmem_cache_alloc(nfsd_file_mark_slab, GFP_KERNEL);
                if (!new)
                        return NULL;
                fsnotify_init_mark(&new->nfm_mark, nfsd_file_fsnotify_group);
                new->nfm_mark.mask = FS_ATTRIB|FS_DELETE_SELF;
                atomic_set(&new->nfm_ref, 1);

                err = fsnotify_add_inode_mark(&new->nfm_mark, inode, 0);

                /*
                 * If the add was successful, then return the object.
                 * Otherwise, we need to put the reference we hold on the
                 * nfm_mark. The fsnotify code will take a reference and put
                 * it on failure, so we can't just free it directly. It's also
                 * not safe to call fsnotify_destroy_mark on it as the
                 * mark->group will be NULL. Thus, we can't let the nfm_ref
                 * counter drive the destruction at this point.
                 */
                if (likely(!err))
                        nfm = new;
                else
                        fsnotify_put_mark(&new->nfm_mark);
        } while (unlikely(err == -EEXIST));

        return nfm;
}

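/*
 * Allocate a new nfsd_file and initialize it for the given inode, access
 * mode, hash bucket and net namespace. The underlying file is not opened
 * here.
 */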
static struct nfsd_file *
nfsd_file_alloc(struct inode *inode, unsigned int may, unsigned int hashval,
                struct net *net)
{
        struct nfsd_file *nf;

        nf = kmem_cache_alloc(nfsd_file_slab, GFP_KERNEL);
        if (nf) {
                INIT_HLIST_NODE(&nf->nf_node);
                INIT_LIST_HEAD(&nf->nf_lru);
                nf->nf_file = NULL;
                nf->nf_cred = get_current_cred();
                nf->nf_net = net;
                nf->nf_flags = 0;
                nf->nf_inode = inode;
                nf->nf_hashval = hashval;
                atomic_set(&nf->nf_ref, 1);
                nf->nf_may = may & NFSD_FILE_MAY_MASK;
                if (may & NFSD_MAY_NOT_BREAK_LEASE) {
                        if (may & NFSD_MAY_WRITE)
                                __set_bit(NFSD_FILE_BREAK_WRITE, &nf->nf_flags);
                        if (may & NFSD_MAY_READ)
                                __set_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
                }
                nf->nf_mark = NULL;
                trace_nfsd_file_alloc(nf);
        }
        return nf;
}

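/*
 * Release the resources attached to an nfsd_file and free it via RCU.
 * Returns true if the caller should flush delayed fputs.
 */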
static bool
nfsd_file_free(struct nfsd_file *nf)
{
        bool flush = false;

        trace_nfsd_file_put_final(nf);
        if (nf->nf_mark)
                nfsd_file_mark_put(nf->nf_mark);
        if (nf->nf_file) {
                get_file(nf->nf_file);
                filp_close(nf->nf_file, NULL);
                fput(nf->nf_file);
                flush = true;
        }
        call_rcu(&nf->nf_rcu, nfsd_file_slab_free);
        return flush;
}

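/*
 * Return true if the file still has dirty pages or pages under writeback.
 */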
static bool
nfsd_file_check_writeback(struct nfsd_file *nf)
{
        struct file *file = nf->nf_file;
        struct address_space *mapping;

        if (!file || !(file->f_mode & FMODE_WRITE))
                return false;
        mapping = file->f_mapping;
        return mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) ||
               mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
}

static int
nfsd_file_check_write_error(struct nfsd_file *nf)
{
        struct file *file = nf->nf_file;

        if (!file || !(file->f_mode & FMODE_WRITE))
                return 0;
        return filemap_check_wb_err(file->f_mapping, READ_ONCE(file->f_wb_err));
}

static bool
nfsd_file_in_use(struct nfsd_file *nf)
{
        return nfsd_file_check_writeback(nf) ||
               nfsd_file_check_write_error(nf);
}

static void
nfsd_file_do_unhash(struct nfsd_file *nf)
{
        lockdep_assert_held(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);

        trace_nfsd_file_unhash(nf);

        if (nfsd_file_check_write_error(nf))
                nfsd_reset_boot_verifier(net_generic(nf->nf_net, nfsd_net_id));
        --nfsd_file_hashtbl[nf->nf_hashval].nfb_count;
        hlist_del_rcu(&nf->nf_node);
        atomic_long_dec(&nfsd_filecache_count);
}

static bool
nfsd_file_unhash(struct nfsd_file *nf)
{
        if (test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
                nfsd_file_do_unhash(nf);
                if (!list_empty(&nf->nf_lru))
                        list_lru_del(&nfsd_file_lru, &nf->nf_lru);
                return true;
        }
        return false;
}

/*
 * Return true if the file was unhashed.
 */
static bool
nfsd_file_unhash_and_release_locked(struct nfsd_file *nf, struct list_head *dispose)
{
        lockdep_assert_held(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);

        trace_nfsd_file_unhash_and_release_locked(nf);
        if (!nfsd_file_unhash(nf))
                return false;
        /* keep final reference for nfsd_file_lru_dispose */
        if (atomic_add_unless(&nf->nf_ref, -1, 1))
                return true;

        list_add(&nf->nf_lru, dispose);
        return true;
}

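/*
 * Drop a reference without triggering garbage collection. Frees the file
 * when the last reference is put. Returns the new reference count.
 */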
static int
nfsd_file_put_noref(struct nfsd_file *nf)
{
        int count;
        trace_nfsd_file_put(nf);

        count = atomic_dec_return(&nf->nf_ref);
        if (!count) {
                WARN_ON(test_bit(NFSD_FILE_HASHED, &nf->nf_flags));
                nfsd_file_free(nf);
        }
        return count;
}

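/*
 * Drop a reference to an nfsd_file. If only the hashtable reference remains
 * and the file is idle, kick the laundrette; if the cache has grown past the
 * hard limit, run garbage collection synchronously.
 */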
void
nfsd_file_put(struct nfsd_file *nf)
{
        bool is_hashed = test_bit(NFSD_FILE_HASHED, &nf->nf_flags) != 0;
        bool unused = !nfsd_file_in_use(nf);

        set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
        if (nfsd_file_put_noref(nf) == 1 && is_hashed && unused)
                nfsd_file_schedule_laundrette();
        if (atomic_long_read(&nfsd_filecache_count) >= NFSD_FILE_LRU_LIMIT)
                nfsd_file_gc();
}

struct nfsd_file *
nfsd_file_get(struct nfsd_file *nf)
{
        if (likely(atomic_inc_not_zero(&nf->nf_ref)))
                return nf;
        return NULL;
}

static void
nfsd_file_dispose_list(struct list_head *dispose)
{
        struct nfsd_file *nf;

        while (!list_empty(dispose)) {
                nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
                list_del(&nf->nf_lru);
                nfsd_file_put_noref(nf);
        }
}

static void
nfsd_file_dispose_list_sync(struct list_head *dispose)
{
        bool flush = false;
        struct nfsd_file *nf;

        while (!list_empty(dispose)) {
                nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
                list_del(&nf->nf_lru);
                if (!atomic_dec_and_test(&nf->nf_ref))
                        continue;
                if (nfsd_file_free(nf))
                        flush = true;
        }
        if (flush)
                flush_delayed_fput();
}

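/*
 * Move the pending per-net disposal list onto a private list for processing.
 */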
static void
nfsd_file_list_remove_disposal(struct list_head *dst,
                               struct nfsd_fcache_disposal *l)
{
        spin_lock(&l->lock);
        list_splice_init(&l->freeme, dst);
        spin_unlock(&l->lock);
}

static void
nfsd_file_list_add_disposal(struct list_head *files, struct net *net)
{
        struct nfsd_fcache_disposal *l;

        rcu_read_lock();
        list_for_each_entry_rcu(l, &laundrettes, list) {
                if (l->net == net) {
                        spin_lock(&l->lock);
                        list_splice_tail_init(files, &l->freeme);
                        spin_unlock(&l->lock);
                        queue_work(nfsd_filecache_wq, &l->work);
                        break;
                }
        }
        rcu_read_unlock();
}

static void
nfsd_file_list_add_pernet(struct list_head *dst, struct list_head *src,
                          struct net *net)
{
        struct nfsd_file *nf, *tmp;

        list_for_each_entry_safe(nf, tmp, src, nf_lru) {
                if (nf->nf_net == net)
                        list_move_tail(&nf->nf_lru, dst);
        }
}

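/*
 * Split the dispose list up by net namespace and hand each batch to the
 * matching per-net laundrette for delayed disposal.
 */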
static void
nfsd_file_dispose_list_delayed(struct list_head *dispose)
{
        LIST_HEAD(list);
        struct nfsd_file *nf;

        while (!list_empty(dispose)) {
                nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
                nfsd_file_list_add_pernet(&list, dispose, nf->nf_net);
                nfsd_file_list_add_disposal(&list, nf->nf_net);
        }
}

/*
 * Note this can deadlock with nfsd_file_cache_purge.
 */
static enum lru_status
nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
                 spinlock_t *lock, void *arg)
        __releases(lock)
        __acquires(lock)
{
        struct list_head *head = arg;
        struct nfsd_file *nf = list_entry(item, struct nfsd_file, nf_lru);

        /*
         * Do a lockless refcount check. The hashtable holds one reference, so
         * we look to see if anything else has a reference, or if any have
         * been put since the shrinker last ran. Those don't get unhashed and
         * released.
         *
         * Note that in the put path, we set the flag and then decrement the
         * counter. Here we check the counter and then test and clear the flag.
         * That order is deliberate to ensure that we can do this locklessly.
         */
        if (atomic_read(&nf->nf_ref) > 1)
                goto out_skip;

        /*
         * Don't throw out files that are still undergoing I/O or
         * that have uncleared errors pending.
         */
        if (nfsd_file_check_writeback(nf))
                goto out_skip;

        if (test_and_clear_bit(NFSD_FILE_REFERENCED, &nf->nf_flags))
                goto out_rescan;

        if (!test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags))
                goto out_skip;

        list_lru_isolate_move(lru, &nf->nf_lru, head);
        return LRU_REMOVED;
out_rescan:
        set_bit(NFSD_FILE_LRU_RESCAN, &nfsd_file_lru_flags);
out_skip:
        return LRU_SKIP;
}

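/*
 * Walk the LRU (under shrinker control when @sc is non-NULL, otherwise the
 * whole list), unhash every entry that was isolated and queue it for delayed
 * disposal. Returns the number of entries removed from the LRU.
 */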
static unsigned long
nfsd_file_lru_walk_list(struct shrink_control *sc)
{
        LIST_HEAD(head);
        struct nfsd_file *nf;
        unsigned long ret;

        if (sc)
                ret = list_lru_shrink_walk(&nfsd_file_lru, sc,
                                           nfsd_file_lru_cb, &head);
        else
                ret = list_lru_walk(&nfsd_file_lru,
                                    nfsd_file_lru_cb,
                                    &head, LONG_MAX);
        list_for_each_entry(nf, &head, nf_lru) {
                spin_lock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
                nfsd_file_do_unhash(nf);
                spin_unlock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
        }
        nfsd_file_dispose_list_delayed(&head);
        return ret;
}

static void
nfsd_file_gc(void)
{
        nfsd_file_lru_walk_list(NULL);
}

static void
nfsd_file_gc_worker(struct work_struct *work)
{
        nfsd_file_gc();
        nfsd_file_schedule_laundrette();
}

static unsigned long
nfsd_file_lru_count(struct shrinker *s, struct shrink_control *sc)
{
        return list_lru_count(&nfsd_file_lru);
}

static unsigned long
nfsd_file_lru_scan(struct shrinker *s, struct shrink_control *sc)
{
        return nfsd_file_lru_walk_list(sc);
}

static struct shrinker nfsd_file_shrinker = {
        .scan_objects = nfsd_file_lru_scan,
        .count_objects = nfsd_file_lru_count,
        .seeks = 1,
};

static void
__nfsd_file_close_inode(struct inode *inode, unsigned int hashval,
                        struct list_head *dispose)
{
        struct nfsd_file *nf;
        struct hlist_node *tmp;

        spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
        hlist_for_each_entry_safe(nf, tmp, &nfsd_file_hashtbl[hashval].nfb_head, nf_node) {
                if (inode == nf->nf_inode)
                        nfsd_file_unhash_and_release_locked(nf, dispose);
        }
        spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
}

/**
 * nfsd_file_close_inode_sync - attempt to forcibly close a nfsd_file
 * @inode: inode of the file to attempt to remove
 *
 * Walk the whole hash bucket, looking for any files that correspond to "inode".
 * If any do, then unhash them and put the hashtable reference to them and
 * destroy any that had their last reference put. Also ensure that any of the
 * fputs also have their final __fput done as well.
 */
void
nfsd_file_close_inode_sync(struct inode *inode)
{
        unsigned int hashval = (unsigned int)hash_long(inode->i_ino,
                                NFSD_FILE_HASH_BITS);
        LIST_HEAD(dispose);

        __nfsd_file_close_inode(inode, hashval, &dispose);
        trace_nfsd_file_close_inode_sync(inode, hashval, !list_empty(&dispose));
        nfsd_file_dispose_list_sync(&dispose);
}

/**
 * nfsd_file_close_inode - attempt a delayed close of a nfsd_file
 * @inode: inode of the file to attempt to remove
 *
 * Walk the whole hash bucket, looking for any files that correspond to "inode".
 * If any do, then unhash them and put the hashtable reference to them and
 * destroy any that had their last reference put.
 */
static void
nfsd_file_close_inode(struct inode *inode)
{
        unsigned int hashval = (unsigned int)hash_long(inode->i_ino,
                                NFSD_FILE_HASH_BITS);
        LIST_HEAD(dispose);

        __nfsd_file_close_inode(inode, hashval, &dispose);
        trace_nfsd_file_close_inode(inode, hashval, !list_empty(&dispose));
        nfsd_file_dispose_list_delayed(&dispose);
}

/**
 * nfsd_file_delayed_close - close unused nfsd_files
 * @work: dummy
 *
 * Walk the LRU list and close any entries that have not been used since
 * the last scan.
 *
 * Note this can deadlock with nfsd_file_cache_purge.
 */
static void
nfsd_file_delayed_close(struct work_struct *work)
{
        LIST_HEAD(head);
        struct nfsd_fcache_disposal *l = container_of(work,
                        struct nfsd_fcache_disposal, work);

        nfsd_file_list_remove_disposal(&head, l);
        nfsd_file_dispose_list(&head);
}

static int
nfsd_file_lease_notifier_call(struct notifier_block *nb, unsigned long arg,
                              void *data)
{
        struct file_lock *fl = data;

        /* Only close files for F_SETLEASE leases */
        if (fl->fl_flags & FL_LEASE)
                nfsd_file_close_inode_sync(file_inode(fl->fl_file));
        return 0;
}

static struct notifier_block nfsd_file_lease_notifier = {
        .notifier_call = nfsd_file_lease_notifier_call,
};

static int
nfsd_file_fsnotify_handle_event(struct fsnotify_group *group,
                                struct inode *inode,
                                u32 mask, const void *data, int data_type,
                                const struct qstr *file_name, u32 cookie,
                                struct fsnotify_iter_info *iter_info)
{
        trace_nfsd_file_fsnotify_handle_event(inode, mask);

        /* Should be no marks on non-regular files */
        if (!S_ISREG(inode->i_mode)) {
                WARN_ON_ONCE(1);
                return 0;
        }

        /* don't close files if this was not the last link */
        if (mask & FS_ATTRIB) {
                if (inode->i_nlink)
                        return 0;
        }

        nfsd_file_close_inode(inode);
        return 0;
}


static const struct fsnotify_ops nfsd_file_fsnotify_ops = {
        .handle_event = nfsd_file_fsnotify_handle_event,
        .free_mark = nfsd_file_mark_free,
};

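/*
 * Allocate the hash table, slabs, LRU, shrinker, lease notifier and fsnotify
 * group used by the file cache. Subsequent calls are no-ops once the hash
 * table exists.
 */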
int
nfsd_file_cache_init(void)
{
        int ret = -ENOMEM;
        unsigned int i;

        clear_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags);

        if (nfsd_file_hashtbl)
                return 0;

        nfsd_filecache_wq = alloc_workqueue("nfsd_filecache", 0, 0);
        if (!nfsd_filecache_wq)
                goto out;

        nfsd_file_hashtbl = kcalloc(NFSD_FILE_HASH_SIZE,
                                sizeof(*nfsd_file_hashtbl), GFP_KERNEL);
        if (!nfsd_file_hashtbl) {
                pr_err("nfsd: unable to allocate nfsd_file_hashtbl\n");
                goto out_err;
        }

        nfsd_file_slab = kmem_cache_create("nfsd_file",
                                sizeof(struct nfsd_file), 0, 0, NULL);
        if (!nfsd_file_slab) {
                pr_err("nfsd: unable to create nfsd_file_slab\n");
                goto out_err;
        }

        nfsd_file_mark_slab = kmem_cache_create("nfsd_file_mark",
                                sizeof(struct nfsd_file_mark), 0, 0, NULL);
        if (!nfsd_file_mark_slab) {
                pr_err("nfsd: unable to create nfsd_file_mark_slab\n");
                goto out_err;
        }


        ret = list_lru_init(&nfsd_file_lru);
        if (ret) {
                pr_err("nfsd: failed to init nfsd_file_lru: %d\n", ret);
                goto out_err;
        }

        ret = register_shrinker(&nfsd_file_shrinker);
        if (ret) {
                pr_err("nfsd: failed to register nfsd_file_shrinker: %d\n", ret);
                goto out_lru;
        }

        ret = lease_register_notifier(&nfsd_file_lease_notifier);
        if (ret) {
                pr_err("nfsd: unable to register lease notifier: %d\n", ret);
                goto out_shrinker;
        }

        nfsd_file_fsnotify_group = fsnotify_alloc_group(&nfsd_file_fsnotify_ops);
        if (IS_ERR(nfsd_file_fsnotify_group)) {
                pr_err("nfsd: unable to create fsnotify group: %ld\n",
                        PTR_ERR(nfsd_file_fsnotify_group));
                nfsd_file_fsnotify_group = NULL;
                goto out_notifier;
        }

        for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
                INIT_HLIST_HEAD(&nfsd_file_hashtbl[i].nfb_head);
                spin_lock_init(&nfsd_file_hashtbl[i].nfb_lock);
        }

        INIT_DELAYED_WORK(&nfsd_filecache_laundrette, nfsd_file_gc_worker);
out:
        return ret;
out_notifier:
        lease_unregister_notifier(&nfsd_file_lease_notifier);
out_shrinker:
        unregister_shrinker(&nfsd_file_shrinker);
out_lru:
        list_lru_destroy(&nfsd_file_lru);
out_err:
        kmem_cache_destroy(nfsd_file_slab);
        nfsd_file_slab = NULL;
        kmem_cache_destroy(nfsd_file_mark_slab);
        nfsd_file_mark_slab = NULL;
        kfree(nfsd_file_hashtbl);
        nfsd_file_hashtbl = NULL;
        destroy_workqueue(nfsd_filecache_wq);
        nfsd_filecache_wq = NULL;
        goto out;
}

/*
 * Note this can deadlock with nfsd_file_lru_cb.
 */
void
nfsd_file_cache_purge(struct net *net)
{
        unsigned int i;
        struct nfsd_file *nf;
        struct hlist_node *next;
        LIST_HEAD(dispose);
        bool del;

        if (!nfsd_file_hashtbl)
                return;

        for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
                struct nfsd_fcache_bucket *nfb = &nfsd_file_hashtbl[i];

                spin_lock(&nfb->nfb_lock);
                hlist_for_each_entry_safe(nf, next, &nfb->nfb_head, nf_node) {
                        if (net && nf->nf_net != net)
                                continue;
                        del = nfsd_file_unhash_and_release_locked(nf, &dispose);

                        /*
                         * Deadlock detected! Something marked this entry as
                         * unhashed, but hasn't removed it from the hash list.
                         */
                        WARN_ON_ONCE(!del);
                }
                spin_unlock(&nfb->nfb_lock);
                nfsd_file_dispose_list(&dispose);
        }
}

static struct nfsd_fcache_disposal *
nfsd_alloc_fcache_disposal(struct net *net)
{
        struct nfsd_fcache_disposal *l;

        l = kmalloc(sizeof(*l), GFP_KERNEL);
        if (!l)
                return NULL;
        INIT_WORK(&l->work, nfsd_file_delayed_close);
        l->net = net;
        spin_lock_init(&l->lock);
        INIT_LIST_HEAD(&l->freeme);
        return l;
}

static void
nfsd_free_fcache_disposal(struct nfsd_fcache_disposal *l)
{
        rcu_assign_pointer(l->net, NULL);
        cancel_work_sync(&l->work);
        nfsd_file_dispose_list(&l->freeme);
        kfree_rcu(l, rcu);
}

static void
nfsd_add_fcache_disposal(struct nfsd_fcache_disposal *l)
{
        spin_lock(&laundrette_lock);
        list_add_tail_rcu(&l->list, &laundrettes);
        spin_unlock(&laundrette_lock);
}

static void
nfsd_del_fcache_disposal(struct nfsd_fcache_disposal *l)
{
        spin_lock(&laundrette_lock);
        list_del_rcu(&l->list);
        spin_unlock(&laundrette_lock);
}

static int
nfsd_alloc_fcache_disposal_net(struct net *net)
{
        struct nfsd_fcache_disposal *l;

        l = nfsd_alloc_fcache_disposal(net);
        if (!l)
                return -ENOMEM;
        nfsd_add_fcache_disposal(l);
        return 0;
}

static void
nfsd_free_fcache_disposal_net(struct net *net)
{
        struct nfsd_fcache_disposal *l;

        rcu_read_lock();
        list_for_each_entry_rcu(l, &laundrettes, list) {
                if (l->net != net)
                        continue;
                nfsd_del_fcache_disposal(l);
                rcu_read_unlock();
                nfsd_free_fcache_disposal(l);
                return;
        }
        rcu_read_unlock();
}

int
nfsd_file_cache_start_net(struct net *net)
{
        return nfsd_alloc_fcache_disposal_net(net);
}

void
nfsd_file_cache_shutdown_net(struct net *net)
{
        nfsd_file_cache_purge(net);
        nfsd_free_fcache_disposal_net(net);
}

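/*
 * Tear down the file cache: purge all entries, unregister the shrinker,
 * lease notifier and fsnotify group, and free the slabs and hash table.
 */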
void
nfsd_file_cache_shutdown(void)
{
        LIST_HEAD(dispose);

        set_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags);

        lease_unregister_notifier(&nfsd_file_lease_notifier);
        unregister_shrinker(&nfsd_file_shrinker);
        /*
         * make sure all callers of nfsd_file_lru_cb are done before
         * calling nfsd_file_cache_purge
         */
        cancel_delayed_work_sync(&nfsd_filecache_laundrette);
        nfsd_file_cache_purge(NULL);
        list_lru_destroy(&nfsd_file_lru);
        rcu_barrier();
        fsnotify_put_group(nfsd_file_fsnotify_group);
        nfsd_file_fsnotify_group = NULL;
        kmem_cache_destroy(nfsd_file_slab);
        nfsd_file_slab = NULL;
        fsnotify_wait_marks_destroyed();
        kmem_cache_destroy(nfsd_file_mark_slab);
        nfsd_file_mark_slab = NULL;
        kfree(nfsd_file_hashtbl);
        nfsd_file_hashtbl = NULL;
        destroy_workqueue(nfsd_filecache_wq);
        nfsd_filecache_wq = NULL;
}

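/*
 * Return true if two credentials have the same fsuid, fsgid and
 * supplementary group list.
 */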
static bool
nfsd_match_cred(const struct cred *c1, const struct cred *c2)
{
        int i;

        if (!uid_eq(c1->fsuid, c2->fsuid))
                return false;
        if (!gid_eq(c1->fsgid, c2->fsgid))
                return false;
        if (c1->group_info == NULL || c2->group_info == NULL)
                return c1->group_info == c2->group_info;
        if (c1->group_info->ngroups != c2->group_info->ngroups)
                return false;
        for (i = 0; i < c1->group_info->ngroups; i++) {
                if (!gid_eq(c1->group_info->gid[i], c2->group_info->gid[i]))
                        return false;
        }
        return true;
}

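/*
 * Search the hash bucket for a still-hashed entry matching the inode, net
 * namespace, credentials and requested access, and take a reference to it.
 * Caller must hold either the bucket lock or the RCU read lock.
 */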
static struct nfsd_file *
nfsd_file_find_locked(struct inode *inode, unsigned int may_flags,
                      unsigned int hashval, struct net *net)
{
        struct nfsd_file *nf;
        unsigned char need = may_flags & NFSD_FILE_MAY_MASK;

        hlist_for_each_entry_rcu(nf, &nfsd_file_hashtbl[hashval].nfb_head,
                                 nf_node) {
                if ((need & nf->nf_may) != need)
                        continue;
                if (nf->nf_inode != inode)
                        continue;
                if (nf->nf_net != net)
                        continue;
                if (!nfsd_match_cred(nf->nf_cred, current_cred()))
                        continue;
                if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags))
                        continue;
                if (nfsd_file_get(nf) != NULL)
                        return nf;
        }
        return NULL;
}

/**
 * nfsd_file_is_cached - are there any cached open files for this fh?
 * @inode: inode of the file to check
 *
 * Scan the hashtable for open files that match this fh. Returns true if there
 * are any, and false if not.
 */
bool
nfsd_file_is_cached(struct inode *inode)
{
        bool ret = false;
        struct nfsd_file *nf;
        unsigned int hashval;

        hashval = (unsigned int)hash_long(inode->i_ino, NFSD_FILE_HASH_BITS);

        rcu_read_lock();
        hlist_for_each_entry_rcu(nf, &nfsd_file_hashtbl[hashval].nfb_head,
                                 nf_node) {
                if (inode == nf->nf_inode) {
                        ret = true;
                        break;
                }
        }
        rcu_read_unlock();
        trace_nfsd_file_is_cached(inode, hashval, (int)ret);
        return ret;
}

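/*
 * nfsd_file_acquire - find or open a cached file for an nfsd request
 *
 * Look up an nfsd_file for the given filehandle and access mode, opening and
 * hashing a new one if no usable entry exists. On success, *pnf holds a
 * referenced nfsd_file that the caller must release with nfsd_file_put().
 */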
__be32
nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
                  unsigned int may_flags, struct nfsd_file **pnf)
{
        __be32 status;
        struct net *net = SVC_NET(rqstp);
        struct nfsd_file *nf, *new;
        struct inode *inode;
        unsigned int hashval;
        bool retry = true;

        /* FIXME: skip this if fh_dentry is already set? */
        status = fh_verify(rqstp, fhp, S_IFREG,
                           may_flags|NFSD_MAY_OWNER_OVERRIDE);
        if (status != nfs_ok)
                return status;

        inode = d_inode(fhp->fh_dentry);
        hashval = (unsigned int)hash_long(inode->i_ino, NFSD_FILE_HASH_BITS);
retry:
        rcu_read_lock();
        nf = nfsd_file_find_locked(inode, may_flags, hashval, net);
        rcu_read_unlock();
        if (nf)
                goto wait_for_construction;

        new = nfsd_file_alloc(inode, may_flags, hashval, net);
        if (!new) {
                trace_nfsd_file_acquire(rqstp, hashval, inode, may_flags,
                                        NULL, nfserr_jukebox);
                return nfserr_jukebox;
        }

        spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
        nf = nfsd_file_find_locked(inode, may_flags, hashval, net);
        if (nf == NULL)
                goto open_file;
        spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
        nfsd_file_slab_free(&new->nf_rcu);

wait_for_construction:
        wait_on_bit(&nf->nf_flags, NFSD_FILE_PENDING, TASK_UNINTERRUPTIBLE);

        /* Did construction of this file fail? */
        if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
                if (!retry) {
                        status = nfserr_jukebox;
                        goto out;
                }
                retry = false;
                nfsd_file_put_noref(nf);
                goto retry;
        }

        this_cpu_inc(nfsd_file_cache_hits);

        if (!(may_flags & NFSD_MAY_NOT_BREAK_LEASE)) {
                bool write = (may_flags & NFSD_MAY_WRITE);

                if (test_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags) ||
                    (test_bit(NFSD_FILE_BREAK_WRITE, &nf->nf_flags) && write)) {
                        status = nfserrno(nfsd_open_break_lease(
                                        file_inode(nf->nf_file), may_flags));
                        if (status == nfs_ok) {
                                clear_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
                                if (write)
                                        clear_bit(NFSD_FILE_BREAK_WRITE,
                                                  &nf->nf_flags);
                        }
                }
        }
out:
        if (status == nfs_ok) {
                *pnf = nf;
        } else {
                nfsd_file_put(nf);
                nf = NULL;
        }

        trace_nfsd_file_acquire(rqstp, hashval, inode, may_flags, nf, status);
        return status;
open_file:
        nf = new;
        /* Take reference for the hashtable */
        atomic_inc(&nf->nf_ref);
        __set_bit(NFSD_FILE_HASHED, &nf->nf_flags);
        __set_bit(NFSD_FILE_PENDING, &nf->nf_flags);
        list_lru_add(&nfsd_file_lru, &nf->nf_lru);
        hlist_add_head_rcu(&nf->nf_node, &nfsd_file_hashtbl[hashval].nfb_head);
        ++nfsd_file_hashtbl[hashval].nfb_count;
        nfsd_file_hashtbl[hashval].nfb_maxcount = max(nfsd_file_hashtbl[hashval].nfb_maxcount,
                        nfsd_file_hashtbl[hashval].nfb_count);
        spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
        if (atomic_long_inc_return(&nfsd_filecache_count) >= NFSD_FILE_LRU_THRESHOLD)
                nfsd_file_gc();

        nf->nf_mark = nfsd_file_mark_find_or_create(nf);
        if (nf->nf_mark)
                status = nfsd_open_verified(rqstp, fhp, S_IFREG,
                                            may_flags, &nf->nf_file);
        else
                status = nfserr_jukebox;
        /*
         * If construction failed, or we raced with a call to unlink()
         * then unhash.
         */
        if (status != nfs_ok || inode->i_nlink == 0) {
                bool do_free;
                spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
                do_free = nfsd_file_unhash(nf);
                spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
                if (do_free)
                        nfsd_file_put_noref(nf);
        }
        clear_bit_unlock(NFSD_FILE_PENDING, &nf->nf_flags);
        smp_mb__after_atomic();
        wake_up_bit(&nf->nf_flags, NFSD_FILE_PENDING);
        goto out;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
static int nfsd_file_cache_stats_show(struct seq_file *m, void *v)
{
        unsigned int i, count = 0, longest = 0;
        unsigned long hits = 0;

        /*
         * No need for spinlocks here since we're not terribly interested in
         * accuracy. We do take the nfsd_mutex simply to ensure that we
         * don't end up racing with server shutdown
         */
        mutex_lock(&nfsd_mutex);
        if (nfsd_file_hashtbl) {
                for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
                        count += nfsd_file_hashtbl[i].nfb_count;
                        longest = max(longest, nfsd_file_hashtbl[i].nfb_count);
                }
        }
        mutex_unlock(&nfsd_mutex);

        for_each_possible_cpu(i)
                hits += per_cpu(nfsd_file_cache_hits, i);

        seq_printf(m, "total entries: %u\n", count);
        seq_printf(m, "longest chain: %u\n", longest);
        seq_printf(m, "cache hits: %lu\n", hits);
        return 0;
}

int nfsd_file_cache_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, nfsd_file_cache_stats_show, NULL);
}