1 /*
2 * fs/dcache.c
3 *
4 * Complete reimplementation
5 * (C) 1997 Thomas Schoebel-Theuer,
6 * with heavy changes by Linus Torvalds
7 */
8
9 /*
10 * Notes on the allocation strategy:
11 *
12 * The dcache is a master of the icache - whenever a dcache entry
13 * exists, the inode will always exist. "iput()" is done either when
14 * the dcache entry is deleted or garbage collected.
15 */
16
17 #include <linux/syscalls.h>
18 #include <linux/string.h>
19 #include <linux/mm.h>
20 #include <linux/fs.h>
21 #include <linux/fsnotify.h>
22 #include <linux/slab.h>
23 #include <linux/init.h>
24 #include <linux/hash.h>
25 #include <linux/cache.h>
26 #include <linux/export.h>
27 #include <linux/mount.h>
28 #include <linux/file.h>
29 #include <asm/uaccess.h>
30 #include <linux/security.h>
31 #include <linux/seqlock.h>
32 #include <linux/swap.h>
33 #include <linux/bootmem.h>
34 #include <linux/fs_struct.h>
35 #include <linux/hardirq.h>
36 #include <linux/bit_spinlock.h>
37 #include <linux/rculist_bl.h>
38 #include <linux/prefetch.h>
39 #include <linux/ratelimit.h>
40 #include <linux/list_lru.h>
41 #include <linux/kasan.h>
42
43 #include "internal.h"
44 #include "mount.h"
45
46 /*
47 * Usage:
48 * dcache->d_inode->i_lock protects:
49 * - i_dentry, d_u.d_alias, d_inode of aliases
50 * dcache_hash_bucket lock protects:
51 * - the dcache hash table
52 * s_anon bl list spinlock protects:
53 * - the s_anon list (see __d_drop)
54 * dentry->d_sb->s_dentry_lru_lock protects:
55 * - the dcache lru lists and counters
56 * d_lock protects:
57 * - d_flags
58 * - d_name
59 * - d_lru
60 * - d_count
61 * - d_unhashed()
62 * - d_parent and d_subdirs
63 * - children's d_child and d_parent
64 * - d_u.d_alias, d_inode
65 *
66 * Ordering:
67 * dentry->d_inode->i_lock
68 * dentry->d_lock
69 * dentry->d_sb->s_dentry_lru_lock
70 * dcache_hash_bucket lock
71 * s_anon lock
72 *
73 * If there is an ancestor relationship:
74 * dentry->d_parent->...->d_parent->d_lock
75 * ...
76 * dentry->d_parent->d_lock
77 * dentry->d_lock
78 *
79 * If no ancestor relationship:
80 * if (dentry1 < dentry2)
81 * dentry1->d_lock
82 * dentry2->d_lock
83 */
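
/*
 * A minimal sketch (hypothetical helper, not used by the dcache
 * itself) of the "no ancestor relationship" rule above: take the
 * two d_locks in address order.
 */
static inline void lock_two_unrelated_dentries(struct dentry *d1,
					       struct dentry *d2)
{
	if (d1 > d2)
		swap(d1, d2);
	spin_lock(&d1->d_lock);
	spin_lock_nested(&d2->d_lock, DENTRY_D_LOCK_NESTED);
}
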
84 int sysctl_vfs_cache_pressure __read_mostly = 100;
85 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
86
87 __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
88
89 EXPORT_SYMBOL(rename_lock);
90
91 static struct kmem_cache *dentry_cache __read_mostly;
92
93 /*
94 * This is the single most critical data structure when it comes
95 * to the dcache: the hashtable for lookups. Somebody should try
96 * to make this good - I've just made it work.
97 *
98 * This hash-function tries to avoid losing too many bits of hash
99 * information, yet avoid using a prime hash-size or similar.
100 */
101
102 static unsigned int d_hash_mask __read_mostly;
103 static unsigned int d_hash_shift __read_mostly;
104
105 static struct hlist_bl_head *dentry_hashtable __read_mostly;
106
107 static inline struct hlist_bl_head *d_hash(unsigned int hash)
108 {
109 return dentry_hashtable + (hash >> (32 - d_hash_shift));
110 }
111
112 #define IN_LOOKUP_SHIFT 10
113 static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT];
114
115 static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
116 unsigned int hash)
117 {
118 hash += (unsigned long) parent / L1_CACHE_BYTES;
119 return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
120 }
121
122
123 /* Statistics gathering. */
124 struct dentry_stat_t dentry_stat = {
125 .age_limit = 45,
126 };
127
128 static DEFINE_PER_CPU(long, nr_dentry);
129 static DEFINE_PER_CPU(long, nr_dentry_unused);
130
131 #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
132
133 /*
134 * Here we resort to our own counters instead of using generic per-cpu counters
135 * for consistency with what the vfs inode code does. We expect to get
136 * better code and performance by having our own specialized counters.
137 *
138 * Please note that the loop is done over all possible CPUs, not over all online
139 * CPUs. The reason for this is that we don't want to play games with CPUs going
140 * on and off. If one of them goes off, we will just keep their counters.
141 *
142 * glommer: See cffbc8a for details, and if you ever intend to change this,
143 * please update all vfs counters to match.
144 */
145 static long get_nr_dentry(void)
146 {
147 int i;
148 long sum = 0;
149 for_each_possible_cpu(i)
150 sum += per_cpu(nr_dentry, i);
151 return sum < 0 ? 0 : sum;
152 }
153
154 static long get_nr_dentry_unused(void)
155 {
156 int i;
157 long sum = 0;
158 for_each_possible_cpu(i)
159 sum += per_cpu(nr_dentry_unused, i);
160 return sum < 0 ? 0 : sum;
161 }
162
163 int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer,
164 size_t *lenp, loff_t *ppos)
165 {
166 dentry_stat.nr_dentry = get_nr_dentry();
167 dentry_stat.nr_unused = get_nr_dentry_unused();
168 return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
169 }
170 #endif
171
172 /*
173 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
174 * The strings are both count bytes long, and count is non-zero.
175 */
176 #ifdef CONFIG_DCACHE_WORD_ACCESS
177
178 #include <asm/word-at-a-time.h>
179 /*
180 * NOTE! 'cs' comes from a dentry, so it has an aligned
181 * allocation for this particular component. We don't
182 * strictly need the load_unaligned_zeropad() safety, but it
183 * doesn't hurt either.
184 *
185 * In contrast, 'ct' and 'tcount' can be from a pathname, and do
186 * need the careful unaligned handling.
187 */
188 static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
189 {
190 unsigned long a,b,mask;
191
192 for (;;) {
193 a = *(unsigned long *)cs;
194 b = load_unaligned_zeropad(ct);
195 if (tcount < sizeof(unsigned long))
196 break;
197 if (unlikely(a != b))
198 return 1;
199 cs += sizeof(unsigned long);
200 ct += sizeof(unsigned long);
201 tcount -= sizeof(unsigned long);
202 if (!tcount)
203 return 0;
204 }
205 mask = bytemask_from_count(tcount);
206 return unlikely(!!((a ^ b) & mask));
207 }
208
209 #else
210
211 static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
212 {
213 do {
214 if (*cs != *ct)
215 return 1;
216 cs++;
217 ct++;
218 tcount--;
219 } while (tcount);
220 return 0;
221 }
222
223 #endif
224
225 static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
226 {
227 /*
228 * Be careful about RCU walk racing with rename:
229 * use 'lockless_dereference' to fetch the name pointer.
230 *
231 * NOTE! Even if a rename will mean that the length
232 * was not loaded atomically, we don't care. The
233 * RCU walk will check the sequence count eventually,
234 * and catch it. And we won't overrun the buffer,
235 * because we're reading the name pointer atomically,
236 * and a dentry name is guaranteed to be properly
237 * terminated with a NUL byte.
238 *
239 * End result: even if 'len' is wrong, we'll exit
240 * early because the data cannot match (there can
241 * be no NUL in the ct/tcount data)
242 */
243 const unsigned char *cs = lockless_dereference(dentry->d_name.name);
244
245 return dentry_string_cmp(cs, ct, tcount);
246 }
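
/*
 * Hypothetical caller sketch: how a lookup path might combine the
 * precomputed hash with dentry_cmp() for a component described by a
 * qstr. The real users live in the lookup paths further down.
 */
static inline bool example_name_matches(const struct dentry *dentry,
					const struct qstr *name)
{
	return dentry->d_name.hash == name->hash &&
	       !dentry_cmp(dentry, name->name, name->len);
}
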
247
248 struct external_name {
249 union {
250 atomic_t count;
251 struct rcu_head head;
252 } u;
253 unsigned char name[];
254 };
255
256 static inline struct external_name *external_name(struct dentry *dentry)
257 {
258 return container_of(dentry->d_name.name, struct external_name, name[0]);
259 }
260
261 static void __d_free(struct rcu_head *head)
262 {
263 struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
264
265 kmem_cache_free(dentry_cache, dentry);
266 }
267
268 static void __d_free_external(struct rcu_head *head)
269 {
270 struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
271 kfree(external_name(dentry));
272 kmem_cache_free(dentry_cache, dentry);
273 }
274
275 static inline int dname_external(const struct dentry *dentry)
276 {
277 return dentry->d_name.name != dentry->d_iname;
278 }
279
280 void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
281 {
282 spin_lock(&dentry->d_lock);
283 if (unlikely(dname_external(dentry))) {
284 struct external_name *p = external_name(dentry);
285 atomic_inc(&p->u.count);
286 spin_unlock(&dentry->d_lock);
287 name->name = p->name;
288 } else {
289 memcpy(name->inline_name, dentry->d_iname, DNAME_INLINE_LEN);
290 spin_unlock(&dentry->d_lock);
291 name->name = name->inline_name;
292 }
293 }
294 EXPORT_SYMBOL(take_dentry_name_snapshot);
295
296 void release_dentry_name_snapshot(struct name_snapshot *name)
297 {
298 if (unlikely(name->name != name->inline_name)) {
299 struct external_name *p;
300 p = container_of(name->name, struct external_name, name[0]);
301 if (unlikely(atomic_dec_and_test(&p->u.count)))
302 kfree_rcu(p, u.head);
303 }
304 }
305 EXPORT_SYMBOL(release_dentry_name_snapshot);
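
/*
 * Usage sketch (hypothetical caller): a snapshot pins a stable copy of
 * the name, so it stays valid after d_lock is dropped and across a
 * concurrent rename, until it is released.
 */
static void example_log_name(struct dentry *dentry)
{
	struct name_snapshot name;

	take_dentry_name_snapshot(&name, dentry);
	pr_info("dentry name: %s\n", name.name);
	release_dentry_name_snapshot(&name);
}
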
306
307 static inline void __d_set_inode_and_type(struct dentry *dentry,
308 struct inode *inode,
309 unsigned type_flags)
310 {
311 unsigned flags;
312
313 dentry->d_inode = inode;
314 flags = READ_ONCE(dentry->d_flags);
315 flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
316 flags |= type_flags;
317 WRITE_ONCE(dentry->d_flags, flags);
318 }
319
320 static inline void __d_clear_type_and_inode(struct dentry *dentry)
321 {
322 unsigned flags = READ_ONCE(dentry->d_flags);
323
324 flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
325 WRITE_ONCE(dentry->d_flags, flags);
326 dentry->d_inode = NULL;
327 }
328
329 static void dentry_free(struct dentry *dentry)
330 {
331 WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
332 if (unlikely(dname_external(dentry))) {
333 struct external_name *p = external_name(dentry);
334 if (likely(atomic_dec_and_test(&p->u.count))) {
335 call_rcu(&dentry->d_u.d_rcu, __d_free_external);
336 return;
337 }
338 }
339 /* if dentry was never visible to RCU, immediate free is OK */
340 if (!(dentry->d_flags & DCACHE_RCUACCESS))
341 __d_free(&dentry->d_u.d_rcu);
342 else
343 call_rcu(&dentry->d_u.d_rcu, __d_free);
344 }
345
346 /*
347 * Release the dentry's inode, using the filesystem
348 * d_iput() operation if defined.
349 */
350 static void dentry_unlink_inode(struct dentry * dentry)
351 __releases(dentry->d_lock)
352 __releases(dentry->d_inode->i_lock)
353 {
354 struct inode *inode = dentry->d_inode;
355 bool hashed = !d_unhashed(dentry);
356
357 if (hashed)
358 raw_write_seqcount_begin(&dentry->d_seq);
359 __d_clear_type_and_inode(dentry);
360 hlist_del_init(&dentry->d_u.d_alias);
361 if (hashed)
362 raw_write_seqcount_end(&dentry->d_seq);
363 spin_unlock(&dentry->d_lock);
364 spin_unlock(&inode->i_lock);
365 if (!inode->i_nlink)
366 fsnotify_inoderemove(inode);
367 if (dentry->d_op && dentry->d_op->d_iput)
368 dentry->d_op->d_iput(dentry, inode);
369 else
370 iput(inode);
371 }
372
373 /*
374 * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
375 * is in use - which includes both the "real" per-superblock
376 * LRU list _and_ the DCACHE_SHRINK_LIST use.
377 *
378 * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
379 * on the shrink list (ie not on the superblock LRU list).
380 *
381 * The per-cpu "nr_dentry_unused" counters are updated with
382 * the DCACHE_LRU_LIST bit.
383 *
384 * These helper functions make sure we always follow the
385 * rules. d_lock must be held by the caller.
386 */
387 #define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
388 static void d_lru_add(struct dentry *dentry)
389 {
390 D_FLAG_VERIFY(dentry, 0);
391 dentry->d_flags |= DCACHE_LRU_LIST;
392 this_cpu_inc(nr_dentry_unused);
393 WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
394 }
395
396 static void d_lru_del(struct dentry *dentry)
397 {
398 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
399 dentry->d_flags &= ~DCACHE_LRU_LIST;
400 this_cpu_dec(nr_dentry_unused);
401 WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
402 }
403
404 static void d_shrink_del(struct dentry *dentry)
405 {
406 D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
407 list_del_init(&dentry->d_lru);
408 dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
409 this_cpu_dec(nr_dentry_unused);
410 }
411
412 static void d_shrink_add(struct dentry *dentry, struct list_head *list)
413 {
414 D_FLAG_VERIFY(dentry, 0);
415 list_add(&dentry->d_lru, list);
416 dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
417 this_cpu_inc(nr_dentry_unused);
418 }
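
/*
 * For reference, the only flag combinations the helpers above may see
 * (and that D_FLAG_VERIFY() accepts) are:
 *
 *	LRU_LIST=0, SHRINK_LIST=0	not on any list
 *	LRU_LIST=1, SHRINK_LIST=0	on the superblock LRU list
 *	LRU_LIST=1, SHRINK_LIST=1	on a private shrink list
 *
 * DCACHE_SHRINK_LIST without DCACHE_LRU_LIST never occurs.
 */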
419
420 /*
421 * These can only be called under the global LRU lock, ie during the
422 * callback for freeing the LRU list. "isolate" removes it from the
423 * LRU lists entirely, while shrink_move moves it to the indicated
424 * private list.
425 */
426 static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
427 {
428 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
429 dentry->d_flags &= ~DCACHE_LRU_LIST;
430 this_cpu_dec(nr_dentry_unused);
431 list_lru_isolate(lru, &dentry->d_lru);
432 }
433
434 static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
435 struct list_head *list)
436 {
437 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
438 dentry->d_flags |= DCACHE_SHRINK_LIST;
439 list_lru_isolate_move(lru, &dentry->d_lru, list);
440 }
441
442 /*
443 * dentry_lru_(add|del) must be called with d_lock held.
444 */
445 static void dentry_lru_add(struct dentry *dentry)
446 {
447 if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
448 d_lru_add(dentry);
449 }
450
451 /**
452 * d_drop - drop a dentry
453 * @dentry: dentry to drop
454 *
455 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
456 * be found through a VFS lookup any more. Note that this is different from
457 * deleting the dentry - d_delete will try to mark the dentry negative if
458 * possible, giving a successful _negative_ lookup, while d_drop will
459 * just make the cache lookup fail.
460 *
461 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
462 * reason (NFS timeouts or autofs deletes).
463 *
464 * __d_drop requires dentry->d_lock
465 * ___d_drop doesn't mark dentry as "unhashed"
466 * (dentry->d_hash.pprev will be LIST_POISON2, not NULL).
467 */
468 static void ___d_drop(struct dentry *dentry)
469 {
470 if (!d_unhashed(dentry)) {
471 struct hlist_bl_head *b;
472 /*
473 * Hashed dentries are normally on the dentry hashtable,
474 * with the exception of those newly allocated by
475 * d_obtain_alias, which are always IS_ROOT:
476 */
477 if (unlikely(IS_ROOT(dentry)))
478 b = &dentry->d_sb->s_anon;
479 else
480 b = d_hash(dentry->d_name.hash);
481
482 hlist_bl_lock(b);
483 __hlist_bl_del(&dentry->d_hash);
484 hlist_bl_unlock(b);
485 /* After this call, in-progress rcu-walk path lookup will fail. */
486 write_seqcount_invalidate(&dentry->d_seq);
487 }
488 }
489
490 void __d_drop(struct dentry *dentry)
491 {
492 ___d_drop(dentry);
493 dentry->d_hash.pprev = NULL;
494 }
495 EXPORT_SYMBOL(__d_drop);
496
497 void d_drop(struct dentry *dentry)
498 {
499 spin_lock(&dentry->d_lock);
500 __d_drop(dentry);
501 spin_unlock(&dentry->d_lock);
502 }
503 EXPORT_SYMBOL(d_drop);
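
/*
 * Illustrative sketch of a typical d_drop() user, in the spirit of the
 * NFS/autofs cases mentioned above: once a ->d_revalidate() instance
 * decides the entry is stale, it unhashes it so the next cache lookup
 * fails. Hypothetical; "stale" stands in for a real staleness check.
 */
static int example_revalidate(struct dentry *dentry, bool stale)
{
	if (stale) {
		d_drop(dentry);
		return 0;
	}
	return 1;
}
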
504
505 static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent)
506 {
507 struct dentry *next;
508 /*
509 * Inform d_walk() and shrink_dentry_list() that we are no longer
510 * attached to the dentry tree
511 */
512 dentry->d_flags |= DCACHE_DENTRY_KILLED;
513 if (unlikely(list_empty(&dentry->d_child)))
514 return;
515 __list_del_entry(&dentry->d_child);
516 /*
517 * Cursors can move around the list of children. While we'd been
518 * a normal list member, it didn't matter - ->d_child.next would've
519 * been updated. However, from now on it won't be and for the
520 * things like d_walk() it might end up with a nasty surprise.
521 * Normally d_walk() doesn't care about cursors moving around -
522 * ->d_lock on parent prevents that and since a cursor has no children
523 * of its own, we get through it without ever unlocking the parent.
524 * There is one exception, though - if we ascend from a child that
525 * gets killed as soon as we unlock it, the next sibling is found
526 * using the value left in its ->d_child.next. And if _that_
527 * pointed to a cursor, and cursor got moved (e.g. by lseek())
528 * before d_walk() regains parent->d_lock, we'll end up skipping
529 * everything the cursor had been moved past.
530 *
531 * Solution: make sure that the pointer left behind in ->d_child.next
532 * points to something that won't be moving around. I.e. skip the
533 * cursors.
534 */
535 while (dentry->d_child.next != &parent->d_subdirs) {
536 next = list_entry(dentry->d_child.next, struct dentry, d_child);
537 if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
538 break;
539 dentry->d_child.next = next->d_child.next;
540 }
541 }
542
543 static void __dentry_kill(struct dentry *dentry)
544 {
545 struct dentry *parent = NULL;
546 bool can_free = true;
547 if (!IS_ROOT(dentry))
548 parent = dentry->d_parent;
549
550 /*
551 * The dentry is now unrecoverably dead to the world.
552 */
553 lockref_mark_dead(&dentry->d_lockref);
554
555 /*
556 * inform the fs via d_prune that this dentry is about to be
557 * unhashed and destroyed.
558 */
559 if (dentry->d_flags & DCACHE_OP_PRUNE)
560 dentry->d_op->d_prune(dentry);
561
562 if (dentry->d_flags & DCACHE_LRU_LIST) {
563 if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
564 d_lru_del(dentry);
565 }
566 /* if it was on the hash then remove it */
567 __d_drop(dentry);
568 dentry_unlist(dentry, parent);
569 if (parent)
570 spin_unlock(&parent->d_lock);
571 if (dentry->d_inode)
572 dentry_unlink_inode(dentry);
573 else
574 spin_unlock(&dentry->d_lock);
575 this_cpu_dec(nr_dentry);
576 if (dentry->d_op && dentry->d_op->d_release)
577 dentry->d_op->d_release(dentry);
578
579 spin_lock(&dentry->d_lock);
580 if (dentry->d_flags & DCACHE_SHRINK_LIST) {
581 dentry->d_flags |= DCACHE_MAY_FREE;
582 can_free = false;
583 }
584 spin_unlock(&dentry->d_lock);
585 if (likely(can_free))
586 dentry_free(dentry);
587 }
588
589 /*
590 * Finish off a dentry we've decided to kill.
591 * dentry->d_lock must be held, returns with it unlocked.
593 * Returns dentry requiring refcount drop, or NULL if we're done.
594 */
595 static struct dentry *dentry_kill(struct dentry *dentry)
596 __releases(dentry->d_lock)
597 {
598 struct inode *inode = dentry->d_inode;
599 struct dentry *parent = NULL;
600
601 if (inode && unlikely(!spin_trylock(&inode->i_lock)))
602 goto failed;
603
604 if (!IS_ROOT(dentry)) {
605 parent = dentry->d_parent;
606 if (unlikely(!spin_trylock(&parent->d_lock))) {
607 if (inode)
608 spin_unlock(&inode->i_lock);
609 goto failed;
610 }
611 }
612
613 __dentry_kill(dentry);
614 return parent;
615
616 failed:
617 spin_unlock(&dentry->d_lock);
618 return dentry; /* try again with same dentry */
619 }
620
621 static inline struct dentry *lock_parent(struct dentry *dentry)
622 {
623 struct dentry *parent = dentry->d_parent;
624 if (IS_ROOT(dentry))
625 return NULL;
626 if (unlikely(dentry->d_lockref.count < 0))
627 return NULL;
628 if (likely(spin_trylock(&parent->d_lock)))
629 return parent;
630 rcu_read_lock();
631 spin_unlock(&dentry->d_lock);
632 again:
633 parent = ACCESS_ONCE(dentry->d_parent);
634 spin_lock(&parent->d_lock);
635 /*
636 * We can't blindly lock dentry until we are sure
637 * that we won't violate the locking order.
638 * Any changes of dentry->d_parent must have
639 * been done with parent->d_lock held, so
640 * spin_lock() above is enough of a barrier
641 * for checking if it's still our child.
642 */
643 if (unlikely(parent != dentry->d_parent)) {
644 spin_unlock(&parent->d_lock);
645 goto again;
646 }
647 if (parent != dentry) {
648 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
649 if (unlikely(dentry->d_lockref.count < 0)) {
650 spin_unlock(&parent->d_lock);
651 parent = NULL;
652 }
653 } else {
654 parent = NULL;
655 }
656 rcu_read_unlock();
657 return parent;
658 }
659
660 /*
661 * Try to do a lockless dput(), and return whether that was successful.
662 *
663 * If unsuccessful, we return false, having already taken the dentry lock.
664 *
665 * The caller needs to hold the RCU read lock, so that the dentry is
666 * guaranteed to stay around even if the refcount goes down to zero!
667 */
668 static inline bool fast_dput(struct dentry *dentry)
669 {
670 int ret;
671 unsigned int d_flags;
672
673 /*
674 * If we have a d_op->d_delete() operation, we should not
675 * let the dentry count go to zero, so use "put_or_lock".
676 */
677 if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
678 return lockref_put_or_lock(&dentry->d_lockref);
679
680 /*
681 * .. otherwise, we can try to just decrement the
682 * lockref optimistically.
683 */
684 ret = lockref_put_return(&dentry->d_lockref);
685
686 /*
687 * If the lockref_put_return() failed due to the lock being held
688 * by somebody else, the fast path has failed. We will need to
689 * get the lock, and then check the count again.
690 */
691 if (unlikely(ret < 0)) {
692 spin_lock(&dentry->d_lock);
693 if (dentry->d_lockref.count > 1) {
694 dentry->d_lockref.count--;
695 spin_unlock(&dentry->d_lock);
696 return 1;
697 }
698 return 0;
699 }
700
701 /*
702 * If we weren't the last ref, we're done.
703 */
704 if (ret)
705 return 1;
706
707 /*
708 * Careful, careful. The reference count went down
709 * to zero, but we don't hold the dentry lock, so
710 * somebody else could get it again, and do another
711 * dput(), and we need to not race with that.
712 *
713 * However, there is a very special and common case
714 * where we don't care, because there is nothing to
715 * do: the dentry is still hashed, it does not have
716 * a 'delete' op, and it's referenced and already on
717 * the LRU list.
718 *
719 * NOTE! Since we aren't locked, these values are
720 * not "stable". However, it is sufficient that at
721 * some point after we dropped the reference the
722 * dentry was hashed and the flags had the proper
723 * value. Other dentry users may have re-gotten
724 * a reference to the dentry and change that, but
725 * our work is done - we can leave the dentry
726 * around with a zero refcount.
727 */
728 smp_rmb();
729 d_flags = ACCESS_ONCE(dentry->d_flags);
730 d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;
731
732 /* Nothing to do? Dropping the reference was all we needed? */
733 if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
734 return 1;
735
736 /*
737 * Not the fast normal case? Get the lock. We've already decremented
738 * the refcount, but we'll need to re-check the situation after
739 * getting the lock.
740 */
741 spin_lock(&dentry->d_lock);
742
743 /*
744 * Did somebody else grab a reference to it in the meantime, and
745 * we're no longer the last user after all? Alternatively, somebody
746 * else could have killed it and marked it dead. Either way, we
747 * don't need to do anything else.
748 */
749 if (dentry->d_lockref.count) {
750 spin_unlock(&dentry->d_lock);
751 return 1;
752 }
753
754 /*
755 * Re-get the reference we optimistically dropped. We hold the
756 * lock, and we just tested that it was zero, so we can just
757 * set it to 1.
758 */
759 dentry->d_lockref.count = 1;
760 return 0;
761 }
762
763
764 /*
765 * This is dput
766 *
767 * This is complicated by the fact that we do not want to put
768 * dentries that are no longer on any hash chain on the unused
769 * list: we'd much rather just get rid of them immediately.
770 *
771 * However, that implies that we have to traverse the dentry
772 * tree upwards to the parents which might _also_ now be
773 * scheduled for deletion (it may have been only waiting for
774 * its last child to go away).
775 *
776 * This tail recursion is done by hand as we don't want to depend
777 * on the compiler to always get this right (gcc generally doesn't).
778 * Real recursion would eat up our stack space.
779 */
780
781 /*
782 * dput - release a dentry
783 * @dentry: dentry to release
784 *
785 * Release a dentry. This will drop the usage count and if appropriate
786 * call the dentry unlink method as well as removing it from the queues and
787 * releasing its resources. If the parent dentries were scheduled for release
788 * they too may now get deleted.
789 */
790 void dput(struct dentry *dentry)
791 {
792 if (unlikely(!dentry))
793 return;
794
795 repeat:
796 might_sleep();
797
798 rcu_read_lock();
799 if (likely(fast_dput(dentry))) {
800 rcu_read_unlock();
801 return;
802 }
803
804 /* Slow case: now with the dentry lock held */
805 rcu_read_unlock();
806
807 WARN_ON(d_in_lookup(dentry));
808
809 /* Unreachable? Get rid of it */
810 if (unlikely(d_unhashed(dentry)))
811 goto kill_it;
812
813 if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
814 goto kill_it;
815
816 if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
817 if (dentry->d_op->d_delete(dentry))
818 goto kill_it;
819 }
820
821 if (!(dentry->d_flags & DCACHE_REFERENCED))
822 dentry->d_flags |= DCACHE_REFERENCED;
823 dentry_lru_add(dentry);
824
825 dentry->d_lockref.count--;
826 spin_unlock(&dentry->d_lock);
827 return;
828
829 kill_it:
830 dentry = dentry_kill(dentry);
831 if (dentry) {
832 cond_resched();
833 goto repeat;
834 }
835 }
836 EXPORT_SYMBOL(dput);
837
838
839 /* This must be called with d_lock held */
840 static inline void __dget_dlock(struct dentry *dentry)
841 {
842 dentry->d_lockref.count++;
843 }
844
845 static inline void __dget(struct dentry *dentry)
846 {
847 lockref_get(&dentry->d_lockref);
848 }
849
850 struct dentry *dget_parent(struct dentry *dentry)
851 {
852 int gotref;
853 struct dentry *ret;
854
855 /*
856 * Do optimistic parent lookup without any
857 * locking.
858 */
859 rcu_read_lock();
860 ret = ACCESS_ONCE(dentry->d_parent);
861 gotref = lockref_get_not_zero(&ret->d_lockref);
862 rcu_read_unlock();
863 if (likely(gotref)) {
864 if (likely(ret == ACCESS_ONCE(dentry->d_parent)))
865 return ret;
866 dput(ret);
867 }
868
869 repeat:
870 /*
871 * Don't need rcu_dereference because we re-check it was correct under
872 * the lock.
873 */
874 rcu_read_lock();
875 ret = dentry->d_parent;
876 spin_lock(&ret->d_lock);
877 if (unlikely(ret != dentry->d_parent)) {
878 spin_unlock(&ret->d_lock);
879 rcu_read_unlock();
880 goto repeat;
881 }
882 rcu_read_unlock();
883 BUG_ON(!ret->d_lockref.count);
884 ret->d_lockref.count++;
885 spin_unlock(&ret->d_lock);
886 return ret;
887 }
888 EXPORT_SYMBOL(dget_parent);
889
890 /**
891 * d_find_alias - grab a hashed alias of inode
892 * @inode: inode in question
893 *
894 * If inode has a hashed alias, or is a directory and has any alias,
895 * acquire the reference to alias and return it. Otherwise return NULL.
896 * Notice that if inode is a directory there can be only one alias and
897 * it can be unhashed only if it has no children, or if it is the root
898 * of a filesystem, or if the directory was renamed and d_revalidate
899 * was the first vfs operation to notice.
900 *
901 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
902 * any other hashed alias over that one.
903 */
904 static struct dentry *__d_find_alias(struct inode *inode)
905 {
906 struct dentry *alias, *discon_alias;
907
908 again:
909 discon_alias = NULL;
910 hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
911 spin_lock(&alias->d_lock);
912 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
913 if (IS_ROOT(alias) &&
914 (alias->d_flags & DCACHE_DISCONNECTED)) {
915 discon_alias = alias;
916 } else {
917 __dget_dlock(alias);
918 spin_unlock(&alias->d_lock);
919 return alias;
920 }
921 }
922 spin_unlock(&alias->d_lock);
923 }
924 if (discon_alias) {
925 alias = discon_alias;
926 spin_lock(&alias->d_lock);
927 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
928 __dget_dlock(alias);
929 spin_unlock(&alias->d_lock);
930 return alias;
931 }
932 spin_unlock(&alias->d_lock);
933 goto again;
934 }
935 return NULL;
936 }
937
938 struct dentry *d_find_alias(struct inode *inode)
939 {
940 struct dentry *de = NULL;
941
942 if (!hlist_empty(&inode->i_dentry)) {
943 spin_lock(&inode->i_lock);
944 de = __d_find_alias(inode);
945 spin_unlock(&inode->i_lock);
946 }
947 return de;
948 }
949 EXPORT_SYMBOL(d_find_alias);
950
951 /*
952 * Try to kill dentries associated with this inode.
953 * WARNING: you must own a reference to inode.
954 */
955 void d_prune_aliases(struct inode *inode)
956 {
957 struct dentry *dentry;
958 restart:
959 spin_lock(&inode->i_lock);
960 hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
961 spin_lock(&dentry->d_lock);
962 if (!dentry->d_lockref.count) {
963 struct dentry *parent = lock_parent(dentry);
964 if (likely(!dentry->d_lockref.count)) {
965 __dentry_kill(dentry);
966 dput(parent);
967 goto restart;
968 }
969 if (parent)
970 spin_unlock(&parent->d_lock);
971 }
972 spin_unlock(&dentry->d_lock);
973 }
974 spin_unlock(&inode->i_lock);
975 }
976 EXPORT_SYMBOL(d_prune_aliases);
977
978 static void shrink_dentry_list(struct list_head *list)
979 {
980 struct dentry *dentry, *parent;
981
982 while (!list_empty(list)) {
983 struct inode *inode;
984 dentry = list_entry(list->prev, struct dentry, d_lru);
985 spin_lock(&dentry->d_lock);
986 parent = lock_parent(dentry);
987
988 /*
989 * The dispose list is isolated and dentries are not accounted
990 * to the LRU here, so we can simply remove it from the list
991 * here regardless of whether it is referenced or not.
992 */
993 d_shrink_del(dentry);
994
995 /*
996 * We found an in-use dentry which was not removed from
997 * the LRU because of laziness during lookup. Do not free it.
998 */
999 if (dentry->d_lockref.count > 0) {
1000 spin_unlock(&dentry->d_lock);
1001 if (parent)
1002 spin_unlock(&parent->d_lock);
1003 continue;
1004 }
1005
1006
1007 if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
1008 bool can_free = dentry->d_flags & DCACHE_MAY_FREE;
1009 spin_unlock(&dentry->d_lock);
1010 if (parent)
1011 spin_unlock(&parent->d_lock);
1012 if (can_free)
1013 dentry_free(dentry);
1014 continue;
1015 }
1016
1017 inode = dentry->d_inode;
1018 if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
1019 d_shrink_add(dentry, list);
1020 spin_unlock(&dentry->d_lock);
1021 if (parent)
1022 spin_unlock(&parent->d_lock);
1023 continue;
1024 }
1025
1026 __dentry_kill(dentry);
1027
1028 /*
1029 * We need to prune ancestors too. This is necessary to prevent
1030 * quadratic behavior of shrink_dcache_parent(), but is also
1031 * expected to be beneficial in reducing dentry cache
1032 * fragmentation.
1033 */
1034 dentry = parent;
1035 while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
1036 parent = lock_parent(dentry);
1037 if (dentry->d_lockref.count != 1) {
1038 dentry->d_lockref.count--;
1039 spin_unlock(&dentry->d_lock);
1040 if (parent)
1041 spin_unlock(&parent->d_lock);
1042 break;
1043 }
1044 inode = dentry->d_inode; /* can't be NULL */
1045 if (unlikely(!spin_trylock(&inode->i_lock))) {
1046 spin_unlock(&dentry->d_lock);
1047 if (parent)
1048 spin_unlock(&parent->d_lock);
1049 cpu_relax();
1050 continue;
1051 }
1052 __dentry_kill(dentry);
1053 dentry = parent;
1054 }
1055 }
1056 }
1057
1058 static enum lru_status dentry_lru_isolate(struct list_head *item,
1059 struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
1060 {
1061 struct list_head *freeable = arg;
1062 struct dentry *dentry = container_of(item, struct dentry, d_lru);
1063
1064
1065 /*
1066 * we are inverting the lru lock/dentry->d_lock here,
1067 * so use a trylock. If we fail to get the lock, just skip
1068 * it
1069 */
1070 if (!spin_trylock(&dentry->d_lock))
1071 return LRU_SKIP;
1072
1073 /*
1074 * Referenced dentries are still in use. If they have active
1075 * counts, just remove them from the LRU. Otherwise give them
1076 * another pass through the LRU.
1077 */
1078 if (dentry->d_lockref.count) {
1079 d_lru_isolate(lru, dentry);
1080 spin_unlock(&dentry->d_lock);
1081 return LRU_REMOVED;
1082 }
1083
1084 if (dentry->d_flags & DCACHE_REFERENCED) {
1085 dentry->d_flags &= ~DCACHE_REFERENCED;
1086 spin_unlock(&dentry->d_lock);
1087
1088 /*
1089 * The list move itself will be made by the common LRU code. At
1090 * this point, we've dropped the dentry->d_lock but keep the
1091 * lru lock. This is safe to do, since every list movement is
1092 * protected by the lru lock even if both locks are held.
1093 *
1094 * This is guaranteed by the fact that all LRU management
1095 * functions are intermediated by the LRU API calls like
1096 * list_lru_add and list_lru_del. List movement in this file
1097 * only ever occurs through these functions or through callbacks
1098 * like this one, that are called from the LRU API.
1099 *
1100 * The only exceptions to this are functions like
1101 * shrink_dentry_list, and code that first checks for the
1102 * DCACHE_SHRINK_LIST flag. Those are guaranteed to be
1103 * operating only with stack provided lists after they are
1104 * properly isolated from the main list. It is thus always a
1105 * local access.
1106 */
1107 return LRU_ROTATE;
1108 }
1109
1110 d_lru_shrink_move(lru, dentry, freeable);
1111 spin_unlock(&dentry->d_lock);
1112
1113 return LRU_REMOVED;
1114 }
1115
1116 /**
1117 * prune_dcache_sb - shrink the dcache
1118 * @sb: superblock
1119 * @sc: shrink control, passed to list_lru_shrink_walk()
1120 *
1121 * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This
1122 * is done when we need more memory and called from the superblock shrinker
1123 * function.
1124 *
1125 * This function may fail to free any resources if all the dentries are in
1126 * use.
1127 */
1128 long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
1129 {
1130 LIST_HEAD(dispose);
1131 long freed;
1132
1133 freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
1134 dentry_lru_isolate, &dispose);
1135 shrink_dentry_list(&dispose);
1136 return freed;
1137 }
1138
1139 static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
1140 struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
1141 {
1142 struct list_head *freeable = arg;
1143 struct dentry *dentry = container_of(item, struct dentry, d_lru);
1144
1145 /*
1146 * we are inverting the lru lock/dentry->d_lock here,
1147 * so use a trylock. If we fail to get the lock, just skip
1148 * it
1149 */
1150 if (!spin_trylock(&dentry->d_lock))
1151 return LRU_SKIP;
1152
1153 d_lru_shrink_move(lru, dentry, freeable);
1154 spin_unlock(&dentry->d_lock);
1155
1156 return LRU_REMOVED;
1157 }
1158
1159
1160 /**
1161 * shrink_dcache_sb - shrink dcache for a superblock
1162 * @sb: superblock
1163 *
1164 * Shrink the dcache for the specified super block. This is used to free
1165 * the dcache before unmounting a file system.
1166 */
1167 void shrink_dcache_sb(struct super_block *sb)
1168 {
1169 long freed;
1170
1171 do {
1172 LIST_HEAD(dispose);
1173
1174 freed = list_lru_walk(&sb->s_dentry_lru,
1175 dentry_lru_isolate_shrink, &dispose, 1024);
1176
1177 this_cpu_sub(nr_dentry_unused, freed);
1178 shrink_dentry_list(&dispose);
1179 cond_resched();
1180 } while (list_lru_count(&sb->s_dentry_lru) > 0);
1181 }
1182 EXPORT_SYMBOL(shrink_dcache_sb);
1183
1184 /**
1185 * enum d_walk_ret - action to take during tree walk
1186 * @D_WALK_CONTINUE: continue walk
1187 * @D_WALK_QUIT: quit walk
1188 * @D_WALK_NORETRY: quit when retry is needed
1189 * @D_WALK_SKIP: skip this dentry and its children
1190 */
1191 enum d_walk_ret {
1192 D_WALK_CONTINUE,
1193 D_WALK_QUIT,
1194 D_WALK_NORETRY,
1195 D_WALK_SKIP,
1196 };
1197
1198 /**
1199 * d_walk - walk the dentry tree
1200 * @parent: start of walk
1201 * @data: data passed to @enter() and @finish()
1202 * @enter: callback when first entering the dentry
1203 * @finish: callback when successfully finished the walk
1204 *
1205 * The @enter() and @finish() callbacks are called with d_lock held.
1206 */
1207 static void d_walk(struct dentry *parent, void *data,
1208 enum d_walk_ret (*enter)(void *, struct dentry *),
1209 void (*finish)(void *))
1210 {
1211 struct dentry *this_parent;
1212 struct list_head *next;
1213 unsigned seq = 0;
1214 enum d_walk_ret ret;
1215 bool retry = true;
1216
1217 again:
1218 read_seqbegin_or_lock(&rename_lock, &seq);
1219 this_parent = parent;
1220 spin_lock(&this_parent->d_lock);
1221
1222 ret = enter(data, this_parent);
1223 switch (ret) {
1224 case D_WALK_CONTINUE:
1225 break;
1226 case D_WALK_QUIT:
1227 case D_WALK_SKIP:
1228 goto out_unlock;
1229 case D_WALK_NORETRY:
1230 retry = false;
1231 break;
1232 }
1233 repeat:
1234 next = this_parent->d_subdirs.next;
1235 resume:
1236 while (next != &this_parent->d_subdirs) {
1237 struct list_head *tmp = next;
1238 struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
1239 next = tmp->next;
1240
1241 if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
1242 continue;
1243
1244 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1245
1246 ret = enter(data, dentry);
1247 switch (ret) {
1248 case D_WALK_CONTINUE:
1249 break;
1250 case D_WALK_QUIT:
1251 spin_unlock(&dentry->d_lock);
1252 goto out_unlock;
1253 case D_WALK_NORETRY:
1254 retry = false;
1255 break;
1256 case D_WALK_SKIP:
1257 spin_unlock(&dentry->d_lock);
1258 continue;
1259 }
1260
1261 if (!list_empty(&dentry->d_subdirs)) {
1262 spin_unlock(&this_parent->d_lock);
1263 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
1264 this_parent = dentry;
1265 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
1266 goto repeat;
1267 }
1268 spin_unlock(&dentry->d_lock);
1269 }
1270 /*
1271 * All done at this level ... ascend and resume the search.
1272 */
1273 rcu_read_lock();
1274 ascend:
1275 if (this_parent != parent) {
1276 struct dentry *child = this_parent;
1277 this_parent = child->d_parent;
1278
1279 spin_unlock(&child->d_lock);
1280 spin_lock(&this_parent->d_lock);
1281
1282 /* might go back up the wrong parent if we have had a rename. */
1283 if (need_seqretry(&rename_lock, seq))
1284 goto rename_retry;
1285 /* go into the first sibling still alive */
1286 do {
1287 next = child->d_child.next;
1288 if (next == &this_parent->d_subdirs)
1289 goto ascend;
1290 child = list_entry(next, struct dentry, d_child);
1291 } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
1292 rcu_read_unlock();
1293 goto resume;
1294 }
1295 if (need_seqretry(&rename_lock, seq))
1296 goto rename_retry;
1297 rcu_read_unlock();
1298 if (finish)
1299 finish(data);
1300
1301 out_unlock:
1302 spin_unlock(&this_parent->d_lock);
1303 done_seqretry(&rename_lock, seq);
1304 return;
1305
1306 rename_retry:
1307 spin_unlock(&this_parent->d_lock);
1308 rcu_read_unlock();
1309 BUG_ON(seq & 1);
1310 if (!retry)
1311 return;
1312 seq = 1;
1313 goto again;
1314 }
1315
1316 /*
1317 * Search for at least 1 mount point in the dentry's subdirs.
1318 * We descend to the next level whenever the d_subdirs
1319 * list is non-empty and continue searching.
1320 */
1321
1322 static enum d_walk_ret check_mount(void *data, struct dentry *dentry)
1323 {
1324 int *ret = data;
1325 if (d_mountpoint(dentry)) {
1326 *ret = 1;
1327 return D_WALK_QUIT;
1328 }
1329 return D_WALK_CONTINUE;
1330 }
1331
1332 /**
1333 * have_submounts - check for mounts over a dentry
1334 * @parent: dentry to check.
1335 *
1336 * Return true if the parent or its subdirectories contain
1337 * a mount point
1338 */
1339 int have_submounts(struct dentry *parent)
1340 {
1341 int ret = 0;
1342
1343 d_walk(parent, &ret, check_mount, NULL);
1344
1345 return ret;
1346 }
1347 EXPORT_SYMBOL(have_submounts);
1348
1349 /*
1350 * Called by mount code to set a mountpoint and check if the mountpoint is
1351 * reachable (e.g. NFS can unhash a directory dentry and then the complete
1352 * subtree can become unreachable).
1353 *
1354 * Only one of d_invalidate() and d_set_mounted() must succeed. For
1355 * this reason take rename_lock and d_lock on dentry and ancestors.
1356 */
1357 int d_set_mounted(struct dentry *dentry)
1358 {
1359 struct dentry *p;
1360 int ret = -ENOENT;
1361 write_seqlock(&rename_lock);
1362 for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
1363 /* Need exclusion wrt. d_invalidate() */
1364 spin_lock(&p->d_lock);
1365 if (unlikely(d_unhashed(p))) {
1366 spin_unlock(&p->d_lock);
1367 goto out;
1368 }
1369 spin_unlock(&p->d_lock);
1370 }
1371 spin_lock(&dentry->d_lock);
1372 if (!d_unlinked(dentry)) {
1373 ret = -EBUSY;
1374 if (!d_mountpoint(dentry)) {
1375 dentry->d_flags |= DCACHE_MOUNTED;
1376 ret = 0;
1377 }
1378 }
1379 spin_unlock(&dentry->d_lock);
1380 out:
1381 write_sequnlock(&rename_lock);
1382 return ret;
1383 }
1384
1385 /*
1386 * Search the dentry child list of the specified parent,
1387 * and move any unused dentries to the end of the unused
1388 * list for prune_dcache(). We descend to the next level
1389 * whenever the d_subdirs list is non-empty and continue
1390 * searching.
1391 *
1392 * It returns zero iff there are no unused children,
1393 * otherwise it returns the number of children moved to
1394 * the end of the unused list. This may not be the total
1395 * number of unused children, because select_collect can
1396 * drop the lock and return early due to latency
1397 * constraints.
1398 */
1399
1400 struct select_data {
1401 struct dentry *start;
1402 struct list_head dispose;
1403 int found;
1404 };
1405
1406 static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
1407 {
1408 struct select_data *data = _data;
1409 enum d_walk_ret ret = D_WALK_CONTINUE;
1410
1411 if (data->start == dentry)
1412 goto out;
1413
1414 if (dentry->d_flags & DCACHE_SHRINK_LIST) {
1415 data->found++;
1416 } else {
1417 if (dentry->d_flags & DCACHE_LRU_LIST)
1418 d_lru_del(dentry);
1419 if (!dentry->d_lockref.count) {
1420 d_shrink_add(dentry, &data->dispose);
1421 data->found++;
1422 }
1423 }
1424 /*
1425 * We can return to the caller if we have found some (this
1426 * ensures forward progress). We'll be coming back to find
1427 * the rest.
1428 */
1429 if (!list_empty(&data->dispose))
1430 ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
1431 out:
1432 return ret;
1433 }
1434
1435 /**
1436 * shrink_dcache_parent - prune dcache
1437 * @parent: parent of entries to prune
1438 *
1439 * Prune the dcache to remove unused children of the parent dentry.
1440 */
1441 void shrink_dcache_parent(struct dentry *parent)
1442 {
1443 for (;;) {
1444 struct select_data data;
1445
1446 INIT_LIST_HEAD(&data.dispose);
1447 data.start = parent;
1448 data.found = 0;
1449
1450 d_walk(parent, &data, select_collect, NULL);
1451 if (!data.found)
1452 break;
1453
1454 shrink_dentry_list(&data.dispose);
1455 cond_resched();
1456 }
1457 }
1458 EXPORT_SYMBOL(shrink_dcache_parent);
1459
1460 static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
1461 {
1462 /* it has busy descendants; complain about those instead */
1463 if (!list_empty(&dentry->d_subdirs))
1464 return D_WALK_CONTINUE;
1465
1466 /* root with refcount 1 is fine */
1467 if (dentry == _data && dentry->d_lockref.count == 1)
1468 return D_WALK_CONTINUE;
1469
1470 printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd}"
1471 " still in use (%d) [unmount of %s %s]\n",
1472 dentry,
1473 dentry->d_inode ?
1474 dentry->d_inode->i_ino : 0UL,
1475 dentry,
1476 dentry->d_lockref.count,
1477 dentry->d_sb->s_type->name,
1478 dentry->d_sb->s_id);
1479 WARN_ON(1);
1480 return D_WALK_CONTINUE;
1481 }
1482
1483 static void do_one_tree(struct dentry *dentry)
1484 {
1485 shrink_dcache_parent(dentry);
1486 d_walk(dentry, dentry, umount_check, NULL);
1487 d_drop(dentry);
1488 dput(dentry);
1489 }
1490
1491 /*
1492 * destroy the dentries attached to a superblock on unmounting
1493 */
1494 void shrink_dcache_for_umount(struct super_block *sb)
1495 {
1496 struct dentry *dentry;
1497
1498 WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");
1499
1500 dentry = sb->s_root;
1501 sb->s_root = NULL;
1502 do_one_tree(dentry);
1503
1504 while (!hlist_bl_empty(&sb->s_anon)) {
1505 dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash));
1506 do_one_tree(dentry);
1507 }
1508 }
1509
1510 struct detach_data {
1511 struct select_data select;
1512 struct dentry *mountpoint;
1513 };
1514 static enum d_walk_ret detach_and_collect(void *_data, struct dentry *dentry)
1515 {
1516 struct detach_data *data = _data;
1517
1518 if (d_mountpoint(dentry)) {
1519 __dget_dlock(dentry);
1520 data->mountpoint = dentry;
1521 return D_WALK_QUIT;
1522 }
1523
1524 return select_collect(&data->select, dentry);
1525 }
1526
1527 static void check_and_drop(void *_data)
1528 {
1529 struct detach_data *data = _data;
1530
1531 if (!data->mountpoint && !data->select.found)
1532 __d_drop(data->select.start);
1533 }
1534
1535 /**
1536 * d_invalidate - detach submounts, prune dcache, and drop
1537 * @dentry: dentry to invalidate (aka detach, prune and drop)
1538 *
1539 * no dcache lock.
1540 *
1541 * The final d_drop is done as an atomic operation relative to
1542 * rename_lock ensuring there are no races with d_set_mounted. This
1543 * ensures there are no unhashed dentries on the path to a mountpoint.
1544 */
1545 void d_invalidate(struct dentry *dentry)
1546 {
1547 /*
1548 * If it's already been dropped, return OK.
1549 */
1550 spin_lock(&dentry->d_lock);
1551 if (d_unhashed(dentry)) {
1552 spin_unlock(&dentry->d_lock);
1553 return;
1554 }
1555 spin_unlock(&dentry->d_lock);
1556
1557 /* Negative dentries can be dropped without further checks */
1558 if (!dentry->d_inode) {
1559 d_drop(dentry);
1560 return;
1561 }
1562
1563 for (;;) {
1564 struct detach_data data;
1565
1566 data.mountpoint = NULL;
1567 INIT_LIST_HEAD(&data.select.dispose);
1568 data.select.start = dentry;
1569 data.select.found = 0;
1570
1571 d_walk(dentry, &data, detach_and_collect, check_and_drop);
1572
1573 if (data.select.found)
1574 shrink_dentry_list(&data.select.dispose);
1575
1576 if (data.mountpoint) {
1577 detach_mounts(data.mountpoint);
1578 dput(data.mountpoint);
1579 }
1580
1581 if (!data.mountpoint && !data.select.found)
1582 break;
1583
1584 cond_resched();
1585 }
1586 }
1587 EXPORT_SYMBOL(d_invalidate);
1588
1589 /**
1590 * __d_alloc - allocate a dcache entry
1591 * @sb: filesystem it will belong to
1592 * @name: qstr of the name
1593 *
1594 * Allocates a dentry. It returns %NULL if there is insufficient memory
1595 * available. On a success the dentry is returned. The name passed in is
1596 * copied and the copy passed in may be reused after this call.
1597 */
1598
1599 struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
1600 {
1601 struct dentry *dentry;
1602 char *dname;
1603 int err;
1604
1605 dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
1606 if (!dentry)
1607 return NULL;
1608
1609 /*
1610 * We guarantee that the inline name is always NUL-terminated.
1611 * This way the memcpy() done by the name switching in rename
1612 * will still always have a NUL at the end, even if we might
1613 * be overwriting an internal NUL character
1614 */
1615 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
1616 if (unlikely(!name)) {
1617 static const struct qstr anon = QSTR_INIT("/", 1);
1618 name = &anon;
1619 dname = dentry->d_iname;
1620 } else if (name->len > DNAME_INLINE_LEN-1) {
1621 size_t size = offsetof(struct external_name, name[1]);
1622 struct external_name *p = kmalloc(size + name->len,
1623 GFP_KERNEL_ACCOUNT);
1624 if (!p) {
1625 kmem_cache_free(dentry_cache, dentry);
1626 return NULL;
1627 }
1628 atomic_set(&p->u.count, 1);
1629 dname = p->name;
1630 if (IS_ENABLED(CONFIG_DCACHE_WORD_ACCESS))
1631 kasan_unpoison_shadow(dname,
1632 round_up(name->len + 1, sizeof(unsigned long)));
1633 } else {
1634 dname = dentry->d_iname;
1635 }
1636
1637 dentry->d_name.len = name->len;
1638 dentry->d_name.hash = name->hash;
1639 memcpy(dname, name->name, name->len);
1640 dname[name->len] = 0;
1641
1642 /* Make sure we always see the terminating NUL character */
1643 smp_wmb();
1644 dentry->d_name.name = dname;
1645
1646 dentry->d_lockref.count = 1;
1647 dentry->d_flags = 0;
1648 spin_lock_init(&dentry->d_lock);
1649 seqcount_init(&dentry->d_seq);
1650 dentry->d_inode = NULL;
1651 dentry->d_parent = dentry;
1652 dentry->d_sb = sb;
1653 dentry->d_op = NULL;
1654 dentry->d_fsdata = NULL;
1655 INIT_HLIST_BL_NODE(&dentry->d_hash);
1656 INIT_LIST_HEAD(&dentry->d_lru);
1657 INIT_LIST_HEAD(&dentry->d_subdirs);
1658 INIT_HLIST_NODE(&dentry->d_u.d_alias);
1659 INIT_LIST_HEAD(&dentry->d_child);
1660 d_set_d_op(dentry, dentry->d_sb->s_d_op);
1661
1662 if (dentry->d_op && dentry->d_op->d_init) {
1663 err = dentry->d_op->d_init(dentry);
1664 if (err) {
1665 if (dname_external(dentry))
1666 kfree(external_name(dentry));
1667 kmem_cache_free(dentry_cache, dentry);
1668 return NULL;
1669 }
1670 }
1671
1672 this_cpu_inc(nr_dentry);
1673
1674 return dentry;
1675 }
1676
1677 /**
1678 * d_alloc - allocate a dcache entry
1679 * @parent: parent of entry to allocate
1680 * @name: qstr of the name
1681 *
1682 * Allocates a dentry. It returns %NULL if there is insufficient memory
1683 * available. On a success the dentry is returned. The name passed in is
1684 * copied and the copy passed in may be reused after this call.
1685 */
1686 struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1687 {
1688 struct dentry *dentry = __d_alloc(parent->d_sb, name);
1689 if (!dentry)
1690 return NULL;
1691 dentry->d_flags |= DCACHE_RCUACCESS;
1692 spin_lock(&parent->d_lock);
1693 /*
1694 * don't need child lock because it is not subject
1695 * to concurrency here
1696 */
1697 __dget_dlock(parent);
1698 dentry->d_parent = parent;
1699 list_add(&dentry->d_child, &parent->d_subdirs);
1700 spin_unlock(&parent->d_lock);
1701
1702 return dentry;
1703 }
1704 EXPORT_SYMBOL(d_alloc);
1705
1706 struct dentry *d_alloc_cursor(struct dentry * parent)
1707 {
1708 struct dentry *dentry = __d_alloc(parent->d_sb, NULL);
1709 if (dentry) {
1710 dentry->d_flags |= DCACHE_RCUACCESS | DCACHE_DENTRY_CURSOR;
1711 dentry->d_parent = dget(parent);
1712 }
1713 return dentry;
1714 }
1715
1716 /**
1717 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
1718 * @sb: the superblock
1719 * @name: qstr of the name
1720 *
1721 * For a filesystem that just pins its dentries in memory and never
1722 * performs lookups at all, return an unhashed IS_ROOT dentry.
1723 */
1724 struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
1725 {
1726 return __d_alloc(sb, name);
1727 }
1728 EXPORT_SYMBOL(d_alloc_pseudo);
1729
1730 struct dentry *d_alloc_name(struct dentry *parent, const char *name)
1731 {
1732 struct qstr q;
1733
1734 q.name = name;
1735 q.hash_len = hashlen_string(parent, name);
1736 return d_alloc(parent, &q);
1737 }
1738 EXPORT_SYMBOL(d_alloc_name);
1739
1740 void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
1741 {
1742 WARN_ON_ONCE(dentry->d_op);
1743 WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH |
1744 DCACHE_OP_COMPARE |
1745 DCACHE_OP_REVALIDATE |
1746 DCACHE_OP_WEAK_REVALIDATE |
1747 DCACHE_OP_DELETE |
1748 DCACHE_OP_REAL));
1749 dentry->d_op = op;
1750 if (!op)
1751 return;
1752 if (op->d_hash)
1753 dentry->d_flags |= DCACHE_OP_HASH;
1754 if (op->d_compare)
1755 dentry->d_flags |= DCACHE_OP_COMPARE;
1756 if (op->d_revalidate)
1757 dentry->d_flags |= DCACHE_OP_REVALIDATE;
1758 if (op->d_weak_revalidate)
1759 dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
1760 if (op->d_delete)
1761 dentry->d_flags |= DCACHE_OP_DELETE;
1762 if (op->d_prune)
1763 dentry->d_flags |= DCACHE_OP_PRUNE;
1764 if (op->d_real)
1765 dentry->d_flags |= DCACHE_OP_REAL;
1766
1767 }
1768 EXPORT_SYMBOL(d_set_d_op);
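
/*
 * Setup sketch (hypothetical filesystem): point sb->s_d_op at a static
 * table at mount time; __d_alloc() then applies it to every new dentry
 * through the d_set_d_op() call above.
 */
static int example_d_delete(const struct dentry *dentry)
{
	return 1;	/* never keep unused dentries cached */
}

static const struct dentry_operations example_dentry_ops = {
	.d_delete	= example_d_delete,
};
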
1769
1770
1771 /*
1772 * d_set_fallthru - Mark a dentry as falling through to a lower layer
1773 * @dentry - The dentry to mark
1774 *
1775 * Mark a dentry as falling through to the lower layer (as set with
1776 * d_pin_lower()). This flag may be recorded on the medium.
1777 */
1778 void d_set_fallthru(struct dentry *dentry)
1779 {
1780 spin_lock(&dentry->d_lock);
1781 dentry->d_flags |= DCACHE_FALLTHRU;
1782 spin_unlock(&dentry->d_lock);
1783 }
1784 EXPORT_SYMBOL(d_set_fallthru);
1785
1786 static unsigned d_flags_for_inode(struct inode *inode)
1787 {
1788 unsigned add_flags = DCACHE_REGULAR_TYPE;
1789
1790 if (!inode)
1791 return DCACHE_MISS_TYPE;
1792
1793 if (S_ISDIR(inode->i_mode)) {
1794 add_flags = DCACHE_DIRECTORY_TYPE;
1795 if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
1796 if (unlikely(!inode->i_op->lookup))
1797 add_flags = DCACHE_AUTODIR_TYPE;
1798 else
1799 inode->i_opflags |= IOP_LOOKUP;
1800 }
1801 goto type_determined;
1802 }
1803
1804 if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
1805 if (unlikely(inode->i_op->get_link)) {
1806 add_flags = DCACHE_SYMLINK_TYPE;
1807 goto type_determined;
1808 }
1809 inode->i_opflags |= IOP_NOFOLLOW;
1810 }
1811
1812 if (unlikely(!S_ISREG(inode->i_mode)))
1813 add_flags = DCACHE_SPECIAL_TYPE;
1814
1815 type_determined:
1816 if (unlikely(IS_AUTOMOUNT(inode)))
1817 add_flags |= DCACHE_NEED_AUTOMOUNT;
1818 return add_flags;
1819 }
1820
1821 static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1822 {
1823 unsigned add_flags = d_flags_for_inode(inode);
1824 WARN_ON(d_in_lookup(dentry));
1825
1826 spin_lock(&dentry->d_lock);
1827 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
1828 raw_write_seqcount_begin(&dentry->d_seq);
1829 __d_set_inode_and_type(dentry, inode, add_flags);
1830 raw_write_seqcount_end(&dentry->d_seq);
1831 fsnotify_update_flags(dentry);
1832 spin_unlock(&dentry->d_lock);
1833 }
1834
1835 /**
1836 * d_instantiate - fill in inode information for a dentry
1837 * @entry: dentry to complete
1838 * @inode: inode to attach to this dentry
1839 *
1840 * Fill in inode information in the entry.
1841 *
1842 * This turns negative dentries into productive full members
1843 * of society.
1844 *
1845 * NOTE! This assumes that the inode count has been incremented
1846 * (or otherwise set) by the caller to indicate that it is now
1847 * in use by the dcache.
1848 */
1849
1850 void d_instantiate(struct dentry *entry, struct inode * inode)
1851 {
1852 BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1853 if (inode) {
1854 security_d_instantiate(entry, inode);
1855 spin_lock(&inode->i_lock);
1856 __d_instantiate(entry, inode);
1857 spin_unlock(&inode->i_lock);
1858 }
1859 }
1860 EXPORT_SYMBOL(d_instantiate);
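
/*
 * Typical use (a sketch paraphrased from common ->create() instances;
 * myfs_new_inode() is hypothetical):
 *
 *	static int myfs_create(struct inode *dir, struct dentry *dentry,
 *			       umode_t mode, bool excl)
 *	{
 *		struct inode *inode = myfs_new_inode(dir, mode);
 *		if (!inode)
 *			return -ENOSPC;
 *		d_instantiate(dentry, inode);
 *		return 0;
 *	}
 *
 * The inode reference is consumed by the dcache, per the NOTE above.
 */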
1861
1862 /**
1863 * d_instantiate_no_diralias - instantiate a non-aliased dentry
1864 * @entry: dentry to complete
1865 * @inode: inode to attach to this dentry
1866 *
1867 * Fill in inode information in the entry. If a directory alias is found, then
1868 * return an error (and drop inode). Together with d_materialise_unique() this
1869 * guarantees that a directory inode may never have more than one alias.
1870 */
1871 int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
1872 {
1873 BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1874
1875 security_d_instantiate(entry, inode);
1876 spin_lock(&inode->i_lock);
1877 if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
1878 spin_unlock(&inode->i_lock);
1879 iput(inode);
1880 return -EBUSY;
1881 }
1882 __d_instantiate(entry, inode);
1883 spin_unlock(&inode->i_lock);
1884
1885 return 0;
1886 }
1887 EXPORT_SYMBOL(d_instantiate_no_diralias);
1888
1889 struct dentry *d_make_root(struct inode *root_inode)
1890 {
1891 struct dentry *res = NULL;
1892
1893 if (root_inode) {
1894 res = __d_alloc(root_inode->i_sb, NULL);
1895 if (res)
1896 d_instantiate(res, root_inode);
1897 else
1898 iput(root_inode);
1899 }
1900 return res;
1901 }
1902 EXPORT_SYMBOL(d_make_root);
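
/*
 * Typical use in a fill_super() instance; note that d_make_root()
 * consumes the inode reference even on failure, so no iput() is
 * needed in the error path (myfs_get_root_inode() is hypothetical):
 *
 *	inode = myfs_get_root_inode(sb);
 *	sb->s_root = d_make_root(inode);
 *	if (!sb->s_root)
 *		return -ENOMEM;
 */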
1903
1904 static struct dentry * __d_find_any_alias(struct inode *inode)
1905 {
1906 struct dentry *alias;
1907
1908 if (hlist_empty(&inode->i_dentry))
1909 return NULL;
1910 alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
1911 __dget(alias);
1912 return alias;
1913 }
1914
1915 /**
1916 * d_find_any_alias - find any alias for a given inode
1917 * @inode: inode to find an alias for
1918 *
1919 * If any aliases exist for the given inode, take and return a
1920 * reference for one of them. If no aliases exist, return %NULL.
1921 */
1922 struct dentry *d_find_any_alias(struct inode *inode)
1923 {
1924 struct dentry *de;
1925
1926 spin_lock(&inode->i_lock);
1927 de = __d_find_any_alias(inode);
1928 spin_unlock(&inode->i_lock);
1929 return de;
1930 }
1931 EXPORT_SYMBOL(d_find_any_alias);
1932
1933 static struct dentry *__d_obtain_alias(struct inode *inode, int disconnected)
1934 {
1935 struct dentry *tmp;
1936 struct dentry *res;
1937 unsigned add_flags;
1938
1939 if (!inode)
1940 return ERR_PTR(-ESTALE);
1941 if (IS_ERR(inode))
1942 return ERR_CAST(inode);
1943
1944 res = d_find_any_alias(inode);
1945 if (res)
1946 goto out_iput;
1947
1948 tmp = __d_alloc(inode->i_sb, NULL);
1949 if (!tmp) {
1950 res = ERR_PTR(-ENOMEM);
1951 goto out_iput;
1952 }
1953
1954 security_d_instantiate(tmp, inode);
1955 spin_lock(&inode->i_lock);
1956 res = __d_find_any_alias(inode);
1957 if (res) {
1958 spin_unlock(&inode->i_lock);
1959 dput(tmp);
1960 goto out_iput;
1961 }
1962
1963 /* attach a disconnected dentry */
1964 add_flags = d_flags_for_inode(inode);
1965
1966 if (disconnected)
1967 add_flags |= DCACHE_DISCONNECTED;
1968
1969 spin_lock(&tmp->d_lock);
1970 __d_set_inode_and_type(tmp, inode, add_flags);
1971 hlist_add_head(&tmp->d_u.d_alias, &inode->i_dentry);
1972 hlist_bl_lock(&tmp->d_sb->s_anon);
1973 hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
1974 hlist_bl_unlock(&tmp->d_sb->s_anon);
1975 spin_unlock(&tmp->d_lock);
1976 spin_unlock(&inode->i_lock);
1977
1978 return tmp;
1979
1980 out_iput:
1981 iput(inode);
1982 return res;
1983 }
1984
1985 /**
1986 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
1987 * @inode: inode to allocate the dentry for
1988 *
1989 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
1990 * similar open by handle operations. The returned dentry may be anonymous,
1991 * or may have a full name (if the inode was already in the cache).
1992 *
1993 * When called on a directory inode, we must ensure that the inode only ever
1994 * has one dentry. If a dentry is found, that is returned instead of
1995 * allocating a new one.
1996 *
1997 * On successful return, the reference to the inode has been transferred
1998 * to the dentry. In case of an error the reference on the inode is released.
1999 * To make it easier to use in export operations a %NULL or IS_ERR inode may
2000 * be passed in and the error will be propagated to the return value,
2001 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
2002 */
2003 struct dentry *d_obtain_alias(struct inode *inode)
2004 {
2005 return __d_obtain_alias(inode, 1);
2006 }
2007 EXPORT_SYMBOL(d_obtain_alias);
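
/*
 * Typical use in an export_operations instance (sketch; myfs_iget() is
 * hypothetical - a %NULL or IS_ERR result from it is handled by
 * d_obtain_alias() itself, as documented above):
 *
 *	static struct dentry *myfs_fh_to_dentry(struct super_block *sb,
 *				struct fid *fid, int fh_len, int fh_type)
 *	{
 *		return d_obtain_alias(myfs_iget(sb, fid->i32.ino));
 *	}
 */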
2008
2009 /**
2010 * d_obtain_root - find or allocate a dentry for a given inode
2011 * @inode: inode to allocate the dentry for
2012 *
2013 * Obtain an IS_ROOT dentry for the root of a filesystem.
2014 *
2015 * We must ensure that directory inodes only ever have one dentry. If a
2016 * dentry is found, that is returned instead of allocating a new one.
2017 *
2018 * On successful return, the reference to the inode has been transferred
2019 * to the dentry. In case of an error the reference on the inode is
2020 * released. A %NULL or IS_ERR inode may be passed in and the error
2021 * will be propagated to the return value, with a %NULL @inode
2022 * replaced by ERR_PTR(-ESTALE).
2023 */
2024 struct dentry *d_obtain_root(struct inode *inode)
2025 {
2026 return __d_obtain_alias(inode, 0);
2027 }
2028 EXPORT_SYMBOL(d_obtain_root);
2029
2030 /**
2031 * d_add_ci - lookup or allocate new dentry with case-exact name
2032 * @inode: the inode case-insensitive lookup has found
2033 * @dentry: the negative dentry that was passed to the parent's lookup func
2034 * @name: the case-exact name to be associated with the returned dentry
2035 *
2036 * This is to avoid filling the dcache with case-insensitive names for the
2037 * same inode; only the actual correct case is stored in the dcache for
2038 * case-insensitive filesystems.
2039 *
2040 * For a case-insensitive lookup match, if the case-exact dentry
2041 * already exists in the dcache, use it and return it.
2042 *
2043 * If no entry exists with the exact case name, allocate new dentry with
2044 * the exact case, and return the spliced entry.
2045 */
2046 struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
2047 struct qstr *name)
2048 {
2049 struct dentry *found, *res;
2050
2051 /*
2052 * First check if a dentry matching the name already exists,
2053 * if not go ahead and create it now.
2054 */
2055 found = d_hash_and_lookup(dentry->d_parent, name);
2056 if (found) {
2057 iput(inode);
2058 return found;
2059 }
2060 if (d_in_lookup(dentry)) {
2061 found = d_alloc_parallel(dentry->d_parent, name,
2062 dentry->d_wait);
2063 if (IS_ERR(found) || !d_in_lookup(found)) {
2064 iput(inode);
2065 return found;
2066 }
2067 } else {
2068 found = d_alloc(dentry->d_parent, name);
2069 if (!found) {
2070 iput(inode);
2071 return ERR_PTR(-ENOMEM);
2072 }
2073 }
2074 res = d_splice_alias(inode, found);
2075 if (res) {
2076 dput(found);
2077 return res;
2078 }
2079 return found;
2080 }
2081 EXPORT_SYMBOL(d_add_ci);
2082
2083
2084 static inline bool d_same_name(const struct dentry *dentry,
2085 const struct dentry *parent,
2086 const struct qstr *name)
2087 {
2088 if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) {
2089 if (dentry->d_name.len != name->len)
2090 return false;
2091 return dentry_cmp(dentry, name->name, name->len) == 0;
2092 }
2093 return parent->d_op->d_compare(dentry,
2094 dentry->d_name.len, dentry->d_name.name,
2095 name) == 0;
2096 }
2097
2098 /**
2099 * __d_lookup_rcu - search for a dentry (racy, store-free)
2100 * @parent: parent dentry
2101 * @name: qstr of name we wish to find
2102 * @seqp: returns d_seq value at the point where the dentry was found
2103 * Returns: dentry, or NULL
2104 *
2105 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
2106 * resolution (store-free path walking) design described in
2107 * Documentation/filesystems/path-lookup.txt.
2108 *
2109 * This is not to be used outside core vfs.
2110 *
2111 * __d_lookup_rcu must only be used in rcu-walk mode, i.e. with the vfsmount
2112 * lock and rcu_read_lock held. The returned dentry must not be stored
2113 * without taking d_lock and checking the d_seq sequence count against @seq
2114 * returned here.
2115 *
2116 * A refcount may be taken on the found dentry with the d_rcu_to_refcount
2117 * function.
2118 *
2119 * Alternatively, __d_lookup_rcu may be called again to look up the child of
2120 * the returned dentry, so long as its parent's seqlock is checked after the
2121 * child is looked up. Thus, an interlocking stepping of sequence lock checks
2122 * is formed, giving integrity down the path walk.
2123 *
2124 * NOTE! The caller *has* to check the resulting dentry against the sequence
2125 * number we've returned before using any of the resulting dentry state!
2126 */
2127 struct dentry *__d_lookup_rcu(const struct dentry *parent,
2128 const struct qstr *name,
2129 unsigned *seqp)
2130 {
2131 u64 hashlen = name->hash_len;
2132 const unsigned char *str = name->name;
2133 struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen));
2134 struct hlist_bl_node *node;
2135 struct dentry *dentry;
2136
2137 /*
2138 * Note: There is significant duplication with __d_lookup which is
2139 * required to prevent single threaded performance regressions
2140 * especially on architectures where smp_rmb (in seqcounts) are costly.
2141 * Keep the two functions in sync.
2142 */
2143
2144 /*
2145 * The hash list is protected using RCU.
2146 *
2147 * Carefully use d_seq when comparing a candidate dentry, to avoid
2148 * races with d_move().
2149 *
2150 * It is possible that concurrent renames can mess up our list
2151 * walk here and cause us to miss our dentry, giving a
2152 * false-negative result. d_lookup() protects against concurrent
2153 * renames using rename_lock seqlock.
2154 *
2155 * See Documentation/filesystems/path-lookup.txt for more details.
2156 */
2157 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2158 unsigned seq;
2159
2160 seqretry:
2161 /*
2162 * The dentry sequence count protects us from concurrent
2163 * renames, and thus protects parent and name fields.
2164 *
2165 * The caller must perform a seqcount check in order
2166 * to do anything useful with the returned dentry.
2167 *
2168 * NOTE! We do a "raw" seqcount_begin here. That means that
2169 * we don't wait for the sequence count to stabilize if it
2170 * is in the middle of a sequence change. If we do the slow
2171 * dentry compare, we will do seqretries until it is stable,
2172 * and if we end up with a successful lookup, we actually
2173 * want to exit RCU lookup anyway.
2174 *
2175 * Note that raw_seqcount_begin still *does* smp_rmb(), so
2176 * we are still guaranteed NUL-termination of ->d_name.name.
2177 */
2178 seq = raw_seqcount_begin(&dentry->d_seq);
2179 if (dentry->d_parent != parent)
2180 continue;
2181 if (d_unhashed(dentry))
2182 continue;
2183
2184 if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
2185 int tlen;
2186 const char *tname;
2187 if (dentry->d_name.hash != hashlen_hash(hashlen))
2188 continue;
2189 tlen = dentry->d_name.len;
2190 tname = dentry->d_name.name;
2191 /* we want a consistent (name,len) pair */
2192 if (read_seqcount_retry(&dentry->d_seq, seq)) {
2193 cpu_relax();
2194 goto seqretry;
2195 }
2196 if (parent->d_op->d_compare(dentry,
2197 tlen, tname, name) != 0)
2198 continue;
2199 } else {
2200 if (dentry->d_name.hash_len != hashlen)
2201 continue;
2202 if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
2203 continue;
2204 }
2205 *seqp = seq;
2206 return dentry;
2207 }
2208 return NULL;
2209 }
2210
2211 /**
2212 * d_lookup - search for a dentry
2213 * @parent: parent dentry
2214 * @name: qstr of name we wish to find
2215 * Returns: dentry, or NULL
2216 *
2217 * d_lookup searches the children of the parent dentry for the name in
2218 * question. If the dentry is found its reference count is incremented and the
2219 * dentry is returned. The caller must use dput to free the entry when it has
2220 * finished using it. %NULL is returned if the dentry does not exist.
2221 */
2222 struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
2223 {
2224 struct dentry *dentry;
2225 unsigned seq;
2226
2227 do {
2228 seq = read_seqbegin(&rename_lock);
2229 dentry = __d_lookup(parent, name);
2230 if (dentry)
2231 break;
2232 } while (read_seqretry(&rename_lock, seq));
2233 return dentry;
2234 }
2235 EXPORT_SYMBOL(d_lookup);
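
/*
 * Usage sketch; @name must already carry a valid ->hash (see
 * d_hash_and_lookup() below for the variant that computes it):
 *
 *	struct dentry *child = d_lookup(parent, &name);
 *	if (child) {
 *		... use child ...
 *		dput(child);	(drop the reference d_lookup took)
 *	}
 */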
2236
2237 /**
2238 * __d_lookup - search for a dentry (racy)
2239 * @parent: parent dentry
2240 * @name: qstr of name we wish to find
2241 * Returns: dentry, or NULL
2242 *
2243 * __d_lookup is like d_lookup, however it may (rarely) return a
2244 * false-negative result due to unrelated rename activity.
2245 *
2246 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
2247 * however it must be used carefully, eg. with a following d_lookup in
2248 * the case of failure.
2249 *
2250 * __d_lookup callers must be commented.
2251 */
2252 struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
2253 {
2254 unsigned int hash = name->hash;
2255 struct hlist_bl_head *b = d_hash(hash);
2256 struct hlist_bl_node *node;
2257 struct dentry *found = NULL;
2258 struct dentry *dentry;
2259
2260 /*
2261 * Note: There is significant duplication with __d_lookup_rcu which is
2262 * required to prevent single threaded performance regressions
2263 * especially on architectures where smp_rmb (in seqcounts) are costly.
2264 * Keep the two functions in sync.
2265 */
2266
2267 /*
2268 * The hash list is protected using RCU.
2269 *
2270 * Take d_lock when comparing a candidate dentry, to avoid races
2271 * with d_move().
2272 *
2273 * It is possible that concurrent renames can mess up our list
2274 * walk here and cause us to miss our dentry, giving a
2275 * false-negative result. d_lookup() protects against concurrent
2276 * renames using rename_lock seqlock.
2277 *
2278 * See Documentation/filesystems/path-lookup.txt for more details.
2279 */
2280 rcu_read_lock();
2281
2282 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2283
2284 if (dentry->d_name.hash != hash)
2285 continue;
2286
2287 spin_lock(&dentry->d_lock);
2288 if (dentry->d_parent != parent)
2289 goto next;
2290 if (d_unhashed(dentry))
2291 goto next;
2292
2293 if (!d_same_name(dentry, parent, name))
2294 goto next;
2295
2296 dentry->d_lockref.count++;
2297 found = dentry;
2298 spin_unlock(&dentry->d_lock);
2299 break;
2300 next:
2301 spin_unlock(&dentry->d_lock);
2302 }
2303 rcu_read_unlock();
2304
2305 return found;
2306 }
2307
2308 /**
2309 * d_hash_and_lookup - hash the qstr then search for a dentry
2310 * @dir: Directory to search in
2311 * @name: qstr of name we wish to find
2312 *
2313 * On lookup failure NULL is returned; on bad name - ERR_PTR(-error)
2314 */
2315 struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
2316 {
2317 /*
2318 * Check for a fs-specific hash function. Note that we must
2319 * calculate the standard hash first, as the d_op->d_hash()
2320 * routine may choose to leave the hash value unchanged.
2321 */
2322 name->hash = full_name_hash(dir, name->name, name->len);
2323 if (dir->d_flags & DCACHE_OP_HASH) {
2324 int err = dir->d_op->d_hash(dir, name);
2325 if (unlikely(err < 0))
2326 return ERR_PTR(err);
2327 }
2328 return d_lookup(dir, name);
2329 }
2330 EXPORT_SYMBOL(d_hash_and_lookup);
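
/*
 * Usage sketch:
 *
 *	struct qstr name = QSTR_INIT("log", 3);
 *	struct dentry *child = d_hash_and_lookup(dir, &name);
 *
 *	if (IS_ERR(child))		(->d_hash() rejected the name)
 *		return PTR_ERR(child);
 *	if (child) {
 *		... use child ...
 *		dput(child);
 *	}
 */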
2331
2332 /*
2333 * When a file is deleted, we have two options:
2334 * - turn this dentry into a negative dentry
2335 * - unhash this dentry and free it.
2336 *
2337 * Usually, we want to just turn this into
2338 * a negative dentry, but if anybody else is
2339 * currently using the dentry or the inode
2340 * we can't do that and we fall back on removing
2341 * it from the hash queues and waiting for
2342 * it to be deleted later when it has no users
2343 */
2344
2345 /**
2346 * d_delete - delete a dentry
2347 * @dentry: The dentry to delete
2348 *
2349 * Turn the dentry into a negative dentry if possible, otherwise
2350 * remove it from the hash queues so it can be deleted later
2351 */
2352
2353 void d_delete(struct dentry * dentry)
2354 {
2355 struct inode *inode;
2356 int isdir = 0;
2357 /*
2358 * Are we the only user?
2359 */
2360 again:
2361 spin_lock(&dentry->d_lock);
2362 inode = dentry->d_inode;
2363 isdir = S_ISDIR(inode->i_mode);
2364 if (dentry->d_lockref.count == 1) {
2365 if (!spin_trylock(&inode->i_lock)) {
2366 spin_unlock(&dentry->d_lock);
2367 cpu_relax();
2368 goto again;
2369 }
2370 dentry->d_flags &= ~DCACHE_CANT_MOUNT;
2371 dentry_unlink_inode(dentry);
2372 fsnotify_nameremove(dentry, isdir);
2373 return;
2374 }
2375
2376 if (!d_unhashed(dentry))
2377 __d_drop(dentry);
2378
2379 spin_unlock(&dentry->d_lock);
2380
2381 fsnotify_nameremove(dentry, isdir);
2382 }
2383 EXPORT_SYMBOL(d_delete);
2384
2385 static void __d_rehash(struct dentry *entry)
2386 {
2387 struct hlist_bl_head *b = d_hash(entry->d_name.hash);
2388
2389 hlist_bl_lock(b);
2390 hlist_bl_add_head_rcu(&entry->d_hash, b);
2391 hlist_bl_unlock(b);
2392 }
2393
2394 /**
2395 * d_rehash - add an entry back to the hash
2396 * @entry: dentry to add to the hash
2397 *
2398 * Adds a dentry to the hash according to its name.
2399 */
2400
2401 void d_rehash(struct dentry * entry)
2402 {
2403 spin_lock(&entry->d_lock);
2404 __d_rehash(entry);
2405 spin_unlock(&entry->d_lock);
2406 }
2407 EXPORT_SYMBOL(d_rehash);
2408
2409 static inline unsigned start_dir_add(struct inode *dir)
2410 {
2411
2412 for (;;) {
2413 unsigned n = dir->i_dir_seq;
2414 if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
2415 return n;
2416 cpu_relax();
2417 }
2418 }
2419
2420 static inline void end_dir_add(struct inode *dir, unsigned n)
2421 {
2422 smp_store_release(&dir->i_dir_seq, n + 2);
2423 }
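
/*
 * i_dir_seq is a sequence count protecting in-lookup dentries hanging
 * off this directory: start_dir_add() makes it odd (insertion in
 * progress), end_dir_add() makes it even again. Readers such as
 * d_alloc_parallel() sample it with "& ~1" and recheck it after
 * scanning the in-lookup hash chain, so a parallel insertion forces
 * them to retry.
 */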
2424
2425 static void d_wait_lookup(struct dentry *dentry)
2426 {
2427 if (d_in_lookup(dentry)) {
2428 DECLARE_WAITQUEUE(wait, current);
2429 add_wait_queue(dentry->d_wait, &wait);
2430 do {
2431 set_current_state(TASK_UNINTERRUPTIBLE);
2432 spin_unlock(&dentry->d_lock);
2433 schedule();
2434 spin_lock(&dentry->d_lock);
2435 } while (d_in_lookup(dentry));
2436 }
2437 }
2438
2439 struct dentry *d_alloc_parallel(struct dentry *parent,
2440 const struct qstr *name,
2441 wait_queue_head_t *wq)
2442 {
2443 unsigned int hash = name->hash;
2444 struct hlist_bl_head *b = in_lookup_hash(parent, hash);
2445 struct hlist_bl_node *node;
2446 struct dentry *new = d_alloc(parent, name);
2447 struct dentry *dentry;
2448 unsigned seq, r_seq, d_seq;
2449
2450 if (unlikely(!new))
2451 return ERR_PTR(-ENOMEM);
2452
2453 retry:
2454 rcu_read_lock();
2455 seq = smp_load_acquire(&parent->d_inode->i_dir_seq) & ~1;
2456 r_seq = read_seqbegin(&rename_lock);
2457 dentry = __d_lookup_rcu(parent, name, &d_seq);
2458 if (unlikely(dentry)) {
2459 if (!lockref_get_not_dead(&dentry->d_lockref)) {
2460 rcu_read_unlock();
2461 goto retry;
2462 }
2463 if (read_seqcount_retry(&dentry->d_seq, d_seq)) {
2464 rcu_read_unlock();
2465 dput(dentry);
2466 goto retry;
2467 }
2468 rcu_read_unlock();
2469 dput(new);
2470 return dentry;
2471 }
2472 if (unlikely(read_seqretry(&rename_lock, r_seq))) {
2473 rcu_read_unlock();
2474 goto retry;
2475 }
2476 hlist_bl_lock(b);
2477 if (unlikely(parent->d_inode->i_dir_seq != seq)) {
2478 hlist_bl_unlock(b);
2479 rcu_read_unlock();
2480 goto retry;
2481 }
2482 /*
2483 * No changes for the parent since the beginning of d_lookup().
2484 * Since all removals from the chain happen with hlist_bl_lock(),
2485 * any potential in-lookup matches are going to stay here until
2486 * we unlock the chain. All fields are stable in everything
2487 * we encounter.
2488 */
2489 hlist_bl_for_each_entry(dentry, node, b, d_u.d_in_lookup_hash) {
2490 if (dentry->d_name.hash != hash)
2491 continue;
2492 if (dentry->d_parent != parent)
2493 continue;
2494 if (!d_same_name(dentry, parent, name))
2495 continue;
2496 hlist_bl_unlock(b);
2497 /* now we can try to grab a reference */
2498 if (!lockref_get_not_dead(&dentry->d_lockref)) {
2499 rcu_read_unlock();
2500 goto retry;
2501 }
2502
2503 rcu_read_unlock();
2504 /*
2505 * somebody is likely to be still doing lookup for it;
2506 * wait for them to finish
2507 */
2508 spin_lock(&dentry->d_lock);
2509 d_wait_lookup(dentry);
2510 /*
2511 * it's not in-lookup anymore; in principle we should repeat
2512 * everything from dcache lookup, but it's likely to be what
2513 * d_lookup() would've found anyway. If it is, just return it;
2514 * otherwise we really have to repeat the whole thing.
2515 */
2516 if (unlikely(dentry->d_name.hash != hash))
2517 goto mismatch;
2518 if (unlikely(dentry->d_parent != parent))
2519 goto mismatch;
2520 if (unlikely(d_unhashed(dentry)))
2521 goto mismatch;
2522 if (unlikely(!d_same_name(dentry, parent, name)))
2523 goto mismatch;
2524 /* OK, it *is* a hashed match; return it */
2525 spin_unlock(&dentry->d_lock);
2526 dput(new);
2527 return dentry;
2528 }
2529 rcu_read_unlock();
2530 /* we can't take ->d_lock here; it's OK, though. */
2531 new->d_flags |= DCACHE_PAR_LOOKUP;
2532 new->d_wait = wq;
2533 hlist_bl_add_head_rcu(&new->d_u.d_in_lookup_hash, b);
2534 hlist_bl_unlock(b);
2535 return new;
2536 mismatch:
2537 spin_unlock(&dentry->d_lock);
2538 dput(dentry);
2539 goto retry;
2540 }
2541 EXPORT_SYMBOL(d_alloc_parallel);
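
/*
 * Usage sketch, paraphrasing the slow-lookup path in fs/namei.c: the
 * caller distinguishes "we own the in-lookup dentry" from "someone
 * else already found or created it" via d_in_lookup():
 *
 *	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
 *	struct dentry *dentry = d_alloc_parallel(dir, name, &wq);
 *
 *	if (!IS_ERR(dentry) && d_in_lookup(dentry)) {
 *		struct dentry *old;
 *		old = dir->d_inode->i_op->lookup(dir->d_inode, dentry, flags);
 *		d_lookup_done(dentry);
 *		if (old) {
 *			dput(dentry);
 *			dentry = old;
 *		}
 *	}
 */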
2542
2543 void __d_lookup_done(struct dentry *dentry)
2544 {
2545 struct hlist_bl_head *b = in_lookup_hash(dentry->d_parent,
2546 dentry->d_name.hash);
2547 hlist_bl_lock(b);
2548 dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
2549 __hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
2550 wake_up_all(dentry->d_wait);
2551 dentry->d_wait = NULL;
2552 hlist_bl_unlock(b);
2553 INIT_HLIST_NODE(&dentry->d_u.d_alias);
2554 INIT_LIST_HEAD(&dentry->d_lru);
2555 }
2556 EXPORT_SYMBOL(__d_lookup_done);
2557
2558 /* inode->i_lock held if inode is non-NULL */
2559
2560 static inline void __d_add(struct dentry *dentry, struct inode *inode)
2561 {
2562 struct inode *dir = NULL;
2563 unsigned n;
2564 spin_lock(&dentry->d_lock);
2565 if (unlikely(d_in_lookup(dentry))) {
2566 dir = dentry->d_parent->d_inode;
2567 n = start_dir_add(dir);
2568 __d_lookup_done(dentry);
2569 }
2570 if (inode) {
2571 unsigned add_flags = d_flags_for_inode(inode);
2572 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
2573 raw_write_seqcount_begin(&dentry->d_seq);
2574 __d_set_inode_and_type(dentry, inode, add_flags);
2575 raw_write_seqcount_end(&dentry->d_seq);
2576 fsnotify_update_flags(dentry);
2577 }
2578 __d_rehash(dentry);
2579 if (dir)
2580 end_dir_add(dir, n);
2581 spin_unlock(&dentry->d_lock);
2582 if (inode)
2583 spin_unlock(&inode->i_lock);
2584 }
2585
2586 /**
2587 * d_add - add dentry to hash queues
2588 * @entry: dentry to add
2589 * @inode: The inode to attach to this dentry
2590 *
2591 * This adds the entry to the hash queues and attaches @inode to the
2592 * entry. The name itself was filled in earlier, during d_alloc().
2593 */
2594
2595 void d_add(struct dentry *entry, struct inode *inode)
2596 {
2597 if (inode) {
2598 security_d_instantiate(entry, inode);
2599 spin_lock(&inode->i_lock);
2600 }
2601 __d_add(entry, inode);
2602 }
2603 EXPORT_SYMBOL(d_add);
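
/*
 * Usage sketch for a simple, non-exportable ->lookup() instance
 * (myfs_iget() is hypothetical); a NULL inode makes a negative dentry:
 *
 *	static struct dentry *myfs_lookup(struct inode *dir,
 *				struct dentry *dentry, unsigned int flags)
 *	{
 *		struct inode *inode = myfs_iget(dir, &dentry->d_name);
 *		d_add(dentry, inode);
 *		return NULL;
 *	}
 *
 * Exportable filesystems should use d_splice_alias() instead (below).
 */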
2604
2605 /**
2606 * d_exact_alias - find and hash an exact unhashed alias
2607 * @entry: dentry to add
2608 * @inode: The inode to go with this dentry
2609 *
2610 * If an unhashed dentry with the same name/parent and desired
2611 * inode already exists, hash and return it. Otherwise, return
2612 * NULL.
2613 *
2614 * Parent directory should be locked.
2615 */
2616 struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
2617 {
2618 struct dentry *alias;
2619 unsigned int hash = entry->d_name.hash;
2620
2621 spin_lock(&inode->i_lock);
2622 hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
2623 /*
2624 * Don't need alias->d_lock here, because aliases with
2625 * d_parent == entry->d_parent are not subject to name or
2626 * parent changes, because the parent inode i_mutex is held.
2627 */
2628 if (alias->d_name.hash != hash)
2629 continue;
2630 if (alias->d_parent != entry->d_parent)
2631 continue;
2632 if (!d_same_name(alias, entry->d_parent, &entry->d_name))
2633 continue;
2634 spin_lock(&alias->d_lock);
2635 if (!d_unhashed(alias)) {
2636 spin_unlock(&alias->d_lock);
2637 alias = NULL;
2638 } else {
2639 __dget_dlock(alias);
2640 __d_rehash(alias);
2641 spin_unlock(&alias->d_lock);
2642 }
2643 spin_unlock(&inode->i_lock);
2644 return alias;
2645 }
2646 spin_unlock(&inode->i_lock);
2647 return NULL;
2648 }
2649 EXPORT_SYMBOL(d_exact_alias);
2650
2651 /**
2652 * dentry_update_name_case - update case insensitive dentry with a new name
2653 * @dentry: dentry to be updated
2654 * @name: new name
2655 *
2656 * Update a case insensitive dentry with new case of name.
2657 *
2658 * dentry must have been returned by d_lookup with name @name. Old and new
2659 * name lengths must match (ie. no d_compare which allows mismatched name
2660 * lengths).
2661 *
2662 * Parent inode i_mutex must be held over d_lookup and into this call (to
2663 * keep renames and concurrent inserts, and readdir(2) away).
2664 */
2665 void dentry_update_name_case(struct dentry *dentry, const struct qstr *name)
2666 {
2667 BUG_ON(!inode_is_locked(dentry->d_parent->d_inode));
2668 BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
2669
2670 spin_lock(&dentry->d_lock);
2671 write_seqcount_begin(&dentry->d_seq);
2672 memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
2673 write_seqcount_end(&dentry->d_seq);
2674 spin_unlock(&dentry->d_lock);
2675 }
2676 EXPORT_SYMBOL(dentry_update_name_case);
2677
2678 static void swap_names(struct dentry *dentry, struct dentry *target)
2679 {
2680 if (unlikely(dname_external(target))) {
2681 if (unlikely(dname_external(dentry))) {
2682 /*
2683 * Both external: swap the pointers
2684 */
2685 swap(target->d_name.name, dentry->d_name.name);
2686 } else {
2687 /*
2688 * dentry:internal, target:external. Steal target's
2689 * storage and make target internal.
2690 */
2691 memcpy(target->d_iname, dentry->d_name.name,
2692 dentry->d_name.len + 1);
2693 dentry->d_name.name = target->d_name.name;
2694 target->d_name.name = target->d_iname;
2695 }
2696 } else {
2697 if (unlikely(dname_external(dentry))) {
2698 /*
2699 * dentry:external, target:internal. Give dentry's
2700 * storage to target and make dentry internal
2701 */
2702 memcpy(dentry->d_iname, target->d_name.name,
2703 target->d_name.len + 1);
2704 target->d_name.name = dentry->d_name.name;
2705 dentry->d_name.name = dentry->d_iname;
2706 } else {
2707 /*
2708 * Both are internal.
2709 */
2710 unsigned int i;
2711 BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
2712 kmemcheck_mark_initialized(dentry->d_iname, DNAME_INLINE_LEN);
2713 kmemcheck_mark_initialized(target->d_iname, DNAME_INLINE_LEN);
2714 for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
2715 swap(((long *) &dentry->d_iname)[i],
2716 ((long *) &target->d_iname)[i]);
2717 }
2718 }
2719 }
2720 swap(dentry->d_name.hash_len, target->d_name.hash_len);
2721 }
2722
2723 static void copy_name(struct dentry *dentry, struct dentry *target)
2724 {
2725 struct external_name *old_name = NULL;
2726 if (unlikely(dname_external(dentry)))
2727 old_name = external_name(dentry);
2728 if (unlikely(dname_external(target))) {
2729 atomic_inc(&external_name(target)->u.count);
2730 dentry->d_name = target->d_name;
2731 } else {
2732 memcpy(dentry->d_iname, target->d_name.name,
2733 target->d_name.len + 1);
2734 dentry->d_name.name = dentry->d_iname;
2735 dentry->d_name.hash_len = target->d_name.hash_len;
2736 }
2737 if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
2738 kfree_rcu(old_name, u.head);
2739 }
2740
2741 static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
2742 {
2743 /*
2744 * XXXX: do we really need to take target->d_lock?
2745 */
2746 if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
2747 spin_lock(&target->d_parent->d_lock);
2748 else {
2749 if (d_ancestor(dentry->d_parent, target->d_parent)) {
2750 spin_lock(&dentry->d_parent->d_lock);
2751 spin_lock_nested(&target->d_parent->d_lock,
2752 DENTRY_D_LOCK_NESTED);
2753 } else {
2754 spin_lock(&target->d_parent->d_lock);
2755 spin_lock_nested(&dentry->d_parent->d_lock,
2756 DENTRY_D_LOCK_NESTED);
2757 }
2758 }
2759 if (target < dentry) {
2760 spin_lock_nested(&target->d_lock, 2);
2761 spin_lock_nested(&dentry->d_lock, 3);
2762 } else {
2763 spin_lock_nested(&dentry->d_lock, 2);
2764 spin_lock_nested(&target->d_lock, 3);
2765 }
2766 }
2767
2768 static void dentry_unlock_for_move(struct dentry *dentry, struct dentry *target)
2769 {
2770 if (target->d_parent != dentry->d_parent)
2771 spin_unlock(&dentry->d_parent->d_lock);
2772 if (target->d_parent != target)
2773 spin_unlock(&target->d_parent->d_lock);
2774 spin_unlock(&target->d_lock);
2775 spin_unlock(&dentry->d_lock);
2776 }
2777
2778 /*
2779 * When switching names, the actual string doesn't strictly have to
2780 * be preserved in the target - because we're dropping the target
2781 * anyway. As such, we can just do a simple memcpy() to copy over
2782 * the new name before we switch, unless we are going to rehash
2783 * it. Note that if we *do* unhash the target, we are not allowed
2784 * to rehash it without giving it a new name/hash key - whether
2785 * we swap or overwrite the names here, resulting name won't match
2786 * the reality in filesystem; it's only there for d_path() purposes.
2787 * Note that all of this is happening under rename_lock, so the
2788 * any hash lookup seeing it in the middle of manipulations will
2789 * be discarded anyway. So we do not care what happens to the hash
2790 * key in that case.
2791 */
2792 /*
2793 * __d_move - move a dentry
2794 * @dentry: entry to move
2795 * @target: new dentry
2796 * @exchange: exchange the two dentries
2797 *
2798 * Update the dcache to reflect the move of a file name. Negative
2799 * dcache entries should not be moved in this way. Caller must hold
2800 * rename_lock, the i_mutex of the source and target directories,
2801 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
2802 */
2803 static void __d_move(struct dentry *dentry, struct dentry *target,
2804 bool exchange)
2805 {
2806 struct inode *dir = NULL;
2807 unsigned n;
2808 if (!dentry->d_inode)
2809 printk(KERN_WARNING "VFS: moving negative dcache entry\n");
2810
2811 BUG_ON(d_ancestor(dentry, target));
2812 BUG_ON(d_ancestor(target, dentry));
2813
2814 dentry_lock_for_move(dentry, target);
2815 if (unlikely(d_in_lookup(target))) {
2816 dir = target->d_parent->d_inode;
2817 n = start_dir_add(dir);
2818 __d_lookup_done(target);
2819 }
2820
2821 write_seqcount_begin(&dentry->d_seq);
2822 write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
2823
2824 /* unhash both */
2825 /* ___d_drop does write_seqcount_barrier, but they're OK to nest. */
2826 ___d_drop(dentry);
2827 ___d_drop(target);
2828
2829 /* Switch the names.. */
2830 if (exchange)
2831 swap_names(dentry, target);
2832 else
2833 copy_name(dentry, target);
2834
2835 /* rehash in new place(s) */
2836 __d_rehash(dentry);
2837 if (exchange)
2838 __d_rehash(target);
2839 else
2840 target->d_hash.pprev = NULL;
2841
2842 /* ... and switch them in the tree */
2843 if (IS_ROOT(dentry)) {
2844 /* splicing a tree */
2845 dentry->d_flags |= DCACHE_RCUACCESS;
2846 dentry->d_parent = target->d_parent;
2847 target->d_parent = target;
2848 list_del_init(&target->d_child);
2849 list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
2850 } else {
2851 /* swapping two dentries */
2852 swap(dentry->d_parent, target->d_parent);
2853 list_move(&target->d_child, &target->d_parent->d_subdirs);
2854 list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
2855 if (exchange)
2856 fsnotify_update_flags(target);
2857 fsnotify_update_flags(dentry);
2858 }
2859
2860 write_seqcount_end(&target->d_seq);
2861 write_seqcount_end(&dentry->d_seq);
2862
2863 if (dir)
2864 end_dir_add(dir, n);
2865 dentry_unlock_for_move(dentry, target);
2866 }
2867
2868 /*
2869 * d_move - move a dentry
2870 * @dentry: entry to move
2871 * @target: new dentry
2872 *
2873 * Update the dcache to reflect the move of a file name. Negative
2874 * dcache entries should not be moved in this way. See the locking
2875 * requirements for __d_move.
2876 */
2877 void d_move(struct dentry *dentry, struct dentry *target)
2878 {
2879 write_seqlock(&rename_lock);
2880 __d_move(dentry, target, false);
2881 write_sequnlock(&rename_lock);
2882 }
2883 EXPORT_SYMBOL(d_move);
2884
2885 /*
2886 * d_exchange - exchange two dentries
2887 * @dentry1: first dentry
2888 * @dentry2: second dentry
2889 */
2890 void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
2891 {
2892 write_seqlock(&rename_lock);
2893
2894 WARN_ON(!dentry1->d_inode);
2895 WARN_ON(!dentry2->d_inode);
2896 WARN_ON(IS_ROOT(dentry1));
2897 WARN_ON(IS_ROOT(dentry2));
2898
2899 __d_move(dentry1, dentry2, true);
2900
2901 write_sequnlock(&rename_lock);
2902 }
2903
2904 /**
2905 * d_ancestor - search for an ancestor
2906 * @p1: ancestor dentry
2907 * @p2: child dentry
2908 *
2909 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
2910 * an ancestor of p2, else NULL.
2911 */
2912 struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2913 {
2914 struct dentry *p;
2915
2916 for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2917 if (p->d_parent == p1)
2918 return p;
2919 }
2920 return NULL;
2921 }
2922
2923 /*
2924 * This helper attempts to cope with remotely renamed directories
2925 *
2926 * It assumes that the caller is already holding
2927 * dentry->d_parent->d_inode->i_mutex, and rename_lock
2928 *
2929 * Note: If ever the locking in lock_rename() changes, then please
2930 * remember to update this too...
2931 */
2932 static int __d_unalias(struct inode *inode,
2933 struct dentry *dentry, struct dentry *alias)
2934 {
2935 struct mutex *m1 = NULL;
2936 struct rw_semaphore *m2 = NULL;
2937 int ret = -ESTALE;
2938
2939 /* If alias and dentry share a parent, then no extra locks required */
2940 if (alias->d_parent == dentry->d_parent)
2941 goto out_unalias;
2942
2943 /* See lock_rename() */
2944 if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
2945 goto out_err;
2946 m1 = &dentry->d_sb->s_vfs_rename_mutex;
2947 if (!inode_trylock_shared(alias->d_parent->d_inode))
2948 goto out_err;
2949 m2 = &alias->d_parent->d_inode->i_rwsem;
2950 out_unalias:
2951 __d_move(alias, dentry, false);
2952 ret = 0;
2953 out_err:
2954 if (m2)
2955 up_read(m2);
2956 if (m1)
2957 mutex_unlock(m1);
2958 return ret;
2959 }
2960
2961 /**
2962 * d_splice_alias - splice a disconnected dentry into the tree if one exists
2963 * @inode: the inode which may have a disconnected dentry
2964 * @dentry: a negative dentry which we want to point to the inode.
2965 *
2966 * If inode is a directory and has an IS_ROOT alias, then d_move that in
2967 * place of the given dentry and return it, else simply d_add the inode
2968 * to the dentry and return NULL.
2969 *
2970 * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
2971 * we should error out: directories can't have multiple aliases.
2972 *
2973 * This is needed in the lookup routine of any filesystem that is exportable
2974 * (via knfsd) so that we can build dcache paths to directories effectively.
2975 *
2976 * If a dentry was found and moved, then it is returned. Otherwise NULL
2977 * is returned. This matches the expected return value of ->lookup.
2978 *
2979 * Cluster filesystems may call this function with a negative, hashed dentry.
2980 * In that case, we know that the inode will be a regular file, and also this
2981 * will only occur during atomic_open. So we need to check for the dentry
2982 * being already hashed only in the final case.
2983 */
2984 struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
2985 {
2986 if (IS_ERR(inode))
2987 return ERR_CAST(inode);
2988
2989 BUG_ON(!d_unhashed(dentry));
2990
2991 if (!inode)
2992 goto out;
2993
2994 security_d_instantiate(dentry, inode);
2995 spin_lock(&inode->i_lock);
2996 if (S_ISDIR(inode->i_mode)) {
2997 struct dentry *new = __d_find_any_alias(inode);
2998 if (unlikely(new)) {
2999 /* The reference to new ensures it remains an alias */
3000 spin_unlock(&inode->i_lock);
3001 write_seqlock(&rename_lock);
3002 if (unlikely(d_ancestor(new, dentry))) {
3003 write_sequnlock(&rename_lock);
3004 dput(new);
3005 new = ERR_PTR(-ELOOP);
3006 pr_warn_ratelimited(
3007 "VFS: Lookup of '%s' in %s %s"
3008 " would have caused loop\n",
3009 dentry->d_name.name,
3010 inode->i_sb->s_type->name,
3011 inode->i_sb->s_id);
3012 } else if (!IS_ROOT(new)) {
3013 int err = __d_unalias(inode, dentry, new);
3014 write_sequnlock(&rename_lock);
3015 if (err) {
3016 dput(new);
3017 new = ERR_PTR(err);
3018 }
3019 } else {
3020 __d_move(new, dentry, false);
3021 write_sequnlock(&rename_lock);
3022 }
3023 iput(inode);
3024 return new;
3025 }
3026 }
3027 out:
3028 __d_add(dentry, inode);
3029 return NULL;
3030 }
3031 EXPORT_SYMBOL(d_splice_alias);
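
/*
 * Usage sketch for an exportable ->lookup() instance: hand the result,
 * found or not, straight to d_splice_alias() and return whatever it
 * gives back; %NULL and IS_ERR inodes are handled for us (myfs_iget()
 * is hypothetical):
 *
 *	static struct dentry *myfs_lookup(struct inode *dir,
 *				struct dentry *dentry, unsigned int flags)
 *	{
 *		struct inode *inode = myfs_iget(dir, &dentry->d_name);
 *		return d_splice_alias(inode, dentry);
 *	}
 */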
3032
3033 static int prepend(char **buffer, int *buflen, const char *str, int namelen)
3034 {
3035 *buflen -= namelen;
3036 if (*buflen < 0)
3037 return -ENAMETOOLONG;
3038 *buffer -= namelen;
3039 memcpy(*buffer, str, namelen);
3040 return 0;
3041 }
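
/*
 * A worked example of the right-to-left buffer filling (illustrative):
 *
 *	char buf[7];
 *	char *p = buf + 7;
 *	int len = 7;
 *
 *	prepend(&p, &len, "\0", 1);	buf: "......\0", len 6
 *	prepend(&p, &len, "bar", 3);	buf: "...bar\0", len 3
 *	prepend(&p, &len, "/", 1);	buf: "../bar\0", len 2
 *
 * p now points at "/bar"; prepending anything longer than the two
 * remaining bytes would return -ENAMETOOLONG.
 */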
3042
3043 /**
3044 * prepend_name - prepend a pathname in front of current buffer pointer
3045 * @buffer: buffer pointer
3046 * @buflen: allocated length of the buffer
3047 * @name: name string and length qstr structure
3048 *
3049 * With RCU path tracing, it may race with d_move(). Use ACCESS_ONCE() to
3050 * make sure that either the old or the new name pointer and length are
3051 * fetched. However, there may be a mismatch between length and pointer.
3052 * The length cannot be trusted; we need to copy the name byte-by-byte until
3053 * the length is reached or a null byte is found. It also prepends "/" at
3054 * the beginning of the name. The sequence number check at the caller will
3055 * retry it again when a d_move() does happen. So any garbage in the buffer
3056 * due to mismatched pointer and length will be discarded.
3057 *
3058 * Data dependency barrier is needed to make sure that we see that terminating
3059 * NUL. Alpha strikes again, film at 11...
3060 */
3061 static int prepend_name(char **buffer, int *buflen, const struct qstr *name)
3062 {
3063 const char *dname = ACCESS_ONCE(name->name);
3064 u32 dlen = ACCESS_ONCE(name->len);
3065 char *p;
3066
3067 smp_read_barrier_depends();
3068
3069 *buflen -= dlen + 1;
3070 if (*buflen < 0)
3071 return -ENAMETOOLONG;
3072 p = *buffer -= dlen + 1;
3073 *p++ = '/';
3074 while (dlen--) {
3075 char c = *dname++;
3076 if (!c)
3077 break;
3078 *p++ = c;
3079 }
3080 return 0;
3081 }
3082
3083 /**
3084 * prepend_path - Prepend path string to a buffer
3085 * @path: the dentry/vfsmount to report
3086 * @root: root vfsmnt/dentry
3087 * @buffer: pointer to the end of the buffer
3088 * @buflen: pointer to buffer length
3089 *
3090 * The function will first try to write out the pathname without taking any
3091 * lock other than the RCU read lock to make sure that dentries won't go away.
3092 * It only checks the sequence number of the global rename_lock as any change
3093 * in the dentry's d_seq will be preceded by changes in the rename_lock
3094 * sequence number. If the sequence number has changed, it will restart
3095 * the whole pathname back-tracing sequence again by taking the rename_lock.
3096 * In this case, there is no need to take the RCU read lock as the recursive
3097 * parent pointer references will keep the dentry chain alive as long as no
3098 * rename operation is performed.
3099 */
3100 static int prepend_path(const struct path *path,
3101 const struct path *root,
3102 char **buffer, int *buflen)
3103 {
3104 struct dentry *dentry;
3105 struct vfsmount *vfsmnt;
3106 struct mount *mnt;
3107 int error = 0;
3108 unsigned seq, m_seq = 0;
3109 char *bptr;
3110 int blen;
3111
3112 rcu_read_lock();
3113 restart_mnt:
3114 read_seqbegin_or_lock(&mount_lock, &m_seq);
3115 seq = 0;
3116 rcu_read_lock();
3117 restart:
3118 bptr = *buffer;
3119 blen = *buflen;
3120 error = 0;
3121 dentry = path->dentry;
3122 vfsmnt = path->mnt;
3123 mnt = real_mount(vfsmnt);
3124 read_seqbegin_or_lock(&rename_lock, &seq);
3125 while (dentry != root->dentry || vfsmnt != root->mnt) {
3126 struct dentry * parent;
3127
3128 if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
3129 struct mount *parent = ACCESS_ONCE(mnt->mnt_parent);
3130 /* Escaped? */
3131 if (dentry != vfsmnt->mnt_root) {
3132 bptr = *buffer;
3133 blen = *buflen;
3134 error = 3;
3135 break;
3136 }
3137 /* Global root? */
3138 if (mnt != parent) {
3139 dentry = ACCESS_ONCE(mnt->mnt_mountpoint);
3140 mnt = parent;
3141 vfsmnt = &mnt->mnt;
3142 continue;
3143 }
3144 if (!error)
3145 error = is_mounted(vfsmnt) ? 1 : 2;
3146 break;
3147 }
3148 parent = dentry->d_parent;
3149 prefetch(parent);
3150 error = prepend_name(&bptr, &blen, &dentry->d_name);
3151 if (error)
3152 break;
3153
3154 dentry = parent;
3155 }
3156 if (!(seq & 1))
3157 rcu_read_unlock();
3158 if (need_seqretry(&rename_lock, seq)) {
3159 seq = 1;
3160 goto restart;
3161 }
3162 done_seqretry(&rename_lock, seq);
3163
3164 if (!(m_seq & 1))
3165 rcu_read_unlock();
3166 if (need_seqretry(&mount_lock, m_seq)) {
3167 m_seq = 1;
3168 goto restart_mnt;
3169 }
3170 done_seqretry(&mount_lock, m_seq);
3171
3172 if (error >= 0 && bptr == *buffer) {
3173 if (--blen < 0)
3174 error = -ENAMETOOLONG;
3175 else
3176 *--bptr = '/';
3177 }
3178 *buffer = bptr;
3179 *buflen = blen;
3180 return error;
3181 }
3182
3183 /**
3184 * __d_path - return the path of a dentry
3185 * @path: the dentry/vfsmount to report
3186 * @root: root vfsmnt/dentry
3187 * @buf: buffer to return value in
3188 * @buflen: buffer length
3189 *
3190 * Convert a dentry into an ASCII path name.
3191 *
3192 * Returns a pointer into the buffer or an error code if the
3193 * path was too long.
3194 *
3195 * "buflen" should be positive.
3196 *
3197 * If the path is not reachable from the supplied root, return %NULL.
3198 */
3199 char *__d_path(const struct path *path,
3200 const struct path *root,
3201 char *buf, int buflen)
3202 {
3203 char *res = buf + buflen;
3204 int error;
3205
3206 prepend(&res, &buflen, "\0", 1);
3207 error = prepend_path(path, root, &res, &buflen);
3208
3209 if (error < 0)
3210 return ERR_PTR(error);
3211 if (error > 0)
3212 return NULL;
3213 return res;
3214 }
3215
3216 char *d_absolute_path(const struct path *path,
3217 char *buf, int buflen)
3218 {
3219 struct path root = {};
3220 char *res = buf + buflen;
3221 int error;
3222
3223 prepend(&res, &buflen, "\0", 1);
3224 error = prepend_path(path, &root, &res, &buflen);
3225
3226 if (error > 1)
3227 error = -EINVAL;
3228 if (error < 0)
3229 return ERR_PTR(error);
3230 return res;
3231 }
3232 EXPORT_SYMBOL(d_absolute_path);
3233
3234 /*
3235 * same as __d_path but appends "(deleted)" for unlinked files.
3236 */
3237 static int path_with_deleted(const struct path *path,
3238 const struct path *root,
3239 char **buf, int *buflen)
3240 {
3241 prepend(buf, buflen, "\0", 1);
3242 if (d_unlinked(path->dentry)) {
3243 int error = prepend(buf, buflen, " (deleted)", 10);
3244 if (error)
3245 return error;
3246 }
3247
3248 return prepend_path(path, root, buf, buflen);
3249 }
3250
3251 static int prepend_unreachable(char **buffer, int *buflen)
3252 {
3253 return prepend(buffer, buflen, "(unreachable)", 13);
3254 }
3255
3256 static void get_fs_root_rcu(struct fs_struct *fs, struct path *root)
3257 {
3258 unsigned seq;
3259
3260 do {
3261 seq = read_seqcount_begin(&fs->seq);
3262 *root = fs->root;
3263 } while (read_seqcount_retry(&fs->seq, seq));
3264 }
3265
3266 /**
3267 * d_path - return the path of a dentry
3268 * @path: path to report
3269 * @buf: buffer to return value in
3270 * @buflen: buffer length
3271 *
3272 * Convert a dentry into an ASCII path name. If the entry has been deleted
3273 * the string " (deleted)" is appended. Note that this is ambiguous.
3274 *
3275 * Returns a pointer into the buffer or an error code if the path was
3276 * too long. Note: Callers should use the returned pointer, not the passed
3277 * in buffer, to use the name! The implementation often starts at an offset
3278 * into the buffer, and may leave 0 bytes at the start.
3279 *
3280 * "buflen" should be positive.
3281 */
3282 char *d_path(const struct path *path, char *buf, int buflen)
3283 {
3284 char *res = buf + buflen;
3285 struct path root;
3286 int error;
3287
3288 /*
3289 * We have various synthetic filesystems that never get mounted. On
3290 * these filesystems dentries are never used for lookup purposes, and
3291 * thus don't need to be hashed. They also don't need a name until a
3292 * user wants to identify the object in /proc/pid/fd/. The little hack
3293 * below allows us to generate a name for these objects on demand:
3294 *
3295 * Some pseudo inodes are mountable. When they are mounted
3296 * path->dentry == path->mnt->mnt_root. In that case don't call d_dname
3297 * and instead have d_path return the mounted path.
3298 */
3299 if (path->dentry->d_op && path->dentry->d_op->d_dname &&
3300 (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root))
3301 return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
3302
3303 rcu_read_lock();
3304 get_fs_root_rcu(current->fs, &root);
3305 error = path_with_deleted(path, &root, &res, &buflen);
3306 rcu_read_unlock();
3307
3308 if (error < 0)
3309 res = ERR_PTR(error);
3310 return res;
3311 }
3312 EXPORT_SYMBOL(d_path);
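
/*
 * Usage sketch; note that the returned pointer, not @buf, is what must
 * be used, and it must be checked with IS_ERR:
 *
 *	char *buf = (char *)__get_free_page(GFP_KERNEL);
 *	char *p = buf ? d_path(&file->f_path, buf, PAGE_SIZE) : NULL;
 *
 *	if (p && !IS_ERR(p))
 *		pr_info("open file: %s\n", p);
 *	free_page((unsigned long)buf);
 */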
3313
3314 /*
3315 * Helper function for dentry_operations.d_dname() members
3316 */
3317 char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
3318 const char *fmt, ...)
3319 {
3320 va_list args;
3321 char temp[64];
3322 int sz;
3323
3324 va_start(args, fmt);
3325 sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
3326 va_end(args);
3327
3328 if (sz > sizeof(temp) || sz > buflen)
3329 return ERR_PTR(-ENAMETOOLONG);
3330
3331 buffer += buflen - sz;
3332 return memcpy(buffer, temp, sz);
3333 }
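
/*
 * Usage sketch of a ->d_dname() instance, in the style of pipefs and
 * similar never-mounted pseudo filesystems ("myfs" is hypothetical):
 *
 *	static char *myfs_dname(struct dentry *dentry, char *buffer, int buflen)
 *	{
 *		return dynamic_dname(dentry, buffer, buflen, "myfs:[%lu]",
 *				     d_inode(dentry)->i_ino);
 *	}
 */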
3334
3335 char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
3336 {
3337 char *end = buffer + buflen;
3338 /* these dentries are never renamed, so d_lock is not needed */
3339 if (prepend(&end, &buflen, " (deleted)", 11) ||
3340 prepend(&end, &buflen, dentry->d_name.name, dentry->d_name.len) ||
3341 prepend(&end, &buflen, "/", 1))
3342 end = ERR_PTR(-ENAMETOOLONG);
3343 return end;
3344 }
3345 EXPORT_SYMBOL(simple_dname);
3346
3347 /*
3348 * Write full pathname from the root of the filesystem into the buffer.
3349 */
3350 static char *__dentry_path(struct dentry *d, char *buf, int buflen)
3351 {
3352 struct dentry *dentry;
3353 char *end, *retval;
3354 int len, seq = 0;
3355 int error = 0;
3356
3357 if (buflen < 2)
3358 goto Elong;
3359
3360 rcu_read_lock();
3361 restart:
3362 dentry = d;
3363 end = buf + buflen;
3364 len = buflen;
3365 prepend(&end, &len, "\0", 1);
3366 /* Get '/' right */
3367 retval = end-1;
3368 *retval = '/';
3369 read_seqbegin_or_lock(&rename_lock, &seq);
3370 while (!IS_ROOT(dentry)) {
3371 struct dentry *parent = dentry->d_parent;
3372
3373 prefetch(parent);
3374 error = prepend_name(&end, &len, &dentry->d_name);
3375 if (error)
3376 break;
3377
3378 retval = end;
3379 dentry = parent;
3380 }
3381 if (!(seq & 1))
3382 rcu_read_unlock();
3383 if (need_seqretry(&rename_lock, seq)) {
3384 seq = 1;
3385 goto restart;
3386 }
3387 done_seqretry(&rename_lock, seq);
3388 if (error)
3389 goto Elong;
3390 return retval;
3391 Elong:
3392 return ERR_PTR(-ENAMETOOLONG);
3393 }
3394
3395 char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
3396 {
3397 return __dentry_path(dentry, buf, buflen);
3398 }
3399 EXPORT_SYMBOL(dentry_path_raw);
3400
3401 char *dentry_path(struct dentry *dentry, char *buf, int buflen)
3402 {
3403 char *p = NULL;
3404 char *retval;
3405
3406 if (d_unlinked(dentry)) {
3407 p = buf + buflen;
3408 if (prepend(&p, &buflen, "//deleted", 10) != 0)
3409 goto Elong;
3410 buflen++;
3411 }
3412 retval = __dentry_path(dentry, buf, buflen);
3413 if (!IS_ERR(retval) && p)
3414 *p = '/'; /* restore '/' overridden with '\0' */
3415 return retval;
3416 Elong:
3417 return ERR_PTR(-ENAMETOOLONG);
3418 }
3419
3420 static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root,
3421 struct path *pwd)
3422 {
3423 unsigned seq;
3424
3425 do {
3426 seq = read_seqcount_begin(&fs->seq);
3427 *root = fs->root;
3428 *pwd = fs->pwd;
3429 } while (read_seqcount_retry(&fs->seq, seq));
3430 }
3431
3432 /*
3433 * NOTE! The user-level library version returns a
3434 * character pointer. The kernel system call just
3435 * returns the length of the buffer filled (which
3436 * includes the ending '\0' character), or a negative
3437 * error value. So libc would do something like
3438 *
3439 * char *getcwd(char * buf, size_t size)
3440 * {
3441 * int retval;
3442 *
3443 * retval = sys_getcwd(buf, size);
3444 * if (retval >= 0)
3445 * return buf;
3446 * errno = -retval;
3447 * return NULL;
3448 * }
3449 */
3450 SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
3451 {
3452 int error;
3453 struct path pwd, root;
3454 char *page = __getname();
3455
3456 if (!page)
3457 return -ENOMEM;
3458
3459 rcu_read_lock();
3460 get_fs_root_and_pwd_rcu(current->fs, &root, &pwd);
3461
3462 error = -ENOENT;
3463 if (!d_unlinked(pwd.dentry)) {
3464 unsigned long len;
3465 char *cwd = page + PATH_MAX;
3466 int buflen = PATH_MAX;
3467
3468 prepend(&cwd, &buflen, "\0", 1);
3469 error = prepend_path(&pwd, &root, &cwd, &buflen);
3470 rcu_read_unlock();
3471
3472 if (error < 0)
3473 goto out;
3474
3475 /* Unreachable from current root */
3476 if (error > 0) {
3477 error = prepend_unreachable(&cwd, &buflen);
3478 if (error)
3479 goto out;
3480 }
3481
3482 error = -ERANGE;
3483 len = PATH_MAX + page - cwd;
3484 if (len <= size) {
3485 error = len;
3486 if (copy_to_user(buf, cwd, len))
3487 error = -EFAULT;
3488 }
3489 } else {
3490 rcu_read_unlock();
3491 }
3492
3493 out:
3494 __putname(page);
3495 return error;
3496 }
3497
3498 /*
3499 * Test whether new_dentry is a subdirectory of old_dentry.
3500 *
3501 * Trivially implemented using the dcache structure
3502 */
3503
3504 /**
3505 * is_subdir - is new dentry a subdirectory of old_dentry
3506 * @new_dentry: new dentry
3507 * @old_dentry: old dentry
3508 *
3509 * Returns true if new_dentry is a subdirectory of the parent (at any depth).
3510 * Returns false otherwise.
3511 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
3512 */
3513
3514 bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
3515 {
3516 bool result;
3517 unsigned seq;
3518
3519 if (new_dentry == old_dentry)
3520 return true;
3521
3522 do {
3523 /* for restarting inner loop in case of seq retry */
3524 seq = read_seqbegin(&rename_lock);
3525 /*
3526 * Need rcu_read_lock() to protect against d_parent changing
3527 * due to d_move()
3528 */
3529 rcu_read_lock();
3530 if (d_ancestor(old_dentry, new_dentry))
3531 result = true;
3532 else
3533 result = false;
3534 rcu_read_unlock();
3535 } while (read_seqretry(&rename_lock, seq));
3536
3537 return result;
3538 }
3539
3540 static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
3541 {
3542 struct dentry *root = data;
3543 if (dentry != root) {
3544 if (d_unhashed(dentry) || !dentry->d_inode)
3545 return D_WALK_SKIP;
3546
3547 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
3548 dentry->d_flags |= DCACHE_GENOCIDE;
3549 dentry->d_lockref.count--;
3550 }
3551 }
3552 return D_WALK_CONTINUE;
3553 }
3554
3555 void d_genocide(struct dentry *parent)
3556 {
3557 d_walk(parent, parent, d_genocide_kill, NULL);
3558 }
3559
3560 void d_tmpfile(struct dentry *dentry, struct inode *inode)
3561 {
3562 inode_dec_link_count(inode);
3563 BUG_ON(dentry->d_name.name != dentry->d_iname ||
3564 !hlist_unhashed(&dentry->d_u.d_alias) ||
3565 !d_unlinked(dentry));
3566 spin_lock(&dentry->d_parent->d_lock);
3567 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
3568 dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
3569 (unsigned long long)inode->i_ino);
3570 spin_unlock(&dentry->d_lock);
3571 spin_unlock(&dentry->d_parent->d_lock);
3572 d_instantiate(dentry, inode);
3573 }
3574 EXPORT_SYMBOL(d_tmpfile);
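
/*
 * Usage sketch for a ->tmpfile() instance (myfs_new_inode() is
 * hypothetical): the filesystem creates an inode with i_nlink of 1;
 * d_tmpfile() then drops that link count and gives the dentry its
 * "#<ino>" name before instantiating it:
 *
 *	static int myfs_tmpfile(struct inode *dir, struct dentry *dentry,
 *				umode_t mode)
 *	{
 *		struct inode *inode = myfs_new_inode(dir, mode);
 *		if (!inode)
 *			return -ENOSPC;
 *		d_tmpfile(dentry, inode);
 *		return 0;
 *	}
 */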
3575
3576 static __initdata unsigned long dhash_entries;
3577 static int __init set_dhash_entries(char *str)
3578 {
3579 if (!str)
3580 return 0;
3581 dhash_entries = simple_strtoul(str, &str, 0);
3582 return 1;
3583 }
3584 __setup("dhash_entries=", set_dhash_entries);
3585
3586 static void __init dcache_init_early(void)
3587 {
3588 unsigned int loop;
3589
3590 /* If hashes are distributed across NUMA nodes, defer
3591 * hash allocation until vmalloc space is available.
3592 */
3593 if (hashdist)
3594 return;
3595
3596 dentry_hashtable =
3597 alloc_large_system_hash("Dentry cache",
3598 sizeof(struct hlist_bl_head),
3599 dhash_entries,
3600 13,
3601 HASH_EARLY,
3602 &d_hash_shift,
3603 &d_hash_mask,
3604 0,
3605 0);
3606
3607 for (loop = 0; loop < (1U << d_hash_shift); loop++)
3608 INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3609 }
3610
3611 static void __init dcache_init(void)
3612 {
3613 unsigned int loop;
3614
3615 /*
3616 * A constructor could be added for stable state like the lists,
3617 * but it is probably not worth it because of the cache nature
3618 * of the dcache.
3619 */
3620 dentry_cache = KMEM_CACHE(dentry,
3621 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT);
3622
3623 /* Hash may have been set up in dcache_init_early */
3624 if (!hashdist)
3625 return;
3626
3627 dentry_hashtable =
3628 alloc_large_system_hash("Dentry cache",
3629 sizeof(struct hlist_bl_head),
3630 dhash_entries,
3631 13,
3632 0,
3633 &d_hash_shift,
3634 &d_hash_mask,
3635 0,
3636 0);
3637
3638 for (loop = 0; loop < (1U << d_hash_shift); loop++)
3639 INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3640 }
3641
3642 /* SLAB cache for __getname() consumers */
3643 struct kmem_cache *names_cachep __read_mostly;
3644 EXPORT_SYMBOL(names_cachep);
3645
3646 EXPORT_SYMBOL(d_genocide);
3647
3648 void __init vfs_caches_init_early(void)
3649 {
3650 dcache_init_early();
3651 inode_init_early();
3652 }
3653
3654 void __init vfs_caches_init(void)
3655 {
3656 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
3657 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3658
3659 dcache_init();
3660 inode_init();
3661 files_init();
3662 files_maxfiles_init();
3663 mnt_init();
3664 bdev_cache_init();
3665 chrdev_init();
3666 }
3667