// SPDX-License-Identifier: GPL-2.0
/*
 * DFS referral cache routines
 *
 * Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
 */

#include <linux/jhash.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/nls.h>
#include <linux/workqueue.h>
#include "cifsglob.h"
#include "smb2pdu.h"
#include "smb2proto.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_unicode.h"
#include "smb2glob.h"

#include "dfs_cache.h"

#define CACHE_HTABLE_SIZE 32
#define CACHE_MAX_ENTRIES 64

#define IS_INTERLINK_SET(v) ((v) & (DFSREF_REFERRAL_SERVER | \
				    DFSREF_STORAGE_SERVER))

struct cache_dfs_tgt {
	char *name;
	int path_consumed;
	struct list_head list;
};

struct cache_entry {
	struct hlist_node hlist;
	const char *path;
	int ttl;
	int srvtype;
	int flags;
	struct timespec64 etime;
	int path_consumed;
	int numtgts;
	struct list_head tlist;
	struct cache_dfs_tgt *tgthint;
};

struct vol_info {
	char *fullpath;
	spinlock_t smb_vol_lock;
	struct smb_vol smb_vol;
	char *mntdata;
	struct list_head list;
	struct list_head rlist;
	struct kref refcnt;
};

static struct kmem_cache *cache_slab __read_mostly;
static struct workqueue_struct *dfscache_wq __read_mostly;

static int cache_ttl;
static DEFINE_SPINLOCK(cache_ttl_lock);

static struct nls_table *cache_nlsc;

/*
 * Number of entries in the cache
 */
static atomic_t cache_count;

static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
static DECLARE_RWSEM(htable_rw_lock);

static LIST_HEAD(vol_list);
static DEFINE_SPINLOCK(vol_list_lock);
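
/*
 * Locking summary (as used by the code below): htable_rw_lock protects
 * cache_htable and the entries hanging off it; cache_ttl_lock protects
 * cache_ttl, which tracks the smallest TTL seen so far and paces
 * refresh_task; vol_list_lock protects vol_list, while each vol_info has
 * its own smb_vol_lock guarding the embedded smb_vol.
 */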

static void refresh_cache_worker(struct work_struct *work);

static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);

static int get_normalized_path(const char *path, char **npath)
{
	if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/'))
		return -EINVAL;

	if (*path == '\\') {
		*npath = (char *)path;
	} else {
		*npath = kstrndup(path, strlen(path), GFP_KERNEL);
		if (!*npath)
			return -ENOMEM;
		convert_delimiter(*npath, '\\');
	}
	return 0;
}
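
/*
 * Illustrative example: "/srv/dfsroot/link" gets duplicated and converted
 * to "\srv\dfsroot\link", whereas a path that already starts with '\\' is
 * returned as-is with no allocation -- which is why free_normalized_path()
 * below only frees @npath when it differs from @path.
 */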

static inline void free_normalized_path(const char *path, char *npath)
{
	if (path != npath)
		kfree(npath);
}

static inline bool cache_entry_expired(const struct cache_entry *ce)
{
	struct timespec64 ts;

	ktime_get_coarse_real_ts64(&ts);
	return timespec64_compare(&ts, &ce->etime) >= 0;
}

static inline void free_tgts(struct cache_entry *ce)
{
	struct cache_dfs_tgt *t, *n;

	list_for_each_entry_safe(t, n, &ce->tlist, list) {
		list_del(&t->list);
		kfree(t->name);
		kfree(t);
	}
}

static inline void flush_cache_ent(struct cache_entry *ce)
{
	hlist_del_init(&ce->hlist);
	kfree(ce->path);
	free_tgts(ce);
	atomic_dec(&cache_count);
	kmem_cache_free(cache_slab, ce);
}

static void flush_cache_ents(void)
{
	int i;

	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];
		struct hlist_node *n;
		struct cache_entry *ce;

		hlist_for_each_entry_safe(ce, n, l, hlist) {
			if (!hlist_unhashed(&ce->hlist))
				flush_cache_ent(ce);
		}
	}
}

/*
 * dfs cache /proc file
 */
static int dfscache_proc_show(struct seq_file *m, void *v)
{
	int i;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	seq_puts(m, "DFS cache\n---------\n");

	down_read(&htable_rw_lock);
	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];

		hlist_for_each_entry(ce, l, hlist) {
			if (hlist_unhashed(&ce->hlist))
				continue;

			seq_printf(m,
				   "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,"
				   "interlink=%s,path_consumed=%d,expired=%s\n",
				   ce->path,
				   ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
				   ce->ttl, ce->etime.tv_nsec,
				   IS_INTERLINK_SET(ce->flags) ? "yes" : "no",
				   ce->path_consumed,
				   cache_entry_expired(ce) ? "yes" : "no");

			list_for_each_entry(t, &ce->tlist, list) {
				seq_printf(m, " %s%s\n",
					   t->name,
					   ce->tgthint == t ? " (target hint)" : "");
			}
		}
	}
	up_read(&htable_rw_lock);

	return 0;
}

static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
				   size_t count, loff_t *ppos)
{
	char c;
	int rc;

	rc = get_user(c, buffer);
	if (rc)
		return rc;

	if (c != '0')
		return -EINVAL;

	cifs_dbg(FYI, "clearing dfs cache\n");

	down_write(&htable_rw_lock);
	flush_cache_ents();
	up_write(&htable_rw_lock);

	return count;
}

static int dfscache_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, dfscache_proc_show, NULL);
}

const struct proc_ops dfscache_proc_ops = {
	.proc_open = dfscache_proc_open,
	.proc_read = seq_read,
	.proc_lseek = seq_lseek,
	.proc_release = single_release,
	.proc_write = dfscache_proc_write,
};
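
/*
 * Usage note (assuming the standard cifs proc registration, which exposes
 * this as /proc/fs/cifs/dfscache): reading the file dumps the table above,
 * and writing the single character '0' flushes all cached referrals, e.g.:
 *
 *	echo 0 > /proc/fs/cifs/dfscache
 */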

#ifdef CONFIG_CIFS_DEBUG2
static inline void dump_tgts(const struct cache_entry *ce)
{
	struct cache_dfs_tgt *t;

	cifs_dbg(FYI, "target list:\n");
	list_for_each_entry(t, &ce->tlist, list) {
		cifs_dbg(FYI, " %s%s\n", t->name,
			 ce->tgthint == t ? " (target hint)" : "");
	}
}

static inline void dump_ce(const struct cache_entry *ce)
{
	cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,interlink=%s,path_consumed=%d,expired=%s\n",
		 ce->path,
		 ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
		 ce->etime.tv_nsec,
		 IS_INTERLINK_SET(ce->flags) ? "yes" : "no",
		 ce->path_consumed,
		 cache_entry_expired(ce) ? "yes" : "no");
	dump_tgts(ce);
}

static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)
{
	int i;

	cifs_dbg(FYI, "DFS referrals returned by the server:\n");
	for (i = 0; i < numrefs; i++) {
		const struct dfs_info3_param *ref = &refs[i];

		cifs_dbg(FYI,
			 "\n"
			 "flags: 0x%x\n"
			 "path_consumed: %d\n"
			 "server_type: 0x%x\n"
			 "ref_flag: 0x%x\n"
			 "path_name: %s\n"
			 "node_name: %s\n"
			 "ttl: %d (%dm)\n",
			 ref->flags, ref->path_consumed, ref->server_type,
			 ref->ref_flag, ref->path_name, ref->node_name,
			 ref->ttl, ref->ttl / 60);
	}
}
#else
#define dump_tgts(e)
#define dump_ce(e)
#define dump_refs(r, n)
#endif

/**
 * dfs_cache_init - Initialize DFS referral cache.
 *
 * Return zero if initialized successfully, otherwise non-zero.
 */
int dfs_cache_init(void)
{
	int rc;
	int i;

	dfscache_wq = alloc_workqueue("cifs-dfscache",
				      WQ_FREEZABLE | WQ_MEM_RECLAIM, 1);
	if (!dfscache_wq)
		return -ENOMEM;

	cache_slab = kmem_cache_create("cifs_dfs_cache",
				       sizeof(struct cache_entry), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!cache_slab) {
		rc = -ENOMEM;
		goto out_destroy_wq;
	}

	for (i = 0; i < CACHE_HTABLE_SIZE; i++)
		INIT_HLIST_HEAD(&cache_htable[i]);

	atomic_set(&cache_count, 0);
	cache_nlsc = load_nls_default();

	cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
	return 0;

out_destroy_wq:
	destroy_workqueue(dfscache_wq);
	return rc;
}

static inline unsigned int cache_entry_hash(const void *data, int size)
{
	unsigned int h;

	h = jhash(data, size, 0);
	return h & (CACHE_HTABLE_SIZE - 1);
}
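
/*
 * The mask above relies on CACHE_HTABLE_SIZE being a power of two; e.g.
 * with 32 buckets, a jhash() output of 0x2f maps to bucket 0x0f.
 */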

/* Check whether second path component of @path is SYSVOL or NETLOGON */
static inline bool is_sysvol_or_netlogon(const char *path)
{
	const char *s;
	char sep = path[0];

	s = strchr(path + 1, sep) + 1;
	return !strncasecmp(s, "sysvol", strlen("sysvol")) ||
		!strncasecmp(s, "netlogon", strlen("netlogon"));
}

/* Return target hint of a DFS cache entry */
static inline char *get_tgt_name(const struct cache_entry *ce)
{
	struct cache_dfs_tgt *t = ce->tgthint;

	return t ? t->name : ERR_PTR(-ENOENT);
}

/* Return expire time out of a new entry's TTL */
static inline struct timespec64 get_expire_time(int ttl)
{
	struct timespec64 ts = {
		.tv_sec = ttl,
		.tv_nsec = 0,
	};
	struct timespec64 now;

	ktime_get_coarse_real_ts64(&now);
	return timespec64_add(now, ts);
}

/* Allocate a new DFS target */
static struct cache_dfs_tgt *alloc_target(const char *name, int path_consumed)
{
	struct cache_dfs_tgt *t;

	t = kmalloc(sizeof(*t), GFP_ATOMIC);
	if (!t)
		return ERR_PTR(-ENOMEM);
	t->name = kstrndup(name, strlen(name), GFP_ATOMIC);
	if (!t->name) {
		kfree(t);
		return ERR_PTR(-ENOMEM);
	}
	t->path_consumed = path_consumed;
	INIT_LIST_HEAD(&t->list);
	return t;
}

/*
 * Copy DFS referral information to a cache entry and conditionally update
 * target hint.
 */
static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
			 struct cache_entry *ce, const char *tgthint)
{
	int i;

	ce->ttl = refs[0].ttl;
	ce->etime = get_expire_time(ce->ttl);
	ce->srvtype = refs[0].server_type;
	ce->flags = refs[0].ref_flag;
	ce->path_consumed = refs[0].path_consumed;

	for (i = 0; i < numrefs; i++) {
		struct cache_dfs_tgt *t;

		t = alloc_target(refs[i].node_name, refs[i].path_consumed);
		if (IS_ERR(t)) {
			free_tgts(ce);
			return PTR_ERR(t);
		}
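		/*
		 * Add the requested hint to the head of the list so it
		 * becomes ce->tgthint below; everything else goes to the
		 * tail, preserving the server's ordering.
		 */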
		if (tgthint && !strcasecmp(t->name, tgthint)) {
			list_add(&t->list, &ce->tlist);
			tgthint = NULL;
		} else {
			list_add_tail(&t->list, &ce->tlist);
		}
		ce->numtgts++;
	}

	ce->tgthint = list_first_entry_or_null(&ce->tlist,
					       struct cache_dfs_tgt, list);

	return 0;
}

/* Allocate a new cache entry */
static struct cache_entry *alloc_cache_entry(const char *path,
					     const struct dfs_info3_param *refs,
					     int numrefs)
{
	struct cache_entry *ce;
	int rc;

	ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL);
	if (!ce)
		return ERR_PTR(-ENOMEM);

	ce->path = kstrndup(path, strlen(path), GFP_KERNEL);
	if (!ce->path) {
		kmem_cache_free(cache_slab, ce);
		return ERR_PTR(-ENOMEM);
	}
	INIT_HLIST_NODE(&ce->hlist);
	INIT_LIST_HEAD(&ce->tlist);

	rc = copy_ref_data(refs, numrefs, ce, NULL);
	if (rc) {
		kfree(ce->path);
		kmem_cache_free(cache_slab, ce);
		ce = ERR_PTR(rc);
	}
	return ce;
}

/* Must be called with htable_rw_lock held */
static void remove_oldest_entry(void)
{
	int i;
	struct cache_entry *ce;
	struct cache_entry *to_del = NULL;

	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];

		hlist_for_each_entry(ce, l, hlist) {
			if (hlist_unhashed(&ce->hlist))
				continue;
			if (!to_del || timespec64_compare(&ce->etime,
							  &to_del->etime) < 0)
				to_del = ce;
		}
	}

	if (!to_del) {
		cifs_dbg(FYI, "%s: no entry to remove\n", __func__);
		return;
	}

	cifs_dbg(FYI, "%s: removing entry\n", __func__);
	dump_ce(to_del);
	flush_cache_ent(to_del);
}

/* Add a new DFS cache entry */
static int add_cache_entry(const char *path, unsigned int hash,
			   struct dfs_info3_param *refs, int numrefs)
{
	struct cache_entry *ce;

	ce = alloc_cache_entry(path, refs, numrefs);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	spin_lock(&cache_ttl_lock);
	if (!cache_ttl) {
		cache_ttl = ce->ttl;
		queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	} else {
		cache_ttl = min_t(int, cache_ttl, ce->ttl);
		mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	}
	spin_unlock(&cache_ttl_lock);

	down_write(&htable_rw_lock);
	hlist_add_head(&ce->hlist, &cache_htable[hash]);
	dump_ce(ce);
	up_write(&htable_rw_lock);

	return 0;
}
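
/*
 * Note on the scheduling above: cache_ttl always tracks the smallest TTL
 * seen so far, so refresh_task runs at least as often as the most
 * short-lived referral expires. E.g. adding entries with TTLs of 600s and
 * then 300s reschedules the worker from 600s down to 300s.
 */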

static struct cache_entry *__lookup_cache_entry(const char *path)
{
	struct cache_entry *ce;
	unsigned int h;
	bool found = false;

	h = cache_entry_hash(path, strlen(path));

	hlist_for_each_entry(ce, &cache_htable[h], hlist) {
		if (!strcasecmp(path, ce->path)) {
			found = true;
			dump_ce(ce);
			break;
		}
	}

	if (!found)
		ce = ERR_PTR(-ENOENT);
	return ce;
}

/*
 * Find a DFS cache entry in hash table and optionally check prefix path against
 * @path.
 * Use whole path components in the match.
 * Must be called with htable_rw_lock held.
 *
 * Return ERR_PTR(-ENOENT) if the entry is not found.
 */
static struct cache_entry *lookup_cache_entry(const char *path, unsigned int *hash)
{
	struct cache_entry *ce = ERR_PTR(-ENOENT);
	unsigned int h;
	int cnt = 0;
	char *npath;
	char *s, *e;
	char sep;

	npath = kstrndup(path, strlen(path), GFP_KERNEL);
	if (!npath)
		return ERR_PTR(-ENOMEM);

	s = npath;
	sep = *npath;
	while ((s = strchr(s, sep)) && ++cnt < 3)
		s++;

	if (cnt < 3) {
		h = cache_entry_hash(path, strlen(path));
		ce = __lookup_cache_entry(path);
		goto out;
	}
	/*
	 * Handle paths that have more than two path components and are a complete prefix of the DFS
	 * referral request path (@path).
	 *
	 * See MS-DFSC 3.2.5.5 "Receiving a Root Referral Request or Link Referral Request".
	 */
	h = cache_entry_hash(npath, strlen(npath));
	e = npath + strlen(npath) - 1;
	while (e > s) {
		char tmp;

		/* skip separators */
		while (e > s && *e == sep)
			e--;
		if (e == s)
			goto out;

		tmp = *(e+1);
		*(e+1) = 0;

		ce = __lookup_cache_entry(npath);
		if (!IS_ERR(ce)) {
			h = cache_entry_hash(npath, strlen(npath));
			break;
		}

		*(e+1) = tmp;
		/* backward until separator */
		while (e > s && *e != sep)
			e--;
	}
out:
	if (hash)
		*hash = h;
	kfree(npath);
	return ce;
}
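
/*
 * Worked example of the prefix walk above: for @path "\srv\root\dir1\dir2"
 * the lookup first tries the full path, then "\srv\root\dir1", i.e. it
 * stops at the first (longest) cached prefix and never goes below three
 * path components; two-component paths take the direct-lookup branch.
 */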

static void __vol_release(struct vol_info *vi)
{
	kfree(vi->fullpath);
	kfree(vi->mntdata);
	cifs_cleanup_volume_info_contents(&vi->smb_vol);
	kfree(vi);
}

static void vol_release(struct kref *kref)
{
	struct vol_info *vi = container_of(kref, struct vol_info, refcnt);

	spin_lock(&vol_list_lock);
	list_del(&vi->list);
	spin_unlock(&vol_list_lock);
	__vol_release(vi);
}

static inline void free_vol_list(void)
{
	struct vol_info *vi, *nvi;

	list_for_each_entry_safe(vi, nvi, &vol_list, list) {
		list_del_init(&vi->list);
		__vol_release(vi);
	}
}

/**
 * dfs_cache_destroy - destroy DFS referral cache
 */
void dfs_cache_destroy(void)
{
	cancel_delayed_work_sync(&refresh_task);
	unload_nls(cache_nlsc);
	free_vol_list();
	flush_cache_ents();
	kmem_cache_destroy(cache_slab);
	destroy_workqueue(dfscache_wq);

	cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
}

/* Must be called with htable_rw_lock held */
static int __update_cache_entry(const char *path,
				const struct dfs_info3_param *refs,
				int numrefs)
{
	int rc;
	struct cache_entry *ce;
	char *s, *th = NULL;

	ce = lookup_cache_entry(path, NULL);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	if (ce->tgthint) {
		s = ce->tgthint->name;
		th = kstrndup(s, strlen(s), GFP_ATOMIC);
		if (!th)
			return -ENOMEM;
	}

	free_tgts(ce);
	ce->numtgts = 0;

	rc = copy_ref_data(refs, numrefs, ce, th);

	kfree(th);

	return rc;
}

static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses,
			    const struct nls_table *nls_codepage, int remap,
			    const char *path, struct dfs_info3_param **refs,
			    int *numrefs)
{
	cifs_dbg(FYI, "%s: get a DFS referral for %s\n", __func__, path);

	if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
		return -EOPNOTSUPP;
	if (unlikely(!nls_codepage))
		return -EINVAL;

	*refs = NULL;
	*numrefs = 0;

	return ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs,
					       nls_codepage, remap);
}
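
/*
 * For SMB2+, ->get_dfs_refer is expected to resolve to smb2_get_dfs_refer(),
 * which issues an FSCTL_DFS_GET_REFERRALS ioctl against the IPC share (an
 * assumption based on the usual smb2_operations tables; not verified here).
 */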

/* Update an expired cache entry by getting a new DFS referral from server */
static int update_cache_entry(const char *path,
			      const struct dfs_info3_param *refs,
			      int numrefs)
{
	int rc;

	down_write(&htable_rw_lock);
	rc = __update_cache_entry(path, refs, numrefs);
	up_write(&htable_rw_lock);

	return rc;
}

/*
 * Find, create or update a DFS cache entry.
 *
 * If the entry wasn't found, create a new one. If it was found but expired,
 * update it accordingly.
 *
 * For interlinks, __cifs_dfs_mount() and expand_dfs_referral() are supposed to
 * handle them properly.
 */
static int __dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
			    const struct nls_table *nls_codepage, int remap,
			    const char *path, bool noreq)
{
	int rc;
	unsigned int hash;
	struct cache_entry *ce;
	struct dfs_info3_param *refs = NULL;
	int numrefs = 0;
	bool newent = false;

	cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(path, &hash);

	/*
	 * If @noreq is set, no requests will be sent to the server. Just return
	 * the cache entry.
	 */
	if (noreq) {
		up_read(&htable_rw_lock);
		return PTR_ERR_OR_ZERO(ce);
	}

	if (!IS_ERR(ce)) {
		if (!cache_entry_expired(ce)) {
			dump_ce(ce);
			up_read(&htable_rw_lock);
			return 0;
		}
	} else {
		newent = true;
	}

	up_read(&htable_rw_lock);

	/*
	 * Either the entry was not found, or it exists but has expired.
	 *
	 * Request a new DFS referral in order to create a new cache entry, or
	 * update the existing one.
	 */
	rc = get_dfs_referral(xid, ses, nls_codepage, remap, path,
			      &refs, &numrefs);
	if (rc)
		return rc;

	dump_refs(refs, numrefs);

	if (!newent) {
		rc = update_cache_entry(path, refs, numrefs);
		goto out_free_refs;
	}

	if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
		cifs_dbg(FYI, "%s: reached max cache size (%d)\n",
			 __func__, CACHE_MAX_ENTRIES);
		down_write(&htable_rw_lock);
		remove_oldest_entry();
		up_write(&htable_rw_lock);
	}

	rc = add_cache_entry(path, hash, refs, numrefs);
	if (!rc)
		atomic_inc(&cache_count);

out_free_refs:
	free_dfs_info_array(refs, numrefs);
	return rc;
}

/*
 * Set up a DFS referral from a given cache entry.
 *
 * Must be called with htable_rw_lock held.
 */
static int setup_referral(const char *path, struct cache_entry *ce,
			  struct dfs_info3_param *ref, const char *target)
{
	int rc;

	cifs_dbg(FYI, "%s: set up new ref\n", __func__);

	memset(ref, 0, sizeof(*ref));

	ref->path_name = kstrndup(path, strlen(path), GFP_ATOMIC);
	if (!ref->path_name)
		return -ENOMEM;

	ref->node_name = kstrndup(target, strlen(target), GFP_ATOMIC);
	if (!ref->node_name) {
		rc = -ENOMEM;
		goto err_free_path;
	}

	ref->path_consumed = ce->path_consumed;
	ref->ttl = ce->ttl;
	ref->server_type = ce->srvtype;
	ref->ref_flag = ce->flags;

	return 0;

err_free_path:
	kfree(ref->path_name);
	ref->path_name = NULL;
	return rc;
}

/* Return target list of a DFS cache entry */
static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
{
	int rc;
	struct list_head *head = &tl->tl_list;
	struct cache_dfs_tgt *t;
	struct dfs_cache_tgt_iterator *it, *nit;

	memset(tl, 0, sizeof(*tl));
	INIT_LIST_HEAD(head);

	list_for_each_entry(t, &ce->tlist, list) {
		it = kzalloc(sizeof(*it), GFP_ATOMIC);
		if (!it) {
			rc = -ENOMEM;
			goto err_free_it;
		}

		it->it_name = kstrndup(t->name, strlen(t->name), GFP_ATOMIC);
		if (!it->it_name) {
			kfree(it);
			rc = -ENOMEM;
			goto err_free_it;
		}
		it->it_path_consumed = t->path_consumed;

		if (ce->tgthint == t)
			list_add(&it->it_list, head);
		else
			list_add_tail(&it->it_list, head);
	}

	tl->tl_numtgts = ce->numtgts;

	return 0;

err_free_it:
	list_for_each_entry_safe(it, nit, head, it_list) {
		kfree(it->it_name);
		kfree(it);
	}
	return rc;
}

/**
 * dfs_cache_find - find a DFS cache entry
 *
 * If it doesn't find the cache entry, then it will get a DFS referral
 * for @path and create a new entry.
 *
 * In case the cache entry exists but expired, it will get a DFS referral
 * for @path and then update the respective cache entry.
 *
 * These parameters are passed down to the get_dfs_refer() call if it
 * needs to be issued:
 * @xid: syscall xid
 * @ses: smb session to issue the request on
 * @nls_codepage: charset conversion
 * @remap: path character remapping type
 * @path: path to lookup in DFS referral cache.
 *
 * @ref: when non-NULL, store single DFS referral result in it.
 * @tgt_list: when non-NULL, store complete DFS target list in it.
 *
 * Return zero if the target was found, otherwise non-zero.
 */
int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
		   const struct nls_table *nls_codepage, int remap,
		   const char *path, struct dfs_info3_param *ref,
		   struct dfs_cache_tgt_list *tgt_list)
{
	int rc;
	char *npath;
	struct cache_entry *ce;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	rc = __dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
	if (rc)
		goto out_free_path;

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		up_read(&htable_rw_lock);
		rc = PTR_ERR(ce);
		goto out_free_path;
	}

	if (ref)
		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
	else
		rc = 0;
	if (!rc && tgt_list)
		rc = get_targets(ce, tgt_list);

	up_read(&htable_rw_lock);

out_free_path:
	free_normalized_path(path, npath);
	return rc;
}
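
/*
 * Illustrative caller sketch (not taken from this file; it assumes the
 * iterator helpers declared in dfs_cache.h, e.g. dfs_cache_get_tgt_iterator()
 * and dfs_cache_get_next_tgt()):
 *
 *	struct dfs_cache_tgt_list tl;
 *	struct dfs_cache_tgt_iterator *it;
 *
 *	if (!dfs_cache_find(xid, ses, cp, remap, path, NULL, &tl)) {
 *		for (it = dfs_cache_get_tgt_iterator(&tl); it;
 *		     it = dfs_cache_get_next_tgt(&tl, it)) {
 *			... try connecting to dfs_cache_get_tgt_name(it) ...
 *		}
 *		dfs_cache_free_tgts(&tl);
 *	}
 */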

/**
 * dfs_cache_noreq_find - find a DFS cache entry without sending any requests to
 * the currently connected server.
 *
 * NOTE: This function will neither update a cache entry in case it was
 * expired, nor create a new cache entry if @path hasn't been found. It heavily
 * relies on an existing cache entry.
 *
 * @path: path to lookup in the DFS referral cache.
 * @ref: when non-NULL, store single DFS referral result in it.
 * @tgt_list: when non-NULL, store complete DFS target list in it.
 *
 * Return 0 if successful.
 * Return -ENOENT if the entry was not found.
 * Return non-zero for other errors.
 */
int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
			 struct dfs_cache_tgt_list *tgt_list)
{
	int rc;
	char *npath;
	struct cache_entry *ce;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	if (ref)
		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
	else
		rc = 0;
	if (!rc && tgt_list)
		rc = get_targets(ce, tgt_list);

out_unlock:
	up_read(&htable_rw_lock);
	free_normalized_path(path, npath);

	return rc;
}

/**
 * dfs_cache_update_tgthint - update target hint of a DFS cache entry
 *
 * If it doesn't find the cache entry, then it will get a DFS referral for @path
 * and create a new entry.
 *
 * In case the cache entry exists but expired, it will get a DFS referral
 * for @path and then update the respective cache entry.
 *
 * @xid: syscall id
 * @ses: smb session
 * @nls_codepage: charset conversion
 * @remap: type of character remapping for paths
 * @path: path to lookup in DFS referral cache.
 * @it: DFS target iterator
 *
 * Return zero if the target hint was updated successfully, otherwise non-zero.
 */
int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
			     const struct nls_table *nls_codepage, int remap,
			     const char *path,
			     const struct dfs_cache_tgt_iterator *it)
{
	int rc;
	char *npath;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	cifs_dbg(FYI, "%s: update target hint - path: %s\n", __func__, npath);

	rc = __dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
	if (rc)
		goto out_free_path;

	down_write(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	t = ce->tgthint;

	if (likely(!strcasecmp(it->it_name, t->name)))
		goto out_unlock;

	list_for_each_entry(t, &ce->tlist, list) {
		if (!strcasecmp(t->name, it->it_name)) {
			ce->tgthint = t;
			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
				 it->it_name);
			break;
		}
	}

out_unlock:
	up_write(&htable_rw_lock);
out_free_path:
	free_normalized_path(path, npath);

	return rc;
}

/**
 * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
 * without sending any requests to the currently connected server.
 *
 * NOTE: This function will neither update a cache entry in case it was
 * expired, nor create a new cache entry if @path hasn't been found. It heavily
 * relies on an existing cache entry.
 *
 * @path: path to lookup in DFS referral cache.
 * @it: target iterator which contains the target hint to update the cache
 * entry with.
 *
 * Return zero if the target hint was updated successfully, otherwise non-zero.
 */
int dfs_cache_noreq_update_tgthint(const char *path,
				   const struct dfs_cache_tgt_iterator *it)
{
	int rc;
	char *npath;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	if (!it)
		return -EINVAL;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);

	down_write(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	rc = 0;
	t = ce->tgthint;

	if (unlikely(!strcasecmp(it->it_name, t->name)))
		goto out_unlock;

	list_for_each_entry(t, &ce->tlist, list) {
		if (!strcasecmp(t->name, it->it_name)) {
			ce->tgthint = t;
			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
				 it->it_name);
			break;
		}
	}

out_unlock:
	up_write(&htable_rw_lock);
	free_normalized_path(path, npath);

	return rc;
}

/**
 * dfs_cache_get_tgt_referral - returns a DFS referral (@ref) from a given
 * target iterator (@it).
 *
 * @path: path to lookup in DFS referral cache.
 * @it: DFS target iterator.
 * @ref: DFS referral pointer to set up the gathered information.
 *
 * Return zero if the DFS referral was set up correctly, otherwise non-zero.
 */
int dfs_cache_get_tgt_referral(const char *path,
			       const struct dfs_cache_tgt_iterator *it,
			       struct dfs_info3_param *ref)
{
	int rc;
	char *npath;
	struct cache_entry *ce;

	if (!it || !ref)
		return -EINVAL;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name);

	rc = setup_referral(path, ce, ref, it->it_name);

out_unlock:
	up_read(&htable_rw_lock);
	free_normalized_path(path, npath);

	return rc;
}

static int dup_vol(struct smb_vol *vol, struct smb_vol *new)
{
	memcpy(new, vol, sizeof(*new));

	if (vol->username) {
		new->username = kstrndup(vol->username, strlen(vol->username),
					 GFP_KERNEL);
		if (!new->username)
			return -ENOMEM;
	}
	if (vol->password) {
		new->password = kstrndup(vol->password, strlen(vol->password),
					 GFP_KERNEL);
		if (!new->password)
			goto err_free_username;
	}
	if (vol->UNC) {
		cifs_dbg(FYI, "%s: vol->UNC: %s\n", __func__, vol->UNC);
		new->UNC = kstrndup(vol->UNC, strlen(vol->UNC), GFP_KERNEL);
		if (!new->UNC)
			goto err_free_password;
	}
	if (vol->domainname) {
		new->domainname = kstrndup(vol->domainname,
					   strlen(vol->domainname), GFP_KERNEL);
		if (!new->domainname)
			goto err_free_unc;
	}
	if (vol->iocharset) {
		new->iocharset = kstrndup(vol->iocharset,
					  strlen(vol->iocharset), GFP_KERNEL);
		if (!new->iocharset)
			goto err_free_domainname;
	}
	if (vol->prepath) {
		cifs_dbg(FYI, "%s: vol->prepath: %s\n", __func__, vol->prepath);
		new->prepath = kstrndup(vol->prepath, strlen(vol->prepath),
					GFP_KERNEL);
		if (!new->prepath)
			goto err_free_iocharset;
	}

	return 0;

err_free_iocharset:
	kfree(new->iocharset);
err_free_domainname:
	kfree(new->domainname);
err_free_unc:
	kfree(new->UNC);
err_free_password:
	kfree_sensitive(new->password);
err_free_username:
	kfree(new->username);
	/*
	 * Do not free @new itself: it is embedded in the caller's vol_info
	 * (dfs_cache_add_vol() passes &vi->smb_vol), so the caller owns it.
	 */
	return -ENOMEM;
}

/**
 * dfs_cache_add_vol - add a cifs volume during mount() that will be handled by
 * DFS cache refresh worker.
 *
 * @mntdata: mount data.
 * @vol: cifs volume.
 * @fullpath: origin full path.
 *
 * Return zero if volume was set up correctly, otherwise non-zero.
 */
int dfs_cache_add_vol(char *mntdata, struct smb_vol *vol, const char *fullpath)
{
	int rc;
	struct vol_info *vi;

	if (!vol || !fullpath || !mntdata)
		return -EINVAL;

	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);

	vi = kzalloc(sizeof(*vi), GFP_KERNEL);
	if (!vi)
		return -ENOMEM;

	vi->fullpath = kstrndup(fullpath, strlen(fullpath), GFP_KERNEL);
	if (!vi->fullpath) {
		rc = -ENOMEM;
		goto err_free_vi;
	}

	rc = dup_vol(vol, &vi->smb_vol);
	if (rc)
		goto err_free_fullpath;

	vi->mntdata = mntdata;
	spin_lock_init(&vi->smb_vol_lock);
	kref_init(&vi->refcnt);

	spin_lock(&vol_list_lock);
	list_add_tail(&vi->list, &vol_list);
	spin_unlock(&vol_list_lock);

	return 0;

err_free_fullpath:
	kfree(vi->fullpath);
err_free_vi:
	kfree(vi);
	return rc;
}

/* Must be called with vol_list_lock held */
static struct vol_info *find_vol(const char *fullpath)
{
	struct vol_info *vi;

	list_for_each_entry(vi, &vol_list, list) {
		cifs_dbg(FYI, "%s: vi->fullpath: %s\n", __func__, vi->fullpath);
		if (!strcasecmp(vi->fullpath, fullpath))
			return vi;
	}
	return ERR_PTR(-ENOENT);
}

/**
 * dfs_cache_update_vol - update vol info in DFS cache after failover
 *
 * @fullpath: fullpath to look up in volume list.
 * @server: TCP ses pointer.
 *
 * Return zero if volume was updated, otherwise non-zero.
 */
int dfs_cache_update_vol(const char *fullpath, struct TCP_Server_Info *server)
{
	struct vol_info *vi;

	if (!fullpath || !server)
		return -EINVAL;

	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);

	spin_lock(&vol_list_lock);
	vi = find_vol(fullpath);
	if (IS_ERR(vi)) {
		spin_unlock(&vol_list_lock);
		return PTR_ERR(vi);
	}
	kref_get(&vi->refcnt);
	spin_unlock(&vol_list_lock);

	cifs_dbg(FYI, "%s: updating volume info\n", __func__);
	spin_lock(&vi->smb_vol_lock);
	memcpy(&vi->smb_vol.dstaddr, &server->dstaddr,
	       sizeof(vi->smb_vol.dstaddr));
	spin_unlock(&vi->smb_vol_lock);

	kref_put(&vi->refcnt, vol_release);

	return 0;
}

/**
 * dfs_cache_del_vol - remove volume info in DFS cache during umount()
 *
 * @fullpath: fullpath to look up in volume list.
 */
void dfs_cache_del_vol(const char *fullpath)
{
	struct vol_info *vi;

	if (!fullpath || !*fullpath)
		return;

	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);

	spin_lock(&vol_list_lock);
	vi = find_vol(fullpath);
	spin_unlock(&vol_list_lock);

	if (!IS_ERR(vi))
		kref_put(&vi->refcnt, vol_release);
}

/**
 * dfs_cache_get_tgt_share - parse a DFS target
 *
 * @path: DFS full path
 * @it: DFS target iterator.
 * @share: tree name.
 * @prefix: prefix path.
 *
 * Return zero if target was parsed correctly, otherwise non-zero.
 */
int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
			    char **share, char **prefix)
{
	char *s, sep, *p;
	size_t len;
	size_t plen1, plen2;

	if (!it || !path || !share || !prefix ||
	    strlen(path) < it->it_path_consumed)
		return -EINVAL;

	*share = NULL;
	*prefix = NULL;

	sep = it->it_name[0];
	if (sep != '\\' && sep != '/')
		return -EINVAL;

	s = strchr(it->it_name + 1, sep);
	if (!s)
		return -EINVAL;

	/* point to prefix in target node */
	s = strchrnul(s + 1, sep);

	/* extract target share */
	*share = kstrndup(it->it_name, s - it->it_name, GFP_KERNEL);
	if (!*share)
		return -ENOMEM;

	/* skip separator */
	if (*s)
		s++;
	/* point to prefix in DFS path */
	p = path + it->it_path_consumed;
	if (*p == sep)
		p++;

	/* merge prefix paths from DFS path and target node */
	plen1 = it->it_name + strlen(it->it_name) - s;
	plen2 = path + strlen(path) - p;
	if (plen1 || plen2) {
		len = plen1 + plen2 + 2;
		*prefix = kmalloc(len, GFP_KERNEL);
		if (!*prefix) {
			kfree(*share);
			*share = NULL;
			return -ENOMEM;
		}
		if (plen1)
			scnprintf(*prefix, len, "%.*s%c%.*s", (int)plen1, s,
				  sep, (int)plen2, p);
		else
			strscpy(*prefix, p, len);
	}
	return 0;
}
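
/*
 * Worked example for dfs_cache_get_tgt_share(): with @path
 * "\dfsroot\link\dir", it_path_consumed covering "\dfsroot\link" and a
 * target name of "\srv\share\tgtpfx", @share comes back as "\srv\share"
 * and @prefix as "tgtpfx\dir".
 */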

/* Get all tcons that are within a DFS namespace and can be refreshed */
static void get_tcons(struct TCP_Server_Info *server, struct list_head *head)
{
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;

	INIT_LIST_HEAD(head);

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
			if (!tcon->need_reconnect && !tcon->need_reopen_files &&
			    tcon->dfs_path) {
				tcon->tc_count++;
				list_add_tail(&tcon->ulist, head);
			}
		}
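		/*
		 * No extra reference is taken on the IPC tcon here: IPC
		 * tcons share their session's lifetime and (as far as the
		 * tcon refcounting in connect.c goes) cifs_put_tcon() is a
		 * no-op for them.
		 */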
		if (ses->tcon_ipc && !ses->tcon_ipc->need_reconnect &&
		    ses->tcon_ipc->dfs_path) {
			list_add_tail(&ses->tcon_ipc->ulist, head);
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
}

static bool is_dfs_link(const char *path)
{
	char *s;

	s = strchr(path + 1, '\\');
	if (!s)
		return false;
	return !!strchr(s + 1, '\\');
}
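
/*
 * E.g. "\srv\share" is treated as a DFS root (false), while
 * "\srv\share\dir" has a third component and is treated as a link (true).
 */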

static char *get_dfs_root(const char *path)
{
	char *s, *npath;

	s = strchr(path + 1, '\\');
	if (!s)
		return ERR_PTR(-EINVAL);

	s = strchr(s + 1, '\\');
	if (!s)
		return ERR_PTR(-EINVAL);

	npath = kstrndup(path, s - path, GFP_KERNEL);
	if (!npath)
		return ERR_PTR(-ENOMEM);

	return npath;
}
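
/*
 * E.g. get_dfs_root("\srv\share\a\b") returns a newly allocated
 * "\srv\share", i.e. only the first two path components.
 */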

static inline void put_tcp_server(struct TCP_Server_Info *server)
{
	cifs_put_tcp_session(server, 0);
}

static struct TCP_Server_Info *get_tcp_server(struct smb_vol *vol)
{
	struct TCP_Server_Info *server;

	server = cifs_find_tcp_session(vol);
	if (IS_ERR_OR_NULL(server))
		return NULL;

	spin_lock(&GlobalMid_Lock);
	if (server->tcpStatus != CifsGood) {
		spin_unlock(&GlobalMid_Lock);
		put_tcp_server(server);
		return NULL;
	}
	spin_unlock(&GlobalMid_Lock);

	return server;
}

/* Find root SMB session out of a DFS link path */
static struct cifs_ses *find_root_ses(struct vol_info *vi,
				      struct cifs_tcon *tcon,
				      const char *path)
{
	char *rpath;
	int rc;
	struct cache_entry *ce;
	struct dfs_info3_param ref = {0};
	char *mdata = NULL, *devname = NULL;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses;
	struct smb_vol vol = {NULL};

	rpath = get_dfs_root(path);
	if (IS_ERR(rpath))
		return ERR_CAST(rpath);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(rpath, NULL);
	if (IS_ERR(ce)) {
		up_read(&htable_rw_lock);
		ses = ERR_CAST(ce);
		goto out;
	}

	rc = setup_referral(path, ce, &ref, get_tgt_name(ce));
	if (rc) {
		up_read(&htable_rw_lock);
		ses = ERR_PTR(rc);
		goto out;
	}

	up_read(&htable_rw_lock);

	mdata = cifs_compose_mount_options(vi->mntdata, rpath, &ref,
					   &devname);
	free_dfs_info_param(&ref);

	if (IS_ERR(mdata)) {
		ses = ERR_CAST(mdata);
		mdata = NULL;
		goto out;
	}

	rc = cifs_setup_volume_info(&vol, mdata, devname, false);
	kfree(devname);

	if (rc) {
		ses = ERR_PTR(rc);
		goto out;
	}

	server = get_tcp_server(&vol);
	if (!server) {
		ses = ERR_PTR(-EHOSTDOWN);
		goto out;
	}

	ses = cifs_get_smb_ses(server, &vol);

out:
	cifs_cleanup_volume_info_contents(&vol);
	kfree(mdata);
	kfree(rpath);

	return ses;
}

/* Refresh DFS cache entry from a given tcon */
static int refresh_tcon(struct vol_info *vi, struct cifs_tcon *tcon)
{
	int rc = 0;
	unsigned int xid;
	char *path, *npath;
	struct cache_entry *ce;
	struct cifs_ses *root_ses = NULL, *ses;
	struct dfs_info3_param *refs = NULL;
	int numrefs = 0;

	xid = get_xid();

	path = tcon->dfs_path + 1;

	rc = get_normalized_path(path, &npath);
	if (rc)
		goto out_free_xid;

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		up_read(&htable_rw_lock);
		goto out_free_path;
	}

	if (!cache_entry_expired(ce)) {
		up_read(&htable_rw_lock);
		goto out_free_path;
	}

	up_read(&htable_rw_lock);

	/* If it's a DFS Link, then use root SMB session for refreshing it */
	if (is_dfs_link(npath)) {
		ses = root_ses = find_root_ses(vi, tcon, npath);
		if (IS_ERR(ses)) {
			rc = PTR_ERR(ses);
			root_ses = NULL;
			goto out_free_path;
		}
	} else {
		ses = tcon->ses;
	}

	rc = get_dfs_referral(xid, ses, cache_nlsc, tcon->remap, npath, &refs,
			      &numrefs);
	if (!rc) {
		dump_refs(refs, numrefs);
		rc = update_cache_entry(npath, refs, numrefs);
		free_dfs_info_array(refs, numrefs);
	}

	if (root_ses)
		cifs_put_smb_ses(root_ses);

out_free_path:
	free_normalized_path(path, npath);

out_free_xid:
	free_xid(xid);
	return rc;
}

/*
 * Worker that will refresh DFS cache based on lowest TTL value from a DFS
 * referral.
 */
static void refresh_cache_worker(struct work_struct *work)
{
	struct vol_info *vi, *nvi;
	struct TCP_Server_Info *server;
	LIST_HEAD(vols);
	LIST_HEAD(tcons);
	struct cifs_tcon *tcon, *ntcon;
	int rc;

	/*
	 * Find SMB volumes that are eligible (server->tcpStatus == CifsGood)
	 * for refreshing.
	 */
	spin_lock(&vol_list_lock);
	list_for_each_entry(vi, &vol_list, list) {
		server = get_tcp_server(&vi->smb_vol);
		if (!server)
			continue;

		kref_get(&vi->refcnt);
		list_add_tail(&vi->rlist, &vols);
		put_tcp_server(server);
	}
	spin_unlock(&vol_list_lock);

	/* Walk through all TCONs and refresh any expired cache entry */
	list_for_each_entry_safe(vi, nvi, &vols, rlist) {
		spin_lock(&vi->smb_vol_lock);
		server = get_tcp_server(&vi->smb_vol);
		spin_unlock(&vi->smb_vol_lock);

		if (!server)
			goto next_vol;

		get_tcons(server, &tcons);
		rc = 0;

		list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
			/*
			 * Skip tcp server if any of its tcons failed to refresh
			 * (possibly due to reconnects).
			 */
			if (!rc)
				rc = refresh_tcon(vi, tcon);

			list_del_init(&tcon->ulist);
			cifs_put_tcon(tcon);
		}

		put_tcp_server(server);

next_vol:
		list_del_init(&vi->rlist);
		kref_put(&vi->refcnt, vol_release);
	}

	spin_lock(&cache_ttl_lock);
	queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	spin_unlock(&cache_ttl_lock);
}