// SPDX-License-Identifier: GPL-2.0
/*
 * DFS referral cache routines
 *
 * Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
 */

#include <linux/jhash.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/nls.h>
#include <linux/workqueue.h>
#include "cifsglob.h"
#include "smb2pdu.h"
#include "smb2proto.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_unicode.h"
#include "smb2glob.h"
#include "fs_context.h"

#include "dfs_cache.h"

#define CACHE_HTABLE_SIZE 32
#define CACHE_MAX_ENTRIES 64

#define IS_INTERLINK_SET(v) ((v) & (DFSREF_REFERRAL_SERVER | \
				    DFSREF_STORAGE_SERVER))

struct cache_dfs_tgt {
	char *name;
	int path_consumed;
	struct list_head list;
};

struct cache_entry {
	struct hlist_node hlist;
	const char *path;
	int hdr_flags;
	int ttl;
	int srvtype;
	int ref_flags;
	struct timespec64 etime;
	int path_consumed;
	int numtgts;
	struct list_head tlist;
	struct cache_dfs_tgt *tgthint;
};

struct vol_info {
	char *fullpath;
	spinlock_t ctx_lock;
	struct smb3_fs_context ctx;
	char *mntdata;
	struct list_head list;
	struct list_head rlist;
	struct kref refcnt;
};

static struct kmem_cache *cache_slab __read_mostly;
static struct workqueue_struct *dfscache_wq __read_mostly;

static int cache_ttl;
static DEFINE_SPINLOCK(cache_ttl_lock);

static struct nls_table *cache_nlsc;
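
/* Number of entries in the cache */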
static atomic_t cache_count;

static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
static DECLARE_RWSEM(htable_rw_lock);

static LIST_HEAD(vol_list);
static DEFINE_SPINLOCK(vol_list_lock);

static void refresh_cache_worker(struct work_struct *work);

static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);

static int get_normalized_path(const char *path, const char **npath)
{
	if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/'))
		return -EINVAL;

	if (*path == '\\') {
		*npath = path;
	} else {
		char *s = kstrdup(path, GFP_KERNEL);

		if (!s)
			return -ENOMEM;
		convert_delimiter(s, '\\');
		*npath = s;
	}
	return 0;
}

static inline void free_normalized_path(const char *path, const char *npath)
{
	if (path != npath)
		kfree(npath);
}

static inline bool cache_entry_expired(const struct cache_entry *ce)
{
	struct timespec64 ts;

	ktime_get_coarse_real_ts64(&ts);
	return timespec64_compare(&ts, &ce->etime) >= 0;
}

static inline void free_tgts(struct cache_entry *ce)
{
	struct cache_dfs_tgt *t, *n;

	list_for_each_entry_safe(t, n, &ce->tlist, list) {
		list_del(&t->list);
		kfree(t->name);
		kfree(t);
	}
}

static inline void flush_cache_ent(struct cache_entry *ce)
{
	hlist_del_init(&ce->hlist);
	kfree(ce->path);
	free_tgts(ce);
	atomic_dec(&cache_count);
	kmem_cache_free(cache_slab, ce);
}

static void flush_cache_ents(void)
{
	int i;

	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];
		struct hlist_node *n;
		struct cache_entry *ce;

		hlist_for_each_entry_safe(ce, n, l, hlist) {
			if (!hlist_unhashed(&ce->hlist))
				flush_cache_ent(ce);
		}
	}
}
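
/*
 * dfs cache /proc file
 */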
static int dfscache_proc_show(struct seq_file *m, void *v)
{
	int i;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	seq_puts(m, "DFS cache\n---------\n");

	down_read(&htable_rw_lock);
	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];

		hlist_for_each_entry(ce, l, hlist) {
			if (hlist_unhashed(&ce->hlist))
				continue;

			seq_printf(m,
				   "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
				   ce->path, ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
				   ce->ttl, ce->etime.tv_nsec, ce->hdr_flags, ce->ref_flags,
				   IS_INTERLINK_SET(ce->hdr_flags) ? "yes" : "no",
				   ce->path_consumed, cache_entry_expired(ce) ? "yes" : "no");

			list_for_each_entry(t, &ce->tlist, list) {
				seq_printf(m, "  %s%s\n",
					   t->name,
					   ce->tgthint == t ? " (target hint)" : "");
			}
		}
	}
	up_read(&htable_rw_lock);

	return 0;
}

static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
				   size_t count, loff_t *ppos)
{
	char c;
	int rc;

	rc = get_user(c, buffer);
	if (rc)
		return rc;

	if (c != '0')
		return -EINVAL;

	cifs_dbg(FYI, "clearing dfs cache\n");

	down_write(&htable_rw_lock);
	flush_cache_ents();
	up_write(&htable_rw_lock);

	return count;
}

static int dfscache_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, dfscache_proc_show, NULL);
}

const struct proc_ops dfscache_proc_ops = {
	.proc_open = dfscache_proc_open,
	.proc_read = seq_read,
	.proc_lseek = seq_lseek,
	.proc_release = single_release,
	.proc_write = dfscache_proc_write,
};

#ifdef CONFIG_CIFS_DEBUG2
static inline void dump_tgts(const struct cache_entry *ce)
{
	struct cache_dfs_tgt *t;

	cifs_dbg(FYI, "target list:\n");
	list_for_each_entry(t, &ce->tlist, list) {
		cifs_dbg(FYI, "  %s%s\n", t->name,
			 ce->tgthint == t ? " (target hint)" : "");
	}
}

static inline void dump_ce(const struct cache_entry *ce)
{
	cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
		 ce->path,
		 ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
		 ce->etime.tv_nsec,
		 ce->hdr_flags, ce->ref_flags,
		 IS_INTERLINK_SET(ce->hdr_flags) ? "yes" : "no",
		 ce->path_consumed,
		 cache_entry_expired(ce) ? "yes" : "no");
	dump_tgts(ce);
}

static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)
{
	int i;

	cifs_dbg(FYI, "DFS referrals returned by the server:\n");
	for (i = 0; i < numrefs; i++) {
		const struct dfs_info3_param *ref = &refs[i];

		cifs_dbg(FYI,
			 "\n"
			 "flags:         0x%x\n"
			 "path_consumed: %d\n"
			 "server_type:   0x%x\n"
			 "ref_flag:      0x%x\n"
			 "path_name:     %s\n"
			 "node_name:     %s\n"
			 "ttl:           %d (%dm)\n",
			 ref->flags, ref->path_consumed, ref->server_type,
			 ref->ref_flag, ref->path_name, ref->node_name,
			 ref->ttl, ref->ttl / 60);
	}
}
#else
#define dump_tgts(e)
#define dump_ce(e)
#define dump_refs(r, n)
#endif
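
/**
 * dfs_cache_init - Initialize DFS referral cache.
 *
 * Return zero if initialized successfully, otherwise non-zero.
 */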
int dfs_cache_init(void)
{
	int rc;
	int i;

	dfscache_wq = alloc_workqueue("cifs-dfscache",
				      WQ_FREEZABLE | WQ_MEM_RECLAIM, 1);
	if (!dfscache_wq)
		return -ENOMEM;

	cache_slab = kmem_cache_create("cifs_dfs_cache",
				       sizeof(struct cache_entry), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!cache_slab) {
		rc = -ENOMEM;
		goto out_destroy_wq;
	}

	for (i = 0; i < CACHE_HTABLE_SIZE; i++)
		INIT_HLIST_HEAD(&cache_htable[i]);

	atomic_set(&cache_count, 0);
	cache_nlsc = load_nls_default();

	cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
	return 0;

out_destroy_wq:
	destroy_workqueue(dfscache_wq);
	return rc;
}

static inline unsigned int cache_entry_hash(const void *data, int size)
{
	unsigned int h;

	h = jhash(data, size, 0);
	return h & (CACHE_HTABLE_SIZE - 1);
}
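
/* Check whether second path component of @path is SYSVOL or NETLOGON */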
static inline bool is_sysvol_or_netlogon(const char *path)
{
	const char *s;
	char sep = path[0];

	s = strchr(path + 1, sep) + 1;
	return !strncasecmp(s, "sysvol", strlen("sysvol")) ||
		!strncasecmp(s, "netlogon", strlen("netlogon"));
}
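
/* Return target hint of a DFS cache entry */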
static inline char *get_tgt_name(const struct cache_entry *ce)
{
	struct cache_dfs_tgt *t = ce->tgthint;

	return t ? t->name : ERR_PTR(-ENOENT);
}
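
/* Return expire time out of a new entry's TTL */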
static inline struct timespec64 get_expire_time(int ttl)
{
	struct timespec64 ts = {
		.tv_sec = ttl,
		.tv_nsec = 0,
	};
	struct timespec64 now;

	ktime_get_coarse_real_ts64(&now);
	return timespec64_add(now, ts);
}
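
/* Allocate a new DFS target */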
static struct cache_dfs_tgt *alloc_target(const char *name, int path_consumed)
{
	struct cache_dfs_tgt *t;

	t = kmalloc(sizeof(*t), GFP_ATOMIC);
	if (!t)
		return ERR_PTR(-ENOMEM);
	t->name = kstrdup(name, GFP_ATOMIC);
	if (!t->name) {
		kfree(t);
		return ERR_PTR(-ENOMEM);
	}
	t->path_consumed = path_consumed;
	INIT_LIST_HEAD(&t->list);
	return t;
}
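
/*
 * Copy DFS referral information to a cache entry and conditionally update
 * target hint.
 */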
static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
			 struct cache_entry *ce, const char *tgthint)
{
	int i;

	ce->ttl = refs[0].ttl;
	ce->etime = get_expire_time(ce->ttl);
	ce->srvtype = refs[0].server_type;
	ce->hdr_flags = refs[0].flags;
	ce->ref_flags = refs[0].ref_flag;
	ce->path_consumed = refs[0].path_consumed;

	for (i = 0; i < numrefs; i++) {
		struct cache_dfs_tgt *t;

		t = alloc_target(refs[i].node_name, refs[i].path_consumed);
		if (IS_ERR(t)) {
			free_tgts(ce);
			return PTR_ERR(t);
		}
		if (tgthint && !strcasecmp(t->name, tgthint)) {
			list_add(&t->list, &ce->tlist);
			tgthint = NULL;
		} else {
			list_add_tail(&t->list, &ce->tlist);
		}
		ce->numtgts++;
	}

	ce->tgthint = list_first_entry_or_null(&ce->tlist,
					       struct cache_dfs_tgt, list);

	return 0;
}
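
/* Allocate a new cache entry */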
static struct cache_entry *alloc_cache_entry(const char *path,
					     const struct dfs_info3_param *refs,
					     int numrefs)
{
	struct cache_entry *ce;
	int rc;

	ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL);
	if (!ce)
		return ERR_PTR(-ENOMEM);

	ce->path = kstrdup(path, GFP_KERNEL);
	if (!ce->path) {
		kmem_cache_free(cache_slab, ce);
		return ERR_PTR(-ENOMEM);
	}
	INIT_HLIST_NODE(&ce->hlist);
	INIT_LIST_HEAD(&ce->tlist);

	rc = copy_ref_data(refs, numrefs, ce, NULL);
	if (rc) {
		kfree(ce->path);
		kmem_cache_free(cache_slab, ce);
		ce = ERR_PTR(rc);
	}
	return ce;
}
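
/* Remove the entry that expires first; must be called with htable_rw_lock held for writing */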
static void remove_oldest_entry(void)
{
	int i;
	struct cache_entry *ce;
	struct cache_entry *to_del = NULL;

	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];

		hlist_for_each_entry(ce, l, hlist) {
			if (hlist_unhashed(&ce->hlist))
				continue;
			if (!to_del || timespec64_compare(&ce->etime,
							  &to_del->etime) < 0)
				to_del = ce;
		}
	}

	if (!to_del) {
		cifs_dbg(FYI, "%s: no entry to remove\n", __func__);
		return;
	}

	cifs_dbg(FYI, "%s: removing entry\n", __func__);
	dump_ce(to_del);
	flush_cache_ent(to_del);
}
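
/* Add a new DFS cache entry */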
static int add_cache_entry(const char *path, unsigned int hash,
			   struct dfs_info3_param *refs, int numrefs)
{
	struct cache_entry *ce;

	ce = alloc_cache_entry(path, refs, numrefs);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	spin_lock(&cache_ttl_lock);
	if (!cache_ttl) {
		cache_ttl = ce->ttl;
		queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	} else {
		cache_ttl = min_t(int, cache_ttl, ce->ttl);
		mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	}
	spin_unlock(&cache_ttl_lock);

	down_write(&htable_rw_lock);
	hlist_add_head(&ce->hlist, &cache_htable[hash]);
	dump_ce(ce);
	up_write(&htable_rw_lock);

	return 0;
}
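
/* Find a cache entry by exact (case-insensitive) path match; must be called with htable_rw_lock held */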
static struct cache_entry *__lookup_cache_entry(const char *path)
{
	struct cache_entry *ce;
	unsigned int h;
	bool found = false;

	h = cache_entry_hash(path, strlen(path));

	hlist_for_each_entry(ce, &cache_htable[h], hlist) {
		if (!strcasecmp(path, ce->path)) {
			found = true;
			dump_ce(ce);
			break;
		}
	}

	if (!found)
		ce = ERR_PTR(-ENOENT);
	return ce;
}
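
/*
 * Find a DFS cache entry in hash table and optionally check prefix path against
 * @path.
 *
 * Whole path components are matched: the lookup is retried on successively
 * shorter prefixes of @path until an entry is found.
 *
 * Must be called with htable_rw_lock held.
 *
 * Return ERR_PTR(-ENOENT) if the entry is not found.
 */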
static struct cache_entry *lookup_cache_entry(const char *path, unsigned int *hash)
{
	struct cache_entry *ce = ERR_PTR(-ENOENT);
	unsigned int h;
	int cnt = 0;
	char *npath;
	char *s, *e;
	char sep;

	npath = kstrdup(path, GFP_KERNEL);
	if (!npath)
		return ERR_PTR(-ENOMEM);

	s = npath;
	sep = *npath;
	while ((s = strchr(s, sep)) && ++cnt < 3)
		s++;

	if (cnt < 3) {
		h = cache_entry_hash(path, strlen(path));
		ce = __lookup_cache_entry(path);
		goto out;
	}

	/*
	 * Handle paths that have more than two path components and are a
	 * complete prefix of the DFS referral request path (@path).
	 *
	 * See MS-DFSC 3.2.5.5 "Receiving a Root Referral Request or Link
	 * Referral Request".
	 */
	h = cache_entry_hash(npath, strlen(npath));
	e = npath + strlen(npath) - 1;
	while (e > s) {
		char tmp;

		/* skip separators */
		while (e > s && *e == sep)
			e--;
		if (e == s)
			goto out;

		tmp = *(e+1);
		*(e+1) = 0;

		ce = __lookup_cache_entry(npath);
		if (!IS_ERR(ce)) {
			h = cache_entry_hash(npath, strlen(npath));
			break;
		}

		*(e+1) = tmp;

		/* backward until separator */
		while (e > s && *e != sep)
			e--;
	}
out:
	if (hash)
		*hash = h;
	kfree(npath);
	return ce;
}

static void __vol_release(struct vol_info *vi)
{
	kfree(vi->fullpath);
	kfree(vi->mntdata);
	smb3_cleanup_fs_context_contents(&vi->ctx);
	kfree(vi);
}

static void vol_release(struct kref *kref)
{
	struct vol_info *vi = container_of(kref, struct vol_info, refcnt);

	spin_lock(&vol_list_lock);
	list_del(&vi->list);
	spin_unlock(&vol_list_lock);
	__vol_release(vi);
}

static inline void free_vol_list(void)
{
	struct vol_info *vi, *nvi;

	list_for_each_entry_safe(vi, nvi, &vol_list, list) {
		list_del_init(&vi->list);
		__vol_release(vi);
	}
}
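
/**
 * dfs_cache_destroy - destroy DFS referral cache
 */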
void dfs_cache_destroy(void)
{
	cancel_delayed_work_sync(&refresh_task);
	unload_nls(cache_nlsc);
	free_vol_list();
	flush_cache_ents();
	kmem_cache_destroy(cache_slab);
	destroy_workqueue(dfscache_wq);

	cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
}
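
/* Refresh a cache entry from new referral data, preserving the target hint; must be called with htable_rw_lock held for writing */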
static int __update_cache_entry(const char *path,
				const struct dfs_info3_param *refs,
				int numrefs)
{
	int rc;
	struct cache_entry *ce;
	char *s, *th = NULL;

	ce = lookup_cache_entry(path, NULL);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	if (ce->tgthint) {
		s = ce->tgthint->name;
		th = kstrdup(s, GFP_ATOMIC);
		if (!th)
			return -ENOMEM;
	}

	free_tgts(ce);
	ce->numtgts = 0;

	rc = copy_ref_data(refs, numrefs, ce, th);

	kfree(th);

	return rc;
}

static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses,
			    const struct nls_table *nls_codepage, int remap,
			    const char *path, struct dfs_info3_param **refs,
			    int *numrefs)
{
	cifs_dbg(FYI, "%s: get a DFS referral for %s\n", __func__, path);

	if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
		return -EOPNOTSUPP;
	if (unlikely(!nls_codepage))
		return -EINVAL;

	*refs = NULL;
	*numrefs = 0;

	return ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs,
					       nls_codepage, remap);
}
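
/* Update a cache entry with fresh referral data, taking the htable write lock */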
static int update_cache_entry(const char *path,
			      const struct dfs_info3_param *refs,
			      int numrefs)
{
	int rc;

	down_write(&htable_rw_lock);
	rc = __update_cache_entry(path, refs, numrefs);
	up_write(&htable_rw_lock);

	return rc;
}
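
/*
 * Find, create or update a DFS cache entry.
 *
 * If the entry wasn't found, it will create a new one. Or if it was found but
 * expired, then it will update the entry accordingly.
 *
 * For interlinks, cifs_mount() and expand_dfs_referral() are supposed to
 * handle them properly.
 */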
static int __dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
			    const struct nls_table *nls_codepage, int remap,
			    const char *path, bool noreq)
{
	int rc;
	unsigned int hash;
	struct cache_entry *ce;
	struct dfs_info3_param *refs = NULL;
	int numrefs = 0;
	bool newent = false;

	cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(path, &hash);

	/*
	 * If @noreq is set, no requests will be sent to the server. Just return
	 * the cache entry.
	 */
	if (noreq) {
		up_read(&htable_rw_lock);
		return PTR_ERR_OR_ZERO(ce);
	}

	if (!IS_ERR(ce)) {
		if (!cache_entry_expired(ce)) {
			dump_ce(ce);
			up_read(&htable_rw_lock);
			return 0;
		}
	} else {
		newent = true;
	}

	up_read(&htable_rw_lock);

	/*
	 * Either the entry was not found, or it is expired.
	 * Request a new DFS referral in order to create or update a cache
	 * entry.
	 */
	rc = get_dfs_referral(xid, ses, nls_codepage, remap, path,
			      &refs, &numrefs);
	if (rc)
		return rc;

	dump_refs(refs, numrefs);

	if (!newent) {
		rc = update_cache_entry(path, refs, numrefs);
		goto out_free_refs;
	}

	if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
		cifs_dbg(FYI, "%s: reached max cache size (%d)\n",
			 __func__, CACHE_MAX_ENTRIES);
		down_write(&htable_rw_lock);
		remove_oldest_entry();
		up_write(&htable_rw_lock);
	}

	rc = add_cache_entry(path, hash, refs, numrefs);
	if (!rc)
		atomic_inc(&cache_count);

out_free_refs:
	free_dfs_info_array(refs, numrefs);
	return rc;
}
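
/*
 * Set up a DFS referral from a given cache entry.
 *
 * Must be called with htable_rw_lock held.
 */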
static int setup_referral(const char *path, struct cache_entry *ce,
			  struct dfs_info3_param *ref, const char *target)
{
	int rc;

	cifs_dbg(FYI, "%s: set up new ref\n", __func__);

	memset(ref, 0, sizeof(*ref));

	ref->path_name = kstrdup(path, GFP_ATOMIC);
	if (!ref->path_name)
		return -ENOMEM;

	ref->node_name = kstrdup(target, GFP_ATOMIC);
	if (!ref->node_name) {
		rc = -ENOMEM;
		goto err_free_path;
	}

	ref->path_consumed = ce->path_consumed;
	ref->ttl = ce->ttl;
	ref->server_type = ce->srvtype;
	ref->ref_flag = ce->ref_flags;
	ref->flags = ce->hdr_flags;

	return 0;

err_free_path:
	kfree(ref->path_name);
	ref->path_name = NULL;
	return rc;
}
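
/* Return target list of a DFS cache entry; must be called with htable_rw_lock held */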
static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
{
	int rc;
	struct list_head *head = &tl->tl_list;
	struct cache_dfs_tgt *t;
	struct dfs_cache_tgt_iterator *it, *nit;

	memset(tl, 0, sizeof(*tl));
	INIT_LIST_HEAD(head);

	list_for_each_entry(t, &ce->tlist, list) {
		it = kzalloc(sizeof(*it), GFP_ATOMIC);
		if (!it) {
			rc = -ENOMEM;
			goto err_free_it;
		}

		it->it_name = kstrdup(t->name, GFP_ATOMIC);
		if (!it->it_name) {
			kfree(it);
			rc = -ENOMEM;
			goto err_free_it;
		}
		it->it_path_consumed = t->path_consumed;

		if (ce->tgthint == t)
			list_add(&it->it_list, head);
		else
			list_add_tail(&it->it_list, head);
	}

	tl->tl_numtgts = ce->numtgts;

	return 0;

err_free_it:
	list_for_each_entry_safe(it, nit, head, it_list) {
		kfree(it->it_name);
		kfree(it);
	}
	return rc;
}
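
/**
 * dfs_cache_find - find a DFS cache entry
 *
 * If it doesn't find the cache entry, then it will get a DFS referral
 * for @path and create a new entry.
 *
 * In case the cache entry exists but expired, it will get a DFS referral
 * for @path and then update the respective cache entry.
 *
 * These parameters are passed down to the DFS referral request:
 *
 * @xid: syscall xid
 * @ses: smb session to issue the request on
 * @nls_codepage: charset conversion
 * @remap: path character remapping type
 * @path: path to lookup in DFS referral cache.
 *
 * These parameters are related to the DFS referral cache entry:
 *
 * @ref: when non-NULL, store single DFS referral result in it.
 * @tgt_list: when non-NULL, store complete DFS target list in it.
 *
 * Return zero if the target was found, otherwise non-zero.
 */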
int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
		   const struct nls_table *nls_codepage, int remap,
		   const char *path, struct dfs_info3_param *ref,
		   struct dfs_cache_tgt_list *tgt_list)
{
	int rc;
	const char *npath;
	struct cache_entry *ce;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	rc = __dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
	if (rc)
		goto out_free_path;

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		up_read(&htable_rw_lock);
		rc = PTR_ERR(ce);
		goto out_free_path;
	}

	if (ref)
		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
	else
		rc = 0;
	if (!rc && tgt_list)
		rc = get_targets(ce, tgt_list);

	up_read(&htable_rw_lock);

out_free_path:
	free_normalized_path(path, npath);
	return rc;
}
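
/**
 * dfs_cache_noreq_find - find a DFS cache entry without sending any requests to
 * the currently connected server.
 *
 * NOTE: This function will neither update a cache entry in case it was
 * expired, nor create a new cache entry if @path hasn't been found. It heavily
 * relies on an existing cache entry.
 *
 * @path: path to lookup in the DFS referral cache.
 * @ref: when non-NULL, store single DFS referral result in it.
 * @tgt_list: when non-NULL, store complete DFS target list in it.
 *
 * Return 0 if successful.
 * Return -ENOENT if the entry was not found.
 * Return non-zero for other errors.
 */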
int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
			 struct dfs_cache_tgt_list *tgt_list)
{
	int rc;
	const char *npath;
	struct cache_entry *ce;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	if (ref)
		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
	else
		rc = 0;
	if (!rc && tgt_list)
		rc = get_targets(ce, tgt_list);

out_unlock:
	up_read(&htable_rw_lock);
	free_normalized_path(path, npath);

	return rc;
}
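
/**
 * dfs_cache_update_tgthint - update target hint of a DFS cache entry
 *
 * If it doesn't find the cache entry, then it will get a DFS referral for @path
 * and create a new entry.
 *
 * In case the cache entry exists but expired, it will get a DFS referral
 * for @path and then update the respective cache entry.
 *
 * @xid: syscall id
 * @ses: smb session
 * @nls_codepage: charset conversion
 * @remap: type of character remapping for paths
 * @path: path to lookup in DFS referral cache
 * @it: DFS target iterator
 *
 * Return zero if the target hint was updated successfully, otherwise non-zero.
 */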
int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
			     const struct nls_table *nls_codepage, int remap,
			     const char *path,
			     const struct dfs_cache_tgt_iterator *it)
{
	int rc;
	const char *npath;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	cifs_dbg(FYI, "%s: update target hint - path: %s\n", __func__, npath);

	rc = __dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
	if (rc)
		goto out_free_path;

	down_write(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	t = ce->tgthint;

	if (likely(!strcasecmp(it->it_name, t->name)))
		goto out_unlock;

	list_for_each_entry(t, &ce->tlist, list) {
		if (!strcasecmp(t->name, it->it_name)) {
			ce->tgthint = t;
			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
				 it->it_name);
			break;
		}
	}

out_unlock:
	up_write(&htable_rw_lock);
out_free_path:
	free_normalized_path(path, npath);

	return rc;
}
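
/**
 * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
 * without sending any requests to the currently connected server.
 *
 * NOTE: This function will neither update a cache entry in case it was
 * expired, nor create a new cache entry if @path hasn't been found. It heavily
 * relies on an existing cache entry.
 *
 * @path: path to lookup in DFS referral cache
 * @it: target iterator which contains the target hint to update the cache
 * entry with
 *
 * Return zero if the target hint was updated successfully, otherwise non-zero.
 */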
int dfs_cache_noreq_update_tgthint(const char *path,
				   const struct dfs_cache_tgt_iterator *it)
{
	int rc;
	const char *npath;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	if (!it)
		return -EINVAL;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);

	down_write(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	rc = 0;
	t = ce->tgthint;

	if (unlikely(!strcasecmp(it->it_name, t->name)))
		goto out_unlock;

	list_for_each_entry(t, &ce->tlist, list) {
		if (!strcasecmp(t->name, it->it_name)) {
			ce->tgthint = t;
			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
				 it->it_name);
			break;
		}
	}

out_unlock:
	up_write(&htable_rw_lock);
	free_normalized_path(path, npath);

	return rc;
}
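
/**
 * dfs_cache_get_tgt_referral - returns a DFS referral (@ref) from a given
 * target iterator (@it).
 *
 * @path: path to lookup in DFS referral cache
 * @it: DFS target iterator
 * @ref: DFS referral pointer to set up the gathered information
 *
 * Return zero if the DFS referral was set up correctly, otherwise non-zero.
 */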
int dfs_cache_get_tgt_referral(const char *path,
			       const struct dfs_cache_tgt_iterator *it,
			       struct dfs_info3_param *ref)
{
	int rc;
	const char *npath;
	struct cache_entry *ce;

	if (!it || !ref)
		return -EINVAL;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name);

	rc = setup_referral(path, ce, ref, it->it_name);

out_unlock:
	up_read(&htable_rw_lock);
	free_normalized_path(path, npath);

	return rc;
}
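
/**
 * dfs_cache_add_vol - add a cifs context during mount() that will be handled by
 * DFS cache refresh worker.
 *
 * @mntdata: mount data.
 * @ctx: cifs context.
 * @fullpath: origin full path.
 *
 * Return zero if context was set up correctly, otherwise non-zero.
 */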
int dfs_cache_add_vol(char *mntdata, struct smb3_fs_context *ctx, const char *fullpath)
{
	int rc;
	struct vol_info *vi;

	if (!ctx || !fullpath || !mntdata)
		return -EINVAL;

	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);

	vi = kzalloc(sizeof(*vi), GFP_KERNEL);
	if (!vi)
		return -ENOMEM;

	vi->fullpath = kstrdup(fullpath, GFP_KERNEL);
	if (!vi->fullpath) {
		rc = -ENOMEM;
		goto err_free_vi;
	}

	rc = smb3_fs_context_dup(&vi->ctx, ctx);
	if (rc)
		goto err_free_fullpath;

	vi->mntdata = mntdata;
	spin_lock_init(&vi->ctx_lock);
	kref_init(&vi->refcnt);

	spin_lock(&vol_list_lock);
	list_add_tail(&vi->list, &vol_list);
	spin_unlock(&vol_list_lock);

	return 0;

err_free_fullpath:
	kfree(vi->fullpath);
err_free_vi:
	kfree(vi);
	return rc;
}
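
/* Must be called with vol_list_lock held */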
static struct vol_info *find_vol(const char *fullpath)
{
	struct vol_info *vi;

	list_for_each_entry(vi, &vol_list, list) {
		cifs_dbg(FYI, "%s: vi->fullpath: %s\n", __func__, vi->fullpath);
		if (!strcasecmp(vi->fullpath, fullpath))
			return vi;
	}
	return ERR_PTR(-ENOENT);
}
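
/**
 * dfs_cache_update_vol - update vol info in DFS cache after failover
 *
 * @fullpath: fullpath to look up in volume list.
 * @server: TCP ses pointer.
 *
 * Return zero if volume was updated, otherwise non-zero.
 */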
int dfs_cache_update_vol(const char *fullpath, struct TCP_Server_Info *server)
{
	struct vol_info *vi;

	if (!fullpath || !server)
		return -EINVAL;

	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);

	spin_lock(&vol_list_lock);
	vi = find_vol(fullpath);
	if (IS_ERR(vi)) {
		spin_unlock(&vol_list_lock);
		return PTR_ERR(vi);
	}
	kref_get(&vi->refcnt);
	spin_unlock(&vol_list_lock);

	cifs_dbg(FYI, "%s: updating volume info\n", __func__);
	spin_lock(&vi->ctx_lock);
	memcpy(&vi->ctx.dstaddr, &server->dstaddr,
	       sizeof(vi->ctx.dstaddr));
	spin_unlock(&vi->ctx_lock);

	kref_put(&vi->refcnt, vol_release);

	return 0;
}
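
/**
 * dfs_cache_del_vol - remove volume info in DFS cache during umount()
 *
 * @fullpath: fullpath to look up in volume list.
 */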
void dfs_cache_del_vol(const char *fullpath)
{
	struct vol_info *vi;

	if (!fullpath || !*fullpath)
		return;

	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);

	spin_lock(&vol_list_lock);
	vi = find_vol(fullpath);
	spin_unlock(&vol_list_lock);

	if (!IS_ERR(vi))
		kref_put(&vi->refcnt, vol_release);
}
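
/**
 * dfs_cache_get_tgt_share - parse a DFS target
 *
 * @path: DFS full path
 * @it: DFS target iterator
 * @share: tree name
 * @prefix: prefix path
 *
 * Return zero if target was parsed correctly, otherwise non-zero.
 */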
int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
			    char **share, char **prefix)
{
	char *s, sep, *p;
	size_t len;
	size_t plen1, plen2;

	if (!it || !path || !share || !prefix || strlen(path) < it->it_path_consumed)
		return -EINVAL;

	*share = NULL;
	*prefix = NULL;

	sep = it->it_name[0];
	if (sep != '\\' && sep != '/')
		return -EINVAL;

	s = strchr(it->it_name + 1, sep);
	if (!s)
		return -EINVAL;

	/* point to prefix in target node */
	s = strchrnul(s + 1, sep);

	/* extract target share */
	*share = kstrndup(it->it_name, s - it->it_name, GFP_KERNEL);
	if (!*share)
		return -ENOMEM;

	/* skip separator */
	if (*s)
		s++;

	p = path + it->it_path_consumed;
	if (*p == sep)
		p++;

	/* point to prefix in DFS path */
	plen1 = it->it_name + strlen(it->it_name) - s;
	plen2 = path + strlen(path) - p;
	if (plen1 || plen2) {
		len = plen1 + plen2 + 2;
		*prefix = kmalloc(len, GFP_KERNEL);
		if (!*prefix) {
			kfree(*share);
			*share = NULL;
			return -ENOMEM;
		}
		if (plen1)
			scnprintf(*prefix, len, "%.*s%c%.*s", (int)plen1, s, sep, (int)plen2, p);
		else
			strscpy(*prefix, p, len);
	}
	return 0;
}
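
/* Collect DFS-capable tcons from @server, bumping their refcount for the refresh pass */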
static void get_tcons(struct TCP_Server_Info *server, struct list_head *head)
{
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;

	INIT_LIST_HEAD(head);

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
			if (!tcon->need_reconnect && !tcon->need_reopen_files &&
			    tcon->dfs_path) {
				tcon->tc_count++;
				list_add_tail(&tcon->ulist, head);
			}
		}
		if (ses->tcon_ipc && !ses->tcon_ipc->need_reconnect &&
		    ses->tcon_ipc->dfs_path) {
			list_add_tail(&ses->tcon_ipc->ulist, head);
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
}

static bool is_dfs_link(const char *path)
{
	char *s;

	s = strchr(path + 1, '\\');
	if (!s)
		return false;
	return !!strchr(s + 1, '\\');
}

static char *get_dfs_root(const char *path)
{
	char *s, *npath;

	s = strchr(path + 1, '\\');
	if (!s)
		return ERR_PTR(-EINVAL);

	s = strchr(s + 1, '\\');
	if (!s)
		return ERR_PTR(-EINVAL);

	npath = kstrndup(path, s - path, GFP_KERNEL);
	if (!npath)
		return ERR_PTR(-ENOMEM);

	return npath;
}

static inline void put_tcp_server(struct TCP_Server_Info *server)
{
	cifs_put_tcp_session(server, 0);
}

static struct TCP_Server_Info *get_tcp_server(struct smb3_fs_context *ctx)
{
	struct TCP_Server_Info *server;

	server = cifs_find_tcp_session(ctx);
	if (IS_ERR_OR_NULL(server))
		return NULL;

	spin_lock(&GlobalMid_Lock);
	if (server->tcpStatus != CifsGood) {
		spin_unlock(&GlobalMid_Lock);
		put_tcp_server(server);
		return NULL;
	}
	spin_unlock(&GlobalMid_Lock);

	return server;
}
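
/* Find root SMB session out of a DFS link path */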
static struct cifs_ses *find_root_ses(struct vol_info *vi,
				      struct cifs_tcon *tcon,
				      const char *path)
{
	char *rpath;
	int rc;
	struct cache_entry *ce;
	struct dfs_info3_param ref = {0};
	char *mdata = NULL, *devname = NULL;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses;
	struct smb3_fs_context ctx = {NULL};

	rpath = get_dfs_root(path);
	if (IS_ERR(rpath))
		return ERR_CAST(rpath);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(rpath, NULL);
	if (IS_ERR(ce)) {
		up_read(&htable_rw_lock);
		ses = ERR_CAST(ce);
		goto out;
	}

	rc = setup_referral(path, ce, &ref, get_tgt_name(ce));
	if (rc) {
		up_read(&htable_rw_lock);
		ses = ERR_PTR(rc);
		goto out;
	}

	up_read(&htable_rw_lock);

	mdata = cifs_compose_mount_options(vi->mntdata, rpath, &ref,
					   &devname);
	free_dfs_info_param(&ref);

	if (IS_ERR(mdata)) {
		ses = ERR_CAST(mdata);
		mdata = NULL;
		goto out;
	}

	rc = cifs_setup_volume_info(&ctx, NULL, devname);

	if (rc) {
		ses = ERR_PTR(rc);
		goto out;
	}

	server = get_tcp_server(&ctx);
	if (!server) {
		ses = ERR_PTR(-EHOSTDOWN);
		goto out;
	}

	ses = cifs_get_smb_ses(server, &ctx);

out:
	smb3_cleanup_fs_context_contents(&ctx);
	kfree(mdata);
	kfree(rpath);
	kfree(devname);

	return ses;
}
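
/* Refresh DFS cache entry from a given tcon */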
static int refresh_tcon(struct vol_info *vi, struct cifs_tcon *tcon)
{
	int rc = 0;
	unsigned int xid;
	const char *path, *npath;
	struct cache_entry *ce;
	struct cifs_ses *root_ses = NULL, *ses;
	struct dfs_info3_param *refs = NULL;
	int numrefs = 0;

	xid = get_xid();

	path = tcon->dfs_path + 1;

	rc = get_normalized_path(path, &npath);
	if (rc)
		goto out_free_xid;

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		up_read(&htable_rw_lock);
		goto out_free_path;
	}

	if (!cache_entry_expired(ce)) {
		up_read(&htable_rw_lock);
		goto out_free_path;
	}

	up_read(&htable_rw_lock);

	/* If it is a DFS Link, then use root SMB session for refreshing it */
	if (is_dfs_link(npath)) {
		ses = root_ses = find_root_ses(vi, tcon, npath);
		if (IS_ERR(ses)) {
			rc = PTR_ERR(ses);
			root_ses = NULL;
			goto out_free_path;
		}
	} else {
		ses = tcon->ses;
	}

	rc = get_dfs_referral(xid, ses, cache_nlsc, tcon->remap, npath, &refs,
			      &numrefs);
	if (!rc) {
		dump_refs(refs, numrefs);
		rc = update_cache_entry(npath, refs, numrefs);
		free_dfs_info_array(refs, numrefs);
	}

	if (root_ses)
		cifs_put_smb_ses(root_ses);

out_free_path:
	free_normalized_path(path, npath);

out_free_xid:
	free_xid(xid);
	return rc;
}
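
/*
 * Worker that will refresh DFS cache based on lowest TTL value from a DFS
 * referral.
 */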
static void refresh_cache_worker(struct work_struct *work)
{
	struct vol_info *vi, *nvi;
	struct TCP_Server_Info *server;
	LIST_HEAD(vols);
	LIST_HEAD(tcons);
	struct cifs_tcon *tcon, *ntcon;
	int rc;

	/*
	 * Find SMB volumes that are eligible (server->tcpStatus == CifsGood)
	 * for refreshing.
	 */
	spin_lock(&vol_list_lock);
	list_for_each_entry(vi, &vol_list, list) {
		server = get_tcp_server(&vi->ctx);
		if (!server)
			continue;

		kref_get(&vi->refcnt);
		list_add_tail(&vi->rlist, &vols);
		put_tcp_server(server);
	}
	spin_unlock(&vol_list_lock);

	/* Walk through all TCONs and refresh any expired cache entry */
	list_for_each_entry_safe(vi, nvi, &vols, rlist) {
		spin_lock(&vi->ctx_lock);
		server = get_tcp_server(&vi->ctx);
		spin_unlock(&vi->ctx_lock);

		if (!server)
			goto next_vol;

		get_tcons(server, &tcons);
		rc = 0;

		list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
			/*
			 * Skip tcp server if any of its tcons failed to
			 * refresh (possibly due to reconnects).
			 */
			if (!rc)
				rc = refresh_tcon(vi, tcon);

			list_del_init(&tcon->ulist);
			cifs_put_tcon(tcon);
		}

		put_tcp_server(server);

next_vol:
		list_del_init(&vi->rlist);
		kref_put(&vi->refcnt, vol_release);
	}

	spin_lock(&cache_ttl_lock);
	queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	spin_unlock(&cache_ttl_lock);
}