// SPDX-License-Identifier: GPL-2.0
/*
 * DFS referral cache routines
 *
 * Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
 */

#include <linux/jhash.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/nls.h>
#include <linux/workqueue.h>
#include "cifsglob.h"
#include "smb2pdu.h"
#include "smb2proto.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_unicode.h"
#include "smb2glob.h"
#include "fs_context.h"

#include "dfs_cache.h"

#define CACHE_HTABLE_SIZE 32
#define CACHE_MAX_ENTRIES 64

#define IS_INTERLINK_SET(v) ((v) & (DFSREF_REFERRAL_SERVER | \
				    DFSREF_STORAGE_SERVER))

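/* One target returned in a DFS referral */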
struct cache_dfs_tgt {
	char *name;
	int path_consumed;
	struct list_head list;
};

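/* In-memory copy of a DFS referral, hashed by referral path */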
struct cache_entry {
	struct hlist_node hlist;
	const char *path;
	int ttl;
	int srvtype;
	int flags;
	struct timespec64 etime;
	int path_consumed;
	int numtgts;
	struct list_head tlist;
	struct cache_dfs_tgt *tgthint;
};

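/* Mounted volume tracked for periodic DFS cache refresh */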
struct vol_info {
	char *fullpath;
	spinlock_t ctx_lock;
	struct smb3_fs_context ctx;
	char *mntdata;
	struct list_head list;
	struct list_head rlist;
	struct kref refcnt;
};

static struct kmem_cache *cache_slab __read_mostly;
static struct workqueue_struct *dfscache_wq __read_mostly;

static int cache_ttl;
static DEFINE_SPINLOCK(cache_ttl_lock);

static struct nls_table *cache_nlsc;

/*
 * Number of entries in the cache
 */
static atomic_t cache_count;

static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
static DECLARE_RWSEM(htable_rw_lock);

static LIST_HEAD(vol_list);
static DEFINE_SPINLOCK(vol_list_lock);

static void refresh_cache_worker(struct work_struct *work);

static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);

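/*
 * Return a backslash-delimited version of @path. If @path already uses
 * backslashes, it is returned as-is; otherwise a converted copy is allocated
 * and must be freed with free_normalized_path().
 */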
static int get_normalized_path(const char *path, char **npath)
{
	if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/'))
		return -EINVAL;

	if (*path == '\\') {
		*npath = (char *)path;
	} else {
		*npath = kstrndup(path, strlen(path), GFP_KERNEL);
		if (!*npath)
			return -ENOMEM;
		convert_delimiter(*npath, '\\');
	}
	return 0;
}

static inline void free_normalized_path(const char *path, char *npath)
{
	if (path != npath)
		kfree(npath);
}

static inline bool cache_entry_expired(const struct cache_entry *ce)
{
	struct timespec64 ts;

	ktime_get_coarse_real_ts64(&ts);
	return timespec64_compare(&ts, &ce->etime) >= 0;
}

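/* Free all targets of a cache entry */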
static inline void free_tgts(struct cache_entry *ce)
{
	struct cache_dfs_tgt *t, *n;

	list_for_each_entry_safe(t, n, &ce->tlist, list) {
		list_del(&t->list);
		kfree(t->name);
		kfree(t);
	}
}

static inline void flush_cache_ent(struct cache_entry *ce)
{
	hlist_del_init(&ce->hlist);
	kfree(ce->path);
	free_tgts(ce);
	atomic_dec(&cache_count);
	kmem_cache_free(cache_slab, ce);
}

static void flush_cache_ents(void)
{
	int i;

	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];
		struct hlist_node *n;
		struct cache_entry *ce;

		hlist_for_each_entry_safe(ce, n, l, hlist) {
			if (!hlist_unhashed(&ce->hlist))
				flush_cache_ent(ce);
		}
	}
}

/*
 * dfs cache /proc file
 */
static int dfscache_proc_show(struct seq_file *m, void *v)
{
	int i;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	seq_puts(m, "DFS cache\n---------\n");

	down_read(&htable_rw_lock);
	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];

		hlist_for_each_entry(ce, l, hlist) {
			if (hlist_unhashed(&ce->hlist))
				continue;

			seq_printf(m,
				   "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,"
				   "interlink=%s,path_consumed=%d,expired=%s\n",
				   ce->path,
				   ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
				   ce->ttl, ce->etime.tv_nsec,
				   IS_INTERLINK_SET(ce->flags) ? "yes" : "no",
				   ce->path_consumed,
				   cache_entry_expired(ce) ? "yes" : "no");

			list_for_each_entry(t, &ce->tlist, list) {
				seq_printf(m, "  %s%s\n",
					   t->name,
					   ce->tgthint == t ? " (target hint)" : "");
			}
		}
	}
	up_read(&htable_rw_lock);

	return 0;
}

static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
				   size_t count, loff_t *ppos)
{
	char c;
	int rc;

	rc = get_user(c, buffer);
	if (rc)
		return rc;

	if (c != '0')
		return -EINVAL;

	cifs_dbg(FYI, "clearing dfs cache\n");

	down_write(&htable_rw_lock);
	flush_cache_ents();
	up_write(&htable_rw_lock);

	return count;
}

static int dfscache_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, dfscache_proc_show, NULL);
}

const struct proc_ops dfscache_proc_ops = {
	.proc_open	= dfscache_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= dfscache_proc_write,
};

#ifdef CONFIG_CIFS_DEBUG2
static inline void dump_tgts(const struct cache_entry *ce)
{
	struct cache_dfs_tgt *t;

	cifs_dbg(FYI, "target list:\n");
	list_for_each_entry(t, &ce->tlist, list) {
		cifs_dbg(FYI, "  %s%s\n", t->name,
			 ce->tgthint == t ? " (target hint)" : "");
	}
}

static inline void dump_ce(const struct cache_entry *ce)
{
	cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,interlink=%s,path_consumed=%d,expired=%s\n",
		 ce->path,
		 ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
		 ce->etime.tv_nsec,
		 IS_INTERLINK_SET(ce->flags) ? "yes" : "no",
		 ce->path_consumed,
		 cache_entry_expired(ce) ? "yes" : "no");
	dump_tgts(ce);
}

static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)
{
	int i;

	cifs_dbg(FYI, "DFS referrals returned by the server:\n");
	for (i = 0; i < numrefs; i++) {
		const struct dfs_info3_param *ref = &refs[i];

		cifs_dbg(FYI,
			 "\n"
			 "flags:         0x%x\n"
			 "path_consumed: %d\n"
			 "server_type:   0x%x\n"
			 "ref_flag:      0x%x\n"
			 "path_name:     %s\n"
			 "node_name:     %s\n"
			 "ttl:           %d (%dm)\n",
			 ref->flags, ref->path_consumed, ref->server_type,
			 ref->ref_flag, ref->path_name, ref->node_name,
			 ref->ttl, ref->ttl / 60);
	}
}
#else
#define dump_tgts(e)
#define dump_ce(e)
#define dump_refs(r, n)
#endif

/**
 * dfs_cache_init - Initialize DFS referral cache.
 *
 * Return zero if initialized successfully, otherwise non-zero.
 */
int dfs_cache_init(void)
{
	int rc;
	int i;

	dfscache_wq = alloc_workqueue("cifs-dfscache",
				      WQ_FREEZABLE | WQ_MEM_RECLAIM, 1);
	if (!dfscache_wq)
		return -ENOMEM;

	cache_slab = kmem_cache_create("cifs_dfs_cache",
				       sizeof(struct cache_entry), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!cache_slab) {
		rc = -ENOMEM;
		goto out_destroy_wq;
	}

	for (i = 0; i < CACHE_HTABLE_SIZE; i++)
		INIT_HLIST_HEAD(&cache_htable[i]);

	atomic_set(&cache_count, 0);
	cache_nlsc = load_nls_default();

	cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
	return 0;

out_destroy_wq:
	destroy_workqueue(dfscache_wq);
	return rc;
}

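/* Hash a referral path into an index of cache_htable */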
static inline unsigned int cache_entry_hash(const void *data, int size)
{
	unsigned int h;

	h = jhash(data, size, 0);
	return h & (CACHE_HTABLE_SIZE - 1);
}

/* Check whether second path component of @path is SYSVOL or NETLOGON */
static inline bool is_sysvol_or_netlogon(const char *path)
{
	const char *s;
	char sep = path[0];

	s = strchr(path + 1, sep) + 1;
	return !strncasecmp(s, "sysvol", strlen("sysvol")) ||
		!strncasecmp(s, "netlogon", strlen("netlogon"));
}

/* Return target hint of a DFS cache entry */
static inline char *get_tgt_name(const struct cache_entry *ce)
{
	struct cache_dfs_tgt *t = ce->tgthint;

	return t ? t->name : ERR_PTR(-ENOENT);
}

/* Return expire time out of a new entry's TTL */
static inline struct timespec64 get_expire_time(int ttl)
{
	struct timespec64 ts = {
		.tv_sec = ttl,
		.tv_nsec = 0,
	};
	struct timespec64 now;

	ktime_get_coarse_real_ts64(&now);
	return timespec64_add(now, ts);
}

/* Allocate a new DFS target */
static struct cache_dfs_tgt *alloc_target(const char *name, int path_consumed)
{
	struct cache_dfs_tgt *t;

	t = kmalloc(sizeof(*t), GFP_ATOMIC);
	if (!t)
		return ERR_PTR(-ENOMEM);
	t->name = kstrndup(name, strlen(name), GFP_ATOMIC);
	if (!t->name) {
		kfree(t);
		return ERR_PTR(-ENOMEM);
	}
	t->path_consumed = path_consumed;
	INIT_LIST_HEAD(&t->list);
	return t;
}

/*
 * Copy DFS referral information to a cache entry and conditionally update
 * target hint.
 */
static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
			 struct cache_entry *ce, const char *tgthint)
{
	int i;

	ce->ttl = refs[0].ttl;
	ce->etime = get_expire_time(ce->ttl);
	ce->srvtype = refs[0].server_type;
	ce->flags = refs[0].ref_flag;
	ce->path_consumed = refs[0].path_consumed;

	for (i = 0; i < numrefs; i++) {
		struct cache_dfs_tgt *t;

		t = alloc_target(refs[i].node_name, refs[i].path_consumed);
		if (IS_ERR(t)) {
			free_tgts(ce);
			return PTR_ERR(t);
		}
		if (tgthint && !strcasecmp(t->name, tgthint)) {
			list_add(&t->list, &ce->tlist);
			tgthint = NULL;
		} else {
			list_add_tail(&t->list, &ce->tlist);
		}
		ce->numtgts++;
	}

	ce->tgthint = list_first_entry_or_null(&ce->tlist,
					       struct cache_dfs_tgt, list);

	return 0;
}

/* Allocate a new cache entry */
static struct cache_entry *alloc_cache_entry(const char *path,
					     const struct dfs_info3_param *refs,
					     int numrefs)
{
	struct cache_entry *ce;
	int rc;

	ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL);
	if (!ce)
		return ERR_PTR(-ENOMEM);

	ce->path = kstrndup(path, strlen(path), GFP_KERNEL);
	if (!ce->path) {
		kmem_cache_free(cache_slab, ce);
		return ERR_PTR(-ENOMEM);
	}
	INIT_HLIST_NODE(&ce->hlist);
	INIT_LIST_HEAD(&ce->tlist);

	rc = copy_ref_data(refs, numrefs, ce, NULL);
	if (rc) {
		kfree(ce->path);
		kmem_cache_free(cache_slab, ce);
		ce = ERR_PTR(rc);
	}
	return ce;
}

/* Must be called with htable_rw_lock held */
static void remove_oldest_entry(void)
{
	int i;
	struct cache_entry *ce;
	struct cache_entry *to_del = NULL;

	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];

		hlist_for_each_entry(ce, l, hlist) {
			if (hlist_unhashed(&ce->hlist))
				continue;
			if (!to_del || timespec64_compare(&ce->etime,
							  &to_del->etime) < 0)
				to_del = ce;
		}
	}

	if (!to_del) {
		cifs_dbg(FYI, "%s: no entry to remove\n", __func__);
		return;
	}

	cifs_dbg(FYI, "%s: removing entry\n", __func__);
	dump_ce(to_del);
	flush_cache_ent(to_del);
}

/* Add a new DFS cache entry */
static int add_cache_entry(const char *path, unsigned int hash,
			   struct dfs_info3_param *refs, int numrefs)
{
	struct cache_entry *ce;

	ce = alloc_cache_entry(path, refs, numrefs);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	spin_lock(&cache_ttl_lock);
	if (!cache_ttl) {
		cache_ttl = ce->ttl;
		queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	} else {
		cache_ttl = min_t(int, cache_ttl, ce->ttl);
		mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	}
	spin_unlock(&cache_ttl_lock);

	down_write(&htable_rw_lock);
	hlist_add_head(&ce->hlist, &cache_htable[hash]);
	dump_ce(ce);
	up_write(&htable_rw_lock);

	return 0;
}

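/*
 * Find a DFS cache entry by exact (case-insensitive) path match.
 * Must be called with htable_rw_lock held.
 */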
static struct cache_entry *__lookup_cache_entry(const char *path)
{
	struct cache_entry *ce;
	unsigned int h;
	bool found = false;

	h = cache_entry_hash(path, strlen(path));

	hlist_for_each_entry(ce, &cache_htable[h], hlist) {
		if (!strcasecmp(path, ce->path)) {
			found = true;
			dump_ce(ce);
			break;
		}
	}

	if (!found)
		ce = ERR_PTR(-ENOENT);
	return ce;
}

/*
 * Find a DFS cache entry in hash table and optionally check prefix path against
 * @path.
 * Use whole path components in the match.
 * Must be called with htable_rw_lock held.
 *
 * Return ERR_PTR(-ENOENT) if the entry is not found.
 */
static struct cache_entry *lookup_cache_entry(const char *path, unsigned int *hash)
{
	struct cache_entry *ce = ERR_PTR(-ENOENT);
	unsigned int h;
	int cnt = 0;
	char *npath;
	char *s, *e;
	char sep;

	npath = kstrndup(path, strlen(path), GFP_KERNEL);
	if (!npath)
		return ERR_PTR(-ENOMEM);

	s = npath;
	sep = *npath;
	while ((s = strchr(s, sep)) && ++cnt < 3)
		s++;

	if (cnt < 3) {
		h = cache_entry_hash(path, strlen(path));
		ce = __lookup_cache_entry(path);
		goto out;
	}

	/*
	 * Handle paths that have more than two path components and are a
	 * complete prefix of the DFS referral request path (@path).
	 *
	 * See MS-DFSC 3.2.5.5 "Receiving a Root Referral Request or Link
	 * Referral Request".
	 */
	h = cache_entry_hash(npath, strlen(npath));
	e = npath + strlen(npath) - 1;
	while (e > s) {
		char tmp;

		/* skip separators */
		while (e > s && *e == sep)
			e--;
		if (e == s)
			goto out;

		tmp = *(e+1);
		*(e+1) = 0;

		ce = __lookup_cache_entry(npath);
		if (!IS_ERR(ce)) {
			h = cache_entry_hash(npath, strlen(npath));
			break;
		}

		*(e+1) = tmp;

		while (e > s && *e != sep)
			e--;
	}
out:
	if (hash)
		*hash = h;
	kfree(npath);
	return ce;
}

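/* Free a vol_info that has already been removed from vol_list */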
static void __vol_release(struct vol_info *vi)
{
	kfree(vi->fullpath);
	kfree(vi->mntdata);
	smb3_cleanup_fs_context_contents(&vi->ctx);
	kfree(vi);
}

static void vol_release(struct kref *kref)
{
	struct vol_info *vi = container_of(kref, struct vol_info, refcnt);

	spin_lock(&vol_list_lock);
	list_del(&vi->list);
	spin_unlock(&vol_list_lock);
	__vol_release(vi);
}

static inline void free_vol_list(void)
{
	struct vol_info *vi, *nvi;

	list_for_each_entry_safe(vi, nvi, &vol_list, list) {
		list_del_init(&vi->list);
		__vol_release(vi);
	}
}

/**
 * dfs_cache_destroy - destroy DFS referral cache
 */
void dfs_cache_destroy(void)
{
	cancel_delayed_work_sync(&refresh_task);
	unload_nls(cache_nlsc);
	free_vol_list();
	flush_cache_ents();
	kmem_cache_destroy(cache_slab);
	destroy_workqueue(dfscache_wq);

	cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
}

/*
 * Update a cache entry with the new referral. Caller must hold
 * htable_rw_lock for writing.
 */
static int __update_cache_entry(const char *path,
				const struct dfs_info3_param *refs,
				int numrefs)
{
	int rc;
	struct cache_entry *ce;
	char *s, *th = NULL;

	ce = lookup_cache_entry(path, NULL);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	if (ce->tgthint) {
		s = ce->tgthint->name;
		th = kstrndup(s, strlen(s), GFP_ATOMIC);
		if (!th)
			return -ENOMEM;
	}

	free_tgts(ce);
	ce->numtgts = 0;

	rc = copy_ref_data(refs, numrefs, ce, th);

	kfree(th);

	return rc;
}

static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses,
			    const struct nls_table *nls_codepage, int remap,
			    const char *path, struct dfs_info3_param **refs,
			    int *numrefs)
{
	cifs_dbg(FYI, "%s: get a DFS referral for %s\n", __func__, path);

	if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
		return -EOPNOTSUPP;
	if (unlikely(!nls_codepage))
		return -EINVAL;

	*refs = NULL;
	*numrefs = 0;

	return ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs,
					       nls_codepage, remap);
}

/* Take htable_rw_lock for writing and update a cache entry */
static int update_cache_entry(const char *path,
			      const struct dfs_info3_param *refs,
			      int numrefs)
{
	int rc;

	down_write(&htable_rw_lock);
	rc = __update_cache_entry(path, refs, numrefs);
	up_write(&htable_rw_lock);

	return rc;
}

/*
 * Find, create or update a DFS cache entry.
 *
 * If the entry wasn't found, it will create a new one. Or if it was found but
 * expired, then it will update the entry accordingly.
 *
 * For interlinks, cifs_mount() and expand_dfs_referral() are supposed to
 * handle them properly.
 */
static int __dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
			    const struct nls_table *nls_codepage, int remap,
			    const char *path, bool noreq)
{
	int rc;
	unsigned int hash;
	struct cache_entry *ce;
	struct dfs_info3_param *refs = NULL;
	int numrefs = 0;
	bool newent = false;

	cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(path, &hash);

	/*
	 * If @noreq is set, no requests will be sent to the server. Just return
	 * the cache entry.
	 */
	if (noreq) {
		up_read(&htable_rw_lock);
		return PTR_ERR_OR_ZERO(ce);
	}

	if (!IS_ERR(ce)) {
		if (!cache_entry_expired(ce)) {
			dump_ce(ce);
			up_read(&htable_rw_lock);
			return 0;
		}
	} else {
		newent = true;
	}

	up_read(&htable_rw_lock);

	/*
	 * Either the entry was not found, or it is expired.
	 * Request a new DFS referral in order to create or update a cache
	 * entry.
	 */
	rc = get_dfs_referral(xid, ses, nls_codepage, remap, path,
			      &refs, &numrefs);
	if (rc)
		return rc;

	dump_refs(refs, numrefs);

	if (!newent) {
		rc = update_cache_entry(path, refs, numrefs);
		goto out_free_refs;
	}

	if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
		cifs_dbg(FYI, "%s: reached max cache size (%d)\n",
			 __func__, CACHE_MAX_ENTRIES);
		down_write(&htable_rw_lock);
		remove_oldest_entry();
		up_write(&htable_rw_lock);
	}

	rc = add_cache_entry(path, hash, refs, numrefs);
	if (!rc)
		atomic_inc(&cache_count);

out_free_refs:
	free_dfs_info_array(refs, numrefs);
	return rc;
}

/*
 * Set up a DFS referral from a given cache entry.
 *
 * Must be called with htable_rw_lock held.
 */
static int setup_referral(const char *path, struct cache_entry *ce,
			  struct dfs_info3_param *ref, const char *target)
{
	int rc;

	cifs_dbg(FYI, "%s: set up new ref\n", __func__);

	memset(ref, 0, sizeof(*ref));

	ref->path_name = kstrndup(path, strlen(path), GFP_ATOMIC);
	if (!ref->path_name)
		return -ENOMEM;

	ref->node_name = kstrndup(target, strlen(target), GFP_ATOMIC);
	if (!ref->node_name) {
		rc = -ENOMEM;
		goto err_free_path;
	}

	ref->path_consumed = ce->path_consumed;
	ref->ttl = ce->ttl;
	ref->server_type = ce->srvtype;
	ref->ref_flag = ce->flags;

	return 0;

err_free_path:
	kfree(ref->path_name);
	ref->path_name = NULL;
	return rc;
}

/* Gather all targets of a cache entry; must be called with htable_rw_lock held */
static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
{
	int rc;
	struct list_head *head = &tl->tl_list;
	struct cache_dfs_tgt *t;
	struct dfs_cache_tgt_iterator *it, *nit;

	memset(tl, 0, sizeof(*tl));
	INIT_LIST_HEAD(head);

	list_for_each_entry(t, &ce->tlist, list) {
		it = kzalloc(sizeof(*it), GFP_ATOMIC);
		if (!it) {
			rc = -ENOMEM;
			goto err_free_it;
		}

		it->it_name = kstrndup(t->name, strlen(t->name), GFP_ATOMIC);
		if (!it->it_name) {
			kfree(it);
			rc = -ENOMEM;
			goto err_free_it;
		}
		it->it_path_consumed = t->path_consumed;

		if (ce->tgthint == t)
			list_add(&it->it_list, head);
		else
			list_add_tail(&it->it_list, head);
	}

	tl->tl_numtgts = ce->numtgts;

	return 0;

err_free_it:
	list_for_each_entry_safe(it, nit, head, it_list) {
		kfree(it->it_name);
		kfree(it);
	}
	return rc;
}

/**
 * dfs_cache_find - find a DFS cache entry
 *
 * If it doesn't find the cache entry, then it will get a DFS referral
 * for @path and create a new entry.
 *
 * In case the cache entry exists but is expired, it will get a DFS referral
 * and then update the respective cache entry.
 *
 * These parameters are passed down to the get_dfs_refer() call if it
 * needs to be issued:
 * @xid: syscall xid
 * @ses: smb session to issue the request on
 * @nls_codepage: charset conversion
 * @remap: path character remapping type
 * @path: path to lookup in DFS referral cache.
 *
 * @ref: when non-NULL, store single DFS referral result in it.
 * @tgt_list: when non-NULL, store complete DFS target list in it.
 *
 * Return zero if the target was found, otherwise non-zero.
 */
int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
		   const struct nls_table *nls_codepage, int remap,
		   const char *path, struct dfs_info3_param *ref,
		   struct dfs_cache_tgt_list *tgt_list)
{
	int rc;
	char *npath;
	struct cache_entry *ce;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	rc = __dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
	if (rc)
		goto out_free_path;

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		up_read(&htable_rw_lock);
		rc = PTR_ERR(ce);
		goto out_free_path;
	}

	if (ref)
		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
	else
		rc = 0;
	if (!rc && tgt_list)
		rc = get_targets(ce, tgt_list);

	up_read(&htable_rw_lock);

out_free_path:
	free_normalized_path(path, npath);
	return rc;
}

/**
 * dfs_cache_noreq_find - find a DFS cache entry without sending any requests
 * to the currently connected server.
 *
 * NOTE: This function will neither update a cache entry in case it was
 * expired, nor create a new cache entry if @path hasn't been found. It heavily
 * relies on an existing cache entry.
 *
 * @path: path to lookup in the DFS referral cache.
 * @ref: when non-NULL, store single DFS referral result in it.
 * @tgt_list: when non-NULL, store complete DFS target list in it.
 *
 * Return 0 if successful.
 * Return -ENOENT if the entry was not found.
 * Return non-zero for other errors.
 */
int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
			 struct dfs_cache_tgt_list *tgt_list)
{
	int rc;
	char *npath;
	struct cache_entry *ce;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	if (ref)
		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
	else
		rc = 0;
	if (!rc && tgt_list)
		rc = get_targets(ce, tgt_list);

out_unlock:
	up_read(&htable_rw_lock);
	free_normalized_path(path, npath);

	return rc;
}

/**
 * dfs_cache_update_tgthint - update target hint of a DFS cache entry
 *
 * If it doesn't find the cache entry, then it will get a DFS referral for @path
 * and create a new entry.
 *
 * In case the cache entry exists but is expired, it will get a DFS referral
 * and then update the respective cache entry.
 *
 * @xid: syscall id
 * @ses: smb session
 * @nls_codepage: charset conversion
 * @remap: type of character remapping for paths
 * @path: path to lookup in DFS referral cache.
 * @it: DFS target iterator
 *
 * Return zero if the target hint was updated successfully, otherwise non-zero.
 */
int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
			     const struct nls_table *nls_codepage, int remap,
			     const char *path,
			     const struct dfs_cache_tgt_iterator *it)
{
	int rc;
	char *npath;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	cifs_dbg(FYI, "%s: update target hint - path: %s\n", __func__, npath);

	rc = __dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
	if (rc)
		goto out_free_path;

	down_write(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	t = ce->tgthint;

	if (likely(!strcasecmp(it->it_name, t->name)))
		goto out_unlock;

	list_for_each_entry(t, &ce->tlist, list) {
		if (!strcasecmp(t->name, it->it_name)) {
			ce->tgthint = t;
			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
				 it->it_name);
			break;
		}
	}

out_unlock:
	up_write(&htable_rw_lock);
out_free_path:
	free_normalized_path(path, npath);

	return rc;
}

/**
 * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
 * without sending any request to the currently connected server.
 *
 * NOTE: This function will neither update a cache entry in case it was
 * expired, nor create a new cache entry if @path hasn't been found. It heavily
 * relies on an existing cache entry.
 *
 * @path: path to lookup in DFS referral cache.
 * @it: target iterator which contains the target hint to update the cache
 * entry with.
 *
 * Return zero if the target hint was updated successfully, otherwise non-zero.
 */
int dfs_cache_noreq_update_tgthint(const char *path,
				   const struct dfs_cache_tgt_iterator *it)
{
	int rc;
	char *npath;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	if (!it)
		return -EINVAL;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);

	down_write(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	rc = 0;
	t = ce->tgthint;

	if (unlikely(!strcasecmp(it->it_name, t->name)))
		goto out_unlock;

	list_for_each_entry(t, &ce->tlist, list) {
		if (!strcasecmp(t->name, it->it_name)) {
			ce->tgthint = t;
			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
				 it->it_name);
			break;
		}
	}

out_unlock:
	up_write(&htable_rw_lock);
	free_normalized_path(path, npath);

	return rc;
}

/**
 * dfs_cache_get_tgt_referral - returns a DFS referral (@ref) from a given
 * target iterator (@it).
 *
 * @path: path to lookup in DFS referral cache.
 * @it: DFS target iterator.
 * @ref: DFS referral pointer to set up the gathered information.
 *
 * Return zero if the DFS referral was set up correctly, otherwise non-zero.
 */
int dfs_cache_get_tgt_referral(const char *path,
			       const struct dfs_cache_tgt_iterator *it,
			       struct dfs_info3_param *ref)
{
	int rc;
	char *npath;
	struct cache_entry *ce;

	if (!it || !ref)
		return -EINVAL;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name);

	rc = setup_referral(path, ce, ref, it->it_name);

out_unlock:
	up_read(&htable_rw_lock);
	free_normalized_path(path, npath);

	return rc;
}

/**
 * dfs_cache_add_vol - add a cifs context during mount() that will be handled by
 * DFS cache refresh worker.
 *
 * @mntdata: mount data.
 * @ctx: cifs context.
 * @fullpath: origin full path.
 *
 * Return zero if context was set up correctly, otherwise non-zero.
 */
int dfs_cache_add_vol(char *mntdata, struct smb3_fs_context *ctx, const char *fullpath)
{
	int rc;
	struct vol_info *vi;

	if (!ctx || !fullpath || !mntdata)
		return -EINVAL;

	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);

	vi = kzalloc(sizeof(*vi), GFP_KERNEL);
	if (!vi)
		return -ENOMEM;

	vi->fullpath = kstrndup(fullpath, strlen(fullpath), GFP_KERNEL);
	if (!vi->fullpath) {
		rc = -ENOMEM;
		goto err_free_vi;
	}

	rc = smb3_fs_context_dup(&vi->ctx, ctx);
	if (rc)
		goto err_free_fullpath;

	vi->mntdata = mntdata;
	spin_lock_init(&vi->ctx_lock);
	kref_init(&vi->refcnt);

	spin_lock(&vol_list_lock);
	list_add_tail(&vi->list, &vol_list);
	spin_unlock(&vol_list_lock);

	return 0;

err_free_fullpath:
	kfree(vi->fullpath);
err_free_vi:
	kfree(vi);
	return rc;
}

/* Must be called with vol_list_lock held */
static struct vol_info *find_vol(const char *fullpath)
{
	struct vol_info *vi;

	list_for_each_entry(vi, &vol_list, list) {
		cifs_dbg(FYI, "%s: vi->fullpath: %s\n", __func__, vi->fullpath);
		if (!strcasecmp(vi->fullpath, fullpath))
			return vi;
	}
	return ERR_PTR(-ENOENT);
}

/**
 * dfs_cache_update_vol - update vol info in DFS cache after failover
 *
 * @fullpath: fullpath to look up in volume list.
 * @server: TCP ses pointer.
 *
 * Return zero if volume was updated, otherwise non-zero.
 */
int dfs_cache_update_vol(const char *fullpath, struct TCP_Server_Info *server)
{
	struct vol_info *vi;

	if (!fullpath || !server)
		return -EINVAL;

	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);

	spin_lock(&vol_list_lock);
	vi = find_vol(fullpath);
	if (IS_ERR(vi)) {
		spin_unlock(&vol_list_lock);
		return PTR_ERR(vi);
	}
	kref_get(&vi->refcnt);
	spin_unlock(&vol_list_lock);

	cifs_dbg(FYI, "%s: updating volume info\n", __func__);
	spin_lock(&vi->ctx_lock);
	memcpy(&vi->ctx.dstaddr, &server->dstaddr,
	       sizeof(vi->ctx.dstaddr));
	spin_unlock(&vi->ctx_lock);

	kref_put(&vi->refcnt, vol_release);

	return 0;
}

/**
 * dfs_cache_del_vol - remove volume info in DFS cache during umount()
 *
 * @fullpath: fullpath to look up in volume list.
 */
void dfs_cache_del_vol(const char *fullpath)
{
	struct vol_info *vi;

	if (!fullpath || !*fullpath)
		return;

	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);

	spin_lock(&vol_list_lock);
	vi = find_vol(fullpath);
	spin_unlock(&vol_list_lock);

	if (!IS_ERR(vi))
		kref_put(&vi->refcnt, vol_release);
}

/**
 * dfs_cache_get_tgt_share - parse a DFS target
 *
 * @path: DFS full path
 * @it: DFS target iterator.
 * @share: tree name.
 * @prefix: prefix path.
 *
 * Return zero if target was parsed correctly, otherwise non-zero.
 */
int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
			    char **share, char **prefix)
{
	char *s, sep, *p;
	size_t len;
	size_t plen1, plen2;

	if (!it || !path || !share || !prefix || strlen(path) < it->it_path_consumed)
		return -EINVAL;

	*share = NULL;
	*prefix = NULL;

	sep = it->it_name[0];
	if (sep != '\\' && sep != '/')
		return -EINVAL;

	s = strchr(it->it_name + 1, sep);
	if (!s)
		return -EINVAL;

	/* point to prefix in target node */
	s = strchrnul(s + 1, sep);

	/* extract target share */
	*share = kstrndup(it->it_name, s - it->it_name, GFP_KERNEL);
	if (!*share)
		return -ENOMEM;

	/* skip separator */
	if (*s)
		s++;
	/* point to prefix in DFS path */
	p = path + it->it_path_consumed;
	if (*p == sep)
		p++;

	/* merge prefix paths from DFS path and target node */
	plen1 = it->it_name + strlen(it->it_name) - s;
	plen2 = path + strlen(path) - p;
	if (plen1 || plen2) {
		len = plen1 + plen2 + 2;
		*prefix = kmalloc(len, GFP_KERNEL);
		if (!*prefix) {
			kfree(*share);
			*share = NULL;
			return -ENOMEM;
		}
		if (plen1)
			scnprintf(*prefix, len, "%.*s%c%.*s", (int)plen1, s, sep, (int)plen2, p);
		else
			strscpy(*prefix, p, len);
	}
	return 0;
}

/* Get all tcons that are within a DFS namespace and can be refreshed */
static void get_tcons(struct TCP_Server_Info *server, struct list_head *head)
{
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;

	INIT_LIST_HEAD(head);

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
			if (!tcon->need_reconnect && !tcon->need_reopen_files &&
			    tcon->dfs_path) {
				tcon->tc_count++;
				list_add_tail(&tcon->ulist, head);
			}
		}
		if (ses->tcon_ipc && !ses->tcon_ipc->need_reconnect &&
		    ses->tcon_ipc->dfs_path) {
			list_add_tail(&ses->tcon_ipc->ulist, head);
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
}

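/* A path with more than two components refers to a DFS link */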
static bool is_dfs_link(const char *path)
{
	char *s;

	s = strchr(path + 1, '\\');
	if (!s)
		return false;
	return !!strchr(s + 1, '\\');
}

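/* Return the DFS root (\\<server>\<share>) portion of @path */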
static char *get_dfs_root(const char *path)
{
	char *s, *npath;

	s = strchr(path + 1, '\\');
	if (!s)
		return ERR_PTR(-EINVAL);

	s = strchr(s + 1, '\\');
	if (!s)
		return ERR_PTR(-EINVAL);

	npath = kstrndup(path, s - path, GFP_KERNEL);
	if (!npath)
		return ERR_PTR(-ENOMEM);

	return npath;
}

static inline void put_tcp_server(struct TCP_Server_Info *server)
{
	cifs_put_tcp_session(server, 0);
}

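/* Return a connected TCP server matching @ctx, or NULL if none is available */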
static struct TCP_Server_Info *get_tcp_server(struct smb3_fs_context *ctx)
{
	struct TCP_Server_Info *server;

	server = cifs_find_tcp_session(ctx);
	if (IS_ERR_OR_NULL(server))
		return NULL;

	spin_lock(&GlobalMid_Lock);
	if (server->tcpStatus != CifsGood) {
		spin_unlock(&GlobalMid_Lock);
		put_tcp_server(server);
		return NULL;
	}
	spin_unlock(&GlobalMid_Lock);

	return server;
}

/* Find root SMB session out of a DFS link path */
static struct cifs_ses *find_root_ses(struct vol_info *vi,
				      struct cifs_tcon *tcon,
				      const char *path)
{
	char *rpath;
	int rc;
	struct cache_entry *ce;
	struct dfs_info3_param ref = {0};
	char *mdata = NULL, *devname = NULL;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses;
	struct smb3_fs_context ctx = {NULL};

	rpath = get_dfs_root(path);
	if (IS_ERR(rpath))
		return ERR_CAST(rpath);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(rpath, NULL);
	if (IS_ERR(ce)) {
		up_read(&htable_rw_lock);
		ses = ERR_CAST(ce);
		goto out;
	}

	rc = setup_referral(path, ce, &ref, get_tgt_name(ce));
	if (rc) {
		up_read(&htable_rw_lock);
		ses = ERR_PTR(rc);
		goto out;
	}

	up_read(&htable_rw_lock);

	mdata = cifs_compose_mount_options(vi->mntdata, rpath, &ref,
					   &devname);
	free_dfs_info_param(&ref);

	if (IS_ERR(mdata)) {
		ses = ERR_CAST(mdata);
		mdata = NULL;
		goto out;
	}

	rc = cifs_setup_volume_info(&ctx, NULL, devname);

	if (rc) {
		ses = ERR_PTR(rc);
		goto out;
	}

	server = get_tcp_server(&ctx);
	if (!server) {
		ses = ERR_PTR(-EHOSTDOWN);
		goto out;
	}

	ses = cifs_get_smb_ses(server, &ctx);

out:
	smb3_cleanup_fs_context_contents(&ctx);
	kfree(mdata);
	kfree(rpath);
	kfree(devname);

	return ses;
}

/* Refresh DFS cache entry from a given tcon */
static int refresh_tcon(struct vol_info *vi, struct cifs_tcon *tcon)
{
	int rc = 0;
	unsigned int xid;
	char *path, *npath;
	struct cache_entry *ce;
	struct cifs_ses *root_ses = NULL, *ses;
	struct dfs_info3_param *refs = NULL;
	int numrefs = 0;

	xid = get_xid();

	path = tcon->dfs_path + 1;

	rc = get_normalized_path(path, &npath);
	if (rc)
		goto out_free_xid;

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		up_read(&htable_rw_lock);
		goto out_free_path;
	}

	if (!cache_entry_expired(ce)) {
		up_read(&htable_rw_lock);
		goto out_free_path;
	}

	up_read(&htable_rw_lock);

	/* If it's a DFS link, then use the root SMB session for refreshing it */
	if (is_dfs_link(npath)) {
		ses = root_ses = find_root_ses(vi, tcon, npath);
		if (IS_ERR(ses)) {
			rc = PTR_ERR(ses);
			root_ses = NULL;
			goto out_free_path;
		}
	} else {
		ses = tcon->ses;
	}

	rc = get_dfs_referral(xid, ses, cache_nlsc, tcon->remap, npath, &refs,
			      &numrefs);
	if (!rc) {
		dump_refs(refs, numrefs);
		rc = update_cache_entry(npath, refs, numrefs);
		free_dfs_info_array(refs, numrefs);
	}

	if (root_ses)
		cifs_put_smb_ses(root_ses);

out_free_path:
	free_normalized_path(path, npath);

out_free_xid:
	free_xid(xid);
	return rc;
}

/*
 * Worker that will refresh DFS cache based on lowest TTL value from a DFS
 * referral.
 */
static void refresh_cache_worker(struct work_struct *work)
{
	struct vol_info *vi, *nvi;
	struct TCP_Server_Info *server;
	LIST_HEAD(vols);
	LIST_HEAD(tcons);
	struct cifs_tcon *tcon, *ntcon;
	int rc;

	/*
	 * Find SMB volumes that are eligible (server->tcpStatus == CifsGood)
	 * for refreshing.
	 */
	spin_lock(&vol_list_lock);
	list_for_each_entry(vi, &vol_list, list) {
		server = get_tcp_server(&vi->ctx);
		if (!server)
			continue;

		kref_get(&vi->refcnt);
		list_add_tail(&vi->rlist, &vols);
		put_tcp_server(server);
	}
	spin_unlock(&vol_list_lock);

	/* Walk through all TCONs and refresh any expired cache entry */
	list_for_each_entry_safe(vi, nvi, &vols, rlist) {
		spin_lock(&vi->ctx_lock);
		server = get_tcp_server(&vi->ctx);
		spin_unlock(&vi->ctx_lock);

		if (!server)
			goto next_vol;

		get_tcons(server, &tcons);
		rc = 0;

		list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
			/*
			 * Skip tcp server if any of its tcons failed to refresh
			 * (possibly due to reconnects).
			 */
			if (!rc)
				rc = refresh_tcon(vi, tcon);

			list_del_init(&tcon->ulist);
			cifs_put_tcon(tcon);
		}

		put_tcp_server(server);

next_vol:
		list_del_init(&vi->rlist);
		kref_put(&vi->refcnt, vol_release);
	}

	spin_lock(&cache_ttl_lock);
	queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	spin_unlock(&cache_ttl_lock);
}