1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * DFS referral cache routines
4  *
5  * Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
6  */
7 
8 #include <linux/jhash.h>
9 #include <linux/ktime.h>
10 #include <linux/slab.h>
11 #include <linux/proc_fs.h>
12 #include <linux/nls.h>
13 #include <linux/workqueue.h>
14 #include <linux/uuid.h>
15 #include "cifsglob.h"
16 #include "smb2pdu.h"
17 #include "smb2proto.h"
18 #include "cifsproto.h"
19 #include "cifs_debug.h"
20 #include "cifs_unicode.h"
21 #include "smb2glob.h"
22 #include "dns_resolve.h"
23 
24 #include "dfs_cache.h"
25 
26 #define CACHE_HTABLE_SIZE 32
27 #define CACHE_MAX_ENTRIES 64
28 #define CACHE_MIN_TTL 120 /* 2 minutes */
29 
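/* A referral is an interlink when the target is a referral server but not a storage server */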
30 #define IS_DFS_INTERLINK(v) (((v) & DFSREF_REFERRAL_SERVER) && !((v) & DFSREF_STORAGE_SERVER))
31 
32 struct cache_dfs_tgt {
33 	char *name;
34 	int path_consumed;
35 	struct list_head list;
36 };
37 
38 struct cache_entry {
39 	struct hlist_node hlist;
40 	const char *path;
41 	int hdr_flags; /* RESP_GET_DFS_REFERRAL.ReferralHeaderFlags */
42 	int ttl; /* DFS_REREFERRAL_V3.TimeToLive */
43 	int srvtype; /* DFS_REREFERRAL_V3.ServerType */
44 	int ref_flags; /* DFS_REREFERRAL_V3.ReferralEntryFlags */
45 	struct timespec64 etime;
46 	int path_consumed; /* RESP_GET_DFS_REFERRAL.PathConsumed */
47 	int numtgts;
48 	struct list_head tlist;
49 	struct cache_dfs_tgt *tgthint;
50 };
51 
52 /* List of referral server sessions per dfs mount */
53 struct mount_group {
54 	struct list_head list;
55 	uuid_t id;
56 	struct cifs_ses *sessions[CACHE_MAX_ENTRIES];
57 	int num_sessions;
58 	spinlock_t lock;
59 	struct list_head refresh_list;
60 	struct kref refcount;
61 };
62 
63 static struct kmem_cache *cache_slab __read_mostly;
64 static struct workqueue_struct *dfscache_wq __read_mostly;
65 
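/* Lowest TTL (in seconds) seen among cached entries; used to schedule the refresh worker */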
66 static int cache_ttl;
67 static DEFINE_SPINLOCK(cache_ttl_lock);
68 
69 static struct nls_table *cache_cp;
70 
71 /*
72  * Number of entries in the cache
73  */
74 static atomic_t cache_count;
75 
76 static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
77 static DECLARE_RWSEM(htable_rw_lock);
78 
79 static LIST_HEAD(mount_group_list);
80 static DEFINE_MUTEX(mount_group_list_lock);
81 
82 static void refresh_cache_worker(struct work_struct *work);
83 
84 static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);
85 
86 static void __mount_group_release(struct mount_group *mg)
87 {
88 	int i;
89 
90 	for (i = 0; i < mg->num_sessions; i++)
91 		cifs_put_smb_ses(mg->sessions[i]);
92 	kfree(mg);
93 }
94 
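/* kref release callback: unlink the mount group from the global list, drop its sessions and free it */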
95 static void mount_group_release(struct kref *kref)
96 {
97 	struct mount_group *mg = container_of(kref, struct mount_group, refcount);
98 
99 	mutex_lock(&mount_group_list_lock);
100 	list_del(&mg->list);
101 	mutex_unlock(&mount_group_list_lock);
102 	__mount_group_release(mg);
103 }
104 
105 static struct mount_group *find_mount_group_locked(const uuid_t *id)
106 {
107 	struct mount_group *mg;
108 
109 	list_for_each_entry(mg, &mount_group_list, list) {
110 		if (uuid_equal(&mg->id, id))
111 			return mg;
112 	}
113 	return ERR_PTR(-ENOENT);
114 }
115 
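/* Find or allocate a mount group for @id.  Must be called with mount_group_list_lock held */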
116 static struct mount_group *__get_mount_group_locked(const uuid_t *id)
117 {
118 	struct mount_group *mg;
119 
120 	mg = find_mount_group_locked(id);
121 	if (!IS_ERR(mg))
122 		return mg;
123 
124 	mg = kmalloc(sizeof(*mg), GFP_KERNEL);
125 	if (!mg)
126 		return ERR_PTR(-ENOMEM);
127 	kref_init(&mg->refcount);
128 	uuid_copy(&mg->id, id);
129 	mg->num_sessions = 0;
130 	spin_lock_init(&mg->lock);
131 	list_add(&mg->list, &mount_group_list);
132 	return mg;
133 }
134 
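/* Get a reference to the mount group for @id, creating it if necessary */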
135 static struct mount_group *get_mount_group(const uuid_t *id)
136 {
137 	struct mount_group *mg;
138 
139 	mutex_lock(&mount_group_list_lock);
140 	mg = __get_mount_group_locked(id);
141 	if (!IS_ERR(mg))
142 		kref_get(&mg->refcount);
143 	mutex_unlock(&mount_group_list_lock);
144 
145 	return mg;
146 }
147 
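/* Free all mount groups; used when destroying the DFS referral cache */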
148 static void free_mount_group_list(void)
149 {
150 	struct mount_group *mg, *tmp_mg;
151 
152 	list_for_each_entry_safe(mg, tmp_mg, &mount_group_list, list) {
153 		list_del_init(&mg->list);
154 		__mount_group_release(mg);
155 	}
156 }
157 
158 /**
159  * dfs_cache_canonical_path - get a canonical DFS path
160  *
161  * @path: DFS path
162  * @cp: codepage
163  * @remap: mapping type
164  *
165  * Return the canonical path on success, otherwise an error pointer.
166  */
167 char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap)
168 {
169 	char *tmp;
170 	int plen = 0;
171 	char *npath;
172 
173 	if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/'))
174 		return ERR_PTR(-EINVAL);
175 
176 	if (unlikely(strcmp(cp->charset, cache_cp->charset))) {
177 		tmp = (char *)cifs_strndup_to_utf16(path, strlen(path), &plen, cp, remap);
178 		if (!tmp) {
179 			cifs_dbg(VFS, "%s: failed to convert path to utf16\n", __func__);
180 			return ERR_PTR(-EINVAL);
181 		}
182 
183 		npath = cifs_strndup_from_utf16(tmp, plen, true, cache_cp);
184 		kfree(tmp);
185 
186 		if (!npath) {
187 			cifs_dbg(VFS, "%s: failed to convert path from utf16\n", __func__);
188 			return ERR_PTR(-EINVAL);
189 		}
190 	} else {
191 		npath = kstrdup(path, GFP_KERNEL);
192 		if (!npath)
193 			return ERR_PTR(-ENOMEM);
194 	}
195 	convert_delimiter(npath, '\\');
196 	return npath;
197 }
198 
199 static inline bool cache_entry_expired(const struct cache_entry *ce)
200 {
201 	struct timespec64 ts;
202 
203 	ktime_get_coarse_real_ts64(&ts);
204 	return timespec64_compare(&ts, &ce->etime) >= 0;
205 }
206 
207 static inline void free_tgts(struct cache_entry *ce)
208 {
209 	struct cache_dfs_tgt *t, *n;
210 
211 	list_for_each_entry_safe(t, n, &ce->tlist, list) {
212 		list_del(&t->list);
213 		kfree(t->name);
214 		kfree(t);
215 	}
216 }
217 
218 static inline void flush_cache_ent(struct cache_entry *ce)
219 {
220 	hlist_del_init(&ce->hlist);
221 	kfree(ce->path);
222 	free_tgts(ce);
223 	atomic_dec(&cache_count);
224 	kmem_cache_free(cache_slab, ce);
225 }
226 
227 static void flush_cache_ents(void)
228 {
229 	int i;
230 
231 	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
232 		struct hlist_head *l = &cache_htable[i];
233 		struct hlist_node *n;
234 		struct cache_entry *ce;
235 
236 		hlist_for_each_entry_safe(ce, n, l, hlist) {
237 			if (!hlist_unhashed(&ce->hlist))
238 				flush_cache_ent(ce);
239 		}
240 	}
241 }
242 
243 /*
244  * dfs cache /proc file
245  */
246 static int dfscache_proc_show(struct seq_file *m, void *v)
247 {
248 	int i;
249 	struct cache_entry *ce;
250 	struct cache_dfs_tgt *t;
251 
252 	seq_puts(m, "DFS cache\n---------\n");
253 
254 	down_read(&htable_rw_lock);
255 	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
256 		struct hlist_head *l = &cache_htable[i];
257 
258 		hlist_for_each_entry(ce, l, hlist) {
259 			if (hlist_unhashed(&ce->hlist))
260 				continue;
261 
262 			seq_printf(m,
263 				   "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
264 				   ce->path, ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
265 				   ce->ttl, ce->etime.tv_nsec, ce->hdr_flags, ce->ref_flags,
266 				   IS_DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
267 				   ce->path_consumed, cache_entry_expired(ce) ? "yes" : "no");
268 
269 			list_for_each_entry(t, &ce->tlist, list) {
270 				seq_printf(m, "  %s%s\n",
271 					   t->name,
272 					   READ_ONCE(ce->tgthint) == t ? " (target hint)" : "");
273 			}
274 		}
275 	}
276 	up_read(&htable_rw_lock);
277 
278 	return 0;
279 }
280 
281 static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
282 				   size_t count, loff_t *ppos)
283 {
284 	char c;
285 	int rc;
286 
287 	rc = get_user(c, buffer);
288 	if (rc)
289 		return rc;
290 
291 	if (c != '0')
292 		return -EINVAL;
293 
294 	cifs_dbg(FYI, "clearing dfs cache\n");
295 
296 	down_write(&htable_rw_lock);
297 	flush_cache_ents();
298 	up_write(&htable_rw_lock);
299 
300 	return count;
301 }
302 
303 static int dfscache_proc_open(struct inode *inode, struct file *file)
304 {
305 	return single_open(file, dfscache_proc_show, NULL);
306 }
307 
308 const struct proc_ops dfscache_proc_ops = {
309 	.proc_open	= dfscache_proc_open,
310 	.proc_read	= seq_read,
311 	.proc_lseek	= seq_lseek,
312 	.proc_release	= single_release,
313 	.proc_write	= dfscache_proc_write,
314 };
315 
316 #ifdef CONFIG_CIFS_DEBUG2
317 static inline void dump_tgts(const struct cache_entry *ce)
318 {
319 	struct cache_dfs_tgt *t;
320 
321 	cifs_dbg(FYI, "target list:\n");
322 	list_for_each_entry(t, &ce->tlist, list) {
323 		cifs_dbg(FYI, "  %s%s\n", t->name,
324 			 READ_ONCE(ce->tgthint) == t ? " (target hint)" : "");
325 	}
326 }
327 
328 static inline void dump_ce(const struct cache_entry *ce)
329 {
330 	cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
331 		 ce->path,
332 		 ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
333 		 ce->etime.tv_nsec,
334 		 ce->hdr_flags, ce->ref_flags,
335 		 IS_DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
336 		 ce->path_consumed,
337 		 cache_entry_expired(ce) ? "yes" : "no");
338 	dump_tgts(ce);
339 }
340 
341 static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)
342 {
343 	int i;
344 
345 	cifs_dbg(FYI, "DFS referrals returned by the server:\n");
346 	for (i = 0; i < numrefs; i++) {
347 		const struct dfs_info3_param *ref = &refs[i];
348 
349 		cifs_dbg(FYI,
350 			 "\n"
351 			 "flags:         0x%x\n"
352 			 "path_consumed: %d\n"
353 			 "server_type:   0x%x\n"
354 			 "ref_flag:      0x%x\n"
355 			 "path_name:     %s\n"
356 			 "node_name:     %s\n"
357 			 "ttl:           %d (%dm)\n",
358 			 ref->flags, ref->path_consumed, ref->server_type,
359 			 ref->ref_flag, ref->path_name, ref->node_name,
360 			 ref->ttl, ref->ttl / 60);
361 	}
362 }
363 #else
364 #define dump_tgts(e)
365 #define dump_ce(e)
366 #define dump_refs(r, n)
367 #endif
368 
369 /**
370  * dfs_cache_init - Initialize DFS referral cache.
371  *
372  * Return zero if initialized successfully, otherwise non-zero.
373  */
374 int dfs_cache_init(void)
375 {
376 	int rc;
377 	int i;
378 
379 	dfscache_wq = alloc_workqueue("cifs-dfscache", WQ_FREEZABLE | WQ_UNBOUND, 1);
380 	if (!dfscache_wq)
381 		return -ENOMEM;
382 
383 	cache_slab = kmem_cache_create("cifs_dfs_cache",
384 				       sizeof(struct cache_entry), 0,
385 				       SLAB_HWCACHE_ALIGN, NULL);
386 	if (!cache_slab) {
387 		rc = -ENOMEM;
388 		goto out_destroy_wq;
389 	}
390 
391 	for (i = 0; i < CACHE_HTABLE_SIZE; i++)
392 		INIT_HLIST_HEAD(&cache_htable[i]);
393 
394 	atomic_set(&cache_count, 0);
395 	cache_cp = load_nls("utf8");
396 	if (!cache_cp)
397 		cache_cp = load_nls_default();
398 
399 	cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
400 	return 0;
401 
402 out_destroy_wq:
403 	destroy_workqueue(dfscache_wq);
404 	return rc;
405 }
406 
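/* Compute a case-insensitive hash of @data (in @cache_cp charset) for indexing cache_htable */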
407 static int cache_entry_hash(const void *data, int size, unsigned int *hash)
408 {
409 	int i, clen;
410 	const unsigned char *s = data;
411 	wchar_t c;
412 	unsigned int h = 0;
413 
414 	for (i = 0; i < size; i += clen) {
415 		clen = cache_cp->char2uni(&s[i], size - i, &c);
416 		if (unlikely(clen < 0)) {
417 			cifs_dbg(VFS, "%s: can't convert char\n", __func__);
418 			return clen;
419 		}
420 		c = cifs_toupper(c);
421 		h = jhash(&c, sizeof(c), h);
422 	}
423 	*hash = h % CACHE_HTABLE_SIZE;
424 	return 0;
425 }
426 
427 /* Return target hint of a DFS cache entry */
428 static inline char *get_tgt_name(const struct cache_entry *ce)
429 {
430 	struct cache_dfs_tgt *t = READ_ONCE(ce->tgthint);
431 
432 	return t ? t->name : ERR_PTR(-ENOENT);
433 }
434 
435 /* Return the expiration time computed from a new entry's TTL */
436 static inline struct timespec64 get_expire_time(int ttl)
437 {
438 	struct timespec64 ts = {
439 		.tv_sec = ttl,
440 		.tv_nsec = 0,
441 	};
442 	struct timespec64 now;
443 
444 	ktime_get_coarse_real_ts64(&now);
445 	return timespec64_add(now, ts);
446 }
447 
448 /* Allocate a new DFS target */
449 static struct cache_dfs_tgt *alloc_target(const char *name, int path_consumed)
450 {
451 	struct cache_dfs_tgt *t;
452 
453 	t = kmalloc(sizeof(*t), GFP_ATOMIC);
454 	if (!t)
455 		return ERR_PTR(-ENOMEM);
456 	t->name = kstrdup(name, GFP_ATOMIC);
457 	if (!t->name) {
458 		kfree(t);
459 		return ERR_PTR(-ENOMEM);
460 	}
461 	t->path_consumed = path_consumed;
462 	INIT_LIST_HEAD(&t->list);
463 	return t;
464 }
465 
466 /*
467  * Copy DFS referral information to a cache entry and conditionally update
468  * target hint.
469  */
470 static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
471 			 struct cache_entry *ce, const char *tgthint)
472 {
473 	struct cache_dfs_tgt *target;
474 	int i;
475 
476 	ce->ttl = max_t(int, refs[0].ttl, CACHE_MIN_TTL);
477 	ce->etime = get_expire_time(ce->ttl);
478 	ce->srvtype = refs[0].server_type;
479 	ce->hdr_flags = refs[0].flags;
480 	ce->ref_flags = refs[0].ref_flag;
481 	ce->path_consumed = refs[0].path_consumed;
482 
483 	for (i = 0; i < numrefs; i++) {
484 		struct cache_dfs_tgt *t;
485 
486 		t = alloc_target(refs[i].node_name, refs[i].path_consumed);
487 		if (IS_ERR(t)) {
488 			free_tgts(ce);
489 			return PTR_ERR(t);
490 		}
491 		if (tgthint && !strcasecmp(t->name, tgthint)) {
492 			list_add(&t->list, &ce->tlist);
493 			tgthint = NULL;
494 		} else {
495 			list_add_tail(&t->list, &ce->tlist);
496 		}
497 		ce->numtgts++;
498 	}
499 
500 	target = list_first_entry_or_null(&ce->tlist, struct cache_dfs_tgt,
501 					  list);
502 	WRITE_ONCE(ce->tgthint, target);
503 
504 	return 0;
505 }
506 
507 /* Allocate a new cache entry */
508 static struct cache_entry *alloc_cache_entry(struct dfs_info3_param *refs, int numrefs)
509 {
510 	struct cache_entry *ce;
511 	int rc;
512 
513 	ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL);
514 	if (!ce)
515 		return ERR_PTR(-ENOMEM);
516 
517 	ce->path = refs[0].path_name;
518 	refs[0].path_name = NULL;
519 
520 	INIT_HLIST_NODE(&ce->hlist);
521 	INIT_LIST_HEAD(&ce->tlist);
522 
523 	rc = copy_ref_data(refs, numrefs, ce, NULL);
524 	if (rc) {
525 		kfree(ce->path);
526 		kmem_cache_free(cache_slab, ce);
527 		ce = ERR_PTR(rc);
528 	}
529 	return ce;
530 }
531 
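/* Remove the cache entry with the earliest expiry time.  Must be called with htable_rw_lock held */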
532 static void remove_oldest_entry_locked(void)
533 {
534 	int i;
535 	struct cache_entry *ce;
536 	struct cache_entry *to_del = NULL;
537 
538 	WARN_ON(!rwsem_is_locked(&htable_rw_lock));
539 
540 	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
541 		struct hlist_head *l = &cache_htable[i];
542 
543 		hlist_for_each_entry(ce, l, hlist) {
544 			if (hlist_unhashed(&ce->hlist))
545 				continue;
546 			if (!to_del || timespec64_compare(&ce->etime,
547 							  &to_del->etime) < 0)
548 				to_del = ce;
549 		}
550 	}
551 
552 	if (!to_del) {
553 		cifs_dbg(FYI, "%s: no entry to remove\n", __func__);
554 		return;
555 	}
556 
557 	cifs_dbg(FYI, "%s: removing entry\n", __func__);
558 	dump_ce(to_del);
559 	flush_cache_ent(to_del);
560 }
561 
562 /* Add a new DFS cache entry */
563 static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
564 						  int numrefs)
565 {
566 	int rc;
567 	struct cache_entry *ce;
568 	unsigned int hash;
569 
570 	WARN_ON(!rwsem_is_locked(&htable_rw_lock));
571 
572 	if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
573 		cifs_dbg(FYI, "%s: reached max cache size (%d)\n", __func__, CACHE_MAX_ENTRIES);
574 		remove_oldest_entry_locked();
575 	}
576 
577 	rc = cache_entry_hash(refs[0].path_name, strlen(refs[0].path_name), &hash);
578 	if (rc)
579 		return ERR_PTR(rc);
580 
581 	ce = alloc_cache_entry(refs, numrefs);
582 	if (IS_ERR(ce))
583 		return ce;
584 
585 	spin_lock(&cache_ttl_lock);
586 	if (!cache_ttl) {
587 		cache_ttl = ce->ttl;
588 		queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
589 	} else {
590 		cache_ttl = min_t(int, cache_ttl, ce->ttl);
591 		mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
592 	}
593 	spin_unlock(&cache_ttl_lock);
594 
595 	hlist_add_head(&ce->hlist, &cache_htable[hash]);
596 	dump_ce(ce);
597 
598 	atomic_inc(&cache_count);
599 
600 	return ce;
601 }
602 
603 /* Check if two DFS paths are equal.  @s1 and @s2 are expected to be in @cache_cp's charset */
604 static bool dfs_path_equal(const char *s1, int len1, const char *s2, int len2)
605 {
606 	int i, l1, l2;
607 	wchar_t c1, c2;
608 
609 	if (len1 != len2)
610 		return false;
611 
612 	for (i = 0; i < len1; i += l1) {
613 		l1 = cache_cp->char2uni(&s1[i], len1 - i, &c1);
614 		l2 = cache_cp->char2uni(&s2[i], len2 - i, &c2);
615 		if (unlikely(l1 < 0 && l2 < 0)) {
616 			if (s1[i] != s2[i])
617 				return false;
618 			l1 = 1;
619 			continue;
620 		}
621 		if (l1 != l2)
622 			return false;
623 		if (cifs_toupper(c1) != cifs_toupper(c2))
624 			return false;
625 	}
626 	return true;
627 }
628 
629 static struct cache_entry *__lookup_cache_entry(const char *path, unsigned int hash, int len)
630 {
631 	struct cache_entry *ce;
632 
633 	hlist_for_each_entry(ce, &cache_htable[hash], hlist) {
634 		if (dfs_path_equal(ce->path, strlen(ce->path), path, len)) {
635 			dump_ce(ce);
636 			return ce;
637 		}
638 	}
639 	return ERR_PTR(-ENOENT);
640 }
641 
642 /*
643  * Find a DFS cache entry in hash table and optionally check prefix path against normalized @path.
644  *
645  * Use whole path components in the match.  Must be called with htable_rw_lock held.
646  *
647  * Return cached entry if successful.
648  * Return ERR_PTR(-ENOENT) if the entry is not found.
649  * Return error ptr otherwise.
650  */
651 static struct cache_entry *lookup_cache_entry(const char *path)
652 {
653 	struct cache_entry *ce;
654 	int cnt = 0;
655 	const char *s = path, *e;
656 	char sep = *s;
657 	unsigned int hash;
658 	int rc;
659 
660 	while ((s = strchr(s, sep)) && ++cnt < 3)
661 		s++;
662 
663 	if (cnt < 3) {
664 		rc = cache_entry_hash(path, strlen(path), &hash);
665 		if (rc)
666 			return ERR_PTR(rc);
667 		return __lookup_cache_entry(path, hash, strlen(path));
668 	}
669 	/*
670 	 * Handle paths that have more than two path components and are a complete prefix of the DFS
671 	 * referral request path (@path).
672 	 *
673 	 * See MS-DFSC 3.2.5.5 "Receiving a Root Referral Request or Link Referral Request".
674 	 */
675 	e = path + strlen(path) - 1;
676 	while (e > s) {
677 		int len;
678 
679 		/* skip separators */
680 		while (e > s && *e == sep)
681 			e--;
682 		if (e == s)
683 			break;
684 
685 		len = e + 1 - path;
686 		rc = cache_entry_hash(path, len, &hash);
687 		if (rc)
688 			return ERR_PTR(rc);
689 		ce = __lookup_cache_entry(path, hash, len);
690 		if (!IS_ERR(ce))
691 			return ce;
692 
693 		/* backward until separator */
694 		while (e > s && *e != sep)
695 			e--;
696 	}
697 	return ERR_PTR(-ENOENT);
698 }
699 
700 /**
701  * dfs_cache_destroy - destroy DFS referral cache
702  */
703 void dfs_cache_destroy(void)
704 {
705 	cancel_delayed_work_sync(&refresh_task);
706 	unload_nls(cache_cp);
707 	free_mount_group_list();
708 	flush_cache_ents();
709 	kmem_cache_destroy(cache_slab);
710 	destroy_workqueue(dfscache_wq);
711 
712 	cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
713 }
714 
715 /* Update a cache entry with the new referral in @refs */
716 static int update_cache_entry_locked(struct cache_entry *ce, const struct dfs_info3_param *refs,
717 				     int numrefs)
718 {
719 	struct cache_dfs_tgt *target;
720 	char *th = NULL;
721 	int rc;
722 
723 	WARN_ON(!rwsem_is_locked(&htable_rw_lock));
724 
725 	target = READ_ONCE(ce->tgthint);
726 	if (target) {
727 		th = kstrdup(target->name, GFP_ATOMIC);
728 		if (!th)
729 			return -ENOMEM;
730 	}
731 
732 	free_tgts(ce);
733 	ce->numtgts = 0;
734 
735 	rc = copy_ref_data(refs, numrefs, ce, th);
736 
737 	kfree(th);
738 
739 	return rc;
740 }
741 
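/* Get a DFS referral for @path from the server and normalize returned path names to '\\' delimiters */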
742 static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses, const char *path,
743 			    struct dfs_info3_param **refs, int *numrefs)
744 {
745 	int rc;
746 	int i;
747 
748 	*refs = NULL;
749 	*numrefs = 0;
750 
751 	if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
752 		return -EOPNOTSUPP;
753 	if (unlikely(!cache_cp))
754 		return -EINVAL;
755 
756 	cifs_dbg(FYI, "%s: ipc=%s referral=%s\n", __func__, ses->tcon_ipc->tree_name, path);
757 	rc =  ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs, cache_cp,
758 					      NO_MAP_UNI_RSVD);
759 	if (!rc) {
760 		struct dfs_info3_param *ref = *refs;
761 
762 		for (i = 0; i < *numrefs; i++)
763 			convert_delimiter(ref[i].path_name, '\\');
764 	}
765 	return rc;
766 }
767 
768 /*
769  * Find, create or update a DFS cache entry.
770  *
771  * If the entry wasn't found, a new one is created.  If it was found but
772  * expired, the entry is updated accordingly.
773  *
774  * Interlinks are expected to be handled properly by cifs_mount() and
775  * expand_dfs_referral().
776  *
777  * On success, return entry with acquired lock for reading, otherwise error ptr.
778  */
779 static struct cache_entry *cache_refresh_path(const unsigned int xid,
780 					      struct cifs_ses *ses,
781 					      const char *path,
782 					      bool force_refresh)
783 {
784 	struct dfs_info3_param *refs = NULL;
785 	struct cache_entry *ce;
786 	int numrefs = 0;
787 	int rc;
788 
789 	cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);
790 
791 	down_read(&htable_rw_lock);
792 
793 	ce = lookup_cache_entry(path);
794 	if (!IS_ERR(ce)) {
795 		if (!force_refresh && !cache_entry_expired(ce))
796 			return ce;
797 	} else if (PTR_ERR(ce) != -ENOENT) {
798 		up_read(&htable_rw_lock);
799 		return ce;
800 	}
801 
802 	/*
803 	 * Unlock shared access as we don't want to hold any locks while getting
804 	 * a new referral.  The @ses used for performing the I/O could be
805 	 * reconnecting and it acquires @htable_rw_lock to look up the dfs cache
806 	 * in order to failover -- if necessary.
807 	 */
808 	up_read(&htable_rw_lock);
809 
810 	/*
811 	 * Either the entry was not found, or it is expired, or it is a forced
812 	 * refresh.
813 	 * Request a new DFS referral in order to create or update a cache entry.
814 	 */
815 	rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
816 	if (rc) {
817 		ce = ERR_PTR(rc);
818 		goto out;
819 	}
820 
821 	dump_refs(refs, numrefs);
822 
823 	down_write(&htable_rw_lock);
824 	/* Re-check as another task might have added or refreshed it already */
825 	ce = lookup_cache_entry(path);
826 	if (!IS_ERR(ce)) {
827 		if (force_refresh || cache_entry_expired(ce)) {
828 			rc = update_cache_entry_locked(ce, refs, numrefs);
829 			if (rc)
830 				ce = ERR_PTR(rc);
831 		}
832 	} else if (PTR_ERR(ce) == -ENOENT) {
833 		ce = add_cache_entry_locked(refs, numrefs);
834 	}
835 
836 	if (IS_ERR(ce)) {
837 		up_write(&htable_rw_lock);
838 		goto out;
839 	}
840 
841 	downgrade_write(&htable_rw_lock);
842 out:
843 	free_dfs_info_array(refs, numrefs);
844 	return ce;
845 }
846 
847 /*
848  * Set up a DFS referral from a given cache entry.
849  *
850  * Must be called with htable_rw_lock held.
851  */
852 static int setup_referral(const char *path, struct cache_entry *ce,
853 			  struct dfs_info3_param *ref, const char *target)
854 {
855 	int rc;
856 
857 	cifs_dbg(FYI, "%s: set up new ref\n", __func__);
858 
859 	memset(ref, 0, sizeof(*ref));
860 
861 	ref->path_name = kstrdup(path, GFP_ATOMIC);
862 	if (!ref->path_name)
863 		return -ENOMEM;
864 
865 	ref->node_name = kstrdup(target, GFP_ATOMIC);
866 	if (!ref->node_name) {
867 		rc = -ENOMEM;
868 		goto err_free_path;
869 	}
870 
871 	ref->path_consumed = ce->path_consumed;
872 	ref->ttl = ce->ttl;
873 	ref->server_type = ce->srvtype;
874 	ref->ref_flag = ce->ref_flags;
875 	ref->flags = ce->hdr_flags;
876 
877 	return 0;
878 
879 err_free_path:
880 	kfree(ref->path_name);
881 	ref->path_name = NULL;
882 	return rc;
883 }
884 
885 /* Return target list of a DFS cache entry */
886 static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
887 {
888 	int rc;
889 	struct list_head *head = &tl->tl_list;
890 	struct cache_dfs_tgt *t;
891 	struct dfs_cache_tgt_iterator *it, *nit;
892 
893 	memset(tl, 0, sizeof(*tl));
894 	INIT_LIST_HEAD(head);
895 
896 	list_for_each_entry(t, &ce->tlist, list) {
897 		it = kzalloc(sizeof(*it), GFP_ATOMIC);
898 		if (!it) {
899 			rc = -ENOMEM;
900 			goto err_free_it;
901 		}
902 
903 		it->it_name = kstrdup(t->name, GFP_ATOMIC);
904 		if (!it->it_name) {
905 			kfree(it);
906 			rc = -ENOMEM;
907 			goto err_free_it;
908 		}
909 		it->it_path_consumed = t->path_consumed;
910 
911 		if (READ_ONCE(ce->tgthint) == t)
912 			list_add(&it->it_list, head);
913 		else
914 			list_add_tail(&it->it_list, head);
915 	}
916 
917 	tl->tl_numtgts = ce->numtgts;
918 
919 	return 0;
920 
921 err_free_it:
922 	list_for_each_entry_safe(it, nit, head, it_list) {
923 		list_del(&it->it_list);
924 		kfree(it->it_name);
925 		kfree(it);
926 	}
927 	return rc;
928 }
929 
930 /**
931  * dfs_cache_find - find a DFS cache entry
932  *
933  * If it doesn't find the cache entry, then it will get a DFS referral
934  * for @path and create a new entry.
935  *
936  * In case the cache entry exists but expired, it will get a DFS referral
937  * for @path and then update the respective cache entry.
938  *
939  * These parameters are passed down to the get_dfs_refer() call if it
940  * needs to be issued:
941  * @xid: syscall xid
942  * @ses: smb session to issue the request on
943  * @cp: codepage
944  * @remap: path character remapping type
945  * @path: path to lookup in DFS referral cache.
946  *
947  * @ref: when non-NULL, store single DFS referral result in it.
948  * @tgt_list: when non-NULL, store complete DFS target list in it.
949  *
950  * Return zero if the target was found, otherwise non-zero.
951  */
952 int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses, const struct nls_table *cp,
953 		   int remap, const char *path, struct dfs_info3_param *ref,
954 		   struct dfs_cache_tgt_list *tgt_list)
955 {
956 	int rc;
957 	const char *npath;
958 	struct cache_entry *ce;
959 
960 	npath = dfs_cache_canonical_path(path, cp, remap);
961 	if (IS_ERR(npath))
962 		return PTR_ERR(npath);
963 
964 	ce = cache_refresh_path(xid, ses, npath, false);
965 	if (IS_ERR(ce)) {
966 		rc = PTR_ERR(ce);
967 		goto out_free_path;
968 	}
969 
970 	if (ref)
971 		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
972 	else
973 		rc = 0;
974 	if (!rc && tgt_list)
975 		rc = get_targets(ce, tgt_list);
976 
977 	up_read(&htable_rw_lock);
978 
979 out_free_path:
980 	kfree(npath);
981 	return rc;
982 }
983 
984 /**
985  * dfs_cache_noreq_find - find a DFS cache entry without sending any requests to
986  * the currently connected server.
987  *
988  * NOTE: This function will neither update a cache entry in case it was
989  * expired, nor create a new cache entry if @path hasn't been found. It heavily
990  * relies on an existing cache entry.
991  *
992  * @path: canonical DFS path to lookup in the DFS referral cache.
993  * @ref: when non-NULL, store single DFS referral result in it.
994  * @tgt_list: when non-NULL, store complete DFS target list in it.
995  *
996  * Return 0 if successful.
997  * Return -ENOENT if the entry was not found.
998  * Return non-zero for other errors.
999  */
1000 int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
1001 			 struct dfs_cache_tgt_list *tgt_list)
1002 {
1003 	int rc;
1004 	struct cache_entry *ce;
1005 
1006 	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
1007 
1008 	down_read(&htable_rw_lock);
1009 
1010 	ce = lookup_cache_entry(path);
1011 	if (IS_ERR(ce)) {
1012 		rc = PTR_ERR(ce);
1013 		goto out_unlock;
1014 	}
1015 
1016 	if (ref)
1017 		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
1018 	else
1019 		rc = 0;
1020 	if (!rc && tgt_list)
1021 		rc = get_targets(ce, tgt_list);
1022 
1023 out_unlock:
1024 	up_read(&htable_rw_lock);
1025 	return rc;
1026 }
1027 
1028 /**
1029  * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
1030  * without sending any requests to the currently connected server.
1031  *
1032  * NOTE: This function will neither update a cache entry in case it was
1033  * expired, nor create a new cache entry if @path hasn't been found. It heavily
1034  * relies on an existing cache entry.
1035  *
1036  * @path: canonical DFS path to lookup in DFS referral cache.
1037  * @it: target iterator which contains the target hint to update the cache
1038  * entry with.
1041  */
1042 void dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt_iterator *it)
1043 {
1044 	struct cache_dfs_tgt *t;
1045 	struct cache_entry *ce;
1046 
1047 	if (!path || !it)
1048 		return;
1049 
1050 	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
1051 
1052 	down_read(&htable_rw_lock);
1053 
1054 	ce = lookup_cache_entry(path);
1055 	if (IS_ERR(ce))
1056 		goto out_unlock;
1057 
1058 	t = READ_ONCE(ce->tgthint);
1059 
1060 	if (unlikely(!strcasecmp(it->it_name, t->name)))
1061 		goto out_unlock;
1062 
1063 	list_for_each_entry(t, &ce->tlist, list) {
1064 		if (!strcasecmp(t->name, it->it_name)) {
1065 			WRITE_ONCE(ce->tgthint, t);
1066 			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
1067 				 it->it_name);
1068 			break;
1069 		}
1070 	}
1071 
1072 out_unlock:
1073 	up_read(&htable_rw_lock);
1074 }
1075 
1076 /**
1077  * dfs_cache_get_tgt_referral - returns a DFS referral (@ref) from a given
1078  * target iterator (@it).
1079  *
1080  * @path: canonical DFS path to lookup in DFS referral cache.
1081  * @it: DFS target iterator.
1082  * @ref: DFS referral pointer to set up the gathered information.
1083  *
1084  * Return zero if the DFS referral was set up correctly, otherwise non-zero.
1085  */
1086 int dfs_cache_get_tgt_referral(const char *path, const struct dfs_cache_tgt_iterator *it,
1087 			       struct dfs_info3_param *ref)
1088 {
1089 	int rc;
1090 	struct cache_entry *ce;
1091 
1092 	if (!it || !ref)
1093 		return -EINVAL;
1094 
1095 	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
1096 
1097 	down_read(&htable_rw_lock);
1098 
1099 	ce = lookup_cache_entry(path);
1100 	if (IS_ERR(ce)) {
1101 		rc = PTR_ERR(ce);
1102 		goto out_unlock;
1103 	}
1104 
1105 	cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name);
1106 
1107 	rc = setup_referral(path, ce, ref, it->it_name);
1108 
1109 out_unlock:
1110 	up_read(&htable_rw_lock);
1111 	return rc;
1112 }
1113 
1114 /**
1115  * dfs_cache_add_refsrv_session - add SMB session of referral server
1116  *
1117  * @mount_id: mount group uuid to lookup.
1118  * @ses: reference counted SMB session of referral server.
1119  */
1120 void dfs_cache_add_refsrv_session(const uuid_t *mount_id, struct cifs_ses *ses)
1121 {
1122 	struct mount_group *mg;
1123 
1124 	if (WARN_ON_ONCE(!mount_id || uuid_is_null(mount_id) || !ses))
1125 		return;
1126 
1127 	mg = get_mount_group(mount_id);
1128 	if (WARN_ON_ONCE(IS_ERR(mg)))
1129 		return;
1130 
1131 	spin_lock(&mg->lock);
1132 	if (mg->num_sessions < ARRAY_SIZE(mg->sessions))
1133 		mg->sessions[mg->num_sessions++] = ses;
1134 	spin_unlock(&mg->lock);
1135 	kref_put(&mg->refcount, mount_group_release);
1136 }
1137 
1138 /**
1139  * dfs_cache_put_refsrv_sessions - put all referral server sessions
1140  *
1141  * Put all SMB sessions from the given mount group id.
1142  *
1143  * @mount_id: mount group uuid to lookup.
1144  */
1145 void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id)
1146 {
1147 	struct mount_group *mg;
1148 
1149 	if (!mount_id || uuid_is_null(mount_id))
1150 		return;
1151 
1152 	mutex_lock(&mount_group_list_lock);
1153 	mg = find_mount_group_locked(mount_id);
1154 	if (IS_ERR(mg)) {
1155 		mutex_unlock(&mount_group_list_lock);
1156 		return;
1157 	}
1158 	mutex_unlock(&mount_group_list_lock);
1159 	kref_put(&mg->refcount, mount_group_release);
1160 }
1161 
1162 /* Extract share from DFS target and return a pointer to the prefix path that follows it (may be empty) */
1163 static const char *parse_target_share(const char *target, char **share)
1164 {
1165 	const char *s, *seps = "/\\";
1166 	size_t len;
1167 
1168 	s = strpbrk(target + 1, seps);
1169 	if (!s)
1170 		return ERR_PTR(-EINVAL);
1171 
1172 	len = strcspn(s + 1, seps);
1173 	if (!len)
1174 		return ERR_PTR(-EINVAL);
1175 	s += len;
1176 
1177 	len = s - target + 1;
1178 	*share = kstrndup(target, len, GFP_KERNEL);
1179 	if (!*share)
1180 		return ERR_PTR(-ENOMEM);
1181 
1182 	s = target + len;
1183 	return s + strspn(s, seps);
1184 }
1185 
1186 /**
1187  * dfs_cache_get_tgt_share - parse a DFS target
1188  *
1189  * @path: DFS full path
1190  * @it: DFS target iterator.
1191  * @share: tree name.
1192  * @prefix: prefix path.
1193  *
1194  * Return zero if target was parsed correctly, otherwise non-zero.
1195  */
1196 int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, char **share,
1197 			    char **prefix)
1198 {
1199 	char sep;
1200 	char *target_share;
1201 	char *ppath = NULL;
1202 	const char *target_ppath, *dfsref_ppath;
1203 	size_t target_pplen, dfsref_pplen;
1204 	size_t len, c;
1205 
1206 	if (!it || !path || !share || !prefix || strlen(path) < it->it_path_consumed)
1207 		return -EINVAL;
1208 
1209 	sep = it->it_name[0];
1210 	if (sep != '\\' && sep != '/')
1211 		return -EINVAL;
1212 
1213 	target_ppath = parse_target_share(it->it_name, &target_share);
1214 	if (IS_ERR(target_ppath))
1215 		return PTR_ERR(target_ppath);
1216 
1217 	/* point to prefix in DFS referral path */
1218 	dfsref_ppath = path + it->it_path_consumed;
1219 	dfsref_ppath += strspn(dfsref_ppath, "/\\");
1220 
1221 	target_pplen = strlen(target_ppath);
1222 	dfsref_pplen = strlen(dfsref_ppath);
1223 
1224 	/* merge prefix paths from DFS referral path and target node */
1225 	if (target_pplen || dfsref_pplen) {
1226 		len = target_pplen + dfsref_pplen + 2;
1227 		ppath = kzalloc(len, GFP_KERNEL);
1228 		if (!ppath) {
1229 			kfree(target_share);
1230 			return -ENOMEM;
1231 		}
1232 		c = strscpy(ppath, target_ppath, len);
1233 		if (c && dfsref_pplen)
1234 			ppath[c] = sep;
1235 		strlcat(ppath, dfsref_ppath, len);
1236 	}
1237 	*share = target_share;
1238 	*prefix = ppath;
1239 	return 0;
1240 }
1241 
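/* Check if two target shares name the same share and, if the hostname resolves, whether it matches @server's address */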
1242 static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, const char *s2)
1243 {
1244 	char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0};
1245 	const char *host;
1246 	size_t hostlen;
1247 	struct sockaddr_storage ss;
1248 	bool match;
1249 	int rc;
1250 
1251 	if (strcasecmp(s1, s2))
1252 		return false;
1253 
1254 	/*
1255 	 * Resolve the share's hostname and check whether the server address matches.  Otherwise just
1256 	 * ignore it, as there may be no upcall to resolve the hostname or the address conversion may fail.
1257 	 */
1258 	extract_unc_hostname(s1, &host, &hostlen);
1259 	scnprintf(unc, sizeof(unc), "\\\\%.*s", (int)hostlen, host);
1260 
1261 	rc = dns_resolve_server_name_to_ip(unc, (struct sockaddr *)&ss, NULL);
1262 	if (rc < 0) {
1263 		cifs_dbg(FYI, "%s: could not resolve %.*s. assuming server address matches.\n",
1264 			 __func__, (int)hostlen, host);
1265 		return true;
1266 	}
1267 
1268 	cifs_server_lock(server);
1269 	match = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, (struct sockaddr *)&ss);
1270 	cifs_server_unlock(server);
1271 
1272 	return match;
1273 }
1274 
1275 /*
1276  * Mark dfs tcon for reconnecting when the currently connected tcon does not match any of the new
1277  * target shares in @new_tl.
1278  */
1279 static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
1280 					 struct dfs_cache_tgt_list *old_tl,
1281 					 struct dfs_cache_tgt_list *new_tl)
1282 {
1283 	struct dfs_cache_tgt_iterator *oit, *nit;
1284 
1285 	for (oit = dfs_cache_get_tgt_iterator(old_tl); oit;
1286 	     oit = dfs_cache_get_next_tgt(old_tl, oit)) {
1287 		for (nit = dfs_cache_get_tgt_iterator(new_tl); nit;
1288 		     nit = dfs_cache_get_next_tgt(new_tl, nit)) {
1289 			if (target_share_equal(server,
1290 					       dfs_cache_get_tgt_name(oit),
1291 					       dfs_cache_get_tgt_name(nit)))
1292 				return;
1293 		}
1294 	}
1295 
1296 	cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
1297 	cifs_signal_cifsd_for_reconnect(server, true);
1298 }
1299 
1300 /* Refresh dfs referral of tcon and mark it for reconnect if needed */
1301 static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_refresh)
1302 {
1303 	struct dfs_cache_tgt_list old_tl = DFS_CACHE_TGT_LIST_INIT(old_tl);
1304 	struct dfs_cache_tgt_list new_tl = DFS_CACHE_TGT_LIST_INIT(new_tl);
1305 	struct cifs_ses *ses = CIFS_DFS_ROOT_SES(tcon->ses);
1306 	struct cifs_tcon *ipc = ses->tcon_ipc;
1307 	bool needs_refresh = false;
1308 	struct cache_entry *ce;
1309 	unsigned int xid;
1310 	int rc = 0;
1311 
1312 	xid = get_xid();
1313 
1314 	down_read(&htable_rw_lock);
1315 	ce = lookup_cache_entry(path);
1316 	needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
1317 	if (!IS_ERR(ce)) {
1318 		rc = get_targets(ce, &old_tl);
1319 		cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
1320 	}
1321 	up_read(&htable_rw_lock);
1322 
1323 	if (!needs_refresh) {
1324 		rc = 0;
1325 		goto out;
1326 	}
1327 
1328 	spin_lock(&ipc->tc_lock);
1329 	if (ses->ses_status != SES_GOOD || ipc->status != TID_GOOD) {
1330 		spin_unlock(&ipc->tc_lock);
1331 		cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n", __func__);
1332 		goto out;
1333 	}
1334 	spin_unlock(&ipc->tc_lock);
1335 
1336 	ce = cache_refresh_path(xid, ses, path, true);
1337 	if (!IS_ERR(ce)) {
1338 		rc = get_targets(ce, &new_tl);
1339 		up_read(&htable_rw_lock);
1340 		cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
1341 		mark_for_reconnect_if_needed(tcon->ses->server, &old_tl, &new_tl);
1342 	}
1343 
1344 out:
1345 	free_xid(xid);
1346 	dfs_cache_free_tgts(&old_tl);
1347 	dfs_cache_free_tgts(&new_tl);
1348 	return rc;
1349 }
1350 
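/* Refresh the DFS referral for @tcon's server leaf full path */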
1351 static int refresh_tcon(struct cifs_tcon *tcon, bool force_refresh)
1352 {
1353 	struct TCP_Server_Info *server = tcon->ses->server;
1354 
1355 	mutex_lock(&server->refpath_lock);
1356 	if (server->leaf_fullpath)
1357 		__refresh_tcon(server->leaf_fullpath + 1, tcon, force_refresh);
1358 	mutex_unlock(&server->refpath_lock);
1359 	return 0;
1360 }
1361 
1362 /**
1363  * dfs_cache_remount_fs - remount a DFS share
1364  *
1365  * Reconfigure the DFS mount by forcing a new DFS referral and, if the currently cached targets
1366  * do not match any of the new targets, mark it for reconnect.
1367  *
1368  * @cifs_sb: cifs superblock.
1369  *
1370  * Return zero if remounted, otherwise non-zero.
1371  */
1372 int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
1373 {
1374 	struct cifs_tcon *tcon;
1375 	struct TCP_Server_Info *server;
1376 
1377 	if (!cifs_sb || !cifs_sb->master_tlink)
1378 		return -EINVAL;
1379 
1380 	tcon = cifs_sb_master_tcon(cifs_sb);
1381 	server = tcon->ses->server;
1382 
1383 	if (!server->origin_fullpath) {
1384 		cifs_dbg(FYI, "%s: not a dfs mount\n", __func__);
1385 		return 0;
1386 	}
1387 
1388 	if (uuid_is_null(&cifs_sb->dfs_mount_id)) {
1389 		cifs_dbg(FYI, "%s: no dfs mount group id\n", __func__);
1390 		return -EINVAL;
1391 	}
1392 	/*
1393 	 * After reconnecting to a different server, unique ids won't match anymore, so we disable
1394 	 * serverino. This prevents dentry revalidation from treating dentries as stale (ESTALE).
1395 	 */
1396 	cifs_autodisable_serverino(cifs_sb);
1397 	/*
1398 	 * Force the use of prefix path to support failover on DFS paths that resolve to targets
1399 	 * that have different prefix paths.
1400 	 */
1401 	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
1402 
1403 	return refresh_tcon(tcon, true);
1404 }
1405 
1406 /*
1407  * Worker that will refresh DFS cache from all active mounts based on lowest TTL value
1408  * from a DFS referral.
1409  */
1410 static void refresh_cache_worker(struct work_struct *work)
1411 {
1412 	struct TCP_Server_Info *server;
1413 	struct cifs_tcon *tcon, *ntcon;
1414 	struct list_head tcons;
1415 	struct cifs_ses *ses;
1416 
1417 	INIT_LIST_HEAD(&tcons);
1418 
1419 	spin_lock(&cifs_tcp_ses_lock);
1420 	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
1421 		if (!server->leaf_fullpath)
1422 			continue;
1423 
1424 		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
1425 			if (ses->tcon_ipc) {
1426 				ses->ses_count++;
1427 				list_add_tail(&ses->tcon_ipc->ulist, &tcons);
1428 			}
1429 			list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
1430 				if (!tcon->ipc) {
1431 					tcon->tc_count++;
1432 					list_add_tail(&tcon->ulist, &tcons);
1433 				}
1434 			}
1435 		}
1436 	}
1437 	spin_unlock(&cifs_tcp_ses_lock);
1438 
1439 	list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
1440 		struct TCP_Server_Info *server = tcon->ses->server;
1441 
1442 		list_del_init(&tcon->ulist);
1443 
1444 		mutex_lock(&server->refpath_lock);
1445 		if (server->leaf_fullpath)
1446 			__refresh_tcon(server->leaf_fullpath + 1, tcon, false);
1447 		mutex_unlock(&server->refpath_lock);
1448 
1449 		if (tcon->ipc)
1450 			cifs_put_smb_ses(tcon->ses);
1451 		else
1452 			cifs_put_tcon(tcon);
1453 	}
1454 
1455 	spin_lock(&cache_ttl_lock);
1456 	queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
1457 	spin_unlock(&cache_ttl_lock);
1458 }
1459