1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 *
7 * Common Internet FileSystem (CIFS) client
8 *
9 */
10
11 /* Note that BB means BUGBUG (ie something to fix eventually) */
12
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/filelock.h>
16 #include <linux/mount.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/seq_file.h>
21 #include <linux/vfs.h>
22 #include <linux/mempool.h>
23 #include <linux/delay.h>
24 #include <linux/kthread.h>
25 #include <linux/freezer.h>
26 #include <linux/namei.h>
27 #include <linux/random.h>
28 #include <linux/uuid.h>
29 #include <linux/xattr.h>
30 #include <uapi/linux/magic.h>
31 #include <net/ipv6.h>
32 #include "cifsfs.h"
33 #include "cifspdu.h"
34 #define DECLARE_GLOBALS_HERE
35 #include "cifsglob.h"
36 #include "cifsproto.h"
37 #include "cifs_debug.h"
38 #include "cifs_fs_sb.h"
39 #include <linux/mm.h>
40 #include <linux/key-type.h>
41 #include "cifs_spnego.h"
42 #include "fscache.h"
43 #ifdef CONFIG_CIFS_DFS_UPCALL
44 #include "dfs_cache.h"
45 #endif
46 #ifdef CONFIG_CIFS_SWN_UPCALL
47 #include "netlink.h"
48 #endif
49 #include "fs_context.h"
50 #include "cached_dir.h"
51
52 /*
53 * DOS dates from 1980/1/1 through 2107/12/31
54 * Protocol specifications indicate the range should be limited to 119, which
55 * would limit the maximum year to 2099, but this range has not been checked.
56 */
57 #define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
58 #define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
59 #define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
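/*
 * Worked example of the packing above: the DOS date word stores
 * (year - 1980) in bits 15-9, month in bits 8-5 and day in bits 4-0,
 * so SMB_DATE_MAX decodes to 2107-12-31.  The DOS time word stores
 * hours in bits 15-11, minutes in bits 10-5 and two-second increments
 * in bits 4-0, so SMB_TIME_MAX (29 * 2 = 58) decodes to 23:59:58.
 */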
60
61 int cifsFYI = 0;
62 bool traceSMB;
63 bool enable_oplocks = true;
64 bool linuxExtEnabled = true;
65 bool lookupCacheEnabled = true;
66 bool disable_legacy_dialects; /* false by default */
67 bool enable_gcm_256 = true;
68 bool require_gcm_256; /* false by default */
69 bool enable_negotiate_signing; /* false by default */
70 unsigned int global_secflags = CIFSSEC_DEF;
71 /* unsigned int ntlmv2_support = 0; */
72 unsigned int sign_CIFS_PDUs = 1;
73
74 /*
75 * Global transaction id (XID) information
76 */
77 unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */
78 unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
79 unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */
80 spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
81
82 /*
83 * Global counters, updated atomically
84 */
85 atomic_t sesInfoAllocCount;
86 atomic_t tconInfoAllocCount;
87 atomic_t tcpSesNextId;
88 atomic_t tcpSesAllocCount;
89 atomic_t tcpSesReconnectCount;
90 atomic_t tconInfoReconnectCount;
91
92 atomic_t mid_count;
93 atomic_t buf_alloc_count;
94 atomic_t small_buf_alloc_count;
95 #ifdef CONFIG_CIFS_STATS2
96 atomic_t total_buf_alloc_count;
97 atomic_t total_small_buf_alloc_count;
98 #endif /* STATS2 */
99 struct list_head cifs_tcp_ses_list;
100 spinlock_t cifs_tcp_ses_lock;
101 static const struct super_operations cifs_super_ops;
102 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
103 module_param(CIFSMaxBufSize, uint, 0444);
104 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
105 "for CIFS requests. "
106 "Default: 16384 Range: 8192 to 130048");
107 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
108 module_param(cifs_min_rcv, uint, 0444);
109 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
110 "1 to 64");
111 unsigned int cifs_min_small = 30;
112 module_param(cifs_min_small, uint, 0444);
113 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
114 "Range: 2 to 256");
115 unsigned int cifs_max_pending = CIFS_MAX_REQ;
116 module_param(cifs_max_pending, uint, 0444);
117 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
118 "CIFS/SMB1 dialect (N/A for SMB3) "
119 "Default: 32767 Range: 2 to 32767.");
120 #ifdef CONFIG_CIFS_STATS2
121 unsigned int slow_rsp_threshold = 1;
122 module_param(slow_rsp_threshold, uint, 0644);
123 MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
124 "before logging that a response is delayed. "
125 "Default: 1 (if set to 0 disables msg).");
126 #endif /* STATS2 */
127
128 module_param(enable_oplocks, bool, 0644);
129 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
130
131 module_param(enable_gcm_256, bool, 0644);
132 MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: n/N/0");
133
134 module_param(require_gcm_256, bool, 0644);
135 MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
136
137 module_param(enable_negotiate_signing, bool, 0644);
138 MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");
139
140 module_param(disable_legacy_dialects, bool, 0644);
141 MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
142 "helpful to restrict the ability to "
143 "override the default dialects (SMB2.1, "
144 "SMB3 and SMB3.02) on mount with old "
145 "dialects (CIFS/SMB1 and SMB2) since "
146 "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
147 " and less secure. Default: n/N/0");
148
149 extern mempool_t *cifs_sm_req_poolp;
150 extern mempool_t *cifs_req_poolp;
151 extern mempool_t *cifs_mid_poolp;
152
153 struct workqueue_struct *cifsiod_wq;
154 struct workqueue_struct *decrypt_wq;
155 struct workqueue_struct *fileinfo_put_wq;
156 struct workqueue_struct *cifsoplockd_wq;
157 struct workqueue_struct *deferredclose_wq;
158 __u32 cifs_lock_secret;
159
160 /*
161 * Bumps refcount for cifs super block.
162 * Note that it should only be called if a reference to the VFS super block is
163 * already held, e.g. in open-type syscalls context. Otherwise it can race with
164 * atomic_dec_and_test in deactivate_locked_super.
165 */
166 void
167 cifs_sb_active(struct super_block *sb)
168 {
169 struct cifs_sb_info *server = CIFS_SB(sb);
170
171 if (atomic_inc_return(&server->active) == 1)
172 atomic_inc(&sb->s_active);
173 }
174
175 void
176 cifs_sb_deactive(struct super_block *sb)
177 {
178 struct cifs_sb_info *server = CIFS_SB(sb);
179
180 if (atomic_dec_and_test(&server->active))
181 deactivate_super(sb);
182 }
183
184 static int
185 cifs_read_super(struct super_block *sb)
186 {
187 struct inode *inode;
188 struct cifs_sb_info *cifs_sb;
189 struct cifs_tcon *tcon;
190 struct timespec64 ts;
191 int rc = 0;
192
193 cifs_sb = CIFS_SB(sb);
194 tcon = cifs_sb_master_tcon(cifs_sb);
195
196 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
197 sb->s_flags |= SB_POSIXACL;
198
199 if (tcon->snapshot_time)
200 sb->s_flags |= SB_RDONLY;
201
202 if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
203 sb->s_maxbytes = MAX_LFS_FILESIZE;
204 else
205 sb->s_maxbytes = MAX_NON_LFS;
206
207 /*
208 * Some very old servers like DOS and OS/2 used 2 second granularity
209 * (while all current servers use 100ns granularity - see MS-DTYP)
210 * but 1 second is the maximum allowed granularity for the VFS
211 * so for old servers set time granularity to 1 second while for
212 * everything else (current servers) set it to 100ns.
213 */
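/*
 * For example, with 2 second granularity a modification time of
 * 10:00:01 is stored by such a server as either 10:00:00 or 10:00:02,
 * so nothing further is lost by rounding to the 1 second VFS minimum.
 */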
214 if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
215 ((tcon->ses->capabilities &
216 tcon->ses->server->vals->cap_nt_find) == 0) &&
217 !tcon->unix_ext) {
218 sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
219 ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
220 sb->s_time_min = ts.tv_sec;
221 ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
222 cpu_to_le16(SMB_TIME_MAX), 0);
223 sb->s_time_max = ts.tv_sec;
224 } else {
225 /*
226 * Almost every server, including all SMB2+, uses DCE TIME
227 * ie 100 nanosecond units, since 1601. See MS-DTYP and MS-FSCC
228 */
229 sb->s_time_gran = 100;
230 ts = cifs_NTtimeToUnix(0);
231 sb->s_time_min = ts.tv_sec;
232 ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
233 sb->s_time_max = ts.tv_sec;
234 }
235
236 sb->s_magic = CIFS_SUPER_MAGIC;
237 sb->s_op = &cifs_super_ops;
238 sb->s_xattr = cifs_xattr_handlers;
239 rc = super_setup_bdi(sb);
240 if (rc)
241 goto out_no_root;
242 /* tune readahead according to rsize if readahead size not set on mount */
243 if (cifs_sb->ctx->rsize == 0)
244 cifs_sb->ctx->rsize =
245 tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
246 if (cifs_sb->ctx->rasize)
247 sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
248 else
249 sb->s_bdi->ra_pages = cifs_sb->ctx->rsize / PAGE_SIZE;
250
251 sb->s_blocksize = CIFS_MAX_MSGSIZE;
252 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
253 inode = cifs_root_iget(sb);
254
255 if (IS_ERR(inode)) {
256 rc = PTR_ERR(inode);
257 goto out_no_root;
258 }
259
260 if (tcon->nocase)
261 sb->s_d_op = &cifs_ci_dentry_ops;
262 else
263 sb->s_d_op = &cifs_dentry_ops;
264
265 sb->s_root = d_make_root(inode);
266 if (!sb->s_root) {
267 rc = -ENOMEM;
268 goto out_no_root;
269 }
270
271 #ifdef CONFIG_CIFS_NFSD_EXPORT
272 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
273 cifs_dbg(FYI, "export ops supported\n");
274 sb->s_export_op = &cifs_export_ops;
275 }
276 #endif /* CONFIG_CIFS_NFSD_EXPORT */
277
278 return 0;
279
280 out_no_root:
281 cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
282 return rc;
283 }
284
285 static void cifs_kill_sb(struct super_block *sb)
286 {
287 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
288
289 /*
290 * We need to release all dentries for the cached directories
291 * before we kill the sb.
292 */
293 if (cifs_sb->root) {
294 close_all_cached_dirs(cifs_sb);
295
296 /* finally release root dentry */
297 dput(cifs_sb->root);
298 cifs_sb->root = NULL;
299 }
300
301 kill_anon_super(sb);
302 cifs_umount(cifs_sb);
303 }
304
305 static int
306 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
307 {
308 struct super_block *sb = dentry->d_sb;
309 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
310 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
311 struct TCP_Server_Info *server = tcon->ses->server;
312 unsigned int xid;
313 int rc = 0;
314
315 xid = get_xid();
316
317 if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
318 buf->f_namelen =
319 le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
320 else
321 buf->f_namelen = PATH_MAX;
322
323 buf->f_fsid.val[0] = tcon->vol_serial_number;
324 /* use part of the volume create time for more randomness, see man statfs */
325 buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);
326
327 buf->f_files = 0; /* undefined */
328 buf->f_ffree = 0; /* unlimited */
329
330 if (server->ops->queryfs)
331 rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);
332
333 free_xid(xid);
334 return rc;
335 }
336
337 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
338 {
339 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
340 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
341 struct TCP_Server_Info *server = tcon->ses->server;
342
343 if (server->ops->fallocate)
344 return server->ops->fallocate(file, tcon, mode, off, len);
345
346 return -EOPNOTSUPP;
347 }
348
349 static int cifs_permission(struct mnt_idmap *idmap,
350 struct inode *inode, int mask)
351 {
352 struct cifs_sb_info *cifs_sb;
353
354 cifs_sb = CIFS_SB(inode->i_sb);
355
356 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
357 if ((mask & MAY_EXEC) && !execute_ok(inode))
358 return -EACCES;
359 else
360 return 0;
361 } else /* file mode might have been restricted at mount time
362 on the client (above and beyond ACL on servers) for
363 servers which do not support setting and viewing mode bits,
364 so allowing client to check permissions is useful */
365 return generic_permission(&nop_mnt_idmap, inode, mask);
366 }
367
368 static struct kmem_cache *cifs_inode_cachep;
369 static struct kmem_cache *cifs_req_cachep;
370 static struct kmem_cache *cifs_mid_cachep;
371 static struct kmem_cache *cifs_sm_req_cachep;
372 mempool_t *cifs_sm_req_poolp;
373 mempool_t *cifs_req_poolp;
374 mempool_t *cifs_mid_poolp;
375
376 static struct inode *
377 cifs_alloc_inode(struct super_block *sb)
378 {
379 struct cifsInodeInfo *cifs_inode;
380 cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
381 if (!cifs_inode)
382 return NULL;
383 cifs_inode->cifsAttrs = 0x20; /* default */
384 cifs_inode->time = 0;
385 /*
386 * Until the file is open and we have gotten oplock info back from the
387 * server, can not assume caching of file data or metadata.
388 */
389 cifs_set_oplock_level(cifs_inode, 0);
390 cifs_inode->flags = 0;
391 spin_lock_init(&cifs_inode->writers_lock);
392 cifs_inode->writers = 0;
393 cifs_inode->netfs.inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
394 cifs_inode->server_eof = 0;
395 cifs_inode->uniqueid = 0;
396 cifs_inode->createtime = 0;
397 cifs_inode->epoch = 0;
398 spin_lock_init(&cifs_inode->open_file_lock);
399 generate_random_uuid(cifs_inode->lease_key);
400 cifs_inode->symlink_target = NULL;
401
402 /*
403 * Can not set i_flags here - they get immediately overwritten to zero
404 * by the VFS.
405 */
406 /* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
407 INIT_LIST_HEAD(&cifs_inode->openFileList);
408 INIT_LIST_HEAD(&cifs_inode->llist);
409 INIT_LIST_HEAD(&cifs_inode->deferred_closes);
410 spin_lock_init(&cifs_inode->deferred_lock);
411 return &cifs_inode->netfs.inode;
412 }
413
414 static void
415 cifs_free_inode(struct inode *inode)
416 {
417 struct cifsInodeInfo *cinode = CIFS_I(inode);
418
419 if (S_ISLNK(inode->i_mode))
420 kfree(cinode->symlink_target);
421 kmem_cache_free(cifs_inode_cachep, cinode);
422 }
423
424 static void
425 cifs_evict_inode(struct inode *inode)
426 {
427 truncate_inode_pages_final(&inode->i_data);
428 if (inode->i_state & I_PINNING_FSCACHE_WB)
429 cifs_fscache_unuse_inode_cookie(inode, true);
430 cifs_fscache_release_inode_cookie(inode);
431 clear_inode(inode);
432 }
433
434 static void
435 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
436 {
437 struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
438 struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
439
440 seq_puts(s, ",addr=");
441
442 switch (server->dstaddr.ss_family) {
443 case AF_INET:
444 seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
445 break;
446 case AF_INET6:
447 seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
448 if (sa6->sin6_scope_id)
449 seq_printf(s, "%%%u", sa6->sin6_scope_id);
450 break;
451 default:
452 seq_puts(s, "(unknown)");
453 }
454 if (server->rdma)
455 seq_puts(s, ",rdma");
456 }
457
458 static void
459 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
460 {
461 if (ses->sectype == Unspecified) {
462 if (ses->user_name == NULL)
463 seq_puts(s, ",sec=none");
464 return;
465 }
466
467 seq_puts(s, ",sec=");
468
469 switch (ses->sectype) {
470 case NTLMv2:
471 seq_puts(s, "ntlmv2");
472 break;
473 case Kerberos:
474 seq_puts(s, "krb5");
475 break;
476 case RawNTLMSSP:
477 seq_puts(s, "ntlmssp");
478 break;
479 default:
480 /* shouldn't ever happen */
481 seq_puts(s, "unknown");
482 break;
483 }
484
485 if (ses->sign)
486 seq_puts(s, "i");
487
488 if (ses->sectype == Kerberos)
489 seq_printf(s, ",cruid=%u",
490 from_kuid_munged(&init_user_ns, ses->cred_uid));
491 }
492
493 static void
494 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
495 {
496 seq_puts(s, ",cache=");
497
498 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
499 seq_puts(s, "strict");
500 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
501 seq_puts(s, "none");
502 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
503 seq_puts(s, "singleclient"); /* assume only one client access */
504 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
505 seq_puts(s, "ro"); /* read only caching assumed */
506 else
507 seq_puts(s, "loose");
508 }
509
510 /*
511 * cifs_show_devname() is used so we show the mount device name with correct
512 * format (e.g. forward slashes vs. back slashes) in /proc/mounts
513 */
514 static int cifs_show_devname(struct seq_file *m, struct dentry *root)
515 {
516 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
517 char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
518
519 if (devname == NULL)
520 seq_puts(m, "none");
521 else {
522 convert_delimiter(devname, '/');
523 /* escape all spaces in share names */
524 seq_escape(m, devname, " \t");
525 kfree(devname);
526 }
527 return 0;
528 }
529
530 /*
531 * cifs_show_options() is for displaying mount options in /proc/mounts.
532 * Not all settable options are displayed but most of the important
533 * ones are.
534 */
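/*
 * Purely for illustration, a line emitted here into /proc/mounts might
 * look roughly like:
 *   //server/share /mnt cifs rw,vers=3.1.1,sec=ntlmssp,cache=strict,
 *   uid=0,forceuid,gid=0,forcegid,addr=192.0.2.1,...
 * with the exact fields depending on the negotiated dialect and the
 * options used at mount time.
 */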
535 static int
536 cifs_show_options(struct seq_file *s, struct dentry *root)
537 {
538 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
539 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
540 struct sockaddr *srcaddr;
541 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
542
543 seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
544 cifs_show_security(s, tcon->ses);
545 cifs_show_cache_flavor(s, cifs_sb);
546
547 if (tcon->no_lease)
548 seq_puts(s, ",nolease");
549 if (cifs_sb->ctx->multiuser)
550 seq_puts(s, ",multiuser");
551 else if (tcon->ses->user_name)
552 seq_show_option(s, "username", tcon->ses->user_name);
553
554 if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
555 seq_show_option(s, "domain", tcon->ses->domainName);
556
557 if (srcaddr->sa_family != AF_UNSPEC) {
558 struct sockaddr_in *saddr4;
559 struct sockaddr_in6 *saddr6;
560 saddr4 = (struct sockaddr_in *)srcaddr;
561 saddr6 = (struct sockaddr_in6 *)srcaddr;
562 if (srcaddr->sa_family == AF_INET6)
563 seq_printf(s, ",srcaddr=%pI6c",
564 &saddr6->sin6_addr);
565 else if (srcaddr->sa_family == AF_INET)
566 seq_printf(s, ",srcaddr=%pI4",
567 &saddr4->sin_addr.s_addr);
568 else
569 seq_printf(s, ",srcaddr=BAD-AF:%i",
570 (int)(srcaddr->sa_family));
571 }
572
573 seq_printf(s, ",uid=%u",
574 from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
575 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
576 seq_puts(s, ",forceuid");
577 else
578 seq_puts(s, ",noforceuid");
579
580 seq_printf(s, ",gid=%u",
581 from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
582 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
583 seq_puts(s, ",forcegid");
584 else
585 seq_puts(s, ",noforcegid");
586
587 cifs_show_address(s, tcon->ses->server);
588
589 if (!tcon->unix_ext)
590 seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
591 cifs_sb->ctx->file_mode,
592 cifs_sb->ctx->dir_mode);
593 if (cifs_sb->ctx->iocharset)
594 seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
595 if (tcon->seal)
596 seq_puts(s, ",seal");
597 else if (tcon->ses->server->ignore_signature)
598 seq_puts(s, ",signloosely");
599 if (tcon->nocase)
600 seq_puts(s, ",nocase");
601 if (tcon->nodelete)
602 seq_puts(s, ",nodelete");
603 if (cifs_sb->ctx->no_sparse)
604 seq_puts(s, ",nosparse");
605 if (tcon->local_lease)
606 seq_puts(s, ",locallease");
607 if (tcon->retry)
608 seq_puts(s, ",hard");
609 else
610 seq_puts(s, ",soft");
611 if (tcon->use_persistent)
612 seq_puts(s, ",persistenthandles");
613 else if (tcon->use_resilient)
614 seq_puts(s, ",resilienthandles");
615 if (tcon->posix_extensions)
616 seq_puts(s, ",posix");
617 else if (tcon->unix_ext)
618 seq_puts(s, ",unix");
619 else
620 seq_puts(s, ",nounix");
621 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
622 seq_puts(s, ",nodfs");
623 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
624 seq_puts(s, ",posixpaths");
625 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
626 seq_puts(s, ",setuids");
627 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
628 seq_puts(s, ",idsfromsid");
629 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
630 seq_puts(s, ",serverino");
631 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
632 seq_puts(s, ",rwpidforward");
633 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
634 seq_puts(s, ",forcemand");
635 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
636 seq_puts(s, ",nouser_xattr");
637 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
638 seq_puts(s, ",mapchars");
639 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
640 seq_puts(s, ",mapposix");
641 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
642 seq_puts(s, ",sfu");
643 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
644 seq_puts(s, ",nobrl");
645 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
646 seq_puts(s, ",nohandlecache");
647 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
648 seq_puts(s, ",modefromsid");
649 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
650 seq_puts(s, ",cifsacl");
651 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
652 seq_puts(s, ",dynperm");
653 if (root->d_sb->s_flags & SB_POSIXACL)
654 seq_puts(s, ",acl");
655 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
656 seq_puts(s, ",mfsymlinks");
657 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
658 seq_puts(s, ",fsc");
659 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
660 seq_puts(s, ",nostrictsync");
661 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
662 seq_puts(s, ",noperm");
663 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
664 seq_printf(s, ",backupuid=%u",
665 from_kuid_munged(&init_user_ns,
666 cifs_sb->ctx->backupuid));
667 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
668 seq_printf(s, ",backupgid=%u",
669 from_kgid_munged(&init_user_ns,
670 cifs_sb->ctx->backupgid));
671
672 seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
673 seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
674 seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
675 if (cifs_sb->ctx->rasize)
676 seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
677 if (tcon->ses->server->min_offload)
678 seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
679 seq_printf(s, ",echo_interval=%lu",
680 tcon->ses->server->echo_interval / HZ);
681
682 /* Only display the following if overridden on mount */
683 if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
684 seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
685 if (tcon->ses->server->tcp_nodelay)
686 seq_puts(s, ",tcpnodelay");
687 if (tcon->ses->server->noautotune)
688 seq_puts(s, ",noautotune");
689 if (tcon->ses->server->noblocksnd)
690 seq_puts(s, ",noblocksend");
691
692 if (tcon->snapshot_time)
693 seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
694 if (tcon->handle_timeout)
695 seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
696
697 /*
698 * Display file and directory attribute timeout in seconds.
699 * If the file and directory attribute timeouts are the same then actimeo
700 * was likely specified on mount.
701 */
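/*
 * For example, mounting with actimeo=30 is reported below as a single
 * ",actimeo=30", while acregmax=3,acdirmax=30 reports both values.
 */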
702 if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
703 seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
704 else {
705 seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
706 seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
707 }
708 seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);
709
710 if (tcon->ses->chan_max > 1)
711 seq_printf(s, ",multichannel,max_channels=%zu",
712 tcon->ses->chan_max);
713
714 if (tcon->use_witness)
715 seq_puts(s, ",witness");
716
717 return 0;
718 }
719
720 static void cifs_umount_begin(struct super_block *sb)
721 {
722 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
723 struct cifs_tcon *tcon;
724
725 if (cifs_sb == NULL)
726 return;
727
728 tcon = cifs_sb_master_tcon(cifs_sb);
729
730 spin_lock(&cifs_tcp_ses_lock);
731 spin_lock(&tcon->tc_lock);
732 if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
733 /* we have other mounts to same share or we have
734 already tried to force umount this and woken up
735 all waiting network requests, nothing to do */
736 spin_unlock(&tcon->tc_lock);
737 spin_unlock(&cifs_tcp_ses_lock);
738 return;
739 } else if (tcon->tc_count == 1)
740 tcon->status = TID_EXITING;
741 spin_unlock(&tcon->tc_lock);
742 spin_unlock(&cifs_tcp_ses_lock);
743
744 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
745 /* cancel_notify_requests(tcon); */
746 if (tcon->ses && tcon->ses->server) {
747 cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
748 wake_up_all(&tcon->ses->server->request_q);
749 wake_up_all(&tcon->ses->server->response_q);
750 msleep(1); /* yield */
751 /* we have to kick the requests once more */
752 wake_up_all(&tcon->ses->server->response_q);
753 msleep(1);
754 }
755
756 return;
757 }
758
759 #ifdef CONFIG_CIFS_STATS2
760 static int cifs_show_stats(struct seq_file *s, struct dentry *root)
761 {
762 /* BB FIXME */
763 return 0;
764 }
765 #endif
766
767 static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
768 {
769 fscache_unpin_writeback(wbc, cifs_inode_cookie(inode));
770 return 0;
771 }
772
773 static int cifs_drop_inode(struct inode *inode)
774 {
775 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
776
777 /* no serverino => unconditional eviction */
778 return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
779 generic_drop_inode(inode);
780 }
781
782 static const struct super_operations cifs_super_ops = {
783 .statfs = cifs_statfs,
784 .alloc_inode = cifs_alloc_inode,
785 .write_inode = cifs_write_inode,
786 .free_inode = cifs_free_inode,
787 .drop_inode = cifs_drop_inode,
788 .evict_inode = cifs_evict_inode,
789 /* .show_path = cifs_show_path, */ /* Would we ever need show path? */
790 .show_devname = cifs_show_devname,
791 /* .delete_inode = cifs_delete_inode, */ /* Do not need above
792 function unless later we add lazy close of inodes or unless the
793 kernel forgets to call us with the same number of releases (closes)
794 as opens */
795 .show_options = cifs_show_options,
796 .umount_begin = cifs_umount_begin,
797 #ifdef CONFIG_CIFS_STATS2
798 .show_stats = cifs_show_stats,
799 #endif
800 };
801
802 /*
803 * Get root dentry from superblock according to prefix path mount option.
804 * Return dentry with refcount + 1 on success and ERR_PTR otherwise.
805 */
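/*
 * Example: for a mount of //server/share/dir1/dir2 the prefix path is
 * "dir1/dir2" (separated per CIFS_DIR_SEP), and the loop below walks
 * dir1 then dir2 starting from sb->s_root to find the dentry to return.
 */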
806 static struct dentry *
807 cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
808 {
809 struct dentry *dentry;
810 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
811 char *full_path = NULL;
812 char *s, *p;
813 char sep;
814
815 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
816 return dget(sb->s_root);
817
818 full_path = cifs_build_path_to_root(ctx, cifs_sb,
819 cifs_sb_master_tcon(cifs_sb), 0);
820 if (full_path == NULL)
821 return ERR_PTR(-ENOMEM);
822
823 cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
824
825 sep = CIFS_DIR_SEP(cifs_sb);
826 dentry = dget(sb->s_root);
827 s = full_path;
828
829 do {
830 struct inode *dir = d_inode(dentry);
831 struct dentry *child;
832
833 if (!S_ISDIR(dir->i_mode)) {
834 dput(dentry);
835 dentry = ERR_PTR(-ENOTDIR);
836 break;
837 }
838
839 /* skip separators */
840 while (*s == sep)
841 s++;
842 if (!*s)
843 break;
844 p = s++;
845 /* next separator */
846 while (*s && *s != sep)
847 s++;
848
849 child = lookup_positive_unlocked(p, dentry, s - p);
850 dput(dentry);
851 dentry = child;
852 } while (!IS_ERR(dentry));
853 kfree(full_path);
854 return dentry;
855 }
856
857 static int cifs_set_super(struct super_block *sb, void *data)
858 {
859 struct cifs_mnt_data *mnt_data = data;
860 sb->s_fs_info = mnt_data->cifs_sb;
861 return set_anon_super(sb, NULL);
862 }
863
864 struct dentry *
865 cifs_smb3_do_mount(struct file_system_type *fs_type,
866 int flags, struct smb3_fs_context *old_ctx)
867 {
868 int rc;
869 struct super_block *sb = NULL;
870 struct cifs_sb_info *cifs_sb = NULL;
871 struct cifs_mnt_data mnt_data;
872 struct dentry *root;
873
874 /*
875 * Print the attempted mount operation in the kernel / CIFS log
876 * if CIFS_DEBUG && cifsFYI
877 */
878 if (cifsFYI)
879 cifs_dbg(FYI, "Devname: %s flags: %d\n", old_ctx->UNC, flags);
880 else
881 cifs_info("Attempting to mount %s\n", old_ctx->UNC);
882
883 cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
884 if (cifs_sb == NULL) {
885 root = ERR_PTR(-ENOMEM);
886 goto out;
887 }
888
889 cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
890 if (!cifs_sb->ctx) {
891 root = ERR_PTR(-ENOMEM);
892 goto out;
893 }
894 rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
895 if (rc) {
896 root = ERR_PTR(rc);
897 goto out;
898 }
899
900 rc = cifs_setup_cifs_sb(cifs_sb);
901 if (rc) {
902 root = ERR_PTR(rc);
903 goto out;
904 }
905
906 rc = cifs_mount(cifs_sb, cifs_sb->ctx);
907 if (rc) {
908 if (!(flags & SB_SILENT))
909 cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
910 rc);
911 root = ERR_PTR(rc);
912 goto out;
913 }
914
915 mnt_data.ctx = cifs_sb->ctx;
916 mnt_data.cifs_sb = cifs_sb;
917 mnt_data.flags = flags;
918
919 /* BB should we make this contingent on mount parm? */
920 flags |= SB_NODIRATIME | SB_NOATIME;
921
922 sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
923 if (IS_ERR(sb)) {
924 root = ERR_CAST(sb);
925 cifs_umount(cifs_sb);
926 cifs_sb = NULL;
927 goto out;
928 }
929
930 if (sb->s_root) {
931 cifs_dbg(FYI, "Use existing superblock\n");
932 cifs_umount(cifs_sb);
933 cifs_sb = NULL;
934 } else {
935 rc = cifs_read_super(sb);
936 if (rc) {
937 root = ERR_PTR(rc);
938 goto out_super;
939 }
940
941 sb->s_flags |= SB_ACTIVE;
942 }
943
944 root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
945 if (IS_ERR(root))
946 goto out_super;
947
948 if (cifs_sb)
949 cifs_sb->root = dget(root);
950
951 cifs_dbg(FYI, "dentry root is: %p\n", root);
952 return root;
953
954 out_super:
955 deactivate_locked_super(sb);
956 return root;
957 out:
958 if (cifs_sb) {
959 if (!sb || IS_ERR(sb)) { /* otherwise kill_sb will handle */
960 kfree(cifs_sb->prepath);
961 smb3_cleanup_fs_context(cifs_sb->ctx);
962 kfree(cifs_sb);
963 }
964 }
965 return root;
966 }
967
968
969 static ssize_t
970 cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
971 {
972 ssize_t rc;
973 struct inode *inode = file_inode(iocb->ki_filp);
974
975 if (iocb->ki_flags & IOCB_DIRECT)
976 return cifs_user_readv(iocb, iter);
977
978 rc = cifs_revalidate_mapping(inode);
979 if (rc)
980 return rc;
981
982 return generic_file_read_iter(iocb, iter);
983 }
984
985 static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
986 {
987 struct inode *inode = file_inode(iocb->ki_filp);
988 struct cifsInodeInfo *cinode = CIFS_I(inode);
989 ssize_t written;
990 int rc;
991
992 if (iocb->ki_filp->f_flags & O_DIRECT) {
993 written = cifs_user_writev(iocb, from);
994 if (written > 0 && CIFS_CACHE_READ(cinode)) {
995 cifs_zap_mapping(inode);
996 cifs_dbg(FYI,
997 "Set no oplock for inode=%p after a write operation\n",
998 inode);
999 cinode->oplock = 0;
1000 }
1001 return written;
1002 }
1003
1004 written = cifs_get_writer(cinode);
1005 if (written)
1006 return written;
1007
1008 written = generic_file_write_iter(iocb, from);
1009
1010 if (CIFS_CACHE_WRITE(CIFS_I(inode)))
1011 goto out;
1012
1013 rc = filemap_fdatawrite(inode->i_mapping);
1014 if (rc)
1015 cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
1016 rc, inode);
1017
1018 out:
1019 cifs_put_writer(cinode);
1020 return written;
1021 }
1022
1023 static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
1024 {
1025 struct cifsFileInfo *cfile = file->private_data;
1026 struct cifs_tcon *tcon;
1027
1028 /*
1029 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
1030 * the cached file length
1031 */
1032 if (whence != SEEK_SET && whence != SEEK_CUR) {
1033 int rc;
1034 struct inode *inode = file_inode(file);
1035
1036 /*
1037 * We need to be sure that all dirty pages are written and the
1038 * server has the newest file length.
1039 */
1040 if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
1041 inode->i_mapping->nrpages != 0) {
1042 rc = filemap_fdatawait(inode->i_mapping);
1043 if (rc) {
1044 mapping_set_error(inode->i_mapping, rc);
1045 return rc;
1046 }
1047 }
1048 /*
1049 * Some applications poll for the file length in this strange
1050 * way so we must seek to end on non-oplocked files by
1051 * setting the revalidate time to zero.
1052 */
1053 CIFS_I(inode)->time = 0;
1054
1055 rc = cifs_revalidate_file_attr(file);
1056 if (rc < 0)
1057 return (loff_t)rc;
1058 }
1059 if (cfile && cfile->tlink) {
1060 tcon = tlink_tcon(cfile->tlink);
1061 if (tcon->ses->server->ops->llseek)
1062 return tcon->ses->server->ops->llseek(file, tcon,
1063 offset, whence);
1064 }
1065 return generic_file_llseek(file, offset, whence);
1066 }
1067
1068 static int
1069 cifs_setlease(struct file *file, long arg, struct file_lock **lease, void **priv)
1070 {
1071 /*
1072 * Note that this is called by vfs setlease with i_lock held to
1073 * protect *lease from going away.
1074 */
1075 struct inode *inode = file_inode(file);
1076 struct cifsFileInfo *cfile = file->private_data;
1077
1078 if (!(S_ISREG(inode->i_mode)))
1079 return -EINVAL;
1080
1081 /* Check if file is oplocked if this is request for new lease */
1082 if (arg == F_UNLCK ||
1083 ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
1084 ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
1085 return generic_setlease(file, arg, lease, priv);
1086 else if (tlink_tcon(cfile->tlink)->local_lease &&
1087 !CIFS_CACHE_READ(CIFS_I(inode)))
1088 /*
1089 * If the server claims to support oplock on this file, then we
1090 * still need to check oplock even if the local_lease mount
1091 * option is set, but there are servers which do not support
1092 * oplock for which this mount option may be useful if the user
1093 * knows that the file won't be changed on the server by anyone
1094 * else.
1095 */
1096 return generic_setlease(file, arg, lease, priv);
1097 else
1098 return -EAGAIN;
1099 }
1100
1101 struct file_system_type cifs_fs_type = {
1102 .owner = THIS_MODULE,
1103 .name = "cifs",
1104 .init_fs_context = smb3_init_fs_context,
1105 .parameters = smb3_fs_parameters,
1106 .kill_sb = cifs_kill_sb,
1107 .fs_flags = FS_RENAME_DOES_D_MOVE,
1108 };
1109 MODULE_ALIAS_FS("cifs");
1110
1111 struct file_system_type smb3_fs_type = {
1112 .owner = THIS_MODULE,
1113 .name = "smb3",
1114 .init_fs_context = smb3_init_fs_context,
1115 .parameters = smb3_fs_parameters,
1116 .kill_sb = cifs_kill_sb,
1117 .fs_flags = FS_RENAME_DOES_D_MOVE,
1118 };
1119 MODULE_ALIAS_FS("smb3");
1120 MODULE_ALIAS("smb3");
1121
1122 const struct inode_operations cifs_dir_inode_ops = {
1123 .create = cifs_create,
1124 .atomic_open = cifs_atomic_open,
1125 .lookup = cifs_lookup,
1126 .getattr = cifs_getattr,
1127 .unlink = cifs_unlink,
1128 .link = cifs_hardlink,
1129 .mkdir = cifs_mkdir,
1130 .rmdir = cifs_rmdir,
1131 .rename = cifs_rename2,
1132 .permission = cifs_permission,
1133 .setattr = cifs_setattr,
1134 .symlink = cifs_symlink,
1135 .mknod = cifs_mknod,
1136 .listxattr = cifs_listxattr,
1137 .get_acl = cifs_get_acl,
1138 .set_acl = cifs_set_acl,
1139 };
1140
1141 const struct inode_operations cifs_file_inode_ops = {
1142 .setattr = cifs_setattr,
1143 .getattr = cifs_getattr,
1144 .permission = cifs_permission,
1145 .listxattr = cifs_listxattr,
1146 .fiemap = cifs_fiemap,
1147 .get_acl = cifs_get_acl,
1148 .set_acl = cifs_set_acl,
1149 };
1150
1151 const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1152 struct delayed_call *done)
1153 {
1154 char *target_path;
1155
1156 target_path = kmalloc(PATH_MAX, GFP_KERNEL);
1157 if (!target_path)
1158 return ERR_PTR(-ENOMEM);
1159
1160 spin_lock(&inode->i_lock);
1161 if (likely(CIFS_I(inode)->symlink_target)) {
1162 strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
1163 } else {
1164 kfree(target_path);
1165 target_path = ERR_PTR(-EOPNOTSUPP);
1166 }
1167 spin_unlock(&inode->i_lock);
1168
1169 if (!IS_ERR(target_path))
1170 set_delayed_call(done, kfree_link, target_path);
1171
1172 return target_path;
1173 }
1174
1175 const struct inode_operations cifs_symlink_inode_ops = {
1176 .get_link = cifs_get_link,
1177 .permission = cifs_permission,
1178 .listxattr = cifs_listxattr,
1179 };
1180
1181 static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
1182 struct file *dst_file, loff_t destoff, loff_t len,
1183 unsigned int remap_flags)
1184 {
1185 struct inode *src_inode = file_inode(src_file);
1186 struct inode *target_inode = file_inode(dst_file);
1187 struct cifsFileInfo *smb_file_src = src_file->private_data;
1188 struct cifsFileInfo *smb_file_target;
1189 struct cifs_tcon *target_tcon;
1190 unsigned int xid;
1191 int rc;
1192
1193 if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
1194 return -EINVAL;
1195
1196 cifs_dbg(FYI, "clone range\n");
1197
1198 xid = get_xid();
1199
1200 if (!src_file->private_data || !dst_file->private_data) {
1201 rc = -EBADF;
1202 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1203 goto out;
1204 }
1205
1206 smb_file_target = dst_file->private_data;
1207 target_tcon = tlink_tcon(smb_file_target->tlink);
1208
1209 /*
1210 * Note: the cifs case is easier than btrfs since the server is responsible
1211 * for checking proper open modes and file type, and if it wants the
1212 * server could even support copy of a range where source == target
1213 */
1214 lock_two_nondirectories(target_inode, src_inode);
1215
1216 if (len == 0)
1217 len = src_inode->i_size - off;
1218
1219 cifs_dbg(FYI, "about to flush pages\n");
1220 /* should we flush first and last page first */
1221 truncate_inode_pages_range(&target_inode->i_data, destoff,
1222 PAGE_ALIGN(destoff + len)-1);
1223
1224 if (target_tcon->ses->server->ops->duplicate_extents)
1225 rc = target_tcon->ses->server->ops->duplicate_extents(xid,
1226 smb_file_src, smb_file_target, off, len, destoff);
1227 else
1228 rc = -EOPNOTSUPP;
1229
1230 /* force revalidate of size and timestamps of target file now
1231 that target is updated on the server */
1232 CIFS_I(target_inode)->time = 0;
1233 /* although unlocking in the reverse order from locking is not
1234 strictly necessary here it is a little cleaner to be consistent */
1235 unlock_two_nondirectories(src_inode, target_inode);
1236 out:
1237 free_xid(xid);
1238 return rc < 0 ? rc : len;
1239 }
1240
1241 ssize_t cifs_file_copychunk_range(unsigned int xid,
1242 struct file *src_file, loff_t off,
1243 struct file *dst_file, loff_t destoff,
1244 size_t len, unsigned int flags)
1245 {
1246 struct inode *src_inode = file_inode(src_file);
1247 struct inode *target_inode = file_inode(dst_file);
1248 struct cifsFileInfo *smb_file_src;
1249 struct cifsFileInfo *smb_file_target;
1250 struct cifs_tcon *src_tcon;
1251 struct cifs_tcon *target_tcon;
1252 ssize_t rc;
1253
1254 cifs_dbg(FYI, "copychunk range\n");
1255
1256 if (!src_file->private_data || !dst_file->private_data) {
1257 rc = -EBADF;
1258 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1259 goto out;
1260 }
1261
1262 rc = -EXDEV;
1263 smb_file_target = dst_file->private_data;
1264 smb_file_src = src_file->private_data;
1265 src_tcon = tlink_tcon(smb_file_src->tlink);
1266 target_tcon = tlink_tcon(smb_file_target->tlink);
1267
1268 if (src_tcon->ses != target_tcon->ses) {
1269 cifs_dbg(VFS, "source and target of copy not on same server\n");
1270 goto out;
1271 }
1272
1273 rc = -EOPNOTSUPP;
1274 if (!target_tcon->ses->server->ops->copychunk_range)
1275 goto out;
1276
1277 /*
1278 * Note: the cifs case is easier than btrfs since the server is responsible
1279 * for checking proper open modes and file type, and if it wants the
1280 * server could even support copy of a range where source == target
1281 */
1282 lock_two_nondirectories(target_inode, src_inode);
1283
1284 cifs_dbg(FYI, "about to flush pages\n");
1285
1286 rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1287 off + len - 1);
1288 if (rc)
1289 goto unlock;
1290
1291 /* should we flush first and last page first */
1292 truncate_inode_pages(&target_inode->i_data, 0);
1293
1294 rc = file_modified(dst_file);
1295 if (!rc)
1296 rc = target_tcon->ses->server->ops->copychunk_range(xid,
1297 smb_file_src, smb_file_target, off, len, destoff);
1298
1299 file_accessed(src_file);
1300
1301 /* force revalidate of size and timestamps of target file now
1302 * that target is updated on the server
1303 */
1304 CIFS_I(target_inode)->time = 0;
1305
1306 unlock:
1307 /* although unlocking in the reverse order from locking is not
1308 * strictly necessary here it is a little cleaner to be consistent
1309 */
1310 unlock_two_nondirectories(src_inode, target_inode);
1311
1312 out:
1313 return rc;
1314 }
1315
1316 /*
1317 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1318 * is a dummy operation.
1319 */
1320 static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1321 {
1322 cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1323 file, datasync);
1324
1325 return 0;
1326 }
1327
1328 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1329 struct file *dst_file, loff_t destoff,
1330 size_t len, unsigned int flags)
1331 {
1332 unsigned int xid = get_xid();
1333 ssize_t rc;
1334 struct cifsFileInfo *cfile = dst_file->private_data;
1335
1336 if (cfile->swapfile) {
1337 rc = -EOPNOTSUPP;
1338 free_xid(xid);
1339 return rc;
1340 }
1341
1342 rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1343 len, flags);
1344 free_xid(xid);
1345
1346 if (rc == -EOPNOTSUPP || rc == -EXDEV)
1347 rc = generic_copy_file_range(src_file, off, dst_file,
1348 destoff, len, flags);
1349 return rc;
1350 }
1351
1352 const struct file_operations cifs_file_ops = {
1353 .read_iter = cifs_loose_read_iter,
1354 .write_iter = cifs_file_write_iter,
1355 .open = cifs_open,
1356 .release = cifs_close,
1357 .lock = cifs_lock,
1358 .flock = cifs_flock,
1359 .fsync = cifs_fsync,
1360 .flush = cifs_flush,
1361 .mmap = cifs_file_mmap,
1362 .splice_read = cifs_splice_read,
1363 .splice_write = iter_file_splice_write,
1364 .llseek = cifs_llseek,
1365 .unlocked_ioctl = cifs_ioctl,
1366 .copy_file_range = cifs_copy_file_range,
1367 .remap_file_range = cifs_remap_file_range,
1368 .setlease = cifs_setlease,
1369 .fallocate = cifs_fallocate,
1370 };
1371
1372 const struct file_operations cifs_file_strict_ops = {
1373 .read_iter = cifs_strict_readv,
1374 .write_iter = cifs_strict_writev,
1375 .open = cifs_open,
1376 .release = cifs_close,
1377 .lock = cifs_lock,
1378 .flock = cifs_flock,
1379 .fsync = cifs_strict_fsync,
1380 .flush = cifs_flush,
1381 .mmap = cifs_file_strict_mmap,
1382 .splice_read = cifs_splice_read,
1383 .splice_write = iter_file_splice_write,
1384 .llseek = cifs_llseek,
1385 .unlocked_ioctl = cifs_ioctl,
1386 .copy_file_range = cifs_copy_file_range,
1387 .remap_file_range = cifs_remap_file_range,
1388 .setlease = cifs_setlease,
1389 .fallocate = cifs_fallocate,
1390 };
1391
1392 const struct file_operations cifs_file_direct_ops = {
1393 .read_iter = cifs_direct_readv,
1394 .write_iter = cifs_direct_writev,
1395 .open = cifs_open,
1396 .release = cifs_close,
1397 .lock = cifs_lock,
1398 .flock = cifs_flock,
1399 .fsync = cifs_fsync,
1400 .flush = cifs_flush,
1401 .mmap = cifs_file_mmap,
1402 .splice_read = direct_splice_read,
1403 .splice_write = iter_file_splice_write,
1404 .unlocked_ioctl = cifs_ioctl,
1405 .copy_file_range = cifs_copy_file_range,
1406 .remap_file_range = cifs_remap_file_range,
1407 .llseek = cifs_llseek,
1408 .setlease = cifs_setlease,
1409 .fallocate = cifs_fallocate,
1410 };
1411
1412 const struct file_operations cifs_file_nobrl_ops = {
1413 .read_iter = cifs_loose_read_iter,
1414 .write_iter = cifs_file_write_iter,
1415 .open = cifs_open,
1416 .release = cifs_close,
1417 .fsync = cifs_fsync,
1418 .flush = cifs_flush,
1419 .mmap = cifs_file_mmap,
1420 .splice_read = cifs_splice_read,
1421 .splice_write = iter_file_splice_write,
1422 .llseek = cifs_llseek,
1423 .unlocked_ioctl = cifs_ioctl,
1424 .copy_file_range = cifs_copy_file_range,
1425 .remap_file_range = cifs_remap_file_range,
1426 .setlease = cifs_setlease,
1427 .fallocate = cifs_fallocate,
1428 };
1429
1430 const struct file_operations cifs_file_strict_nobrl_ops = {
1431 .read_iter = cifs_strict_readv,
1432 .write_iter = cifs_strict_writev,
1433 .open = cifs_open,
1434 .release = cifs_close,
1435 .fsync = cifs_strict_fsync,
1436 .flush = cifs_flush,
1437 .mmap = cifs_file_strict_mmap,
1438 .splice_read = cifs_splice_read,
1439 .splice_write = iter_file_splice_write,
1440 .llseek = cifs_llseek,
1441 .unlocked_ioctl = cifs_ioctl,
1442 .copy_file_range = cifs_copy_file_range,
1443 .remap_file_range = cifs_remap_file_range,
1444 .setlease = cifs_setlease,
1445 .fallocate = cifs_fallocate,
1446 };
1447
1448 const struct file_operations cifs_file_direct_nobrl_ops = {
1449 .read_iter = cifs_direct_readv,
1450 .write_iter = cifs_direct_writev,
1451 .open = cifs_open,
1452 .release = cifs_close,
1453 .fsync = cifs_fsync,
1454 .flush = cifs_flush,
1455 .mmap = cifs_file_mmap,
1456 .splice_read = direct_splice_read,
1457 .splice_write = iter_file_splice_write,
1458 .unlocked_ioctl = cifs_ioctl,
1459 .copy_file_range = cifs_copy_file_range,
1460 .remap_file_range = cifs_remap_file_range,
1461 .llseek = cifs_llseek,
1462 .setlease = cifs_setlease,
1463 .fallocate = cifs_fallocate,
1464 };
1465
1466 const struct file_operations cifs_dir_ops = {
1467 .iterate_shared = cifs_readdir,
1468 .release = cifs_closedir,
1469 .read = generic_read_dir,
1470 .unlocked_ioctl = cifs_ioctl,
1471 .copy_file_range = cifs_copy_file_range,
1472 .remap_file_range = cifs_remap_file_range,
1473 .llseek = generic_file_llseek,
1474 .fsync = cifs_dir_fsync,
1475 };
1476
1477 static void
1478 cifs_init_once(void *inode)
1479 {
1480 struct cifsInodeInfo *cifsi = inode;
1481
1482 inode_init_once(&cifsi->netfs.inode);
1483 init_rwsem(&cifsi->lock_sem);
1484 }
1485
1486 static int __init
1487 cifs_init_inodecache(void)
1488 {
1489 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1490 sizeof(struct cifsInodeInfo),
1491 0, (SLAB_RECLAIM_ACCOUNT|
1492 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
1493 cifs_init_once);
1494 if (cifs_inode_cachep == NULL)
1495 return -ENOMEM;
1496
1497 return 0;
1498 }
1499
1500 static void
1501 cifs_destroy_inodecache(void)
1502 {
1503 /*
1504 * Make sure all delayed rcu free inodes are flushed before we
1505 * destroy cache.
1506 */
1507 rcu_barrier();
1508 kmem_cache_destroy(cifs_inode_cachep);
1509 }
1510
1511 static int
1512 cifs_init_request_bufs(void)
1513 {
1514 /*
1515 * SMB2 maximum header size is bigger than CIFS one - no problems to
1516 * allocate some more bytes for CIFS.
1517 */
1518 size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
1519
1520 if (CIFSMaxBufSize < 8192) {
1521 /* The buffer size cannot be smaller than 2 * PATH_MAX since the maximum
1522 Unicode path name has to fit in any SMB/CIFS path based frame */
1523 CIFSMaxBufSize = 8192;
1524 } else if (CIFSMaxBufSize > 1024*127) {
1525 CIFSMaxBufSize = 1024 * 127;
1526 } else {
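/*
 * 0x1FE00 keeps only bits 9-16, i.e. it rounds the size down to a
 * multiple of 512 while staying within the 1024*127 cap checked above.
 */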
1527 CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
1528 }
1529 /*
1530 cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1531 CIFSMaxBufSize, CIFSMaxBufSize);
1532 */
1533 cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1534 CIFSMaxBufSize + max_hdr_size, 0,
1535 SLAB_HWCACHE_ALIGN, 0,
1536 CIFSMaxBufSize + max_hdr_size,
1537 NULL);
1538 if (cifs_req_cachep == NULL)
1539 return -ENOMEM;
1540
1541 if (cifs_min_rcv < 1)
1542 cifs_min_rcv = 1;
1543 else if (cifs_min_rcv > 64) {
1544 cifs_min_rcv = 64;
1545 cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1546 }
1547
1548 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1549 cifs_req_cachep);
1550
1551 if (cifs_req_poolp == NULL) {
1552 kmem_cache_destroy(cifs_req_cachep);
1553 return -ENOMEM;
1554 }
1555 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1556 almost all handle based requests (but not write response, nor is it
1557 sufficient for path based requests). A smaller size would have
1558 been more efficient (compacting multiple slab items on one 4k page)
1559 for the case in which debug was on, but this larger size allows
1560 more SMBs to use small buffer alloc and is still much more
1561 efficient to alloc 1 per page off the slab compared to 17K (5page)
1562 alloc of large cifs buffers even when page debugging is on */
1563 cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1564 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1565 0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1566 if (cifs_sm_req_cachep == NULL) {
1567 mempool_destroy(cifs_req_poolp);
1568 kmem_cache_destroy(cifs_req_cachep);
1569 return -ENOMEM;
1570 }
1571
1572 if (cifs_min_small < 2)
1573 cifs_min_small = 2;
1574 else if (cifs_min_small > 256) {
1575 cifs_min_small = 256;
1576 cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1577 }
1578
1579 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1580 cifs_sm_req_cachep);
1581
1582 if (cifs_sm_req_poolp == NULL) {
1583 mempool_destroy(cifs_req_poolp);
1584 kmem_cache_destroy(cifs_req_cachep);
1585 kmem_cache_destroy(cifs_sm_req_cachep);
1586 return -ENOMEM;
1587 }
1588
1589 return 0;
1590 }
1591
1592 static void
1593 cifs_destroy_request_bufs(void)
1594 {
1595 mempool_destroy(cifs_req_poolp);
1596 kmem_cache_destroy(cifs_req_cachep);
1597 mempool_destroy(cifs_sm_req_poolp);
1598 kmem_cache_destroy(cifs_sm_req_cachep);
1599 }
1600
1601 static int init_mids(void)
1602 {
1603 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1604 sizeof(struct mid_q_entry), 0,
1605 SLAB_HWCACHE_ALIGN, NULL);
1606 if (cifs_mid_cachep == NULL)
1607 return -ENOMEM;
1608
1609 /* 3 is a reasonable minimum number of simultaneous operations */
1610 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1611 if (cifs_mid_poolp == NULL) {
1612 kmem_cache_destroy(cifs_mid_cachep);
1613 return -ENOMEM;
1614 }
1615
1616 return 0;
1617 }
1618
1619 static void destroy_mids(void)
1620 {
1621 mempool_destroy(cifs_mid_poolp);
1622 kmem_cache_destroy(cifs_mid_cachep);
1623 }
1624
1625 static int __init
1626 init_cifs(void)
1627 {
1628 int rc = 0;
1629 cifs_proc_init();
1630 INIT_LIST_HEAD(&cifs_tcp_ses_list);
1631 /*
1632 * Initialize Global counters
1633 */
1634 atomic_set(&sesInfoAllocCount, 0);
1635 atomic_set(&tconInfoAllocCount, 0);
1636 atomic_set(&tcpSesNextId, 0);
1637 atomic_set(&tcpSesAllocCount, 0);
1638 atomic_set(&tcpSesReconnectCount, 0);
1639 atomic_set(&tconInfoReconnectCount, 0);
1640
1641 atomic_set(&buf_alloc_count, 0);
1642 atomic_set(&small_buf_alloc_count, 0);
1643 #ifdef CONFIG_CIFS_STATS2
1644 atomic_set(&total_buf_alloc_count, 0);
1645 atomic_set(&total_small_buf_alloc_count, 0);
1646 if (slow_rsp_threshold < 1)
1647 cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
1648 else if (slow_rsp_threshold > 32767)
1649 cifs_dbg(VFS,
1650 "slow response threshold set higher than recommended (0 to 32767)\n");
1651 #endif /* CONFIG_CIFS_STATS2 */
1652
1653 atomic_set(&mid_count, 0);
1654 GlobalCurrentXid = 0;
1655 GlobalTotalActiveXid = 0;
1656 GlobalMaxActiveXid = 0;
1657 spin_lock_init(&cifs_tcp_ses_lock);
1658 spin_lock_init(&GlobalMid_Lock);
1659
1660 cifs_lock_secret = get_random_u32();
1661
1662 if (cifs_max_pending < 2) {
1663 cifs_max_pending = 2;
1664 cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1665 } else if (cifs_max_pending > CIFS_MAX_REQ) {
1666 cifs_max_pending = CIFS_MAX_REQ;
1667 cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1668 CIFS_MAX_REQ);
1669 }
1670
1671 cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1672 if (!cifsiod_wq) {
1673 rc = -ENOMEM;
1674 goto out_clean_proc;
1675 }
1676
1677 /*
1678 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
1679 * so that we don't launch too many worker threads but
1680 * Documentation/core-api/workqueue.rst recommends setting it to 0
1681 */
1682
1683 /* WQ_UNBOUND allows decrypt tasks to run on any CPU */
1684 decrypt_wq = alloc_workqueue("smb3decryptd",
1685 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1686 if (!decrypt_wq) {
1687 rc = -ENOMEM;
1688 goto out_destroy_cifsiod_wq;
1689 }
1690
1691 fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
1692 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1693 if (!fileinfo_put_wq) {
1694 rc = -ENOMEM;
1695 goto out_destroy_decrypt_wq;
1696 }
1697
1698 cifsoplockd_wq = alloc_workqueue("cifsoplockd",
1699 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1700 if (!cifsoplockd_wq) {
1701 rc = -ENOMEM;
1702 goto out_destroy_fileinfo_put_wq;
1703 }
1704
1705 deferredclose_wq = alloc_workqueue("deferredclose",
1706 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1707 if (!deferredclose_wq) {
1708 rc = -ENOMEM;
1709 goto out_destroy_cifsoplockd_wq;
1710 }
1711
1712 rc = cifs_init_inodecache();
1713 if (rc)
1714 goto out_destroy_deferredclose_wq;
1715
1716 rc = init_mids();
1717 if (rc)
1718 goto out_destroy_inodecache;
1719
1720 rc = cifs_init_request_bufs();
1721 if (rc)
1722 goto out_destroy_mids;
1723
1724 #ifdef CONFIG_CIFS_DFS_UPCALL
1725 rc = dfs_cache_init();
1726 if (rc)
1727 goto out_destroy_request_bufs;
1728 #endif /* CONFIG_CIFS_DFS_UPCALL */
1729 #ifdef CONFIG_CIFS_UPCALL
1730 rc = init_cifs_spnego();
1731 if (rc)
1732 goto out_destroy_dfs_cache;
1733 #endif /* CONFIG_CIFS_UPCALL */
1734 #ifdef CONFIG_CIFS_SWN_UPCALL
1735 rc = cifs_genl_init();
1736 if (rc)
1737 goto out_register_key_type;
1738 #endif /* CONFIG_CIFS_SWN_UPCALL */
1739
1740 rc = init_cifs_idmap();
1741 if (rc)
1742 goto out_cifs_swn_init;
1743
1744 rc = register_filesystem(&cifs_fs_type);
1745 if (rc)
1746 goto out_init_cifs_idmap;
1747
1748 rc = register_filesystem(&smb3_fs_type);
1749 if (rc) {
1750 unregister_filesystem(&cifs_fs_type);
1751 goto out_init_cifs_idmap;
1752 }
1753
1754 return 0;
1755
1756 out_init_cifs_idmap:
1757 exit_cifs_idmap();
1758 out_cifs_swn_init:
1759 #ifdef CONFIG_CIFS_SWN_UPCALL
1760 cifs_genl_exit();
1761 out_register_key_type:
1762 #endif
1763 #ifdef CONFIG_CIFS_UPCALL
1764 exit_cifs_spnego();
1765 out_destroy_dfs_cache:
1766 #endif
1767 #ifdef CONFIG_CIFS_DFS_UPCALL
1768 dfs_cache_destroy();
1769 out_destroy_request_bufs:
1770 #endif
1771 cifs_destroy_request_bufs();
1772 out_destroy_mids:
1773 destroy_mids();
1774 out_destroy_inodecache:
1775 cifs_destroy_inodecache();
1776 out_destroy_deferredclose_wq:
1777 destroy_workqueue(deferredclose_wq);
1778 out_destroy_cifsoplockd_wq:
1779 destroy_workqueue(cifsoplockd_wq);
1780 out_destroy_fileinfo_put_wq:
1781 destroy_workqueue(fileinfo_put_wq);
1782 out_destroy_decrypt_wq:
1783 destroy_workqueue(decrypt_wq);
1784 out_destroy_cifsiod_wq:
1785 destroy_workqueue(cifsiod_wq);
1786 out_clean_proc:
1787 cifs_proc_clean();
1788 return rc;
1789 }
1790
1791 static void __exit
1792 exit_cifs(void)
1793 {
1794 cifs_dbg(NOISY, "exit_smb3\n");
1795 unregister_filesystem(&cifs_fs_type);
1796 unregister_filesystem(&smb3_fs_type);
1797 cifs_dfs_release_automount_timer();
1798 exit_cifs_idmap();
1799 #ifdef CONFIG_CIFS_SWN_UPCALL
1800 cifs_genl_exit();
1801 #endif
1802 #ifdef CONFIG_CIFS_UPCALL
1803 exit_cifs_spnego();
1804 #endif
1805 #ifdef CONFIG_CIFS_DFS_UPCALL
1806 dfs_cache_destroy();
1807 #endif
1808 cifs_destroy_request_bufs();
1809 destroy_mids();
1810 cifs_destroy_inodecache();
1811 destroy_workqueue(deferredclose_wq);
1812 destroy_workqueue(cifsoplockd_wq);
1813 destroy_workqueue(decrypt_wq);
1814 destroy_workqueue(fileinfo_put_wq);
1815 destroy_workqueue(cifsiod_wq);
1816 cifs_proc_clean();
1817 }
1818
1819 MODULE_AUTHOR("Steve French");
1820 MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
1821 MODULE_DESCRIPTION
1822 ("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
1823 "also older servers complying with the SNIA CIFS Specification)");
1824 MODULE_VERSION(CIFS_VERSION);
1825 MODULE_SOFTDEP("pre: ecb");
1826 MODULE_SOFTDEP("pre: hmac");
1827 MODULE_SOFTDEP("pre: md5");
1828 MODULE_SOFTDEP("pre: nls");
1829 MODULE_SOFTDEP("pre: aes");
1830 MODULE_SOFTDEP("pre: cmac");
1831 MODULE_SOFTDEP("pre: sha256");
1832 MODULE_SOFTDEP("pre: sha512");
1833 MODULE_SOFTDEP("pre: aead2");
1834 MODULE_SOFTDEP("pre: ccm");
1835 MODULE_SOFTDEP("pre: gcm");
1836 module_init(init_cifs)
1837 module_exit(exit_cifs)
1838