// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"
#include "lops.h"

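/*
 * Workqueues used below: gfs2_freeze_wq drives the freeze/thaw work and
 * gfs2_control_wq handles recovery control work.  Both are presumably
 * allocated at module init elsewhere in gfs2.
 */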
struct workqueue_struct *gfs2_freeze_wq;

extern struct workqueue_struct *gfs2_control_wq;

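/**
 * gfs2_ail_error - report an inconsistent AIL buffer
 * @gl: the glock the buffer belongs to
 * @bh: the offending buffer
 *
 * Logs the buffer, page, and glock state, then requests a delayed
 * withdraw of the filesystem.
 */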
static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	fs_err(sdp,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_folio->mapping, bh->b_folio->flags);
	fs_err(sdp, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm(sdp, "AIL error\n");
	gfs2_withdraw_delayed(sdp);
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: Number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
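		/*
		 * Busy buffers are expected when called from fsync; in any
		 * other context they indicate an AIL inconsistency.
		 */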
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}


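/**
 * gfs2_ail_empty_gl - empty out the AIL list for a glock
 * @gl: the glock
 *
 * Revokes all buffers on the glock's AIL list inside a new transaction,
 * then flushes the log.  If the AIL list is already empty, the log is
 * still flushed or waited on as needed so that any outstanding revokes
 * make it to disk.
 */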
static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_trans tr;
	unsigned int revokes;
	int ret;

	revokes = atomic_read(&gl->gl_ail_count);

	if (!revokes) {
		bool have_revokes;
		bool log_in_flight;

		/*
		 * We have nothing on the AIL, but there could be revokes on
		 * the sdp revoke queue, in which case we still want to flush
		 * the log and wait for it to finish.
		 *
		 * If the sdp revoke list is empty too, we might still have an
		 * I/O outstanding for writing revokes, so we should wait for
		 * it before returning.
		 *
		 * If none of these conditions are true, our revokes are all
		 * flushed and we can return.
		 */
		gfs2_log_lock(sdp);
		have_revokes = !list_empty(&sdp->sd_log_revokes);
		log_in_flight = atomic_read(&sdp->sd_log_in_flight);
		gfs2_log_unlock(sdp);
		if (have_revokes)
			goto flush;
		if (log_in_flight)
			log_flush_wait(sdp);
		return 0;
	}

	memset(&tr, 0, sizeof(tr));
	set_bit(TR_ONSTACK, &tr.tr_flags);
	ret = __gfs2_trans_begin(&tr, sdp, 0, revokes, _RET_IP_);
	if (ret)
		goto flush;
	__gfs2_ail_flush(gl, false, revokes);
	gfs2_trans_end(sdp);

flush:
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_EMPTY_GL);
	return 0;
}

void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	int ret;

	if (!revokes)
		return;

	ret = gfs2_trans_begin(sdp, 0, revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_FLUSH);
}

/**
 * gfs2_rgrp_metasync - sync out the metadata of a resource group
 * @gl: the glock protecting the resource group
 *
 */

static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *metamapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
	int error;

	filemap_fdatawrite_range(metamapping, start, end);
	error = filemap_fdatawait_range(metamapping, start, end);
	WARN_ON_ONCE(error && !gfs2_withdrawn(sdp));
	mapping_set_error(metamapping, error);
	if (error)
		gfs2_io_error(sdp);
	return error;
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to caller to demote/unlock the glock until I/O is complete.
 */

static int rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	int error;

	if (!rgd || !test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return 0;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_RGRP_GO_SYNC);
	error = gfs2_rgrp_metasync(gl);
	if (!error)
		error = gfs2_ail_empty_gl(gl);
	gfs2_free_clones(rgd);
	return error;
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start, end;

	if (!rgd)
		return;
	start = (rgd->rd_addr * bsize) & PAGE_MASK;
	end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
	gfs2_rgrp_brelse(rgd);
	WARN_ON_ONCE(!(flags & DIO_METADATA));
	truncate_inode_pages_range(mapping, start, end);
}

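/**
 * gfs2_rgrp_go_dump - print information about a resource group
 * @seq: The iterator
 * @gl: The glock
 * @fs_id_buf: file system id (may be empty)
 */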
static void gfs2_rgrp_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
			      const char *fs_id_buf)
{
	struct gfs2_rgrpd *rgd = gl->gl_object;

	if (rgd)
		gfs2_rgrp_dump(seq, rgd, fs_id_buf);
}

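/**
 * gfs2_glock2inode - safely dereference a glock's inode
 * @gl: the glock
 *
 * Sets GIF_GLOP_PENDING on the inode under the lockref lock so that the
 * inode is not torn down while a glock operation is in progress; the
 * flag is cleared again via gfs2_clear_glop_pending().
 *
 * Returns: the inode attached to @gl, or NULL
 */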
static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip)
		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
	spin_unlock(&gl->gl_lockref.lock);
	return ip;
}

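/**
 * gfs2_glock2rgrp - safely dereference a glock's resource group
 * @gl: the glock
 *
 * Returns: the resource group attached to @gl, or NULL
 */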
struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	spin_unlock(&gl->gl_lockref.lock);

	return rgd;
}

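/**
 * gfs2_clear_glop_pending - clear GIF_GLOP_PENDING and wake up any waiters
 * @ip: the inode, or NULL
 */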
static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (!ip)
		return;

	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}

/**
 * gfs2_inode_metasync - sync out the metadata of an inode
 * @gl: the glock protecting the inode
 *
 */
int gfs2_inode_metasync(struct gfs2_glock *gl)
{
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
	if (error)
		gfs2_io_error(gl->gl_name.ln_sbd);
	return error;
}

/**
 * inode_go_sync - Sync the dirty metadata of an inode
 * @gl: the glock protecting the inode
 *
 */

static int inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error = 0, ret;

	if (isreg) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	ret = gfs2_inode_metasync(gl);
	if (!error)
		error = ret;
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
	return error;
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_INODE_GO_INVAL);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);

	gfs2_clear_glop_pending(ip);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;

	return 1;
}

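/**
 * gfs2_dinode_in - copy an on-disk dinode into the incore inode
 * @ip: The GFS2 inode
 * @buf: the buffer containing the on-disk dinode
 *
 * Returns: 0 on success, -EIO if the dinode is inconsistent
 */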
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec64 atime;
	u16 height, depth;
	umode_t mode = be32_to_cpu(str->di_mode);
	struct inode *inode = &ip->i_inode;
	bool is_new = inode->i_state & I_NEW;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	if (unlikely(!is_new && inode_wrong_type(inode, mode)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	inode->i_mode = mode;
	if (is_new) {
		inode->i_rdev = 0;
		switch (mode & S_IFMT) {
		case S_IFBLK:
		case S_IFCHR:
			inode->i_rdev = MKDEV(be32_to_cpu(str->di_major),
					      be32_to_cpu(str->di_minor));
			break;
		}
	}

	i_uid_write(inode, be32_to_cpu(str->di_uid));
	i_gid_write(inode, be32_to_cpu(str->di_gid));
	set_nlink(inode, be32_to_cpu(str->di_nlink));
	i_size_write(inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
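	/* Keep the more recent of the incore and on-disk atimes */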
	if (timespec64_compare(&inode->i_atime, &atime) < 0)
		inode->i_atime = atime;
	inode->i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	inode->i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	inode->i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	inode->i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (gfs2_is_stuffed(ip) && inode->i_size > gfs2_max_stuffed_size(ip))
		goto corrupt;

	if (S_ISREG(inode->i_mode))
		gfs2_set_aops(inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	return error;
}

/**
 * inode_go_instantiate - read in an inode if necessary
 * @gl: The glock
 *
 * Returns: errno
 */

static int inode_go_instantiate(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;

	if (!ip) /* no inode to populate - read it in later */
		return 0;

	return gfs2_inode_refresh(ip);
}

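/**
 * inode_go_held - prepare an inode for use under a granted holder
 * @gh: The glock holder
 *
 * Waits for direct I/O to drain (unless the holder is deferred) and,
 * when the glock is held exclusively, resumes any interrupted truncate.
 *
 * Returns: errno
 */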
static int inode_go_held(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip) /* no inode to populate - read it in later */
		return 0;

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE))
		error = gfs2_truncatei_resume(ip);

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock
 * @fs_id_buf: file system id (may be empty)
 *
 */

static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
			  const char *fs_id_buf)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct inode *inode = &ip->i_inode;
	unsigned long nrpages;

	if (ip == NULL)
		return;

	xa_lock_irq(&inode->i_data.i_pages);
	nrpages = inode->i_data.nrpages;
	xa_unlock_irq(&inode->i_data.i_pages);

	gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
		       "p:%lu\n", fs_id_buf,
		  (unsigned long long)ip->i_no_formal_ino,
		  (unsigned long long)ip->i_no_addr,
		  IF2DT(ip->i_inode.i_mode), ip->i_flags,
		  (unsigned int)ip->i_diskflags,
		  (unsigned long long)i_size_read(inode), nrpages);
}

/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 */

static int freeze_go_sync(struct gfs2_glock *gl)
{
	int error = 0;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/*
	 * We need to check gl_state == LM_ST_SHARED here and not gl_req ==
	 * LM_ST_EXCLUSIVE. That's because when any node does a freeze,
	 * all the nodes should have the freeze glock in SH mode and they all
	 * call do_xmote: One for EX and the others for UN. They ALL must
	 * freeze locally, and they ALL must queue freeze work. The freeze_work
	 * calls freeze_func, which tries to reacquire the freeze glock in SH,
	 * effectively waiting for the thaw on the node that holds it in EX.
	 * Once thawed, the work func acquires the freeze glock in
	 * SH and everybody goes back to thawed.
	 */
	if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
	    !test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
		error = freeze_super(sdp->sd_vfs);
		if (error) {
			fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
				error);
			if (gfs2_withdrawn(sdp)) {
				atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
				return 0;
			}
			gfs2_assert_withdraw(sdp, 0);
		}
		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
		if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
				       GFS2_LFC_FREEZE_GO_SYNC);
		else /* read-only mounts */
			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
	}
	return 0;
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 */
static int freeze_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
		if (gfs2_assert_withdraw_delayed(sdp, !error))
			return error;
		if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags &
						 GFS2_LOG_HEAD_UNMOUNT))
			return -EIO;
		sdp->sd_log_sequence = head.lh_sequence + 1;
		gfs2_log_pointers_init(sdp, head.lh_blkno);
	}
	return 0;
}

/**
 * freeze_go_demote_ok - Check to see if it's ok to demote the freeze glock
 * @gl: the glock
 *
 * Always returns 0
 */

static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_lockref.lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!remote || sb_rdonly(sdp->sd_vfs) ||
	    test_bit(SDF_DEACTIVATING, &sdp->sd_flags))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (!gfs2_queue_try_to_evict(gl))
			gl->gl_lockref.count--;
	}
}

/**
 * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
 * @gl: glock being freed
 *
 * For now, this is only used for the journal inode glock. In withdraw
 * situations, we need to wait for the glock to be freed so that we know
 * other nodes may proceed with recovery / journal replay.
 */
static void inode_go_free(struct gfs2_glock *gl)
{
	/* Note that we cannot reference gl_object because it's already set
	 * to NULL by this point in its lifecycle. */
	if (!test_bit(GLF_FREEING, &gl->gl_flags))
		return;
	clear_bit_unlock(GLF_FREEING, &gl->gl_flags);
	wake_up_bit(&gl->gl_flags, GLF_FREEING);
}

/**
 * nondisk_go_callback - used to signal when a node did a withdraw
 * @gl: the nondisk glock
 * @remote: true if this came from a different cluster node
 *
 */
static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/* Ignore the callback unless it's from another node, and it's the
	   live lock. */
	if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
		return;

	/* First order of business is to cancel the demote request. We don't
	 * really want to demote a nondisk glock. At best it's just to inform
	 * us of another node's withdraw. We'll keep it in SH mode. */
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);

	/* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */
	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
	    test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
	    test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
		return;

	/* We only care when a node wants us to unlock, because that means
	 * they want a journal recovered. */
	if (gl->gl_demote_state != LM_ST_UNLOCKED)
		return;

	if (sdp->sd_args.ar_spectator) {
		fs_warn(sdp, "Spectator node cannot recover journals.\n");
		return;
	}

	fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
	set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
	/*
	 * We can't call remote_withdraw directly here or gfs2_recover_journal
	 * because this is called from the glock unlock function and the
	 * remote_withdraw needs to enqueue and dequeue the same "live" glock
	 * we were called from. So we queue it to the control work queue in
	 * lock_dlm.
	 */
	queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
}

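/*
 * The glock operations tables, one per lock type.  Operations left
 * unset for a given type are skipped (the glock core checks for NULL
 * before calling them).
 */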
const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_instantiate = inode_go_instantiate,
	.go_held = inode_go_held,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB,
	.go_free = inode_go_free,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_instantiate = gfs2_rgrp_go_instantiate,
	.go_dump = gfs2_rgrp_go_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_sync = freeze_go_sync,
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_demote_ok = freeze_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_dump = inode_go_dump,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
	.go_subclass = 1,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
	.go_callback = nondisk_go_callback,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};