// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef	__XFS_LOG_PRIV_H__
#define __XFS_LOG_PRIV_H__

struct xfs_buf;
struct xlog;
struct xlog_ticket;
struct xfs_mount;

/*
 * get client id from packed copy.
 *
 * this hack is here because the xlog_pack code copies four bytes
 * of xlog_op_header containing the fields oh_clientid, oh_flags
 * and oh_res2 into the packed copy.
 *
 * later on this four byte chunk is treated as an int and the
 * client id is pulled out.
 *
 * this has endian issues, of course.
 */
static inline uint xlog_get_client_id(__be32 i)
{
	return be32_to_cpu(i) >> 24;
}
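
/*
 * Example: oh_clientid is the first byte of that four byte chunk, so
 * after be32_to_cpu() it ends up in the most significant byte. With
 * oh_clientid = XFS_TRANSACTION (0x69), the word cracks as 0x69xxxxxx
 * and the shift by 24 recovers 0x69.
 */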

/*
 * In core log state
 */
enum xlog_iclog_state {
	XLOG_STATE_ACTIVE,	/* Current IC log being written to */
	XLOG_STATE_WANT_SYNC,	/* Want to sync this iclog; no more writes */
	XLOG_STATE_SYNCING,	/* This IC log is syncing */
	XLOG_STATE_DONE_SYNC,	/* Done syncing to disk */
	XLOG_STATE_CALLBACK,	/* Callback functions now */
	XLOG_STATE_DIRTY,	/* Dirty IC log, not ready for ACTIVE status */
};

#define XLOG_STATE_STRINGS \
	{ XLOG_STATE_ACTIVE,	"XLOG_STATE_ACTIVE" }, \
	{ XLOG_STATE_WANT_SYNC,	"XLOG_STATE_WANT_SYNC" }, \
	{ XLOG_STATE_SYNCING,	"XLOG_STATE_SYNCING" }, \
	{ XLOG_STATE_DONE_SYNC,	"XLOG_STATE_DONE_SYNC" }, \
	{ XLOG_STATE_CALLBACK,	"XLOG_STATE_CALLBACK" }, \
	{ XLOG_STATE_DIRTY,	"XLOG_STATE_DIRTY" }

/*
 * In core log flags
 */
#define XLOG_ICL_NEED_FLUSH	(1u << 0)	/* iclog needs REQ_PREFLUSH */
#define XLOG_ICL_NEED_FUA	(1u << 1)	/* iclog needs REQ_FUA */

#define XLOG_ICL_STRINGS \
	{ XLOG_ICL_NEED_FLUSH,	"XLOG_ICL_NEED_FLUSH" }, \
	{ XLOG_ICL_NEED_FUA,	"XLOG_ICL_NEED_FUA" }


/*
 * Log ticket flags
 */
#define XLOG_TIC_PERM_RESERV	(1u << 0)	/* permanent reservation */

#define XLOG_TIC_FLAGS \
	{ XLOG_TIC_PERM_RESERV,	"XLOG_TIC_PERM_RESERV" }

/*
 * Below are states for covering allocation transactions.
 * By covering, we mean changing the h_tail_lsn in the last on-disk
 * log write such that no allocation transactions will be re-done during
 * recovery after a system crash. Recovery starts at the last on-disk
 * log write.
 *
 * These states are used to insert dummy log entries to cover
 * space allocation transactions which can undo non-transactional changes
 * after a crash. Writes to a file with space
 * already allocated do not result in any transactions. Allocations
 * might include space beyond the EOF. So if we just push the EOF a
 * little, the last transaction for the file could contain the wrong
 * size. If there is no file system activity, after an allocation
 * transaction, and the system crashes, the allocation transaction
 * will get replayed and the file will be truncated. This could
 * be hours/days/... after the allocation occurred.
 *
 * The fix for this is to do two dummy transactions when the
 * system is idle. We need two dummy transactions because the h_tail_lsn
 * in the log record header needs to point beyond the last possible
 * non-dummy transaction. The first dummy changes the h_tail_lsn to
 * the first transaction before the dummy. The second dummy causes
 * h_tail_lsn to point to the first dummy. Recovery starts at h_tail_lsn.
 *
 * These dummy transactions get committed when everything
 * is idle (after there has been some activity).
 *
 * There are 5 states used to control this.
 *
 *  IDLE -- no logging has been done on the file system or
 *		we are done covering previous transactions.
 *  NEED -- logging has occurred and we need a dummy transaction
 *		when the log becomes idle.
 *  DONE -- we were in the NEED state and have committed a dummy
 *		transaction.
 *  NEED2 -- we detected that a dummy transaction has gone to the
 *		on disk log with no other transactions.
 *  DONE2 -- we committed a dummy transaction when in the NEED2 state.
 *
 * There are two places where we switch states:
 *
 * 1.) In xfs_sync, when we detect an idle log and are in NEED or NEED2.
 *	We commit the dummy transaction and switch to DONE or DONE2,
 *	respectively. In all other states, we don't do anything.
 *
 * 2.) When we finish writing the on-disk log (xlog_state_clean_log).
 *
 *	No matter what state we are in, if this isn't the dummy
 *	transaction going out, the next state is NEED.
 *	So, if we aren't in the DONE or DONE2 states, the next state
 *	is NEED. We can't be finishing a write of the dummy record
 *	unless it was committed and the state switched to DONE or DONE2.
 *
 *	If we are in the DONE state and this was a write of the
 *		dummy transaction, we move to NEED2.
 *
 *	If we are in the DONE2 state and this was a write of the
 *		dummy transaction, we move to IDLE.
 *
 *
 * Writing only one dummy transaction can get appended to
 * one file space allocation. When this happens, the log recovery
 * code replays the space allocation and a file could be truncated.
 * This is why we have the NEED2 and DONE2 states before going idle.
 */

#define XLOG_STATE_COVER_IDLE	0
#define XLOG_STATE_COVER_NEED	1
#define XLOG_STATE_COVER_DONE	2
#define XLOG_STATE_COVER_NEED2	3
#define XLOG_STATE_COVER_DONE2	4

#define XLOG_COVER_OPS		5
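
/*
 * Illustrative sketch (hypothetical helper, not part of the real
 * header): how rule 2 above advances the covering state when an
 * on-disk log write completes. The real logic lives in the log state
 * machine in xfs_log.c; this is only a simplified rendering.
 */
static inline int
xlog_covered_state_sketch(
	int	prev_state,
	bool	was_dummy_write)
{
	/* Any non-dummy write restarts covering from NEED. */
	if (!was_dummy_write)
		return XLOG_STATE_COVER_NEED;

	switch (prev_state) {
	case XLOG_STATE_COVER_DONE:
		return XLOG_STATE_COVER_NEED2;	/* first dummy hit the disk */
	case XLOG_STATE_COVER_DONE2:
		return XLOG_STATE_COVER_IDLE;	/* second dummy hit the disk */
	default:
		return XLOG_STATE_COVER_NEED;
	}
}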

typedef struct xlog_ticket {
	struct list_head	t_queue;	/* reserve/write queue */
	struct task_struct	*t_task;	/* task that owns this ticket */
	xlog_tid_t		t_tid;		/* transaction identifier */
	atomic_t		t_ref;		/* ticket reference count */
	int			t_curr_res;	/* current reservation */
	int			t_unit_res;	/* unit reservation */
	char			t_ocnt;		/* original unit count */
	char			t_cnt;		/* current unit count */
	uint8_t			t_flags;	/* properties of reservation */
	int			t_iclog_hdrs;	/* iclog hdrs in t_curr_res */
} xlog_ticket_t;

/*
 * - A log record header is 512 bytes.  There is plenty of room to grow the
 *	xlog_rec_header_t into the reserved space.
 * - ic_data follows, so a write to disk can start at the beginning of
 *	the iclog.
 * - ic_force_wait is used to implement synchronous forcing of the iclog to disk.
 * - ic_next is the pointer to the next iclog in the ring.
 * - ic_log is a pointer back to the global log structure.
 * - ic_size is the full size of the log buffer, minus the cycle headers.
 * - ic_offset is the current number of bytes written to in this iclog.
 * - ic_refcnt is bumped when someone is writing to the log.
 * - ic_state is the state of the iclog.
 *
 * Because of cacheline contention on large machines, we need to separate
 * various resources onto different cachelines. To start with, make the
 * structure cacheline aligned. The following fields can be contended on
 * by independent processes:
 *
 *	- ic_callbacks
 *	- ic_refcnt
 *	- fields protected by the global l_icloglock
 *
 * so we need to ensure that these fields are located in separate cachelines.
 * We'll put all the read-only and l_icloglock fields in the first cacheline,
 * and move everything else out to subsequent cachelines.
 */
typedef struct xlog_in_core {
	wait_queue_head_t	ic_force_wait;
	wait_queue_head_t	ic_write_wait;
	struct xlog_in_core	*ic_next;
	struct xlog_in_core	*ic_prev;
	struct xlog		*ic_log;
	u32			ic_size;
	u32			ic_offset;
	enum xlog_iclog_state	ic_state;
	unsigned int		ic_flags;
	void			*ic_datap;	/* pointer to iclog data */
	struct list_head	ic_callbacks;

	/* reference counts need their own cacheline */
	atomic_t		ic_refcnt ____cacheline_aligned_in_smp;
	xlog_in_core_2_t	*ic_data;
#define ic_header	ic_data->hic_header
#ifdef DEBUG
	bool			ic_fail_crc : 1;
#endif
	struct semaphore	ic_sema;
	struct work_struct	ic_end_io_work;
	struct bio		ic_bio;
	struct bio_vec		ic_bvec[];
} xlog_in_core_t;

/*
 * The CIL context is used to aggregate per-transaction details as well as be
 * passed to the iclog for checkpoint post-commit processing.  After being
 * passed to the iclog, another context needs to be allocated for tracking the
 * next set of transactions to be aggregated into a checkpoint.
 */
struct xfs_cil;

struct xfs_cil_ctx {
	struct xfs_cil		*cil;
	xfs_csn_t		sequence;	/* chkpt sequence # */
	xfs_lsn_t		start_lsn;	/* first LSN of chkpt commit */
	xfs_lsn_t		commit_lsn;	/* chkpt commit record lsn */
	struct xlog_in_core	*commit_iclog;
	struct xlog_ticket	*ticket;	/* chkpt ticket */
	atomic_t		space_used;	/* aggregate size of regions */
	struct list_head	busy_extents;	/* busy extents in chkpt */
	struct list_head	log_items;	/* log items in chkpt */
	struct list_head	lv_chain;	/* logvecs being pushed */
	struct list_head	iclog_entry;
	struct list_head	committing;	/* ctx committing list */
	struct work_struct	discard_endio_work;
	struct work_struct	push_work;
	atomic_t		order_id;
};

/*
 * Per-cpu CIL tracking items
 */
struct xlog_cil_pcp {
	int32_t			space_used;
	uint32_t		space_reserved;
	struct list_head	busy_extents;
	struct list_head	log_items;
};

/*
 * Committed Item List structure
 *
 * This structure is used to track log items that have been committed but not
 * yet written into the log. It is used only when the delayed logging mount
 * option is enabled.
 *
 * This structure tracks the list of committing checkpoint contexts so
 * we can avoid the problem of having to hold out new transactions during a
 * flush until we have the commit record LSN of the checkpoint. We can
 * traverse the list of committing contexts in xlog_cil_force_seq() to find a
 * sequence match and extract the commit LSN directly from there. If the
 * checkpoint is still in the process of committing, we can block waiting for
 * the commit LSN to be determined as well. This should make synchronous
 * operations almost as efficient as the old logging methods.
 */
struct xfs_cil {
	struct xlog		*xc_log;
	unsigned long		xc_flags;
	atomic_t		xc_iclog_hdrs;
	struct workqueue_struct	*xc_push_wq;

	struct rw_semaphore	xc_ctx_lock ____cacheline_aligned_in_smp;
	struct xfs_cil_ctx	*xc_ctx;

	spinlock_t		xc_push_lock ____cacheline_aligned_in_smp;
	xfs_csn_t		xc_push_seq;
	bool			xc_push_commit_stable;
	struct list_head	xc_committing;
	wait_queue_head_t	xc_commit_wait;
	wait_queue_head_t	xc_start_wait;
	xfs_csn_t		xc_current_sequence;
	wait_queue_head_t	xc_push_wait;	/* background push throttle */

	void __percpu		*xc_pcp;	/* percpu CIL structures */
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head	xc_pcp_list;
#endif
} ____cacheline_aligned_in_smp;

/* xc_flags bit values */
#define	XLOG_CIL_EMPTY		1
#define XLOG_CIL_PCP_SPACE	2
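
/*
 * Illustrative sketch (hypothetical helper, not part of the real
 * header): how the committing list described above can be searched
 * for a checkpoint by sequence number. The real traversal is done by
 * the CIL push/force code; a caller would hold xc_push_lock.
 */
static inline struct xfs_cil_ctx *
xlog_cil_find_committing_ctx_sketch(
	struct xfs_cil	*cil,
	xfs_csn_t	sequence)
{
	struct xfs_cil_ctx	*ctx;

	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		if (ctx->sequence == sequence)
			return ctx;
	}
	return NULL;
}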

/*
 * The amount of log space we allow the CIL to aggregate is difficult to size.
 * Whatever we choose, we have to make sure we can get a reservation for the
 * log space effectively, that it is large enough to capture sufficient
 * relogging to reduce log buffer IO significantly, but it is not too large for
 * the log or induces too much latency when writing out through the iclogs. We
 * track both space consumed and the number of vectors in the checkpoint
 * context, so we need to decide which to use for limiting.
 *
 * Every log buffer we write out during a push needs a header reserved, which
 * is at least one sector and more for v2 logs. Hence we need a reservation of
 * at least 512 bytes per 32k of log space just for the LR headers. That means
 * 16KB of reservation per megabyte of delayed logging space we will consume,
 * plus various headers.  The number of headers will vary based on the number
 * of I/O vectors, so limiting on a specific number of vectors is going to
 * result in transactions of varying size. IOWs, it is more consistent to
 * track and limit space consumed in the log rather than by the number of
 * objects being logged in order to prevent checkpoint ticket overruns.
 *
 * Further, use of static reservations through the log grant mechanism is
 * problematic. It introduces a lot of complexity (e.g. reserve grant vs write
 * grant) and a significant deadlock potential because regranting write space
 * can block on log pushes. Hence if we have to regrant log space during a log
 * push, we can deadlock.
 *
 * However, we can avoid this by use of a dynamic "reservation stealing"
 * technique during transaction commit whereby unused reservation space in the
 * transaction ticket is transferred to the CIL ctx commit ticket to cover the
 * space needed by the checkpoint transaction. This means that we never need to
 * specifically reserve space for the CIL checkpoint transaction, nor do we
 * need to regrant space once the checkpoint completes. This also means the
 * checkpoint transaction ticket is specific to the checkpoint context, rather
 * than the CIL itself.
 *
 * With dynamic reservations, we can effectively make up arbitrary limits for
 * the checkpoint size so long as they don't violate any other size rules.
 * Recovery imposes a rule that no transaction exceed half the log, so we are
 * limited by that.  Furthermore, the log transaction reservation subsystem
 * tries to keep 25% of the log free, so we need to keep below that limit or we
 * risk running out of free log space to start any new transactions.
 *
 * In order to keep background CIL push efficient, we only need to ensure the
 * CIL is large enough to maintain sufficient in-memory relogging to avoid
 * repeated physical writes of frequently modified metadata. If we allow the CIL
 * to grow to a substantial fraction of the log, then we may be pinning hundreds
 * of megabytes of metadata in memory until the CIL flushes. This can cause
 * issues when we are running low on memory - pinned memory cannot be reclaimed,
 * and the CIL consumes a lot of memory. Hence we need to set an upper physical
 * size limit for the CIL that limits the maximum amount of memory pinned by the
 * CIL but does not limit performance by reducing relogging efficiency
 * significantly.
 *
 * As such, the CIL push threshold ends up being the smaller of two thresholds:
 * - a threshold large enough that it allows CIL to be pushed and progress to be
 *   made without excessive blocking of incoming transaction commits. This is
 *   defined to be 12.5% of the log space - half the 25% push threshold of the
 *   AIL.
 * - small enough that it doesn't pin excessive amounts of memory but maintains
 *   close to peak relogging efficiency. This is defined to be 16x the iclog
 *   buffer window (32MB) as measurements have shown this to be roughly the
 *   point of diminishing performance increases under highly concurrent
 *   modification workloads.
 *
 * To prevent the CIL from overflowing upper commit size bounds, we introduce a
 * new threshold at which we block committing transactions until the background
 * CIL commit commences and switches to a new context. While this is not a hard
 * limit, it forces the process committing a transaction to the CIL to block and
 * yield the CPU, giving the CIL push work a chance to be scheduled and start
 * work. This prevents a process running lots of transactions from overfilling
 * the CIL because it is not yielding the CPU. We set the blocking limit at
 * twice the background push space threshold so we keep in line with the AIL
 * push thresholds.
 *
 * Note: this is not a -hard- limit as blocking is applied after the transaction
 * is inserted into the CIL and the push has been triggered. It is largely a
 * throttling mechanism that allows the CIL push to be scheduled and run. A hard
 * limit will be difficult to implement without introducing global serialisation
 * in the CIL commit fast path, and it's not at all clear that we actually need
 * such hard limits given the ~7 years we've run without a hard limit before
 * finding the first situation where a checkpoint size overflow actually
 * occurred. Hence the simple throttle, and an ASSERT check to tell us that
 * we've overrun the max size.
 */
#define XLOG_CIL_SPACE_LIMIT(log)	\
	min_t(int, (log)->l_logsize >> 3, BBTOB(XLOG_TOTAL_REC_SHIFT(log)) << 4)

#define XLOG_CIL_BLOCKING_SPACE_LIMIT(log)	\
	(XLOG_CIL_SPACE_LIMIT(log) * 2)
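
/*
 * Worked example (illustrative, assuming a v2 log where
 * BBTOB(XLOG_TOTAL_REC_SHIFT(log)) evaluates to the 2MB iclog window,
 * i.e. 8 iclogs of up to 256kB each): the second limit is then
 * 16 * 2MB = 32MB. A 512MB log would use min(512MB >> 3, 32MB) = 32MB
 * as the background push threshold and 64MB as the blocking threshold;
 * a small 64MB log would use min(8MB, 32MB) = 8MB and 16MB.
 */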

/*
 * ticket grant locks, queues and accounting have their own cachelines
 * as these are quite hot and can be operated on concurrently.
 */
struct xlog_grant_head {
	spinlock_t		lock ____cacheline_aligned_in_smp;
	struct list_head	waiters;
	atomic64_t		grant;
};

/*
 * The reservation head lsn is not made up of a cycle number and block number.
 * Instead, it uses a cycle number and byte number.  Logs don't expect to
 * overflow 31 bits worth of byte offset, so using a byte number will mean
 * that round off problems won't occur when releasing partial reservations.
 */
struct xlog {
	/* The following fields don't need locking */
	struct xfs_mount	*l_mp;	        /* mount point */
	struct xfs_ail		*l_ailp;	/* AIL log is working with */
	struct xfs_cil		*l_cilp;	/* CIL log is working with */
	struct xfs_buftarg	*l_targ;        /* buftarg of log */
	struct workqueue_struct	*l_ioend_workqueue; /* for I/O completions */
	struct delayed_work	l_work;		/* background flush work */
	long			l_opstate;	/* operational state */
	uint			l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */
	struct list_head	*l_buf_cancel_table;
	int			l_iclog_hsize;  /* size of iclog header */
	int			l_iclog_heads;  /* # of iclog header sectors */
	uint			l_sectBBsize;   /* sector size in BBs (2^n) */
	int			l_iclog_size;	/* size of iclog buffers in bytes */
	int			l_iclog_bufs;	/* number of iclog buffers */
	xfs_daddr_t		l_logBBstart;   /* start block of log */
	int			l_logsize;      /* size of log in bytes */
	int			l_logBBsize;    /* size of log in BB chunks */

	/* The following block of fields are changed while holding icloglock */
	wait_queue_head_t	l_flush_wait ____cacheline_aligned_in_smp;
						/* waiting for iclog flush */
	int			l_covered_state;/* state of "covering disk
						 * log entries" */
	xlog_in_core_t		*l_iclog;       /* head log queue	*/
	spinlock_t		l_icloglock;    /* grab to change iclog state */
	int			l_curr_cycle;   /* Cycle number of log writes */
	int			l_prev_cycle;   /* Cycle number before last
						 * block increment */
	int			l_curr_block;   /* current logical log block */
	int			l_prev_block;   /* previous logical log block */

	/*
	 * l_last_sync_lsn and l_tail_lsn are atomics so they can be set and
	 * read without needing to hold specific locks. To avoid operations
	 * contending with other hot objects, place each of them on a separate
	 * cacheline.
	 */
	/* lsn of last LR on disk */
	atomic64_t		l_last_sync_lsn ____cacheline_aligned_in_smp;
	/* lsn of 1st LR with unflushed buffers */
	atomic64_t		l_tail_lsn ____cacheline_aligned_in_smp;

	struct xlog_grant_head	l_reserve_head;
	struct xlog_grant_head	l_write_head;

	struct xfs_kobj		l_kobj;

	/* log recovery lsn tracking (for buffer submission) */
	xfs_lsn_t		l_recovery_lsn;

	uint32_t		l_iclog_roundoff;/* padding roundoff */

	/* Users of log incompat features should take a read lock. */
	struct rw_semaphore	l_incompat_users;
};

/*
 * Bits for operational state
 */
#define XLOG_ACTIVE_RECOVERY	0	/* in the middle of recovery */
#define XLOG_RECOVERY_NEEDED	1	/* log was recovered */
#define XLOG_IO_ERROR		2	/* log hit an I/O error and is being
					   shut down */
#define XLOG_TAIL_WARN		3	/* log tail verify warning issued */

static inline bool
xlog_recovery_needed(struct xlog *log)
{
	return test_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
}

static inline bool
xlog_in_recovery(struct xlog *log)
{
	return test_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
}

static inline bool
xlog_is_shutdown(struct xlog *log)
{
	return test_bit(XLOG_IO_ERROR, &log->l_opstate);
}

/*
 * Wait until xlog_force_shutdown() has marked the log as shut down so
 * that xlog_is_shutdown() will always return true.
 */
static inline void
xlog_shutdown_wait(
	struct xlog	*log)
{
	wait_var_event(&log->l_opstate, xlog_is_shutdown(log));
}

/* common routines */
extern int
xlog_recover(
	struct xlog		*log);
extern int
xlog_recover_finish(
	struct xlog		*log);
extern void
xlog_recover_cancel(struct xlog *);

extern __le32	 xlog_cksum(struct xlog *log, struct xlog_rec_header *rhead,
			    char *dp, int size);

extern struct kmem_cache *xfs_log_ticket_cache;
struct xlog_ticket *xlog_ticket_alloc(struct xlog *log, int unit_bytes,
		int count, bool permanent);

void	xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket);
void	xlog_print_trans(struct xfs_trans *);
int	xlog_write(struct xlog *log, struct xfs_cil_ctx *ctx,
		struct list_head *lv_chain, struct xlog_ticket *tic,
		uint32_t len);
void	xfs_log_ticket_ungrant(struct xlog *log, struct xlog_ticket *ticket);
void	xfs_log_ticket_regrant(struct xlog *log, struct xlog_ticket *ticket);

void xlog_state_switch_iclogs(struct xlog *log, struct xlog_in_core *iclog,
		int eventual_size);
int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog,
		struct xlog_ticket *ticket);

/*
 * When we crack an atomic LSN, we sample it first so that the value will not
 * change while we are cracking it into the component values. This means we
 * will always get consistent component values to work from. This should always
 * be used to sample and crack LSNs that are stored and updated in atomic
 * variables.
 */
static inline void
xlog_crack_atomic_lsn(atomic64_t *lsn, uint *cycle, uint *block)
{
	xfs_lsn_t val = atomic64_read(lsn);

	*cycle = CYCLE_LSN(val);
	*block = BLOCK_LSN(val);
}

/*
 * Calculate and assign a value to an atomic LSN variable from component pieces.
 */
static inline void
xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block)
{
	atomic64_set(lsn, xlog_assign_lsn(cycle, block));
}

/*
 * When we crack the grant head, we sample it first so that the value will not
 * change while we are cracking it into the component values. This means we
 * will always get consistent component values to work from.
 */
static inline void
xlog_crack_grant_head_val(int64_t val, int *cycle, int *space)
{
	*cycle = val >> 32;
	*space = val & 0xffffffff;
}

static inline void
xlog_crack_grant_head(atomic64_t *head, int *cycle, int *space)
{
	xlog_crack_grant_head_val(atomic64_read(head), cycle, space);
}

static inline int64_t
xlog_assign_grant_head_val(int cycle, int space)
{
	return ((int64_t)cycle << 32) | space;
}

static inline void
xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
{
	atomic64_set(head, xlog_assign_grant_head_val(cycle, space));
}
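
/*
 * Illustrative sketch (hypothetical helper, not part of the real
 * header): moving a grant head forward by "bytes" using the crack and
 * assign helpers above, retried with cmpxchg so a racing update is
 * never lost. Real callers must also handle the byte count wrapping
 * past the end of the log, which bumps the cycle; that is omitted
 * here for brevity.
 */
static inline void
xlog_grant_head_add_sketch(
	atomic64_t	*head,
	int		bytes)
{
	int64_t		old_val, new_val;
	int		cycle, space;

	do {
		old_val = atomic64_read(head);
		xlog_crack_grant_head_val(old_val, &cycle, &space);
		space += bytes;		/* no cycle wrap handling here */
		new_val = xlog_assign_grant_head_val(cycle, space);
	} while (atomic64_cmpxchg(head, old_val, new_val) != old_val);
}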

/*
 * Committed Item List interfaces
 */
int	xlog_cil_init(struct xlog *log);
void	xlog_cil_init_post_recovery(struct xlog *log);
void	xlog_cil_destroy(struct xlog *log);
bool	xlog_cil_empty(struct xlog *log);
void	xlog_cil_commit(struct xlog *log, struct xfs_trans *tp,
			xfs_csn_t *commit_seq, bool regrant);
void	xlog_cil_set_ctx_write_state(struct xfs_cil_ctx *ctx,
			struct xlog_in_core *iclog);


/*
 * CIL force routines
 */
void xlog_cil_flush(struct xlog *log);
xfs_lsn_t xlog_cil_force_seq(struct xlog *log, xfs_csn_t sequence);

static inline void
xlog_cil_force(struct xlog *log)
{
	xlog_cil_force_seq(log, log->l_cilp->xc_current_sequence);
}

/*
 * Wrapper function for waiting on a wait queue serialised against wakeups
 * by a spinlock. This matches the semantics of all the wait queues used in the
 * log code.
 */
static inline void
xlog_wait(
	struct wait_queue_head	*wq,
	struct spinlock		*lock)
		__releases(lock)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(wq, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock(lock);
	schedule();
	remove_wait_queue(wq, &wait);
}
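
/*
 * Illustrative usage (hypothetical caller): the condition must be
 * checked and the wait entered while holding the serialising lock,
 * which xlog_wait() then drops before scheduling:
 *
 *	spin_lock(&log->l_icloglock);
 *	if (iclog->ic_state != XLOG_STATE_ACTIVE)
 *		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
 *	else
 *		spin_unlock(&log->l_icloglock);
 */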

int xlog_wait_on_iclog(struct xlog_in_core *iclog);

/*
 * The LSN is valid so long as it is behind the current LSN. If it isn't, this
 * means that the next log record that includes this metadata could have a
 * smaller LSN. In turn, this means that the modification in the log would not
 * replay.
 */
static inline bool
xlog_valid_lsn(
	struct xlog	*log,
	xfs_lsn_t	lsn)
{
	int		cur_cycle;
	int		cur_block;
	bool		valid = true;

	/*
	 * First, sample the current lsn without locking to avoid added
	 * contention from metadata I/O. The current cycle and block are updated
	 * (in xlog_state_switch_iclogs()) and read here in a particular order
	 * to avoid false negatives (e.g., thinking the metadata LSN is valid
	 * when it is not).
	 *
	 * The current block is always rewound before the cycle is bumped in
	 * xlog_state_switch_iclogs() to ensure the current LSN is never seen in
	 * a transiently forward state. Instead, we can see the LSN in a
	 * transiently behind state if we happen to race with a cycle wrap.
	 */
	cur_cycle = READ_ONCE(log->l_curr_cycle);
	smp_rmb();
	cur_block = READ_ONCE(log->l_curr_block);

	if ((CYCLE_LSN(lsn) > cur_cycle) ||
	    (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block)) {
		/*
		 * If the metadata LSN appears invalid, it's possible the check
		 * above raced with a wrap to the next log cycle. Grab the lock
		 * to check for sure.
		 */
		spin_lock(&log->l_icloglock);
		cur_cycle = log->l_curr_cycle;
		cur_block = log->l_curr_block;
		spin_unlock(&log->l_icloglock);

		if ((CYCLE_LSN(lsn) > cur_cycle) ||
		    (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block))
			valid = false;
	}

	return valid;
}

/*
 * Log vector and shadow buffers can be large, so we need to use kvmalloc() here
 * to ensure success. Unfortunately, kvmalloc() only allows GFP_KERNEL contexts
 * to fall back to vmalloc, so we can't actually do anything useful with gfp
 * flags to control the kmalloc() behaviour within kvmalloc(). Hence kmalloc()
 * will do direct reclaim and compaction in the slow path, both of which are
 * horrendously expensive. We just want kmalloc to fail fast and fall back to
 * vmalloc if it can't get something straight away from the free lists or
 * buddy allocator. Hence we have to open code kvmalloc ourselves here.
 *
 * This assumes that the caller uses memalloc_nofs_save task context here, so
 * despite the use of GFP_KERNEL here, we are going to be doing GFP_NOFS
 * allocations. This is actually the only way to make vmalloc() do GFP_NOFS
 * allocations, so let's just all pretend this is a GFP_KERNEL context
 * operation....
 */
static inline void *
xlog_kvmalloc(
	size_t		buf_size)
{
	gfp_t		flags = GFP_KERNEL;
	void		*p;

	flags &= ~__GFP_DIRECT_RECLAIM;
	flags |= __GFP_NOWARN | __GFP_NORETRY;
	do {
		p = kmalloc(buf_size, flags);
		if (!p)
			p = vmalloc(buf_size);
	} while (!p);

	return p;
}
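
/*
 * Note: memory returned by xlog_kvmalloc() may come from either
 * kmalloc() or vmalloc(), so it must be released with kvfree(), which
 * handles both cases.
 */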

/*
 * CIL CPU dead notifier
 */
void xlog_cil_pcp_dead(struct xlog *log, unsigned int cpu);

#endif	/* __XFS_LOG_PRIV_H__ */