// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
#include "xfs_error.h"

/*
 * Lock order:
 *
 * ip->i_lock
 *   qi->qi_tree_lock
 *     dquot->q_qlock (xfs_dqlock() and friends)
 *       dquot->q_flush (xfs_dqflock() and friends)
 *       qi->qi_lru_lock
 *
 * If two dquots need to be locked the order is user before group/project,
 * otherwise by the lowest id first, see xfs_dqlock2.
 */

struct kmem_cache		*xfs_dqtrx_cache;
static struct kmem_cache	*xfs_dquot_cache;

static struct lock_class_key xfs_dquot_group_class;
static struct lock_class_key xfs_dquot_project_class;

/*
 * This is called to free all the memory associated with a dquot
 */
void
xfs_qm_dqdestroy(
	struct xfs_dquot	*dqp)
{
	ASSERT(list_empty(&dqp->q_lru));

	kmem_free(dqp->q_logitem.qli_item.li_lv_shadow);
	mutex_destroy(&dqp->q_qlock);

	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot);
	kmem_cache_free(xfs_dquot_cache, dqp);
}

/*
 * If default limits are in force, push them into the dquot now.
 * We overwrite the dquot limits only if they are zero and this
 * is not the root dquot.
 */
void
xfs_qm_adjust_dqlimits(
	struct xfs_dquot	*dq)
{
	struct xfs_mount	*mp = dq->q_mount;
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_def_quota	*defq;
	int			prealloc = 0;

	ASSERT(dq->q_id);
	defq = xfs_get_defquota(q, xfs_dquot_type(dq));

	if (!dq->q_blk.softlimit) {
		dq->q_blk.softlimit = defq->blk.soft;
		prealloc = 1;
	}
	if (!dq->q_blk.hardlimit) {
		dq->q_blk.hardlimit = defq->blk.hard;
		prealloc = 1;
	}
	if (!dq->q_ino.softlimit)
		dq->q_ino.softlimit = defq->ino.soft;
	if (!dq->q_ino.hardlimit)
		dq->q_ino.hardlimit = defq->ino.hard;
	if (!dq->q_rtb.softlimit)
		dq->q_rtb.softlimit = defq->rtb.soft;
	if (!dq->q_rtb.hardlimit)
		dq->q_rtb.hardlimit = defq->rtb.hard;

	if (prealloc)
		xfs_dquot_set_prealloc_limits(dq);
}

/* Set the expiration time of a quota's grace period. */
time64_t
xfs_dquot_set_timeout(
	struct xfs_mount	*mp,
	time64_t		timeout)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	return clamp_t(time64_t, timeout, qi->qi_expiry_min,
					  qi->qi_expiry_max);
}

/* Set the length of the default grace period. */
time64_t
xfs_dquot_set_grace_period(
	time64_t		grace)
{
	return clamp_t(time64_t, grace, XFS_DQ_GRACE_MIN, XFS_DQ_GRACE_MAX);
}
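
/*
 * Illustrative sketch (comment only, not part of the build): both helpers
 * above are pure clamps.  Assuming the expiry bounds straddle it, a timeout
 * of "now + 7 days" passes through xfs_dquot_set_timeout() unchanged, while
 * a value below qi_expiry_min is raised to qi_expiry_min and one above
 * qi_expiry_max is lowered to qi_expiry_max.
 */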

/*
 * Determine if this quota counter is over either limit and set the quota
 * timers as appropriate.
 */
static inline void
xfs_qm_adjust_res_timer(
	struct xfs_mount	*mp,
	struct xfs_dquot_res	*res,
	struct xfs_quota_limits	*qlim)
{
	ASSERT(res->hardlimit == 0 || res->softlimit <= res->hardlimit);

	if ((res->softlimit && res->count > res->softlimit) ||
	    (res->hardlimit && res->count > res->hardlimit)) {
		if (res->timer == 0)
			res->timer = xfs_dquot_set_timeout(mp,
					ktime_get_real_seconds() + qlim->time);
	} else {
		res->timer = 0;
	}
}
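
/*
 * Illustrative sketch: for a block resource with softlimit 100, count 150
 * and no timer yet running, the helper above arms the timer at
 * ktime_get_real_seconds() + qlim->time (clamped by xfs_dquot_set_timeout());
 * once the count drops back to the soft limit or below, a later call clears
 * the timer to 0.
 */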

/*
 * Check the limits and timers of a dquot and start or reset timers
 * if necessary.
 * This gets called even when quota enforcement is OFF, which makes our
 * life a little less complicated: we simply don't reject any quota
 * reservations in that case.  We also report the timers as 0 in
 * Q_GETQUOTA calls while enforcement is off.
 * Warnings behave a little differently in that they don't start
 * automatically when a limit is exceeded; they are only ever set non-zero
 * from userspace.  They do get reset to zero, however, when we find the
 * count to be under the soft limit.
 */
void
xfs_qm_adjust_dqtimers(
	struct xfs_dquot	*dq)
{
	struct xfs_mount	*mp = dq->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct xfs_def_quota	*defq;

	ASSERT(dq->q_id);
	defq = xfs_get_defquota(qi, xfs_dquot_type(dq));

	xfs_qm_adjust_res_timer(dq->q_mount, &dq->q_blk, &defq->blk);
	xfs_qm_adjust_res_timer(dq->q_mount, &dq->q_ino, &defq->ino);
	xfs_qm_adjust_res_timer(dq->q_mount, &dq->q_rtb, &defq->rtb);
}

/*
 * Initialize a buffer full of dquots and log the whole thing.
 */
STATIC void
xfs_qm_init_dquot_blk(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type,
	struct xfs_buf		*bp)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_dqblk	*d;
	xfs_dqid_t		curid;
	unsigned int		qflag;
	unsigned int		blftype;
	int			i;

	ASSERT(tp);
	ASSERT(xfs_buf_islocked(bp));

	switch (type) {
	case XFS_DQTYPE_USER:
		qflag = XFS_UQUOTA_CHKD;
		blftype = XFS_BLF_UDQUOT_BUF;
		break;
	case XFS_DQTYPE_PROJ:
		qflag = XFS_PQUOTA_CHKD;
		blftype = XFS_BLF_PDQUOT_BUF;
		break;
	case XFS_DQTYPE_GROUP:
		qflag = XFS_GQUOTA_CHKD;
		blftype = XFS_BLF_GDQUOT_BUF;
		break;
	default:
		ASSERT(0);
		return;
	}

	d = bp->b_addr;

	/*
	 * ID of the first dquot in the block; IDs are zero-based.
	 */
	curid = id - (id % q->qi_dqperchunk);
	memset(d, 0, BBTOB(q->qi_dqchunklen));
	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
		d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
		d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
		d->dd_diskdq.d_id = cpu_to_be32(curid);
		d->dd_diskdq.d_type = type;
		if (curid > 0 && xfs_has_bigtime(mp))
			d->dd_diskdq.d_type |= XFS_DQTYPE_BIGTIME;
		if (xfs_has_crc(mp)) {
			uuid_copy(&d->dd_uuid, &mp->m_sb.sb_meta_uuid);
			xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}

	xfs_trans_dquot_buf(tp, bp, blftype);

	/*
	 * quotacheck uses delayed writes to update all the dquots on disk in an
	 * efficient manner instead of logging the individual dquot changes as
	 * they are made. However if we log the buffer allocated here and crash
	 * after quotacheck while the logged initialisation is still in the
	 * active region of the log, log recovery can replay the dquot buffer
	 * initialisation over the top of the checked dquots and corrupt quota
	 * accounting.
	 *
	 * To avoid this problem, quotacheck cannot log the initialised buffer.
	 * We must still dirty the buffer and write it back before the
	 * allocation transaction clears the log. Therefore, mark the buffer as
	 * ordered instead of logging it directly. This is safe for quotacheck
	 * because it detects and repairs allocated but uninitialized dquot
	 * blocks in the quota inodes.
	 */
	if (!(mp->m_qflags & qflag))
		xfs_trans_ordered_buf(tp, bp);
	else
		xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
}

/*
 * Initialize the dynamic speculative preallocation thresholds. The lo/hi
 * watermarks correspond to the soft and hard limits by default. If a soft limit
 * is not specified, we use 95% of the hard limit.
 */
void
xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
{
	uint64_t space;

	dqp->q_prealloc_hi_wmark = dqp->q_blk.hardlimit;
	dqp->q_prealloc_lo_wmark = dqp->q_blk.softlimit;
	if (!dqp->q_prealloc_lo_wmark) {
		dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
		do_div(dqp->q_prealloc_lo_wmark, 100);
		dqp->q_prealloc_lo_wmark *= 95;
	}

	space = dqp->q_prealloc_hi_wmark;

	do_div(space, 100);
	dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
	dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
	dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
}
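
/*
 * Worked example (illustrative only): with q_blk.hardlimit == 1000 blocks
 * and no soft limit set, the helper above yields:
 *
 *	q_prealloc_hi_wmark = 1000
 *	q_prealloc_lo_wmark = 1000 / 100 * 95 = 950
 *	q_low_space[XFS_QLOWSP_1_PCNT] = 10
 *	q_low_space[XFS_QLOWSP_3_PCNT] = 30
 *	q_low_space[XFS_QLOWSP_5_PCNT] = 50
 */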

/*
 * Ensure that the given in-core dquot has a buffer on disk backing it, and
 * return the buffer locked and held. This is called when the bmapi finds a
 * hole.
 */
STATIC int
xfs_dquot_disk_alloc(
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_bmbt_irec	map;
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_buf		*bp;
	xfs_dqtype_t		qtype = xfs_dquot_type(dqp);
	struct xfs_inode	*quotip = xfs_quota_inode(mp, qtype);
	int			nmaps = 1;
	int			error;

	trace_xfs_dqalloc(dqp);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc,
			XFS_QM_DQALLOC_SPACE_RES(mp), 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(quotip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, quotip, 0);

	if (!xfs_this_quota_on(dqp->q_mount, qtype)) {
		/*
		 * Bail out if this quota type was turned off while we didn't
		 * hold the inode lock.
		 */
		error = -ESRCH;
		goto err_cancel;
	}

	error = xfs_iext_count_may_overflow(quotip, XFS_DATA_FORK,
			XFS_IEXT_ADD_NOSPLIT_CNT);
	if (error == -EFBIG)
		error = xfs_iext_count_upgrade(tp, quotip,
				XFS_IEXT_ADD_NOSPLIT_CNT);
	if (error)
		goto err_cancel;

	/* Create the block mapping. */
	error = xfs_bmapi_write(tp, quotip, dqp->q_fileoffset,
			XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA, 0, &map,
			&nmaps);
	if (error)
		goto err_cancel;

	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT(nmaps == 1);
	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
	       (map.br_startblock != HOLESTARTBLOCK));

	/*
	 * Keep track of the blkno to save a lookup later
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	/* now we can just get the buffer (there's nothing to read yet) */
	error = xfs_trans_get_buf(tp, mp->m_ddev_targp, dqp->q_blkno,
			mp->m_quotainfo->qi_dqchunklen, 0, &bp);
	if (error)
		goto err_cancel;
	bp->b_ops = &xfs_dquot_buf_ops;

	/*
	 * Make a chunk of dquots out of this buffer and log
	 * the entire thing.
	 */
	xfs_qm_init_dquot_blk(tp, mp, dqp->q_id, qtype, bp);
	xfs_buf_set_ref(bp, XFS_DQUOT_REF);

	/*
	 * Hold the buffer and join it to the dfops so that we'll still own
	 * the buffer when we return to the caller.  The buffer disposal on
	 * error must be paid attention to very carefully, as it has been
	 * broken since commit efa092f3d4c6 "[XFS] Fixes a bug in the quota
	 * code when allocating a new dquot record" in 2005, and the later
	 * conversion to xfs_defer_ops in commit 310a75a3c6c747 failed to keep
	 * the buffer locked across the _defer_finish call.  We can now do
	 * this correctly with xfs_defer_bjoin.
	 *
	 * Above, we allocated a disk block for the dquot information and used
	 * get_buf to initialize the dquot. If the _defer_finish fails, the old
	 * transaction is gone but the new buffer is not joined or held to any
	 * transaction, so we must _buf_relse it.
	 *
	 * If everything succeeds, the caller of this function is returned a
	 * buffer that is locked and held to the transaction.  The caller
	 * is responsible for unlocking any buffer passed back, either
	 * manually or by committing the transaction.  On error, the buffer is
	 * released and not passed back.
	 *
	 * Keep the quota inode ILOCKed until after the transaction commit to
	 * maintain the atomicity of bmap/rmap updates.
	 */
	xfs_trans_bhold(tp, bp);
	error = xfs_trans_commit(tp);
	xfs_iunlock(quotip, XFS_ILOCK_EXCL);
	if (error) {
		xfs_buf_relse(bp);
		return error;
	}

	*bpp = bp;
	return 0;

err_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(quotip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Read in the in-core dquot's on-disk metadata and return the buffer.
 * Returns ENOENT to signal a hole.
 */
STATIC int
xfs_dquot_disk_read(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_bmbt_irec	map;
	struct xfs_buf		*bp;
	xfs_dqtype_t		qtype = xfs_dquot_type(dqp);
	struct xfs_inode	*quotip = xfs_quota_inode(mp, qtype);
	uint			lock_mode;
	int			nmaps = 1;
	int			error;

	lock_mode = xfs_ilock_data_map_shared(quotip);
	if (!xfs_this_quota_on(mp, qtype)) {
		/*
		 * Bail out if this quota type was turned off while we didn't
		 * hold the quota inode lock.
		 */
		xfs_iunlock(quotip, lock_mode);
		return -ESRCH;
	}

	/*
	 * Find the block map; no allocations yet
	 */
	error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
			XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);
	xfs_iunlock(quotip, lock_mode);
	if (error)
		return error;

	ASSERT(nmaps == 1);
	ASSERT(map.br_blockcount >= 1);
	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
	if (map.br_startblock == HOLESTARTBLOCK)
		return -ENOENT;

	trace_xfs_dqtobp_read(dqp);

	/*
	 * Store the blkno so that we don't have to repeat the mapping later.
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
			mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			&xfs_dquot_buf_ops);
	if (error) {
		ASSERT(bp == NULL);
		return error;
	}

	ASSERT(xfs_buf_islocked(bp));
	xfs_buf_set_ref(bp, XFS_DQUOT_REF);
	*bpp = bp;

	return 0;
}

/* Allocate and initialize everything we need for an incore dquot. */
STATIC struct xfs_dquot *
xfs_dquot_alloc(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type)
{
	struct xfs_dquot	*dqp;

	dqp = kmem_cache_zalloc(xfs_dquot_cache, GFP_KERNEL | __GFP_NOFAIL);

	dqp->q_type = type;
	dqp->q_id = id;
	dqp->q_mount = mp;
	INIT_LIST_HEAD(&dqp->q_lru);
	mutex_init(&dqp->q_qlock);
	init_waitqueue_head(&dqp->q_pinwait);
	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
	/*
	 * Offset of dquot in the (fixed sized) dquot chunk.
	 */
	dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
			sizeof(struct xfs_dqblk);
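
	/*
	 * Worked example (illustrative, with a hypothetical qi_dqperchunk
	 * of 30): id 100 lives in chunk 100 / 30 = 3 (q_fileoffset) at
	 * slot 100 % 30 = 10, so q_bufoffset is 10 * sizeof(struct
	 * xfs_dqblk) bytes into the buffer.
	 */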

	/*
	 * Because we want to use a counting completion, complete
	 * the flush completion once to allow a single access to
	 * the flush completion without blocking.
	 */
	init_completion(&dqp->q_flush);
	complete(&dqp->q_flush);

	/*
	 * Make sure group quotas have a different lock class than user
	 * quotas.
	 */
	switch (type) {
	case XFS_DQTYPE_USER:
		/* uses the default lock class */
		break;
	case XFS_DQTYPE_GROUP:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
		break;
	case XFS_DQTYPE_PROJ:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
		break;
	default:
		ASSERT(0);
		break;
	}

	xfs_qm_dquot_logitem_init(dqp);

	XFS_STATS_INC(mp, xs_qm_dquot);
	return dqp;
}

/* Check the ondisk dquot's id and type match what the incore dquot expects. */
static bool
xfs_dquot_check_type(
	struct xfs_dquot	*dqp,
	struct xfs_disk_dquot	*ddqp)
{
	uint8_t			ddqp_type;
	uint8_t			dqp_type;

	ddqp_type = ddqp->d_type & XFS_DQTYPE_REC_MASK;
	dqp_type = xfs_dquot_type(dqp);

	if (be32_to_cpu(ddqp->d_id) != dqp->q_id)
		return false;

	/*
	 * V5 filesystems always expect an exact type match.  V4 filesystems
	 * expect an exact match for user dquots and for non-root group and
	 * project dquots.
	 */
	if (xfs_has_crc(dqp->q_mount) ||
	    dqp_type == XFS_DQTYPE_USER || dqp->q_id != 0)
		return ddqp_type == dqp_type;

	/*
	 * V4 filesystems support either group or project quotas, but not both
	 * at the same time.  The non-user quota file can be switched between
	 * group and project quota uses depending on the mount options, which
	 * means that we can encounter the other type when we try to load quota
	 * defaults.  Quotacheck will soon reset the entire quota file
	 * (including the root dquot) anyway, but don't log scary corruption
	 * reports to dmesg.
	 */
	return ddqp_type == XFS_DQTYPE_GROUP || ddqp_type == XFS_DQTYPE_PROJ;
}
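
/*
 * Illustrative sketch: on a V4 filesystem previously mounted with group
 * quota and now mounted with project quota, reading the root (id 0) dquot
 * of the shared non-user quota file can return a record still stamped
 * XFS_DQTYPE_GROUP; the check above deliberately accepts it instead of
 * reporting corruption, since quotacheck will rewrite the file anyway.
 */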

/* Copy the in-core quota fields in from the on-disk buffer. */
STATIC int
xfs_dquot_from_disk(
	struct xfs_dquot	*dqp,
	struct xfs_buf		*bp)
{
	struct xfs_disk_dquot	*ddqp = bp->b_addr + dqp->q_bufoffset;

	/*
	 * Ensure that we got the type and ID we were looking for.
	 * Everything else was checked by the dquot buffer verifier.
	 */
	if (!xfs_dquot_check_type(dqp, ddqp)) {
		xfs_alert_tag(bp->b_mount, XFS_PTAG_VERIFIER_ERROR,
			  "Metadata corruption detected at %pS, quota %u",
			  __this_address, dqp->q_id);
		xfs_alert(bp->b_mount, "Unmount and run xfs_repair");
		return -EFSCORRUPTED;
	}

	/* copy everything from disk dquot to the incore dquot */
	dqp->q_type = ddqp->d_type;
	dqp->q_blk.hardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
	dqp->q_blk.softlimit = be64_to_cpu(ddqp->d_blk_softlimit);
	dqp->q_ino.hardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
	dqp->q_ino.softlimit = be64_to_cpu(ddqp->d_ino_softlimit);
	dqp->q_rtb.hardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
	dqp->q_rtb.softlimit = be64_to_cpu(ddqp->d_rtb_softlimit);

	dqp->q_blk.count = be64_to_cpu(ddqp->d_bcount);
	dqp->q_ino.count = be64_to_cpu(ddqp->d_icount);
	dqp->q_rtb.count = be64_to_cpu(ddqp->d_rtbcount);

	dqp->q_blk.timer = xfs_dquot_from_disk_ts(ddqp, ddqp->d_btimer);
	dqp->q_ino.timer = xfs_dquot_from_disk_ts(ddqp, ddqp->d_itimer);
	dqp->q_rtb.timer = xfs_dquot_from_disk_ts(ddqp, ddqp->d_rtbtimer);

	/*
	 * Reservation counters are defined as reservation plus current usage
	 * to avoid having to add every time.
	 */
	dqp->q_blk.reserved = dqp->q_blk.count;
	dqp->q_ino.reserved = dqp->q_ino.count;
	dqp->q_rtb.reserved = dqp->q_rtb.count;

	/* initialize the dquot speculative prealloc thresholds */
	xfs_dquot_set_prealloc_limits(dqp);
	return 0;
}

/* Copy the in-core quota fields into the on-disk buffer. */
void
xfs_dquot_to_disk(
	struct xfs_disk_dquot	*ddqp,
	struct xfs_dquot	*dqp)
{
	ddqp->d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
	ddqp->d_version = XFS_DQUOT_VERSION;
	ddqp->d_type = dqp->q_type;
	ddqp->d_id = cpu_to_be32(dqp->q_id);
	ddqp->d_pad0 = 0;
	ddqp->d_pad = 0;

	ddqp->d_blk_hardlimit = cpu_to_be64(dqp->q_blk.hardlimit);
	ddqp->d_blk_softlimit = cpu_to_be64(dqp->q_blk.softlimit);
	ddqp->d_ino_hardlimit = cpu_to_be64(dqp->q_ino.hardlimit);
	ddqp->d_ino_softlimit = cpu_to_be64(dqp->q_ino.softlimit);
	ddqp->d_rtb_hardlimit = cpu_to_be64(dqp->q_rtb.hardlimit);
	ddqp->d_rtb_softlimit = cpu_to_be64(dqp->q_rtb.softlimit);

	ddqp->d_bcount = cpu_to_be64(dqp->q_blk.count);
	ddqp->d_icount = cpu_to_be64(dqp->q_ino.count);
	ddqp->d_rtbcount = cpu_to_be64(dqp->q_rtb.count);

	ddqp->d_bwarns = 0;
	ddqp->d_iwarns = 0;
	ddqp->d_rtbwarns = 0;

	ddqp->d_btimer = xfs_dquot_to_disk_ts(dqp, dqp->q_blk.timer);
	ddqp->d_itimer = xfs_dquot_to_disk_ts(dqp, dqp->q_ino.timer);
	ddqp->d_rtbtimer = xfs_dquot_to_disk_ts(dqp, dqp->q_rtb.timer);
}

/*
 * Read in the ondisk dquot using xfs_dquot_disk_read(), copy it to an
 * incore version, and release the buffer immediately.  If @can_alloc is
 * true, fill any holes in the on-disk metadata.
 */
static int
xfs_qm_dqread(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type,
	bool			can_alloc,
	struct xfs_dquot	**dqpp)
{
	struct xfs_dquot	*dqp;
	struct xfs_buf		*bp;
	int			error;

	dqp = xfs_dquot_alloc(mp, id, type);
	trace_xfs_dqread(dqp);

	/* Try to read the buffer, allocating if necessary. */
	error = xfs_dquot_disk_read(mp, dqp, &bp);
	if (error == -ENOENT && can_alloc)
		error = xfs_dquot_disk_alloc(dqp, &bp);
	if (error)
		goto err;

	/*
	 * At this point we should have a clean locked buffer.  Copy the data
	 * to the incore dquot and release the buffer since the incore dquot
	 * has its own locking protocol so we needn't tie up the buffer any
	 * further.
	 */
	ASSERT(xfs_buf_islocked(bp));
	error = xfs_dquot_from_disk(dqp, bp);
	xfs_buf_relse(bp);
	if (error)
		goto err;

	*dqpp = dqp;
	return error;

err:
	trace_xfs_dqread_fail(dqp);
	xfs_qm_dqdestroy(dqp);
	*dqpp = NULL;
	return error;
}

/*
 * Advance to the next id in the current chunk, or if at the
 * end of the chunk, skip ahead to first id in next allocated chunk
 * using the SEEK_DATA interface.
 */
static int
xfs_dq_get_next_id(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type,
	xfs_dqid_t		*id)
{
	struct xfs_inode	*quotip = xfs_quota_inode(mp, type);
	xfs_dqid_t		next_id = *id + 1; /* simple advance */
	uint			lock_flags;
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	cur;
	xfs_fsblock_t		start;
	int			error = 0;

	/* If we'd wrap past the max ID, stop */
	if (next_id < *id)
		return -ENOENT;

	/* If new ID is within the current chunk, advancing it sufficed */
	if (next_id % mp->m_quotainfo->qi_dqperchunk) {
		*id = next_id;
		return 0;
	}

	/* Nope, next_id is now past the current chunk, so find the next one */
	start = (xfs_fsblock_t)next_id / mp->m_quotainfo->qi_dqperchunk;

	lock_flags = xfs_ilock_data_map_shared(quotip);
	error = xfs_iread_extents(NULL, quotip, XFS_DATA_FORK);
	if (error) {
		xfs_iunlock(quotip, lock_flags);
		return error;
	}

	if (xfs_iext_lookup_extent(quotip, &quotip->i_df, start, &cur, &got)) {
		/* contiguous chunk, bump startoff for the id calculation */
		if (got.br_startoff < start)
			got.br_startoff = start;
		*id = got.br_startoff * mp->m_quotainfo->qi_dqperchunk;
	} else {
		error = -ENOENT;
	}

	xfs_iunlock(quotip, lock_flags);

	return error;
}
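
/*
 * Illustrative sketch (hypothetical qi_dqperchunk of 30): advancing from
 * id 29 gives next_id = 30, which is chunk aligned, so we look up the
 * extent at or after file block 30 / 30 = 1; if the next allocated chunk
 * starts at file block 4, the id returned is 4 * 30 = 120.
 */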

/*
 * Look up the dquot in the in-core cache.  If found, the dquot is returned
 * locked and ready to go.
 */
static struct xfs_dquot *
xfs_qm_dqget_cache_lookup(
	struct xfs_mount	*mp,
	struct xfs_quotainfo	*qi,
	struct radix_tree_root	*tree,
	xfs_dqid_t		id)
{
	struct xfs_dquot	*dqp;

restart:
	mutex_lock(&qi->qi_tree_lock);
	dqp = radix_tree_lookup(tree, id);
	if (!dqp) {
		mutex_unlock(&qi->qi_tree_lock);
		XFS_STATS_INC(mp, xs_qm_dqcachemisses);
		return NULL;
	}

	xfs_dqlock(dqp);
	if (dqp->q_flags & XFS_DQFLAG_FREEING) {
		xfs_dqunlock(dqp);
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_freeing(dqp);
		delay(1);
		goto restart;
	}

	dqp->q_nrefs++;
	mutex_unlock(&qi->qi_tree_lock);

	trace_xfs_dqget_hit(dqp);
	XFS_STATS_INC(mp, xs_qm_dqcachehits);
	return dqp;
}

/*
 * Try to insert a new dquot into the in-core cache.  If an error occurs the
 * caller should throw away the dquot and start over.  Otherwise, the dquot
 * is returned locked (and held by the cache) as if there had been a cache
 * hit.
 */
static int
xfs_qm_dqget_cache_insert(
	struct xfs_mount	*mp,
	struct xfs_quotainfo	*qi,
	struct radix_tree_root	*tree,
	xfs_dqid_t		id,
	struct xfs_dquot	*dqp)
{
	int			error;

	mutex_lock(&qi->qi_tree_lock);
	error = radix_tree_insert(tree, id, dqp);
	if (unlikely(error)) {
		/* Duplicate found!  Caller must try again. */
		WARN_ON(error != -EEXIST);
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_dup(dqp);
		return error;
	}

	/* Return a locked dquot to the caller, with a reference taken. */
	xfs_dqlock(dqp);
	dqp->q_nrefs = 1;

	qi->qi_dquots++;
	mutex_unlock(&qi->qi_tree_lock);

	return 0;
}

/* Check our input parameters. */
static int
xfs_qm_dqget_checks(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type)
{
	switch (type) {
	case XFS_DQTYPE_USER:
		if (!XFS_IS_UQUOTA_ON(mp))
			return -ESRCH;
		return 0;
	case XFS_DQTYPE_GROUP:
		if (!XFS_IS_GQUOTA_ON(mp))
			return -ESRCH;
		return 0;
	case XFS_DQTYPE_PROJ:
		if (!XFS_IS_PQUOTA_ON(mp))
			return -ESRCH;
		return 0;
	default:
		WARN_ON_ONCE(0);
		return -EINVAL;
	}
}

/*
 * Given the file system, id, and type (UDQUOT/GDQUOT/PDQUOT), return a
 * locked dquot, doing an allocation (if requested) as needed.
 */
int
xfs_qm_dqget(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type,
	bool			can_alloc,
	struct xfs_dquot	**O_dqpp)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget_checks(mp, type);
	if (error)
		return error;

restart:
	dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
	if (dqp) {
		*O_dqpp = dqp;
		return 0;
	}

	error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp);
	if (error)
		return error;

	error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
	if (error) {
		/*
		 * Duplicate found. Just throw away the new dquot and start
		 * over.
		 */
		xfs_qm_dqdestroy(dqp);
		XFS_STATS_INC(mp, xs_qm_dquot_dups);
		goto restart;
	}

	trace_xfs_dqget_miss(dqp);
	*O_dqpp = dqp;
	return 0;
}
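
/*
 * Typical usage sketch (illustrative only): look up a user dquot, use it
 * under its q_qlock, then drop the reference:
 *
 *	struct xfs_dquot	*dqp;
 *	int			error;
 *
 *	error = xfs_qm_dqget(mp, uid, XFS_DQTYPE_USER, false, &dqp);
 *	if (!error) {
 *		... dqp is returned locked with a reference held ...
 *		xfs_qm_dqput(dqp);
 *	}
 */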

/*
 * Given a dquot id and type, read and initialize a dquot from the on-disk
 * metadata.  This function is only for use during quota initialization so
 * it ignores the dquot cache assuming that the dquot shrinker isn't set up.
 * The caller is responsible for _qm_dqdestroy'ing the returned dquot.
 */
int
xfs_qm_dqget_uncached(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type,
	struct xfs_dquot	**dqpp)
{
	int			error;

	error = xfs_qm_dqget_checks(mp, type);
	if (error)
		return error;

	return xfs_qm_dqread(mp, id, type, false, dqpp);
}

/* Return the quota id for a given inode and type. */
xfs_dqid_t
xfs_qm_id_for_quotatype(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type)
{
	switch (type) {
	case XFS_DQTYPE_USER:
		return i_uid_read(VFS_I(ip));
	case XFS_DQTYPE_GROUP:
		return i_gid_read(VFS_I(ip));
	case XFS_DQTYPE_PROJ:
		return ip->i_projid;
	}
	ASSERT(0);
	return 0;
}

/*
 * Return the dquot for a given inode and type.  If @can_alloc is true, then
 * allocate blocks if needed.  The inode's ILOCK must be held and it must not
 * already have a dquot of this type attached.
 */
int
xfs_qm_dqget_inode(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	bool			can_alloc,
	struct xfs_dquot	**O_dqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	struct xfs_dquot	*dqp;
	xfs_dqid_t		id;
	int			error;

	error = xfs_qm_dqget_checks(mp, type);
	if (error)
		return error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(xfs_inode_dquot(ip, type) == NULL);

	id = xfs_qm_id_for_quotatype(ip, type);

restart:
	dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
	if (dqp) {
		*O_dqpp = dqp;
		return 0;
	}

	/*
	 * Dquot cache miss. We don't want to keep the inode lock across
	 * a (potential) disk read. Also we don't want to deal with the lock
	 * ordering between quotainode and this inode. OTOH, dropping the inode
	 * lock here means dealing with a chown that can happen before
	 * we re-acquire the lock.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp);
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	/*
	 * A dquot could be attached to this inode by now, since we had
	 * dropped the ilock.
	 */
	if (xfs_this_quota_on(mp, type)) {
		struct xfs_dquot	*dqp1;

		dqp1 = xfs_inode_dquot(ip, type);
		if (dqp1) {
			xfs_qm_dqdestroy(dqp);
			dqp = dqp1;
			xfs_dqlock(dqp);
			goto dqret;
		}
	} else {
		/* inode stays locked on return */
		xfs_qm_dqdestroy(dqp);
		return -ESRCH;
	}

	error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
	if (error) {
		/*
		 * Duplicate found. Just throw away the new dquot and start
		 * over.
		 */
		xfs_qm_dqdestroy(dqp);
		XFS_STATS_INC(mp, xs_qm_dquot_dups);
		goto restart;
	}

dqret:
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	trace_xfs_dqget_miss(dqp);
	*O_dqpp = dqp;
	return 0;
}

/*
 * Starting at @id and progressing upwards, look for an initialized incore
 * dquot, lock it, and return it.
 */
int
xfs_qm_dqget_next(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type,
	struct xfs_dquot	**dqpp)
{
	struct xfs_dquot	*dqp;
	int			error = 0;

	*dqpp = NULL;
	for (; !error; error = xfs_dq_get_next_id(mp, type, &id)) {
		error = xfs_qm_dqget(mp, id, type, false, &dqp);
		if (error == -ENOENT)
			continue;
		else if (error != 0)
			break;

		if (!XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
			*dqpp = dqp;
			return 0;
		}

		xfs_qm_dqput(dqp);
	}

	return error;
}

/*
 * Release a reference to the dquot (decrement ref-count) and unlock it.
 *
 * If there is a group quota attached to this dquot, carefully release that
 * too without deadlocking.
 */
void
xfs_qm_dqput(
	struct xfs_dquot	*dqp)
{
	ASSERT(dqp->q_nrefs > 0);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	trace_xfs_dqput(dqp);

	if (--dqp->q_nrefs == 0) {
		struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
		trace_xfs_dqput_free(dqp);

		if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
			XFS_STATS_INC(dqp->q_mount, xs_qm_dquot_unused);
	}
	xfs_dqunlock(dqp);
}

/*
 * Release a dquot. Flush it if dirty, then dqput() it.
 * dquot must not be locked.
 */
void
xfs_qm_dqrele(
	struct xfs_dquot	*dqp)
{
	if (!dqp)
		return;

	trace_xfs_dqrele(dqp);

	xfs_dqlock(dqp);
	/*
	 * We don't flush the dquot here even if it is dirty, because that
	 * would create stutters we want to avoid.  Instead, a dirty dquot
	 * is written back via a delayed write when we try to reclaim it.
	 */
	xfs_qm_dqput(dqp);
}

/*
 * This is the dquot flushing I/O completion routine.  It is called
 * from interrupt level when the buffer containing the dquot is
 * flushed to disk.  It is responsible for removing the dquot logitem
 * from the AIL if it has not been re-logged, and unlocking the dquot's
 * flush lock. This behavior is very similar to that of inodes.
 */
static void
xfs_qm_dqflush_done(
	struct xfs_log_item	*lip)
{
	struct xfs_dq_logitem	*qip = (struct xfs_dq_logitem *)lip;
	struct xfs_dquot	*dqp = qip->qli_dquot;
	struct xfs_ail		*ailp = lip->li_ailp;
	xfs_lsn_t		tail_lsn;

	/*
	 * We only want to pull the item from the AIL if its location in the
	 * log has not changed since we started the flush.  Thus, we only
	 * bother if the dquot's lsn has not changed.  First we check the lsn
	 * outside the lock since it's cheaper, and then we recheck while
	 * holding the lock before removing the dquot from the AIL.
	 */
	if (test_bit(XFS_LI_IN_AIL, &lip->li_flags) &&
	    ((lip->li_lsn == qip->qli_flush_lsn) ||
	     test_bit(XFS_LI_FAILED, &lip->li_flags))) {

		spin_lock(&ailp->ail_lock);
		xfs_clear_li_failed(lip);
		if (lip->li_lsn == qip->qli_flush_lsn) {
			/* xfs_ail_update_finish() drops the AIL lock */
			tail_lsn = xfs_ail_delete_one(ailp, lip);
			xfs_ail_update_finish(ailp, tail_lsn);
		} else {
			spin_unlock(&ailp->ail_lock);
		}
	}

	/*
	 * Release the dq's flush lock since we're done with it.
	 */
	xfs_dqfunlock(dqp);
}

void
xfs_buf_dquot_iodone(
	struct xfs_buf		*bp)
{
	struct xfs_log_item	*lip, *n;

	list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
		list_del_init(&lip->li_bio_list);
		xfs_qm_dqflush_done(lip);
	}
}

void
xfs_buf_dquot_io_fail(
	struct xfs_buf		*bp)
{
	struct xfs_log_item	*lip;

	spin_lock(&bp->b_mount->m_ail->ail_lock);
	list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
		xfs_set_li_failed(lip, bp);
	spin_unlock(&bp->b_mount->m_ail->ail_lock);
}

/* Check incore dquot for errors before we flush. */
static xfs_failaddr_t
xfs_qm_dqflush_check(
	struct xfs_dquot	*dqp)
{
	xfs_dqtype_t		type = xfs_dquot_type(dqp);

	if (type != XFS_DQTYPE_USER &&
	    type != XFS_DQTYPE_GROUP &&
	    type != XFS_DQTYPE_PROJ)
		return __this_address;

	if (dqp->q_id == 0)
		return NULL;

	if (dqp->q_blk.softlimit && dqp->q_blk.count > dqp->q_blk.softlimit &&
	    !dqp->q_blk.timer)
		return __this_address;

	if (dqp->q_ino.softlimit && dqp->q_ino.count > dqp->q_ino.softlimit &&
	    !dqp->q_ino.timer)
		return __this_address;

	if (dqp->q_rtb.softlimit && dqp->q_rtb.count > dqp->q_rtb.softlimit &&
	    !dqp->q_rtb.timer)
		return __this_address;

	/* bigtime flag should never be set on root dquots */
	if (dqp->q_type & XFS_DQTYPE_BIGTIME) {
		if (!xfs_has_bigtime(dqp->q_mount))
			return __this_address;
		if (dqp->q_id == 0)
			return __this_address;
	}

	return NULL;
}

/*
 * Write a modified dquot to disk.
 * The caller must hold the dquot lock and the flush lock.
 * The flush lock will not be unlocked until the dquot reaches the disk,
 * but the dquot is free to be unlocked and modified by the caller
 * in the interim. Dquot is still locked on return. This behavior is
 * identical to that of inodes.
 */
int
xfs_qm_dqflush(
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_log_item	*lip = &dqp->q_logitem.qli_item;
	struct xfs_buf		*bp;
	struct xfs_dqblk	*dqblk;
	xfs_failaddr_t		fa;
	int			error;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(!completion_done(&dqp->q_flush));

	trace_xfs_dqflush(dqp);

	*bpp = NULL;

	xfs_qm_dqunpin_wait(dqp);

	/*
	 * Get the buffer containing the on-disk dquot
	 */
	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
				   mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK,
				   &bp, &xfs_dquot_buf_ops);
	if (error == -EAGAIN)
		goto out_unlock;
	if (error)
		goto out_abort;

	fa = xfs_qm_dqflush_check(dqp);
	if (fa) {
		xfs_alert(mp, "corrupt dquot ID 0x%x in memory at %pS",
				dqp->q_id, fa);
		xfs_buf_relse(bp);
		error = -EFSCORRUPTED;
		goto out_abort;
	}

	/* Flush the incore dquot to the ondisk buffer. */
	dqblk = bp->b_addr + dqp->q_bufoffset;
	xfs_dquot_to_disk(&dqblk->dd_diskdq, dqp);

	/*
	 * Clear the dirty field and remember the flush lsn for later use.
	 */
	dqp->q_flags &= ~XFS_DQFLAG_DIRTY;

	xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
					&dqp->q_logitem.qli_item.li_lsn);

	/*
	 * Copy the lsn into the on-disk dquot now while we have the in memory
	 * dquot here. This can't be done later in the write verifier as we
	 * can't get access to the log item at that point in time.
	 *
	 * We also calculate the CRC here so that the on-disk dquot in the
	 * buffer always has a valid CRC. This ensures there is no possibility
	 * of a dquot without an up-to-date CRC getting to disk.
	 */
	if (xfs_has_crc(mp)) {
		dqblk->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
		xfs_update_cksum((char *)dqblk, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF);
	}

	/*
	 * Attach the dquot to the buffer so that we can remove this dquot from
	 * the AIL and release the flush lock once the dquot is synced to disk.
	 */
	bp->b_flags |= _XBF_DQUOTS;
	list_add_tail(&dqp->q_logitem.qli_item.li_bio_list, &bp->b_li_list);

	/*
	 * If the buffer is pinned then push on the log so we won't
	 * get stuck waiting in the write for too long.
	 */
	if (xfs_buf_ispinned(bp)) {
		trace_xfs_dqflush_force(dqp);
		xfs_log_force(mp, 0);
	}

	trace_xfs_dqflush_done(dqp);
	*bpp = bp;
	return 0;

out_abort:
	dqp->q_flags &= ~XFS_DQFLAG_DIRTY;
	xfs_trans_ail_delete(lip, 0);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
out_unlock:
	xfs_dqfunlock(dqp);
	return error;
}

/*
 * Lock two xfs_dquot structures.
 *
 * To avoid deadlocks we always lock the quota structure with
 * the lower id first.
 */
void
xfs_dqlock2(
	struct xfs_dquot	*d1,
	struct xfs_dquot	*d2)
{
	if (d1 && d2) {
		ASSERT(d1 != d2);
		if (d1->q_id > d2->q_id) {
			mutex_lock(&d2->q_qlock);
			mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
		} else {
			mutex_lock(&d1->q_qlock);
			mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
		}
	} else if (d1) {
		mutex_lock(&d1->q_qlock);
	} else if (d2) {
		mutex_lock(&d2->q_qlock);
	}
}
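
/*
 * Illustrative sketch: for dquots with ids 500 and 100 passed in either
 * order, the code above always takes the q_qlock of id 100 first and then
 * id 500 with the XFS_QLOCK_NESTED subclass, so lockdep sees one
 * consistent ordering regardless of argument order.
 */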

int __init
xfs_qm_init(void)
{
	xfs_dquot_cache = kmem_cache_create("xfs_dquot",
					  sizeof(struct xfs_dquot),
					  0, 0, NULL);
	if (!xfs_dquot_cache)
		goto out;

	xfs_dqtrx_cache = kmem_cache_create("xfs_dqtrx",
					     sizeof(struct xfs_dquot_acct),
					     0, 0, NULL);
	if (!xfs_dqtrx_cache)
		goto out_free_dquot_cache;

	return 0;

out_free_dquot_cache:
	kmem_cache_destroy(xfs_dquot_cache);
out:
	return -ENOMEM;
}

void
xfs_qm_exit(void)
{
	kmem_cache_destroy(xfs_dqtrx_cache);
	kmem_cache_destroy(xfs_dquot_cache);
}

/*
 * Iterate every dquot of a particular type.  The caller must ensure that the
 * particular quota type is active.  iter_fn can return negative error codes,
 * or -ECANCELED to indicate that it wants to stop iterating.
 */
int
xfs_qm_dqiterate(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type,
	xfs_qm_dqiterate_fn	iter_fn,
	void			*priv)
{
	struct xfs_dquot	*dq;
	xfs_dqid_t		id = 0;
	int			error;

	do {
		error = xfs_qm_dqget_next(mp, id, type, &dq);
		if (error == -ENOENT)
			return 0;
		if (error)
			return error;

		error = iter_fn(dq, type, priv);
		id = dq->q_id;
		xfs_qm_dqput(dq);
	} while (error == 0 && id != 0);

	return error;
}
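
/*
 * Usage sketch (illustrative only, with a hypothetical callback): walk all
 * user dquots and tally their block counts.
 *
 *	static int
 *	count_blocks(struct xfs_dquot *dq, xfs_dqtype_t type, void *priv)
 *	{
 *		*(uint64_t *)priv += dq->q_blk.count;
 *		return 0;	(or -ECANCELED to stop early)
 *	}
 *
 *	uint64_t	total = 0;
 *	error = xfs_qm_dqiterate(mp, XFS_DQTYPE_USER, count_blocks, &total);
 */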