// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_refcount_item.h"
#include "xfs_log.h"
#include "xfs_refcount.h"
#include "xfs_error.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"

struct kmem_cache	*xfs_cui_cache;
struct kmem_cache	*xfs_cud_cache;

static const struct xfs_item_ops xfs_cui_item_ops;

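/* Convert a generic log item back to the CUI that contains it. */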
static inline struct xfs_cui_log_item *CUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_cui_log_item, cui_item);
}

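/*
 * Free a CUI and the shadow log vector buffer attached to it.  Items too
 * large for the inline extent array were allocated directly from the heap
 * and are freed the same way; everything else goes back to the CUI cache.
 */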
STATIC void
xfs_cui_item_free(
	struct xfs_cui_log_item	*cuip)
{
	kmem_free(cuip->cui_item.li_lv_shadow);
	if (cuip->cui_format.cui_nextents > XFS_CUI_MAX_FAST_EXTENTS)
		kmem_free(cuip);
	else
		kmem_cache_free(xfs_cui_cache, cuip);
}

/*
 * Freeing the CUI requires that we remove it from the AIL if it has already
 * been placed there. However, the CUI may not yet have been placed in the AIL
 * when called by xfs_cui_release() from CUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the CUI.
 */
STATIC void
xfs_cui_release(
	struct xfs_cui_log_item	*cuip)
{
	ASSERT(atomic_read(&cuip->cui_refcount) > 0);
	if (!atomic_dec_and_test(&cuip->cui_refcount))
		return;

	xfs_trans_ail_delete(&cuip->cui_item, 0);
	xfs_cui_item_free(cuip);
}

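/*
 * Report the number of log iovecs and the amount of log space needed to log
 * the given CUI item: one iovec covering the format structure and its
 * trailing extent array.
 */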
STATIC void
xfs_cui_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);

	*nvecs += 1;
	*nbytes += xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given cui log item. We use only 1 iovec, and we point that
 * at the cui_log_format structure embedded in the cui item.
 * It is at this point that we assert that all of the extent
 * slots in the cui item have been filled.
 */
STATIC void
xfs_cui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(atomic_read(&cuip->cui_next_extent) ==
			cuip->cui_format.cui_nextents);

	cuip->cui_format.cui_type = XFS_LI_CUI;
	cuip->cui_format.cui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUI_FORMAT, &cuip->cui_format,
			xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents));
}

/*
 * The unpin operation is the last place a CUI is manipulated in the log. It
 * is either inserted in the AIL or aborted in the event of a log I/O error.
 * In either case, the CUI transaction has been successfully committed to make
 * it this far. Therefore, we expect whoever committed the CUI to either
 * construct and commit the CUD or drop the CUD's reference in the event of
 * error. Simply drop the log's CUI reference now that the log is done with
 * it.
 */
STATIC void
xfs_cui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);

	xfs_cui_release(cuip);
}

/*
 * The CUI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, a CUD isn't going to be
 * constructed and thus we free the CUI here directly.
 */
STATIC void
xfs_cui_item_release(
	struct xfs_log_item	*lip)
{
	xfs_cui_release(CUI_ITEM(lip));
}

/*
 * Allocate and initialize a CUI item with the given number of extents.
 */
STATIC struct xfs_cui_log_item *
xfs_cui_init(
	struct xfs_mount	*mp,
	uint			nextents)
{
	struct xfs_cui_log_item	*cuip;

	ASSERT(nextents > 0);
	if (nextents > XFS_CUI_MAX_FAST_EXTENTS)
		cuip = kmem_zalloc(xfs_cui_log_item_sizeof(nextents), 0);
	else
		cuip = kmem_cache_zalloc(xfs_cui_cache,
					 GFP_KERNEL | __GFP_NOFAIL);

	xfs_log_item_init(mp, &cuip->cui_item, XFS_LI_CUI, &xfs_cui_item_ops);
	cuip->cui_format.cui_nextents = nextents;
	cuip->cui_format.cui_id = (uintptr_t)(void *)cuip;
	atomic_set(&cuip->cui_next_extent, 0);
	atomic_set(&cuip->cui_refcount, 2);

	return cuip;
}

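/* Convert a generic log item back to the CUD that contains it. */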
static inline struct xfs_cud_log_item *CUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_cud_log_item, cud_item);
}

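/*
 * Report the number of log iovecs and the amount of log space needed to log
 * the given CUD item: one iovec for the fixed-size done structure.
 */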
STATIC void
xfs_cud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_cud_log_format);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given cud log item. We use only 1 iovec, and we point that
 * at the cud_log_format structure embedded in the cud item.
 */
STATIC void
xfs_cud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_cud_log_item	*cudp = CUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	cudp->cud_format.cud_type = XFS_LI_CUD;
	cudp->cud_format.cud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUD_FORMAT, &cudp->cud_format,
			sizeof(struct xfs_cud_log_format));
}

/*
 * The CUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the CUI and free the
 * CUD.
 */
STATIC void
xfs_cud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_cud_log_item	*cudp = CUD_ITEM(lip);

	xfs_cui_release(cudp->cud_cuip);
	kmem_free(cudp->cud_item.li_lv_shadow);
	kmem_cache_free(xfs_cud_cache, cudp);
}

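/* Return the CUI that this CUD marks as done. */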
static struct xfs_log_item *
xfs_cud_item_intent(
	struct xfs_log_item	*lip)
{
	return &CUD_ITEM(lip)->cud_cuip->cui_item;
}

static const struct xfs_item_ops xfs_cud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED |
			  XFS_ITEM_INTENT_DONE,
	.iop_size	= xfs_cud_item_size,
	.iop_format	= xfs_cud_item_format,
	.iop_release	= xfs_cud_item_release,
	.iop_intent	= xfs_cud_item_intent,
};

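/*
 * Allocate a CUD, point it at the CUI it will retire, and join it to the
 * transaction so that committing (or cancelling) the transaction settles
 * the intent.
 */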
static struct xfs_cud_log_item *
xfs_trans_get_cud(
	struct xfs_trans		*tp,
	struct xfs_cui_log_item		*cuip)
{
	struct xfs_cud_log_item		*cudp;

	cudp = kmem_cache_zalloc(xfs_cud_cache, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(tp->t_mountp, &cudp->cud_item, XFS_LI_CUD,
			  &xfs_cud_item_ops);
	cudp->cud_cuip = cuip;
	cudp->cud_format.cud_cui_id = cuip->cui_format.cui_id;

	xfs_trans_add_item(tp, &cudp->cud_item);
	return cudp;
}

/*
 * Finish a refcount update and log it to the CUD. Note that the
 * transaction is marked dirty regardless of whether the refcount
 * update succeeds or fails to support the CUI/CUD lifecycle rules.
 */
static int
xfs_trans_log_finish_refcount_update(
	struct xfs_trans		*tp,
	struct xfs_cud_log_item		*cudp,
	struct xfs_refcount_intent	*ri,
	struct xfs_btree_cur		**pcur)
{
	int				error;

	error = xfs_refcount_finish_one(tp, ri, pcur);

	/*
	 * Mark the transaction dirty, even on error. This ensures the
	 * transaction is aborted, which:
	 *
	 * 1.) releases the CUI and frees the CUD
	 * 2.) shuts down the filesystem
	 */
	tp->t_flags |= XFS_TRANS_DIRTY | XFS_TRANS_HAS_INTENT_DONE;
	set_bit(XFS_LI_DIRTY, &cudp->cud_item.li_flags);

	return error;
}

/* Sort refcount intents by AG. */
static int
xfs_refcount_update_diff_items(
	void				*priv,
	const struct list_head		*a,
	const struct list_head		*b)
{
	struct xfs_mount		*mp = priv;
	struct xfs_refcount_intent	*ra;
	struct xfs_refcount_intent	*rb;

	ra = container_of(a, struct xfs_refcount_intent, ri_list);
	rb = container_of(b, struct xfs_refcount_intent, ri_list);
	return XFS_FSB_TO_AGNO(mp, ra->ri_startblock) -
	       XFS_FSB_TO_AGNO(mp, rb->ri_startblock);
}

/* Set the phys extent flags for this refcount update. */
static void
xfs_trans_set_refcount_flags(
	struct xfs_phys_extent		*pmap,
	enum xfs_refcount_intent_type	type)
{
	pmap->pe_flags = 0;
	switch (type) {
	case XFS_REFCOUNT_INCREASE:
	case XFS_REFCOUNT_DECREASE:
	case XFS_REFCOUNT_ALLOC_COW:
	case XFS_REFCOUNT_FREE_COW:
		pmap->pe_flags |= type;
		break;
	default:
		ASSERT(0);
	}
}

/* Log refcount updates in the intent item. */
STATIC void
xfs_refcount_update_log_item(
	struct xfs_trans		*tp,
	struct xfs_cui_log_item		*cuip,
	struct xfs_refcount_intent	*ri)
{
	uint				next_extent;
	struct xfs_phys_extent		*pmap;

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &cuip->cui_item.li_flags);

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&cuip->cui_next_extent) - 1;
	ASSERT(next_extent < cuip->cui_format.cui_nextents);
	pmap = &cuip->cui_format.cui_extents[next_extent];
	pmap->pe_startblock = ri->ri_startblock;
	pmap->pe_len = ri->ri_blockcount;
	xfs_trans_set_refcount_flags(pmap, ri->ri_type);
}

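/*
 * Create a CUI big enough for all the refcount updates on the list and log
 * each update into it, sorting the items by AG first if asked so that the
 * updates are processed in a consistent AG order.
 */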
static struct xfs_log_item *
xfs_refcount_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_cui_log_item		*cuip = xfs_cui_init(mp, count);
	struct xfs_refcount_intent	*ri;

	ASSERT(count > 0);

	xfs_trans_add_item(tp, &cuip->cui_item);
	if (sort)
		list_sort(mp, items, xfs_refcount_update_diff_items);
	list_for_each_entry(ri, items, ri_list)
		xfs_refcount_update_log_item(tp, cuip, ri);
	return &cuip->cui_item;
}

/* Get a CUD so we can process all the deferred refcount updates. */
static struct xfs_log_item *
xfs_refcount_update_create_done(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	unsigned int			count)
{
	return &xfs_trans_get_cud(tp, CUI_ITEM(intent))->cud_item;
}

/* Process a deferred refcount update. */
STATIC int
xfs_refcount_update_finish_item(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct list_head		*item,
	struct xfs_btree_cur		**state)
{
	struct xfs_refcount_intent	*ri;
	int				error;

	ri = container_of(item, struct xfs_refcount_intent, ri_list);
	error = xfs_trans_log_finish_refcount_update(tp, CUD_ITEM(done), ri,
			state);

	/* Did we run out of reservation? Requeue what we didn't finish. */
	if (!error && ri->ri_blockcount > 0) {
		ASSERT(ri->ri_type == XFS_REFCOUNT_INCREASE ||
		       ri->ri_type == XFS_REFCOUNT_DECREASE);
		return -EAGAIN;
	}
	kmem_cache_free(xfs_refcount_intent_cache, ri);
	return error;
}

/* Abort all pending CUIs. */
STATIC void
xfs_refcount_update_abort_intent(
	struct xfs_log_item		*intent)
{
	xfs_cui_release(CUI_ITEM(intent));
}

/* Cancel a deferred refcount update. */
STATIC void
xfs_refcount_update_cancel_item(
	struct list_head		*item)
{
	struct xfs_refcount_intent	*ri;

	ri = container_of(item, struct xfs_refcount_intent, ri_list);
	kmem_cache_free(xfs_refcount_intent_cache, ri);
}

const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
	.max_items	= XFS_CUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_refcount_update_create_intent,
	.abort_intent	= xfs_refcount_update_abort_intent,
	.create_done	= xfs_refcount_update_create_done,
	.finish_item	= xfs_refcount_update_finish_item,
	.finish_cleanup	= xfs_refcount_finish_one_cleanup,
	.cancel_item	= xfs_refcount_update_cancel_item,
};

/* Is this recovered CUI ok? */
static inline bool
xfs_cui_validate_phys(
	struct xfs_mount		*mp,
	struct xfs_phys_extent		*pmap)
{
	if (!xfs_has_reflink(mp))
		return false;

	if (pmap->pe_flags & ~XFS_REFCOUNT_EXTENT_FLAGS)
		return false;

	switch (pmap->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK) {
	case XFS_REFCOUNT_INCREASE:
	case XFS_REFCOUNT_DECREASE:
	case XFS_REFCOUNT_ALLOC_COW:
	case XFS_REFCOUNT_FREE_COW:
		break;
	default:
		return false;
	}

	return xfs_verify_fsbext(mp, pmap->pe_startblock, pmap->pe_len);
}

/*
 * Process a refcount update intent item that was recovered from the log.
 * We need to update the refcountbt.
 */
STATIC int
xfs_cui_item_recover(
	struct xfs_log_item		*lip,
	struct list_head		*capture_list)
{
	struct xfs_cui_log_item		*cuip = CUI_ITEM(lip);
	struct xfs_cud_log_item		*cudp;
	struct xfs_trans		*tp;
	struct xfs_btree_cur		*rcur = NULL;
	struct xfs_mount		*mp = lip->li_log->l_mp;
	unsigned int			refc_type;
	bool				requeue_only = false;
	int				i;
	int				error = 0;

	/*
	 * First check the validity of the extents described by the
	 * CUI. If any are bad, then assume that all are bad and
	 * just toss the CUI.
	 */
	for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
		if (!xfs_cui_validate_phys(mp,
					&cuip->cui_format.cui_extents[i])) {
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					&cuip->cui_format,
					sizeof(cuip->cui_format));
			return -EFSCORRUPTED;
		}
	}

	/*
	 * Under normal operation, refcount updates are deferred, so we
	 * wouldn't be adding them directly to a transaction. All
	 * refcount updates manage reservation usage internally and
	 * dynamically by deferring work that won't fit in the
	 * transaction. Normally, any work that needs to be deferred
	 * gets attached to the same defer_ops that scheduled the
	 * refcount update. However, we're in log recovery here, so we
	 * use the passed-in defer_ops to finish up any work that
	 * doesn't fit. We need to reserve enough blocks to handle a
	 * full btree split on either end of the refcount range.
	 */
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
			mp->m_refc_maxlevels * 2, 0, XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	cudp = xfs_trans_get_cud(tp, cuip);

	for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
		struct xfs_refcount_intent	fake = { };
		struct xfs_phys_extent		*pmap;

		pmap = &cuip->cui_format.cui_extents[i];
		refc_type = pmap->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK;
		switch (refc_type) {
		case XFS_REFCOUNT_INCREASE:
		case XFS_REFCOUNT_DECREASE:
		case XFS_REFCOUNT_ALLOC_COW:
		case XFS_REFCOUNT_FREE_COW:
			fake.ri_type = refc_type;
			break;
		default:
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					&cuip->cui_format,
					sizeof(cuip->cui_format));
			error = -EFSCORRUPTED;
			goto abort_error;
		}

		fake.ri_startblock = pmap->pe_startblock;
		fake.ri_blockcount = pmap->pe_len;
		if (!requeue_only)
			error = xfs_trans_log_finish_refcount_update(tp, cudp,
					&fake, &rcur);
		if (error == -EFSCORRUPTED)
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					&cuip->cui_format,
					sizeof(cuip->cui_format));
		if (error)
			goto abort_error;

		/* Requeue what we didn't finish. */
		if (fake.ri_blockcount > 0) {
			struct xfs_bmbt_irec	irec = {
				.br_startblock	= fake.ri_startblock,
				.br_blockcount	= fake.ri_blockcount,
			};

			switch (fake.ri_type) {
			case XFS_REFCOUNT_INCREASE:
				xfs_refcount_increase_extent(tp, &irec);
				break;
			case XFS_REFCOUNT_DECREASE:
				xfs_refcount_decrease_extent(tp, &irec);
				break;
			case XFS_REFCOUNT_ALLOC_COW:
				xfs_refcount_alloc_cow_extent(tp,
						irec.br_startblock,
						irec.br_blockcount);
				break;
			case XFS_REFCOUNT_FREE_COW:
				xfs_refcount_free_cow_extent(tp,
						irec.br_startblock,
						irec.br_blockcount);
				break;
			default:
				ASSERT(0);
			}
			requeue_only = true;
		}
	}

	xfs_refcount_finish_one_cleanup(tp, rcur, error);
	return xfs_defer_ops_capture_and_commit(tp, capture_list);

abort_error:
	xfs_refcount_finish_one_cleanup(tp, rcur, error);
	xfs_trans_cancel(tp);
	return error;
}

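/* Match CUIs in the AIL by the intent id logged in the format structure. */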
STATIC bool
xfs_cui_item_match(
	struct xfs_log_item	*lip,
	uint64_t		intent_id)
{
	return CUI_ITEM(lip)->cui_format.cui_id == intent_id;
}

/* Relog an intent item to push the log tail forward. */
static struct xfs_log_item *
xfs_cui_item_relog(
	struct xfs_log_item		*intent,
	struct xfs_trans		*tp)
{
	struct xfs_cud_log_item		*cudp;
	struct xfs_cui_log_item		*cuip;
	struct xfs_phys_extent		*pmap;
	unsigned int			count;

	count = CUI_ITEM(intent)->cui_format.cui_nextents;
	pmap = CUI_ITEM(intent)->cui_format.cui_extents;

	tp->t_flags |= XFS_TRANS_DIRTY;
	cudp = xfs_trans_get_cud(tp, CUI_ITEM(intent));
	set_bit(XFS_LI_DIRTY, &cudp->cud_item.li_flags);

	cuip = xfs_cui_init(tp->t_mountp, count);
	memcpy(cuip->cui_format.cui_extents, pmap, count * sizeof(*pmap));
	atomic_set(&cuip->cui_next_extent, count);
	xfs_trans_add_item(tp, &cuip->cui_item);
	set_bit(XFS_LI_DIRTY, &cuip->cui_item.li_flags);
	return &cuip->cui_item;
}

static const struct xfs_item_ops xfs_cui_item_ops = {
	.flags		= XFS_ITEM_INTENT,
	.iop_size	= xfs_cui_item_size,
	.iop_format	= xfs_cui_item_format,
	.iop_unpin	= xfs_cui_item_unpin,
	.iop_release	= xfs_cui_item_release,
	.iop_recover	= xfs_cui_item_recover,
	.iop_match	= xfs_cui_item_match,
	.iop_relog	= xfs_cui_item_relog,
};

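/*
 * Copy a recovered CUI format structure into an in-core CUI: the fixed
 * header first, then each extent record in turn.
 */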
static inline void
xfs_cui_copy_format(
	struct xfs_cui_log_format	*dst,
	const struct xfs_cui_log_format	*src)
{
	unsigned int			i;

	memcpy(dst, src, offsetof(struct xfs_cui_log_format, cui_extents));

	for (i = 0; i < src->cui_nextents; i++)
		memcpy(&dst->cui_extents[i], &src->cui_extents[i],
				sizeof(struct xfs_phys_extent));
}

/*
 * This routine is called to create an in-core extent refcount update
 * item from the cui format structure which was logged on disk.
 * It allocates an in-core cui, copies the extents from the format
 * structure into it, and adds the cui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_cui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_cui_log_item		*cuip;
	struct xfs_cui_log_format	*cui_formatp;
	size_t				len;

	cui_formatp = item->ri_buf[0].i_addr;

	if (item->ri_buf[0].i_len < xfs_cui_log_format_sizeof(0)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	len = xfs_cui_log_format_sizeof(cui_formatp->cui_nextents);
	if (item->ri_buf[0].i_len != len) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	cuip = xfs_cui_init(mp, cui_formatp->cui_nextents);
	xfs_cui_copy_format(&cuip->cui_format, cui_formatp);
	atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);
	/*
	 * Insert the intent into the AIL directly and drop one reference so
	 * that finishing or canceling the work will drop the other.
	 */
	xfs_trans_ail_insert(log->l_ailp, &cuip->cui_item, lsn);
	xfs_cui_release(cuip);
	return 0;
}

const struct xlog_recover_item_ops xlog_cui_item_ops = {
	.item_type		= XFS_LI_CUI,
	.commit_pass2		= xlog_recover_cui_commit_pass2,
};

/*
 * This routine is called when a CUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding CUI if it
 * was still in the log. To do this it searches the AIL for the CUI with an id
 * equal to that in the CUD format structure. If we find it we drop the CUI
 * reference, which removes the CUI from the AIL and frees it.
 */
STATIC int
xlog_recover_cud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_cud_log_format	*cud_formatp;

	cud_formatp = item->ri_buf[0].i_addr;
	if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	xlog_recover_release_intent(log, XFS_LI_CUI, cud_formatp->cud_cui_id);
	return 0;
}

const struct xlog_recover_item_ops xlog_cud_item_ops = {
	.item_type		= XFS_LI_CUD,
	.commit_pass2		= xlog_recover_cud_commit_pass2,
};