Lines matching refs:ptr (each hit: source line number, the matching line, and its containing function)
107 struct bch_dev_io_failures *f = bch2_dev_io_failures(failed, p->ptr.dev); in bch2_mark_io_failure()
114 f->dev = p->ptr.dev; in bch2_mark_io_failure()
161 struct bch_dev *ca2 = bch2_dev_rcu(c, p2.ptr.dev); in ptr_better()
205 have_dirty_ptrs |= !p.ptr.cached; in bch2_bkey_pick_read_device()
211 if (p.ptr.unwritten) { in bch2_bkey_pick_read_device()
217 if (dev >= 0 && p.ptr.dev != dev) in bch2_bkey_pick_read_device()
220 struct bch_dev *ca = bch2_dev_rcu_noerror(c, p.ptr.dev); in bch2_bkey_pick_read_device()
222 if (unlikely(!ca && p.ptr.dev != BCH_SB_MEMBER_INVALID)) { in bch2_bkey_pick_read_device()
224 int ret = bch2_dev_missing_bkey(c, k, p.ptr.dev); in bch2_bkey_pick_read_device()
230 if (p.ptr.cached && (!ca || dev_ptr_stale_rcu(ca, &p.ptr))) in bch2_bkey_pick_read_device()
234 unlikely(failed) ? bch2_dev_io_failures(failed, p.ptr.dev) : NULL; in bch2_bkey_pick_read_device()
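
The bch2_bkey_pick_read_device() hits above show the read path's per-pointer filtering: dirty vs. cached, unwritten, missing devices, and stale cached pointers. A minimal sketch of the same iteration pattern, assuming the bkey_for_each_ptr_decode() iterator and bch2_bkey_ptrs_c() from fs/bcachefs/extents.h (count_readable_replicas is invented for illustration, not a function in the source):

static unsigned count_readable_replicas(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned nr = 0;

	rcu_read_lock();
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		struct bch_dev *ca = bch2_dev_rcu_noerror(c, p.ptr.dev);

		/* a cached pointer is unusable once its bucket was reused */
		if (p.ptr.cached && (!ca || dev_ptr_stale_rcu(ca, &p.ptr)))
			continue;
		nr++;
	}
	rcu_read_unlock();

	return nr;
}
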
414 if (lp.ptr.offset + lp.crc.offset + lp.crc.live_size != in bch2_extent_merge()
415 rp.ptr.offset + rp.crc.offset || in bch2_extent_merge()
416 lp.ptr.dev != rp.ptr.dev || in bch2_extent_merge()
417 lp.ptr.gen != rp.ptr.gen || in bch2_extent_merge()
418 lp.ptr.unwritten != rp.ptr.unwritten || in bch2_extent_merge()
423 struct bch_dev *ca = bch2_dev_rcu(c, lp.ptr.dev); in bch2_extent_merge()
424 bool same_bucket = ca && PTR_BUCKET_NR(ca, &lp.ptr) == PTR_BUCKET_NR(ca, &rp.ptr); in bch2_extent_merge()
489 en_l->ptr = en_r->ptr; in bch2_extent_merge()
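
The checks at lines 414-418 encode physical mergeability: for each pointer pair, the right key's live data must begin exactly where the left key's ends, on the same device, generation and written-state. A worked example with invented numbers:

/*
 *   left:  ptr.offset 1000, crc.offset 2, crc.live_size 6
 *   right: ptr.offset 1008, crc.offset 0
 *
 * 1000 + 2 + 6 == 1008 + 0, so the live data is contiguous on the
 * device; with dev, gen and unwritten also equal (lines 416-418),
 * this pointer pair does not block the merge.
 */
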
648 bch2_bkey_drop_ptr_noerror(bkey_i_to_s(k), &i->ptr); in bch2_bkey_narrow_crcs()
649 p.ptr.offset += p.crc.offset; in bch2_bkey_narrow_crcs()
754 ret += !p.ptr.cached && !crc_is_compressed(p.crc); in bch2_bkey_nr_ptrs_fully_allocated()
768 if (!p.ptr.cached && crc_is_compressed(p.crc)) in bch2_bkey_sectors_compressed()
794 if (p.ptr.cached) in bch2_bkey_replicas()
809 if (p->ptr.cached) in __extent_ptr_durability()
819 struct bch_dev *ca = bch2_dev_rcu(c, p->ptr.dev); in bch2_extent_ptr_desired_durability()
826 struct bch_dev *ca = bch2_dev_rcu(c, p->ptr.dev); in bch2_extent_ptr_durability()
856 if (p.ptr.dev < c->sb.nr_devices && c->devs[p.ptr.dev]) in bch2_bkey_durability_safe()
892 p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr; in bch2_extent_ptr_decoded_append()
893 __extent_entry_insert(k, pos, to_entry(&p->ptr)); in bch2_extent_ptr_decoded_append()
917 void bch2_bkey_drop_ptr_noerror(struct bkey_s k, struct bch_extent_ptr *ptr) in bch2_bkey_drop_ptr_noerror() argument
920 union bch_extent_entry *entry = to_entry(ptr), *next; in bch2_bkey_drop_ptr_noerror()
924 ptr->dev = BCH_SB_MEMBER_INVALID; in bch2_bkey_drop_ptr_noerror()
928 EBUG_ON(ptr < &ptrs.start->ptr || in bch2_bkey_drop_ptr_noerror()
929 ptr >= &ptrs.end->ptr); in bch2_bkey_drop_ptr_noerror()
930 EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr); in bch2_bkey_drop_ptr_noerror()
955 void bch2_bkey_drop_ptr(struct bkey_s k, struct bch_extent_ptr *ptr) in bch2_bkey_drop_ptr() argument
963 if (p.ptr.dev == ptr->dev && p.has_ec) { in bch2_bkey_drop_ptr()
964 ptr->dev = BCH_SB_MEMBER_INVALID; in bch2_bkey_drop_ptr()
971 bch2_bkey_drop_ptr_noerror(k, ptr); in bch2_bkey_drop_ptr()
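
Two drop strategies are visible in the helpers above; a hedged summary (the rationale is my inference, not source text):

/*
 *  - bch2_bkey_drop_ptr_noerror() normally removes the entry outright;
 *    the EBUG_ON()s at lines 928-930 first assert that @ptr really is
 *    a ptr-type entry inside this key's own entry array.
 *  - Alternatively the entry is kept and only its device id is
 *    poisoned with BCH_SB_MEMBER_INVALID (lines 924 and 963-964),
 *    used when the entry cannot simply be removed, for example when
 *    bch2_bkey_drop_ptr() finds the pointer is backed by erasure
 *    coding (p.has_ec).
 */
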
991 bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev); in bch2_bkey_drop_device()
996 bch2_bkey_drop_ptrs_noerror(k, ptr, ptr->dev == dev); in bch2_bkey_drop_device_noerror()
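
Both wrappers above are one-liners over the bch2_bkey_drop_ptrs() / bch2_bkey_drop_ptrs_noerror() macros, which take an arbitrary per-pointer predicate and, judging by their use here and at line 1188, handle removal safely during iteration. A sketch with an invented predicate (evict_cached_ptrs is not in the source):

/* drop every cached pointer, same pattern as bch2_bkey_drop_device() */
static void evict_cached_ptrs(struct bkey_s k)
{
	bch2_bkey_drop_ptrs(k, ptr, ptr->cached);
}
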
1003 bkey_for_each_ptr(ptrs, ptr) in bch2_bkey_has_device_c()
1004 if (ptr->dev == dev) in bch2_bkey_has_device_c()
1005 return ptr; in bch2_bkey_has_device_c()
1016 bkey_for_each_ptr(ptrs, ptr) in bch2_bkey_has_target()
1017 if (bch2_dev_in_target(c, ptr->dev, target) && in bch2_bkey_has_target()
1018 (ca = bch2_dev_rcu(c, ptr->dev)) && in bch2_bkey_has_target()
1019 (!ptr->cached || in bch2_bkey_has_target()
1020 !dev_ptr_stale_rcu(ca, ptr))) in bch2_bkey_has_target()
1034 if (p.ptr.dev == m.dev && in bch2_bkey_matches_ptr()
1035 p.ptr.gen == m.gen && in bch2_bkey_matches_ptr()
1036 (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) == in bch2_bkey_matches_ptr()
1062 if (p1.ptr.dev == p2.ptr.dev && in bch2_extents_match()
1063 p1.ptr.gen == p2.ptr.gen && in bch2_extents_match()
1071 (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) == in bch2_extents_match()
1072 (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k) && in bch2_extents_match()
1083 ((p1.ptr.offset >= p2.ptr.offset && in bch2_extents_match()
1084 p1.ptr.offset < p2.ptr.offset + p2.crc.compressed_size) || in bch2_extents_match()
1085 (p2.ptr.offset >= p1.ptr.offset && in bch2_extents_match()
1086 p2.ptr.offset < p1.ptr.offset + p1.crc.compressed_size))) in bch2_extents_match()
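
The recurring idiom (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) (lines 1036, 1071-1072, 1106-1107) normalizes a pointer to the device address that logical sector 0 of the key would map to, so pointers from keys with different logical ranges can be compared directly. A worked example with invented numbers:

/*
 *   k1: bkey_start_offset 100, ptr.offset 1000, crc.offset 4
 *   k2: bkey_start_offset 104, ptr.offset 1000, crc.offset 8
 *
 *   k1: 1000 + 4 - 100 == 904
 *   k2: 1000 + 8 - 104 == 904
 *
 * Equal results: both keys place a given logical sector at the same
 * device address, so they reference the same on-disk data even though
 * their logical ranges differ. For compressed extents that arithmetic
 * does not hold, so lines 1083-1086 instead test whether the two
 * pointers' compressed regions overlap.
 */
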
1104 if (p1.ptr.dev == p2.ptr.dev && in bch2_extent_has_ptr()
1105 p1.ptr.gen == p2.ptr.gen && in bch2_extent_has_ptr()
1106 (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) == in bch2_extent_has_ptr()
1107 (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k)) in bch2_extent_has_ptr()
1108 return &entry2->ptr; in bch2_extent_has_ptr()
1114 struct bch_extent_ptr *ptr) in want_cached_ptr() argument
1118 if (target && !bch2_dev_in_target(c, ptr->dev, target)) in want_cached_ptr()
1121 struct bch_dev *ca = bch2_dev_rcu_noerror(c, ptr->dev); in want_cached_ptr()
1123 return ca && bch2_dev_is_healthy(ca) && !dev_ptr_stale_rcu(ca, ptr); in want_cached_ptr()
1129 struct bch_extent_ptr *ptr) in bch2_extent_ptr_set_cached() argument
1135 unsigned drop_dev = ptr->dev; in bch2_extent_ptr_set_cached()
1147 if (&entry->ptr == ptr && p.has_ec) in bch2_extent_ptr_set_cached()
1150 if (p.ptr.cached) { in bch2_extent_ptr_set_cached()
1151 if (have_cached_ptr || !want_cached_ptr(c, opts, &p.ptr)) { in bch2_extent_ptr_set_cached()
1152 bch2_bkey_drop_ptr_noerror(k, &entry->ptr); in bch2_extent_ptr_set_cached()
1153 ptr = NULL; in bch2_extent_ptr_set_cached()
1161 if (!ptr) in bch2_extent_ptr_set_cached()
1164 ptr = ptr2; in bch2_extent_ptr_set_cached()
1166 if (have_cached_ptr || !want_cached_ptr(c, opts, ptr)) in bch2_extent_ptr_set_cached()
1169 ptr->cached = true; in bch2_extent_ptr_set_cached()
1172 bch2_bkey_drop_ptr_noerror(k, ptr); in bch2_extent_ptr_set_cached()
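
Putting want_cached_ptr() (lines 1114-1123) and bch2_extent_ptr_set_cached() (lines 1129-1172) together, the decision sequence reads roughly as follows (my summary of the control flow, not source text):

/*
 *  - a pointer backed by erasure coding (p.has_ec) cannot be cached:
 *    drop it
 *  - while scanning, cached pointers that are redundant (one cached
 *    copy is already being kept) or fail want_cached_ptr() are dropped
 *  - want_cached_ptr() vetoes a pointer whose device is outside the
 *    target, missing or unhealthy, or whose bucket is stale
 *  - only a pointer clearing all of the above gets ptr->cached = true
 */
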
1188 bch2_bkey_drop_ptrs(k, ptr, in bch2_extent_normalize()
1189 ptr->cached && in bch2_extent_normalize()
1190 (!(ca = bch2_dev_rcu(c, ptr->dev)) || in bch2_extent_normalize()
1191 dev_ptr_stale_rcu(ca, ptr) > 0)); in bch2_extent_normalize()
1214 bkey_for_each_ptr(ptrs, ptr) in bch2_extent_normalize_by_opts()
1215 if (ptr->cached) { in bch2_extent_normalize_by_opts()
1216 if (have_cached_ptr || !want_cached_ptr(c, opts, ptr)) { in bch2_extent_normalize_by_opts()
1217 bch2_bkey_drop_ptr(k, ptr); in bch2_extent_normalize_by_opts()
1226 void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *c, const struct bch_extent_ptr *ptr) in bch2_extent_ptr_to_text() argument
1230 struct bch_dev *ca = bch2_dev_rcu_noerror(c, ptr->dev); in bch2_extent_ptr_to_text()
1232 prt_printf(out, "ptr: %u:%llu gen %u%s", ptr->dev, in bch2_extent_ptr_to_text()
1233 (u64) ptr->offset, ptr->gen, in bch2_extent_ptr_to_text()
1234 ptr->cached ? " cached" : ""); in bch2_extent_ptr_to_text()
1237 u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset); in bch2_extent_ptr_to_text()
1240 ptr->dev, b, offset, ptr->gen); in bch2_extent_ptr_to_text()
1243 if (ptr->cached) in bch2_extent_ptr_to_text()
1245 if (ptr->unwritten) in bch2_extent_ptr_to_text()
1247 int stale = dev_ptr_stale_rcu(ca, ptr); in bch2_extent_ptr_to_text()
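
From the first prt_printf() above (lines 1232-1234), a pointer whose device could not be looked up renders as, for example (values invented):

/*
 *   "ptr: 3:4096 gen 7 cached"
 *
 * i.e. dev:offset, then the generation, then optional flags. When the
 * device is known (lines 1237-1240), the raw sector offset is instead
 * split into bucket and offset-within-bucket via
 * sector_to_bucket_and_offset(), and cached/unwritten/stale
 * annotations follow (lines 1243-1247).
 */
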
1374 const struct bch_extent_ptr *ptr, in extent_ptr_validate() argument
1382 bkey_fsck_err_on(ptr != ptr2 && ptr->dev == ptr2->dev, in extent_ptr_validate()
1384 "multiple pointers to same device (%u)", ptr->dev); in extent_ptr_validate()
1388 struct bch_dev *ca = bch2_dev_rcu_noerror(c, ptr->dev); in extent_ptr_validate()
1394 u64 bucket = sector_to_bucket_and_offset(ca, ptr->offset, &bucket_offset); in extent_ptr_validate()
1442 ret = extent_ptr_validate(c, k, from, &entry->ptr, size_ondisk, false); in bch2_bkey_ptrs_validate()
1446 bkey_fsck_err_on(entry->ptr.cached && have_ec, in bch2_bkey_ptrs_validate()
1450 if (!entry->ptr.unwritten) in bch2_bkey_ptrs_validate()
1649 entry->ptr.offset += sub; in bch2_cut_front_s()
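
Line 1649 is the front-cut adjustment for plain pointers: when sub sectors are removed from the front of an extent, a pointer with no checksum entry in front of it advances by the same amount (for checksummed pointers the cut is absorbed by crc.offset instead, by my reading of bch2_cut_front_s()). A worked example with invented numbers:

/*
 * extent covers logical sectors [100, 108) with ptr.offset == 1000;
 * cutting the front at 104 gives sub == 4, so ptr.offset becomes 1004
 * and still addresses the first remaining logical sector.
 */
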