Searched refs:folio_end (Results 1 – 10 of 10) sorted by relevance
/fs/bcachefs/
fs-io-pagecache.c
    212  u64 folio_end = folio_end_sector(folio);  in bch2_folio_set()
    215  unsigned folio_len = min(k.k->p.offset, folio_end) -  in bch2_folio_set()
    219  BUG_ON(bkey_start_offset(k.k) > folio_end);  in bch2_folio_set()
    224  if (k.k->p.offset < folio_end)  in bch2_folio_set()
    268  u64 folio_end = folio_end_sector(folio);  in bch2_mark_pagecache_unallocated() local
    270  unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;  in bch2_mark_pagecache_unallocated()
    322  u64 folio_end = folio_end_sector(folio);  in bch2_mark_pagecache_reserved() local
    326  *start = min(end, folio_end);  in bch2_mark_pagecache_reserved()
    331  unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;  in bch2_mark_pagecache_reserved()
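bcachefs works in 512-byte sectors rather than bytes here: folio_end_sector() gives the exclusive end sector of the folio, and each caller above clamps a bkey or reservation range against it. A minimal sketch of the helpers, reconstructed from these call sites rather than copied from fs/bcachefs/fs-io-pagecache.h (treat the exact shape as an assumption):

static inline u64 folio_sector(struct folio *folio)
{
	/* Assumed shape: byte offset of the folio, in 512-byte sectors. */
	return folio_pos(folio) >> 9;
}

static inline u64 folio_end_sector(struct folio *folio)
{
	/* Exclusive end: the first sector past this folio. */
	return folio_sector(folio) + (folio_size(folio) >> 9);
}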
/fs/btrfs/
compression.h
     83  ASSERT(cur < folio_end(folio));  in btrfs_calc_input_length()
     84  return min(range_end, folio_end(folio)) - cur;  in btrfs_calc_input_length()
misc.h
    163  static inline u64 folio_end(struct folio *folio)  in folio_end() function
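This is the only definition among the hits. Judging from the call sites (folio_end(folio) - 1 wherever an inclusive last byte is wanted), the body is presumably the obvious one-liner; a sketch, not a verbatim copy of fs/btrfs/misc.h:

static inline u64 folio_end(struct folio *folio)
{
	/* Exclusive end: byte offset of the first byte past the folio. */
	return folio_pos(folio) + folio_size(folio);
}

Every btrfs hit below is some variation on clamping a file range against this exclusive end.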
defrag.c
    889  lock_end = folio_end(folio) - 1;  in defrag_prepare_one_folio()
   1181  if (start >= folio_end(folio) || start + len <= folio_pos(folio))  in defrag_one_locked_target()
   1222  cur = folio_end(folios[i]);  in defrag_one_range()
extent_io.c
    270  range_len = min_t(u64, folio_end(folio), end + 1) - range_start;  in lock_delalloc_folios()
    324  ASSERT(!(orig_start >= folio_end(locked_folio) ||  in find_lock_delalloc_range()
    422  start + len <= folio_end(folio));  in end_folio_read()
   1118  u64 range_len = min(folio_end(folio),  in can_skip_one_ordered_range()
   1140  u64 range_len = min(folio_end(folio),  in can_skip_one_ordered_range()
   2128  u32 range_len = min_t(u64, folio_end(folio),  in write_one_eb()
   2532  cur_end = min_t(u64, folio_end(folio) - 1, end);  in extent_write_locked_range()
   3772  u32 range_len = min_t(u64, folio_end(folio),  in read_extent_buffer_pages_nowait()
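Most of these hits share one idiom: intersect an inclusive [start, end] byte range with the folio, converting the inclusive end with end + 1 before comparing against the exclusive folio_end(). A hedged sketch of the pattern; the helper name is invented for illustration:

static u64 range_len_in_folio(struct folio *folio, u64 start, u64 end)
{
	u64 range_start = max(start, folio_pos(folio));

	/* end is inclusive, folio_end() is exclusive, hence end + 1. */
	return min_t(u64, folio_end(folio), end + 1) - range_start;
}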
subpage.c
    189  ASSERT(folio_pos(folio) <= start && start + len <= folio_end(folio),  in btrfs_subpage_assert()
    220  *len = min_t(u64, folio_end(folio), orig_start + orig_len) - *start;  in btrfs_subpage_clamp_range()
relocation.c
   2806  u64 folio_end;  in relocate_one_folio() local
   2863  folio_end = folio_start + folio_size(folio) - 1;  in relocate_one_folio()
   2870  while (cur <= folio_end) {  in relocate_one_folio()
   2876  u64 clamped_end = min(folio_end, extent_end);  in relocate_one_folio()
   2942  *file_offset_ret = folio_end + 1;  in relocate_one_folio()
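relocation.c is the odd one out: it uses a local u64 folio_end holding the inclusive last byte of the folio, one less than what the misc.h helper returns, so the walk uses <= and resumes at + 1. A hypothetical condensation of the hits above (lookup_extent_end() is a stand-in, not a real kernel function):

/* Stand-in: inclusive end of the extent covering offset cur. */
u64 lookup_extent_end(u64 cur);

static u64 walk_one_folio(struct folio *folio)
{
	u64 folio_start = folio_pos(folio);
	u64 folio_end = folio_start + folio_size(folio) - 1;	/* inclusive */
	u64 cur = folio_start;

	while (cur <= folio_end) {
		u64 extent_end = lookup_extent_end(cur);
		u64 clamped_end = min(folio_end, extent_end);

		/* ... relocate the bytes in [cur, clamped_end] ... */
		cur = clamped_end + 1;
	}
	return folio_end + 1;	/* file offset the caller resumes from */
}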
file.c
     92  ASSERT(folio_pos(folio) <= pos && folio_end(folio) >= pos + write_bytes);  in btrfs_dirty_folio()
    803  u64 clamp_end = min_t(u64, pos + len, folio_end(folio));  in prepare_uptodate_folio()
   1258  if (reserved_start + reserved_len > folio_end(folio)) {  in copy_one_range()
   1259  const u64 last_block = folio_end(folio);  in copy_one_range()
ordered-data.c
    362  ASSERT(file_offset + len <= folio_end(folio));  in can_finish_ordered_extent()
inode.c
    409  index = folio_end(folio) >> PAGE_SHIFT;  in btrfs_cleanup_ordered_extents()
   2336  ASSERT(!(end <= folio_pos(locked_folio) || start >= folio_end(locked_folio)));  in btrfs_run_delalloc_range()
   2744  u64 page_end = folio_end(folio) - 1;  in btrfs_writepage_fixup_worker()
   4833  zero_end = folio_end(folio);  in truncate_block_zero_beyond_eof()
   5016  zero_end = min_t(u64, folio_end(folio) - 1, end);  in btrfs_truncate_block()
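A last consequence of the exclusive convention shows up at inode.c line 409: shifting folio_end() by PAGE_SHIFT yields the index of the first page after the folio, which is where btrfs_cleanup_ordered_extents() resumes. As a trivial sketch (helper name invented):

static pgoff_t first_index_past(struct folio *folio)
{
	/* Exclusive end byte -> index of the first page after this folio. */
	return folio_end(folio) >> PAGE_SHIFT;
}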
Completed in 60 milliseconds