Lines Matching refs:entry
Cross-reference hits for the identifier entry in the kernel's fs/dax.c: each line gives the file line number, the matching source line, the enclosing function, and whether entry is an argument or a local variable there.
68 static unsigned long dax_to_pfn(void *entry) in dax_to_pfn() argument
70 return xa_to_value(entry) >> DAX_SHIFT; in dax_to_pfn()
73 static struct folio *dax_to_folio(void *entry) in dax_to_folio() argument
75 return page_folio(pfn_to_page(dax_to_pfn(entry))); in dax_to_folio()
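
The two helpers above decode a DAX mapping entry: the XArray stores tagged value entries rather than pointers, xa_to_value() strips the tag bit, and the pfn sits above a small block of flag bits. A minimal userspace sketch of that decoding; the _m helpers are local stand-ins, and DAX_SHIFT = 4 is an assumption modeled on fs/dax.c:

    #include <assert.h>

    /* XArray value entries are odd "pointers": (v << 1) | 1. */
    static void *xa_mk_value_m(unsigned long v) { return (void *)((v << 1) | 1); }
    static unsigned long xa_to_value_m(void *e) { return (unsigned long)e >> 1; }

    #define DAX_SHIFT 4   /* assumed flag width, modeled on fs/dax.c */

    /* Mirrors dax_to_pfn(): the pfn lives above the flag bits. */
    static unsigned long dax_to_pfn_m(void *entry)
    {
        return xa_to_value_m(entry) >> DAX_SHIFT;
    }

    int main(void)
    {
        void *entry = xa_mk_value_m(0xabcUL << DAX_SHIFT);

        assert(dax_to_pfn_m(entry) == 0xabc);
        return 0;
    }
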
83 static bool dax_is_locked(void *entry) in dax_is_locked() argument
85 return xa_to_value(entry) & DAX_LOCKED; in dax_is_locked()
88 static unsigned int dax_entry_order(void *entry) in dax_entry_order() argument
90 if (xa_to_value(entry) & DAX_PMD) in dax_entry_order()
95 static unsigned long dax_is_pmd_entry(void *entry) in dax_is_pmd_entry() argument
97 return xa_to_value(entry) & DAX_PMD; in dax_is_pmd_entry()
100 static bool dax_is_pte_entry(void *entry) in dax_is_pte_entry() argument
102 return !(xa_to_value(entry) & DAX_PMD); in dax_is_pte_entry()
105 static int dax_is_zero_entry(void *entry) in dax_is_zero_entry() argument
107 return xa_to_value(entry) & DAX_ZERO_PAGE; in dax_is_zero_entry()
110 static int dax_is_empty_entry(void *entry) in dax_is_empty_entry() argument
112 return xa_to_value(entry) & DAX_EMPTY; in dax_is_empty_entry()
119 static bool dax_is_conflict(void *entry) in dax_is_conflict() argument
121 return entry == XA_RETRY_ENTRY; in dax_is_conflict()
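
The predicates above each test one flag bit in that value; only dax_is_conflict() differs, comparing against the XArray's retry entry instead of testing a bit. A compact model of the flag layout and of dax_entry_order(), with bit positions and PMD_ORDER assumed from fs/dax.c on x86-64:

    #include <assert.h>
    #include <stdbool.h>

    #define DAX_LOCKED    (1UL << 0)  /* a walker holds this entry       */
    #define DAX_PMD       (1UL << 1)  /* entry spans a PMD, not a PTE    */
    #define DAX_ZERO_PAGE (1UL << 2)  /* read hole backed by zero page   */
    #define DAX_EMPTY     (1UL << 3)  /* placeholder with no pfn yet     */
    #define PMD_ORDER     9           /* x86-64: 2 MiB / 4 KiB pages     */

    static bool is_locked(unsigned long v) { return v & DAX_LOCKED; }
    static bool is_pmd(unsigned long v)    { return v & DAX_PMD; }

    /* Mirrors dax_entry_order(): PMD entries are order PMD_ORDER, else 0. */
    static unsigned int entry_order(unsigned long v)
    {
        return is_pmd(v) ? PMD_ORDER : 0;
    }

    int main(void)
    {
        unsigned long v = DAX_PMD | DAX_ZERO_PAGE;

        assert(!is_locked(v) && is_pmd(v));
        assert(entry_order(v) == PMD_ORDER);
        return 0;
    }
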
148 void *entry, struct exceptional_entry_key *key) in dax_entry_waitqueue() argument
158 if (dax_is_pmd_entry(entry)) in dax_entry_waitqueue()
185 static void dax_wake_entry(struct xa_state *xas, void *entry, in dax_wake_entry() argument
191 wq = dax_entry_waitqueue(xas, entry, &key); in dax_wake_entry()
215 void *entry; in get_next_unlocked_entry() local
223 entry = xas_find_conflict(xas); in get_next_unlocked_entry()
224 if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) in get_next_unlocked_entry()
225 return entry; in get_next_unlocked_entry()
226 if (dax_entry_order(entry) < order) in get_next_unlocked_entry()
228 if (!dax_is_locked(entry)) in get_next_unlocked_entry()
229 return entry; in get_next_unlocked_entry()
231 wq = dax_entry_waitqueue(xas, entry, &ewait.key); in get_next_unlocked_entry()
247 static void *wait_entry_unlocked_exclusive(struct xa_state *xas, void *entry) in wait_entry_unlocked_exclusive() argument
255 while (unlikely(dax_is_locked(entry))) { in wait_entry_unlocked_exclusive()
256 wq = dax_entry_waitqueue(xas, entry, &ewait.key); in wait_entry_unlocked_exclusive()
264 entry = xas_load(xas); in wait_entry_unlocked_exclusive()
267 if (xa_is_internal(entry)) in wait_entry_unlocked_exclusive()
270 return entry; in wait_entry_unlocked_exclusive()
278 static void wait_entry_unlocked(struct xa_state *xas, void *entry) in wait_entry_unlocked() argument
286 wq = dax_entry_waitqueue(xas, entry, &ewait.key); in wait_entry_unlocked()
299 static void put_unlocked_entry(struct xa_state *xas, void *entry, in put_unlocked_entry() argument
302 if (entry && !dax_is_conflict(entry)) in put_unlocked_entry()
303 dax_wake_entry(xas, entry, mode); in put_unlocked_entry()
311 static void dax_unlock_entry(struct xa_state *xas, void *entry) in dax_unlock_entry() argument
315 BUG_ON(dax_is_locked(entry)); in dax_unlock_entry()
318 old = xas_store(xas, entry); in dax_unlock_entry()
321 dax_wake_entry(xas, entry, WAKE_NEXT); in dax_unlock_entry()
327 static void *dax_lock_entry(struct xa_state *xas, void *entry) in dax_lock_entry() argument
329 unsigned long v = xa_to_value(entry); in dax_lock_entry()
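
Together these helpers form the entry locking protocol: dax_lock_entry() stores the entry back with DAX_LOCKED set, dax_unlock_entry() stores the unlocked value and wakes a waiter, and the get/wait helpers sleep on a hashed wait queue keyed by dax_entry_waitqueue() until the bit clears. A pthread analogy of that handshake; this is a sketch only, since the kernel uses xa_state plus hashed wait queues with WAKE_NEXT/WAKE_ALL modes, not a condition variable:

    #include <pthread.h>

    #define DAX_LOCKED (1UL << 0)

    static pthread_mutex_t xa_lock  = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  entry_wq = PTHREAD_COND_INITIALIZER;
    static unsigned long slot;        /* stands in for one XArray slot */

    static unsigned long lock_entry(void)
    {
        unsigned long e;

        pthread_mutex_lock(&xa_lock);
        while (slot & DAX_LOCKED)                 /* get_next_unlocked_entry() */
            pthread_cond_wait(&entry_wq, &xa_lock);
        slot |= DAX_LOCKED;                       /* dax_lock_entry()          */
        e = slot & ~DAX_LOCKED;
        pthread_mutex_unlock(&xa_lock);
        return e;
    }

    static void unlock_entry(unsigned long entry)
    {
        pthread_mutex_lock(&xa_lock);
        slot = entry;                             /* store the unlocked value  */
        pthread_cond_broadcast(&entry_wq);        /* dax_wake_entry() analogue */
        pthread_mutex_unlock(&xa_lock);
    }

    int main(void)
    {
        unsigned long e = lock_entry();   /* returned with the lock bit clear */
        unlock_entry(e);                  /* store back unlocked, wake waiters */
        return 0;
    }
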
333 static unsigned long dax_entry_size(void *entry) in dax_entry_size() argument
335 if (dax_is_zero_entry(entry)) in dax_entry_size()
337 else if (dax_is_empty_entry(entry)) in dax_entry_size()
339 else if (dax_is_pmd_entry(entry)) in dax_entry_size()
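
dax_entry_size() tells the (dis)associate paths how many bytes an entry covers: zero and empty entries have no backing pages, otherwise the entry spans PMD_SIZE or PAGE_SIZE. A one-function sketch with x86-64 sizes assumed:

    #include <assert.h>

    #define DAX_PMD       (1UL << 1)
    #define DAX_ZERO_PAGE (1UL << 2)
    #define DAX_EMPTY     (1UL << 3)

    static unsigned long entry_size(unsigned long v)
    {
        if (v & (DAX_ZERO_PAGE | DAX_EMPTY))
            return 0;                           /* nothing to (dis)associate */
        return (v & DAX_PMD) ? (2UL << 20)      /* PMD_SIZE, 2 MiB */
                             : 4096;            /* PAGE_SIZE       */
    }

    int main(void)
    {
        assert(entry_size(DAX_EMPTY) == 0);
        assert(entry_size(DAX_PMD) == (2UL << 20));
        assert(entry_size(0) == 4096);
        return 0;
    }
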
421 static void dax_folio_init(void *entry) in dax_folio_init() argument
423 struct folio *folio = dax_to_folio(entry); in dax_folio_init()
424 int order = dax_entry_order(entry); in dax_folio_init()
441 static void dax_associate_entry(void *entry, struct address_space *mapping, in dax_associate_entry() argument
445 unsigned long size = dax_entry_size(entry), index; in dax_associate_entry()
446 struct folio *folio = dax_to_folio(entry); in dax_associate_entry()
448 if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) in dax_associate_entry()
457 WARN_ON_ONCE(dax_entry_order(entry) != folio_order(folio)); in dax_associate_entry()
461 dax_folio_init(entry); in dax_associate_entry()
462 folio = dax_to_folio(entry); in dax_associate_entry()
468 static void dax_disassociate_entry(void *entry, struct address_space *mapping, in dax_disassociate_entry() argument
471 struct folio *folio = dax_to_folio(entry); in dax_disassociate_entry()
473 if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) in dax_disassociate_entry()
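
dax_associate_entry() skips zero/empty entries (no real pages) and otherwise ties the folio to the mapping at an index derived from the faulting address: the address is aligned down to the entry size, then converted to a file page offset the way linear_page_index() does. A sketch of that index math with made-up addresses:

    #include <assert.h>

    #define PAGE_SHIFT 12

    /* linear_page_index(): file page offset of a user address in a vma. */
    static unsigned long linear_index(unsigned long vm_start,
                                      unsigned long vm_pgoff,
                                      unsigned long addr)
    {
        return vm_pgoff + ((addr - vm_start) >> PAGE_SHIFT);
    }

    int main(void)
    {
        unsigned long size = 2UL << 20;                      /* PMD entry  */
        unsigned long addr = 0x7f0000312345UL & ~(size - 1); /* ALIGN_DOWN */

        assert(addr == 0x7f0000200000UL);
        assert(linear_index(0x7f0000000000UL, 0, addr) == 0x200);
        return 0;
    }
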
479 static struct page *dax_busy_page(void *entry) in dax_busy_page() argument
481 struct folio *folio = dax_to_folio(entry); in dax_busy_page()
483 if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) in dax_busy_page()
503 void *entry; in dax_lock_folio() local
510 entry = NULL; in dax_lock_folio()
521 entry = (void *)~0UL; in dax_lock_folio()
532 entry = xas_load(&xas); in dax_lock_folio()
533 if (dax_is_locked(entry)) { in dax_lock_folio()
535 wait_entry_unlocked(&xas, entry); in dax_lock_folio()
539 dax_lock_entry(&xas, entry); in dax_lock_folio()
544 return (dax_entry_t)entry; in dax_lock_folio()
571 void *entry; in dax_lock_mapping_entry() local
575 entry = NULL; in dax_lock_mapping_entry()
582 entry = xas_load(&xas); in dax_lock_mapping_entry()
583 if (dax_is_locked(entry)) { in dax_lock_mapping_entry()
585 wait_entry_unlocked(&xas, entry); in dax_lock_mapping_entry()
589 if (!entry || in dax_lock_mapping_entry()
590 dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) { in dax_lock_mapping_entry()
598 entry = (void *)~0UL; in dax_lock_mapping_entry()
600 *page = pfn_to_page(dax_to_pfn(entry)); in dax_lock_mapping_entry()
601 dax_lock_entry(&xas, entry); in dax_lock_mapping_entry()
607 return (dax_entry_t)entry; in dax_lock_mapping_entry()
655 void *entry; in grab_mapping_entry() local
660 entry = get_next_unlocked_entry(xas, order); in grab_mapping_entry()
662 if (entry) { in grab_mapping_entry()
663 if (dax_is_conflict(entry)) in grab_mapping_entry()
665 if (!xa_is_value(entry)) { in grab_mapping_entry()
671 if (dax_is_pmd_entry(entry) && in grab_mapping_entry()
672 (dax_is_zero_entry(entry) || in grab_mapping_entry()
673 dax_is_empty_entry(entry))) { in grab_mapping_entry()
684 dax_lock_entry(xas, entry); in grab_mapping_entry()
691 if (dax_is_zero_entry(entry)) { in grab_mapping_entry()
700 dax_disassociate_entry(entry, mapping, false); in grab_mapping_entry()
702 dax_wake_entry(xas, entry, WAKE_ALL); in grab_mapping_entry()
704 entry = NULL; in grab_mapping_entry()
708 if (entry) { in grab_mapping_entry()
709 dax_lock_entry(xas, entry); in grab_mapping_entry()
715 entry = dax_make_entry(0, flags); in grab_mapping_entry()
716 dax_lock_entry(xas, entry); in grab_mapping_entry()
730 return entry; in grab_mapping_entry()
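
grab_mapping_entry() is the find-or-create step both fault paths go through: wait for the slot to unlock, downgrade a zero/empty PMD entry when a PTE-sized entry is wanted, or install a fresh locked empty entry; errors come back as internal XArray entries (see the fault handlers below). A single-slot stand-in continuing the pthread sketch above (same xa_lock/entry_wq/slot and DAX_* constants), heavily simplified; the real function also handles xas_nomem() retries and nrpages accounting:

    #include <stdbool.h>

    static unsigned long grab_entry(bool want_pmd)
    {
        unsigned long e;

        pthread_mutex_lock(&xa_lock);
        while (slot & DAX_LOCKED)                  /* wait for the holder   */
            pthread_cond_wait(&entry_wq, &xa_lock);

        if (!slot) {
            /* No entry yet: install a locked empty placeholder,
             * as dax_make_entry(0, flags) + dax_lock_entry() do. */
            slot = DAX_EMPTY | (want_pmd ? DAX_PMD : 0);
        } else if (!want_pmd && (slot & DAX_PMD) &&
                   (slot & (DAX_ZERO_PAGE | DAX_EMPTY))) {
            /* PTE fault found a zero/empty PMD entry: downgrade it.
             * (The kernel unmaps the 2 MiB range and wakes waiters
             * with WAKE_ALL before replacing the entry.) */
            slot = DAX_EMPTY;
        }
        slot |= DAX_LOCKED;                        /* caller gets it locked */
        e = slot & ~DAX_LOCKED;
        pthread_mutex_unlock(&xa_lock);
        return e;
    }
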
757 void *entry; in dax_layout_busy_page_range() local
787 xas_for_each(&xas, entry, end_idx) { in dax_layout_busy_page_range()
788 if (WARN_ON_ONCE(!xa_is_value(entry))) in dax_layout_busy_page_range()
790 entry = wait_entry_unlocked_exclusive(&xas, entry); in dax_layout_busy_page_range()
791 if (entry) in dax_layout_busy_page_range()
792 page = dax_busy_page(entry); in dax_layout_busy_page_range()
793 put_unlocked_entry(&xas, entry, WAKE_NEXT); in dax_layout_busy_page_range()
820 void *entry; in __dax_invalidate_entry() local
823 entry = get_next_unlocked_entry(&xas, 0); in __dax_invalidate_entry()
824 if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) in __dax_invalidate_entry()
830 dax_disassociate_entry(entry, mapping, trunc); in __dax_invalidate_entry()
832 mapping->nrpages -= 1UL << dax_entry_order(entry); in __dax_invalidate_entry()
835 put_unlocked_entry(&xas, entry, WAKE_ALL); in __dax_invalidate_entry()
845 void *entry; in __dax_clear_dirty_range() local
848 xas_for_each(&xas, entry, end) { in __dax_clear_dirty_range()
849 entry = wait_entry_unlocked_exclusive(&xas, entry); in __dax_clear_dirty_range()
850 if (!entry) in __dax_clear_dirty_range()
854 put_unlocked_entry(&xas, entry, WAKE_NEXT); in __dax_clear_dirty_range()
891 void *entry; in dax_delete_mapping_range() local
903 xas_for_each(&xas, entry, end_idx) { in dax_delete_mapping_range()
904 if (!xa_is_value(entry)) in dax_delete_mapping_range()
906 entry = wait_entry_unlocked_exclusive(&xas, entry); in dax_delete_mapping_range()
907 if (!entry) in dax_delete_mapping_range()
909 dax_disassociate_entry(entry, mapping, true); in dax_delete_mapping_range()
911 mapping->nrpages -= 1UL << dax_entry_order(entry); in dax_delete_mapping_range()
912 put_unlocked_entry(&xas, entry, WAKE_ALL); in dax_delete_mapping_range()
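
Both removal paths above account in entry granularity: deleting one PMD entry drops 1 << PMD_ORDER pages from mapping->nrpages. A quick check of that arithmetic:

    #include <assert.h>

    #define DAX_PMD   (1UL << 1)
    #define PMD_ORDER 9

    static unsigned int order_of(unsigned long e)
    {
        return (e & DAX_PMD) ? PMD_ORDER : 0;
    }

    int main(void)
    {
        unsigned long nrpages = 1024;

        nrpages -= 1UL << order_of(DAX_PMD);   /* one PMD entry: 512 pages */
        nrpages -= 1UL << order_of(0);         /* one PTE entry: 1 page    */
        assert(nrpages == 511);
        return 0;
    }
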
1043 const struct iomap_iter *iter, void *entry, unsigned long pfn, in dax_insert_entry() argument
1055 if (shared || (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE))) { in dax_insert_entry()
1058 if (dax_is_pmd_entry(entry)) in dax_insert_entry()
1067 if (shared || dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) { in dax_insert_entry()
1070 dax_disassociate_entry(entry, mapping, false); in dax_insert_entry()
1083 WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) | in dax_insert_entry()
1085 entry = new_entry; in dax_insert_entry()
1097 return entry; in dax_insert_entry()
1101 struct address_space *mapping, void *entry) in dax_writeback_one() argument
1111 if (WARN_ON(!xa_is_value(entry))) in dax_writeback_one()
1114 if (unlikely(dax_is_locked(entry))) { in dax_writeback_one()
1115 void *old_entry = entry; in dax_writeback_one()
1117 entry = get_next_unlocked_entry(xas, 0); in dax_writeback_one()
1120 if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) in dax_writeback_one()
1127 if (dax_to_pfn(old_entry) != dax_to_pfn(entry)) in dax_writeback_one()
1129 if (WARN_ON_ONCE(dax_is_empty_entry(entry) || in dax_writeback_one()
1130 dax_is_zero_entry(entry))) { in dax_writeback_one()
1141 dax_lock_entry(xas, entry); in dax_writeback_one()
1160 pfn = dax_to_pfn(entry); in dax_writeback_one()
1161 count = 1UL << dax_entry_order(entry); in dax_writeback_one()
1182 xas_store(xas, entry); in dax_writeback_one()
1184 dax_wake_entry(xas, entry, WAKE_NEXT); in dax_writeback_one()
1190 put_unlocked_entry(xas, entry, WAKE_NEXT); in dax_writeback_one()
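
The detail worth noting in dax_writeback_one() is that the flush start and length both come from the entry itself: the pfn from the high bits, the page count from the order, so a single PMD entry is flushed as one 2 MiB range. A sketch of the computation, with DAX_SHIFT, PMD_ORDER, and the page size assumed as before:

    #include <stdio.h>

    #define DAX_PMD   (1UL << 1)
    #define DAX_SHIFT 4
    #define PMD_ORDER 9
    #define PAGE_SZ   4096UL

    int main(void)
    {
        unsigned long entry = (0x1234UL << DAX_SHIFT) | DAX_PMD;
        unsigned long pfn   = entry >> DAX_SHIFT;
        unsigned long count = 1UL << ((entry & DAX_PMD) ? PMD_ORDER : 0);

        /* dax_writeback_one() flushes count pages' worth of bytes at pfn. */
        printf("flush %lu bytes starting at pfn %#lx\n", count * PAGE_SZ, pfn);
        return 0;
    }
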
1205 void *entry; in dax_writeback_mapping_range() local
1220 xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) { in dax_writeback_mapping_range()
1221 ret = dax_writeback_one(&xas, dax_dev, mapping, entry); in dax_writeback_mapping_range()
1359 const struct iomap_iter *iter, void **entry) in dax_load_hole() argument
1366 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE); in dax_load_hole()
1375 const struct iomap_iter *iter, void **entry) in dax_pmd_load_hole() argument
1393 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, in dax_pmd_load_hole()
1415 trace_dax_pmd_load_hole(inode, vmf, zero_folio, *entry); in dax_pmd_load_hole()
1421 trace_dax_pmd_load_hole_fallback(inode, vmf, zero_folio, *entry); in dax_pmd_load_hole()
1426 const struct iomap_iter *iter, void **entry) in dax_pmd_load_hole() argument
1834 struct xa_state *xas, void **entry, bool pmd) in dax_fault_iter() argument
1854 return dax_load_hole(xas, vmf, iter, entry); in dax_fault_iter()
1855 return dax_pmd_load_hole(xas, vmf, iter, entry); in dax_fault_iter()
1867 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags); in dax_fault_iter()
1875 folio = dax_to_folio(*entry); in dax_fault_iter()
1901 void *entry; in dax_iomap_pte_fault() local
1918 entry = grab_mapping_entry(&xas, mapping, 0); in dax_iomap_pte_fault()
1919 if (xa_is_internal(entry)) { in dax_iomap_pte_fault()
1920 ret = xa_to_internal(entry); in dax_iomap_pte_fault()
1941 ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false); in dax_iomap_pte_fault()
1961 dax_unlock_entry(&xas, entry); in dax_iomap_pte_fault()
2013 void *entry; in dax_iomap_pmd_fault() local
2041 entry = grab_mapping_entry(&xas, mapping, PMD_ORDER); in dax_iomap_pmd_fault()
2042 if (xa_is_internal(entry)) { in dax_iomap_pmd_fault()
2043 ret = xa_to_internal(entry); in dax_iomap_pmd_fault()
2063 ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true); in dax_iomap_pmd_fault()
2071 dax_unlock_entry(&xas, entry); in dax_iomap_pmd_fault()
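
The xa_is_internal()/xa_to_internal() pair in both fault handlers is how grab_mapping_entry() reports failure without a separate error channel: a vm_fault_t code such as VM_FAULT_FALLBACK is wrapped in an XArray internal entry (low bits 0b10), which can never collide with a real DAX value entry (low bit 1). A standalone demonstration of the tagging; VM_FAULT_FALLBACK_M is a stand-in value, not the kernel constant:

    #include <assert.h>

    /* XArray internal entries: (v << 2) | 2; value entries: (v << 1) | 1. */
    static void *mk_internal(unsigned long v) { return (void *)((v << 2) | 2); }
    static int is_internal(void *e) { return ((unsigned long)e & 3) == 2; }
    static unsigned long to_internal(void *e) { return (unsigned long)e >> 2; }

    #define VM_FAULT_FALLBACK_M 0x800  /* stand-in for the vm_fault_t bit */

    int main(void)
    {
        void *entry = mk_internal(VM_FAULT_FALLBACK_M);

        assert(is_internal(entry));
        assert(to_internal(entry) == VM_FAULT_FALLBACK_M);
        return 0;
    }
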
2130 void *entry; in dax_insert_pfn_mkwrite() local
2134 entry = get_next_unlocked_entry(&xas, order); in dax_insert_pfn_mkwrite()
2136 if (!entry || dax_is_conflict(entry) || in dax_insert_pfn_mkwrite()
2137 (order == 0 && !dax_is_pte_entry(entry))) { in dax_insert_pfn_mkwrite()
2138 put_unlocked_entry(&xas, entry, WAKE_NEXT); in dax_insert_pfn_mkwrite()
2145 dax_lock_entry(&xas, entry); in dax_insert_pfn_mkwrite()
2158 dax_unlock_entry(&xas, entry); in dax_insert_pfn_mkwrite()