Searched refs:__GFP_FS (Results 1 – 25 of 41) sorted by relevance
211 #define __GFP_FS ((__force gfp_t)___GFP_FS) macro
329 #define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
334 #define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
29 The traditional way to avoid this deadlock problem is to clear __GFP_FS and __GFP_IO from the gfp mask when calling the allocator
41 critical section. Any allocation from that scope will drop __GFP_FS and __GFP_IO from the given mask, so
38 lflags &= ~__GFP_FS; in kmem_flags_convert()
513 if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM)) in xfs_qm_shrink_scan()
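The xfs quota shrinker hit above only proceeds when the caller grants both __GFP_FS and __GFP_DIRECT_RECLAIM. A minimal sketch of that guard in a generic shrinker (example_shrink_scan() is a hypothetical name, not the xfs code):

    #include <linux/shrinker.h>

    static unsigned long example_shrink_scan(struct shrinker *shrink,
                                             struct shrink_control *sc)
    {
            /* Freeing these objects may issue filesystem I/O, so bail
             * out unless the allocation context allows FS recursion
             * and the caller can actually wait for direct reclaim. */
            if ((sc->gfp_mask & (__GFP_FS | __GFP_DIRECT_RECLAIM)) !=
                (__GFP_FS | __GFP_DIRECT_RECLAIM))
                    return SHRINK_STOP;
            /* ... scan and free objects here ... */
            return 0;
    }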
1289 mapping_set_gfp_mask(inode->i_mapping, (gfp_mask & ~(__GFP_FS))); in xfs_setup_inode()
213 flags &= ~(__GFP_IO | __GFP_FS); in current_gfp_context()
215 flags &= ~__GFP_FS; in current_gfp_context()
19 The traditional way to avoid this deadlock problem is to clear __GFP_FS
35 scope will inherently drop __GFP_FS respectively __GFP_IO from the given
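These documentation hits (the same passage appears earlier in a zh_CN translation) contrast the two approaches: clearing __GFP_FS per call site versus marking a whole scope. A minimal sketch of both, with hypothetical function names:

    #include <linux/gfp.h>
    #include <linux/sched/mm.h>
    #include <linux/slab.h>

    /* Traditional approach: drop __GFP_FS at every call site. */
    static void *example_traditional(size_t len)
    {
            return kmalloc(len, GFP_NOFS);  /* GFP_KERNEL minus __GFP_FS */
    }

    /* Scope API: mark the critical section once; current_gfp_context()
     * (see the hits above) strips __GFP_FS from every allocation made
     * inside it. */
    static void *example_scoped(size_t len)
    {
            unsigned int nofs_flags = memalloc_nofs_save();
            void *p = kmalloc(len, GFP_KERNEL);

            memalloc_nofs_restore(nofs_flags);
            return p;
    }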
54 if (current_is_kswapd() || !(gfp & __GFP_FS)) in nfs_fscache_release_folio()
35 gfpflag_string(__GFP_FS), \
260 if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS))) in vmpressure()
24 #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
30 #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
3037 if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO) in __vmalloc_area_node()
3039 else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0) in __vmalloc_area_node()
3049 if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO) in __vmalloc_area_node()
3051 else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0) in __vmalloc_area_node()
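These four __vmalloc_area_node() hits dispatch on which flags are missing: a GFP_NOFS caller (__GFP_IO set, __GFP_FS clear) gets a memalloc_nofs_save() scope around the internal allocations, and a GFP_NOIO caller (both clear) gets memalloc_noio_save(). A condensed sketch of that logic (not the full vmalloc code):

    unsigned int flags = 0;

    if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
            flags = memalloc_nofs_save();   /* caller passed GFP_NOFS */
    else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
            flags = memalloc_noio_save();   /* caller passed GFP_NOIO */

    /* ... perform the internal allocations with GFP_KERNEL ... */

    if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
            memalloc_nofs_restore(flags);
    else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
            memalloc_noio_restore(flags);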
1138 if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc)) in out_of_memory()
318 gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS); in pm_restrict_gfp_mask()
323 if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) in pm_suspended_storage()
4729 if (gfp_mask & __GFP_FS) in fs_reclaim_acquire()
4746 if (gfp_mask & __GFP_FS) in fs_reclaim_release()
129 if (current_is_kswapd() || !(gfp & __GFP_FS)) in v9fs_release_folio()
498 return mapping_gfp_constraint(mapping, ~__GFP_FS); in btrfs_alloc_write_mask()
748 page = __page_cache_alloc(mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS)); in btrfs_read_merkle_tree_page()
431 ~__GFP_FS)); in add_ra_bio_pages()
340 mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS)); in nilfs_new_inode()
527 mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS)); in __nilfs_read_inode()
177 if (current_is_kswapd() || !(gfp & __GFP_FS)) in ceph_release_folio()
1470 mapping_gfp_constraint(mapping, ~__GFP_FS)); in ceph_filemap_fault()
1619 ~__GFP_FS)); in ceph_fill_inline_data()
497 if (current_is_kswapd() || !(gfp & __GFP_FS)) in afs_release_folio()
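The nfs, 9p, ceph and afs release_folio hits all share one guard: refuse to release a folio when running as kswapd or when the caller did not grant __GFP_FS, because dropping the attached private data may require filesystem I/O. A minimal sketch of the pattern (examplefs_release_folio() is a hypothetical name):

    #include <linux/pagemap.h>
    #include <linux/swap.h>

    static bool examplefs_release_folio(struct folio *folio, gfp_t gfp)
    {
            /* Releasing private state may recurse into the filesystem,
             * so only proceed when the context permits __GFP_FS. */
            if (current_is_kswapd() || !(gfp & __GFP_FS))
                    return false;
            folio_detach_private(folio);
            return true;
    }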
635 const gfp_t gfp = __GFP_NOWARN | __GFP_RECLAIM | __GFP_FS; in alloc_private_pages()
612 lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); in loop_change_fd()
1077 mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); in loop_configure()
199 ~__GFP_FS)); in ntfs_zero_range()
726 gfp_t ra_gfp_mask = readahead_gfp_mask(mapping) & ~__GFP_FS; in ubifs_do_bulk_read()
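Many of the filesystem hits (xfs, btrfs, nilfs, ceph, ntfs, ubifs) and the loop driver apply the restriction at the address_space level rather than per call site: persistently via mapping_set_gfp_mask(), or for a single allocation via mapping_gfp_constraint(). A minimal sketch of both, with hypothetical helper names:

    #include <linux/pagemap.h>

    /* Persistently forbid FS recursion for every future page-cache
     * allocation against this mapping (cf. the xfs and loop hits). */
    static void example_restrict_mapping(struct address_space *mapping)
    {
            mapping_set_gfp_mask(mapping,
                                 mapping_gfp_mask(mapping) & ~__GFP_FS);
    }

    /* One-off variant: constrain a single allocation, as in the
     * btrfs, nilfs and ceph hits. */
    static gfp_t example_one_off_mask(struct address_space *mapping)
    {
            return mapping_gfp_constraint(mapping, ~__GFP_FS);
    }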
Completed in 63 milliseconds