/include/linux/
gfp.h
    282  warn_if_node_offline(nid, gfp_mask);  in __alloc_pages_node_noprof()
    284  return __alloc_pages_noprof(gfp_mask, order, nid, NULL);  in __alloc_pages_node_noprof()
    311  return __alloc_pages_node_noprof(nid, gfp_mask, order);  in alloc_pages_node_noprof()
    346  #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)  argument
    375  #define __get_free_page(gfp_mask) \  argument
    376  __get_free_pages((gfp_mask), 0)
    378  #define __get_dma_pages(gfp_mask, order) \  argument
    379  __get_free_pages((gfp_mask) | GFP_DMA, (order))
    407  bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);
    418  static inline bool gfp_compaction_allowed(gfp_t gfp_mask)  in gfp_compaction_allowed()  argument
    [all …]
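
The gfp.h matches above are the core page-allocator entry points that take a caller-supplied gfp_mask. As a rough illustration (grab_buffer/drop_buffer are hypothetical helper names, not kernel APIs), allocating a single page in a context that may sleep looks roughly like this:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical helper: allocate one page in a context that may sleep. */
static void *grab_buffer(void)
{
	/* alloc_page(mask) is the order-0 shorthand for alloc_pages(mask, 0) */
	struct page *page = alloc_page(GFP_KERNEL);

	if (!page)
		return NULL;
	return page_address(page);
}

/* Hypothetical counterpart: return the page to the buddy allocator. */
static void drop_buffer(void *addr)
{
	if (addr)
		free_pages((unsigned long)addr, 0);
}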
|
page_frag_cache.h
    43  gfp_t gfp_mask, unsigned int align_mask);
    46  unsigned int fragsz, gfp_t gfp_mask,  in page_frag_alloc_align()  argument
    50  return __page_frag_alloc_align(nc, fragsz, gfp_mask, -align);  in page_frag_alloc_align()
    54  unsigned int fragsz, gfp_t gfp_mask)  in page_frag_alloc()  argument
    56  return __page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u);  in page_frag_alloc()
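
page_frag_alloc() carves small fragments out of a cached page and is commonly called with GFP_ATOMIC from networking receive paths. A minimal sketch, assuming nc points at an already-initialised page_frag_cache owned by the caller (grab_frag/drop_frag are hypothetical names):

#include <linux/page_frag_cache.h>

/* Hypothetical helper: grab a small fragment from a caller-owned cache. */
static void *grab_frag(struct page_frag_cache *nc, unsigned int len)
{
	/* GFP_ATOMIC because this pattern is typical in softirq/NAPI paths */
	return page_frag_alloc(nc, len, GFP_ATOMIC);
}

/* Hypothetical counterpart: drop the fragment's reference on its page. */
static void drop_frag(void *data)
{
	page_frag_free(data);
}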
|
mempool.h
    15  typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);
    43  gfp_t gfp_mask, int node_id);
    55  gfp_t gfp_mask, int nid);
    66  extern void *mempool_alloc_noprof(mempool_t *pool, gfp_t gfp_mask) __malloc;
    78  void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
    90  void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data);
    100  void *mempool_kvmalloc(gfp_t gfp_mask, void *pool_data);
    117  void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data);
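
The mempool_alloc_slab/mempool_free_slab pair listed above are the stock callbacks for building a mempool on top of a slab cache, which keeps a minimum reserve of objects so allocations can make forward progress under memory pressure. A sketch, using a hypothetical req_cache/req_pool pair:

#include <linux/mempool.h>
#include <linux/slab.h>

static struct kmem_cache *req_cache;	/* hypothetical object cache */
static mempool_t *req_pool;

static int req_pool_init(void)
{
	req_cache = kmem_cache_create("req_cache", 128, 0, 0, NULL);
	if (!req_cache)
		return -ENOMEM;

	/* Keep at least 16 objects in reserve; mempool_alloc_slab and
	 * mempool_free_slab are the stock helpers listed above. */
	req_pool = mempool_create(16, mempool_alloc_slab, mempool_free_slab,
				  req_cache);
	if (!req_pool) {
		kmem_cache_destroy(req_cache);
		return -ENOMEM;
	}
	return 0;
}

static void *req_get(void)
{
	/* GFP_NOIO: typical in I/O paths that must not recurse into the
	 * block layer while reclaiming memory. */
	return mempool_alloc(req_pool, GFP_NOIO);
}

static void req_put(void *req)
{
	mempool_free(req, req_pool);
}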
|
cpuset.h
    85  extern bool cpuset_current_node_allowed(int node, gfp_t gfp_mask);
    87  static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)  in __cpuset_zone_allowed()  argument
    89  return cpuset_current_node_allowed(zone_to_nid(z), gfp_mask);  in __cpuset_zone_allowed()
    92  static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)  in cpuset_zone_allowed()  argument
    95  return __cpuset_zone_allowed(z, gfp_mask);  in cpuset_zone_allowed()
    227  static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)  in __cpuset_zone_allowed()  argument
    232  static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)  in cpuset_zone_allowed()  argument
|
page_owner.h
    13  unsigned short order, gfp_t gfp_mask);
    29  unsigned short order, gfp_t gfp_mask)  in set_page_owner()  argument
    32  __set_page_owner(page, order, gfp_mask);  in set_page_owner()
    61  unsigned short order, gfp_t gfp_mask)  in set_page_owner()  argument
|
blk-crypto.h
    137  gfp_t gfp_mask);
    174  int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask);
    187  gfp_t gfp_mask)  in bio_crypt_clone()  argument
    190  return __bio_crypt_clone(dst, src, gfp_mask);  in bio_crypt_clone()
|
vmalloc.h
    167  extern void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
    171  unsigned long start, unsigned long end, gfp_t gfp_mask,
    176  void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask,
    180  void *vmalloc_huge_node_noprof(unsigned long size, gfp_t gfp_mask, int node) __alloc_size(1);
    183  static inline void *vmalloc_huge(unsigned long size, gfp_t gfp_mask)  in vmalloc_huge()  argument
    185  return vmalloc_huge_node(size, gfp_mask, NUMA_NO_NODE);  in vmalloc_huge()
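
__vmalloc() is the variant that lets the caller pick the gfp_mask, whereas plain vmalloc() implies GFP_KERNEL. A minimal sketch (alloc_scratch/free_scratch are hypothetical names):

#include <linux/vmalloc.h>

/* Hypothetical helper: large, zeroed, virtually contiguous scratch buffer. */
static void *alloc_scratch(size_t bytes)
{
	return __vmalloc(bytes, GFP_KERNEL | __GFP_ZERO);
}

static void free_scratch(void *p)
{
	vfree(p);
}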
|
connector.h
    102  u32 group, gfp_t gfp_mask,
    127  int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 group, gfp_t gfp_mask);
|
fault-inject.h
    117  bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
    119  static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)  in should_fail_alloc_page()  argument
|
textsearch.h
    163  gfp_t gfp_mask)  in alloc_ts_config()  argument
    167  conf = kzalloc(TS_PRIV_ALIGN(sizeof(*conf)) + payload, gfp_mask);  in alloc_ts_config()
|
mISDNif.h
    537  mI_alloc_skb(unsigned int len, gfp_t gfp_mask)  in mI_alloc_skb()  argument
    541  skb = alloc_skb(len + MISDN_HEADER_LEN, gfp_mask);  in mI_alloc_skb()
    548  _alloc_mISDN_skb(u_int prim, u_int id, u_int len, void *dp, gfp_t gfp_mask)  in _alloc_mISDN_skb()  argument
    550  struct sk_buff *skb = mI_alloc_skb(len, gfp_mask);  in _alloc_mISDN_skb()
    565  u_int id, u_int len, void *dp, gfp_t gfp_mask)  in _queue_data()  argument
    571  skb = _alloc_mISDN_skb(prim, id, len, dp, gfp_mask);  in _queue_data()
|
swap.h
    412  gfp_t gfp_mask, nodemask_t *mask);
    424  gfp_t gfp_mask,
    428  gfp_t gfp_mask, bool noswap,
    493  int folio_alloc_swap(struct folio *folio, gfp_t gfp_mask);
    557  static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)  in add_swap_count_continuation()  argument
    599  static inline int folio_alloc_swap(struct folio *folio, gfp_t gfp_mask)  in folio_alloc_swap()  argument
|
bio-integrity.h
    82  int bio_integrity_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp_mask);
    111  gfp_t gfp_mask)  in bio_integrity_clone()  argument
|
scatterlist.h
    450  gfp_t gfp_mask);
    466  unsigned int left_pages, gfp_t gfp_mask);
    470  unsigned int max_segment, gfp_t gfp_mask);
    496  unsigned long size, gfp_t gfp_mask)  in sg_alloc_table_from_pages()  argument
    499  size, UINT_MAX, gfp_mask);  in sg_alloc_table_from_pages()
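
sg_alloc_table_from_pages() builds a scatterlist over an existing page array, allocating its internal tables with the supplied gfp_mask. A sketch, assuming the caller has already pinned pages/n_pages (map_user_pages/unmap_user_pages are hypothetical names):

#include <linux/scatterlist.h>

/* Hypothetical helper: describe a caller-pinned page array as an sg_table. */
static int map_user_pages(struct sg_table *sgt, struct page **pages,
			  unsigned int n_pages, size_t len)
{
	return sg_alloc_table_from_pages(sgt, pages, n_pages, 0, len,
					 GFP_KERNEL);
}

/* Hypothetical counterpart: release the sg_table's internal allocations. */
static void unmap_user_pages(struct sg_table *sgt)
{
	sg_free_table(sgt);
}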
|
umh.h
    38  gfp_t gfp_mask,
|
oom.h
    39  const gfp_t gfp_mask;  member
|
shrinker.h
    35  gfp_t gfp_mask;  member
|
compaction.h
    92  extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
|
hugetlb.h
    702  nodemask_t *nmask, gfp_t gfp_mask,
    705  nodemask_t *nmask, gfp_t gfp_mask);
    934  static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)  in htlb_modify_alloc_mask()  argument
    939  modified_mask |= (gfp_mask & __GFP_THISNODE);
    941  modified_mask |= (gfp_mask & __GFP_NOWARN);
    1114  nodemask_t *nmask, gfp_t gfp_mask)
    1121  nodemask_t *nmask, gfp_t gfp_mask,
    1233  static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
|
radix-tree.h
    239  int radix_tree_preload(gfp_t gfp_mask);
    240  int radix_tree_maybe_preload(gfp_t gfp_mask);
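
radix_tree_preload() exists so that tree-node allocations can use a blocking mask (e.g. GFP_KERNEL) before a spinlock is taken; the insert inside the lock then draws on the per-CPU preload. A sketch with a hypothetical tree, lock, and my_insert() helper:

#include <linux/radix-tree.h>
#include <linux/spinlock.h>

static RADIX_TREE(my_tree, GFP_ATOMIC);	/* hypothetical tree */
static DEFINE_SPINLOCK(my_lock);	/* hypothetical lock protecting it */

static int my_insert(unsigned long index, void *item)
{
	int err;

	/* Preload per-CPU nodes while we may still sleep ... */
	err = radix_tree_preload(GFP_KERNEL);
	if (err)
		return err;

	spin_lock(&my_lock);
	err = radix_tree_insert(&my_tree, index, item);
	spin_unlock(&my_lock);

	/* ... and always close the preload section, success or failure. */
	radix_tree_preload_end();
	return err;
}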
|
bio.h
    357  blk_opf_t opf, gfp_t gfp_mask,
    359  struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask);
    370  unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp_mask)  in bio_alloc()  argument
    372  return bio_alloc_bioset(bdev, nr_vecs, opf, gfp_mask, &fs_bio_set);  in bio_alloc()
    715  sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask);
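
bio_alloc() shown above wraps bio_alloc_bioset() around the global fs_bio_set; with a blocking gfp_mask such as GFP_KERNEL, allocation from a bio_set is documented not to fail. A sketch of a single-page read (read_one_page is a hypothetical helper; bdev, page, and the completion callback are assumed to be managed by the caller):

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical helper: queue a one-segment read of PAGE_SIZE bytes. */
static void read_one_page(struct block_device *bdev, struct page *page,
			  sector_t sector, bio_end_io_t *done)
{
	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);

	bio->bi_iter.bi_sector = sector;
	__bio_add_page(bio, page, PAGE_SIZE, 0);
	bio->bi_end_io = done;
	submit_bio(bio);
}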
|
pagemap.h
    351  return mapping->gfp_mask;  in mapping_gfp_mask()
    356  gfp_t gfp_mask)  in mapping_gfp_constraint()  argument
    358  return mapping_gfp_mask(mapping) & gfp_mask;  in mapping_gfp_constraint()
    367  m->gfp_mask = mask;  in mapping_set_gfp_mask()
    896  pgoff_t index, gfp_t gfp_mask)  in find_or_create_page()  argument
    900  gfp_mask);  in find_or_create_page()
    980  pgoff_t index, gfp_t gfp_mask);
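
mapping_gfp_mask()/mapping_set_gfp_mask() let a filesystem restrict which GFP flags page-cache allocations for an inode may use, and mapping_gfp_constraint() intersects that mask with a caller's. A sketch (setup_mapping/readahead_gfp are hypothetical names):

#include <linux/pagemap.h>

/* Hypothetical helper: forbid FS recursion for this mapping's allocations. */
static void setup_mapping(struct address_space *mapping)
{
	mapping_set_gfp_mask(mapping, GFP_NOFS);
}

/* Hypothetical helper: keep only bits allowed by both the mapping and GFP_KERNEL. */
static gfp_t readahead_gfp(struct address_space *mapping)
{
	return mapping_gfp_constraint(mapping, GFP_KERNEL);
}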
|
/include/linux/sched/
mm.h
    273  extern void fs_reclaim_acquire(gfp_t gfp_mask);
    274  extern void fs_reclaim_release(gfp_t gfp_mask);
    278  static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }  in fs_reclaim_acquire()  argument
    279  static inline void fs_reclaim_release(gfp_t gfp_mask) { }  in fs_reclaim_release()  argument
    316  static inline void might_alloc(gfp_t gfp_mask)  in might_alloc()  argument
    318  fs_reclaim_acquire(gfp_mask);  in might_alloc()
    319  fs_reclaim_release(gfp_mask);  in might_alloc()
    321  might_sleep_if(gfpflags_allow_blocking(gfp_mask));  in might_alloc()
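
might_alloc() pairs the fs_reclaim lockdep annotations with might_sleep_if(), so a helper that accepts a caller-supplied mask can flag misuse even when the allocation below happens to take a fast path. A sketch (prepare_buffer is a hypothetical helper):

#include <linux/sched/mm.h>
#include <linux/slab.h>

/* Hypothetical helper: document blocking behaviour before allocating. */
static void *prepare_buffer(size_t len, gfp_t gfp_mask)
{
	might_alloc(gfp_mask);		/* warns if gfp_mask may block here */

	return kmalloc(len, gfp_mask);
}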
|
/include/trace/events/
compaction.h
    169  gfp_t gfp_mask,
    172  TP_ARGS(order, gfp_mask, prio),
    176  __field(unsigned long, gfp_mask)
    182  __entry->gfp_mask = (__force unsigned long)gfp_mask;
    188  show_gfp_flags(__entry->gfp_mask),
|
/include/linux/sunrpc/
gss_api.h
    53  gfp_t gfp_mask);
    115  gfp_t gfp_mask);
|