
Searched refs:gfp (Results 1 – 25 of 47) sorted by relevance

/fs/notify/fanotify/
fanotify.c
414 gfp_t gfp) in fanotify_encode_fh() argument
555 gfp_t gfp) in fanotify_alloc_path_event() argument
587 gfp_t gfp) in fanotify_alloc_perm_event() argument
616 gfp_t gfp) in fanotify_alloc_fid_event() argument
628 hash, gfp); in fanotify_alloc_fid_event()
639 gfp_t gfp) in fanotify_alloc_name_event() argument
661 fne = kmalloc(size, gfp); in fanotify_alloc_name_event()
753 gfp_t gfp = GFP_KERNEL_ACCOUNT; in fanotify_alloc_event() local
838 gfp |= __GFP_NOFAIL; in fanotify_alloc_event()
840 gfp |= __GFP_RETRY_MAYFAIL; in fanotify_alloc_event()
[all …]
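
The fanotify hits above show a common gfp idiom: start from a memcg-accounted base mask and OR in an escalation flag depending on how critical the allocation is. A minimal sketch of that pattern, using a hypothetical alloc_event_payload() helper rather than the actual fanotify code:

#include <linux/gfp.h>
#include <linux/slab.h>

/*
 * Illustrative only: same flag-escalation shape as fanotify_alloc_event().
 * Memcg-accounted by default, then either "must not fail" or "try harder
 * but may still fail".
 */
static void *alloc_event_payload(size_t size, bool must_succeed)
{
	gfp_t gfp = GFP_KERNEL_ACCOUNT;		/* charge to the memcg */

	if (must_succeed)
		gfp |= __GFP_NOFAIL;		/* allocator loops until it works */
	else
		gfp |= __GFP_RETRY_MAYFAIL;	/* retry harder, but may fail */

	return kmalloc(size, gfp);
}
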
/fs/notify/
group.c
116 int flags, gfp_t gfp) in __fsnotify_alloc_group() argument
120 group = kzalloc(sizeof(struct fsnotify_group), gfp); in __fsnotify_alloc_group()
148 gfp_t gfp = (flags & FSNOTIFY_GROUP_USER) ? GFP_KERNEL_ACCOUNT : in fsnotify_alloc_group() local
151 return __fsnotify_alloc_group(ops, flags, gfp); in fsnotify_alloc_group()
/fs/bcachefs/
fast_list.c
22 static int fast_list_alloc_idx(struct fast_list *l, gfp_t gfp) in fast_list_alloc_idx() argument
24 int idx = ida_alloc_range(&l->slots_allocated, 1, INT_MAX, gfp); in fast_list_alloc_idx()
28 if (unlikely(!genradix_ptr_alloc_inlined(&l->items, idx, gfp))) { in fast_list_alloc_idx()
darray.c
8 int __bch2_darray_resize_noprof(darray_char *d, size_t element_size, size_t new_size, gfp_t gfp) in __bch2_darray_resize_noprof() argument
24 ? kvmalloc_noprof(bytes, gfp) in __bch2_darray_resize_noprof()
six.h
168 gfp_t gfp);
175 #define six_lock_init(lock, flags, gfp) \ argument
179 __six_lock_init((lock), #lock, &__key, flags, gfp); \
fs-io-pagecache.c
16 fgf_t fgp_flags, gfp_t gfp, in bch2_filemap_get_contig_folios_d() argument
27 ret = darray_make_room_gfp(fs, 1, gfp & GFP_KERNEL); in bch2_filemap_get_contig_folios_d()
31 f = __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp_flags, gfp); in bch2_filemap_get_contig_folios_d()
126 struct bch_folio *__bch2_folio_create(struct folio *folio, gfp_t gfp) in __bch2_folio_create() argument
132 folio_sectors(folio), gfp); in __bch2_folio_create()
141 struct bch_folio *bch2_folio_create(struct folio *folio, gfp_t gfp) in bch2_folio_create() argument
143 return bch2_folio(folio) ?: __bch2_folio_create(folio, gfp); in bch2_folio_create()
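bch2_filemap_get_contig_folios_d() passes both FGP flags and a gfp mask into the page-cache lookup. A small illustrative wrapper around the same __filemap_get_folio() call (the helper name is made up):

#include <linux/pagemap.h>

/*
 * Look up or create a folio in the page cache; the gfp mask only matters
 * when a new folio has to be allocated for FGP_CREAT.
 */
static struct folio *get_or_create_folio(struct address_space *mapping,
					 pgoff_t index, gfp_t gfp)
{
	return __filemap_get_folio(mapping, index, FGP_LOCK | FGP_CREAT, gfp);
}
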
util.c
292 gfp_t gfp) in bch2_save_backtrace() argument
298 int ret = darray_make_room_gfp(stack, 32, gfp); in bch2_save_backtrace()
308 !(ret = darray_make_room_gfp(stack, stack->size * 2, gfp))); in bch2_save_backtrace()
327 …bch2_prt_task_backtrace(struct printbuf *out, struct task_struct *task, unsigned skipnr, gfp_t gfp) in bch2_prt_task_backtrace() argument
330 int ret = bch2_save_backtrace(&stack, task, skipnr + 1, gfp); in bch2_prt_task_backtrace()
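
bch2_save_backtrace() grows its buffer under the caller's gfp mask, doubling the capacity until the trace fits. A generic sketch of that grow-with-caller-gfp loop, using plain krealloc() instead of bcachefs's darray helpers (all names here are illustrative):

#include <linux/slab.h>
#include <linux/errno.h>

struct grow_buf {
	unsigned long *data;
	size_t nr;	/* entries used */
	size_t size;	/* entries allocated */
};

static int grow_buf_make_room(struct grow_buf *b, size_t want, gfp_t gfp)
{
	size_t new_size = b->size ?: 32;
	unsigned long *p;

	while (new_size < b->nr + want)
		new_size *= 2;		/* double until the request fits */

	p = krealloc(b->data, new_size * sizeof(*p), gfp);
	if (!p)
		return -ENOMEM;

	b->data = p;
	b->size = new_size;
	return 0;
}
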
rcu_pending.c
467 gfp_t gfp = GFP_KERNEL; in __rcu_pending_enqueue() local
469 gfp |= __GFP_NOFAIL; in __rcu_pending_enqueue()
471 new_node = genradix_alloc_node(gfp); in __rcu_pending_enqueue()
btree_key_cache.c
132 static struct bkey_cached *__bkey_cached_alloc(unsigned key_u64s, gfp_t gfp) in __bkey_cached_alloc() argument
134 gfp |= __GFP_ACCOUNT|__GFP_RECLAIMABLE; in __bkey_cached_alloc()
136 struct bkey_cached *ck = kmem_cache_zalloc(bch2_key_cache, gfp); in __bkey_cached_alloc()
139 ck->k = kmalloc(key_u64s * sizeof(u64), gfp); in __bkey_cached_alloc()
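
__bkey_cached_alloc() forces __GFP_ACCOUNT and __GFP_RECLAIMABLE on top of whatever mask the caller passed, then uses the combined mask for both the slab object and its key buffer. A simplified sketch of that shape, with placeholder types rather than the bcachefs ones:

#include <linux/slab.h>
#include <linux/types.h>

struct my_obj {			/* stand-in for struct bkey_cached */
	u64 *keys;
};

static struct my_obj *obj_alloc(struct kmem_cache *my_cache,
				unsigned int nr_keys, gfp_t gfp)
{
	struct my_obj *obj;

	/* always memcg-account and group with reclaimable slabs */
	gfp |= __GFP_ACCOUNT | __GFP_RECLAIMABLE;

	obj = kmem_cache_zalloc(my_cache, gfp);
	if (!obj)
		return NULL;

	obj->keys = kmalloc_array(nr_keys, sizeof(u64), gfp);
	if (!obj->keys) {
		kmem_cache_free(my_cache, obj);
		return NULL;
	}
	return obj;
}
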
/fs/btrfs/
lru_cache.c
108 gfp_t gfp) in btrfs_lru_cache_store() argument
114 head = kmalloc(sizeof(*head), gfp); in btrfs_lru_cache_store()
118 ret = mtree_insert(&cache->entries, key, head, gfp); in btrfs_lru_cache_store()
lru_cache.h
66 gfp_t gfp);
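
btrfs_lru_cache_store() threads one caller-supplied gfp through both the entry allocation and the maple-tree insertion, so a single mask governs every allocation the store may perform. A reduced sketch of that shape (simplified, not the btrfs structure layout):

#include <linux/slab.h>
#include <linux/maple_tree.h>
#include <linux/errno.h>

static int cache_store(struct maple_tree *entries, unsigned long key,
		       size_t entry_size, gfp_t gfp)
{
	void *entry = kmalloc(entry_size, gfp);
	int ret;

	if (!entry)
		return -ENOMEM;

	/* mtree_insert() allocates tree nodes with the same gfp mask */
	ret = mtree_insert(entries, key, entry, gfp);
	if (ret)
		kfree(entry);
	return ret;
}
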
/fs/
buffer.c
922 gfp_t gfp) in folio_alloc_buffers() argument
935 bh = alloc_buffer_head(gfp); in folio_alloc_buffers()
969 gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT; in alloc_page_buffers() local
1043 pgoff_t index, unsigned size, gfp_t gfp) in grow_dev_folio() argument
1100 unsigned size, gfp_t gfp) in grow_buffers() argument
1121 unsigned size, gfp_t gfp) in __getblk_slow() argument
1448 unsigned size, gfp_t gfp) in bdev_getblk() argument
1452 if (gfpflags_allow_blocking(gfp)) in bdev_getblk()
1457 might_alloc(gfp); in bdev_getblk()
1503 unsigned size, gfp_t gfp) in __bread_gfp() argument
[all …]
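
bdev_getblk() branches on gfpflags_allow_blocking() and annotates the sleeping path with might_alloc(). A compact sketch of that split, with kmalloc() standing in for the real slow path:

#include <linux/gfp.h>
#include <linux/sched/mm.h>	/* might_alloc() */
#include <linux/slab.h>

/*
 * A non-blocking gfp mask means "return what is cached, never allocate";
 * a blocking mask may go on to allocate, and might_alloc() documents (and,
 * with debugging enabled, checks) that we may sleep from here on.
 */
static void *get_or_alloc(void *cached, size_t size, gfp_t gfp)
{
	if (cached || !gfpflags_allow_blocking(gfp))
		return cached;		/* fast path: never sleeps */

	might_alloc(gfp);		/* may sleep below */
	return kmalloc(size, gfp);	/* slow path: may block in reclaim */
}
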
mpage.c
171 gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL); in do_mpage_readpage() local
175 gfp |= __GFP_NORETRY | __GFP_NOWARN; in do_mpage_readpage()
288 gfp); in do_mpage_readpage()
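
do_mpage_readpage() constrains GFP_KERNEL by the mapping's own mask and, for readahead, makes the allocation opportunistic with __GFP_NORETRY | __GFP_NOWARN. The selection logic, extracted into a hypothetical helper:

#include <linux/pagemap.h>

static gfp_t readpage_gfp(struct address_space *mapping, bool is_readahead)
{
	/* never exceed what the address space allows */
	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);

	if (is_readahead)
		gfp |= __GFP_NORETRY | __GFP_NOWARN;	/* give up early, quietly */
	return gfp;
}
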
posix_acl.c
555 __posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p) in __posix_acl_create() argument
557 struct posix_acl *clone = posix_acl_clone(*acl, gfp); in __posix_acl_create()
573 __posix_acl_chmod(struct posix_acl **acl, gfp_t gfp, umode_t mode) in __posix_acl_chmod() argument
575 struct posix_acl *clone = posix_acl_clone(*acl, gfp); in __posix_acl_chmod()
/fs/netfs/
misc.c
21 size_t *_cur_size, ssize_t size, gfp_t gfp) in netfs_alloc_folioq_buffer() argument
54 folio = folio_alloc(gfp, order); in netfs_alloc_folioq_buffer()
56 folio = folio_alloc(gfp, 0); in netfs_alloc_folioq_buffer()
293 bool netfs_release_folio(struct folio *folio, gfp_t gfp) in netfs_release_folio() argument
308 if (current_is_kswapd() || !(gfp & __GFP_FS)) in netfs_release_folio()
rolling_buffer.c
25 struct folio_queue *netfs_folioq_alloc(unsigned int rreq_id, gfp_t gfp, in netfs_folioq_alloc() argument
30 fq = kmalloc(sizeof(*fq), gfp); in netfs_folioq_alloc()
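
netfs_alloc_folioq_buffer() tries a high-order folio first and falls back to order 0 with the same mask, while netfs_release_folio() (like the nfs_fscache_release_folio() hit below) refuses to do work from kswapd or when __GFP_FS is clear. A sketch of the allocation fallback:

#include <linux/gfp.h>

static struct folio *alloc_buffer_folio(gfp_t gfp, unsigned int order)
{
	/* prefer a large folio, but settle for a single page */
	struct folio *folio = folio_alloc(gfp, order);

	if (!folio && order)
		folio = folio_alloc(gfp, 0);
	return folio;
}
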
/fs/nfs/
fscache.h
102 static inline bool nfs_fscache_release_folio(struct folio *folio, gfp_t gfp) in nfs_fscache_release_folio() argument
105 if (current_is_kswapd() || !(gfp & __GFP_FS)) in nfs_fscache_release_folio()
186 static inline bool nfs_fscache_release_folio(struct folio *folio, gfp_t gfp) in nfs_fscache_release_folio() argument
/fs/ramfs/
file-nommu.c
69 gfp_t gfp = mapping_gfp_mask(inode->i_mapping); in ramfs_nommu_expand_for_mapping() local
84 pages = alloc_pages(gfp, order); in ramfs_nommu_expand_for_mapping()
108 gfp); in ramfs_nommu_expand_for_mapping()
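
ramfs_nommu_expand_for_mapping() takes the gfp mask configured on the address space (mapping_gfp_mask()) before allocating backing pages, so any per-mapping restrictions are honoured. A minimal sketch of that pattern:

#include <linux/pagemap.h>
#include <linux/gfp.h>

static struct page *alloc_mapping_pages(struct address_space *mapping,
					unsigned int order)
{
	gfp_t gfp = mapping_gfp_mask(mapping);	/* the mapping's own constraints */

	return alloc_pages(gfp, order);
}
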
/fs/erofs/
internal.h
457 struct page *__erofs_allocpage(struct page **pagepool, gfp_t gfp, bool tryrsv);
458 static inline struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp) in erofs_allocpage() argument
460 return __erofs_allocpage(pagepool, gfp, false); in erofs_allocpage()
zutil.c
177 struct page *__erofs_allocpage(struct page **pagepool, gfp_t gfp, bool tryrsv) in __erofs_allocpage() argument
190 page = alloc_page(gfp); in __erofs_allocpage()
decompressor.c
98 victim = __erofs_allocpage(pagepool, rq->gfp, true); in z_erofs_lz4_prepare_dstpages()
360 *pgo = erofs_allocpage(pgpl, rq->gfp); in z_erofs_stream_switch_bufs()
413 tmppage = erofs_allocpage(pgpl, rq->gfp); in z_erofs_stream_switch_bufs()
compress.h
20 gfp_t gfp; /* allocation flags for extra temporary buffers */ member
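
The erofs hits revolve around a per-request page pool: erofs_allocpage() reuses pooled pages and otherwise falls back to the allocator with rq->gfp. A simplified pool-then-allocate sketch in that spirit (pool chained through page->private here; the real helper also has a try-reserved mode):

#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *pool_allocpage(struct page **pagepool, gfp_t gfp)
{
	struct page *page = *pagepool;

	if (page) {
		/* pop the head of the singly linked pool */
		*pagepool = (struct page *)page_private(page);
		set_page_private(page, 0);
		return page;
	}
	return alloc_page(gfp);		/* pool empty: fall back to the allocator */
}
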
/fs/iomap/
buffered-io.c
176 gfp_t gfp; in ifs_alloc() local
182 gfp = GFP_NOWAIT; in ifs_alloc()
184 gfp = GFP_NOFS | __GFP_NOFAIL; in ifs_alloc()
193 BITS_TO_LONGS(2 * nr_blocks)), gfp); in ifs_alloc()
402 gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL); in iomap_readpage_iter() local
403 gfp_t orig_gfp = gfp; in iomap_readpage_iter()
410 gfp |= __GFP_NORETRY | __GFP_NOWARN; in iomap_readpage_iter()
412 REQ_OP_READ, gfp); in iomap_readpage_iter()
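
ifs_alloc() picks GFP_NOWAIT for callers that must not block and GFP_NOFS | __GFP_NOFAIL otherwise, while iomap_readpage_iter() mirrors the mpage readahead pattern shown earlier. The nowait-versus-nofail choice as a tiny helper (name is made up):

#include <linux/gfp.h>
#include <linux/types.h>

static gfp_t ifs_state_gfp(bool nowait)
{
	/* non-blocking callers may fail; everyone else must succeed */
	return nowait ? GFP_NOWAIT : GFP_NOFS | __GFP_NOFAIL;
}
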
/fs/afs/
rxrpc.c
158 gfp_t gfp) in afs_alloc_call() argument
163 call = kzalloc(sizeof(*call), gfp); in afs_alloc_call()
337 void afs_make_call(struct afs_call *call, gfp_t gfp) in afs_make_call() argument
378 gfp, in afs_make_call()
/fs/fuse/
virtio_fs.c
100 gfp_t gfp);
694 static int copy_args_to_argbuf(struct fuse_req *req, gfp_t gfp) in copy_args_to_argbuf() argument
708 req->argbuf = kmalloc(len, gfp); in copy_args_to_argbuf()
1374 gfp_t gfp) in virtio_fs_enqueue_req() argument
1395 sgs = kmalloc_array(total_sgs, sizeof(sgs[0]), gfp); in virtio_fs_enqueue_req()
1396 sg = kmalloc_array(total_sgs, sizeof(sg[0]), gfp); in virtio_fs_enqueue_req()
1404 ret = copy_args_to_argbuf(req, gfp); in virtio_fs_enqueue_req()
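
virtio_fs_enqueue_req() allocates its scatterlist arrays and argument buffer with the gfp mask passed in by the caller, so every allocation for one request obeys the same blocking rules. A simplified sketch with illustrative field names:

#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

struct req_bufs {
	struct scatterlist **sgs;
	struct scatterlist *sg;
	void *argbuf;
};

static int req_bufs_alloc(struct req_bufs *b, unsigned int total_sgs,
			  size_t arg_len, gfp_t gfp)
{
	/* one gfp mask covers everything needed to submit the request */
	b->sgs = kmalloc_array(total_sgs, sizeof(*b->sgs), gfp);
	b->sg = kmalloc_array(total_sgs, sizeof(*b->sg), gfp);
	b->argbuf = kmalloc(arg_len, gfp);

	if (!b->sgs || !b->sg || !b->argbuf) {
		kfree(b->sgs);
		kfree(b->sg);
		kfree(b->argbuf);
		return -ENOMEM;
	}
	return 0;
}
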

Completed in 65 milliseconds
