Searched refs: gfp (Results 1 – 14 of 14), sorted by relevance

/kernel/dma/
pool.c
48 if (gfp & __GFP_DMA) in dma_atomic_pool_size_add()
50 else if (gfp & __GFP_DMA32) in dma_atomic_pool_size_add()
56 static bool cma_in_zone(gfp_t gfp) in cma_in_zone() argument
80 gfp_t gfp) in atomic_pool_expand() argument
92 if (cma_in_zone(gfp)) in atomic_pool_expand()
96 page = alloc_pages(gfp, order); in atomic_pool_expand()
125 dma_atomic_pool_size_add(gfp, pool_size); in atomic_pool_expand()
163 gfp_t gfp) in __dma_atomic_pool_init() argument
178 pool_size >> 10, &gfp); in __dma_atomic_pool_init()
183 gen_pool_size(pool) >> 10, &gfp); in __dma_atomic_pool_init()
[all …]
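
The pool.c hits show the gfp mask selecting which atomic pool's size counter to update. A minimal sketch of that zone test, with hypothetical counter names standing in for the real pool-size accounting:

    #include <linux/atomic.h>
    #include <linux/gfp.h>

    /* Hypothetical per-zone byte counters, standing in for the real accounting. */
    static atomic_long_t dma_pool_bytes, dma32_pool_bytes, kernel_pool_bytes;

    /* Route the accounting by the zone bit carried in the gfp mask. */
    static void pool_size_add(gfp_t gfp, size_t size)
    {
            if (gfp & __GFP_DMA)
                    atomic_long_add(size, &dma_pool_bytes);
            else if (gfp & __GFP_DMA32)
                    atomic_long_add(size, &dma32_pool_bytes);
            else
                    atomic_long_add(size, &kernel_pool_bytes);
    }
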
direct.c
120 gfp_t gfp, bool allow_highmem) in __dma_direct_alloc_pages() argument
131 gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit); in __dma_direct_alloc_pages()
132 page = dma_alloc_contiguous(dev, size, gfp); in __dma_direct_alloc_pages()
149 !(gfp & (GFP_DMA32 | GFP_DMA))) { in __dma_direct_alloc_pages()
150 gfp |= GFP_DMA32; in __dma_direct_alloc_pages()
155 gfp = (gfp & ~GFP_DMA32) | GFP_DMA; in __dma_direct_alloc_pages()
173 dma_addr_t *dma_handle, gfp_t gfp) in dma_direct_alloc_from_pool() argument
191 dma_addr_t *dma_handle, gfp_t gfp) in dma_direct_alloc_no_mapping() argument
217 gfp |= __GFP_NOWARN; in dma_direct_alloc()
226 return arch_dma_alloc(dev, size, dma_handle, gfp, in dma_direct_alloc()
[all …]
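
The direct.c hits are the zone-narrowing retry: when the allocated page sits above the device's physical limit, the mask first gains GFP_DMA32 and, if that still fails, is rewritten to GFP_DMA. A reduced, illustrative sketch of that loop (phys_limit supplied by the caller, order fixed at 0):

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>
    #include <linux/io.h>

    /* Illustrative only: retry an order-0 allocation in progressively
     * lower zones until the page fits below phys_limit. */
    static struct page *alloc_below_limit(gfp_t gfp, u64 phys_limit)
    {
            struct page *page;

    again:
            page = alloc_pages(gfp, 0);
            if (page && page_to_phys(page) + PAGE_SIZE - 1 > phys_limit) {
                    __free_pages(page, 0);
                    page = NULL;

                    /* First constrain to 32-bit addresses ... */
                    if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
                        phys_limit < DMA_BIT_MASK(64) &&
                        !(gfp & (GFP_DMA32 | GFP_DMA))) {
                            gfp |= GFP_DMA32;
                            goto again;
                    }
                    /* ... then fall all the way back to ZONE_DMA. */
                    if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
                            gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
                            goto again;
                    }
            }
            return page;
    }
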
mapping.c
94 gfp_t gfp, unsigned long attrs) in dmam_alloc_attrs() argument
99 dr = devres_alloc(dmam_release, sizeof(*dr), gfp); in dmam_alloc_attrs()
103 vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs); in dmam_alloc_attrs()
694 if (WARN_ON_ONCE(gfp & __GFP_COMP)) in __dma_alloc_pages()
714 size, dir, gfp, 0); in dma_alloc_pages()
760 enum dma_data_direction dir, gfp_t gfp) in alloc_single_sgt() argument
765 sgt = kmalloc(sizeof(*sgt), gfp); in alloc_single_sgt()
768 if (sg_alloc_table(sgt, 1, gfp)) in alloc_single_sgt()
790 if (WARN_ON_ONCE(gfp & __GFP_COMP)) in dma_alloc_noncontiguous()
796 sgt = alloc_single_sgt(dev, size, dir, gfp); in dma_alloc_noncontiguous()
[all …]
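
In mapping.c the caller's gfp is used twice: once for the devres tracking node and once for the DMA buffer itself. A minimal sketch of that managed-allocation pattern, with a simplified stand-in for the real dmam_release/devres data:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>

    struct dmam_buf {                       /* hypothetical devres payload */
            size_t          size;
            void            *vaddr;
            dma_addr_t      dma_handle;
    };

    static void dmam_buf_release(struct device *dev, void *res)
    {
            struct dmam_buf *b = res;

            dma_free_attrs(dev, b->size, b->vaddr, b->dma_handle, 0);
    }

    /* Managed DMA allocation: freed automatically when the device detaches. */
    static void *dmam_buf_alloc(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp)
    {
            struct dmam_buf *b = devres_alloc(dmam_buf_release, sizeof(*b), gfp);
            void *vaddr;

            if (!b)
                    return NULL;

            vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, 0);
            if (!vaddr) {
                    devres_free(b);
                    return NULL;
            }

            b->size = size;
            b->vaddr = vaddr;
            b->dma_handle = *dma_handle;
            devres_add(dev, b);
            return vaddr;
    }
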
contiguous.c
331 static struct page *cma_alloc_aligned(struct cma *cma, size_t size, gfp_t gfp) in cma_alloc_aligned() argument
335 return cma_alloc(cma, size >> PAGE_SHIFT, align, gfp & __GFP_NOWARN); in cma_alloc_aligned()
353 struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp) in dma_alloc_contiguous() argument
360 if (!gfpflags_allow_blocking(gfp)) in dma_alloc_contiguous()
363 return cma_alloc_aligned(dev->cma_area, size, gfp); in dma_alloc_contiguous()
368 if (nid != NUMA_NO_NODE && !(gfp & (GFP_DMA | GFP_DMA32))) { in dma_alloc_contiguous()
373 page = cma_alloc_aligned(cma, size, gfp); in dma_alloc_contiguous()
380 page = cma_alloc_aligned(cma, size, gfp); in dma_alloc_contiguous()
389 return cma_alloc_aligned(dma_contiguous_default_area, size, gfp); in dma_alloc_contiguous()
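
contiguous.c only enters CMA when gfpflags_allow_blocking(gfp) is true, and forwards just the __GFP_NOWARN bit to cma_alloc(). A small sketch of that wrapper, assuming a struct cma pointer is already in hand:

    #include <linux/cma.h>
    #include <linux/gfp.h>
    #include <linux/kernel.h>

    /* Allocate from a CMA area, aligned to the buffer's own order. */
    static struct page *cma_alloc_sized(struct cma *cma, size_t size, gfp_t gfp)
    {
            unsigned int align = min_t(size_t, get_order(size), CONFIG_CMA_ALIGNMENT);

            /* cma_alloc() may sleep, so non-blocking callers must bail out. */
            if (!gfpflags_allow_blocking(gfp))
                    return NULL;

            return cma_alloc(cma, size >> PAGE_SHIFT, align, gfp & __GFP_NOWARN);
    }
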
swiotlb.c
581 page = alloc_pages(gfp, order); in alloc_dma_pages()
613 u64 phys_limit, gfp_t gfp) in swiotlb_alloc_tlb() argument
631 gfp &= ~GFP_ZONEMASK; in swiotlb_alloc_tlb()
633 gfp |= __GFP_DMA; in swiotlb_alloc_tlb()
635 gfp |= __GFP_DMA32; in swiotlb_alloc_tlb()
640 !(gfp & (__GFP_DMA32 | __GFP_DMA))) in swiotlb_alloc_tlb()
641 gfp |= __GFP_DMA32; in swiotlb_alloc_tlb()
643 !(gfp & __GFP_DMA)) in swiotlb_alloc_tlb()
644 gfp = (gfp & ~__GFP_DMA32) | __GFP_DMA; in swiotlb_alloc_tlb()
699 pool = kzalloc(pool_size, gfp); in swiotlb_alloc_pool()
[all …]
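
swiotlb.c strips any existing zone bits with ~GFP_ZONEMASK and re-adds __GFP_DMA or __GFP_DMA32 based on the physical address limit. A condensed, illustrative sketch of that zone selection:

    #include <linux/dma-direct.h>
    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>

    /* Pick the zone modifier matching a physical address limit (illustrative). */
    static gfp_t tlb_gfp_for_limit(gfp_t gfp, u64 phys_limit)
    {
            gfp &= ~GFP_ZONEMASK;
            if (IS_ENABLED(CONFIG_ZONE_DMA) &&
                phys_limit < DMA_BIT_MASK(zone_dma_bits))
                    gfp |= __GFP_DMA;
            else if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
                     phys_limit < DMA_BIT_MASK(32))
                    gfp |= __GFP_DMA32;
            return gfp;
    }
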
ops_helpers.c
63 dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp) in dma_common_alloc_pages() argument
68 page = dma_alloc_contiguous(dev, size, gfp); in dma_common_alloc_pages()
70 page = alloc_pages_node(dev_to_node(dev), gfp, get_order(size)); in dma_common_alloc_pages()
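
ops_helpers.c tries a contiguous (CMA) allocation first and falls back to plain pages on the device's NUMA node, passing the same gfp both times. A minimal sketch of that fallback:

    #include <linux/device.h>
    #include <linux/dma-map-ops.h>
    #include <linux/gfp.h>

    /* CMA first, then the page allocator on the device's node (illustrative). */
    static struct page *common_alloc_pages(struct device *dev, size_t size, gfp_t gfp)
    {
            struct page *page;

            page = dma_alloc_contiguous(dev, size, gfp);
            if (!page)
                    page = alloc_pages_node(dev_to_node(dev), gfp, get_order(size));
            return page;
    }
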
debug.c
601 static int dma_debug_create_entries(gfp_t gfp) in dma_debug_create_entries() argument
606 entry = (void *)get_zeroed_page(gfp); in dma_debug_create_entries()
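
debug.c grows its entry pool one zeroed page at a time; the gfp argument decides whether that growth may block. A tiny sketch of the same pattern with a hypothetical entry type:

    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/list.h>

    struct dbg_entry { struct list_head list; };    /* hypothetical entry type */

    /* Carve one zeroed page into pool entries; gfp controls blocking behaviour. */
    static int grow_entry_pool(gfp_t gfp, struct list_head *free_list)
    {
            struct dbg_entry *entry = (void *)get_zeroed_page(gfp);
            int i;

            if (!entry)
                    return -ENOMEM;

            for (i = 0; i < PAGE_SIZE / sizeof(*entry); i++)
                    list_add_tail(&entry[i].list, free_list);
            return 0;
    }
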
/kernel/bpf/
cpumap.c
434 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; in __cpu_map_entry_alloc() local
441 rcpu = bpf_map_kmalloc_node(map, sizeof(*rcpu), gfp | __GFP_ZERO, numa); in __cpu_map_entry_alloc()
447 sizeof(void *), gfp); in __cpu_map_entry_alloc()
457 rcpu->queue = bpf_map_kmalloc_node(map, sizeof(*rcpu->queue), gfp, in __cpu_map_entry_alloc()
462 err = ptr_ring_init(rcpu->queue, value->qsize, gfp); in __cpu_map_entry_alloc()
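
cpumap.c composes one base mask, GFP_KERNEL | __GFP_NOWARN, and reuses it for every allocation in the entry setup, adding __GFP_ZERO only where a zeroed object is wanted. A reduced sketch using plain kmalloc_node()/ptr_ring_init() instead of the bpf_map_* accounting wrappers:

    #include <linux/ptr_ring.h>
    #include <linux/slab.h>

    struct cpu_entry {                      /* hypothetical per-CPU map entry */
            struct ptr_ring *queue;
    };

    static struct cpu_entry *cpu_entry_alloc(u32 qsize, int numa)
    {
            gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
            struct cpu_entry *e;

            /* Zero the entry itself; the queue is initialised explicitly below. */
            e = kmalloc_node(sizeof(*e), gfp | __GFP_ZERO, numa);
            if (!e)
                    return NULL;

            e->queue = kmalloc_node(sizeof(*e->queue), gfp, numa);
            if (!e->queue)
                    goto free_entry;

            if (ptr_ring_init(e->queue, qsize, gfp))
                    goto free_queue;
            return e;

    free_queue:
            kfree(e->queue);
    free_entry:
            kfree(e);
            return NULL;
    }
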
local_storage.c
495 const gfp_t gfp = __GFP_ZERO | GFP_USER; in bpf_cgroup_storage_alloc() local
508 gfp, map->numa_node); in bpf_cgroup_storage_alloc()
513 storage->buf = bpf_map_kmalloc_node(map, size, gfp, in bpf_cgroup_storage_alloc()
519 storage->percpu_buf = bpf_map_alloc_percpu(map, size, 8, gfp); in bpf_cgroup_storage_alloc()
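
local_storage.c fixes the mask up front as __GFP_ZERO | GFP_USER and then shares it between the regular buffer and the per-CPU buffer. A small sketch of that shared-mask pattern with generic allocators in place of the bpf_map_* wrappers:

    #include <linux/errno.h>
    #include <linux/percpu.h>
    #include <linux/slab.h>

    /* One mask, two allocators: zeroed, user-accountable memory (illustrative). */
    static int storage_bufs_alloc(size_t size, int numa,
                                  void **buf, void __percpu **pcpu_buf)
    {
            const gfp_t gfp = __GFP_ZERO | GFP_USER;

            *buf = kmalloc_node(size, gfp, numa);
            if (!*buf)
                    return -ENOMEM;

            *pcpu_buf = __alloc_percpu_gfp(size, 8, gfp);
            if (!*pcpu_buf) {
                    kfree(*buf);
                    return -ENOMEM;
            }
            return 0;
    }
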
memalloc.c
208 gfp_t gfp; in alloc_bulk() local
212 gfp = __GFP_NOWARN | __GFP_ACCOUNT; in alloc_bulk()
213 gfp |= atomic ? GFP_NOWAIT : GFP_KERNEL; in alloc_bulk()
246 obj = __alloc(c, node, gfp); in alloc_bulk()
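
memalloc.c picks the blocking behaviour at run time: GFP_NOWAIT when refilling from an atomic context, GFP_KERNEL otherwise, with __GFP_NOWARN | __GFP_ACCOUNT kept in both cases. A one-function sketch:

    #include <linux/slab.h>

    /* Choose the blocking mode at run time, keep the accounting bits constant. */
    static void *bulk_obj_alloc(size_t size, int node, bool atomic)
    {
            gfp_t gfp = __GFP_NOWARN | __GFP_ACCOUNT;

            gfp |= atomic ? GFP_NOWAIT : GFP_KERNEL;
            return kmalloc_node(size, gfp, node);
    }
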
syscall.c
379 gfp_t gfp = bpf_memcg_flags(__GFP_NOWARN | __GFP_ZERO); in __bpf_map_area_alloc() local
393 area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY, in __bpf_map_area_alloc()
400 gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL, in __bpf_map_area_alloc()
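
syscall.c first attempts a physically contiguous kmalloc_node() with __GFP_NORETRY, so a failed attempt stays cheap, then falls back to vmalloc with __GFP_RETRY_MAYFAIL. A simplified sketch (the real helper also applies memcg accounting via bpf_memcg_flags() and targets the map's NUMA node in the fallback):

    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    static void *map_area_alloc(size_t size, int numa_node)
    {
            gfp_t gfp = __GFP_NOWARN | __GFP_ZERO;
            void *area;

            /* Cheap first try: contiguous, but give up quickly under pressure. */
            if (size <= KMALLOC_MAX_SIZE) {
                    area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
                                        numa_node);
                    if (area)
                            return area;
            }

            /* Fallback: virtually contiguous, allowed to try harder. */
            return __vmalloc(size, gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL);
    }
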
/kernel/power/
swap.c
374 gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY; in write_page() local
384 src = (void *)__get_free_page(gfp); in write_page()
389 src = (void *)__get_free_page(gfp); in write_page()
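
swap.c allocates its bounce pages with GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY so that writing the hibernation image can neither recurse back into block I/O nor stall in retries. A tiny sketch of the same idea:

    #include <linux/gfp.h>

    /* Bounce-page allocation on an I/O path: never re-enter I/O, fail fast. */
    static void *io_bounce_page_alloc(void)
    {
            gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;

            return (void *)__get_free_page(gfp);
    }
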
/kernel/rcu/
tree.c
1993 static bool rcu_gp_fqs_check_wake(int *gfp) in rcu_gp_fqs_check_wake() argument
1998 if (*gfp & RCU_GP_FLAG_OVLD) in rcu_gp_fqs_check_wake()
2002 *gfp = READ_ONCE(rcu_state.gp_flags); in rcu_gp_fqs_check_wake()
2003 if (*gfp & RCU_GP_FLAG_FQS) in rcu_gp_fqs_check_wake()
/kernel/
auditsc.c
2919 enum audit_nfcfgop op, gfp_t gfp) in __audit_log_nfcfg() argument
2924 ab = audit_log_start(audit_context(), gfp, AUDIT_NETFILTER_CFG); in __audit_log_nfcfg()
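
auditsc.c forwards the caller's gfp straight into audit_log_start(), so atomic-context callers can pass GFP_ATOMIC while process-context callers use GFP_KERNEL. A minimal usage sketch with a hypothetical record body:

    #include <linux/audit.h>
    #include <linux/gfp.h>

    /* Emit a simple audit record; gfp reflects the caller's context. */
    static void log_cfg_change(const char *name, gfp_t gfp)
    {
            struct audit_buffer *ab;

            ab = audit_log_start(audit_context(), gfp, AUDIT_NETFILTER_CFG);
            if (!ab)
                    return;
            audit_log_format(ab, "table=%s", name);
            audit_log_end(ab);
    }
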

Completed in 49 milliseconds