Searched refs:page (Results 1 – 25 of 54) sorted by relevance

/kernel/dma/
direct.c
109 struct page *page = swiotlb_alloc(dev, size); in dma_direct_alloc_swiotlb() local
123 struct page *page = NULL; in __dma_direct_alloc_pages() local
133 if (page) { in __dma_direct_alloc_pages()
141 if (!page) in __dma_direct_alloc_pages()
175 struct page *page; in dma_direct_alloc_from_pool() local
184 if (!page) in dma_direct_alloc_from_pool()
193 struct page *page; in dma_direct_alloc_no_mapping() local
196 if (!page) in dma_direct_alloc_no_mapping()
212 struct page *page; in dma_direct_alloc() local
361 struct page *page; in dma_direct_alloc_pages() local
[all …]
ops_helpers.c
23 struct page *page = dma_common_vaddr_to_page(cpu_addr); in dma_common_get_sgtable() local
43 struct page *page = dma_common_vaddr_to_page(cpu_addr); in dma_common_mmap() local
55 page_to_pfn(page) + vma->vm_pgoff, in dma_common_mmap()
66 struct page *page; in dma_common_alloc_pages() local
69 if (!page) in dma_common_alloc_pages()
71 if (!page) in dma_common_alloc_pages()
81 dma_free_contiguous(dev, page, size); in dma_common_alloc_pages()
85 memset(page_address(page), 0, size); in dma_common_alloc_pages()
86 return page; in dma_common_alloc_pages()
89 void dma_common_free_pages(struct device *dev, size_t size, struct page *page, in dma_common_free_pages() argument
[all …]
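
The ops_helpers.c matches trace the generic mmap path: take the backing page, offset its PFN by the user's vma->vm_pgoff, and hand the result to remap_pfn_range(). A minimal sketch of that shape, assuming a physically contiguous allocation (the helper name is hypothetical):

#include <linux/mm.h>

/* Hypothetical helper mirroring the page_to_pfn(page) + vma->vm_pgoff
 * pattern visible in dma_common_mmap() above. */
static int example_mmap_contiguous(struct vm_area_struct *vma,
				   struct page *page, size_t size)
{
	unsigned long user_len = vma->vm_end - vma->vm_start;

	/* Refuse mappings that would run past the allocation. */
	if (vma->vm_pgoff + (user_len >> PAGE_SHIFT) > (size >> PAGE_SHIFT))
		return -EINVAL;

	return remap_pfn_range(vma, vma->vm_start,
			       page_to_pfn(page) + vma->vm_pgoff,
			       user_len, vma->vm_page_prot);
}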
pool.c
83 struct page *page = NULL; in atomic_pool_expand() local
95 if (!page) in atomic_pool_expand()
96 page = alloc_pages(gfp, order); in atomic_pool_expand()
97 } while (!page && order-- > 0); in atomic_pool_expand()
98 if (!page) in atomic_pool_expand()
101 arch_dma_prep_coherent(page, pool_size); in atomic_pool_expand()
110 addr = page_to_virt(page); in atomic_pool_expand()
139 __free_pages(page, order); in atomic_pool_expand()
270 struct page *page; in dma_alloc_from_pool() local
275 if (page) in dma_alloc_from_pool()
[all …]
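
The atomic_pool_expand() lines show the classic order-fallback loop: attempt a high-order allocation and retry at smaller orders until one succeeds. A sketch of the same idiom (the function name is illustrative):

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical: allocate the largest block available up to 'size',
 * echoing the "while (!page && order-- > 0)" loop above. */
static void *example_alloc_block(size_t size, gfp_t gfp,
				 unsigned int *out_order)
{
	unsigned int order = get_order(size);
	struct page *page;

	do {
		page = alloc_pages(gfp, order);
	} while (!page && order-- > 0);

	if (!page)
		return NULL;

	*out_order = order;	/* needed later for __free_pages() */
	return page_address(page);
}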
contiguous.c
370 struct page *page; in dma_alloc_contiguous() local
373 page = cma_alloc_aligned(cma, size, gfp); in dma_alloc_contiguous()
374 if (page) in dma_alloc_contiguous()
375 return page; in dma_alloc_contiguous()
380 page = cma_alloc_aligned(cma, size, gfp); in dma_alloc_contiguous()
381 if (page) in dma_alloc_contiguous()
382 return page; in dma_alloc_contiguous()
403 void dma_free_contiguous(struct device *dev, struct page *page, size_t size) in dma_free_contiguous() argument
417 page, count)) in dma_free_contiguous()
420 page, count)) in dma_free_contiguous()
[all …]
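
dma_alloc_contiguous() first tries the device's private CMA area and falls back before giving up, pairing with dma_free_contiguous() on release. A hedged usage sketch (the caller is a placeholder; in-tree users are mostly DMA-ops implementations rather than ordinary drivers):

#include <linux/dma-map-ops.h>

/* Hypothetical caller of the alloc/free pair shown above. */
static struct page *example_grab_buffer(struct device *dev, size_t size)
{
	struct page *page = dma_alloc_contiguous(dev, size, GFP_KERNEL);

	if (!page)
		return NULL;

	/* Release with dma_free_contiguous(dev, page, size) when done. */
	return page;
}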
remap.c
9 struct page **dma_common_find_pages(void *cpu_addr) in dma_common_find_pages()
24 void *dma_common_pages_remap(struct page **pages, size_t size, in dma_common_pages_remap()
40 void *dma_common_contiguous_remap(struct page *page, size_t size, in dma_common_contiguous_remap() argument
44 struct page **pages; in dma_common_contiguous_remap()
48 pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL); in dma_common_contiguous_remap()
52 pages[i] = nth_page(page, i); in dma_common_contiguous_remap()
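
dma_common_contiguous_remap() builds a temporary struct page * array so one physically contiguous block can be remapped through the generic pages-based path. A sketch of that technique with plain vmap() (names are illustrative; the real function routes through dma_common_pages_remap()):

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Hypothetical: give a contiguous block a fresh virtual mapping via the
 * kvmalloc_array() + nth_page() pattern above. */
static void *example_remap_contiguous(struct page *page, size_t size,
				      pgprot_t prot)
{
	unsigned long count = size >> PAGE_SHIFT;
	struct page **pages;
	void *vaddr;
	unsigned long i;

	pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0; i < count; i++)
		pages[i] = nth_page(page, i);

	vaddr = vmap(pages, count, VM_MAP, prot);
	kvfree(pages);	/* the array is only needed while building the mapping */

	return vaddr;
}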
mapping.c
155 dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page, in dma_map_page_attrs() argument
174 kmsan_handle_dma(page, offset, size, dir); in dma_map_page_attrs()
710 struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp); in dma_alloc_pages() local
712 if (page) { in dma_alloc_pages()
719 return page; in dma_alloc_pages()
723 static void __dma_free_pages(struct device *dev, size_t size, struct page *page, in __dma_free_pages() argument
737 void dma_free_pages(struct device *dev, size_t size, struct page *page, in dma_free_pages() argument
747 size_t size, struct page *page) in dma_mmap_pages() argument
754 page_to_pfn(page) + vma->vm_pgoff, in dma_mmap_pages()
763 struct page *page; in alloc_single_sgt() local
[all …]
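
dma_map_page_attrs() is the core of the streaming API; drivers normally reach it through dma_map_page(). A hedged usage sketch (device, page, and direction are placeholders):

#include <linux/dma-mapping.h>

/* Hypothetical streaming map of one page for a device read. */
static int example_map_for_device(struct device *dev, struct page *page,
				  size_t len)
{
	dma_addr_t addr = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... start the transfer, then unmap once it completes ... */
	dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
	return 0;
}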
swiotlb.c
577 struct page *page; in alloc_dma_pages() local
582 if (!page) in alloc_dma_pages()
585 paddr = page_to_phys(page); in alloc_dma_pages()
587 __free_pages(page, order); in alloc_dma_pages()
594 return page; in alloc_dma_pages()
615 struct page *page; in swiotlb_alloc_tlb() local
649 return page; in swiotlb_alloc_tlb()
689 struct page *tlb; in swiotlb_alloc_pool()
897 struct page *page; in swiotlb_bounce() local
905 page = pfn_to_page(pfn); in swiotlb_bounce()
[all …]
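
alloc_dma_pages() converts a fresh allocation with page_to_phys() and frees it again when the physical address breaks the pool's constraints. A sketch of that check-and-back-out shape (the limit parameter is an assumption, not swiotlb's exact rule):

#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/mm.h>

/* Hypothetical: accept an allocation only if the whole block sits below
 * a physical address limit, as alloc_dma_pages() does above. */
static struct page *example_alloc_below(gfp_t gfp, unsigned int order,
					u64 phys_limit)
{
	struct page *page = alloc_pages(gfp, order);
	phys_addr_t paddr;

	if (!page)
		return NULL;

	paddr = page_to_phys(page);
	if (paddr + (PAGE_SIZE << order) - 1 > phys_limit) {
		__free_pages(page, order);	/* back out, caller may retry */
		return NULL;
	}
	return page;
}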
direct.h
84 struct page *page, unsigned long offset, size_t size, in dma_direct_map_page() argument
87 phys_addr_t phys = page_to_phys(page) + offset; in dma_direct_map_page()
91 if (is_pci_p2pdma_page(page)) in dma_direct_map_page()
98 if (is_pci_p2pdma_page(page)) in dma_direct_map_page()
debug.h
12 extern void debug_dma_map_page(struct device *dev, struct page *page,
58 static inline void debug_dma_map_page(struct device *dev, struct page *page, in debug_dma_map_page() argument
debug.c
1055 struct page *page, size_t offset) in check_for_stack() argument
1062 if (PageHighMem(page)) in check_for_stack()
1064 addr = page_address(page) + offset; in check_for_stack()
1072 if (page != stack_vm_area->pages[i]) in check_for_stack()
1204 void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, in debug_dma_map_page() argument
1228 check_for_stack(dev, page, offset); in debug_dma_map_page()
1230 if (!PageHighMem(page)) { in debug_dma_map_page()
1231 void *addr = page_address(page) + offset; in debug_dma_map_page()
1382 struct page *page; in virt_to_paddr() local
1385 page = vmalloc_to_page(virt); in virt_to_paddr()
[all …]
/kernel/power/
snapshot.c
229 struct page *page; in alloc_image_page() local
257 struct page *page; in free_image_page() local
1249 struct page *page; in mark_free_pages() local
1318 struct page *page; in saveable_highmem_page() local
1377 struct page *page; in saveable_page() local
1652 struct page *page; in preallocate_image_pages() local
2054 struct page *page; in alloc_highmem_pages() local
2093 struct page *page; in swsusp_alloc() local
2260 struct page *page; in snapshot_read_next() local
2468 struct page *page; in prepare_highmem_image() local
[all …]
swap.c
249 struct page *page = bio_first_page_all(bio); in hib_end_io() local
258 put_page(page); in hib_end_io()
693 unsigned char *page = NULL; in save_compressed_image() local
709 if (!page) { in save_compressed_image()
902 if (page) free_page((unsigned long)page); in save_compressed_image()
1215 unsigned char **page = NULL; in load_compressed_image() local
1228 page = vmalloc(array_size(CMP_MAX_RD_PAGES, sizeof(*page))); in load_compressed_image()
1229 if (!page) { in load_compressed_image()
1321 if (!page[i]) { in load_compressed_image()
1415 page[pg], PAGE_SIZE); in load_compressed_image()
[all …]
/kernel/
kexec_core.c
305 static void kimage_free_pages(struct page *page) in kimage_free_pages() argument
321 struct page *page, *next; in kimage_free_page_list() local
490 struct page *page; in kimage_add_entry() local
493 if (!page) in kimage_add_entry()
551 struct page *page; in kimage_free_entry() local
667 struct page *page; in kimage_alloc_page() local
681 page = NULL; in kimage_alloc_page()
687 if (!page) in kimage_alloc_page()
738 return page; in kimage_alloc_page()
826 struct page *page; in kimage_load_normal_segment() local
[all …]
crash_dump_dm_crypt.c
183 return sprintf(page, "%s\n", to_config_key(item)->description); in config_key_description_show()
187 const char *page, size_t count) in config_key_description_store() argument
194 len = strcspn(page, "\n"); in config_key_description_store()
206 config_key->description = kmemdup_nul(page, len, GFP_KERNEL); in config_key_description_store()
259 return sprintf(page, "%d\n", key_count); in config_keys_count_show()
268 return sprintf(page, "%d\n", is_dm_key_reused); in config_keys_reuse_show()
272 const char *page, size_t count) in config_keys_reuse_store() argument
280 if (kstrtobool(page, &is_dm_key_reused)) in config_keys_reuse_store()
315 return sprintf(page, "%d\n", restore); in config_keys_restore_show()
319 const char *page, size_t count) in config_keys_restore_store() argument
[all …]
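
Note that in crash_dump_dm_crypt.c the matches are configfs buffers, not struct page: show handlers sprintf() into the page-sized buffer and store handlers parse from it. A minimal sketch of such an attribute pair (the names and the bool attribute are hypothetical):

#include <linux/configfs.h>
#include <linux/kstrtox.h>

static bool example_flag;

/* Hypothetical configfs show/store pair in the style of
 * config_keys_reuse_show()/config_keys_reuse_store(); 'page' is the
 * PAGE_SIZE text buffer configfs hands in. */
static ssize_t example_flag_show(struct config_item *item, char *page)
{
	return sprintf(page, "%d\n", example_flag);
}

static ssize_t example_flag_store(struct config_item *item,
				  const char *page, size_t count)
{
	if (kstrtobool(page, &example_flag))
		return -EINVAL;
	return count;
}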
vmcore_info.c
166 VMCOREINFO_STRUCT_SIZE(page); in crash_save_vmcoreinfo_init()
172 VMCOREINFO_OFFSET(page, flags); in crash_save_vmcoreinfo_init()
173 VMCOREINFO_OFFSET(page, _refcount); in crash_save_vmcoreinfo_init()
174 VMCOREINFO_OFFSET(page, mapping); in crash_save_vmcoreinfo_init()
175 VMCOREINFO_OFFSET(page, lru); in crash_save_vmcoreinfo_init()
176 VMCOREINFO_OFFSET(page, _mapcount); in crash_save_vmcoreinfo_init()
177 VMCOREINFO_OFFSET(page, private); in crash_save_vmcoreinfo_init()
178 VMCOREINFO_OFFSET(page, compound_head); in crash_save_vmcoreinfo_init()
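
Each VMCOREINFO_OFFSET() line above records a struct page field offset in the vmcoreinfo note so post-mortem tools such as makedumpfile can decode raw memory images. The underlying quantity is just offsetof(); a hedged illustration (not the macro's exact expansion):

#include <linux/mm_types.h>
#include <linux/stddef.h>

/* Roughly what VMCOREINFO_OFFSET(page, flags) captures: the byte offset
 * of one field, exported as text ("OFFSET(page.flags)=N"). */
static unsigned long example_page_flags_offset(void)
{
	return offsetof(struct page, flags);
}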
watch_queue.c
61 struct page *page; in watch_queue_pipe_buf_release() local
73 page = buf->page; in watch_queue_pipe_buf_release()
74 bit += page->private; in watch_queue_pipe_buf_release()
103 struct page *page; in post_one_notification() local
118 page = wqueue->notes[note / WATCH_QUEUE_NOTES_PER_PAGE]; in post_one_notification()
120 get_page(page); in post_one_notification()
122 p = kmap_atomic(page); in post_one_notification()
127 buf->page = page; in post_one_notification()
240 struct page **pages; in watch_queue_set_size()
281 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); in watch_queue_set_size()
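
post_one_notification() pins the pipe page with get_page() and maps it with kmap_atomic() before copying the notification in. A sketch of the same map-copy-unmap step using kmap_local_page(), the current replacement for kmap_atomic() (the helper name is hypothetical):

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical: copy a buffer into a possibly-highmem page. */
static void example_copy_to_page(struct page *page, size_t offset,
				 const void *src, size_t len)
{
	void *p = kmap_local_page(page);

	memcpy(p + offset, src, len);
	kunmap_local(p);
}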
kexec_handover.c
86 struct page *fdt;
167 static void kho_restore_page(struct page *page, unsigned int order) in kho_restore_page() argument
172 set_page_count(page, 1); in kho_restore_page()
176 set_page_count(page + i, 0); in kho_restore_page()
192 struct page *page = pfn_to_online_page(PHYS_PFN(phys)); in kho_restore_folio() local
195 if (!page) in kho_restore_folio()
198 order = page->private; in kho_restore_folio()
202 kho_restore_page(page, order); in kho_restore_folio()
203 return page_folio(page); in kho_restore_folio()
323 struct page *page = phys_to_page(phys); in deserialize_bitmap() local
[all …]
relay.c
35 struct page *page; in relay_buf_fault() local
42 page = vmalloc_to_page(buf->start + (pgoff << PAGE_SHIFT)); in relay_buf_fault()
43 if (!page) in relay_buf_fault()
45 get_page(page); in relay_buf_fault()
46 vmf->page = page; in relay_buf_fault()
61 static struct page **relay_alloc_page_array(unsigned int n_pages) in relay_alloc_page_array()
63 return kvcalloc(n_pages, sizeof(struct page *), GFP_KERNEL); in relay_alloc_page_array()
69 static void relay_free_page_array(struct page **array) in relay_free_page_array()
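
relay_buf_fault() services a fault on a vmalloc'ed buffer by resolving the faulting offset with vmalloc_to_page(), taking a reference, and returning the page through vmf->page. A sketch of that fault-handler shape (the buffer pointer would really come from the vma's private data):

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Hypothetical .fault handler for a vmalloc-backed mapping, following
 * relay_buf_fault() above. */
static vm_fault_t example_vmalloc_fault(struct vm_fault *vmf,
					void *buf_start)
{
	struct page *page;

	page = vmalloc_to_page(buf_start + (vmf->pgoff << PAGE_SHIFT));
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);		/* reference consumed by the fault path */
	vmf->page = page;
	return 0;
}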
/kernel/module/
decompress.c
19 struct page **new_pages; in module_extend_max_pages()
36 struct page *page; in module_get_next_page() local
46 if (!page) in module_get_next_page()
50 return page; in module_get_next_page()
115 struct page *page = module_get_next_page(info); in module_gzip_decompress() local
117 if (IS_ERR(page)) { in module_gzip_decompress()
118 retval = PTR_ERR(page); in module_gzip_decompress()
174 struct page *page = module_get_next_page(info); in module_xz_decompress() local
176 if (IS_ERR(page)) { in module_xz_decompress()
258 struct page *page = module_get_next_page(info); in module_zstd_decompress() local
[all …]
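
module_get_next_page() reports failure through an error pointer rather than NULL, which is why every decompressor checks IS_ERR() and recovers the errno with PTR_ERR(). A minimal sketch of the idiom (the allocator is illustrative):

#include <linux/err.h>
#include <linux/gfp.h>

/* Hypothetical producer and consumer of the ERR_PTR convention used by
 * module_get_next_page(). */
static struct page *example_get_page(void)
{
	struct page *page = alloc_page(GFP_KERNEL);

	if (!page)
		return ERR_PTR(-ENOMEM);	/* encode errno in the pointer */
	return page;
}

static int example_use_page(void)
{
	struct page *page = example_get_page();

	if (IS_ERR(page))
		return PTR_ERR(page);		/* recover the errno */

	__free_page(page);
	return 0;
}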
/kernel/events/
ring_buffer.c
618 struct page *page; in rb_alloc_aux_page() local
639 return page; in rb_alloc_aux_page()
644 struct page *page = virt_to_page(rb->aux_pages[idx]); in rb_free_aux_page() local
647 __free_page(page); in rb_free_aux_page()
738 struct page *page; in rb_alloc_aux() local
743 if (!page) in rb_alloc_aux()
759 struct page *page = virt_to_page(rb->aux_pages[0]); in rb_alloc_aux() local
818 struct page *page; in perf_mmap_alloc_page() local
823 if (!page) in perf_mmap_alloc_page()
831 struct page *page = virt_to_page(addr); in perf_mmap_free_page() local
[all …]
uprobes.c
113 struct page *page; member
308 struct page *page; in __update_ref_ctr() local
339 put_page(page); in __update_ref_ctr()
383 unsigned long vaddr, struct page *page, bool *pmd_mappable) in orig_page_is_identical() argument
499 struct page *page; in uprobe_write_opcode() local
562 if (fw.page == page) in uprobe_write_opcode()
1041 struct page *page; in __copy_insn() local
1055 put_page(page); in __copy_insn()
1671 vmf->page = area->page; in xol_fault()
2378 struct page *page; in is_trap_at_addr() local
[all …]
internal.h
98 extern struct page *
153 handle->page++; \
154 handle->page &= rb->nr_pages - 1; \
155 handle->addr = rb->data_pages[handle->page]; \
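
The internal.h snippet advances the perf output position with "handle->page &= rb->nr_pages - 1", which replaces a modulo and is correct only because nr_pages is a power of two. The trick in isolation:

/* Power-of-two ring advance, as in the perf output handle above;
 * assumes nr_pages is a power of two. */
static unsigned int example_ring_next(unsigned int page,
				      unsigned int nr_pages)
{
	return (page + 1) & (nr_pages - 1);
}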
/kernel/bpf/
arena.c
156 struct page *page; in existing_page_cb() local
162 page = pte_page(pte); in existing_page_cb()
170 __free_page(page); in existing_page_cb()
268 struct page *page; in arena_vm_fault() local
277 if (page) in arena_vm_fault()
299 __free_page(page); in arena_vm_fault()
304 vmf->page = page; in arena_vm_fault()
431 struct page **pages; in arena_alloc_pages()
514 struct page *page; in arena_free_pages() local
539 if (!page) in arena_free_pages()
[all …]
stream.c
56 struct page *p; in bpf_stream_page_free()
84 struct page *page; in bpf_stream_page_replace() local
87 if (!page) in bpf_stream_page_replace()
89 stream_page = page_address(page); in bpf_stream_page_replace()
137 struct bpf_stream_page *page; in bpf_stream_page_reserve_elem() local
141 if (!page) in bpf_stream_page_reserve_elem()
142 page = bpf_stream_page_replace(); in bpf_stream_page_reserve_elem()
143 if (!page) in bpf_stream_page_reserve_elem()
148 page = bpf_stream_page_replace(); in bpf_stream_page_reserve_elem()
149 if (!page) in bpf_stream_page_reserve_elem()
[all …]
/kernel/trace/
ring_buffer.c
641 commit = local_read(&page->page->commit); in verify_event()
1393 if (rb_is_head_page(page, page->list.prev)) { in rb_set_head_page()
2244 struct page *page; in __rb_allocate_pages() local
2275 bpage->page = page_address(page); in __rb_allocate_pages()
2330 struct page *page; in rb_allocate_cpu_buffer() local
2378 bpage->page = page_address(page); in rb_allocate_cpu_buffer()
6063 rb_init_page(page->page); in rb_clear_buffer_page()
6467 struct page *page; in ring_buffer_alloc_read_page() local
6522 struct page *page = virt_to_page(bpage); in ring_buffer_free_read_page() local
6980 struct page *page; in rb_alloc_meta_page() local
[all …]
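
The trace ring buffer stores page_address(page) in bpage->page and later recovers the struct page with virt_to_page(); for directly mapped (lowmem) pages the two are inverses. A sketch of that round trip (the function name is hypothetical):

#include <linux/bug.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical demonstration of the page_address()/virt_to_page()
 * pairing the ring buffer code above relies on. */
static void example_roundtrip(void)
{
	struct page *page = alloc_page(GFP_KERNEL);
	void *vaddr;

	if (!page)
		return;

	vaddr = page_address(page);
	WARN_ON(virt_to_page(vaddr) != page);
	__free_page(page);
}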
