/kernel/bpf/
cgroup_iter.c
     54  int order;                                           [member]
     77  if (p->order == BPF_CGROUP_ITER_DESCENDANTS_PRE)     in cgroup_iter_seq_start()
    110  if (p->order == BPF_CGROUP_ITER_DESCENDANTS_PRE)     in cgroup_iter_seq_next()
    176  p->order = aux->cgroup.order;                        in BTF_ID_LIST_GLOBAL_SINGLE()
    200  int order = linfo->cgroup.order;                     in bpf_iter_attach_cgroup()  [local]
    203  if (order != BPF_CGROUP_ITER_DESCENDANTS_PRE &&      in bpf_iter_attach_cgroup()
    204      order != BPF_CGROUP_ITER_DESCENDANTS_POST &&     in bpf_iter_attach_cgroup()
    205      order != BPF_CGROUP_ITER_ANCESTORS_UP &&         in bpf_iter_attach_cgroup()
    206      order != BPF_CGROUP_ITER_SELF_ONLY)              in bpf_iter_attach_cgroup()
    223  aux->cgroup.order = order;                           in bpf_iter_attach_cgroup()
    [all …]
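In this file, order selects a cgroup traversal strategy rather than an allocation size: bpf_iter_attach_cgroup() rejects anything but the four supported walks. A minimal sketch of that check in plain C; the enumerator names come from the lines above, but their ordering and values mirror my reading of the UAPI header and should be treated as an assumption:

    #include <stdbool.h>

    /* Names as in include/uapi/linux/bpf.h; the ordering/values here
     * are an assumption of this sketch. */
    enum bpf_cgroup_iter_order {
            BPF_CGROUP_ITER_ORDER_UNSPEC = 0,
            BPF_CGROUP_ITER_SELF_ONLY,
            BPF_CGROUP_ITER_DESCENDANTS_PRE,
            BPF_CGROUP_ITER_DESCENDANTS_POST,
            BPF_CGROUP_ITER_ANCESTORS_UP,
    };

    /* Same shape as the check at lines 203-206 above. */
    static bool cgroup_iter_order_valid(int order)
    {
            return order == BPF_CGROUP_ITER_DESCENDANTS_PRE ||
                   order == BPF_CGROUP_ITER_DESCENDANTS_POST ||
                   order == BPF_CGROUP_ITER_ANCESTORS_UP ||
                   order == BPF_CGROUP_ITER_SELF_ONLY;
    }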
|
/kernel/
kexec_handover.c
    139  pfn += 1 << order;                                   in __kho_unpreserve()
    144  unsigned int order)                                  in __kho_preserve_order()  [argument]
    178  if (order > 0)                                       in kho_restore_page()
    193  unsigned long order;                                 in kho_restore_folio()  [local]
    198  order = page->private;                               in kho_restore_folio()
    228  unsigned int order;                                  [member]
    251  chunk->hdr.order = order;                            in new_chunk()
    274  unsigned long order;                                 in kho_mem_serialize()  [local]
    327  page->private = order;                               in deserialize_bitmap()
    705  pfn += 1 << order;                                   in kho_preserve_phys()
    [all …]
|
kexec_core.c
    289  set_page_private(pages, order);                      in kimage_alloc_pages()
    290  count = 1 << order;                                  in kimage_alloc_pages()
    307  unsigned int order, count, i;                        in kimage_free_pages()  [local]
    309  order = page_private(page);                          in kimage_free_pages()
    310  count = 1 << order;                                  in kimage_free_pages()
    316  __free_pages(page, order);                           in kimage_free_pages()
    330  unsigned int order)                                  in kimage_alloc_normal_control_pages()  [argument]
    349  count = 1 << order;                                  in kimage_alloc_normal_control_pages()
    397  unsigned int order)                                  in kimage_alloc_crash_control_pages()  [argument]
    424  size = (1 << order) << PAGE_SHIFT;                   in kimage_alloc_crash_control_pages()
    [all …]
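The pattern on display here: kimage_alloc_pages() stashes the allocation order in the page's private field, and kimage_free_pages() reads it back to learn that the block spans 1 << order pages. A userspace analog of that bookkeeping; the struct and helper names are hypothetical stand-ins, not kernel API:

    #include <stdlib.h>

    #define PAGE_SIZE 4096UL   /* assumption: 4 KiB pages */

    struct block {
            unsigned int order;   /* plays the role of page_private() */
            void *mem;
    };

    static int block_alloc(struct block *b, unsigned int order)
    {
            b->mem = malloc(PAGE_SIZE << order);   /* 1 << order pages */
            if (!b->mem)
                    return -1;
            b->order = order;                      /* set_page_private() analog */
            return 0;
    }

    static void block_free(struct block *b)
    {
            /* count = 1 << b->order pages are released in one call */
            free(b->mem);
            b->mem = NULL;
    }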
|
Kconfig.kexec
     61  loaded in order for this to work.
|
/kernel/locking/
test-ww_mutex.c
    406  int *order;                                          in get_random_order()  [local]
    409  order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);   in get_random_order()
    410  if (!order)                                          in get_random_order()
    419  swap(order[n], order[r]);                            in get_random_order()
    422  return order;                                        in get_random_order()
    436  int *order;                                          in stress_inorder_work()  [local]
    439  if (!order)                                          in stress_inorder_work()
    481  kfree(order);                                        in stress_inorder_work()
    495  int *order;                                          in stress_reorder_work()  [local]
    499  if (!order)                                          in stress_reorder_work()
    [all …]
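get_random_order() fills an array with 0..count-1 and shuffles it with pairwise swaps, i.e. a Fisher-Yates shuffle, so the stress workers lock the ww_mutexes in a random order. A plain C sketch of the same routine, with libc rand() standing in for the kernel's random number helper:

    #include <stdlib.h>

    static int *get_random_order(int count)
    {
            int *order = malloc(count * sizeof(*order));
            if (!order)
                    return NULL;

            for (int n = 0; n < count; n++)
                    order[n] = n;              /* identity permutation */

            /* Fisher-Yates: swap each slot with a random earlier one */
            for (int n = count - 1; n > 0; n--) {
                    int r = rand() % (n + 1);
                    int tmp = order[n];
                    order[n] = order[r];
                    order[r] = tmp;
            }
            return order;
    }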
|
/kernel/dma/
pool.c
     82  unsigned int order;                                  in atomic_pool_expand()  [local]
     88  order = min(get_order(pool_size), MAX_PAGE_ORDER);   in atomic_pool_expand()
     91  pool_size = 1 << (PAGE_SHIFT + order);               in atomic_pool_expand()
     93  page = dma_alloc_from_contiguous(NULL, 1 << order,   in atomic_pool_expand()
     94  order, false);                                       in atomic_pool_expand()
     96  page = alloc_pages(gfp, order);                      in atomic_pool_expand()
     97  } while (!page && order-- > 0);                      in atomic_pool_expand()
    117  1 << order);                                         in atomic_pool_expand()
    130  1 << order);                                         in atomic_pool_expand()
    139  __free_pages(page, order);                           in atomic_pool_expand()
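atomic_pool_expand() caps the order at MAX_PAGE_ORDER and then retries at ever smaller orders until an allocation succeeds, a common best-effort idiom for high-order allocations. A sketch of that loop with malloc() standing in for alloc_pages(); the 4 KiB page size is an assumption:

    #include <stdlib.h>

    #define PAGE_SHIFT 12   /* assumption: 4 KiB pages */

    /* Try the requested order first, then halve until something fits.
     * On success, *got holds the size actually obtained. */
    static void *alloc_best_effort(unsigned int order, size_t *got)
    {
            void *p;

            do {
                    p = malloc((size_t)1 << (PAGE_SHIFT + order));
            } while (!p && order-- > 0);   /* same shape as line 97 above */

            if (p)
                    *got = (size_t)1 << (PAGE_SHIFT + order);
            return p;
    }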
|
coherent.c
    145  int order = get_order(size);                         in __dma_alloc_from_coherent()  [local]
    155  pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);   in __dma_alloc_from_coherent()
    200  int order, void *vaddr)                              in __dma_release_from_coherent()  [argument]
    208  bitmap_release_region(mem->bitmap, page, order);     in __dma_release_from_coherent()
    227  int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)   in dma_release_from_dev_coherent()  [argument]
    231  return __dma_release_from_coherent(mem, order, vaddr);   in dma_release_from_dev_coherent()
    292  int dma_release_from_global_coherent(int order, void *vaddr)   in dma_release_from_global_coherent()  [argument]
    297  return __dma_release_from_coherent(dma_coherent_default_memory, order,   in dma_release_from_global_coherent()
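The coherent pool carves regions out of a bitmap: get_order(size) turns the request into an order, bitmap_find_free_region() claims a naturally aligned run of 1 << order bits, and bitmap_release_region() gives it back. A minimal char-per-bit model of that search logic; real kernel bitmaps pack bits into unsigned longs:

    /* Find a free region of 1 << order bits, aligned to its own size,
     * in a map of `size` bits (one char per bit for simplicity).
     * Returns the first bit number, or -1 if no region is free. */
    static int find_free_region(char *map, int size, int order)
    {
            int run = 1 << order;

            for (int pos = 0; pos + run <= size; pos += run) {
                    int i;
                    for (i = 0; i < run && !map[pos + i]; i++)
                            ;
                    if (i == run) {
                            for (i = 0; i < run; i++)
                                    map[pos + i] = 1;   /* claim the region */
                            return pos;
                    }
            }
            return -1;
    }

    static void release_region(char *map, int pos, int order)
    {
            for (int i = 0; i < (1 << order); i++)
                    map[pos + i] = 0;
    }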
|
swiotlb.c
    437  unsigned int order, area_order;                      in swiotlb_init_late()  [local]
    465  nslabs = SLABS_PER_PAGE << order;                    in swiotlb_init_late()
    469  order);                                              in swiotlb_init_late()
    472  order--;                                             in swiotlb_init_late()
    473  nslabs = SLABS_PER_PAGE << order;                    in swiotlb_init_late()
    494  (PAGE_SIZE << order) >> 20);                         in swiotlb_init_late()
    521  free_pages((unsigned long)vstart, order);            in swiotlb_init_late()
    576  unsigned int order = get_order(bytes);               in alloc_dma_pages()  [local]
    581  page = alloc_pages(gfp, order);                      in alloc_dma_pages()
    587  __free_pages(page, order);                           in alloc_dma_pages()
    [all …]
|
Kconfig
    230  int "Maximum PAGE_SIZE order of alignment for contiguous buffers"
    235  PAGE_SIZE order which is greater than or equal to the requested buffer
    238  specify the maximum PAGE_SIZE order for contiguous buffers. Larger
    239  buffers will be aligned only to this specified order. The order is
    242  For example, if your system defaults to 4KiB pages, the order value
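A worked version of the arithmetic this help text describes: the resulting alignment is PAGE_SIZE shifted left by the configured order. The 4 KiB page size and the order value of 8 below are assumptions of the sketch:

    #include <stdio.h>

    int main(void)
    {
            unsigned long page_size = 4096;  /* assumed 4 KiB pages */
            unsigned int order = 8;          /* assumed configured order */

            /* alignment = PAGE_SIZE << order = 4096 << 8 = 1 MiB */
            printf("%lu MiB\n", (page_size << order) >> 20);
            return 0;
    }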
|
/kernel/events/
ring_buffer.c
    616  static struct page *rb_alloc_aux_page(int node, int order)   in rb_alloc_aux_page()  [argument]
    620  if (order > MAX_PAGE_ORDER)                          in rb_alloc_aux_page()
    621  order = MAX_PAGE_ORDER;                              in rb_alloc_aux_page()
    624  page = alloc_pages_node(node, PERF_AUX_GFP, order);  in rb_alloc_aux_page()
    625  } while (!page && order--);                          in rb_alloc_aux_page()
    627  if (page && order) {                                 in rb_alloc_aux_page()
    634  split_page(page, order);                             in rb_alloc_aux_page()
    636  set_page_private(page, order);                       in rb_alloc_aux_page()
    739  int last, order;                                     in rb_alloc_aux()  [local]
    741  order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));   in rb_alloc_aux()
    [all …]
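rb_alloc_aux() sizes each AUX chunk as min(max_order, ilog2(pages still needed)), and rb_alloc_aux_page() further clamps to MAX_PAGE_ORDER, retrying downward on failure and breaking a successful multi-page allocation apart with split_page() so each page can be freed individually. The order math in isolation; the MAX_PAGE_ORDER value is an assumption, and pages_left must be at least 1:

    #define MAX_PAGE_ORDER 10   /* assumption for this sketch */

    /* Floor log2 of the pages still needed, clamped as in
     * rb_alloc_aux() and rb_alloc_aux_page().  pages_left >= 1. */
    static int aux_chunk_order(int max_order, unsigned int pages_left)
    {
            int order = 31 - __builtin_clz(pages_left);   /* ilog2() */

            if (order > max_order)
                    order = max_order;
            if (order > MAX_PAGE_ORDER)
                    order = MAX_PAGE_ORDER;
            return order;
    }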
|
/kernel/trace/
trace_sched_switch.c
    195  int order = get_order(sizeof(*s) + s->cmdline_num * TASK_COMM_LEN);   in free_saved_cmdlines_buffer()  [local]
    198  free_pages((unsigned long)s, order);                 in free_saved_cmdlines_buffer()
    206  int order;                                           in allocate_cmdlines_buffer()  [local]
    210  order = get_order(orig_size);                        in allocate_cmdlines_buffer()
    211  size = 1 << (order + PAGE_SHIFT);                    in allocate_cmdlines_buffer()
    212  page = alloc_pages(GFP_KERNEL, order);               in allocate_cmdlines_buffer()
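allocate_cmdlines_buffer() shows the usual get_order() round trip: the requested size is rounded up to a page order, then the usable size is recomputed as the full 1 << (order + PAGE_SHIFT) bytes the allocation actually delivers. A userspace model of get_order(), assuming 4 KiB pages:

    #include <stddef.h>

    #define PAGE_SHIFT 12   /* assumption: 4 KiB pages */

    /* Smallest order whose allocation covers size; size must be > 0. */
    static unsigned int my_get_order(size_t size)
    {
            unsigned int order = 0;

            while (((size_t)1 << (order + PAGE_SHIFT)) < size)
                    order++;
            return order;
    }

    /* e.g. my_get_order(5000) == 1: the buffer gets two pages, and the
     * usable size rounds up to 1 << (1 + 12) = 8192 bytes. */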
|
ring_buffer.c
    347  unsigned order; /* order of the page */              [member]
    365  unsigned order; /* order of the page */              [member]
   2453  int order, unsigned long start,                      in alloc_buffer()  [argument]
   2474  buffer->subbuf_order = order;                        in alloc_buffer()
   2475  subbuf_size = (PAGE_SIZE << order);                  in alloc_buffer()
   2622  int order, unsigned long start,                      in __ring_buffer_alloc_range()  [argument]
   6476  bpage->order = buffer->subbuf_order;                 in ring_buffer_alloc_read_page()
   6830  if (!buffer || order < 0)                            in ring_buffer_subbuf_order_set()
   6833  if (buffer->subbuf_order == order)                   in ring_buffer_subbuf_order_set()
   6836  psize = (1 << order) * PAGE_SIZE;                    in ring_buffer_subbuf_order_set()
    [all …]
|
ftrace.c
   1121  int order;                                           [member]
   3800  int order;                                           in ftrace_allocate_records()  [local]
   3809  order = fls(pages) - 1;                              in ftrace_allocate_records()
   3816  if (!order)                                          in ftrace_allocate_records()
   3818  order--;                                             in ftrace_allocate_records()
   3822  ftrace_number_of_pages += 1 << order;                in ftrace_allocate_records()
   3825  cnt = (PAGE_SIZE << order) / ENTRY_SIZE;             in ftrace_allocate_records()
   3826  pg->order = order;                                   in ftrace_allocate_records()
   3841  ftrace_number_of_pages -= 1 << pg->order;            in ftrace_free_pages()
   7194  if (end_offset > PAGE_SIZE << pg->order) {           in ftrace_process_locs()
    [all …]
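ftrace_allocate_records() starts from the floor log2 of its page estimate (fls(pages) - 1), so the grouping never overshoots, and steps the order down one at a time when the allocation fails. A sketch with malloc() standing in for alloc_pages(); the page size is an assumption:

    #include <stdlib.h>

    #define PAGE_SIZE 4096UL   /* assumption: 4 KiB pages */

    /* Returns the order actually allocated, or -1 on failure.
     * Requires pages >= 1. */
    static int alloc_record_pages(unsigned long pages, void **out)
    {
            int order = 0;

            while ((2UL << order) <= pages)   /* floor log2: fls(pages) - 1 */
                    order++;

            for (;;) {
                    *out = malloc(PAGE_SIZE << order);
                    if (*out)
                            return order;
                    if (!order)               /* nothing smaller left to try */
                            return -1;
                    order--;                  /* retry with half the size */
            }
    }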
|
trace.c
   1351  int order;                                           in tracing_alloc_snapshot_instance()  [local]
   1357  order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);   in tracing_alloc_snapshot_instance()
   1358  ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);   in tracing_alloc_snapshot_instance()
   9427  int order;                                           in buffer_subbuf_size_read()  [local]
   9430  order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);   in buffer_subbuf_size_read()
   9431  size = (PAGE_SIZE << order) / 1024;                  in buffer_subbuf_size_read()
   9445  int order;                                           in buffer_subbuf_size_write()  [local]
   9456  order = fls(pages - 1);                              in buffer_subbuf_size_write()
   9459  if (order < 0 || order > 7)                          in buffer_subbuf_size_write()
   9466  if (old_order == order)                              in buffer_subbuf_size_write()
    [all …]
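buffer_subbuf_size_write() goes the other way around: the requested size in KiB is converted to pages, fls(pages - 1) yields the ceiling log2 (the smallest order whose sub-buffer holds the request), and orders outside 0..7 are rejected. The same computation, assuming 4 KiB pages:

    #define PAGE_SIZE 4096UL   /* assumption: 4 KiB pages */

    /* Sub-buffer order for a size request in KiB, or -1 if it falls
     * outside the supported range 0..7. */
    static int kib_to_subbuf_order(unsigned long kib)
    {
            unsigned long pages = (kib * 1024 + PAGE_SIZE - 1) / PAGE_SIZE;
            int order = 0;

            while ((1UL << order) < pages)   /* ceiling log2: fls(pages - 1) */
                    order++;

            if (order > 7)
                    return -1;
            return order;
    }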
|
Kconfig
    864  separated out as a stand-alone facility in order to allow it
|
/kernel/futex/
core.c
   1967  unsigned int order, n;                               in futex_init()  [local]
   1980  order = get_order(size);                             in futex_init()
   1985  if (order > MAX_PAGE_ORDER)                          in futex_init()
   2001  order > MAX_PAGE_ORDER ? "vmalloc" : "linear");      in futex_init()
|
/kernel/power/
snapshot.c
   1248  unsigned int order, t;                               in mark_free_pages()  [local]
   1272  for_each_migratetype_order(order, t) {               in mark_free_pages()
   1274  &zone->free_area[order].free_list[t], buddy_list) {  in mark_free_pages()
   1278  for (i = 0; i < (1UL << order); i++) {               in mark_free_pages()
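mark_free_pages() visits every (order, migratetype) free list in a zone, and each list entry stands for 1UL << order physically contiguous free pages. A sketch of that accounting over simplified stand-in types; the kernel's struct free_area and for_each_migratetype_order() are more involved:

    #define NR_ORDERS       11   /* assumption: MAX_PAGE_ORDER + 1 */
    #define NR_MIGRATETYPES 6    /* assumption */

    struct free_block { struct free_block *next; unsigned long first_pfn; };
    struct free_area  { struct free_block *free_list[NR_MIGRATETYPES]; };

    /* Walk every (order, migratetype) list; each entry covers
     * 1UL << order pages, as in the loop at line 1278 above. */
    static unsigned long count_free_pages(struct free_area area[NR_ORDERS])
    {
            unsigned long total = 0;

            for (unsigned int order = 0; order < NR_ORDERS; order++)
                    for (int t = 0; t < NR_MIGRATETYPES; t++)
                            for (struct free_block *b = area[order].free_list[t];
                                 b; b = b->next)
                                    total += 1UL << order;
            return total;
    }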
|
Kconfig
    328  In order to use APM, you will need supporting software. For location
|
/kernel/time/
Kconfig
    165  other dependencies to provide in order to make the full
|
/kernel/rcu/
Kconfig
    192  value to the maximum value possible in order to reduce the
|