| /include/trace/events/ |
| compaction.h | 168 int order, 181 __entry->order = order; 187 __entry->order, 195 int order, 210 __entry->order = order; 217 __entry->order, 224 int order, 233 int order, 257 __entry->order = order; 266 __entry->order, [all …]
|
| vmscan.h | 81 __entry->order = order; 86 __entry->order) 105 __entry->order = order; 111 __entry->order, 127 __entry->order = order; 132 __entry->order, 283 int order, 304 __entry->order = order; 318 __entry->order, 495 __entry->order = order; [all …]
|
| migrate.h | 115 TP_PROTO(unsigned long addr, unsigned long pte, int order), 117 TP_ARGS(addr, pte, order), 122 __field(int, order) 128 __entry->order = order; 131 TP_printk("addr=%lx, pte=%lx order=%d", __entry->addr, __entry->pte, __entry->order) 135 TP_PROTO(unsigned long addr, unsigned long pte, int order), 136 TP_ARGS(addr, pte, order) 140 TP_PROTO(unsigned long addr, unsigned long pte, int order), 141 TP_ARGS(addr, pte, order)
|
| kmem.h | 140 TP_ARGS(page, order), 144 __field( unsigned int, order ) 149 __entry->order = order; 155 __entry->order) 186 __field( unsigned int, order ) 193 __entry->order = order; 201 __entry->order, 215 __field( unsigned int, order ) 222 __entry->order = order; 230 __entry->order, [all …]
|
| oom.h | 37 int order, 44 TP_ARGS(zoneref, order, reclaimable, available, min_wmark, no_progress_loops, wmark_check), 49 __field( int, order) 60 __entry->order = order; 70 __entry->order, 185 TP_PROTO(int order, 192 TP_ARGS(order, priority, result, retries, max_retries, ret), 195 __field( int, order) 204 __entry->order = order; 213 __entry->order,
|
| filemap.h | 27 __field(unsigned char, order) 38 __entry->order = folio_order(folio); 46 __entry->order)
|
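Every row in the /include/trace/events/ group above comes from a TRACE_EVENT() definition that records the allocation order the same way: TP_PROTO() declares it, __field() reserves space for it in the trace record, TP_fast_assign() copies it into __entry, and TP_printk() formats it. A minimal sketch of that pattern, modelled on the migrate.h lines above; the event name and arguments are hypothetical, and a real trace header also needs the TRACE_SYSTEM/TRACE_INCLUDE_FILE boilerplate plus a closing #include <trace/define_trace.h>.

```c
#include <linux/tracepoint.h>

TRACE_EVENT(sample_order_event,

	TP_PROTO(unsigned long addr, int order),	/* C prototype of the hook */

	TP_ARGS(addr, order),				/* names passed at the call site */

	TP_STRUCT__entry(				/* layout of the ring-buffer record */
		__field(unsigned long, addr)
		__field(int, order)
	),

	TP_fast_assign(					/* copy the arguments into the record */
		__entry->addr = addr;
		__entry->order = order;
	),

	TP_printk("addr=%lx order=%d", __entry->addr, __entry->order)
);
```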
| /include/linux/ |
| compaction.h | 65 static inline unsigned long compact_gap(unsigned int order) in compact_gap() argument 80 return 2UL << order; in compact_gap() 90 extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order); 91 extern int fragmentation_index(struct zone *zone, unsigned int order); 93 unsigned int order, unsigned int alloc_flags, 97 extern bool compaction_suitable(struct zone *zone, int order, 100 extern void compaction_defer_reset(struct zone *zone, int order, 103 bool compaction_zonelist_suitable(struct alloc_context *ac, int order, 108 extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx); 115 static inline bool compaction_suitable(struct zone *zone, int order, in compaction_suitable() argument [all …]
|
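The compact_gap() lines above show the whole calculation: compaction is only worth attempting when a zone has a gap of 2UL << order free pages, twice the requested block, so there is room both for the allocation itself and for the pages being migrated out of its way. A tiny sketch of the arithmetic; the helper name here is illustrative, the real one is the compact_gap() shown above.

```c
/*
 * compact_gap(order) = 2UL << order: twice the requested block size, in pages.
 * For a PMD-sized THP on 4 KiB pages (order 9):
 *
 *   2UL << 9 = 1024 pages = 4 MiB of free space before compaction is attempted.
 */
static inline unsigned long demo_compact_gap(unsigned int order)
{
	return 2UL << order;
}
```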
| gfp.h | 284 return __alloc_pages_noprof(gfp_mask, order, nid, NULL); in __alloc_pages_node_noprof() 295 return __folio_alloc_noprof(gfp, order, nid, NULL); in __folio_alloc_node_noprof() 306 unsigned int order) in alloc_pages_node_noprof() argument 311 return __alloc_pages_node_noprof(nid, gfp_mask, order); in alloc_pages_node_noprof() 330 return __folio_alloc_node_noprof(gfp, order, numa_node_id()); in folio_alloc_noprof() 335 return folio_alloc_noprof(gfp, order); in folio_alloc_mpol_noprof() 337 #define vma_alloc_folio_noprof(gfp, order, vma, addr) \ argument 338 folio_alloc_noprof(gfp, order) 378 #define __get_dma_pages(gfp_mask, order) \ argument 379 __get_free_pages((gfp_mask) | GFP_DMA, (order)) [all …]
|
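The gfp.h rows are the order-based entry points of the page allocator; the *_noprof variants are the allocation-profiling backends behind alloc_pages(), folio_alloc(), __get_free_pages() and friends. A minimal sketch of allocating and freeing a 2^order block of contiguous pages; the flags, order and error handling here are illustrative only.

```c
#include <linux/gfp.h>
#include <linux/mm.h>

/* Allocate 2^2 = 4 physically contiguous pages, then give them back. */
static int demo_order_alloc(void)
{
	const unsigned int order = 2;
	struct page *page;

	page = alloc_pages(GFP_KERNEL, order);	/* may sleep; high orders can fail */
	if (!page)
		return -ENOMEM;

	/* page_address(page) maps 4 * PAGE_SIZE bytes of usable memory. */

	__free_pages(page, order);		/* order must match the allocation */
	return 0;
}
```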
| page_owner.h | 11 extern void __reset_page_owner(struct page *page, unsigned short order); 13 unsigned short order, gfp_t gfp_mask); 22 static inline void reset_page_owner(struct page *page, unsigned short order) in reset_page_owner() argument 25 __reset_page_owner(page, order); in reset_page_owner() 29 unsigned short order, gfp_t gfp_mask) in set_page_owner() argument 32 __set_page_owner(page, order, gfp_mask); in set_page_owner() 57 static inline void reset_page_owner(struct page *page, unsigned short order) in reset_page_owner() argument 61 unsigned short order, gfp_t gfp_mask) in set_page_owner() argument
|
| page_table_check.h | 16 void __page_table_check_zero(struct page *page, unsigned int order); 30 static inline void page_table_check_alloc(struct page *page, unsigned int order) in page_table_check_alloc() argument 35 __page_table_check_zero(page, order); in page_table_check_alloc() 38 static inline void page_table_check_free(struct page *page, unsigned int order) in page_table_check_free() argument 43 __page_table_check_zero(page, order); in page_table_check_free() 109 static inline void page_table_check_alloc(struct page *page, unsigned int order) in page_table_check_alloc() argument 113 static inline void page_table_check_free(struct page *page, unsigned int order) in page_table_check_free() argument
|
| kasan.h | 112 void __kasan_poison_pages(struct page *page, unsigned int order, bool init); 114 unsigned int order, bool init) in kasan_poison_pages() argument 117 __kasan_poison_pages(page, order, init); in kasan_poison_pages() 122 unsigned int order, bool init) in kasan_unpoison_pages() argument 125 return __kasan_unpoison_pages(page, order, init); in kasan_unpoison_pages() 284 bool __kasan_mempool_poison_pages(struct page *page, unsigned int order, 303 unsigned int order) in kasan_mempool_poison_pages() argument 306 return __kasan_mempool_poison_pages(page, order, _RET_IP_); in kasan_mempool_poison_pages() 310 void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order, 325 unsigned int order) in kasan_mempool_unpoison_pages() argument [all …]
|
| huge_mm.h | 154 if (order <= 0 || order > PMD_ORDER) in mod_mthp_stat() 157 this_cpu_add(mthp_stats.stats[order][item], delta); in mod_mthp_stat() 162 mod_mthp_stat(order, item, 1); in count_mthp_stat() 218 unsigned long addr, int order) in thp_vma_suitable_order() argument 220 unsigned long hpage_size = PAGE_SIZE << order; in thp_vma_suitable_order() 245 int order; in thp_vma_suitable_orders() local 254 order = highest_order(orders); in thp_vma_suitable_orders() 257 if (thp_vma_suitable_order(vma, addr, order)) in thp_vma_suitable_orders() 259 order = next_order(&orders, order); in thp_vma_suitable_orders() 312 int order; member [all …]
|
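The huge_mm.h hits come from the mTHP code, where the set of candidate folio orders is carried as a bitmask and walked from the largest order down (the highest_order()/next_order() calls in the snippet) until one fits the VMA. A self-contained sketch of that walk using fls_long() directly; pick_order() and the fits() predicate are hypothetical stand-ins for thp_vma_suitable_orders()/thp_vma_suitable_order().

```c
#include <linux/bitops.h>

/*
 * Walk a bitmask of candidate orders from largest to smallest and return
 * the first one the caller's predicate accepts, or -1 if none fit.
 */
static int pick_order(unsigned long orders, bool (*fits)(int order))
{
	while (orders) {
		int order = fls_long(orders) - 1;	/* highest set bit = largest order */

		if (fits(order))
			return order;
		orders &= ~BIT(order);			/* drop it, try the next lower one */
	}
	return -1;
}
```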
| kmsan.h | 58 unsigned int order); 69 void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags); 78 void kmsan_free_page(struct page *page, unsigned int order); 305 unsigned int order) in kmsan_memblock_free_pages() argument 318 static inline void kmsan_alloc_page(struct page *page, unsigned int order, in kmsan_alloc_page() argument 323 static inline void kmsan_free_page(struct page *page, unsigned int order) in kmsan_free_page() argument
|
| xarray.h | 1410 #define XA_STATE_ORDER(name, array, index, order) \ argument 1412 (index >> order) << order, \ 1413 order - (order % XA_CHUNK_SHIFT), \ 1414 (1U << (order % XA_CHUNK_SHIFT)) - 1) 1578 unsigned int order) in xas_split() argument 1584 unsigned int order, gfp_t gfp) in xas_split_alloc() argument 1589 unsigned int order) in xas_try_split() argument 1674 unsigned int order) in xas_set_order() argument 1677 xas->xa_index = order < BITS_PER_LONG ? (index >> order) << order : 0; in xas_set_order() 1678 xas->xa_shift = order - (order % XA_CHUNK_SHIFT); in xas_set_order() [all …]
|
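The xarray.h rows are the multi-index ("multi-order") entry support: XA_STATE_ORDER() and xas_set_order() round the index down to an order-aligned boundary so a single entry can cover 2^order consecutive indices, and xas_split()/xas_try_split() break such an entry back apart. A minimal sketch of storing one order-sized entry, assuming CONFIG_XARRAY_MULTI; the array, index and payload are illustrative.

```c
#include <linux/xarray.h>

static DEFINE_XARRAY(demo_xa);

/*
 * Store one entry covering the 2^order indices at the order-aligned
 * boundary containing @index, retrying if node allocation is needed.
 */
static int demo_store_order(unsigned long index, unsigned int order, void *entry)
{
	XA_STATE_ORDER(xas, &demo_xa, index, order);

	do {
		xas_lock(&xas);
		xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	return xas_error(&xas);
}
```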
| page_reporting.h | 23 unsigned int order; member
|
| fault-inject.h | 117 bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order); 119 static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
|
| cma.h | 61 struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp); 65 static inline struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp) in cma_alloc_folio() argument
|
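cma.h's cma_alloc_folio() hands out a folio of the requested order from a CMA area, with a NULL-returning stub when CMA is compiled out. A minimal sketch; the struct cma pointer would come from a reservation made at boot, __GFP_COMP is passed on the assumption that an order > 0 folio must be a compound page, and the release path here goes through the generic cma_release() rather than any folio-specific helper.

```c
#include <linux/cma.h>
#include <linux/mm.h>

/* Grab an order-4 folio (64 KiB with 4 KiB pages) from @cma and return it. */
static int demo_cma_folio(struct cma *cma)
{
	const int order = 4;
	struct folio *folio;

	folio = cma_alloc_folio(cma, order, GFP_KERNEL | __GFP_COMP);
	if (!folio)
		return -ENOMEM;

	/* ... use the folio ... */

	cma_release(cma, &folio->page, 1 << order);
	return 0;
}
```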
| ring_buffer.h | 93 int order, unsigned long start, 116 #define ring_buffer_alloc_range(size, flags, order, start, range_size, s_size) \ argument 119 __ring_buffer_alloc_range((size), (flags), (order), (start), \ 236 int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order);
|
| dma-map-ops.h | 114 unsigned int order, bool no_warn); 136 size_t count, unsigned int order, bool no_warn) in dma_alloc_from_contiguous() argument 164 int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr); 175 #define dma_release_from_dev_coherent(dev, order, vaddr) (0) argument 176 #define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0) argument 183 int dma_release_from_global_coherent(int order, void *vaddr); 193 static inline int dma_release_from_global_coherent(int order, void *vaddr) in dma_release_from_global_coherent() argument
|
| bitmap.h | 655 void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order) in bitmap_release_region() argument 657 bitmap_clear(bitmap, pos, BIT(order)); in bitmap_release_region() 672 int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order) in bitmap_allocate_region() argument 674 unsigned int len = BIT(order); in bitmap_allocate_region() 697 int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order) in bitmap_find_free_region() argument 701 for (pos = 0; (end = pos + BIT(order)) <= bits; pos = end) { in bitmap_find_free_region() 702 if (!bitmap_allocate_region(bitmap, pos, order)) in bitmap_find_free_region()
|
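bitmap.h's region helpers treat a bitmap as a tiny allocator of naturally aligned, power-of-two sized regions: bitmap_find_free_region() finds and claims a free 2^order-bit slot, bitmap_allocate_region() claims one at a fixed position, and bitmap_release_region() returns it. A minimal sketch over a 128-bit map; the map and its use are illustrative only.

```c
#include <linux/bitmap.h>

#define DEMO_SLOTS 128

static DECLARE_BITMAP(demo_map, DEMO_SLOTS);

/* Claim a naturally aligned run of 2^order bits, use it, release it. */
static int demo_bitmap_region(int order)
{
	int pos;

	pos = bitmap_find_free_region(demo_map, DEMO_SLOTS, order);
	if (pos < 0)
		return pos;	/* no free region of that order */

	/* ... bits pos .. pos + (1 << order) - 1 now belong to us ... */

	bitmap_release_region(demo_map, pos, order);
	return 0;
}
```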
| memcontrol.h | 1030 void split_page_memcg(struct page *first, unsigned order); 1442 static inline void split_page_memcg(struct page *first, unsigned order) in split_page_memcg() argument 1677 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order); 1678 void __memcg_kmem_uncharge_page(struct page *page, int order); 1718 int order) in memcg_kmem_charge_page() argument 1721 return __memcg_kmem_charge_page(page, gfp, order); in memcg_kmem_charge_page() 1728 __memcg_kmem_uncharge_page(page, order); in memcg_kmem_uncharge_page() 1766 int order) in memcg_kmem_charge_page() argument 1776 int order) in __memcg_kmem_charge_page() argument 1856 unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order, [all …]
|
| oom.h | 45 const int order; member
|
| bitops.h | 88 int order; in get_bitmask_order() local 90 order = fls(count); in get_bitmask_order() 91 return order; /* We could be slightly more clever with -1 here... */ in get_bitmask_order()
|
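get_bitmask_order() in bitops.h is just fls() of the count: the order of the power of two reached by rounding the count up. The quirk the "slightly more clever with -1" comment refers to is that counts which are already exact powers of two come out one order higher than necessary. A few worked values; the trivial wrapper is only there so the block stands on its own.

```c
#include <linux/bitops.h>

/*
 * get_bitmask_order(count) == fls(count):
 *
 *   get_bitmask_order(5) == 3    2^3 = 8  >= 5
 *   get_bitmask_order(7) == 3    2^3 = 8  >= 7
 *   get_bitmask_order(8) == 4    2^4 = 16, although 8 itself would do;
 *                                exact powers of two land one order high.
 */
static inline int demo_order_for(unsigned int nr_items)
{
	return get_bitmask_order(nr_items);
}
```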
| /include/drm/ |
| drm_pagemap.h | 38 u64 order : 8; member 54 unsigned int order, in drm_pagemap_device_addr_encode() argument 60 .order = order, in drm_pagemap_device_addr_encode() 81 unsigned int order,
|
| /include/drm/ttm/ |
| ttm_pool.h | 53 unsigned int order; member
|