
Searched refs:nr_pages (Results 1 – 25 of 39) sorted by relevance


/include/linux/
hugetlb_cgroup.h
127 extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
131 extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
137 extern void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
142 extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
152 unsigned long nr_pages,
162 unsigned long nr_pages, in hugetlb_cgroup_uncharge_file_region() argument
214 unsigned long nr_pages, in hugetlb_cgroup_charge_cgroup_rsvd() argument
227 hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages, in hugetlb_cgroup_commit_charge_rsvd() argument
239 unsigned long nr_pages, in hugetlb_cgroup_uncharge_folio_rsvd() argument
244 unsigned long nr_pages, in hugetlb_cgroup_uncharge_cgroup() argument
[all …]
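
The hugetlb_cgroup hits above describe a two-step charge/commit API. A minimal sketch of that pattern, in kernel context only: the example_charge_hugetlb() wrapper is hypothetical, and the trailing arguments of the commit and uncharge calls (the charged hugetlb_cgroup and the target folio) are assumptions based on the truncated prototypes.

    #include <linux/hugetlb_cgroup.h>

    /* Sketch only: charge nr_pages pages of hstate index idx, then either
     * commit the charge to a folio or back it out. Trailing arguments are
     * assumed from the truncated declarations above. */
    static int example_charge_hugetlb(int idx, unsigned long nr_pages,
                                      struct folio *folio)
    {
            struct hugetlb_cgroup *h_cg;
            int ret;

            /* Reserve the charge against the current task's hugetlb cgroup. */
            ret = hugetlb_cgroup_charge_cgroup(idx, nr_pages, &h_cg);
            if (ret)
                    return ret;

            if (!folio) {
                    /* Nothing to commit to: return the charge. */
                    hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg);
                    return -ENOMEM;
            }

            /* Bind the charge to the folio; it is uncharged when the folio is freed. */
            hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio);
            return 0;
    }
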
page_counter.h
71 void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages);
72 void page_counter_charge(struct page_counter *counter, unsigned long nr_pages);
74 unsigned long nr_pages,
76 void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
77 void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages);
78 void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages);
81 unsigned long nr_pages) in page_counter_set_high() argument
83 WRITE_ONCE(counter->high, nr_pages); in page_counter_set_high()
86 int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages);
88 unsigned long *nr_pages);
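
Together these declarations are the hierarchical page_counter used by the cgroup controllers. A minimal sketch of the usual charge/uncharge flow; example_charge() is a hypothetical wrapper, the counter is assumed to be already initialised, and the try_charge name is inferred for the three-argument prototype truncated above.

    #include <linux/page_counter.h>

    /* Sketch only: charge nr_pages against a limit-enforcing counter,
     * then release the charge again. */
    static int example_charge(struct page_counter *counter, unsigned long nr_pages)
    {
            struct page_counter *fail;

            if (!page_counter_try_charge(counter, nr_pages, &fail)) {
                    /* 'fail' names the counter (possibly an ancestor)
                     * whose max was hit. */
                    return -ENOMEM;
            }

            /* ... the charged pages are in use here ... */

            page_counter_uncharge(counter, nr_pages);
            return 0;
    }
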
memory_hotplug.h
123 long nr_pages);
128 extern int online_pages(unsigned long pfn, unsigned long nr_pages,
155 extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
164 unsigned long nr_pages, struct mhp_params *params) in add_pages() argument
166 return __add_pages(nid, start_pfn, nr_pages, params); in add_pages()
169 int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
280 extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
316 unsigned long nr_pages,
321 unsigned long nr_pages);
323 unsigned long nr_pages, struct vmem_altmap *altmap,
[all …]
rmap.h
425 VM_WARN_ON_ONCE(nr_pages <= 0); in __folio_rmap_sanity_checks()
439 VM_WARN_ON_FOLIO(nr_pages != HPAGE_PMD_NR, folio); in __folio_rmap_sanity_checks()
447 VM_WARN_ON_FOLIO(nr_pages != HPAGE_PUD_NR, folio); in __folio_rmap_sanity_checks()
572 const int orig_nr_pages = nr_pages; in __folio_dup_file_rmap()
586 } while (page++, --nr_pages > 0); in __folio_dup_file_rmap()
645 const int orig_nr_pages = nr_pages; in __folio_try_dup_anon_rmap()
670 for (i = 0; i < nr_pages; i++) in __folio_try_dup_anon_rmap()
687 } while (page++, --nr_pages > 0); in __folio_try_dup_anon_rmap()
933 unsigned long nr_pages; member
946 .nr_pages = folio_nr_pages(_folio), \
[all …]
swap.h
194 pgoff_t nr_pages; member
366 void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
423 unsigned long nr_pages,
431 extern unsigned long shrink_all_memory(unsigned long nr_pages);
464 unsigned long nr_pages, sector_t start_block);
501 extern void swap_free_nr(swp_entry_t entry, int nr_pages);
512 extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
576 static inline void swap_free_nr(swp_entry_t entry, int nr_pages) in swap_free_nr() argument
611 unsigned long nr_pages, sector_t start_block) in add_swap_extent() argument
676 __mem_cgroup_uncharge_swap(entry, nr_pages); in mem_cgroup_uncharge_swap()
[all …]
zswap.h
31 int zswap_swapon(int type, unsigned long nr_pages);
53 static inline int zswap_swapon(int type, unsigned long nr_pages) in zswap_swapon() argument
mm_inline.h
40 long nr_pages) in __update_lru_size() argument
45 WARN_ON_ONCE(nr_pages != (int)nr_pages); in __update_lru_size()
47 __mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages); in __update_lru_size()
49 NR_ZONE_LRU_BASE + lru, nr_pages); in __update_lru_size()
54 long nr_pages) in update_lru_size() argument
56 __update_lru_size(lruvec, lru, zid, nr_pages); in update_lru_size()
58 mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages); in update_lru_size()
188 WRITE_ONCE(lrugen->nr_pages[old_gen][type][zone], in lru_gen_update_size()
189 lrugen->nr_pages[old_gen][type][zone] - delta); in lru_gen_update_size()
191 WRITE_ONCE(lrugen->nr_pages[new_gen][type][zone], in lru_gen_update_size()
[all …]
bootmem_info.h
22 unsigned long nr_pages);
64 struct page *map, unsigned long nr_pages) in register_page_bootmem_memmap() argument
gfp.h
233 nodemask_t *nodemask, int nr_pages,
238 unsigned long nr_pages,
248 alloc_pages_bulk_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages, in alloc_pages_bulk_node_noprof() argument
254 return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, page_array); in alloc_pages_bulk_node_noprof()
436 extern struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
441 void free_contig_range(unsigned long pfn, unsigned long nr_pages);
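
The gfp.h hits cover the bulk allocator and the contiguous-range allocator. A short sketch of the latter pair, assuming CONFIG_CONTIG_ALLOC and the plain alloc_contig_pages() wrapper around the _noprof variant listed above; example_contig_alloc() is hypothetical.

    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* Sketch only: try to allocate nr_pages physically contiguous pages on
     * node nid, then hand the range back. */
    static int example_contig_alloc(unsigned long nr_pages, int nid)
    {
            struct page *page;

            page = alloc_contig_pages(nr_pages, GFP_KERNEL, nid, NULL);
            if (!page)
                    return -ENOMEM;   /* no contiguous run could be isolated */

            /* ... use the contiguous range starting at 'page' ... */

            free_contig_range(page_to_pfn(page), nr_pages);
            return 0;
    }
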
dax.h
171 …ct page *dax_layout_busy_page_range(struct address_space *mapping, pgoff_t start, pgoff_t nr_pages) in dax_layout_busy_page_range() argument
245 long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
252 size_t nr_pages);
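
dax_direct_access() resolves an nr_pages-long page-offset range of a DAX device to a kernel address. A hedged sketch of a lookup: the access-mode and output arguments are assumptions based on the device-mapper.h hit further down (the pfn out-parameter in particular has changed type across kernel versions), and example_dax_peek() is hypothetical.

    #include <linux/dax.h>

    /* Sketch only: ask how much of [pgoff, pgoff + nr_pages) is directly
     * accessible and where it is mapped. Output arguments are assumed. */
    static long example_dax_peek(struct dax_device *dax_dev, pgoff_t pgoff,
                                 long nr_pages)
    {
            void *kaddr;
            pfn_t pfn;
            long avail;

            avail = dax_direct_access(dax_dev, pgoff, nr_pages, DAX_ACCESS,
                                      &kaddr, &pfn);
            if (avail < 0)
                    return avail;       /* e.g. media error */

            /* kaddr maps the start of the range; at most 'avail' pages are valid. */
            return avail;
    }
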
watch_queue.h
48 unsigned int nr_pages; /* Number of pages in notes[] */ member
memory.h
111 unsigned long nr_pages; member
splice.h
60 int nr_pages; /* number of populated pages in map */ member
memremap.h
102 unsigned long nr_pages, int mf_flags);
mm.h
2473 unsigned long start, unsigned long nr_pages,
2477 unsigned long start, unsigned long nr_pages,
2511 long get_user_pages(unsigned long start, unsigned long nr_pages,
2513 long pin_user_pages(unsigned long start, unsigned long nr_pages,
2524 int get_user_pages_fast(unsigned long start, int nr_pages,
2526 int pin_user_pages_fast(unsigned long start, int nr_pages,
2576 int get_user_pages_fast_only(unsigned long start, int nr_pages,
3809 unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
3876 unsigned long nr_pages; in __vmemmap_can_optimize() local
3882 nr_pages = pgmap_vmemmap_nr(pgmap); in __vmemmap_can_optimize()
[all …]
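
The mm.h hits are the get_user_pages()/pin_user_pages() family, where nr_pages bounds how many user pages are faulted in and reference-counted. A minimal sketch of the pin/unpin pairing with the _fast variant listed above; example_pin_user_range() is hypothetical.

    #include <linux/mm.h>
    #include <linux/slab.h>

    /* Sketch only: pin nr_pages user pages starting at uaddr for writing,
     * then drop every pin that was actually taken. */
    static int example_pin_user_range(unsigned long uaddr, int nr_pages)
    {
            struct page **pages;
            int pinned;

            pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
            if (!pages)
                    return -ENOMEM;

            pinned = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);
            if (pinned < 0) {
                    kfree(pages);
                    return pinned;
            }

            /* ... DMA into / read from the pinned pages ... */

            unpin_user_pages(pages, pinned);  /* may be fewer than nr_pages */
            kfree(pages);
            return 0;
    }
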
mmzone.h
485 long nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES]; member
537 int nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES]; member
1252 unsigned long start_pfn, unsigned long nr_pages) in zone_intersects() argument
1257 start_pfn + nr_pages <= zone->zone_start_pfn) in zone_intersects()
1878 void subsection_map_init(unsigned long pfn, unsigned long nr_pages);
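
The two zone_intersects() fragments above are the ends of a half-open interval overlap test: the range [start_pfn, start_pfn + nr_pages) misses a zone when it ends at or before the zone starts, or begins at or after the zone ends. A hedged reconstruction for reference; the empty-zone early exit is assumed.

    #include <linux/mmzone.h>

    /* Sketch only: does [start_pfn, start_pfn + nr_pages) overlap the zone? */
    static inline bool example_zone_intersects(struct zone *zone,
                                               unsigned long start_pfn,
                                               unsigned long nr_pages)
    {
            if (zone_is_empty(zone))
                    return false;
            if (start_pfn >= zone_end_pfn(zone) ||
                start_pfn + nr_pages <= zone->zone_start_pfn)
                    return false;
            return true;
    }
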
device-mapper.h
158 long nr_pages, enum dax_access_mode node, void **kaddr,
161 size_t nr_pages);
/include/xen/
xen.h
65 int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages);
66 void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages);
71 static inline int xen_alloc_unpopulated_pages(unsigned int nr_pages, in xen_alloc_unpopulated_pages() argument
74 return xen_alloc_ballooned_pages(nr_pages, pages); in xen_alloc_unpopulated_pages()
76 static inline void xen_free_unpopulated_pages(unsigned int nr_pages, in xen_free_unpopulated_pages() argument
79 xen_free_ballooned_pages(nr_pages, pages); in xen_free_unpopulated_pages()
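
xen_alloc_unpopulated_pages() returns nr_pages page structures without backing memory, falling back to ballooned pages when unpopulated-alloc support is missing, as the inline wrappers above show. A short sketch of the alloc/use/free pairing; example_xen_unpopulated() is hypothetical.

    #include <xen/xen.h>

    /* Sketch only: borrow nr_pages unpopulated pages to map foreign memory
     * into, then hand them back. */
    static int example_xen_unpopulated(unsigned int nr_pages, struct page **pages)
    {
            int ret;

            ret = xen_alloc_unpopulated_pages(nr_pages, pages);
            if (ret)
                    return ret;

            /* ... map grants or foreign frames over these pages ... */

            xen_free_unpopulated_pages(nr_pages, pages);
            return 0;
    }
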
balloon.h
29 int xen_alloc_ballooned_pages(unsigned int nr_pages, struct page **pages);
30 void xen_free_ballooned_pages(unsigned int nr_pages, struct page **pages);
grant_table.h
214 int gnttab_alloc_pages(int nr_pages, struct page **pages);
215 void gnttab_free_pages(int nr_pages, struct page **pages);
241 int nr_pages; member
252 int gnttab_pages_set_private(int nr_pages, struct page **pages);
253 void gnttab_pages_clear_private(int nr_pages, struct page **pages);
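
gnttab_alloc_pages()/gnttab_free_pages() pair page allocation with the per-page private state the grant-table code tracks (set up and torn down by the gnttab_pages_set_private()/clear_private() helpers above). A short sketch of a backend grabbing and releasing a batch; example_gnttab_pages() is hypothetical.

    #include <xen/grant_table.h>

    /* Sketch only: allocate nr_pages pages suitable for granting to another
     * domain, then free them again. */
    static int example_gnttab_pages(int nr_pages, struct page **pages)
    {
            int ret;

            ret = gnttab_alloc_pages(nr_pages, pages);
            if (ret)
                    return ret;

            /* ... install grant references for each page ... */

            gnttab_free_pages(nr_pages, pages);
            return 0;
    }
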
xenbus.h
220 unsigned int nr_pages, grant_ref_t *grefs);
221 void xenbus_teardown_ring(void **vaddr, unsigned int nr_pages,
/include/xen/arm/
page.h
105 unsigned long nr_pages);
/include/drm/
drm_prime.h
96 struct page **pages, unsigned int nr_pages);
/include/trace/events/
writeback.h
360 __field(long, nr_pages)
371 __entry->nr_pages = work->nr_pages;
384 __entry->nr_pages,
/include/xen/interface/
xen.h
623 unsigned long nr_pages; /* Total pages allocated to this domain. */ member

Completed in 61 milliseconds
