/mm/
percpu-vm.c
    14    unsigned int cpu, int page_idx in pcpu_chunk_page() argument
    57    unsigned int cpu; in pcpu_free_pages() local
    60    for_each_possible_cpu(cpu) { in pcpu_free_pages()
    86    unsigned int cpu, tcpu; in pcpu_alloc_pages() local
    91    for_each_possible_cpu(cpu) { in pcpu_alloc_pages()
    107   if (tcpu == cpu) in pcpu_alloc_pages()
    156   unsigned int cpu; in pcpu_unmap_pages() local
    159   for_each_possible_cpu(cpu) { in pcpu_unmap_pages()
    217   unsigned int cpu, tcpu; in pcpu_map_pages() local
    220   for_each_possible_cpu(cpu) { in pcpu_map_pages()
    [all …]
|
percpu.c
    2278  unsigned int cpu; in __is_kernel_percpu_address() local
    2342  unsigned int cpu; in per_cpu_ptr_to_phys() local
    2555  unsigned int cpu; in pcpu_setup_first_chunk() local
    2602  for (cpu = 0; cpu < nr_cpu_ids; cpu++) in pcpu_setup_first_chunk()
    2615  cpu = gi->cpu_map[i]; in pcpu_setup_first_chunk()
    2616  if (cpu == NR_CPUS) in pcpu_setup_first_chunk()
    2801  unsigned int cpu, tcpu; in pcpu_build_alloc_info() local
    2944  cpu, node); in pcpu_fc_alloc()
    3219  psize_str, cpu); in pcpu_page_first_chunk()
    3290  unsigned int cpu; in setup_per_cpu_areas() local
    [all …]
|
vmstat.c
    42    int item, cpu; in zero_zone_numa_counters() local
    46    for_each_online_cpu(cpu) { in zero_zone_numa_counters()
    115   int cpu; in sum_vm_events() local
    120   for_each_online_cpu(cpu) { in sum_vm_events()
    175   int cpu; in fold_vm_zone_numa_events() local
    178   for_each_online_cpu(cpu) { in fold_vm_zone_numa_events()
    279   int cpu; in refresh_zone_stat_thresholds() local
    324   int cpu; in set_pgdat_percpu_threshold() local
    2118  int cpu; in vmstat_shepherd() local
    2152  int cpu; in start_shepherd_timer() local
    [all …]
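The vmstat references above all follow the same idiom: a private counter set per CPU, folded into a global view by walking the online CPUs. A minimal sketch of that pattern, with a hypothetical event array standing in for the real vm_event_states:

```c
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/string.h>

#define NR_MY_EVENTS 4	/* hypothetical number of event counters */

struct my_event_state {
	unsigned long event[NR_MY_EVENTS];
};

/* Each CPU bumps only its own copy, so no lock is needed on the fast path. */
static DEFINE_PER_CPU(struct my_event_state, my_event_states);

static void my_sum_events(unsigned long *ret)
{
	int cpu, i;

	memset(ret, 0, NR_MY_EVENTS * sizeof(unsigned long));

	/* Fold every online CPU's private counters into the caller's array. */
	for_each_online_cpu(cpu) {
		struct my_event_state *this = &per_cpu(my_event_states, cpu);

		for (i = 0; i < NR_MY_EVENTS; i++)
			ret[i] += this->event[i];
	}
}
```

The trade-off is the usual per-CPU one: updates stay cheap and contention-free, while readers pay for a walk over all online CPUs and only get an approximate snapshot.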
|
swap.c
    321   static void folio_activate_drain(int cpu) in folio_activate_drain() argument
    641   void lru_add_drain_cpu(int cpu) in lru_add_drain_cpu() argument
    672   folio_activate_drain(cpu); in lru_add_drain_cpu()
    784   need_mlock_drain(cpu) || in cpu_needs_drain()
    785   has_bh_in_lru(cpu, NULL); in cpu_needs_drain()
    810   unsigned cpu, this_gen; in __lru_add_drain_all() local
    869   for_each_online_cpu(cpu) { in __lru_add_drain_all()
    872   if (cpu_needs_drain(cpu)) { in __lru_add_drain_all()
    874   queue_work_on(cpu, mm_percpu_wq, work); in __lru_add_drain_all()
    875   __cpumask_set_cpu(cpu, &has_work); in __lru_add_drain_all()
    [all …]
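The __lru_add_drain_all() references show the standard way to run a drain on every CPU that has pending work: queue a per-CPU work item on each such CPU, record it in a cpumask, then flush. A rough sketch of that shape with a hypothetical workqueue and predicate; the work items are assumed to have been set up with INIT_WORK() elsewhere, and callers are assumed to be serialized, as the real code does with a mutex:

```c
#include <linux/workqueue.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct work_struct, my_drain_work);
static struct workqueue_struct *my_percpu_wq;	/* assumed: created at init */

static bool cpu_needs_my_drain(int cpu)
{
	return true;	/* placeholder: check that CPU's pending batches */
}

static void my_drain_all(void)
{
	static struct cpumask has_work;
	int cpu;

	cpumask_clear(&has_work);

	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(my_drain_work, cpu);

		if (cpu_needs_my_drain(cpu)) {
			/* Run the drain handler on that specific CPU. */
			queue_work_on(cpu, my_percpu_wq, work);
			__cpumask_set_cpu(cpu, &has_work);
		}
	}

	/* Wait until every CPU we poked has finished draining. */
	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(my_drain_work, cpu));
}
```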
|
numa_emulation.c
    547   void numa_add_cpu(unsigned int cpu) in numa_add_cpu() argument
    551   nid = early_cpu_to_node(cpu); in numa_add_cpu()
    562   cpumask_set_cpu(cpu, node_to_cpumask_map[nid]); in numa_add_cpu()
    565   void numa_remove_cpu(unsigned int cpu) in numa_remove_cpu() argument
    570   cpumask_clear_cpu(cpu, node_to_cpumask_map[i]); in numa_remove_cpu()
    577   nid = early_cpu_to_node(cpu); in numa_set_cpumask()
    589   debug_cpumask_set_cpu(cpu, nid, enable); in numa_set_cpumask()
    593   void numa_add_cpu(unsigned int cpu) in numa_add_cpu() argument
    595   numa_set_cpumask(cpu, true); in numa_add_cpu()
    598   void numa_remove_cpu(unsigned int cpu) in numa_remove_cpu() argument
    [all …]
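The numa_add_cpu()/numa_remove_cpu() hits above maintain the node-to-CPUs mapping as CPUs come and go. A condensed sketch of that bookkeeping; the map array and the my_cpu_to_node() lookup are hypothetical stand-ins for the arch-specific versions:

```c
#include <linux/cpumask.h>
#include <linux/numa.h>

static struct cpumask my_node_to_cpumask_map[MAX_NUMNODES];

static int my_cpu_to_node(unsigned int cpu)
{
	return 0;	/* placeholder: real code consults early firmware mappings */
}

void my_numa_add_cpu(unsigned int cpu)
{
	int nid = my_cpu_to_node(cpu);

	cpumask_set_cpu(cpu, &my_node_to_cpumask_map[nid]);
}

void my_numa_remove_cpu(unsigned int cpu)
{
	int nid;

	/* The CPU might be recorded under any node; clear it from all of them. */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		cpumask_clear_cpu(cpu, &my_node_to_cpumask_map[nid]);
}
```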
|
page_alloc.c
    2658  drain_pages(cpu); in drain_local_pages()
    2673  int cpu; in __drain_all_pages() local
    2733  drain_pages(cpu); in __drain_all_pages()
    5684  int __maybe_unused cpu; in __build_all_zonelists() local
    5732  set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu))); in __build_all_zonelists()
    5743  int cpu; in build_all_zonelists_init() local
    5956  int cpu; in __zone_set_pageset_high_and_batch() local
    6002  int cpu; in setup_zone_pageset() local
    6070  int __maybe_unused cpu; in setup_per_cpu_pageset() local
    6174  drain_pages(cpu); in page_alloc_cpu_dead()
    [all …]
|
slab_common.c
    1914  int cpu; in kfree_rcu_scheduler_running() local
    1916  for_each_possible_cpu(cpu) { in kfree_rcu_scheduler_running()
    2024  int i, cpu; in kvfree_rcu_barrier() local
    2033  for_each_possible_cpu(cpu) { in kvfree_rcu_barrier()
    2072  for_each_possible_cpu(cpu) { in kvfree_rcu_barrier()
    2092  int cpu; in kfree_rcu_shrink_count() local
    2096  for_each_possible_cpu(cpu) { in kfree_rcu_shrink_count()
    2110  int cpu, freed = 0; in kfree_rcu_shrink_scan() local
    2112  for_each_possible_cpu(cpu) { in kfree_rcu_shrink_scan()
    2132  int cpu; in kvfree_rcu_init() local
    [all …]
|
memcontrol.c
    690   int cpu; in mod_memcg_state() local
    698   cpu = get_cpu(); in mod_memcg_state()
    734   int cpu; in mod_memcg_lruvec_state() local
    742   cpu = get_cpu(); in mod_memcg_lruvec_state()
    834   int cpu; in count_memcg_events() local
    842   cpu = get_cpu(); in count_memcg_events()
    1979  int cpu, curcpu; in drain_all_stock() local
    3710  int node, cpu; in mem_cgroup_alloc() local
    4020  int cpu) in flush_nmi_stats() argument
    4061  int cpu) in flush_nmi_stats() argument
    [all …]
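The mod_memcg_state()/count_memcg_events() hits show the get_cpu()/put_cpu() bracket: take the current CPU's id while preemption is disabled, touch only that CPU's slot of a dynamically allocated per-CPU structure, then release. A minimal sketch with a hypothetical stats structure:

```c
#include <linux/percpu.h>
#include <linux/smp.h>

#define NR_MY_ITEMS 8	/* hypothetical number of stat items */

struct my_stats {
	long count[NR_MY_ITEMS];
};

/* Assumed: my_stats_pcpu = alloc_percpu(struct my_stats) at init time. */
static struct my_stats __percpu *my_stats_pcpu;

static void my_mod_stat(int item, int val)
{
	int cpu;

	cpu = get_cpu();	/* disables preemption and returns this CPU's id */
	per_cpu_ptr(my_stats_pcpu, cpu)->count[item] += val;
	put_cpu();		/* re-enables preemption */
}
```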
|
slub.c
    3056  return cpu; in init_tid()
    3085  int cpu; in init_kmem_cache_cpus() local
    3388  unsigned int cpu; in flush_all_cpus_locked() local
    3594  cpu, cpu_to_node(cpu), nid, gfpflags, &gfpflags); in slab_out_of_memory()
    6770  int cpu; in show_slab_objects() local
    6774  cpu); in show_slab_objects()
    7208  int cpu; in show_stat() local
    7218  data[cpu] = x; in show_stat()
    7226  if (data[cpu]) in show_stat()
    7228  cpu, data[cpu]); in show_stat()
    [all …]
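The show_stat() references illustrate a common reporting shape: snapshot one value per CPU into a scratch array sized by nr_cpu_ids, print the total, then list only the CPUs that contributed. A sketch under the assumption of a single hypothetical per-CPU counter:

```c
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/errno.h>

static DEFINE_PER_CPU(unsigned long, my_stat);

static int my_show_stat(char *buf, size_t size)
{
	unsigned long sum = 0;
	unsigned long *data;
	int cpu, len = 0;

	data = kcalloc(nr_cpu_ids, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* Snapshot every possible CPU's counter into the scratch array. */
	for_each_possible_cpu(cpu) {
		data[cpu] = per_cpu(my_stat, cpu);
		sum += data[cpu];
	}

	len += scnprintf(buf + len, size - len, "%lu", sum);

	/* Append a per-CPU breakdown, but only for CPUs that have a count. */
	for_each_possible_cpu(cpu) {
		if (data[cpu])
			len += scnprintf(buf + len, size - len, " C%d=%lu",
					 cpu, data[cpu]);
	}

	kfree(data);
	return len;
}
```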
|
show_mem.c
    183   int cpu, nid; in show_free_areas() local
    193   for_each_online_cpu(cpu) in show_free_areas()
    194   free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count; in show_free_areas()
    294   for_each_online_cpu(cpu) in show_free_areas()
    295   free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count; in show_free_areas()
|
zswap.c
    256   int ret, cpu; in zswap_pool_create() local
    290   for_each_possible_cpu(cpu) in zswap_pool_create()
    291   mutex_init(&per_cpu_ptr(pool->acomp_ctx, cpu)->mutex); in zswap_pool_create()
    821   static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node) in zswap_cpu_comp_prepare() argument
    824   struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu); in zswap_cpu_comp_prepare()
    830   buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu)); in zswap_cpu_comp_prepare()
    836   acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu)); in zswap_cpu_comp_prepare()
    882   static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node) in zswap_cpu_comp_dead() argument
    885   struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu); in zswap_cpu_comp_dead()
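zswap_cpu_comp_prepare()/zswap_cpu_comp_dead() above are CPU-hotplug callbacks that give each CPU its own compression context, allocated on that CPU's local node. A stripped-down sketch of the same shape, with a hypothetical pool and plain buffer in place of the crypto_acomp machinery; a pair like this is typically registered with cpuhp_setup_state_multi() and each pool attached as an instance:

```c
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/topology.h>
#include <linux/cpuhotplug.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/errno.h>

struct my_cpu_ctx {
	u8 *buffer;
};

struct my_pool {
	struct my_cpu_ctx __percpu *ctx; /* assumed: alloc_percpu() at pool creation */
	struct hlist_node node;		 /* links the pool into the multi-instance state */
};

static int my_cpu_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct my_pool *pool = hlist_entry(node, struct my_pool, node);
	struct my_cpu_ctx *ctx = per_cpu_ptr(pool->ctx, cpu);

	/* Allocate this CPU's scratch buffer on its local memory node. */
	ctx->buffer = kmalloc_node(PAGE_SIZE, GFP_KERNEL, cpu_to_node(cpu));
	if (!ctx->buffer)
		return -ENOMEM;
	return 0;
}

static int my_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct my_pool *pool = hlist_entry(node, struct my_pool, node);
	struct my_cpu_ctx *ctx = per_cpu_ptr(pool->ctx, cpu);

	kfree(ctx->buffer);
	ctx->buffer = NULL;
	return 0;
}
```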
|
mlock.c
    223   void mlock_drain_remote(int cpu) in mlock_drain_remote() argument
    227   WARN_ON_ONCE(cpu_online(cpu)); in mlock_drain_remote()
    228   fbatch = &per_cpu(mlock_fbatch.fbatch, cpu); in mlock_drain_remote()
    233   bool need_mlock_drain(int cpu) in need_mlock_drain() argument
    235   return folio_batch_count(&per_cpu(mlock_fbatch.fbatch, cpu)); in need_mlock_drain()
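need_mlock_drain()/mlock_drain_remote() above expose a per-CPU folio batch to the CPU-hotplug path, so another CPU may empty the batch once its owner is offline. A minimal sketch of that pattern; my_process_batch() is a hypothetical consumer of the batched folios:

```c
#include <linux/pagevec.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/bug.h>

static DEFINE_PER_CPU(struct folio_batch, my_fbatch);

static void my_process_batch(struct folio_batch *fbatch)
{
	/* placeholder: hand the batched folios to their real consumer */
	folio_batch_reinit(fbatch);
}

/* Does @cpu still have folios parked in its batch? */
bool my_needs_drain(int cpu)
{
	return folio_batch_count(&per_cpu(my_fbatch, cpu)) != 0;
}

/*
 * Called from hotplug teardown: @cpu no longer runs, so its batch can be
 * emptied from another CPU without racing against the owner.
 */
void my_drain_remote(int cpu)
{
	struct folio_batch *fbatch = &per_cpu(my_fbatch, cpu);

	WARN_ON_ONCE(cpu_online(cpu));
	if (folio_batch_count(fbatch))
		my_process_batch(fbatch);
}
```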
|
kmemleak.c
    1385  unsigned int cpu; in update_checksum() local
    1388  for_each_possible_cpu(cpu) { in update_checksum()
    1389  void *ptr = per_cpu_ptr((void __percpu *)object->pointer, cpu); in update_checksum()
    1572  unsigned int cpu; in scan_object() local
    1574  for_each_possible_cpu(cpu) { in scan_object()
    1575  void *start = per_cpu_ptr((void __percpu *)object->pointer, cpu); in scan_object()
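For a tracked __percpu allocation, kmemleak has to visit every CPU's private copy, which is what the update_checksum()/scan_object() hits above do. A compact sketch of that walk; the scan callback and size handling are hypothetical:

```c
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/types.h>

static void my_scan_range(void *start, size_t size)
{
	/* placeholder: examine the bytes in [start, start + size) */
}

static void my_scan_percpu_object(void __percpu *ptr, size_t size)
{
	unsigned int cpu;

	/*
	 * A __percpu pointer is only a cookie; per_cpu_ptr() resolves it to
	 * the real address of each CPU's copy, one CPU at a time.
	 */
	for_each_possible_cpu(cpu)
		my_scan_range(per_cpu_ptr(ptr, cpu), size);
}
```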
|
internal.h
    1059  bool need_mlock_drain(int cpu);
    1061  void mlock_drain_remote(int cpu);
    1139  static inline bool need_mlock_drain(int cpu) { return false; } in need_mlock_drain() argument
    1141  static inline void mlock_drain_remote(int cpu) { } in mlock_drain_remote() argument
|
vmalloc.c
    2568  unsigned int cpu; member
    2695  vb->cpu = raw_smp_processor_id(); in new_vmap_block()
    2712  vbq = per_cpu_ptr(&vmap_block_queue, vb->cpu); in new_vmap_block()
    2775  static void purge_fragmented_blocks(int cpu) in purge_fragmented_blocks() argument
    2779  struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); in purge_fragmented_blocks()
    2800  int cpu; in purge_fragmented_blocks_allcpus() local
    2802  for_each_possible_cpu(cpu) in purge_fragmented_blocks_allcpus()
    2803  purge_fragmented_blocks(cpu); in purge_fragmented_blocks_allcpus()
    2907  int cpu; in _vm_unmap_aliases() local
    2914  for_each_possible_cpu(cpu) { in _vm_unmap_aliases()
    [all …]
|
Kconfig.debug
    290   Depending on the cpu, kmemleak scan may be cpu intensive and can
|
memory_hotplug.c
    2132  int cpu; in check_cpu_on_node() local
    2134  for_each_present_cpu(cpu) { in check_cpu_on_node()
    2135  if (cpu_to_node(cpu) == nid) in check_cpu_on_node()
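check_cpu_on_node() above is a short guard on the memory-offline path: a node cannot go away while a present CPU still maps to it. The same check, sketched with a hypothetical name:

```c
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/errno.h>

static int my_check_cpu_on_node(int nid)
{
	int cpu;

	for_each_present_cpu(cpu) {
		/* A present CPU still belongs to this node: refuse the removal. */
		if (cpu_to_node(cpu) == nid)
			return -EBUSY;
	}
	return 0;
}
```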
|
memory-failure.c
    2519  int cpu; in memory_failure_init() local
    2521  for_each_possible_cpu(cpu) { in memory_failure_init()
    2522  mf_cpu = &per_cpu(memory_failure_cpu, cpu); in memory_failure_init()
|
/mm/kasan/
quarantine.c
    346   int cpu; in kasan_quarantine_remove_cache() local
    358   for_each_online_cpu(cpu) { in kasan_quarantine_remove_cache()
    359   sq = per_cpu_ptr(&shrink_qlist, cpu); in kasan_quarantine_remove_cache()
    383   static int kasan_cpu_online(unsigned int cpu) in kasan_cpu_online() argument
    389   static int kasan_cpu_offline(unsigned int cpu) in kasan_cpu_offline() argument
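kasan_cpu_online()/kasan_cpu_offline() above are plain per-CPU hotplug callbacks. A small sketch of how such a pair is typically wired up through a dynamic hotplug state; the per-CPU flag and the state name here are hypothetical:

```c
#include <linux/cpuhotplug.h>
#include <linux/percpu.h>
#include <linux/init.h>

static DEFINE_PER_CPU(bool, my_cpu_enabled);

static int my_cpu_online(unsigned int cpu)
{
	/* Runs as @cpu comes up: allow it to start batching work. */
	per_cpu(my_cpu_enabled, cpu) = true;
	return 0;
}

static int my_cpu_offline(unsigned int cpu)
{
	/* Runs as @cpu goes down: stop it and let leftovers be drained. */
	per_cpu(my_cpu_enabled, cpu) = false;
	return 0;
}

static int __init my_hotplug_init(void)
{
	int ret;

	/* CPUHP_AP_ONLINE_DYN returns the allocated state number on success. */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/example:online",
				my_cpu_online, my_cpu_offline);
	return ret < 0 ? ret : 0;
}
```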
|
sw_tags.c
    41    int cpu; in kasan_init_sw_tags() local
    43    for_each_possible_cpu(cpu) in kasan_init_sw_tags()
    44    per_cpu(prng_state, cpu) = (u32)get_cycles(); in kasan_init_sw_tags()
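kasan_init_sw_tags() above seeds one PRNG state per CPU at boot by writing each CPU's slot directly. A tiny sketch of that loop with a hypothetical per-CPU variable:

```c
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/timex.h>
#include <linux/types.h>
#include <linux/init.h>

static DEFINE_PER_CPU(u32, my_prng_state);

void __init my_init_seeds(void)
{
	int cpu;

	/* Give every possible CPU its own starting seed. */
	for_each_possible_cpu(cpu)
		per_cpu(my_prng_state, cpu) = (u32)get_cycles();
}
```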
|
common.c
    54    u32 cpu = raw_smp_processor_id(); in kasan_set_track() local
    57    track->cpu = cpu; in kasan_set_track()
|
kasan.h
    197   u64 cpu:20; member
|
report.c
    275   prefix, track->pid, track->cpu, in print_track()
|
/mm/kfence/
kfence.h
    48    int cpu; member
|
report.c
    120   track->cpu, (unsigned long)ts_sec, rem_nsec / 1000, in kfence_print_stack()
|