Lines matching refs:cpu in mm/vmstat.c

42 int item, cpu; in zero_zone_numa_counters() local
46 for_each_online_cpu(cpu) { in zero_zone_numa_counters()
47 per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_numa_event[item] in zero_zone_numa_counters()
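
The matches at source lines 42-47 come from the helper that clears a zone's NUMA event counters. A minimal sketch of the pattern, assuming the vm_numa_event fields from mm/vmstat.c's kernel context (this and the sketches below compile only in-kernel, not standalone):

static void zero_zone_numa_counters(struct zone *zone)
{
	int item, cpu;

	for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++) {
		/* Clear the zone-wide atomic counter... */
		atomic_long_set(&zone->vm_numa_event[item], 0);
		/* ...and each online CPU's per-CPU delta for that item. */
		for_each_online_cpu(cpu)
			per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_numa_event[item] = 0;
	}
}
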
115 int cpu; in sum_vm_events() local
120 for_each_online_cpu(cpu) { in sum_vm_events()
121 struct vm_event_state *this = &per_cpu(vm_event_states, cpu); in sum_vm_events()
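
Source lines 115-121 are the read side of the per-CPU event counters: walk every online CPU and accumulate its vm_event_states copy. A sketch of that summation, assuming NR_VM_EVENT_ITEMS and the per-CPU vm_event_states variable from the same file:

static void sum_vm_events(unsigned long *ret)
{
	int cpu, i;

	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	for_each_online_cpu(cpu) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		/* Accumulate this CPU's counts into the caller's array. */
		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];
	}
}

The caller is expected to hold the CPU hotplug read lock so the online mask stays stable across the walk.
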
147 void vm_events_fold_cpu(int cpu) in vm_events_fold_cpu() argument
149 struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu); in vm_events_fold_cpu()
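
Source lines 147-149 are the hotplug fold for the same counters: when a CPU goes offline, its event counts are credited to a live CPU via count_vm_events() and then zeroed. A hedged sketch:

void vm_events_fold_cpu(int cpu)
{
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
	int i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		/* Credit the dead CPU's count to the current CPU... */
		count_vm_events(i, fold_state->event[i]);
		/* ...and zero the stale per-CPU copy. */
		fold_state->event[i] = 0;
	}
}
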
175 int cpu; in fold_vm_zone_numa_events() local
178 for_each_online_cpu(cpu) { in fold_vm_zone_numa_events()
181 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); in fold_vm_zone_numa_events()
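
Source lines 175-181 fold per-CPU NUMA event deltas into the zone-wide counters. A sketch of the pattern; xchg() grabs and clears each delta atomically, while zone_numa_event_add() stands in for the file's fold-into-atomic helper (its exact name is an assumption here):

static void fold_vm_zone_numa_events(struct zone *zone)
{
	unsigned long zone_numa_events[NR_VM_NUMA_EVENT_ITEMS] = { 0, };
	enum numa_stat_item item;
	int cpu;

	for_each_online_cpu(cpu) {
		struct per_cpu_zonestat *pzstats =
			per_cpu_ptr(zone->per_cpu_zonestats, cpu);

		/* Atomically take and clear this CPU's deltas. */
		for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
			zone_numa_events[item] += xchg(&pzstats->vm_numa_event[item], 0);
	}

	for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
		zone_numa_event_add(zone_numa_events[item], zone, item); /* assumed helper */
}
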
279 int cpu; in refresh_zone_stat_thresholds() local
284 for_each_online_cpu(cpu) { in refresh_zone_stat_thresholds()
285 per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0; in refresh_zone_stat_thresholds()
295 for_each_online_cpu(cpu) { in refresh_zone_stat_thresholds()
298 per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold in refresh_zone_stat_thresholds()
302 pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold; in refresh_zone_stat_thresholds()
303 per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold in refresh_zone_stat_thresholds()
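
The cluster at source lines 279-303 is the threshold refresh: node thresholds are zeroed first, then each populated zone computes its threshold and writes it into both the zone's and the node's per-CPU state, with the node keeping the maximum across its zones. A condensed sketch, assuming calculate_normal_threshold() from the same file and eliding the drift-mark bookkeeping:

void refresh_zone_stat_thresholds(void)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int threshold, cpu;

	/* Zero node thresholds so the max() below starts clean. */
	for_each_online_pgdat(pgdat)
		for_each_online_cpu(cpu)
			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;

	for_each_populated_zone(zone) {
		struct pglist_data *pgdat = zone->zone_pgdat;

		threshold = calculate_normal_threshold(zone);

		for_each_online_cpu(cpu) {
			int pgdat_threshold;

			per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold = threshold;

			/* The node-level threshold tracks its largest zone. */
			pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;
			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold
				= max(threshold, pgdat_threshold);
		}
	}
}
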
324 int cpu; in set_pgdat_percpu_threshold() local
334 for_each_online_cpu(cpu) in set_pgdat_percpu_threshold()
335 per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold in set_pgdat_percpu_threshold()
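
Source lines 324-335 are the reclaim-time variant: set_pgdat_percpu_threshold() lets callers swap in a different threshold calculator for one node's zones. A sketch:

void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *))
{
	struct zone *zone;
	int i, cpu, threshold;

	for (i = 0; i < pgdat->nr_zones; i++) {
		zone = &pgdat->node_zones[i];
		if (!zone->percpu_drift_mark)
			continue;

		/* Callers pass either the pressure or the normal calculator. */
		threshold = (*calculate_pressure)(zone);
		for_each_online_cpu(cpu)
			per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold = threshold;
	}
}
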
899 void cpu_vm_stats_fold(int cpu) in cpu_vm_stats_fold() argument
910 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); in cpu_vm_stats_fold()
938 p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu); in cpu_vm_stats_fold()
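
Source lines 899-938 fold a dead CPU's residual counter deltas into the global counters during hotplug. A condensed sketch of the zone half; the node half (line 938) walks pgdat->per_cpu_nodestats the same way:

void cpu_vm_stats_fold(int cpu)
{
	struct zone *zone;
	int i;

	for_each_populated_zone(zone) {
		struct per_cpu_zonestat *pzstats =
			per_cpu_ptr(zone->per_cpu_zonestats, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
			if (pzstats->vm_stat_diff[i]) {
				int v = pzstats->vm_stat_diff[i];

				/* Move the delta into the zone's atomic counter. */
				pzstats->vm_stat_diff[i] = 0;
				atomic_long_add(v, &zone->vm_stat[i]);
			}
		}
	}
}
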
2056 static bool need_update(int cpu) in need_update() argument
2062 struct per_cpu_zonestat *pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); in need_update()
2074 n = per_cpu_ptr(zone->zone_pgdat->per_cpu_nodestats, cpu); in need_update()
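
Source lines 2056-2074 are the cheap pre-check the shepherd uses: need_update() scans the raw per-CPU diff arrays for any nonzero byte before paying for a flush. A sketch close to the matched lines:

static bool need_update(int cpu)
{
	pg_data_t *last_pgdat = NULL;
	struct zone *zone;

	for_each_populated_zone(zone) {
		struct per_cpu_zonestat *pzstats =
			per_cpu_ptr(zone->per_cpu_zonestats, cpu);
		struct per_cpu_nodestat *n;

		/* Any nonzero zone delta makes a flush worthwhile. */
		if (memchr_inv(pzstats->vm_stat_diff, 0,
			       sizeof(pzstats->vm_stat_diff)))
			return true;

		/* Check each node once, not once per zone. */
		if (last_pgdat == zone->zone_pgdat)
			continue;
		last_pgdat = zone->zone_pgdat;

		n = per_cpu_ptr(zone->zone_pgdat->per_cpu_nodestats, cpu);
		if (memchr_inv(n->vm_node_stat_diff, 0,
			       sizeof(n->vm_node_stat_diff)))
			return true;
	}
	return false;
}
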
2118 int cpu; in vmstat_shepherd() local
2122 for_each_online_cpu(cpu) { in vmstat_shepherd()
2123 struct delayed_work *dw = &per_cpu(vmstat_work, cpu); in vmstat_shepherd()
2136 if (cpu_is_isolated(cpu)) in vmstat_shepherd()
2139 if (!delayed_work_pending(dw) && need_update(cpu)) in vmstat_shepherd()
2140 queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0); in vmstat_shepherd()
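
Source lines 2118-2140 are the shepherd itself: a periodic work item that walks the online CPUs and queues a per-CPU flush only where one is needed, skipping isolated CPUs. A sketch, assuming the shepherd delayed_work and sysctl_stat_interval from the same file:

static void vmstat_shepherd(struct work_struct *w)
{
	int cpu;

	cpus_read_lock();
	for_each_online_cpu(cpu) {
		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);

		/* Never disturb CPUs isolated from housekeeping. */
		if (cpu_is_isolated(cpu))
			continue;

		/* Queue a flush only if none is pending and this CPU
		 * actually has unflushed deltas. */
		if (!delayed_work_pending(dw) && need_update(cpu))
			queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);

		cond_resched();
	}
	cpus_read_unlock();

	/* Re-arm ourselves for the next interval. */
	schedule_delayed_work(&shepherd,
		round_jiffies_relative(sysctl_stat_interval));
}
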
2152 int cpu; in start_shepherd_timer() local
2154 for_each_possible_cpu(cpu) { in start_shepherd_timer()
2155 INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu), in start_shepherd_timer()
2164 if (!cpu_online(cpu)) in start_shepherd_timer()
2165 disable_delayed_work_sync(&per_cpu(vmstat_work, cpu)); in start_shepherd_timer()
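
Source lines 2152-2165 initialize the machinery: every possible CPU gets a deferrable work item (so idle CPUs are not woken just for statistics), while work for CPUs that are not yet online stays disabled. A sketch:

static void __init start_shepherd_timer(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
			vmstat_update);

		/* Offline CPUs stay disabled until vmstat_cpu_online(). */
		if (!cpu_online(cpu))
			disable_delayed_work_sync(&per_cpu(vmstat_work, cpu));
	}

	schedule_delayed_work(&shepherd,
		round_jiffies_relative(sysctl_stat_interval));
}
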
2182 static int vmstat_cpu_online(unsigned int cpu) in vmstat_cpu_online() argument
2187 if (!node_state(cpu_to_node(cpu), N_CPU)) { in vmstat_cpu_online()
2188 node_set_state(cpu_to_node(cpu), N_CPU); in vmstat_cpu_online()
2190 enable_delayed_work(&per_cpu(vmstat_work, cpu)); in vmstat_cpu_online()
2195 static int vmstat_cpu_down_prep(unsigned int cpu) in vmstat_cpu_down_prep() argument
2197 disable_delayed_work_sync(&per_cpu(vmstat_work, cpu)); in vmstat_cpu_down_prep()
2201 static int vmstat_cpu_dead(unsigned int cpu) in vmstat_cpu_dead() argument
2206 node = cpu_to_node(cpu); in vmstat_cpu_dead()
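
Source lines 2182-2206 are the CPU hotplug hooks: online marks the node as having a CPU and enables the per-CPU work, down_prep synchronously disables it, and dead (line 2206) folds stats and clears N_CPU when a node loses its last CPU. A sketch of the first two, plus the cpuhp registration they are presumably wired up with (the exact state and name below are assumptions):

static int vmstat_cpu_online(unsigned int cpu)
{
	if (!node_state(cpu_to_node(cpu), N_CPU))
		node_set_state(cpu_to_node(cpu), N_CPU);

	enable_delayed_work(&per_cpu(vmstat_work, cpu));
	return 0;
}

static int vmstat_cpu_down_prep(unsigned int cpu)
{
	/* Make sure no flush work can still run on the departing CPU. */
	disable_delayed_work_sync(&per_cpu(vmstat_work, cpu));
	return 0;
}

/* Registration sketch, as typically done from the file's init path: */
cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/vmstat:online",
			  vmstat_cpu_online, vmstat_cpu_down_prep);
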