/fs/squashfs/
  decompressor_multi_percpu.c
      32  int err, cpu;                        in squashfs_decompressor_create() (local)
      38  for_each_possible_cpu(cpu) {         in squashfs_decompressor_create()
      39  stream = per_cpu_ptr(percpu, cpu);   in squashfs_decompressor_create()
      52  for_each_possible_cpu(cpu) {         in squashfs_decompressor_create()
      53  stream = per_cpu_ptr(percpu, cpu);   in squashfs_decompressor_create()
      66  int cpu;                             in squashfs_decompressor_destroy() (local)
      69  for_each_possible_cpu(cpu) {         in squashfs_decompressor_destroy()
      70  stream = per_cpu_ptr(percpu, cpu);   in squashfs_decompressor_destroy()
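
These hits are the usual per-CPU create/teardown idiom: allocate a per-CPU object with alloc_percpu(), initialize each instance under for_each_possible_cpu(), and unwind over the same mask on failure. A minimal sketch of that shape, with hypothetical my_* names standing in for the squashfs helpers:

```c
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/slab.h>

struct my_stream {
	void *state;	/* stands in for a decompressor's scratch state */
};

/* Allocate one stream per possible CPU, unwinding fully on failure. */
static struct my_stream __percpu *my_streams_create(void)
{
	struct my_stream __percpu *percpu;
	struct my_stream *stream;
	int cpu;

	percpu = alloc_percpu(struct my_stream);	/* zero-initialized */
	if (!percpu)
		return ERR_PTR(-ENOMEM);

	for_each_possible_cpu(cpu) {
		stream = per_cpu_ptr(percpu, cpu);
		stream->state = kzalloc(64, GFP_KERNEL);
		if (!stream->state)
			goto out;
	}
	return percpu;

out:
	/* Safe for CPUs never reached: their ->state is still NULL. */
	for_each_possible_cpu(cpu) {
		stream = per_cpu_ptr(percpu, cpu);
		kfree(stream->state);
	}
	free_percpu(percpu);
	return ERR_PTR(-ENOMEM);
}

static void my_streams_destroy(struct my_stream __percpu *percpu)
{
	int cpu;

	for_each_possible_cpu(cpu)
		kfree(per_cpu_ptr(percpu, cpu)->state);
	free_percpu(percpu);
}
```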
|
/fs/bcachefs/
  rcu_pending.c
     102  int cpu;                        (member)
     578  int cpu;                        in rcu_pending_dequeue_from_all() (local)
     579  for_each_possible_cpu(cpu) {    in rcu_pending_dequeue_from_all()
     589  int cpu;                        in rcu_pending_has_pending_or_armed() (local)
     590  for_each_possible_cpu(cpu) {    in rcu_pending_has_pending_or_armed()
     605  int cpu;                        in rcu_pending_exit() (local)
     619  for_each_possible_cpu(cpu) {    in rcu_pending_exit()
     624  for_each_possible_cpu(cpu) {    in rcu_pending_exit()
     652  int cpu;                        in rcu_pending_init() (local)
     653  for_each_possible_cpu(cpu) {    in rcu_pending_init()
     [all …]
|
  util.h
     631  int cpu; \
     632  for_each_possible_cpu(cpu) \
     633  _ret += *per_cpu_ptr(_p, cpu); \
     644  int cpu;                        in percpu_u64_set() (local)
     646  for_each_possible_cpu(cpu)      in percpu_u64_set()
     647  *per_cpu_ptr(dst, cpu) = 0;     in percpu_u64_set()
     660  int cpu;                        in acc_u64s_percpu() (local)
     662  for_each_possible_cpu(cpu)      in acc_u64s_percpu()
     668  int cpu;                        in percpu_memset() (local)
     670  for_each_possible_cpu(cpu)      in percpu_memset()
     [all …]
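
The util.h hits are bcachefs's small per-CPU u64 toolkit: the macro at lines 631-633 sums one counter across all possible CPUs, and the setter zeroes every instance (lines 646-647) before storing the new value locally, so a later sum still reads the value back. A sketch of both operations under those assumptions, with the macro rendered as a function:

```c
#include <linux/percpu.h>

/* Sum one u64 counter across every possible CPU. */
static u64 my_percpu_u64_get(u64 __percpu *p)
{
	u64 ret = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		ret += *per_cpu_ptr(p, cpu);
	return ret;
}

/*
 * Zero all instances, then store @v in the local instance only, so
 * that a subsequent my_percpu_u64_get() returns exactly @v.
 */
static void my_percpu_u64_set(u64 __percpu *dst, u64 v)
{
	int cpu;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(dst, cpu) = 0;
	this_cpu_write(*dst, v);
}
```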
|
  time_stats.c
     165  int cpu;                                    in bch2_time_stats_reset() (local)
     166  for_each_possible_cpu(cpu)                  in bch2_time_stats_reset()
     167  per_cpu_ptr(stats->buffer, cpu)->nr = 0;    in bch2_time_stats_reset()
|
  util.c
     386  int cpu;                          in bch2_time_stats_to_text() (local)
     389  for_each_possible_cpu(cpu)        in bch2_time_stats_to_text()
     390  __bch2_time_stats_clear_buffer(stats, per_cpu_ptr(stats->buffer, cpu));  in bch2_time_stats_to_text()
     994  int cpu;                          in bch2_acc_percpu_u64s() (local)
    1001  for_each_possible_cpu(cpu) {      in bch2_acc_percpu_u64s()
    1002  u64 *i = per_cpu_ptr(p, cpu);     in bch2_acc_percpu_u64s()
|
  six.c
     101  int cpu;                                          in pcpu_read_count() (local)
     103  for_each_possible_cpu(cpu)                        in pcpu_read_count()
     104  read_count += *per_cpu_ptr(lock->readers, cpu);   in pcpu_read_count()
|
  inode.c
     955  bch2_inode_alloc_cursor_get(struct btree_trans *trans, u64 cpu, u64 *min, u64 *max)  in bch2_inode_alloc_cursor_get() (argument)
     959  u64 cursor_idx = c->opts.inodes_32bit ? 0 : cpu + 1;   in bch2_inode_alloc_cursor_get()
     988  *min = max(cpu << bits, (u64) INT_MAX + 1);            in bch2_inode_alloc_cursor_get()
     989  *max = (cpu << bits) | ~(ULLONG_MAX << bits);          in bch2_inode_alloc_cursor_get()
    1010  u32 snapshot, u64 cpu)                                 in bch2_inode_create() (argument)
    1014  bch2_inode_alloc_cursor_get(trans, cpu, &min, &max);   in bch2_inode_create()
|
/fs/resctrl/
  pseudo_lock.c
     153  int cpu;                                      in pseudo_lock_cstates_constrain() (local)
     156  for_each_cpu(cpu, &plr->d->hdr.cpu_mask) {    in pseudo_lock_cstates_constrain()
     169  cpu);                                         in pseudo_lock_cstates_constrain()
     237  if (!cpu_online(plr->cpu)) {                  in pseudo_lock_region_init()
     239  plr->cpu);                                    in pseudo_lock_region_init()
     693  unsigned int cpu;                             in pseudo_lock_measure_cycles() (local)
     710  cpu = cpumask_first(&plr->d->hdr.cpu_mask);   in pseudo_lock_measure_cycles()
     711  if (!cpu_online(cpu)) {                       in pseudo_lock_measure_cycles()
     716  plr->cpu = cpu;                               in pseudo_lock_measure_cycles()
     720  plr, cpu, "pseudo_lock_measure/%u");          in pseudo_lock_measure_cycles()
     [all …]
|
  monitor.c
     362  int cpu = smp_processor_id();                         in __mon_event_count() (local)
     379  if (!cpumask_test_cpu(cpu, &rr->d->hdr.cpu_mask))     in __mon_event_count()
     392  ci = get_cpu_cacheinfo_level(cpu, RESCTRL_L3_CACHE);  in __mon_event_count()
     499  if (cpumask_test_cpu(cpu, &d->hdr.cpu_mask))          in get_ctrl_domain_from_cpu()
     686  int cpu;                                              in cqm_setup_limbo_handler() (local)
     689  dom->cqm_work_cpu = cpu;                              in cqm_setup_limbo_handler()
     691  if (cpu < nr_cpu_ids)                                 in cqm_setup_limbo_handler()
     752  int cpu;                                              in mbm_setup_overflow_handler() (local)
     761  dom->mbm_work_cpu = cpu;                              in mbm_setup_overflow_handler()
     763  if (cpu < nr_cpu_ids)                                 in mbm_setup_overflow_handler()
     [all …]
|
  internal.h
      27  unsigned int cpu;      in cpumask_any_housekeeping() (local)
      31  cpu = cpumask_any_andnot_but(mask, tick_nohz_full_mask, exclude_cpu);  in cpumask_any_housekeeping()
      32  if (cpu < nr_cpu_ids)  in cpumask_any_housekeeping()
      33  return cpu;            in cpumask_any_housekeeping()
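
cpumask_any_housekeeping() prefers a CPU that is not nohz_full, falling back to any CPU in the mask. A hedged reconstruction of that logic (only lines 27 and 31-33 are quoted above; the enclosing guard and the final fallback are inferred, and an out-of-range exclude_cpu such as resctrl's RESCTRL_PICK_ANY_CPU of -1 matches no CPU, so it excludes nothing):

```c
#include <linux/cpumask.h>
#include <linux/tick.h>

/*
 * Pick any CPU in @mask except @exclude_cpu, preferring housekeeping
 * (non-nohz_full) CPUs when nohz_full is enabled.
 */
static unsigned int my_any_housekeeping(const struct cpumask *mask,
					int exclude_cpu)
{
	unsigned int cpu;

	if (tick_nohz_full_enabled()) {
		cpu = cpumask_any_andnot_but(mask, tick_nohz_full_mask,
					     exclude_cpu);
		if (cpu < nr_cpu_ids)
			return cpu;
	}
	/* Every candidate is nohz_full (or nohz_full is off): take any. */
	return cpumask_any_but(mask, exclude_cpu);
}
```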
|
  ctrlmondata.c
     553  int cpu;                                                        in mon_event_read() (local)
     572  cpu = cpumask_any_housekeeping(cpumask, RESCTRL_PICK_ANY_CPU);  in mon_event_read()
     580  if (tick_nohz_full_cpu(cpu))                                    in mon_event_read()
     583  smp_call_on_cpu(cpu, smp_mon_event_count, rr, false);           in mon_event_read()
     597  int domid, cpu, ret = 0;                                        in rdtgroup_mondata_show() (local)
     629  cpu = cpumask_any(&d->hdr.cpu_mask);                            in rdtgroup_mondata_show()
     630  ci = get_cpu_cacheinfo_level(cpu, RESCTRL_L3_CACHE);            in rdtgroup_mondata_show()
|
  rdtgroup.c
    2290  int cpu = cpumask_any(&d->hdr.cpu_mask);     in mba_sc_domain_allocate() (local)
    2294  GFP_KERNEL, cpu_to_node(cpu));               in mba_sc_domain_allocate()
    3697  int cpu;                                     in rdtgroup_rmdir_mon() (local)
    3708  for_each_cpu(cpu, &rdtgrp->cpu_mask)         in rdtgroup_rmdir_mon()
    3744  int cpu;                                     in rdtgroup_rmdir_ctrl() (local)
    3756  for_each_cpu(cpu, &rdtgrp->cpu_mask)         in rdtgroup_rmdir_ctrl()
    4165  void resctrl_online_cpu(unsigned int cpu)    in resctrl_online_cpu() (argument)
    4208  clear_childcpus(rdtgrp, cpu);                in resctrl_offline_cpu()
    4216  d = get_mon_domain_from_cpu(cpu, l3);        in resctrl_offline_cpu()
    4220  mbm_setup_overflow_handler(d, 0, cpu);       in resctrl_offline_cpu()
    [all …]
|
/fs/proc/
  stat.c
      19  #define arch_irq_stat_cpu(cpu) 0                                  (argument)
      25  u64 get_idle_time(struct kernel_cpustat *kcs, int cpu)            in get_idle_time() (argument)
      29  if (cpu_online(cpu))                                              in get_idle_time()
      30  idle_usecs = get_cpu_idle_time_us(cpu, NULL);                     in get_idle_time()
      41  static u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu)   in get_iowait_time() (argument)
      45  if (cpu_online(cpu))                                              in get_iowait_time()
      46  iowait_usecs = get_cpu_iowait_time_us(cpu, NULL);                 in get_iowait_time()
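
/proc/stat trusts the nohz idle-time bookkeeping only while the CPU is online; an offline CPU, or a -1 sentinel from get_cpu_idle_time_us() on a !NO_HZ kernel, forces a fall back to the scheduler's cputime snapshot. A sketch completing the function body along those lines (the fallback branch is a plausible reconstruction, not quoted from the listing):

```c
#include <linux/cpumask.h>
#include <linux/kernel_stat.h>
#include <linux/tick.h>
#include <linux/time64.h>

static u64 my_get_idle_time(struct kernel_cpustat *kcs, int cpu)
{
	u64 idle, idle_usecs = -1ULL;

	if (cpu_online(cpu))
		idle_usecs = get_cpu_idle_time_us(cpu, NULL);

	if (idle_usecs == -1ULL)
		/* !NO_HZ or cpu offline: rely on cputime accounting */
		idle = kcs->cpustat[CPUTIME_IDLE];
	else
		idle = idle_usecs * NSEC_PER_USEC;

	return idle;
}
```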
|
/fs/
  seq_file.c
    1095  seq_hlist_start_percpu(struct hlist_head __percpu *head, int *cpu, loff_t pos)  in seq_hlist_start_percpu() (argument)
    1099  for_each_possible_cpu(*cpu) {                            in seq_hlist_start_percpu()
    1100  hlist_for_each(node, per_cpu_ptr(head, *cpu)) {          in seq_hlist_start_percpu()
    1120  int *cpu, loff_t *pos)                                   in seq_hlist_next_percpu() (argument)
    1129  for (*cpu = cpumask_next(*cpu, cpu_possible_mask); *cpu < nr_cpu_ids;  in seq_hlist_next_percpu()
    1130  *cpu = cpumask_next(*cpu, cpu_possible_mask)) {          in seq_hlist_next_percpu()
    1131  struct hlist_head *bucket = per_cpu_ptr(head, *cpu);     in seq_hlist_next_percpu()
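
seq_hlist_start_percpu()/seq_hlist_next_percpu() walk one hlist per CPU in cpu_possible_mask order, keeping the current CPU in the caller-supplied int cursor. A usage sketch for a seq_file iterator (my_heads and my_iter are hypothetical; the iterator struct would be allocated in the open routine, e.g. via __seq_open_private()):

```c
#include <linux/percpu.h>
#include <linux/seq_file.h>

/* One list head per CPU; items are chained on the owning CPU's list. */
static DEFINE_PER_CPU(struct hlist_head, my_heads);

struct my_iter {
	int cpu;	/* cursor: which CPU's bucket we are in */
};

/* ->start: find the pos'th node, scanning CPU buckets in order. */
static void *my_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct my_iter *iter = seq->private;	/* set up in ->open */

	return seq_hlist_start_percpu(&my_heads, &iter->cpu, *pos);
}

/* ->next: advance within the bucket, else on to the next possible CPU. */
static void *my_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct my_iter *iter = seq->private;

	return seq_hlist_next_percpu(v, &my_heads, &iter->cpu, pos);
}
```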
|
  aio.c
     103  struct kioctx_cpu __percpu *cpu;              (member)
     623  free_percpu(ctx->cpu);                        in free_ioctx()
     788  ctx->cpu = alloc_percpu(struct kioctx_cpu);   in ioctx_alloc()
     789  if (!ctx->cpu)                                in ioctx_alloc()
     835  free_percpu(ctx->cpu);                        in ioctx_alloc()
     940  kcpu = this_cpu_ptr(ctx->cpu);                in put_reqs_available()
     958  kcpu = this_cpu_ptr(ctx->cpu);                in __get_reqs_available()
|
  buffer.c
    1545  bool has_bh_in_lru(int cpu, void *dummy)           in has_bh_in_lru() (argument)
    1547  struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);     in has_bh_in_lru()
    3049  static int buffer_exit_cpu_dead(unsigned int cpu)  in buffer_exit_cpu_dead() (argument)
    3052  struct bh_lru *b = &per_cpu(bh_lrus, cpu);         in buffer_exit_cpu_dead()
    3058  this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);  in buffer_exit_cpu_dead()
    3059  per_cpu(bh_accounting, cpu).nr = 0;                in buffer_exit_cpu_dead()
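
buffer.c pairs its per-CPU buffer-head LRU and accounting with a CPU-hotplug "dead" callback that folds the dead CPU's private count into a live CPU, exactly the shape of lines 3058-3059. A generic sketch of that fold (buffer.c registers buffer_exit_cpu_dead() on its dedicated CPUHP_FS_BUFF_DEAD state; the sketch takes a dynamic slot instead, and my_nr is a hypothetical counter):

```c
#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(long, my_nr);	/* e.g. objects allocated per CPU */

/* Runs after @cpu went dead: fold its count into the current CPU's. */
static int my_cpu_dead(unsigned int cpu)
{
	this_cpu_add(my_nr, per_cpu(my_nr, cpu));
	per_cpu(my_nr, cpu) = 0;
	return 0;
}

static int __init my_setup(void)
{
	int ret;

	/* Dynamic prepare-slot teardown callbacks fire on CPU death. */
	ret = cpuhp_setup_state_nocalls(CPUHP_BP_PREPARE_DYN,
					"example:dead", NULL, my_cpu_dead);
	return ret < 0 ? ret : 0;	/* DYN slots return a state number */
}
```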
|
/fs/xfs/
  xfs_icache.c
     473  int cpu;                                      in xfs_inodegc_queue_all() (local)
     476  for_each_cpu(cpu, &mp->m_inodegc_cpumask) {   in xfs_inodegc_queue_all()
     477  gc = per_cpu_ptr(mp->m_inodegc, cpu);         in xfs_inodegc_queue_all()
     492  int cpu;                                      in xfs_inodegc_wait_all() (local)
     496  for_each_cpu(cpu, &mp->m_inodegc_cpumask) {   in xfs_inodegc_wait_all()
     499  gc = per_cpu_ptr(mp->m_inodegc, cpu);         in xfs_inodegc_wait_all()
    2282  int cpu;                                      in xfs_inodegc_shrinker_count() (local)
    2287  for_each_cpu(cpu, &mp->m_inodegc_cpumask) {   in xfs_inodegc_shrinker_count()
    2288  gc = per_cpu_ptr(mp->m_inodegc, cpu);         in xfs_inodegc_shrinker_count()
    2303  int cpu;                                      in xfs_inodegc_shrinker_scan() (local)
    [all …]
|
  xfs_stats.c
      12  int val = 0, cpu;                                    in counter_val() (local)
      14  for_each_possible_cpu(cpu)                           in counter_val()
      15  val += *(((__u32 *)per_cpu_ptr(stats, cpu) + idx));  in counter_val()
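
counter_val() exploits the fact that the XFS stats structure is a flat sequence of __u32 fields, so any single statistic can be summed across CPUs by indexing from the base of each per-CPU copy. The same trick in isolation, with a hypothetical three-field struct:

```c
#include <linux/percpu.h>
#include <linux/types.h>

struct my_stats {
	__u32 lookups;
	__u32 hits;
	__u32 misses;
};

/*
 * Treat each per-CPU copy as a flat __u32 array and sum field @idx
 * across all possible CPUs (the xfs_stats.c counter_val() trick).
 */
static int my_counter_val(struct my_stats __percpu *stats, int idx)
{
	int val = 0, cpu;

	for_each_possible_cpu(cpu)
		val += *((__u32 *)per_cpu_ptr(stats, cpu) + idx);
	return val;
}
```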
|
  xfs_log_cil.c
     124  int cpu;                                  in xlog_cil_push_pcp_aggregate() (local)
     126  for_each_cpu(cpu, &ctx->cil_pcpmask) {    in xlog_cil_push_pcp_aggregate()
     127  cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);   in xlog_cil_push_pcp_aggregate()
     159  int cpu;                                  in xlog_cil_insert_pcp_aggregate() (local)
     172  for_each_cpu(cpu, &ctx->cil_pcpmask) {    in xlog_cil_insert_pcp_aggregate()
     173  struct xlog_cil_pcp *cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);  in xlog_cil_insert_pcp_aggregate()
    1918  int cpu;                                  in xlog_cil_init() (local)
    1938  for_each_possible_cpu(cpu) {              in xlog_cil_init()
    1939  cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);   in xlog_cil_init()
|
  xfs_super.c
    1212  int cpu;                                in xfs_inodegc_init_percpu() (local)
    1218  for_each_possible_cpu(cpu) {            in xfs_inodegc_init_percpu()
    1219  gc = per_cpu_ptr(mp->m_inodegc, cpu);   in xfs_inodegc_init_percpu()
    1220  gc->cpu = cpu;                          in xfs_inodegc_init_percpu()
|
/fs/erofs/
  zdata.c
     296  unsigned int cpu;                                      in erofs_destroy_percpu_workers() (local)
     298  for_each_possible_cpu(cpu) {                           in erofs_destroy_percpu_workers()
     300  z_erofs_pcpu_workers[cpu], 1);                         in erofs_destroy_percpu_workers()
     301  rcu_assign_pointer(z_erofs_pcpu_workers[cpu], NULL);   in erofs_destroy_percpu_workers()
     311  kthread_run_worker_on_cpu(cpu, 0, "erofs_worker/%u");  in erofs_init_percpu_worker()
     323  unsigned int cpu;                                      in erofs_init_percpu_workers() (local)
     331  worker = erofs_init_percpu_worker(cpu);                in erofs_init_percpu_workers()
     342  static int erofs_cpu_online(unsigned int cpu)          in erofs_cpu_online() (argument)
     346  worker = erofs_init_percpu_worker(cpu);                in erofs_cpu_online()
     361  static int erofs_cpu_offline(unsigned int cpu)         in erofs_cpu_offline() (argument)
     [all …]
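
erofs keeps one dedicated kthread worker per CPU for decompression, published through RCU-protected pointer slots and created/torn down from CPU hotplug callbacks. A condensed sketch of the online/offline half, under the assumption that my_workers (standing in for z_erofs_pcpu_workers) is an nr_cpu_ids-sized array allocated during init and that the callbacks are wired up via the usual cpuhp_setup_state_nocalls() registration:

```c
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>

/* One published worker slot per CPU; allocation during init omitted. */
static struct kthread_worker __rcu **my_workers;

/* Bring-up: spawn a worker pinned to @cpu and publish it via RCU. */
static int my_cpu_online(unsigned int cpu)
{
	struct kthread_worker *worker =
		kthread_run_worker_on_cpu(cpu, 0, "my_worker/%u");

	if (IS_ERR(worker))
		return PTR_ERR(worker);
	rcu_assign_pointer(my_workers[cpu], worker);
	return 0;
}

/* Tear-down: unpublish first, wait for readers, then stop the thread. */
static int my_cpu_offline(unsigned int cpu)
{
	struct kthread_worker *worker =
		rcu_replace_pointer(my_workers[cpu], NULL, true);

	synchronize_rcu();
	if (worker)
		kthread_destroy_worker(worker);
	return 0;
}
```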
|
/fs/fuse/
  virtio_fs.c
     235  unsigned int cpu, qid;                               in cpu_list_show() (local)
     244  for (cpu = 0; cpu < nr_cpu_ids; cpu++) {             in cpu_list_show()
     245  if (qid < VQ_REQUEST || (fs->mq_map[cpu] == qid)) {  in cpu_list_show()
     247  ret = snprintf(buf + pos, size - pos, "%u", cpu);    in cpu_list_show()
     864  unsigned int q, cpu, nr_masks;                       in virtio_fs_map_queues() (local)
     877  for_each_cpu(cpu, mask)                              in virtio_fs_map_queues()
     878  fs->mq_map[cpu] = q + VQ_REQUEST;                    in virtio_fs_map_queues()
     887  for_each_possible_cpu(cpu)                           in virtio_fs_map_queues()
     888  fs->mq_map[cpu] = VQ_REQUEST;                        in virtio_fs_map_queues()
     893  for_each_cpu(cpu, &masks[q % nr_masks])              in virtio_fs_map_queues()
     [all …]
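
virtio_fs_map_queues() builds a per-CPU table, fs->mq_map[], that routes each CPU's requests to a virtqueue: it prefers the device's interrupt-affinity masks and otherwise points every possible CPU at the first request queue. A sketch of both paths (parameter names are placeholders for the virtio-fs fields; first_req_vq plays the role of VQ_REQUEST in the listing):

```c
#include <linux/cpumask.h>

/*
 * Affinity-driven mapping: every CPU in each mask is routed to the
 * corresponding request queue.
 */
static void my_map_queues(unsigned int *mq_map, unsigned int nr_queues,
			  unsigned int first_req_vq,
			  const struct cpumask *masks, unsigned int nr_masks)
{
	unsigned int q, cpu;

	for (q = 0; q < nr_queues; q++)
		for_each_cpu(cpu, &masks[q % nr_masks])
			mq_map[cpu] = q + first_req_vq;
}

/* Fallback: no affinity info, point every possible CPU at one queue. */
static void my_map_queues_fallback(unsigned int *mq_map,
				   unsigned int first_req_vq)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		mq_map[cpu] = first_req_vq;
}
```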
|
/fs/btrfs/
  accessors.h
     600  static inline void btrfs_disk_key_to_cpu(struct btrfs_key *cpu,  in btrfs_disk_key_to_cpu() (argument)
     603  cpu->offset = le64_to_cpu(disk->offset);      in btrfs_disk_key_to_cpu()
     604  cpu->type = disk->type;                       in btrfs_disk_key_to_cpu()
     605  cpu->objectid = le64_to_cpu(disk->objectid);  in btrfs_disk_key_to_cpu()
     609  const struct btrfs_key *cpu)                  in btrfs_cpu_key_to_disk() (argument)
     611  disk->offset = cpu_to_le64(cpu->offset);      in btrfs_cpu_key_to_disk()
     612  disk->type = cpu->type;                       in btrfs_cpu_key_to_disk()
     613  disk->objectid = cpu_to_le64(cpu->objectid);  in btrfs_cpu_key_to_disk()
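
Note that "cpu" in these btrfs hits (and in volumes.c below) is not a processor ID at all: it names the CPU-native byte order of an in-memory structure, converted to and from the little-endian on-disk layout with le64_to_cpu()/cpu_to_le64(); single-byte fields like ->type need no conversion. The same idiom with a hypothetical two-field record:

```c
#include <linux/types.h>
#include <asm/byteorder.h>

struct my_disk_rec {	/* on-disk format: always little-endian */
	__le64 offset;
	__le64 objectid;
};

struct my_cpu_rec {	/* in-memory format: CPU-native byte order */
	u64 offset;
	u64 objectid;
};

static inline void my_disk_rec_to_cpu(struct my_cpu_rec *cpu,
				      const struct my_disk_rec *disk)
{
	cpu->offset = le64_to_cpu(disk->offset);
	cpu->objectid = le64_to_cpu(disk->objectid);
}

static inline void my_cpu_rec_to_disk(struct my_disk_rec *disk,
				      const struct my_cpu_rec *cpu)
{
	disk->offset = cpu_to_le64(cpu->offset);
	disk->objectid = cpu_to_le64(cpu->objectid);
}
```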
|
  volumes.c
    3627  memset(cpu, 0, sizeof(*cpu));            in btrfs_disk_balance_args_to_cpu()
    3630  cpu->usage = le64_to_cpu(disk->usage);   in btrfs_disk_balance_args_to_cpu()
    3631  cpu->devid = le64_to_cpu(disk->devid);   in btrfs_disk_balance_args_to_cpu()
    3633  cpu->pend = le64_to_cpu(disk->pend);     in btrfs_disk_balance_args_to_cpu()
    3635  cpu->vend = le64_to_cpu(disk->vend);     in btrfs_disk_balance_args_to_cpu()
    3637  cpu->flags = le64_to_cpu(disk->flags);   in btrfs_disk_balance_args_to_cpu()
    3638  cpu->limit = le64_to_cpu(disk->limit);   in btrfs_disk_balance_args_to_cpu()
    3649  disk->usage = cpu_to_le64(cpu->usage);   in btrfs_cpu_balance_args_to_disk()
    3650  disk->devid = cpu_to_le64(cpu->devid);   in btrfs_cpu_balance_args_to_disk()
    3652  disk->pend = cpu_to_le64(cpu->pend);     in btrfs_cpu_balance_args_to_disk()
    [all …]
|
/fs/nfs/
  super.c
     671  int i, cpu;                                 in nfs_show_stats() (local)
     722  for_each_possible_cpu(cpu) {                in nfs_show_stats()
     726  stats = per_cpu_ptr(nfss->io_stats, cpu);   in nfs_show_stats()
|