
Searched refs:cpu (Results 1 – 25 of 63) sorted by relevance


/net/netfilter/
nf_flow_table_procfs.c
9 int cpu; in nf_flow_table_cpu_seq_start() local
14 for (cpu = *pos - 1; cpu < nr_cpu_ids; ++cpu) { in nf_flow_table_cpu_seq_start()
15 if (!cpu_possible(cpu)) in nf_flow_table_cpu_seq_start()
17 *pos = cpu + 1; in nf_flow_table_cpu_seq_start()
18 return per_cpu_ptr(net->ft.stat, cpu); in nf_flow_table_cpu_seq_start()
27 int cpu; in nf_flow_table_cpu_seq_next() local
29 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { in nf_flow_table_cpu_seq_next()
30 if (!cpu_possible(cpu)) in nf_flow_table_cpu_seq_next()
32 *pos = cpu + 1; in nf_flow_table_cpu_seq_next()
33 return per_cpu_ptr(net->ft.stat, cpu); in nf_flow_table_cpu_seq_next()
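
The two hits above are the stock /proc seq_file walk over per-CPU statistics: ->start() and ->next() skip ids that are not cpu_possible() and store cpu + 1 in *pos so the walk resumes where it left off. The same shape recurs below in nf_conntrack_standalone.c and nf_synproxy_core.c. A minimal sketch of the pattern, assuming a hypothetical per-CPU struct my_stats (the my_ names are stand-ins, not taken from the source):

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/seq_file.h>
#include <linux/types.h>

struct my_stats { u64 searched; };	/* hypothetical per-CPU payload */
static struct my_stats __percpu *my_stats;

static void *my_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;	/* position 0 is the header row */

	/* *pos is 1-based from here on: entry N maps to CPU N - 1. */
	for (cpu = *pos - 1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu + 1;		/* where ->next() resumes */
		return per_cpu_ptr(my_stats, cpu);
	}
	return NULL;
}

static void *my_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu + 1;
		return per_cpu_ptr(my_stats, cpu);
	}
	return NULL;
}
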
nft_counter.c
133 int cpu; in nft_counter_fetch() local
136 for_each_possible_cpu(cpu) { in nft_counter_fetch()
137 struct u64_stats_sync *nft_sync = per_cpu_ptr(&nft_counter_sync, cpu); in nft_counter_fetch()
139 this_cpu = per_cpu_ptr(priv->counter, cpu); in nft_counter_fetch()
288 int cpu; in nft_counter_init_seqcount() local
290 for_each_possible_cpu(cpu) in nft_counter_init_seqcount()
291 u64_stats_init(per_cpu_ptr(&nft_counter_sync, cpu)); in nft_counter_init_seqcount()
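
nft_counter_fetch() above reads one logical counter that is sharded across all possible CPUs, wrapping each per-CPU read in u64_stats_fetch_begin()/u64_stats_fetch_retry() so 64-bit values cannot be torn on 32-bit kernels, and nft_counter_init_seqcount() initializes one u64_stats_sync per possible CPU for exactly that purpose. A hedged sketch of the read side; the struct layout is invented for illustration (nft_counter actually keeps its u64_stats_sync in a separate per-CPU variable):

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct my_counter {
	u64 bytes;
	u64 packets;
	struct u64_stats_sync sync;	/* seqcount on 32-bit, no-op on 64-bit */
};

static void my_counter_fetch(struct my_counter __percpu *ctr,
			     u64 *bytes, u64 *packets)
{
	int cpu;

	*bytes = 0;
	*packets = 0;
	for_each_possible_cpu(cpu) {
		struct my_counter *c = per_cpu_ptr(ctr, cpu);
		unsigned int seq;
		u64 b, p;

		do {	/* retry if a writer raced with this snapshot */
			seq = u64_stats_fetch_begin(&c->sync);
			b = c->bytes;
			p = c->packets;
		} while (u64_stats_fetch_retry(&c->sync, seq));

		*bytes += b;
		*packets += p;
	}
}
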
nf_conntrack_standalone.c
402 int cpu; in ct_cpu_seq_start() local
407 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { in ct_cpu_seq_start()
408 if (!cpu_possible(cpu)) in ct_cpu_seq_start()
410 *pos = cpu + 1; in ct_cpu_seq_start()
411 return per_cpu_ptr(net->ct.stat, cpu); in ct_cpu_seq_start()
420 int cpu; in ct_cpu_seq_next() local
422 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { in ct_cpu_seq_next()
423 if (!cpu_possible(cpu)) in ct_cpu_seq_next()
425 *pos = cpu + 1; in ct_cpu_seq_next()
426 return per_cpu_ptr(net->ct.stat, cpu); in ct_cpu_seq_next()
nf_synproxy_core.c
243 int cpu; in synproxy_cpu_seq_start() local
248 for (cpu = *pos - 1; cpu < nr_cpu_ids; cpu++) { in synproxy_cpu_seq_start()
249 if (!cpu_possible(cpu)) in synproxy_cpu_seq_start()
251 *pos = cpu + 1; in synproxy_cpu_seq_start()
252 return per_cpu_ptr(snet->stats, cpu); in synproxy_cpu_seq_start()
261 int cpu; in synproxy_cpu_seq_next() local
263 for (cpu = *pos; cpu < nr_cpu_ids; cpu++) { in synproxy_cpu_seq_next()
264 if (!cpu_possible(cpu)) in synproxy_cpu_seq_next()
266 *pos = cpu + 1; in synproxy_cpu_seq_next()
267 return per_cpu_ptr(snet->stats, cpu); in synproxy_cpu_seq_next()
nf_conncount.c
45 int cpu; member
102 int cpu = raw_smp_processor_id(); in find_or_evict() local
117 if (conn->cpu == cpu || age >= 2) { in find_or_evict()
193 conn->cpu = raw_smp_processor_id(); in __nf_conncount_add()
380 conn->cpu = raw_smp_processor_id(); in insert_tree()
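
nf_conncount tags every node with the CPU that inserted it (conn->cpu = raw_smp_processor_id() in __nf_conncount_add() and insert_tree()), and find_or_evict() only evicts a node early if it runs on that same CPU; other CPUs must wait until the entry has aged at least two garbage-collection intervals. The check in miniature (struct and helper names assumed):

#include <linux/smp.h>
#include <linux/types.h>

struct my_conn {
	int cpu;	/* CPU that inserted this node */
};

/* Inserting CPU may evict immediately; others only once the entry is
 * old enough that the owner is clearly done with it. */
static bool my_conn_may_evict(const struct my_conn *conn, u32 age)
{
	return conn->cpu == raw_smp_processor_id() || age >= 2;
}
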
xt_NFQUEUE.c
94 int cpu = smp_processor_id(); in nfqueue_tg_v3() local
96 queue = info->queuenum + cpu % info->queues_total; in nfqueue_tg_v3()
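
nfqueue_tg_v3() spreads packets over a contiguous block of NFQUEUEs by offsetting the base queue number with the current CPU modulo the number of queues, so each CPU consistently feeds the same queue. The arithmetic in isolation (a sketch, not the module's actual helper):

#include <linux/smp.h>
#include <linux/types.h>

/* Map the executing CPU onto a queue in [base, base + total). */
static u16 my_pick_queue(u16 base, u16 total)
{
	int cpu = smp_processor_id();

	return base + cpu % total;
}
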
nf_conntrack_netlink.c
64 unsigned int cpu; member
2556 int cpu; in ctnetlink_ct_stat_cpu_dump() local
2562 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) { in ctnetlink_ct_stat_cpu_dump()
2565 if (!cpu_possible(cpu)) in ctnetlink_ct_stat_cpu_dump()
2572 cpu, st) < 0) in ctnetlink_ct_stat_cpu_dump()
2575 cb->args[0] = cpu; in ctnetlink_ct_stat_cpu_dump()
3738 int cpu; in ctnetlink_exp_stat_cpu_dump() local
3744 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) { in ctnetlink_exp_stat_cpu_dump()
3747 if (!cpu_possible(cpu)) in ctnetlink_exp_stat_cpu_dump()
3753 cpu, st) < 0) in ctnetlink_exp_stat_cpu_dump()
[all …]
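
Both ctnetlink dumpers above use cb->args[0] as a CPU cursor: the loop starts at the saved CPU, bails out when the skb fills up, and stores the next CPU back into cb->args[0] so the following netlink read resumes there. A hedged sketch of that loop shape; my_fill_stats() and its attribute numbers are hypothetical stand-ins for ctnetlink_ct_stat_cpu_fill_info():

#include <linux/cpumask.h>
#include <linux/netlink.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <net/netlink.h>

struct my_stats { u64 found; };	/* hypothetical per-CPU payload */

/* Stand-in for ctnetlink_ct_stat_cpu_fill_info(); < 0 means skb full. */
static int my_fill_stats(struct sk_buff *skb, int cpu,
			 const struct my_stats *st)
{
	return nla_put_u64_64bit(skb, 1 /* hypothetical attr */,
				 st->found, 2 /* hypothetical pad attr */);
}

static int my_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb,
			    struct my_stats __percpu *stats)
{
	int cpu;

	for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
		const struct my_stats *st;

		if (!cpu_possible(cpu))
			continue;

		st = per_cpu_ptr(stats, cpu);
		if (my_fill_stats(skb, cpu, st) < 0)
			break;		/* out of room: resume here next read */
	}
	cb->args[0] = cpu;		/* save the cursor */

	return skb->len;
}
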
x_tables.c
1205 int cpu; in xt_free_table_info() local
1208 for_each_possible_cpu(cpu) in xt_free_table_info()
1209 kvfree(info->jumpstack[cpu]); in xt_free_table_info()
1330 int cpu; in xt_jumpstack_alloc() local
1355 for_each_possible_cpu(cpu) { in xt_jumpstack_alloc()
1356 i->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL, in xt_jumpstack_alloc()
1357 cpu_to_node(cpu)); in xt_jumpstack_alloc()
1358 if (i->jumpstack[cpu] == NULL) in xt_jumpstack_alloc()
1392 unsigned int cpu; in xt_replace_table() local
1432 for_each_possible_cpu(cpu) { in xt_replace_table()
[all …]
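
The x_tables hits show per-CPU, NUMA-aware allocation: xt_jumpstack_alloc() gives every possible CPU its own jump stack allocated with kvmalloc_node() on that CPU's home node, and xt_free_table_info() kvfree()s them all. The pair in isolation (struct layout and error handling simplified from the source):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/topology.h>

struct my_table_info {
	void **jumpstack;	/* one stack per possible CPU */
};

static int my_jumpstack_alloc(struct my_table_info *info, size_t size)
{
	int cpu;

	info->jumpstack = kvcalloc(nr_cpu_ids, sizeof(void *), GFP_KERNEL);
	if (!info->jumpstack)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		/* Allocate on the CPU's local NUMA node so hot-path
		 * accesses stay node-local. */
		info->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL,
						     cpu_to_node(cpu));
		if (!info->jumpstack[cpu])
			return -ENOMEM;	/* caller runs my_jumpstack_free() */
	}
	return 0;
}

static void my_jumpstack_free(struct my_table_info *info)
{
	int cpu;

	for_each_possible_cpu(cpu)
		kvfree(info->jumpstack[cpu]);	/* kvfree(NULL) is a no-op */
	kvfree(info->jumpstack);
}
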
/net/core/
gen_stats.c
150 struct gnet_stats_basic_sync __percpu *cpu, in gnet_stats_add_basic() argument
157 WARN_ON_ONCE((cpu || running) && in_hardirq()); in gnet_stats_add_basic()
159 if (cpu) { in gnet_stats_add_basic()
160 gnet_stats_add_basic_cpu(bstats, cpu); in gnet_stats_add_basic()
175 struct gnet_stats_basic_sync __percpu *cpu, in gnet_stats_read_basic() argument
180 if (cpu) { in gnet_stats_read_basic()
212 struct gnet_stats_basic_sync __percpu *cpu, in ___gnet_stats_copy_basic() argument
288 struct gnet_stats_basic_sync __percpu *cpu, in gnet_stats_copy_basic_hw() argument
357 const struct gnet_stats_queue __percpu *cpu, in gnet_stats_add_queue() argument
360 if (cpu) { in gnet_stats_add_queue()
[all …]
drop_monitor.c
1052 int cpu, rc; in net_dm_hw_monitor_start() local
1066 for_each_possible_cpu(cpu) { in net_dm_hw_monitor_start()
1108 int cpu; in net_dm_hw_monitor_stop() local
1142 int cpu, rc; in net_dm_trace_on_set() local
1198 int cpu; in net_dm_trace_off_set() local
1443 int cpu; in net_dm_stats_read() local
1487 int cpu; in net_dm_hw_stats_read() local
1728 int cpu, rc; in init_net_drop_monitor() local
1738 net_dm_cpu_data_init(cpu); in init_net_drop_monitor()
1767 int cpu; in exit_net_drop_monitor() local
[all …]
dst.c
323 int cpu; in metadata_dst_alloc_percpu() local
332 for_each_possible_cpu(cpu) in metadata_dst_alloc_percpu()
333 __metadata_dst_init(per_cpu_ptr(md_dst, cpu), type, optslen); in metadata_dst_alloc_percpu()
341 int cpu; in metadata_dst_free_percpu() local
343 for_each_possible_cpu(cpu) { in metadata_dst_free_percpu()
344 struct metadata_dst *one_md_dst = per_cpu_ptr(md_dst, cpu); in metadata_dst_free_percpu()
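
metadata_dst_alloc_percpu() pairs a per-CPU allocation with an explicit init loop over every possible CPU, and metadata_dst_free_percpu() walks the same set to tear each instance down before releasing the allocation. The generic shape, with a trivial stand-in object in place of __metadata_dst_init():

#include <linux/cpumask.h>
#include <linux/percpu.h>

struct my_obj { int refs; };	/* hypothetical per-CPU object */

static struct my_obj __percpu *my_alloc_percpu(void)
{
	struct my_obj __percpu *p = alloc_percpu(struct my_obj);
	int cpu;

	if (!p)
		return NULL;

	/* Initialize every possible CPU's copy before publishing. */
	for_each_possible_cpu(cpu)
		per_cpu_ptr(p, cpu)->refs = 1;

	return p;
}

static void my_free_percpu(struct my_obj __percpu *p)
{
	int cpu;

	/* Per-CPU teardown first, then drop the whole allocation. */
	for_each_possible_cpu(cpu)
		per_cpu_ptr(p, cpu)->refs = 0;
	free_percpu(p);
}
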
dev.c
4898 int cpu = -1; in get_rps_cpu() local
4969 cpu = tcpu; in get_rps_cpu()
4979 cpu = tcpu; in get_rps_cpu()
4985 return cpu; in get_rps_cpu()
5014 cpu = READ_ONCE(rflow->cpu); in rps_may_expire_flow()
5408 int cpu, rc; in generic_xdp_tx() local
5475 int cpu; in netif_rx_internal() local
5480 if (cpu < 0) in netif_rx_internal()
6359 queue_work_on(cpu, system_highpri_wq, &ptr->w[cpu]); in flush_all_backlogs()
11515 int cpu; in dev_fetch_dstats() local
[all …]
/net/iucv/
iucv.c
495 cpu, rc, err); in iucv_declare_cpu()
542 int cpu; in iucv_setmask_mp() local
545 for_each_online_cpu(cpu) in iucv_setmask_mp()
562 int cpu; in iucv_setmask_up() local
582 int cpu, rc; in iucv_enable() local
592 for_each_online_cpu(cpu) in iucv_enable()
626 kfree(iucv_param[cpu]); in iucv_cpu_dead()
627 iucv_param[cpu] = NULL; in iucv_cpu_dead()
638 if (!iucv_irq_data[cpu]) in iucv_cpu_prepare()
644 if (!iucv_param[cpu]) in iucv_cpu_prepare()
[all …]
/net/ipv6/
seg6_hmac.c
367 int i, alg_count, cpu; in seg6_hmac_init_algo() local
381 for_each_possible_cpu(cpu) { in seg6_hmac_init_algo()
387 p_tfm = per_cpu_ptr(algo->tfms, cpu); in seg6_hmac_init_algo()
400 for_each_possible_cpu(cpu) { in seg6_hmac_init_algo()
402 cpu_to_node(cpu)); in seg6_hmac_init_algo()
405 *per_cpu_ptr(algo->shashs, cpu) = shash; in seg6_hmac_init_algo()
433 int i, alg_count, cpu; in seg6_hmac_exit() local
440 for_each_possible_cpu(cpu) { in seg6_hmac_exit()
441 shash = *per_cpu_ptr(algo->shashs, cpu); in seg6_hmac_exit()
448 for_each_possible_cpu(cpu) { in seg6_hmac_exit()
[all …]
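
seg6_hmac_init_algo() builds per-CPU crypto state: a transform per algorithm plus a struct shash_desc allocated per possible CPU on its local node, so HMAC computation never needs a lock; seg6_hmac_exit() frees them along the same loops. A compressed sketch under those assumptions; the "hmac(sha256)" name, the single shared tfm, and the simplified unwind are illustrative choices, not the file's exact layout:

#include <crypto/hash.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/topology.h>

static struct shash_desc * __percpu *my_shashs;

static int my_hmac_init(void)
{
	struct crypto_shash *tfm;
	int cpu;

	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	my_shashs = alloc_percpu(struct shash_desc *);
	if (!my_shashs) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}

	for_each_possible_cpu(cpu) {
		struct shash_desc *desc;

		/* descsize depends on the algorithm; allocate node-local. */
		desc = kzalloc_node(sizeof(*desc) + crypto_shash_descsize(tfm),
				    GFP_KERNEL, cpu_to_node(cpu));
		if (!desc)
			return -ENOMEM;	/* real code unwinds earlier CPUs */
		desc->tfm = tfm;
		*per_cpu_ptr(my_shashs, cpu) = desc;
	}
	return 0;
}
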
/net/openvswitch/
flow.c
65 unsigned int cpu = smp_processor_id(); in ovs_flow_stats_update() local
74 if (cpu == 0 && unlikely(flow->stats_last_writer != cpu)) in ovs_flow_stats_update()
75 flow->stats_last_writer = cpu; in ovs_flow_stats_update()
110 cpumask_set_cpu(cpu, in ovs_flow_stats_update()
115 flow->stats_last_writer = cpu; in ovs_flow_stats_update()
132 int cpu; in ovs_flow_stats_get() local
139 for (cpu = 0; cpu < nr_cpu_ids; in ovs_flow_stats_get()
140 cpu = cpumask_next(cpu, flow->cpu_used_mask)) { in ovs_flow_stats_get()
161 int cpu; in ovs_flow_stats_clear() local
164 for (cpu = 0; cpu < nr_cpu_ids; in ovs_flow_stats_clear()
[all …]
flow_table.c
110 int cpu; in flow_free() local
118 for (cpu = 0; cpu < nr_cpu_ids; in flow_free()
119 cpu = cpumask_next(cpu, flow->cpu_used_mask)) { in flow_free()
120 if (flow->stats[cpu]) in flow_free()
122 (struct sw_flow_stats __force *)flow->stats[cpu]); in flow_free()
192 int i, cpu; in tbl_mask_array_reset_counters() local
202 for_each_possible_cpu(cpu) { in tbl_mask_array_reset_counters()
207 stats = per_cpu_ptr(ma->masks_usage_stats, cpu); in tbl_mask_array_reset_counters()
1121 int cpu; in ovs_flow_masks_rebalance() local
1130 for_each_possible_cpu(cpu) { in ovs_flow_masks_rebalance()
[all …]
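
The Open vSwitch hits above iterate per-flow stats with cpumask_next() over flow->cpu_used_mask, which ovs_flow_stats_update() populates with cpumask_set_cpu() as CPUs write; readers like ovs_flow_stats_get() and flow_free() then visit only CPUs that ever touched the flow instead of scanning all of nr_cpu_ids. A sketch of that walk (struct layout assumed; the real code additionally special-cases CPU 0, which always carries stats):

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/types.h>

struct my_flow_stats { u64 packets; };

struct my_flow {
	struct cpumask cpu_used_mask;		/* CPUs that wrote stats */
	struct my_flow_stats __percpu *stats;
};

static u64 my_flow_stats_sum(const struct my_flow *flow)
{
	u64 packets = 0;
	int cpu;

	/* Jump from set bit to set bit rather than testing every CPU id. */
	for (cpu = cpumask_first(&flow->cpu_used_mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next(cpu, &flow->cpu_used_mask))
		packets += per_cpu_ptr(flow->stats, cpu)->packets;

	return packets;
}
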
/net/ipv4/
tcp_sigpool.c
60 int cpu, err = 0; in sigpool_reserve_scratch() local
73 for_each_possible_cpu(cpu) { in sigpool_reserve_scratch()
76 scratch = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu)); in sigpool_reserve_scratch()
82 old_scratch = rcu_replace_pointer(per_cpu(sigpool_scratch.pad, cpu), in sigpool_reserve_scratch()
84 if (!cpu_online(cpu) || !old_scratch) { in sigpool_reserve_scratch()
100 int cpu; in sigpool_scratch_free() local
102 for_each_possible_cpu(cpu) in sigpool_scratch_free()
103 kfree(rcu_replace_pointer(per_cpu(sigpool_scratch.pad, cpu), in sigpool_scratch_free()
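
tcp_sigpool grows every CPU's scratch buffer by allocating a node-local replacement and swapping it in with rcu_replace_pointer(), so hot-path users that dereference the pointer under RCU never see a half-sized buffer. A simplified sketch: unlike sigpool_reserve_scratch(), it synchronizes RCU per CPU instead of batching the frees, and all names here are invented:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/topology.h>

static DEFINE_PER_CPU(void __rcu *, my_scratch);
static DEFINE_MUTEX(my_scratch_lock);

/* Grow every CPU's scratch buffer to `size`; readers deref under RCU. */
static int my_reserve_scratch(size_t size)
{
	int cpu, err = 0;

	mutex_lock(&my_scratch_lock);
	for_each_possible_cpu(cpu) {
		void *scratch, *old;

		scratch = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
		if (!scratch) {
			err = -ENOMEM;
			break;
		}
		old = rcu_replace_pointer(per_cpu(my_scratch, cpu), scratch,
					  lockdep_is_held(&my_scratch_lock));
		if (old) {
			synchronize_rcu();	/* wait out readers of old */
			kfree(old);
		}
	}
	mutex_unlock(&my_scratch_lock);
	return err;
}
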
/net/rds/
page.c
158 unsigned int cpu; in rds_page_exit() local
160 for_each_possible_cpu(cpu) { in rds_page_exit()
163 rem = &per_cpu(rds_page_remainders, cpu); in rds_page_exit()
164 rdsdebug("cpu %u\n", cpu); in rds_page_exit()
ib_stats.c
91 int cpu; in rds_ib_stats_info_copy() local
96 for_each_online_cpu(cpu) { in rds_ib_stats_info_copy()
97 src = (uint64_t *)&(per_cpu(rds_ib_stats, cpu)); in rds_ib_stats_info_copy()
tcp_stats.c
58 int cpu; in rds_tcp_stats_info_copy() local
63 for_each_online_cpu(cpu) { in rds_tcp_stats_info_copy()
64 src = (uint64_t *)&(per_cpu(rds_tcp_stats, cpu)); in rds_tcp_stats_info_copy()
stats.c
118 int cpu; in rds_stats_info() local
128 for_each_online_cpu(cpu) { in rds_stats_info()
129 src = (uint64_t *)&(per_cpu(rds_stats, cpu)); in rds_stats_info()
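
All three rds stats copiers rely on the same trick: the per-CPU statistics structs contain nothing but u64 counters, so a snapshot can cast each CPU's struct to a flat uint64_t array and sum it element-wise across online CPUs. The pattern, with a two-counter stand-in struct:

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/types.h>

/* Members must all be u64 for the array-summing cast to be valid. */
struct my_statistics {
	u64 s_recv;
	u64 s_send;
};

static DEFINE_PER_CPU(struct my_statistics, my_stats);

static void my_stats_snapshot(struct my_statistics *sum)
{
	size_t nr = sizeof(*sum) / sizeof(u64);
	u64 *dst = (u64 *)sum;
	size_t i;
	int cpu;

	memset(sum, 0, sizeof(*sum));
	for_each_online_cpu(cpu) {
		u64 *src = (u64 *)&per_cpu(my_stats, cpu);

		for (i = 0; i < nr; i++)
			dst[i] += src[i];
	}
}
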
/net/
Kconfig.debug
10 This adds memory and cpu costs.
19 This adds memory and cpu costs.
/net/bpf/
test_run.c
693 if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size) in bpf_prog_test_run_tracing()
763 int cpu = kattr->test.cpu, err = 0; in bpf_prog_test_run_raw_tp() local
776 if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0) in bpf_prog_test_run_raw_tp()
791 cpu == current_cpu) { in bpf_prog_test_run_raw_tp()
793 } else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) { in bpf_prog_test_run_raw_tp()
801 err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp, in bpf_prog_test_run_raw_tp()
1001 kattr->test.cpu || kattr->test.batch_size) in bpf_prog_test_run_skb()
1383 if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size) in bpf_prog_test_run_flow_dissector()
1447 if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size) in bpf_prog_test_run_sk_lookup()
1654 if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size) in bpf_prog_test_run_nf()
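
bpf_prog_test_run_raw_tp() is the one hit here that lets userspace pick the CPU: without BPF_F_TEST_RUN_ON_CPU a nonzero kattr->test.cpu is rejected, an out-of-range or offline CPU fails, and an online remote CPU gets the run via smp_call_function_single(); the other test_run hits simply reject any nonzero cpu field. The control flow reduced to a sketch, with my_run() standing in for __bpf_prog_test_run_raw_tp():

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/types.h>

static void my_run(void *info)
{
	/* would invoke the program; runs with IRQs off on a remote CPU */
}

static int my_test_run_on_cpu(int cpu, bool run_on_cpu_flag)
{
	int current_cpu, err = 0;

	/* Flag not set: only the default cpu value 0 is accepted. */
	if (!run_on_cpu_flag && cpu != 0)
		return -EINVAL;

	preempt_disable();
	current_cpu = smp_processor_id();
	if (!run_on_cpu_flag || cpu == current_cpu) {
		my_run(NULL);			/* already on the right CPU */
	} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		err = -ENXIO;			/* invalid or offline CPU */
	} else {
		/* wait == 1: block until the remote CPU has run it */
		err = smp_call_function_single(cpu, my_run, NULL, 1);
	}
	preempt_enable();

	return err;
}
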
/net/mac80211/
sta_info.c
2476 int cpu; in sta_get_last_rx_stats() local
2489 for_each_possible_cpu(cpu) { in sta_get_last_rx_stats()
2592 int cpu; in sta_set_tidstats() local
2610 cpu); in sta_set_tidstats()
2792 cpu); in sta_set_link_sinfo()
2808 cpu); in sta_set_link_sinfo()
2846 for_each_possible_cpu(cpu) { in sta_set_link_sinfo()
2850 cpu); in sta_set_link_sinfo()
2965 int i, ac, cpu, link_id; in sta_set_sinfo() local
3022 cpu); in sta_set_sinfo()
[all …]
/net/sched/
cls_basic.c
275 int cpu; in basic_dump() local
290 for_each_possible_cpu(cpu) { in basic_dump()
291 struct tc_basic_pcnt *pf = per_cpu_ptr(f->pf, cpu); in basic_dump()
