Lines matching refs: cpu (net/core/dev.c)

2690 int cpu, u16 offset, u16 count)  in remove_xps_queue_cpu()  argument
2696 for (tci = cpu * num_tc; num_tc--; tci++) { in remove_xps_queue_cpu()
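The tci arithmetic above works because XPS stores its map as one flat
array with num_tc consecutive entries per CPU, so a CPU's slice starts
at tci = cpu * num_tc. A minimal standalone sketch of that indexing,
with made-up cpu and num_tc values:

    /* Sketch of the XPS index math in remove_xps_queue_cpu():
     * num_tc entries per CPU, laid out back to back. */
    #include <stdio.h>

    int main(void)
    {
        int num_tc = 3, cpu = 2, tc;

        for (tc = 0; tc < num_tc; tc++)
            printf("cpu %d, tc %d -> tci %d\n", cpu, tc, cpu * num_tc + tc);
        return 0; /* prints tci 6, 7, 8 */
    }
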
3344 int cpu, count = 0; in netif_get_num_default_rss_queues() local
3350 for_each_cpu(cpu, cpus) { in netif_get_num_default_rss_queues()
3352 cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu)); in netif_get_num_default_rss_queues()
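The loop in netif_get_num_default_rss_queues() counts one CPU per
physical core: each visited CPU knocks its hyperthread siblings out of
the working mask via cpumask_andnot(). A userspace sketch of the same
pruning, with a hypothetical core_id[] table standing in for
topology_sibling_cpumask():

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_CPUS 8

    int main(void)
    {
        /* Hypothetical topology: CPUs i and i + 4 share a core. */
        int core_id[NR_CPUS] = { 0, 1, 2, 3, 0, 1, 2, 3 };
        bool in_set[NR_CPUS];
        int cpu, sib, count = 0;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
            in_set[cpu] = true; /* start from "all online CPUs" */

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
            if (!in_set[cpu])
                continue;
            count++;
            /* cpumask_andnot() step: drop this CPU's siblings. */
            for (sib = 0; sib < NR_CPUS; sib++)
                if (core_id[sib] == core_id[cpu])
                    in_set[sib] = false;
        }
        printf("physical cores: %d\n", count); /* prints 4 */
        return 0;
    }
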
4696 int cpu = smp_processor_id(); /* ok because BHs are off */ in __dev_queue_xmit() local
4701 if (READ_ONCE(txq->xmit_lock_owner) != cpu) { in __dev_queue_xmit()
4709 HARD_TX_LOCK(dev, txq, cpu); in __dev_queue_xmit()
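The xmit_lock_owner check above is a recursion guard: the owning CPU is
recorded when the queue lock is taken, so a transmit that re-enters
__dev_queue_xmit() on the same CPU is detected (and the packet dropped)
instead of self-deadlocking on the spinlock. A minimal pthread sketch
of the idea, not the kernel code (compile with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    struct txq {
        pthread_mutex_t lock;
        int xmit_lock_owner; /* -1 when unlocked */
    };

    /* HARD_TX_LOCK analogue: refuse instead of deadlocking when the
     * calling CPU already owns the lock. */
    static int hard_tx_trylock(struct txq *q, int cpu)
    {
        if (q->xmit_lock_owner == cpu)
            return 0; /* recursion: xmit from our own xmit path */
        pthread_mutex_lock(&q->lock);
        q->xmit_lock_owner = cpu; /* kernel uses WRITE_ONCE() here */
        return 1;
    }

    static void hard_tx_unlock(struct txq *q)
    {
        q->xmit_lock_owner = -1;
        pthread_mutex_unlock(&q->lock);
    }

    int main(void)
    {
        struct txq q = { PTHREAD_MUTEX_INITIALIZER, -1 };
        int cpu = 0; /* stand-in for smp_processor_id() */

        if (hard_tx_trylock(&q, cpu)) {
            /* A nested transmit on this CPU is now refused: */
            printf("nested attempt ok? %d\n", hard_tx_trylock(&q, cpu));
            hard_tx_unlock(&q);
        }
        return 0;
    }
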
4882 WRITE_ONCE(rflow->cpu, next_cpu); in set_rps_cpu()
4898 int cpu = -1; in get_rps_cpu() local
4946 tcpu = rflow->cpu; in get_rps_cpu()
4969 cpu = tcpu; in get_rps_cpu()
4979 cpu = tcpu; in get_rps_cpu()
4985 return cpu; in get_rps_cpu()
5008 unsigned int cpu; in rps_may_expire_flow() local
5014 cpu = READ_ONCE(rflow->cpu); in rps_may_expire_flow()
5015 if (READ_ONCE(rflow->filter) == filter_id && cpu < nr_cpu_ids && in rps_may_expire_flow()
5016 ((int)(READ_ONCE(per_cpu(softnet_data, cpu).input_queue_head) - in rps_may_expire_flow()
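The expiry test above relies on free-running queue counters: a flow's
steering entry becomes reclaimable once the backlog head counter on its
CPU has advanced far enough past the tail recorded when the flow last
enqueued, and the signed cast keeps the comparison correct across
32-bit wraparound. A sketch of that staleness test, with a hypothetical
flow_cnt threshold in place of the kernel's 10 * flow_table->mask:

    #include <stdbool.h>
    #include <stdio.h>

    static bool flow_expired(unsigned int queue_head,
                             unsigned int last_qtail,
                             unsigned int flow_cnt)
    {
        /* Signed difference of unsigned counters: still reads as
         * "ahead" even after queue_head wraps past UINT_MAX. */
        return (int)(queue_head - last_qtail) >= (int)flow_cnt;
    }

    int main(void)
    {
        /* Head wrapped: 5 - 0xFFFFFFF0 == 21 as a signed int. */
        printf("%d\n", flow_expired(5u, 0xFFFFFFF0u, 16)); /* 1 */
        printf("%d\n", flow_expired(5u, 0xFFFFFFF0u, 64)); /* 0 */
        return 0;
    }
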
5084 void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu) in kick_defer_list_purge() argument
5097 smp_call_function_single_async(cpu, &sd->defer_csd); in kick_defer_list_purge()
5146 static int enqueue_to_backlog(struct sk_buff *skb, int cpu, in enqueue_to_backlog() argument
5161 sd = &per_cpu(softnet_data, cpu); in enqueue_to_backlog()
5408 int cpu, rc; in generic_xdp_tx() local
5411 cpu = smp_processor_id(); in generic_xdp_tx()
5412 HARD_TX_LOCK(dev, txq, cpu); in generic_xdp_tx()
5475 int cpu; in netif_rx_internal() local
5479 cpu = get_rps_cpu(skb->dev, skb, &rflow); in netif_rx_internal()
5480 if (cpu < 0) in netif_rx_internal()
5481 cpu = smp_processor_id(); in netif_rx_internal()
5483 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); in netif_rx_internal()
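netif_rx_internal() shows the RPS dispatch pattern: ask the steering
table for a target CPU and fall back to the local CPU when steering is
off or finds no match (get_rps_cpu() returns -1). A userspace sketch of
that selection, where a modulo stands in for the kernel's
reciprocal_scale() and the map contents are invented:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    /* Hypothetical per-queue steering map: flow hash -> CPUs. */
    static const int rps_map[] = { 2, 3 };
    static const int rps_map_len = 2;

    static int get_rps_cpu_sketch(unsigned int flow_hash)
    {
        if (rps_map_len == 0)
            return -1; /* steering not configured */
        return rps_map[flow_hash % rps_map_len];
    }

    int main(void)
    {
        unsigned int hash = 0xdeadbeef; /* stand-in for skb->hash */
        int cpu = get_rps_cpu_sketch(hash);

        if (cpu < 0)
            cpu = sched_getcpu(); /* smp_processor_id() analogue */
        printf("enqueue to backlog of CPU %d\n", cpu);
        return 0;
    }
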
6169 int cpu = get_rps_cpu(skb->dev, skb, &rflow); in netif_receive_skb_internal() local
6171 if (cpu >= 0) { in netif_receive_skb_internal()
6172 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); in netif_receive_skb_internal()
6202 int cpu = get_rps_cpu(skb->dev, skb, &rflow); in netif_receive_skb_list_internal() local
6204 if (cpu >= 0) { in netif_receive_skb_list_internal()
6207 enqueue_to_backlog(skb, cpu, &rflow->last_qtail); in netif_receive_skb_list_internal()
6304 static bool flush_required(int cpu) in flush_required() argument
6307 struct softnet_data *sd = &per_cpu(softnet_data, cpu); in flush_required()
6346 unsigned int cpu; in flush_all_backlogs() local
6356 for_each_online_cpu(cpu) { in flush_all_backlogs()
6357 if (flush_required(cpu)) { in flush_all_backlogs()
6358 INIT_WORK(&ptr->w[cpu], flush_backlog); in flush_all_backlogs()
6359 queue_work_on(cpu, system_highpri_wq, &ptr->w[cpu]); in flush_all_backlogs()
6360 __cpumask_set_cpu(cpu, &ptr->flush_cpus); in flush_all_backlogs()
6368 for_each_cpu(cpu, &ptr->flush_cpus) in flush_all_backlogs()
6369 flush_work(&ptr->w[cpu]); in flush_all_backlogs()
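flush_all_backlogs() queues a work item only on CPUs whose backlog
actually holds packets, records those CPUs in a mask, and then waits on
exactly that set. A pthread analogue of the kick-then-join pattern
(compile with -pthread; flush_required() here is an invented stand-in
for the real queue check):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define NR_CPUS 4

    static void *flush_backlog(void *arg)
    {
        printf("flushing backlog of cpu %d\n", (int)(long)arg);
        return NULL;
    }

    static bool flush_required(int cpu)
    {
        return cpu % 2 == 0; /* invented predicate for the sketch */
    }

    int main(void)
    {
        pthread_t w[NR_CPUS];
        bool kicked[NR_CPUS] = { false }; /* plays ptr->flush_cpus */
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
            if (flush_required(cpu)) {
                /* queue_work_on() analogue */
                pthread_create(&w[cpu], NULL, flush_backlog,
                               (void *)(long)cpu);
                kicked[cpu] = true; /* __cpumask_set_cpu() analogue */
            }
        }
        for (cpu = 0; cpu < NR_CPUS; cpu++)
            if (kicked[cpu])
                pthread_join(w[cpu], NULL); /* flush_work() analogue */
        return 0;
    }
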
6385 if (cpu_online(remsd->cpu)) in net_rps_send_ipi()
6386 smp_call_function_single_async(remsd->cpu, &remsd->csd); in net_rps_send_ipi()
11515 int cpu; in dev_fetch_dstats() local
11517 for_each_possible_cpu(cpu) { in dev_fetch_dstats()
11523 stats = per_cpu_ptr(dstats, cpu); in dev_fetch_dstats()
11679 int cpu; in dev_fetch_sw_netstats() local
11681 for_each_possible_cpu(cpu) { in dev_fetch_sw_netstats()
11686 stats = per_cpu_ptr(netstats, cpu); in dev_fetch_sw_netstats()
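Both fetch helpers above use the classic per-CPU statistics pattern:
writers bump counters that only their own CPU touches, and a reader
walks every possible CPU and sums. A standalone sketch of the
summation (the kernel additionally brackets each read in a
u64_stats_fetch_begin()/retry() loop on 32-bit hosts; the counter
values here are invented):

    #include <stdint.h>
    #include <stdio.h>

    #define NR_CPUS 4

    struct pcpu_sw_netstats {
        uint64_t rx_packets;
        uint64_t rx_bytes;
    };

    int main(void)
    {
        /* Stands in for an alloc_percpu() area written lock-free
         * from the hot path. */
        struct pcpu_sw_netstats pcpu[NR_CPUS] = {
            { 10, 1500 }, { 4, 320 }, { 0, 0 }, { 7, 980 },
        };
        uint64_t rx_packets = 0, rx_bytes = 0;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) { /* for_each_possible_cpu() */
            rx_packets += pcpu[cpu].rx_packets;
            rx_bytes += pcpu[cpu].rx_bytes;
        }
        printf("rx: %llu packets, %llu bytes\n",
               (unsigned long long)rx_packets,
               (unsigned long long)rx_bytes);
        return 0;
    }
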
12427 unsigned int cpu; in dev_cpu_dead() local
12431 cpu = smp_processor_id(); in dev_cpu_dead()
12432 sd = &per_cpu(softnet_data, cpu); in dev_cpu_dead()
12820 static int backlog_napi_should_run(unsigned int cpu) in backlog_napi_should_run() argument
12822 struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu); in backlog_napi_should_run()
12828 static void run_backlog_napi(unsigned int cpu) in run_backlog_napi() argument
12830 struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu); in run_backlog_napi()
12835 static void backlog_napi_setup(unsigned int cpu) in backlog_napi_setup() argument
12837 struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu); in backlog_napi_setup()
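The three callbacks above back the per-CPU backlog NAPI threads: each
thread is bound to one CPU and polls only that CPU's softnet_data, so
the hot path needs no cross-CPU locking. A pthread sketch of that
ownership model (compile with -pthread; the pending counter and the
exit condition are simplifications, since the real threads park rather
than exit):

    #include <pthread.h>
    #include <stdio.h>

    #define NR_CPUS 2

    struct softnet_like {
        int cpu;
        int pending; /* stand-in for the backlog queue length */
    };

    static struct softnet_like sd[NR_CPUS];

    /* backlog_napi_should_run() analogue. */
    static int should_run(struct softnet_like *s)
    {
        return s->pending > 0;
    }

    static void *worker(void *arg)
    {
        struct softnet_like *s = arg;

        /* Each worker owns its softnet_like exclusively, so no lock
         * is needed; this mirrors the per-CPU data discipline. */
        while (should_run(s)) {
            s->pending--; /* run_backlog_napi() analogue: poll once */
            printf("cpu %d polled, %d left\n", s->cpu, s->pending);
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t t[NR_CPUS];
        int i;

        for (i = 0; i < NR_CPUS; i++) { /* backlog_napi_setup() analogue */
            sd[i] = (struct softnet_like){ .cpu = i, .pending = 3 };
            pthread_create(&t[i], NULL, worker, &sd[i]);
        }
        for (i = 0; i < NR_CPUS; i++)
            pthread_join(t[i], NULL);
        return 0;
    }
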
12896 sd->cpu = i; in net_dev_init()