Lines matching refs: sd (struct softnet_data *) in net/core/dev.c. The trailing 'argument' / 'local' tags mark whether sd is a function parameter or a local variable at that line.

231 static inline void backlog_lock_irq_save(struct softnet_data *sd,  in backlog_lock_irq_save()  argument
235 spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags); in backlog_lock_irq_save()
240 static inline void backlog_lock_irq_disable(struct softnet_data *sd) in backlog_lock_irq_disable() argument
243 spin_lock_irq(&sd->input_pkt_queue.lock); in backlog_lock_irq_disable()
248 static inline void backlog_unlock_irq_restore(struct softnet_data *sd, in backlog_unlock_irq_restore() argument
252 spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags); in backlog_unlock_irq_restore()
257 static inline void backlog_unlock_irq_enable(struct softnet_data *sd) in backlog_unlock_irq_enable() argument
260 spin_unlock_irq(&sd->input_pkt_queue.lock); in backlog_unlock_irq_enable()
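The four helpers above (lines 231-260) wrap the per-CPU input_pkt_queue lock so callers pick between the flags-saving variant (spin_lock_irqsave) and the plain IRQ-disabling variant (spin_lock_irq) through one paired lock/unlock interface. There is no userspace equivalent of disabling interrupts, so the following is only a minimal sketch of the paired-helper shape around a queue, using a pthread spinlock; the struct and names are illustrative, not the kernel's:

/* build: gcc -pthread backlog_lock_sketch.c */
#include <pthread.h>
#include <stdio.h>

struct backlog {                    /* stand-in for softnet_data's queue */
	pthread_spinlock_t lock;    /* models input_pkt_queue.lock */
	int qlen;
};

/* Paired helpers: every lock has a matching unlock of the same flavor. */
static inline void backlog_lock(struct backlog *b)
{
	pthread_spin_lock(&b->lock);
}

static inline void backlog_unlock(struct backlog *b)
{
	pthread_spin_unlock(&b->lock);
}

int main(void)
{
	struct backlog b = { .qlen = 0 };

	pthread_spin_init(&b.lock, PTHREAD_PROCESS_PRIVATE);
	backlog_lock(&b);
	b.qlen++;                   /* queue manipulation happens under the lock */
	backlog_unlock(&b);
	printf("qlen=%d\n", b.qlen);
	pthread_spin_destroy(&b.lock);
	return 0;
}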
3362 struct softnet_data *sd; in __netif_reschedule() local
3366 sd = this_cpu_ptr(&softnet_data); in __netif_reschedule()
3368 *sd->output_queue_tailp = q; in __netif_reschedule()
3369 sd->output_queue_tailp = &q->next_sched; in __netif_reschedule()
4792 static inline void ____napi_schedule(struct softnet_data *sd, in ____napi_schedule() argument
4819 list_add_tail(&napi->poll_list, &sd->poll_list); in ____napi_schedule()
4824 if (!sd->in_net_rx_action) in ____napi_schedule()
5031 struct softnet_data *sd = data; in rps_trigger_softirq() local
5033 ____napi_schedule(sd, &sd->backlog); in rps_trigger_softirq()
5035 WRITE_ONCE(sd->received_rps, sd->received_rps + 1); in rps_trigger_softirq()
5043 struct softnet_data *sd = data; in trigger_rx_softirq() local
5046 smp_store_release(&sd->defer_ipi_scheduled, 0); in trigger_rx_softirq()
5059 static void napi_schedule_rps(struct softnet_data *sd) in napi_schedule_rps() argument
5064 if (sd != mysd) { in napi_schedule_rps()
5066 __napi_schedule_irqoff(&sd->backlog); in napi_schedule_rps()
5070 sd->rps_ipi_next = mysd->rps_ipi_list; in napi_schedule_rps()
5071 mysd->rps_ipi_list = sd; in napi_schedule_rps()
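napi_schedule_rps() either schedules the local backlog directly or, for a remote CPU, chains that CPU's softnet_data onto the local rps_ipi_list (lines 5070-5071) so everything can be kicked in one later pass. A rough userspace sketch of that "collect remote targets on a local list, flush once" idea; the struct and the flush step are invented for illustration, the printf stands in for the cross-CPU notification:

#include <stdio.h>
#include <stddef.h>

struct cpu_work {
	int cpu;
	struct cpu_work *next;    /* models softnet_data::rps_ipi_next */
};

static struct cpu_work *pending;  /* models the local CPU's rps_ipi_list */

static void defer_kick(struct cpu_work *remote)
{
	remote->next = pending;   /* push the remote target onto the local list */
	pending = remote;
}

static void flush_kicks(void)     /* notify everything collected, in one pass */
{
	while (pending) {
		struct cpu_work *w = pending;

		pending = w->next;
		printf("kick cpu %d\n", w->cpu);  /* stands in for the IPI */
	}
}

int main(void)
{
	struct cpu_work a = { .cpu = 1 }, b = { .cpu = 3 };

	defer_kick(&a);
	defer_kick(&b);
	flush_kicks();
	return 0;
}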
5084 void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu) in kick_defer_list_purge() argument
5089 backlog_lock_irq_save(sd, &flags); in kick_defer_list_purge()
5091 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) in kick_defer_list_purge()
5092 __napi_schedule_irqoff(&sd->backlog); in kick_defer_list_purge()
5094 backlog_unlock_irq_restore(sd, &flags); in kick_defer_list_purge()
5096 } else if (!cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) { in kick_defer_list_purge()
5097 smp_call_function_single_async(cpu, &sd->defer_csd); in kick_defer_list_purge()
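The cmpxchg() on defer_ipi_scheduled at line 5096 is a "schedule at most once" guard: only the caller that flips 0 to 1 sends the IPI, and the handler re-arms the guard by clearing the flag (line 5046). A small C11 atomics sketch of the same guard, with the IPI replaced by a printf; the flag and function names here are illustrative:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int ipi_scheduled;       /* models sd->defer_ipi_scheduled */

static void kick(void)
{
	int expected = 0;

	/* Only the thread that wins the 0 -> 1 transition sends the kick. */
	if (atomic_compare_exchange_strong(&ipi_scheduled, &expected, 1))
		printf("IPI sent\n");
	else
		printf("IPI already pending, skipped\n");
}

static void ipi_handler(void)
{
	/* The handler clears the flag, like smp_store_release(..., 0). */
	atomic_store_explicit(&ipi_scheduled, 0, memory_order_release);
}

int main(void)
{
	kick();         /* sends */
	kick();         /* skipped: still pending */
	ipi_handler();  /* clears the flag */
	kick();         /* sends again */
	return 0;
}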
5109 struct softnet_data *sd; in skb_flow_limit() local
5115 sd = this_cpu_ptr(&softnet_data); in skb_flow_limit()
5118 fl = rcu_dereference(sd->flow_limit); in skb_flow_limit()
5150 struct softnet_data *sd; in enqueue_to_backlog() local
5161 sd = &per_cpu(softnet_data, cpu); in enqueue_to_backlog()
5163 qlen = skb_queue_len_lockless(&sd->input_pkt_queue); in enqueue_to_backlog()
5167 backlog_lock_irq_save(sd, &flags); in enqueue_to_backlog()
5168 qlen = skb_queue_len(&sd->input_pkt_queue); in enqueue_to_backlog()
5175 &sd->backlog.state)) in enqueue_to_backlog()
5176 napi_schedule_rps(sd); in enqueue_to_backlog()
5178 __skb_queue_tail(&sd->input_pkt_queue, skb); in enqueue_to_backlog()
5179 tail = rps_input_queue_tail_incr(sd); in enqueue_to_backlog()
5180 backlog_unlock_irq_restore(sd, &flags); in enqueue_to_backlog()
5187 backlog_unlock_irq_restore(sd, &flags); in enqueue_to_backlog()
5190 atomic_inc(&sd->dropped); in enqueue_to_backlog()
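enqueue_to_backlog() first reads the queue length locklessly (line 5163) as a cheap early drop test, then re-checks it under the backlog lock before queuing the skb or bumping the drop counter. A compact userspace sketch of that check-then-recheck-under-lock pattern for a bounded queue; the limit, struct layout, and counters are made up for illustration:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define QUEUE_LIMIT 4

struct backlog {
	pthread_mutex_t lock;
	int qlen;                /* protected by lock */
	atomic_int dropped;      /* like sd->dropped, bumped outside the lock */
};

static bool enqueue(struct backlog *b)
{
	/* Racy early test: if clearly over the limit, drop without locking. */
	if (__atomic_load_n(&b->qlen, __ATOMIC_RELAXED) >= QUEUE_LIMIT)
		goto drop;

	pthread_mutex_lock(&b->lock);
	if (b->qlen < QUEUE_LIMIT) {    /* authoritative re-check under the lock */
		b->qlen++;
		pthread_mutex_unlock(&b->lock);
		return true;
	}
	pthread_mutex_unlock(&b->lock);
drop:
	atomic_fetch_add(&b->dropped, 1);
	return false;
}

int main(void)
{
	struct backlog b = { .lock = PTHREAD_MUTEX_INITIALIZER };

	for (int i = 0; i < 6; i++)
		printf("enqueue %d: %s\n", i, enqueue(&b) ? "ok" : "dropped");
	printf("dropped=%d\n", atomic_load(&b.dropped));
	return 0;
}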
5554 struct softnet_data *sd = this_cpu_ptr(&softnet_data); in net_tx_action() local
5556 if (sd->completion_queue) { in net_tx_action()
5560 clist = sd->completion_queue; in net_tx_action()
5561 sd->completion_queue = NULL; in net_tx_action()
5584 if (sd->output_queue) { in net_tx_action()
5588 head = sd->output_queue; in net_tx_action()
5589 sd->output_queue = NULL; in net_tx_action()
5590 sd->output_queue_tailp = &sd->output_queue; in net_tx_action()
5632 xfrm_dev_backlog(sd); in net_tx_action()
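net_tx_action() detaches the whole completion_queue and output_queue inside a brief critical section (lines 5560-5561 and 5588-5590) and then walks the detached lists with no lock held. A userspace sketch of that detach-then-process pattern; the node type and the "work" done on each node are placeholders:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *next;
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *completion_queue;     /* models sd->completion_queue */

static void defer_free(struct node *n)    /* producer side */
{
	pthread_mutex_lock(&queue_lock);
	n->next = completion_queue;
	completion_queue = n;
	pthread_mutex_unlock(&queue_lock);
}

static void tx_action(void)               /* consumer side */
{
	struct node *clist;

	/* Grab the entire list in one short critical section ... */
	pthread_mutex_lock(&queue_lock);
	clist = completion_queue;
	completion_queue = NULL;
	pthread_mutex_unlock(&queue_lock);

	/* ... then do the slow work (freeing) with no lock held. */
	while (clist) {
		struct node *n = clist;

		clist = n->next;
		printf("freeing node %d\n", n->id);
		free(n);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->id = i;
		defer_free(n);
	}
	tx_action();
	return 0;
}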
6274 struct softnet_data *sd; in flush_backlog() local
6278 sd = this_cpu_ptr(&softnet_data); in flush_backlog()
6280 backlog_lock_irq_disable(sd); in flush_backlog()
6281 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { in flush_backlog()
6283 __skb_unlink(skb, &sd->input_pkt_queue); in flush_backlog()
6285 rps_input_queue_head_incr(sd); in flush_backlog()
6288 backlog_unlock_irq_enable(sd); in flush_backlog()
6291 skb_queue_walk_safe(&sd->process_queue, skb, tmp) { in flush_backlog()
6293 __skb_unlink(skb, &sd->process_queue); in flush_backlog()
6295 rps_input_queue_head_incr(sd); in flush_backlog()
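flush_backlog() walks both per-CPU queues (lines 6281-6295) and unlinks every skb whose device is going away, accounting each removal via rps_input_queue_head_incr(). A toy sketch of that filter-in-place walk, with integers standing in for skbs and their device index; locking is omitted here, whereas the kernel does the first walk under the backlog lock:

#include <stdio.h>

#define QSZ 8

struct queue {
	int buf[QSZ];    /* each entry stands in for an skb's device index */
	int len;
};

/* Drop every entry that matches 'dev', like __skb_unlink() in the walk. */
static int flush_dev(struct queue *q, int dev)
{
	int removed = 0, out = 0;

	for (int i = 0; i < q->len; i++) {
		if (q->buf[i] == dev)
			removed++;          /* "freed" packet for the dead device */
		else
			q->buf[out++] = q->buf[i];
	}
	q->len = out;
	return removed;
}

int main(void)
{
	struct queue q = { .buf = { 1, 2, 1, 3 }, .len = 4 };
	int removed = flush_dev(&q, 1);

	printf("flushed %d packets for dev 1, %d left\n", removed, q.len);
	return 0;
}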
6307 struct softnet_data *sd = &per_cpu(softnet_data, cpu); in flush_required() local
6310 backlog_lock_irq_disable(sd); in flush_required()
6315 do_flush = !skb_queue_empty(&sd->input_pkt_queue) || in flush_required()
6316 !skb_queue_empty_lockless(&sd->process_queue); in flush_required()
6317 backlog_unlock_irq_enable(sd); in flush_required()
6396 static void net_rps_action_and_irq_enable(struct softnet_data *sd) in net_rps_action_and_irq_enable() argument
6399 struct softnet_data *remsd = sd->rps_ipi_list; in net_rps_action_and_irq_enable()
6402 sd->rps_ipi_list = NULL; in net_rps_action_and_irq_enable()
6413 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) in sd_has_rps_ipi_waiting() argument
6416 return !use_backlog_threads() && sd->rps_ipi_list; in sd_has_rps_ipi_waiting()
6424 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); in process_backlog() local
6431 if (sd_has_rps_ipi_waiting(sd)) { in process_backlog()
6433 net_rps_action_and_irq_enable(sd); in process_backlog()
6441 while ((skb = __skb_dequeue(&sd->process_queue))) { in process_backlog()
6447 rps_input_queue_head_add(sd, work); in process_backlog()
6455 backlog_lock_irq_disable(sd); in process_backlog()
6456 if (skb_queue_empty(&sd->input_pkt_queue)) { in process_backlog()
6469 skb_queue_splice_tail_init(&sd->input_pkt_queue, in process_backlog()
6470 &sd->process_queue); in process_backlog()
6473 backlog_unlock_irq_enable(sd); in process_backlog()
6477 rps_input_queue_head_add(sd, work); in process_backlog()
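process_backlog() drains its private process_queue with no lock held and only takes the backlog lock to splice everything queued on input_pkt_queue over in one shot (lines 6469-6470), which keeps both the producer's and the consumer's critical sections short. A rough userspace sketch of that double-queue splice, with fixed-size arrays standing in for sk_buff_head; all names are illustrative:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define QSZ 16

struct queue {
	int buf[QSZ];
	int len;
};

static pthread_mutex_t input_lock = PTHREAD_MUTEX_INITIALIZER;
static struct queue input_q;      /* models sd->input_pkt_queue (locked)        */
static struct queue process_q;    /* models sd->process_queue (consumer-private) */

static void producer_enqueue(int pkt)
{
	pthread_mutex_lock(&input_lock);
	if (input_q.len < QSZ)
		input_q.buf[input_q.len++] = pkt;
	pthread_mutex_unlock(&input_lock);
}

static void consumer_poll(void)
{
	/* Drain the private process queue with no lock held. */
	for (int i = 0; i < process_q.len; i++)
		printf("processed packet %d\n", process_q.buf[i]);
	process_q.len = 0;

	/* Take the lock only to splice everything queued so far in one shot. */
	pthread_mutex_lock(&input_lock);
	memcpy(process_q.buf, input_q.buf, input_q.len * sizeof(int));
	process_q.len = input_q.len;
	input_q.len = 0;
	pthread_mutex_unlock(&input_lock);
}

int main(void)
{
	producer_enqueue(1);
	producer_enqueue(2);
	consumer_poll();   /* splices packets 1 and 2 into the process queue */
	consumer_poll();   /* processes 1 and 2; nothing new left to splice  */
	return 0;
}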
6619 static void skb_defer_free_flush(struct softnet_data *sd) in skb_defer_free_flush() argument
6624 if (!READ_ONCE(sd->defer_list)) in skb_defer_free_flush()
6627 spin_lock(&sd->defer_lock); in skb_defer_free_flush()
6628 skb = sd->defer_list; in skb_defer_free_flush()
6629 sd->defer_list = NULL; in skb_defer_free_flush()
6630 sd->defer_count = 0; in skb_defer_free_flush()
6631 spin_unlock(&sd->defer_lock); in skb_defer_free_flush()
7599 struct softnet_data *sd; in napi_threaded_poll_loop() local
7609 sd = this_cpu_ptr(&softnet_data); in napi_threaded_poll_loop()
7610 sd->in_napi_threaded_poll = true; in napi_threaded_poll_loop()
7616 sd->in_napi_threaded_poll = false; in napi_threaded_poll_loop()
7619 if (sd_has_rps_ipi_waiting(sd)) { in napi_threaded_poll_loop()
7621 net_rps_action_and_irq_enable(sd); in napi_threaded_poll_loop()
7623 skb_defer_free_flush(sd); in napi_threaded_poll_loop()
7647 struct softnet_data *sd = this_cpu_ptr(&softnet_data); in net_rx_action() local
7657 sd->in_net_rx_action = true; in net_rx_action()
7659 list_splice_init(&sd->poll_list, &list); in net_rx_action()
7665 skb_defer_free_flush(sd); in net_rx_action()
7669 sd->in_net_rx_action = false; in net_rx_action()
7675 if (!list_empty(&sd->poll_list)) in net_rx_action()
7677 if (!sd_has_rps_ipi_waiting(sd)) in net_rx_action()
7693 WRITE_ONCE(sd->time_squeeze, sd->time_squeeze + 1); in net_rx_action()
7700 list_splice_tail_init(&sd->poll_list, &list); in net_rx_action()
7702 list_splice(&list, &sd->poll_list); in net_rx_action()
7703 if (!list_empty(&sd->poll_list)) in net_rx_action()
7706 sd->in_net_rx_action = false; in net_rx_action()
7708 net_rps_action_and_irq_enable(sd); in net_rx_action()
12428 struct softnet_data *sd, *oldsd, *remsd = NULL; in dev_cpu_dead() local
12432 sd = &per_cpu(softnet_data, cpu); in dev_cpu_dead()
12436 list_skb = &sd->completion_queue; in dev_cpu_dead()
12445 *sd->output_queue_tailp = oldsd->output_queue; in dev_cpu_dead()
12446 sd->output_queue_tailp = oldsd->output_queue_tailp; in dev_cpu_dead()
12463 ____napi_schedule(sd, napi); in dev_cpu_dead()
12822 struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu); in backlog_napi_should_run() local
12823 struct napi_struct *napi = &sd->backlog; in backlog_napi_should_run()
12830 struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu); in run_backlog_napi() local
12832 napi_threaded_poll_loop(&sd->backlog); in run_backlog_napi()
12837 struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu); in backlog_napi_setup() local
12838 struct napi_struct *napi = &sd->backlog; in backlog_napi_setup()
12885 struct softnet_data *sd = &per_cpu(softnet_data, i); in net_dev_init() local
12887 skb_queue_head_init(&sd->input_pkt_queue); in net_dev_init()
12888 skb_queue_head_init(&sd->process_queue); in net_dev_init()
12890 skb_queue_head_init(&sd->xfrm_backlog); in net_dev_init()
12892 INIT_LIST_HEAD(&sd->poll_list); in net_dev_init()
12893 sd->output_queue_tailp = &sd->output_queue; in net_dev_init()
12895 INIT_CSD(&sd->csd, rps_trigger_softirq, sd); in net_dev_init()
12896 sd->cpu = i; in net_dev_init()
12898 INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd); in net_dev_init()
12899 spin_lock_init(&sd->defer_lock); in net_dev_init()
12901 gro_init(&sd->backlog.gro); in net_dev_init()
12902 sd->backlog.poll = process_backlog; in net_dev_init()
12903 sd->backlog.weight = weight_p; in net_dev_init()
12904 INIT_LIST_HEAD(&sd->backlog.poll_list); in net_dev_init()
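net_dev_init() runs once at boot and walks every CPU, initializing that CPU's softnet_data: the packet queues, poll list, CSDs, and the backlog NAPI's poll callback and weight (lines 12885-12904). A tiny userspace sketch of that "one struct per CPU, initialized in a loop" shape; the struct layout, NCPUS value, and stub poll function are invented, and 64 is only quoted as the kernel's usual weight_p default:

#include <stdio.h>

#define NCPUS 4

struct softnet_like {
	int cpu;
	int input_len;
	int process_len;
	int backlog_weight;       /* models sd->backlog.weight */
	int (*backlog_poll)(struct softnet_like *sd, int budget);
};

static struct softnet_like per_cpu_data[NCPUS];  /* stand-in for per_cpu(softnet_data, i) */

static int process_backlog_stub(struct softnet_like *sd, int budget)
{
	(void)budget;
	return sd->process_len;   /* placeholder for real backlog processing */
}

static void net_dev_init_sketch(void)
{
	for (int i = 0; i < NCPUS; i++) {
		struct softnet_like *sd = &per_cpu_data[i];

		sd->cpu = i;
		sd->input_len = 0;
		sd->process_len = 0;
		sd->backlog_weight = 64;
		sd->backlog_poll = process_backlog_stub;
	}
}

int main(void)
{
	net_dev_init_sketch();
	printf("cpu 2 weight = %d\n", per_cpu_data[2].backlog_weight);
	return 0;
}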