Lines matching refs:napi (NAPI identifier cross-reference; the line numbers and containing functions correspond to net/core/dev.c)
778 struct napi_struct *napi; in napi_by_id() local
780 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node) in napi_by_id()
781 if (napi->napi_id == napi_id) in napi_by_id()
782 return napi; in napi_by_id()
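
These matched lines show only the lookup loop; the surrounding frame is elided by the cross-reference. A minimal reconstruction (not verbatim source; the hash computation is inferred from the insertion site in __napi_hash_add_with_id() at line 6863 below):

    static struct napi_struct *napi_by_id(unsigned int napi_id)
    {
    	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
    	struct napi_struct *napi;

    	/* Caller must hold rcu_read_lock(); entries are published with
    	 * hlist_add_head_rcu() in __napi_hash_add_with_id().
    	 */
    	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
    		if (napi->napi_id == napi_id)
    			return napi;

    	return NULL;
    }
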
791 struct napi_struct *napi; in netdev_napi_by_id() local
793 napi = napi_by_id(napi_id); in netdev_napi_by_id()
794 if (!napi) in netdev_napi_by_id()
797 if (WARN_ON_ONCE(!napi->dev)) in netdev_napi_by_id()
799 if (!net_eq(net, dev_net(napi->dev))) in netdev_napi_by_id()
802 return napi; in netdev_napi_by_id()
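
netdev_napi_by_id() narrows the global lookup to one network namespace. Filling in the elided early returns, the shape is plausibly:

    struct napi_struct *netdev_napi_by_id(struct net *net, unsigned int napi_id)
    {
    	struct napi_struct *napi;

    	napi = napi_by_id(napi_id);
    	if (!napi)
    		return NULL;

    	/* A hashed napi should always have a device attached. */
    	if (WARN_ON_ONCE(!napi->dev))
    		return NULL;
    	if (!net_eq(net, dev_net(napi->dev)))
    		return NULL;

    	return napi;
    }
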
819 struct napi_struct *napi; in netdev_napi_by_id_lock() local
823 napi = netdev_napi_by_id(net, napi_id); in netdev_napi_by_id_lock()
824 if (!napi || READ_ONCE(napi->dev->reg_state) != NETREG_REGISTERED) { in netdev_napi_by_id_lock()
829 dev = napi->dev; in netdev_napi_by_id_lock()
838 napi = netdev_napi_by_id(net, napi_id); in netdev_napi_by_id_lock()
839 if (napi && napi->dev != dev) in netdev_napi_by_id_lock()
840 napi = NULL; in netdev_napi_by_id_lock()
843 if (!napi) in netdev_napi_by_id_lock()
845 return napi; in netdev_napi_by_id_lock()
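
The gap between lines 824 and 838 is the interesting part of netdev_napi_by_id_lock(): pin the device, leave the RCU section, take the (sleeping) per-device instance lock, then repeat the lookup and confirm the id still maps to the same device, since napi ids can be recycled while we slept. A sketch of that pattern, with error paths and the exact ref/lock helpers abbreviated (upstream wraps the lock+put+recheck step in a helper):

    rcu_read_lock();
    napi = netdev_napi_by_id(net, napi_id);
    if (!napi || READ_ONCE(napi->dev->reg_state) != NETREG_REGISTERED) {
    	rcu_read_unlock();
    	return NULL;
    }
    dev = napi->dev;
    dev_hold(dev);				/* pin dev across the unlock */
    rcu_read_unlock();

    netdev_lock(dev);			/* instance lock; may sleep */
    dev_put(dev);

    rcu_read_lock();
    napi = netdev_napi_by_id(net, napi_id);	/* re-validate under the lock */
    if (napi && napi->dev != dev)
    	napi = NULL;			/* id was recycled meanwhile */
    rcu_read_unlock();

    if (!napi)
    	netdev_unlock(dev);
    return napi;
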
1024 struct napi_struct *napi; in dev_get_by_napi_id() local
1031 napi = napi_by_id(napi_id); in dev_get_by_napi_id()
1033 return napi ? napi->dev : NULL; in dev_get_by_napi_id()
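
dev_get_by_napi_id() is the RCU-only variant: no reference is taken, so the caller must hold rcu_read_lock() and use the returned device only inside that section. The elided body plausibly guards the reserved id range:

    struct net_device *dev_get_by_napi_id(unsigned int napi_id)
    {
    	struct napi_struct *napi;

    	WARN_ON_ONCE(!rcu_read_lock_held());

    	/* ids below MIN_NAPI_ID are reserved (skb->sender_cpu reuse) */
    	if (napi_id < MIN_NAPI_ID)
    		return NULL;

    	napi = napi_by_id(napi_id);

    	return napi ? napi->dev : NULL;
    }
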
4793 struct napi_struct *napi) in ____napi_schedule() argument
4799 if (test_bit(NAPI_STATE_THREADED, &napi->state)) { in ____napi_schedule()
4806 thread = READ_ONCE(napi->thread); in ____napi_schedule()
4811 set_bit(NAPI_STATE_SCHED_THREADED, &napi->state); in ____napi_schedule()
4818 DEBUG_NET_WARN_ON_ONCE(!list_empty(&napi->poll_list)); in ____napi_schedule()
4819 list_add_tail(&napi->poll_list, &sd->poll_list); in ____napi_schedule()
4820 WRITE_ONCE(napi->list_owner, smp_processor_id()); in ____napi_schedule()
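
____napi_schedule() picks between the two delivery paths: wake the per-napi kthread when NAPI_STATE_THREADED is set, otherwise queue the napi on this CPU's softnet poll_list and make sure NET_RX_SOFTIRQ runs. A condensed sketch of the control flow (the backlog-thread special case between lines 4806 and 4811 is omitted):

    if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
    	/* READ_ONCE() pairs with the barrier before napi->thread is
    	 * published; only wake a fully initialized thread.
    	 */
    	thread = READ_ONCE(napi->thread);
    	if (thread) {
    		set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
    		wake_up_process(thread);
    		return;
    	}
    }

    DEBUG_NET_WARN_ON_ONCE(!list_empty(&napi->poll_list));
    list_add_tail(&napi->poll_list, &sd->poll_list);
    WRITE_ONCE(napi->list_owner, smp_processor_id());
    /* Raise the softirq only if we are not already in net_rx_action(). */
    if (!sd->in_net_rx_action)
    	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
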
6422 static int process_backlog(struct napi_struct *napi, int quota) in process_backlog() argument
6424 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); in process_backlog()
6436 napi->weight = READ_ONCE(net_hotdata.dev_rx_weight); in process_backlog()
6465 napi->state &= NAPIF_STATE_THREADED; in process_backlog()
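
Line 6465 is the documented fast-path inline of __napi_complete(): only the owning CPU manipulates the backlog napi, and NAPI_STATE_SCHED is the only other flag that can be set on it, so a plain masked store (preserving just the THREADED bit) can replace clear_bit() and needs no smp_mb(). In context, roughly:

    /* Inline of __napi_complete(): this CPU owns the backlog napi and
     * SCHED is the only flag that can be set besides THREADED, so a
     * plain write is enough -- no atomic op, no memory barrier.
     */
    napi->state &= NAPIF_STATE_THREADED;
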
6642 static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule) in __busy_poll_stop() argument
6645 gro_normal_list(&napi->gro); in __busy_poll_stop()
6646 __napi_schedule(napi); in __busy_poll_stop()
6651 gro_flush_normal(&napi->gro, HZ >= 1000); in __busy_poll_stop()
6653 clear_bit(NAPI_STATE_SCHED, &napi->state); in __busy_poll_stop()
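
__busy_poll_stop() finishes the handoff: if we are not deliberately keeping IRQs suppressed, flush GRO and reschedule normally; otherwise flush (everything when HZ < 1000, only stale packets otherwise) and release SCHED so the armed timer owns the napi. A reconstruction around the matched lines:

    static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
    {
    	if (!skip_schedule) {
    		gro_normal_list(&napi->gro);
    		__napi_schedule(napi);
    		return;
    	}

    	/* Flush too old packets; if HZ < 1000, flush all of them. */
    	gro_flush_normal(&napi->gro, HZ >= 1000);

    	clear_bit(NAPI_STATE_SCHED, &napi->state);
    }
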
6661 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, in busy_poll_stop() argument
6678 clear_bit(NAPI_STATE_MISSED, &napi->state); in busy_poll_stop()
6679 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state); in busy_poll_stop()
6685 napi->defer_hard_irqs_count = napi_get_defer_hard_irqs(napi); in busy_poll_stop()
6686 timeout = napi_get_gro_flush_timeout(napi); in busy_poll_stop()
6687 if (napi->defer_hard_irqs_count && timeout) { in busy_poll_stop()
6688 hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED); in busy_poll_stop()
6696 rc = napi->poll(napi, budget); in busy_poll_stop()
6701 trace_napi_poll(napi, rc, budget); in busy_poll_stop()
6704 __busy_poll_stop(napi, skip_schedule); in busy_poll_stop()
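
busy_poll_stop() hands the napi back from a busy-polling task to normal interrupt-driven operation: clear MISSED and IN_BUSY_POLL, optionally arm the deferral timer when NAPI_F_PREFER_BUSY_POLL is in effect, run one more poll round so the driver can re-enable its IRQ, and finish through __busy_poll_stop() if the budget was exhausted. A sketch of the elided middle (the BPF context setup and the local_bh_disable()/enable() bracketing are omitted):

    clear_bit(NAPI_STATE_MISSED, &napi->state);
    clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);

    if (flags & NAPI_F_PREFER_BUSY_POLL) {
    	napi->defer_hard_irqs_count = napi_get_defer_hard_irqs(napi);
    	timeout = napi_get_gro_flush_timeout(napi);
    	if (napi->defer_hard_irqs_count && timeout) {
    		/* Keep the IRQ masked; napi_watchdog() reschedules later. */
    		hrtimer_start(&napi->timer, ns_to_ktime(timeout),
    			      HRTIMER_MODE_REL_PINNED);
    		skip_schedule = true;
    	}
    }

    rc = napi->poll(napi, budget);	/* lets the driver re-enable IRQs */
    trace_napi_poll(napi, rc, budget);
    netpoll_poll_unlock(have_poll_lock);
    if (rc == budget)			/* budget exhausted: not complete yet */
    	__busy_poll_stop(napi, skip_schedule);
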
6714 int (*napi_poll)(struct napi_struct *napi, int budget); in __napi_busy_loop()
6717 struct napi_struct *napi; in __napi_busy_loop() local
6724 napi = napi_by_id(napi_id); in __napi_busy_loop()
6725 if (!napi) in __napi_busy_loop()
6736 unsigned long val = READ_ONCE(napi->state); in __napi_busy_loop()
6744 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); in __napi_busy_loop()
6747 if (cmpxchg(&napi->state, val, in __napi_busy_loop()
6751 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); in __napi_busy_loop()
6754 have_poll_lock = netpoll_poll_lock(napi); in __napi_busy_loop()
6755 napi_poll = napi->poll; in __napi_busy_loop()
6757 work = napi_poll(napi, budget); in __napi_busy_loop()
6758 trace_napi_poll(napi, work, budget); in __napi_busy_loop()
6759 gro_normal_list(&napi->gro); in __napi_busy_loop()
6762 __NET_ADD_STATS(dev_net(napi->dev), in __napi_busy_loop()
6775 busy_poll_stop(napi, have_poll_lock, flags, budget); in __napi_busy_loop()
6788 busy_poll_stop(napi, have_poll_lock, flags, budget); in __napi_busy_loop()
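
The core of __napi_busy_loop() is taking ownership of the napi without an interrupt: move the state word from idle to SCHED | IN_BUSY_POLL in one cmpxchg(), and when someone else holds the napi, optionally leave PREFER_BUSY_POLL behind instead of spinning. Condensed from the matched lines, with the outer polling loop elided:

    unsigned long val = READ_ONCE(napi->state);

    /* If the napi is owned elsewhere or being disabled, do not poll it. */
    if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
    	       NAPIF_STATE_IN_BUSY_POLL)) {
    	if (flags & NAPI_F_PREFER_BUSY_POLL)
    		set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
    	goto count;			/* record the attempt, try again */
    }

    if (cmpxchg(&napi->state, val,
    	        val | NAPIF_STATE_IN_BUSY_POLL | NAPIF_STATE_SCHED) != val) {
    	if (flags & NAPI_F_PREFER_BUSY_POLL)
    		set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
    	goto count;			/* lost the race */
    }

    have_poll_lock = netpoll_poll_lock(napi);
    napi_poll = napi->poll;
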
6819 struct napi_struct *napi; in napi_suspend_irqs() local
6822 napi = napi_by_id(napi_id); in napi_suspend_irqs()
6823 if (napi) { in napi_suspend_irqs()
6824 unsigned long timeout = napi_get_irq_suspend_timeout(napi); in napi_suspend_irqs()
6827 hrtimer_start(&napi->timer, ns_to_ktime(timeout), in napi_suspend_irqs()
6835 struct napi_struct *napi; in napi_resume_irqs() local
6838 napi = napi_by_id(napi_id); in napi_resume_irqs()
6839 if (napi) { in napi_resume_irqs()
6845 if (napi_get_irq_suspend_timeout(napi)) { in napi_resume_irqs()
6847 napi_schedule(napi); in napi_resume_irqs()
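
napi_suspend_irqs()/napi_resume_irqs() back the epoll-driven IRQ suspension mechanism: suspend arms the napi timer with irq_suspend_timeout as a safety net while the application busy-polls, and resume reschedules the napi when the application is done. Both are plain RCU lookups; resume's elided tail plausibly reads:

    rcu_read_lock();
    napi = napi_by_id(napi_id);
    if (napi) {
    	/* Only reschedule while suspension is still configured; if the
    	 * timeout raced to 0, the already-armed timer still fires and
    	 * napi_watchdog() resumes IRQ processing on its own.
    	 */
    	if (napi_get_irq_suspend_timeout(napi)) {
    		local_bh_disable();
    		napi_schedule(napi);
    		local_bh_enable();
    	}
    }
    rcu_read_unlock();
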
6856 static void __napi_hash_add_with_id(struct napi_struct *napi, in __napi_hash_add_with_id() argument
6859 napi->gro.cached_napi_id = napi_id; in __napi_hash_add_with_id()
6861 WRITE_ONCE(napi->napi_id, napi_id); in __napi_hash_add_with_id()
6862 hlist_add_head_rcu(&napi->napi_hash_node, in __napi_hash_add_with_id()
6863 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]); in __napi_hash_add_with_id()
6866 static void napi_hash_add_with_id(struct napi_struct *napi, in napi_hash_add_with_id() argument
6873 __napi_hash_add_with_id(napi, napi_id); in napi_hash_add_with_id()
6877 static void napi_hash_add(struct napi_struct *napi) in napi_hash_add() argument
6881 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state)) in napi_hash_add()
6892 __napi_hash_add_with_id(napi, napi_gen_id); in napi_hash_add()
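
napi_hash_add() allocates the public id under napi_hash_lock: devices that opted out via NAPI_STATE_NO_BUSY_POLL are skipped, and the generator is bumped until it lands on an unused id at or above MIN_NAPI_ID (smaller values are reserved for skb->sender_cpu use). A reconstruction:

    static void napi_hash_add(struct napi_struct *napi)
    {
    	if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state))
    		return;

    	spin_lock(&napi_hash_lock);

    	/* ids below MIN_NAPI_ID are reserved for sender_cpu use */
    	do {
    		if (unlikely(++napi_gen_id < MIN_NAPI_ID))
    			napi_gen_id = MIN_NAPI_ID;
    	} while (napi_by_id(napi_gen_id));

    	__napi_hash_add_with_id(napi, napi_gen_id);

    	spin_unlock(&napi_hash_lock);
    }
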
6900 static void napi_hash_del(struct napi_struct *napi) in napi_hash_del() argument
6906 hlist_del_init_rcu(&napi->napi_hash_node); in napi_hash_del()
6913 struct napi_struct *napi; in napi_watchdog() local
6915 napi = container_of(timer, struct napi_struct, timer); in napi_watchdog()
6920 if (!napi_disable_pending(napi) && in napi_watchdog()
6921 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) { in napi_watchdog()
6922 clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); in napi_watchdog()
6923 __napi_schedule_irqoff(napi); in napi_watchdog()
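
napi_watchdog() fires when the deferral/gro_flush_timeout timer expires. It is a relaxed napi_schedule_prep(): it deliberately does not set NAPI_STATE_MISSED (there is no device IRQ to replay) and clears PREFER_BUSY_POLL so the softirq path takes over if no busy-poller showed up. Filling in the elided frame:

    static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
    {
    	struct napi_struct *napi;

    	napi = container_of(timer, struct napi_struct, timer);

    	if (!napi_disable_pending(napi) &&
    	    !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) {
    		clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
    		__napi_schedule_irqoff(napi);
    	}

    	return HRTIMER_NORESTART;
    }
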
6929 static void napi_stop_kthread(struct napi_struct *napi) in napi_stop_kthread() argument
6935 val = READ_ONCE(napi->state); in napi_stop_kthread()
6948 if (try_cmpxchg(&napi->state, &val, new)) in napi_stop_kthread()
6956  		if (!test_bit(NAPI_STATE_SCHED_THREADED, &napi->state)) in napi_stop_kthread()
6962 kthread_stop(napi->thread); in napi_stop_kthread()
6963 napi->thread = NULL; in napi_stop_kthread()
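
napi_stop_kthread() has to clear NAPI_STATE_THREADED without racing an in-flight schedule, then wait for the kthread to drop SCHED_THREADED before it can be stopped. Note that test_bit() takes the bit index (NAPI_STATE_*), not the NAPIF_* mask, hence the fix at line 6956 above. A sketch of the two phases around the matched lines (the 20 ms backoff interval is an assumption):

    /* Phase 1: clear THREADED once the kthread owns the napi
     * (SCHED_THREADED set) or the napi is idle (SCHED clear);
     * otherwise back off and retry.
     */
    while (true) {
    	val = READ_ONCE(napi->state);
    	if ((val & NAPIF_STATE_SCHED_THREADED) || !(val & NAPIF_STATE_SCHED)) {
    		new = val & ~NAPIF_STATE_THREADED;
    	} else {
    		msleep(20);
    		continue;
    	}
    	if (try_cmpxchg(&napi->state, &val, new))
    		break;
    }

    /* Phase 2: wait for the kthread to finish its final poll round. */
    while (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state))
    	msleep(20);

    kthread_stop(napi->thread);
    napi->thread = NULL;
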
6966 int napi_set_threaded(struct napi_struct *napi, in napi_set_threaded() argument
6970 if (!napi->thread) { in napi_set_threaded()
6971 int err = napi_kthread_create(napi); in napi_set_threaded()
6978 if (napi->config) in napi_set_threaded()
6979 napi->config->threaded = threaded; in napi_set_threaded()
6987 if (!threaded && napi->thread) { in napi_set_threaded()
6988 napi_stop_kthread(napi); in napi_set_threaded()
6992 assign_bit(NAPI_STATE_THREADED, &napi->state, threaded); in napi_set_threaded()
7001 struct napi_struct *napi; in netif_set_threaded() local
7007 list_for_each_entry(napi, &dev->napi_list, dev_list) { in netif_set_threaded()
7008 if (!napi->thread) { in netif_set_threaded()
7009 err = napi_kthread_create(napi); in netif_set_threaded()
7021 list_for_each_entry(napi, &dev->napi_list, dev_list) in netif_set_threaded()
7022 WARN_ON_ONCE(napi_set_threaded(napi, threaded)); in netif_set_threaded()
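
napi_set_threaded() persists the per-napi choice in napi->config (so it survives reattach) and either stops the kthread or flips NAPI_STATE_THREADED; the flip only takes effect at the next napi_schedule(), so live traffic sees no stall. netif_set_threaded() applies one setting device-wide, pre-creating every kthread first so the final loop cannot fail halfway. A hypothetical driver fragment using the device-wide entry point (the enum value name is an assumption taken from the netdev netlink uAPI):

    netdev_lock(dev);
    err = netif_set_threaded(dev, NETDEV_NAPI_THREADED_ENABLED);
    netdev_unlock(dev);
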
7060 enum netdev_queue_type type, struct napi_struct *napi) in netif_queue_set_napi() argument
7065 if (WARN_ON_ONCE(napi && !napi->dev)) in netif_queue_set_napi()
7072 rxq->napi = napi; in netif_queue_set_napi()
7076 txq->napi = napi; in netif_queue_set_napi()
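
netif_queue_set_napi() records the queue-to-napi mapping that the netdev netlink family reports to user space; the WARN at line 7065 insists the napi is already attached to a device. Typical driver usage, with ring/qid as placeholder driver state:

    /* after netif_napi_add() and IRQ setup, publish the mapping */
    netif_queue_set_napi(dev, qid, NETDEV_QUEUE_TYPE_RX, &ring->napi);
    netif_queue_set_napi(dev, qid, NETDEV_QUEUE_TYPE_TX, &ring->napi);

    /* ... and clear it again before the napi is deleted */
    netif_queue_set_napi(dev, qid, NETDEV_QUEUE_TYPE_RX, NULL);
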
7088 struct napi_struct *napi = in netif_napi_irq_notify() local
7091 struct cpu_rmap *rmap = napi->dev->rx_cpu_rmap; in netif_napi_irq_notify()
7095 if (napi->config && napi->dev->irq_affinity_auto) in netif_napi_irq_notify()
7096 cpumask_copy(&napi->config->affinity_mask, mask); in netif_napi_irq_notify()
7099 if (napi->dev->rx_cpu_rmap_auto) { in netif_napi_irq_notify()
7100 err = cpu_rmap_update(rmap, napi->napi_rmap_idx, mask); in netif_napi_irq_notify()
7102 netdev_warn(napi->dev, "RMAP update failed (%d)\n", in netif_napi_irq_notify()
7111 struct napi_struct *napi = in netif_napi_affinity_release() local
7113 struct cpu_rmap *rmap = napi->dev->rx_cpu_rmap; in netif_napi_affinity_release()
7115 netdev_assert_locked(napi->dev); in netif_napi_affinity_release()
7117 &napi->state)); in netif_napi_affinity_release()
7119 if (!napi->dev->rx_cpu_rmap_auto) in netif_napi_affinity_release()
7121 rmap->obj[napi->napi_rmap_idx] = NULL; in netif_napi_affinity_release()
7122 napi->napi_rmap_idx = -1; in netif_napi_affinity_release()
7184 void netif_napi_set_irq_locked(struct napi_struct *napi, int irq) in netif_napi_set_irq_locked() argument
7188 netdev_assert_locked_or_invisible(napi->dev); in netif_napi_set_irq_locked()
7190 if (napi->irq == irq) in netif_napi_set_irq_locked()
7194 if (test_and_clear_bit(NAPI_STATE_HAS_NOTIFIER, &napi->state)) in netif_napi_set_irq_locked()
7195 irq_set_affinity_notifier(napi->irq, NULL); in netif_napi_set_irq_locked()
7197 napi->irq = irq; in netif_napi_set_irq_locked()
7199 (!napi->dev->rx_cpu_rmap_auto && !napi->dev->irq_affinity_auto)) in netif_napi_set_irq_locked()
7203 if (napi->dev->irq_affinity_auto && WARN_ON_ONCE(!napi->config)) in netif_napi_set_irq_locked()
7207 if (napi->dev->rx_cpu_rmap_auto) { in netif_napi_set_irq_locked()
7208 rc = cpu_rmap_add(napi->dev->rx_cpu_rmap, napi); in netif_napi_set_irq_locked()
7212 cpu_rmap_get(napi->dev->rx_cpu_rmap); in netif_napi_set_irq_locked()
7213 napi->napi_rmap_idx = rc; in netif_napi_set_irq_locked()
7218 napi->notify.notify = netif_napi_irq_notify; in netif_napi_set_irq_locked()
7219 napi->notify.release = netif_napi_affinity_release; in netif_napi_set_irq_locked()
7220 rc = irq_set_affinity_notifier(irq, &napi->notify); in netif_napi_set_irq_locked()
7222 netdev_warn(napi->dev, "Unable to set IRQ notifier (%d)\n", in netif_napi_set_irq_locked()
7227 set_bit(NAPI_STATE_HAS_NOTIFIER, &napi->state); in netif_napi_set_irq_locked()
7232 if (napi->dev->rx_cpu_rmap_auto) { in netif_napi_set_irq_locked()
7233 napi->dev->rx_cpu_rmap->obj[napi->napi_rmap_idx] = NULL; in netif_napi_set_irq_locked()
7234 cpu_rmap_put(napi->dev->rx_cpu_rmap); in netif_napi_set_irq_locked()
7235 napi->napi_rmap_idx = -1; in netif_napi_set_irq_locked()
7238 napi->notify.notify = NULL; in netif_napi_set_irq_locked()
7239 napi->notify.release = NULL; in netif_napi_set_irq_locked()
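
netif_napi_set_irq_locked() ties the napi to its interrupt: it tears down any previous affinity notifier, then, for devices that enabled rx_cpu_rmap_auto or irq_affinity_auto, registers the napi in the ARFS reverse CPU map and installs the netif_napi_irq_notify()/netif_napi_affinity_release() callbacks shown above; the tail lines are the unwind path when irq_set_affinity_notifier() fails. From a driver, the unlocked wrapper is the usual entry point; a hypothetical fragment for a PCI device:

    /* bind each napi to its MSI-X vector so affinity changes are tracked */
    for (i = 0; i < n_queues; i++)
    	netif_napi_set_irq(&rings[i].napi, pci_irq_vector(pdev, i));
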
7278 netif_napi_dev_list_add(struct net_device *dev, struct napi_struct *napi) in netif_napi_dev_list_add() argument
7285 if (napi->config && napi->config->napi_id) in netif_napi_dev_list_add()
7286 new_id = napi->config->napi_id; in netif_napi_dev_list_add()
7301 list_add_rcu(&napi->dev_list, higher); /* adds after higher */ in netif_napi_dev_list_add()
7310 static void napi_get_frags_check(struct napi_struct *napi) in napi_get_frags_check() argument
7315 skb = napi_get_frags(napi); in napi_get_frags_check()
7317 napi_free_frags(napi); in napi_get_frags_check()
7322 struct napi_struct *napi, in netif_napi_add_weight_locked() argument
7327 if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state))) in netif_napi_add_weight_locked()
7330 INIT_LIST_HEAD(&napi->poll_list); in netif_napi_add_weight_locked()
7331 INIT_HLIST_NODE(&napi->napi_hash_node); in netif_napi_add_weight_locked()
7332 hrtimer_setup(&napi->timer, napi_watchdog, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); in netif_napi_add_weight_locked()
7333 gro_init(&napi->gro); in netif_napi_add_weight_locked()
7334 napi->skb = NULL; in netif_napi_add_weight_locked()
7335 napi->poll = poll; in netif_napi_add_weight_locked()
7339 napi->weight = weight; in netif_napi_add_weight_locked()
7340 napi->dev = dev; in netif_napi_add_weight_locked()
7342 napi->poll_owner = -1; in netif_napi_add_weight_locked()
7344 napi->list_owner = -1; in netif_napi_add_weight_locked()
7345 set_bit(NAPI_STATE_SCHED, &napi->state); in netif_napi_add_weight_locked()
7346 set_bit(NAPI_STATE_NPSVC, &napi->state); in netif_napi_add_weight_locked()
7347 netif_napi_dev_list_add(dev, napi); in netif_napi_add_weight_locked()
7352 napi_set_defer_hard_irqs(napi, READ_ONCE(dev->napi_defer_hard_irqs)); in netif_napi_add_weight_locked()
7353 napi_set_gro_flush_timeout(napi, READ_ONCE(dev->gro_flush_timeout)); in netif_napi_add_weight_locked()
7355 napi_get_frags_check(napi); in netif_napi_add_weight_locked()
7360 if (napi_get_threaded_config(dev, napi)) in netif_napi_add_weight_locked()
7361 if (napi_kthread_create(napi)) in netif_napi_add_weight_locked()
7363 netif_napi_set_irq_locked(napi, -1); in netif_napi_add_weight_locked()
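
netif_napi_add_weight_locked() is the common registration path behind the netif_napi_add() wrappers: initialize the poll list, hash node, watchdog hrtimer, and GRO state; set SCHED and NPSVC so the napi starts out disabled; inherit the device's defer_hard_irqs and gro_flush_timeout defaults; optionally spawn the kthread; and leave the IRQ unset (-1). Drivers normally reach it through the wrappers (my_driver_poll/my_driver_tx_poll below are placeholder callbacks):

    /* typical driver calls; weight defaults to NAPI_POLL_WEIGHT (64) */
    netif_napi_add(dev, &ring->napi, my_driver_poll);

    /* TX-completion napis use a dedicated wrapper */
    netif_napi_add_tx(dev, &ring->tx_napi, my_driver_tx_poll);
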
7450 void __netif_napi_del_locked(struct napi_struct *napi) in __netif_napi_del_locked() argument
7452 netdev_assert_locked(napi->dev); in __netif_napi_del_locked()
7454 if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state)) in __netif_napi_del_locked()
7458 WARN_ON(!test_bit(NAPI_STATE_SCHED, &napi->state)); in __netif_napi_del_locked()
7460 if (test_and_clear_bit(NAPI_STATE_HAS_NOTIFIER, &napi->state)) in __netif_napi_del_locked()
7461 irq_set_affinity_notifier(napi->irq, NULL); in __netif_napi_del_locked()
7463 if (napi->config) { in __netif_napi_del_locked()
7464 napi->index = -1; in __netif_napi_del_locked()
7465 napi->config = NULL; in __netif_napi_del_locked()
7468 list_del_rcu(&napi->dev_list); in __netif_napi_del_locked()
7469 napi_free_frags(napi); in __netif_napi_del_locked()
7471 gro_cleanup(&napi->gro); in __netif_napi_del_locked()
7473 if (napi->thread) { in __netif_napi_del_locked()
7474 kthread_stop(napi->thread); in __netif_napi_del_locked()
7475 napi->thread = NULL; in __netif_napi_del_locked()
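
Teardown mirrors registration: unhash the napi, detach the affinity notifier, release the config slot, unlink from dev->napi_list under RCU, free deferred frags, clean up GRO, and stop the kthread. The RCU side matters: the unlocked netif_napi_del() follows this with synchronize_net(), so concurrent napi_by_id() readers drain before the memory can be reused. From include/linux/netdevice.h, as I recall it:

    static inline void netif_napi_del(struct napi_struct *napi)
    {
    	__netif_napi_del(napi);
    	synchronize_net();	/* wait out RCU readers of the napi hash */
    }
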
7572 static int napi_thread_wait(struct napi_struct *napi) in napi_thread_wait() argument
7582 if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state)) { in napi_thread_wait()
7583 WARN_ON(!list_empty(&napi->poll_list)); in napi_thread_wait()
7596 static void napi_threaded_poll_loop(struct napi_struct *napi) in napi_threaded_poll_loop() argument
7612 have = netpoll_poll_lock(napi); in napi_threaded_poll_loop()
7613 __napi_poll(napi, &repoll); in napi_threaded_poll_loop()
7637 struct napi_struct *napi = data; in napi_threaded_poll() local
7639 while (!napi_thread_wait(napi)) in napi_threaded_poll()
7640 napi_threaded_poll_loop(napi); in napi_threaded_poll()
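
The kthread body pairs napi_thread_wait() with napi_threaded_poll_loop(): sleep until ____napi_schedule() sets SCHED_THREADED and wakes the thread, then poll until done. The wait side tests SCHED_THREADED rather than plain SCHED because SCHED alone may be held by a busy-polling task or by napi_disable(). A reconstruction of the wait:

    static int napi_thread_wait(struct napi_struct *napi)
    {
    	set_current_state(TASK_INTERRUPTIBLE);

    	while (!kthread_should_stop()) {
    		/* SCHED_THREADED means this kthread owns the napi; SCHED
    		 * alone could belong to a busy poller or napi_disable().
    		 */
    		if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state)) {
    			WARN_ON(!list_empty(&napi->poll_list));
    			__set_current_state(TASK_RUNNING);
    			return 0;
    		}

    		schedule();
    		set_current_state(TASK_INTERRUPTIBLE);
    	}
    	__set_current_state(TASK_RUNNING);
    	return -1;		/* kthread_stop(): exit the poll loop */
    }
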
12455 struct napi_struct *napi = list_first_entry(&oldsd->poll_list, in dev_cpu_dead() local
12459 list_del_init(&napi->poll_list); in dev_cpu_dead()
12460 if (napi->poll == process_backlog) in dev_cpu_dead()
12461 napi->state &= NAPIF_STATE_THREADED; in dev_cpu_dead()
12463 ____napi_schedule(sd, napi); in dev_cpu_dead()
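
dev_cpu_dead() is the CPU-hotplug cleanup: napis stranded on the dead CPU's poll_list are moved to the surviving CPU, except the backlog napi, whose state is simply reset (keeping only THREADED) because its queues are drained separately. The surrounding loop, roughly:

    while (!list_empty(&oldsd->poll_list)) {
    	struct napi_struct *napi;

    	napi = list_first_entry(&oldsd->poll_list,
    				struct napi_struct, poll_list);
    	list_del_init(&napi->poll_list);
    	if (napi->poll == process_backlog)
    		napi->state &= NAPIF_STATE_THREADED;	/* reset; drained later */
    	else
    		____napi_schedule(sd, napi);		/* migrate to live CPU */
    }
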
12823 struct napi_struct *napi = &sd->backlog; in backlog_napi_should_run() local
12825 return test_bit(NAPI_STATE_SCHED_THREADED, &napi->state); in backlog_napi_should_run()
12838 struct napi_struct *napi = &sd->backlog; in backlog_napi_setup() local
12840 napi->thread = this_cpu_read(backlog_napi); in backlog_napi_setup()
12841 set_bit(NAPI_STATE_THREADED, &napi->state); in backlog_napi_setup()
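
These last entries are the per-CPU threaded backlog (threaded RPS): backlog_napi_should_run() is the smpboot should-run hook keyed on SCHED_THREADED, and backlog_napi_setup() points sd->backlog at the hotplug thread and marks it THREADED. They plug into a struct smp_hotplug_thread, plausibly along these lines (the thread_fn name is an assumption):

    static struct smp_hotplug_thread backlog_threads = {
    	.store			= &backlog_napi,
    	.thread_should_run	= backlog_napi_should_run,
    	.thread_fn		= run_backlog_napi,
    	.thread_comm		= "backlog_napi/%u",
    	.setup			= backlog_napi_setup,
    };

    /* registered once at boot via smpboot_register_percpu_thread() */
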