Lines matching refs:efx — identifier cross-reference output (source line number, the matching line, the enclosing function, and whether the hit is an argument or a local). The fragments below come from the sfc driver's channel and interrupt management code, drivers/net/ethernet/sfc/efx_channels.c.

55 static unsigned int count_online_cores(struct efx_nic *efx, bool local_node)  in count_online_cores()  argument
62 netif_warn(efx, probe, efx->net_dev, in count_online_cores()
70 cpumask_of_pcibus(efx->pci_dev->bus)); in count_online_cores()
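The matches above belong to a helper that counts online CPU cores (one per physical core, not per hyperthread), optionally restricted to the NUMA node the NIC is attached to. A minimal sketch of the whole helper, reconstructed around the matched lines; the mask handling outside those lines is a best-effort reconstruction and may differ between kernel versions:

	static unsigned int count_online_cores(struct efx_nic *efx, bool local_node)
	{
		cpumask_var_t filter_mask;
		unsigned int count;
		int cpu;

		if (unlikely(!zalloc_cpumask_var(&filter_mask, GFP_KERNEL))) {
			netif_warn(efx, probe, efx->net_dev,
				   "RSS disabled due to allocation failure\n");
			return 1;
		}

		cpumask_copy(filter_mask, cpu_online_mask);
		if (local_node)	/* restrict to CPUs local to the NIC's bus */
			cpumask_and(filter_mask, filter_mask,
				    cpumask_of_pcibus(efx->pci_dev->bus));

		count = 0;
		for_each_cpu(cpu, filter_mask) {
			++count;
			/* count each physical core once: drop its SMT siblings */
			cpumask_andnot(filter_mask, filter_mask,
				       topology_sibling_cpumask(cpu));
		}

		free_cpumask_var(filter_mask);
		return count;
	}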
83 static unsigned int efx_wanted_parallelism(struct efx_nic *efx) in efx_wanted_parallelism() argument
90 count = count_online_cores(efx, true); in efx_wanted_parallelism()
94 count = count_online_cores(efx, false); in efx_wanted_parallelism()
98 netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn, in efx_wanted_parallelism()
108 if (efx->type->sriov_wanted) { in efx_wanted_parallelism()
109 if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 && in efx_wanted_parallelism()
110 count > efx_vf_size(efx)) { in efx_wanted_parallelism()
111 netif_warn(efx, probe, efx->net_dev, in efx_wanted_parallelism()
115 count, efx_vf_size(efx)); in efx_wanted_parallelism()
116 count = efx_vf_size(efx); in efx_wanted_parallelism()
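efx_wanted_parallelism() picks the RSS channel count: the rss_cpus module parameter if set, otherwise one channel per local-node core, falling back to all online cores if the local node has none. With SR-IOV wanted, the count is further clamped to efx_vf_size(efx), since RSS table entries beyond the VF size would be unreachable from VFs. A condensed sketch; the rss_cpus handling and the EFX_MAX_RX_QUEUES clamp are reconstructed, not part of the matches:

	static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
	{
		unsigned int count;

		if (rss_cpus) {
			count = rss_cpus;
		} else {
			count = count_online_cores(efx, true);
			/* no online CPU in the local node: accept any node */
			if (count == 0)
				count = count_online_cores(efx, false);
		}

		if (count > EFX_MAX_RX_QUEUES) {
			netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
				       "Reducing number of rx queues from %u to %u.\n",
				       count, EFX_MAX_RX_QUEUES);
			count = EFX_MAX_RX_QUEUES;
		}

	#ifdef CONFIG_SFC_SRIOV
		if (efx->type->sriov_wanted &&
		    efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
		    count > efx_vf_size(efx))
			count = efx_vf_size(efx);	/* keep RSS reachable from VFs */
	#endif
		return count;
	}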
124 static int efx_allocate_msix_channels(struct efx_nic *efx, in efx_allocate_msix_channels() argument
144 tx_per_ev = EFX_MAX_EVQ_SIZE / EFX_TXQ_MAX_ENT(efx); in efx_allocate_msix_channels()
149 vec_count = pci_msix_vec_count(efx->pci_dev); in efx_allocate_msix_channels()
160 efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED; in efx_allocate_msix_channels()
161 netif_warn(efx, drv, efx->net_dev, in efx_allocate_msix_channels()
164 netif_warn(efx, drv, efx->net_dev, in efx_allocate_msix_channels()
166 } else if (n_channels + n_xdp_tx > efx->max_vis) { in efx_allocate_msix_channels()
167 efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED; in efx_allocate_msix_channels()
168 netif_warn(efx, drv, efx->net_dev, in efx_allocate_msix_channels()
170 n_xdp_tx, n_channels, efx->max_vis); in efx_allocate_msix_channels()
171 netif_warn(efx, drv, efx->net_dev, in efx_allocate_msix_channels()
174 efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_SHARED; in efx_allocate_msix_channels()
175 netif_warn(efx, drv, efx->net_dev, in efx_allocate_msix_channels()
180 netif_warn(efx, drv, efx->net_dev, in efx_allocate_msix_channels()
184 efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_DEDICATED; in efx_allocate_msix_channels()
187 if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_BORROWED) { in efx_allocate_msix_channels()
188 efx->n_xdp_channels = n_xdp_ev; in efx_allocate_msix_channels()
189 efx->xdp_tx_per_channel = tx_per_ev; in efx_allocate_msix_channels()
190 efx->xdp_tx_queue_count = n_xdp_tx; in efx_allocate_msix_channels()
192 netif_dbg(efx, drv, efx->net_dev, in efx_allocate_msix_channels()
196 efx->n_xdp_channels = 0; in efx_allocate_msix_channels()
197 efx->xdp_tx_per_channel = 0; in efx_allocate_msix_channels()
198 efx->xdp_tx_queue_count = n_xdp_tx; in efx_allocate_msix_channels()
202 netif_err(efx, drv, efx->net_dev, in efx_allocate_msix_channels()
205 netif_err(efx, drv, efx->net_dev, in efx_allocate_msix_channels()
212 efx->n_channels = n_channels; in efx_allocate_msix_channels()
215 n_channels -= efx->n_xdp_channels; in efx_allocate_msix_channels()
218 efx->n_tx_channels = in efx_allocate_msix_channels()
220 efx->max_tx_channels); in efx_allocate_msix_channels()
221 efx->tx_channel_offset = in efx_allocate_msix_channels()
222 n_channels - efx->n_tx_channels; in efx_allocate_msix_channels()
223 efx->n_rx_channels = in efx_allocate_msix_channels()
225 efx->n_tx_channels, 1U); in efx_allocate_msix_channels()
227 efx->n_tx_channels = min(n_channels, efx->max_tx_channels); in efx_allocate_msix_channels()
228 efx->tx_channel_offset = 0; in efx_allocate_msix_channels()
229 efx->n_rx_channels = n_channels; in efx_allocate_msix_channels()
232 efx->n_rx_channels = min(efx->n_rx_channels, parallelism); in efx_allocate_msix_channels()
233 efx->n_tx_channels = min(efx->n_tx_channels, parallelism); in efx_allocate_msix_channels()
235 efx->xdp_channel_offset = n_channels; in efx_allocate_msix_channels()
237 netif_dbg(efx, drv, efx->net_dev, in efx_allocate_msix_channels()
239 efx->n_rx_channels); in efx_allocate_msix_channels()
241 return efx->n_channels; in efx_allocate_msix_channels()
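efx_allocate_msix_channels() budgets MSI-X vectors between the normal channels and XDP TX. One extra event queue of EFX_MAX_EVQ_SIZE entries can service tx_per_ev TX queues, and XDP ideally wants a TX queue per possible CPU so any NAPI context can transmit without locking. Depending on what pci_msix_vec_count() reports and on the efx->max_vis limit, the driver settles on one of three xdp_txq_queues_mode values. A sketch of the decision; the warning messages and some clamping are elided, and the exact conditions may differ between kernel versions:

	tx_per_ev = EFX_MAX_EVQ_SIZE / EFX_TXQ_MAX_ENT(efx);
	n_xdp_tx = num_possible_cpus();
	n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev);

	vec_count = pci_msix_vec_count(efx->pci_dev);
	if (vec_count < 0)
		return vec_count;
	max_channels = min_t(unsigned int, vec_count, max_channels);

	if (n_channels >= max_channels)
		/* no spare vectors: XDP borrows net-stack TX queues */
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
	else if (n_channels + n_xdp_tx > efx->max_vis)
		/* not enough VIs for per-CPU XDP queues: borrow as well */
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
	else if (n_channels + n_xdp_ev > max_channels)
		/* some spare vectors: fewer XDP queues, shared across CPUs */
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_SHARED;
	else
		/* room for one dedicated XDP TX queue per CPU */
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_DEDICATED;

The remaining vectors are then split between RX and TX channels (separate TX channels take the top of the range, hence tx_channel_offset), with both counts clamped to the wanted parallelism.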
247 int efx_probe_interrupts(struct efx_nic *efx) in efx_probe_interrupts() argument
255 if (efx->extra_channel_type[i]) in efx_probe_interrupts()
258 if (efx->interrupt_mode == EFX_INT_MODE_MSIX) { in efx_probe_interrupts()
259 unsigned int parallelism = efx_wanted_parallelism(efx); in efx_probe_interrupts()
263 rc = efx_allocate_msix_channels(efx, efx->max_channels, in efx_probe_interrupts()
269 rc = pci_enable_msix_range(efx->pci_dev, xentries, 1, in efx_probe_interrupts()
274 netif_err(efx, drv, efx->net_dev, in efx_probe_interrupts()
276 if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI) in efx_probe_interrupts()
277 efx->interrupt_mode = EFX_INT_MODE_MSI; in efx_probe_interrupts()
281 netif_err(efx, drv, efx->net_dev, in efx_probe_interrupts()
284 netif_err(efx, drv, efx->net_dev, in efx_probe_interrupts()
290 for (i = 0; i < efx->n_channels; i++) in efx_probe_interrupts()
291 efx_get_channel(efx, i)->irq = in efx_probe_interrupts()
297 if (efx->interrupt_mode == EFX_INT_MODE_MSI) { in efx_probe_interrupts()
298 efx->n_channels = 1; in efx_probe_interrupts()
299 efx->n_rx_channels = 1; in efx_probe_interrupts()
300 efx->n_tx_channels = 1; in efx_probe_interrupts()
301 efx->tx_channel_offset = 0; in efx_probe_interrupts()
302 efx->n_xdp_channels = 0; in efx_probe_interrupts()
303 efx->xdp_channel_offset = efx->n_channels; in efx_probe_interrupts()
304 rc = pci_enable_msi(efx->pci_dev); in efx_probe_interrupts()
306 efx_get_channel(efx, 0)->irq = efx->pci_dev->irq; in efx_probe_interrupts()
308 netif_err(efx, drv, efx->net_dev, in efx_probe_interrupts()
310 if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY) in efx_probe_interrupts()
311 efx->interrupt_mode = EFX_INT_MODE_LEGACY; in efx_probe_interrupts()
318 if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) { in efx_probe_interrupts()
319 efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0); in efx_probe_interrupts()
320 efx->n_rx_channels = 1; in efx_probe_interrupts()
321 efx->n_tx_channels = 1; in efx_probe_interrupts()
322 efx->tx_channel_offset = efx_separate_tx_channels ? 1 : 0; in efx_probe_interrupts()
323 efx->n_xdp_channels = 0; in efx_probe_interrupts()
324 efx->xdp_channel_offset = efx->n_channels; in efx_probe_interrupts()
325 efx->legacy_irq = efx->pci_dev->irq; in efx_probe_interrupts()
329 efx->n_extra_tx_channels = 0; in efx_probe_interrupts()
330 j = efx->xdp_channel_offset; in efx_probe_interrupts()
332 if (!efx->extra_channel_type[i]) in efx_probe_interrupts()
334 if (j <= efx->tx_channel_offset + efx->n_tx_channels) { in efx_probe_interrupts()
335 efx->extra_channel_type[i]->handle_no_channel(efx); in efx_probe_interrupts()
338 efx_get_channel(efx, j)->type = in efx_probe_interrupts()
339 efx->extra_channel_type[i]; in efx_probe_interrupts()
340 if (efx_channel_has_tx_queues(efx_get_channel(efx, j))) in efx_probe_interrupts()
341 efx->n_extra_tx_channels++; in efx_probe_interrupts()
345 rss_spread = efx->n_rx_channels; in efx_probe_interrupts()
348 if (efx->type->sriov_wanted) { in efx_probe_interrupts()
349 efx->rss_spread = ((rss_spread > 1 || in efx_probe_interrupts()
350 !efx->type->sriov_wanted(efx)) ? in efx_probe_interrupts()
351 rss_spread : efx_vf_size(efx)); in efx_probe_interrupts()
355 efx->rss_spread = rss_spread; in efx_probe_interrupts()
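efx_probe_interrupts() tries interrupt modes in decreasing order of capability, bounded below by efx->type->min_interrupt_mode: MSI-X (one vector per channel via pci_enable_msix_range()), then single-channel MSI, then a single legacy INTx interrupt where a second channel exists only if efx_separate_tx_channels is set. A heavily condensed sketch of the fallback ladder; error paths and the extra-channel bookkeeping are elided and reconstructed:

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		rc = pci_enable_msix_range(efx->pci_dev, xentries, 1, n_channels);
		if (rc < 0 && efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
			efx->interrupt_mode = EFX_INT_MODE_MSI;	/* fall through */
		else
			for (i = 0; i < efx->n_channels; i++)
				efx_get_channel(efx, i)->irq = xentries[i].vector;
	}
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0)
			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
		else if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
			efx->interrupt_mode = EFX_INT_MODE_LEGACY;
	}
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY)
		efx->legacy_irq = efx->pci_dev->irq;

Finally rss_spread is taken from n_rx_channels, except that a single-queue PF with SR-IOV wanted spreads over efx_vf_size(efx) instead, so VF traffic still hashes across the VFs' queues.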
361 void efx_set_interrupt_affinity(struct efx_nic *efx) in efx_set_interrupt_affinity() argument
363 const struct cpumask *numa_mask = cpumask_of_pcibus(efx->pci_dev->bus); in efx_set_interrupt_affinity()
372 efx_for_each_channel(channel, efx) { in efx_set_interrupt_affinity()
380 void efx_clear_interrupt_affinity(struct efx_nic *efx) in efx_clear_interrupt_affinity() argument
384 efx_for_each_channel(channel, efx) in efx_clear_interrupt_affinity()
389 efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused))) in efx_set_interrupt_affinity() argument
394 efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused))) in efx_clear_interrupt_affinity() argument
399 void efx_remove_interrupts(struct efx_nic *efx) in efx_remove_interrupts() argument
404 efx_for_each_channel(channel, efx) in efx_remove_interrupts()
406 pci_disable_msi(efx->pci_dev); in efx_remove_interrupts()
407 pci_disable_msix(efx->pci_dev); in efx_remove_interrupts()
410 efx->legacy_irq = 0; in efx_remove_interrupts()
424 struct efx_nic *efx = channel->efx; in efx_probe_eventq() local
427 netif_dbg(efx, probe, efx->net_dev, in efx_probe_eventq()
433 entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128); in efx_probe_eventq()
443 struct efx_nic *efx = channel->efx; in efx_init_eventq() local
448 netif_dbg(efx, drv, efx->net_dev, in efx_init_eventq()
453 efx->type->push_irq_moderation(channel); in efx_init_eventq()
463 netif_dbg(channel->efx, ifup, channel->efx->net_dev, in efx_start_eventq()
489 netif_dbg(channel->efx, drv, channel->efx->net_dev, in efx_fini_eventq()
498 netif_dbg(channel->efx, drv, channel->efx->net_dev, in efx_remove_eventq()
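The event-queue lifecycle (probe/init/start/fini/remove) sizes each queue to hold one event per RX and TX descriptor plus slack for link-state events and MCDI completions, rounded up to a power of two so the index can be masked. A sketch of the sizing step in efx_probe_eventq(); the WARN and mask assignment around the matched line are reconstructed:

	/* one event per rx/tx buffer, plus slack for link and MCDI events */
	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;

efx_init_eventq() then pushes the queue to hardware and applies the channel's interrupt moderation via efx->type->push_irq_moderation().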
528 static struct efx_channel *efx_alloc_channel(struct efx_nic *efx, int i) in efx_alloc_channel() argument
539 channel->efx = efx; in efx_alloc_channel()
545 tx_queue->efx = efx; in efx_alloc_channel()
556 rx_queue->efx = efx; in efx_alloc_channel()
562 int efx_init_channels(struct efx_nic *efx) in efx_init_channels() argument
567 efx->channel[i] = efx_alloc_channel(efx, i); in efx_init_channels()
568 if (!efx->channel[i]) in efx_init_channels()
570 efx->msi_context[i].efx = efx; in efx_init_channels()
571 efx->msi_context[i].index = i; in efx_init_channels()
575 efx->interrupt_mode = min(efx->type->min_interrupt_mode, in efx_init_channels()
578 efx->max_channels = EFX_MAX_CHANNELS; in efx_init_channels()
579 efx->max_tx_channels = EFX_MAX_CHANNELS; in efx_init_channels()
584 void efx_fini_channels(struct efx_nic *efx) in efx_fini_channels() argument
589 if (efx->channel[i]) { in efx_fini_channels()
590 kfree(efx->channel[i]); in efx_fini_channels()
591 efx->channel[i] = NULL; in efx_fini_channels()
643 netif_dbg(channel->efx, probe, channel->efx->net_dev, in efx_probe_channel()
678 struct efx_nic *efx = channel->efx; in efx_get_channel_name() local
684 if (number >= efx->xdp_channel_offset && in efx_get_channel_name()
685 !WARN_ON_ONCE(!efx->n_xdp_channels)) { in efx_get_channel_name()
687 number -= efx->xdp_channel_offset; in efx_get_channel_name()
688 } else if (efx->tx_channel_offset == 0) { in efx_get_channel_name()
690 } else if (number < efx->tx_channel_offset) { in efx_get_channel_name()
694 number -= efx->tx_channel_offset; in efx_get_channel_name()
696 snprintf(buf, len, "%s%s-%d", efx->name, type, number); in efx_get_channel_name()
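efx_get_channel_name() builds the per-channel IRQ name that efx_set_channel_names() copies into msi_context: XDP channels get an "-xdp" suffix numbered from xdp_channel_offset, combined RX+TX channels get no suffix, and when TX channels are separate, RX-only channels get "-rx" and TX channels "-tx". A sketch filling in the suffix strings the matches elide (the literals are reconstructed):

	if (number >= efx->xdp_channel_offset &&
	    !WARN_ON_ONCE(!efx->n_xdp_channels)) {
		type = "-xdp";
		number -= efx->xdp_channel_offset;
	} else if (efx->tx_channel_offset == 0) {
		type = "";		/* combined RX+TX channel */
	} else if (number < efx->tx_channel_offset) {
		type = "-rx";
	} else {
		type = "-tx";
		number -= efx->tx_channel_offset;
	}
	snprintf(buf, len, "%s%s-%d", efx->name, type, number);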
699 void efx_set_channel_names(struct efx_nic *efx) in efx_set_channel_names() argument
703 efx_for_each_channel(channel, efx) in efx_set_channel_names()
705 efx->msi_context[channel->channel].name, in efx_set_channel_names()
706 sizeof(efx->msi_context[0].name)); in efx_set_channel_names()
709 int efx_probe_channels(struct efx_nic *efx) in efx_probe_channels() argument
715 efx->next_buffer_table = 0; in efx_probe_channels()
722 efx_for_each_channel_rev(channel, efx) { in efx_probe_channels()
725 netif_err(efx, probe, efx->net_dev, in efx_probe_channels()
731 efx_set_channel_names(efx); in efx_probe_channels()
736 efx_remove_channels(efx); in efx_probe_channels()
745 netif_dbg(channel->efx, drv, channel->efx->net_dev, in efx_remove_channel()
756 void efx_remove_channels(struct efx_nic *efx) in efx_remove_channels() argument
760 efx_for_each_channel(channel, efx) in efx_remove_channels()
763 kfree(efx->xdp_tx_queues); in efx_remove_channels()
766 static int efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number, in efx_set_xdp_tx_queue() argument
769 if (xdp_queue_number >= efx->xdp_tx_queue_count) in efx_set_xdp_tx_queue()
772 netif_dbg(efx, drv, efx->net_dev, in efx_set_xdp_tx_queue()
776 efx->xdp_tx_queues[xdp_queue_number] = tx_queue; in efx_set_xdp_tx_queue()
780 static void efx_set_xdp_channels(struct efx_nic *efx) in efx_set_xdp_channels() argument
792 efx_for_each_channel(channel, efx) { in efx_set_xdp_channels()
793 if (channel->channel < efx->tx_channel_offset) in efx_set_xdp_channels()
799 rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, in efx_set_xdp_channels()
807 netif_dbg(efx, drv, efx->net_dev, in efx_set_xdp_channels()
818 if (efx->xdp_txq_queues_mode == in efx_set_xdp_channels()
821 rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, in efx_set_xdp_channels()
828 WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED && in efx_set_xdp_channels()
829 xdp_queue_number != efx->xdp_tx_queue_count); in efx_set_xdp_channels()
830 WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED && in efx_set_xdp_channels()
831 xdp_queue_number > efx->xdp_tx_queue_count); in efx_set_xdp_channels()
837 while (xdp_queue_number < efx->xdp_tx_queue_count) { in efx_set_xdp_channels()
838 tx_queue = efx->xdp_tx_queues[next_queue++]; in efx_set_xdp_channels()
839 rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue); in efx_set_xdp_channels()
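efx_set_xdp_channels() numbers every hardware TX queue and fills the efx->xdp_tx_queues[] map consulted on XDP transmit. Dedicated XDP channels contribute each of their queues; in BORROWED mode the first (no-checksum-offload) net-stack queue of each TX channel is registered instead; and in SHARED mode, once real queues run out, the tail of the map reuses earlier entries so every possible CPU still finds a queue. The wrap-around step, mirroring the matched lines:

	/* more CPUs than XDP TX queues: let the extra CPUs share
	 * already-assigned queues, cycling over the real ones
	 */
	next_queue = 0;
	while (xdp_queue_number < efx->xdp_tx_queue_count) {
		tx_queue = efx->xdp_tx_queues[next_queue++];
		rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
		if (rc == 0)
			xdp_queue_number++;
	}

The two WARN_ONs just above check the invariant: DEDICATED mode must fill the map exactly, the other modes must not overrun it.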
845 int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) in efx_realloc_channels() argument
848 *ptp_channel = efx_ptp_channel(efx); in efx_realloc_channels()
849 struct efx_ptp_data *ptp_data = efx->ptp_data; in efx_realloc_channels()
854 rc = efx_check_disabled(efx); in efx_realloc_channels()
861 efx_for_each_channel(channel, efx) { in efx_realloc_channels()
880 efx_device_detach_sync(efx); in efx_realloc_channels()
881 efx_stop_all(efx); in efx_realloc_channels()
882 efx_soft_disable_interrupts(efx); in efx_realloc_channels()
886 for (i = 0; i < efx->n_channels; i++) { in efx_realloc_channels()
887 channel = efx->channel[i]; in efx_realloc_channels()
898 old_rxq_entries = efx->rxq_entries; in efx_realloc_channels()
899 old_txq_entries = efx->txq_entries; in efx_realloc_channels()
900 efx->rxq_entries = rxq_entries; in efx_realloc_channels()
901 efx->txq_entries = txq_entries; in efx_realloc_channels()
902 for (i = 0; i < efx->n_channels; i++) in efx_realloc_channels()
903 swap(efx->channel[i], other_channel[i]); in efx_realloc_channels()
906 efx->next_buffer_table = next_buffer_table; in efx_realloc_channels()
908 for (i = 0; i < efx->n_channels; i++) { in efx_realloc_channels()
909 channel = efx->channel[i]; in efx_realloc_channels()
915 efx_init_napi_channel(efx->channel[i]); in efx_realloc_channels()
918 efx_set_xdp_channels(efx); in efx_realloc_channels()
920 efx->ptp_data = NULL; in efx_realloc_channels()
922 for (i = 0; i < efx->n_channels; i++) { in efx_realloc_channels()
931 efx->ptp_data = ptp_data; in efx_realloc_channels()
932 rc2 = efx_soft_enable_interrupts(efx); in efx_realloc_channels()
935 netif_err(efx, drv, efx->net_dev, in efx_realloc_channels()
937 efx_schedule_reset(efx, RESET_TYPE_DISABLE); in efx_realloc_channels()
939 efx_start_all(efx); in efx_realloc_channels()
940 efx_device_attach_if_not_resetting(efx); in efx_realloc_channels()
946 efx->rxq_entries = old_rxq_entries; in efx_realloc_channels()
947 efx->txq_entries = old_txq_entries; in efx_realloc_channels()
948 for (i = 0; i < efx->n_channels; i++) in efx_realloc_channels()
949 swap(efx->channel[i], other_channel[i]); in efx_realloc_channels()
950 efx_ptp_update_channel(efx, ptp_channel); in efx_realloc_channels()
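efx_realloc_channels() resizes the rings with a clone-and-swap pattern so failure is fully recoverable: stop the NIC, clone each channel, swap the clones in, probe them, and on any error swap the old set (and the old ring sizes) back. A condensed sketch of that pattern; efx_copy_channel() and the channel->type->copy test live in the same file but outside the matched lines, so treat the details as reconstruction:

	efx_device_detach_sync(efx);
	efx_stop_all(efx);
	efx_soft_disable_interrupts(efx);

	/* clone current channels, then swap the clones into place */
	for (i = 0; i < efx->n_channels; i++)
		other_channel[i] = efx_copy_channel(efx->channel[i]);
	old_rxq_entries = efx->rxq_entries;
	old_txq_entries = efx->txq_entries;
	efx->rxq_entries = rxq_entries;
	efx->txq_entries = txq_entries;
	for (i = 0; i < efx->n_channels; i++)
		swap(efx->channel[i], other_channel[i]);

	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (!channel->type->copy)
			continue;	/* shared channel, not reallocated */
		rc = efx_probe_channel(channel);
		if (rc)
			goto rollback;
		efx_init_napi_channel(efx->channel[i]);
	}

	out:	/* destroy the unused set, restart interrupts, reattach */
	rc2 = efx_soft_enable_interrupts(efx);
	if (rc2)
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	else {
		efx_start_all(efx);
		efx_device_attach_if_not_resetting(efx);
	}
	return rc;

	rollback:	/* restore the old channels and ring sizes */
	efx->rxq_entries = old_rxq_entries;
	efx->txq_entries = old_txq_entries;
	for (i = 0; i < efx->n_channels; i++)
		swap(efx->channel[i], other_channel[i]);
	efx_ptp_update_channel(efx, ptp_channel);
	goto out;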
954 int efx_set_channels(struct efx_nic *efx) in efx_set_channels() argument
959 if (efx->xdp_tx_queue_count) { in efx_set_channels()
960 EFX_WARN_ON_PARANOID(efx->xdp_tx_queues); in efx_set_channels()
963 efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count, in efx_set_channels()
964 sizeof(*efx->xdp_tx_queues), in efx_set_channels()
966 if (!efx->xdp_tx_queues) in efx_set_channels()
970 efx_for_each_channel(channel, efx) { in efx_set_channels()
971 if (channel->channel < efx->n_rx_channels) in efx_set_channels()
977 efx_set_xdp_channels(efx); in efx_set_channels()
979 rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); in efx_set_channels()
982 return netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels); in efx_set_channels()
987 return channel->channel - channel->efx->tx_channel_offset < in efx_default_channel_want_txqs()
988 channel->efx->n_tx_channels; in efx_default_channel_want_txqs()
995 int efx_soft_enable_interrupts(struct efx_nic *efx) in efx_soft_enable_interrupts() argument
1000 BUG_ON(efx->state == STATE_DISABLED); in efx_soft_enable_interrupts()
1002 efx->irq_soft_enabled = true; in efx_soft_enable_interrupts()
1005 efx_for_each_channel(channel, efx) { in efx_soft_enable_interrupts()
1014 efx_mcdi_mode_event(efx); in efx_soft_enable_interrupts()
1019 efx_for_each_channel(channel, efx) { in efx_soft_enable_interrupts()
1030 void efx_soft_disable_interrupts(struct efx_nic *efx) in efx_soft_disable_interrupts() argument
1034 if (efx->state == STATE_DISABLED) in efx_soft_disable_interrupts()
1037 efx_mcdi_mode_poll(efx); in efx_soft_disable_interrupts()
1039 efx->irq_soft_enabled = false; in efx_soft_disable_interrupts()
1042 if (efx->legacy_irq) in efx_soft_disable_interrupts()
1043 synchronize_irq(efx->legacy_irq); in efx_soft_disable_interrupts()
1045 efx_for_each_channel(channel, efx) { in efx_soft_disable_interrupts()
1055 efx_mcdi_flush_async(efx); in efx_soft_disable_interrupts()
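efx_soft_disable_interrupts() mirrors the enable path and the ordering matters: switch MCDI from event-driven to polled completion first, clear irq_soft_enabled so the IRQ handlers become no-ops, synchronize the legacy vector if any, then per channel synchronize its vector and quiesce its event queue, and finally flush asynchronous MCDI requests. A condensed sketch; the smp_wmb() pairing and keep_eventq test are reconstructed from the driver's known structure:

	efx_mcdi_mode_poll(efx);	/* MCDI completions stop arriving as events */

	efx->irq_soft_enabled = false;
	smp_wmb();			/* visible before IRQs are synchronized */

	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);

	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);
		efx_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx_mcdi_flush_async(efx);	/* drain in-flight async MCDI requests */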
1058 int efx_enable_interrupts(struct efx_nic *efx) in efx_enable_interrupts() argument
1064 BUG_ON(efx->state == STATE_DISABLED); in efx_enable_interrupts()
1066 if (efx->eeh_disabled_legacy_irq) { in efx_enable_interrupts()
1067 enable_irq(efx->legacy_irq); in efx_enable_interrupts()
1068 efx->eeh_disabled_legacy_irq = false; in efx_enable_interrupts()
1071 efx->type->irq_enable_master(efx); in efx_enable_interrupts()
1073 efx_for_each_channel(channel, efx) { in efx_enable_interrupts()
1081 rc = efx_soft_enable_interrupts(efx); in efx_enable_interrupts()
1089 efx_for_each_channel(channel, efx) { in efx_enable_interrupts()
1096 efx->type->irq_disable_non_ev(efx); in efx_enable_interrupts()
1101 void efx_disable_interrupts(struct efx_nic *efx) in efx_disable_interrupts() argument
1105 efx_soft_disable_interrupts(efx); in efx_disable_interrupts()
1107 efx_for_each_channel(channel, efx) { in efx_disable_interrupts()
1112 efx->type->irq_disable_non_ev(efx); in efx_disable_interrupts()
1115 void efx_start_channels(struct efx_nic *efx) in efx_start_channels() argument
1121 efx_for_each_channel_rev(channel, efx) { in efx_start_channels()
1126 atomic_inc(&efx->active_queues); in efx_start_channels()
1131 atomic_inc(&efx->active_queues); in efx_start_channels()
1141 void efx_stop_channels(struct efx_nic *efx) in efx_stop_channels() argument
1152 efx_for_each_channel(channel, efx) { in efx_stop_channels()
1159 efx_for_each_channel(channel, efx) { in efx_stop_channels()
1172 if (efx->type->fini_dmaq) in efx_stop_channels()
1173 rc = efx->type->fini_dmaq(efx); in efx_stop_channels()
1176 netif_err(efx, drv, efx->net_dev, "failed to flush queues\n"); in efx_stop_channels()
1178 netif_dbg(efx, drv, efx->net_dev, in efx_stop_channels()
1182 efx_for_each_channel(channel, efx) { in efx_stop_channels()
1247 static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel) in efx_update_irq_mod() argument
1249 int step = efx->irq_mod_step_us; in efx_update_irq_mod()
1254 efx->type->push_irq_moderation(channel); in efx_update_irq_mod()
1258 efx->irq_rx_moderation_us) { in efx_update_irq_mod()
1260 efx->type->push_irq_moderation(channel); in efx_update_irq_mod()
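efx_update_irq_mod() implements the adaptive moderation driven from efx_poll(): when efx->irq_rx_adaptive is set and a channel has taken enough interrupts (a fixed window of roughly a thousand in current sources), a low irq_mod_score shortens the moderation delay by irq_mod_step_us for lower latency, and a high score lengthens it toward the efx->irq_rx_moderation_us ceiling for more batching, pushing each change to hardware. Reconstructed around the matches; the irq_adapt_low_thresh/irq_adapt_high_thresh names are the driver's module parameters and are assumed here:

	static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
	{
		int step = efx->irq_mod_step_us;

		if (channel->irq_mod_score < irq_adapt_low_thresh) {
			if (channel->irq_moderation_us > step) {
				channel->irq_moderation_us -= step;
				efx->type->push_irq_moderation(channel);
			}
		} else if (channel->irq_mod_score > irq_adapt_high_thresh) {
			if (channel->irq_moderation_us <
			    efx->irq_rx_moderation_us) {
				channel->irq_moderation_us += step;
				efx->type->push_irq_moderation(channel);
			}
		}

		/* restart the sampling window */
		channel->irq_count = 0;
		channel->irq_mod_score = 0;
	}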
1277 struct efx_nic *efx = channel->efx; in efx_poll() local
1283 netif_vdbg(efx, intr, efx->net_dev, in efx_poll()
1293 efx->irq_rx_adaptive && in efx_poll()
1295 efx_update_irq_mod(efx, channel); in efx_poll()
1320 struct efx_nic *efx = channel->efx; in efx_init_napi_channel() local
1322 channel->napi_dev = efx->net_dev; in efx_init_napi_channel()
1326 void efx_init_napi(struct efx_nic *efx) in efx_init_napi() argument
1330 efx_for_each_channel(channel, efx) in efx_init_napi()
1342 void efx_fini_napi(struct efx_nic *efx) in efx_fini_napi() argument
1346 efx_for_each_channel(channel, efx) in efx_fini_napi()