Lines Matching refs: efx
55 static unsigned int count_online_cores(struct efx_nic *efx, bool local_node) in count_online_cores() argument
62 netif_warn(efx, probe, efx->net_dev, in count_online_cores()
70 cpumask_of_pcibus(efx->pci_dev->bus)); in count_online_cores()
83 static unsigned int efx_wanted_parallelism(struct efx_nic *efx) in efx_wanted_parallelism() argument
90 count = count_online_cores(efx, true); in efx_wanted_parallelism()
94 count = count_online_cores(efx, false); in efx_wanted_parallelism()
98 netif_cond_dbg(efx, probe, efx->net_dev, !efx_siena_rss_cpus, in efx_wanted_parallelism()
109 if (efx->type->sriov_wanted) { in efx_wanted_parallelism()
110 if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 && in efx_wanted_parallelism()
111 count > efx_vf_size(efx)) { in efx_wanted_parallelism()
112 netif_warn(efx, probe, efx->net_dev, in efx_wanted_parallelism()
116 count, efx_vf_size(efx)); in efx_wanted_parallelism()
117 count = efx_vf_size(efx); in efx_wanted_parallelism()
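The efx_wanted_parallelism() references above show the driver sizing its RSS channel count: honour the efx_siena_rss_cpus module parameter if set, otherwise count the online cores local to the NIC's PCI bus (falling back to all online cores), and cap the result by efx_vf_size() when SR-IOV is wanted so the RSS spread stays usable by VFs. A minimal standalone sketch of that decision, with stand-in parameters and an illustrative queue cap rather than the real kernel helpers:

    #include <stdio.h>
    #include <stdbool.h>

    /* Stand-ins for values the real driver gets from module parameters,
     * cpumask_of_pcibus()/online-CPU counts and the SR-IOV configuration;
     * MAX_RX_QUEUES is an illustrative cap, not the driver's constant. */
    #define MAX_RX_QUEUES 32U

    static unsigned int wanted_parallelism(unsigned int rss_cpus_param,
                                           unsigned int local_node_cores,
                                           unsigned int online_cores,
                                           bool sriov_wanted,
                                           unsigned int vf_size)
    {
        unsigned int count;

        if (rss_cpus_param)
            count = rss_cpus_param;     /* explicit rss_cpus setting wins */
        else
            count = local_node_cores ? local_node_cores : online_cores;

        if (count > MAX_RX_QUEUES)
            count = MAX_RX_QUEUES;

        /* With SR-IOV active, the RSS spread must fit one VF's queue range. */
        if (sriov_wanted && vf_size > 1 && count > vf_size)
            count = vf_size;

        return count;
    }

    int main(void)
    {
        /* 8 cores on the NIC's node, 16 online overall, 4 queues per VF. */
        printf("%u\n", wanted_parallelism(0, 8, 16, true, 4));  /* prints 4 */
        return 0;
    }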
125 static int efx_allocate_msix_channels(struct efx_nic *efx, in efx_allocate_msix_channels() argument
145 tx_per_ev = EFX_MAX_EVQ_SIZE / EFX_TXQ_MAX_ENT(efx); in efx_allocate_msix_channels()
150 vec_count = pci_msix_vec_count(efx->pci_dev); in efx_allocate_msix_channels()
161 efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED; in efx_allocate_msix_channels()
162 netif_warn(efx, drv, efx->net_dev, in efx_allocate_msix_channels()
165 netif_warn(efx, drv, efx->net_dev, in efx_allocate_msix_channels()
167 } else if (n_channels + n_xdp_tx > efx->max_vis) { in efx_allocate_msix_channels()
168 efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED; in efx_allocate_msix_channels()
169 netif_warn(efx, drv, efx->net_dev, in efx_allocate_msix_channels()
171 n_xdp_tx, n_channels, efx->max_vis); in efx_allocate_msix_channels()
172 netif_warn(efx, drv, efx->net_dev, in efx_allocate_msix_channels()
175 efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_SHARED; in efx_allocate_msix_channels()
176 netif_warn(efx, drv, efx->net_dev, in efx_allocate_msix_channels()
181 netif_warn(efx, drv, efx->net_dev, in efx_allocate_msix_channels()
185 efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_DEDICATED; in efx_allocate_msix_channels()
188 if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_BORROWED) { in efx_allocate_msix_channels()
189 efx->n_xdp_channels = n_xdp_ev; in efx_allocate_msix_channels()
190 efx->xdp_tx_per_channel = tx_per_ev; in efx_allocate_msix_channels()
191 efx->xdp_tx_queue_count = n_xdp_tx; in efx_allocate_msix_channels()
193 netif_dbg(efx, drv, efx->net_dev, in efx_allocate_msix_channels()
197 efx->n_xdp_channels = 0; in efx_allocate_msix_channels()
198 efx->xdp_tx_per_channel = 0; in efx_allocate_msix_channels()
199 efx->xdp_tx_queue_count = n_xdp_tx; in efx_allocate_msix_channels()
203 netif_err(efx, drv, efx->net_dev, in efx_allocate_msix_channels()
206 netif_err(efx, drv, efx->net_dev, in efx_allocate_msix_channels()
213 efx->n_channels = n_channels; in efx_allocate_msix_channels()
216 n_channels -= efx->n_xdp_channels; in efx_allocate_msix_channels()
219 efx->n_tx_channels = in efx_allocate_msix_channels()
221 efx->max_tx_channels); in efx_allocate_msix_channels()
222 efx->tx_channel_offset = in efx_allocate_msix_channels()
223 n_channels - efx->n_tx_channels; in efx_allocate_msix_channels()
224 efx->n_rx_channels = in efx_allocate_msix_channels()
226 efx->n_tx_channels, 1U); in efx_allocate_msix_channels()
228 efx->n_tx_channels = min(n_channels, efx->max_tx_channels); in efx_allocate_msix_channels()
229 efx->tx_channel_offset = 0; in efx_allocate_msix_channels()
230 efx->n_rx_channels = n_channels; in efx_allocate_msix_channels()
233 efx->n_rx_channels = min(efx->n_rx_channels, parallelism); in efx_allocate_msix_channels()
234 efx->n_tx_channels = min(efx->n_tx_channels, parallelism); in efx_allocate_msix_channels()
236 efx->xdp_channel_offset = n_channels; in efx_allocate_msix_channels()
238 netif_dbg(efx, drv, efx->net_dev, in efx_allocate_msix_channels()
240 efx->n_rx_channels); in efx_allocate_msix_channels()
242 return efx->n_channels; in efx_allocate_msix_channels()
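The efx_allocate_msix_channels() references above budget MSI-X vectors and VIs between the normal channels and XDP TX: if everything fits, XDP gets dedicated event and TX queues (EFX_XDP_TX_QUEUES_DEDICATED); if event queues run short, CPUs share XDP event queues (EFX_XDP_TX_QUEUES_SHARED); and if there are no spare vectors or VIs at all, XDP borrows the regular TX queues (EFX_XDP_TX_QUEUES_BORROWED). A simplified, self-contained sketch of just that three-way decision, with illustrative limits instead of pci_msix_vec_count() and max_vis values read from the device:

    #include <stdio.h>

    /* Simplified mirror of the driver's EFX_XDP_TX_QUEUES_* modes. */
    enum xdp_txq_mode { XDP_DEDICATED, XDP_SHARED, XDP_BORROWED };

    /* n_channels: normal (RX/TX/extra) channels wanted
     * n_xdp_tx:   one XDP TX queue per possible CPU
     * n_xdp_ev:   XDP event queues needed to host those TX queues
     * max_channels, max_vis: illustrative limits standing in for the MSI-X
     * vector count and the NIC's VI table size. */
    static enum xdp_txq_mode pick_xdp_mode(unsigned int n_channels,
                                           unsigned int n_xdp_tx,
                                           unsigned int n_xdp_ev,
                                           unsigned int max_channels,
                                           unsigned int max_vis)
    {
        if (n_channels >= max_channels)
            return XDP_BORROWED;   /* no spare vectors: XDP reuses normal TX queues */
        if (n_channels + n_xdp_tx > max_vis)
            return XDP_BORROWED;   /* not enough VIs for dedicated XDP TX queues */
        if (n_channels + n_xdp_ev > max_channels)
            return XDP_SHARED;     /* fewer XDP event queues, CPUs share them */
        return XDP_DEDICATED;      /* a full set of dedicated XDP queues fits */
    }

    int main(void)
    {
        /* 8 normal channels, 16 CPUs of XDP TX over 4 event queues. */
        printf("%d\n", pick_xdp_mode(8, 16, 4, 16, 32));  /* 0 = XDP_DEDICATED */
        return 0;
    }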
248 int efx_siena_probe_interrupts(struct efx_nic *efx) in efx_siena_probe_interrupts() argument
256 if (efx->extra_channel_type[i]) in efx_siena_probe_interrupts()
259 if (efx->interrupt_mode == EFX_INT_MODE_MSIX) { in efx_siena_probe_interrupts()
260 unsigned int parallelism = efx_wanted_parallelism(efx); in efx_siena_probe_interrupts()
264 rc = efx_allocate_msix_channels(efx, efx->max_channels, in efx_siena_probe_interrupts()
270 rc = pci_enable_msix_range(efx->pci_dev, xentries, 1, in efx_siena_probe_interrupts()
275 netif_err(efx, drv, efx->net_dev, in efx_siena_probe_interrupts()
277 if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI) in efx_siena_probe_interrupts()
278 efx->interrupt_mode = EFX_INT_MODE_MSI; in efx_siena_probe_interrupts()
282 netif_err(efx, drv, efx->net_dev, in efx_siena_probe_interrupts()
285 netif_err(efx, drv, efx->net_dev, in efx_siena_probe_interrupts()
291 for (i = 0; i < efx->n_channels; i++) in efx_siena_probe_interrupts()
292 efx_get_channel(efx, i)->irq = in efx_siena_probe_interrupts()
298 if (efx->interrupt_mode == EFX_INT_MODE_MSI) { in efx_siena_probe_interrupts()
299 efx->n_channels = 1; in efx_siena_probe_interrupts()
300 efx->n_rx_channels = 1; in efx_siena_probe_interrupts()
301 efx->n_tx_channels = 1; in efx_siena_probe_interrupts()
302 efx->tx_channel_offset = 0; in efx_siena_probe_interrupts()
303 efx->n_xdp_channels = 0; in efx_siena_probe_interrupts()
304 efx->xdp_channel_offset = efx->n_channels; in efx_siena_probe_interrupts()
305 rc = pci_enable_msi(efx->pci_dev); in efx_siena_probe_interrupts()
307 efx_get_channel(efx, 0)->irq = efx->pci_dev->irq; in efx_siena_probe_interrupts()
309 netif_err(efx, drv, efx->net_dev, in efx_siena_probe_interrupts()
311 if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY) in efx_siena_probe_interrupts()
312 efx->interrupt_mode = EFX_INT_MODE_LEGACY; in efx_siena_probe_interrupts()
319 if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) { in efx_siena_probe_interrupts()
320 efx->n_channels = 1 + (efx_siena_separate_tx_channels ? 1 : 0); in efx_siena_probe_interrupts()
321 efx->n_rx_channels = 1; in efx_siena_probe_interrupts()
322 efx->n_tx_channels = 1; in efx_siena_probe_interrupts()
323 efx->tx_channel_offset = efx_siena_separate_tx_channels ? 1 : 0; in efx_siena_probe_interrupts()
324 efx->n_xdp_channels = 0; in efx_siena_probe_interrupts()
325 efx->xdp_channel_offset = efx->n_channels; in efx_siena_probe_interrupts()
326 efx->legacy_irq = efx->pci_dev->irq; in efx_siena_probe_interrupts()
330 efx->n_extra_tx_channels = 0; in efx_siena_probe_interrupts()
331 j = efx->xdp_channel_offset; in efx_siena_probe_interrupts()
333 if (!efx->extra_channel_type[i]) in efx_siena_probe_interrupts()
335 if (j <= efx->tx_channel_offset + efx->n_tx_channels) { in efx_siena_probe_interrupts()
336 efx->extra_channel_type[i]->handle_no_channel(efx); in efx_siena_probe_interrupts()
339 efx_get_channel(efx, j)->type = in efx_siena_probe_interrupts()
340 efx->extra_channel_type[i]; in efx_siena_probe_interrupts()
341 if (efx_channel_has_tx_queues(efx_get_channel(efx, j))) in efx_siena_probe_interrupts()
342 efx->n_extra_tx_channels++; in efx_siena_probe_interrupts()
346 rss_spread = efx->n_rx_channels; in efx_siena_probe_interrupts()
349 if (efx->type->sriov_wanted) { in efx_siena_probe_interrupts()
350 efx->rss_spread = ((rss_spread > 1 || in efx_siena_probe_interrupts()
351 !efx->type->sriov_wanted(efx)) ? in efx_siena_probe_interrupts()
352 rss_spread : efx_vf_size(efx)); in efx_siena_probe_interrupts()
356 efx->rss_spread = rss_spread; in efx_siena_probe_interrupts()
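efx_siena_probe_interrupts() above degrades through the interrupt modes: try MSI-X via pci_enable_msix_range(), fall back to MSI if the NIC type's min_interrupt_mode allows it, then to legacy INTx. A compact sketch of that cascade, using stub enable functions and a simplified mode enum in place of the real PCI calls:

    #include <stdio.h>

    /* Same ordering as the driver's enum: MSI-X preferred, legacy INTx last. */
    enum int_mode { INT_MODE_MSIX, INT_MODE_MSI, INT_MODE_LEGACY };

    /* Hypothetical stubs in place of pci_enable_msix_range()/pci_enable_msi();
     * a negative return means the allocation failed. */
    static int try_enable_msix(void) { return -1; }   /* pretend MSI-X fails */
    static int try_enable_msi(void)  { return 0; }

    /* Start at the requested mode and degrade toward legacy INTx, but never
     * below the NIC type's minimum supported mode. */
    static enum int_mode probe_interrupts(enum int_mode requested,
                                          enum int_mode minimum)
    {
        enum int_mode mode = requested;

        if (mode == INT_MODE_MSIX && try_enable_msix() < 0 &&
            minimum >= INT_MODE_MSI)
            mode = INT_MODE_MSI;

        if (mode == INT_MODE_MSI && try_enable_msi() < 0 &&
            minimum >= INT_MODE_LEGACY)
            mode = INT_MODE_LEGACY;

        return mode;
    }

    int main(void)
    {
        /* MSI-X fails, MSI succeeds: ends up in MSI mode (prints 1). */
        printf("%d\n", probe_interrupts(INT_MODE_MSIX, INT_MODE_LEGACY));
        return 0;
    }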
362 void efx_siena_set_interrupt_affinity(struct efx_nic *efx) in efx_siena_set_interrupt_affinity() argument
364 const struct cpumask *numa_mask = cpumask_of_pcibus(efx->pci_dev->bus); in efx_siena_set_interrupt_affinity()
373 efx_for_each_channel(channel, efx) { in efx_siena_set_interrupt_affinity()
381 void efx_siena_clear_interrupt_affinity(struct efx_nic *efx) in efx_siena_clear_interrupt_affinity() argument
385 efx_for_each_channel(channel, efx) in efx_siena_clear_interrupt_affinity()
390 efx_siena_set_interrupt_affinity(struct efx_nic *efx __always_unused) in efx_siena_set_interrupt_affinity()
395 efx_siena_clear_interrupt_affinity(struct efx_nic *efx __always_unused) in efx_siena_clear_interrupt_affinity()
400 void efx_siena_remove_interrupts(struct efx_nic *efx) in efx_siena_remove_interrupts() argument
405 efx_for_each_channel(channel, efx) in efx_siena_remove_interrupts()
407 pci_disable_msi(efx->pci_dev); in efx_siena_remove_interrupts()
408 pci_disable_msix(efx->pci_dev); in efx_siena_remove_interrupts()
411 efx->legacy_irq = 0; in efx_siena_remove_interrupts()
425 struct efx_nic *efx = channel->efx; in efx_probe_eventq() local
428 netif_dbg(efx, probe, efx->net_dev, in efx_probe_eventq()
434 entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128); in efx_probe_eventq()
444 struct efx_nic *efx = channel->efx; in efx_init_eventq() local
449 netif_dbg(efx, drv, efx->net_dev, in efx_init_eventq()
454 efx->type->push_irq_moderation(channel); in efx_init_eventq()
464 netif_dbg(channel->efx, ifup, channel->efx->net_dev, in efx_siena_start_eventq()
490 netif_dbg(channel->efx, drv, channel->efx->net_dev, in efx_fini_eventq()
499 netif_dbg(channel->efx, drv, channel->efx->net_dev, in efx_remove_eventq()
530 static struct efx_channel *efx_alloc_channel(struct efx_nic *efx, int i) in efx_alloc_channel() argument
541 channel->efx = efx; in efx_alloc_channel()
547 tx_queue->efx = efx; in efx_alloc_channel()
558 rx_queue->efx = efx; in efx_alloc_channel()
564 int efx_siena_init_channels(struct efx_nic *efx) in efx_siena_init_channels() argument
569 efx->channel[i] = efx_alloc_channel(efx, i); in efx_siena_init_channels()
570 if (!efx->channel[i]) in efx_siena_init_channels()
572 efx->msi_context[i].efx = efx; in efx_siena_init_channels()
573 efx->msi_context[i].index = i; in efx_siena_init_channels()
577 efx->interrupt_mode = min(efx->type->min_interrupt_mode, in efx_siena_init_channels()
580 efx->max_channels = EFX_MAX_CHANNELS; in efx_siena_init_channels()
581 efx->max_tx_channels = EFX_MAX_CHANNELS; in efx_siena_init_channels()
586 void efx_siena_fini_channels(struct efx_nic *efx) in efx_siena_fini_channels() argument
591 if (efx->channel[i]) { in efx_siena_fini_channels()
592 kfree(efx->channel[i]); in efx_siena_fini_channels()
593 efx->channel[i] = NULL; in efx_siena_fini_channels()
646 netif_dbg(channel->efx, probe, channel->efx->net_dev, in efx_probe_channel()
681 struct efx_nic *efx = channel->efx; in efx_get_channel_name() local
687 if (number >= efx->xdp_channel_offset && in efx_get_channel_name()
688 !WARN_ON_ONCE(!efx->n_xdp_channels)) { in efx_get_channel_name()
690 number -= efx->xdp_channel_offset; in efx_get_channel_name()
691 } else if (efx->tx_channel_offset == 0) { in efx_get_channel_name()
693 } else if (number < efx->tx_channel_offset) { in efx_get_channel_name()
697 number -= efx->tx_channel_offset; in efx_get_channel_name()
699 snprintf(buf, len, "%s%s-%d", efx->name, type, number); in efx_get_channel_name()
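efx_get_channel_name() above picks a suffix from the channel's position relative to tx_channel_offset and xdp_channel_offset. A standalone sketch of that naming scheme, with the offsets passed in as plain numbers and the -rx/-tx/-xdp suffixes following the driver's convention:

    #include <stdio.h>

    static void channel_name(char *buf, size_t len, const char *nic_name,
                             unsigned int channel, unsigned int tx_offset,
                             unsigned int xdp_offset, unsigned int n_xdp)
    {
        const char *type = "";
        unsigned int number = channel;

        if (n_xdp && channel >= xdp_offset) {
            type = "-xdp";                 /* XDP TX channel */
            number -= xdp_offset;
        } else if (tx_offset == 0) {
            type = "";                     /* combined RX+TX channels */
        } else if (channel < tx_offset) {
            type = "-rx";                  /* RX-only channel */
        } else {
            type = "-tx";                  /* TX-only channel */
            number -= tx_offset;
        }
        snprintf(buf, len, "%s%s-%u", nic_name, type, number);
    }

    int main(void)
    {
        char buf[32];

        /* channel 5 with 4 RX-only channels and XDP starting at channel 8 */
        channel_name(buf, sizeof(buf), "eth2", 5, 4, 8, 2);
        printf("%s\n", buf);   /* "eth2-tx-1" */
        return 0;
    }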
702 void efx_siena_set_channel_names(struct efx_nic *efx) in efx_siena_set_channel_names() argument
706 efx_for_each_channel(channel, efx) in efx_siena_set_channel_names()
708 efx->msi_context[channel->channel].name, in efx_siena_set_channel_names()
709 sizeof(efx->msi_context[0].name)); in efx_siena_set_channel_names()
712 int efx_siena_probe_channels(struct efx_nic *efx) in efx_siena_probe_channels() argument
718 efx->next_buffer_table = 0; in efx_siena_probe_channels()
725 efx_for_each_channel_rev(channel, efx) { in efx_siena_probe_channels()
728 netif_err(efx, probe, efx->net_dev, in efx_siena_probe_channels()
734 efx_siena_set_channel_names(efx); in efx_siena_probe_channels()
739 efx_siena_remove_channels(efx); in efx_siena_probe_channels()
748 netif_dbg(channel->efx, drv, channel->efx->net_dev, in efx_siena_remove_channel()
759 void efx_siena_remove_channels(struct efx_nic *efx) in efx_siena_remove_channels() argument
763 efx_for_each_channel(channel, efx) in efx_siena_remove_channels()
766 kfree(efx->xdp_tx_queues); in efx_siena_remove_channels()
769 static int efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number, in efx_set_xdp_tx_queue() argument
772 if (xdp_queue_number >= efx->xdp_tx_queue_count) in efx_set_xdp_tx_queue()
775 netif_dbg(efx, drv, efx->net_dev, in efx_set_xdp_tx_queue()
779 efx->xdp_tx_queues[xdp_queue_number] = tx_queue; in efx_set_xdp_tx_queue()
783 static void efx_set_xdp_channels(struct efx_nic *efx) in efx_set_xdp_channels() argument
795 efx_for_each_channel(channel, efx) { in efx_set_xdp_channels()
796 if (channel->channel < efx->tx_channel_offset) in efx_set_xdp_channels()
802 rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, in efx_set_xdp_channels()
810 netif_dbg(efx, drv, efx->net_dev, in efx_set_xdp_channels()
821 if (efx->xdp_txq_queues_mode == in efx_set_xdp_channels()
824 rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, in efx_set_xdp_channels()
831 WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED && in efx_set_xdp_channels()
832 xdp_queue_number != efx->xdp_tx_queue_count); in efx_set_xdp_channels()
833 WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED && in efx_set_xdp_channels()
834 xdp_queue_number > efx->xdp_tx_queue_count); in efx_set_xdp_channels()
840 while (xdp_queue_number < efx->xdp_tx_queue_count) { in efx_set_xdp_channels()
841 tx_queue = efx->xdp_tx_queues[next_queue++]; in efx_set_xdp_channels()
842 rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue); in efx_set_xdp_channels()
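efx_set_xdp_channels() above registers each XDP-capable TX queue into efx->xdp_tx_queues[] via efx_set_xdp_tx_queue() and, when there are more table slots than dedicated queues, reuses already-registered queues for the remaining slots. A toy sketch of that fill-then-reuse pattern, with integer queue ids instead of struct efx_tx_queue pointers and made-up sizes:

    #include <stdio.h>

    static void fill_xdp_tx_queues(int table[], unsigned int n_slots,
                                   unsigned int n_queues)
    {
        unsigned int slot, next = 0;

        /* One table slot per available queue first... */
        for (slot = 0; slot < n_slots && slot < n_queues; slot++)
            table[slot] = (int)slot;

        /* ...then leftover slots reuse existing queues in order
         * (the shared and borrowed cases). */
        for (; slot < n_slots; slot++)
            table[slot] = table[next++ % n_queues];
    }

    int main(void)
    {
        int table[6];
        unsigned int i;

        fill_xdp_tx_queues(table, 6, 4);
        for (i = 0; i < 6; i++)
            printf("xdp queue slot %u -> hw queue %d\n", i, table[i]);
        return 0;
    }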
848 static int efx_soft_enable_interrupts(struct efx_nic *efx);
849 static void efx_soft_disable_interrupts(struct efx_nic *efx);
853 int efx_siena_realloc_channels(struct efx_nic *efx, u32 rxq_entries, in efx_siena_realloc_channels() argument
861 rc = efx_check_disabled(efx); in efx_siena_realloc_channels()
868 efx_for_each_channel(channel, efx) { in efx_siena_realloc_channels()
887 efx_device_detach_sync(efx); in efx_siena_realloc_channels()
888 efx_siena_stop_all(efx); in efx_siena_realloc_channels()
889 efx_soft_disable_interrupts(efx); in efx_siena_realloc_channels()
893 for (i = 0; i < efx->n_channels; i++) { in efx_siena_realloc_channels()
894 channel = efx->channel[i]; in efx_siena_realloc_channels()
905 old_rxq_entries = efx->rxq_entries; in efx_siena_realloc_channels()
906 old_txq_entries = efx->txq_entries; in efx_siena_realloc_channels()
907 efx->rxq_entries = rxq_entries; in efx_siena_realloc_channels()
908 efx->txq_entries = txq_entries; in efx_siena_realloc_channels()
909 for (i = 0; i < efx->n_channels; i++) in efx_siena_realloc_channels()
910 swap(efx->channel[i], other_channel[i]); in efx_siena_realloc_channels()
913 efx->next_buffer_table = next_buffer_table; in efx_siena_realloc_channels()
915 for (i = 0; i < efx->n_channels; i++) { in efx_siena_realloc_channels()
916 channel = efx->channel[i]; in efx_siena_realloc_channels()
922 efx_init_napi_channel(efx->channel[i]); in efx_siena_realloc_channels()
925 efx_set_xdp_channels(efx); in efx_siena_realloc_channels()
928 for (i = 0; i < efx->n_channels; i++) { in efx_siena_realloc_channels()
937 rc2 = efx_soft_enable_interrupts(efx); in efx_siena_realloc_channels()
940 netif_err(efx, drv, efx->net_dev, in efx_siena_realloc_channels()
942 efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE); in efx_siena_realloc_channels()
944 efx_siena_start_all(efx); in efx_siena_realloc_channels()
945 efx_device_attach_if_not_resetting(efx); in efx_siena_realloc_channels()
951 efx->rxq_entries = old_rxq_entries; in efx_siena_realloc_channels()
952 efx->txq_entries = old_txq_entries; in efx_siena_realloc_channels()
953 for (i = 0; i < efx->n_channels; i++) in efx_siena_realloc_channels()
954 swap(efx->channel[i], other_channel[i]); in efx_siena_realloc_channels()
958 int efx_siena_set_channels(struct efx_nic *efx) in efx_siena_set_channels() argument
963 if (efx->xdp_tx_queue_count) { in efx_siena_set_channels()
964 EFX_WARN_ON_PARANOID(efx->xdp_tx_queues); in efx_siena_set_channels()
967 efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count, in efx_siena_set_channels()
968 sizeof(*efx->xdp_tx_queues), in efx_siena_set_channels()
970 if (!efx->xdp_tx_queues) in efx_siena_set_channels()
974 efx_for_each_channel(channel, efx) { in efx_siena_set_channels()
975 if (channel->channel < efx->n_rx_channels) in efx_siena_set_channels()
981 efx_set_xdp_channels(efx); in efx_siena_set_channels()
983 rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); in efx_siena_set_channels()
986 return netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels); in efx_siena_set_channels()
991 return channel->channel - channel->efx->tx_channel_offset < in efx_default_channel_want_txqs()
992 channel->efx->n_tx_channels; in efx_default_channel_want_txqs()
999 static int efx_soft_enable_interrupts(struct efx_nic *efx) in efx_soft_enable_interrupts() argument
1004 BUG_ON(efx->state == STATE_DISABLED); in efx_soft_enable_interrupts()
1006 efx->irq_soft_enabled = true; in efx_soft_enable_interrupts()
1009 efx_for_each_channel(channel, efx) { in efx_soft_enable_interrupts()
1018 efx_siena_mcdi_mode_event(efx); in efx_soft_enable_interrupts()
1023 efx_for_each_channel(channel, efx) { in efx_soft_enable_interrupts()
1034 static void efx_soft_disable_interrupts(struct efx_nic *efx) in efx_soft_disable_interrupts() argument
1038 if (efx->state == STATE_DISABLED) in efx_soft_disable_interrupts()
1041 efx_siena_mcdi_mode_poll(efx); in efx_soft_disable_interrupts()
1043 efx->irq_soft_enabled = false; in efx_soft_disable_interrupts()
1046 if (efx->legacy_irq) in efx_soft_disable_interrupts()
1047 synchronize_irq(efx->legacy_irq); in efx_soft_disable_interrupts()
1049 efx_for_each_channel(channel, efx) { in efx_soft_disable_interrupts()
1059 efx_siena_mcdi_flush_async(efx); in efx_soft_disable_interrupts()
1062 int efx_siena_enable_interrupts(struct efx_nic *efx) in efx_siena_enable_interrupts() argument
1068 BUG_ON(efx->state == STATE_DISABLED); in efx_siena_enable_interrupts()
1070 if (efx->eeh_disabled_legacy_irq) { in efx_siena_enable_interrupts()
1071 enable_irq(efx->legacy_irq); in efx_siena_enable_interrupts()
1072 efx->eeh_disabled_legacy_irq = false; in efx_siena_enable_interrupts()
1075 efx->type->irq_enable_master(efx); in efx_siena_enable_interrupts()
1077 efx_for_each_channel(channel, efx) { in efx_siena_enable_interrupts()
1085 rc = efx_soft_enable_interrupts(efx); in efx_siena_enable_interrupts()
1093 efx_for_each_channel(channel, efx) { in efx_siena_enable_interrupts()
1100 efx->type->irq_disable_non_ev(efx); in efx_siena_enable_interrupts()
1105 void efx_siena_disable_interrupts(struct efx_nic *efx) in efx_siena_disable_interrupts() argument
1109 efx_soft_disable_interrupts(efx); in efx_siena_disable_interrupts()
1111 efx_for_each_channel(channel, efx) { in efx_siena_disable_interrupts()
1116 efx->type->irq_disable_non_ev(efx); in efx_siena_disable_interrupts()
1119 void efx_siena_start_channels(struct efx_nic *efx) in efx_siena_start_channels() argument
1125 efx_for_each_channel_rev(channel, efx) { in efx_siena_start_channels()
1128 atomic_inc(&efx->active_queues); in efx_siena_start_channels()
1133 atomic_inc(&efx->active_queues); in efx_siena_start_channels()
1143 void efx_siena_stop_channels(struct efx_nic *efx) in efx_siena_stop_channels() argument
1151 efx_for_each_channel(channel, efx) { in efx_siena_stop_channels()
1156 efx_for_each_channel(channel, efx) { in efx_siena_stop_channels()
1169 if (efx->type->fini_dmaq) in efx_siena_stop_channels()
1170 rc = efx->type->fini_dmaq(efx); in efx_siena_stop_channels()
1173 netif_err(efx, drv, efx->net_dev, "failed to flush queues\n"); in efx_siena_stop_channels()
1175 netif_dbg(efx, drv, efx->net_dev, in efx_siena_stop_channels()
1179 efx_for_each_channel(channel, efx) { in efx_siena_stop_channels()
1244 static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel) in efx_update_irq_mod() argument
1246 int step = efx->irq_mod_step_us; in efx_update_irq_mod()
1251 efx->type->push_irq_moderation(channel); in efx_update_irq_mod()
1255 efx->irq_rx_moderation_us) { in efx_update_irq_mod()
1257 efx->type->push_irq_moderation(channel); in efx_update_irq_mod()
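efx_update_irq_mod() above adapts interrupt moderation in steps of irq_mod_step_us: a low event score shortens the moderation interval, a high score lengthens it up to irq_rx_moderation_us, and the new value is applied with push_irq_moderation(). A small sketch of that stepping rule, with illustrative thresholds standing in for the driver's adaptive-IRQ parameters:

    #include <stdio.h>

    #define ADAPT_LOW_THRESH   8000U
    #define ADAPT_HIGH_THRESH 16000U

    static unsigned int update_irq_moderation(unsigned int current_us,
                                              unsigned int score,
                                              unsigned int step_us,
                                              unsigned int max_us)
    {
        if (score < ADAPT_LOW_THRESH && current_us > step_us)
            current_us -= step_us;       /* few events per IRQ: interrupt sooner */
        else if (score > ADAPT_HIGH_THRESH && current_us < max_us)
            current_us += step_us;       /* busy: batch more events per IRQ */
        return current_us;
    }

    int main(void)
    {
        /* current 30us, busy score, 10us step, 60us ceiling */
        printf("%u\n", update_irq_moderation(30, 20000, 10, 60));  /* prints 40 */
        return 0;
    }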
1274 struct efx_nic *efx = channel->efx; in efx_poll() local
1280 netif_vdbg(efx, intr, efx->net_dev, in efx_poll()
1290 efx->irq_rx_adaptive && in efx_poll()
1292 efx_update_irq_mod(efx, channel); in efx_poll()
1317 struct efx_nic *efx = channel->efx; in efx_init_napi_channel() local
1319 channel->napi_dev = efx->net_dev; in efx_init_napi_channel()
1323 void efx_siena_init_napi(struct efx_nic *efx) in efx_siena_init_napi() argument
1327 efx_for_each_channel(channel, efx) in efx_siena_init_napi()
1339 void efx_siena_fini_napi(struct efx_nic *efx) in efx_siena_fini_napi() argument
1343 efx_for_each_channel(channel, efx) in efx_siena_fini_napi()