/linux-6.3-rc2/drivers/net/ethernet/sfc/siena/
tx_common.c
    35  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);   in efx_siena_probe_tx_queue()
    38  tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),   in efx_siena_probe_tx_queue()
    43  tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),   in efx_siena_probe_tx_queue()
    55  tx_queue->channel->tx_queue_by_type[tx_queue->type] = tx_queue;   in efx_siena_probe_tx_queue()
   104  netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,   in efx_siena_remove_tx_queue()
   160  tx_queue->queue, tx_queue->read_count);   in efx_dequeue_buffer()
   173  netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,   in efx_siena_fini_tx_queue()
   180  while (tx_queue->read_count != tx_queue->write_count) {   in efx_siena_fini_tx_queue()
   183  buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];   in efx_siena_fini_tx_queue()
   206  read_ptr = tx_queue->read_count & tx_queue->ptr_mask;   in efx_dequeue_buffers()
  [all …]

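These drivers index their rings with the idiom visible at lines 183 and 206 above: read_count and write_count are free-running counters that are never reset, and ptr_mask (ring size minus one, with the size a power of two) turns a counter into a slot index. A minimal standalone model of that idiom, using illustrative names and sizes rather than the driver's own types:

/* Free-running-counter ring: counters only ever increment, and the
 * power-of-two mask recovers the slot. Names and sizes are illustrative. */
#include <assert.h>
#include <stdio.h>

#define RING_SIZE 8u                /* must be a power of two */
#define PTR_MASK  (RING_SIZE - 1u)

struct ring {
    unsigned int read_count;        /* free-running; wraps naturally */
    unsigned int write_count;
    int buffer[RING_SIZE];
};

static int ring_is_empty(const struct ring *r)
{
    return r->read_count == r->write_count;
}

static void ring_push(struct ring *r, int v)
{
    /* Wrap-safe fullness check via unsigned subtraction. */
    assert(r->write_count - r->read_count < RING_SIZE);
    r->buffer[r->write_count++ & PTR_MASK] = v;
}

static int ring_pop(struct ring *r)
{
    assert(!ring_is_empty(r));
    return r->buffer[r->read_count++ & PTR_MASK];
}

int main(void)
{
    struct ring r = { 0 };

    for (int i = 0; i < 5; i++)
        ring_push(&r, i);
    while (!ring_is_empty(&r))
        printf("%d\n", ring_pop(&r));
    return 0;
}
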
tx.c
   106  ++tx_queue->insert_count;   in efx_enqueue_skb_copy()
   160  tx_queue->tso_fallbacks++;   in __efx_siena_enqueue_skb()
   168  tx_queue->cb_packets++;   in __efx_siena_enqueue_skb()
   178  tx_queue->xmit_pending = true;   in __efx_siena_enqueue_skb()
   184  tx_queue->tx_packets++;   in __efx_siena_enqueue_skb()
   212  struct efx_tx_queue *tx_queue;   in efx_siena_xdp_tx_buffers() local
   230  if (unlikely(!tx_queue))   in efx_siena_xdp_tx_buffers()
   233  if (!tx_queue->initialised)   in efx_siena_xdp_tx_buffers()
   252  tx_queue->read_count - tx_queue->insert_count;   in efx_siena_xdp_tx_buffers()
   279  tx_queue->tx_packets++;   in efx_siena_xdp_tx_buffers()
  [all …]

nic_common.h
    60  efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)   in efx_tx_desc() argument
    62  return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;   in efx_tx_desc()
    89  bool was_empty = efx_nic_tx_is_empty(tx_queue, write_count);   in efx_nic_may_push_tx_desc()
    91  tx_queue->empty_read_count = 0;   in efx_nic_may_push_tx_desc()
    92  return was_empty && tx_queue->write_count - write_count == 1;   in efx_nic_may_push_tx_desc()
   118  static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)   in efx_nic_probe_tx() argument
   120  return tx_queue->efx->type->tx_probe(tx_queue);   in efx_nic_probe_tx()
   124  tx_queue->efx->type->tx_init(tx_queue);   in efx_nic_init_tx()
   128  if (tx_queue->efx->type->tx_remove)   in efx_nic_remove_tx()
   129  tx_queue->efx->type->tx_remove(tx_queue);   in efx_nic_remove_tx()
  [all …]

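The efx_nic_may_push_tx_desc() matches (lines 89-92) capture the descriptor "push" heuristic: the descriptor that wakes an idle queue may be written straight through the doorbell, but only when exactly one descriptor was added. A hedged sketch of that decision, with simplified stand-in types and the emptiness test reduced to a plain counter comparison (the driver's empty_read_count validity machinery is omitted here):

/* Simplified model of the single-descriptor "push" check. */
#include <stdbool.h>
#include <stdio.h>

struct txq_model {
    unsigned int read_count;
    unsigned int write_count;
    unsigned int empty_read_count;
};

static bool txq_is_empty(const struct txq_model *q, unsigned int write_count)
{
    /* Empty when everything written so far has been completed. */
    return q->read_count == write_count;
}

static bool txq_may_push(struct txq_model *q, unsigned int old_write_count)
{
    bool was_empty = txq_is_empty(q, old_write_count);

    q->empty_read_count = 0;
    /* Push only the single descriptor that woke an idle queue. */
    return was_empty && q->write_count - old_write_count == 1;
}

int main(void)
{
    struct txq_model q = { .read_count = 4, .write_count = 5 };

    printf("may push: %d\n", txq_may_push(&q, 4)); /* 1: was empty, one added */
    return 0;
}
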
tx_common.h
    14  int efx_siena_probe_tx_queue(struct efx_tx_queue *tx_queue);
    15  void efx_siena_init_tx_queue(struct efx_tx_queue *tx_queue);
    16  void efx_siena_fini_tx_queue(struct efx_tx_queue *tx_queue);
    17  void efx_siena_remove_tx_queue(struct efx_tx_queue *tx_queue);
    24  void efx_siena_xmit_done_check_empty(struct efx_tx_queue *tx_queue);
    25  void efx_siena_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
    27  void efx_siena_enqueue_unwind(struct efx_tx_queue *tx_queue,
    30  struct efx_tx_buffer *efx_siena_tx_map_chunk(struct efx_tx_queue *tx_queue,
    32  int efx_siena_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
    36  int efx_siena_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb);

efx_channels.c
   546  tx_queue = &channel->tx_queue[j];   in efx_alloc_channel()
   547  tx_queue->efx = efx;   in efx_alloc_channel()
   548  tx_queue->queue = -1;   in efx_alloc_channel()
   549  tx_queue->label = j;   in efx_alloc_channel()
   621  tx_queue = &channel->tx_queue[j];   in efx_copy_channel()
   622  if (tx_queue->channel)   in efx_copy_channel()
   626  memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));   in efx_copy_channel()
   777  tx_queue->channel->channel, tx_queue->label,   in efx_set_xdp_tx_queue()
   803  tx_queue);   in efx_set_xdp_channels()
   823  tx_queue = &channel->tx_queue[0];   in efx_set_xdp_channels()
  [all …]

farch.c
   288  write_ptr = tx_queue->write_count & tx_queue->ptr_mask;   in efx_farch_notify_tx_desc()
   304  write_ptr = tx_queue->write_count & tx_queue->ptr_mask;   in efx_farch_push_tx_desc()
   325  if (unlikely(tx_queue->write_count == tx_queue->insert_count))   in efx_farch_tx_write()
   329  write_ptr = tx_queue->write_count & tx_queue->ptr_mask;   in efx_farch_tx_write()
   344  } while (tx_queue->write_count != tx_queue->insert_count);   in efx_farch_tx_write()
   352  ++tx_queue->pushes;   in efx_farch_tx_write()
   411  tx_queue->queue);   in efx_farch_tx_init()
   454  efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);   in efx_farch_tx_remove()
   840  tx_queue = channel->tx_queue +   in efx_farch_handle_tx_event()
   846  tx_queue = channel->tx_queue +   in efx_farch_handle_tx_event()
  [all …]

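The efx_farch_tx_write() matches (lines 325-352) outline the hand-off to hardware: nothing to do if write_count has already caught up with insert_count, otherwise fill one hardware descriptor per outstanding slot and ring the doorbell once at the end. A standalone model of that loop, with a printf standing in for the MMIO doorbell write and illustrative names throughout:

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8u
#define PTR_MASK  (RING_SIZE - 1u)

struct txq_model {
    unsigned int insert_count;  /* advanced by the enqueue path */
    unsigned int write_count;   /* advanced here, toward insert_count */
    uint64_t desc[RING_SIZE];   /* stand-in for hardware descriptors */
    unsigned int pushes;
};

static void doorbell(unsigned int write_ptr)
{
    /* Stand-in for the MMIO write that tells the NIC about new work. */
    printf("doorbell: write_ptr=%u\n", write_ptr);
}

static void txq_write(struct txq_model *q)
{
    unsigned int write_ptr;

    if (q->write_count == q->insert_count)
        return;                 /* nothing new to hand to hardware */

    do {
        write_ptr = q->write_count & PTR_MASK;
        q->desc[write_ptr] = 0xD5C0 + write_ptr; /* fake descriptor */
        ++q->write_count;
    } while (q->write_count != q->insert_count);

    doorbell(q->write_count & PTR_MASK);        /* one notify per batch */
    ++q->pushes;
}

int main(void)
{
    struct txq_model q = { .insert_count = 3 };

    txq_write(&q);
    return 0;
}
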
net_driver.h
  1359  int (*tx_probe)(struct efx_tx_queue *tx_queue);
  1360  void (*tx_init)(struct efx_tx_queue *tx_queue);
  1562  for (_tx_queue = (_channel)->tx_queue; \
  1563  _tx_queue < (_channel)->tx_queue + \
  1651  struct efx_tx_queue *tx_queue;   in efx_channel_tx_fill_level() local
  1656  tx_queue->insert_count - tx_queue->read_count);   in efx_channel_tx_fill_level()
  1665  struct efx_tx_queue *tx_queue;   in efx_channel_tx_old_fill_level() local
  1670  tx_queue->insert_count - tx_queue->old_read_count);   in efx_channel_tx_old_fill_level()
  1691  return tx_queue->insert_count & tx_queue->ptr_mask;   in efx_tx_queue_get_insert_index()
  1698  return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)];   in __efx_tx_queue_get_insert_buffer()
  [all …]

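The fill-level helpers matched above (lines 1656 and 1670) depend on unsigned wraparound: because insert_count and read_count are free-running unsigned counters, insert_count - read_count gives the number of in-flight descriptors even after a counter wraps past zero. A tiny demonstration of why the subtraction survives the wrap:

#include <stdio.h>

int main(void)
{
    unsigned int read_count   = 0xfffffffeu; /* about to wrap */
    unsigned int insert_count = 0x00000003u; /* already wrapped */

    /* Modular arithmetic: 0x3 - 0xfffffffe == 5 (mod 2^32). */
    unsigned int fill_level = insert_count - read_count;

    printf("fill level: %u\n", fill_level);  /* prints 5 */
    return 0;
}
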
selftest.c
   410  struct efx_nic *efx = tx_queue->efx;   in efx_begin_loopback()
   438  rc = efx_enqueue_skb(tx_queue, skb);   in efx_begin_loopback()
   467  struct efx_nic *efx = tx_queue->efx;   in efx_end_loopback()
   515  lb_tests->tx_done[tx_queue->label] += tx_done;   in efx_end_loopback()
   523  efx_test_loopback(struct efx_tx_queue *tx_queue,   in efx_test_loopback() argument
   526  struct efx_nic *efx = tx_queue->efx;   in efx_test_loopback()
   542  tx_queue->label, tx_queue->queue, LOOPBACK_MODE(efx),   in efx_test_loopback()
   546  begin_rc = efx_begin_loopback(tx_queue);   in efx_test_loopback()
   618  struct efx_tx_queue *tx_queue;   in efx_test_loopbacks() local
   659  state->offload_csum = (tx_queue->type &   in efx_test_loopbacks()
  [all …]

/linux-6.3-rc2/drivers/net/ethernet/sfc/
tx_common.c
    35  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);   in efx_probe_tx_queue()
    38  tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),   in efx_probe_tx_queue()
    43  tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),   in efx_probe_tx_queue()
    55  tx_queue->channel->tx_queue_by_type[tx_queue->type] = tx_queue;   in efx_probe_tx_queue()
   101  netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,   in efx_fini_tx_queue()
   110  while (tx_queue->read_count != tx_queue->write_count) {   in efx_fini_tx_queue()
   114  buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];   in efx_fini_tx_queue()
   131  netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,   in efx_remove_tx_queue()
   194  tx_queue->queue, tx_queue->read_count);   in efx_dequeue_buffer()
   218  read_ptr = tx_queue->read_count & tx_queue->ptr_mask;   in efx_dequeue_buffers()
  [all …]

ef100_tx.c
    26  return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,   in ef100_tx_probe()
    35  tx_queue->core_txq =   in ef100_tx_init()
    46  tx_queue->tso_version = 3;   in ef100_tx_init()
    98  ++tx_queue->insert_count;   in ef100_tx_can_tso()
   117  if (unlikely(tx_queue->notify_count == tx_queue->write_count))   in ef100_notify_tx_desc()
   120  write_ptr = tx_queue->write_count & tx_queue->ptr_mask;   in ef100_notify_tx_desc()
   125  tx_queue->notify_count = tx_queue->write_count;   in ef100_notify_tx_desc()
   131  ++tx_queue->pushes;   in ef100_tx_push_buffers()
   358  tx_queue->ptr_mask;   in ef100_ev_tx()
   386  if (!tx_queue->buffer || !tx_queue->ptr_mask) {   in __ef100_enqueue_skb()
  [all …]

tx.c
   122  ++tx_queue->insert_count;   in efx_enqueue_skb_copy()
   280  if (!tx_queue->piobuf)   in efx_tx_may_pio()
   286  if (!efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count))   in efx_tx_may_pio()
   374  tx_queue->cb_packets++;   in __efx_enqueue_skb()
   391  tx_queue->tso_bursts++;   in __efx_enqueue_skb()
   395  tx_queue->tx_packets++;   in __efx_enqueue_skb()
   465  tx_queue->read_count - tx_queue->insert_count;   in efx_xdp_tx_buffers()
   566  read_ptr = tx_queue->read_count & tx_queue->ptr_mask;   in efx_xmit_done_single()
   576  tx_queue->queue);   in efx_xmit_done_single()
   588  read_ptr = tx_queue->read_count & tx_queue->ptr_mask;   in efx_xmit_done_single()
  [all …]

nic_common.h
    60  efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)   in efx_tx_desc() argument
    62  return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;   in efx_tx_desc()
    92  bool was_empty = efx_nic_tx_is_empty(tx_queue, write_count);   in efx_nic_may_push_tx_desc()
    94  tx_queue->empty_read_count = 0;   in efx_nic_may_push_tx_desc()
    95  return was_empty && tx_queue->write_count - write_count == 1;   in efx_nic_may_push_tx_desc()
   121  static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)   in efx_nic_probe_tx() argument
   123  return tx_queue->efx->type->tx_probe(tx_queue);   in efx_nic_probe_tx()
   127  tx_queue->efx->type->tx_init(tx_queue);   in efx_nic_init_tx()
   131  if (tx_queue->efx->type->tx_remove)   in efx_nic_remove_tx()
   132  tx_queue->efx->type->tx_remove(tx_queue);   in efx_nic_remove_tx()
  [all …]

tx_common.h
    14  int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
    15  void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
    16  void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
    17  void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
    19  void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
    30  void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue);
    31  void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
    33  void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
    36  struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
    39  int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
  [all …]

tx_tso.c
   113  ++tx_queue->insert_count;   in efx_tx_queue_insert()
   116  tx_queue->read_count >=   in efx_tx_queue_insert()
   117  tx_queue->efx->txq_entries);   in efx_tx_queue_insert()
   121  dma_len = tx_queue->efx->type->tx_limit_len(tx_queue,   in efx_tx_queue_insert()
   170  struct efx_tx_queue *tx_queue,   in tso_start() argument
   319  ++tx_queue->insert_count;   in tso_start_new_packet()
   339  ++tx_queue->insert_count;   in tso_start_new_packet()
   366  struct efx_nic *efx = tx_queue->efx;   in efx_enqueue_skb_tso()
   370  if (tx_queue->tso_version != 1)   in efx_enqueue_skb_tso()
   378  EFX_WARN_ON_ONCE_PARANOID(tx_queue->write_count != tx_queue->insert_count);   in efx_enqueue_skb_tso()
  [all …]

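In efx_tx_queue_insert() (line 121 above), the NIC type's tx_limit_len hook caps how much of a mapped DMA span a single descriptor may carry, and the insert loop keeps emitting descriptors until the span is exhausted. A sketch of that splitting loop, with a hypothetical fixed 4 KiB limit standing in for the hardware-specific hook:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define TX_LIMIT_LEN 4096u  /* hypothetical per-descriptor DMA limit */

static void insert_descriptors(uint64_t dma_addr, size_t len)
{
    while (len) {
        /* Carve off at most one descriptor's worth of the span. */
        size_t dma_len = len < TX_LIMIT_LEN ? len : TX_LIMIT_LEN;

        printf("desc: addr=%#llx len=%zu\n",
               (unsigned long long)dma_addr, dma_len);
        dma_addr += dma_len;
        len -= dma_len;
    }
}

int main(void)
{
    insert_descriptors(0x1000, 10000); /* -> 4096 + 4096 + 1808 */
    return 0;
}
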
efx_channels.c
   544  tx_queue = &channel->tx_queue[j];   in efx_alloc_channel()
   545  tx_queue->efx = efx;   in efx_alloc_channel()
   546  tx_queue->queue = -1;   in efx_alloc_channel()
   547  tx_queue->label = j;   in efx_alloc_channel()
   618  tx_queue = &channel->tx_queue[j];   in efx_copy_channel()
   619  if (tx_queue->channel)   in efx_copy_channel()
   623  memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));   in efx_copy_channel()
   774  tx_queue->channel->channel, tx_queue->label,   in efx_set_xdp_tx_queue()
   800  tx_queue);   in efx_set_xdp_channels()
   820  tx_queue = &channel->tx_queue[0];   in efx_set_xdp_channels()
  [all …]

mcdi_functions.c
   170  struct efx_channel *channel = tx_queue->channel;   in efx_mcdi_tx_init()
   171  struct efx_nic *efx = tx_queue->efx;   in efx_mcdi_tx_init()
   185  dma_addr = tx_queue->txd.buf.dma_addr;   in efx_mcdi_tx_init()
   188  tx_queue->queue, entries, (u64)dma_addr);   in efx_mcdi_tx_init()
   198  bool tso_v2 = tx_queue->tso_version == 2;   in efx_mcdi_tx_init()
   222  tx_queue->tso_version = 0;   in efx_mcdi_tx_init()
   243  efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);   in efx_mcdi_tx_remove()
   250  struct efx_nic *efx = tx_queue->efx;   in efx_mcdi_tx_fini()
   255  tx_queue->queue);   in efx_mcdi_tx_fini()
   357  struct efx_tx_queue *tx_queue;   in efx_fini_dmaq() local
  [all …]

ef100_tx.h
    18  int ef100_tx_probe(struct efx_tx_queue *tx_queue);
    19  void ef100_tx_init(struct efx_tx_queue *tx_queue);
    20  void ef100_tx_write(struct efx_tx_queue *tx_queue);
    25  netdev_tx_t ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
    26  int __ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb,

selftest.c
   410  struct efx_nic *efx = tx_queue->efx;   in efx_begin_loopback()
   438  rc = efx_enqueue_skb(tx_queue, skb);   in efx_begin_loopback()
   467  struct efx_nic *efx = tx_queue->efx;   in efx_end_loopback()
   515  lb_tests->tx_done[tx_queue->label] += tx_done;   in efx_end_loopback()
   523  efx_test_loopback(struct efx_tx_queue *tx_queue,   in efx_test_loopback() argument
   526  struct efx_nic *efx = tx_queue->efx;   in efx_test_loopback()
   542  tx_queue->label, tx_queue->queue, LOOPBACK_MODE(efx),   in efx_test_loopback()
   546  begin_rc = efx_begin_loopback(tx_queue);   in efx_test_loopback()
   618  struct efx_tx_queue *tx_queue;   in efx_test_loopbacks() local
   659  state->offload_csum = (tx_queue->type &   in efx_test_loopbacks()
  [all …]

/linux-6.3-rc2/drivers/net/ethernet/sfc/falcon/
tx.c
    72  netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,   in ef4_dequeue_buffer()
    74  tx_queue->queue, tx_queue->read_count);   in ef4_dequeue_buffer()
   274  while (tx_queue->insert_count != tx_queue->write_count) {   in ef4_enqueue_unwind()
   364  read_ptr = tx_queue->read_count & tx_queue->ptr_mask;   in ef4_dequeue_buffers()
   381  read_ptr = tx_queue->read_count & tx_queue->ptr_mask;   in ef4_dequeue_buffers()
   526  if (tx_queue->read_count == tx_queue->old_write_count) {   in ef4_xmit_done()
   552  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);   in ef4_probe_tx_queue()
   611  netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,   in ef4_fini_tx_queue()
   618  while (tx_queue->read_count != tx_queue->write_count) {   in ef4_fini_tx_queue()
   620  buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];   in ef4_fini_tx_queue()
  [all …]

nic.h
    71  if (tx_queue->queue & EF4_TXQ_TYPE_OFFLOAD)   in ef4_tx_queue_partner()
    72  return tx_queue - EF4_TXQ_TYPE_OFFLOAD;   in ef4_tx_queue_partner()
    74  return tx_queue + EF4_TXQ_TYPE_OFFLOAD;   in ef4_tx_queue_partner()
   104  tx_queue->empty_read_count = 0;   in ef4_nic_may_push_tx_desc()
   317  return tx_queue->efx->type->tx_probe(tx_queue);   in ef4_nic_probe_tx()
   321  tx_queue->efx->type->tx_init(tx_queue);   in ef4_nic_init_tx()
   325  tx_queue->efx->type->tx_remove(tx_queue);   in ef4_nic_remove_tx()
   329  tx_queue->efx->type->tx_write(tx_queue);   in ef4_nic_push_buffers()
   383  int ef4_farch_tx_probe(struct ef4_tx_queue *tx_queue);
   384  void ef4_farch_tx_init(struct ef4_tx_queue *tx_queue);
  [all …]

farch.c
   277  write_ptr = tx_queue->write_count & tx_queue->ptr_mask;   in ef4_farch_notify_tx_desc()
   293  write_ptr = tx_queue->write_count & tx_queue->ptr_mask;   in ef4_farch_push_tx_desc()
   314  if (unlikely(tx_queue->write_count == tx_queue->insert_count))   in ef4_farch_tx_write()
   318  write_ptr = tx_queue->write_count & tx_queue->ptr_mask;   in ef4_farch_tx_write()
   333  } while (tx_queue->write_count != tx_queue->insert_count);   in ef4_farch_tx_write()
   341  ++tx_queue->pushes;   in ef4_farch_tx_write()
   404  tx_queue->queue);   in ef4_farch_tx_init()
   425  tx_queue->queue);   in ef4_farch_tx_init()
   451  tx_queue->queue);   in ef4_farch_tx_fini()
   460  ef4_free_special_buffer(tx_queue->efx, &tx_queue->txd);   in ef4_farch_tx_remove()
  [all …]

net_driver.h
   445  struct ef4_tx_queue tx_queue[EF4_TXQ_TYPES];   member
  1082  int (*tx_probe)(struct ef4_tx_queue *tx_queue);
  1083  void (*tx_init)(struct ef4_tx_queue *tx_queue);
  1206  return &channel->tx_queue[type];   in ef4_channel_get_tx_queue()
  1211  return !(tx_queue->efx->net_dev->num_tc < 2 &&   in ef4_tx_queue_used()
  1212  tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI);   in ef4_tx_queue_used()
  1220  for (_tx_queue = (_channel)->tx_queue; \
  1230  for (_tx_queue = (_channel)->tx_queue; \
  1312  return tx_queue->insert_count & tx_queue->ptr_mask;   in ef4_tx_queue_get_insert_index()
  1319  return &tx_queue->buffer[ef4_tx_queue_get_insert_index(tx_queue)];   in __ef4_tx_queue_get_insert_buffer()
  [all …]

/linux-6.3-rc2/drivers/net/ethernet/freescale/
gianfar.c
   562  grp->tx_queue = priv->tx_queue[i];   in gfar_parse_group()
  1134  tx_queue = priv->tx_queue[i];   in free_skb_resources()
  1293  tx_queue = priv->tx_queue[i];   in gfar_init_bds()
  1295  tx_queue->num_txbdfree = tx_queue->tx_ring_size;   in gfar_init_bds()
  1296  tx_queue->dirty_tx = tx_queue->tx_bd_base;   in gfar_init_bds()
  1297  tx_queue->cur_tx = tx_queue->tx_bd_base;   in gfar_init_bds()
  1362  tx_queue = priv->tx_queue[i];   in gfar_alloc_skb_resources()
  1384  tx_queue = priv->tx_queue[i];   in gfar_alloc_skb_resources()
  1785  tx_queue = priv->tx_queue[rq];   in gfar_start_xmit()
  1956  tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;   in gfar_start_xmit()
  [all …]

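gfar_init_bds() (lines 1295-1297) resets a queue by pointing both the producer cursor (cur_tx) and the consumer cursor (dirty_tx) back at tx_bd_base and restoring num_txbdfree to the full ring size. A standalone model of that reset, with simplified descriptor and queue types standing in for the driver's:

#include <stdio.h>

#define TX_RING_SIZE 16

struct txbd { unsigned short status, length; unsigned int bufptr; };

struct txq_model {
    struct txbd bd[TX_RING_SIZE];
    struct txbd *tx_bd_base;
    struct txbd *cur_tx;     /* next descriptor the driver will fill */
    struct txbd *dirty_tx;   /* oldest descriptor hardware may own */
    int num_txbdfree;
    int tx_ring_size;
};

static void txq_init_bds(struct txq_model *q)
{
    q->tx_bd_base = q->bd;
    q->tx_ring_size = TX_RING_SIZE;
    /* Empty ring: every descriptor is free, both cursors at the base. */
    q->num_txbdfree = q->tx_ring_size;
    q->dirty_tx = q->tx_bd_base;
    q->cur_tx = q->tx_bd_base;
}

int main(void)
{
    struct txq_model q;

    txq_init_bds(&q);
    printf("free descriptors: %d\n", q.num_txbdfree);
    return 0;
}
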
/linux-6.3-rc2/drivers/net/wireless/rsi/
rsi_91x_core.c
    36  q_len = skb_queue_len(&common->tx_queue[ii]);   in rsi_determine_min_weight_queue()
    60  q_len = skb_queue_len(&common->tx_queue[ii]);   in rsi_recalculate_weights()
   106  if (skb_queue_len(&common->tx_queue[q_num]))   in rsi_get_num_pkts_dequeue()
   107  skb = skb_peek(&common->tx_queue[q_num]);   in rsi_get_num_pkts_dequeue()
   149  if (skb_queue_len(&common->tx_queue[MGMT_SOFT_Q])) {   in rsi_core_determine_hal_queue()
   172  q_len = skb_queue_len(&common->tx_queue[ii]);   in rsi_core_determine_hal_queue()
   187  q_len = skb_queue_len(&common->tx_queue[q_num]);   in rsi_core_determine_hal_queue()
   200  q_len = skb_queue_len(&common->tx_queue[q_num]);   in rsi_core_determine_hal_queue()
   229  skb_queue_tail(&common->tx_queue[q_num], skb);   in rsi_core_queue_pkt()
   249  return skb_dequeue(&common->tx_queue[q_num]);   in rsi_core_dequeue_pkt()
  [all …]

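These rsi matches sketch a software scheduler: per-queue weights are recomputed from skb_queue_len() backlogs, and the next HAL queue is chosen by weight. A rough model of one plausible selection step, picking the backlogged queue with the smallest weight as rsi_determine_min_weight_queue() suggests; this illustrates the shape of the algorithm, not the driver's exact policy:

#include <stdio.h>

#define NUM_QUEUES 4

struct queue_model {
    unsigned int backlog;  /* stand-in for skb_queue_len() */
    unsigned int weight;
};

static int min_weight_queue(const struct queue_model *q)
{
    int best = -1;

    for (int i = 0; i < NUM_QUEUES; i++) {
        if (!q[i].backlog)
            continue;                /* empty queues don't compete */
        if (best < 0 || q[i].weight < q[best].weight)
            best = i;
    }
    return best;                     /* -1 if every queue is empty */
}

int main(void)
{
    struct queue_model q[NUM_QUEUES] = {
        { .backlog = 0, .weight = 1 },
        { .backlog = 3, .weight = 8 },
        { .backlog = 5, .weight = 2 },
        { .backlog = 1, .weight = 4 },
    };

    printf("selected queue: %d\n", min_weight_queue(q)); /* 2 */
    return 0;
}
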
/linux-6.3-rc2/drivers/net/wireless/silabs/wfx/
queue.c
    69  skb_queue_head_init(&wvif->tx_queue[i].normal);   in wfx_tx_queues_init()
    70  skb_queue_head_init(&wvif->tx_queue[i].cab);   in wfx_tx_queues_init()
    71  wvif->tx_queue[i].priority = priorities[i];   in wfx_tx_queues_init()
    85  WARN_ON(atomic_read(&wvif->tx_queue[i].pending_frames));   in wfx_tx_queues_check_empty()
    86  WARN_ON(!wfx_tx_queue_empty(wvif, &wvif->tx_queue[i]));   in wfx_tx_queues_check_empty()
   113  struct wfx_queue *queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];   in wfx_tx_queues_put()
   134  queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];   in wfx_pending_drop()
   160  queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];   in wfx_pending_get()
   217  if (!skb_queue_empty_lockless(&wvif->tx_queue[i].cab))   in wfx_tx_queues_has_cab()
   240  queues[num_queues] = &wvif->tx_queue[i];   in wfx_tx_queues_get_skb()
  [all …]

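The wfx matches show the per-interface layout: one queue per 802.11 access category, each holding a normal list plus a cab (content-after-beacon) list for frames that must wait for the next beacon, with skb_get_queue_mapping() picking the queue for an outgoing frame. A simplified model of that layout; the priority values are hypothetical and sk_buff_head is reduced to a counter:

#include <stdbool.h>
#include <stdio.h>

#define IEEE80211_NUM_ACS 4

struct skb_list { int len; };  /* stand-in for sk_buff_head */

struct queue_model {
    struct skb_list normal;    /* ordinary traffic */
    struct skb_list cab;       /* held until the next beacon */
    int priority;
};

static void tx_queues_init(struct queue_model *q)
{
    /* Hypothetical per-AC priority values, for illustration only. */
    static const int priorities[IEEE80211_NUM_ACS] = { 1, 2, 3, 4 };

    for (int i = 0; i < IEEE80211_NUM_ACS; i++) {
        q[i].normal.len = 0;
        q[i].cab.len = 0;
        q[i].priority = priorities[i];
    }
}

static bool has_cab_traffic(const struct queue_model *q)
{
    for (int i = 0; i < IEEE80211_NUM_ACS; i++)
        if (q[i].cab.len)
            return true;
    return false;
}

int main(void)
{
    struct queue_model q[IEEE80211_NUM_ACS];

    tx_queues_init(q);
    printf("cab pending: %d\n", has_cab_traffic(q));
    return 0;
}
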