Lines matching refs: tx_queue

The matching lines below are from the sfc driver's TX-queue helpers (drivers/net/ethernet/sfc/tx_common.c), regrouped by function for readability; source lines that did not match the reference are elided and marked /* ... */.

static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
{
        return DIV_ROUND_UP(tx_queue->ptr_mask + 1,
                            PAGE_SIZE >> EFX_TX_CB_ORDER);
}
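Each copy-buffer page holds several small copy buffers, so the page count is the ring size divided by buffers-per-page, rounded up. A minimal userspace sketch of that arithmetic, assuming 4 KiB pages and an EFX_TX_CB_ORDER of 8 (256-byte copy buffers); both values are illustrative, not taken from the listing:

        #include <stdio.h>

        #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

        /* Illustrative stand-ins for the kernel constants. */
        #define PAGE_SIZE               4096u
        #define EFX_TX_CB_ORDER         8       /* assumed: 256-byte copy buffers */

        int main(void)
        {
                unsigned int ring_entries = 1024;       /* ptr_mask + 1 */
                unsigned int per_page = PAGE_SIZE >> EFX_TX_CB_ORDER;   /* 16 */
                unsigned int pages = DIV_ROUND_UP(ring_entries, per_page);

                printf("%u entries -> %u copy-buffer pages (%u per page)\n",
                       ring_entries, pages, per_page);
                return 0;
        }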
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;
        unsigned int entries;
        int rc;

        /* ... round the requested size up to a power of two ... */
        tx_queue->ptr_mask = entries - 1;

        netif_dbg(efx, probe, efx->net_dev,
                  "creating TX queue %d size %#x mask %#x\n",
                  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

        /* Allocate the software descriptor ring */
        tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
                                   GFP_KERNEL);
        if (!tx_queue->buffer)
                return -ENOMEM;

        /* Allocate the per-queue copy-buffer pages */
        tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
                                    sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
        if (!tx_queue->cb_page) {
                rc = -ENOMEM;
                goto fail1;
        }

        /* Allocate the hardware ring */
        rc = efx_nic_probe_tx(tx_queue);
        if (rc)
                goto fail2;

        tx_queue->channel->tx_queue_by_type[tx_queue->type] = tx_queue;
        return 0;

fail2:
        kfree(tx_queue->cb_page);
        tx_queue->cb_page = NULL;
fail1:
        kfree(tx_queue->buffer);
        tx_queue->buffer = NULL;
        return rc;
}
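The probe path uses the kernel's staged-allocation idiom: each allocation that succeeds gets a matching label on the failure path, unwound in reverse order, so a late failure frees exactly the earlier successes. A self-contained sketch of the same pattern, with hypothetical queue_probe()/malloc() stand-ins rather than the driver's allocators:

        #include <stdio.h>
        #include <stdlib.h>

        struct queue {
                void *a;        /* first allocation, e.g. the software ring */
                void *b;        /* second allocation, e.g. the copy-buffer pages */
        };

        /* Staged allocation: a later failure unwinds the earlier successes. */
        static int queue_probe(struct queue *q)
        {
                int rc;

                q->a = malloc(64);
                if (!q->a)
                        return -1;              /* nothing to unwind yet */

                q->b = malloc(64);
                if (!q->b) {
                        rc = -1;
                        goto fail_a;            /* only 'a' needs freeing */
                }
                return 0;

        fail_a:
                free(q->a);
                q->a = NULL;
                return rc;
        }

        int main(void)
        {
                struct queue q;

                printf("probe %s\n", queue_probe(&q) ? "failed" : "succeeded");
                return 0;
        }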
void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;

        netif_dbg(efx, drv, efx->net_dev,
                  "initialising TX queue %d\n", tx_queue->queue);

        /* Reset the free-running producer/consumer counters */
        tx_queue->insert_count = 0;
        tx_queue->notify_count = 0;
        tx_queue->write_count = 0;
        tx_queue->packet_write_count = 0;
        tx_queue->old_write_count = 0;
        tx_queue->read_count = 0;
        tx_queue->old_read_count = 0;
        tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
        tx_queue->xmit_pending = false;

        /* MAC TX timestamping is only done on the PTP channel's queues */
        tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
                                  tx_queue->channel == efx_ptp_channel(efx));
        tx_queue->completed_timestamp_major = 0;
        tx_queue->completed_timestamp_minor = 0;

        tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);
        tx_queue->tso_version = 0;

        /* Set up the hardware TX descriptor ring */
        efx_nic_init_tx(tx_queue);

        tx_queue->initialised = true;
}
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
        struct efx_tx_buffer *buffer;

        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "shutting down TX queue %d\n", tx_queue->queue);

        tx_queue->initialised = false;

        if (!tx_queue->buffer)
                return;

        /* Free any buffers left in the ring */
        while (tx_queue->read_count != tx_queue->write_count) {
                unsigned int pkts_compl = 0, bytes_compl = 0;

                buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
                                   /* ... */);

                ++tx_queue->read_count;
        }
        tx_queue->xmit_pending = false;
        netdev_tx_reset_queue(tx_queue->core_txq);
}
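The drain loop walks the software ring from read_count to write_count, masking the free-running counters down to a ring index on each step; because the ring size is a power of two, unsigned wrap-around is harmless. A minimal userspace model of that counter-and-mask scheme (the field names mirror the driver's, the values are illustrative):

        #include <stdio.h>

        #define RING_SIZE       8u                      /* must be a power of two */
        #define PTR_MASK        (RING_SIZE - 1)

        int main(void)
        {
                /* Free-running counters: they wrap naturally as unsigned ints. */
                unsigned int read_count = 5;
                unsigned int write_count = 12;          /* 7 entries outstanding */

                while (read_count != write_count) {
                        unsigned int index = read_count & PTR_MASK;

                        printf("draining slot %u (read_count %u)\n", index, read_count);
                        ++read_count;
                }
                return 0;
        }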
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
        int i;

        if (!tx_queue->buffer)
                return;

        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "destroying TX queue %d\n", tx_queue->queue);
        efx_nic_remove_tx(tx_queue);

        if (tx_queue->cb_page) {
                for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
                        efx_nic_free_buffer(tx_queue->efx,
                                            &tx_queue->cb_page[i]);
                kfree(tx_queue->cb_page);
                tx_queue->cb_page = NULL;
        }

        kfree(tx_queue->buffer);
        tx_queue->buffer = NULL;
        tx_queue->channel->tx_queue_by_type[tx_queue->type] = NULL;
}
void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
                        struct efx_tx_buffer *buffer,
                        unsigned int *pkts_compl,
                        unsigned int *bytes_compl /* , ... */)
{
        if (buffer->unmap_len) {
                struct device *dma_dev = &tx_queue->efx->pci_dev->dev;

                /* ... dma_unmap_single()/dma_unmap_page() ... */
        }

        if (buffer->flags & EFX_TX_BUF_SKB) {
                /* ... */

                /* Only the PTP queue has hardware timestamps to report */
                if (tx_queue->timestamping &&
                    (tx_queue->completed_timestamp_major ||
                     tx_queue->completed_timestamp_minor)) {
                        /* ... convert the raw major/minor timestamp ... */
                        efx_ptp_nic_to_kernel_time(tx_queue);
                        /* ... */
                        tx_queue->completed_timestamp_major = 0;
                        tx_queue->completed_timestamp_minor = 0;
                }

                netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
                           "TX queue %d transmission id %x complete\n",
                           tx_queue->queue, tx_queue->read_count);
                /* ... */
        }
        /* ... */
}
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
                                unsigned int index,
                                unsigned int *pkts_compl,
                                unsigned int *bytes_compl /* , ... */)
{
        struct efx_nic *efx = tx_queue->efx;
        unsigned int stop_index, read_ptr;

        /* 'index' is the last descriptor the NIC completed, inclusive */
        stop_index = (index + 1) & tx_queue->ptr_mask;
        read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

        while (read_ptr != stop_index) {
                struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

                if (!efx_tx_buffer_in_use(buffer)) {
                        netif_err(efx, tx_err, efx->net_dev,
                                  "TX queue %d spurious TX completion id %d\n",
                                  tx_queue->queue, read_ptr);
                        /* ... schedule a reset ... */
                        return;
                }

                efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl,
                                   /* ... */);

                ++tx_queue->read_count;
                read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
        }
}
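Completion events carry the index of the last finished descriptor, so the walk must include that slot: advancing the stop index by one before masking turns the inclusive bound into the usual exclusive loop test. A userspace model of that arithmetic (names mirror the driver, values are illustrative):

        #include <stdio.h>

        #define PTR_MASK        7u      /* 8-entry ring */

        int main(void)
        {
                unsigned int read_count = 6;            /* next slot to reap */
                unsigned int index = 9;                 /* last completed, inclusive */
                unsigned int stop = (index + 1) & PTR_MASK;     /* exclusive bound: 2 */
                unsigned int read_ptr = read_count & PTR_MASK;

                while (read_ptr != stop) {
                        printf("reaping slot %u\n", read_ptr);
                        ++read_count;
                        read_ptr = read_count & PTR_MASK;
                }
                return 0;       /* reaps slots 6, 7, 0, 1 */
        }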
void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
{
        if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
                tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
                if (tx_queue->read_count == tx_queue->old_write_count) {
                        /* Ensure that read_count is flushed. */
                        smp_mb();
                        tx_queue->empty_read_count =
                                tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
                }
        }
}
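empty_read_count packs a snapshot of the counter and a validity flag into one word, so a single store publishes both together. A sketch of that encoding, assuming the flag lives in the top bit (the actual EFX_EMPTY_COUNT_VALID value is not shown in the listing):

        #include <stdio.h>

        /* Assumed layout: top bit = "snapshot is valid", rest = counter bits. */
        #define EMPTY_COUNT_VALID       0x80000000u
        #define EMPTY_COUNT_MASK        (~EMPTY_COUNT_VALID)

        int main(void)
        {
                unsigned int read_count = 12345;
                unsigned int empty_read_count = read_count | EMPTY_COUNT_VALID;

                if (empty_read_count & EMPTY_COUNT_VALID)
                        printf("queue was empty at count %u\n",
                               empty_read_count & EMPTY_COUNT_MASK);
                return 0;
        }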
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
        unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
        struct efx_nic *efx = tx_queue->efx;

        EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);

        efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl,
                            /* ... */);
        tx_queue->pkts_compl += pkts_compl;
        tx_queue->bytes_compl += bytes_compl;

        if (pkts_compl > 1)
                ++tx_queue->merge_events;

        /* See if we need to restart the netif queue; only do this once
         * the completions above have actually been processed.
         */
        if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
            likely(efx->port_enabled) &&
            likely(netif_device_present(efx->net_dev))) {
                fill_level = efx_channel_tx_fill_level(tx_queue->channel);
                if (fill_level <= efx->txq_wake_thresh)
                        netif_tx_wake_queue(tx_queue->core_txq);
        }

        efx_xmit_done_check_empty(tx_queue);
}
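The queue is stopped by the enqueue path when the ring gets too full and woken here once completions bring the fill level back under a threshold; checking only after reaping avoids waking a still-full queue. A userspace model of the fill-level test (the threshold and counter values are illustrative):

        #include <stdbool.h>
        #include <stdio.h>

        #define RING_ENTRIES    1024u
        #define WAKE_THRESH     (RING_ENTRIES / 2)      /* illustrative */

        int main(void)
        {
                unsigned int insert_count = 2000, read_count = 1200;
                bool stopped = true;

                /* Fill level is the distance between producer and consumer. */
                unsigned int fill_level = insert_count - read_count;    /* 800 */

                if (stopped && fill_level <= WAKE_THRESH)
                        printf("wake queue\n");
                else
                        printf("stay stopped (fill level %u)\n", fill_level);
                return 0;
        }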
void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
                        unsigned int insert_count)
{
        struct efx_tx_buffer *buffer;
        unsigned int bytes_compl = 0;
        unsigned int pkts_compl = 0;

        /* Work backwards until we hit the original insert pointer value */
        while (tx_queue->insert_count != insert_count) {
                --tx_queue->insert_count;
                buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
                                   /* ... */);
        }
}
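If mapping an skb fails partway through, every descriptor written since the caller's saved insert_count must be released; walking the producer counter backwards reuses the normal dequeue path for cleanup. A minimal model of the save-and-roll-back idiom (the hypothetical map_one() stands in for the driver's per-descriptor mapping step):

        #include <stdbool.h>
        #include <stdio.h>

        static unsigned int insert_count;

        /* Hypothetical per-descriptor mapping step: fails on the third slot. */
        static bool map_one(unsigned int i)
        {
                return i < 2;
        }

        int main(void)
        {
                unsigned int saved = insert_count;      /* snapshot before enqueue */
                unsigned int i;

                for (i = 0; i < 4; i++) {
                        if (!map_one(i))
                                goto unwind;
                        ++insert_count;
                }
                printf("enqueued\n");
                return 0;

        unwind:
                /* Work backwards, releasing each partially built descriptor. */
                while (insert_count != saved) {
                        --insert_count;
                        printf("releasing descriptor %u\n", insert_count);
                }
                return 1;
        }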
struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
                                       dma_addr_t dma_addr, size_t len)
{
        const struct efx_nic_type *nic_type = tx_queue->efx->type;
        struct efx_tx_buffer *buffer;
        unsigned int dma_len;

        /* Map the fragment taking account of NIC-dependent DMA limits. */
        do {
                buffer = efx_tx_queue_get_insert_buffer(tx_queue);
                /* ... */
                dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
                /* ... fill in buffer->dma_addr and buffer->len ... */
                len -= dma_len;
                dma_addr += dma_len;
                ++tx_queue->insert_count;
        } while (len);

        return buffer;
}
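Splitting works by repeatedly clamping the remaining length to whatever one descriptor may carry and advancing the DMA address past each piece. A userspace model with a fixed 4 KiB per-descriptor limit (the real limit comes from the NIC-specific tx_limit_len hook):

        #include <stdio.h>
        #include <stddef.h>

        #define DESC_LIMIT      4096u   /* illustrative per-descriptor DMA limit */

        int main(void)
        {
                unsigned long long dma_addr = 0x10000;
                size_t len = 10000;     /* one fragment, needs three descriptors */

                do {
                        size_t dma_len = len < DESC_LIMIT ? len : DESC_LIMIT;

                        printf("desc: addr 0x%llx len %zu\n", dma_addr, dma_len);
                        len -= dma_len;
                        dma_addr += dma_len;
                } while (len);
                return 0;
        }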
/* Map all data from an SKB for DMA and create descriptors on the queue. */
int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
                    unsigned int segment_count)
{
        struct efx_nic *efx = tx_queue->efx;
        /* ... map the skb head for DMA ... */

        if (segment_count) {
                /* For TSO the header must go into its own descriptor */
                /* ... */
                tx_queue->tso_long_headers++;
                efx_tx_map_chunk(tx_queue, dma_addr, header_len);
                /* ... */
        }

        /* Add descriptors for each fragment */
        do {
                /* ... */
                buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
                /* ... last fragment mapped? return 0; otherwise map the
                 * next fragment and continue ...
                 */
        } while (1);
}
/* Fall back to software segmentation and send each segment normally. */
int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
        /* ... segment the skb in software, then for each segment: ... */
        efx_enqueue_skb(tx_queue, skb);
        /* ... */
}
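Software TSO amounts to slicing one oversized payload into MSS-sized packets and pushing each through the ordinary transmit path. A userspace model of the slicing (the MSS and sizes are illustrative; in the kernel the real work is done by GSO segmentation, not by hand):

        #include <stdio.h>
        #include <stddef.h>

        #define MSS     1448u   /* illustrative TCP payload per segment */

        /* Stand-in for handing one packet to the normal enqueue path. */
        static void enqueue(size_t off, size_t len)
        {
                printf("segment at %zu, %zu bytes\n", off, len);
        }

        int main(void)
        {
                size_t total = 64000, off = 0;  /* one oversized "super-packet" */

                while (off < total) {
                        size_t len = total - off < MSS ? total - off : MSS;

                        enqueue(off, len);
                        off += len;
                }
                return 0;
        }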