Lines matching refs:rx_queue (the leading numbers are line numbers in the source file; only lines that mention rx_queue are shown)

37 static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)  in efx_reuse_page()  argument
39 struct efx_nic *efx = rx_queue->efx; in efx_reuse_page()
44 if (unlikely(!rx_queue->page_ring)) in efx_reuse_page()
46 index = rx_queue->page_remove & rx_queue->page_ptr_mask; in efx_reuse_page()
47 page = rx_queue->page_ring[index]; in efx_reuse_page()
51 rx_queue->page_ring[index] = NULL; in efx_reuse_page()
53 if (rx_queue->page_remove != rx_queue->page_add) in efx_reuse_page()
54 ++rx_queue->page_remove; in efx_reuse_page()
58 ++rx_queue->page_recycle_count; in efx_reuse_page()
66 ++rx_queue->page_recycle_failed; in efx_reuse_page()
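
efx_reuse_page() is the consumer side of the driver's page recycle ring: page_remove is a free-running read cursor, masked by page_ptr_mask (the ring size is a power of two), an empty slot means the caller must allocate a fresh page, and the page_recycle_count / page_recycle_failed statistics count hits and pages that could not be reused. A minimal userspace sketch of that index arithmetic; struct page is left opaque, and struct recycle_ring / reuse_page() are hypothetical names whose fields mirror the listing:

    #include <stddef.h>

    struct page;                        /* opaque stand-in for the kernel's struct page */

    struct recycle_ring {
            struct page **page_ring;    /* power-of-two array of slots */
            unsigned int page_ptr_mask; /* ring size - 1               */
            unsigned int page_remove;   /* free-running read cursor    */
            unsigned int page_add;      /* free-running write cursor   */
    };

    static struct page *reuse_page(struct recycle_ring *r)
    {
            unsigned int index = r->page_remove & r->page_ptr_mask;
            struct page *page = r->page_ring[index];

            if (!page)
                    return NULL;        /* miss: caller falls back to a fresh allocation */

            r->page_ring[index] = NULL;
            /* never let the read cursor pass the write cursor */
            if (r->page_remove != r->page_add)
                    ++r->page_remove;
            return page;
    }
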
79 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); in efx_recycle_rx_page() local
80 struct efx_nic *efx = rx_queue->efx; in efx_recycle_rx_page()
88 index = rx_queue->page_add & rx_queue->page_ptr_mask; in efx_recycle_rx_page()
89 if (rx_queue->page_ring[index] == NULL) { in efx_recycle_rx_page()
90 unsigned int read_index = rx_queue->page_remove & in efx_recycle_rx_page()
91 rx_queue->page_ptr_mask; in efx_recycle_rx_page()
98 ++rx_queue->page_remove; in efx_recycle_rx_page()
99 rx_queue->page_ring[index] = page; in efx_recycle_rx_page()
100 ++rx_queue->page_add; in efx_recycle_rx_page()
103 ++rx_queue->page_recycle_full; in efx_recycle_rx_page()
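
efx_recycle_rx_page() is the matching producer: page_add selects the write slot, an occupied slot means the ring is full (counted in page_recycle_full), and when the write lands on the very slot page_remove points at, the read cursor is stepped past it so the just-stored page becomes the newest entry rather than the next one reused. A companion sketch reusing the hypothetical struct recycle_ring above; note the read_index == index guard is reconstructed from context, since the listing only shows lines that mention rx_queue:

    static void recycle_page(struct recycle_ring *r, struct page *page,
                             unsigned int *page_recycle_full)
    {
            unsigned int index = r->page_add & r->page_ptr_mask;

            if (r->page_ring[index] == NULL) {
                    unsigned int read_index = r->page_remove & r->page_ptr_mask;

                    /* writing into the slot the read cursor points at:
                     * step the read cursor past it first */
                    if (read_index == index)
                            ++r->page_remove;
                    r->page_ring[index] = page;
                    ++r->page_add;
                    return;
            }
            ++*page_recycle_full;       /* slot taken: ring is full, the
                                           caller releases the page instead */
    }
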
113 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); in efx_siena_recycle_rx_pages() local
115 if (unlikely(!rx_queue->page_ring)) in efx_siena_recycle_rx_pages()
120 rx_buf = efx_rx_buf_next(rx_queue, rx_buf); in efx_siena_recycle_rx_pages()
128 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); in efx_siena_discard_rx_packet() local
132 efx_siena_free_rx_buffers(rx_queue, rx_buf, n_frags); in efx_siena_discard_rx_packet()
135 static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue) in efx_init_rx_recycle_ring() argument
138 struct efx_nic *efx = rx_queue->efx; in efx_init_rx_recycle_ring()
143 rx_queue->page_ring = kcalloc(page_ring_size, in efx_init_rx_recycle_ring()
144 sizeof(*rx_queue->page_ring), GFP_KERNEL); in efx_init_rx_recycle_ring()
145 if (!rx_queue->page_ring) in efx_init_rx_recycle_ring()
146 rx_queue->page_ptr_mask = 0; in efx_init_rx_recycle_ring()
148 rx_queue->page_ptr_mask = page_ring_size - 1; in efx_init_rx_recycle_ring()
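
efx_init_rx_recycle_ring() allocates the ring with kcalloc() and derives the index mask; on allocation failure it leaves page_ptr_mask at 0, which together with the NULL page_ring is exactly the state the unlikely(!rx_queue->page_ring) checks above test for. A sketch of the same pattern, with init_recycle_ring() as a hypothetical userspace analogue:

    #include <stdlib.h>

    static void init_recycle_ring(struct recycle_ring *r,
                                  unsigned int page_ring_size)
    {
            /* page_ring_size must be a power of two for the mask to work */
            r->page_ring = calloc(page_ring_size, sizeof(*r->page_ring));
            /* on failure, a 0 mask plus a NULL ring is the "no recycling"
             * state every fast-path user tests for first */
            r->page_ptr_mask = r->page_ring ? page_ring_size - 1 : 0;
    }
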
151 static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue) in efx_fini_rx_recycle_ring() argument
153 struct efx_nic *efx = rx_queue->efx; in efx_fini_rx_recycle_ring()
156 if (unlikely(!rx_queue->page_ring)) in efx_fini_rx_recycle_ring()
160 for (i = 0; i <= rx_queue->page_ptr_mask; i++) { in efx_fini_rx_recycle_ring()
161 struct page *page = rx_queue->page_ring[i]; in efx_fini_rx_recycle_ring()
173 kfree(rx_queue->page_ring); in efx_fini_rx_recycle_ring()
174 rx_queue->page_ring = NULL; in efx_fini_rx_recycle_ring()
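
efx_fini_rx_recycle_ring() walks every slot, index 0 through page_ptr_mask inclusive, releases whatever pages are still parked there, then frees the ring and NULLs the pointer so later teardown paths become no-ops. Sketched against the same hypothetical model:

    static void fini_recycle_ring(struct recycle_ring *r)
    {
            unsigned int i;

            if (!r->page_ring)
                    return;

            /* page_ptr_mask is size - 1, so "<=" visits every slot */
            for (i = 0; i <= r->page_ptr_mask; i++) {
                    struct page *page = r->page_ring[i];

                    /* the driver unmaps DMA state and put_page()s any
                     * non-NULL entry here; the opaque model has nothing
                     * real to release */
                    (void)page;
            }

            free(r->page_ring);
            r->page_ring = NULL;
    }
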
177 static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, in efx_fini_rx_buffer() argument
186 efx_unmap_rx_buffer(rx_queue->efx, rx_buf); in efx_fini_rx_buffer()
187 efx_siena_free_rx_buffers(rx_queue, rx_buf, 1); in efx_fini_rx_buffer()
192 int efx_siena_probe_rx_queue(struct efx_rx_queue *rx_queue) in efx_siena_probe_rx_queue() argument
194 struct efx_nic *efx = rx_queue->efx; in efx_siena_probe_rx_queue()
201 rx_queue->ptr_mask = entries - 1; in efx_siena_probe_rx_queue()
205 efx_rx_queue_index(rx_queue), efx->rxq_entries, in efx_siena_probe_rx_queue()
206 rx_queue->ptr_mask); in efx_siena_probe_rx_queue()
209 rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer), in efx_siena_probe_rx_queue()
211 if (!rx_queue->buffer) in efx_siena_probe_rx_queue()
214 rc = efx_nic_probe_rx(rx_queue); in efx_siena_probe_rx_queue()
216 kfree(rx_queue->buffer); in efx_siena_probe_rx_queue()
217 rx_queue->buffer = NULL; in efx_siena_probe_rx_queue()
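
efx_siena_probe_rx_queue() sizes the software descriptor ring to a power of two (ptr_mask = entries - 1), kcalloc()s the per-descriptor state, and unwinds that allocation if the hardware-facing efx_nic_probe_rx() step fails. A self-contained sketch of the same allocate-then-unwind shape; struct rx_buffer here is a heavy simplification of efx_rx_buffer, and probe_hw() is a hypothetical stand-in:

    #include <errno.h>
    #include <stdlib.h>

    struct rx_buffer {                  /* much-simplified efx_rx_buffer */
            struct page *page;
            unsigned int page_offset;
            unsigned int len;
    };

    struct rxq {
            struct rx_buffer *buffer;   /* software descriptor ring */
            unsigned int ptr_mask;      /* entries - 1              */
    };

    static int probe_hw(struct rxq *q)  /* stand-in for efx_nic_probe_rx() */
    {
            (void)q;
            return 0;
    }

    static int probe_rx_queue(struct rxq *q, unsigned int entries)
    {
            int rc;

            q->ptr_mask = entries - 1;  /* entries is a power of two */
            q->buffer = calloc(entries, sizeof(*q->buffer));
            if (!q->buffer)
                    return -ENOMEM;

            rc = probe_hw(q);
            if (rc) {                   /* unwind software state on failure */
                    free(q->buffer);
                    q->buffer = NULL;
            }
            return rc;
    }
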
223 void efx_siena_init_rx_queue(struct efx_rx_queue *rx_queue) in efx_siena_init_rx_queue() argument
226 struct efx_nic *efx = rx_queue->efx; in efx_siena_init_rx_queue()
229 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, in efx_siena_init_rx_queue()
230 "initialising RX queue %d\n", efx_rx_queue_index(rx_queue)); in efx_siena_init_rx_queue()
233 rx_queue->added_count = 0; in efx_siena_init_rx_queue()
234 rx_queue->notified_count = 0; in efx_siena_init_rx_queue()
235 rx_queue->removed_count = 0; in efx_siena_init_rx_queue()
236 rx_queue->min_fill = -1U; in efx_siena_init_rx_queue()
237 efx_init_rx_recycle_ring(rx_queue); in efx_siena_init_rx_queue()
239 rx_queue->page_remove = 0; in efx_siena_init_rx_queue()
240 rx_queue->page_add = rx_queue->page_ptr_mask + 1; in efx_siena_init_rx_queue()
241 rx_queue->page_recycle_count = 0; in efx_siena_init_rx_queue()
242 rx_queue->page_recycle_failed = 0; in efx_siena_init_rx_queue()
243 rx_queue->page_recycle_full = 0; in efx_siena_init_rx_queue()
257 rx_queue->max_fill = max_fill; in efx_siena_init_rx_queue()
258 rx_queue->fast_fill_trigger = trigger; in efx_siena_init_rx_queue()
259 rx_queue->refill_enabled = true; in efx_siena_init_rx_queue()
262 rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev, in efx_siena_init_rx_queue()
263 rx_queue->core_index, 0); in efx_siena_init_rx_queue()
271 rx_queue->xdp_rxq_info_valid = true; in efx_siena_init_rx_queue()
275 efx_nic_init_rx(rx_queue); in efx_siena_init_rx_queue()
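
efx_siena_init_rx_queue() resets the free-running counters (added/notified/removed), seeds min_fill with -1U so the first measurement always records a new low watermark, restarts the recycle cursors with page_add one full ring ahead of page_remove, sets the max_fill / fast_fill_trigger watermarks, and registers the queue's XDP rxq info at its core_index. The counters are never masked except when indexing, so the fill level is a plain unsigned difference, as in this hypothetical model:

    struct fill_state {
            unsigned int added_count;        /* descriptors filled            */
            unsigned int notified_count;     /* descriptors the NIC knows of  */
            unsigned int removed_count;      /* descriptors consumed          */
            unsigned int min_fill;           /* low-watermark statistic       */
            unsigned int max_fill;           /* refill target                 */
            unsigned int fast_fill_trigger;  /* refill only below this level  */
    };

    static unsigned int fill_level(const struct fill_state *q)
    {
            /* unsigned subtraction is modular, so the difference stays
             * correct even after the free-running counters wrap */
            return q->added_count - q->removed_count;
    }

Keeping the counters free-running and masking only at indexing time makes empty (added == removed) and full (added - removed == ring size) unambiguous without sacrificing a ring slot.
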
278 void efx_siena_fini_rx_queue(struct efx_rx_queue *rx_queue) in efx_siena_fini_rx_queue() argument
283 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, in efx_siena_fini_rx_queue()
284 "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue)); in efx_siena_fini_rx_queue()
286 del_timer_sync(&rx_queue->slow_fill); in efx_siena_fini_rx_queue()
289 if (rx_queue->buffer) { in efx_siena_fini_rx_queue()
290 for (i = rx_queue->removed_count; i < rx_queue->added_count; in efx_siena_fini_rx_queue()
292 unsigned int index = i & rx_queue->ptr_mask; in efx_siena_fini_rx_queue()
294 rx_buf = efx_rx_buffer(rx_queue, index); in efx_siena_fini_rx_queue()
295 efx_fini_rx_buffer(rx_queue, rx_buf); in efx_siena_fini_rx_queue()
299 efx_fini_rx_recycle_ring(rx_queue); in efx_siena_fini_rx_queue()
301 if (rx_queue->xdp_rxq_info_valid) in efx_siena_fini_rx_queue()
302 xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info); in efx_siena_fini_rx_queue()
304 rx_queue->xdp_rxq_info_valid = false; in efx_siena_fini_rx_queue()
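
efx_siena_fini_rx_queue() tears down in reverse order: quiesce the slow-fill timer with del_timer_sync(), release only the descriptors still outstanding, i.e. indices in [removed_count, added_count) masked into the ring, then empty the recycle ring and unregister the XDP state. A sketch of the drain loop, reusing the hypothetical struct rxq above:

    static void drain_outstanding(struct rxq *q,
                                  unsigned int removed_count,
                                  unsigned int added_count)
    {
            unsigned int i;

            /* only [removed_count, added_count) is live; everything else
             * was never filled or has already been reclaimed */
            for (i = removed_count; i < added_count; i++) {
                    struct rx_buffer *rx_buf = &q->buffer[i & q->ptr_mask];

                    /* the driver unmaps the buffer's DMA state and frees
                     * its page here */
                    rx_buf->page = NULL;
            }
    }
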
307 void efx_siena_remove_rx_queue(struct efx_rx_queue *rx_queue) in efx_siena_remove_rx_queue() argument
309 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, in efx_siena_remove_rx_queue()
310 "destroying RX queue %d\n", efx_rx_queue_index(rx_queue)); in efx_siena_remove_rx_queue()
312 efx_nic_remove_rx(rx_queue); in efx_siena_remove_rx_queue()
314 kfree(rx_queue->buffer); in efx_siena_remove_rx_queue()
315 rx_queue->buffer = NULL; in efx_siena_remove_rx_queue()
336 void efx_siena_free_rx_buffers(struct efx_rx_queue *rx_queue, in efx_siena_free_rx_buffers() argument
345 rx_buf = efx_rx_buf_next(rx_queue, rx_buf); in efx_siena_free_rx_buffers()
351 struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill); in efx_siena_rx_slow_fill() local
354 efx_nic_generate_fill_event(rx_queue); in efx_siena_rx_slow_fill()
355 ++rx_queue->slow_fill_count; in efx_siena_rx_slow_fill()
358 static void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue) in efx_schedule_slow_fill() argument
360 mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10)); in efx_schedule_slow_fill()
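
When refill cannot make progress, efx_schedule_slow_fill() arms a 10 ms one-shot timer rather than retrying inline; on expiry efx_siena_rx_slow_fill() bumps slow_fill_count and generates a fill event so the refill re-runs from the normal event path. A rough, hypothetical userspace analogue of that back-off shape, with mod_timer() reduced to an absolute deadline in milliseconds:

    #define SLOW_FILL_DELAY_MS 10

    struct slow_fill_timer {
            unsigned long expires_ms;       /* absolute deadline */
            unsigned int slow_fill_count;   /* retries scheduled */
    };

    static void schedule_slow_fill(struct slow_fill_timer *t,
                                   unsigned long now_ms)
    {
            /* mod_timer() analogue: (re-)arm the one-shot retry */
            t->expires_ms = now_ms + SLOW_FILL_DELAY_MS;
    }

    static void slow_fill_expired(struct slow_fill_timer *t)
    {
            ++t->slow_fill_count;
            /* the driver raises a fill event here instead of calling
             * the refill path directly */
    }
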
372 static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic) in efx_init_rx_buffers() argument
375 struct efx_nic *efx = rx_queue->efx; in efx_init_rx_buffers()
383 page = efx_reuse_page(rx_queue); in efx_init_rx_buffers()
410 index = rx_queue->added_count & rx_queue->ptr_mask; in efx_init_rx_buffers()
411 rx_buf = efx_rx_buffer(rx_queue, index); in efx_init_rx_buffers()
419 ++rx_queue->added_count; in efx_init_rx_buffers()
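
efx_init_rx_buffers() first asks efx_reuse_page() for a recycled page and only falls back to fresh allocation on a miss; each new buffer goes into the slot selected by the free-running added_count, which is bumped after the slot is written. A sketch of just that slot arithmetic, reusing the hypothetical struct rxq; the real function carves several buffers out of each page and DMA-maps them, which is omitted here:

    static void init_one_rx_buffer(struct rxq *q, unsigned int *added_count,
                                   struct page *page)
    {
            unsigned int index = *added_count & q->ptr_mask;
            struct rx_buffer *rx_buf = &q->buffer[index];

            rx_buf->page = page;
            rx_buf->page_offset = 0;    /* simplification: one buffer per page */
            rx_buf->len = 0;
            ++*added_count;             /* publish: the descriptor now counts
                                           toward the fill level */
    }
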
456 void efx_siena_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, in efx_siena_fast_push_rx_descriptors() argument
459 struct efx_nic *efx = rx_queue->efx; in efx_siena_fast_push_rx_descriptors()
463 if (!rx_queue->refill_enabled) in efx_siena_fast_push_rx_descriptors()
467 fill_level = (rx_queue->added_count - rx_queue->removed_count); in efx_siena_fast_push_rx_descriptors()
468 EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries); in efx_siena_fast_push_rx_descriptors()
469 if (fill_level >= rx_queue->fast_fill_trigger) in efx_siena_fast_push_rx_descriptors()
473 if (unlikely(fill_level < rx_queue->min_fill)) { in efx_siena_fast_push_rx_descriptors()
475 rx_queue->min_fill = fill_level; in efx_siena_fast_push_rx_descriptors()
479 space = rx_queue->max_fill - fill_level; in efx_siena_fast_push_rx_descriptors()
482 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, in efx_siena_fast_push_rx_descriptors()
485 efx_rx_queue_index(rx_queue), fill_level, in efx_siena_fast_push_rx_descriptors()
486 rx_queue->max_fill); in efx_siena_fast_push_rx_descriptors()
489 rc = efx_init_rx_buffers(rx_queue, atomic); in efx_siena_fast_push_rx_descriptors()
492 efx_schedule_slow_fill(rx_queue); in efx_siena_fast_push_rx_descriptors()
497 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, in efx_siena_fast_push_rx_descriptors()
499 "to level %d\n", efx_rx_queue_index(rx_queue), in efx_siena_fast_push_rx_descriptors()
500 rx_queue->added_count - rx_queue->removed_count); in efx_siena_fast_push_rx_descriptors()
503 if (rx_queue->notified_count != rx_queue->added_count) in efx_siena_fast_push_rx_descriptors()
504 efx_nic_notify_rx_desc(rx_queue); in efx_siena_fast_push_rx_descriptors()
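
efx_siena_fast_push_rx_descriptors() is the refill policy in one place: bail out while the fill level (added minus removed) is at or above fast_fill_trigger, record any new low in min_fill, top the queue up toward max_fill, fall back to the slow-fill timer if buffer allocation fails, and finally notify the NIC of any descriptors it has not yet been told about. A condensed, hypothetical sketch reusing struct fill_state above; init_buffers() and notify_hw() are stand-in stubs, and the real code refills in batches and checks refill_enabled first:

    static int init_buffers(struct fill_state *q)  /* stub: allocate + fill */
    {
            ++q->added_count;
            return 0;
    }

    static void notify_hw(struct fill_state *q)    /* stub: ring doorbell */
    {
            q->notified_count = q->added_count;
    }

    static void fast_push(struct fill_state *q, void (*slow_fill)(void))
    {
            unsigned int level = q->added_count - q->removed_count;
            unsigned int space;

            if (level >= q->fast_fill_trigger)
                    goto out;                   /* full enough already */

            if (level < q->min_fill && level)
                    q->min_fill = level;        /* low-watermark statistic */

            space = q->max_fill - level;
            while (space--) {
                    if (init_buffers(q)) {      /* allocation failed:       */
                            slow_fill();        /* retry via the 10ms timer */
                            break;
                    }
            }
    out:
            if (q->notified_count != q->added_count)
                    notify_hw(q);               /* tell the NIC what's new */
    }
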
521 struct efx_rx_queue *rx_queue; in efx_siena_rx_packet_gro() local
523 rx_queue = efx_channel_get_rx_queue(channel); in efx_siena_rx_packet_gro()
524 efx_siena_free_rx_buffers(rx_queue, rx_buf, n_frags); in efx_siena_rx_packet_gro()
549 rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf); in efx_siena_rx_packet_gro()
555 skb_record_rx_queue(skb, channel->rx_queue.core_index); in efx_siena_rx_packet_gro()