Lines Matching refs:tx_ring — each entry below is one cross-reference hit: the source line number, the code on that line, and the enclosing function ("local"/"argument" marks how tx_ring is bound there).
54 struct ena_ring *tx_ring; in ena_tx_timeout() local
63 tx_ring = &adapter->tx_ring[txqueue]; in ena_tx_timeout()
65 time_since_last_napi = jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies); in ena_tx_timeout()
66 napi_scheduled = !!(tx_ring->napi->state & NAPIF_STATE_SCHED); in ena_tx_timeout()
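
The ena_tx_timeout() hits above show the two datapoints the driver gathers when the stack reports a TX timeout: how long since the queue's NAPI handler last ran, and whether a poll is currently scheduled (NAPIF_STATE_SCHED). Together they distinguish a stalled host-side poll from a completion the device never produced. A minimal sketch of the same pattern, assuming the ena_ring layout implied by the listing:

#include <linux/jiffies.h>
#include <linux/netdevice.h>

/* Sketch: derive TX-timeout diagnostics from NAPI bookkeeping.
 * tx_stats.last_napi_jiffies is assumed to be stamped at the end
 * of every poll, as ena_io_poll() does further down the listing.
 */
static void ena_tx_timeout_diag(struct ena_ring *tx_ring)
{
	u32 since_napi = jiffies_to_usecs(jiffies -
					  tx_ring->tx_stats.last_napi_jiffies);
	bool napi_scheduled = !!(tx_ring->napi->state & NAPIF_STATE_SCHED);

	netdev_warn(tx_ring->netdev,
		    "%u usecs since last NAPI poll, napi %sscheduled\n",
		    since_napi, napi_scheduled ? "" : "not ");
}
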
193 txr = &adapter->tx_ring[i]; in ena_init_io_rings()
223 rxr->xdp_ring = &adapter->tx_ring[i + adapter->num_io_queues]; in ena_init_io_rings()
236 struct ena_ring *tx_ring = &adapter->tx_ring[qid]; in ena_setup_tx_resources() local
240 if (tx_ring->tx_buffer_info) { in ena_setup_tx_resources()
246 size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size; in ena_setup_tx_resources()
249 tx_ring->tx_buffer_info = vzalloc_node(size, node); in ena_setup_tx_resources()
250 if (!tx_ring->tx_buffer_info) { in ena_setup_tx_resources()
251 tx_ring->tx_buffer_info = vzalloc(size); in ena_setup_tx_resources()
252 if (!tx_ring->tx_buffer_info) in ena_setup_tx_resources()
256 size = sizeof(u16) * tx_ring->ring_size; in ena_setup_tx_resources()
257 tx_ring->free_ids = vzalloc_node(size, node); in ena_setup_tx_resources()
258 if (!tx_ring->free_ids) { in ena_setup_tx_resources()
259 tx_ring->free_ids = vzalloc(size); in ena_setup_tx_resources()
260 if (!tx_ring->free_ids) in ena_setup_tx_resources()
264 size = tx_ring->tx_max_header_size; in ena_setup_tx_resources()
265 tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node); in ena_setup_tx_resources()
266 if (!tx_ring->push_buf_intermediate_buf) { in ena_setup_tx_resources()
267 tx_ring->push_buf_intermediate_buf = vzalloc(size); in ena_setup_tx_resources()
268 if (!tx_ring->push_buf_intermediate_buf) in ena_setup_tx_resources()
273 for (i = 0; i < tx_ring->ring_size; i++) in ena_setup_tx_resources()
274 tx_ring->free_ids[i] = i; in ena_setup_tx_resources()
277 memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats)); in ena_setup_tx_resources()
279 tx_ring->next_to_use = 0; in ena_setup_tx_resources()
280 tx_ring->next_to_clean = 0; in ena_setup_tx_resources()
281 tx_ring->cpu = ena_irq->cpu; in ena_setup_tx_resources()
282 tx_ring->numa_node = node; in ena_setup_tx_resources()
286 vfree(tx_ring->free_ids); in ena_setup_tx_resources()
287 tx_ring->free_ids = NULL; in ena_setup_tx_resources()
289 vfree(tx_ring->tx_buffer_info); in ena_setup_tx_resources()
290 tx_ring->tx_buffer_info = NULL; in ena_setup_tx_resources()
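
ena_setup_tx_resources() allocates each per-ring array with vzalloc_node() on the ring's NUMA node first and falls back to plain vzalloc() when node-local memory is unavailable, unwinding earlier allocations on failure (the vfree() calls above are that unwind path). A condensed sketch of the pattern; the err_* label is an assumption, since the listing shows the unwind calls but not the label names:

#include <linux/errno.h>
#include <linux/vmalloc.h>

static int setup_tx_arrays(struct ena_ring *tx_ring, int node)
{
	size_t size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;

	tx_ring->tx_buffer_info = vzalloc_node(size, node);
	if (!tx_ring->tx_buffer_info) {
		tx_ring->tx_buffer_info = vzalloc(size); /* any-node fallback */
		if (!tx_ring->tx_buffer_info)
			return -ENOMEM;
	}

	size = sizeof(u16) * tx_ring->ring_size;
	tx_ring->free_ids = vzalloc_node(size, node);
	if (!tx_ring->free_ids) {
		tx_ring->free_ids = vzalloc(size);
		if (!tx_ring->free_ids)
			goto err_free_buffer_info;
	}

	return 0;

err_free_buffer_info:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	return -ENOMEM;
}
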
303 struct ena_ring *tx_ring = &adapter->tx_ring[qid]; in ena_free_tx_resources() local
305 vfree(tx_ring->tx_buffer_info); in ena_free_tx_resources()
306 tx_ring->tx_buffer_info = NULL; in ena_free_tx_resources()
308 vfree(tx_ring->free_ids); in ena_free_tx_resources()
309 tx_ring->free_ids = NULL; in ena_free_tx_resources()
311 vfree(tx_ring->push_buf_intermediate_buf); in ena_free_tx_resources()
312 tx_ring->push_buf_intermediate_buf = NULL; in ena_free_tx_resources()
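
ena_free_tx_resources() pairs every vfree() with a NULL store. vfree(NULL) is a no-op, and the cleared pointer is what lets the tx_buffer_info check at the top of ena_setup_tx_resources() detect an already-initialized ring. A sketch of the idempotent teardown:

static void free_tx_arrays(struct ena_ring *tx_ring)
{
	vfree(tx_ring->tx_buffer_info);		/* vfree(NULL) is a no-op */
	tx_ring->tx_buffer_info = NULL;		/* re-arms the setup-time check */
	vfree(tx_ring->free_ids);
	tx_ring->free_ids = NULL;
	vfree(tx_ring->push_buf_intermediate_buf);
	tx_ring->push_buf_intermediate_buf = NULL;
}
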
662 void ena_unmap_tx_buff(struct ena_ring *tx_ring, in ena_unmap_tx_buff() argument
676 dma_unmap_single(tx_ring->dev, in ena_unmap_tx_buff()
686 dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr), in ena_unmap_tx_buff()
695 static void ena_free_tx_bufs(struct ena_ring *tx_ring) in ena_free_tx_bufs() argument
701 is_xdp_ring = ENA_IS_XDP_INDEX(tx_ring->adapter, tx_ring->qid); in ena_free_tx_bufs()
703 for (i = 0; i < tx_ring->ring_size; i++) { in ena_free_tx_bufs()
704 struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i]; in ena_free_tx_bufs()
710 netif_notice(tx_ring->adapter, ifdown, tx_ring->netdev, in ena_free_tx_bufs()
712 tx_ring->qid, i); in ena_free_tx_bufs()
715 netif_dbg(tx_ring->adapter, ifdown, tx_ring->netdev, in ena_free_tx_bufs()
717 tx_ring->qid, i); in ena_free_tx_bufs()
720 ena_unmap_tx_buff(tx_ring, tx_info); in ena_free_tx_bufs()
729 netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev, in ena_free_tx_bufs()
730 tx_ring->qid)); in ena_free_tx_bufs()
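
ena_free_tx_bufs() sweeps every descriptor slot at ifdown, unmaps whatever is still in flight, and finally clears the TX queue's byte-queue-limit accounting with netdev_tx_reset_queue(). A condensed sketch; the tx_info->skb field is assumed (the listing does not show it), and the print-once notice/debug logic is elided:

static void free_inflight_tx_bufs(struct ena_ring *tx_ring)
{
	u32 i;

	for (i = 0; i < tx_ring->ring_size; i++) {
		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];

		if (!tx_info->skb)		/* slot not in flight */
			continue;

		ena_unmap_tx_buff(tx_ring, tx_info);
		dev_kfree_skb_any(tx_info->skb);
		tx_info->skb = NULL;
	}

	/* clear byte-queue-limit state so the queue restarts clean */
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->qid));
}
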
735 struct ena_ring *tx_ring; in ena_free_all_tx_bufs() local
739 tx_ring = &adapter->tx_ring[i]; in ena_free_all_tx_bufs()
740 ena_free_tx_bufs(tx_ring); in ena_free_all_tx_bufs()
796 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id) in validate_tx_req_id() argument
800 tx_info = &tx_ring->tx_buffer_info[req_id]; in validate_tx_req_id()
804 return handle_invalid_req_id(tx_ring, req_id, tx_info, false); in validate_tx_req_id()
807 static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget) in ena_clean_tx_irq() argument
818 next_to_clean = tx_ring->next_to_clean; in ena_clean_tx_irq()
819 txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid); in ena_clean_tx_irq()
825 rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, in ena_clean_tx_irq()
829 handle_invalid_req_id(tx_ring, req_id, NULL, false); in ena_clean_tx_irq()
834 rc = validate_tx_req_id(tx_ring, req_id); in ena_clean_tx_irq()
838 tx_info = &tx_ring->tx_buffer_info[req_id]; in ena_clean_tx_irq()
847 ena_unmap_tx_buff(tx_ring, tx_info); in ena_clean_tx_irq()
849 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, in ena_clean_tx_irq()
850 "tx_poll: q %d skb %p completed\n", tx_ring->qid, in ena_clean_tx_irq()
858 tx_ring->free_ids[next_to_clean] = req_id; in ena_clean_tx_irq()
860 tx_ring->ring_size); in ena_clean_tx_irq()
863 tx_ring->next_to_clean = next_to_clean; in ena_clean_tx_irq()
864 ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done); in ena_clean_tx_irq()
868 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, in ena_clean_tx_irq()
870 tx_ring->qid, tx_pkts); in ena_clean_tx_irq()
877 above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, in ena_clean_tx_irq()
882 ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, in ena_clean_tx_irq()
885 test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) { in ena_clean_tx_irq()
887 ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1, in ena_clean_tx_irq()
888 &tx_ring->syncp); in ena_clean_tx_irq()
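
In ena_clean_tx_irq(), each completed req_id is recycled into free_ids[] at the next_to_clean position and the index advances with wraparound, so free_ids acts as a ring of reusable descriptor IDs shared with ena_start_xmit(); once enough SQ space frees up above the wakeup threshold (and the device is still up), the stopped netdev queue is woken. A sketch of the recycling step; the open-coded mask is an assumption about what the driver's ENA_TX_RING_IDX_NEXT() macro expands to, and requires a power-of-two ring_size:

static u16 recycle_req_id(struct ena_ring *tx_ring, u16 next_to_clean,
			  u16 req_id)
{
	tx_ring->free_ids[next_to_clean] = req_id;
	return (next_to_clean + 1) & (tx_ring->ring_size - 1);
}
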
1368 void ena_unmask_interrupt(struct ena_ring *tx_ring, in ena_unmask_interrupt() argument
1371 u32 rx_interval = tx_ring->smoothed_interval; in ena_unmask_interrupt()
1387 tx_ring->smoothed_interval, in ena_unmask_interrupt()
1390 ena_increase_stat(&tx_ring->tx_stats.unmask_interrupt, 1, in ena_unmask_interrupt()
1391 &tx_ring->syncp); in ena_unmask_interrupt()
1398 ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg); in ena_unmask_interrupt()
1401 void ena_update_ring_numa_node(struct ena_ring *tx_ring, in ena_update_ring_numa_node() argument
1408 if (likely(tx_ring->cpu == cpu)) in ena_update_ring_numa_node()
1411 tx_ring->cpu = cpu; in ena_update_ring_numa_node()
1417 if (likely(tx_ring->numa_node == numa_node)) in ena_update_ring_numa_node()
1423 ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node); in ena_update_ring_numa_node()
1424 tx_ring->numa_node = numa_node; in ena_update_ring_numa_node()
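
ena_update_ring_numa_node() caches the last-seen CPU and NUMA node on the ring so the common no-migration case costs two compares; only when the node actually changes does it call ena_com_update_numa_node() to update the completion queue's placement hint. A sketch under those assumptions (the get_cpu()/put_cpu() framing is illustrative, not taken from the listing):

#include <linux/smp.h>
#include <linux/topology.h>

static void update_ring_numa(struct ena_ring *tx_ring)
{
	int cpu = get_cpu();
	int numa_node;

	if (likely(tx_ring->cpu == cpu))
		goto out;

	tx_ring->cpu = cpu;
	numa_node = cpu_to_node(cpu);

	if (likely(tx_ring->numa_node == numa_node))
		goto out;

	ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
	tx_ring->numa_node = numa_node;
out:
	put_cpu();
}
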
1440 struct ena_ring *tx_ring, *rx_ring; in ena_io_poll() local
1447 tx_ring = ena_napi->tx_ring; in ena_io_poll()
1450 tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER; in ena_io_poll()
1452 if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) || in ena_io_poll()
1453 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) { in ena_io_poll()
1458 tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget); in ena_io_poll()
1468 if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) || in ena_io_poll()
1469 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) { in ena_io_poll()
1489 ena_update_ring_numa_node(tx_ring, rx_ring); in ena_io_poll()
1490 ena_unmask_interrupt(tx_ring, rx_ring); in ena_io_poll()
1498 u64_stats_update_begin(&tx_ring->syncp); in ena_io_poll()
1499 tx_ring->tx_stats.napi_comp += napi_comp_call; in ena_io_poll()
1500 tx_ring->tx_stats.tx_poll++; in ena_io_poll()
1501 u64_stats_update_end(&tx_ring->syncp); in ena_io_poll()
1503 tx_ring->tx_stats.last_napi_jiffies = jiffies; in ena_io_poll()
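
ena_io_poll() budgets TX cleanup as a fixed fraction of the ring (ENA_TX_POLL_BUDGET_DIVIDER), bails out early if the device is down or a reset was triggered, updates napi_comp/tx_poll under the u64_stats sequence counter, and stamps last_napi_jiffies on the way out, which is the timestamp read by ena_tx_timeout() and the missing-completion watchdog. A compressed skeleton of that flow, with the RX half, completion handling, and interrupt unmasking elided:

static int io_poll_sketch(struct napi_struct *napi, int budget)
{
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	struct ena_ring *tx_ring = ena_napi->tx_ring;
	/* TX budget is a fixed fraction of the ring size */
	u32 tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;

	if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
	    test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	ena_clean_tx_irq(tx_ring, tx_budget);
	/* ... RX work, napi_complete_done(), interrupt unmask ... */

	tx_ring->tx_stats.last_napi_jiffies = jiffies;
	return 0;
}
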
1773 struct ena_ring *rx_ring, *tx_ring; in ena_init_napi_in_range() local
1778 tx_ring = &adapter->tx_ring[i]; in ena_init_napi_in_range()
1789 napi->tx_ring = tx_ring; in ena_init_napi_in_range()
1893 struct ena_ring *tx_ring; in ena_create_io_tx_queue() local
1900 tx_ring = &adapter->tx_ring[qid]; in ena_create_io_tx_queue()
1910 ctx.queue_size = tx_ring->ring_size; in ena_create_io_tx_queue()
1911 ctx.numa_node = tx_ring->numa_node; in ena_create_io_tx_queue()
1922 &tx_ring->ena_com_io_sq, in ena_create_io_tx_queue()
1923 &tx_ring->ena_com_io_cq); in ena_create_io_tx_queue()
1932 ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node); in ena_create_io_tx_queue()
2040 adapter->tx_ring[i].ring_size = new_tx_size; in set_io_rings_size()
2114 cur_tx_ring_size = adapter->tx_ring[0].ring_size; in create_queues_with_size_backoff()
2194 ena_unmask_interrupt(&adapter->tx_ring[i], in ena_up()
2472 static int ena_check_and_linearize_skb(struct ena_ring *tx_ring, in ena_check_and_linearize_skb() argument
2480 if (num_frags < tx_ring->sgl_size) in ena_check_and_linearize_skb()
2483 if ((num_frags == tx_ring->sgl_size) && in ena_check_and_linearize_skb()
2484 (header_len < tx_ring->tx_max_header_size)) in ena_check_and_linearize_skb()
2487 ena_increase_stat(&tx_ring->tx_stats.linearize, 1, &tx_ring->syncp); in ena_check_and_linearize_skb()
2491 ena_increase_stat(&tx_ring->tx_stats.linearize_failed, 1, in ena_check_and_linearize_skb()
2492 &tx_ring->syncp); in ena_check_and_linearize_skb()
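
ena_check_and_linearize_skb() linearizes only when the fragment list cannot fit the device scatter list: strictly fewer frags than sgl_size always fits, and exactly sgl_size frags still fits when the head is short enough to travel as pushed header (freeing one SGL entry). A sketch of that decision:

#include <linux/skbuff.h>

static int maybe_linearize(struct ena_ring *tx_ring, struct sk_buff *skb)
{
	int num_frags = skb_shinfo(skb)->nr_frags;

	if (num_frags < tx_ring->sgl_size)
		return 0;			/* fits as-is */

	if (num_frags == tx_ring->sgl_size &&
	    skb_headlen(skb) < tx_ring->tx_max_header_size)
		return 0;		/* head goes out as pushed header */

	return skb_linearize(skb);	/* collapses frags; may fail -ENOMEM */
}
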
2498 static int ena_tx_map_skb(struct ena_ring *tx_ring, in ena_tx_map_skb() argument
2504 struct ena_adapter *adapter = tx_ring->adapter; in ena_tx_map_skb()
2516 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { in ena_tx_map_skb()
2527 push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size); in ena_tx_map_skb()
2529 tx_ring->push_buf_intermediate_buf); in ena_tx_map_skb()
2532 ena_increase_stat(&tx_ring->tx_stats.llq_buffer_copy, 1, in ena_tx_map_skb()
2533 &tx_ring->syncp); in ena_tx_map_skb()
2540 tx_ring->tx_max_header_size); in ena_tx_map_skb()
2548 dma = dma_map_single(tx_ring->dev, skb->data + push_len, in ena_tx_map_skb()
2550 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) in ena_tx_map_skb()
2575 dma = skb_frag_dma_map(tx_ring->dev, frag, delta, in ena_tx_map_skb()
2577 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) in ena_tx_map_skb()
2590 ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1, in ena_tx_map_skb()
2591 &tx_ring->syncp); in ena_tx_map_skb()
2597 ena_unmap_tx_buff(tx_ring, tx_info); in ena_tx_map_skb()
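
In ena_tx_map_skb() under the LLQ placement policy (ENA_ADMIN_PLACEMENT_POLICY_DEV), up to tx_max_header_size bytes are copied into push_buf_intermediate_buf and written to the device inline, so only the remainder of the skb head (and then each page fragment) needs DMA mapping; mapping failures are counted in dma_mapping_err and unwound through ena_unmap_tx_buff(). A sketch of the head mapping after the push copy, as a hypothetical helper:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Map only the part of the skb head that was not pushed inline.
 * push_len bytes at skb->data were already copied into
 * push_buf_intermediate_buf for the LLQ write.
 */
static dma_addr_t map_head_after_push(struct ena_ring *tx_ring,
				      struct sk_buff *skb, u32 push_len)
{
	u32 head_len = skb_headlen(skb) - push_len;

	if (!head_len)
		return 0;	/* whole head went out inline */

	/* caller checks dma_mapping_error() and, on failure, bumps
	 * tx_stats.dma_mapping_err and unwinds via ena_unmap_tx_buff()
	 */
	return dma_map_single(tx_ring->dev, skb->data + push_len,
			      head_len, DMA_TO_DEVICE);
}
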
2608 struct ena_ring *tx_ring; in ena_start_xmit() local
2617 tx_ring = &adapter->tx_ring[qid]; in ena_start_xmit()
2620 rc = ena_check_and_linearize_skb(tx_ring, skb); in ena_start_xmit()
2624 next_to_use = tx_ring->next_to_use; in ena_start_xmit()
2625 req_id = tx_ring->free_ids[next_to_use]; in ena_start_xmit()
2626 tx_info = &tx_ring->tx_buffer_info[req_id]; in ena_start_xmit()
2631 rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len); in ena_start_xmit()
2643 ena_tx_csum(&ena_tx_ctx, skb, tx_ring->disable_meta_caching); in ena_start_xmit()
2646 tx_ring, in ena_start_xmit()
2660 if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, in ena_start_xmit()
2661 tx_ring->sgl_size + 2))) { in ena_start_xmit()
2666 ena_increase_stat(&tx_ring->tx_stats.queue_stop, 1, in ena_start_xmit()
2667 &tx_ring->syncp); in ena_start_xmit()
2679 if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, in ena_start_xmit()
2682 ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1, in ena_start_xmit()
2683 &tx_ring->syncp); in ena_start_xmit()
2693 ena_ring_tx_doorbell(tx_ring); in ena_start_xmit()
2698 ena_unmap_tx_buff(tx_ring, tx_info); in ena_start_xmit()
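
ena_start_xmit() stops the queue when fewer than sgl_size + 2 descriptors remain (worst case for the next packet plus metadata), then re-checks space and wakes itself if the completion path freed descriptors in the meantime, the standard stop-then-recheck dance that closes the missed-wakeup race. A sketch; ENA_TX_WAKEUP_THRESH is assumed to be the driver's wake threshold and does not appear in the listing:

static void stop_queue_if_full(struct ena_ring *tx_ring,
			       struct netdev_queue *txq)
{
	/* sgl_size + 2: worst-case descriptors for the next packet */
	if (likely(ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						tx_ring->sgl_size + 2)))
		return;

	netif_tx_stop_queue(txq);
	ena_increase_stat(&tx_ring->tx_stats.queue_stop, 1, &tx_ring->syncp);

	smp_mb();	/* order the stop before re-reading SQ space */

	/* completion path may have freed space between check and stop */
	if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
					 ENA_TX_WAKEUP_THRESH)) {
		netif_tx_wake_queue(txq);
		ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1,
				  &tx_ring->syncp);
	}
}
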
2810 struct ena_ring *rx_ring, *tx_ring; in ena_get_stats64() local
2823 tx_ring = &adapter->tx_ring[i]; in ena_get_stats64()
2826 start = u64_stats_fetch_begin(&tx_ring->syncp); in ena_get_stats64()
2827 packets = tx_ring->tx_stats.cnt; in ena_get_stats64()
2828 bytes = tx_ring->tx_stats.bytes; in ena_get_stats64()
2829 } while (u64_stats_fetch_retry(&tx_ring->syncp, start)); in ena_get_stats64()
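
ena_get_stats64() snapshots the 64-bit per-ring counters inside a u64_stats retry loop, so a 32-bit reader never observes a torn value; on 64-bit kernels the loop collapses to a single pass. The pattern, wrapped as a small helper:

#include <linux/u64_stats_sync.h>

static void snapshot_tx_stats(struct ena_ring *tx_ring,
			      u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&tx_ring->syncp);
		*packets = tx_ring->tx_stats.cnt;
		*bytes = tx_ring->tx_stats.bytes;
	} while (u64_stats_fetch_retry(&tx_ring->syncp, start));
}
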
2900 if (adapter->tx_ring->ring_size) in ena_calc_io_queue_size()
2901 tx_queue_size = adapter->tx_ring->ring_size; in ena_calc_io_queue_size()
3317 txr = &adapter->tx_ring[i]; in ena_restore_device()
3417 struct ena_ring *tx_ring) in check_missing_comp_in_tx_queue() argument
3419 struct ena_napi *ena_napi = container_of(tx_ring->napi, struct ena_napi, napi); in check_missing_comp_in_tx_queue()
3432 for (i = 0; i < tx_ring->ring_size; i++) { in check_missing_comp_in_tx_queue()
3433 tx_buf = &tx_ring->tx_buffer_info[i]; in check_missing_comp_in_tx_queue()
3449 tx_ring->qid); in check_missing_comp_in_tx_queue()
3459 jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies); in check_missing_comp_in_tx_queue()
3481 tx_ring->qid, i, time_since_last_napi, napi_scheduled); in check_missing_comp_in_tx_queue()
3500 ena_increase_stat(&tx_ring->tx_stats.missed_tx, missed_tx, in check_missing_comp_in_tx_queue()
3501 &tx_ring->syncp); in check_missing_comp_in_tx_queue()
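
check_missing_comp_in_tx_queue() scans every slot for buffers whose completion is overdue and folds the count into missed_tx, alongside the same time-since-NAPI/napi_scheduled pair the timeout handler logs. A hypothetical sketch of the scan; tx_sent_jiffies and missing_tx_comp_to are placeholder names, since the listing shows only the loop bounds and the accounting:

#include <linux/jiffies.h>

static void count_missed_tx(struct ena_ring *tx_ring,
			    unsigned long missing_tx_comp_to)
{
	u32 missed_tx = 0;
	u32 i;

	for (i = 0; i < tx_ring->ring_size; i++) {
		struct ena_tx_buffer *tx_buf = &tx_ring->tx_buffer_info[i];

		if (!tx_buf->skb)	/* slot not in flight */
			continue;

		if (time_is_before_jiffies(tx_buf->tx_sent_jiffies +
					   missing_tx_comp_to))
			missed_tx++;	/* completion overdue */
	}

	ena_increase_stat(&tx_ring->tx_stats.missed_tx, missed_tx,
			  &tx_ring->syncp);
}
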
3508 struct ena_ring *tx_ring; in check_for_missing_completions() local
3534 tx_ring = &adapter->tx_ring[qid]; in check_for_missing_completions()
3537 rc = check_missing_comp_in_tx_queue(adapter, tx_ring); in check_for_missing_completions()