Lines matching refs: rx_ring (Amazon ENA Ethernet driver)
96 adapter->rx_ring[i].mtu = mtu; in update_rx_ring_mtu()
194 rxr = &adapter->rx_ring[i]; in ena_init_io_rings()
370 struct ena_ring *rx_ring = &adapter->rx_ring[qid]; in ena_setup_rx_resources() local
374 if (rx_ring->rx_buffer_info) { in ena_setup_rx_resources()
383 size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1); in ena_setup_rx_resources()
386 rx_ring->rx_buffer_info = vzalloc_node(size, node); in ena_setup_rx_resources()
387 if (!rx_ring->rx_buffer_info) { in ena_setup_rx_resources()
388 rx_ring->rx_buffer_info = vzalloc(size); in ena_setup_rx_resources()
389 if (!rx_ring->rx_buffer_info) in ena_setup_rx_resources()
393 size = sizeof(u16) * rx_ring->ring_size; in ena_setup_rx_resources()
394 rx_ring->free_ids = vzalloc_node(size, node); in ena_setup_rx_resources()
395 if (!rx_ring->free_ids) { in ena_setup_rx_resources()
396 rx_ring->free_ids = vzalloc(size); in ena_setup_rx_resources()
397 if (!rx_ring->free_ids) { in ena_setup_rx_resources()
398 vfree(rx_ring->rx_buffer_info); in ena_setup_rx_resources()
399 rx_ring->rx_buffer_info = NULL; in ena_setup_rx_resources()
405 for (i = 0; i < rx_ring->ring_size; i++) in ena_setup_rx_resources()
406 rx_ring->free_ids[i] = i; in ena_setup_rx_resources()
409 memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats)); in ena_setup_rx_resources()
411 rx_ring->next_to_clean = 0; in ena_setup_rx_resources()
412 rx_ring->next_to_use = 0; in ena_setup_rx_resources()
413 rx_ring->cpu = ena_irq->cpu; in ena_setup_rx_resources()
414 rx_ring->numa_node = node; in ena_setup_rx_resources()
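
The matches above show the driver's NUMA-aware setup path: the buffer bookkeeping arrays are first requested on the queue's local node with vzalloc_node() and fall back to plain vzalloc(), and every request id starts out on the free list. A minimal sketch of that pattern, using illustrative rx_ring_sketch/rx_buf_sketch types rather than the driver's real layout:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

/* Illustrative types only; the real ring keeps many more fields. */
struct rx_buf_sketch {
	struct page *page;
	dma_addr_t dma_addr;
};

struct rx_ring_sketch {
	struct rx_buf_sketch *buffer_info;
	u16 *free_ids;
	u16 ring_size;
};

static int alloc_ring_arrays(struct rx_ring_sketch *ring, int node)
{
	size_t size = sizeof(*ring->buffer_info) * (ring->ring_size + 1);
	int i;

	/* Prefer memory on the queue's NUMA node, fall back to any node. */
	ring->buffer_info = vzalloc_node(size, node);
	if (!ring->buffer_info) {
		ring->buffer_info = vzalloc(size);
		if (!ring->buffer_info)
			return -ENOMEM;
	}

	size = sizeof(u16) * ring->ring_size;
	ring->free_ids = vzalloc_node(size, node);
	if (!ring->free_ids) {
		ring->free_ids = vzalloc(size);
		if (!ring->free_ids) {
			vfree(ring->buffer_info);
			ring->buffer_info = NULL;
			return -ENOMEM;
		}
	}

	/* Every request id starts out free. */
	for (i = 0; i < ring->ring_size; i++)
		ring->free_ids[i] = i;

	return 0;
}
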
428 struct ena_ring *rx_ring = &adapter->rx_ring[qid]; in ena_free_rx_resources() local
430 vfree(rx_ring->rx_buffer_info); in ena_free_rx_resources()
431 rx_ring->rx_buffer_info = NULL; in ena_free_rx_resources()
433 vfree(rx_ring->free_ids); in ena_free_rx_resources()
434 rx_ring->free_ids = NULL; in ena_free_rx_resources()
478 static struct page *ena_alloc_map_page(struct ena_ring *rx_ring, in ena_alloc_map_page() argument
488 ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1, &rx_ring->syncp); in ena_alloc_map_page()
495 *dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE, in ena_alloc_map_page()
497 if (unlikely(dma_mapping_error(rx_ring->dev, *dma))) { in ena_alloc_map_page()
498 ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1, in ena_alloc_map_page()
499 &rx_ring->syncp); in ena_alloc_map_page()
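
ena_alloc_map_page() pairs a page allocation with a streaming DMA mapping and bails out if dma_mapping_error() fires. A hedged sketch of that generic allocate-then-map step, with PAGE_SIZE standing in for the driver's ENA_PAGE_SIZE and the per-ring error counters omitted:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Allocate a receive page and map it for device DMA. */
static struct page *alloc_map_rx_page(struct device *dev, dma_addr_t *dma)
{
	struct page *page = alloc_page(GFP_ATOMIC);

	if (unlikely(!page))
		return NULL;

	/* The device may both write received data and read headroom. */
	*dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, *dma))) {
		__free_page(page);
		return NULL;
	}

	return page;
}
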
507 static int ena_alloc_rx_buffer(struct ena_ring *rx_ring, in ena_alloc_rx_buffer() argument
510 int headroom = rx_ring->rx_headroom; in ena_alloc_rx_buffer()
524 page = ena_alloc_map_page(rx_ring, &dma); in ena_alloc_rx_buffer()
528 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_alloc_rx_buffer()
543 static void ena_unmap_rx_buff_attrs(struct ena_ring *rx_ring, in ena_unmap_rx_buff_attrs() argument
547 dma_unmap_page_attrs(rx_ring->dev, rx_info->dma_addr, ENA_PAGE_SIZE, DMA_BIDIRECTIONAL, in ena_unmap_rx_buff_attrs()
551 static void ena_free_rx_page(struct ena_ring *rx_ring, in ena_free_rx_page() argument
557 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_free_rx_page()
562 ena_unmap_rx_buff_attrs(rx_ring, rx_info, 0); in ena_free_rx_page()
568 static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num) in ena_refill_rx_bufs() argument
574 next_to_use = rx_ring->next_to_use; in ena_refill_rx_bufs()
579 req_id = rx_ring->free_ids[next_to_use]; in ena_refill_rx_bufs()
581 rx_info = &rx_ring->rx_buffer_info[req_id]; in ena_refill_rx_bufs()
583 rc = ena_alloc_rx_buffer(rx_ring, rx_info); in ena_refill_rx_bufs()
585 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_refill_rx_bufs()
587 rx_ring->qid); in ena_refill_rx_bufs()
590 rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq, in ena_refill_rx_bufs()
594 netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_refill_rx_bufs()
596 rx_ring->qid); in ena_refill_rx_bufs()
600 rx_ring->ring_size); in ena_refill_rx_bufs()
604 ena_increase_stat(&rx_ring->rx_stats.refil_partial, 1, in ena_refill_rx_bufs()
605 &rx_ring->syncp); in ena_refill_rx_bufs()
606 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_refill_rx_bufs()
608 rx_ring->qid, i, num); in ena_refill_rx_bufs()
613 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq); in ena_refill_rx_bufs()
615 rx_ring->next_to_use = next_to_use; in ena_refill_rx_bufs()
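
The refill path above consumes request ids from free_ids, posts one descriptor per buffer, wraps the ring index, and rings the submission-queue doorbell once at the end. A simplified sketch of that loop; alloc_and_post_rx_desc() and ring_rx_doorbell() are hypothetical stand-ins for the driver's ena_com helpers:

#include <linux/types.h>

struct rx_refill_sketch {
	u16 *free_ids;
	u16 next_to_use;
	u16 ring_size;
};

/* Assumed stand-ins for the driver's allocate+post and doorbell helpers. */
int alloc_and_post_rx_desc(struct rx_refill_sketch *ring, u16 req_id);
void ring_rx_doorbell(struct rx_refill_sketch *ring);

static u32 refill_rx_bufs_sketch(struct rx_refill_sketch *ring, u32 num)
{
	u16 next_to_use = ring->next_to_use;
	u32 i;

	for (i = 0; i < num; i++) {
		u16 req_id = ring->free_ids[next_to_use];

		/* Stop on allocation or queue-full errors; the remainder is
		 * refilled on a later pass and counted as a partial refill. */
		if (alloc_and_post_rx_desc(ring, req_id))
			break;

		next_to_use = (u16)((next_to_use + 1) % ring->ring_size);
	}

	if (i)
		ring_rx_doorbell(ring);	/* publish new descriptors once */

	ring->next_to_use = next_to_use;
	return i;
}
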
623 struct ena_ring *rx_ring = &adapter->rx_ring[qid]; in ena_free_rx_bufs() local
626 for (i = 0; i < rx_ring->ring_size; i++) { in ena_free_rx_bufs()
627 struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i]; in ena_free_rx_bufs()
630 ena_free_rx_page(rx_ring, rx_info); in ena_free_rx_bufs()
639 struct ena_ring *rx_ring; in ena_refill_all_rx_bufs() local
643 rx_ring = &adapter->rx_ring[i]; in ena_refill_all_rx_bufs()
644 bufs_num = rx_ring->ring_size - 1; in ena_refill_all_rx_bufs()
645 rc = ena_refill_rx_bufs(rx_ring, bufs_num); in ena_refill_all_rx_bufs()
648 netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_refill_all_rx_bufs()
763 ena_xdp_unregister_rxq_info(&adapter->rx_ring[i]); in ena_destroy_all_rx_queues()
896 static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, void *first_frag, u16 len) in ena_alloc_skb() argument
901 skb = napi_alloc_skb(rx_ring->napi, len); in ena_alloc_skb()
906 ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1, in ena_alloc_skb()
907 &rx_ring->syncp); in ena_alloc_skb()
909 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_alloc_skb()
936 static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, in ena_rx_skb() argument
942 bool is_xdp_loaded = ena_xdp_present_ring(rx_ring); in ena_rx_skb()
957 rx_info = &rx_ring->rx_buffer_info[req_id]; in ena_rx_skb()
960 adapter = rx_ring->adapter; in ena_rx_skb()
961 netif_err(adapter, rx_err, rx_ring->netdev, in ena_rx_skb()
962 "Page is NULL. qid %u req_id %u\n", rx_ring->qid, req_id); in ena_rx_skb()
963 ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, &rx_ring->syncp); in ena_rx_skb()
968 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_rx_skb()
973 pkt_offset = buf_offset - rx_ring->rx_headroom; in ena_rx_skb()
977 if (len <= rx_ring->rx_copybreak) { in ena_rx_skb()
978 skb = ena_alloc_skb(rx_ring, NULL, len); in ena_rx_skb()
983 dma_sync_single_for_device(rx_ring->dev, in ena_rx_skb()
989 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_rx_skb()
991 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in ena_rx_skb()
992 rx_ring->free_ids[*next_to_clean] = req_id; in ena_rx_skb()
994 rx_ring->ring_size); in ena_rx_skb()
1005 ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC); in ena_rx_skb()
1007 skb = ena_alloc_skb(rx_ring, buf_addr, buf_len); in ena_rx_skb()
1014 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in ena_rx_skb()
1017 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_rx_skb()
1024 rx_ring->free_ids[*next_to_clean] = req_id; in ena_rx_skb()
1027 rx_ring->ring_size); in ena_rx_skb()
1035 rx_info = &rx_ring->rx_buffer_info[req_id]; in ena_rx_skb()
1039 pkt_offset = buf_offset - rx_ring->rx_headroom; in ena_rx_skb()
1048 dma_sync_single_for_cpu(rx_ring->dev, in ena_rx_skb()
1054 ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC); in ena_rx_skb()
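
ena_rx_skb() implements the usual rx_copybreak split: frames at or below the threshold are copied into a freshly allocated skb so the mapped page can be handed straight back to the device, while larger frames attach the page to the skb instead. A sketch of the small-packet branch only, with the payload copy shown as the generic pattern (it is elided in the matches above) and the headroom and request-id recycling simplified away:

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

/* Small-packet path: sync for the CPU, copy into a new skb, then hand the
 * (still mapped) buffer back to the device. */
static struct sk_buff *copybreak_rx(struct napi_struct *napi,
				    struct net_device *netdev,
				    struct device *dev, dma_addr_t dma,
				    const void *buf, unsigned int len)
{
	struct sk_buff *skb = napi_alloc_skb(napi, len);

	if (unlikely(!skb))
		return NULL;

	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
	skb_put_data(skb, buf, len);
	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);

	skb->protocol = eth_type_trans(skb, netdev);
	return skb;
}
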
1069 static void ena_rx_checksum(struct ena_ring *rx_ring, in ena_rx_checksum() argument
1074 if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) { in ena_rx_checksum()
1090 ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1, in ena_rx_checksum()
1091 &rx_ring->syncp); in ena_rx_checksum()
1092 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_rx_checksum()
1102 ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1, in ena_rx_checksum()
1103 &rx_ring->syncp); in ena_rx_checksum()
1104 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_rx_checksum()
1112 ena_increase_stat(&rx_ring->rx_stats.csum_good, 1, in ena_rx_checksum()
1113 &rx_ring->syncp); in ena_rx_checksum()
1115 ena_increase_stat(&rx_ring->rx_stats.csum_unchecked, 1, in ena_rx_checksum()
1116 &rx_ring->syncp); in ena_rx_checksum()
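
The checksum helper trusts hardware validation only when NETIF_F_RXCSUM is set and the device flagged the packet as checked and correct; otherwise the skb stays at CHECKSUM_NONE and the stack verifies it. An illustrative sketch of that decision; hw_csum_checked and hw_l4_csum_ok are stand-ins for the RX descriptor fields, and the real helper also distinguishes L3 errors, fragments and per-protocol cases:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void set_rx_checksum(struct net_device *netdev, struct sk_buff *skb,
			    bool hw_csum_checked, bool hw_l4_csum_ok)
{
	if (!(netdev->features & NETIF_F_RXCSUM) || !hw_csum_checked) {
		skb->ip_summed = CHECKSUM_NONE;	/* let the stack verify */
		return;
	}

	/* Hardware validated the L4 checksum: skip the software check. */
	skb->ip_summed = hw_l4_csum_ok ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
}
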
1126 static void ena_set_rx_hash(struct ena_ring *rx_ring, in ena_set_rx_hash() argument
1132 if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) { in ena_set_rx_hash()
1148 static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp, u16 num_descs) in ena_xdp_handle_buff() argument
1155 netdev_err_once(rx_ring->adapter->netdev, in ena_xdp_handle_buff()
1157 ena_increase_stat(&rx_ring->rx_stats.xdp_drop, 1, &rx_ring->syncp); in ena_xdp_handle_buff()
1161 rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]; in ena_xdp_handle_buff()
1164 rx_ring->ena_bufs[0].len, false); in ena_xdp_handle_buff()
1166 ret = ena_xdp_execute(rx_ring, xdp); in ena_xdp_handle_buff()
1171 rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data; in ena_xdp_handle_buff()
1184 static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, in ena_clean_rx_irq() argument
1187 u16 next_to_clean = rx_ring->next_to_clean; in ena_clean_rx_irq()
1204 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_clean_rx_irq()
1205 "%s qid %d\n", __func__, rx_ring->qid); in ena_clean_rx_irq()
1207 xdp_init_buff(&xdp, ENA_PAGE_SIZE, &rx_ring->xdp_rxq); in ena_clean_rx_irq()
1212 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; in ena_clean_rx_irq()
1213 ena_rx_ctx.max_bufs = rx_ring->sgl_size; in ena_clean_rx_irq()
1216 rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, in ena_clean_rx_irq()
1217 rx_ring->ena_com_io_sq, in ena_clean_rx_irq()
1226 rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]; in ena_clean_rx_irq()
1230 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_clean_rx_irq()
1232 rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto, in ena_clean_rx_irq()
1235 dma_sync_single_for_cpu(rx_ring->dev, in ena_clean_rx_irq()
1237 rx_ring->ena_bufs[0].len, in ena_clean_rx_irq()
1240 if (ena_xdp_present_ring(rx_ring)) in ena_clean_rx_irq()
1241 xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp, ena_rx_ctx.descs); in ena_clean_rx_irq()
1245 skb = ena_rx_skb(rx_ring, in ena_clean_rx_irq()
1246 rx_ring->ena_bufs, in ena_clean_rx_irq()
1252 int req_id = rx_ring->ena_bufs[i].req_id; in ena_clean_rx_irq()
1254 rx_ring->free_ids[next_to_clean] = req_id; in ena_clean_rx_irq()
1257 rx_ring->ring_size); in ena_clean_rx_irq()
1263 ena_unmap_rx_buff_attrs(rx_ring, in ena_clean_rx_irq()
1264 &rx_ring->rx_buffer_info[req_id], in ena_clean_rx_irq()
1266 rx_ring->rx_buffer_info[req_id].page = NULL; in ena_clean_rx_irq()
1278 ena_rx_checksum(rx_ring, &ena_rx_ctx, skb); in ena_clean_rx_irq()
1280 ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb); in ena_clean_rx_irq()
1282 skb_record_rx_queue(skb, rx_ring->qid); in ena_clean_rx_irq()
1284 if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) in ena_clean_rx_irq()
1295 rx_ring->per_napi_packets += work_done; in ena_clean_rx_irq()
1296 u64_stats_update_begin(&rx_ring->syncp); in ena_clean_rx_irq()
1297 rx_ring->rx_stats.bytes += total_len; in ena_clean_rx_irq()
1298 rx_ring->rx_stats.cnt += work_done; in ena_clean_rx_irq()
1299 rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt; in ena_clean_rx_irq()
1300 u64_stats_update_end(&rx_ring->syncp); in ena_clean_rx_irq()
1302 rx_ring->next_to_clean = next_to_clean; in ena_clean_rx_irq()
1304 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq); in ena_clean_rx_irq()
1306 min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER, in ena_clean_rx_irq()
1311 ena_refill_rx_bufs(rx_ring, refill_required); in ena_clean_rx_irq()
1322 adapter = netdev_priv(rx_ring->netdev); in ena_clean_rx_irq()
1325 ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, &rx_ring->syncp); in ena_clean_rx_irq()
1330 ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, in ena_clean_rx_irq()
1331 &rx_ring->syncp); in ena_clean_rx_irq()
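
ena_clean_rx_irq() finishes by publishing per-ring byte and packet counters under the ring's u64_stats_sync and by refilling the ring once enough entries are free. A minimal sketch of the writer side of that stats pattern, using an illustrative stats struct:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct rx_stats_sketch {
	u64 bytes;
	u64 cnt;
	struct u64_stats_sync syncp;
};

/* Writer side of the u64_stats pattern: keeps 64-bit counters consistent
 * for readers (notably on 32-bit) without a lock on the hot path. */
static void update_rx_stats(struct rx_stats_sketch *stats,
			    u64 total_len, u64 work_done)
{
	u64_stats_update_begin(&stats->syncp);
	stats->bytes += total_len;
	stats->cnt += work_done;
	u64_stats_update_end(&stats->syncp);
}
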
1344 ena_napi->rx_ring->smoothed_interval = cur_moder.usec; in ena_dim_work()
1351 struct ena_ring *rx_ring = ena_napi->rx_ring; in ena_adjust_adaptive_rx_intr_moderation() local
1353 if (!rx_ring->per_napi_packets) in ena_adjust_adaptive_rx_intr_moderation()
1356 rx_ring->non_empty_napi_events++; in ena_adjust_adaptive_rx_intr_moderation()
1358 dim_update_sample(rx_ring->non_empty_napi_events, in ena_adjust_adaptive_rx_intr_moderation()
1359 rx_ring->rx_stats.cnt, in ena_adjust_adaptive_rx_intr_moderation()
1360 rx_ring->rx_stats.bytes, in ena_adjust_adaptive_rx_intr_moderation()
1365 rx_ring->per_napi_packets = 0; in ena_adjust_adaptive_rx_intr_moderation()
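
The adaptive-moderation hook skips empty polls, then folds the ring's cumulative packet and byte counters into a DIM sample for the kernel's net_dim machinery. A sketch of that sampling step; handing the sample to net_dim() is left out because its signature has changed across kernel versions:

#include <linux/dim.h>
#include <linux/types.h>

/* Illustrative per-ring moderation state. */
struct rx_moder_sketch {
	u32 per_napi_packets;		/* packets since the last poll */
	u16 non_empty_napi_events;	/* polls that did real work */
	u64 pkts_total;			/* cumulative ring counters */
	u64 bytes_total;
};

static void adjust_rx_moderation(struct rx_moder_sketch *r,
				 struct dim_sample *sample)
{
	if (!r->per_napi_packets)
		return;			/* nothing received, no sample */

	r->non_empty_napi_events++;
	dim_update_sample(r->non_empty_napi_events, r->pkts_total,
			  r->bytes_total, sample);
	r->per_napi_packets = 0;
}
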
1369 struct ena_ring *rx_ring) in ena_unmask_interrupt() argument
1377 if (rx_ring) in ena_unmask_interrupt()
1378 rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ? in ena_unmask_interrupt()
1379 rx_ring->smoothed_interval : in ena_unmask_interrupt()
1380 ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev); in ena_unmask_interrupt()
1402 struct ena_ring *rx_ring) in ena_update_ring_numa_node() argument
1412 if (rx_ring) in ena_update_ring_numa_node()
1413 rx_ring->cpu = cpu; in ena_update_ring_numa_node()
1425 if (rx_ring) { in ena_update_ring_numa_node()
1426 rx_ring->numa_node = numa_node; in ena_update_ring_numa_node()
1427 ena_com_update_numa_node(rx_ring->ena_com_io_cq, in ena_update_ring_numa_node()
1440 struct ena_ring *tx_ring, *rx_ring; in ena_io_poll() local
1448 rx_ring = ena_napi->rx_ring; in ena_io_poll()
1463 rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget); in ena_io_poll()
1486 if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev)) in ena_io_poll()
1489 ena_update_ring_numa_node(tx_ring, rx_ring); in ena_io_poll()
1490 ena_unmask_interrupt(tx_ring, rx_ring); in ena_io_poll()
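
ena_io_poll() follows the standard NAPI contract: clean the queues against the budget and re-arm (unmask) the interrupt only after napi_complete_done() succeeds. A generic sketch of that control flow, with the driver-specific clean and unmask routines assumed:

#include <linux/netdevice.h>

/* Assumed stand-ins for the driver's per-queue handlers. */
int clean_rx(struct napi_struct *napi, int budget);
void unmask_queue_irq(struct napi_struct *napi);

/* Canonical NAPI poll shape: return the full budget while more work
 * remains; otherwise complete NAPI and re-arm the queue interrupt. */
static int io_poll_sketch(struct napi_struct *napi, int budget)
{
	int work_done = clean_rx(napi, budget);

	if (work_done < budget && napi_complete_done(napi, work_done))
		unmask_queue_irq(napi);

	return work_done < budget ? work_done : budget;
}
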
1761 adapter->ena_napi[i].rx_ring); in ena_del_napi_in_range()
1773 struct ena_ring *rx_ring, *tx_ring; in ena_init_napi_in_range() local
1777 rx_ring = &adapter->rx_ring[i]; in ena_init_napi_in_range()
1787 napi->rx_ring = rx_ring; in ena_init_napi_in_range()
1961 struct ena_ring *rx_ring; in ena_create_io_rx_queue() local
1968 rx_ring = &adapter->rx_ring[qid]; in ena_create_io_rx_queue()
1978 ctx.queue_size = rx_ring->ring_size; in ena_create_io_rx_queue()
1979 ctx.numa_node = rx_ring->numa_node; in ena_create_io_rx_queue()
1990 &rx_ring->ena_com_io_sq, in ena_create_io_rx_queue()
1991 &rx_ring->ena_com_io_cq); in ena_create_io_rx_queue()
1999 ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node); in ena_create_io_rx_queue()
2018 ena_xdp_register_rxq_info(&adapter->rx_ring[i]); in ena_create_all_io_rx_queues()
2025 ena_xdp_unregister_rxq_info(&adapter->rx_ring[i]); in ena_create_all_io_rx_queues()
2041 adapter->rx_ring[i].ring_size = new_rx_size; in set_io_rings_size()
2115 cur_rx_ring_size = adapter->rx_ring[0].ring_size; in create_queues_with_size_backoff()
2195 &adapter->rx_ring[i]); in ena_up()
2365 struct ena_ring *rx_ring; in ena_set_rx_copybreak() local
2374 rx_ring = &adapter->rx_ring[i]; in ena_set_rx_copybreak()
2375 rx_ring->rx_copybreak = rx_copybreak; in ena_set_rx_copybreak()
2810 struct ena_ring *rx_ring, *tx_ring; in ena_get_stats64() local
2838 rx_ring = &adapter->rx_ring[i]; in ena_get_stats64()
2841 start = u64_stats_fetch_begin(&rx_ring->syncp); in ena_get_stats64()
2842 packets = rx_ring->rx_stats.cnt; in ena_get_stats64()
2843 bytes = rx_ring->rx_stats.bytes; in ena_get_stats64()
2844 xdp_rx_drops = rx_ring->rx_stats.xdp_drop; in ena_get_stats64()
2845 } while (u64_stats_fetch_retry(&rx_ring->syncp, start)); in ena_get_stats64()
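
ena_get_stats64() shows the matching reader side of the u64_stats pattern: snapshot the counters in a retry loop until no writer raced the read. A minimal sketch with an illustrative stats struct:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct rx_stats_snap {
	u64 bytes;
	u64 cnt;
	struct u64_stats_sync syncp;
};

/* Reader side of the u64_stats pattern: retry until no writer raced us. */
static void read_rx_stats(const struct rx_stats_snap *stats,
			  u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		*packets = stats->cnt;
		*bytes = stats->bytes;
	} while (u64_stats_fetch_retry(&stats->syncp, start));
}
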
2903 if (adapter->rx_ring->ring_size) in ena_calc_io_queue_size()
2904 rx_queue_size = adapter->rx_ring->ring_size; in ena_calc_io_queue_size()
3392 struct ena_ring *rx_ring) in check_for_rx_interrupt_queue() argument
3394 struct ena_napi *ena_napi = container_of(rx_ring->napi, struct ena_napi, napi); in check_for_rx_interrupt_queue()
3399 if (ena_com_cq_empty(rx_ring->ena_com_io_cq)) in check_for_rx_interrupt_queue()
3402 rx_ring->no_interrupt_event_cnt++; in check_for_rx_interrupt_queue()
3404 if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) { in check_for_rx_interrupt_queue()
3407 rx_ring->qid); in check_for_rx_interrupt_queue()
3509 struct ena_ring *rx_ring; in check_for_missing_completions() local
3535 rx_ring = &adapter->rx_ring[qid]; in check_for_missing_completions()
3542 check_for_rx_interrupt_queue(adapter, rx_ring) : 0; in check_for_missing_completions()
3570 struct ena_ring *rx_ring; in check_for_empty_rx_ring() local
3580 rx_ring = &adapter->rx_ring[i]; in check_for_empty_rx_ring()
3582 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq); in check_for_empty_rx_ring()
3583 if (unlikely(refill_required == (rx_ring->ring_size - 1))) { in check_for_empty_rx_ring()
3584 rx_ring->empty_rx_queue++; in check_for_empty_rx_ring()
3586 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { in check_for_empty_rx_ring()
3587 ena_increase_stat(&rx_ring->rx_stats.empty_rx_ring, 1, in check_for_empty_rx_ring()
3588 &rx_ring->syncp); in check_for_empty_rx_ring()
3593 napi_schedule(rx_ring->napi); in check_for_empty_rx_ring()
3594 rx_ring->empty_rx_queue = 0; in check_for_empty_rx_ring()
3597 rx_ring->empty_rx_queue = 0; in check_for_empty_rx_ring()
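
The watchdog above treats ring_size - 1 free submission-queue entries as an empty ring (one slot is always left unused), counts consecutive empty observations, and schedules NAPI to force a refill once a threshold is hit. A sketch of that pattern with hypothetical names and threshold:

#include <linux/netdevice.h>
#include <linux/types.h>

#define EMPTY_RX_THRESHOLD 4	/* illustrative; the driver has its own */

struct rx_watchdog_sketch {
	struct napi_struct *napi;
	u16 ring_size;
	int empty_rx_queue;
};

/* Assumed helper returning how many descriptors the ring is missing. */
u32 free_rx_entries(struct rx_watchdog_sketch *ring);

static void check_empty_rx_ring(struct rx_watchdog_sketch *ring)
{
	/* One slot is always left unused, so ring_size - 1 free entries
	 * means the device has no posted buffers left. */
	if (free_rx_entries(ring) != (u32)(ring->ring_size - 1)) {
		ring->empty_rx_queue = 0;
		return;
	}

	if (++ring->empty_rx_queue >= EMPTY_RX_THRESHOLD) {
		napi_schedule(ring->napi);	/* force a refill pass */
		ring->empty_rx_queue = 0;
	}
}
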