Lines matching refs:rx_ring in the Amazon ENA Ethernet driver (drivers/net/ethernet/amazon/ena/ena_netdev.c)
95 adapter->rx_ring[i].mtu = mtu; in update_rx_ring_mtu()
217 rxr = &adapter->rx_ring[i]; in ena_init_io_rings()
393 struct ena_ring *rx_ring = &adapter->rx_ring[qid]; in ena_setup_rx_resources() local
397 if (rx_ring->rx_buffer_info) { in ena_setup_rx_resources()
406 size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1); in ena_setup_rx_resources()
409 rx_ring->rx_buffer_info = vzalloc_node(size, node); in ena_setup_rx_resources()
410 if (!rx_ring->rx_buffer_info) { in ena_setup_rx_resources()
411 rx_ring->rx_buffer_info = vzalloc(size); in ena_setup_rx_resources()
412 if (!rx_ring->rx_buffer_info) in ena_setup_rx_resources()
416 size = sizeof(u16) * rx_ring->ring_size; in ena_setup_rx_resources()
417 rx_ring->free_ids = vzalloc_node(size, node); in ena_setup_rx_resources()
418 if (!rx_ring->free_ids) { in ena_setup_rx_resources()
419 rx_ring->free_ids = vzalloc(size); in ena_setup_rx_resources()
420 if (!rx_ring->free_ids) { in ena_setup_rx_resources()
421 vfree(rx_ring->rx_buffer_info); in ena_setup_rx_resources()
422 rx_ring->rx_buffer_info = NULL; in ena_setup_rx_resources()
428 for (i = 0; i < rx_ring->ring_size; i++) in ena_setup_rx_resources()
429 rx_ring->free_ids[i] = i; in ena_setup_rx_resources()
432 memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats)); in ena_setup_rx_resources()
434 rx_ring->next_to_clean = 0; in ena_setup_rx_resources()
435 rx_ring->next_to_use = 0; in ena_setup_rx_resources()
436 rx_ring->cpu = ena_irq->cpu; in ena_setup_rx_resources()
437 rx_ring->numa_node = node; in ena_setup_rx_resources()
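
The matches at lines 409-422 show the driver's NUMA-aware allocation pattern: try vzalloc_node() on the queue's preferred node first, and fall back to a plain vzalloc() before giving up. A minimal sketch of that pattern follows; the struct and function names are illustrative stand-ins, not the real struct ena_ring.

	#include <linux/errno.h>
	#include <linux/types.h>
	#include <linux/vmalloc.h>

	/* Stand-in for the per-ring bookkeeping in ena_setup_rx_resources(). */
	struct demo_rx_ring {
		u16 *free_ids;
		u32 ring_size;
	};

	static int demo_setup_free_ids(struct demo_rx_ring *ring, int node)
	{
		size_t size = sizeof(u16) * ring->ring_size;
		u32 i;

		/* Prefer memory on the queue's NUMA node... */
		ring->free_ids = vzalloc_node(size, node);
		if (!ring->free_ids) {
			/* ...but any node beats failing queue bring-up. */
			ring->free_ids = vzalloc(size);
			if (!ring->free_ids)
				return -ENOMEM;
		}

		/* Initially every request id is free, in order (lines 428-429). */
		for (i = 0; i < ring->ring_size; i++)
			ring->free_ids[i] = i;

		return 0;
	}
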
451 struct ena_ring *rx_ring = &adapter->rx_ring[qid]; in ena_free_rx_resources() local
453 vfree(rx_ring->rx_buffer_info); in ena_free_rx_resources()
454 rx_ring->rx_buffer_info = NULL; in ena_free_rx_resources()
456 vfree(rx_ring->free_ids); in ena_free_rx_resources()
457 rx_ring->free_ids = NULL; in ena_free_rx_resources()
501 static struct page *ena_alloc_map_page(struct ena_ring *rx_ring, in ena_alloc_map_page() argument
511 ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1, &rx_ring->syncp); in ena_alloc_map_page()
518 *dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE, in ena_alloc_map_page()
520 if (unlikely(dma_mapping_error(rx_ring->dev, *dma))) { in ena_alloc_map_page()
521 ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1, in ena_alloc_map_page()
522 &rx_ring->syncp); in ena_alloc_map_page()
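
Lines 501-522 show the allocate-then-map helper: if the DMA mapping fails, the freshly allocated page has to be released before reporting the error. A hedged sketch of that shape, assuming ENA_PAGE_SIZE is effectively PAGE_SIZE and leaving out the driver's per-ring statistics:

	#include <linux/dma-mapping.h>
	#include <linux/err.h>
	#include <linux/errno.h>
	#include <linux/gfp.h>
	#include <linux/mm.h>

	static struct page *demo_alloc_map_page(struct device *dev, dma_addr_t *dma)
	{
		struct page *page = alloc_page(GFP_ATOMIC | __GFP_COMP);

		if (unlikely(!page))
			return ERR_PTR(-ENOSPC);

		/* Map the whole page; the HW may write anywhere inside it. */
		*dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(dev, *dma))) {
			__free_page(page);	/* undo the allocation on map failure */
			return ERR_PTR(-EIO);
		}

		return page;
	}
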
530 static int ena_alloc_rx_buffer(struct ena_ring *rx_ring, in ena_alloc_rx_buffer() argument
533 int headroom = rx_ring->rx_headroom; in ena_alloc_rx_buffer()
547 page = ena_alloc_map_page(rx_ring, &dma); in ena_alloc_rx_buffer()
551 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_alloc_rx_buffer()
566 static void ena_unmap_rx_buff_attrs(struct ena_ring *rx_ring, in ena_unmap_rx_buff_attrs() argument
570 dma_unmap_page_attrs(rx_ring->dev, rx_info->dma_addr, ENA_PAGE_SIZE, DMA_BIDIRECTIONAL, in ena_unmap_rx_buff_attrs()
574 static void ena_free_rx_page(struct ena_ring *rx_ring, in ena_free_rx_page() argument
580 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_free_rx_page()
585 ena_unmap_rx_buff_attrs(rx_ring, rx_info, 0); in ena_free_rx_page()
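
The unmap helper at lines 566-585 takes DMA attributes so callers that already synced only the bytes they touched (as ena_rx_skb() does with dma_sync_single_for_cpu()) can pass DMA_ATTR_SKIP_CPU_SYNC and avoid a second full-page sync, while ena_free_rx_page() passes 0 for a normal unmap. A small sketch of that idea with illustrative parameters:

	#include <linux/dma-mapping.h>
	#include <linux/mm.h>

	static void demo_unmap_rx_page(struct device *dev, dma_addr_t dma,
				       bool already_synced)
	{
		unsigned long attrs = already_synced ? DMA_ATTR_SKIP_CPU_SYNC : 0;

		/* Skip the implicit CPU sync when the caller synced exactly the
		 * bytes it read; otherwise let the unmap do a full-page sync.
		 */
		dma_unmap_page_attrs(dev, dma, PAGE_SIZE, DMA_BIDIRECTIONAL, attrs);
	}
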
591 static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num) in ena_refill_rx_bufs() argument
597 next_to_use = rx_ring->next_to_use; in ena_refill_rx_bufs()
602 req_id = rx_ring->free_ids[next_to_use]; in ena_refill_rx_bufs()
604 rx_info = &rx_ring->rx_buffer_info[req_id]; in ena_refill_rx_bufs()
606 rc = ena_alloc_rx_buffer(rx_ring, rx_info); in ena_refill_rx_bufs()
608 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_refill_rx_bufs()
610 rx_ring->qid); in ena_refill_rx_bufs()
613 rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq, in ena_refill_rx_bufs()
617 netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_refill_rx_bufs()
619 rx_ring->qid); in ena_refill_rx_bufs()
623 rx_ring->ring_size); in ena_refill_rx_bufs()
627 ena_increase_stat(&rx_ring->rx_stats.refil_partial, 1, in ena_refill_rx_bufs()
628 &rx_ring->syncp); in ena_refill_rx_bufs()
629 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_refill_rx_bufs()
631 rx_ring->qid, i, num); in ena_refill_rx_bufs()
636 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq); in ena_refill_rx_bufs()
638 rx_ring->next_to_use = next_to_use; in ena_refill_rx_bufs()
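
Lines 597-638, together with the mirrored updates in ena_rx_skb() and ena_clean_rx_irq(), show how free_ids works as a ring of free request ids: the refill path consumes an id at next_to_use, and the clean path returns one at next_to_clean. A sketch of that scheme, assuming a power-of-two ring_size as the driver's index macros do; all names here are illustrative:

	#include <linux/types.h>

	static inline u16 demo_ring_idx_next(u16 idx, u16 ring_size)
	{
		return (idx + 1) & (ring_size - 1);	/* power-of-two wrap */
	}

	/* Refill side: pull a free request id for the next posted buffer. */
	static u16 demo_take_req_id(u16 *free_ids, u16 *next_to_use, u16 ring_size)
	{
		u16 req_id = free_ids[*next_to_use];

		*next_to_use = demo_ring_idx_next(*next_to_use, ring_size);
		return req_id;
	}

	/* Clean side: hand the id back once the completion was consumed. */
	static void demo_put_req_id(u16 *free_ids, u16 *next_to_clean,
				    u16 ring_size, u16 req_id)
	{
		free_ids[*next_to_clean] = req_id;
		*next_to_clean = demo_ring_idx_next(*next_to_clean, ring_size);
	}
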
646 struct ena_ring *rx_ring = &adapter->rx_ring[qid]; in ena_free_rx_bufs() local
649 for (i = 0; i < rx_ring->ring_size; i++) { in ena_free_rx_bufs()
650 struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i]; in ena_free_rx_bufs()
653 ena_free_rx_page(rx_ring, rx_info); in ena_free_rx_bufs()
662 struct ena_ring *rx_ring; in ena_refill_all_rx_bufs() local
666 rx_ring = &adapter->rx_ring[i]; in ena_refill_all_rx_bufs()
667 bufs_num = rx_ring->ring_size - 1; in ena_refill_all_rx_bufs()
668 rc = ena_refill_rx_bufs(rx_ring, bufs_num); in ena_refill_all_rx_bufs()
671 netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_refill_all_rx_bufs()
786 ena_xdp_unregister_rxq_info(&adapter->rx_ring[i]); in ena_destroy_all_rx_queues()
919 static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, void *first_frag, u16 len) in ena_alloc_skb() argument
924 skb = napi_alloc_skb(rx_ring->napi, len); in ena_alloc_skb()
929 ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1, in ena_alloc_skb()
930 &rx_ring->syncp); in ena_alloc_skb()
932 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_alloc_skb()
959 static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, in ena_rx_skb() argument
965 bool is_xdp_loaded = ena_xdp_present_ring(rx_ring); in ena_rx_skb()
980 rx_info = &rx_ring->rx_buffer_info[req_id]; in ena_rx_skb()
983 adapter = rx_ring->adapter; in ena_rx_skb()
984 netif_err(adapter, rx_err, rx_ring->netdev, in ena_rx_skb()
985 "Page is NULL. qid %u req_id %u\n", rx_ring->qid, req_id); in ena_rx_skb()
986 ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, &rx_ring->syncp); in ena_rx_skb()
991 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_rx_skb()
996 pkt_offset = buf_offset - rx_ring->rx_headroom; in ena_rx_skb()
1000 if (len <= rx_ring->rx_copybreak) { in ena_rx_skb()
1001 skb = ena_alloc_skb(rx_ring, NULL, len); in ena_rx_skb()
1006 dma_sync_single_for_device(rx_ring->dev, in ena_rx_skb()
1012 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_rx_skb()
1014 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in ena_rx_skb()
1015 rx_ring->free_ids[*next_to_clean] = req_id; in ena_rx_skb()
1017 rx_ring->ring_size); in ena_rx_skb()
1028 ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC); in ena_rx_skb()
1030 skb = ena_alloc_skb(rx_ring, buf_addr, buf_len); in ena_rx_skb()
1037 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in ena_rx_skb()
1040 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_rx_skb()
1047 rx_ring->free_ids[*next_to_clean] = req_id; in ena_rx_skb()
1050 rx_ring->ring_size); in ena_rx_skb()
1058 rx_info = &rx_ring->rx_buffer_info[req_id]; in ena_rx_skb()
1062 pkt_offset = buf_offset - rx_ring->rx_headroom; in ena_rx_skb()
1071 dma_sync_single_for_cpu(rx_ring->dev, in ena_rx_skb()
1077 ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC); in ena_rx_skb()
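
Lines 1000-1017 show the rx_copybreak decision inside ena_rx_skb(): frames no larger than rx_copybreak are copied into a small skb from napi_alloc_skb() so the DMA page stays mapped and can be reused, while larger frames get the page attached instead. A hedged sketch of the copy path; the helper and parameter names are illustrative and the driver's exact copy/sync calls are omitted:

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static struct sk_buff *demo_copybreak_rx(struct napi_struct *napi,
						 struct net_device *netdev,
						 const void *buf, u16 len)
	{
		struct sk_buff *skb = napi_alloc_skb(napi, len);

		if (unlikely(!skb))
			return NULL;

		skb_put_data(skb, buf, len);		/* copy the small frame */
		skb->protocol = eth_type_trans(skb, netdev);
		return skb;
	}
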
1092 static void ena_rx_checksum(struct ena_ring *rx_ring, in ena_rx_checksum() argument
1097 if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) { in ena_rx_checksum()
1113 ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1, in ena_rx_checksum()
1114 &rx_ring->syncp); in ena_rx_checksum()
1115 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_rx_checksum()
1125 ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1, in ena_rx_checksum()
1126 &rx_ring->syncp); in ena_rx_checksum()
1127 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_rx_checksum()
1135 ena_increase_stat(&rx_ring->rx_stats.csum_good, 1, in ena_rx_checksum()
1136 &rx_ring->syncp); in ena_rx_checksum()
1138 ena_increase_stat(&rx_ring->rx_stats.csum_unchecked, 1, in ena_rx_checksum()
1139 &rx_ring->syncp); in ena_rx_checksum()
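
Lines 1097-1139 show the RX checksum policy: CHECKSUM_UNNECESSARY is reported only when NETIF_F_RXCSUM is enabled and the device actually validated the packet; bad or unchecked checksums fall back to CHECKSUM_NONE so the stack re-verifies. A minimal sketch, with csum_checked/csum_ok standing in for the ena_com RX context flags and the per-ring counters left out:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static void demo_rx_checksum(struct net_device *netdev, struct sk_buff *skb,
				     bool csum_checked, bool csum_ok)
	{
		if (!(netdev->features & NETIF_F_RXCSUM) || !csum_checked) {
			skb->ip_summed = CHECKSUM_NONE;	/* let the stack verify */
			return;
		}

		skb->ip_summed = csum_ok ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
	}
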
1149 static void ena_set_rx_hash(struct ena_ring *rx_ring, in ena_set_rx_hash() argument
1155 if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) { in ena_set_rx_hash()
1171 static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp, u16 num_descs) in ena_xdp_handle_buff() argument
1178 netdev_err_once(rx_ring->adapter->netdev, in ena_xdp_handle_buff()
1180 ena_increase_stat(&rx_ring->rx_stats.xdp_drop, 1, &rx_ring->syncp); in ena_xdp_handle_buff()
1184 rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]; in ena_xdp_handle_buff()
1187 rx_ring->ena_bufs[0].len, false); in ena_xdp_handle_buff()
1189 ret = ena_xdp_execute(rx_ring, xdp); in ena_xdp_handle_buff()
1194 rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data; in ena_xdp_handle_buff()
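
Lines 1184-1189, together with the xdp_init_buff() call at line 1230, show the standard xdp_buff setup: initialize the buff once per NAPI poll with the ring's xdp_rxq_info and frame size, then point it at each packet's buffer before running the XDP program. A sketch of that pairing; page_addr, headroom, and len are illustrative parameters:

	#include <linux/mm.h>
	#include <net/xdp.h>

	static void demo_build_xdp_buff(struct xdp_buff *xdp,
					struct xdp_rxq_info *rxq,
					void *page_addr, u32 headroom, u32 len)
	{
		/* Per-poll init with the ring's rxq_info and frame size. */
		xdp_init_buff(xdp, PAGE_SIZE, rxq);

		/* Per-packet setup: data starts after the reserved headroom. */
		xdp_prepare_buff(xdp, page_addr, headroom, len, false);
	}
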
1207 static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, in ena_clean_rx_irq() argument
1210 u16 next_to_clean = rx_ring->next_to_clean; in ena_clean_rx_irq()
1227 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_clean_rx_irq()
1228 "%s qid %d\n", __func__, rx_ring->qid); in ena_clean_rx_irq()
1230 xdp_init_buff(&xdp, ENA_PAGE_SIZE, &rx_ring->xdp_rxq); in ena_clean_rx_irq()
1235 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; in ena_clean_rx_irq()
1236 ena_rx_ctx.max_bufs = rx_ring->sgl_size; in ena_clean_rx_irq()
1239 rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, in ena_clean_rx_irq()
1240 rx_ring->ena_com_io_sq, in ena_clean_rx_irq()
1249 rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]; in ena_clean_rx_irq()
1253 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_clean_rx_irq()
1255 rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto, in ena_clean_rx_irq()
1258 dma_sync_single_for_cpu(rx_ring->dev, in ena_clean_rx_irq()
1260 rx_ring->ena_bufs[0].len, in ena_clean_rx_irq()
1263 if (ena_xdp_present_ring(rx_ring)) in ena_clean_rx_irq()
1264 xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp, ena_rx_ctx.descs); in ena_clean_rx_irq()
1268 skb = ena_rx_skb(rx_ring, in ena_clean_rx_irq()
1269 rx_ring->ena_bufs, in ena_clean_rx_irq()
1275 int req_id = rx_ring->ena_bufs[i].req_id; in ena_clean_rx_irq()
1277 rx_ring->free_ids[next_to_clean] = req_id; in ena_clean_rx_irq()
1280 rx_ring->ring_size); in ena_clean_rx_irq()
1286 ena_unmap_rx_buff_attrs(rx_ring, in ena_clean_rx_irq()
1287 &rx_ring->rx_buffer_info[req_id], in ena_clean_rx_irq()
1289 rx_ring->rx_buffer_info[req_id].page = NULL; in ena_clean_rx_irq()
1301 ena_rx_checksum(rx_ring, &ena_rx_ctx, skb); in ena_clean_rx_irq()
1303 ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb); in ena_clean_rx_irq()
1305 skb_record_rx_queue(skb, rx_ring->qid); in ena_clean_rx_irq()
1307 if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) in ena_clean_rx_irq()
1318 rx_ring->per_napi_packets += work_done; in ena_clean_rx_irq()
1319 u64_stats_update_begin(&rx_ring->syncp); in ena_clean_rx_irq()
1320 rx_ring->rx_stats.bytes += total_len; in ena_clean_rx_irq()
1321 rx_ring->rx_stats.cnt += work_done; in ena_clean_rx_irq()
1322 rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt; in ena_clean_rx_irq()
1323 u64_stats_update_end(&rx_ring->syncp); in ena_clean_rx_irq()
1325 rx_ring->next_to_clean = next_to_clean; in ena_clean_rx_irq()
1327 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq); in ena_clean_rx_irq()
1329 min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER, in ena_clean_rx_irq()
1334 ena_refill_rx_bufs(rx_ring, refill_required); in ena_clean_rx_irq()
1345 adapter = netdev_priv(rx_ring->netdev); in ena_clean_rx_irq()
1348 ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, &rx_ring->syncp); in ena_clean_rx_irq()
1353 ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, in ena_clean_rx_irq()
1354 &rx_ring->syncp); in ena_clean_rx_irq()
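
Lines 1327-1334 show the lazy refill heuristic in the poll loop: new RX buffers are posted only once enough descriptors have been consumed, which batches page allocations and doorbell writes. A sketch with illustrative threshold values; the driver derives its threshold from ENA_RX_REFILL_THRESH_DIVIDER and ENA_RX_REFILL_THRESH_PACKET:

	#include <linux/minmax.h>
	#include <linux/types.h>

	static bool demo_should_refill(u32 free_entries, u32 ring_size)
	{
		/* Illustrative divider and packet cap, not the driver's values. */
		u32 thresh = min_t(u32, ring_size / 8, 256);

		return free_entries > thresh;
	}
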
1367 ena_napi->rx_ring->smoothed_interval = cur_moder.usec; in ena_dim_work()
1374 struct ena_ring *rx_ring = ena_napi->rx_ring; in ena_adjust_adaptive_rx_intr_moderation() local
1376 if (!rx_ring->per_napi_packets) in ena_adjust_adaptive_rx_intr_moderation()
1379 rx_ring->non_empty_napi_events++; in ena_adjust_adaptive_rx_intr_moderation()
1381 dim_update_sample(rx_ring->non_empty_napi_events, in ena_adjust_adaptive_rx_intr_moderation()
1382 rx_ring->rx_stats.cnt, in ena_adjust_adaptive_rx_intr_moderation()
1383 rx_ring->rx_stats.bytes, in ena_adjust_adaptive_rx_intr_moderation()
1388 rx_ring->per_napi_packets = 0; in ena_adjust_adaptive_rx_intr_moderation()
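
Lines 1374-1388 show the adaptive interrupt moderation feed: once per non-empty NAPI pass the ring's cumulative event, packet, and byte counters are folded into a struct dim_sample via dim_update_sample(), which the driver then hands to the DIM core; the chosen profile lands back in smoothed_interval (line 1367). A thin sketch of just the sampling step:

	#include <linux/dim.h>
	#include <linux/types.h>

	static void demo_rx_dim_sample(u16 napi_events, u64 pkts, u64 bytes,
				       struct dim_sample *sample)
	{
		/* Cumulative counters, not deltas: the DIM core diffs samples. */
		dim_update_sample(napi_events, pkts, bytes, sample);
	}
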
1392 struct ena_ring *rx_ring) in ena_unmask_interrupt() argument
1400 if (rx_ring) in ena_unmask_interrupt()
1401 rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ? in ena_unmask_interrupt()
1402 rx_ring->smoothed_interval : in ena_unmask_interrupt()
1403 ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev); in ena_unmask_interrupt()
1425 struct ena_ring *rx_ring) in ena_update_ring_numa_node() argument
1435 if (rx_ring) in ena_update_ring_numa_node()
1436 rx_ring->cpu = cpu; in ena_update_ring_numa_node()
1448 if (rx_ring) { in ena_update_ring_numa_node()
1449 rx_ring->numa_node = numa_node; in ena_update_ring_numa_node()
1450 ena_com_update_numa_node(rx_ring->ena_com_io_cq, in ena_update_ring_numa_node()
1463 struct ena_ring *tx_ring, *rx_ring; in ena_io_poll() local
1471 rx_ring = ena_napi->rx_ring; in ena_io_poll()
1486 rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget); in ena_io_poll()
1509 if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev)) in ena_io_poll()
1512 ena_update_ring_numa_node(tx_ring, rx_ring); in ena_io_poll()
1513 ena_unmask_interrupt(tx_ring, rx_ring); in ena_io_poll()
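
Lines 1486-1513 follow the usual NAPI contract: the interrupt is re-enabled only when the budget was not exhausted and napi_complete_done() agreed to stop polling. A sketch of that ordering, with unmask_irq() as a hypothetical stand-in for ena_unmask_interrupt():

	#include <linux/netdevice.h>

	static int demo_poll_finish(struct napi_struct *napi, int work_done,
				    int budget, void (*unmask_irq)(void *ctx),
				    void *ctx)
	{
		/* Re-arm the IRQ only after NAPI is really done; otherwise the
		 * scheduler will invoke the poll function again anyway.
		 */
		if (work_done < budget && napi_complete_done(napi, work_done))
			unmask_irq(ctx);

		return work_done;
	}
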
1777 adapter->ena_napi[i].rx_ring); in ena_del_napi_in_range()
1789 struct ena_ring *rx_ring, *tx_ring; in ena_init_napi_in_range() local
1793 rx_ring = &adapter->rx_ring[i]; in ena_init_napi_in_range()
1803 napi->rx_ring = rx_ring; in ena_init_napi_in_range()
1957 struct ena_ring *rx_ring; in ena_create_io_rx_queue() local
1964 rx_ring = &adapter->rx_ring[qid]; in ena_create_io_rx_queue()
1974 ctx.queue_size = rx_ring->ring_size; in ena_create_io_rx_queue()
1975 ctx.numa_node = rx_ring->numa_node; in ena_create_io_rx_queue()
1986 &rx_ring->ena_com_io_sq, in ena_create_io_rx_queue()
1987 &rx_ring->ena_com_io_cq); in ena_create_io_rx_queue()
1995 ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node); in ena_create_io_rx_queue()
2014 ena_xdp_register_rxq_info(&adapter->rx_ring[i]); in ena_create_all_io_rx_queues()
2021 ena_xdp_unregister_rxq_info(&adapter->rx_ring[i]); in ena_create_all_io_rx_queues()
2037 adapter->rx_ring[i].ring_size = new_rx_size; in set_io_rings_size()
2111 cur_rx_ring_size = adapter->rx_ring[0].ring_size; in create_queues_with_size_backoff()
2191 &adapter->rx_ring[i]); in ena_up()
2361 struct ena_ring *rx_ring; in ena_set_rx_copybreak() local
2370 rx_ring = &adapter->rx_ring[i]; in ena_set_rx_copybreak()
2371 rx_ring->rx_copybreak = rx_copybreak; in ena_set_rx_copybreak()
2805 struct ena_ring *rx_ring, *tx_ring; in ena_get_stats64() local
2833 rx_ring = &adapter->rx_ring[i]; in ena_get_stats64()
2836 start = u64_stats_fetch_begin(&rx_ring->syncp); in ena_get_stats64()
2837 packets = rx_ring->rx_stats.cnt; in ena_get_stats64()
2838 bytes = rx_ring->rx_stats.bytes; in ena_get_stats64()
2839 xdp_rx_drops = rx_ring->rx_stats.xdp_drop; in ena_get_stats64()
2840 } while (u64_stats_fetch_retry(&rx_ring->syncp, start)); in ena_get_stats64()
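
The writer side at lines 1319-1323 and the reader side at lines 2836-2840 are the two halves of the u64_stats_sync pattern, which keeps 64-bit counters tear-free on 32-bit machines without atomics. A self-contained sketch of both halves; the stats struct is a stand-in for the driver's per-ring rx_stats:

	#include <linux/string.h>
	#include <linux/types.h>
	#include <linux/u64_stats_sync.h>

	struct demo_rx_stats {
		u64 cnt;
		u64 bytes;
		struct u64_stats_sync syncp;
	};

	static void demo_stats_init(struct demo_rx_stats *s)
	{
		memset(s, 0, sizeof(*s));
		u64_stats_init(&s->syncp);
	}

	static void demo_stats_add(struct demo_rx_stats *s, u64 pkts, u64 bytes)
	{
		u64_stats_update_begin(&s->syncp);	/* writer: the NAPI poll */
		s->cnt += pkts;
		s->bytes += bytes;
		u64_stats_update_end(&s->syncp);
	}

	static void demo_stats_read(struct demo_rx_stats *s, u64 *pkts, u64 *bytes)
	{
		unsigned int start;

		do {					/* reader: ndo_get_stats64 */
			start = u64_stats_fetch_begin(&s->syncp);
			*pkts = s->cnt;
			*bytes = s->bytes;
		} while (u64_stats_fetch_retry(&s->syncp, start));
	}
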
2898 if (adapter->rx_ring->ring_size) in ena_calc_io_queue_size()
2899 rx_queue_size = adapter->rx_ring->ring_size; in ena_calc_io_queue_size()
3378 struct ena_ring *rx_ring) in check_for_rx_interrupt_queue() argument
3380 struct ena_napi *ena_napi = container_of(rx_ring->napi, struct ena_napi, napi); in check_for_rx_interrupt_queue()
3385 if (ena_com_cq_empty(rx_ring->ena_com_io_cq)) in check_for_rx_interrupt_queue()
3388 rx_ring->no_interrupt_event_cnt++; in check_for_rx_interrupt_queue()
3390 if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) { in check_for_rx_interrupt_queue()
3393 rx_ring->qid); in check_for_rx_interrupt_queue()
3495 struct ena_ring *rx_ring; in check_for_missing_completions() local
3521 rx_ring = &adapter->rx_ring[qid]; in check_for_missing_completions()
3528 check_for_rx_interrupt_queue(adapter, rx_ring) : 0; in check_for_missing_completions()
3556 struct ena_ring *rx_ring; in check_for_empty_rx_ring() local
3566 rx_ring = &adapter->rx_ring[i]; in check_for_empty_rx_ring()
3568 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq); in check_for_empty_rx_ring()
3569 if (unlikely(refill_required == (rx_ring->ring_size - 1))) { in check_for_empty_rx_ring()
3570 rx_ring->empty_rx_queue++; in check_for_empty_rx_ring()
3572 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { in check_for_empty_rx_ring()
3573 ena_increase_stat(&rx_ring->rx_stats.empty_rx_ring, 1, in check_for_empty_rx_ring()
3574 &rx_ring->syncp); in check_for_empty_rx_ring()
3579 napi_schedule(rx_ring->napi); in check_for_empty_rx_ring()
3580 rx_ring->empty_rx_queue = 0; in check_for_empty_rx_ring()
3583 rx_ring->empty_rx_queue = 0; in check_for_empty_rx_ring()
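
Lines 3566-3583 show the empty-RX-ring watchdog: if a queue's submission queue stays completely free (ring_size - 1 entries, i.e. no buffers posted) across consecutive timer checks, its NAPI context is kicked so the refill path runs again. A sketch of that counter-and-kick logic; the threshold constant is a stand-in for EMPTY_RX_REFILL:

	#include <linux/netdevice.h>
	#include <linux/types.h>

	#define DEMO_EMPTY_RX_REFILL_THRESH 2	/* illustrative, mirrors EMPTY_RX_REFILL */

	static void demo_check_empty_rx(struct napi_struct *napi, u32 free_entries,
					u32 ring_size, u32 *empty_cnt)
	{
		if (free_entries != ring_size - 1) {
			*empty_cnt = 0;			/* ring has buffers again */
			return;
		}

		if (++(*empty_cnt) >= DEMO_EMPTY_RX_REFILL_THRESH) {
			napi_schedule(napi);		/* let the poll loop refill */
			*empty_cnt = 0;
		}
	}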