Lines matching refs: priv
35 static int gve_verify_driver_compatibility(struct gve_priv *priv) in gve_verify_driver_compatibility() argument
41 driver_info = dma_alloc_coherent(&priv->pdev->dev, in gve_verify_driver_compatibility()
64 err = gve_adminq_verify_driver_compatibility(priv, in gve_verify_driver_compatibility()
72 dma_free_coherent(&priv->pdev->dev, in gve_verify_driver_compatibility()
80 struct gve_priv *priv = netdev_priv(dev); in gve_start_xmit() local
82 if (gve_is_gqi(priv)) in gve_start_xmit()
90 struct gve_priv *priv = netdev_priv(dev); in gve_get_stats() local
95 if (priv->rx) { in gve_get_stats()
96 for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) { in gve_get_stats()
99 u64_stats_fetch_begin(&priv->rx[ring].statss); in gve_get_stats()
100 packets = priv->rx[ring].rpackets; in gve_get_stats()
101 bytes = priv->rx[ring].rbytes; in gve_get_stats()
102 } while (u64_stats_fetch_retry(&priv->rx[ring].statss, in gve_get_stats()
108 if (priv->tx) { in gve_get_stats()
109 for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) { in gve_get_stats()
112 u64_stats_fetch_begin(&priv->tx[ring].statss); in gve_get_stats()
113 packets = priv->tx[ring].pkt_done; in gve_get_stats()
114 bytes = priv->tx[ring].bytes_done; in gve_get_stats()
115 } while (u64_stats_fetch_retry(&priv->tx[ring].statss, in gve_get_stats()
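The gve_get_stats lines above read per-ring counters with the kernel's u64_stats seqcount API. A minimal sketch of that read-side pattern, using a hypothetical example_ring type rather than the driver's real structures:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical per-ring counters protected by a u64_stats_sync seqcount. */
struct example_ring {
        struct u64_stats_sync statss;
        u64 packets;
        u64 bytes;
};

/* Snapshot a ring's counters without blocking the writer: if the writer
 * updated the counters while we copied them, fetch_retry tells us to
 * start over.
 */
static void example_read_ring_stats(struct example_ring *ring,
                                    u64 *packets, u64 *bytes)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin(&ring->statss);
                *packets = ring->packets;
                *bytes = ring->bytes;
        } while (u64_stats_fetch_retry(&ring->statss, start));
}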
123 static int gve_alloc_counter_array(struct gve_priv *priv) in gve_alloc_counter_array() argument
125 priv->counter_array = in gve_alloc_counter_array()
126 dma_alloc_coherent(&priv->pdev->dev, in gve_alloc_counter_array()
127 priv->num_event_counters * in gve_alloc_counter_array()
128 sizeof(*priv->counter_array), in gve_alloc_counter_array()
129 &priv->counter_array_bus, GFP_KERNEL); in gve_alloc_counter_array()
130 if (!priv->counter_array) in gve_alloc_counter_array()
136 static void gve_free_counter_array(struct gve_priv *priv) in gve_free_counter_array() argument
138 if (!priv->counter_array) in gve_free_counter_array()
141 dma_free_coherent(&priv->pdev->dev, in gve_free_counter_array()
142 priv->num_event_counters * in gve_free_counter_array()
143 sizeof(*priv->counter_array), in gve_free_counter_array()
144 priv->counter_array, priv->counter_array_bus); in gve_free_counter_array()
145 priv->counter_array = NULL; in gve_free_counter_array()
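The counter array above is a coherent DMA buffer shared with the device. A minimal, hypothetical sketch of the allocation/free pairing these lines rely on; the example_* names and the counter layout are illustrative, not the driver's:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/types.h>

/* Hypothetical coherent buffer of 'count' 32-bit event counters. The free
 * call must pass back the same device, size, CPU address, and DMA handle
 * the allocation returned.
 */
static __be32 *example_alloc_counters(struct device *dev, size_t count,
                                      dma_addr_t *bus)
{
        return dma_alloc_coherent(dev, count * sizeof(__be32), bus,
                                  GFP_KERNEL);
}

static void example_free_counters(struct device *dev, size_t count,
                                  __be32 *counters, dma_addr_t bus)
{
        if (!counters)
                return;
        dma_free_coherent(dev, count * sizeof(__be32), counters, bus);
}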
151 struct gve_priv *priv = container_of(work, struct gve_priv, in gve_stats_report_task() local
153 if (gve_get_do_report_stats(priv)) { in gve_stats_report_task()
154 gve_handle_report_stats(priv); in gve_stats_report_task()
155 gve_clear_do_report_stats(priv); in gve_stats_report_task()
159 static void gve_stats_report_schedule(struct gve_priv *priv) in gve_stats_report_schedule() argument
161 if (!gve_get_probe_in_progress(priv) && in gve_stats_report_schedule()
162 !gve_get_reset_in_progress(priv)) { in gve_stats_report_schedule()
163 gve_set_do_report_stats(priv); in gve_stats_report_schedule()
164 queue_work(priv->gve_wq, &priv->stats_report_task); in gve_stats_report_schedule()
170 struct gve_priv *priv = from_timer(priv, t, stats_report_timer); in gve_stats_report_timer() local
172 mod_timer(&priv->stats_report_timer, in gve_stats_report_timer()
174 msecs_to_jiffies(priv->stats_report_timer_period))); in gve_stats_report_timer()
175 gve_stats_report_schedule(priv); in gve_stats_report_timer()
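gve_stats_report_timer above is a self-rearming timer that defers the real work to a workqueue. A sketch of that pattern with hypothetical example_* names:

#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

/* Hypothetical private struct owning a periodic timer and the work item
 * the timer kicks; the actual reporting runs later in process context.
 */
struct example_stats_priv {
        struct timer_list report_timer;
        unsigned long report_period_ms;
        struct workqueue_struct *wq;
        struct work_struct report_task;
};

static void example_report_timer(struct timer_list *t)
{
        struct example_stats_priv *priv = from_timer(priv, t, report_timer);

        /* Re-arm first, then schedule the work that writes the report. */
        mod_timer(&priv->report_timer,
                  round_jiffies(jiffies +
                                msecs_to_jiffies(priv->report_period_ms)));
        queue_work(priv->wq, &priv->report_task);
}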
178 static int gve_alloc_stats_report(struct gve_priv *priv) in gve_alloc_stats_report() argument
183 priv->tx_cfg.num_queues; in gve_alloc_stats_report()
185 priv->rx_cfg.num_queues; in gve_alloc_stats_report()
186 priv->stats_report_len = struct_size(priv->stats_report, stats, in gve_alloc_stats_report()
188 priv->stats_report = in gve_alloc_stats_report()
189 dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len, in gve_alloc_stats_report()
190 &priv->stats_report_bus, GFP_KERNEL); in gve_alloc_stats_report()
191 if (!priv->stats_report) in gve_alloc_stats_report()
194 timer_setup(&priv->stats_report_timer, gve_stats_report_timer, 0); in gve_alloc_stats_report()
195 priv->stats_report_timer_period = GVE_STATS_REPORT_TIMER_PERIOD; in gve_alloc_stats_report()
199 static void gve_free_stats_report(struct gve_priv *priv) in gve_free_stats_report() argument
201 if (!priv->stats_report) in gve_free_stats_report()
204 del_timer_sync(&priv->stats_report_timer); in gve_free_stats_report()
205 dma_free_coherent(&priv->pdev->dev, priv->stats_report_len, in gve_free_stats_report()
206 priv->stats_report, priv->stats_report_bus); in gve_free_stats_report()
207 priv->stats_report = NULL; in gve_free_stats_report()
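gve_alloc_stats_report sizes its buffer with struct_size(), which covers a header plus a flexible array of stat entries. A hedged sketch of that sizing with a hypothetical report layout, using plain kzalloc() here instead of the driver's dma_alloc_coherent():

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical report: a header followed by a flexible array of stats. */
struct example_stat {
        __be32 stat_name;
        __be32 queue_id;
        __be64 value;
};

struct example_report {
        __be64 written_count;
        struct example_stat stats[];
};

static struct example_report *example_alloc_report(size_t nstats)
{
        struct example_report *report;

        /* struct_size() computes the header size plus nstats entries of
         * the flexible array, with overflow checking.
         */
        report = kzalloc(struct_size(report, stats, nstats), GFP_KERNEL);
        return report;
}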
212 struct gve_priv *priv = arg; in gve_mgmnt_intr() local
214 queue_work(priv->gve_wq, &priv->service_task); in gve_mgmnt_intr()
221 struct gve_priv *priv = block->priv; in gve_intr() local
223 iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block)); in gve_intr()
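gve_intr above is the per-queue MSI-X handler for the GQI format: it masks the vector via the device's IRQ doorbell and lets NAPI do the rest. A hypothetical skeleton of that handler shape; the doorbell write itself is only sketched as a comment:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Hypothetical per-queue MSI-X handler: the block passed as dev_id
 * carries the NAPI context for this queue pair.
 */
struct example_block {
        struct napi_struct napi;
};

static irqreturn_t example_intr(int irq, void *arg)
{
        struct example_block *block = arg;

        /* write the mask value to this vector's IRQ doorbell here */
        napi_schedule_irqoff(&block->napi);
        return IRQ_HANDLED;
}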
242 struct gve_priv *priv; in gve_napi_poll() local
246 priv = block->priv; in gve_napi_poll()
260 irq_doorbell = gve_irq_doorbell(priv, block); in gve_napi_poll()
269 reschedule |= gve_tx_clean_pending(priv, block->tx); in gve_napi_poll()
283 struct gve_priv *priv = block->priv; in gve_napi_poll_dqo() local
297 gve_write_irq_doorbell_dqo(priv, block, in gve_napi_poll_dqo()
320 gve_write_irq_doorbell_dqo(priv, block, in gve_napi_poll_dqo()
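gve_napi_poll and gve_napi_poll_dqo follow the standard NAPI shape: clean TX, poll RX against the budget, and only re-enable the interrupt once no work is pending. A hypothetical skeleton of that shape; example_clean_tx() and example_poll_rx() are placeholder stubs, not driver functions:

#include <linux/netdevice.h>

/* Placeholder work functions standing in for the driver's real TX-clean
 * and RX-poll helpers; they report no outstanding work.
 */
static bool example_clean_tx(struct napi_struct *napi)
{
        return false;   /* no TX completions left pending */
}

static int example_poll_rx(struct napi_struct *napi, int budget)
{
        return 0;       /* no RX packets processed */
}

/* Hypothetical poll callback: stay scheduled while work remains, and only
 * when napi_complete_done() succeeds re-enable this queue's interrupt
 * (a device doorbell write in the real driver).
 */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
        bool reschedule = false;
        int work_done = 0;

        reschedule |= example_clean_tx(napi);
        work_done = example_poll_rx(napi, budget);
        reschedule |= work_done == budget;

        if (reschedule)
                return budget;

        if (napi_complete_done(napi, work_done)) {
                /* unmask this queue's interrupt here */
        }
        return work_done;
}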
327 static int gve_alloc_notify_blocks(struct gve_priv *priv) in gve_alloc_notify_blocks() argument
329 int num_vecs_requested = priv->num_ntfy_blks + 1; in gve_alloc_notify_blocks()
335 priv->msix_vectors = kvcalloc(num_vecs_requested, in gve_alloc_notify_blocks()
336 sizeof(*priv->msix_vectors), GFP_KERNEL); in gve_alloc_notify_blocks()
337 if (!priv->msix_vectors) in gve_alloc_notify_blocks()
340 priv->msix_vectors[i].entry = i; in gve_alloc_notify_blocks()
341 vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors, in gve_alloc_notify_blocks()
344 dev_err(&priv->pdev->dev, "Could not enable min msix %d/%d\n", in gve_alloc_notify_blocks()
354 priv->num_ntfy_blks = new_num_ntfy_blks; in gve_alloc_notify_blocks()
355 priv->mgmt_msix_idx = priv->num_ntfy_blks; in gve_alloc_notify_blocks()
356 priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues, in gve_alloc_notify_blocks()
358 priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues, in gve_alloc_notify_blocks()
360 dev_err(&priv->pdev->dev, in gve_alloc_notify_blocks()
362 vecs_enabled, priv->tx_cfg.max_queues, in gve_alloc_notify_blocks()
363 priv->rx_cfg.max_queues); in gve_alloc_notify_blocks()
364 if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues) in gve_alloc_notify_blocks()
365 priv->tx_cfg.num_queues = priv->tx_cfg.max_queues; in gve_alloc_notify_blocks()
366 if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues) in gve_alloc_notify_blocks()
367 priv->rx_cfg.num_queues = priv->rx_cfg.max_queues; in gve_alloc_notify_blocks()
370 active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus()); in gve_alloc_notify_blocks()
373 snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "gve-mgmnt@pci:%s", in gve_alloc_notify_blocks()
374 pci_name(priv->pdev)); in gve_alloc_notify_blocks()
375 err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, in gve_alloc_notify_blocks()
376 gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv); in gve_alloc_notify_blocks()
378 dev_err(&priv->pdev->dev, "Did not receive management vector.\n"); in gve_alloc_notify_blocks()
381 priv->irq_db_indices = in gve_alloc_notify_blocks()
382 dma_alloc_coherent(&priv->pdev->dev, in gve_alloc_notify_blocks()
383 priv->num_ntfy_blks * in gve_alloc_notify_blocks()
384 sizeof(*priv->irq_db_indices), in gve_alloc_notify_blocks()
385 &priv->irq_db_indices_bus, GFP_KERNEL); in gve_alloc_notify_blocks()
386 if (!priv->irq_db_indices) { in gve_alloc_notify_blocks()
391 priv->ntfy_blocks = kvzalloc(priv->num_ntfy_blks * in gve_alloc_notify_blocks()
392 sizeof(*priv->ntfy_blocks), GFP_KERNEL); in gve_alloc_notify_blocks()
393 if (!priv->ntfy_blocks) { in gve_alloc_notify_blocks()
399 for (i = 0; i < priv->num_ntfy_blks; i++) { in gve_alloc_notify_blocks()
400 struct gve_notify_block *block = &priv->ntfy_blocks[i]; in gve_alloc_notify_blocks()
404 i, pci_name(priv->pdev)); in gve_alloc_notify_blocks()
405 block->priv = priv; in gve_alloc_notify_blocks()
406 err = request_irq(priv->msix_vectors[msix_idx].vector, in gve_alloc_notify_blocks()
407 gve_is_gqi(priv) ? gve_intr : gve_intr_dqo, in gve_alloc_notify_blocks()
410 dev_err(&priv->pdev->dev, in gve_alloc_notify_blocks()
414 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector, in gve_alloc_notify_blocks()
416 block->irq_db_index = &priv->irq_db_indices[i].index; in gve_alloc_notify_blocks()
421 struct gve_notify_block *block = &priv->ntfy_blocks[j]; in gve_alloc_notify_blocks()
424 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector, in gve_alloc_notify_blocks()
426 free_irq(priv->msix_vectors[msix_idx].vector, block); in gve_alloc_notify_blocks()
428 kvfree(priv->ntfy_blocks); in gve_alloc_notify_blocks()
429 priv->ntfy_blocks = NULL; in gve_alloc_notify_blocks()
431 dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks * in gve_alloc_notify_blocks()
432 sizeof(*priv->irq_db_indices), in gve_alloc_notify_blocks()
433 priv->irq_db_indices, priv->irq_db_indices_bus); in gve_alloc_notify_blocks()
434 priv->irq_db_indices = NULL; in gve_alloc_notify_blocks()
436 free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv); in gve_alloc_notify_blocks()
438 pci_disable_msix(priv->pdev); in gve_alloc_notify_blocks()
440 kvfree(priv->msix_vectors); in gve_alloc_notify_blocks()
441 priv->msix_vectors = NULL; in gve_alloc_notify_blocks()
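gve_alloc_notify_blocks asks for one MSI-X vector per notification block plus one management vector, and scales the queue counts down if it is granted fewer. A hedged sketch of just the vector negotiation, with hypothetical example_* names:

#include <linux/pci.h>
#include <linux/slab.h>

/* Hypothetical MSI-X negotiation: request 'wanted' vectors but accept
 * anything down to 'min_needed'; the caller trims its queue counts to
 * whatever was granted.
 */
static int example_enable_msix(struct pci_dev *pdev,
                               struct msix_entry **entries,
                               int wanted, int min_needed)
{
        int i, granted;

        *entries = kvcalloc(wanted, sizeof(**entries), GFP_KERNEL);
        if (!*entries)
                return -ENOMEM;

        for (i = 0; i < wanted; i++)
                (*entries)[i].entry = i;

        granted = pci_enable_msix_range(pdev, *entries, min_needed, wanted);
        if (granted < 0) {
                kvfree(*entries);
                *entries = NULL;
                return granted; /* could not even get the minimum */
        }
        return granted;         /* somewhere in [min_needed, wanted] */
}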
445 static void gve_free_notify_blocks(struct gve_priv *priv) in gve_free_notify_blocks() argument
449 if (!priv->msix_vectors) in gve_free_notify_blocks()
453 for (i = 0; i < priv->num_ntfy_blks; i++) { in gve_free_notify_blocks()
454 struct gve_notify_block *block = &priv->ntfy_blocks[i]; in gve_free_notify_blocks()
457 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector, in gve_free_notify_blocks()
459 free_irq(priv->msix_vectors[msix_idx].vector, block); in gve_free_notify_blocks()
461 free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv); in gve_free_notify_blocks()
462 kvfree(priv->ntfy_blocks); in gve_free_notify_blocks()
463 priv->ntfy_blocks = NULL; in gve_free_notify_blocks()
464 dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks * in gve_free_notify_blocks()
465 sizeof(*priv->irq_db_indices), in gve_free_notify_blocks()
466 priv->irq_db_indices, priv->irq_db_indices_bus); in gve_free_notify_blocks()
467 priv->irq_db_indices = NULL; in gve_free_notify_blocks()
468 pci_disable_msix(priv->pdev); in gve_free_notify_blocks()
469 kvfree(priv->msix_vectors); in gve_free_notify_blocks()
470 priv->msix_vectors = NULL; in gve_free_notify_blocks()
473 static int gve_setup_device_resources(struct gve_priv *priv) in gve_setup_device_resources() argument
477 err = gve_alloc_counter_array(priv); in gve_setup_device_resources()
480 err = gve_alloc_notify_blocks(priv); in gve_setup_device_resources()
483 err = gve_alloc_stats_report(priv); in gve_setup_device_resources()
486 err = gve_adminq_configure_device_resources(priv, in gve_setup_device_resources()
487 priv->counter_array_bus, in gve_setup_device_resources()
488 priv->num_event_counters, in gve_setup_device_resources()
489 priv->irq_db_indices_bus, in gve_setup_device_resources()
490 priv->num_ntfy_blks); in gve_setup_device_resources()
492 dev_err(&priv->pdev->dev, in gve_setup_device_resources()
498 if (priv->queue_format == GVE_DQO_RDA_FORMAT) { in gve_setup_device_resources()
499 priv->ptype_lut_dqo = kvzalloc(sizeof(*priv->ptype_lut_dqo), in gve_setup_device_resources()
501 if (!priv->ptype_lut_dqo) { in gve_setup_device_resources()
505 err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo); in gve_setup_device_resources()
507 dev_err(&priv->pdev->dev, in gve_setup_device_resources()
513 err = gve_adminq_report_stats(priv, priv->stats_report_len, in gve_setup_device_resources()
514 priv->stats_report_bus, in gve_setup_device_resources()
517 dev_err(&priv->pdev->dev, in gve_setup_device_resources()
519 gve_set_device_resources_ok(priv); in gve_setup_device_resources()
523 kvfree(priv->ptype_lut_dqo); in gve_setup_device_resources()
524 priv->ptype_lut_dqo = NULL; in gve_setup_device_resources()
526 gve_free_stats_report(priv); in gve_setup_device_resources()
528 gve_free_notify_blocks(priv); in gve_setup_device_resources()
530 gve_free_counter_array(priv); in gve_setup_device_resources()
535 static void gve_trigger_reset(struct gve_priv *priv);
537 static void gve_teardown_device_resources(struct gve_priv *priv) in gve_teardown_device_resources() argument
542 if (gve_get_device_resources_ok(priv)) { in gve_teardown_device_resources()
544 err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD); in gve_teardown_device_resources()
546 dev_err(&priv->pdev->dev, in gve_teardown_device_resources()
548 gve_trigger_reset(priv); in gve_teardown_device_resources()
550 err = gve_adminq_deconfigure_device_resources(priv); in gve_teardown_device_resources()
552 dev_err(&priv->pdev->dev, in gve_teardown_device_resources()
555 gve_trigger_reset(priv); in gve_teardown_device_resources()
559 kvfree(priv->ptype_lut_dqo); in gve_teardown_device_resources()
560 priv->ptype_lut_dqo = NULL; in gve_teardown_device_resources()
562 gve_free_counter_array(priv); in gve_teardown_device_resources()
563 gve_free_notify_blocks(priv); in gve_teardown_device_resources()
564 gve_free_stats_report(priv); in gve_teardown_device_resources()
565 gve_clear_device_resources_ok(priv); in gve_teardown_device_resources()
568 static void gve_add_napi(struct gve_priv *priv, int ntfy_idx, in gve_add_napi() argument
571 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; in gve_add_napi()
573 netif_napi_add(priv->dev, &block->napi, gve_poll); in gve_add_napi()
576 static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx) in gve_remove_napi() argument
578 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; in gve_remove_napi()
583 static int gve_register_qpls(struct gve_priv *priv) in gve_register_qpls() argument
585 int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv); in gve_register_qpls()
590 err = gve_adminq_register_page_list(priv, &priv->qpls[i]); in gve_register_qpls()
592 netif_err(priv, drv, priv->dev, in gve_register_qpls()
594 priv->qpls[i].id); in gve_register_qpls()
604 static int gve_unregister_qpls(struct gve_priv *priv) in gve_unregister_qpls() argument
606 int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv); in gve_unregister_qpls()
611 err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id); in gve_unregister_qpls()
614 netif_err(priv, drv, priv->dev, in gve_unregister_qpls()
616 priv->qpls[i].id); in gve_unregister_qpls()
623 static int gve_create_rings(struct gve_priv *priv) in gve_create_rings() argument
628 err = gve_adminq_create_tx_queues(priv, priv->tx_cfg.num_queues); in gve_create_rings()
630 netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n", in gve_create_rings()
631 priv->tx_cfg.num_queues); in gve_create_rings()
637 netif_dbg(priv, drv, priv->dev, "created %d tx queues\n", in gve_create_rings()
638 priv->tx_cfg.num_queues); in gve_create_rings()
640 err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues); in gve_create_rings()
642 netif_err(priv, drv, priv->dev, "failed to create %d rx queues\n", in gve_create_rings()
643 priv->rx_cfg.num_queues); in gve_create_rings()
649 netif_dbg(priv, drv, priv->dev, "created %d rx queues\n", in gve_create_rings()
650 priv->rx_cfg.num_queues); in gve_create_rings()
652 if (gve_is_gqi(priv)) { in gve_create_rings()
659 for (i = 0; i < priv->rx_cfg.num_queues; i++) in gve_create_rings()
660 gve_rx_write_doorbell(priv, &priv->rx[i]); in gve_create_rings()
662 for (i = 0; i < priv->rx_cfg.num_queues; i++) { in gve_create_rings()
664 gve_rx_post_buffers_dqo(&priv->rx[i]); in gve_create_rings()
671 static void add_napi_init_sync_stats(struct gve_priv *priv, in add_napi_init_sync_stats() argument
678 for (i = 0; i < priv->tx_cfg.num_queues; i++) { in add_napi_init_sync_stats()
679 int ntfy_idx = gve_tx_idx_to_ntfy(priv, i); in add_napi_init_sync_stats()
681 u64_stats_init(&priv->tx[i].statss); in add_napi_init_sync_stats()
682 priv->tx[i].ntfy_id = ntfy_idx; in add_napi_init_sync_stats()
683 gve_add_napi(priv, ntfy_idx, napi_poll); in add_napi_init_sync_stats()
686 for (i = 0; i < priv->rx_cfg.num_queues; i++) { in add_napi_init_sync_stats()
687 int ntfy_idx = gve_rx_idx_to_ntfy(priv, i); in add_napi_init_sync_stats()
689 u64_stats_init(&priv->rx[i].statss); in add_napi_init_sync_stats()
690 priv->rx[i].ntfy_id = ntfy_idx; in add_napi_init_sync_stats()
691 gve_add_napi(priv, ntfy_idx, napi_poll); in add_napi_init_sync_stats()
695 static void gve_tx_free_rings(struct gve_priv *priv) in gve_tx_free_rings() argument
697 if (gve_is_gqi(priv)) { in gve_tx_free_rings()
698 gve_tx_free_rings_gqi(priv); in gve_tx_free_rings()
700 gve_tx_free_rings_dqo(priv); in gve_tx_free_rings()
704 static int gve_alloc_rings(struct gve_priv *priv) in gve_alloc_rings() argument
709 priv->tx = kvcalloc(priv->tx_cfg.num_queues, sizeof(*priv->tx), in gve_alloc_rings()
711 if (!priv->tx) in gve_alloc_rings()
714 if (gve_is_gqi(priv)) in gve_alloc_rings()
715 err = gve_tx_alloc_rings(priv); in gve_alloc_rings()
717 err = gve_tx_alloc_rings_dqo(priv); in gve_alloc_rings()
722 priv->rx = kvcalloc(priv->rx_cfg.num_queues, sizeof(*priv->rx), in gve_alloc_rings()
724 if (!priv->rx) { in gve_alloc_rings()
729 if (gve_is_gqi(priv)) in gve_alloc_rings()
730 err = gve_rx_alloc_rings(priv); in gve_alloc_rings()
732 err = gve_rx_alloc_rings_dqo(priv); in gve_alloc_rings()
736 if (gve_is_gqi(priv)) in gve_alloc_rings()
737 add_napi_init_sync_stats(priv, gve_napi_poll); in gve_alloc_rings()
739 add_napi_init_sync_stats(priv, gve_napi_poll_dqo); in gve_alloc_rings()
744 kvfree(priv->rx); in gve_alloc_rings()
745 priv->rx = NULL; in gve_alloc_rings()
747 gve_tx_free_rings(priv); in gve_alloc_rings()
749 kvfree(priv->tx); in gve_alloc_rings()
750 priv->tx = NULL; in gve_alloc_rings()
754 static int gve_destroy_rings(struct gve_priv *priv) in gve_destroy_rings() argument
758 err = gve_adminq_destroy_tx_queues(priv, priv->tx_cfg.num_queues); in gve_destroy_rings()
760 netif_err(priv, drv, priv->dev, in gve_destroy_rings()
765 netif_dbg(priv, drv, priv->dev, "destroyed tx queues\n"); in gve_destroy_rings()
766 err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues); in gve_destroy_rings()
768 netif_err(priv, drv, priv->dev, in gve_destroy_rings()
773 netif_dbg(priv, drv, priv->dev, "destroyed rx queues\n"); in gve_destroy_rings()
777 static void gve_rx_free_rings(struct gve_priv *priv) in gve_rx_free_rings() argument
779 if (gve_is_gqi(priv)) in gve_rx_free_rings()
780 gve_rx_free_rings_gqi(priv); in gve_rx_free_rings()
782 gve_rx_free_rings_dqo(priv); in gve_rx_free_rings()
785 static void gve_free_rings(struct gve_priv *priv) in gve_free_rings() argument
790 if (priv->tx) { in gve_free_rings()
791 for (i = 0; i < priv->tx_cfg.num_queues; i++) { in gve_free_rings()
792 ntfy_idx = gve_tx_idx_to_ntfy(priv, i); in gve_free_rings()
793 gve_remove_napi(priv, ntfy_idx); in gve_free_rings()
795 gve_tx_free_rings(priv); in gve_free_rings()
796 kvfree(priv->tx); in gve_free_rings()
797 priv->tx = NULL; in gve_free_rings()
799 if (priv->rx) { in gve_free_rings()
800 for (i = 0; i < priv->rx_cfg.num_queues; i++) { in gve_free_rings()
801 ntfy_idx = gve_rx_idx_to_ntfy(priv, i); in gve_free_rings()
802 gve_remove_napi(priv, ntfy_idx); in gve_free_rings()
804 gve_rx_free_rings(priv); in gve_free_rings()
805 kvfree(priv->rx); in gve_free_rings()
806 priv->rx = NULL; in gve_free_rings()
810 int gve_alloc_page(struct gve_priv *priv, struct device *dev, in gve_alloc_page() argument
816 priv->page_alloc_fail++; in gve_alloc_page()
821 priv->dma_mapping_error++; in gve_alloc_page()
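gve_alloc_page above pairs a page allocation with a streaming DMA mapping and counts the two failure modes separately. A minimal hypothetical version of that helper:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical page-alloc-and-map helper: allocation failure and DMA
 * mapping failure are distinct errors (the lines above keep separate
 * counters for them).
 */
static int example_alloc_page(struct device *dev, struct page **page,
                              dma_addr_t *dma, enum dma_data_direction dir,
                              gfp_t gfp_flags)
{
        *page = alloc_page(gfp_flags);
        if (!*page)
                return -ENOMEM;

        *dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
        if (dma_mapping_error(dev, *dma)) {
                put_page(*page);
                *page = NULL;
                return -ENOMEM;
        }
        return 0;
}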
828 static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id, in gve_alloc_queue_page_list() argument
831 struct gve_queue_page_list *qpl = &priv->qpls[id]; in gve_alloc_queue_page_list()
835 if (pages + priv->num_registered_pages > priv->max_registered_pages) { in gve_alloc_queue_page_list()
836 netif_err(priv, drv, priv->dev, in gve_alloc_queue_page_list()
838 pages + priv->num_registered_pages, in gve_alloc_queue_page_list()
839 priv->max_registered_pages); in gve_alloc_queue_page_list()
855 err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i], in gve_alloc_queue_page_list()
857 gve_qpl_dma_dir(priv, id), GFP_KERNEL); in gve_alloc_queue_page_list()
863 priv->num_registered_pages += pages; in gve_alloc_queue_page_list()
877 static void gve_free_queue_page_list(struct gve_priv *priv, u32 id) in gve_free_queue_page_list() argument
879 struct gve_queue_page_list *qpl = &priv->qpls[id]; in gve_free_queue_page_list()
888 gve_free_page(&priv->pdev->dev, qpl->pages[i], in gve_free_queue_page_list()
889 qpl->page_buses[i], gve_qpl_dma_dir(priv, id)); in gve_free_queue_page_list()
894 priv->num_registered_pages -= qpl->num_entries; in gve_free_queue_page_list()
897 static int gve_alloc_qpls(struct gve_priv *priv) in gve_alloc_qpls() argument
899 int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv); in gve_alloc_qpls()
906 priv->qpls = kvcalloc(num_qpls, sizeof(*priv->qpls), GFP_KERNEL); in gve_alloc_qpls()
907 if (!priv->qpls) in gve_alloc_qpls()
910 for (i = 0; i < gve_num_tx_qpls(priv); i++) { in gve_alloc_qpls()
911 err = gve_alloc_queue_page_list(priv, i, in gve_alloc_qpls()
912 priv->tx_pages_per_qpl); in gve_alloc_qpls()
917 err = gve_alloc_queue_page_list(priv, i, in gve_alloc_qpls()
918 priv->rx_data_slot_cnt); in gve_alloc_qpls()
923 priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) * in gve_alloc_qpls()
925 priv->qpl_cfg.qpl_id_map = kvcalloc(BITS_TO_LONGS(num_qpls), in gve_alloc_qpls()
927 if (!priv->qpl_cfg.qpl_id_map) { in gve_alloc_qpls()
936 gve_free_queue_page_list(priv, j); in gve_alloc_qpls()
937 kvfree(priv->qpls); in gve_alloc_qpls()
941 static void gve_free_qpls(struct gve_priv *priv) in gve_free_qpls() argument
943 int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv); in gve_free_qpls()
949 kvfree(priv->qpl_cfg.qpl_id_map); in gve_free_qpls()
952 gve_free_queue_page_list(priv, i); in gve_free_qpls()
954 kvfree(priv->qpls); in gve_free_qpls()
961 void gve_schedule_reset(struct gve_priv *priv) in gve_schedule_reset() argument
963 gve_set_do_reset(priv); in gve_schedule_reset()
964 queue_work(priv->gve_wq, &priv->service_task); in gve_schedule_reset()
967 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up);
968 static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
969 static void gve_turndown(struct gve_priv *priv);
970 static void gve_turnup(struct gve_priv *priv);
974 struct gve_priv *priv = netdev_priv(dev); in gve_open() local
977 err = gve_alloc_qpls(priv); in gve_open()
981 err = gve_alloc_rings(priv); in gve_open()
985 err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues); in gve_open()
988 err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues); in gve_open()
992 err = gve_register_qpls(priv); in gve_open()
996 if (!gve_is_gqi(priv)) { in gve_open()
1000 priv->data_buffer_size_dqo = GVE_RX_BUFFER_SIZE_DQO; in gve_open()
1002 err = gve_create_rings(priv); in gve_open()
1006 gve_set_device_rings_ok(priv); in gve_open()
1008 if (gve_get_report_stats(priv)) in gve_open()
1009 mod_timer(&priv->stats_report_timer, in gve_open()
1011 msecs_to_jiffies(priv->stats_report_timer_period))); in gve_open()
1013 gve_turnup(priv); in gve_open()
1014 queue_work(priv->gve_wq, &priv->service_task); in gve_open()
1015 priv->interface_up_cnt++; in gve_open()
1019 gve_free_rings(priv); in gve_open()
1021 gve_free_qpls(priv); in gve_open()
1028 if (gve_get_reset_in_progress(priv)) in gve_open()
1031 gve_reset_and_teardown(priv, true); in gve_open()
1033 gve_reset_recovery(priv, false); in gve_open()
1040 struct gve_priv *priv = netdev_priv(dev); in gve_close() local
1044 if (gve_get_device_rings_ok(priv)) { in gve_close()
1045 gve_turndown(priv); in gve_close()
1046 err = gve_destroy_rings(priv); in gve_close()
1049 err = gve_unregister_qpls(priv); in gve_close()
1052 gve_clear_device_rings_ok(priv); in gve_close()
1054 del_timer_sync(&priv->stats_report_timer); in gve_close()
1056 gve_free_rings(priv); in gve_close()
1057 gve_free_qpls(priv); in gve_close()
1058 priv->interface_down_cnt++; in gve_close()
1065 if (gve_get_reset_in_progress(priv)) in gve_close()
1068 gve_reset_and_teardown(priv, true); in gve_close()
1069 return gve_reset_recovery(priv, false); in gve_close()
1072 int gve_adjust_queues(struct gve_priv *priv, in gve_adjust_queues() argument
1078 if (netif_carrier_ok(priv->dev)) { in gve_adjust_queues()
1083 err = gve_close(priv->dev); in gve_adjust_queues()
1089 priv->tx_cfg = new_tx_config; in gve_adjust_queues()
1090 priv->rx_cfg = new_rx_config; in gve_adjust_queues()
1092 err = gve_open(priv->dev); in gve_adjust_queues()
1099 priv->tx_cfg = new_tx_config; in gve_adjust_queues()
1100 priv->rx_cfg = new_rx_config; in gve_adjust_queues()
1104 netif_err(priv, drv, priv->dev, in gve_adjust_queues()
1106 gve_turndown(priv); in gve_adjust_queues()
1110 static void gve_turndown(struct gve_priv *priv) in gve_turndown() argument
1114 if (netif_carrier_ok(priv->dev)) in gve_turndown()
1115 netif_carrier_off(priv->dev); in gve_turndown()
1117 if (!gve_get_napi_enabled(priv)) in gve_turndown()
1121 for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) { in gve_turndown()
1122 int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx); in gve_turndown()
1123 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; in gve_turndown()
1127 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) { in gve_turndown()
1128 int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx); in gve_turndown()
1129 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; in gve_turndown()
1135 netif_tx_disable(priv->dev); in gve_turndown()
1137 gve_clear_napi_enabled(priv); in gve_turndown()
1138 gve_clear_report_stats(priv); in gve_turndown()
1141 static void gve_turnup(struct gve_priv *priv) in gve_turnup() argument
1146 netif_tx_start_all_queues(priv->dev); in gve_turnup()
1149 for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) { in gve_turnup()
1150 int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx); in gve_turnup()
1151 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; in gve_turnup()
1154 if (gve_is_gqi(priv)) { in gve_turnup()
1155 iowrite32be(0, gve_irq_doorbell(priv, block)); in gve_turnup()
1157 gve_set_itr_coalesce_usecs_dqo(priv, block, in gve_turnup()
1158 priv->tx_coalesce_usecs); in gve_turnup()
1161 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) { in gve_turnup()
1162 int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx); in gve_turnup()
1163 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; in gve_turnup()
1166 if (gve_is_gqi(priv)) { in gve_turnup()
1167 iowrite32be(0, gve_irq_doorbell(priv, block)); in gve_turnup()
1169 gve_set_itr_coalesce_usecs_dqo(priv, block, in gve_turnup()
1170 priv->rx_coalesce_usecs); in gve_turnup()
1174 gve_set_napi_enabled(priv); in gve_turnup()
1181 struct gve_priv *priv; in gve_tx_timeout() local
1187 priv = netdev_priv(dev); in gve_tx_timeout()
1188 if (txqueue > priv->tx_cfg.num_queues) in gve_tx_timeout()
1191 ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue); in gve_tx_timeout()
1192 if (ntfy_idx >= priv->num_ntfy_blks) in gve_tx_timeout()
1195 block = &priv->ntfy_blocks[ntfy_idx]; in gve_tx_timeout()
1205 last_nic_done = gve_tx_load_event_counter(priv, tx); in gve_tx_timeout()
1208 iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block)); in gve_tx_timeout()
1215 gve_schedule_reset(priv); in gve_tx_timeout()
1220 priv->tx_timeo_cnt++; in gve_tx_timeout()
1227 struct gve_priv *priv = netdev_priv(netdev); in gve_set_features() local
1254 netif_err(priv, drv, netdev, in gve_set_features()
1268 static void gve_handle_status(struct gve_priv *priv, u32 status) in gve_handle_status() argument
1271 dev_info(&priv->pdev->dev, "Device requested reset.\n"); in gve_handle_status()
1272 gve_set_do_reset(priv); in gve_handle_status()
1275 priv->stats_report_trigger_cnt++; in gve_handle_status()
1276 gve_set_do_report_stats(priv); in gve_handle_status()
1280 static void gve_handle_reset(struct gve_priv *priv) in gve_handle_reset() argument
1286 if (gve_get_probe_in_progress(priv)) in gve_handle_reset()
1289 if (gve_get_do_reset(priv)) { in gve_handle_reset()
1291 gve_reset(priv, false); in gve_handle_reset()
1296 void gve_handle_report_stats(struct gve_priv *priv) in gve_handle_report_stats() argument
1298 struct stats *stats = priv->stats_report->stats; in gve_handle_report_stats()
1303 if (!gve_get_report_stats(priv)) in gve_handle_report_stats()
1306 be64_add_cpu(&priv->stats_report->written_count, 1); in gve_handle_report_stats()
1308 if (priv->tx) { in gve_handle_report_stats()
1309 for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) { in gve_handle_report_stats()
1314 if (gve_is_gqi(priv)) { in gve_handle_report_stats()
1315 last_completion = priv->tx[idx].done; in gve_handle_report_stats()
1316 tx_frames = priv->tx[idx].req; in gve_handle_report_stats()
1320 start = u64_stats_fetch_begin(&priv->tx[idx].statss); in gve_handle_report_stats()
1321 tx_bytes = priv->tx[idx].bytes_done; in gve_handle_report_stats()
1322 } while (u64_stats_fetch_retry(&priv->tx[idx].statss, start)); in gve_handle_report_stats()
1325 .value = cpu_to_be64(priv->tx[idx].wake_queue), in gve_handle_report_stats()
1330 .value = cpu_to_be64(priv->tx[idx].stop_queue), in gve_handle_report_stats()
1350 .value = cpu_to_be64(priv->tx[idx].queue_timeout), in gve_handle_report_stats()
1356 if (priv->rx) { in gve_handle_report_stats()
1357 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) { in gve_handle_report_stats()
1360 .value = cpu_to_be64(priv->rx[idx].desc.seqno), in gve_handle_report_stats()
1365 .value = cpu_to_be64(priv->rx[0].fill_cnt), in gve_handle_report_stats()
1372 static void gve_handle_link_status(struct gve_priv *priv, bool link_status) in gve_handle_link_status() argument
1374 if (!gve_get_napi_enabled(priv)) in gve_handle_link_status()
1377 if (link_status == netif_carrier_ok(priv->dev)) in gve_handle_link_status()
1381 netdev_info(priv->dev, "Device link is up.\n"); in gve_handle_link_status()
1382 netif_carrier_on(priv->dev); in gve_handle_link_status()
1384 netdev_info(priv->dev, "Device link is down.\n"); in gve_handle_link_status()
1385 netif_carrier_off(priv->dev); in gve_handle_link_status()
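gve_handle_link_status only toggles the carrier when the device-reported state differs from what the stack already believes. A small hypothetical sketch of that check:

#include <linux/netdevice.h>

/* Hypothetical link handler: skip the update when nothing changed, so the
 * log and the carrier state only move on real transitions.
 */
static void example_handle_link_status(struct net_device *dev, bool link_up)
{
        if (link_up == netif_carrier_ok(dev))
                return;

        if (link_up) {
                netdev_info(dev, "link is up\n");
                netif_carrier_on(dev);
        } else {
                netdev_info(dev, "link is down\n");
                netif_carrier_off(dev);
        }
}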
1392 struct gve_priv *priv = container_of(work, struct gve_priv, in gve_service_task() local
1394 u32 status = ioread32be(&priv->reg_bar0->device_status); in gve_service_task()
1396 gve_handle_status(priv, status); in gve_service_task()
1398 gve_handle_reset(priv); in gve_service_task()
1399 gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status); in gve_service_task()
1402 static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device) in gve_init_priv() argument
1408 err = gve_adminq_alloc(&priv->pdev->dev, priv); in gve_init_priv()
1410 dev_err(&priv->pdev->dev, in gve_init_priv()
1415 err = gve_verify_driver_compatibility(priv); in gve_init_priv()
1417 dev_err(&priv->pdev->dev, in gve_init_priv()
1425 priv->queue_format = GVE_QUEUE_FORMAT_UNSPECIFIED; in gve_init_priv()
1427 err = gve_adminq_describe_device(priv); in gve_init_priv()
1429 dev_err(&priv->pdev->dev, in gve_init_priv()
1433 priv->dev->mtu = priv->dev->max_mtu; in gve_init_priv()
1434 num_ntfy = pci_msix_vec_count(priv->pdev); in gve_init_priv()
1436 dev_err(&priv->pdev->dev, in gve_init_priv()
1441 dev_err(&priv->pdev->dev, "gve needs at least %d MSI-x vectors, but only has %d\n", in gve_init_priv()
1447 priv->num_registered_pages = 0; in gve_init_priv()
1448 priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK; in gve_init_priv()
1452 priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1; in gve_init_priv()
1453 priv->mgmt_msix_idx = priv->num_ntfy_blks; in gve_init_priv()
1455 priv->tx_cfg.max_queues = in gve_init_priv()
1456 min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2); in gve_init_priv()
1457 priv->rx_cfg.max_queues = in gve_init_priv()
1458 min_t(int, priv->rx_cfg.max_queues, priv->num_ntfy_blks / 2); in gve_init_priv()
1460 priv->tx_cfg.num_queues = priv->tx_cfg.max_queues; in gve_init_priv()
1461 priv->rx_cfg.num_queues = priv->rx_cfg.max_queues; in gve_init_priv()
1462 if (priv->default_num_queues > 0) { in gve_init_priv()
1463 priv->tx_cfg.num_queues = min_t(int, priv->default_num_queues, in gve_init_priv()
1464 priv->tx_cfg.num_queues); in gve_init_priv()
1465 priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues, in gve_init_priv()
1466 priv->rx_cfg.num_queues); in gve_init_priv()
1469 dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n", in gve_init_priv()
1470 priv->tx_cfg.num_queues, priv->rx_cfg.num_queues); in gve_init_priv()
1471 dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n", in gve_init_priv()
1472 priv->tx_cfg.max_queues, priv->rx_cfg.max_queues); in gve_init_priv()
1474 if (!gve_is_gqi(priv)) { in gve_init_priv()
1475 priv->tx_coalesce_usecs = GVE_TX_IRQ_RATELIMIT_US_DQO; in gve_init_priv()
1476 priv->rx_coalesce_usecs = GVE_RX_IRQ_RATELIMIT_US_DQO; in gve_init_priv()
1480 err = gve_setup_device_resources(priv); in gve_init_priv()
1484 gve_adminq_free(&priv->pdev->dev, priv); in gve_init_priv()
1488 static void gve_teardown_priv_resources(struct gve_priv *priv) in gve_teardown_priv_resources() argument
1490 gve_teardown_device_resources(priv); in gve_teardown_priv_resources()
1491 gve_adminq_free(&priv->pdev->dev, priv); in gve_teardown_priv_resources()
1494 static void gve_trigger_reset(struct gve_priv *priv) in gve_trigger_reset() argument
1497 gve_adminq_release(priv); in gve_trigger_reset()
1500 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up) in gve_reset_and_teardown() argument
1502 gve_trigger_reset(priv); in gve_reset_and_teardown()
1505 gve_close(priv->dev); in gve_reset_and_teardown()
1506 gve_teardown_priv_resources(priv); in gve_reset_and_teardown()
1509 static int gve_reset_recovery(struct gve_priv *priv, bool was_up) in gve_reset_recovery() argument
1513 err = gve_init_priv(priv, true); in gve_reset_recovery()
1517 err = gve_open(priv->dev); in gve_reset_recovery()
1523 dev_err(&priv->pdev->dev, "Reset failed! !!! DISABLING ALL QUEUES !!!\n"); in gve_reset_recovery()
1524 gve_turndown(priv); in gve_reset_recovery()
1528 int gve_reset(struct gve_priv *priv, bool attempt_teardown) in gve_reset() argument
1530 bool was_up = netif_carrier_ok(priv->dev); in gve_reset()
1533 dev_info(&priv->pdev->dev, "Performing reset\n"); in gve_reset()
1534 gve_clear_do_reset(priv); in gve_reset()
1535 gve_set_reset_in_progress(priv); in gve_reset()
1540 gve_turndown(priv); in gve_reset()
1541 gve_reset_and_teardown(priv, was_up); in gve_reset()
1545 err = gve_close(priv->dev); in gve_reset()
1548 gve_reset_and_teardown(priv, was_up); in gve_reset()
1551 gve_teardown_priv_resources(priv); in gve_reset()
1555 err = gve_reset_recovery(priv, was_up); in gve_reset()
1556 gve_clear_reset_in_progress(priv); in gve_reset()
1557 priv->reset_cnt++; in gve_reset()
1558 priv->interface_up_cnt = 0; in gve_reset()
1559 priv->interface_down_cnt = 0; in gve_reset()
1560 priv->stats_report_trigger_cnt = 0; in gve_reset()
1587 struct gve_priv *priv; in gve_probe() local
1625 dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues); in gve_probe()
1654 priv = netdev_priv(dev); in gve_probe()
1655 priv->dev = dev; in gve_probe()
1656 priv->pdev = pdev; in gve_probe()
1657 priv->msg_enable = DEFAULT_MSG_LEVEL; in gve_probe()
1658 priv->reg_bar0 = reg_bar; in gve_probe()
1659 priv->db_bar2 = db_bar; in gve_probe()
1660 priv->service_task_flags = 0x0; in gve_probe()
1661 priv->state_flags = 0x0; in gve_probe()
1662 priv->ethtool_flags = 0x0; in gve_probe()
1664 gve_set_probe_in_progress(priv); in gve_probe()
1665 priv->gve_wq = alloc_ordered_workqueue("gve", 0); in gve_probe()
1666 if (!priv->gve_wq) { in gve_probe()
1671 INIT_WORK(&priv->service_task, gve_service_task); in gve_probe()
1672 INIT_WORK(&priv->stats_report_task, gve_stats_report_task); in gve_probe()
1673 priv->tx_cfg.max_queues = max_tx_queues; in gve_probe()
1674 priv->rx_cfg.max_queues = max_rx_queues; in gve_probe()
1676 err = gve_init_priv(priv, false); in gve_probe()
1685 dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format); in gve_probe()
1686 gve_clear_probe_in_progress(priv); in gve_probe()
1687 queue_work(priv->gve_wq, &priv->service_task); in gve_probe()
1691 gve_teardown_priv_resources(priv); in gve_probe()
1694 destroy_workqueue(priv->gve_wq); in gve_probe()
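gve_probe reserves room for struct gve_priv behind the net_device via alloc_etherdev_mqs() and then reaches it with netdev_priv(). A hedged sketch of that allocation pattern with a stand-in private struct:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Hypothetical probe-time allocation: the private area netdev_priv()
 * returns lives in the same allocation as the net_device itself.
 */
struct example_probe_priv {
        struct net_device *dev;
        u32 msg_enable;
};

static struct net_device *example_alloc_netdev(unsigned int txqs,
                                               unsigned int rxqs)
{
        struct net_device *dev;
        struct example_probe_priv *priv;

        dev = alloc_etherdev_mqs(sizeof(*priv), txqs, rxqs);
        if (!dev)
                return NULL;

        priv = netdev_priv(dev);
        priv->dev = dev;
        return dev;
}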
1716 struct gve_priv *priv = netdev_priv(netdev); in gve_remove() local
1717 __be32 __iomem *db_bar = priv->db_bar2; in gve_remove()
1718 void __iomem *reg_bar = priv->reg_bar0; in gve_remove()
1721 gve_teardown_priv_resources(priv); in gve_remove()
1722 destroy_workqueue(priv->gve_wq); in gve_remove()
1733 struct gve_priv *priv = netdev_priv(netdev); in gve_shutdown() local
1734 bool was_up = netif_carrier_ok(priv->dev); in gve_shutdown()
1737 if (was_up && gve_close(priv->dev)) { in gve_shutdown()
1739 gve_reset_and_teardown(priv, was_up); in gve_shutdown()
1742 gve_teardown_priv_resources(priv); in gve_shutdown()
1751 struct gve_priv *priv = netdev_priv(netdev); in gve_suspend() local
1752 bool was_up = netif_carrier_ok(priv->dev); in gve_suspend()
1754 priv->suspend_cnt++; in gve_suspend()
1756 if (was_up && gve_close(priv->dev)) { in gve_suspend()
1758 gve_reset_and_teardown(priv, was_up); in gve_suspend()
1761 gve_teardown_priv_resources(priv); in gve_suspend()
1763 priv->up_before_suspend = was_up; in gve_suspend()
1771 struct gve_priv *priv = netdev_priv(netdev); in gve_resume() local
1774 priv->resume_cnt++; in gve_resume()
1776 err = gve_reset_recovery(priv, priv->up_before_suspend); in gve_resume()