Lines Matching refs:pf
258 return vf->pf->vsi[vf->lan_vsi_idx]; in ice_get_vf_vsi()
266 static int ice_validate_vf_id(struct ice_pf *pf, u16 vf_id) in ice_validate_vf_id() argument
269 if (vf_id >= pf->num_alloc_vfs) { in ice_validate_vf_id()
270 dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %u\n", vf_id); in ice_validate_vf_id()
281 static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf) in ice_check_vf_init() argument
284 dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n", in ice_check_vf_init()
331 ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode, in ice_vc_vf_broadcast() argument
334 struct ice_hw *hw = &pf->hw; in ice_vc_vf_broadcast()
337 ice_for_each_vf(pf, i) { in ice_vc_vf_broadcast()
338 struct ice_vf *vf = &pf->vf[i]; in ice_vc_vf_broadcast()
397 struct ice_pf *pf = vf->pf; in ice_is_vf_link_up() local
399 if (ice_check_vf_init(pf, vf)) in ice_is_vf_link_up()
407 return pf->hw.port_info->phy.link_info.link_info & in ice_is_vf_link_up()
420 struct ice_hw *hw = &vf->pf->hw; in ice_vc_notify_vf_link_state()
471 ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]); in ice_vf_ctrl_vsi_release()
481 struct ice_pf *pf = vf->pf; in ice_free_vf_res() local
499 last_vector_idx = vf->first_vector_idx + pf->num_msix_per_vf - 1; in ice_free_vf_res()
507 wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M); in ice_free_vf_res()
508 ice_flush(&pf->hw); in ice_free_vf_res()
521 struct ice_pf *pf = vf->pf; in ice_dis_vf_mappings() local
527 hw = &pf->hw; in ice_dis_vf_mappings()
530 dev = ice_pf_to_dev(pf); in ice_dis_vf_mappings()
535 last = first + pf->num_msix_per_vf - 1; in ice_dis_vf_mappings()
566 static int ice_sriov_free_msix_res(struct ice_pf *pf) in ice_sriov_free_msix_res() argument
570 if (!pf) in ice_sriov_free_msix_res()
573 res = pf->irq_tracker; in ice_sriov_free_msix_res()
578 WARN_ON(pf->sriov_base_vector < res->num_entries); in ice_sriov_free_msix_res()
580 pf->sriov_base_vector = 0; in ice_sriov_free_msix_res()
614 void ice_free_vfs(struct ice_pf *pf) in ice_free_vfs() argument
616 struct device *dev = ice_pf_to_dev(pf); in ice_free_vfs()
617 struct ice_hw *hw = &pf->hw; in ice_free_vfs()
620 set_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state); in ice_free_vfs()
622 if (!pf->vf) in ice_free_vfs()
625 ice_eswitch_release(pf); in ice_free_vfs()
627 while (test_and_set_bit(ICE_VF_DIS, pf->state)) in ice_free_vfs()
634 if (!pci_vfs_assigned(pf->pdev)) in ice_free_vfs()
635 pci_disable_sriov(pf->pdev); in ice_free_vfs()
640 ice_for_each_vf(pf, i) in ice_free_vfs()
641 ice_dis_vf_qs(&pf->vf[i]); in ice_free_vfs()
643 tmp = pf->num_alloc_vfs; in ice_free_vfs()
644 pf->num_qps_per_vf = 0; in ice_free_vfs()
645 pf->num_alloc_vfs = 0; in ice_free_vfs()
647 if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) { in ice_free_vfs()
649 ice_dis_vf_mappings(&pf->vf[i]); in ice_free_vfs()
650 set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states); in ice_free_vfs()
651 ice_free_vf_res(&pf->vf[i]); in ice_free_vfs()
654 mutex_destroy(&pf->vf[i].cfg_lock); in ice_free_vfs()
657 if (ice_sriov_free_msix_res(pf)) in ice_free_vfs()
660 devm_kfree(dev, pf->vf); in ice_free_vfs()
661 pf->vf = NULL; in ice_free_vfs()
667 if (!pci_vfs_assigned(pf->pdev)) { in ice_free_vfs()
684 if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, in ice_free_vfs()
689 clear_bit(ICE_VF_DIS, pf->state); in ice_free_vfs()
690 clear_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state); in ice_free_vfs()
691 clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags); in ice_free_vfs()
706 struct ice_pf *pf = vf->pf; in ice_trigger_vf_reset() local
712 dev = ice_pf_to_dev(pf); in ice_trigger_vf_reset()
713 hw = &pf->hw; in ice_trigger_vf_reset()
820 return vf->pf->hw.port_info; in ice_vf_get_port_info()
833 struct ice_pf *pf = vf->pf; in ice_vf_vsi_setup() local
836 vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id, NULL); in ice_vf_vsi_setup()
839 dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n"); in ice_vf_vsi_setup()
860 struct ice_pf *pf = vf->pf; in ice_vf_ctrl_vsi_setup() local
863 vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf->vf_id, NULL); in ice_vf_ctrl_vsi_setup()
865 dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n"); in ice_vf_ctrl_vsi_setup()
884 static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf) in ice_calc_vf_first_vector_idx() argument
886 return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf; in ice_calc_vf_first_vector_idx()
898 struct device *dev = ice_pf_to_dev(vf->pf); in ice_vf_rebuild_host_tx_rate_cfg()
932 struct device *dev = ice_pf_to_dev(vf->pf); in ice_vf_rebuild_host_vlan_cfg()
969 struct device *dev = ice_pf_to_dev(vf->pf); in ice_vf_rebuild_host_mac_cfg()
974 if (ice_is_eswitch_mode_switchdev(vf->pf)) in ice_vf_rebuild_host_mac_cfg()
1028 struct ice_pf *pf = vf->pf; in ice_ena_vf_msix_mappings() local
1033 hw = &pf->hw; in ice_ena_vf_msix_mappings()
1035 pf_based_last_msix = (pf_based_first_msix + pf->num_msix_per_vf) - 1; in ice_ena_vf_msix_mappings()
1038 pf->hw.func_caps.common_cap.msix_vector_first_id; in ice_ena_vf_msix_mappings()
1040 (device_based_first_msix + pf->num_msix_per_vf) - 1; in ice_ena_vf_msix_mappings()
1076 struct device *dev = ice_pf_to_dev(vf->pf); in ice_ena_vf_q_mappings()
1078 struct ice_hw *hw = &vf->pf->hw; in ice_ena_vf_q_mappings()
1141 ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res) in ice_determine_res() argument
1157 num_all_res = pf->num_alloc_vfs * res; in ice_determine_res()
1176 struct ice_pf *pf; in ice_calc_vf_reg_idx() local
1181 pf = vf->pf; in ice_calc_vf_reg_idx()
1184 return pf->sriov_base_vector + pf->num_msix_per_vf * vf->vf_id + in ice_calc_vf_reg_idx()
1226 static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed) in ice_sriov_set_msix_res() argument
1228 u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; in ice_sriov_set_msix_res()
1229 int vectors_used = pf->irq_tracker->num_entries; in ice_sriov_set_msix_res()
1240 pf->sriov_base_vector = sriov_base_vector; in ice_sriov_set_msix_res()
1266 static int ice_set_per_vf_res(struct ice_pf *pf) in ice_set_per_vf_res() argument
1268 int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker); in ice_set_per_vf_res()
1270 struct device *dev = ice_pf_to_dev(pf); in ice_set_per_vf_res()
1273 if (!pf->num_alloc_vfs || max_valid_res_idx < 0) in ice_set_per_vf_res()
1277 msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors - in ice_set_per_vf_res()
1278 pf->irq_tracker->num_entries; in ice_set_per_vf_res()
1279 msix_avail_per_vf = msix_avail_for_sriov / pf->num_alloc_vfs; in ice_set_per_vf_res()
1291 pf->num_alloc_vfs); in ice_set_per_vf_res()
1296 num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf), in ice_set_per_vf_res()
1302 num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf), in ice_set_per_vf_res()
1310 ICE_MIN_QS_PER_VF, pf->num_alloc_vfs); in ice_set_per_vf_res()
1314 if (ice_sriov_set_msix_res(pf, num_msix_per_vf * pf->num_alloc_vfs)) { in ice_set_per_vf_res()
1316 pf->num_alloc_vfs); in ice_set_per_vf_res()
1321 pf->num_qps_per_vf = min_t(int, num_txq, num_rxq); in ice_set_per_vf_res()
1322 pf->num_msix_per_vf = num_msix_per_vf; in ice_set_per_vf_res()
1324 pf->num_alloc_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf); in ice_set_per_vf_res()
1335 struct ice_hw *hw = &vf->pf->hw; in ice_clear_vf_reset_trigger()
1358 struct ice_pf *pf = vf->pf; in ice_vf_set_vsi_promisc() local
1362 hw = &pf->hw; in ice_vf_set_vsi_promisc()
1417 struct ice_pf *pf = vsi->back; in ice_vf_rebuild_aggregator_node_cfg() local
1424 dev = ice_pf_to_dev(pf); in ice_vf_rebuild_aggregator_node_cfg()
1432 status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id, in ice_vf_rebuild_aggregator_node_cfg()
1447 struct device *dev = ice_pf_to_dev(vf->pf); in ice_vf_rebuild_host_cfg()
1494 struct ice_pf *pf = vf->pf; in ice_vf_rebuild_vsi() local
1497 dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n", in ice_vf_rebuild_vsi()
1504 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); in ice_vf_rebuild_vsi()
1532 struct ice_pf *pf = vf->pf; in ice_vf_post_vsi_rebuild() local
1535 hw = &pf->hw; in ice_vf_post_vsi_rebuild()
1556 bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) in ice_reset_all_vfs() argument
1558 struct device *dev = ice_pf_to_dev(pf); in ice_reset_all_vfs()
1559 struct ice_hw *hw = &pf->hw; in ice_reset_all_vfs()
1564 if (!pf->num_alloc_vfs) in ice_reset_all_vfs()
1568 ice_for_each_vf(pf, i) in ice_reset_all_vfs()
1569 if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, i)) in ice_reset_all_vfs()
1573 if (test_and_set_bit(ICE_VF_DIS, pf->state)) in ice_reset_all_vfs()
1577 ice_for_each_vf(pf, v) in ice_reset_all_vfs()
1578 ice_trigger_vf_reset(&pf->vf[v], is_vflr, true); in ice_reset_all_vfs()
1586 for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) { in ice_reset_all_vfs()
1588 while (v < pf->num_alloc_vfs) { in ice_reset_all_vfs()
1591 vf = &pf->vf[v]; in ice_reset_all_vfs()
1609 if (v < pf->num_alloc_vfs) in ice_reset_all_vfs()
1613 ice_for_each_vf(pf, v) { in ice_reset_all_vfs()
1614 vf = &pf->vf[v]; in ice_reset_all_vfs()
1632 if (ice_is_eswitch_mode_switchdev(pf)) in ice_reset_all_vfs()
1633 if (ice_eswitch_rebuild(pf)) in ice_reset_all_vfs()
1637 clear_bit(ICE_VF_DIS, pf->state); in ice_reset_all_vfs()
1650 struct ice_pf *pf = vf->pf; in ice_is_vf_disabled() local
1657 return (test_bit(ICE_VF_DIS, pf->state) || in ice_is_vf_disabled()
1671 struct ice_pf *pf = vf->pf; in ice_reset_vf() local
1680 dev = ice_pf_to_dev(pf); in ice_reset_vf()
1682 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) { in ice_reset_vf()
1708 hw = &pf->hw; in ice_reset_vf()
1770 if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, vf->vf_id)) in ice_reset_vf()
1780 void ice_vc_notify_link_state(struct ice_pf *pf) in ice_vc_notify_link_state() argument
1784 ice_for_each_vf(pf, i) in ice_vc_notify_link_state()
1785 ice_vc_notify_vf_link_state(&pf->vf[i]); in ice_vc_notify_link_state()
1794 void ice_vc_notify_reset(struct ice_pf *pf) in ice_vc_notify_reset() argument
1798 if (!pf->num_alloc_vfs) in ice_vc_notify_reset()
1803 ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS, in ice_vc_notify_reset()
1814 struct ice_pf *pf; in ice_vc_notify_vf_reset() local
1819 pf = vf->pf; in ice_vc_notify_vf_reset()
1820 if (ice_validate_vf_id(pf, vf->vf_id)) in ice_vc_notify_vf_reset()
1833 ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT, in ice_vc_notify_vf_reset()
1847 struct ice_pf *pf = vf->pf; in ice_init_vf_vsi_res() local
1854 vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf); in ice_init_vf_vsi_res()
1856 dev = ice_pf_to_dev(pf); in ice_init_vf_vsi_res()
1890 static int ice_start_vfs(struct ice_pf *pf) in ice_start_vfs() argument
1892 struct ice_hw *hw = &pf->hw; in ice_start_vfs()
1895 ice_for_each_vf(pf, i) { in ice_start_vfs()
1896 struct ice_vf *vf = &pf->vf[i]; in ice_start_vfs()
1902 dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n", in ice_start_vfs()
1917 struct ice_vf *vf = &pf->vf[i]; in ice_start_vfs()
1930 static void ice_set_dflt_settings_vfs(struct ice_pf *pf) in ice_set_dflt_settings_vfs() argument
1934 ice_for_each_vf(pf, i) { in ice_set_dflt_settings_vfs()
1935 struct ice_vf *vf = &pf->vf[i]; in ice_set_dflt_settings_vfs()
1937 vf->pf = pf; in ice_set_dflt_settings_vfs()
1939 vf->vf_sw_id = pf->first_sw; in ice_set_dflt_settings_vfs()
1943 vf->num_vf_qs = pf->num_qps_per_vf; in ice_set_dflt_settings_vfs()
1963 static int ice_alloc_vfs(struct ice_pf *pf, int num_vfs) in ice_alloc_vfs() argument
1967 vfs = devm_kcalloc(ice_pf_to_dev(pf), num_vfs, sizeof(*vfs), in ice_alloc_vfs()
1972 pf->vf = vfs; in ice_alloc_vfs()
1973 pf->num_alloc_vfs = num_vfs; in ice_alloc_vfs()
1983 static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) in ice_ena_vfs() argument
1985 struct device *dev = ice_pf_to_dev(pf); in ice_ena_vfs()
1986 struct ice_hw *hw = &pf->hw; in ice_ena_vfs()
1990 wr32(hw, GLINT_DYN_CTL(pf->oicr_idx), in ice_ena_vfs()
1992 set_bit(ICE_OICR_INTR_DIS, pf->state); in ice_ena_vfs()
1995 ret = pci_enable_sriov(pf->pdev, num_vfs); in ice_ena_vfs()
1997 pf->num_alloc_vfs = 0; in ice_ena_vfs()
2001 ret = ice_alloc_vfs(pf, num_vfs); in ice_ena_vfs()
2005 if (ice_set_per_vf_res(pf)) { in ice_ena_vfs()
2012 ice_set_dflt_settings_vfs(pf); in ice_ena_vfs()
2014 if (ice_start_vfs(pf)) { in ice_ena_vfs()
2020 clear_bit(ICE_VF_DIS, pf->state); in ice_ena_vfs()
2022 ret = ice_eswitch_configure(pf); in ice_ena_vfs()
2027 if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state)) in ice_ena_vfs()
2033 devm_kfree(dev, pf->vf); in ice_ena_vfs()
2034 pf->vf = NULL; in ice_ena_vfs()
2035 pf->num_alloc_vfs = 0; in ice_ena_vfs()
2037 pci_disable_sriov(pf->pdev); in ice_ena_vfs()
2041 clear_bit(ICE_OICR_INTR_DIS, pf->state); in ice_ena_vfs()
2052 static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs) in ice_pci_sriov_ena() argument
2054 int pre_existing_vfs = pci_num_vf(pf->pdev); in ice_pci_sriov_ena()
2055 struct device *dev = ice_pf_to_dev(pf); in ice_pci_sriov_ena()
2059 ice_free_vfs(pf); in ice_pci_sriov_ena()
2063 if (num_vfs > pf->num_vfs_supported) { in ice_pci_sriov_ena()
2065 num_vfs, pf->num_vfs_supported); in ice_pci_sriov_ena()
2070 err = ice_ena_vfs(pf, num_vfs); in ice_pci_sriov_ena()
2076 set_bit(ICE_FLAG_SRIOV_ENA, pf->flags); in ice_pci_sriov_ena()
2084 static int ice_check_sriov_allowed(struct ice_pf *pf) in ice_check_sriov_allowed() argument
2086 struct device *dev = ice_pf_to_dev(pf); in ice_check_sriov_allowed()
2088 if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) { in ice_check_sriov_allowed()
2093 if (ice_is_safe_mode(pf)) { in ice_check_sriov_allowed()
2098 if (!ice_pf_state_is_nominal(pf)) { in ice_check_sriov_allowed()
2117 struct ice_pf *pf = pci_get_drvdata(pdev); in ice_sriov_configure() local
2118 struct device *dev = ice_pf_to_dev(pf); in ice_sriov_configure()
2122 err = ice_check_sriov_allowed(pf); in ice_sriov_configure()
2128 ice_mbx_deinit_snapshot(&pf->hw); in ice_sriov_configure()
2129 ice_free_vfs(pf); in ice_sriov_configure()
2130 if (pf->lag) in ice_sriov_configure()
2131 ice_enable_lag(pf->lag); in ice_sriov_configure()
2139 status = ice_mbx_init_snapshot(&pf->hw, num_vfs); in ice_sriov_configure()
2143 err = ice_pci_sriov_ena(pf, num_vfs); in ice_sriov_configure()
2145 ice_mbx_deinit_snapshot(&pf->hw); in ice_sriov_configure()
2149 if (pf->lag) in ice_sriov_configure()
2150 ice_disable_lag(pf->lag); in ice_sriov_configure()
2161 void ice_process_vflr_event(struct ice_pf *pf) in ice_process_vflr_event() argument
2163 struct ice_hw *hw = &pf->hw; in ice_process_vflr_event()
2167 if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) || in ice_process_vflr_event()
2168 !pf->num_alloc_vfs) in ice_process_vflr_event()
2171 ice_for_each_vf(pf, vf_id) { in ice_process_vflr_event()
2172 struct ice_vf *vf = &pf->vf[vf_id]; in ice_process_vflr_event()
2203 static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq) in ice_get_vf_from_pfq() argument
2207 ice_for_each_vf(pf, vf_id) { in ice_get_vf_from_pfq()
2208 struct ice_vf *vf = &pf->vf[vf_id]; in ice_get_vf_from_pfq()
2227 static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq) in ice_globalq_to_pfq() argument
2229 return globalq - pf->hw.func_caps.common_cap.rxq_first_id; in ice_globalq_to_pfq()
2242 ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) in ice_vf_lan_overflow_event() argument
2248 dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq); in ice_vf_lan_overflow_event()
2254 vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue)); in ice_vf_lan_overflow_event()
2277 struct ice_pf *pf; in ice_vc_send_msg_to_vf() local
2282 pf = vf->pf; in ice_vc_send_msg_to_vf()
2283 if (ice_validate_vf_id(pf, vf->vf_id)) in ice_vc_send_msg_to_vf()
2286 dev = ice_pf_to_dev(pf); in ice_vc_send_msg_to_vf()
2306 aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval, in ice_vc_send_msg_to_vf()
2308 if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) { in ice_vc_send_msg_to_vf()
2311 ice_aq_str(pf->hw.mailboxq.sq_last_status)); in ice_vc_send_msg_to_vf()
2374 struct ice_pf *pf = vf->pf; in ice_vc_get_vf_res_msg() local
2379 if (ice_check_vf_init(pf, vf)) { in ice_vc_get_vf_res_msg()
2451 vfres->max_vectors = pf->num_msix_per_vf; in ice_vc_get_vf_res_msg()
2500 static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id) in ice_find_vsi_from_id() argument
2504 ice_for_each_vsi(pf, i) in ice_find_vsi_from_id()
2505 if (pf->vsi[i] && pf->vsi[i]->vsi_num == id) in ice_find_vsi_from_id()
2506 return pf->vsi[i]; in ice_find_vsi_from_id()
2520 struct ice_pf *pf = vf->pf; in ice_vc_isvalid_vsi_id() local
2523 vsi = ice_find_vsi_from_id(pf, vsi_id); in ice_vc_isvalid_vsi_id()
2538 struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id); in ice_vc_isvalid_q_id()
2656 struct device *dev = ice_pf_to_dev(vf->pf); in ice_vc_handle_rss_cfg()
2657 struct ice_hw *hw = &vf->pf->hw; in ice_vc_handle_rss_cfg()
2660 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { in ice_vc_handle_rss_cfg()
2803 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { in ice_vc_config_rss_key()
2849 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { in ice_vc_config_rss_lut()
2895 struct ice_pf *pf; in ice_check_vf_ready_for_cfg() local
2902 pf = vf->pf; in ice_check_vf_ready_for_cfg()
2903 if (ice_check_vf_init(pf, vf)) in ice_check_vf_ready_for_cfg()
2920 struct ice_pf *pf = np->vsi->back; in ice_set_vf_spoofchk() local
2928 dev = ice_pf_to_dev(pf); in ice_set_vf_spoofchk()
2929 if (ice_validate_vf_id(pf, vf_id)) in ice_set_vf_spoofchk()
2932 vf = &pf->vf[vf_id]; in ice_set_vf_spoofchk()
2973 status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL); in ice_set_vf_spoofchk()
2998 bool ice_is_any_vf_in_promisc(struct ice_pf *pf) in ice_is_any_vf_in_promisc() argument
3002 ice_for_each_vf(pf, vf_idx) { in ice_is_any_vf_in_promisc()
3003 struct ice_vf *vf = &pf->vf[vf_idx]; in ice_is_any_vf_in_promisc()
3028 struct ice_pf *pf = vf->pf; in ice_vc_cfg_promiscuous_mode_msg() local
3049 dev = ice_pf_to_dev(pf); in ice_vc_cfg_promiscuous_mode_msg()
3077 if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) { in ice_vc_cfg_promiscuous_mode_msg()
3080 if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw)) in ice_vc_cfg_promiscuous_mode_msg()
3084 ret = ice_set_dflt_vsi(pf->first_sw, vsi); in ice_vc_cfg_promiscuous_mode_msg()
3086 ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) in ice_vc_cfg_promiscuous_mode_msg()
3090 ret = ice_clear_dflt_vsi(pf->first_sw); in ice_vc_cfg_promiscuous_mode_msg()
3521 struct ice_pf *pf = vf->pf; in ice_vc_cfg_irq_map_msg() local
3533 pf->num_msix_per_vf < num_q_vectors_mapped || in ice_vc_cfg_irq_map_msg()
3555 if (!(vector_id < pf->num_msix_per_vf) || in ice_vc_cfg_irq_map_msg()
3601 struct ice_pf *pf = vf->pf; in ice_vc_cfg_qs_msg() local
3623 dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n", in ice_vc_cfg_qs_msg()
3814 struct device *dev = ice_pf_to_dev(vf->pf); in ice_vc_add_mac_addr()
3914 struct device *dev = ice_pf_to_dev(vf->pf); in ice_vc_del_mac_addr()
3957 struct ice_pf *pf = vf->pf; in ice_vc_handle_mac_addr_msg() local
3982 …dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the… in ice_vc_handle_mac_addr_msg()
4056 struct ice_pf *pf = vf->pf; in ice_vc_request_qs_msg() local
4062 dev = ice_pf_to_dev(pf); in ice_vc_request_qs_msg()
4069 tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf), in ice_vc_request_qs_msg()
4070 ice_get_avail_rxq_count(pf)); in ice_vc_request_qs_msg()
4114 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_set_vf_port_vlan() local
4120 dev = ice_pf_to_dev(pf); in ice_set_vf_port_vlan()
4121 if (ice_validate_vf_id(pf, vf_id)) in ice_set_vf_port_vlan()
4135 vf = &pf->vf[vf_id]; in ice_set_vf_port_vlan()
4188 struct ice_pf *pf = vf->pf; in ice_vc_process_vlan_msg() local
4197 dev = ice_pf_to_dev(pf); in ice_vc_process_vlan_msg()
4222 hw = &pf->hw; in ice_vc_process_vlan_msg()
4246 test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) in ice_vc_process_vlan_msg()
4517 struct ice_pf *pf; in ice_vc_repr_add_mac() local
4526 pf = vf->pf; in ice_vc_repr_add_mac()
4542 dev_err(ice_pf_to_dev(pf), "VF attempting to override administratively set MAC address\n"); in ice_vc_repr_add_mac()
4579 dev_dbg(ice_pf_to_dev(vf->pf), in ice_vc_repr_add_vlan()
4587 dev_dbg(ice_pf_to_dev(vf->pf), in ice_vc_repr_del_vlan()
4595 dev_dbg(ice_pf_to_dev(vf->pf), in ice_vc_repr_ena_vlan_stripping()
4605 dev_dbg(ice_pf_to_dev(vf->pf), in ice_vc_repr_dis_vlan_stripping()
4616 dev_dbg(ice_pf_to_dev(vf->pf), in ice_vc_repr_cfg_promiscuous_mode()
4643 void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) in ice_vc_process_vf_msg() argument
4655 if (test_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state)) in ice_vc_process_vf_msg()
4658 dev = ice_pf_to_dev(pf); in ice_vc_process_vf_msg()
4659 if (ice_validate_vf_id(pf, vf_id)) { in ice_vc_process_vf_msg()
4664 vf = &pf->vf[vf_id]; in ice_vc_process_vf_msg()
4811 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_get_vf_cfg() local
4814 if (ice_validate_vf_id(pf, vf_id)) in ice_get_vf_cfg()
4817 vf = &pf->vf[vf_id]; in ice_get_vf_cfg()
4819 if (ice_check_vf_init(pf, vf)) in ice_get_vf_cfg()
4849 static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac) in ice_unicast_mac_exists() argument
4852 &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC]; in ice_unicast_mac_exists()
4885 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_set_vf_mac() local
4889 if (ice_validate_vf_id(pf, vf_id)) in ice_set_vf_mac()
4897 vf = &pf->vf[vf_id]; in ice_set_vf_mac()
4907 if (ice_unicast_mac_exists(pf, mac)) { in ice_set_vf_mac()
4947 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_set_vf_trust() local
4951 if (ice_is_eswitch_mode_switchdev(pf)) { in ice_set_vf_trust()
4952 dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n"); in ice_set_vf_trust()
4956 if (ice_validate_vf_id(pf, vf_id)) in ice_set_vf_trust()
4959 vf = &pf->vf[vf_id]; in ice_set_vf_trust()
4972 dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n", in ice_set_vf_trust()
4990 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_set_vf_link_state() local
4994 if (ice_validate_vf_id(pf, vf_id)) in ice_set_vf_link_state()
4997 vf = &pf->vf[vf_id]; in ice_set_vf_link_state()
5027 static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf) in ice_calc_all_vfs_min_tx_rate() argument
5031 ice_for_each_vf(pf, i) in ice_calc_all_vfs_min_tx_rate()
5032 rate += pf->vf[i].min_tx_rate; in ice_calc_all_vfs_min_tx_rate()
5053 int all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf); in ice_min_tx_rate_oversubscribed()
5059 …dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d… in ice_min_tx_rate_oversubscribed()
5080 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_set_vf_bw() local
5086 dev = ice_pf_to_dev(pf); in ice_set_vf_bw()
5087 if (ice_validate_vf_id(pf, vf_id)) in ice_set_vf_bw()
5090 vf = &pf->vf[vf_id]; in ice_set_vf_bw()
5106 if (min_tx_rate && ice_is_dcb_active(pf)) { in ice_set_vf_bw()
5148 struct ice_pf *pf = ice_netdev_to_pf(netdev); in ice_get_vf_stats() local
5154 if (ice_validate_vf_id(pf, vf_id)) in ice_get_vf_stats()
5157 vf = &pf->vf[vf_id]; in ice_get_vf_stats()
5191 struct ice_pf *pf = vf->pf; in ice_print_vf_rx_mdd_event() local
5194 dev = ice_pf_to_dev(pf); in ice_print_vf_rx_mdd_event()
5197 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id, in ice_print_vf_rx_mdd_event()
5199 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags) in ice_print_vf_rx_mdd_event()
5209 void ice_print_vfs_mdd_events(struct ice_pf *pf) in ice_print_vfs_mdd_events() argument
5211 struct device *dev = ice_pf_to_dev(pf); in ice_print_vfs_mdd_events()
5212 struct ice_hw *hw = &pf->hw; in ice_print_vfs_mdd_events()
5216 if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state)) in ice_print_vfs_mdd_events()
5220 if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ * 1)) in ice_print_vfs_mdd_events()
5223 pf->last_printed_mdd_jiffies = jiffies; in ice_print_vfs_mdd_events()
5225 ice_for_each_vf(pf, i) { in ice_print_vfs_mdd_events()
5226 struct ice_vf *vf = &pf->vf[i]; in ice_print_vfs_mdd_events()
5286 ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event, in ice_is_malicious_vf() argument
5290 struct device *dev = ice_pf_to_dev(pf); in ice_is_malicious_vf()
5296 if (ice_validate_vf_id(pf, vf_id)) in ice_is_malicious_vf()
5299 vf = &pf->vf[vf_id]; in ice_is_malicious_vf()
5306 mbxdata.max_num_msgs_mbx = pf->hw.mailboxq.num_rq_entries; in ice_is_malicious_vf()
5311 status = ice_mbx_vf_state_handler(&pf->hw, &mbxdata, vf_id, &malvf); in ice_is_malicious_vf()
5321 status = ice_mbx_report_malvf(&pf->hw, pf->malvfs, in ice_is_malicious_vf()
5328 struct ice_vsi *pf_vsi = ice_get_main_vsi(pf); in ice_is_malicious_vf()
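
The lines above trace the ice driver's SR-IOV housekeeping: nearly every path walks pf->vf[] with ice_for_each_vf(), rejects out-of-range IDs via ice_validate_vf_id(), and derives each VF's MSI-X block from pf->sriov_base_vector and pf->num_msix_per_vf (see ice_calc_vf_first_vector_idx() at file line 884). The user-space sketch below mirrors only that iteration, validation, and vector arithmetic; the struct layouts are simplified stand-ins, not the kernel's real ice_pf/ice_vf definitions.

/*
 * Minimal user-space sketch of the per-VF bookkeeping visible in the
 * listing above. The structs here are simplified stand-ins, NOT the
 * kernel's ice_pf/ice_vf; only the iteration macro, the ID validation,
 * and the per-VF MSI-X vector arithmetic mirror the listed code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

typedef uint16_t u16;

struct ice_pf;

struct ice_vf {
	struct ice_pf *pf;	/* backpointer, as in the real driver */
	u16 vf_id;
	u16 first_vector_idx;
};

struct ice_pf {
	struct ice_vf *vf;	/* array of num_alloc_vfs entries */
	u16 num_alloc_vfs;
	u16 sriov_base_vector;	/* first MSI-X vector reserved for SR-IOV */
	u16 num_msix_per_vf;	/* vectors handed to each VF */
};

/* Mirrors the driver's ice_for_each_vf() iteration over pf->vf[]. */
#define ice_for_each_vf(pf, i) \
	for ((i) = 0; (i) < (pf)->num_alloc_vfs; (i)++)

/* Same check as ice_validate_vf_id(): IDs past the allocated range fail. */
static int ice_validate_vf_id(struct ice_pf *pf, u16 vf_id)
{
	if (vf_id >= pf->num_alloc_vfs) {
		fprintf(stderr, "Invalid VF ID: %u\n", (unsigned)vf_id);
		return -22;	/* -EINVAL */
	}
	return 0;
}

/* Same arithmetic as ice_calc_vf_first_vector_idx(): each VF owns a
 * contiguous block of num_msix_per_vf vectors above sriov_base_vector. */
static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
{
	return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf;
}

int main(void)
{
	struct ice_pf pf = {
		.num_alloc_vfs = 4,
		.sriov_base_vector = 96,
		.num_msix_per_vf = 5,
	};
	unsigned int i;

	pf.vf = calloc(pf.num_alloc_vfs, sizeof(*pf.vf));
	if (!pf.vf)
		return 1;

	ice_for_each_vf(&pf, i) {
		struct ice_vf *vf = &pf.vf[i];

		vf->pf = &pf;
		vf->vf_id = (u16)i;
		vf->first_vector_idx = (u16)ice_calc_vf_first_vector_idx(&pf, vf);
		printf("VF %u: first vector index %u\n",
		       (unsigned)vf->vf_id, (unsigned)vf->first_vector_idx);
	}

	/* An out-of-range ID is rejected the same way the driver does it. */
	ice_validate_vf_id(&pf, 7);

	free(pf.vf);
	return 0;
}

Compiled with a plain C compiler, the loop prints vector indices 96, 101, 106, and 111 for four VFs with five vectors each, and the stray ID 7 is rejected just as in ice_validate_vf_id() at file line 266 above.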