Lines Matching refs:back
55 ice_flush(&vsi->back->hw); in ice_vsi_ctrl_all_rx_rings()
75 struct ice_pf *pf = vsi->back; in ice_vsi_alloc_arrays()
154 dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n", in ice_vsi_set_num_desc()
179 struct ice_pf *pf = vsi->back; in ice_vsi_set_num_qs()
286 struct ice_pf *pf = vsi->back; in ice_vsi_delete_from_hw()
315 struct ice_pf *pf = vsi->back; in ice_vsi_free_arrays()
340 struct ice_pf *pf = vsi->back; in ice_vsi_free_stats()
381 struct ice_pf *pf = vsi->back; in ice_vsi_alloc_ring_stats()
445 if (!vsi || !vsi->back) in ice_vsi_free()
448 pf = vsi->back; in ice_vsi_free()
519 struct ice_pf *pf = vsi->back; in ice_vsi_alloc_stat_arrays()
573 vsi->irq_dyn_alloc = pci_msix_can_alloc_dyn(vsi->back->pdev); in ice_vsi_alloc_def()
635 vsi->back = pf; in ice_vsi_alloc()
663 struct ice_pf *pf = vsi->back; in ice_alloc_fd_res()
753 struct ice_pf *pf = vsi->back; in ice_vsi_get_qs()
798 struct ice_pf *pf = vsi->back; in ice_vsi_put_qs()
853 struct ice_pf *pf = vsi->back; in ice_vsi_clean_rss_flow_fld()
871 struct ice_pf *pf = vsi->back; in ice_rss_clean()
892 struct ice_pf *pf = vsi->back; in ice_vsi_set_rss_params()
1069 …dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", in ice_vsi_setup_q_map()
1075 …dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", in ice_vsi_setup_q_map()
1084 …dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence makin… in ice_vsi_setup_q_map()
1156 pf = vsi->back; in ice_set_rss_vsi_ctx()
1227 struct ice_pf *pf = vsi->back; in ice_vsi_init()
1387 bool dvm_ena = ice_is_dvm_ena(&vsi->back->hw); in ice_vsi_alloc_rings()
1388 struct ice_pf *pf = vsi->back; in ice_vsi_alloc_rings()
1498 struct ice_pf *pf = vsi->back; in ice_vsi_cfg_rss_lut_key()
1570 struct ice_pf *pf = vsi->back; in ice_vsi_set_vf_rss_flow_fld()
1670 struct ice_pf *pf = vsi->back; in ice_vsi_set_rss_flow_fld()
1730 struct ice_hw *hw = &vsi->back->hw; in ice_update_eth_stats()
1731 struct ice_pf *pf = vsi->back; in ice_update_eth_stats()
1825 struct ice_hw *hw = &q_vector->vsi->back->hw; in ice_write_intrl()
1858 struct ice_hw *hw = &q_vector->vsi->back->hw; in __ice_write_itr()
1914 struct ice_pf *pf = vsi->back; in ice_vsi_cfg_msix()
2037 struct ice_pf *pf = vsi->back; in ice_vsi_is_rx_queue_active()
2056 if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) { in ice_vsi_set_tc_cfg()
2079 struct ice_pf *pf = vsi->back; in ice_vsi_cfg_sw_lldp()
2154 struct device *dev = ice_pf_to_dev(vsi->back); in ice_set_agg_vsi()
2160 struct ice_pf *pf = vsi->back; in ice_set_agg_vsi()
2309 struct device *dev = ice_pf_to_dev(vsi->back); in ice_vsi_cfg_def()
2310 struct ice_pf *pf = vsi->back; in ice_vsi_cfg_def()
2462 struct ice_pf *pf = vsi->back; in ice_vsi_cfg()
2472 ret = ice_vsi_cfg_tc_lan(vsi->back, vsi); in ice_vsi_cfg()
2495 struct ice_pf *pf = vsi->back; in ice_vsi_decfg()
2593 struct ice_pf *pf = vsi->back; in ice_vsi_release_msix()
2630 struct ice_pf *pf = vsi->back; in ice_vsi_free_irq()
2858 if (!vsi->back) in ice_vsi_release()
2860 pf = vsi->back; in ice_vsi_release()
3014 struct ice_pf *pf = vsi->back; in ice_vsi_realloc_stat_arrays()
3083 pf = vsi->back; in ice_vsi_rebuild()
3193 struct ice_pf *pf = vsi->back; in ice_vsi_cfg_netdev_tc()
3305 …dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", in ice_vsi_setup_q_map_mqprio()
3312 …dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", in ice_vsi_setup_q_map_mqprio()
3333 dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_txq = %d\n", vsi->num_txq); in ice_vsi_setup_q_map_mqprio()
3334 dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n", vsi->num_rxq); in ice_vsi_setup_q_map_mqprio()
3335 dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n", in ice_vsi_setup_q_map_mqprio()
3351 struct ice_pf *pf = vsi->back; in ice_vsi_cfg_tc()
3512 dev = ice_pf_to_dev(vsi->back); in ice_set_dflt_vsi()
3514 if (ice_lag_is_switchdev_running(vsi->back)) { in ice_set_dflt_vsi()
3553 dev = ice_pf_to_dev(vsi->back); in ice_clear_dflt_vsi()
3611 struct ice_pf *pf = vsi->back; in ice_set_min_bw_limit()
3672 struct ice_pf *pf = vsi->back; in ice_set_max_bw_limit()
3729 struct device *dev = ice_pf_to_dev(vsi->back); in ice_set_link()
3788 if (!ice_is_dvm_ena(&vsi->back->hw)) in ice_vsi_add_vlan_zero()
3818 if (!ice_is_dvm_ena(&vsi->back->hw)) in ice_vsi_del_vlan_zero()
3829 return ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vsi_del_vlan_zero()
3853 if (ice_is_dvm_ena(&vsi->back->hw)) in ice_vsi_num_zero_vlans()
3970 if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL)) in ice_vsi_update_security()
4017 if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL)) in ice_vsi_update_local_lb()
4035 struct ice_hw *hw = &vsi->back->hw; in ice_vsi_update_l2tsel()
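
Every hit above follows the same back-pointer idiom: a VSI resolves its owning PF through vsi->back (set once at allocation, cf. "vsi->back = pf" at line 635 in the listing), and from the PF it reaches the logging device via ice_pf_to_dev() and the hardware handle via &pf->hw. The sketch below is a minimal, self-contained illustration of that pattern; the toy_* types are made-up stand-ins for this example only, not the driver's real struct ice_pf / struct ice_vsi / struct ice_hw definitions.

/*
 * Illustrative userspace sketch of the vsi->back pattern seen in the
 * listing above.  All types here are simplified stand-ins.
 */
#include <stdio.h>

struct toy_hw {                 /* stand-in for the HW handle (&pf->hw) */
        unsigned int flush_count;
};

struct toy_pf {                 /* stand-in for the parent PF */
        struct toy_hw hw;
        const char *dev_name;
};

struct toy_vsi {                /* stand-in for the child VSI */
        struct toy_pf *back;    /* back-pointer to the owning PF, as in vsi->back */
        int idx;
};

/* Mirrors how ice_pf_to_dev(vsi->back) is used to reach the device for logging. */
static const char *toy_pf_to_dev(const struct toy_pf *pf)
{
        return pf->dev_name;
}

/* Mirrors helpers in the listing: fetch the PF once, then work through it. */
static void toy_vsi_flush(struct toy_vsi *vsi)
{
        struct toy_pf *pf = vsi->back;  /* the "struct ice_pf *pf = vsi->back" idiom */

        pf->hw.flush_count++;
        printf("%s: flushed VSI %d (flushes so far: %u)\n",
               toy_pf_to_dev(pf), vsi->idx, pf->hw.flush_count);
}

int main(void)
{
        struct toy_pf pf = { .dev_name = "toy-pf0" };
        struct toy_vsi vsi = { .back = &pf, .idx = 0 };  /* set at alloc time, cf. vsi->back = pf */

        toy_vsi_flush(&vsi);
        return 0;
}

The design point the listing documents is that the VSI never stores its own copy of the device or hardware handle; it always dereferences the single back-pointer, so PF-wide state stays in one place.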