/linux-6.3-rc2/drivers/net/ethernet/qlogic/qed/
qed_dcbx.c
    1344  qed_ptt_release(hwfn, ptt);  in qed_dcbnl_setstate()
    1454  qed_ptt_release(hwfn, ptt);  in qed_dcbnl_setpfccfg()
    1594  qed_ptt_release(hwfn, ptt);  in qed_dcbnl_setpgtccfgtx()
    1630  qed_ptt_release(hwfn, ptt);  in qed_dcbnl_setpgbwgcfgtx()
    1656  qed_ptt_release(hwfn, ptt);  in qed_dcbnl_setall()
    1694  qed_ptt_release(hwfn, ptt);  in qed_dcbnl_setnumtcs()
    1722  qed_ptt_release(hwfn, ptt);  in qed_dcbnl_setpfcstate()
    1801  qed_ptt_release(hwfn, ptt);  in qed_dcbnl_setapp()
    1848  qed_ptt_release(hwfn, ptt);  in qed_dcbnl_setdcbx()
    1938  qed_ptt_release(hwfn, ptt);  in qed_dcbnl_setfeatcfg()
    [all …]
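The qed_dcbx.c hits above are all the tail end of the same pattern: each DCB netlink setter acquires a PTT window on entry and releases it on every exit path. A minimal sketch of that bracketing; example_dcbnl_setter and its elided body are illustrative, only qed_ptt_acquire()/qed_ptt_release() come from the listing.

#include "qed.h"
#include "qed_hw.h"

/* Hedged sketch of the acquire/configure/release bracketing used by the
 * dcbnl setters listed above.  The configuration step itself is elided.
 */
static u8 example_dcbnl_setter(struct qed_hwfn *hwfn)
{
	struct qed_ptt *ptt;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return 1;	/* dcbnl ops report failure as non-zero */

	/* ... program the new DCBX parameters through ptt ... */

	qed_ptt_release(hwfn, ptt);

	return 0;
}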
qed_main.c
     705  struct qed_hwfn *hwfn;  in qed_single_int() [local]
     734  DP_NOTICE(hwfn,  in qed_single_int()
     761  id = hwfn->my_id;  in qed_slowpath_irq_req()
     766  qed_msix_sp_int, 0, hwfn->name, &hwfn->sp_dpc);  in qed_slowpath_irq_req()
    1142  queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay);  in qed_slowpath_delayed_work()
    1201  if (hwfn->cdev->recov_in_prog || !hwfn->slowpath_wq_active)  in qed_slowpath_task()
    1217  struct qed_hwfn *hwfn;  in qed_slowpath_wq_start() [local]
    1364  rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,  in qed_slowpath_start()
    2101  if (IS_PF(hwfn->cdev) && qed_mcp_is_ext_speed_supported(hwfn)) {  in qed_fill_link()
    2958  rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,  in qed_read_module_eeprom()
    [all …]
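Lines 761-766 are the MSI-X slowpath interrupt hookup: each hwfn requests its own vector, named after itself, with the slowpath DPC as the handler cookie. A rough sketch of that call; the msix_vector parameter and the name format are assumptions, only qed_msix_sp_int, hwfn->my_id, hwfn->name and hwfn->sp_dpc appear in the listing.

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include "qed.h"
#include "qed_int.h"

/* Sketch of the per-hwfn slowpath IRQ request.  The msix_vector
 * parameter and the interrupt name format are illustrative assumptions.
 */
static int example_slowpath_irq_req(struct qed_hwfn *hwfn, int msix_vector)
{
	u8 id = hwfn->my_id;
	int rc;

	snprintf(hwfn->name, sizeof(hwfn->name), "qed-sp-%u", id);

	rc = request_irq(msix_vector, qed_msix_sp_int, 0,
			 hwfn->name, &hwfn->sp_dpc);
	if (rc)
		DP_NOTICE(hwfn, "request_irq failed, rc = %d\n", rc);

	return rc;
}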
qed_init_ops.h
      71  #define STORE_RT_REG(hwfn, offset, val) \  [argument]
      72      qed_init_store_rt_reg(hwfn, offset, val)
      74  #define OVERWRITE_RT_REG(hwfn, offset, val) \  [argument]
      75      qed_init_store_rt_reg(hwfn, offset, val)
      82  #define STORE_RT_REG_AGG(hwfn, offset, val) \  [argument]
      83      qed_init_store_rt_agg(hwfn, offset, (u32 *)&(val), sizeof(val))
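The qed_init_ops.h matches are the runtime-array helper macros themselves. A short usage sketch, assuming a made-up EXAMPLE_RT_OFFSET (real offsets come from the generated RT_OFFSET tables); STORE_RT_REG_AGG does the same staging for multi-dword values.

#include "qed.h"
#include "qed_init_ops.h"

/* EXAMPLE_RT_OFFSET is a made-up offset name for illustration only. */
#define EXAMPLE_RT_OFFSET	0

static void example_store_rt(struct qed_hwfn *hwfn, u32 val)
{
	/* Stage a single dword into the hwfn's runtime array. */
	STORE_RT_REG(hwfn, EXAMPLE_RT_OFFSET, val);

	/* Expands to the same helper (lines 74-75 above); the name
	 * signals an intentional overwrite of a staged value.
	 */
	OVERWRITE_RT_REG(hwfn, EXAMPLE_RT_OFFSET, val);
}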
qed_sriov.c
    4442  queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);  in qed_schedule_iov()
    4508  DP_ERR(hwfn,  in qed_sriov_disable()
    4542  struct qed_hwfn *hwfn;  in qed_sriov_enable() [local]
    4556  hwfn = &cdev->hwfns[j];  in qed_sriov_enable()
    4746  if (!hwfn->pf_iov_info)  in qed_inform_vf_link_state()
    4912  DP_NOTICE(hwfn,  in qed_set_vf_trust()
    4949  qed_for_each_vf(hwfn, i) {  in qed_handle_vf_msg()
    5015  DP_VERBOSE(hwfn,  in qed_handle_pf_set_vf_unicast()
    5028  DP_VERBOSE(hwfn,  in qed_handle_pf_set_vf_unicast()
    5056  qed_for_each_vf(hwfn, i)  in qed_handle_bulletin_post()
    [all …]
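Line 4442 shows how IOV events are deferred to the per-hwfn workqueue. A sketch of that scheduling step; recording the reason with set_bit() before queueing is an assumption, not something visible in the listing.

#include <linux/bitops.h>
#include <linux/workqueue.h>
#include "qed.h"
#include "qed_sriov.h"

/* Sketch: note which IOV event needs servicing, then kick the per-hwfn
 * IOV workqueue with no delay.
 */
static void example_schedule_iov(struct qed_hwfn *hwfn,
				 enum qed_iov_wq_flag flag)
{
	set_bit(flag, &hwfn->iov_task_flags);	/* assumption: flags are bits */
	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
}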
qed_sriov.h
     404  void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag);
     407  void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
     465  static inline void qed_schedule_iov(struct qed_hwfn *hwfn,  in qed_schedule_iov() [argument]
     479  static inline void qed_inform_vf_link_state(struct qed_hwfn *hwfn)  in qed_inform_vf_link_state() [argument]
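The qed_sriov.h entries show the same two functions twice: real prototypes when SR-IOV support is built in, and empty static inline stubs otherwise, so callers never need their own #ifdef. A sketch of that arrangement; the stub bodies and the exact CONFIG_QED_SRIOV guard placement are inferred, not quoted.

#ifdef CONFIG_QED_SRIOV
void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag);
void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
#else
/* Compiled-out stubs: calls vanish when SR-IOV support is disabled. */
static inline void qed_schedule_iov(struct qed_hwfn *hwfn,
				    enum qed_iov_wq_flag flag)
{
}

static inline void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
}
#endif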
qed_vf.c
    1637  bulletin = &hwfn->vf_iov_info->bulletin_shadow;  in qed_vf_bulletin_get_forced_mac()
    1680  static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)  in qed_handle_bulletin_change() [argument]
    1682  struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth;  in qed_handle_bulletin_change()
    1684  void *cookie = hwfn->cdev->ops_cookie;  in qed_handle_bulletin_change()
    1688  is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,  in qed_handle_bulletin_change()
    1696  qed_link_update(hwfn, NULL);  in qed_handle_bulletin_change()
    1701  struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,  in qed_iov_vf_task() [local]
    1709  qed_vf_read_bulletin(hwfn, &change);  in qed_iov_vf_task()
    1711  &hwfn->iov_task_flags))  in qed_iov_vf_task()
    1714  qed_handle_bulletin_change(hwfn);  in qed_iov_vf_task()
    [all …]
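Lines 1701-1714 are the VF-side bulletin poller: the worker recovers its hwfn from the embedded delayed work, reads the bulletin shadow, and dispatches the change handler. A condensed sketch; the iov_task.work member used in container_of() and the early-return handling are inferred from the surrounding references.

#include <linux/workqueue.h>
#include "qed.h"
#include "qed_vf.h"

/* Condensed sketch of the VF IOV worker shown above. */
static void example_iov_vf_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     iov_task.work);
	u8 change = 0;

	if (qed_vf_read_bulletin(hwfn, &change))
		return;

	if (change)
		qed_handle_bulletin_change(hwfn);	/* static helper in qed_vf.c */
}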
qed.h
     977  void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt);
     978  void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt);
     989  int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
     991  int qed_mfw_tlv_req(struct qed_hwfn *hwfn);
     993  int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn,
qed_fcoe.c
     725  struct qed_hwfn *hwfn = QED_AFFIN_HWFN(cdev);  in qed_fill_fcoe_dev_info() [local]
     732  qed_fcoe_get_primary_bdq_prod(hwfn, BDQ_ID_RQ);  in qed_fill_fcoe_dev_info()
     734  qed_fcoe_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ);  in qed_fill_fcoe_dev_info()
     736  info->wwpn = hwfn->mcp_info->func_info.wwn_port;  in qed_fill_fcoe_dev_info()
     737  info->wwnn = hwfn->mcp_info->func_info.wwn_node;  in qed_fill_fcoe_dev_info()
     739  info->num_cqs = FEAT_NUM(hwfn, QED_FCOE_CQ);  in qed_fill_fcoe_dev_info()
qed_l2.c
    2206  struct qed_hwfn *hwfn = &cdev->hwfns[i];  in qed_fill_eth_dev_info() [local]
    2207  u16 l2_queues = (u16)FEAT_NUM(hwfn,  in qed_fill_eth_dev_info()
    2211  cids = hwfn->pf_params.eth_pf_params.num_cons;  in qed_fill_eth_dev_info()
    2622  struct qed_hwfn *hwfn = &cdev->hwfns[i];  in qed_tunn_configure() [local]
    2626  tun = &hwfn->cdev->tunnel;  in qed_tunn_configure()
    2628  p_ptt = qed_ptt_acquire(hwfn);  in qed_tunn_configure()
    2639  qed_ptt_release(hwfn, p_ptt);  in qed_tunn_configure()
    2643  if (IS_PF_SRIOV(hwfn)) {  in qed_tunn_configure()
    2650  qed_for_each_vf(hwfn, j) {  in qed_tunn_configure()
    2651  qed_iov_bulletin_set_udp_ports(hwfn, j,  in qed_tunn_configure()
    [all …]
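The qed_tunn_configure() references show the per-engine shape of tunnel reconfiguration: walk every hwfn, take a PTT, apply the config, release it, then, when SR-IOV is active, push the new UDP ports into each VF's bulletin. A sketch of that loop; the elided update step and the trailing port arguments at line 2651 are assumptions.

#include "qed.h"
#include "qed_hw.h"
#include "qed_sriov.h"

/* Sketch of the per-hwfn tunnel-configure loop listed above. */
static int example_tunn_configure(struct qed_dev *cdev,
				  u16 vxlan_port, u16 geneve_port)
{
	int i, j;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_ptt *p_ptt = qed_ptt_acquire(hwfn);

		if (!p_ptt)
			return -EAGAIN;

		/* ... apply the tunnel configuration through p_ptt (elided) ... */

		qed_ptt_release(hwfn, p_ptt);

		if (IS_PF_SRIOV(hwfn)) {
			/* Port arguments are assumed; line 2651 is truncated. */
			qed_for_each_vf(hwfn, j)
				qed_iov_bulletin_set_udp_ports(hwfn, j,
							       vxlan_port,
							       geneve_port);
		}
	}

	return 0;
}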
qed_nvmetcp.c
     148  struct qed_hwfn *hwfn = QED_AFFIN_HWFN(cdev);  in qed_fill_nvmetcp_dev_info() [local]
     153  info->port_id = MFW_PORT(hwfn);  in qed_fill_nvmetcp_dev_info()
     154  info->num_cqs = FEAT_NUM(hwfn, QED_NVMETCP_CQ);  in qed_fill_nvmetcp_dev_info()
qed_iscsi.c
    1035  struct qed_hwfn *hwfn = QED_AFFIN_HWFN(cdev);  in qed_fill_iscsi_dev_info() [local]
    1043  qed_iscsi_get_primary_bdq_prod(hwfn, BDQ_ID_RQ);  in qed_fill_iscsi_dev_info()
    1045  qed_iscsi_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ);  in qed_fill_iscsi_dev_info()
    1047  info->num_cqs = FEAT_NUM(hwfn, QED_ISCSI_CQ);  in qed_fill_iscsi_dev_info()
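qed_fcoe.c, qed_nvmetcp.c and qed_iscsi.c above all fill their dev-info structures the same way: pick the affinity hwfn for the device, then read port and sizing data from it. A sketch of that shared shape, with struct example_dev_info standing in for the protocol-specific info structures.

#include "qed.h"

/* Hypothetical stand-in for the protocol-specific *_dev_info structs. */
struct example_dev_info {
	u16 port_id;
	u16 num_cqs;
};

static void example_fill_dev_info(struct qed_dev *cdev,
				  struct example_dev_info *info)
{
	/* Use the affinity hwfn for the device, as the listings above do. */
	struct qed_hwfn *hwfn = QED_AFFIN_HWFN(cdev);

	info->port_id = MFW_PORT(hwfn);
	info->num_cqs = FEAT_NUM(hwfn, QED_ISCSI_CQ);
}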