
Searched refs:vfid (Results 1 – 25 of 70) sorted by relevance


/drivers/gpu/drm/xe/
xe_gt_sriov_pf_control.c
517 pf_queue_vf(gt, vfid); in pf_enter_vf_pause_guc_done()
536 pf_queue_vf(gt, vfid); in pf_enter_vf_pause_send_pause()
696 pf_queue_vf(gt, vfid); in pf_enter_vf_resume_send_resume()
838 pf_queue_vf(gt, vfid); in pf_enter_vf_stop_send_stop()
964 pf_queue_vf(gt, vfid); in pf_enter_vf_flr_send_start()
1014 pf_queue_vf(gt, vfid); in pf_enter_vf_flr_send_finish()
1041 pf_queue_vf(gt, vfid); in pf_enter_vf_flr_reset_mmio()
1060 pf_queue_vf(gt, vfid); in pf_enter_vf_flr_reset_data()
1082 pf_queue_vf(gt, vfid); in pf_enter_vf_flr_reset_config()
1316 u32 vfid; in xe_gt_sriov_pf_control_process_guc2pf() local
[all …]
xe_gt_sriov_pf_config.c
49 vfid, in guc_action_update_vf_cfg()
533 if (vfid) in xe_gt_sriov_pf_config_get_ggtt()
586 if (vfid) in xe_gt_sriov_pf_config_set_ggtt()
651 for (n = vfid; n < vfid + num_vfs; n++) { in xe_gt_sriov_pf_config_bulk_set_ggtt()
855 if (vfid) in xe_gt_sriov_pf_config_get_ctxs()
910 if (vfid) in xe_gt_sriov_pf_config_set_ctxs()
972 for (n = vfid; n < vfid + num_vfs; n++) { in xe_gt_sriov_pf_config_bulk_set_ctxs()
1159 if (vfid) in xe_gt_sriov_pf_config_get_dbs()
1186 if (vfid) in xe_gt_sriov_pf_config_set_dbs()
1220 for (n = vfid; n < vfid + num_vfs; n++) { in xe_gt_sriov_pf_config_bulk_set_dbs()
[all …]
xe_gt_sriov_pf_config.h
15 u64 xe_gt_sriov_pf_config_get_ggtt(struct xe_gt *gt, unsigned int vfid);
16 int xe_gt_sriov_pf_config_set_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size);
18 unsigned int vfid, unsigned int num_vfs);
20 unsigned int vfid, unsigned int num_vfs, u64 size);
22 u32 xe_gt_sriov_pf_config_get_ctxs(struct xe_gt *gt, unsigned int vfid);
28 u32 xe_gt_sriov_pf_config_get_dbs(struct xe_gt *gt, unsigned int vfid);
34 u64 xe_gt_sriov_pf_config_get_lmem(struct xe_gt *gt, unsigned int vfid);
50 u32 xe_gt_sriov_pf_config_get_threshold(struct xe_gt *gt, unsigned int vfid,
52 int xe_gt_sriov_pf_config_set_threshold(struct xe_gt *gt, unsigned int vfid,
61 int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
[all …]
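
The header above exposes per-VF quota getters/setters plus bulk variants that configure a contiguous VFID range. A minimal sketch of a provisioning pass, assuming the signatures shown; example_provision_ggtt and the SZ_256M quota are illustrative, and VFIDs are 1-based, with 0 addressing the PF itself:

	/* Illustrative sketch only, not driver code: give VFs 1..num_vfs equal
	 * GGTT quotas with the bulk setter, then read one back to confirm. */
	static int example_provision_ggtt(struct xe_gt *gt, unsigned int num_vfs)
	{
		int err;

		err = xe_gt_sriov_pf_config_bulk_set_ggtt(gt, 1, num_vfs, SZ_256M);
		if (err)
			return err;

		/* the getter returns the provisioned size; 0 means unprovisioned */
		return xe_gt_sriov_pf_config_get_ggtt(gt, 1) ? 0 : -ENODATA;
	}
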
xe_gt_sriov_pf_migration.c
129 unsigned int vfid) in pf_pick_vf_snapshot() argument
135 return &gt->sriov.pf.vfs[vfid].snapshot; in pf_pick_vf_snapshot()
238 xe_gt_assert(gt, vfid != PFID); in xe_gt_sriov_pf_migration_save_guc_state()
245 err = pf_save_vf_guc_state(gt, vfid); in xe_gt_sriov_pf_migration_save_guc_state()
260 snapshot->guc.size / sizeof(u32), vfid); in pf_restore_vf_guc_state()
287 xe_gt_assert(gt, vfid != PFID); in xe_gt_sriov_pf_migration_restore_guc_state()
294 ret = pf_restore_vf_guc_state(gt, vfid); in xe_gt_sriov_pf_migration_restore_guc_state()
323 xe_gt_assert(gt, vfid != PFID); in xe_gt_sriov_pf_migration_read_guc_state()
330 snapshot = pf_pick_vf_snapshot(gt, vfid); in xe_gt_sriov_pf_migration_read_guc_state()
363 xe_gt_assert(gt, vfid != PFID); in xe_gt_sriov_pf_migration_write_guc_state()
[all …]
xe_sriov_pf_service.c
89 xe_sriov_pf_assert_vfid(xe, vfid); in pf_connect()
92 xe->sriov.pf.vfs[vfid].version.major = major; in pf_connect()
98 xe_sriov_pf_assert_vfid(xe, vfid); in pf_disconnect()
100 xe->sriov.pf.vfs[vfid].version.major = 0; in pf_disconnect()
101 xe->sriov.pf.vfs[vfid].version.minor = 0; in pf_disconnect()
119 xe_sriov_pf_assert_vfid(xe, vfid); in xe_sriov_pf_service_is_negotiated()
147 vfid, wanted_major, wanted_minor); in xe_sriov_pf_service_handshake_vf()
154 pf_disconnect(xe, vfid); in xe_sriov_pf_service_handshake_vf()
157 vfid, *major, *minor); in xe_sriov_pf_service_handshake_vf()
158 pf_connect(xe, vfid, *major, *minor); in xe_sriov_pf_service_handshake_vf()
[all …]
xe_gt_sriov_pf_debugfs.c
201 unsigned int vfid = extract_vfid(data); \
219 unsigned int vfid = extract_vfid(data); \
257 unsigned int vfid = extract_vfid(data); in set_threshold() local
274 unsigned int vfid = extract_vfid(data); in get_threshold() local
356 unsigned int vfid = extract_vfid(parent); in control_write() local
361 xe_gt_assert(gt, vfid); in control_write()
362 xe_gt_sriov_pf_assert_vfid(gt, vfid); in control_write()
423 unsigned int vfid = extract_vfid(parent); in guc_state_read() local
434 unsigned int vfid = extract_vfid(parent); in guc_state_write() local
461 unsigned int vfid = extract_vfid(parent); in config_blob_read() local
[all …]
xe_gt_sriov_pf_monitor.c
24 void xe_gt_sriov_pf_monitor_flr(struct xe_gt *gt, u32 vfid) in xe_gt_sriov_pf_monitor_flr() argument
29 xe_gt_sriov_pf_assert_vfid(gt, vfid); in xe_gt_sriov_pf_monitor_flr()
32 gt->sriov.pf.vfs[vfid].monitor.guc.events[e] = 0; in xe_gt_sriov_pf_monitor_flr()
35 static void pf_update_event_counter(struct xe_gt *gt, u32 vfid, in pf_update_event_counter() argument
41 gt->sriov.pf.vfs[vfid].monitor.guc.events[e]++; in pf_update_event_counter()
50 xe_sriov_function_name(vfid, origin, sizeof(origin)); in pf_handle_vf_threshold_event()
60 origin, xe_gt_sriov_pf_config_get_threshold(gt, vfid, e), in pf_handle_vf_threshold_event()
63 pf_update_event_counter(gt, vfid, e); in pf_handle_vf_threshold_event()
81 u32 vfid; in xe_gt_sriov_pf_monitor_process_guc2pf() local
102 if (unlikely(vfid > xe_gt_sriov_pf_get_totalvfs(gt))) in xe_gt_sriov_pf_monitor_process_guc2pf()
[all …]
xe_lmtt.c
326 pt = pd->entries[vfid]; in lmtt_drop_pages()
327 pd->entries[vfid] = NULL; in lmtt_drop_pages()
394 if (pd->entries[vfid]) in lmtt_alloc_range()
405 lmtt_write_pte(lmtt, pd, pde, vfid); in lmtt_alloc_range()
407 pd->entries[vfid] = pt; in lmtt_alloc_range()
428 pt = pd->entries[vfid]; in lmtt_leaf_pt()
463 pt = lmtt_leaf_pt(lmtt, vfid, start); in lmtt_insert_bo()
492 lmtt_assert(lmtt, vfid); in xe_lmtt_prepare_pages()
515 lmtt_assert(lmtt, vfid); in xe_lmtt_populate_pages()
534 lmtt_assert(lmtt, vfid); in xe_lmtt_drop_pages()
[all …]
xe_gt_sriov_pf_migration.h
14 int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid);
15 int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vfid);
18 ssize_t xe_gt_sriov_pf_migration_read_guc_state(struct xe_gt *gt, unsigned int vfid,
20 ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int vfid,
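
These four entry points cover the PF side of VF state migration: save/restore snapshot the GuC-owned VF state, while the read/write pair streams a saved snapshot to and from a buffer. A hedged sketch of the save-then-restore flow, assuming the VF was already paused via the control interface; example_migrate_guc_state is illustrative:

	static int example_migrate_guc_state(struct xe_gt *src, struct xe_gt *dst,
					     unsigned int vfid)
	{
		int err;

		/* snapshot the paused VF's GuC state on the source GT */
		err = xe_gt_sriov_pf_migration_save_guc_state(src, vfid);
		if (err)
			return err;

		/* ...the snapshot would be transferred between hosts here,
		 * e.g. via the read/write entry points above (not shown)... */

		/* replay the snapshot into the target GT's GuC */
		return xe_gt_sriov_pf_migration_restore_guc_state(dst, vfid);
	}
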
xe_gt_sriov_pf_control.h
17 int xe_gt_sriov_pf_control_pause_vf(struct xe_gt *gt, unsigned int vfid);
18 int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid);
19 int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid);
20 int xe_gt_sriov_pf_control_trigger_flr(struct xe_gt *gt, unsigned int vfid);
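
The control interface drives the per-VF state machines seen in xe_gt_sriov_pf_control.c above. A small sketch of how a caller might quiesce a VF, falling back to a function-level reset (FLR) if the pause fails; example_quiesce_vf is illustrative:

	static int example_quiesce_vf(struct xe_gt *gt, unsigned int vfid)
	{
		int err;

		err = xe_gt_sriov_pf_control_pause_vf(gt, vfid);
		if (!err)
			return 0;

		/* an unresponsive VF gets a function-level reset instead */
		return xe_gt_sriov_pf_control_trigger_flr(gt, vfid);
	}
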
xe_gt_sriov_pf.c
161 static struct xe_reg xe_reg_vf_to_pf(struct xe_reg vf_reg, unsigned int vfid, u32 stride) in xe_reg_vf_to_pf() argument
166 pf_reg.addr += stride * vfid; in xe_reg_vf_to_pf()
171 static void pf_clear_vf_scratch_regs(struct xe_gt *gt, unsigned int vfid) in pf_clear_vf_scratch_regs() argument
180 scratch = xe_reg_vf_to_pf(MED_VF_SW_FLAG(n), vfid, stride); in pf_clear_vf_scratch_regs()
186 scratch = xe_reg_vf_to_pf(VF_SW_FLAG(n), vfid, stride); in pf_clear_vf_scratch_regs()
199 void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid) in xe_gt_sriov_pf_sanitize_hw() argument
203 pf_clear_vf_scratch_regs(gt, vfid); in xe_gt_sriov_pf_sanitize_hw()
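
xe_reg_vf_to_pf above shows how the PF reaches a VF's copy of a replicated register: the VF-relative offset plus vfid times a per-VF stride. The same addressing in isolation, with illustrative names:

	/* PF view of VF-replicated registers: base + vfid * stride;
	 * vfid 0 would address the PF's own copy. */
	static u32 example_vf_reg_addr(u32 base, unsigned int vfid, u32 stride)
	{
		return base + vfid * stride;
	}
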
xe_sriov_pf_service.h
17 int xe_sriov_pf_service_handshake_vf(struct xe_device *xe, u32 vfid,
20 bool xe_sriov_pf_service_is_negotiated(struct xe_device *xe, u32 vfid, u32 major, u32 minor);
21 void xe_sriov_pf_service_reset_vf(struct xe_device *xe, unsigned int vfid);
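
Per the snippets from xe_sriov_pf_service.c above, the handshake clamps the VF's wanted ABI version and records the agreed one per VFID. A sketch, assuming the truncated prototype continues with the wanted version plus out-parameters for the agreed version; example_handshake is illustrative:

	static int example_handshake(struct xe_device *xe, u32 vfid,
				     u32 wanted_major, u32 wanted_minor)
	{
		u32 major, minor;
		int err;

		err = xe_sriov_pf_service_handshake_vf(xe, vfid, wanted_major,
						       wanted_minor, &major, &minor);
		if (err)
			return err;

		/* later requests from this VF must match the agreed version */
		if (!xe_sriov_pf_service_is_negotiated(xe, vfid, major, minor))
			return -EPROTO;
		return 0;
	}
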
xe_lmtt.h
19 int xe_lmtt_prepare_pages(struct xe_lmtt *lmtt, unsigned int vfid, u64 range);
20 int xe_lmtt_populate_pages(struct xe_lmtt *lmtt, unsigned int vfid, struct xe_bo *bo, u64 offset);
21 void xe_lmtt_drop_pages(struct xe_lmtt *lmtt, unsigned int vfid);
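
The Local Memory Translation Table (LMTT) maps a VF's view of LMEM onto backing objects: prepare allocates page tables for a range, populate writes the PTEs, and drop tears everything down (e.g. on FLR). A sketch of that lifecycle using the three prototypes above; example_lmtt_map and the zero offset are illustrative:

	static int example_lmtt_map(struct xe_lmtt *lmtt, unsigned int vfid,
				    struct xe_bo *bo, u64 range)
	{
		int err;

		err = xe_lmtt_prepare_pages(lmtt, vfid, range);	/* alloc PTs */
		if (err)
			return err;

		err = xe_lmtt_populate_pages(lmtt, vfid, bo, 0);	/* write PTEs */
		if (err)
			xe_lmtt_drop_pages(lmtt, vfid);		/* unwind */
		return err;
	}
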
xe_gt_sriov_pf_helpers.h
23 #define xe_gt_sriov_pf_assert_vfid(gt, vfid) xe_sriov_pf_assert_vfid(gt_to_xe(gt), (vfid)) argument
xe_sriov_pf_helpers.h
25 #define xe_sriov_pf_assert_vfid(xe, vfid) \ argument
26 xe_assert((xe), (vfid) <= xe_sriov_pf_get_totalvfs(xe))
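
The assert macro above documents the valid VFID range in this driver: 0 (the PF itself) through totalvfs inclusive. The same check as a plain predicate, for illustration:

	static bool example_vfid_in_range(struct xe_device *xe, unsigned int vfid)
	{
		/* vfid 0 denotes the PF; VFs are numbered 1..totalvfs */
		return vfid <= xe_sriov_pf_get_totalvfs(xe);
	}
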
/drivers/net/ethernet/marvell/octeon_ep/
octep_ctrl_net.c
44 u16 sz, int vfid) in init_send_req() argument
53 if (vfid != OCTEP_CTRL_NET_INVALID_VFID) { in init_send_req()
55 msg->hdr.s.vf_idx = vfid; in init_send_req()
149 init_send_req(&d.msg, req, state_sz, vfid); in octep_ctrl_net_set_link_status()
164 init_send_req(&d.msg, req, state_sz, vfid); in octep_ctrl_net_set_rx_state()
179 init_send_req(&d.msg, req, mac_sz, vfid); in octep_ctrl_net_get_mac_addr()
197 init_send_req(&d.msg, req, mac_sz, vfid); in octep_ctrl_net_set_mac_addr()
212 init_send_req(&d.msg, req, mtu_sz, vfid); in octep_ctrl_net_get_mtu()
229 init_send_req(&d.msg, req, mtu_sz, vfid); in octep_ctrl_net_set_mtu()
246 init_send_req(&d.msg, req, 0, vfid); in octep_ctrl_net_get_if_stats()
[all …]
octep_ctrl_net.h
262 int octep_ctrl_net_get_link_status(struct octep_device *oct, int vfid);
287 int octep_ctrl_net_set_rx_state(struct octep_device *oct, int vfid, bool up,
311 int octep_ctrl_net_set_mac_addr(struct octep_device *oct, int vfid, u8 *addr,
322 int octep_ctrl_net_get_mtu(struct octep_device *oct, int vfid);
334 int octep_ctrl_net_set_mtu(struct octep_device *oct, int vfid, int mtu,
347 int octep_ctrl_net_get_if_stats(struct octep_device *oct, int vfid,
360 int octep_ctrl_net_get_link_info(struct octep_device *oct, int vfid,
374 int vfid,
394 int octep_ctrl_net_get_info(struct octep_device *oct, int vfid,
405 int octep_ctrl_net_dev_remove(struct octep_device *oct, int vfid);
[all …]
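
In this driver every control-net request carries a vfid; per init_send_req above, OCTEP_CTRL_NET_INVALID_VFID means the request targets the PF itself rather than a VF. A sketch, assuming set_mtu's truncated prototype ends with a bool selecting whether to wait for the firmware reply, and that get_mtu returns the MTU or a negative error; example_set_vf_mtu is illustrative:

	static int example_set_vf_mtu(struct octep_device *oct, int vfid, int mtu)
	{
		int cur = octep_ctrl_net_get_mtu(oct, vfid);

		if (cur < 0)
			return cur;	/* firmware query failed */
		if (cur == mtu)
			return 0;	/* nothing to do */
		return octep_ctrl_net_set_mtu(oct, vfid, mtu, true);
	}
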
/drivers/net/ethernet/qlogic/qed/
qed_sriov.c
939 u16 vfid, in qed_iov_set_link() argument
2980 u8 vfid, in qed_iov_pre_update_vport() argument
3733 vfid); in qed_iov_execute_vf_flr_cleanup()
3740 ack_vfs[vfid / 32] |= BIT((vfid % 32)); in qed_iov_execute_vf_flr_cleanup()
3790 u8 vfid; in qed_iov_mark_vf_flr() local
3797 if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) { in qed_iov_mark_vf_flr()
4183 vfid); in qed_iov_bulletin_set_mac()
4189 vfid); in qed_iov_bulletin_set_mac()
4254 vfid); in qed_iov_bulletin_set_udp_ports()
4524 u16 vfid, in qed_sriov_enable_qid_config() argument
[all …]
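
qed tracks FLR state in u32 bitmaps indexed by VFID: VF n lives at bit (n % 32) of word (n / 32), as the mark/ack snippets above show. A self-contained illustration of that indexing (function names are illustrative, not qed code):

	#include <linux/bits.h>

	static bool example_vf_flr_pending(const u32 *disabled_vfs, u8 vfid)
	{
		return disabled_vfs[vfid / 32] & BIT(vfid % 32);
	}

	static void example_vf_flr_ack(u32 *ack_vfs, u8 vfid)
	{
		ack_vfs[vfid / 32] |= BIT(vfid % 32);
	}
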
/drivers/net/ethernet/hisilicon/hns3/hns3vf/
hclgevf_trace.h
23 __field(u8, vfid)
31 __entry->vfid = req->dest_vfid;
41 __get_str(pciname), __get_str(devname), __entry->vfid,
54 __field(u8, vfid)
63 __entry->vfid = req->mbx_src_vfid;
74 __get_str(pciname), __get_str(devname), __entry->vfid,
/drivers/net/ethernet/hisilicon/hns3/hns3pf/
hclge_trace.h
24 __field(u8, vfid)
33 __entry->vfid = req->mbx_src_vfid;
44 __get_str(pciname), __get_str(devname), __entry->vfid,
57 __field(u8, vfid)
65 __entry->vfid = req->dest_vfid;
75 __get_str(pciname), __get_str(devname), __entry->vfid,
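
Both hns3 trace headers record the mailbox peer's VFID (mbx_src_vfid or dest_vfid) in the trace entry. A condensed sketch of that TRACE_EVENT pattern with illustrative names, not the hns3 definitions (a real trace header also needs the TRACE_SYSTEM/include boilerplate):

	#include <linux/tracepoint.h>

	TRACE_EVENT(example_mbx_vf,
		TP_PROTO(u8 vfid, u16 code),
		TP_ARGS(vfid, code),
		TP_STRUCT__entry(
			__field(u8, vfid)	/* which VF the message involves */
			__field(u16, code)	/* mailbox opcode */
		),
		TP_fast_assign(
			__entry->vfid = vfid;
			__entry->code = code;
		),
		TP_printk("vfid=%u code=%u", __entry->vfid, __entry->code)
	);
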
/drivers/crypto/cavium/cpt/
cptvf_mbox.c
34 cptvf->vfid = mbx.data; in cptvf_handle_mbox_intr()
35 dev_dbg(&cptvf->pdev->dev, "Received VFID %d\n", cptvf->vfid); in cptvf_handle_mbox_intr()
42 cptvf->vfid, ((mbx.data == SE_TYPES) ? "SE" : "AE"), in cptvf_handle_mbox_intr()
77 (mbx->msg & 0xFF), cptvf->vfid); in cptvf_send_msg_to_pf_timeout()
cptvf_main.c
524 intr, cptvf->vfid); in cptvf_misc_intr_handler()
532 intr, cptvf->vfid); in cptvf_misc_intr_handler()
536 intr, cptvf->vfid); in cptvf_misc_intr_handler()
540 intr, cptvf->vfid); in cptvf_misc_intr_handler()
544 intr, cptvf->vfid); in cptvf_misc_intr_handler()
547 cptvf->vfid); in cptvf_misc_intr_handler()
602 cptvf->vfid); in cptvf_done_intr_handler()
619 cptvf->vfid); in cptvf_set_irq_affinity()
623 cpu = cptvf->vfid % num_online_cpus(); in cptvf_set_irq_affinity()
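
The last snippet shows how each CPT VF spreads its interrupt load: the IRQ is pinned to a CPU chosen by vfid modulo the number of online CPUs, so consecutive VFs land on different cores. The selection in isolation (illustrative):

	#include <linux/cpumask.h>

	static unsigned int example_vf_irq_cpu(unsigned int vfid)
	{
		return vfid % num_online_cpus();
	}
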
/drivers/crypto/marvell/octeontx/
otx_cptvf_mbox.c
104 cptvf->vfid = mbx.data; in otx_cptvf_handle_mbox_intr()
105 dev_dbg(&cptvf->pdev->dev, "Received VFID %d\n", cptvf->vfid); in otx_cptvf_handle_mbox_intr()
111 cptvf->vfid, in otx_cptvf_handle_mbox_intr()
148 mbx->msg, cptvf->vfid); in cptvf_send_msg_to_pf_timeout()
/drivers/net/ethernet/intel/ice/
ice_vf_mbx.c
22 ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, in ice_aq_send_msg_to_vf() argument
31 cmd->id = cpu_to_le32(vfid); in ice_aq_send_msg_to_vf()
225 u16 vfid = le16_to_cpu(event->desc.retval); in ice_mbx_vf_dec_trig_e830() local
227 wr32(hw, E830_MBX_VF_DEC_TRIG(vfid), 1); in ice_mbx_vf_dec_trig_e830()
ice_vf_mbx.h
18 ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
34 u16 __always_unused vfid, u32 __always_unused v_opcode, in ice_aq_send_msg_to_vf() argument
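
ice_aq_send_msg_to_vf addresses the admin-queue message by VFID (cmd->id in the .c snippet above); the header also provides an __always_unused stub for builds without SR-IOV. A sketch of a PF replying to a VF, assuming the truncated prototype continues with the payload pointer, its length, and a command-detail pointer; example_reply_to_vf and the zero retval are illustrative:

	static int example_reply_to_vf(struct ice_hw *hw, u16 vfid, u32 opcode)
	{
		/* zero retval signals success; no payload in this reply */
		return ice_aq_send_msg_to_vf(hw, vfid, opcode, 0, NULL, 0, NULL);
	}
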

Completed in 909 milliseconds
