Lines Matching refs:s — cross-reference hits for the parser state s (struct parser_exec_state) in the Intel GVT-g command parser, drivers/gpu/drm/i915/gvt/cmd_parser.c. Each entry gives the source line number, the matching code, and, where known, the enclosing function and the role of s on that line (e.g. argument).

384 typedef int (*parser_cmd_handler)(struct parser_exec_state *s);
396 FIELD_GET(GENMASK(end, start), cmd_val(s, dword))
521 #define gmadr_dw_number(s) \ argument
522 (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
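The ">> 2" above turns the graphics-address width in bytes into a DWORD count. A minimal user-space sketch of that arithmetic (the function name and values are illustrative, not from the driver):

#include <assert.h>

/* bytes / 4 = number of 32-bit DWORDs a graphics address occupies in a
 * command, mirroring gmadr_dw_number()'s ">> 2" */
static int gmadr_dwords(int gmadr_bytes_in_cmd)
{
	return gmadr_bytes_in_cmd >> 2;
}

int main(void)
{
	assert(gmadr_dwords(4) == 1);	/* 32-bit address: one DWORD */
	assert(gmadr_dwords(8) == 2);	/* 64-bit address: two DWORDs */
	return 0;
}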
718 static inline u32 *cmd_ptr(struct parser_exec_state *s, int index) in cmd_ptr() argument
720 return s->ip_va + (index << 2); in cmd_ptr()
723 static inline u32 cmd_val(struct parser_exec_state *s, int index) in cmd_val() argument
725 return *cmd_ptr(s, index); in cmd_val()
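cmd_ptr() and cmd_val() index the mapped command stream in DWORDs: (index << 2) is a byte offset, four bytes per DWORD. A stand-alone sketch with a hypothetical struct that models only ip_va:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for parser_exec_state; only ip_va is modeled. */
struct exec_state_sketch {
	void *ip_va;	/* host virtual address of the current command */
};

/* index-th DWORD of the current command: (index << 2) is a byte offset */
static uint32_t *sketch_cmd_ptr(struct exec_state_sketch *s, int index)
{
	return (uint32_t *)((char *)s->ip_va + (index << 2));
}

static uint32_t sketch_cmd_val(struct exec_state_sketch *s, int index)
{
	return *sketch_cmd_ptr(s, index);
}

int main(void)
{
	uint32_t cmd[3] = { 0x11000001, 0x00002358, 0xcafef00d };
	struct exec_state_sketch s = { .ip_va = cmd };

	printf("header %08" PRIx32 ", dword 2 %08" PRIx32 "\n",
	       sketch_cmd_val(&s, 0), sketch_cmd_val(&s, 2));
	return 0;
}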
728 static inline bool is_init_ctx(struct parser_exec_state *s) in is_init_ctx() argument
730 return (s->buf_type == RING_BUFFER_CTX && s->is_init_ctx); in is_init_ctx()
733 static void parser_exec_state_dump(struct parser_exec_state *s) in parser_exec_state_dump() argument
740 s->vgpu->id, s->engine->name, in parser_exec_state_dump()
741 s->ring_start, s->ring_start + s->ring_size, in parser_exec_state_dump()
742 s->ring_head, s->ring_tail); in parser_exec_state_dump()
745 s->buf_type == RING_BUFFER_INSTRUCTION ? in parser_exec_state_dump()
746 "RING_BUFFER" : ((s->buf_type == RING_BUFFER_CTX) ? in parser_exec_state_dump()
748 s->buf_addr_type == GTT_BUFFER ? in parser_exec_state_dump()
749 "GTT" : "PPGTT", s->ip_gma); in parser_exec_state_dump()
751 if (s->ip_va == NULL) { in parser_exec_state_dump()
757 s->ip_va, cmd_val(s, 0), cmd_val(s, 1), in parser_exec_state_dump()
758 cmd_val(s, 2), cmd_val(s, 3)); in parser_exec_state_dump()
760 print_opcode(cmd_val(s, 0), s->engine); in parser_exec_state_dump()
762 s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12); in parser_exec_state_dump()
765 gvt_dbg_cmd("ip_va=%p: ", s->ip_va); in parser_exec_state_dump()
767 gvt_dbg_cmd("%08x ", cmd_val(s, i)); in parser_exec_state_dump()
770 s->ip_va += 8 * sizeof(u32); in parser_exec_state_dump()
775 static inline void update_ip_va(struct parser_exec_state *s) in update_ip_va() argument
779 if (WARN_ON(s->ring_head == s->ring_tail)) in update_ip_va()
782 if (s->buf_type == RING_BUFFER_INSTRUCTION || in update_ip_va()
783 s->buf_type == RING_BUFFER_CTX) { in update_ip_va()
784 unsigned long ring_top = s->ring_start + s->ring_size; in update_ip_va()
786 if (s->ring_head > s->ring_tail) { in update_ip_va()
787 if (s->ip_gma >= s->ring_head && s->ip_gma < ring_top) in update_ip_va()
788 len = (s->ip_gma - s->ring_head); in update_ip_va()
789 else if (s->ip_gma >= s->ring_start && in update_ip_va()
790 s->ip_gma <= s->ring_tail) in update_ip_va()
791 len = (ring_top - s->ring_head) + in update_ip_va()
792 (s->ip_gma - s->ring_start); in update_ip_va()
794 len = (s->ip_gma - s->ring_head); in update_ip_va()
796 s->ip_va = s->rb_va + len; in update_ip_va()
798 s->ip_va = s->ret_bb_va; in update_ip_va()
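When the ring has wrapped (head > tail), update_ip_va() must translate ip_gma into a linear offset from ring_head in two pieces. A runnable sketch of just that arithmetic, with made-up addresses:

#include <assert.h>

/* Map a graphics address inside a circular ring to a linear offset
 * from ring_head, covering the wrapped case handled above.
 * All names and addresses are illustrative. */
static unsigned long ring_offset(unsigned long ring_start,
				 unsigned long ring_size,
				 unsigned long ring_head,
				 unsigned long ip_gma)
{
	unsigned long ring_top = ring_start + ring_size;

	if (ip_gma >= ring_head)	/* between head and the ring top */
		return ip_gma - ring_head;
	/* wrapped: distance up to the top, then from the ring start */
	return (ring_top - ring_head) + (ip_gma - ring_start);
}

int main(void)
{
	/* 4 KiB ring at 0x1000 with head at 0x1f00 */
	assert(ring_offset(0x1000, 0x1000, 0x1f00, 0x1f80) == 0x80);
	assert(ring_offset(0x1000, 0x1000, 0x1f00, 0x1010) == 0x110);
	return 0;
}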
802 static inline int ip_gma_set(struct parser_exec_state *s, in ip_gma_set() argument
807 s->ip_gma = ip_gma; in ip_gma_set()
808 update_ip_va(s); in ip_gma_set()
812 static inline int ip_gma_advance(struct parser_exec_state *s, in ip_gma_advance() argument
815 s->ip_gma += (dw_len << 2); in ip_gma_advance()
817 if (s->buf_type == RING_BUFFER_INSTRUCTION) { in ip_gma_advance()
818 if (s->ip_gma >= s->ring_start + s->ring_size) in ip_gma_advance()
819 s->ip_gma -= s->ring_size; in ip_gma_advance()
820 update_ip_va(s); in ip_gma_advance()
822 s->ip_va += (dw_len << 2); in ip_gma_advance()
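ip_gma_advance() steps by a DWORD count and wraps back into the ring with a single subtraction, which assumes the step is smaller than the ring. A sketch under that assumption:

#include <assert.h>

static unsigned long advance_ip(unsigned long ip_gma, int dw_len,
				unsigned long ring_start,
				unsigned long ring_size)
{
	ip_gma += (unsigned long)dw_len << 2;	/* DWORDs to bytes */
	if (ip_gma >= ring_start + ring_size)
		ip_gma -= ring_size;		/* one wrap is enough */
	return ip_gma;
}

int main(void)
{
	/* 4 KiB ring at 0x1000: a 4-DWORD step from 0x1ff8 wraps to 0x1008 */
	assert(advance_ip(0x1ff8, 4, 0x1000, 0x1000) == 0x1008);
	return 0;
}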
837 static inline int cmd_length(struct parser_exec_state *s) in cmd_length() argument
839 return get_cmd_length(s->info, cmd_val(s, 0)); in cmd_length()
843 #define patch_value(s, addr, val) do { \ argument
854 struct parser_exec_state *s) in is_cmd_update_pdps() argument
856 u32 base = s->workload->engine->mmio_base; in is_cmd_update_pdps()
860 static int cmd_pdp_mmio_update_handler(struct parser_exec_state *s, in cmd_pdp_mmio_update_handler() argument
863 struct intel_vgpu *vgpu = s->vgpu; in cmd_pdp_mmio_update_handler()
864 struct intel_vgpu_mm *shadow_mm = s->workload->shadow_mm; in cmd_pdp_mmio_update_handler()
870 pdps[0] = (u64)cmd_val(s, 2) << 32; in cmd_pdp_mmio_update_handler()
871 pdps[0] |= cmd_val(s, 4); in cmd_pdp_mmio_update_handler()
880 &s->workload->lri_shadow_mm); in cmd_pdp_mmio_update_handler()
881 *cmd_ptr(s, 2) = upper_32_bits(mm->ppgtt_mm.shadow_pdps[0]); in cmd_pdp_mmio_update_handler()
882 *cmd_ptr(s, 4) = lower_32_bits(mm->ppgtt_mm.shadow_pdps[0]); in cmd_pdp_mmio_update_handler()
894 static int cmd_reg_handler(struct parser_exec_state *s, in cmd_reg_handler() argument
897 struct intel_vgpu *vgpu = s->vgpu; in cmd_reg_handler()
908 if (is_init_ctx(s)) { in cmd_reg_handler()
957 vreg = &vgpu_vreg(s->vgpu, offset); in cmd_reg_handler()
959 if (is_cmd_update_pdps(offset, s) && in cmd_reg_handler()
960 cmd_pdp_mmio_update_handler(s, offset, index)) in cmd_reg_handler()
966 patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE); in cmd_reg_handler()
970 *vreg = cmd_val(s, index + 1); in cmd_reg_handler()
978 cmdval = cmd_val(s, index + 1); in cmd_reg_handler()
988 ret = mmio_info->write(s->vgpu, offset, in cmd_reg_handler()
1000 patch_value(s, cmd_ptr(s, index+1), cmdval_new); in cmd_reg_handler()
1016 if (GRAPHICS_VER(s->engine->i915) == 9 && in cmd_reg_handler()
1019 intel_gvt_read_gpa(s->vgpu, in cmd_reg_handler()
1020 s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4); in cmd_reg_handler()
1023 u32 data = cmd_val(s, index + 1); in cmd_reg_handler()
1025 if (intel_gvt_mmio_has_mode_mask(s->vgpu->gvt, offset)) in cmd_reg_handler()
1036 #define cmd_reg(s, i) \ argument
1037 (cmd_val(s, i) & GENMASK(22, 2))
1039 #define cmd_reg_inhibit(s, i) \ argument
1040 (cmd_val(s, i) & GENMASK(22, 18))
1042 #define cmd_gma(s, i) \ argument
1043 (cmd_val(s, i) & GENMASK(31, 2))
1045 #define cmd_gma_hi(s, i) \ argument
1046 (cmd_val(s, i) & GENMASK(15, 0))
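The four helpers above mask fields in place with GENMASK(h, l), the contiguous mask with bits l..h set; nothing is shifted down. A worked example with a local GENMASK as a user-space stand-in for the kernel macro:

#include <assert.h>
#include <stdint.h>

/* User-space stand-in for the kernel's GENMASK(): bits l..h set */
#define GENMASK_U32(h, l) \
	((~UINT32_C(0) << (l)) & (~UINT32_C(0) >> (31 - (h))))

int main(void)
{
	uint32_t dword = 0xffc02359;

	/* cmd_reg(): the MMIO offset lives in bits 22:2, kept in place */
	assert((dword & GENMASK_U32(22, 2)) == 0x00402358);
	/* cmd_gma_hi(): upper address bits live in bits 15:0 */
	assert((dword & GENMASK_U32(15, 0)) == 0x2359);
	return 0;
}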
1048 static int cmd_handler_lri(struct parser_exec_state *s) in cmd_handler_lri() argument
1051 int cmd_len = cmd_length(s); in cmd_handler_lri()
1054 if (IS_BROADWELL(s->engine->i915) && s->engine->id != RCS0) { in cmd_handler_lri()
1055 if (s->engine->id == BCS0 && in cmd_handler_lri()
1056 cmd_reg(s, i) == i915_mmio_reg_offset(DERRMR)) in cmd_handler_lri()
1059 ret |= cmd_reg_inhibit(s, i) ? -EBADRQC : 0; in cmd_handler_lri()
1063 ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri"); in cmd_handler_lri()
1070 static int cmd_handler_lrr(struct parser_exec_state *s) in cmd_handler_lrr() argument
1073 int cmd_len = cmd_length(s); in cmd_handler_lrr()
1076 if (IS_BROADWELL(s->engine->i915)) in cmd_handler_lrr()
1077 ret |= ((cmd_reg_inhibit(s, i) || in cmd_handler_lrr()
1078 (cmd_reg_inhibit(s, i + 1)))) ? in cmd_handler_lrr()
1082 ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrr-src"); in cmd_handler_lrr()
1085 ret |= cmd_reg_handler(s, cmd_reg(s, i + 1), i, "lrr-dst"); in cmd_handler_lrr()
1092 static inline int cmd_address_audit(struct parser_exec_state *s,
1095 static int cmd_handler_lrm(struct parser_exec_state *s) in cmd_handler_lrm() argument
1097 struct intel_gvt *gvt = s->vgpu->gvt; in cmd_handler_lrm()
1101 int cmd_len = cmd_length(s); in cmd_handler_lrm()
1104 if (IS_BROADWELL(s->engine->i915)) in cmd_handler_lrm()
1105 ret |= (cmd_reg_inhibit(s, i)) ? -EBADRQC : 0; in cmd_handler_lrm()
1108 ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrm"); in cmd_handler_lrm()
1111 if (cmd_val(s, 0) & (1 << 22)) { in cmd_handler_lrm()
1112 gma = cmd_gma(s, i + 1); in cmd_handler_lrm()
1114 gma |= (cmd_gma_hi(s, i + 2)) << 32; in cmd_handler_lrm()
1115 ret |= cmd_address_audit(s, gma, sizeof(u32), false); in cmd_handler_lrm()
1119 i += gmadr_dw_number(s) + 1; in cmd_handler_lrm()
1124 static int cmd_handler_srm(struct parser_exec_state *s) in cmd_handler_srm() argument
1126 int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd; in cmd_handler_srm()
1129 int cmd_len = cmd_length(s); in cmd_handler_srm()
1132 ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "srm"); in cmd_handler_srm()
1135 if (cmd_val(s, 0) & (1 << 22)) { in cmd_handler_srm()
1136 gma = cmd_gma(s, i + 1); in cmd_handler_srm()
1138 gma |= (cmd_gma_hi(s, i + 2)) << 32; in cmd_handler_srm()
1139 ret |= cmd_address_audit(s, gma, sizeof(u32), false); in cmd_handler_srm()
1143 i += gmadr_dw_number(s) + 1; in cmd_handler_srm()
1182 static int cmd_handler_pipe_control(struct parser_exec_state *s) in cmd_handler_pipe_control() argument
1184 int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd; in cmd_handler_pipe_control()
1191 post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14; in cmd_handler_pipe_control()
1194 if (cmd_val(s, 1) & PIPE_CONTROL_MMIO_WRITE) in cmd_handler_pipe_control()
1195 ret = cmd_reg_handler(s, cmd_reg(s, 2), 1, "pipe_ctrl"); in cmd_handler_pipe_control()
1199 ret = cmd_reg_handler(s, 0x2350, 1, "pipe_ctrl"); in cmd_handler_pipe_control()
1201 ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl"); in cmd_handler_pipe_control()
1204 if ((cmd_val(s, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB)) { in cmd_handler_pipe_control()
1205 gma = cmd_val(s, 2) & GENMASK(31, 3); in cmd_handler_pipe_control()
1207 gma |= (cmd_gma_hi(s, 3)) << 32; in cmd_handler_pipe_control()
1209 if (cmd_val(s, 1) & (1 << 21)) in cmd_handler_pipe_control()
1211 ret |= cmd_address_audit(s, gma, sizeof(u64), in cmd_handler_pipe_control()
1216 hws_pga = s->vgpu->hws_pga[s->engine->id]; in cmd_handler_pipe_control()
1218 patch_value(s, cmd_ptr(s, 2), gma); in cmd_handler_pipe_control()
1219 val = cmd_val(s, 1) & (~(1 << 21)); in cmd_handler_pipe_control()
1220 patch_value(s, cmd_ptr(s, 1), val); in cmd_handler_pipe_control()
1229 if (cmd_val(s, 1) & PIPE_CONTROL_NOTIFY) in cmd_handler_pipe_control()
1230 set_bit(cmd_interrupt_events[s->engine->id].pipe_control_notify, in cmd_handler_pipe_control()
1231 s->workload->pending_events); in cmd_handler_pipe_control()
1235 static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s) in cmd_handler_mi_user_interrupt() argument
1237 set_bit(cmd_interrupt_events[s->engine->id].mi_user_interrupt, in cmd_handler_mi_user_interrupt()
1238 s->workload->pending_events); in cmd_handler_mi_user_interrupt()
1239 patch_value(s, cmd_ptr(s, 0), MI_NOOP); in cmd_handler_mi_user_interrupt()
1243 static int cmd_advance_default(struct parser_exec_state *s) in cmd_advance_default() argument
1245 return ip_gma_advance(s, cmd_length(s)); in cmd_advance_default()
1248 static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state *s) in cmd_handler_mi_batch_buffer_end() argument
1252 if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) { in cmd_handler_mi_batch_buffer_end()
1253 s->buf_type = BATCH_BUFFER_INSTRUCTION; in cmd_handler_mi_batch_buffer_end()
1254 ret = ip_gma_set(s, s->ret_ip_gma_bb); in cmd_handler_mi_batch_buffer_end()
1255 s->buf_addr_type = s->saved_buf_addr_type; in cmd_handler_mi_batch_buffer_end()
1256 } else if (s->buf_type == RING_BUFFER_CTX) { in cmd_handler_mi_batch_buffer_end()
1257 ret = ip_gma_set(s, s->ring_tail); in cmd_handler_mi_batch_buffer_end()
1259 s->buf_type = RING_BUFFER_INSTRUCTION; in cmd_handler_mi_batch_buffer_end()
1260 s->buf_addr_type = GTT_BUFFER; in cmd_handler_mi_batch_buffer_end()
1261 if (s->ret_ip_gma_ring >= s->ring_start + s->ring_size) in cmd_handler_mi_batch_buffer_end()
1262 s->ret_ip_gma_ring -= s->ring_size; in cmd_handler_mi_batch_buffer_end()
1263 ret = ip_gma_set(s, s->ret_ip_gma_ring); in cmd_handler_mi_batch_buffer_end()
1287 static int gen8_decode_mi_display_flip(struct parser_exec_state *s, in gen8_decode_mi_display_flip() argument
1290 struct drm_i915_private *dev_priv = s->engine->i915; in gen8_decode_mi_display_flip()
1303 dword0 = cmd_val(s, 0); in gen8_decode_mi_display_flip()
1304 dword1 = cmd_val(s, 1); in gen8_decode_mi_display_flip()
1305 dword2 = cmd_val(s, 2); in gen8_decode_mi_display_flip()
1334 static int skl_decode_mi_display_flip(struct parser_exec_state *s, in skl_decode_mi_display_flip() argument
1337 struct drm_i915_private *dev_priv = s->engine->i915; in skl_decode_mi_display_flip()
1339 struct intel_vgpu *vgpu = s->vgpu; in skl_decode_mi_display_flip()
1340 u32 dword0 = cmd_val(s, 0); in skl_decode_mi_display_flip()
1341 u32 dword1 = cmd_val(s, 1); in skl_decode_mi_display_flip()
1342 u32 dword2 = cmd_val(s, 2); in skl_decode_mi_display_flip()
1394 static int gen8_check_mi_display_flip(struct parser_exec_state *s, in gen8_check_mi_display_flip() argument
1402 if (GRAPHICS_VER(s->engine->i915) >= 9) { in gen8_check_mi_display_flip()
1403 stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0); in gen8_check_mi_display_flip()
1404 tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) & in gen8_check_mi_display_flip()
1407 stride = (vgpu_vreg_t(s->vgpu, info->stride_reg) & in gen8_check_mi_display_flip()
1409 tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10; in gen8_check_mi_display_flip()
1422 struct parser_exec_state *s, in gen8_update_plane_mmio_from_mi_display_flip() argument
1425 struct drm_i915_private *dev_priv = s->engine->i915; in gen8_update_plane_mmio_from_mi_display_flip()
1427 struct intel_vgpu *vgpu = s->vgpu; in gen8_update_plane_mmio_from_mi_display_flip()
1454 static int decode_mi_display_flip(struct parser_exec_state *s, in decode_mi_display_flip() argument
1457 if (IS_BROADWELL(s->engine->i915)) in decode_mi_display_flip()
1458 return gen8_decode_mi_display_flip(s, info); in decode_mi_display_flip()
1459 if (GRAPHICS_VER(s->engine->i915) >= 9) in decode_mi_display_flip()
1460 return skl_decode_mi_display_flip(s, info); in decode_mi_display_flip()
1465 static int check_mi_display_flip(struct parser_exec_state *s, in check_mi_display_flip() argument
1468 return gen8_check_mi_display_flip(s, info); in check_mi_display_flip()
1472 struct parser_exec_state *s, in update_plane_mmio_from_mi_display_flip() argument
1475 return gen8_update_plane_mmio_from_mi_display_flip(s, info); in update_plane_mmio_from_mi_display_flip()
1478 static int cmd_handler_mi_display_flip(struct parser_exec_state *s) in cmd_handler_mi_display_flip() argument
1481 struct intel_vgpu *vgpu = s->vgpu; in cmd_handler_mi_display_flip()
1484 int len = cmd_length(s); in cmd_handler_mi_display_flip()
1490 ret = gvt_check_valid_cmd_length(cmd_length(s), in cmd_handler_mi_display_flip()
1495 ret = decode_mi_display_flip(s, &info); in cmd_handler_mi_display_flip()
1501 ret = check_mi_display_flip(s, &info); in cmd_handler_mi_display_flip()
1507 ret = update_plane_mmio_from_mi_display_flip(s, &info); in cmd_handler_mi_display_flip()
1514 patch_value(s, cmd_ptr(s, i), MI_NOOP); in cmd_handler_mi_display_flip()
1528 static int cmd_handler_mi_wait_for_event(struct parser_exec_state *s) in cmd_handler_mi_wait_for_event() argument
1530 u32 cmd = cmd_val(s, 0); in cmd_handler_mi_wait_for_event()
1535 patch_value(s, cmd_ptr(s, 0), MI_NOOP); in cmd_handler_mi_wait_for_event()
1539 static unsigned long get_gma_bb_from_cmd(struct parser_exec_state *s, int index) in get_gma_bb_from_cmd() argument
1543 struct intel_vgpu *vgpu = s->vgpu; in get_gma_bb_from_cmd()
1551 gma_low = cmd_val(s, index) & BATCH_BUFFER_ADDR_MASK; in get_gma_bb_from_cmd()
1555 gma_high = cmd_val(s, index + 1) & BATCH_BUFFER_ADDR_HIGH_MASK; in get_gma_bb_from_cmd()
1561 static inline int cmd_address_audit(struct parser_exec_state *s, in cmd_address_audit() argument
1564 struct intel_vgpu *vgpu = s->vgpu; in cmd_address_audit()
1571 s->info->name); in cmd_address_audit()
1589 s->info->name, guest_gma, op_size); in cmd_address_audit()
1592 for (i = 0; i < cmd_length(s); i++) { in cmd_address_audit()
1594 pr_err("\n%08x ", cmd_val(s, i)); in cmd_address_audit()
1596 pr_err("%08x ", cmd_val(s, i)); in cmd_address_audit()
1607 static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s) in cmd_handler_mi_store_data_imm() argument
1609 int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd; in cmd_handler_mi_store_data_imm()
1610 int op_size = (cmd_length(s) - 3) * sizeof(u32); in cmd_handler_mi_store_data_imm()
1611 int core_id = (cmd_val(s, 2) & (1 << 0)) ? 1 : 0; in cmd_handler_mi_store_data_imm()
1617 if (!(cmd_val(s, 0) & (1 << 22))) in cmd_handler_mi_store_data_imm()
1623 ret = gvt_check_valid_cmd_length(cmd_length(s), in cmd_handler_mi_store_data_imm()
1628 gma = cmd_val(s, 2) & GENMASK(31, 2); in cmd_handler_mi_store_data_imm()
1631 gma_low = cmd_val(s, 1) & GENMASK(31, 2); in cmd_handler_mi_store_data_imm()
1632 gma_high = cmd_val(s, 2) & GENMASK(15, 0); in cmd_handler_mi_store_data_imm()
1634 core_id = (cmd_val(s, 1) & (1 << 0)) ? 1 : 0; in cmd_handler_mi_store_data_imm()
1636 ret = cmd_address_audit(s, gma + op_size * core_id, op_size, false); in cmd_handler_mi_store_data_imm()
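A worked example of the MI_STORE_DATA_IMM sizing above, assuming the 64-bit address form (one header DWORD plus two address DWORDs): the payload is everything after those three, so op_size = (cmd_length - 3) * 4 bytes. Values here are illustrative.

#include <assert.h>

int main(void)
{
	int cmd_length = 5;			/* header + 64-bit address + 2 data DWORDs */
	int op_size = (cmd_length - 3) * 4;	/* sizeof(u32) == 4 */

	assert(op_size == 8);			/* two data DWORDs to audit */
	return 0;
}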
1640 static inline int unexpected_cmd(struct parser_exec_state *s) in unexpected_cmd() argument
1642 struct intel_vgpu *vgpu = s->vgpu; in unexpected_cmd()
1644 gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name); in unexpected_cmd()
1649 static int cmd_handler_mi_semaphore_wait(struct parser_exec_state *s) in cmd_handler_mi_semaphore_wait() argument
1651 return unexpected_cmd(s); in cmd_handler_mi_semaphore_wait()
1654 static int cmd_handler_mi_report_perf_count(struct parser_exec_state *s) in cmd_handler_mi_report_perf_count() argument
1656 return unexpected_cmd(s); in cmd_handler_mi_report_perf_count()
1659 static int cmd_handler_mi_op_2e(struct parser_exec_state *s) in cmd_handler_mi_op_2e() argument
1661 return unexpected_cmd(s); in cmd_handler_mi_op_2e()
1664 static int cmd_handler_mi_op_2f(struct parser_exec_state *s) in cmd_handler_mi_op_2f() argument
1666 int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd; in cmd_handler_mi_op_2f()
1667 int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) * in cmd_handler_mi_op_2f()
1673 if (!(cmd_val(s, 0) & (1 << 22))) in cmd_handler_mi_op_2f()
1677 if (cmd_val(s, 0) & BIT(18)) in cmd_handler_mi_op_2f()
1679 ret = gvt_check_valid_cmd_length(cmd_length(s), in cmd_handler_mi_op_2f()
1684 gma = cmd_val(s, 1) & GENMASK(31, 2); in cmd_handler_mi_op_2f()
1686 gma_high = cmd_val(s, 2) & GENMASK(15, 0); in cmd_handler_mi_op_2f()
1689 ret = cmd_address_audit(s, gma, op_size, false); in cmd_handler_mi_op_2f()
1693 static int cmd_handler_mi_store_data_index(struct parser_exec_state *s) in cmd_handler_mi_store_data_index() argument
1695 return unexpected_cmd(s); in cmd_handler_mi_store_data_index()
1698 static int cmd_handler_mi_clflush(struct parser_exec_state *s) in cmd_handler_mi_clflush() argument
1700 return unexpected_cmd(s); in cmd_handler_mi_clflush()
1704 struct parser_exec_state *s) in cmd_handler_mi_conditional_batch_buffer_end() argument
1706 return unexpected_cmd(s); in cmd_handler_mi_conditional_batch_buffer_end()
1709 static int cmd_handler_mi_update_gtt(struct parser_exec_state *s) in cmd_handler_mi_update_gtt() argument
1711 return unexpected_cmd(s); in cmd_handler_mi_update_gtt()
1714 static int cmd_handler_mi_flush_dw(struct parser_exec_state *s) in cmd_handler_mi_flush_dw() argument
1716 int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd; in cmd_handler_mi_flush_dw()
1723 ret = gvt_check_valid_cmd_length(cmd_length(s), in cmd_handler_mi_flush_dw()
1727 ret = gvt_check_valid_cmd_length(cmd_length(s), in cmd_handler_mi_flush_dw()
1733 if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) { in cmd_handler_mi_flush_dw()
1734 gma = cmd_val(s, 1) & GENMASK(31, 3); in cmd_handler_mi_flush_dw()
1736 gma |= (cmd_val(s, 2) & GENMASK(15, 0)) << 32; in cmd_handler_mi_flush_dw()
1738 if (cmd_val(s, 0) & (1 << 21)) in cmd_handler_mi_flush_dw()
1740 ret = cmd_address_audit(s, gma, sizeof(u64), index_mode); in cmd_handler_mi_flush_dw()
1744 hws_pga = s->vgpu->hws_pga[s->engine->id]; in cmd_handler_mi_flush_dw()
1746 patch_value(s, cmd_ptr(s, 1), gma); in cmd_handler_mi_flush_dw()
1747 val = cmd_val(s, 0) & (~(1 << 21)); in cmd_handler_mi_flush_dw()
1748 patch_value(s, cmd_ptr(s, 0), val); in cmd_handler_mi_flush_dw()
1752 if ((cmd_val(s, 0) & (1 << 8))) in cmd_handler_mi_flush_dw()
1753 set_bit(cmd_interrupt_events[s->engine->id].mi_flush_dw, in cmd_handler_mi_flush_dw()
1754 s->workload->pending_events); in cmd_handler_mi_flush_dw()
1758 static void addr_type_update_snb(struct parser_exec_state *s) in addr_type_update_snb() argument
1760 if ((s->buf_type == RING_BUFFER_INSTRUCTION) && in addr_type_update_snb()
1761 (BATCH_BUFFER_ADR_SPACE_BIT(cmd_val(s, 0)) == 1)) { in addr_type_update_snb()
1762 s->buf_addr_type = PPGTT_BUFFER; in addr_type_update_snb()
1799 static int batch_buffer_needs_scan(struct parser_exec_state *s) in batch_buffer_needs_scan() argument
1802 if (cmd_val(s, 0) & BIT(8) && in batch_buffer_needs_scan()
1803 !(s->vgpu->scan_nonprivbb & s->engine->mask)) in batch_buffer_needs_scan()
1814 static int find_bb_size(struct parser_exec_state *s, in find_bb_size() argument
1822 struct intel_vgpu *vgpu = s->vgpu; in find_bb_size()
1824 struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ? in find_bb_size()
1825 s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm; in find_bb_size()
1831 gma = get_gma_bb_from_cmd(s, 1); in find_bb_size()
1835 cmd = cmd_val(s, 0); in find_bb_size()
1836 info = get_cmd_info(s->vgpu->gvt, cmd, s->engine); in find_bb_size()
1839 cmd, get_opcode(cmd, s->engine), in find_bb_size()
1840 repr_addr_type(s->buf_addr_type), in find_bb_size()
1841 s->engine->name, s->workload); in find_bb_size()
1845 if (copy_gma_to_hva(s->vgpu, mm, in find_bb_size()
1848 info = get_cmd_info(s->vgpu->gvt, cmd, s->engine); in find_bb_size()
1851 cmd, get_opcode(cmd, s->engine), in find_bb_size()
1852 repr_addr_type(s->buf_addr_type), in find_bb_size()
1853 s->engine->name, s->workload); in find_bb_size()
1876 static int audit_bb_end(struct parser_exec_state *s, void *va) in audit_bb_end() argument
1878 struct intel_vgpu *vgpu = s->vgpu; in audit_bb_end()
1882 info = get_cmd_info(s->vgpu->gvt, cmd, s->engine); in audit_bb_end()
1885 cmd, get_opcode(cmd, s->engine), in audit_bb_end()
1886 repr_addr_type(s->buf_addr_type), in audit_bb_end()
1887 s->engine->name, s->workload); in audit_bb_end()
1899 static int perform_bb_shadow(struct parser_exec_state *s) in perform_bb_shadow() argument
1901 struct intel_vgpu *vgpu = s->vgpu; in perform_bb_shadow()
1907 struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ? in perform_bb_shadow()
1908 s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm; in perform_bb_shadow()
1912 gma = get_gma_bb_from_cmd(s, 1); in perform_bb_shadow()
1916 ret = find_bb_size(s, &bb_size, &bb_end_cmd_offset); in perform_bb_shadow()
1924 bb->ppgtt = (s->buf_addr_type == GTT_BUFFER) ? false : true; in perform_bb_shadow()
1941 bb->obj = i915_gem_object_create_shmem(s->engine->i915, in perform_bb_shadow()
1955 ret = copy_gma_to_hva(s->vgpu, mm, in perform_bb_shadow()
1964 ret = audit_bb_end(s, bb->va + start_offset + bb_end_cmd_offset); in perform_bb_shadow()
1970 list_add(&bb->list, &s->workload->shadow_bb); in perform_bb_shadow()
1972 bb->bb_start_cmd_va = s->ip_va; in perform_bb_shadow()
1974 if ((s->buf_type == BATCH_BUFFER_INSTRUCTION) && (!s->is_ctx_wa)) in perform_bb_shadow()
1975 bb->bb_offset = s->ip_va - s->rb_va; in perform_bb_shadow()
1987 s->ip_va = bb->va + start_offset; in perform_bb_shadow()
1988 s->ip_gma = gma; in perform_bb_shadow()
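perform_bb_shadow() copies the guest batch into a privately owned shadow object, audits its terminating command, then redirects ip_va/ip_gma into the shadow, so the guest cannot rewrite commands after they have been checked. A toy user-space model of that snapshot step (guest_bb and shadow_batch are illustrative names):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Copy the guest batch into a privately owned buffer and continue the
 * scan there, defeating modify-after-audit races. */
static uint32_t *shadow_batch(const uint32_t *guest_bb, size_t bb_size)
{
	uint32_t *shadow = malloc(bb_size);

	if (!shadow)
		return NULL;
	memcpy(shadow, guest_bb, bb_size);	/* snapshot of the guest BB */
	return shadow;				/* scanning resumes here */
}

int main(void)
{
	uint32_t guest_bb[2] = { 0x0 /* MI_NOOP */,
				 0x05000000 /* MI_BATCH_BUFFER_END */ };
	uint32_t *shadow = shadow_batch(guest_bb, sizeof(guest_bb));
	int ok = shadow != NULL;

	free(shadow);
	return ok ? 0 : 1;
}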
1999 static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s) in cmd_handler_mi_batch_buffer_start() argument
2003 struct intel_vgpu *vgpu = s->vgpu; in cmd_handler_mi_batch_buffer_start()
2005 if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) { in cmd_handler_mi_batch_buffer_start()
2010 second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1; in cmd_handler_mi_batch_buffer_start()
2011 if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) { in cmd_handler_mi_batch_buffer_start()
2016 s->saved_buf_addr_type = s->buf_addr_type; in cmd_handler_mi_batch_buffer_start()
2017 addr_type_update_snb(s); in cmd_handler_mi_batch_buffer_start()
2018 if (s->buf_type == RING_BUFFER_INSTRUCTION) { in cmd_handler_mi_batch_buffer_start()
2019 s->ret_ip_gma_ring = s->ip_gma + cmd_length(s) * sizeof(u32); in cmd_handler_mi_batch_buffer_start()
2020 s->buf_type = BATCH_BUFFER_INSTRUCTION; in cmd_handler_mi_batch_buffer_start()
2022 s->buf_type = BATCH_BUFFER_2ND_LEVEL; in cmd_handler_mi_batch_buffer_start()
2023 s->ret_ip_gma_bb = s->ip_gma + cmd_length(s) * sizeof(u32); in cmd_handler_mi_batch_buffer_start()
2024 s->ret_bb_va = s->ip_va + cmd_length(s) * sizeof(u32); in cmd_handler_mi_batch_buffer_start()
2027 if (batch_buffer_needs_scan(s)) { in cmd_handler_mi_batch_buffer_start()
2028 ret = perform_bb_shadow(s); in cmd_handler_mi_batch_buffer_start()
2033 ret = cmd_handler_mi_batch_buffer_end(s); in cmd_handler_mi_batch_buffer_start()
2729 static int cmd_parser_exec(struct parser_exec_state *s) in cmd_parser_exec() argument
2731 struct intel_vgpu *vgpu = s->vgpu; in cmd_parser_exec()
2736 cmd = cmd_val(s, 0); in cmd_parser_exec()
2742 info = get_cmd_info(s->vgpu->gvt, cmd, s->engine); in cmd_parser_exec()
2746 cmd, get_opcode(cmd, s->engine), in cmd_parser_exec()
2747 repr_addr_type(s->buf_addr_type), in cmd_parser_exec()
2748 s->engine->name, s->workload); in cmd_parser_exec()
2752 s->info = info; in cmd_parser_exec()
2754 trace_gvt_command(vgpu->id, s->engine->id, s->ip_gma, s->ip_va, in cmd_parser_exec()
2755 cmd_length(s), s->buf_type, s->buf_addr_type, in cmd_parser_exec()
2756 s->workload, info->name); in cmd_parser_exec()
2759 ret = gvt_check_valid_cmd_length(cmd_length(s), in cmd_parser_exec()
2766 ret = info->handler(s); in cmd_parser_exec()
2774 ret = cmd_advance_default(s); in cmd_parser_exec()
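cmd_parser_exec() is a decode-dispatch step: resolve the header DWORD to a cmd_info, run its handler, and fall back to advancing by the command's length. A self-contained sketch of the pattern; every type, name, and the one-entry table here is invented for illustration:

#include <stdint.h>
#include <stdio.h>

struct parser_sketch {
	const uint32_t *stream;
	size_t pos, len;		/* both in DWORDs */
};

struct cmd_info_sketch {
	const char *name;
	int dw_len;			/* command length in DWORDs */
	int (*handler)(struct parser_sketch *p);
};

static const struct cmd_info_sketch *decode(uint32_t header);

/* Decode one command, run its handler, then advance by its length */
static int parse_one(struct parser_sketch *p)
{
	const struct cmd_info_sketch *info = decode(p->stream[p->pos]);

	if (!info)
		return -1;		/* unknown command: reject the buffer */
	if (info->handler && info->handler(p))
		return -1;		/* handler vetoed the command */
	p->pos += info->dw_len;		/* default advance */
	return 0;
}

static int noop_handler(struct parser_sketch *p)
{
	(void)p;
	return 0;
}

static const struct cmd_info_sketch mi_noop = { "MI_NOOP", 1, noop_handler };

static const struct cmd_info_sketch *decode(uint32_t header)
{
	return header == 0 ? &mi_noop : NULL;	/* toy table: MI_NOOP only */
}

int main(void)
{
	const uint32_t ring[2] = { 0, 0 };
	struct parser_sketch p = { ring, 0, 2 };

	while (p.pos < p.len)
		if (parse_one(&p))
			return 1;
	puts("scan ok");
	return 0;
}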
2796 static int command_scan(struct parser_exec_state *s, in command_scan() argument
2803 struct intel_vgpu *vgpu = s->vgpu; in command_scan()
2809 while (s->ip_gma != gma_tail) { in command_scan()
2810 if (s->buf_type == RING_BUFFER_INSTRUCTION || in command_scan()
2811 s->buf_type == RING_BUFFER_CTX) { in command_scan()
2812 if (!(s->ip_gma >= rb_start) || in command_scan()
2813 !(s->ip_gma < gma_bottom)) { in command_scan()
2816 s->ip_gma, rb_start, in command_scan()
2818 parser_exec_state_dump(s); in command_scan()
2821 if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) { in command_scan()
2824 s->ip_gma, rb_start, in command_scan()
2826 parser_exec_state_dump(s); in command_scan()
2830 ret = cmd_parser_exec(s); in command_scan()
2833 parser_exec_state_dump(s); in command_scan()
2844 struct parser_exec_state s; in scan_workload() local
2854 s.buf_type = RING_BUFFER_INSTRUCTION; in scan_workload()
2855 s.buf_addr_type = GTT_BUFFER; in scan_workload()
2856 s.vgpu = workload->vgpu; in scan_workload()
2857 s.engine = workload->engine; in scan_workload()
2858 s.ring_start = workload->rb_start; in scan_workload()
2859 s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl); in scan_workload()
2860 s.ring_head = gma_head; in scan_workload()
2861 s.ring_tail = gma_tail; in scan_workload()
2862 s.rb_va = workload->shadow_ring_buffer_va; in scan_workload()
2863 s.workload = workload; in scan_workload()
2864 s.is_ctx_wa = false; in scan_workload()
2869 ret = ip_gma_set(&s, gma_head); in scan_workload()
2873 ret = command_scan(&s, workload->rb_head, workload->rb_tail, in scan_workload()
2884 struct parser_exec_state s; in scan_wa_ctx() local
2901 s.buf_type = RING_BUFFER_INSTRUCTION; in scan_wa_ctx()
2902 s.buf_addr_type = GTT_BUFFER; in scan_wa_ctx()
2903 s.vgpu = workload->vgpu; in scan_wa_ctx()
2904 s.engine = workload->engine; in scan_wa_ctx()
2905 s.ring_start = wa_ctx->indirect_ctx.guest_gma; in scan_wa_ctx()
2906 s.ring_size = ring_size; in scan_wa_ctx()
2907 s.ring_head = gma_head; in scan_wa_ctx()
2908 s.ring_tail = gma_tail; in scan_wa_ctx()
2909 s.rb_va = wa_ctx->indirect_ctx.shadow_va; in scan_wa_ctx()
2910 s.workload = workload; in scan_wa_ctx()
2911 s.is_ctx_wa = true; in scan_wa_ctx()
2913 ret = ip_gma_set(&s, gma_head); in scan_wa_ctx()
2917 ret = command_scan(&s, 0, ring_tail, in scan_wa_ctx()
2926 struct intel_vgpu_submission *s = &vgpu->submission; in shadow_workload_ring_buffer() local
2941 if (workload->rb_len > s->ring_scan_buffer_size[workload->engine->id]) { in shadow_workload_ring_buffer()
2945 p = krealloc(s->ring_scan_buffer[workload->engine->id], in shadow_workload_ring_buffer()
2951 s->ring_scan_buffer[workload->engine->id] = p; in shadow_workload_ring_buffer()
2952 s->ring_scan_buffer_size[workload->engine->id] = workload->rb_len; in shadow_workload_ring_buffer()
2955 shadow_ring_buffer_va = s->ring_scan_buffer[workload->engine->id]; in shadow_workload_ring_buffer()
3119 struct parser_exec_state s; in intel_gvt_update_reg_whitelist() local
3133 s.buf_type = RING_BUFFER_CTX; in intel_gvt_update_reg_whitelist()
3134 s.buf_addr_type = GTT_BUFFER; in intel_gvt_update_reg_whitelist()
3135 s.vgpu = vgpu; in intel_gvt_update_reg_whitelist()
3136 s.engine = engine; in intel_gvt_update_reg_whitelist()
3137 s.ring_start = 0; in intel_gvt_update_reg_whitelist()
3138 s.ring_size = engine->context_size - start; in intel_gvt_update_reg_whitelist()
3139 s.ring_head = 0; in intel_gvt_update_reg_whitelist()
3140 s.ring_tail = s.ring_size; in intel_gvt_update_reg_whitelist()
3141 s.rb_va = vaddr + start; in intel_gvt_update_reg_whitelist()
3142 s.workload = NULL; in intel_gvt_update_reg_whitelist()
3143 s.is_ctx_wa = false; in intel_gvt_update_reg_whitelist()
3144 s.is_init_ctx = true; in intel_gvt_update_reg_whitelist()
3147 ret = ip_gma_set(&s, RING_CTX_SIZE); in intel_gvt_update_reg_whitelist()
3149 ret = command_scan(&s, 0, s.ring_size, 0, s.ring_size); in intel_gvt_update_reg_whitelist()
3166 struct parser_exec_state s; in intel_gvt_scan_engine_context() local
3185 s.buf_type = RING_BUFFER_CTX; in intel_gvt_scan_engine_context()
3186 s.buf_addr_type = GTT_BUFFER; in intel_gvt_scan_engine_context()
3187 s.vgpu = workload->vgpu; in intel_gvt_scan_engine_context()
3188 s.engine = workload->engine; in intel_gvt_scan_engine_context()
3189 s.ring_start = gma_start; in intel_gvt_scan_engine_context()
3190 s.ring_size = ctx_size; in intel_gvt_scan_engine_context()
3191 s.ring_head = gma_start + gma_head; in intel_gvt_scan_engine_context()
3192 s.ring_tail = gma_start + gma_tail; in intel_gvt_scan_engine_context()
3193 s.rb_va = ce->lrc_reg_state; in intel_gvt_scan_engine_context()
3194 s.workload = workload; in intel_gvt_scan_engine_context()
3195 s.is_ctx_wa = false; in intel_gvt_scan_engine_context()
3196 s.is_init_ctx = false; in intel_gvt_scan_engine_context()
3201 ret = ip_gma_set(&s, gma_start + gma_head + RING_CTX_SIZE); in intel_gvt_scan_engine_context()
3205 ret = command_scan(&s, gma_head, gma_tail, in intel_gvt_scan_engine_context()