Lines matching refs: gvt
58 unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt) in intel_gvt_get_device_type() argument
60 struct drm_i915_private *i915 = gvt->gt->i915; in intel_gvt_get_device_type()
76 static bool intel_gvt_match_device(struct intel_gvt *gvt, in intel_gvt_match_device() argument
79 return intel_gvt_get_device_type(gvt) & device; in intel_gvt_match_device()
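The two functions above gate MMIO handlers by platform: intel_gvt_get_device_type() maps the probed i915 device to a single type bit, and intel_gvt_match_device() tests a handler's device mask against it. A minimal stand-alone sketch of that bitmask scheme (the D_* flags here are hypothetical stand-ins for GVT's device macros):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical device-type flags, modeled on GVT's D_* macros. */
#define D_BDW (1UL << 0)
#define D_SKL (1UL << 1)
#define D_BXT (1UL << 2)

/* Stand-in for intel_gvt_get_device_type(): map the probed
 * hardware to exactly one type bit. */
static unsigned long get_device_type(int is_skylake)
{
	return is_skylake ? D_SKL : D_BDW;
}

/* Mirrors intel_gvt_match_device(): a handler registered for a
 * set of devices applies iff the current device's bit is set. */
static bool match_device(unsigned long dev_type, unsigned long handler_mask)
{
	return dev_type & handler_mask;
}

int main(void)
{
	unsigned long type = get_device_type(1);

	printf("SKL-only handler applies: %d\n", match_device(type, D_SKL));
	printf("BDW|SKL handler applies:  %d\n", match_device(type, D_BDW | D_SKL));
	return 0;
}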
94 struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt, in intel_gvt_find_mmio_info() argument
99 hash_for_each_possible(gvt->mmio.mmio_info_table, e, node, offset) { in intel_gvt_find_mmio_info()
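intel_gvt_find_mmio_info() is a plain hashtable lookup keyed by register offset, via hash_for_each_possible() over gvt->mmio.mmio_info_table. A simplified user-space equivalent using a fixed-size chained hash (the types are illustrative, not GVT's):

#include <stdio.h>

#define TABLE_BITS 7
#define TABLE_SIZE (1 << TABLE_BITS)

struct mmio_info {
	unsigned int offset;	/* register offset, the hash key */
	struct mmio_info *next;	/* chain within one bucket */
};

static struct mmio_info *table[TABLE_SIZE];

static unsigned int hash_offset(unsigned int offset)
{
	return (offset >> 2) & (TABLE_SIZE - 1);	/* registers are 4-byte aligned */
}

/* Mirrors intel_gvt_find_mmio_info(): walk only the bucket the
 * key hashes to, then compare the stored offset. */
static struct mmio_info *find_mmio_info(unsigned int offset)
{
	struct mmio_info *e;

	for (e = table[hash_offset(offset)]; e; e = e->next)
		if (e->offset == offset)
			return e;
	return NULL;
}

static void add_mmio_info(struct mmio_info *e)
{
	unsigned int h = hash_offset(e->offset);

	e->next = table[h];
	table[h] = e;
}

int main(void)
{
	struct mmio_info a = { .offset = 0x2030 };

	add_mmio_info(&a);
	printf("found: %p\n", (void *)find_mmio_info(0x2030));
	return 0;
}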
106 static int setup_mmio_info(struct intel_gvt *gvt, u32 offset, u32 size, in setup_mmio_info() argument
113 if (!intel_gvt_match_device(gvt, device)) in setup_mmio_info()
123 p = intel_gvt_find_mmio_info(gvt, i); in setup_mmio_info()
130 gvt->mmio.mmio_attribute[i / 4] = flags; in setup_mmio_info()
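setup_mmio_info() first rejects entries whose device mask does not match, then records per-register flags in a flat array indexed by offset / 4, i.e. one slot per 32-bit register, as the line above shows. A sketch of that indexing, with hypothetical flag values:

#include <stdlib.h>
#include <stdio.h>

#define F_RO		(1 << 0)	/* hypothetical attribute flags */
#define F_PM_SAVE	(1 << 1)

static unsigned char *mmio_attribute;	/* one slot per 4-byte register */

static int setup_mmio_info(unsigned int offset, unsigned int size,
			   unsigned char flags)
{
	unsigned int i;

	/* Tag every 32-bit register the entry covers. */
	for (i = offset; i < offset + size; i += 4)
		mmio_attribute[i / 4] = flags;
	return 0;
}

int main(void)
{
	unsigned int mmio_size = 2 * 1024 * 1024;	/* 2 MiB MMIO BAR */

	mmio_attribute = calloc(mmio_size / 4, 1);
	if (!mmio_attribute)
		return 1;
	setup_mmio_info(0x2030, 4, F_PM_SAVE);
	printf("flags at 0x2030: %#x\n", mmio_attribute[0x2030 / 4]);
	free(mmio_attribute);
	return 0;
}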
148 intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int offset) in intel_gvt_render_mmio_to_engine() argument
154 for_each_engine(engine, gvt->gt, id) in intel_gvt_render_mmio_to_engine()
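intel_gvt_render_mmio_to_engine() resolves a register offset to the engine whose MMIO range contains it, by walking every engine with for_each_engine(). A simplified version of the range test (the engine layout below is invented for illustration):

#include <stddef.h>
#include <stdio.h>

struct engine {
	const char *name;
	unsigned int mmio_base;	/* start of this engine's register block */
};

/* Each engine owns a 2 KiB register block in this sketch; the
 * real driver compares against the per-engine mmio_base. */
#define ENGINE_MMIO_LEN 0x800

static struct engine engines[] = {
	{ "rcs0", 0x02000 },
	{ "bcs0", 0x22000 },
	{ "vcs0", 0x12000 },
};

static struct engine *render_mmio_to_engine(unsigned int offset)
{
	size_t i;

	for (i = 0; i < sizeof(engines) / sizeof(engines[0]); i++)
		if (offset >= engines[i].mmio_base &&
		    offset < engines[i].mmio_base + ENGINE_MMIO_LEN)
			return &engines[i];
	return NULL;
}

int main(void)
{
	struct engine *e = render_mmio_to_engine(0x2230);

	printf("0x2230 belongs to %s\n", e ? e->name : "(none)");
	return 0;
}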
215 if (GRAPHICS_VER(vgpu->gvt->gt->i915) <= 10) { in gamw_echo_dev_rw_ia_write()
251 struct intel_gvt *gvt = vgpu->gvt; in fence_mmio_write() local
260 mmio_hw_access_pre(gvt->gt); in fence_mmio_write()
263 mmio_hw_access_post(gvt->gt); in fence_mmio_write()
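fence_mmio_write() shows the access discipline used throughout the file: every touch of real hardware is bracketed by mmio_hw_access_pre()/mmio_hw_access_post(), which in the real driver take and release a runtime-PM style reference on the GT. A sketch of the bracketing pattern (the refcount below is a stand-in for the real runtime-PM machinery):

#include <stdio.h>

static int pm_refcount;	/* stand-in for a runtime-PM reference */

static void mmio_hw_access_pre(void)
{
	pm_refcount++;	/* keep the device awake while we poke it */
}

static void mmio_hw_access_post(void)
{
	pm_refcount--;	/* allow it to suspend again */
}

static void write_fence(unsigned int fence_num, unsigned long long value)
{
	mmio_hw_access_pre();
	printf("FENCE[%u] <- %#llx (pm refs: %d)\n",
	       fence_num, value, pm_refcount);
	mmio_hw_access_post();
}

int main(void)
{
	write_fence(0, 0xdeadbeefULL);
	return 0;
}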
281 if (GRAPHICS_VER(vgpu->gvt->gt->i915) >= 9) { in mul_force_wake_write()
343 engine_mask &= vgpu->gvt->gt->info.engine_mask; in gdrst_mmio_write()
502 refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.ssc; in bdw_vgpu_get_dp_bitrate()
533 int refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.nssc; in bxt_vgpu_get_dp_bitrate()
644 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; in vgpu_update_refresh_rate()
770 intel_gvt_render_mmio_to_engine(vgpu->gvt, offset); in force_nonpriv_write()
1006 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; in pri_surf_mmio_write()
1047 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; in reg50080_mmio_write()
1071 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; in trigger_aux_channel_interrupt()
1174 if ((GRAPHICS_VER(vgpu->gvt->gt->i915) >= 9) in dp_aux_ch_ctl_mmio_write()
1178 } else if (IS_BROADWELL(vgpu->gvt->gt->i915) && in dp_aux_ch_ctl_mmio_write()
1494 struct kobject *kobj = &vgpu->gvt->gt->i915->drm.primary->kdev->kobj; in send_display_ready_uevent()
1555 struct drm_i915_private *i915 = vgpu->gvt->gt->i915; in pf_write()
1611 struct drm_i915_private *i915 = vgpu->gvt->gt->i915; in dma_ctrl_write()
1630 struct drm_i915_private *i915 = vgpu->gvt->gt->i915; in gen9_trtte_write()
1682 if (IS_SKYLAKE(vgpu->gvt->gt->i915) || in mailbox_write()
1683 IS_KABYLAKE(vgpu->gvt->gt->i915) || in mailbox_write()
1684 IS_COFFEELAKE(vgpu->gvt->gt->i915) || in mailbox_write()
1685 IS_COMETLAKE(vgpu->gvt->gt->i915)) { in mailbox_write()
1695 } else if (IS_BROXTON(vgpu->gvt->gt->i915)) { in mailbox_write()
1708 if (IS_SKYLAKE(vgpu->gvt->gt->i915) || in mailbox_write()
1709 IS_KABYLAKE(vgpu->gvt->gt->i915) || in mailbox_write()
1710 IS_COFFEELAKE(vgpu->gvt->gt->i915) || in mailbox_write()
1711 IS_COMETLAKE(vgpu->gvt->gt->i915)) in mailbox_write()
1736 intel_gvt_render_mmio_to_engine(vgpu->gvt, offset); in hws_pga_write()
1767 if (IS_BROXTON(vgpu->gvt->gt->i915)) in skl_power_well_ctl_write()
1941 struct intel_gvt *gvt = vgpu->gvt; in mmio_read_from_hw() local
1943 intel_gvt_render_mmio_to_engine(gvt, offset); in mmio_read_from_hw()
1953 vgpu == gvt->scheduler.engine_owner[engine->id] || in mmio_read_from_hw()
1956 mmio_hw_access_pre(gvt->gt); in mmio_read_from_hw()
1958 intel_uncore_read(gvt->gt->uncore, _MMIO(offset)); in mmio_read_from_hw()
1959 mmio_hw_access_post(gvt->gt); in mmio_read_from_hw()
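mmio_read_from_hw() only forwards a read to real hardware when the requesting vGPU currently owns the engine (gvt->scheduler.engine_owner[engine->id]); otherwise the cached virtual register value stands. A sketch of that gate (data structures heavily simplified):

#include <stdio.h>

#define NUM_ENGINES 4

struct vgpu { int id; unsigned int vreg[16]; };

static struct vgpu *engine_owner[NUM_ENGINES];	/* who holds each engine now */
static unsigned int hw_reg = 0x1234;		/* pretend hardware register */

static unsigned int mmio_read(struct vgpu *vgpu, int engine_id,
			      unsigned int offset)
{
	/* Only the vGPU scheduled onto the engine may see live state;
	 * everyone else reads their shadow copy. */
	if (engine_owner[engine_id] == vgpu)
		vgpu->vreg[offset / 4] = hw_reg;	/* refresh shadow from HW */
	return vgpu->vreg[offset / 4];
}

int main(void)
{
	struct vgpu a = { .id = 1 }, b = { .id = 2 };

	engine_owner[0] = &a;
	printf("owner reads:     %#x\n", mmio_read(&a, 0, 0));
	printf("non-owner reads: %#x\n", mmio_read(&b, 0, 0));
	return 0;
}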
1968 struct drm_i915_private *i915 = vgpu->gvt->gt->i915; in elsp_mmio_write()
1969 const struct intel_engine_cs *engine = intel_gvt_render_mmio_to_engine(vgpu->gvt, offset); in elsp_mmio_write()
2012 intel_gvt_render_mmio_to_engine(vgpu->gvt, offset); in ring_mode_mmio_write()
2017 if (IS_COFFEELAKE(vgpu->gvt->gt->i915) || in ring_mode_mmio_write()
2018 IS_COMETLAKE(vgpu->gvt->gt->i915)) in ring_mode_mmio_write()
2027 if ((IS_COFFEELAKE(vgpu->gvt->gt->i915) || in ring_mode_mmio_write()
2028 IS_COMETLAKE(vgpu->gvt->gt->i915)) && in ring_mode_mmio_write()
2132 ret = setup_mmio_info(gvt, i915_mmio_reg_offset(reg), \
2158 if (HAS_ENGINE(gvt->gt, VCS1)) \
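The two lines above (2132, 2158) come from GVT's MMIO registration macros: each entry expands to a setup_mmio_info() call plus an error check, and engine-specific variants are guarded by HAS_ENGINE() so that, for example, VCS1 registers are skipped on parts without a second video engine. A sketch of the macro shape (names and structure simplified, not the driver's exact macros):

#include <stdio.h>

static int setup_mmio_info(unsigned int offset, unsigned int size)
{
	printf("registering %#x (+%u)\n", offset, size);
	return 0;
}

/* Register one entry and bail out of the enclosing init function
 * on failure -- the general shape of GVT's MMIO_F()-style macros. */
#define MMIO_D(offset) do {				\
	int __ret = setup_mmio_info((offset), 4);	\
	if (__ret)					\
		return __ret;				\
} while (0)

static int has_vcs1 = 0;	/* stand-in for HAS_ENGINE(gvt->gt, VCS1) */

static int init_mmio_info(void)
{
	MMIO_D(0x2030);
	if (has_vcs1)		/* engine-conditional registration */
		MMIO_D(0x1c030);
	return 0;
}

int main(void)
{
	return init_mmio_info();
}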
2174 static int init_generic_mmio_info(struct intel_gvt *gvt) in init_generic_mmio_info() argument
2176 struct drm_i915_private *dev_priv = gvt->gt->i915; in init_generic_mmio_info()
2437 static int init_bdw_mmio_info(struct intel_gvt *gvt) in init_bdw_mmio_info() argument
2572 static int init_skl_mmio_info(struct intel_gvt *gvt) in init_skl_mmio_info() argument
2574 struct drm_i915_private *dev_priv = gvt->gt->i915; in init_skl_mmio_info()
2744 static int init_bxt_mmio_info(struct intel_gvt *gvt) in init_bxt_mmio_info() argument
2792 static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt, in find_mmio_block() argument
2795 struct gvt_mmio_block *block = gvt->mmio.mmio_block; in find_mmio_block()
2796 int num = gvt->mmio.num_mmio_block; in find_mmio_block()
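find_mmio_block() is a linear scan of the gvt->mmio.mmio_block array, returning the block whose [offset, offset + size) range contains the register. Sketch (types simplified):

#include <stdio.h>

struct mmio_block {
	unsigned int offset;	/* start of the block */
	unsigned int size;	/* length in bytes */
};

static struct mmio_block blocks[] = {
	{ 0x78000, 0x1000 },	/* e.g. the PVINFO page in the real driver */
};
static int num_blocks = 1;

static struct mmio_block *find_mmio_block(unsigned int offset)
{
	int i;

	for (i = 0; i < num_blocks; i++)
		if (offset >= blocks[i].offset &&
		    offset < blocks[i].offset + blocks[i].size)
			return &blocks[i];
	return NULL;
}

int main(void)
{
	printf("block: %p\n", (void *)find_mmio_block(0x78010));
	return 0;
}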
2815 void intel_gvt_clean_mmio_info(struct intel_gvt *gvt) in intel_gvt_clean_mmio_info() argument
2821 hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node) in intel_gvt_clean_mmio_info()
2824 kfree(gvt->mmio.mmio_block); in intel_gvt_clean_mmio_info()
2825 gvt->mmio.mmio_block = NULL; in intel_gvt_clean_mmio_info()
2826 gvt->mmio.num_mmio_block = 0; in intel_gvt_clean_mmio_info()
2828 vfree(gvt->mmio.mmio_attribute); in intel_gvt_clean_mmio_info()
2829 gvt->mmio.mmio_attribute = NULL; in intel_gvt_clean_mmio_info()
2835 struct intel_gvt *gvt = iter->data; in handle_mmio() local
2846 p = intel_gvt_find_mmio_info(gvt, i); in handle_mmio()
2866 hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset); in handle_mmio()
2867 gvt->mmio.num_tracked_mmio++; in handle_mmio()
2875 struct intel_gvt *gvt = iter->data; in handle_mmio_block() local
2876 struct gvt_mmio_block *block = gvt->mmio.mmio_block; in handle_mmio_block()
2880 (gvt->mmio.num_mmio_block + 1) * sizeof(*block), in handle_mmio_block()
2885 gvt->mmio.mmio_block = block = ret; in handle_mmio_block()
2887 block += gvt->mmio.num_mmio_block; in handle_mmio_block()
2894 gvt->mmio.num_mmio_block++; in handle_mmio_block()
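handle_mmio_block() grows the block array by one slot per callback: a krealloc() to (num_mmio_block + 1) * sizeof(*block), then the new tail slot is filled in and the count bumped. A user-space equivalent with realloc():

#include <stdlib.h>
#include <stdio.h>

struct mmio_block {
	unsigned int offset;
	unsigned int size;
};

static struct mmio_block *blocks;
static int num_blocks;

static int handle_mmio_block(unsigned int offset, unsigned int size)
{
	struct mmio_block *grown, *block;

	/* Grow by exactly one slot, as the kernel code does with krealloc(). */
	grown = realloc(blocks, (num_blocks + 1) * sizeof(*blocks));
	if (!grown)
		return -1;
	blocks = grown;

	block = &blocks[num_blocks];	/* the freshly added tail slot */
	block->offset = offset;
	block->size = size;
	num_blocks++;
	return 0;
}

int main(void)
{
	handle_mmio_block(0x78000, 0x1000);
	printf("blocks: %d\n", num_blocks);
	free(blocks);
	return 0;
}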
2908 static int init_mmio_info(struct intel_gvt *gvt) in init_mmio_info() argument
2911 .i915 = gvt->gt->i915, in init_mmio_info()
2912 .data = gvt, in init_mmio_info()
2919 static int init_mmio_block_handlers(struct intel_gvt *gvt) in init_mmio_block_handlers() argument
2923 block = find_mmio_block(gvt, VGT_PVINFO_PAGE); in init_mmio_block_handlers()
2926 i915_mmio_reg_offset(gvt->mmio.mmio_block->offset)); in init_mmio_block_handlers()
2946 int intel_gvt_setup_mmio_info(struct intel_gvt *gvt) in intel_gvt_setup_mmio_info() argument
2948 struct intel_gvt_device_info *info = &gvt->device_info; in intel_gvt_setup_mmio_info()
2949 struct drm_i915_private *i915 = gvt->gt->i915; in intel_gvt_setup_mmio_info()
2950 int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute); in intel_gvt_setup_mmio_info()
2953 gvt->mmio.mmio_attribute = vzalloc(size); in intel_gvt_setup_mmio_info()
2954 if (!gvt->mmio.mmio_attribute) in intel_gvt_setup_mmio_info()
2957 ret = init_mmio_info(gvt); in intel_gvt_setup_mmio_info()
2961 ret = init_mmio_block_handlers(gvt); in intel_gvt_setup_mmio_info()
2965 ret = init_generic_mmio_info(gvt); in intel_gvt_setup_mmio_info()
2970 ret = init_bdw_mmio_info(gvt); in intel_gvt_setup_mmio_info()
2977 ret = init_bdw_mmio_info(gvt); in intel_gvt_setup_mmio_info()
2980 ret = init_skl_mmio_info(gvt); in intel_gvt_setup_mmio_info()
2984 ret = init_bdw_mmio_info(gvt); in intel_gvt_setup_mmio_info()
2987 ret = init_skl_mmio_info(gvt); in intel_gvt_setup_mmio_info()
2990 ret = init_bxt_mmio_info(gvt); in intel_gvt_setup_mmio_info()
2997 intel_gvt_clean_mmio_info(gvt); in intel_gvt_setup_mmio_info()
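The repeated init_bdw_mmio_info()/init_skl_mmio_info() calls above reflect how intel_gvt_setup_mmio_info() layers the per-platform tables: every platform gets the generic set, BDW adds the bdw table, SKL-class parts add bdw + skl, and BXT adds bdw + skl + bxt, with any failure unwinding through intel_gvt_clean_mmio_info(). The cascade, sketched with hypothetical platform labels:

#include <stdio.h>

enum platform { PLAT_BDW, PLAT_SKL, PLAT_BXT };

static int init_generic(void) { puts("generic"); return 0; }
static int init_bdw(void)     { puts("+bdw");    return 0; }
static int init_skl(void)     { puts("+skl");    return 0; }
static int init_bxt(void)     { puts("+bxt");    return 0; }

static void clean_mmio_info(void) { puts("unwind"); }

/* Newer platforms inherit every older table they are register-
 * compatible with, then add their own deltas on top. */
static int setup_mmio_info(enum platform p)
{
	int ret;

	if ((ret = init_generic()))
		goto err;
	if ((ret = init_bdw()))
		goto err;
	if (p == PLAT_SKL || p == PLAT_BXT)
		if ((ret = init_skl()))
			goto err;
	if (p == PLAT_BXT)
		if ((ret = init_bxt()))
			goto err;
	return 0;
err:
	clean_mmio_info();
	return ret;
}

int main(void)
{
	return setup_mmio_info(PLAT_BXT);
}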
3010 int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt, in intel_gvt_for_each_tracked_mmio() argument
3011 int (*handler)(struct intel_gvt *gvt, u32 offset, void *data), in intel_gvt_for_each_tracked_mmio() argument
3014 struct gvt_mmio_block *block = gvt->mmio.mmio_block; in intel_gvt_for_each_tracked_mmio()
3018 hash_for_each(gvt->mmio.mmio_info_table, i, e, node) { in intel_gvt_for_each_tracked_mmio()
3019 ret = handler(gvt, e->offset, data); in intel_gvt_for_each_tracked_mmio()
3024 for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) { in intel_gvt_for_each_tracked_mmio()
3030 ret = handler(gvt, i915_mmio_reg_offset(block->offset) + j, data); in intel_gvt_for_each_tracked_mmio()
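intel_gvt_for_each_tracked_mmio() visits the tracked registers in two passes: first every entry in the hash table, then every 4-byte slot of every MMIO block, calling the handler with (gvt, offset, data) and stopping at the first error. Sketch:

#include <stdio.h>

struct mmio_block { unsigned int offset; unsigned int size; };

static unsigned int tracked[] = { 0x2030, 0x2230 };	/* stand-in for the hash */
static struct mmio_block blocks[] = { { 0x78000, 0x10 } };

typedef int (*mmio_handler)(unsigned int offset, void *data);

static int for_each_tracked_mmio(mmio_handler handler, void *data)
{
	unsigned int i, j;
	int ret;

	/* Individually tracked registers... */
	for (i = 0; i < sizeof(tracked) / sizeof(tracked[0]); i++)
		if ((ret = handler(tracked[i], data)))
			return ret;

	/* ...then every 32-bit register inside each block. */
	for (i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++)
		for (j = 0; j < blocks[i].size; j += 4)
			if ((ret = handler(blocks[i].offset + j, data)))
				return ret;
	return 0;
}

static int print_offset(unsigned int offset, void *data)
{
	(void)data;
	printf("%#x\n", offset);
	return 0;
}

int main(void)
{
	return for_each_tracked_mmio(print_offset, NULL);
}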
3107 bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt, in intel_gvt_in_force_nonpriv_whitelist() argument
3127 struct drm_i915_private *i915 = vgpu->gvt->gt->i915; in intel_vgpu_mmio_reg_rw()
3128 struct intel_gvt *gvt = vgpu->gvt; in intel_vgpu_mmio_reg_rw() local
3140 mmio_block = find_mmio_block(gvt, offset); in intel_vgpu_mmio_reg_rw()
3151 mmio_info = intel_gvt_find_mmio_info(gvt, offset); in intel_vgpu_mmio_reg_rw()
3164 if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) { in intel_vgpu_mmio_reg_rw()
3182 if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) { in intel_vgpu_mmio_reg_rw()
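The two intel_gvt_mmio_has_mode_mask() checks in intel_vgpu_mmio_reg_rw() handle GEN "masked" registers, where the upper 16 bits of a written value select which of the lower 16 bits actually change. A sketch of that update rule:

#include <stdio.h>

/* For a masked register, bits 31:16 of the written value are a
 * write-enable mask for bits 15:0; unselected bits keep their
 * old contents. */
static unsigned int masked_write(unsigned int old, unsigned int val)
{
	unsigned int mask = val >> 16;

	return (old & ~mask) | (val & mask);
}

int main(void)
{
	unsigned int reg = 0x00ff;

	/* Set bit 8 without touching anything else: mask = bit 8. */
	reg = masked_write(reg, (0x0100 << 16) | 0x0100);
	printf("reg = %#06x\n", reg);	/* 0x01ff */
	return 0;
}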
3198 void intel_gvt_restore_fence(struct intel_gvt *gvt) in intel_gvt_restore_fence() argument
3203 idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) { in intel_gvt_restore_fence()
3204 mmio_hw_access_pre(gvt->gt); in intel_gvt_restore_fence()
3207 mmio_hw_access_post(gvt->gt); in intel_gvt_restore_fence()
3211 static int mmio_pm_restore_handler(struct intel_gvt *gvt, u32 offset, void *data) in mmio_pm_restore_handler() argument
3214 struct drm_i915_private *dev_priv = gvt->gt->i915; in mmio_pm_restore_handler()
3216 if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE) in mmio_pm_restore_handler()
3222 void intel_gvt_restore_mmio(struct intel_gvt *gvt) in intel_gvt_restore_mmio() argument
3227 idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) { in intel_gvt_restore_mmio()
3228 mmio_hw_access_pre(gvt->gt); in intel_gvt_restore_mmio()
3229 intel_gvt_for_each_tracked_mmio(gvt, mmio_pm_restore_handler, vgpu); in intel_gvt_restore_mmio()
3230 mmio_hw_access_post(gvt->gt); in intel_gvt_restore_mmio()
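intel_gvt_restore_mmio() replays vGPU state after a power-management transition: for every vGPU it walks all tracked registers, and mmio_pm_restore_handler() rewrites to hardware only those tagged F_PM_SAVE at setup time (the mmio_attribute[offset >> 2] test above). A sketch of that filter-and-replay step, with a hypothetical flag value:

#include <stdio.h>

#define F_PM_SAVE (1 << 1)	/* hypothetical flag value */

static unsigned char mmio_attribute[256];	/* one slot per 4-byte register */
static unsigned int  vreg[256];			/* the vGPU's shadow registers */

/* Mirrors mmio_pm_restore_handler(): only registers tagged at
 * setup time are written back after a PM transition. */
static int pm_restore_handler(unsigned int offset, void *data)
{
	(void)data;
	if (mmio_attribute[offset >> 2] & F_PM_SAVE)
		printf("restore %#x <- %#x\n", offset, vreg[offset >> 2]);
	return 0;
}

int main(void)
{
	mmio_attribute[0x20 >> 2] = F_PM_SAVE;
	vreg[0x20 >> 2] = 0xabcd;
	pm_restore_handler(0x20, NULL);	/* replayed */
	pm_restore_handler(0x24, NULL);	/* skipped: not tagged */
	return 0;
}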