/drivers/gpu/drm/i915/gt/uc/
intel_guc.c
      75  guc_send_reg(guc, i), in intel_guc_init_send_regs()
     284  u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT; in guc_ctl_ads_flags()
     360  u32 *params = guc->params; in guc_init_params()
     438  GEM_BUG_ON(!guc->ads_vma); in intel_guc_init()
     461  guc_init_params(guc); in intel_guc_init()
     472  intel_guc_ads_destroy(guc); in intel_guc_init()
     478  intel_uc_fw_fini(&guc->fw); in intel_guc_init()
     500  intel_guc_ads_destroy(guc); in intel_guc_fini()
     503  intel_uc_fw_fini(&guc->fw); in intel_guc_fini()
     532  intel_guc_notify(guc); in intel_guc_send_mmio()
          [all …]
|
intel_guc.h
     327  #define GUC_SUBMIT_VER(guc) MAKE_GUC_VER_STRUCT((guc)->submission_version) argument
     328  #define GUC_FIRMWARE_VER(guc) MAKE_GUC_VER_STRUCT((guc)->fw.file_selected.ver) argument
     396  if (guc->interrupts.enabled) in intel_guc_to_host_event_handler()
     470  return intel_guc_is_fw_running(guc) && intel_guc_ct_enabled(&guc->ct); in intel_guc_is_ready()
     475  guc->interrupts.reset(guc); in intel_guc_reset_interrupts()
     480  guc->interrupts.enable(guc); in intel_guc_enable_interrupts()
     485  guc->interrupts.disable(guc); in intel_guc_disable_interrupts()
     493  guc->mmio_msg = 0; in intel_guc_sanitize()
     500  spin_lock_irq(&guc->irq_lock); in intel_guc_enable_msg()
     501  guc->msg_enabled_mask |= mask; in intel_guc_enable_msg()
          [all …]
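The intel_guc_enable_msg() hits at lines 500-501 show the usual pattern: the host-to-GuC message mask is only touched under guc->irq_lock. A minimal sketch of that pattern, reconstructed from the two lines above (the unlock and the disable-side counterpart are assumed, not shown in the hits):

```c
/* Sketch reconstructed from the intel_guc.h hits at lines 500-501;
 * the spin_unlock_irq() and the _disable_msg() counterpart are assumed. */
static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask |= mask;
	spin_unlock_irq(&guc->irq_lock);
}
```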
|
intel_guc_ads.c
     473  addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset; in guc_mmio_reg_state_init()
     475  iosys_map_memcpy_to(&guc->ads_map, offset, guc->ads_regset, in guc_mmio_reg_state_init()
     552  addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset; in guc_prep_golden_context()
     636  addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset; in guc_init_golden_context()
     718  ads_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma); in guc_capture_prep_lists()
     872  addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset; in guc_waklv_init()
     918  base = intel_guc_ggtt_offset(guc, guc->ads_vma); in __guc_ads_init()
     987  ret = intel_guc_allocate_and_map_vma(guc, size, &guc->ads_vma, in intel_guc_ads_create()
    1029  iosys_map_memset(&guc->ads_map, guc_ads_private_data_offset(guc), in guc_ads_private_data_reset()
    1043  if (!guc->ads_vma) in intel_guc_ads_reset()
          [all …]
|
intel_uc.c
     156  struct intel_guc *guc = &uc->guc; in __uc_capture_load_err_log() local
     317  struct intel_guc *guc = &uc->guc; in __uc_init() local
     351  struct intel_guc *guc = &uc->guc; in __uc_sanitize() local
     457  struct intel_guc *guc = &uc->guc; in __uc_init_hw() local
     586  struct intel_guc *guc = &uc->guc; in __uc_fini_hw() local
     605  struct intel_guc *guc = &uc->guc; in intel_uc_reset_prepare() local
     626  struct intel_guc *guc = &uc->guc; in intel_uc_reset() local
     635  struct intel_guc *guc = &uc->guc; in intel_uc_reset_finish() local
     650  struct intel_guc *guc = &uc->guc; in intel_uc_cancel_requests() local
     659  struct intel_guc *guc = &uc->guc; in intel_uc_runtime_suspend() local
          [all …]
|
intel_guc_debugfs.c
      19  struct intel_guc *guc = m->private; in guc_info_show() local
      22  if (!intel_guc_is_supported(guc)) in guc_info_show()
      25  intel_guc_load_status(guc, &p); in guc_info_show()
      27  intel_guc_log_info(&guc->log, &p); in guc_info_show()
      42  struct intel_guc *guc = m->private; in guc_registered_contexts_show() local
      56  struct intel_guc *guc = m->private; in guc_slpc_info_show() local
      60  if (!intel_guc_slpc_is_used(guc)) in guc_slpc_info_show()
      76  struct intel_guc *guc = data; in guc_sched_disable_delay_ms_get() local
      88  struct intel_guc *guc = data; in guc_sched_disable_delay_ms_set() local
     104  struct intel_guc *guc = data; in guc_sched_disable_gucid_threshold_get() local
          [all …]
|
intel_guc_submission.c
    1522  struct intel_guc *guc = container_of(wrk, typeof(*guc), in guc_timestamp_ping() local
    1524  struct intel_uc *uc = container_of(guc, typeof(*uc), guc); in guc_timestamp_ping()
    1734  guc->interrupts.disable(guc); in intel_guc_submission_reset_prepare()
    2137  wait = xa_load(&guc->tlb_lookup, guc->serial_slot); in fini_tlb_lookup()
    2243  return submission_disabled(guc) || guc->stalled_request || in need_tasklet()
    3700  struct intel_guc *guc = &ce->engine->gt->uc.guc; in update_context_prio() local
    4648  guc->sched_engine->private_data = guc; in intel_guc_submission_setup()
    4878  guc->submission_selected = __guc_submission_selected(guc); in intel_guc_submission_init_early()
    4916  guc_dbg(guc, in wait_wake_outstanding_tlb_g2h()
    4996  wq = xa_load(&guc->tlb_lookup, guc->serial_slot); in guc_send_invalidate_tlb()
          [all …]
|
intel_guc_capture.c
     619  if (!guc->capture) in guc_capture_output_min_size_est()
    1093  guc->capture->max_mmio_per_node = guc_get_max_reglist_count(guc); in guc_capture_create_prealloc_nodes()
    1296  struct intel_uc *uc = container_of(guc, typeof(*uc), guc); in __guc_capture_process_output()
    1542  guc = gt_to_guc(gt); in intel_guc_capture_is_matching_engine()
    1543  if (!guc->capture) in intel_guc_capture_is_matching_engine()
    1573  if (!guc->capture) in intel_guc_capture_get_matching_node()
    1602  if (guc->capture) in intel_guc_capture_process()
    1626  if (!guc->capture) in intel_guc_capture_destroy()
    1642  guc->capture = kzalloc(sizeof(*guc->capture), GFP_KERNEL); in intel_guc_capture_init()
    1643  if (!guc->capture) in intel_guc_capture_init()
          [all …]
|
intel_guc_rc.c
      16  return guc->submission_supported && in __guc_rc_supported()
      17  GRAPHICS_VER(guc_to_i915(guc)) >= 12; in __guc_rc_supported()
      22  if (!intel_guc_rc_is_supported(guc)) in __guc_rc_selected()
      25  return guc->submission_selected; in __guc_rc_selected()
      30  guc->rc_supported = __guc_rc_supported(guc); in intel_guc_rc_init_early()
      31  guc->rc_selected = __guc_rc_selected(guc); in intel_guc_rc_init_early()
      52  struct intel_gt *gt = guc_to_gt(guc); in __guc_rc_control()
      58  if (!intel_guc_is_ready(guc)) in __guc_rc_control()
      61  ret = guc_action_control_gucrc(guc, enable); in __guc_rc_control()
      75  return __guc_rc_control(guc, true); in intel_guc_rc_enable()
          [all …]
|
intel_guc_submission.h
      16  void intel_guc_submission_init_early(struct intel_guc *guc);
      17  int intel_guc_submission_init(struct intel_guc *guc);
      18  int intel_guc_submission_enable(struct intel_guc *guc);
      19  void intel_guc_submission_disable(struct intel_guc *guc);
      20  void intel_guc_submission_fini(struct intel_guc *guc);
      21  int intel_guc_preempt_work_create(struct intel_guc *guc);
      22  void intel_guc_preempt_work_destroy(struct intel_guc *guc);
      36  int intel_guc_wait_for_pending_msg(struct intel_guc *guc,
      45  return guc->submission_supported; in intel_guc_submission_is_supported()
      50  return guc->submission_selected; in intel_guc_submission_is_wanted()
          [all …]
|
intel_guc_fw.c
      74  intel_guc_ggtt_offset(guc, guc_fw->rsa_data)); in guc_xfer_rsa_vma()
     154  static int guc_wait_ucode(struct intel_guc *guc) in guc_wait_ucode() argument
     156  struct intel_gt *gt = guc_to_gt(guc); in guc_wait_ucode()
     235  guc_info(guc, "firmware exception. EIP: %#x\n", in guc_wait_ucode()
     246  guc_info(guc, "invalid w/a KLV entry\n"); in guc_wait_ucode()
     251  guc_info(guc, "still extracting hwconfig table.\n"); in guc_wait_ucode()
     287  int intel_guc_fw_upload(struct intel_guc *guc) in intel_guc_fw_upload() argument
     289  struct intel_gt *gt = guc_to_gt(guc); in intel_guc_fw_upload()
     303  ret = guc_xfer_rsa(&guc->fw, uncore); in intel_guc_fw_upload()
     311  ret = intel_uc_fw_upload(&guc->fw, 0x2000, UOS_MOVE); in intel_guc_fw_upload()
          [all …]
|
intel_guc_rc.h
      11  void intel_guc_rc_init_early(struct intel_guc *guc);
      13  static inline bool intel_guc_rc_is_supported(struct intel_guc *guc) in intel_guc_rc_is_supported() argument
      15  return guc->rc_supported; in intel_guc_rc_is_supported()
      18  static inline bool intel_guc_rc_is_wanted(struct intel_guc *guc) in intel_guc_rc_is_wanted() argument
      20  return guc->submission_selected && intel_guc_rc_is_supported(guc); in intel_guc_rc_is_wanted()
      23  static inline bool intel_guc_rc_is_used(struct intel_guc *guc) in intel_guc_rc_is_used() argument
      25  return intel_guc_submission_is_used(guc) && intel_guc_rc_is_wanted(guc); in intel_guc_rc_is_used()
      28  int intel_guc_rc_enable(struct intel_guc *guc);
      29  int intel_guc_rc_disable(struct intel_guc *guc);
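These inlines form a three-level gate: rc_supported reflects hardware/firmware capability, rc_is_wanted adds the user's submission selection, and rc_is_used additionally requires that GuC submission is actually in use. A hedged usage sketch (the call site is illustrative, not a verbatim i915 path):

```c
/* Illustrative caller only; the real i915 call sites may differ. */
static int rc_setup_sketch(struct intel_guc *guc)
{
	if (!intel_guc_rc_is_used(guc))
		return 0;	/* host-managed RC6 stays in charge */

	return intel_guc_rc_enable(guc);	/* see intel_guc_rc.c line 75 */
}
```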
|
intel_guc_log.c
      42  struct intel_guc *guc = log_to_guc(log); in _guc_log_init_sizes() local
     544  if (!guc->dbgfs_node) in guc_log_relay_create()
     548  guc->dbgfs_node, in guc_log_relay_create()
     585  guc_action_flush_log_complete(guc); in guc_log_copy_debuglogs_for_relay()
     686  ret = guc_action_control_log(guc, in intel_guc_log_set_level()
     784  guc_action_flush_log(guc); in intel_guc_log_relay_flush()
     884  struct intel_uc *uc = container_of(guc, struct intel_uc, guc); in intel_guc_log_dump()
     890  if (!intel_guc_is_supported(guc)) in intel_guc_log_dump()
     895  else if (guc->log.vma) in intel_guc_log_dump()
     896  obj = guc->log.vma->obj; in intel_guc_log_dump()
          [all …]
|
selftest_guc.c
     147  struct intel_guc *guc = gt_to_guc(gt); in intel_guc_steal_guc_ids() local
     158  guc_err(guc, "Context array allocation failed\n"); in intel_guc_steal_guc_ids()
     164  sv = guc->submission_state.num_guc_ids; in intel_guc_steal_guc_ids()
     165  guc->submission_state.num_guc_ids = 512; in intel_guc_steal_guc_ids()
     264  guc_err(guc, "No guc_id was stolen"); in intel_guc_steal_guc_ids()
     284  guc->submission_state.num_guc_ids = sv; in intel_guc_steal_guc_ids()
     293  static int bad_h2g(struct intel_guc *guc) in bad_h2g() argument
     354  gt->uc.guc.fast_response_selftest = 1; in intel_guc_fast_request()
     356  ret = bad_h2g(&gt->uc.guc); in intel_guc_fast_request()
     375  if (gt->uc.guc.fast_response_selftest != 2) { in intel_guc_fast_request()
          [all …]
|
intel_guc_slpc.c
      64  return guc->submission_supported && in __detect_slpc_supported()
      65  GRAPHICS_VER(guc_to_i915(guc)) >= 12; in __detect_slpc_supported()
      70  if (!intel_guc_slpc_is_supported(guc)) in __guc_slpc_selected()
      73  return guc->submission_selected; in __guc_slpc_selected()
      78  struct intel_guc *guc = slpc_to_guc(slpc); in intel_guc_slpc_init_early() local
      81  slpc->selected = __guc_slpc_selected(guc); in intel_guc_slpc_init_early()
     147  struct intel_guc *guc = slpc_to_guc(slpc); in slpc_set_param_nb() local
     191  struct intel_guc *guc = slpc_to_guc(slpc); in slpc_query_task_state() local
     195  ret = guc_action_slpc_query(guc, offset); in slpc_query_task_state()
     228  if (!intel_guc_is_ready(guc)) in slpc_force_min_freq()
          [all …]
|
intel_guc_hwconfig.c
      35  static int __guc_action_get_hwconfig(struct intel_guc *guc, in __guc_action_get_hwconfig() argument
      46  guc_dbg(guc, "Querying HW config table: size = %d, offset = 0x%08X\n", in __guc_action_get_hwconfig()
      48  ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0); in __guc_action_get_hwconfig()
      63  ret = __guc_action_get_hwconfig(guc, 0, 0); in guc_hwconfig_discover_size()
      74  static int guc_hwconfig_fill_buffer(struct intel_guc *guc, struct intel_hwconfig *hwconfig) in guc_hwconfig_fill_buffer() argument
      83  ret = intel_guc_allocate_and_map_vma(guc, hwconfig->size, &vma, &vaddr); in guc_hwconfig_fill_buffer()
      87  ggtt_offset = intel_guc_ggtt_offset(guc, vma); in guc_hwconfig_fill_buffer()
      89  ret = __guc_action_get_hwconfig(guc, ggtt_offset, hwconfig->size); in guc_hwconfig_fill_buffer()
     117  struct intel_guc *guc = gt_to_guc(gt); in guc_hwconfig_init() local
     123  ret = guc_hwconfig_discover_size(guc, hwconfig); in guc_hwconfig_init()
          [all …]
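The hits above outline a two-step handshake: __guc_action_get_hwconfig() is first sent with offset and size both zero (line 63) so the firmware reports the table size, then a buffer of that size is mapped and the same action is resent with a real GGTT offset (lines 83-89). A condensed sketch of that flow, assuming the positive return value of the zero-sized query carries the size, with error unwinding trimmed:

```c
/* Condensed from the hits above; assumes __guc_action_get_hwconfig()
 * returns the table size on the initial zero-sized query. */
static int hwconfig_init_sketch(struct intel_guc *guc,
				struct intel_hwconfig *hwconfig)
{
	struct i915_vma *vma;
	void *vaddr;
	int ret;

	ret = __guc_action_get_hwconfig(guc, 0, 0);	/* size query */
	if (ret < 0)
		return ret;
	hwconfig->size = ret;

	ret = intel_guc_allocate_and_map_vma(guc, hwconfig->size, &vma, &vaddr);
	if (ret)
		return ret;

	/* ask the firmware to fill the buffer it can now reach via GGTT */
	ret = __guc_action_get_hwconfig(guc, intel_guc_ggtt_offset(guc, vma),
					hwconfig->size);
	/* ... copy out of vaddr and release the vma (omitted) ... */
	return ret;
}
```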
|
/drivers/gpu/drm/xe/
xe_guc.c
      94  u32 offset = guc_bo_ggtt_addr(guc, guc->log.bo) >> PAGE_SHIFT; in guc_ctl_log_params_flags()
     147  u32 ads = guc_bo_ggtt_addr(guc, guc->ads.bo) >> PAGE_SHIFT; in guc_ctl_ads_flags()
     465  if (guc->g2g.bo) in guc_g2g_alloc()
     477  guc->g2g.bo = bo; in guc_g2g_alloc()
     493  guc->g2g.bo = bo; in guc_g2g_alloc()
     501  if (!guc->g2g.bo) in guc_g2g_fini()
     508  guc->g2g.bo = NULL; in guc_g2g_fini()
     519  if (!guc->g2g.bo) { in guc_g2g_start()
     664  guc_g2g_fini(guc); in guc_fini_hw()
     812  err = xe_guc_submit_init(guc, xe_gt_sriov_vf_guc_ids(guc_to_gt(guc))); in vf_guc_init_post_hwconfig()
          [all …]
|
xe_guc_submit.c
     612  CIRC_SPACE(q->guc->wqi_tail, q->guc->wqi_head, WQ_SIZE) in wq_wait_for_space()
    1059  q->guc->id, in __deregister_exec_queue()
    1182  q->guc->id, ret, xe_guc_read_stopped(guc)); in guc_exec_queue_timedout_job()
    1377  xe_gt_assert(guc_to_gt(guc), q->guc->suspend_pending); in suspend_fence_signal()
    1692  xe_gt_assert(guc_to_gt(guc), !q->guc->suspend_pending); in guc_exec_queue_resume()
    1886  xe_gt_assert(guc_to_gt(guc), guc_id >= q->guc->id); in g2h_exec_queue_lookup()
    1973  atomic_read(&q->guc->state), q->guc->id, in xe_guc_sched_done_handler()
    2013  atomic_read(&q->guc->state), q->guc->id); in xe_guc_deregister_done_handler()
    2167  snapshot->guc.wqi_head = q->guc->wqi_head; in guc_exec_queue_wq_snapshot_capture()
    2168  snapshot->guc.wqi_tail = q->guc->wqi_tail; in guc_exec_queue_wq_snapshot_capture()
          [all …]
|
xe_guc.h
      21  #define GUC_SUBMIT_VER(guc) \ argument
      23  #define GUC_FIRMWARE_VER(guc) \ argument
      30  int xe_guc_init(struct xe_guc *guc);
      33  int xe_guc_reset(struct xe_guc *guc);
      34  int xe_guc_upload(struct xe_guc *guc);
      38  int xe_guc_suspend(struct xe_guc *guc);
      52  void xe_guc_stop(struct xe_guc *guc);
      53  int xe_guc_start(struct xe_guc *guc);
      79  return container_of(guc, struct xe_gt, uc.guc); in guc_to_gt()
      84  return gt_to_xe(guc_to_gt(guc)); in guc_to_xe()
          [all …]
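guc_to_gt() and guc_to_xe() at lines 79-84 are the standard container_of() upcasts: struct xe_guc is embedded in struct xe_gt as uc.guc, so the parent pointer is recovered arithmetically. A self-contained illustration of the idiom with simplified stand-in types (not the real xe structs, and a minimal container_of without the kernel's type checking):

```c
#include <stddef.h>

/* Simplified stand-ins; the real xe structs carry far more state. */
struct guc { int state; };
struct gt  { int id; struct guc guc; };

/* Classic minimal container_of(); the kernel's adds type checks. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Same shape as guc_to_gt() in xe_guc.h line 79. */
static struct gt *guc_to_gt(struct guc *guc)
{
	return container_of(guc, struct gt, guc);
}
```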
|
xe_guc_engine_activity.c
      68  struct xe_device *xe = guc_to_xe(guc); in allocate_engine_activity_group()
      93  struct xe_gt *gt = guc_to_gt(guc); in allocate_engine_activity_buffers()
     128  struct xe_gt *gt = guc_to_gt(guc); in is_engine_activity_supported()
     150  struct xe_guc *guc = &hwe->gt->uc.guc; in hw_engine_to_engine_activity() local
     177  struct xe_gt *gt = guc_to_gt(guc); in get_engine_active_ticks()
     231  struct xe_device *xe = guc_to_xe(guc); in get_engine_total_ticks()
     381  engine_activity_set_cpu_ts(guc, i); in engine_activity_enable_function_stats()
     400  if (!is_function_valid(guc, fn_id)) in xe_guc_engine_activity_active_ticks()
     420  if (!is_function_valid(guc, fn_id)) in xe_guc_engine_activity_total_ticks()
     477  engine_activity_set_cpu_ts(guc, 0); in xe_guc_engine_activity_enable_stats()
          [all …]
|
xe_guc_hwconfig.c
      43  int ret = send_get_hwconfig(guc, xe_bo_ggtt_addr(guc->hwconfig.bo), in guc_hwconfig_copy()
      44  guc->hwconfig.size); in guc_hwconfig_copy()
      55  struct xe_gt *gt = guc_to_gt(guc); in xe_guc_hwconfig_init()
      62  if (guc->hwconfig.bo) in xe_guc_hwconfig_init()
      75  err = guc_hwconfig_size(guc, &size); in xe_guc_hwconfig_init()
      87  guc->hwconfig.bo = bo; in xe_guc_hwconfig_init()
      88  guc->hwconfig.size = size; in xe_guc_hwconfig_init()
      90  return guc_hwconfig_copy(guc); in xe_guc_hwconfig_init()
      95  return !guc->hwconfig.bo ? 0 : guc->hwconfig.size; in xe_guc_hwconfig_size()
     102  XE_WARN_ON(!guc->hwconfig.bo); in xe_guc_hwconfig_copy()
          [all …]
|
xe_guc_capture.c
     327  struct xe_guc *guc = &gt->uc.guc; in xe_guc_capture_get_reg_desc_list() local
     774  if (!guc->capture) in guc_capture_output_size_est()
    1352  struct xe_uc *uc = container_of(guc, typeof(*uc), guc); in __guc_capture_process_output()
    1411  xe_map_wr(guc_to_xe(guc), &guc->log.bo->vmap, in __guc_capture_process_output()
    1421  xe_map_wr(guc_to_xe(guc), &guc->log.bo->vmap, in __guc_capture_process_output()
    1439  if (guc->capture) in xe_guc_capture_process()
    1526  guc->capture->max_mmio_per_node = guc_get_max_reglist_count(guc); in guc_capture_create_prealloc_nodes()
    1583  struct xe_guc *guc = &gt->uc.guc; in xe_engine_manual_capture() local
    1882  struct xe_guc *guc = &q->gt->uc.guc; in xe_guc_capture_get_matching_and_lock() local
    1938  struct xe_guc *guc = &q->gt->uc.guc; in xe_engine_snapshot_capture_for_queue() local
          [all …]
|
xe_uc.c
      60  ret = xe_guc_init(&uc->guc); in xe_uc_init()
     125  ret = xe_guc_reset(&uc->guc); in uc_reset()
     137  xe_guc_sanitize(&uc->guc); in xe_uc_sanitize()
     176  xe_guc_sanitize(&uc->guc); in vf_uc_load_hw()
     199  ret = xe_guc_upload(&uc->guc); in xe_uc_load_hw()
     231  xe_guc_sanitize(&uc->guc); in xe_uc_load_hw()
     252  xe_guc_stop_prepare(&uc->guc); in xe_uc_stop_prepare()
     261  xe_guc_stop(&uc->guc); in xe_uc_stop()
     270  return xe_guc_start(&uc->guc); in xe_uc_start()
     278  xe_guc_reset_wait(&uc->guc); in uc_reset_wait()
          [all …]
|
xe_guc_submit.h
      15  int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids);
      17  int xe_guc_submit_reset_prepare(struct xe_guc *guc);
      18  void xe_guc_submit_reset_wait(struct xe_guc *guc);
      19  void xe_guc_submit_stop(struct xe_guc *guc);
      20  int xe_guc_submit_start(struct xe_guc *guc);
      21  void xe_guc_submit_wedge(struct xe_guc *guc);
      23  int xe_guc_read_stopped(struct xe_guc *guc);
      24  int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
      25  int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
      30  int xe_guc_error_capture_handler(struct xe_guc *guc, u32 *msg, u32 len);
          [all …]
|
xe_gt_sriov_pf_migration.c
      51  struct xe_guc *guc = &gt->uc.guc; in pf_send_guc_save_vf_state() local
      87  struct xe_guc *guc = &gt->uc.guc; in pf_send_guc_restore_vf_state() local
     148  snapshot->guc.buff = NULL; in pf_free_guc_state()
     149  snapshot->guc.size = 0; in pf_free_guc_state()
     174  snapshot->guc.buff = p; in pf_alloc_guc_state()
     175  snapshot->guc.size = size; in pf_alloc_guc_state()
     187  snapshot->guc.buff, min(SZ_64, snapshot->guc.size)); in pf_dump_guc_state()
     213  snapshot->guc.size = size; in pf_save_vf_guc_state()
     256  if (!snapshot->guc.size) in pf_restore_vf_guc_state()
     261  ret = pf_send_guc_restore_vf_state(gt, vfid, snapshot->guc.buff, snapshot->guc.size); in pf_restore_vf_guc_state()
          [all …]
|
xe_guc_debugfs.c
      76  ret = print(&gt->uc.guc, &p); in guc_debugfs_show()
      82  static int guc_log(struct xe_guc *guc, struct drm_printer *p) in guc_log() argument
      84  xe_guc_log_print(&guc->log, p); in guc_log()
      88  static int guc_log_dmesg(struct xe_guc *guc, struct drm_printer *p) in guc_log_dmesg() argument
      90  xe_guc_log_print_dmesg(&guc->log); in guc_log_dmesg()
      94  static int guc_ctb(struct xe_guc *guc, struct drm_printer *p) in guc_ctb() argument
      96  xe_guc_ct_print(&guc->ct, p, true); in guc_ctb()
     100  static int guc_pc(struct xe_guc *guc, struct drm_printer *p) in guc_pc() argument
     102  xe_guc_pc_print(&guc->pc, p); in guc_pc()
     127  void xe_guc_debugfs_register(struct xe_guc *guc, struct dentry *parent) in xe_guc_debugfs_register() argument
          [all …]
|