Lines matching refs:gpu in drivers/gpu/drm/msm/msm_gpu.c (cross-reference listing; elided source is marked /* ... */)
static int enable_pwrrail(struct msm_gpu *gpu)
{
        struct drm_device *dev = gpu->dev;
        int ret = 0;

        if (gpu->gpu_reg) {
                ret = regulator_enable(gpu->gpu_reg);
                /* ... on failure, log against dev->dev and return ret ... */
        }

        if (gpu->gpu_cx) {
                ret = regulator_enable(gpu->gpu_cx);
                /* ... */
        }

        return 0;
}

static int disable_pwrrail(struct msm_gpu *gpu)
{
        /* Disable in the reverse order of enable_pwrrail(): cx first, then vdd */
        if (gpu->gpu_cx)
                regulator_disable(gpu->gpu_cx);
        if (gpu->gpu_reg)
                regulator_disable(gpu->gpu_reg);

        return 0;
}

static int enable_clk(struct msm_gpu *gpu)
{
        if (gpu->core_clk && gpu->fast_rate)
                dev_pm_opp_set_rate(&gpu->pdev->dev, gpu->fast_rate);

        /* The RBBM timer clock always runs at 19.2 MHz */
        if (gpu->rbbmtimer_clk)
                clk_set_rate(gpu->rbbmtimer_clk, 19200000);

        return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
}

static int disable_clk(struct msm_gpu *gpu)
{
        clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);

        /*
         * Park the core clock at a deliberately low rate: older targets need
         * a non-zero rate, newer targets round this down to zero anyway.
         */
        if (gpu->core_clk)
                dev_pm_opp_set_rate(&gpu->pdev->dev, 27000000);

        if (gpu->rbbmtimer_clk)
                clk_set_rate(gpu->rbbmtimer_clk, 0);

        return 0;
}

static int enable_axi(struct msm_gpu *gpu)
{
        return clk_prepare_enable(gpu->ebi1_clk);
}

static int disable_axi(struct msm_gpu *gpu)
{
        clk_disable_unprepare(gpu->ebi1_clk);
        return 0;
}

int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
        int ret;

        DBG("%s", gpu->name);

        /* Power up in order: rails, then core clocks, then the AXI bus clock */
        ret = enable_pwrrail(gpu);
        if (ret)
                return ret;

        ret = enable_clk(gpu);
        if (ret)
                return ret;

        ret = enable_axi(gpu);
        if (ret)
                return ret;

        msm_devfreq_resume(gpu);

        gpu->needs_hw_init = true;

        return 0;
}

int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
        int ret;

        DBG("%s", gpu->name);

        msm_devfreq_suspend(gpu);

        /* Power down in the reverse order of msm_gpu_pm_resume() */
        ret = disable_axi(gpu);
        if (ret)
                return ret;

        ret = disable_clk(gpu);
        if (ret)
                return ret;

        ret = disable_pwrrail(gpu);
        if (ret)
                return ret;

        gpu->suspend_count++;

        return 0;
}

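These two functions do the actual power sequencing; in the msm driver they are reached from the runtime-PM path. A minimal sketch of how a backend might wire them up, where mygpu_runtime_suspend/resume and dev_to_mygpu() are hypothetical names (the real adreno glue does more work, e.g. quiescing in-flight submits first):

/*
 * Sketch (assumption): wiring the pm_resume/pm_suspend pair into runtime PM.
 */
static int mygpu_runtime_resume(struct device *dev)
{
        struct msm_gpu *gpu = dev_to_mygpu(dev);        /* hypothetical lookup */

        return msm_gpu_pm_resume(gpu);
}

static int mygpu_runtime_suspend(struct device *dev)
{
        struct msm_gpu *gpu = dev_to_mygpu(dev);

        return msm_gpu_pm_suspend(gpu);
}

static const struct dev_pm_ops mygpu_pm_ops = {
        RUNTIME_PM_OPS(mygpu_runtime_suspend, mygpu_runtime_resume, NULL)
};
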
void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_context *ctx,
                struct drm_printer *p)
{
        /* ... per-context drm-engine/drm-cycles stats elided ... */
        drm_printf(p, "drm-maxfreq-gpu:\t%u Hz\n", gpu->fast_rate);
}

int msm_gpu_hw_init(struct msm_gpu *gpu)
{
        int ret;

        WARN_ON(!mutex_is_locked(&gpu->lock));

        if (!gpu->needs_hw_init)
                return 0;

        disable_irq(gpu->irq);
        ret = gpu->funcs->hw_init(gpu);
        if (!ret)
                gpu->needs_hw_init = false;
        enable_irq(gpu->irq);

        return ret;
}

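msm_gpu_hw_init() is cheap to call repeatedly: the needs_hw_init flag, set again by msm_gpu_pm_resume(), makes only the first call after resume touch the hardware, and the WARN_ON documents that callers must hold gpu->lock. A minimal caller sketch:

/* Sketch: lazily (re)initialize the hardware before touching it. */
int ret;

mutex_lock(&gpu->lock);
ret = msm_gpu_hw_init(gpu);
mutex_unlock(&gpu->lock);
if (ret)
        return ret;
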
#ifdef CONFIG_DEV_COREDUMP
static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
                size_t count, void *data, size_t datalen)
{
        struct msm_gpu *gpu = data;
        struct drm_printer p;
        struct msm_gpu_state *state;

        state = msm_gpu_crashstate_get(gpu);
        if (!state)
                return 0;

        /* ... set up 'p' as a drm_coredump_printer over buffer/offset/count ... */

        gpu->funcs->show(gpu, state, &p);

        msm_gpu_crashstate_put(gpu);

        /* ... return the number of bytes written ... */
}

static void msm_gpu_devcoredump_free(void *data)
{
        struct msm_gpu *gpu = data;

        msm_gpu_crashstate_put(gpu);
}

static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
                struct msm_gem_submit *submit,
                struct msm_gpu_fault_info *fault_info, char *comm, char *cmd)
{
        struct msm_gpu_state *state;

        /* Check if the target supports capturing crash state */
        if (!gpu->funcs->gpu_state_get)
                return;

        /* Only save one crash state at a time */
        if (gpu->crashstate)
                return;

        state = gpu->funcs->gpu_state_get(gpu);
        /* ... fill in comm/cmd/fault info and snapshot the ring contents ... */

        /* Set the active crash state to be dumped on failure */
        gpu->crashstate = state;

        dev_coredumpm(&gpu->pdev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
                msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
}
#else
/* Stub used when CONFIG_DEV_COREDUMP is disabled */
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
                struct msm_gem_submit *submit,
                struct msm_gpu_fault_info *fault_info, char *comm, char *cmd)
{
}
#endif

static void retire_submits(struct msm_gpu *gpu);

static void get_comm_cmdline(struct msm_gem_submit *submit, char **comm, char **cmd)
{
        /* ... */
        WARN_ON(!mutex_is_locked(&submit->gpu->lock));
        /* ... look up the submitting task's comm and cmdline ... */
}

static void recover_worker(struct kthread_work *work)
{
        struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
        struct drm_device *dev = gpu->dev;
        struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
        struct msm_gem_submit *submit;
        char *comm = NULL, *cmd = NULL;
        int i;

        mutex_lock(&gpu->lock);

        DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);

        /* ... find the hung submit on cur_ring, then get_comm_cmdline() ... */
        if (comm && cmd)
                DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
                        gpu->name, comm, cmd);
        else
                DRM_DEV_ERROR(dev->dev, "%s: offending task: unknown\n", gpu->name);

        /* Record the crash state */
        pm_runtime_get_sync(&gpu->pdev->dev);
        msm_gpu_crashstate_capture(gpu, submit, NULL, comm, cmd);

        /* ... advance each ring's fence past the hung submit: ... */
        for (i = 0; i < gpu->nr_rings; i++) {
                struct msm_ringbuffer *ring = gpu->rb[i];
                /* ... */
        }

        if (msm_gpu_active(gpu)) {
                /* retire completed submits, plus the one that hung: */
                retire_submits(gpu);

                gpu->funcs->recover(gpu);

                /* Replay remaining submits, highest-priority ring first */
                for (i = 0; i < gpu->nr_rings; i++) {
                        struct msm_ringbuffer *ring = gpu->rb[i];

                        /* ... for each submit still queued on ring: ... */
                        gpu->funcs->submit(gpu, submit);
                }
        }

        pm_runtime_put(&gpu->pdev->dev);

        mutex_unlock(&gpu->lock);

        msm_gpu_retire(gpu);
}

void msm_gpu_fault_crashstate_capture(struct msm_gpu *gpu, struct msm_gpu_fault_info *fault_info)
{
        struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
        struct msm_gem_submit *submit;
        char *comm = NULL, *cmd = NULL;

        mutex_lock(&gpu->lock);

        /* ... find the faulting submit on cur_ring; skip if already dumped ... */

        /* Record the crash state */
        pm_runtime_get_sync(&gpu->pdev->dev);
        msm_gpu_crashstate_capture(gpu, submit, fault_info, comm, cmd);
        pm_runtime_put_sync(&gpu->pdev->dev);

        /* ... */
        mutex_unlock(&gpu->lock);
}

static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
        struct msm_drm_private *priv = gpu->dev->dev_private;

        mod_timer(&gpu->hangcheck_timer,
                        round_jiffies_up(jiffies + msecs_to_jiffies(priv->hangcheck_period)));
}

static bool made_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
        /* ... bail out if the ring already used up its progress retries ... */

        if (!gpu->funcs->progress)
                return false;

        if (!gpu->funcs->progress(gpu, ring))
                return false;

        /* ... count the retry and report progress ... */
        return true;
}

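The optional funcs->progress hook lets a backend report forward progress even though the fence has not advanced, deferring recovery for long-running work. A sketch of the shape such a hook could take, where REG_MYGPU_CP_RPTR and last_rptr are hypothetical (the real a6xx implementation compares CP state snapshots):

/*
 * Sketch (assumption): report progress when the ring's read pointer moved
 * since the previous hangcheck tick.
 */
static bool mygpu_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
        u32 rptr = gpu_read(gpu, REG_MYGPU_CP_RPTR);    /* hypothetical register */
        bool moved = (rptr != ring->last_rptr);         /* hypothetical field */

        ring->last_rptr = rptr;
        return moved;
}
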
static void hangcheck_handler(struct timer_list *t)
{
        struct msm_gpu *gpu = timer_container_of(gpu, t, hangcheck_timer);
        struct drm_device *dev = gpu->dev;
        struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
        uint32_t fence = ring->memptrs->fence;

        if (fence != ring->hangcheck_fence) {
                /* some progress has been made.. ya! */
                ring->hangcheck_fence = fence;
        } else if (fence_before(fence, ring->fctx->last_fence) &&
                        !made_progress(gpu, ring)) {
                /* no progress and not done.. hung! */
                DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
                        gpu->name, ring->id);
                DRM_DEV_ERROR(dev->dev, "%s:     completed fence: %u\n",
                        gpu->name, fence);
                DRM_DEV_ERROR(dev->dev, "%s:     submitted fence: %u\n",
                        gpu->name, ring->fctx->last_fence);

                kthread_queue_work(gpu->worker, &gpu->recover_work);
        }

        /* if still more pending work, reset the hangcheck timer: */
        if (fence_after(ring->fctx->last_fence, ring->hangcheck_fence))
                hangcheck_timer_reset(gpu);

        /* workaround for missing irq: */
        msm_gpu_retire(gpu);
}

static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
{
        uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
        int i, n = min(ncntrs, gpu->num_perfcntrs);

        /* read current values: */
        for (i = 0; i < gpu->num_perfcntrs; i++)
                current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);

        /* update cntrs (unsigned subtraction handles counter wrap-around): */
        for (i = 0; i < n; i++)
                cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];

        /* save current values: */
        for (i = 0; i < gpu->num_perfcntrs; i++)
                gpu->last_cntrs[i] = current_cntrs[i];

        return n;
}

static void update_sw_cntrs(struct msm_gpu *gpu)
{
        unsigned long flags;
        ktime_t time;
        uint32_t elapsed;

        spin_lock_irqsave(&gpu->perf_lock, flags);
        if (!gpu->perfcntr_active)
                goto out;

        time = ktime_get();
        elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));

        gpu->totaltime += elapsed;
        if (gpu->last_sample.active)
                gpu->activetime += elapsed;

        gpu->last_sample.active = msm_gpu_active(gpu);
        gpu->last_sample.time = time;

out:
        spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
        unsigned long flags;

        pm_runtime_get_sync(&gpu->pdev->dev);

        spin_lock_irqsave(&gpu->perf_lock, flags);
        /* we could dynamically enable/disable perfcntr registers too.. */
        gpu->last_sample.active = msm_gpu_active(gpu);
        gpu->last_sample.time = ktime_get();
        gpu->activetime = gpu->totaltime = 0;
        gpu->perfcntr_active = true;
        update_hw_cntrs(gpu, 0, NULL);
        spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
        gpu->perfcntr_active = false;
        pm_runtime_put_sync(&gpu->pdev->dev);
}

/* returns -errno or # of cntrs sampled */
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
                uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&gpu->perf_lock, flags);

        if (!gpu->perfcntr_active) {
                ret = -EINVAL;
                goto out;
        }

        *activetime = gpu->activetime;
        *totaltime = gpu->totaltime;

        gpu->activetime = gpu->totaltime = 0;

        ret = update_hw_cntrs(gpu, ncntrs, cntrs);

out:
        spin_unlock_irqrestore(&gpu->perf_lock, flags);

        return ret;
}

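Sampling is destructive (activetime and totaltime reset to zero on each read), so a single consumer should own the counters between start and stop. A hypothetical consumer, assuming two hardware counters are configured in gpu->perfcntrs:

/* Sketch: sample the busy ratio and up to two hw counters. */
uint32_t activetime, totaltime, cntrs[2];
int n;

msm_gpu_perfcntr_start(gpu);
/* ... let some work run ... */
n = msm_gpu_perfcntr_sample(gpu, &activetime, &totaltime, 2, cntrs);
if (n >= 0)
        DBG("busy %u of %u us, %d hw counters sampled", activetime, totaltime, n);
msm_gpu_perfcntr_stop(gpu);
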
static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
                struct msm_gem_submit *submit)
{
        /* ... account elapsed time/cycles, signal fences, unpin objects ... */

        pm_runtime_mark_last_busy(&gpu->pdev->dev);

        /* ... remove the submit from ring->submits under submit_lock ... */

        /* Update devfreq on transition from active->idle: */
        mutex_lock(&gpu->active_lock);
        gpu->active_submits--;
        WARN_ON(gpu->active_submits < 0);
        if (!gpu->active_submits) {
                msm_devfreq_idle(gpu);
                pm_runtime_put_autosuspend(&gpu->pdev->dev);
        }
        mutex_unlock(&gpu->active_lock);

        /* ... drop the ring's reference to the submit ... */
}

static void retire_submits(struct msm_gpu *gpu)
{
        int i;

        /* Retire the commits starting with the highest priority ring */
        for (i = 0; i < gpu->nr_rings; i++) {
                struct msm_ringbuffer *ring = gpu->rb[i];
                struct msm_gem_submit *submit;

                /* ... while the oldest submit's hw fence is signalled: ... */
                        retire_submit(gpu, ring, submit);
        }

        wake_up_all(&gpu->retire_event);
}

static void retire_worker(struct kthread_work *work)
{
        struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);

        retire_submits(gpu);
}

/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire(struct msm_gpu *gpu)
{
        int i;

        for (i = 0; i < gpu->nr_rings; i++)
                msm_update_fence(gpu->rb[i]->fctx, gpu->rb[i]->memptrs->fence);

        kthread_queue_work(gpu->worker, &gpu->retire_work);
        update_sw_cntrs(gpu);
}

/* add bo's to gpu's ring, and kick gpu: */
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
        struct msm_ringbuffer *ring = submit->ring;

        WARN_ON(!mutex_is_locked(&gpu->lock));

        pm_runtime_get_sync(&gpu->pdev->dev);

        msm_gpu_hw_init(gpu);

        update_sw_cntrs(gpu);

        /* ... take a reference and queue the submit on ring->submits ... */

        /* Update devfreq on transition from idle->active: */
        mutex_lock(&gpu->active_lock);
        if (!gpu->active_submits) {
                pm_runtime_get(&gpu->pdev->dev);
                msm_devfreq_active(gpu);
        }
        gpu->active_submits++;
        mutex_unlock(&gpu->active_lock);

        gpu->funcs->submit(gpu, submit);

        pm_runtime_put(&gpu->pdev->dev);
        hangcheck_timer_reset(gpu);
}

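Two runtime-PM references are in play here: the get_sync/put pair brackets just the submission itself, while the idle->active transition takes a longer-lived reference that retire_submit() drops only when active_submits falls back to zero, which is what lets autosuspend kick in between bursts of work. A summary of the pairing (sketch, single in-flight submit assumed):

/*
 *   msm_gpu_submit()                    retire_submit()
 *     active_submits: 0 -> 1              active_submits: 1 -> 0
 *     pm_runtime_get(...);                msm_devfreq_idle(gpu);
 *     msm_devfreq_active(gpu);            pm_runtime_put_autosuspend(...);
 */
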
static irqreturn_t irq_handler(int irq, void *data)
{
        struct msm_gpu *gpu = data;

        return gpu->funcs->irq(gpu);
}

static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
{
        int ret = devm_clk_bulk_get_all(&pdev->dev, &gpu->grp_clks);

        if (ret < 1) {
                gpu->nr_clocks = 0;
                return ret;
        }

        gpu->nr_clocks = ret;

        gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
                gpu->nr_clocks, "core");

        gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
                gpu->nr_clocks, "rbbmtimer");

        return 0;
}

struct drm_gpuvm *
msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task,
                bool kernel_managed)
{
        struct drm_gpuvm *vm = NULL;

        if (!gpu)
                return NULL;

        /*
         * If the target doesn't support private address spaces then return
         * the global one
         */
        if (gpu->funcs->create_private_vm) {
                vm = gpu->funcs->create_private_vm(gpu, kernel_managed);
                /* ... on success, tag the vm with the owning task's pid ... */
        }

        if (IS_ERR_OR_NULL(vm))
                vm = drm_gpuvm_get(gpu->vm);

        return vm;
}

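A hypothetical caller at context-open time, illustrating the fallback: on hardware without per-process page tables every context simply shares the global gpu->vm ('ctx' and the kernel_managed choice here are assumptions, not the driver's actual open path):

/* Sketch: give each new context its own VM when supported. */
struct drm_gpuvm *vm = msm_gpu_create_private_vm(gpu, current, true);

if (IS_ERR(vm))
        return PTR_ERR(vm);
ctx->vm = vm;           /* 'ctx' is a hypothetical msm_context being set up */
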
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
                const char *name, struct msm_gpu_config *config)
{
        struct msm_drm_private *priv = drm->dev_private;
        int i, ret, nr_rings = config->nr_rings;
        void *memptrs;
        uint64_t memptrs_iova;

        if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
                gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);

        gpu->dev = drm;
        gpu->funcs = funcs;
        gpu->name = name;

        gpu->worker = kthread_run_worker(0, "gpu-worker");
        if (IS_ERR(gpu->worker)) {
                ret = PTR_ERR(gpu->worker);
                gpu->worker = NULL;
                goto fail;
        }

        sched_set_fifo_low(gpu->worker->task);

        mutex_init(&gpu->active_lock);
        mutex_init(&gpu->lock);
        init_waitqueue_head(&gpu->retire_event);
        kthread_init_work(&gpu->retire_work, retire_worker);
        kthread_init_work(&gpu->recover_work, recover_worker);

        priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD;

        /*
         * If progress detection is supported, halve the hangcheck timer
         * duration, as it takes two iterations of the handler to detect a hang.
         */
        if (funcs->progress)
                priv->hangcheck_period /= 2;

        timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);

        spin_lock_init(&gpu->perf_lock);

        /* Map registers: */
        gpu->mmio = msm_ioremap(pdev, config->ioname);
        if (IS_ERR(gpu->mmio)) {
                ret = PTR_ERR(gpu->mmio);
                goto fail;
        }

        /* Get Interrupt: */
        gpu->irq = platform_get_irq(pdev, 0);
        if (gpu->irq < 0) {
                ret = gpu->irq;
                goto fail;
        }

        ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
                        IRQF_TRIGGER_HIGH, "gpu-irq", gpu);
        if (ret) {
                DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
                goto fail;
        }

        ret = get_clocks(pdev, gpu);
        if (ret)
                goto fail;

        gpu->ebi1_clk = msm_clk_get(pdev, "bus");
        DBG("ebi1_clk: %p", gpu->ebi1_clk);
        if (IS_ERR(gpu->ebi1_clk))
                gpu->ebi1_clk = NULL;

        /* Acquire regulators (optional; a missing supply is not fatal): */
        gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
        DBG("gpu_reg: %p", gpu->gpu_reg);
        if (IS_ERR(gpu->gpu_reg))
                gpu->gpu_reg = NULL;

        gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
        DBG("gpu_cx: %p", gpu->gpu_cx);
        if (IS_ERR(gpu->gpu_cx))
                gpu->gpu_cx = NULL;

        gpu->pdev = pdev;
        platform_set_drvdata(pdev, &gpu->adreno_smmu);

        msm_devfreq_init(gpu);

        gpu->vm = gpu->funcs->create_vm(gpu, pdev);
        if (IS_ERR(gpu->vm)) {
                ret = PTR_ERR(gpu->vm);
                goto fail;
        }

        /* Allocate one memptrs block per ring, mapped in the kernel vm: */
        memptrs = msm_gem_kernel_new(drm, sizeof(struct msm_rbmemptrs) * nr_rings,
                check_apriv(gpu, MSM_BO_WC), gpu->vm, &gpu->memptrs_bo,
                &memptrs_iova);
        if (IS_ERR(memptrs)) {
                ret = PTR_ERR(memptrs);
                goto fail;
        }

        msm_gem_object_set_name(gpu->memptrs_bo, "memptrs");

        if (nr_rings > ARRAY_SIZE(gpu->rb)) {
                DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
                        ARRAY_SIZE(gpu->rb));
                nr_rings = ARRAY_SIZE(gpu->rb);
        }

        /* Create ringbuffer(s): */
        for (i = 0; i < nr_rings; i++) {
                gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);
                if (IS_ERR(gpu->rb[i])) {
                        ret = PTR_ERR(gpu->rb[i]);
                        goto fail;
                }

                memptrs += sizeof(struct msm_rbmemptrs);
                memptrs_iova += sizeof(struct msm_rbmemptrs);
        }

        gpu->nr_rings = nr_rings;

        refcount_set(&gpu->sysprof_active, 1);

        return 0;

fail:
        for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
                msm_ringbuffer_destroy(gpu->rb[i]);
                gpu->rb[i] = NULL;
        }

        msm_gem_kernel_put(gpu->memptrs_bo, gpu->vm);

        platform_set_drvdata(pdev, NULL);
        return ret;
}

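A hypothetical backend bring-up call, to show how the pieces above are handed in; mygpu, mygpu_funcs and the embedded msm_gpu base are assumptions (the real adreno backends pass their own funcs tables and per-SoC register block names):

/* Sketch (assumption): minimal bring-up from a backend's init path. */
struct msm_gpu_config config = {
        .ioname = "kgsl_3d0_reg_memory",        /* register block name, per-SoC */
        .nr_rings = 1,
};

ret = msm_gpu_init(drm, pdev, &mygpu->base, &mygpu_funcs, "mygpu", &config);
if (ret)
        return ret;
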
void msm_gpu_cleanup(struct msm_gpu *gpu)
{
        int i;

        DBG("%s", gpu->name);

        for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
                msm_ringbuffer_destroy(gpu->rb[i]);
                gpu->rb[i] = NULL;
        }

        msm_gem_kernel_put(gpu->memptrs_bo, gpu->vm);

        if (!IS_ERR_OR_NULL(gpu->vm)) {
                struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu;

                mmu->funcs->detach(mmu);
                drm_gpuvm_put(gpu->vm);
        }

        if (gpu->worker)
                kthread_destroy_worker(gpu->worker);

        msm_devfreq_cleanup(gpu);

        platform_set_drvdata(gpu->pdev, NULL);
}