
Searched refs:gpu (Results 1 – 25 of 175) sorted by relevance


/drivers/gpu/drm/etnaviv/
etnaviv_gpu.c
429 gpu->identity.model, gpu->identity.revision); in etnaviv_hw_identify()
511 gpu->base_rate_core >> gpu->freq_scale); in etnaviv_gpu_update_clock()
513 gpu->base_rate_shader >> gpu->freq_scale); in etnaviv_gpu_update_clock()
528 gpu->fe_waitcycles = clamp(gpu->base_rate_core >> (15 - gpu->freq_scale), in etnaviv_gpu_update_clock()
1178 f->gpu = gpu; in etnaviv_gpu_fence_alloc()
1181 gpu->fence_context, ++gpu->next_fence); in etnaviv_gpu_fence_alloc()
1395 struct etnaviv_gpu *gpu = submit->gpu; in etnaviv_gpu_submit() local
1468 event_free(gpu, gpu->sync_point_event); in sync_point_worker()
1476 struct etnaviv_gpu *gpu = submit->gpu; in etnaviv_gpu_recover_hang() local
1600 queue_work(gpu->wq, &gpu->sync_point_work); in irq_handler()
[all …]
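
The matches above scale the core and shader clocks by right-shifting a base rate by freq_scale, and derive a front-end wait-cycle budget the same way. A minimal userspace sketch of that divider arithmetic (the 800 MHz base and the clamp bounds are assumptions, not the driver's values):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's clamp() macro. */
static uint32_t clamp_u32(uint32_t v, uint32_t lo, uint32_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	uint32_t base_rate_core = 800000000;	/* assumed 800 MHz base */
	uint32_t freq_scale = 2;		/* each step halves the clock */

	/* Effective rate, as in gpu->base_rate_core >> gpu->freq_scale. */
	uint32_t rate = base_rate_core >> freq_scale;

	/* Wait cycles shrink as the divider grows; bounds are illustrative. */
	uint32_t waitcycles = clamp_u32(base_rate_core >> (15 - freq_scale),
					1000, 100000);

	printf("core rate %u Hz, fe_waitcycles %u\n", rate, waitcycles);
	return 0;
}
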
etnaviv_sched.c
38 struct etnaviv_gpu *gpu = submit->gpu; in etnaviv_sched_timedout_job() local
58 mutex_lock(&gpu->lock); in etnaviv_sched_timedout_job()
63 mutex_unlock(&gpu->lock); in etnaviv_sched_timedout_job()
66 (gpu->completed_fence != gpu->hangcheck_fence || in etnaviv_sched_timedout_job()
71 gpu->hangcheck_primid = primid; in etnaviv_sched_timedout_job()
72 gpu->hangcheck_fence = gpu->completed_fence; in etnaviv_sched_timedout_job()
109 struct etnaviv_gpu *gpu = submit->gpu; in etnaviv_sched_push_job() local
117 mutex_lock(&gpu->sched_lock); in etnaviv_sched_push_job()
149 .name = dev_name(gpu->dev), in etnaviv_sched_init()
150 .dev = gpu->dev, in etnaviv_sched_init()
[all …]
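
etnaviv_sched_timedout_job() above only escalates to recovery when no new fence has been retired since the previous check. A pared-down sketch of that progress test (the struct and names are illustrative, not the driver's types):

#include <stdbool.h>
#include <stdint.h>

struct gpu_hangcheck {
	uint32_t completed_fence;	/* last fence the GPU signalled */
	uint32_t hangcheck_fence;	/* fence recorded at the last check */
};

/* Returns true when the job is really hung: if fences advanced since
 * the previous timeout, record the progress and give the job more time. */
static bool job_is_hung(struct gpu_hangcheck *gpu)
{
	if (gpu->completed_fence != gpu->hangcheck_fence) {
		gpu->hangcheck_fence = gpu->completed_fence;
		return false;	/* forward progress: re-arm instead */
	}
	return true;		/* no progress: recover the GPU */
}
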
etnaviv_buffer.c
95 lockdep_assert_held(&gpu->lock); in etnaviv_cmd_select_pipe()
103 if (gpu->exec_state == ETNA_PIPE_2D) in etnaviv_cmd_select_pipe()
167 lockdep_assert_held(&gpu->lock); in etnaviv_buffer_init()
184 lockdep_assert_held(&gpu->lock); in etnaviv_buffer_config_mmuv2()
219 lockdep_assert_held(&gpu->lock); in etnaviv_buffer_config_pta()
241 lockdep_assert_held(&gpu->lock); in etnaviv_buffer_end()
243 if (gpu->exec_state == ETNA_PIPE_2D) in etnaviv_buffer_end()
307 lockdep_assert_held(&gpu->lock); in etnaviv_sync_point_queue()
355 lockdep_assert_held(&gpu->lock); in etnaviv_buffer_queue()
438 gpu->flush_seq = new_flush_seq; in etnaviv_buffer_queue()
[all …]
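
Nearly every helper in etnaviv_buffer.c above opens with lockdep_assert_held(&gpu->lock). A rough userspace analogue of that assertion, tracking the owner alongside a pthread mutex (lockdep itself is far more thorough; this only checks self-ownership):

#include <assert.h>
#include <pthread.h>

struct checked_lock {
	pthread_mutex_t mutex;
	pthread_t owner;
	int held;
};

static void checked_lock_acquire(struct checked_lock *l)
{
	pthread_mutex_lock(&l->mutex);
	l->owner = pthread_self();
	l->held = 1;
}

static void checked_lock_release(struct checked_lock *l)
{
	l->held = 0;
	pthread_mutex_unlock(&l->mutex);
}

/* Analogue of lockdep_assert_held(): the caller must own the lock. */
static void checked_lock_assert_held(struct checked_lock *l)
{
	assert(l->held && pthread_equal(l->owner, pthread_self()));
}
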
etnaviv_perfmon.c
18 u32 (*sample)(struct etnaviv_gpu *gpu,
65 lockdep_assert_held(&gpu->lock); in pipe_perf_reg_read()
68 pipe_select(gpu, clock, i); in pipe_perf_reg_read()
73 pipe_select(gpu, clock, 0); in pipe_perf_reg_read()
86 lockdep_assert_held(&gpu->lock); in pipe_reg_read()
89 pipe_select(gpu, clock, i); in pipe_reg_read()
90 value += gpu_read(gpu, signal->data); in pipe_reg_read()
94 pipe_select(gpu, clock, 0); in pipe_reg_read()
110 return gpu_read(gpu, reg); in hi_total_cycle_read()
124 return gpu_read(gpu, reg); in hi_total_idle_cycle_read()
[all …]
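
pipe_reg_read() above iterates the pixel pipes, selecting each one and summing its counter, then restores pipe 0. An illustrative standalone version with the register I/O stubbed out (read_counter() and pipe_select() below are stand-ins, not driver functions):

#include <stdint.h>

static void pipe_select(uint32_t pipe) { (void)pipe; }	/* mux stub */
static uint32_t read_counter(uint32_t pipe) { return pipe + 100; }	/* fake data */

static uint32_t pipe_reg_read(uint32_t nr_pipes)
{
	uint32_t value = 0;

	for (uint32_t i = 0; i < nr_pipes; i++) {
		pipe_select(i);
		value += read_counter(i);	/* accumulate across pipes */
	}
	pipe_select(0);		/* leave the mux in its default state */
	return value;
}
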
etnaviv_gpu.h
172 writel(data, gpu->mmio + reg); in gpu_write()
182 readl(gpu->mmio + reg); in gpu_read()
184 return readl(gpu->mmio + reg); in gpu_read()
190 if (gpu->identity.model == chipModel_GC300 && in gpu_fix_power_address()
191 gpu->identity.revision < 0x2000) in gpu_fix_power_address()
199 writel(data, gpu->mmio + gpu_fix_power_address(gpu, reg)); in gpu_write_power()
204 return readl(gpu->mmio + gpu_fix_power_address(gpu, reg)); in gpu_read_power()
209 int etnaviv_gpu_init(struct etnaviv_gpu *gpu);
217 void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
224 int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
[all …]
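
etnaviv_gpu.h above implements register access as thin writel()/readl() wrappers over the mapped MMIO aperture, plus a GC300-specific address fixup for power registers. A hedged sketch of that accessor shape (plain volatile loads and stores stand in for the kernel's MMIO helpers; the model constant and offset are assumptions):

#include <stdint.h>

struct gpu {
	volatile uint32_t *mmio;	/* base of the register aperture */
	uint32_t model;			/* 0x300 assumed to mean GC300 */
	uint32_t revision;
};

static void gpu_write(struct gpu *gpu, uint32_t reg, uint32_t data)
{
	gpu->mmio[reg / 4] = data;	/* reg is a byte offset */
}

static uint32_t gpu_read(struct gpu *gpu, uint32_t reg)
{
	return gpu->mmio[reg / 4];
}

/* Early GC300 parts keep power registers at a shifted address; the
 * offset here is illustrative, not the hardware's real remapping. */
static uint32_t gpu_fix_power_address(struct gpu *gpu, uint32_t reg)
{
	if (gpu->model == 0x300 && gpu->revision < 0x2000)
		reg += 0x100;
	return reg;
}
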
etnaviv_drv.c
85 struct etnaviv_gpu *gpu = priv->gpu[i]; in etnaviv_open() local
88 if (gpu) { in etnaviv_open()
112 struct etnaviv_gpu *gpu = priv->gpu[i]; in etnaviv_postclose() local
114 if (gpu) in etnaviv_postclose()
234 gpu = priv->gpu[i]; in show_each_gpu()
276 gpu = priv->gpu[args->pipe]; in etnaviv_ioctl_get_param()
277 if (!gpu) in etnaviv_ioctl_get_param()
372 gpu = priv->gpu[args->pipe]; in etnaviv_ioctl_wait_fence()
423 gpu = priv->gpu[args->pipe]; in etnaviv_ioctl_gem_wait()
451 gpu = priv->gpu[args->pipe]; in etnaviv_ioctl_pm_query_dom()
[all …]
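
The etnaviv ioctls above all resolve their GPU from a per-pipe array and bail out when the slot is empty. A compact sketch of that lookup-and-validate step (MAX_PIPES and the error-code split are illustrative):

#include <errno.h>
#include <stddef.h>

#define MAX_PIPES 4	/* illustrative bound */

struct gpu;	/* opaque here */

struct drm_priv {
	struct gpu *gpu[MAX_PIPES];
};

static struct gpu *lookup_gpu(struct drm_priv *priv, unsigned int pipe,
			      int *err)
{
	if (pipe >= MAX_PIPES) {
		*err = -EINVAL;	/* pipe index out of range */
		return NULL;
	}
	if (!priv->gpu[pipe]) {
		*err = -ENXIO;	/* no GPU bound to this pipe */
		return NULL;
	}
	*err = 0;
	return priv->gpu[pipe];
}
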
/drivers/gpu/drm/msm/
msm_gpu.c
60 if (gpu->core_clk && gpu->fast_rate) in enable_clk()
169 ret = gpu->funcs->hw_init(gpu); in msm_gpu_hw_init()
207 gpu->funcs->show(gpu, state, &p); in msm_gpu_devcoredump_read()
545 gpu->funcs->recover(gpu); in recover_worker()
557 gpu->funcs->submit(gpu, submit); in recover_worker()
619 if (!gpu->funcs->progress(gpu, ring)) in made_progress()
719 gpu->activetime = gpu->totaltime = 0; in msm_gpu_perfcntr_start()
898 gpu->funcs->submit(gpu, submit); in msm_gpu_submit()
912 return gpu->funcs->irq(gpu); in irq_handler()
1052 gpu->vm = gpu->funcs->create_vm(gpu, pdev); in msm_gpu_init()
[all …]
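
Nearly every msm_gpu.c hit above dispatches through gpu->funcs, the classic C ops-table idiom that lets one core file drive all the Adreno backends. A minimal sketch of the shape (sketch types, not the driver's real struct layout):

struct gpu_sketch;

struct gpu_funcs_sketch {
	int  (*hw_init)(struct gpu_sketch *gpu);
	void (*recover)(struct gpu_sketch *gpu);
	int  (*submit)(struct gpu_sketch *gpu, void *job);
};

struct gpu_sketch {
	const struct gpu_funcs_sketch *funcs;	/* filled in by the backend */
};

/* Core code never knows which chip it drives; it just dispatches. */
static int gpu_hw_init(struct gpu_sketch *gpu)
{
	return gpu->funcs->hw_init(gpu);
}
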
msm_gpu_devfreq.c
48 gpu->funcs->gpu_set_freq(gpu, opp, df->suspended); in msm_devfreq_target()
72 return gpu->funcs->gpu_get_freq(gpu); in get_freq()
99 busy_cycles = gpu->funcs->gpu_busy(gpu, &sample_rate); in msm_devfreq_get_dev_status()
196 gpu->cooling = NULL; in msm_devfreq_init()
221 if (!has_devfreq(gpu)) in msm_devfreq_cleanup()
233 if (!has_devfreq(gpu)) in msm_devfreq_resume()
237 df->busy_cycles = gpu->funcs->gpu_busy(gpu, &sample_rate); in msm_devfreq_resume()
249 if (!has_devfreq(gpu)) in msm_devfreq_suspend()
275 if (!has_devfreq(gpu)) in msm_devfreq_boost()
278 freq = get_freq(gpu); in msm_devfreq_boost()
[all …]
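
msm_gpu_devfreq.c above estimates load by sampling a monotonic busy-cycle counter and re-baselining it on resume. A toy version of the delta sampling (field names echo the excerpt, but the struct is hypothetical):

#include <stdint.h>

struct devfreq_sketch {
	uint64_t busy_cycles;	/* counter value at the last sample */
};

/* Return busy cycles since the previous call and re-baseline, the
 * same bookkeeping df->busy_cycles serves in the excerpt above. */
static uint64_t sample_busy_delta(struct devfreq_sketch *df, uint64_t now)
{
	uint64_t delta = now - df->busy_cycles;

	df->busy_cycles = now;
	return delta;
}
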
msm_gpu.h
52 int (*hw_init)(struct msm_gpu *gpu);
521 if (rn >= gpu->nr_rings) in msm_gpu_convert_priority()
737 mutex_lock(&gpu->lock); in msm_gpu_crashstate_get()
739 if (gpu->crashstate) { in msm_gpu_crashstate_get()
741 state = gpu->crashstate; in msm_gpu_crashstate_get()
744 mutex_unlock(&gpu->lock); in msm_gpu_crashstate_get()
751 mutex_lock(&gpu->lock); in msm_gpu_crashstate_put()
753 if (gpu->crashstate) { in msm_gpu_crashstate_put()
754 if (gpu->funcs->gpu_state_put(gpu->crashstate)) in msm_gpu_crashstate_put()
755 gpu->crashstate = NULL; in msm_gpu_crashstate_put()
[all …]
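
msm_gpu.h above guards a single cached crash state with the gpu lock: get takes a reference if one exists, put drops it and clears the pointer on the last reference. A userspace approximation with a plain counter standing in for kref (freeing is elided):

#include <pthread.h>
#include <stddef.h>

struct crashstate {
	int refcount;
};

struct gpu_crash {
	pthread_mutex_t lock;
	struct crashstate *crashstate;	/* at most one cached state */
};

static struct crashstate *crashstate_get(struct gpu_crash *gpu)
{
	struct crashstate *state = NULL;

	pthread_mutex_lock(&gpu->lock);
	if (gpu->crashstate) {
		gpu->crashstate->refcount++;
		state = gpu->crashstate;
	}
	pthread_mutex_unlock(&gpu->lock);
	return state;
}

static void crashstate_put(struct gpu_crash *gpu)
{
	pthread_mutex_lock(&gpu->lock);
	if (gpu->crashstate && --gpu->crashstate->refcount == 0)
		gpu->crashstate = NULL;	/* last reference: forget it */
	pthread_mutex_unlock(&gpu->lock);
}
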
/drivers/gpu/drm/msm/adreno/
a3xx_gpu.c
109 return a3xx_idle(gpu); in a3xx_me_init()
119 DBG("%s", gpu->name); in a3xx_hw_init()
289 gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova)); in a3xx_hw_init()
379 a3xx_dump(gpu); in a3xx_recover()
384 adreno_recover(gpu); in a3xx_recover()
392 DBG("%s", gpu->name); in a3xx_destroy()
404 if (!adreno_idle(gpu, gpu->rb[0])) in a3xx_idle()
430 msm_gpu_retire(gpu); in a3xx_irq()
478 adreno_dump(gpu); in a3xx_dump()
545 struct msm_gpu *gpu; in a3xx_gpu_init() local
[all …]
a4xx_gpu.c
180 return a4xx_idle(gpu); in a4xx_me_init()
269 a4xx_enable_hwcg(gpu); in a4xx_hw_init()
325 gpu_write(gpu, REG_A4XX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova)); in a4xx_hw_init()
363 a4xx_dump(gpu); in a4xx_recover()
368 adreno_recover(gpu); in a4xx_recover()
376 DBG("%s", gpu->name); in a4xx_destroy()
388 if (!adreno_idle(gpu, gpu->rb[0])) in a4xx_idle()
418 msm_gpu_retire(gpu); in a4xx_irq()
569 adreno_dump(gpu); in a4xx_dump()
658 struct msm_gpu *gpu; in a4xx_gpu_init() local
[all …]
a5xx_gpu.c
934 gpu_write64(gpu, REG_A5XX_CP_RB_BASE, gpu->rb[0]->iova); in a5xx_hw_init()
974 a5xx_flush(gpu, gpu->rb[0], true); in a5xx_hw_init()
975 if (!a5xx_idle(gpu, gpu->rb[0])) in a5xx_hw_init()
992 a5xx_flush(gpu, gpu->rb[0], true); in a5xx_hw_init()
993 if (!a5xx_idle(gpu, gpu->rb[0])) in a5xx_hw_init()
1236 struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu); in a5xx_fault_detect_irq()
1260 kthread_queue_work(gpu->worker, &gpu->recover_work); in a5xx_fault_detect_irq()
1383 gpu->name, in a5xx_pm_resume()
1394 gpu->name); in a5xx_pm_resume()
1790 msm_mmu_set_fault_handler(to_msm_vm(gpu->vm)->mmu, gpu, in a5xx_gpu_init()
[all …]
adreno_gpu.h
263 return gpu->chip_id & 0xff; in adreno_patchid()
268 if (WARN_ON_ONCE(!gpu->info)) in adreno_is_revn()
275 return gpu->gmu_is_wrapper; in adreno_has_gmu_wrapper()
280 if (WARN_ON_ONCE(!gpu->info)) in adreno_is_a2xx()
287 if (WARN_ON_ONCE(!gpu->info)) in adreno_is_a20x()
331 return adreno_is_a330(gpu) && (adreno_patchid(gpu) > 0); in adreno_is_a330v2()
406 return adreno_is_a619(gpu) && adreno_has_gmu_wrapper(gpu); in adreno_is_a619_holi()
466 if (WARN_ON_ONCE(!gpu->info)) in adreno_is_a610_family()
470 return adreno_is_a610(gpu) || adreno_is_a702(gpu); in adreno_is_a610_family()
476 return adreno_is_a618(gpu) || in adreno_is_a615_family()
[all …]
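
adreno_gpu.h above is a pile of chip-identification helpers built on masking gpu->chip_id; the patch id, for instance, is the low byte. A short sketch of that decode (only the low-byte mask comes from the excerpt; the worked example value is an assumption):

#include <stdint.h>

/* Low byte of the chip id carries the patch level, per the excerpt. */
static inline uint8_t patchid(uint32_t chip_id)
{
	return chip_id & 0xff;
}

/* Hypothetical example: an id of 0x06030001 decodes to patch level 1;
 * the meaning of the upper bytes is assumed, not taken from the file. */
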
a5xx_power.c
164 gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, gpu->fast_rate)); in a530_lm_setup()
165 gpu_write(gpu, AGC_MSG_PAYLOAD(3), gpu->fast_rate / 1000000); in a530_lm_setup()
199 gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, gpu->fast_rate)); in a540_lm_setup()
200 gpu_write(gpu, AGC_MSG_PAYLOAD(3), gpu->fast_rate / 1000000); in a540_lm_setup()
247 gpu->name); in a5xx_gpmu_init()
264 gpu->name); in a5xx_gpmu_init()
271 gpu->name, val); in a5xx_gpmu_init()
306 a530_lm_setup(gpu); in a5xx_power_init()
308 a540_lm_setup(gpu); in a5xx_power_init()
311 a5xx_pc_init(gpu); in a5xx_power_init()
[all …]
a6xx_gpu.c
672 gpu->ubwc_config = &gpu->_ubwc_config; in a6xx_calc_ubwc_config()
1325 gpu_write64(gpu, REG_A6XX_CP_RB_BASE, gpu->rb[0]->iova); in hw_init()
1383 a6xx_flush(gpu, gpu->rb[0]); in hw_init()
1384 if (!a6xx_idle(gpu, gpu->rb[0])) in hw_init()
1698 struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu); in a6xx_fault_detect_irq()
1730 kthread_queue_work(gpu->worker, &gpu->recover_work); in a6xx_fault_detect_irq()
1750 kthread_queue_work(gpu->worker, &gpu->recover_work); in a7xx_sw_fuse_violation_irq()
2091 ret = clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks); in a6xx_pm_resume()
2160 clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks); in a6xx_pm_suspend()
2564 msm_mmu_set_fault_handler(to_msm_vm(gpu->vm)->mmu, gpu, in a6xx_gpu_init()
[all …]
a2xx_gpu.c
105 return a2xx_idle(gpu); in a2xx_me_init()
118 DBG("%s", gpu->name); in a2xx_hw_init()
219 gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova)); in a2xx_hw_init()
279 a2xx_dump(gpu); in a2xx_recover()
284 adreno_recover(gpu); in a2xx_recover()
292 DBG("%s", gpu->name); in a2xx_destroy()
302 if (!adreno_idle(gpu, gpu->rb[0])) in a2xx_idle()
351 msm_gpu_retire(gpu); in a2xx_irq()
452 adreno_dump(gpu); in a2xx_dump()
519 struct msm_gpu *gpu; in a2xx_gpu_init() local
[all …]
a5xx_preempt.c
68 empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring)); in get_next_ring()
91 kthread_queue_work(gpu->worker, &gpu->recover_work); in a5xx_preempt_timer()
102 if (gpu->nr_rings == 1) in a5xx_preempt_trigger()
120 ring = get_next_ring(gpu); in a5xx_preempt_trigger()
198 gpu->name); in a5xx_preempt_irq()
199 kthread_queue_work(gpu->worker, &gpu->recover_work); in a5xx_preempt_irq()
214 a5xx_preempt_trigger(gpu); in a5xx_preempt_irq()
227 if (gpu->nr_rings == 1) in a5xx_preempt_hw_init()
311 if (gpu->nr_rings <= 1) in a5xx_preempt_init()
320 a5xx_preempt_fini(gpu); in a5xx_preempt_init()
[all …]
a6xx_preempt.c
76 empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring)); in get_next_ring()
99 kthread_queue_work(gpu->worker, &gpu->recover_work); in a6xx_preempt_timer()
165 gpu->name); in a6xx_preempt_irq()
166 kthread_queue_work(gpu->worker, &gpu->recover_work); in a6xx_preempt_irq()
195 if (gpu->nr_rings == 1) in a6xx_preempt_hw_init()
233 if (gpu->nr_rings == 1) in a6xx_preempt_trigger()
305 gpu_write64(gpu, in a6xx_preempt_trigger()
309 gpu_write64(gpu, in a6xx_preempt_trigger()
365 gpu->vm, &bo, &iova); in preempt_init_ring()
418 if (gpu->nr_rings <= 1) in a6xx_preempt_init()
[all …]
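
Both preemption files above decide whether a ring has work by comparing its write pointer with the hardware's read pointer, then pick the first non-empty ring. A hypothetical standalone version of that selection:

#include <stdbool.h>
#include <stdint.h>

struct ring {
	uint32_t wptr;	/* where the CPU last wrote commands */
	uint32_t rptr;	/* how far the GPU has consumed */
};

static bool ring_empty(const struct ring *ring)
{
	return ring->wptr == ring->rptr;	/* caught up: nothing pending */
}

/* Rings are ordered by priority; return the first one with work. */
static int get_next_ring(const struct ring *rings, int nr_rings)
{
	for (int i = 0; i < nr_rings; i++) {
		if (!ring_empty(&rings[i]))
			return i;
	}
	return -1;	/* all rings idle */
}
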
adreno_device.c
73 if (!gpu) { in adreno_load_gpu()
91 ret = gpu->funcs->ucode_load(gpu); in adreno_load_gpu()
121 gpu->funcs->debugfs_init(gpu, dev->primary); in adreno_load_gpu()
122 gpu->funcs->debugfs_init(gpu, dev->render); in adreno_load_gpu()
126 return gpu; in adreno_load_gpu()
232 if (IS_ERR(gpu)) { in adreno_bind()
253 gpu->funcs->destroy(gpu); in adreno_unbind()
301 return gpu->funcs->pm_resume(gpu); in adreno_runtime_resume()
315 return gpu->funcs->pm_suspend(gpu); in adreno_runtime_suspend()
356 if (!gpu) in adreno_system_suspend()
[all …]
adreno_gpu.c
428 if (vm == gpu->vm) in adreno_get_param()
433 if (vm == gpu->vm) in adreno_get_param()
660 VERB("%s", gpu->name); in adreno_hw_init()
699 return gpu->funcs->get_rptr(gpu, ring); in get_rptr()
704 return gpu->rb[0]; in adreno_active_ring()
715 gpu->funcs->pm_suspend(gpu); in adreno_recover()
716 gpu->funcs->pm_resume(gpu); in adreno_recover()
1100 struct msm_gpu *gpu) in adreno_get_pwrlevels() argument
1107 gpu->fast_rate = 0; in adreno_get_pwrlevels()
1204 gpu->pdev = pdev; in adreno_gpu_init()
[all …]
a6xx_gpu_state.c
135 SZ_1M, MSM_BO_WC, gpu->vm, in a6xx_crashdumper_init()
227 gpu_write(gpu, ctrl0, reg); in vbif_debugbus_read()
230 gpu_write(gpu, ctrl1, i); in vbif_debugbus_read()
276 ptr += vbif_debugbus_read(gpu, in a6xx_get_vbif_debugbus_block()
282 ptr += vbif_debugbus_read(gpu, in a6xx_get_vbif_debugbus_block()
291 ptr += vbif_debugbus_read(gpu, in a6xx_get_vbif_debugbus_block()
352 a6xx_get_debugbus_block(gpu, in a6xx_get_debugbus_blocks()
375 a6xx_get_debugbus_block(gpu, in a6xx_get_debugbus_blocks()
416 a6xx_get_debugbus_block(gpu, in a7xx_get_debugbus_blocks()
422 a6xx_get_debugbus_block(gpu, in a7xx_get_debugbus_blocks()
[all …]
a5xx_debugfs.c
23 gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA)); in pfp_print()
36 gpu_read(gpu, REG_A5XX_CP_ME_STAT_DATA)); in me_print()
49 gpu_read(gpu, REG_A5XX_CP_MEQ_DBG_DATA)); in meq_print()
79 show(priv->gpu, &p); in show()
97 struct msm_gpu *gpu = priv->gpu; in reset_set() local
110 mutex_lock(&gpu->lock); in reset_set()
130 gpu->needs_hw_init = true; in reset_set()
132 pm_runtime_get_sync(&gpu->pdev->dev); in reset_set()
133 gpu->funcs->recover(gpu); in reset_set()
135 pm_runtime_put_sync(&gpu->pdev->dev); in reset_set()
[all …]
/drivers/gpu/drm/panthor/
panthor_gpu.c
161 spin_lock(&ptdev->gpu->reqs_lock); in panthor_gpu_irq_handler()
163 ptdev->gpu->pending_reqs &= ~status; in panthor_gpu_irq_handler()
164 wake_up_all(&ptdev->gpu->reqs_acked); in panthor_gpu_irq_handler()
166 spin_unlock(&ptdev->gpu->reqs_lock); in panthor_gpu_irq_handler()
184 ptdev->gpu->pending_reqs = 0; in panthor_gpu_unplug()
185 wake_up_all(&ptdev->gpu->reqs_acked); in panthor_gpu_unplug()
197 struct panthor_gpu *gpu; in panthor_gpu_init() local
201 gpu = drmm_kzalloc(&ptdev->base, sizeof(*gpu), GFP_KERNEL); in panthor_gpu_init()
202 if (!gpu) in panthor_gpu_init()
205 spin_lock_init(&gpu->reqs_lock); in panthor_gpu_init()
[all …]
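
panthor_gpu.c above acknowledges requests from the IRQ handler by clearing bits in a pending mask under reqs_lock and waking all waiters; unplug forces the same wakeup with an empty mask. A userspace sketch with pthread primitives standing in for the kernel spinlock and waitqueue:

#include <pthread.h>
#include <stdint.h>

struct panthor_sketch {
	pthread_mutex_t reqs_lock;
	pthread_cond_t reqs_acked;
	uint32_t pending_reqs;	/* bitmask of requests still in flight */
};

/* IRQ path: clear whatever the hardware acknowledged, wake waiters. */
static void irq_ack(struct panthor_sketch *gpu, uint32_t status)
{
	pthread_mutex_lock(&gpu->reqs_lock);
	gpu->pending_reqs &= ~status;
	pthread_cond_broadcast(&gpu->reqs_acked);	/* wake_up_all() */
	pthread_mutex_unlock(&gpu->reqs_lock);
}

/* Unplug path: nothing will ever complete, so release everyone. */
static void unplug(struct panthor_sketch *gpu)
{
	pthread_mutex_lock(&gpu->reqs_lock);
	gpu->pending_reqs = 0;
	pthread_cond_broadcast(&gpu->reqs_acked);
	pthread_mutex_unlock(&gpu->reqs_lock);
}
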
/drivers/gpu/drm/ci/xfails/
msm-sm8350-hdk-skips.txt
24 # [ 200.895243] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=PERMISS…
25 # [ 200.906885] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
26 # [ 200.917625] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
27 # [ 200.928353] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
28 # [ 200.939084] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
29 # [ 200.949815] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
31 # [ 200.960467] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
32 # [ 200.960500] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
33 # [ 200.995966] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN…
95 # [ 228.161164] watchdog: BUG: soft lockup - CPU#0 stuck for 26s! [gpu-worker:150]
[all …]
/drivers/gpu/drm/amd/amdkfd/
kfd_topology.c
303 if (mem->gpu && kfd_devcgroup_check_permission(mem->gpu)) in mem_show()
1091 if (!gpu) in kfd_generate_gpu_id()
1158 dev->gpu = gpu; in kfd_assign_gpu()
1162 mem->gpu = dev->gpu; in kfd_assign_gpu()
1164 cache->gpu = dev->gpu; in kfd_assign_gpu()
1166 iolink->gpu = dev->gpu; in kfd_assign_gpu()
1168 p2plink->gpu = dev->gpu; in kfd_assign_gpu()
1272 struct kfd_node *gpu = outbound_link->gpu; in kfd_set_recommended_sdma_engines() local
1588 if (!dev->gpu || !dev->gpu->adev || in kfd_dev_create_p2p_links()
2037 if (gpu->xcp && !gpu->xcp->ddev) { in kfd_topology_add_device()
[all …]
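
kfd_assign_gpu() above fans one node pointer out to every sub-object of a topology device (memory banks, caches, io links, p2p links). A flattened illustration of that propagation (array-based instead of the kernel's lists, with hypothetical types):

#include <stddef.h>

struct kfd_node;	/* opaque */

struct mem_props   { struct kfd_node *gpu; };
struct cache_props { struct kfd_node *gpu; };

struct topo_dev {
	struct kfd_node *gpu;
	struct mem_props *mems;
	size_t nr_mems;
	struct cache_props *caches;
	size_t nr_caches;
};

static void assign_gpu(struct topo_dev *dev, struct kfd_node *gpu)
{
	dev->gpu = gpu;
	for (size_t i = 0; i < dev->nr_mems; i++)
		dev->mems[i].gpu = gpu;		/* memory banks */
	for (size_t i = 0; i < dev->nr_caches; i++)
		dev->caches[i].gpu = gpu;	/* caches */
}
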
