// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */

#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>

#include "panthor_device.h"
#include "panthor_gpu.h"
#include "panthor_regs.h"

/**
 * struct panthor_gpu - GPU block management data.
 */
struct panthor_gpu {
	/** @irq: GPU irq. */
	struct panthor_irq irq;

	/** @reqs_lock: Lock protecting access to pending_reqs. */
	spinlock_t reqs_lock;

	/** @pending_reqs: Pending GPU requests. */
	u32 pending_reqs;

	/** @reqs_acked: GPU request wait queue. */
	wait_queue_head_t reqs_acked;
};

/**
 * struct panthor_model - GPU model description
 */
struct panthor_model {
	/** @name: Model name. */
	const char *name;

	/** @arch_major: Major version number of architecture. */
	u8 arch_major;

	/** @product_major: Major version number of product. */
	u8 product_major;
};

/**
 * GPU_MODEL() - Define a GPU model. A GPU product can be uniquely identified
 * by a combination of the major architecture version and the major product
 * version.
 * @_name: Name for the GPU model.
 * @_arch_major: Architecture major.
 * @_product_major: Product major.
 */
#define GPU_MODEL(_name, _arch_major, _product_major) \
{\
	.name = __stringify(_name), \
	.arch_major = _arch_major, \
	.product_major = _product_major, \
}

static const struct panthor_model gpu_models[] = {
	GPU_MODEL(g610, 10, 7),
	{},
};

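/*
 * Interrupt sources serviced by the GPU block itself. The MMU and job (CSF)
 * interrupts are requested and handled by their own blocks.
 */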
#define GPU_INTERRUPTS_MASK	\
	(GPU_IRQ_FAULT | \
	 GPU_IRQ_PROTM_FAULT | \
	 GPU_IRQ_RESET_COMPLETED | \
	 GPU_IRQ_CLEAN_CACHES_COMPLETED)

static void panthor_gpu_coherency_set(struct panthor_device *ptdev)
{
	gpu_write(ptdev, GPU_COHERENCY_PROTOCOL,
		  ptdev->coherent ? GPU_COHERENCY_PROT_BIT(ACE_LITE) : GPU_COHERENCY_NONE);
}

static void panthor_gpu_init_info(struct panthor_device *ptdev)
{
	const struct panthor_model *model;
	u32 arch_major, product_major;
	u32 major, minor, status;
	unsigned int i;

	ptdev->gpu_info.gpu_id = gpu_read(ptdev, GPU_ID);
	ptdev->gpu_info.csf_id = gpu_read(ptdev, GPU_CSF_ID);
	ptdev->gpu_info.gpu_rev = gpu_read(ptdev, GPU_REVID);
	ptdev->gpu_info.core_features = gpu_read(ptdev, GPU_CORE_FEATURES);
	ptdev->gpu_info.l2_features = gpu_read(ptdev, GPU_L2_FEATURES);
	ptdev->gpu_info.tiler_features = gpu_read(ptdev, GPU_TILER_FEATURES);
	ptdev->gpu_info.mem_features = gpu_read(ptdev, GPU_MEM_FEATURES);
	ptdev->gpu_info.mmu_features = gpu_read(ptdev, GPU_MMU_FEATURES);
	ptdev->gpu_info.thread_features = gpu_read(ptdev, GPU_THREAD_FEATURES);
	ptdev->gpu_info.max_threads = gpu_read(ptdev, GPU_THREAD_MAX_THREADS);
	ptdev->gpu_info.thread_max_workgroup_size = gpu_read(ptdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
	ptdev->gpu_info.thread_max_barrier_size = gpu_read(ptdev, GPU_THREAD_MAX_BARRIER_SIZE);
	ptdev->gpu_info.coherency_features = gpu_read(ptdev, GPU_COHERENCY_FEATURES);
	for (i = 0; i < 4; i++)
		ptdev->gpu_info.texture_features[i] = gpu_read(ptdev, GPU_TEXTURE_FEATURES(i));

	ptdev->gpu_info.as_present = gpu_read(ptdev, GPU_AS_PRESENT);

	ptdev->gpu_info.shader_present = gpu_read64(ptdev, GPU_SHADER_PRESENT);
	ptdev->gpu_info.tiler_present = gpu_read64(ptdev, GPU_TILER_PRESENT);
	ptdev->gpu_info.l2_present = gpu_read64(ptdev, GPU_L2_PRESENT);

	arch_major = GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id);
	product_major = GPU_PROD_MAJOR(ptdev->gpu_info.gpu_id);
	major = GPU_VER_MAJOR(ptdev->gpu_info.gpu_id);
	minor = GPU_VER_MINOR(ptdev->gpu_info.gpu_id);
	status = GPU_VER_STATUS(ptdev->gpu_info.gpu_id);

	for (model = gpu_models; model->name; model++) {
		if (model->arch_major == arch_major &&
		    model->product_major == product_major)
			break;
	}

	drm_info(&ptdev->base,
		 "mali-%s id 0x%x major 0x%x minor 0x%x status 0x%x",
		 model->name ?: "unknown", ptdev->gpu_info.gpu_id >> 16,
		 major, minor, status);

	drm_info(&ptdev->base,
		 "Features: L2:%#x Tiler:%#x Mem:%#x MMU:%#x AS:%#x",
		 ptdev->gpu_info.l2_features,
		 ptdev->gpu_info.tiler_features,
		 ptdev->gpu_info.mem_features,
		 ptdev->gpu_info.mmu_features,
		 ptdev->gpu_info.as_present);

	drm_info(&ptdev->base,
		 "shader_present=0x%0llx l2_present=0x%0llx tiler_present=0x%0llx",
		 ptdev->gpu_info.shader_present, ptdev->gpu_info.l2_present,
		 ptdev->gpu_info.tiler_present);
}

static void panthor_gpu_irq_handler(struct panthor_device *ptdev, u32 status)
{
	gpu_write(ptdev, GPU_INT_CLEAR, status);

	if (status & GPU_IRQ_FAULT) {
		u32 fault_status = gpu_read(ptdev, GPU_FAULT_STATUS);
		u64 address = gpu_read64(ptdev, GPU_FAULT_ADDR);

		drm_warn(&ptdev->base, "GPU Fault 0x%08x (%s) at 0x%016llx\n",
			 fault_status, panthor_exception_name(ptdev, fault_status & 0xFF),
			 address);
	}
	if (status & GPU_IRQ_PROTM_FAULT)
		drm_warn(&ptdev->base, "GPU Fault in protected mode\n");

	spin_lock(&ptdev->gpu->reqs_lock);
	if (status & ptdev->gpu->pending_reqs) {
		ptdev->gpu->pending_reqs &= ~status;
		wake_up_all(&ptdev->gpu->reqs_acked);
	}
	spin_unlock(&ptdev->gpu->reqs_lock);
}
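/*
 * Expands to the panthor_request_gpu_irq(), panthor_gpu_irq_suspend() and
 * panthor_gpu_irq_resume() helpers used throughout this file.
 */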
PANTHOR_IRQ_HANDLER(gpu, GPU, panthor_gpu_irq_handler);

/**
 * panthor_gpu_unplug() - Called when the GPU is unplugged.
 * @ptdev: Device to unplug.
 */
void panthor_gpu_unplug(struct panthor_device *ptdev)
{
	unsigned long flags;

	/* Make sure the IRQ handler is not running after that point. */
	if (!IS_ENABLED(CONFIG_PM) || pm_runtime_active(ptdev->base.dev))
		panthor_gpu_irq_suspend(&ptdev->gpu->irq);

	/* Wake-up all waiters. */
	spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
	ptdev->gpu->pending_reqs = 0;
	wake_up_all(&ptdev->gpu->reqs_acked);
	spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
}

/**
 * panthor_gpu_init() - Initialize the GPU block
 * @ptdev: Device.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_init(struct panthor_device *ptdev)
{
	struct panthor_gpu *gpu;
	u32 pa_bits;
	int ret, irq;

	gpu = drmm_kzalloc(&ptdev->base, sizeof(*gpu), GFP_KERNEL);
	if (!gpu)
		return -ENOMEM;

	spin_lock_init(&gpu->reqs_lock);
	init_waitqueue_head(&gpu->reqs_acked);
	ptdev->gpu = gpu;
	panthor_gpu_init_info(ptdev);

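	/*
	 * The MMU features register reports how many physical address bits
	 * the GPU can drive; restrict the DMA masks accordingly.
	 */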
	dma_set_max_seg_size(ptdev->base.dev, UINT_MAX);
	pa_bits = GPU_MMU_FEATURES_PA_BITS(ptdev->gpu_info.mmu_features);
	ret = dma_set_mask_and_coherent(ptdev->base.dev, DMA_BIT_MASK(pa_bits));
	if (ret)
		return ret;

	irq = platform_get_irq_byname(to_platform_device(ptdev->base.dev), "gpu");
	if (irq < 0)
		return irq;

	ret = panthor_request_gpu_irq(ptdev, &ptdev->gpu->irq, irq, GPU_INTERRUPTS_MASK);
	if (ret)
		return ret;

	return 0;
}

/**
 * panthor_gpu_block_power_off() - Power-off a specific block of the GPU
 * @ptdev: Device.
 * @blk_name: Block name.
 * @pwroff_reg: Power-off register for this block.
 * @pwrtrans_reg: Power transition register for this block.
 * @mask: Sub-elements to power-off.
 * @timeout_us: Timeout in microseconds.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_block_power_off(struct panthor_device *ptdev,
				const char *blk_name,
				u32 pwroff_reg, u32 pwrtrans_reg,
				u64 mask, u32 timeout_us)
{
	u64 val;
	int ret;

	ret = gpu_read64_relaxed_poll_timeout(ptdev, pwrtrans_reg, val,
					      !(mask & val), 100, timeout_us);
	if (ret) {
		drm_err(&ptdev->base,
			"timeout waiting on %s:%llx power transition", blk_name,
			mask);
		return ret;
	}

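	/*
	 * No transition pending: request the power-off, then wait for the
	 * transition we just triggered to complete.
	 */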
	gpu_write64(ptdev, pwroff_reg, mask);

	ret = gpu_read64_relaxed_poll_timeout(ptdev, pwrtrans_reg, val,
					      !(mask & val), 100, timeout_us);
	if (ret) {
		drm_err(&ptdev->base,
			"timeout waiting on %s:%llx power transition", blk_name,
			mask);
		return ret;
	}

	return 0;
}

/**
 * panthor_gpu_block_power_on() - Power-on a specific block of the GPU
 * @ptdev: Device.
 * @blk_name: Block name.
 * @pwron_reg: Power-on register for this block.
 * @pwrtrans_reg: Power transition register for this block.
 * @rdy_reg: Power transition ready register.
 * @mask: Sub-elements to power-on.
 * @timeout_us: Timeout in microseconds.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_block_power_on(struct panthor_device *ptdev,
			       const char *blk_name,
			       u32 pwron_reg, u32 pwrtrans_reg,
			       u32 rdy_reg, u64 mask, u32 timeout_us)
{
	u64 val;
	int ret;

	ret = gpu_read64_relaxed_poll_timeout(ptdev, pwrtrans_reg, val,
					      !(mask & val), 100, timeout_us);
	if (ret) {
		drm_err(&ptdev->base,
			"timeout waiting on %s:%llx power transition", blk_name,
			mask);
		return ret;
	}

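	/*
	 * Request the power-on, then wait for the ready register to report
	 * all requested sub-elements as powered.
	 */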
	gpu_write64(ptdev, pwron_reg, mask);

	ret = gpu_read64_relaxed_poll_timeout(ptdev, rdy_reg, val,
					      (mask & val) == mask,
					      100, timeout_us);
	if (ret) {
		drm_err(&ptdev->base, "timeout waiting on %s:%llx readiness",
			blk_name, mask);
		return ret;
	}

	return 0;
}

/**
 * panthor_gpu_l2_power_on() - Power-on the L2-cache
 * @ptdev: Device.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_l2_power_on(struct panthor_device *ptdev)
{
	if (ptdev->gpu_info.l2_present != 1) {
		/*
		 * Only support one core group now.
		 * ~(l2_present - 1) unsets all bits in l2_present except
		 * the bottom bit. (l2_present - 2) has all the bits in
		 * the first core group set. AND them together to generate
		 * a mask of cores in the first core group.
		 */
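		/*
		 * e.g. with l2_present == 0x11 (two core groups),
		 * ~(0x11 - 1) == ~0x10 clears bit 4, and (0x11 - 2) == 0xf,
		 * so core_mask == 0xf: the four cores of the first group.
		 */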
		u64 core_mask = ~(ptdev->gpu_info.l2_present - 1) &
				(ptdev->gpu_info.l2_present - 2);
		drm_info_once(&ptdev->base, "using only 1st core group (%lu cores from %lu)\n",
			      hweight64(core_mask),
			      hweight64(ptdev->gpu_info.shader_present));
	}

	/* Set the desired coherency mode before the power up of L2 */
	panthor_gpu_coherency_set(ptdev);

	return panthor_gpu_power_on(ptdev, L2, 1, 20000);
}

/**
 * panthor_gpu_flush_caches() - Flush caches
 * @ptdev: Device.
 * @l2: L2 flush type.
 * @lsc: LSC flush type.
 * @other: Other flush type.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_flush_caches(struct panthor_device *ptdev,
			     u32 l2, u32 lsc, u32 other)
{
	bool timedout = false;
	unsigned long flags;

	spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
	if (!drm_WARN_ON(&ptdev->base,
			 ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED)) {
		ptdev->gpu->pending_reqs |= GPU_IRQ_CLEAN_CACHES_COMPLETED;
		gpu_write(ptdev, GPU_CMD, GPU_FLUSH_CACHES(l2, lsc, other));
	}
	spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);

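	/*
	 * On timeout, recheck GPU_INT_RAWSTAT under the lock: the completion
	 * interrupt may have fired without our wakeup being observed, in
	 * which case the flush actually succeeded.
	 */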
	if (!wait_event_timeout(ptdev->gpu->reqs_acked,
				!(ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED),
				msecs_to_jiffies(100))) {
		spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
		if ((ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED) != 0 &&
		    !(gpu_read(ptdev, GPU_INT_RAWSTAT) & GPU_IRQ_CLEAN_CACHES_COMPLETED))
			timedout = true;
		else
			ptdev->gpu->pending_reqs &= ~GPU_IRQ_CLEAN_CACHES_COMPLETED;
		spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
	}

	if (timedout) {
		drm_err(&ptdev->base, "Flush caches timeout");
		return -ETIMEDOUT;
	}

	return 0;
}

/**
 * panthor_gpu_soft_reset() - Issue a soft-reset
 * @ptdev: Device.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_gpu_soft_reset(struct panthor_device *ptdev)
{
	bool timedout = false;
	unsigned long flags;

	spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
	if (!drm_WARN_ON(&ptdev->base,
			 ptdev->gpu->pending_reqs & GPU_IRQ_RESET_COMPLETED)) {
		ptdev->gpu->pending_reqs |= GPU_IRQ_RESET_COMPLETED;
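		/*
		 * Clear any stale RESET_COMPLETED bit before issuing the
		 * soft-reset so an old completion isn't mistaken for ours.
		 */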
		gpu_write(ptdev, GPU_INT_CLEAR, GPU_IRQ_RESET_COMPLETED);
		gpu_write(ptdev, GPU_CMD, GPU_SOFT_RESET);
	}
	spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);

	if (!wait_event_timeout(ptdev->gpu->reqs_acked,
				!(ptdev->gpu->pending_reqs & GPU_IRQ_RESET_COMPLETED),
				msecs_to_jiffies(100))) {
		spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
		if ((ptdev->gpu->pending_reqs & GPU_IRQ_RESET_COMPLETED) != 0 &&
		    !(gpu_read(ptdev, GPU_INT_RAWSTAT) & GPU_IRQ_RESET_COMPLETED))
			timedout = true;
		else
			ptdev->gpu->pending_reqs &= ~GPU_IRQ_RESET_COMPLETED;
		spin_unlock_irqrestore(&ptdev->gpu->reqs_lock, flags);
	}

	if (timedout) {
		drm_err(&ptdev->base, "Soft reset timeout");
		return -ETIMEDOUT;
	}

	return 0;
}

/**
 * panthor_gpu_suspend() - Suspend the GPU block.
 * @ptdev: Device.
 *
 * Suspend the GPU irq. This should be called last in the suspend procedure,
 * after all other blocks have been suspended.
 */
void panthor_gpu_suspend(struct panthor_device *ptdev)
{
	/* On a fast reset, simply power down the L2. */
	if (!ptdev->reset.fast)
		panthor_gpu_soft_reset(ptdev);
	else
		panthor_gpu_power_off(ptdev, L2, 1, 20000);

	panthor_gpu_irq_suspend(&ptdev->gpu->irq);
}

/**
 * panthor_gpu_resume() - Resume the GPU block.
 * @ptdev: Device.
 *
 * Resume the IRQ handler and power-on the L2-cache.
 * The FW takes care of powering the other blocks.
 */
void panthor_gpu_resume(struct panthor_device *ptdev)
{
	panthor_gpu_irq_resume(&ptdev->gpu->irq, GPU_INTERRUPTS_MASK);
	panthor_gpu_l2_power_on(ptdev);
}