// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2018 Broadcom
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 *	Boris Brezillon <boris.brezillon@bootlin.com>
 */

#include <linux/clk.h>
#include <linux/component.h>
#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <drm/drm_writeback.h>

#include "vc4_drv.h"
#include "vc4_regs.h"

/* Base address of the output.  Raster formats must be 4-byte aligned,
 * T and LT must be 16-byte aligned or maybe utile-aligned (docs are
 * inconsistent, but probably utile).
 */
#define TXP_DST_PTR		0x00

/* Pitch in bytes for raster images, 16-byte aligned.  For tiled, it's
 * the width in tiles.
 */
#define TXP_DST_PITCH		0x04
/* For T-tiled images, DST_PITCH should be the number of tiles wide,
 * shifted up.
 */
# define TXP_T_TILE_WIDTH_SHIFT		7
/* For LT-tiled images, DST_PITCH should be the number of utiles wide,
 * shifted up.
 */
# define TXP_LT_TILE_WIDTH_SHIFT	4

/* Pre-rotation width/height of the image.  Must match HVS config.
 *
 * If TFORMAT is set, the width limit is 1920 for 32-bit formats and
 * 3840 for 16-bit formats, and width/height must be tile- or
 * utile-aligned as appropriate.  If transposing (rotating), width is
 * limited to 1920.
 *
 * Height is limited to various numbers between 4088 and 4095.  I'd
 * just use 4088 to be safe.
 */
#define TXP_DIM			0x08
# define TXP_HEIGHT_SHIFT		16
# define TXP_HEIGHT_MASK		GENMASK(31, 16)
# define TXP_WIDTH_SHIFT		0
# define TXP_WIDTH_MASK			GENMASK(15, 0)

#define TXP_DST_CTRL		0x0c
/* These bits are set to 0x54 */
# define TXP_PILOT_SHIFT		24
# define TXP_PILOT_MASK			GENMASK(31, 24)
/* Bits 22-23 are set to 0x01 */
# define TXP_VERSION_SHIFT		22
# define TXP_VERSION_MASK		GENMASK(23, 22)

/* Powers down the internal memory. */
# define TXP_POWERDOWN			BIT(21)

/* Enables storing the alpha component in 8888/4444, instead of
 * filling with ~ALPHA_INVERT.
 */
# define TXP_ALPHA_ENABLE		BIT(20)

/* 4 bits, each enables stores for a channel in each set of 4 bytes.
 * Set to 0xf for normal operation.
 */
# define TXP_BYTE_ENABLE_SHIFT		16
# define TXP_BYTE_ENABLE_MASK		GENMASK(19, 16)

/* Debug: Generate VSTART again at EOF. */
# define TXP_VSTART_AT_EOF		BIT(15)

/* Debug: Terminate the current frame immediately.  Stops AXI
 * writes.
 */
# define TXP_ABORT			BIT(14)

# define TXP_DITHER			BIT(13)

/* Inverts alpha if TXP_ALPHA_ENABLE, chooses fill value for
 * !TXP_ALPHA_ENABLE.
 */
# define TXP_ALPHA_INVERT		BIT(12)

/* Note: I've listed the channels here in high bit (in byte 3/2/1) to
 * low bit (in byte 0) order.
 */
# define TXP_FORMAT_SHIFT		8
# define TXP_FORMAT_MASK		GENMASK(11, 8)
# define TXP_FORMAT_ABGR4444		0
# define TXP_FORMAT_ARGB4444		1
# define TXP_FORMAT_BGRA4444		2
# define TXP_FORMAT_RGBA4444		3
# define TXP_FORMAT_BGR565		6
# define TXP_FORMAT_RGB565		7
/* 888s are non-rotated, raster-only */
# define TXP_FORMAT_BGR888		8
# define TXP_FORMAT_RGB888		9
# define TXP_FORMAT_ABGR8888		12
# define TXP_FORMAT_ARGB8888		13
# define TXP_FORMAT_BGRA8888		14
# define TXP_FORMAT_RGBA8888		15

/* If TFORMAT is set, generates LT instead of T format. */
# define TXP_LINEAR_UTILE		BIT(7)

/* Rotate output by 90 degrees. */
# define TXP_TRANSPOSE			BIT(6)

/* Generate a tiled format for V3D. */
# define TXP_TFORMAT			BIT(5)

/* Generates some undefined test mode output. */
# define TXP_TEST_MODE			BIT(4)

/* Request odd field from HVS. */
# define TXP_FIELD			BIT(3)

/* Raise interrupt when idle. */
# define TXP_EI				BIT(2)

/* Set when generating a frame, clears when idle. */
# define TXP_BUSY			BIT(1)

/* Starts a frame.  Self-clearing. */
# define TXP_GO				BIT(0)

/* Number of lines received and committed to memory. */
#define TXP_PROGRESS		0x10

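/* Register accessors.  The kunit_fail_current_test() call flags any
 * unit test that ends up touching real TXP registers, since KUnit
 * tests are expected to run without hardware access.
 */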
#define TXP_READ(offset)						\
	({								\
		kunit_fail_current_test("Accessing a register in a unit test!\n"); \
		readl(txp->regs + (offset));				\
	})

#define TXP_WRITE(offset, val)						\
	do {								\
		kunit_fail_current_test("Accessing a register in a unit test!\n"); \
		writel(val, txp->regs + (offset));			\
	} while (0)

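/* Per-instance state for the transposer (TXP) writeback block: a vc4
 * CRTC fed by the HVS, plus the virtual encoder and writeback
 * connector exposed to userspace.
 */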
struct vc4_txp {
	struct vc4_crtc	base;

	struct platform_device *pdev;

	struct vc4_encoder encoder;
	struct drm_writeback_connector connector;

	void __iomem *regs;
};

static inline struct vc4_txp *encoder_to_vc4_txp(struct drm_encoder *encoder)
{
	return container_of(encoder, struct vc4_txp, encoder.base);
}

static inline struct vc4_txp *connector_to_vc4_txp(struct drm_connector *conn)
{
	return container_of(conn, struct vc4_txp, connector.base);
}

static const struct debugfs_reg32 txp_regs[] = {
	VC4_REG32(TXP_DST_PTR),
	VC4_REG32(TXP_DST_PITCH),
	VC4_REG32(TXP_DIM),
	VC4_REG32(TXP_DST_CTRL),
	VC4_REG32(TXP_PROGRESS),
};

static int vc4_txp_connector_get_modes(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;

	return drm_add_modes_noedid(connector, dev->mode_config.max_width,
				    dev->mode_config.max_height);
}

static enum drm_mode_status
vc4_txp_connector_mode_valid(struct drm_connector *connector,
			     struct drm_display_mode *mode)
{
	struct drm_device *dev = connector->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	int w = mode->hdisplay, h = mode->vdisplay;

	if (w < mode_config->min_width || w > mode_config->max_width)
		return MODE_BAD_HVALUE;

	if (h < mode_config->min_height || h > mode_config->max_height)
		return MODE_BAD_VVALUE;

	return MODE_OK;
}

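/* drm_fmts[] and txp_fmts[] are index-matched: entry N in drm_fmts[]
 * is written back using the TXP format at entry N in txp_fmts[].  The
 * X and A variants of a layout share the same TXP format; whether the
 * alpha byte is stored or padded is controlled separately through
 * TXP_ALPHA_ENABLE/TXP_ALPHA_INVERT in vc4_txp_connector_atomic_commit().
 */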
static const u32 drm_fmts[] = {
	DRM_FORMAT_RGB888,
	DRM_FORMAT_BGR888,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGBX8888,
	DRM_FORMAT_BGRX8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_BGRA8888,
};

static const u32 txp_fmts[] = {
	TXP_FORMAT_RGB888,
	TXP_FORMAT_BGR888,
	TXP_FORMAT_ARGB8888,
	TXP_FORMAT_ABGR8888,
	TXP_FORMAT_ARGB8888,
	TXP_FORMAT_ABGR8888,
	TXP_FORMAT_RGBA8888,
	TXP_FORMAT_BGRA8888,
	TXP_FORMAT_RGBA8888,
	TXP_FORMAT_BGRA8888,
};

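/* Record in the vc4-specific CRTC state that this commit carries a TXP
 * writeback job.
 */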
static void vc4_txp_armed(struct drm_crtc_state *state)
{
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);

	vc4_state->txp_armed = true;
}

static int vc4_txp_connector_atomic_check(struct drm_connector *conn,
					  struct drm_atomic_state *state)
{
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;
	struct drm_framebuffer *fb;
	int i;

	conn_state = drm_atomic_get_new_connector_state(state, conn);
	if (!conn_state->writeback_job)
		return 0;

	crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);

	fb = conn_state->writeback_job->fb;
	if (fb->width != crtc_state->mode.hdisplay ||
	    fb->height != crtc_state->mode.vdisplay) {
		DRM_DEBUG_KMS("Invalid framebuffer size %ux%u\n",
			      fb->width, fb->height);
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(drm_fmts); i++) {
		if (fb->format->format == drm_fmts[i])
			break;
	}

	if (i == ARRAY_SIZE(drm_fmts))
		return -EINVAL;

	/* Pitch must be aligned on 16 bytes. */
	if (fb->pitches[0] & GENMASK(3, 0))
		return -EINVAL;

	vc4_txp_armed(crtc_state);

	return 0;
}

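/* Program the destination buffer, pitch, dimensions and control word,
 * then kick off a single writeback frame (TXP_GO) and queue the job so
 * completion can be signalled from the TXP interrupt handler.
 */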
static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
					    struct drm_atomic_state *state)
{
	struct drm_device *drm = conn->dev;
	struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(state,
										     conn);
	struct vc4_txp *txp = connector_to_vc4_txp(conn);
	struct drm_gem_dma_object *gem;
	struct drm_display_mode *mode;
	struct drm_framebuffer *fb;
	u32 ctrl;
	int idx;
	int i;

	if (WARN_ON(!conn_state->writeback_job))
		return;

	mode = &conn_state->crtc->state->adjusted_mode;
	fb = conn_state->writeback_job->fb;

	for (i = 0; i < ARRAY_SIZE(drm_fmts); i++) {
		if (fb->format->format == drm_fmts[i])
			break;
	}

	if (WARN_ON(i == ARRAY_SIZE(drm_fmts)))
		return;

	ctrl = TXP_GO | TXP_EI |
	       VC4_SET_FIELD(0xf, TXP_BYTE_ENABLE) |
	       VC4_SET_FIELD(txp_fmts[i], TXP_FORMAT);

	if (fb->format->has_alpha)
		ctrl |= TXP_ALPHA_ENABLE;
	else
		/*
		 * If TXP_ALPHA_ENABLE isn't set and TXP_ALPHA_INVERT is, the
		 * hardware will force the output padding to be 0xff.
		 */
		ctrl |= TXP_ALPHA_INVERT;

	if (!drm_dev_enter(drm, &idx))
		return;

	gem = drm_fb_dma_get_gem_obj(fb, 0);
	TXP_WRITE(TXP_DST_PTR, gem->dma_addr + fb->offsets[0]);
	TXP_WRITE(TXP_DST_PITCH, fb->pitches[0]);
	TXP_WRITE(TXP_DIM,
		  VC4_SET_FIELD(mode->hdisplay, TXP_WIDTH) |
		  VC4_SET_FIELD(mode->vdisplay, TXP_HEIGHT));

	TXP_WRITE(TXP_DST_CTRL, ctrl);

	drm_writeback_queue_job(&txp->connector, conn_state);

	drm_dev_exit(idx);
}

static const struct drm_connector_helper_funcs vc4_txp_connector_helper_funcs = {
	.get_modes = vc4_txp_connector_get_modes,
	.mode_valid = vc4_txp_connector_mode_valid,
	.atomic_check = vc4_txp_connector_atomic_check,
	.atomic_commit = vc4_txp_connector_atomic_commit,
};

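/* The writeback connector has no physical output to probe, so it is
 * always reported as connected.
 */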
static enum drm_connector_status
vc4_txp_connector_detect(struct drm_connector *connector, bool force)
{
	return connector_status_connected;
}

static const struct drm_connector_funcs vc4_txp_connector_funcs = {
	.detect = vc4_txp_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

static void vc4_txp_encoder_disable(struct drm_encoder *encoder)
{
	struct drm_device *drm = encoder->dev;
	struct vc4_txp *txp = encoder_to_vc4_txp(encoder);
	int idx;

	if (!drm_dev_enter(drm, &idx))
		return;

	if (TXP_READ(TXP_DST_CTRL) & TXP_BUSY) {
		unsigned long timeout = jiffies + msecs_to_jiffies(1000);

		TXP_WRITE(TXP_DST_CTRL, TXP_ABORT);

		while (TXP_READ(TXP_DST_CTRL) & TXP_BUSY &&
		       time_before(jiffies, timeout))
			;

		WARN_ON(TXP_READ(TXP_DST_CTRL) & TXP_BUSY);
	}

	TXP_WRITE(TXP_DST_CTRL, TXP_POWERDOWN);

	drm_dev_exit(idx);
}

static const struct drm_encoder_helper_funcs vc4_txp_encoder_helper_funcs = {
	.disable = vc4_txp_encoder_disable,
};

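/* The TXP has no scanout vblank of its own; vc4_crtc_handle_vblank()
 * is called from vc4_txp_interrupt() when the block signals completion,
 * so there is nothing to enable or disable here.
 */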
static int vc4_txp_enable_vblank(struct drm_crtc *crtc)
{
	return 0;
}

static void vc4_txp_disable_vblank(struct drm_crtc *crtc) {}

static const struct drm_crtc_funcs vc4_txp_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.page_flip = vc4_page_flip,
	.reset = vc4_crtc_reset,
	.atomic_duplicate_state = vc4_crtc_duplicate_state,
	.atomic_destroy_state = vc4_crtc_destroy_state,
	.enable_vblank = vc4_txp_enable_vblank,
	.disable_vblank = vc4_txp_disable_vblank,
	.late_register = vc4_crtc_late_register,
};

static int vc4_txp_atomic_check(struct drm_crtc *crtc,
				struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									   crtc);
	int ret;

	ret = vc4_hvs_atomic_check(crtc, state);
	if (ret)
		return ret;

	crtc_state->no_vblank = true;

	return 0;
}

static void vc4_txp_atomic_enable(struct drm_crtc *crtc,
				  struct drm_atomic_state *state)
{
	drm_crtc_vblank_on(crtc);
	vc4_hvs_atomic_enable(crtc, state);
}

static void vc4_txp_atomic_disable(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	struct drm_device *dev = crtc->dev;

	/* Disable vblank irq handling before crtc is disabled. */
	drm_crtc_vblank_off(crtc);

	vc4_hvs_atomic_disable(crtc, state);

	/*
	 * Make sure we issue a vblank event after disabling the CRTC if
	 * someone was waiting for it.
	 */
	if (crtc->state->event) {
		unsigned long flags;

		spin_lock_irqsave(&dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}
}

static const struct drm_crtc_helper_funcs vc4_txp_crtc_helper_funcs = {
	.atomic_check = vc4_txp_atomic_check,
	.atomic_begin = vc4_hvs_atomic_begin,
	.atomic_flush = vc4_hvs_atomic_flush,
	.atomic_enable = vc4_txp_atomic_enable,
	.atomic_disable = vc4_txp_atomic_disable,
};

static irqreturn_t vc4_txp_interrupt(int irq, void *data)
{
	struct vc4_txp *txp = data;
	struct vc4_crtc *vc4_crtc = &txp->base;

	/*
	 * We don't need to protect the register access with
	 * drm_dev_enter() here because the interrupt handler lifetime
	 * is tied to the device itself, and not to the DRM device.
	 *
	 * So when the device goes away, one of the first things we do
	 * is unregister the interrupt handler, and only then unregister
	 * the DRM device.  drm_dev_enter() would thus always succeed if
	 * we reached this point.
	 */
	TXP_WRITE(TXP_DST_CTRL, TXP_READ(TXP_DST_CTRL) & ~TXP_EI);
	vc4_crtc_handle_vblank(vc4_crtc);
	drm_writeback_signal_completion(&txp->connector, 0);

	return IRQ_HANDLED;
}

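/* The TXP can only be fed from HVS channel 2, so the CRTC is pinned to
 * that FIFO.
 */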
const struct vc4_crtc_data vc4_txp_crtc_data = {
	.name = "txp",
	.debugfs_name = "txp_regs",
	.hvs_available_channels = BIT(2),
	.hvs_output = 2,
};

static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *drm = dev_get_drvdata(master);
	struct vc4_encoder *vc4_encoder;
	struct drm_encoder *encoder;
	struct vc4_crtc *vc4_crtc;
	struct vc4_txp *txp;
	int ret, irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	txp = drmm_kzalloc(drm, sizeof(*txp), GFP_KERNEL);
	if (!txp)
		return -ENOMEM;

	txp->pdev = pdev;
	txp->regs = vc4_ioremap_regs(pdev, 0);
	if (IS_ERR(txp->regs))
		return PTR_ERR(txp->regs);

	vc4_crtc = &txp->base;
	vc4_crtc->regset.base = txp->regs;
	vc4_crtc->regset.regs = txp_regs;
	vc4_crtc->regset.nregs = ARRAY_SIZE(txp_regs);

	ret = vc4_crtc_init(drm, pdev, vc4_crtc, &vc4_txp_crtc_data,
			    &vc4_txp_crtc_funcs, &vc4_txp_crtc_helper_funcs, true);
	if (ret)
		return ret;

	vc4_encoder = &txp->encoder;
	txp->encoder.type = VC4_ENCODER_TYPE_TXP;

	encoder = &vc4_encoder->base;
	encoder->possible_crtcs = drm_crtc_mask(&vc4_crtc->base);

	drm_encoder_helper_add(encoder, &vc4_txp_encoder_helper_funcs);

	ret = drmm_encoder_init(drm, encoder, NULL, DRM_MODE_ENCODER_VIRTUAL, NULL);
	if (ret)
		return ret;

	drm_connector_helper_add(&txp->connector.base,
				 &vc4_txp_connector_helper_funcs);
	ret = drm_writeback_connector_init_with_encoder(drm, &txp->connector,
							encoder,
							&vc4_txp_connector_funcs,
							drm_fmts, ARRAY_SIZE(drm_fmts));
	if (ret)
		return ret;

	ret = devm_request_irq(dev, irq, vc4_txp_interrupt, 0,
			       dev_name(dev), txp);
	if (ret)
		return ret;

	dev_set_drvdata(dev, txp);

	return 0;
}

static void vc4_txp_unbind(struct device *dev, struct device *master,
			   void *data)
{
	struct vc4_txp *txp = dev_get_drvdata(dev);

	drm_connector_cleanup(&txp->connector.base);
}

static const struct component_ops vc4_txp_ops = {
	.bind = vc4_txp_bind,
	.unbind = vc4_txp_unbind,
};

static int vc4_txp_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &vc4_txp_ops);
}

static int vc4_txp_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &vc4_txp_ops);
	return 0;
}

static const struct of_device_id vc4_txp_dt_match[] = {
	{ .compatible = "brcm,bcm2835-txp" },
	{ /* sentinel */ },
};

struct platform_driver vc4_txp_driver = {
	.probe = vc4_txp_probe,
	.remove = vc4_txp_remove,
	.driver = {
		.name = "vc4_txp",
		.of_match_table = vc4_txp_dt_match,
	},
};