// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015 Free Electrons
 * Copyright (C) 2015 NextThing Co
 *
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/component.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_probe_helper.h>

#include "sun4i_backend.h"
#include "sun4i_drv.h"
#include "sun4i_frontend.h"
#include "sun4i_layer.h"
#include "sunxi_engine.h"

struct sun4i_backend_quirks {
	/* backend <-> TCON muxing selection done in backend */
	bool needs_output_muxing;

	/* alpha at the lowest z position is not always supported */
	bool supports_lowest_plane_alpha;
};

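/*
 * Fixed-point coefficients loaded into the output color correction unit
 * to convert the composed RGB output to YUV (see
 * sun4i_backend_apply_color_correction() below).
 */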
static const u32 sunxi_rgb2yuv_coef[12] = {
	0x00000107, 0x00000204, 0x00000064, 0x00000108,
	0x00003f69, 0x00003ed6, 0x000001c1, 0x00000808,
	0x000001c1, 0x00003e88, 0x00003fb8, 0x00000808
};

static void sun4i_backend_apply_color_correction(struct sunxi_engine *engine)
{
	int i;

	DRM_DEBUG_DRIVER("Applying RGB to YUV color correction\n");

	/* Set color correction */
	regmap_write(engine->regs, SUN4I_BACKEND_OCCTL_REG,
		     SUN4I_BACKEND_OCCTL_ENABLE);

	for (i = 0; i < 12; i++)
		regmap_write(engine->regs, SUN4I_BACKEND_OCRCOEF_REG(i),
			     sunxi_rgb2yuv_coef[i]);
}

static void sun4i_backend_disable_color_correction(struct sunxi_engine *engine)
{
	DRM_DEBUG_DRIVER("Disabling color correction\n");

	/* Disable color correction */
	regmap_update_bits(engine->regs, SUN4I_BACKEND_OCCTL_REG,
			   SUN4I_BACKEND_OCCTL_ENABLE, 0);
}

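/*
 * Writing SUN4I_BACKEND_REGBUFFCTL_LOADCTL (with auto-loading kept
 * disabled) asks the hardware to latch the shadowed register values
 * into the active configuration, which it fetches on the next vblank.
 */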
static void sun4i_backend_commit(struct sunxi_engine *engine)
{
	DRM_DEBUG_DRIVER("Committing changes\n");

	regmap_write(engine->regs, SUN4I_BACKEND_REGBUFFCTL_REG,
		     SUN4I_BACKEND_REGBUFFCTL_AUTOLOAD_DIS |
		     SUN4I_BACKEND_REGBUFFCTL_LOADCTL);
}

void sun4i_backend_layer_enable(struct sun4i_backend *backend,
				int layer, bool enable)
{
	u32 val;

	DRM_DEBUG_DRIVER("%sabling layer %d\n", enable ? "En" : "Dis",
			 layer);

	if (enable)
		val = SUN4I_BACKEND_MODCTL_LAY_EN(layer);
	else
		val = 0;

	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_MODCTL_REG,
			   SUN4I_BACKEND_MODCTL_LAY_EN(layer), val);
}

static int sun4i_backend_drm_format_to_layer(u32 format, u32 *mode)
{
	switch (format) {
	case DRM_FORMAT_ARGB8888:
		*mode = SUN4I_BACKEND_LAY_FBFMT_ARGB8888;
		break;

	case DRM_FORMAT_ARGB4444:
		*mode = SUN4I_BACKEND_LAY_FBFMT_ARGB4444;
		break;

	case DRM_FORMAT_ARGB1555:
		*mode = SUN4I_BACKEND_LAY_FBFMT_ARGB1555;
		break;

	case DRM_FORMAT_RGBA5551:
		*mode = SUN4I_BACKEND_LAY_FBFMT_RGBA5551;
		break;

	case DRM_FORMAT_RGBA4444:
		*mode = SUN4I_BACKEND_LAY_FBFMT_RGBA4444;
		break;

	case DRM_FORMAT_XRGB8888:
		*mode = SUN4I_BACKEND_LAY_FBFMT_XRGB8888;
		break;

	case DRM_FORMAT_RGB888:
		*mode = SUN4I_BACKEND_LAY_FBFMT_RGB888;
		break;

	case DRM_FORMAT_RGB565:
		*mode = SUN4I_BACKEND_LAY_FBFMT_RGB565;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

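/*
 * Formats the backend layers can scan out on their own; only linear
 * buffers are accepted (see sun4i_backend_format_is_supported()).
 */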
static const uint32_t sun4i_backend_formats[] = {
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_ARGB4444,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_RGBA4444,
	DRM_FORMAT_RGBA5551,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
};

bool sun4i_backend_format_is_supported(uint32_t fmt, uint64_t modifier)
{
	unsigned int i;

	if (modifier != DRM_FORMAT_MOD_LINEAR)
		return false;

	for (i = 0; i < ARRAY_SIZE(sun4i_backend_formats); i++)
		if (sun4i_backend_formats[i] == fmt)
			return true;

	return false;
}

int sun4i_backend_update_layer_coord(struct sun4i_backend *backend,
				     int layer, struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;

	DRM_DEBUG_DRIVER("Updating layer %d\n", layer);

	/* Set height and width */
	DRM_DEBUG_DRIVER("Layer size W: %u H: %u\n",
			 state->crtc_w, state->crtc_h);
	regmap_write(backend->engine.regs, SUN4I_BACKEND_LAYSIZE_REG(layer),
		     SUN4I_BACKEND_LAYSIZE(state->crtc_w,
					   state->crtc_h));

	/* Set base coordinates */
	DRM_DEBUG_DRIVER("Layer coordinates X: %d Y: %d\n",
			 state->crtc_x, state->crtc_y);
	regmap_write(backend->engine.regs, SUN4I_BACKEND_LAYCOOR_REG(layer),
		     SUN4I_BACKEND_LAYCOOR(state->crtc_x,
					   state->crtc_y));

	return 0;
}

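/*
 * Program the input YUV unit for a packed YUV 4:2:2 framebuffer: load
 * the BT.601 YUV to RGB coefficients, switch the layer to YUV mode and
 * select the pixel sequence matching the DRM format.
 */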
static int sun4i_backend_update_yuv_format(struct sun4i_backend *backend,
					   int layer, struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;
	struct drm_framebuffer *fb = state->fb;
	const struct drm_format_info *format = fb->format;
	const uint32_t fmt = format->format;
	u32 val = SUN4I_BACKEND_IYUVCTL_EN;
	int i;

	for (i = 0; i < ARRAY_SIZE(sunxi_bt601_yuv2rgb_coef); i++)
		regmap_write(backend->engine.regs,
			     SUN4I_BACKEND_YGCOEF_REG(i),
			     sunxi_bt601_yuv2rgb_coef[i]);

	/*
	 * We should do that only for a single plane, but the
	 * framebuffer's atomic_check has our back on this.
	 */
	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN,
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN);

	/* TODO: Add support for the multi-planar YUV formats */
	if (drm_format_info_is_yuv_packed(format) &&
	    drm_format_info_is_yuv_sampling_422(format))
		val |= SUN4I_BACKEND_IYUVCTL_FBFMT_PACKED_YUV422;
	else
		DRM_DEBUG_DRIVER("Unsupported YUV format (0x%x)\n", fmt);

	/*
	 * Allwinner seems to list the pixel sequence from right to left, while
	 * DRM lists it from left to right.
	 */
	switch (fmt) {
	case DRM_FORMAT_YUYV:
		val |= SUN4I_BACKEND_IYUVCTL_FBPS_VYUY;
		break;
	case DRM_FORMAT_YVYU:
		val |= SUN4I_BACKEND_IYUVCTL_FBPS_UYVY;
		break;
	case DRM_FORMAT_UYVY:
		val |= SUN4I_BACKEND_IYUVCTL_FBPS_YVYU;
		break;
	case DRM_FORMAT_VYUY:
		val |= SUN4I_BACKEND_IYUVCTL_FBPS_YUYV;
		break;
	default:
		DRM_DEBUG_DRIVER("Unsupported YUV pixel sequence (0x%x)\n",
				 fmt);
	}

	regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVCTL_REG, val);

	return 0;
}

int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
				       int layer, struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;
	struct drm_framebuffer *fb = state->fb;
	u32 val;
	int ret;

	/* Clear the YUV mode */
	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN, 0);

	val = SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA(state->alpha >> 8);
	if (state->alpha != DRM_BLEND_ALPHA_OPAQUE)
		val |= SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN;
	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_MASK |
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN,
			   val);

	if (fb->format->is_yuv)
		return sun4i_backend_update_yuv_format(backend, layer, plane);

	ret = sun4i_backend_drm_format_to_layer(fb->format->format, &val);
	if (ret) {
		DRM_DEBUG_DRIVER("Invalid format\n");
		return ret;
	}

	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG1(layer),
			   SUN4I_BACKEND_ATTCTL_REG1_LAY_FBFMT, val);

	return 0;
}

int sun4i_backend_update_layer_frontend(struct sun4i_backend *backend,
					int layer, uint32_t fmt)
{
	u32 val;
	int ret;

	ret = sun4i_backend_drm_format_to_layer(fmt, &val);
	if (ret) {
		DRM_DEBUG_DRIVER("Invalid format\n");
		return ret;
	}

	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN,
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN);

	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG1(layer),
			   SUN4I_BACKEND_ATTCTL_REG1_LAY_FBFMT, val);

	return 0;
}

static int sun4i_backend_update_yuv_buffer(struct sun4i_backend *backend,
					   struct drm_framebuffer *fb,
					   dma_addr_t paddr)
{
	/* TODO: Add support for the multi-planar YUV formats */
	DRM_DEBUG_DRIVER("Setting packed YUV buffer address to %pad\n", &paddr);
	regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVADD_REG(0), paddr);

	DRM_DEBUG_DRIVER("Layer line width: %d bits\n", fb->pitches[0] * 8);
	regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVLINEWIDTH_REG(0),
		     fb->pitches[0] * 8);

	return 0;
}

int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
				      int layer, struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;
	struct drm_framebuffer *fb = state->fb;
	u32 lo_paddr, hi_paddr;
	dma_addr_t dma_addr;

	/* Set the line width */
	DRM_DEBUG_DRIVER("Layer line width: %d bits\n", fb->pitches[0] * 8);
	regmap_write(backend->engine.regs,
		     SUN4I_BACKEND_LAYLINEWIDTH_REG(layer),
		     fb->pitches[0] * 8);

	/* Get the start of the displayed memory */
	dma_addr = drm_fb_dma_get_gem_addr(fb, state, 0);
	DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &dma_addr);

	if (fb->format->is_yuv)
		return sun4i_backend_update_yuv_buffer(backend, fb, dma_addr);

	/* Write the 32 lower bits of the address (in bits) */
	lo_paddr = dma_addr << 3;
	DRM_DEBUG_DRIVER("Setting address lower bits to 0x%x\n", lo_paddr);
	regmap_write(backend->engine.regs,
		     SUN4I_BACKEND_LAYFB_L32ADD_REG(layer),
		     lo_paddr);

	/* And the upper bits */
	hi_paddr = dma_addr >> 29;
	DRM_DEBUG_DRIVER("Setting address high bits to 0x%x\n", hi_paddr);
	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_LAYFB_H4ADD_REG,
			   SUN4I_BACKEND_LAYFB_H4ADD_MSK(layer),
			   SUN4I_BACKEND_LAYFB_H4ADD(layer, hi_paddr));

	return 0;
}

int sun4i_backend_update_layer_zpos(struct sun4i_backend *backend, int layer,
				    struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;
	struct sun4i_layer_state *p_state = state_to_sun4i_layer_state(state);
	unsigned int priority = state->normalized_zpos;
	unsigned int pipe = p_state->pipe;

	DRM_DEBUG_DRIVER("Setting layer %d's priority to %d and pipe %d\n",
			 layer, priority, pipe);
	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL_MASK |
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL_MASK,
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(p_state->pipe) |
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL(priority));

	return 0;
}

void sun4i_backend_cleanup_layer(struct sun4i_backend *backend,
				 int layer)
{
	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN |
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN, 0);
}

static bool sun4i_backend_plane_uses_scaler(struct drm_plane_state *state)
{
	u16 src_h = state->src_h >> 16;
	u16 src_w = state->src_w >> 16;

	DRM_DEBUG_DRIVER("Input size %dx%d, output size %dx%d\n",
			 src_w, src_h, state->crtc_w, state->crtc_h);

	if ((state->crtc_h != src_h) || (state->crtc_w != src_w))
		return true;

	return false;
}

static bool sun4i_backend_plane_uses_frontend(struct drm_plane_state *state)
{
	struct sun4i_layer *layer = plane_to_sun4i_layer(state->plane);
	struct sun4i_backend *backend = layer->backend;
	uint32_t format = state->fb->format->format;
	uint64_t modifier = state->fb->modifier;

	if (IS_ERR(backend->frontend))
		return false;

	if (!sun4i_frontend_format_is_supported(format, modifier))
		return false;

	if (!sun4i_backend_format_is_supported(format, modifier))
		return true;

	/*
	 * TODO: The backend alone allows 2x and 4x integer scaling, including
	 * support for an alpha component (which the frontend doesn't support).
	 * Use the backend directly instead of the frontend in this case, with
	 * another test to return false.
	 */

	if (sun4i_backend_plane_uses_scaler(state))
		return true;

	/*
	 * Here the format is supported by both the frontend and the backend
	 * and no frontend scaling is required, so use the backend directly.
	 */
	return false;
}

static bool sun4i_backend_plane_is_supported(struct drm_plane_state *state,
					     bool *uses_frontend)
{
	if (sun4i_backend_plane_uses_frontend(state)) {
		*uses_frontend = true;
		return true;
	}

	*uses_frontend = false;

	/* Scaling is not supported without the frontend. */
	if (sun4i_backend_plane_uses_scaler(state))
		return false;

	return true;
}

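/*
 * Wait for the hardware to finish loading the previously committed set
 * of shadowed registers before the new state starts being programmed.
 */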
static void sun4i_backend_atomic_begin(struct sunxi_engine *engine,
				       struct drm_crtc_state *old_state)
{
	u32 val;

	WARN_ON(regmap_read_poll_timeout(engine->regs,
					 SUN4I_BACKEND_REGBUFFCTL_REG,
					 val, !(val & SUN4I_BACKEND_REGBUFFCTL_LOADCTL),
					 100, 50000));
}

static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
				      struct drm_crtc_state *crtc_state)
{
	struct drm_plane_state *plane_states[SUN4I_BACKEND_NUM_LAYERS] = { 0 };
	struct sun4i_backend *backend = engine_to_sun4i_backend(engine);
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_device *drm = state->dev;
	struct drm_plane *plane;
	unsigned int num_planes = 0;
	unsigned int num_alpha_planes = 0;
	unsigned int num_frontend_planes = 0;
	unsigned int num_alpha_planes_max = 1;
	unsigned int num_yuv_planes = 0;
	unsigned int current_pipe = 0;
	unsigned int i;

	DRM_DEBUG_DRIVER("Starting checking our planes\n");

	if (!crtc_state->planes_changed)
		return 0;

	drm_for_each_plane_mask(plane, drm, crtc_state->plane_mask) {
		struct drm_plane_state *plane_state =
			drm_atomic_get_plane_state(state, plane);
		struct sun4i_layer_state *layer_state =
			state_to_sun4i_layer_state(plane_state);
		struct drm_framebuffer *fb = plane_state->fb;

		if (!sun4i_backend_plane_is_supported(plane_state,
						      &layer_state->uses_frontend))
			return -EINVAL;

		if (layer_state->uses_frontend) {
			DRM_DEBUG_DRIVER("Using the frontend for plane %d\n",
					 plane->index);
			num_frontend_planes++;
		} else {
			if (fb->format->is_yuv) {
				DRM_DEBUG_DRIVER("Plane FB format is YUV\n");
				num_yuv_planes++;
			}
		}

		DRM_DEBUG_DRIVER("Plane FB format is %p4cc\n",
				 &fb->format->format);
		if (fb->format->has_alpha || (plane_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
			num_alpha_planes++;

		DRM_DEBUG_DRIVER("Plane zpos is %d\n",
				 plane_state->normalized_zpos);

		/* Sort our planes by Zpos */
		plane_states[plane_state->normalized_zpos] = plane_state;

		num_planes++;
	}

	/* All our planes were disabled, bail out */
	if (!num_planes)
		return 0;

	/*
	 * The hardware is a bit unusual here.
	 *
	 * Even though it supports 4 layers, it does the composition
	 * in two separate steps.
	 *
	 * The first one is assigning a layer to one of its two
	 * pipes. If more than 1 layer is assigned to the same pipe,
	 * and if pixels overlap, the pipe will take the pixel from
	 * the layer with the highest priority.
	 *
	 * The second step is the actual alpha blending, that takes
	 * the two pipes as input, and uses the potential alpha
	 * component to do the transparency between the two.
	 *
	 * This two-step scenario makes us unable to guarantee a
	 * robust alpha blending between the 4 layers in all
	 * situations, since this means that we need to have one layer
	 * with alpha at the lowest position of our two pipes.
	 *
	 * However, we cannot even do that on every platform, since
	 * the hardware has a bug where the lowest plane of the lowest
	 * pipe (pipe 0, priority 0), if it has any alpha, will
	 * discard the pixel data entirely and just display the pixels
	 * in the background color (black by default).
	 *
	 * This means that on the affected platforms, we effectively
	 * have only three valid configurations with alpha, all of
	 * them with the alpha being on pipe 1 with the lowest
	 * position, which can be 1, 2 or 3 depending on the number of
	 * planes and their zpos.
	 */

	/* For platforms that are not affected by the issue described above. */
	if (backend->quirks->supports_lowest_plane_alpha)
		num_alpha_planes_max++;

	if (num_alpha_planes > num_alpha_planes_max) {
		DRM_DEBUG_DRIVER("Too many planes with alpha, rejecting...\n");
		return -EINVAL;
	}

	/* We can't have an alpha plane at the lowest position */
	if (!backend->quirks->supports_lowest_plane_alpha &&
	    (plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE))
		return -EINVAL;

	for (i = 1; i < num_planes; i++) {
		struct drm_plane_state *p_state = plane_states[i];
		struct drm_framebuffer *fb = p_state->fb;
		struct sun4i_layer_state *s_state = state_to_sun4i_layer_state(p_state);

		/*
		 * The only alpha position is the lowest plane of the
		 * second pipe.
		 */
		if (fb->format->has_alpha || (p_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
			current_pipe++;

		s_state->pipe = current_pipe;
	}

	/* We can only have a single YUV plane at a time */
	if (num_yuv_planes > SUN4I_BACKEND_NUM_YUV_PLANES) {
		DRM_DEBUG_DRIVER("Too many planes with YUV, rejecting...\n");
		return -EINVAL;
	}

	if (num_frontend_planes > SUN4I_BACKEND_NUM_FRONTEND_LAYERS) {
		DRM_DEBUG_DRIVER("Too many planes going through the frontend, rejecting\n");
		return -EINVAL;
	}

	DRM_DEBUG_DRIVER("State valid with %u planes, %u alpha, %u video, %u YUV\n",
			 num_planes, num_alpha_planes, num_frontend_planes,
			 num_yuv_planes);

	return 0;
}

static void sun4i_backend_vblank_quirk(struct sunxi_engine *engine)
{
	struct sun4i_backend *backend = engine_to_sun4i_backend(engine);
	struct sun4i_frontend *frontend = backend->frontend;

	if (!frontend)
		return;

	/*
	 * In a teardown scenario with the frontend involved, we have
	 * to keep the frontend enabled until the next vblank, and
	 * only then disable it.
	 *
	 * This is due to the fact that the backend will not take into
	 * account the new configuration (with the plane that used to
	 * be fed by the frontend now disabled) until we write to the
	 * commit bit and the hardware fetches the new configuration
	 * during the next vblank.
	 *
	 * So we keep the frontend around in order to prevent any
	 * visual artifacts.
	 */
	spin_lock(&backend->frontend_lock);
	if (backend->frontend_teardown) {
		sun4i_frontend_exit(frontend);
		backend->frontend_teardown = false;
	}
	spin_unlock(&backend->frontend_lock);
}

static void sun4i_backend_mode_set(struct sunxi_engine *engine,
				   const struct drm_display_mode *mode)
{
	bool interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);

	DRM_DEBUG_DRIVER("Updating global size W: %u H: %u\n",
			 mode->hdisplay, mode->vdisplay);

	regmap_write(engine->regs, SUN4I_BACKEND_DISSIZE_REG,
		     SUN4I_BACKEND_DISSIZE(mode->hdisplay, mode->vdisplay));

	regmap_update_bits(engine->regs, SUN4I_BACKEND_MODCTL_REG,
			   SUN4I_BACKEND_MODCTL_ITLMOD_EN,
			   interlaced ? SUN4I_BACKEND_MODCTL_ITLMOD_EN : 0);

	DRM_DEBUG_DRIVER("Switching display backend interlaced mode %s\n",
			 interlaced ? "on" : "off");
}

static int sun4i_backend_init_sat(struct device *dev)
{
	struct sun4i_backend *backend = dev_get_drvdata(dev);
	int ret;

	backend->sat_reset = devm_reset_control_get(dev, "sat");
	if (IS_ERR(backend->sat_reset)) {
		dev_err(dev, "Couldn't get the SAT reset line\n");
		return PTR_ERR(backend->sat_reset);
	}

	ret = reset_control_deassert(backend->sat_reset);
	if (ret) {
		dev_err(dev, "Couldn't deassert the SAT reset line\n");
		return ret;
	}

	backend->sat_clk = devm_clk_get(dev, "sat");
	if (IS_ERR(backend->sat_clk)) {
		dev_err(dev, "Couldn't get our SAT clock\n");
		ret = PTR_ERR(backend->sat_clk);
		goto err_assert_reset;
	}

	ret = clk_prepare_enable(backend->sat_clk);
	if (ret) {
		dev_err(dev, "Couldn't enable the SAT clock\n");
		/* Re-assert the reset line we just deasserted. */
		goto err_assert_reset;
	}

	return 0;

err_assert_reset:
	reset_control_assert(backend->sat_reset);
	return ret;
}

static int sun4i_backend_free_sat(struct device *dev)
{
	struct sun4i_backend *backend = dev_get_drvdata(dev);

	clk_disable_unprepare(backend->sat_clk);
	reset_control_assert(backend->sat_reset);

	return 0;
}

/*
 * The display backend can take video output from the display frontend, or
 * the display enhancement unit on the A80, as input for one of its layers.
 * This relationship within the display pipeline is encoded in the device
 * tree with of_graph, and we use it here to figure out which backend, if
 * there are 2 or more, we are currently probing. The number would be in
 * the "reg" property of the upstream output port endpoint.
 */
static int sun4i_backend_of_get_id(struct device_node *node)
{
	struct device_node *ep, *remote;
	struct of_endpoint of_ep;

	/* Input port is 0, and we want the first endpoint. */
	ep = of_graph_get_endpoint_by_regs(node, 0, -1);
	if (!ep)
		return -EINVAL;

	remote = of_graph_get_remote_endpoint(ep);
	of_node_put(ep);
	if (!remote)
		return -EINVAL;

	of_graph_parse_endpoint(remote, &of_ep);
	of_node_put(remote);
	return of_ep.id;
}

/* TODO: This needs to take multiple pipelines into account */
static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv,
							   struct device_node *node)
{
	struct device_node *port, *ep, *remote;
	struct sun4i_frontend *frontend;

	port = of_graph_get_port_by_id(node, 0);
	if (!port)
		return ERR_PTR(-EINVAL);

	for_each_available_child_of_node(port, ep) {
		remote = of_graph_get_remote_port_parent(ep);
		if (!remote)
			continue;
		of_node_put(remote);

		/* does this node match any registered engines? */
		list_for_each_entry(frontend, &drv->frontend_list, list) {
			if (remote == frontend->node) {
				of_node_put(port);
				of_node_put(ep);
				return frontend;
			}
		}
	}
	of_node_put(port);
	return ERR_PTR(-EINVAL);
}

static const struct sunxi_engine_ops sun4i_backend_engine_ops = {
	.atomic_begin = sun4i_backend_atomic_begin,
	.atomic_check = sun4i_backend_atomic_check,
	.commit = sun4i_backend_commit,
	.layers_init = sun4i_layers_init,
	.apply_color_correction = sun4i_backend_apply_color_correction,
	.disable_color_correction = sun4i_backend_disable_color_correction,
	.vblank_quirk = sun4i_backend_vblank_quirk,
	.mode_set = sun4i_backend_mode_set,
};

static const struct regmap_config sun4i_backend_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = 0x5800,
};

static int sun4i_backend_bind(struct device *dev, struct device *master,
			      void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *drm = data;
	struct sun4i_drv *drv = drm->dev_private;
	struct sun4i_backend *backend;
	const struct sun4i_backend_quirks *quirks;
	void __iomem *regs;
	int i, ret;

	backend = devm_kzalloc(dev, sizeof(*backend), GFP_KERNEL);
	if (!backend)
		return -ENOMEM;
	dev_set_drvdata(dev, backend);
	spin_lock_init(&backend->frontend_lock);

	if (of_find_property(dev->of_node, "interconnects", NULL)) {
		/*
		 * This assumes we have the same DMA constraints for all the
		 * devices in our pipeline (all the backends, but also the
		 * frontends). This sounds bad, but it has always been the case
		 * for us, and DRM doesn't do per-device allocation either, so
		 * we would need to fix DRM first...
		 */
		ret = of_dma_configure(drm->dev, dev->of_node, true);
		if (ret)
			return ret;
	}

	backend->engine.node = dev->of_node;
	backend->engine.ops = &sun4i_backend_engine_ops;
	backend->engine.id = sun4i_backend_of_get_id(dev->of_node);
	if (backend->engine.id < 0)
		return backend->engine.id;

	backend->frontend = sun4i_backend_find_frontend(drv, dev->of_node);
	if (IS_ERR(backend->frontend))
		dev_warn(dev, "Couldn't find matching frontend, frontend features disabled\n");

	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	backend->reset = devm_reset_control_get(dev, NULL);
	if (IS_ERR(backend->reset)) {
		dev_err(dev, "Couldn't get our reset line\n");
		return PTR_ERR(backend->reset);
	}

	ret = reset_control_deassert(backend->reset);
	if (ret) {
		dev_err(dev, "Couldn't deassert our reset line\n");
		return ret;
	}

	backend->bus_clk = devm_clk_get(dev, "ahb");
	if (IS_ERR(backend->bus_clk)) {
		dev_err(dev, "Couldn't get the backend bus clock\n");
		ret = PTR_ERR(backend->bus_clk);
		goto err_assert_reset;
	}
	clk_prepare_enable(backend->bus_clk);

	backend->mod_clk = devm_clk_get(dev, "mod");
	if (IS_ERR(backend->mod_clk)) {
		dev_err(dev, "Couldn't get the backend module clock\n");
		ret = PTR_ERR(backend->mod_clk);
		goto err_disable_bus_clk;
	}

	ret = clk_set_rate_exclusive(backend->mod_clk, 300000000);
	if (ret) {
		dev_err(dev, "Couldn't set the module clock frequency\n");
		goto err_disable_bus_clk;
	}

	clk_prepare_enable(backend->mod_clk);

	backend->ram_clk = devm_clk_get(dev, "ram");
	if (IS_ERR(backend->ram_clk)) {
		dev_err(dev, "Couldn't get the backend RAM clock\n");
		ret = PTR_ERR(backend->ram_clk);
		goto err_disable_mod_clk;
	}
	clk_prepare_enable(backend->ram_clk);

	if (of_device_is_compatible(dev->of_node,
				    "allwinner,sun8i-a33-display-backend")) {
		ret = sun4i_backend_init_sat(dev);
		if (ret) {
			dev_err(dev, "Couldn't init SAT resources\n");
			goto err_disable_ram_clk;
		}
	}

	backend->engine.regs = devm_regmap_init_mmio(dev, regs,
						     &sun4i_backend_regmap_config);
	if (IS_ERR(backend->engine.regs)) {
		dev_err(dev, "Couldn't create the backend regmap\n");
		return PTR_ERR(backend->engine.regs);
	}

	list_add_tail(&backend->engine.list, &drv->engine_list);

	/*
	 * Many of the backend's layer configuration registers have
	 * undefined default values. This poses a risk as we use
	 * regmap_update_bits in some places, and don't overwrite
	 * the whole register.
	 *
	 * Clear the registers here to have something predictable.
	 */
	for (i = 0x800; i < 0x1000; i += 4)
		regmap_write(backend->engine.regs, i, 0);

	/* Disable registers autoloading */
	regmap_write(backend->engine.regs, SUN4I_BACKEND_REGBUFFCTL_REG,
		     SUN4I_BACKEND_REGBUFFCTL_AUTOLOAD_DIS);

	/* Enable the backend */
	regmap_write(backend->engine.regs, SUN4I_BACKEND_MODCTL_REG,
		     SUN4I_BACKEND_MODCTL_DEBE_EN |
		     SUN4I_BACKEND_MODCTL_START_CTL);

	/* Set output selection if needed */
	quirks = of_device_get_match_data(dev);
	if (quirks->needs_output_muxing) {
		/*
		 * We assume there is no dynamic muxing of backends
		 * and TCONs, so we select the backend with the same ID.
		 *
		 * While dynamic selection might be interesting, since
		 * the CRTC is tied to the TCON, while the layers are
		 * tied to the backends, this means we would need to
		 * switch between groups of layers. There might not be
		 * a way to represent this constraint in DRM.
		 */
		regmap_update_bits(backend->engine.regs,
				   SUN4I_BACKEND_MODCTL_REG,
				   SUN4I_BACKEND_MODCTL_OUT_SEL,
				   (backend->engine.id
				    ? SUN4I_BACKEND_MODCTL_OUT_LCD1
				    : SUN4I_BACKEND_MODCTL_OUT_LCD0));
	}

	backend->quirks = quirks;

	return 0;

err_disable_ram_clk:
	clk_disable_unprepare(backend->ram_clk);
err_disable_mod_clk:
	clk_rate_exclusive_put(backend->mod_clk);
	clk_disable_unprepare(backend->mod_clk);
err_disable_bus_clk:
	clk_disable_unprepare(backend->bus_clk);
err_assert_reset:
	reset_control_assert(backend->reset);
	return ret;
}

static void sun4i_backend_unbind(struct device *dev, struct device *master,
				 void *data)
{
	struct sun4i_backend *backend = dev_get_drvdata(dev);

	list_del(&backend->engine.list);

	if (of_device_is_compatible(dev->of_node,
				    "allwinner,sun8i-a33-display-backend"))
		sun4i_backend_free_sat(dev);

	clk_disable_unprepare(backend->ram_clk);
	clk_rate_exclusive_put(backend->mod_clk);
	clk_disable_unprepare(backend->mod_clk);
	clk_disable_unprepare(backend->bus_clk);
	reset_control_assert(backend->reset);
}

static const struct component_ops sun4i_backend_ops = {
	.bind = sun4i_backend_bind,
	.unbind = sun4i_backend_unbind,
};

static int sun4i_backend_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &sun4i_backend_ops);
}

static int sun4i_backend_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &sun4i_backend_ops);

	return 0;
}

static const struct sun4i_backend_quirks sun4i_backend_quirks = {
	.needs_output_muxing = true,
};

static const struct sun4i_backend_quirks sun5i_backend_quirks = {
};

static const struct sun4i_backend_quirks sun6i_backend_quirks = {
};

static const struct sun4i_backend_quirks sun7i_backend_quirks = {
	.needs_output_muxing = true,
};

static const struct sun4i_backend_quirks sun8i_a33_backend_quirks = {
	.supports_lowest_plane_alpha = true,
};

static const struct sun4i_backend_quirks sun9i_backend_quirks = {
};

static const struct of_device_id sun4i_backend_of_table[] = {
	{
		.compatible = "allwinner,sun4i-a10-display-backend",
		.data = &sun4i_backend_quirks,
	},
	{
		.compatible = "allwinner,sun5i-a13-display-backend",
		.data = &sun5i_backend_quirks,
	},
	{
		.compatible = "allwinner,sun6i-a31-display-backend",
		.data = &sun6i_backend_quirks,
	},
	{
		.compatible = "allwinner,sun7i-a20-display-backend",
		.data = &sun7i_backend_quirks,
	},
	{
		.compatible = "allwinner,sun8i-a23-display-backend",
		.data = &sun8i_a33_backend_quirks,
	},
	{
		.compatible = "allwinner,sun8i-a33-display-backend",
		.data = &sun8i_a33_backend_quirks,
	},
	{
		.compatible = "allwinner,sun9i-a80-display-backend",
		.data = &sun9i_backend_quirks,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, sun4i_backend_of_table);

static struct platform_driver sun4i_backend_platform_driver = {
	.probe = sun4i_backend_probe,
	.remove = sun4i_backend_remove,
	.driver = {
		.name = "sun4i-backend",
		.of_match_table = sun4i_backend_of_table,
	},
};
module_platform_driver(sun4i_backend_platform_driver);

MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("Allwinner A10 Display Backend Driver");
MODULE_LICENSE("GPL");