// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 NXP.
 */
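
/*
 * CTXLD is the DCSS context loader: a small DMA engine that fetches lists
 * of {value, register offset} pairs from memory and writes them into the
 * DCSS register space. This file manages three such lists: DB (the
 * double-buffered context) and SB_HP/SB_LP (the single-buffered high- and
 * low-priority contexts).
 */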

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dcss-dev.h"

#define DCSS_CTXLD_CONTROL_STATUS	0x0
#define   CTXLD_ENABLE			BIT(0)
#define   ARB_SEL			BIT(1)
#define   RD_ERR_EN			BIT(2)
#define   DB_COMP_EN			BIT(3)
#define   SB_HP_COMP_EN			BIT(4)
#define   SB_LP_COMP_EN			BIT(5)
#define   DB_PEND_SB_REC_EN		BIT(6)
#define   SB_PEND_DISP_ACTIVE_EN	BIT(7)
#define   AHB_ERR_EN			BIT(8)
#define   RD_ERR			BIT(16)
#define   DB_COMP			BIT(17)
#define   SB_HP_COMP			BIT(18)
#define   SB_LP_COMP			BIT(19)
#define   DB_PEND_SB_REC		BIT(20)
#define   SB_PEND_DISP_ACTIVE		BIT(21)
#define   AHB_ERR			BIT(22)
#define DCSS_CTXLD_DB_BASE_ADDR		0x10
#define DCSS_CTXLD_DB_COUNT		0x14
#define DCSS_CTXLD_SB_BASE_ADDR		0x18
#define DCSS_CTXLD_SB_COUNT		0x1C
#define   SB_HP_COUNT_POS		0
#define   SB_HP_COUNT_MASK		0xffff
#define   SB_LP_COUNT_POS		16
#define   SB_LP_COUNT_MASK		0xffff0000
#define DCSS_AHB_ERR_ADDR		0x20

#define CTXLD_IRQ_COMPLETION	(DB_COMP | SB_HP_COMP | SB_LP_COMP)
#define CTXLD_IRQ_ERROR		(RD_ERR | DB_PEND_SB_REC | AHB_ERR)

/* The following sizes are in context loader entries, 8 bytes each. */
#define CTXLD_DB_CTX_ENTRIES		1024	/* max 65536 */
#define CTXLD_SB_LP_CTX_ENTRIES		10240	/* max 65536 */
#define CTXLD_SB_HP_CTX_ENTRIES		20000	/* max 65536 */
#define CTXLD_SB_CTX_ENTRIES		(CTXLD_SB_LP_CTX_ENTRIES + \
					 CTXLD_SB_HP_CTX_ENTRIES)

/*
 * Sizes, in entries, of the DB, SB_HP and SB_LP context regions, indexed
 * by the CTX_DB, CTX_SB_HP and CTX_SB_LP identifiers from dcss-dev.h.
 */
static u16 dcss_ctxld_ctx_size[3] = {
	CTXLD_DB_CTX_ENTRIES,
	CTXLD_SB_HP_CTX_ENTRIES,
	CTXLD_SB_LP_CTX_ENTRIES
};

/*
 * A single entry in a context loader buffer: the value to write and the
 * DCSS register offset to write it to.
 */
struct dcss_ctxld_item {
	u32 val;
	u32 ofs;
};

#define CTX_ITEM_SIZE			sizeof(struct dcss_ctxld_item)

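/*
 * Two copies of each context buffer are kept so that one can be filled
 * through dcss_ctxld_write() while the other may still be in flight.
 * current_ctx selects the copy being filled; dcss_ctxld_enable_locked()
 * submits that copy to the hardware and then toggles current_ctx.
 */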
struct dcss_ctxld {
	struct device *dev;
	void __iomem *ctxld_reg;
	int irq;
	bool irq_en;

	struct dcss_ctxld_item *db[2];
	struct dcss_ctxld_item *sb_hp[2];
	struct dcss_ctxld_item *sb_lp[2];

	dma_addr_t db_paddr[2];
	dma_addr_t sb_paddr[2];

	u16 ctx_size[2][3];	/* holds the sizes of DB, SB_HP and SB_LP ctx */
	u8 current_ctx;

	bool in_use;
	bool armed;

	spinlock_t lock; /* protects concurrent access to private data */
};

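/*
 * A completion interrupt with CTXLD_ENABLE already cleared means the loader
 * has finished fetching a context: drop in_use so a new load can be kicked
 * and let the core know via disable_callback (set up elsewhere in the
 * driver), e.g. to complete a pending shutdown. Errors are only reported;
 * the status bits are cleared either way.
 */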
static irqreturn_t dcss_ctxld_irq_handler(int irq, void *data)
{
	struct dcss_ctxld *ctxld = data;
	struct dcss_dev *dcss = dcss_drv_dev_to_dcss(ctxld->dev);
	u32 irq_status;

	irq_status = dcss_readl(ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);

	if (irq_status & CTXLD_IRQ_COMPLETION &&
	    !(irq_status & CTXLD_ENABLE) && ctxld->in_use) {
		ctxld->in_use = false;

		if (dcss && dcss->disable_callback)
			dcss->disable_callback(dcss);
	} else if (irq_status & CTXLD_IRQ_ERROR) {
		/*
		 * Except for throwing an error message and clearing the status
		 * register, there's not much we can do here.
		 */
		dev_err(ctxld->dev, "ctxld: error encountered: %08x\n",
			irq_status);
		dev_err(ctxld->dev, "ctxld: db=%d, sb_hp=%d, sb_lp=%d\n",
			ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_DB],
			ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_SB_HP],
			ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_SB_LP]);
	}

	dcss_clr(irq_status & (CTXLD_IRQ_ERROR | CTXLD_IRQ_COMPLETION),
		 ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);

	return IRQ_HANDLED;
}

static int dcss_ctxld_irq_config(struct dcss_ctxld *ctxld,
				 struct platform_device *pdev)
{
	int ret;

	ctxld->irq = platform_get_irq_byname(pdev, "ctxld");
	if (ctxld->irq < 0)
		return ctxld->irq;

	ret = request_irq(ctxld->irq, dcss_ctxld_irq_handler,
			  0, "dcss_ctxld", ctxld);
	if (ret) {
		dev_err(ctxld->dev, "ctxld: irq request failed.\n");
		return ret;
	}

	ctxld->irq_en = true;

	return 0;
}

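/*
 * Enable the status interrupts this driver handles (read error, SB_HP
 * completion, DB_PEND_SB_REC and AHB error), clearing any stale RD_ERR
 * and AHB_ERR status in the same write.
 */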
static void dcss_ctxld_hw_cfg(struct dcss_ctxld *ctxld)
{
	dcss_writel(RD_ERR_EN | SB_HP_COMP_EN |
		    DB_PEND_SB_REC_EN | AHB_ERR_EN | RD_ERR | AHB_ERR,
		    ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);
}

static void dcss_ctxld_free_ctx(struct dcss_ctxld *ctxld)
{
	struct dcss_ctxld_item *ctx;
	int i;

	for (i = 0; i < 2; i++) {
		if (ctxld->db[i]) {
			dma_free_coherent(ctxld->dev,
					  CTXLD_DB_CTX_ENTRIES * sizeof(*ctx),
					  ctxld->db[i], ctxld->db_paddr[i]);
			ctxld->db[i] = NULL;
			ctxld->db_paddr[i] = 0;
		}

		if (ctxld->sb_hp[i]) {
			dma_free_coherent(ctxld->dev,
					  CTXLD_SB_CTX_ENTRIES * sizeof(*ctx),
					  ctxld->sb_hp[i], ctxld->sb_paddr[i]);
			ctxld->sb_hp[i] = NULL;
			ctxld->sb_paddr[i] = 0;
		}
	}
}

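/*
 * Allocate both copies of each context from coherent DMA memory. SB_HP and
 * SB_LP share a single allocation, with SB_LP starting
 * CTXLD_SB_HP_CTX_ENTRIES entries in; dcss_ctxld_enable_locked() relies on
 * this layout when it compacts the two into one contiguous list.
 */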
static int dcss_ctxld_alloc_ctx(struct dcss_ctxld *ctxld)
{
	struct dcss_ctxld_item *ctx;
	int i;

	for (i = 0; i < 2; i++) {
		ctx = dma_alloc_coherent(ctxld->dev,
					 CTXLD_DB_CTX_ENTRIES * sizeof(*ctx),
					 &ctxld->db_paddr[i], GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;

		ctxld->db[i] = ctx;

		ctx = dma_alloc_coherent(ctxld->dev,
					 CTXLD_SB_CTX_ENTRIES * sizeof(*ctx),
					 &ctxld->sb_paddr[i], GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;

		ctxld->sb_hp[i] = ctx;
		ctxld->sb_lp[i] = ctx + CTXLD_SB_HP_CTX_ENTRIES;
	}

	return 0;
}

int dcss_ctxld_init(struct dcss_dev *dcss, unsigned long ctxld_base)
{
	struct dcss_ctxld *ctxld;
	int ret;

	ctxld = kzalloc(sizeof(*ctxld), GFP_KERNEL);
	if (!ctxld)
		return -ENOMEM;

	dcss->ctxld = ctxld;
	ctxld->dev = dcss->dev;

	spin_lock_init(&ctxld->lock);

	ret = dcss_ctxld_alloc_ctx(ctxld);
	if (ret) {
		dev_err(dcss->dev, "ctxld: cannot allocate context memory.\n");
		goto err;
	}

	ctxld->ctxld_reg = ioremap(ctxld_base, SZ_4K);
	if (!ctxld->ctxld_reg) {
		dev_err(dcss->dev, "ctxld: unable to remap ctxld base\n");
		ret = -ENOMEM;
		goto err;
	}

	ret = dcss_ctxld_irq_config(ctxld, to_platform_device(dcss->dev));
	if (ret)
		goto err_irq;

	dcss_ctxld_hw_cfg(ctxld);

	return 0;

err_irq:
	iounmap(ctxld->ctxld_reg);

err:
	dcss_ctxld_free_ctx(ctxld);
	kfree(ctxld);

	return ret;
}

void dcss_ctxld_exit(struct dcss_ctxld *ctxld)
{
	free_irq(ctxld->irq, ctxld);

	if (ctxld->ctxld_reg)
		iounmap(ctxld->ctxld_reg);

	dcss_ctxld_free_ctx(ctxld);
	kfree(ctxld);
}

static int dcss_ctxld_enable_locked(struct dcss_ctxld *ctxld)
{
	int curr_ctx = ctxld->current_ctx;
	u32 db_base, sb_base, sb_count;
	u32 sb_hp_cnt, sb_lp_cnt, db_cnt;
	struct dcss_dev *dcss = dcss_drv_dev_to_dcss(ctxld->dev);

	if (!dcss)
		return 0;

	dcss_dpr_write_sysctrl(dcss->dpr);

	dcss_scaler_write_sclctrl(dcss->scaler);

	sb_hp_cnt = ctxld->ctx_size[curr_ctx][CTX_SB_HP];
	sb_lp_cnt = ctxld->ctx_size[curr_ctx][CTX_SB_LP];
	db_cnt = ctxld->ctx_size[curr_ctx][CTX_DB];

	/*
	 * The hardware fetches the SB buffer as one contiguous list, so make
	 * sure the SB_LP entries immediately follow the used SB_HP ones.
	 */
	if (sb_lp_cnt &&
	    ctxld->sb_lp[curr_ctx] != ctxld->sb_hp[curr_ctx] + sb_hp_cnt) {
		struct dcss_ctxld_item *sb_lp_adjusted;

		sb_lp_adjusted = ctxld->sb_hp[curr_ctx] + sb_hp_cnt;

		memcpy(sb_lp_adjusted, ctxld->sb_lp[curr_ctx],
		       sb_lp_cnt * CTX_ITEM_SIZE);
	}

	db_base = db_cnt ? ctxld->db_paddr[curr_ctx] : 0;

	dcss_writel(db_base, ctxld->ctxld_reg + DCSS_CTXLD_DB_BASE_ADDR);
	dcss_writel(db_cnt, ctxld->ctxld_reg + DCSS_CTXLD_DB_COUNT);

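	/*
	 * The SB count register packs the HP count into bits [15:0] and the
	 * LP count into bits [31:16]. With no HP entries, the LP entries
	 * start at the very beginning of the SB buffer (see the memcpy
	 * above), so their count goes into the HP field instead.
	 */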
	if (sb_hp_cnt)
		sb_count = ((sb_hp_cnt << SB_HP_COUNT_POS) & SB_HP_COUNT_MASK) |
			   ((sb_lp_cnt << SB_LP_COUNT_POS) & SB_LP_COUNT_MASK);
	else
		sb_count = (sb_lp_cnt << SB_HP_COUNT_POS) & SB_HP_COUNT_MASK;

	sb_base = sb_count ? ctxld->sb_paddr[curr_ctx] : 0;

	dcss_writel(sb_base, ctxld->ctxld_reg + DCSS_CTXLD_SB_BASE_ADDR);
	dcss_writel(sb_count, ctxld->ctxld_reg + DCSS_CTXLD_SB_COUNT);

	/* enable the context loader */
	dcss_set(CTXLD_ENABLE, ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);

	ctxld->in_use = true;

	/*
	 * Toggle the current context to the alternate one so that any updates
	 * in the modules' settings take place there.
	 */
	ctxld->current_ctx ^= 1;

	ctxld->ctx_size[ctxld->current_ctx][CTX_DB] = 0;
	ctxld->ctx_size[ctxld->current_ctx][CTX_SB_HP] = 0;
	ctxld->ctx_size[ctxld->current_ctx][CTX_SB_LP] = 0;

	return 0;
}

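/*
 * Arm the context loader. The actual load is deferred to
 * dcss_ctxld_kick(), which is expected to run from the display interrupt
 * path so that contexts are switched on a frame boundary.
 */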
int dcss_ctxld_enable(struct dcss_ctxld *ctxld)
{
	spin_lock_irq(&ctxld->lock);
	ctxld->armed = true;
	spin_unlock_irq(&ctxld->lock);

	return 0;
}

void dcss_ctxld_kick(struct dcss_ctxld *ctxld)
{
	unsigned long flags;

	spin_lock_irqsave(&ctxld->lock, flags);
	if (ctxld->armed && !ctxld->in_use) {
		ctxld->armed = false;
		dcss_ctxld_enable_locked(ctxld);
	}
	spin_unlock_irqrestore(&ctxld->lock, flags);
}

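/*
 * Queue a single register write into the context currently being filled.
 * The caller must already hold ctxld->lock (see dcss_ctxld_write() for the
 * locked wrapper and dcss_ctxld_assert_locked()); writes that would
 * overflow the fixed context size are dropped with a warning.
 */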
void dcss_ctxld_write_irqsafe(struct dcss_ctxld *ctxld, u32 ctx_id, u32 val,
			      u32 reg_ofs)
{
	int curr_ctx = ctxld->current_ctx;
	struct dcss_ctxld_item *ctx[] = {
		[CTX_DB] = ctxld->db[curr_ctx],
		[CTX_SB_HP] = ctxld->sb_hp[curr_ctx],
		[CTX_SB_LP] = ctxld->sb_lp[curr_ctx]
	};
	int item_idx = ctxld->ctx_size[curr_ctx][ctx_id];

	if (item_idx + 1 > dcss_ctxld_ctx_size[ctx_id]) {
		WARN_ON(1);
		return;
	}

	ctx[ctx_id][item_idx].val = val;
	ctx[ctx_id][item_idx].ofs = reg_ofs;
	ctxld->ctx_size[curr_ctx][ctx_id] += 1;
}

void dcss_ctxld_write(struct dcss_ctxld *ctxld, u32 ctx_id,
		      u32 val, u32 reg_ofs)
{
	spin_lock_irq(&ctxld->lock);
	dcss_ctxld_write_irqsafe(ctxld, ctx_id, val, reg_ofs);
	spin_unlock_irq(&ctxld->lock);
}

bool dcss_ctxld_is_flushed(struct dcss_ctxld *ctxld)
{
	return ctxld->ctx_size[ctxld->current_ctx][CTX_DB] == 0 &&
	       ctxld->ctx_size[ctxld->current_ctx][CTX_SB_HP] == 0 &&
	       ctxld->ctx_size[ctxld->current_ctx][CTX_SB_LP] == 0;
}

int dcss_ctxld_resume(struct dcss_ctxld *ctxld)
{
	dcss_ctxld_hw_cfg(ctxld);

	if (!ctxld->irq_en) {
		enable_irq(ctxld->irq);
		ctxld->irq_en = true;
	}

	return 0;
}

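/*
 * Flush any context that was built but not yet loaded before suspending:
 * kick the loader and poll in_use for up to 500ms, then mask the interrupt
 * and restart from context 0.
 */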
int dcss_ctxld_suspend(struct dcss_ctxld *ctxld)
{
	int ret = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);

	if (!dcss_ctxld_is_flushed(ctxld)) {
		dcss_ctxld_kick(ctxld);

		while (!time_after(jiffies, timeout) && ctxld->in_use)
			msleep(20);

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
	}

	spin_lock_irq(&ctxld->lock);

	if (ctxld->irq_en) {
		disable_irq_nosync(ctxld->irq);
		ctxld->irq_en = false;
	}

	/* reset context region and sizes */
	ctxld->current_ctx = 0;
	ctxld->ctx_size[0][CTX_DB] = 0;
	ctxld->ctx_size[0][CTX_SB_HP] = 0;
	ctxld->ctx_size[0][CTX_SB_LP] = 0;

	spin_unlock_irq(&ctxld->lock);

	return ret;
}

void dcss_ctxld_assert_locked(struct dcss_ctxld *ctxld)
{
	lockdep_assert_held(&ctxld->lock);
}