// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022 MediaTek Inc.
 * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/remoteproc.h>
#include <linux/remoteproc/mtk_scp.h>
#include <media/videobuf2-dma-contig.h>

#include "mtk-mdp3-core.h"
#include "mtk-mdp3-cfg.h"
#include "mtk-mdp3-m2m.h"

static const struct of_device_id mdp_of_ids[] = {
	{ .compatible = "mediatek,mt8183-mdp3-rdma",
	  .data = &mt8183_mdp_driver_data,
	},
	{ .compatible = "mediatek,mt8188-mdp3-rdma",
	  .data = &mt8188_mdp_driver_data,
	},
	{ .compatible = "mediatek,mt8195-mdp3-rdma",
	  .data = &mt8195_mdp_driver_data,
	},
	{ .compatible = "mediatek,mt8195-mdp3-wrot",
	  .data = &mt8195_mdp_driver_data,
	},
	{},
};
MODULE_DEVICE_TABLE(of, mdp_of_ids);

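/*
 * Look up the platform device of an MDP infrastructure block by its
 * mdp_infra_id, using the compatible string from the per-SoC
 * mdp_probe_infra table. The search can continue from a previously
 * found device ("from") so that multiple instances can be enumerated;
 * the returned device carries a reference taken by
 * of_find_device_by_node().
 */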
static struct platform_device *__get_pdev_by_id(struct platform_device *pdev,
						struct platform_device *from,
						enum mdp_infra_id id)
{
	struct device_node *node, *f = NULL;
	struct platform_device *mdp_pdev = NULL;
	const struct mtk_mdp_driver_data *mdp_data;
	const char *compat;

	if (!pdev)
		return NULL;

	if (id < MDP_INFRA_MMSYS || id >= MDP_INFRA_MAX) {
		dev_err(&pdev->dev, "Illegal infra id %d\n", id);
		return NULL;
	}

	mdp_data = of_device_get_match_data(&pdev->dev);
	if (!mdp_data) {
		dev_err(&pdev->dev, "have no driver data to find node\n");
		return NULL;
	}

	compat = mdp_data->mdp_probe_infra[id].compatible;
	if (strlen(compat) == 0)
		return NULL;

	if (from)
		f = from->dev.of_node;
	node = of_find_compatible_node(f, NULL, compat);
	if (WARN_ON(!node)) {
		dev_err(&pdev->dev, "find node from id %d failed\n", id);
		return NULL;
	}

	mdp_pdev = of_find_device_by_node(node);
	of_node_put(node);
	if (WARN_ON(!mdp_pdev)) {
		dev_err(&pdev->dev, "find pdev from id %d failed\n", id);
		return NULL;
	}

	return mdp_pdev;
}

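/*
 * Reference-counted VPU bring-up: the first user boots the SCP remote
 * processor, registers the MDP VPU and initializes the shared VPU
 * context. On any failure the reference count is rolled back.
 */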
int mdp_vpu_get_locked(struct mdp_dev *mdp)
{
	int ret = 0;

	if (mdp->vpu_count++ == 0) {
		ret = rproc_boot(mdp->rproc_handle);
		if (ret) {
			dev_err(&mdp->pdev->dev,
				"vpu_load_firmware failed %d\n", ret);
			goto err_load_vpu;
		}
		ret = mdp_vpu_register(mdp);
		if (ret) {
			dev_err(&mdp->pdev->dev,
				"mdp_vpu register failed %d\n", ret);
			goto err_reg_vpu;
		}
		ret = mdp_vpu_dev_init(&mdp->vpu, mdp->scp, &mdp->vpu_lock);
		if (ret) {
			dev_err(&mdp->pdev->dev,
				"mdp_vpu device init failed %d\n", ret);
			goto err_init_vpu;
		}
	}
	return 0;

err_init_vpu:
	mdp_vpu_unregister(mdp);
err_reg_vpu:
err_load_vpu:
	mdp->vpu_count--;
	return ret;
}

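/*
 * Drop a VPU reference; the last user tears down the VPU context and
 * unregisters the MDP VPU.
 */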
void mdp_vpu_put_locked(struct mdp_dev *mdp)
{
	if (--mdp->vpu_count == 0) {
		mdp_vpu_dev_deinit(&mdp->vpu);
		mdp_vpu_unregister(mdp);
	}
}

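/*
 * Release callback of the MDP video device: undoes what mdp_probe() set
 * up (CMDQ mailbox clients, SCP reference, workqueues, runtime PM, DMA
 * segment size, components, MUTEX handles, VPU shared memory and the
 * m2m device) and frees the driver context.
 */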
void mdp_video_device_release(struct video_device *vdev)
{
	struct mdp_dev *mdp = (struct mdp_dev *)video_get_drvdata(vdev);
	int i;

	for (i = 0; i < mdp->mdp_data->pp_used; i++)
		if (mdp->cmdq_clt[i])
			cmdq_mbox_destroy(mdp->cmdq_clt[i]);

	scp_put(mdp->scp);

	destroy_workqueue(mdp->job_wq);
	destroy_workqueue(mdp->clock_wq);

	pm_runtime_disable(&mdp->pdev->dev);

	vb2_dma_contig_clear_max_seg_size(&mdp->pdev->dev);

	mdp_comp_destroy(mdp);
	for (i = 0; i < mdp->mdp_data->pipe_info_len; i++) {
		enum mdp_mm_subsys_id idx;
		struct mtk_mutex *m;
		u32 m_id;

		idx = mdp->mdp_data->pipe_info[i].sub_id;
		m_id = mdp->mdp_data->pipe_info[i].mutex_id;
		m = mdp->mm_subsys[idx].mdp_mutex[m_id];
		if (!IS_ERR_OR_NULL(m))
			mtk_mutex_put(m);
	}

	mdp_vpu_shared_mem_free(&mdp->vpu);
	v4l2_m2m_release(mdp->m2m_dev);
	kfree(mdp);
}

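/*
 * Resolve the MMSYS or MUTEX device for each multimedia subsystem and
 * store it in mdp->mm_subsys[]. Entries without a compatible string in
 * mdp_probe_infra are skipped.
 */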
static int mdp_mm_subsys_deploy(struct mdp_dev *mdp, enum mdp_infra_id id)
{
	struct platform_device *mm_pdev = NULL;
	struct device **dev;
	int i;

	if (!mdp)
		return -EINVAL;

	for (i = 0; i < MDP_MM_SUBSYS_MAX; i++) {
		const char *compat;
		enum mdp_infra_id sub_id = id + i;

		switch (id) {
		case MDP_INFRA_MMSYS:
			dev = &mdp->mm_subsys[i].mmsys;
			break;
		case MDP_INFRA_MUTEX:
			dev = &mdp->mm_subsys[i].mutex;
			break;
		default:
			dev_err(&mdp->pdev->dev, "Unknown infra id %d", id);
			return -EINVAL;
		}

		/*
		 * Not every chip has multiple multimedia subsystems, so
		 * the config may be null.
		 */
		compat = mdp->mdp_data->mdp_probe_infra[sub_id].compatible;
		if (strlen(compat) == 0)
			continue;

		mm_pdev = __get_pdev_by_id(mdp->pdev, mm_pdev, sub_id);
		if (WARN_ON(!mm_pdev))
			return -ENODEV;

		*dev = &mm_pdev->dev;
	}

	return 0;
}

static int mdp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mdp_dev *mdp;
	struct platform_device *mm_pdev;
	struct resource *res;
	int ret, i, mutex_id;

	mdp = kzalloc(sizeof(*mdp), GFP_KERNEL);
	if (!mdp) {
		ret = -ENOMEM;
		goto err_return;
	}

	mdp->pdev = pdev;
	mdp->mdp_data = of_device_get_match_data(&pdev->dev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENODEV;
		goto err_destroy_device;
	}

	/*
	 * Only the instance whose register base matches mdp_con_res carries
	 * out the full initialization; the other matched devices just store
	 * the driver context and return.
	 */
	if (res->start != mdp->mdp_data->mdp_con_res) {
		platform_set_drvdata(pdev, mdp);
		goto success_return;
	}

	ret = mdp_mm_subsys_deploy(mdp, MDP_INFRA_MMSYS);
	if (ret)
		goto err_destroy_device;

	ret = mdp_mm_subsys_deploy(mdp, MDP_INFRA_MUTEX);
	if (ret)
		goto err_destroy_device;

	for (i = 0; i < mdp->mdp_data->pipe_info_len; i++) {
		enum mdp_mm_subsys_id idx;
		struct mtk_mutex **m;

		idx = mdp->mdp_data->pipe_info[i].sub_id;
		mutex_id = mdp->mdp_data->pipe_info[i].mutex_id;
		m = &mdp->mm_subsys[idx].mdp_mutex[mutex_id];

		if (!IS_ERR_OR_NULL(*m))
			continue;

		*m = mtk_mutex_get(mdp->mm_subsys[idx].mutex);
		if (IS_ERR(*m)) {
			ret = PTR_ERR(*m);
			goto err_free_mutex;
		}
	}

	ret = mdp_comp_config(mdp);
	if (ret) {
		dev_err(dev, "Failed to config mdp components\n");
		goto err_free_mutex;
	}

	mdp->job_wq = alloc_workqueue(MDP_MODULE_NAME, WQ_FREEZABLE, 0);
	if (!mdp->job_wq) {
		dev_err(dev, "Unable to create job workqueue\n");
		ret = -ENOMEM;
		goto err_deinit_comp;
	}

	mdp->clock_wq = alloc_workqueue(MDP_MODULE_NAME "-clock", WQ_FREEZABLE,
					0);
	if (!mdp->clock_wq) {
		dev_err(dev, "Unable to create clock workqueue\n");
		ret = -ENOMEM;
		goto err_destroy_job_wq;
	}

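	/*
	 * Obtain the SCP handle; if scp_get() finds nothing, fall back to
	 * the SCP platform device listed in mdp_probe_infra and use its
	 * driver data.
	 */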
	mdp->scp = scp_get(pdev);
	if (!mdp->scp) {
		mm_pdev = __get_pdev_by_id(pdev, NULL, MDP_INFRA_SCP);
		if (WARN_ON(!mm_pdev)) {
			dev_err(&pdev->dev, "Could not get scp device\n");
			ret = -ENODEV;
			goto err_destroy_clock_wq;
		}
		mdp->scp = platform_get_drvdata(mm_pdev);
	}

	mdp->rproc_handle = scp_get_rproc(mdp->scp);
	dev_dbg(&pdev->dev, "MDP rproc_handle: %pK", mdp->rproc_handle);

	mutex_init(&mdp->vpu_lock);
	mutex_init(&mdp->m2m_lock);

	for (i = 0; i < mdp->mdp_data->pp_used; i++) {
		mdp->cmdq_clt[i] = cmdq_mbox_create(dev, i);
		if (IS_ERR(mdp->cmdq_clt[i])) {
			ret = PTR_ERR(mdp->cmdq_clt[i]);
			goto err_mbox_destroy;
		}

		mdp->cmdq_shift_pa[i] = cmdq_get_shift_pa(mdp->cmdq_clt[i]->chan);
	}

	init_waitqueue_head(&mdp->callback_wq);
	ida_init(&mdp->mdp_ida);
	platform_set_drvdata(pdev, mdp);

	vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));

	ret = v4l2_device_register(dev, &mdp->v4l2_dev);
	if (ret) {
		dev_err(dev, "Failed to register v4l2 device\n");
		ret = -EINVAL;
		goto err_mbox_destroy;
	}

	ret = mdp_m2m_device_register(mdp);
	if (ret) {
		v4l2_err(&mdp->v4l2_dev, "Failed to register m2m device\n");
		goto err_unregister_device;
	}

success_return:
	dev_dbg(dev, "mdp-%d registered successfully\n", pdev->id);
	return 0;

err_unregister_device:
	v4l2_device_unregister(&mdp->v4l2_dev);
err_mbox_destroy:
	while (--i >= 0)
		cmdq_mbox_destroy(mdp->cmdq_clt[i]);
	scp_put(mdp->scp);
err_destroy_clock_wq:
	destroy_workqueue(mdp->clock_wq);
err_destroy_job_wq:
	destroy_workqueue(mdp->job_wq);
err_deinit_comp:
	mdp_comp_destroy(mdp);
err_free_mutex:
	for (i = 0; i < mdp->mdp_data->pipe_info_len; i++) {
		enum mdp_mm_subsys_id idx;
		struct mtk_mutex *m;

		idx = mdp->mdp_data->pipe_info[i].sub_id;
		mutex_id = mdp->mdp_data->pipe_info[i].mutex_id;
		m = mdp->mm_subsys[idx].mdp_mutex[mutex_id];
		if (!IS_ERR_OR_NULL(m))
			mtk_mutex_put(m);
	}
err_destroy_device:
	kfree(mdp);
err_return:
	dev_dbg(dev, "Errno %d\n", ret);
	return ret;
}

static void mdp_remove(struct platform_device *pdev)
{
	struct mdp_dev *mdp = platform_get_drvdata(pdev);

	v4l2_device_unregister(&mdp->v4l2_dev);

	dev_dbg(&pdev->dev, "%s driver unloaded\n", pdev->name);
}

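/*
 * System sleep: mark the device suspended and give in-flight CMDQ jobs
 * up to two seconds to complete before allowing suspend.
 */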
static int __maybe_unused mdp_suspend(struct device *dev)
{
	struct mdp_dev *mdp = dev_get_drvdata(dev);
	int ret;

	atomic_set(&mdp->suspended, 1);

	if (refcount_read(&mdp->job_count)) {
		ret = wait_event_timeout(mdp->callback_wq,
					 !refcount_read(&mdp->job_count),
					 2 * HZ);
		if (ret == 0) {
			dev_err(dev,
				"%s:flushed cmdq task incomplete, count=%d\n",
				__func__, refcount_read(&mdp->job_count));
			return -EBUSY;
		}
	}

	return 0;
}

static int __maybe_unused mdp_resume(struct device *dev)
{
	struct mdp_dev *mdp = dev_get_drvdata(dev);

	atomic_set(&mdp->suspended, 0);

	return 0;
}

static const struct dev_pm_ops mdp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mdp_suspend, mdp_resume)
};

static struct platform_driver mdp_driver = {
	.probe		= mdp_probe,
	.remove		= mdp_remove,
	.driver = {
		.name	= MDP_MODULE_NAME,
		.pm	= &mdp_pm_ops,
		.of_match_table = mdp_of_ids,
	},
};

module_platform_driver(mdp_driver);

MODULE_AUTHOR("Ping-Hsun Wu <ping-hsun.wu@mediatek.com>");
MODULE_DESCRIPTION("MediaTek image processor 3 driver");
MODULE_LICENSE("GPL");