// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2020-2021 NXP
 */

#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/ioctl.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/firmware.h>
#include <linux/vmalloc.h>
#include "vpu.h"
#include "vpu_defs.h"
#include "vpu_core.h"
#include "vpu_mbox.h"
#include "vpu_msgs.h"
#include "vpu_rpc.h"
#include "vpu_cmds.h"

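/* Thin wrappers around MMIO access to the VPU control/status registers. */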
void csr_writel(struct vpu_core *core, u32 reg, u32 val)
{
	writel(val, core->base + reg);
}

u32 csr_readl(struct vpu_core *core, u32 reg)
{
	return readl(core->base + reg);
}

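/*
 * Copy the requested firmware image into the preallocated boot buffer and
 * notify the interface layer that the image is in place.
 */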
static int vpu_core_load_firmware(struct vpu_core *core)
{
	const struct firmware *pfw = NULL;
	int ret = 0;

	if (!core->fw.virt) {
		dev_err(core->dev, "firmware buffer is not ready\n");
		return -EINVAL;
	}

	ret = request_firmware(&pfw, core->res->fwname, core->dev);
	dev_dbg(core->dev, "request_firmware %s : %d\n", core->res->fwname, ret);
	if (ret) {
		dev_err(core->dev, "request firmware %s failed, ret = %d\n",
			core->res->fwname, ret);
		return ret;
	}

	if (core->fw.length < pfw->size) {
		dev_err(core->dev, "firmware buffer size want %zu, but %d\n",
			pfw->size, core->fw.length);
		ret = -EINVAL;
		goto exit;
	}

	memset(core->fw.virt, 0, core->fw.length);
	memcpy(core->fw.virt, pfw->data, pfw->size);
	core->fw.bytesused = pfw->size;
	ret = vpu_iface_on_firmware_loaded(core);
exit:
	release_firmware(pfw);
	pfw = NULL;

	return ret;
}

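/*
 * Called once the firmware is up: read back the firmware version, derive how
 * many instances this core can host and mark the core active.
 */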
static int vpu_core_boot_done(struct vpu_core *core)
{
	u32 fw_version;

	fw_version = vpu_iface_get_version(core);
	dev_info(core->dev, "%s firmware version : %d.%d.%d\n",
		 vpu_core_type_desc(core->type),
		 (fw_version >> 16) & 0xff,
		 (fw_version >> 8) & 0xff,
		 fw_version & 0xff);
	core->supported_instance_count = vpu_iface_get_max_instance_count(core);
	if (core->res->act_size) {
		u32 count = core->act.length / core->res->act_size;

		core->supported_instance_count = min(core->supported_instance_count, count);
	}
	core->fw_version = fw_version;
	vpu_core_set_state(core, VPU_CORE_ACTIVE);

	return 0;
}

static int vpu_core_wait_boot_done(struct vpu_core *core)
{
	int ret;

	ret = wait_for_completion_timeout(&core->cmp, VPU_TIMEOUT);
	if (!ret) {
		dev_err(core->dev, "boot timeout\n");
		return -EINVAL;
	}
	return vpu_core_boot_done(core);
}

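/*
 * Boot the core: optionally (re)load the firmware image, start the core via
 * the interface layer and wait for the boot-done completion.
 */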
static int vpu_core_boot(struct vpu_core *core, bool load)
{
	int ret;

	reinit_completion(&core->cmp);
	if (load) {
		ret = vpu_core_load_firmware(core);
		if (ret)
			return ret;
	}

	vpu_iface_boot_core(core);
	return vpu_core_wait_boot_done(core);
}

static int vpu_core_shutdown(struct vpu_core *core)
{
	return vpu_iface_shutdown_core(core);
}

static int vpu_core_restore(struct vpu_core *core)
{
	int ret;

	ret = vpu_core_sw_reset(core);
	if (ret)
		return ret;

	vpu_core_boot_done(core);
	return vpu_iface_restore_core(core);
}

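/*
 * Coherent DMA buffer helpers shared by the core and its instances;
 * GFP_DMA32 restricts the buffers to 32-bit addressable memory.
 */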
static int __vpu_alloc_dma(struct device *dev, struct vpu_buffer *buf)
{
	gfp_t gfp = GFP_KERNEL | GFP_DMA32;

	if (!buf->length)
		return 0;

	buf->virt = dma_alloc_coherent(dev, buf->length, &buf->phys, gfp);
	if (!buf->virt)
		return -ENOMEM;

	buf->dev = dev;

	return 0;
}

void vpu_free_dma(struct vpu_buffer *buf)
{
	if (!buf->virt || !buf->dev)
		return;

	dma_free_coherent(buf->dev, buf->length, buf->virt, buf->phys);
	buf->virt = NULL;
	buf->phys = 0;
	buf->length = 0;
	buf->bytesused = 0;
	buf->dev = NULL;
}

int vpu_alloc_dma(struct vpu_core *core, struct vpu_buffer *buf)
{
	return __vpu_alloc_dma(core->dev, buf);
}

void vpu_core_set_state(struct vpu_core *core, enum vpu_core_state state)
{
	if (state != core->state)
		vpu_trace(core->dev, "vpu core state change from %d to %d\n", core->state, state);
	core->state = state;
	if (core->state == VPU_CORE_DEINIT)
		core->hang_mask = 0;
}

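/*
 * Re-evaluate the core state from the firmware power state and the hang
 * mask; called with core->lock held.
 */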
static void vpu_core_update_state(struct vpu_core *core)
{
	if (!vpu_iface_get_power_state(core)) {
		if (core->request_count)
			vpu_core_set_state(core, VPU_CORE_HANG);
		else
			vpu_core_set_state(core, VPU_CORE_DEINIT);

	} else if (core->state == VPU_CORE_ACTIVE && core->hang_mask) {
		vpu_core_set_state(core, VPU_CORE_HANG);
	}
}

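/*
 * Pick the most suitable core of the requested type: prefer a core that is
 * still deinitialized, otherwise the active core with the fewest requests.
 */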
static struct vpu_core *vpu_core_find_proper_by_type(struct vpu_dev *vpu, u32 type)
{
	struct vpu_core *core = NULL;
	int request_count = INT_MAX;
	struct vpu_core *c;

	list_for_each_entry(c, &vpu->cores, list) {
		dev_dbg(c->dev, "instance_mask = 0x%lx, state = %d\n", c->instance_mask, c->state);
		if (c->type != type)
			continue;
		mutex_lock(&c->lock);
		vpu_core_update_state(c);
		mutex_unlock(&c->lock);
		if (c->state == VPU_CORE_DEINIT) {
			core = c;
			break;
		}
		if (c->state != VPU_CORE_ACTIVE)
			continue;
		if (c->request_count < request_count) {
			request_count = c->request_count;
			core = c;
		}
		if (!request_count)
			break;
	}

	return core;
}

static bool vpu_core_is_exist(struct vpu_dev *vpu, struct vpu_core *core)
{
	struct vpu_core *c;

	list_for_each_entry(c, &vpu->cores, list) {
		if (c == core)
			return true;
	}

	return false;
}

static void vpu_core_get_vpu(struct vpu_core *core)
{
	core->vpu->get_vpu(core->vpu);
	if (core->type == VPU_CORE_TYPE_ENC)
		core->vpu->get_enc(core->vpu);
	if (core->type == VPU_CORE_TYPE_DEC)
		core->vpu->get_dec(core->vpu);
}

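/*
 * Attach a core to the parent VPU device: set up its message workqueue and
 * kfifo, add it to the core list and take the required references.
 */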
static int vpu_core_register(struct device *dev, struct vpu_core *core)
{
	struct vpu_dev *vpu = dev_get_drvdata(dev);
	int ret = 0;

	dev_dbg(core->dev, "register core %s\n", vpu_core_type_desc(core->type));
	if (vpu_core_is_exist(vpu, core))
		return 0;

	core->workqueue = alloc_workqueue("vpu", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!core->workqueue) {
		dev_err(core->dev, "fail to alloc workqueue\n");
		return -ENOMEM;
	}
	INIT_WORK(&core->msg_work, vpu_msg_run_work);
	INIT_DELAYED_WORK(&core->msg_delayed_work, vpu_msg_delayed_work);
	core->msg_buffer_size = roundup_pow_of_two(VPU_MSG_BUFFER_SIZE);
	core->msg_buffer = vzalloc(core->msg_buffer_size);
	if (!core->msg_buffer) {
		dev_err(core->dev, "failed allocate buffer for fifo\n");
		ret = -ENOMEM;
		goto error;
	}
	ret = kfifo_init(&core->msg_fifo, core->msg_buffer, core->msg_buffer_size);
	if (ret) {
		dev_err(core->dev, "failed init kfifo\n");
		goto error;
	}

	list_add_tail(&core->list, &vpu->cores);
	vpu_core_get_vpu(core);

	return 0;
error:
	if (core->msg_buffer) {
		vfree(core->msg_buffer);
		core->msg_buffer = NULL;
	}
	if (core->workqueue) {
		destroy_workqueue(core->workqueue);
		core->workqueue = NULL;
	}
	return ret;
}

static void vpu_core_put_vpu(struct vpu_core *core)
{
	if (core->type == VPU_CORE_TYPE_ENC)
		core->vpu->put_enc(core->vpu);
	if (core->type == VPU_CORE_TYPE_DEC)
		core->vpu->put_dec(core->vpu);
	core->vpu->put_vpu(core->vpu);
}

static int vpu_core_unregister(struct device *dev, struct vpu_core *core)
{
	list_del_init(&core->list);

	vpu_core_put_vpu(core);
	core->vpu = NULL;
	vfree(core->msg_buffer);
	core->msg_buffer = NULL;

	if (core->workqueue) {
		cancel_work_sync(&core->msg_work);
		cancel_delayed_work_sync(&core->msg_delayed_work);
		destroy_workqueue(core->workqueue);
		core->workqueue = NULL;
	}

	return 0;
}

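/*
 * Instance slots are tracked as bits in core->instance_mask; acquire returns
 * the first free slot index or -EINVAL when the core is fully occupied.
 */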
static int vpu_core_acquire_instance(struct vpu_core *core)
{
	int id;

	id = ffz(core->instance_mask);
	if (id >= core->supported_instance_count)
		return -EINVAL;

	set_bit(id, &core->instance_mask);

	return id;
}

static void vpu_core_release_instance(struct vpu_core *core, int id)
{
	if (id < 0 || id >= core->supported_instance_count)
		return;

	clear_bit(id, &core->instance_mask);
}

struct vpu_inst *vpu_inst_get(struct vpu_inst *inst)
{
	if (!inst)
		return NULL;

	atomic_inc(&inst->ref_count);

	return inst;
}

void vpu_inst_put(struct vpu_inst *inst)
{
	if (!inst)
		return;
	if (atomic_dec_and_test(&inst->ref_count)) {
		if (inst->release)
			inst->release(inst);
	}
}

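/*
 * Hand out a core of the requested type, booting or restoring it on first
 * use; the caller releases it again with vpu_release_core().
 */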
struct vpu_core *vpu_request_core(struct vpu_dev *vpu, enum vpu_core_type type)
{
	struct vpu_core *core = NULL;
	int ret;

	mutex_lock(&vpu->lock);

	core = vpu_core_find_proper_by_type(vpu, type);
	if (!core)
		goto exit;

	mutex_lock(&core->lock);
	pm_runtime_resume_and_get(core->dev);

	if (core->state == VPU_CORE_DEINIT) {
		if (vpu_iface_get_power_state(core))
			ret = vpu_core_restore(core);
		else
			ret = vpu_core_boot(core, true);
		if (ret) {
			pm_runtime_put_sync(core->dev);
			mutex_unlock(&core->lock);
			core = NULL;
			goto exit;
		}
	}

	core->request_count++;

	mutex_unlock(&core->lock);
exit:
	mutex_unlock(&vpu->lock);

	return core;
}

void vpu_release_core(struct vpu_core *core)
{
	if (!core)
		return;

	mutex_lock(&core->lock);
	pm_runtime_put_sync(core->dev);
	if (core->request_count)
		core->request_count--;
	mutex_unlock(&core->lock);
}

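/*
 * Bind an instance to a core (requesting one if needed), allocate a slot id
 * and carve out the instance's slice of the shared "act" buffer.
 */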
int vpu_inst_register(struct vpu_inst *inst)
{
	struct vpu_dev *vpu;
	struct vpu_core *core;
	int ret = 0;

	vpu = inst->vpu;
	core = inst->core;
	if (!core) {
		core = vpu_request_core(vpu, inst->type);
		if (!core) {
			dev_err(vpu->dev, "there is no vpu core for %s\n",
				vpu_core_type_desc(inst->type));
			return -EINVAL;
		}
		inst->core = core;
		inst->dev = get_device(core->dev);
	}

	mutex_lock(&core->lock);
	if (core->state != VPU_CORE_ACTIVE) {
		dev_err(core->dev, "vpu core is not active, state = %d\n", core->state);
		ret = -EINVAL;
		goto exit;
	}

	if (inst->id >= 0 && inst->id < core->supported_instance_count)
		goto exit;

	ret = vpu_core_acquire_instance(core);
	if (ret < 0)
		goto exit;

	vpu_trace(inst->dev, "[%d] %p\n", ret, inst);
	inst->id = ret;
	list_add_tail(&inst->list, &core->instances);
	ret = 0;
	if (core->res->act_size) {
		inst->act.phys = core->act.phys + core->res->act_size * inst->id;
		inst->act.virt = core->act.virt + core->res->act_size * inst->id;
		inst->act.length = core->res->act_size;
	}
	vpu_inst_create_dbgfs_file(inst);
exit:
	mutex_unlock(&core->lock);

	if (ret)
		dev_err(core->dev, "register instance fail\n");
	return ret;
}

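/*
 * Detach an instance from its core and, if this was the last instance on a
 * hung core, attempt a software reset to bring the core back to ACTIVE.
 */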
int vpu_inst_unregister(struct vpu_inst *inst)
{
	struct vpu_core *core;

	if (!inst->core)
		return 0;

	core = inst->core;
	vpu_clear_request(inst);
	mutex_lock(&core->lock);
	if (inst->id >= 0 && inst->id < core->supported_instance_count) {
		vpu_inst_remove_dbgfs_file(inst);
		list_del_init(&inst->list);
		vpu_core_release_instance(core, inst->id);
		inst->id = VPU_INST_NULL_ID;
	}
	vpu_core_update_state(core);
	if (core->state == VPU_CORE_HANG && !core->instance_mask) {
		int err;

		dev_info(core->dev, "reset hang core\n");
		mutex_unlock(&core->lock);
		err = vpu_core_sw_reset(core);
		mutex_lock(&core->lock);
		if (!err) {
			vpu_core_set_state(core, VPU_CORE_ACTIVE);
			core->hang_mask = 0;
		}
	}
	mutex_unlock(&core->lock);

	return 0;
}

struct vpu_inst *vpu_core_find_instance(struct vpu_core *core, u32 index)
{
	struct vpu_inst *inst = NULL;
	struct vpu_inst *tmp;

	mutex_lock(&core->lock);
	if (index >= core->supported_instance_count || !test_bit(index, &core->instance_mask))
		goto exit;
	list_for_each_entry(tmp, &core->instances, list) {
		if (tmp->id == index) {
			inst = vpu_inst_get(tmp);
			break;
		}
	}
exit:
	mutex_unlock(&core->lock);

	return inst;
}

const struct vpu_core_resources *vpu_get_resource(struct vpu_inst *inst)
{
	struct vpu_dev *vpu;
	struct vpu_core *core = NULL;
	const struct vpu_core_resources *res = NULL;

	if (!inst || !inst->vpu)
		return NULL;

	if (inst->core && inst->core->res)
		return inst->core->res;

	vpu = inst->vpu;
	mutex_lock(&vpu->lock);
	list_for_each_entry(core, &vpu->cores, list) {
		if (core->type == inst->type) {
			res = core->res;
			break;
		}
	}
	mutex_unlock(&vpu->lock);

	return res;
}

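/*
 * Parse the two required memory-region phandles (boot/firmware and rpc),
 * map them and split the rpc region into rpc, fwlog and act sub-buffers.
 */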
static int vpu_core_parse_dt(struct vpu_core *core, struct device_node *np)
{
	struct device_node *node;
	struct resource res;
	int ret;

	if (of_count_phandle_with_args(np, "memory-region", NULL) < 2) {
		dev_err(core->dev, "need 2 memory-region for boot and rpc\n");
		return -ENODEV;
	}

	node = of_parse_phandle(np, "memory-region", 0);
	if (!node) {
		dev_err(core->dev, "boot-region of_parse_phandle error\n");
		return -ENODEV;
	}
	if (of_address_to_resource(node, 0, &res)) {
		dev_err(core->dev, "boot-region of_address_to_resource error\n");
		of_node_put(node);
		return -EINVAL;
	}
	core->fw.phys = res.start;
	core->fw.length = resource_size(&res);

	of_node_put(node);

	node = of_parse_phandle(np, "memory-region", 1);
	if (!node) {
		dev_err(core->dev, "rpc-region of_parse_phandle error\n");
		return -ENODEV;
	}
	if (of_address_to_resource(node, 0, &res)) {
		dev_err(core->dev, "rpc-region of_address_to_resource error\n");
		of_node_put(node);
		return -EINVAL;
	}
	core->rpc.phys = res.start;
	core->rpc.length = resource_size(&res);

	if (core->rpc.length < core->res->rpc_size + core->res->fwlog_size) {
		dev_err(core->dev, "the rpc-region <%pad, 0x%x> is not enough\n",
			&core->rpc.phys, core->rpc.length);
		of_node_put(node);
		return -EINVAL;
	}

	core->fw.virt = memremap(core->fw.phys, core->fw.length, MEMREMAP_WC);
	core->rpc.virt = memremap(core->rpc.phys, core->rpc.length, MEMREMAP_WC);
	memset(core->rpc.virt, 0, core->rpc.length);

	ret = vpu_iface_check_memory_region(core, core->rpc.phys, core->rpc.length);
	if (ret != VPU_CORE_MEMORY_UNCACHED) {
		dev_err(core->dev, "rpc region<%pad, 0x%x> isn't uncached\n",
			&core->rpc.phys, core->rpc.length);
		of_node_put(node);
		return -EINVAL;
	}

	core->log.phys = core->rpc.phys + core->res->rpc_size;
	core->log.virt = core->rpc.virt + core->res->rpc_size;
	core->log.length = core->res->fwlog_size;
	core->act.phys = core->log.phys + core->log.length;
	core->act.virt = core->log.virt + core->log.length;
	core->act.length = core->rpc.length - core->res->rpc_size - core->log.length;
	core->rpc.length = core->res->rpc_size;

	of_node_put(node);

	return 0;
}

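/*
 * Per-core platform driver probe: parse DT resources, map registers, set up
 * the mailbox and shared interface area, then register the core with the
 * parent VPU device.
 */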
static int vpu_core_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct vpu_core *core;
	struct vpu_dev *vpu = dev_get_drvdata(dev->parent);
	struct vpu_shared_addr *iface;
	u32 iface_data_size;
	int ret;

	dev_dbg(dev, "probe\n");
	if (!vpu)
		return -EINVAL;
	core = devm_kzalloc(dev, sizeof(*core), GFP_KERNEL);
	if (!core)
		return -ENOMEM;

	core->pdev = pdev;
	core->dev = dev;
	platform_set_drvdata(pdev, core);
	core->vpu = vpu;
	INIT_LIST_HEAD(&core->instances);
	mutex_init(&core->lock);
	mutex_init(&core->cmd_lock);
	init_completion(&core->cmp);
	init_waitqueue_head(&core->ack_wq);
	vpu_core_set_state(core, VPU_CORE_DEINIT);

	core->res = of_device_get_match_data(dev);
	if (!core->res)
		return -ENODEV;

	core->type = core->res->type;
	core->id = of_alias_get_id(dev->of_node, "vpu_core");
	if (core->id < 0) {
		dev_err(dev, "can't get vpu core id\n");
		return core->id;
	}
	dev_info(core->dev, "[%d] = %s\n", core->id, vpu_core_type_desc(core->type));
	ret = vpu_core_parse_dt(core, dev->of_node);
	if (ret)
		return ret;

	core->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(core->base))
		return PTR_ERR(core->base);

	if (!vpu_iface_check_codec(core)) {
		dev_err(core->dev, "is not supported\n");
		return -EINVAL;
	}

	ret = vpu_mbox_init(core);
	if (ret)
		return ret;

	iface = devm_kzalloc(dev, sizeof(*iface), GFP_KERNEL);
	if (!iface)
		return -ENOMEM;

	iface_data_size = vpu_iface_get_data_size(core);
	if (iface_data_size) {
		iface->priv = devm_kzalloc(dev, iface_data_size, GFP_KERNEL);
		if (!iface->priv)
			return -ENOMEM;
	}

	ret = vpu_iface_init(core, iface, &core->rpc, core->fw.phys);
	if (ret) {
		dev_err(core->dev, "init iface fail, ret = %d\n", ret);
		return ret;
	}

	vpu_iface_config_system(core, vpu->res->mreg_base, vpu->base);
	vpu_iface_set_log_buf(core, &core->log);

	pm_runtime_enable(dev);
	ret = pm_runtime_resume_and_get(dev);
	if (ret) {
		pm_runtime_put_noidle(dev);
		pm_runtime_set_suspended(dev);
		goto err_runtime_disable;
	}

	ret = vpu_core_register(dev->parent, core);
	if (ret)
		goto err_core_register;
	core->parent = dev->parent;

	pm_runtime_put_sync(dev);
	vpu_core_create_dbgfs_file(core);

	return 0;

err_core_register:
	pm_runtime_put_sync(dev);
err_runtime_disable:
	pm_runtime_disable(dev);

	return ret;
}

static int vpu_core_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct vpu_core *core = platform_get_drvdata(pdev);
	int ret;

	vpu_core_remove_dbgfs_file(core);
	ret = pm_runtime_resume_and_get(dev);
	WARN_ON(ret < 0);

	vpu_core_shutdown(core);
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);

	vpu_core_unregister(core->parent, core);
	memunmap(core->fw.virt);
	memunmap(core->rpc.virt);
	mutex_destroy(&core->lock);
	mutex_destroy(&core->cmd_lock);

	return 0;
}

static int __maybe_unused vpu_core_runtime_resume(struct device *dev)
{
	struct vpu_core *core = dev_get_drvdata(dev);

	return vpu_mbox_request(core);
}

static int __maybe_unused vpu_core_runtime_suspend(struct device *dev)
{
	struct vpu_core *core = dev_get_drvdata(dev);

	vpu_mbox_free(core);
	return 0;
}

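/*
 * System sleep helpers: quiesce or restart the message work items of the
 * core and of every registered instance.
 */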
static void vpu_core_cancel_work(struct vpu_core *core)
{
	struct vpu_inst *inst = NULL;

	cancel_work_sync(&core->msg_work);
	cancel_delayed_work_sync(&core->msg_delayed_work);

	mutex_lock(&core->lock);
	list_for_each_entry(inst, &core->instances, list)
		cancel_work_sync(&inst->msg_work);
	mutex_unlock(&core->lock);
}

static void vpu_core_resume_work(struct vpu_core *core)
{
	struct vpu_inst *inst = NULL;
	unsigned long delay = msecs_to_jiffies(10);

	queue_work(core->workqueue, &core->msg_work);
	queue_delayed_work(core->workqueue, &core->msg_delayed_work, delay);

	mutex_lock(&core->lock);
	list_for_each_entry(inst, &core->instances, list)
		queue_work(inst->workqueue, &inst->msg_work);
	mutex_unlock(&core->lock);
}

static int __maybe_unused vpu_core_resume(struct device *dev)
{
	struct vpu_core *core = dev_get_drvdata(dev);
	int ret = 0;

	mutex_lock(&core->lock);
	pm_runtime_resume_and_get(dev);
	vpu_core_get_vpu(core);

	if (core->request_count) {
		if (!vpu_iface_get_power_state(core))
			ret = vpu_core_boot(core, false);
		else
			ret = vpu_core_sw_reset(core);
		if (ret) {
			dev_err(core->dev, "resume fail\n");
			vpu_core_set_state(core, VPU_CORE_HANG);
		}
	}
	vpu_core_update_state(core);
	pm_runtime_put_sync(dev);
	mutex_unlock(&core->lock);

	vpu_core_resume_work(core);
	return ret;
}

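/*
 * On suspend, snapshot the state of an in-use core via vpu_core_snapshot()
 * so resume can reboot it without reloading the firmware image.
 */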
static int __maybe_unused vpu_core_suspend(struct device *dev)
{
	struct vpu_core *core = dev_get_drvdata(dev);
	int ret = 0;

	mutex_lock(&core->lock);
	if (core->request_count)
		ret = vpu_core_snapshot(core);
	mutex_unlock(&core->lock);
	if (ret)
		return ret;

	vpu_core_cancel_work(core);

	mutex_lock(&core->lock);
	vpu_core_put_vpu(core);
	mutex_unlock(&core->lock);
	return ret;
}

static const struct dev_pm_ops vpu_core_pm_ops = {
	SET_RUNTIME_PM_OPS(vpu_core_runtime_suspend, vpu_core_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(vpu_core_suspend, vpu_core_resume)
};

static struct vpu_core_resources imx8q_enc = {
	.type = VPU_CORE_TYPE_ENC,
	.fwname = "vpu/vpu_fw_imx8_enc.bin",
	.stride = 16,
	.max_width = 1920,
	.max_height = 1920,
	.min_width = 64,
	.min_height = 48,
	.step_width = 2,
	.step_height = 2,
	.rpc_size = 0x80000,
	.fwlog_size = 0x80000,
	.act_size = 0xc0000,
};

static struct vpu_core_resources imx8q_dec = {
	.type = VPU_CORE_TYPE_DEC,
	.fwname = "vpu/vpu_fw_imx8_dec.bin",
	.stride = 256,
	.max_width = 8188,
	.max_height = 8188,
	.min_width = 16,
	.min_height = 16,
	.step_width = 1,
	.step_height = 1,
	.rpc_size = 0x80000,
	.fwlog_size = 0x80000,
};

static const struct of_device_id vpu_core_dt_match[] = {
	{ .compatible = "nxp,imx8q-vpu-encoder", .data = &imx8q_enc },
	{ .compatible = "nxp,imx8q-vpu-decoder", .data = &imx8q_dec },
	{}
};
MODULE_DEVICE_TABLE(of, vpu_core_dt_match);

static struct platform_driver amphion_vpu_core_driver = {
	.probe = vpu_core_probe,
	.remove = vpu_core_remove,
	.driver = {
		.name = "amphion-vpu-core",
		.of_match_table = vpu_core_dt_match,
		.pm = &vpu_core_pm_ops,
	},
};

int __init vpu_core_driver_init(void)
{
	return platform_driver_register(&amphion_vpu_core_driver);
}

void __exit vpu_core_driver_exit(void)
{
	platform_driver_unregister(&amphion_vpu_core_driver);
}