1
2
3
4
5
6#include <linux/clk.h>
7#include <linux/debugfs.h>
8#include <linux/firmware.h>
9#include <linux/interrupt.h>
10#include <linux/iommu.h>
11#include <linux/module.h>
12#include <linux/of_address.h>
13#include <linux/of_irq.h>
14#include <linux/of_platform.h>
15#include <linux/of_reserved_mem.h>
16#include <linux/sched.h>
17#include <linux/sizes.h>
18#include <linux/dma-mapping.h>
19
20#include "mtk_vpu.h"
21
22
23
24
25
26
27
28#define INIT_TIMEOUT_MS 2000U
29#define IPI_TIMEOUT_MS 2000U
30#define VPU_IDLE_TIMEOUT_MS 1000U
31#define VPU_FW_VER_LEN 16
32
33
34#define VPU_PTCM_SIZE (96 * SZ_1K)
35#define VPU_DTCM_SIZE (32 * SZ_1K)
36
37#define VPU_DTCM_OFFSET 0x18000UL
38
39#define VPU_EXT_P_SIZE SZ_1M
40#define VPU_EXT_D_SIZE SZ_4M
41
42#define VPU_P_FW_SIZE (VPU_PTCM_SIZE + VPU_EXT_P_SIZE)
43#define VPU_D_FW_SIZE (VPU_DTCM_SIZE + VPU_EXT_D_SIZE)
44
45#define SHARE_BUF_SIZE 48
46
47
48#define VPU_P_FW "vpu_p.bin"
49#define VPU_D_FW "vpu_d.bin"
50#define VPU_P_FW_NEW "mediatek/mt8173/vpu_p.bin"
51#define VPU_D_FW_NEW "mediatek/mt8173/vpu_d.bin"
52
53#define VPU_RESET 0x0
54#define VPU_TCM_CFG 0x0008
55#define VPU_PMEM_EXT0_ADDR 0x000C
56#define VPU_PMEM_EXT1_ADDR 0x0010
57#define VPU_TO_HOST 0x001C
58#define VPU_DMEM_EXT0_ADDR 0x0014
59#define VPU_DMEM_EXT1_ADDR 0x0018
60#define HOST_TO_VPU 0x0024
61#define VPU_IDLE_REG 0x002C
62#define VPU_INT_STATUS 0x0034
63#define VPU_PC_REG 0x0060
64#define VPU_SP_REG 0x0064
65#define VPU_RA_REG 0x0068
66#define VPU_WDT_REG 0x0084
67
68
69#define VPU_IPC_INT BIT(8)
70
71#define VPU_IDLE_STATE BIT(23)
72
73
74
75
76
77
78
79
/* Selects which firmware image / memory region is operated on. */
enum vpu_fw_type {
	P_FW,	/* program firmware (PTCM + extended program memory) */
	D_FW,	/* data firmware (DTCM + extended data memory) */
};
84
85
86
87
88
89
90
91
/**
 * struct vpu_mem - one extended firmware memory buffer
 * @va: kernel virtual address of the DMA-coherent buffer
 * @pa: device (DMA) address of the buffer
 */
struct vpu_mem {
	void *va;
	dma_addr_t pa;
};
96
97
98
99
100
101
102
103
/**
 * struct vpu_regs - mapped VPU resources
 * @tcm: base of the tightly-coupled memory (PTCM at offset 0, DTCM at
 *       VPU_DTCM_OFFSET)
 * @cfg: base of the VPU configuration register block
 * @irq: VPU-to-host interrupt number
 */
struct vpu_regs {
	void __iomem *tcm;
	void __iomem *cfg;
	int irq;
};
109
110
111
112
113
114
115
/**
 * struct vpu_wdt_handler - one client's watchdog reset callback
 * @reset_func: invoked from the reset work after the VPU is put back
 *              into reset
 * @priv:       opaque pointer handed back to @reset_func
 */
struct vpu_wdt_handler {
	void (*reset_func)(void *);
	void *priv;
};
120
121
122
123
124
125
126
127
/**
 * struct vpu_wdt - VPU watchdog recovery machinery
 * @handler: per-client reset callbacks, indexed by enum rst_id
 * @ws:      work item running vpu_wdt_reset_func()
 * @wq:      dedicated single-threaded workqueue executing @ws
 */
struct vpu_wdt {
	struct vpu_wdt_handler handler[VPU_RST_MAX];
	struct work_struct ws;
	struct workqueue_struct *wq;
};
133
134
135
136
137
138
139
140
141
142
143
144
/**
 * struct vpu_run - VPU boot-handshake state
 * @signaled:       set from the IPI_VPU_INIT message once the VPU is up
 * @fw_ver:         firmware version string reported by the VPU
 * @dec_capability: decoder capability word reported by the VPU
 * @enc_capability: encoder capability word reported by the VPU
 * @wq:             waitqueue vpu_load_firmware() sleeps on until @signaled
 */
struct vpu_run {
	u32 signaled;
	char fw_ver[VPU_FW_VER_LEN];
	unsigned int dec_capability;
	unsigned int enc_capability;
	wait_queue_head_t wq;
};
152
153
154
155
156
157
158
159
/**
 * struct vpu_ipi_desc - a registered IPI handler
 * @handler: callback run by vpu_ipi_handler() when a message with this
 *           id arrives
 * @name:    human-readable name supplied at registration
 * @priv:    opaque pointer passed back to @handler
 */
struct vpu_ipi_desc {
	ipi_handler_t handler;
	const char *name;
	void *priv;
};
165
166
167
168
169
170
171
172
173
/**
 * struct share_obj - message slot shared with the VPU (placed in DTCM,
 *                    see vpu_ipi_init())
 * @id:        IPI id of the message
 * @len:       payload length in bytes
 * @share_buf: payload, up to SHARE_BUF_SIZE bytes
 */
struct share_obj {
	s32 id;
	u32 len;
	unsigned char share_buf[SHARE_BUF_SIZE];
};
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
/**
 * struct mtk_vpu - driver state for one MT8173 VPU instance
 * @extmem:     extended program/data memory buffers, indexed by P_FW/D_FW
 * @reg:        mapped TCM, configuration registers and IRQ number
 * @run:        boot handshake state filled by vpu_init_ipi_handler()
 * @wdt:        watchdog reset handlers and the workqueue running them
 * @ipi_desc:   per-id IPI handlers registered via vpu_ipi_register()
 * @recv_buf:   VPU-to-host message slot at the start of the DTCM
 * @send_buf:   host-to-VPU message slot, immediately after @recv_buf
 * @dev:        the VPU platform device
 * @clk:        "main" VPU clock (kept prepared; enabled on demand)
 * @fw_loaded:  true once both firmware images have been downloaded
 * @enable_4GB: set at probe when total RAM exceeds 2 GiB
 * @vpu_mutex:  serializes the send buffer, @wdt_refcnt and @fw_loaded
 * @wdt_refcnt: number of clock enablers keeping the VPU watchdog armed
 * @ack_wq:     waitqueue for IPI acknowledgments from the VPU
 * @ipi_id_ack: per-id flag set by vpu_ipi_handler() when an ack arrives
 */
struct mtk_vpu {
	struct vpu_mem extmem[2];
	struct vpu_regs reg;
	struct vpu_run run;
	struct vpu_wdt wdt;
	struct vpu_ipi_desc ipi_desc[IPI_MAX];
	struct share_obj __iomem *recv_buf;
	struct share_obj __iomem *send_buf;
	struct device *dev;
	struct clk *clk;
	bool fw_loaded;
	bool enable_4GB;
	struct mutex vpu_mutex;
	u32 wdt_refcnt;
	wait_queue_head_t ack_wq;
	bool ipi_id_ack[IPI_MAX];
};
226
/* Write @val to the VPU configuration register at @offset. */
static inline void vpu_cfg_writel(struct mtk_vpu *vpu, u32 val, u32 offset)
{
	writel(val, vpu->reg.cfg + offset);
}
231
/* Read the VPU configuration register at @offset. */
static inline u32 vpu_cfg_readl(struct mtk_vpu *vpu, u32 offset)
{
	return readl(vpu->reg.cfg + offset);
}
236
/*
 * True while bit 0 of VPU_RESET is set — the bit written by
 * vpu_load_firmware() to boot the VPU.
 */
static inline bool vpu_running(struct mtk_vpu *vpu)
{
	return vpu_cfg_readl(vpu, VPU_RESET) & BIT(0);
}
241
242static void vpu_clock_disable(struct mtk_vpu *vpu)
243{
244
245 mutex_lock(&vpu->vpu_mutex);
246 if (!--vpu->wdt_refcnt)
247 vpu_cfg_writel(vpu,
248 vpu_cfg_readl(vpu, VPU_WDT_REG) & ~(1L << 31),
249 VPU_WDT_REG);
250 mutex_unlock(&vpu->vpu_mutex);
251
252 clk_disable(vpu->clk);
253}
254
255static int vpu_clock_enable(struct mtk_vpu *vpu)
256{
257 int ret;
258
259 ret = clk_enable(vpu->clk);
260 if (ret)
261 return ret;
262
263 mutex_lock(&vpu->vpu_mutex);
264 if (!vpu->wdt_refcnt++)
265 vpu_cfg_writel(vpu,
266 vpu_cfg_readl(vpu, VPU_WDT_REG) | (1L << 31),
267 VPU_WDT_REG);
268 mutex_unlock(&vpu->vpu_mutex);
269
270 return ret;
271}
272
/* Dump the key VPU status/debug registers to the kernel log. */
static void vpu_dump_status(struct mtk_vpu *vpu)
{
	dev_info(vpu->dev,
		 "vpu: run %x, pc = 0x%x, ra = 0x%x, sp = 0x%x, idle = 0x%x\n"
		 "vpu: int %x, hv = 0x%x, vh = 0x%x, wdt = 0x%x\n",
		 vpu_running(vpu), vpu_cfg_readl(vpu, VPU_PC_REG),
		 vpu_cfg_readl(vpu, VPU_RA_REG), vpu_cfg_readl(vpu, VPU_SP_REG),
		 vpu_cfg_readl(vpu, VPU_IDLE_REG),
		 vpu_cfg_readl(vpu, VPU_INT_STATUS),
		 vpu_cfg_readl(vpu, HOST_TO_VPU),
		 vpu_cfg_readl(vpu, VPU_TO_HOST),
		 vpu_cfg_readl(vpu, VPU_WDT_REG));
}
286
287int vpu_ipi_register(struct platform_device *pdev,
288 enum ipi_id id, ipi_handler_t handler,
289 const char *name, void *priv)
290{
291 struct mtk_vpu *vpu = platform_get_drvdata(pdev);
292 struct vpu_ipi_desc *ipi_desc;
293
294 if (!vpu) {
295 dev_err(&pdev->dev, "vpu device in not ready\n");
296 return -EPROBE_DEFER;
297 }
298
299 if (id < IPI_MAX && handler) {
300 ipi_desc = vpu->ipi_desc;
301 ipi_desc[id].name = name;
302 ipi_desc[id].handler = handler;
303 ipi_desc[id].priv = priv;
304 return 0;
305 }
306
307 dev_err(&pdev->dev, "register vpu ipi id %d with invalid arguments\n",
308 id);
309 return -EINVAL;
310}
311EXPORT_SYMBOL_GPL(vpu_ipi_register);
312
/**
 * vpu_ipi_send - send an IPI message to the VPU and wait for its ack
 * @pdev: VPU platform device
 * @id:   message id; must be above IPI_VPU_INIT and below IPI_MAX
 * @buf:  payload to copy into the shared send buffer
 * @len:  payload length, at most sizeof(share_obj.share_buf)
 *
 * Busy-waits (up to IPI_TIMEOUT_MS) for the previous command to be
 * consumed, writes the payload into the DTCM send slot, kicks the VPU
 * via HOST_TO_VPU, then sleeps until vpu_ipi_handler() flags the ack.
 *
 * Return: 0 on success, -EINVAL for bad arguments or a stopped VPU,
 * -EIO on send/ack timeout, or a clock-enable error.
 */
int vpu_ipi_send(struct platform_device *pdev,
		 enum ipi_id id, void *buf,
		 unsigned int len)
{
	struct mtk_vpu *vpu = platform_get_drvdata(pdev);
	struct share_obj __iomem *send_obj = vpu->send_buf;
	unsigned long timeout;
	int ret = 0;

	if (id <= IPI_VPU_INIT || id >= IPI_MAX ||
	    len > sizeof(send_obj->share_buf) || !buf) {
		dev_err(vpu->dev, "failed to send ipi message\n");
		return -EINVAL;
	}

	ret = vpu_clock_enable(vpu);
	if (ret) {
		dev_err(vpu->dev, "failed to enable vpu clock\n");
		return ret;
	}
	if (!vpu_running(vpu)) {
		dev_err(vpu->dev, "vpu_ipi_send: VPU is not running\n");
		ret = -EINVAL;
		goto clock_disable;
	}

	mutex_lock(&vpu->vpu_mutex);

	/* Wait until the VPU has consumed the previous command
	 * (it clears HOST_TO_VPU when done). */
	timeout = jiffies + msecs_to_jiffies(IPI_TIMEOUT_MS);
	do {
		if (time_after(jiffies, timeout)) {
			dev_err(vpu->dev, "vpu_ipi_send: IPI timeout!\n");
			ret = -EIO;
			vpu_dump_status(vpu);
			goto mut_unlock;
		}
	} while (vpu_cfg_readl(vpu, HOST_TO_VPU));

	memcpy_toio(send_obj->share_buf, buf, len);
	writel(len, &send_obj->len);
	writel(id, &send_obj->id);

	vpu->ipi_id_ack[id] = false;
	/* send the command to the VPU */
	vpu_cfg_writel(vpu, 0x1, HOST_TO_VPU);

	mutex_unlock(&vpu->vpu_mutex);

	/* Sleep until vpu_ipi_handler() sets ipi_id_ack[id]. */
	timeout = msecs_to_jiffies(IPI_TIMEOUT_MS);
	ret = wait_event_timeout(vpu->ack_wq, vpu->ipi_id_ack[id], timeout);
	vpu->ipi_id_ack[id] = false;
	if (ret == 0) {
		dev_err(vpu->dev, "vpu ipi %d ack time out !\n", id);
		ret = -EIO;
		vpu_dump_status(vpu);
		goto clock_disable;
	}
	vpu_clock_disable(vpu);

	return 0;

mut_unlock:
	mutex_unlock(&vpu->vpu_mutex);
clock_disable:
	vpu_clock_disable(vpu);

	return ret;
}
EXPORT_SYMBOL_GPL(vpu_ipi_send);
384
/*
 * Watchdog recovery work: put the VPU back into reset, mark the
 * firmware as not loaded, then invoke every registered client reset
 * handler so users can recover their own state.
 */
static void vpu_wdt_reset_func(struct work_struct *ws)
{
	struct vpu_wdt *wdt = container_of(ws, struct vpu_wdt, ws);
	struct mtk_vpu *vpu = container_of(wdt, struct mtk_vpu, wdt);
	struct vpu_wdt_handler *handler = wdt->handler;
	int index, ret;

	dev_info(vpu->dev, "vpu reset\n");
	ret = vpu_clock_enable(vpu);
	if (ret) {
		dev_err(vpu->dev, "[VPU] wdt enables clock failed %d\n", ret);
		return;
	}
	mutex_lock(&vpu->vpu_mutex);
	/* Hold the VPU in reset; next vpu_load_firmware() re-downloads. */
	vpu_cfg_writel(vpu, 0x0, VPU_RESET);
	vpu->fw_loaded = false;
	mutex_unlock(&vpu->vpu_mutex);
	vpu_clock_disable(vpu);

	for (index = 0; index < VPU_RST_MAX; index++) {
		if (handler[index].reset_func) {
			handler[index].reset_func(handler[index].priv);
			dev_dbg(vpu->dev, "wdt handler func %d\n", index);
		}
	}
}
411
412int vpu_wdt_reg_handler(struct platform_device *pdev,
413 void wdt_reset(void *),
414 void *priv, enum rst_id id)
415{
416 struct mtk_vpu *vpu = platform_get_drvdata(pdev);
417 struct vpu_wdt_handler *handler;
418
419 if (!vpu) {
420 dev_err(&pdev->dev, "vpu device in not ready\n");
421 return -EPROBE_DEFER;
422 }
423
424 handler = vpu->wdt.handler;
425
426 if (id < VPU_RST_MAX && wdt_reset) {
427 dev_dbg(vpu->dev, "wdt register id %d\n", id);
428 mutex_lock(&vpu->vpu_mutex);
429 handler[id].reset_func = wdt_reset;
430 handler[id].priv = priv;
431 mutex_unlock(&vpu->vpu_mutex);
432 return 0;
433 }
434
435 dev_err(vpu->dev, "register vpu wdt handler failed\n");
436 return -EINVAL;
437}
438EXPORT_SYMBOL_GPL(vpu_wdt_reg_handler);
439
440unsigned int vpu_get_vdec_hw_capa(struct platform_device *pdev)
441{
442 struct mtk_vpu *vpu = platform_get_drvdata(pdev);
443
444 return vpu->run.dec_capability;
445}
446EXPORT_SYMBOL_GPL(vpu_get_vdec_hw_capa);
447
448unsigned int vpu_get_venc_hw_capa(struct platform_device *pdev)
449{
450 struct mtk_vpu *vpu = platform_get_drvdata(pdev);
451
452 return vpu->run.enc_capability;
453}
454EXPORT_SYMBOL_GPL(vpu_get_venc_hw_capa);
455
/**
 * vpu_mapping_dm_addr - translate a VPU data-memory address to a kernel VA
 * @pdev:           VPU platform device
 * @dtcm_dmem_addr: address in the VPU's data memory space; values below
 *                  VPU_DTCM_SIZE fall inside the DTCM, larger values in
 *                  the extended data memory
 *
 * Return: kernel virtual address, or ERR_PTR(-EINVAL) for 0 or for an
 * address beyond VPU_DTCM_SIZE + VPU_EXT_D_SIZE.
 */
void *vpu_mapping_dm_addr(struct platform_device *pdev,
			  u32 dtcm_dmem_addr)
{
	struct mtk_vpu *vpu = platform_get_drvdata(pdev);

	if (!dtcm_dmem_addr ||
	    (dtcm_dmem_addr > (VPU_DTCM_SIZE + VPU_EXT_D_SIZE))) {
		dev_err(vpu->dev, "invalid virtual data memory address\n");
		return ERR_PTR(-EINVAL);
	}

	/* DTCM sits at VPU_DTCM_OFFSET inside the mapped TCM window. */
	if (dtcm_dmem_addr < VPU_DTCM_SIZE)
		return (__force void *)(dtcm_dmem_addr + vpu->reg.tcm +
					VPU_DTCM_OFFSET);

	/* Beyond the DTCM: offset into the extended data memory buffer. */
	return vpu->extmem[D_FW].va + (dtcm_dmem_addr - VPU_DTCM_SIZE);
}
EXPORT_SYMBOL_GPL(vpu_mapping_dm_addr);
474
475struct platform_device *vpu_get_plat_device(struct platform_device *pdev)
476{
477 struct device *dev = &pdev->dev;
478 struct device_node *vpu_node;
479 struct platform_device *vpu_pdev;
480
481 vpu_node = of_parse_phandle(dev->of_node, "mediatek,vpu", 0);
482 if (!vpu_node) {
483 dev_err(dev, "can't get vpu node\n");
484 return NULL;
485 }
486
487 vpu_pdev = of_find_device_by_node(vpu_node);
488 of_node_put(vpu_node);
489 if (WARN_ON(!vpu_pdev)) {
490 dev_err(dev, "vpu pdev failed\n");
491 return NULL;
492 }
493
494 return vpu_pdev;
495}
496EXPORT_SYMBOL_GPL(vpu_get_plat_device);
497
498
/*
 * load_requested_vpu - request one firmware image and copy it to the VPU
 * @vpu:     driver context
 * @fw_type: P_FW or D_FW; selects TCM region, size limit and file name
 *
 * Tries the new firmware path first, then falls back to the legacy
 * name. The first tcm_size bytes land in the (P/D) TCM; any remainder
 * goes into the matching extended memory buffer. The VPU is held in
 * reset before copying.
 *
 * Return: 0 on success, -EFBIG if the image exceeds the region size, or
 * a negative errno from request_firmware().
 */
static int load_requested_vpu(struct mtk_vpu *vpu,
			      u8 fw_type)
{
	size_t tcm_size = fw_type ? VPU_DTCM_SIZE : VPU_PTCM_SIZE;
	size_t fw_size = fw_type ? VPU_D_FW_SIZE : VPU_P_FW_SIZE;
	char *fw_name = fw_type ? VPU_D_FW : VPU_P_FW;
	char *fw_new_name = fw_type ? VPU_D_FW_NEW : VPU_P_FW_NEW;
	const struct firmware *vpu_fw;
	size_t dl_size = 0;
	size_t extra_fw_size = 0;
	void *dest;
	int ret;

	ret = request_firmware(&vpu_fw, fw_new_name, vpu->dev);
	if (ret < 0) {
		dev_info(vpu->dev, "Failed to load %s, %d, retry\n",
			 fw_new_name, ret);
		/* fall back to the legacy firmware file name */
		ret = request_firmware(&vpu_fw, fw_name, vpu->dev);
		if (ret < 0) {
			dev_err(vpu->dev, "Failed to load %s, %d\n", fw_name,
				ret);
			return ret;
		}
	}
	dl_size = vpu_fw->size;
	if (dl_size > fw_size) {
		dev_err(vpu->dev, "fw %s size %zu is abnormal\n", fw_name,
			dl_size);
		release_firmware(vpu_fw);
		return -EFBIG;
	}
	dev_dbg(vpu->dev, "Downloaded fw %s size: %zu.\n",
		fw_name,
		dl_size);
	/* Hold the VPU in reset while its memory is being rewritten. */
	vpu_cfg_writel(vpu, 0x0, VPU_RESET);

	/* Split the image: first tcm_size bytes to TCM, rest to extmem. */
	if (dl_size > tcm_size) {
		dev_dbg(vpu->dev, "fw size %zu > limited fw size %zu\n",
			dl_size, tcm_size);
		extra_fw_size = dl_size - tcm_size;
		dev_dbg(vpu->dev, "extra_fw_size %zu\n", extra_fw_size);
		dl_size = tcm_size;
	}
	dest = (__force void *)vpu->reg.tcm;
	if (fw_type == D_FW)
		dest += VPU_DTCM_OFFSET;
	memcpy(dest, vpu_fw->data, dl_size);
	/* Copy the overflow portion into the extended memory buffer. */
	if (extra_fw_size > 0) {
		dest = vpu->extmem[fw_type].va;
		dev_dbg(vpu->dev, "download extended memory type %x\n",
			fw_type);
		memcpy(dest, vpu_fw->data + tcm_size, extra_fw_size);
	}

	release_firmware(vpu_fw);

	return 0;
}
561
562int vpu_load_firmware(struct platform_device *pdev)
563{
564 struct mtk_vpu *vpu;
565 struct device *dev = &pdev->dev;
566 struct vpu_run *run;
567 int ret;
568
569 if (!pdev) {
570 dev_err(dev, "VPU platform device is invalid\n");
571 return -EINVAL;
572 }
573
574 vpu = platform_get_drvdata(pdev);
575 run = &vpu->run;
576
577 mutex_lock(&vpu->vpu_mutex);
578 if (vpu->fw_loaded) {
579 mutex_unlock(&vpu->vpu_mutex);
580 return 0;
581 }
582 mutex_unlock(&vpu->vpu_mutex);
583
584 ret = vpu_clock_enable(vpu);
585 if (ret) {
586 dev_err(dev, "enable clock failed %d\n", ret);
587 return ret;
588 }
589
590 mutex_lock(&vpu->vpu_mutex);
591
592 run->signaled = false;
593 dev_dbg(vpu->dev, "firmware request\n");
594
595 ret = load_requested_vpu(vpu, P_FW);
596 if (ret < 0) {
597 dev_err(dev, "Failed to request %s, %d\n", VPU_P_FW, ret);
598 goto OUT_LOAD_FW;
599 }
600
601
602 ret = load_requested_vpu(vpu, D_FW);
603 if (ret < 0) {
604 dev_err(dev, "Failed to request %s, %d\n", VPU_D_FW, ret);
605 goto OUT_LOAD_FW;
606 }
607
608 vpu->fw_loaded = true;
609
610 vpu_cfg_writel(vpu, 0x1, VPU_RESET);
611
612 ret = wait_event_interruptible_timeout(run->wq,
613 run->signaled,
614 msecs_to_jiffies(INIT_TIMEOUT_MS)
615 );
616 if (ret == 0) {
617 ret = -ETIME;
618 dev_err(dev, "wait vpu initialization timeout!\n");
619 goto OUT_LOAD_FW;
620 } else if (-ERESTARTSYS == ret) {
621 dev_err(dev, "wait vpu interrupted by a signal!\n");
622 goto OUT_LOAD_FW;
623 }
624
625 ret = 0;
626 dev_info(dev, "vpu is ready. Fw version %s\n", run->fw_ver);
627
628OUT_LOAD_FW:
629 mutex_unlock(&vpu->vpu_mutex);
630 vpu_clock_disable(vpu);
631
632 return ret;
633}
634EXPORT_SYMBOL_GPL(vpu_load_firmware);
635
/*
 * IPI_VPU_INIT handler: latch the boot banner (signal flag, firmware
 * version, codec capabilities) sent by the firmware and wake the waiter
 * in vpu_load_firmware().
 */
static void vpu_init_ipi_handler(const void *data, unsigned int len, void *priv)
{
	struct mtk_vpu *vpu = priv;
	const struct vpu_run *run = data;

	vpu->run.signaled = run->signaled;
	strscpy(vpu->run.fw_ver, run->fw_ver, sizeof(vpu->run.fw_ver));
	vpu->run.dec_capability = run->dec_capability;
	vpu->run.enc_capability = run->enc_capability;
	wake_up_interruptible(&vpu->run.wq);
}
647
648#ifdef CONFIG_DEBUG_FS
/* debugfs read: snapshot key VPU registers and render them as text. */
static ssize_t vpu_debug_read(struct file *file, char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	char buf[256];
	unsigned int len;
	unsigned int running, pc, vpu_to_host, host_to_vpu, wdt, idle, ra, sp;
	int ret;
	struct device *dev = file->private_data;
	struct mtk_vpu *vpu = dev_get_drvdata(dev);

	ret = vpu_clock_enable(vpu);
	if (ret) {
		dev_err(vpu->dev, "[VPU] enable clock failed %d\n", ret);
		return 0;
	}

	/* Snapshot the registers under an enabled clock, then release it. */
	running = vpu_running(vpu);
	pc = vpu_cfg_readl(vpu, VPU_PC_REG);
	wdt = vpu_cfg_readl(vpu, VPU_WDT_REG);
	host_to_vpu = vpu_cfg_readl(vpu, HOST_TO_VPU);
	vpu_to_host = vpu_cfg_readl(vpu, VPU_TO_HOST);
	ra = vpu_cfg_readl(vpu, VPU_RA_REG);
	sp = vpu_cfg_readl(vpu, VPU_SP_REG);
	idle = vpu_cfg_readl(vpu, VPU_IDLE_REG);

	vpu_clock_disable(vpu);

	if (running) {
		len = snprintf(buf, sizeof(buf), "VPU is running\n\n"
		"FW Version: %s\n"
		"PC: 0x%x\n"
		"WDT: 0x%x\n"
		"Host to VPU: 0x%x\n"
		"VPU to Host: 0x%x\n"
		"SP: 0x%x\n"
		"RA: 0x%x\n"
		"idle: 0x%x\n",
		vpu->run.fw_ver, pc, wdt,
		host_to_vpu, vpu_to_host, sp, ra, idle);
	} else {
		len = snprintf(buf, sizeof(buf), "VPU not running\n");
	}

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
695
/* debugfs file operations: dump VPU run state and key registers. */
static const struct file_operations vpu_debug_fops = {
	.open = simple_open,
	.read = vpu_debug_read,
};
700#endif
701
702static void vpu_free_ext_mem(struct mtk_vpu *vpu, u8 fw_type)
703{
704 struct device *dev = vpu->dev;
705 size_t fw_ext_size = fw_type ? VPU_EXT_D_SIZE : VPU_EXT_P_SIZE;
706
707 dma_free_coherent(dev, fw_ext_size, vpu->extmem[fw_type].va,
708 vpu->extmem[fw_type].pa);
709}
710
/*
 * vpu_alloc_ext_mem - allocate and program one extended memory window
 * @vpu:     driver context
 * @fw_type: P_FW or D_FW; selects size and the EXT0/EXT1 registers
 *
 * Allocates a DMA-coherent buffer and writes its page-aligned DMA
 * address (plus a 1 GiB offset when 4GB mode is active) into the VPU's
 * extended-memory registers.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
static int vpu_alloc_ext_mem(struct mtk_vpu *vpu, u32 fw_type)
{
	struct device *dev = vpu->dev;
	size_t fw_ext_size = fw_type ? VPU_EXT_D_SIZE : VPU_EXT_P_SIZE;
	u32 vpu_ext_mem0 = fw_type ? VPU_DMEM_EXT0_ADDR : VPU_PMEM_EXT0_ADDR;
	u32 vpu_ext_mem1 = fw_type ? VPU_DMEM_EXT1_ADDR : VPU_PMEM_EXT1_ADDR;
	u32 offset_4gb = vpu->enable_4GB ? 0x40000000 : 0;

	vpu->extmem[fw_type].va = dma_alloc_coherent(dev,
						     fw_ext_size,
						     &vpu->extmem[fw_type].pa,
						     GFP_KERNEL);
	if (!vpu->extmem[fw_type].va) {
		dev_err(dev, "Failed to allocate the extended program memory\n");
		return -ENOMEM;
	}

	/* Program the window registers: 0x1 into EXT0 and the 4K-aligned
	 * DMA address (with the 4GB-mode offset) into EXT1.
	 * NOTE(review): register semantics follow the MT8173 datasheet —
	 * not verifiable from this file alone.
	 */
	vpu_cfg_writel(vpu, 0x1, vpu_ext_mem0);
	vpu_cfg_writel(vpu, (vpu->extmem[fw_type].pa & 0xFFFFF000) + offset_4gb,
		       vpu_ext_mem1);

	dev_info(dev, "%s extend memory phy=0x%llx virt=0x%p\n",
		 fw_type ? "Data" : "Program",
		 (unsigned long long)vpu->extmem[fw_type].pa,
		 vpu->extmem[fw_type].va);

	return 0;
}
740
741static void vpu_ipi_handler(struct mtk_vpu *vpu)
742{
743 struct share_obj __iomem *rcv_obj = vpu->recv_buf;
744 struct vpu_ipi_desc *ipi_desc = vpu->ipi_desc;
745 unsigned char data[SHARE_BUF_SIZE];
746 s32 id = readl(&rcv_obj->id);
747
748 memcpy_fromio(data, rcv_obj->share_buf, sizeof(data));
749 if (id < IPI_MAX && ipi_desc[id].handler) {
750 ipi_desc[id].handler(data, readl(&rcv_obj->len),
751 ipi_desc[id].priv);
752 if (id > IPI_VPU_INIT) {
753 vpu->ipi_id_ack[id] = true;
754 wake_up(&vpu->ack_wq);
755 }
756 } else {
757 dev_err(vpu->dev, "No such ipi id = %d\n", id);
758 }
759}
760
/*
 * Place the shared receive/send message slots at the start of the DTCM
 * and clear any stale state, including a pending VPU_TO_HOST interrupt.
 */
static int vpu_ipi_init(struct mtk_vpu *vpu)
{
	/* Disable/clear the VPU-to-host interrupt status. */
	vpu_cfg_writel(vpu, 0x0, VPU_TO_HOST);

	/* recv_buf is the first share_obj in DTCM, send_buf the second. */
	vpu->recv_buf = vpu->reg.tcm + VPU_DTCM_OFFSET;
	vpu->send_buf = vpu->recv_buf + 1;
	memset_io(vpu->recv_buf, 0, sizeof(struct share_obj));
	memset_io(vpu->send_buf, 0, sizeof(struct share_obj));

	return 0;
}
774
/*
 * Interrupt handler for the VPU-to-host line. An IPC message sets
 * VPU_IPC_INT in VPU_TO_HOST; anything else on this line is treated as
 * a watchdog timeout and queues the recovery work.
 */
static irqreturn_t vpu_irq_handler(int irq, void *priv)
{
	struct mtk_vpu *vpu = priv;
	u32 vpu_to_host;
	int ret;

	/*
	 * Take our own clock reference (clk is already prepared) so the
	 * register accesses below are safe even if a sender timed out
	 * and dropped its reference concurrently.
	 */
	ret = clk_enable(vpu->clk);
	if (ret) {
		dev_err(vpu->dev, "[VPU] enable clock failed %d\n", ret);
		return IRQ_NONE;
	}
	vpu_to_host = vpu_cfg_readl(vpu, VPU_TO_HOST);
	if (vpu_to_host & VPU_IPC_INT) {
		vpu_ipi_handler(vpu);
	} else {
		dev_err(vpu->dev, "vpu watchdog timeout! 0x%x", vpu_to_host);
		queue_work(vpu->wdt.wq, &vpu->wdt.ws);
	}

	/* Acknowledge: clear the VPU-to-host interrupt status. */
	vpu_cfg_writel(vpu, 0x0, VPU_TO_HOST);
	clk_disable(vpu->clk);

	return IRQ_HANDLED;
}
805
#ifdef CONFIG_DEBUG_FS
/* debugfs node ("mtk_vpu") exposing VPU status; created in probe. */
static struct dentry *vpu_debugfs;
#endif
809static int mtk_vpu_probe(struct platform_device *pdev)
810{
811 struct mtk_vpu *vpu;
812 struct device *dev;
813 struct resource *res;
814 int ret = 0;
815
816 dev_dbg(&pdev->dev, "initialization\n");
817
818 dev = &pdev->dev;
819 vpu = devm_kzalloc(dev, sizeof(*vpu), GFP_KERNEL);
820 if (!vpu)
821 return -ENOMEM;
822
823 vpu->dev = &pdev->dev;
824 vpu->reg.tcm = devm_platform_ioremap_resource_byname(pdev, "tcm");
825 if (IS_ERR((__force void *)vpu->reg.tcm))
826 return PTR_ERR((__force void *)vpu->reg.tcm);
827
828 vpu->reg.cfg = devm_platform_ioremap_resource_byname(pdev, "cfg_reg");
829 if (IS_ERR((__force void *)vpu->reg.cfg))
830 return PTR_ERR((__force void *)vpu->reg.cfg);
831
832
833 vpu->clk = devm_clk_get(dev, "main");
834 if (IS_ERR(vpu->clk)) {
835 dev_err(dev, "get vpu clock failed\n");
836 return PTR_ERR(vpu->clk);
837 }
838
839 platform_set_drvdata(pdev, vpu);
840
841 ret = clk_prepare(vpu->clk);
842 if (ret) {
843 dev_err(dev, "prepare vpu clock failed\n");
844 return ret;
845 }
846
847
848 vpu->wdt.wq = create_singlethread_workqueue("vpu_wdt");
849 if (!vpu->wdt.wq) {
850 dev_err(dev, "initialize wdt workqueue failed\n");
851 return -ENOMEM;
852 }
853 INIT_WORK(&vpu->wdt.ws, vpu_wdt_reset_func);
854 mutex_init(&vpu->vpu_mutex);
855
856 ret = vpu_clock_enable(vpu);
857 if (ret) {
858 dev_err(dev, "enable vpu clock failed\n");
859 goto workqueue_destroy;
860 }
861
862 dev_dbg(dev, "vpu ipi init\n");
863 ret = vpu_ipi_init(vpu);
864 if (ret) {
865 dev_err(dev, "Failed to init ipi\n");
866 goto disable_vpu_clk;
867 }
868
869
870 ret = vpu_ipi_register(pdev, IPI_VPU_INIT, vpu_init_ipi_handler,
871 "vpu_init", vpu);
872 if (ret) {
873 dev_err(dev, "Failed to register IPI_VPU_INIT\n");
874 goto vpu_mutex_destroy;
875 }
876
877#ifdef CONFIG_DEBUG_FS
878 vpu_debugfs = debugfs_create_file("mtk_vpu", S_IRUGO, NULL, (void *)dev,
879 &vpu_debug_fops);
880#endif
881
882
883 vpu_cfg_writel(vpu, 0x2, VPU_TCM_CFG);
884
885 vpu->enable_4GB = !!(totalram_pages() > (SZ_2G >> PAGE_SHIFT));
886 dev_info(dev, "4GB mode %u\n", vpu->enable_4GB);
887
888 if (vpu->enable_4GB) {
889 ret = of_reserved_mem_device_init(dev);
890 if (ret)
891 dev_info(dev, "init reserved memory failed\n");
892
893 }
894
895 ret = vpu_alloc_ext_mem(vpu, D_FW);
896 if (ret) {
897 dev_err(dev, "Allocate DM failed\n");
898 goto remove_debugfs;
899 }
900
901 ret = vpu_alloc_ext_mem(vpu, P_FW);
902 if (ret) {
903 dev_err(dev, "Allocate PM failed\n");
904 goto free_d_mem;
905 }
906
907 init_waitqueue_head(&vpu->run.wq);
908 init_waitqueue_head(&vpu->ack_wq);
909
910 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
911 if (!res) {
912 dev_err(dev, "get IRQ resource failed.\n");
913 ret = -ENXIO;
914 goto free_p_mem;
915 }
916 vpu->reg.irq = platform_get_irq(pdev, 0);
917 ret = devm_request_irq(dev, vpu->reg.irq, vpu_irq_handler, 0,
918 pdev->name, vpu);
919 if (ret) {
920 dev_err(dev, "failed to request irq\n");
921 goto free_p_mem;
922 }
923
924 vpu_clock_disable(vpu);
925 dev_dbg(dev, "initialization completed\n");
926
927 return 0;
928
929free_p_mem:
930 vpu_free_ext_mem(vpu, P_FW);
931free_d_mem:
932 vpu_free_ext_mem(vpu, D_FW);
933remove_debugfs:
934 of_reserved_mem_device_release(dev);
935#ifdef CONFIG_DEBUG_FS
936 debugfs_remove(vpu_debugfs);
937#endif
938 memset(vpu->ipi_desc, 0, sizeof(struct vpu_ipi_desc) * IPI_MAX);
939vpu_mutex_destroy:
940 mutex_destroy(&vpu->vpu_mutex);
941disable_vpu_clk:
942 vpu_clock_disable(vpu);
943workqueue_destroy:
944 destroy_workqueue(vpu->wdt.wq);
945
946 return ret;
947}
948
/* Devicetree compatibles handled by this driver. */
static const struct of_device_id mtk_vpu_match[] = {
	{
		.compatible = "mediatek,mt8173-vpu",
	},
	{},
};
MODULE_DEVICE_TABLE(of, mtk_vpu_match);
956
957static int mtk_vpu_remove(struct platform_device *pdev)
958{
959 struct mtk_vpu *vpu = platform_get_drvdata(pdev);
960
961#ifdef CONFIG_DEBUG_FS
962 debugfs_remove(vpu_debugfs);
963#endif
964 if (vpu->wdt.wq) {
965 flush_workqueue(vpu->wdt.wq);
966 destroy_workqueue(vpu->wdt.wq);
967 }
968 vpu_free_ext_mem(vpu, P_FW);
969 vpu_free_ext_mem(vpu, D_FW);
970 mutex_destroy(&vpu->vpu_mutex);
971 clk_unprepare(vpu->clk);
972
973 return 0;
974}
975
/*
 * Suspend: if the VPU is running, request idle via VPU_INT_STATUS bit
 * VPU_IDLE_STATE, poll until it reports idle (VPU_IDLE_REG), then gate
 * and unprepare the clock.
 */
static int mtk_vpu_suspend(struct device *dev)
{
	struct mtk_vpu *vpu = dev_get_drvdata(dev);
	unsigned long timeout;
	int ret;

	ret = vpu_clock_enable(vpu);
	if (ret) {
		dev_err(dev, "failed to enable vpu clock\n");
		return ret;
	}

	/* A stopped VPU needs no idle handshake. */
	if (!vpu_running(vpu)) {
		vpu_clock_disable(vpu);
		clk_unprepare(vpu->clk);
		return 0;
	}

	mutex_lock(&vpu->vpu_mutex);
	/* Signal the idle request to the VPU. */
	vpu_cfg_writel(vpu, vpu_cfg_readl(vpu, VPU_INT_STATUS) | VPU_IDLE_STATE,
		       VPU_INT_STATUS);
	/* Poll the idle register, bounded by VPU_IDLE_TIMEOUT_MS. */
	timeout = jiffies + msecs_to_jiffies(VPU_IDLE_TIMEOUT_MS);
	do {
		if (time_after(jiffies, timeout)) {
			dev_err(dev, "vpu idle timeout\n");
			mutex_unlock(&vpu->vpu_mutex);
			vpu_clock_disable(vpu);
			return -EIO;
		}
	} while (!vpu_cfg_readl(vpu, VPU_IDLE_REG));

	mutex_unlock(&vpu->vpu_mutex);
	vpu_clock_disable(vpu);
	clk_unprepare(vpu->clk);

	return 0;
}
1015
1016static int mtk_vpu_resume(struct device *dev)
1017{
1018 struct mtk_vpu *vpu = dev_get_drvdata(dev);
1019 int ret;
1020
1021 clk_prepare(vpu->clk);
1022 ret = vpu_clock_enable(vpu);
1023 if (ret) {
1024 dev_err(dev, "failed to enable vpu clock\n");
1025 return ret;
1026 }
1027
1028 mutex_lock(&vpu->vpu_mutex);
1029
1030 vpu_cfg_writel(vpu,
1031 vpu_cfg_readl(vpu, VPU_INT_STATUS) & ~(VPU_IDLE_STATE),
1032 VPU_INT_STATUS);
1033 mutex_unlock(&vpu->vpu_mutex);
1034 vpu_clock_disable(vpu);
1035
1036 return 0;
1037}
1038
/* System PM callbacks (legacy suspend/resume hooks). */
static const struct dev_pm_ops mtk_vpu_pm = {
	.suspend = mtk_vpu_suspend,
	.resume = mtk_vpu_resume,
};
1043
/* Platform driver glue for the MT8173 VPU. */
static struct platform_driver mtk_vpu_driver = {
	.probe	= mtk_vpu_probe,
	.remove	= mtk_vpu_remove,
	.driver	= {
		.name	= "mtk_vpu",
		.pm = &mtk_vpu_pm,
		.of_match_table = mtk_vpu_match,
	},
};
1053
1054module_platform_driver(mtk_vpu_driver);
1055
1056MODULE_LICENSE("GPL v2");
1057MODULE_DESCRIPTION("Mediatek Video Processor Unit driver");
1058