1
2
3
4
5
6
7
8
9
10
11
12
13
14#include <linux/clk.h>
15#include <linux/debugfs.h>
16#include <linux/firmware.h>
17#include <linux/interrupt.h>
18#include <linux/iommu.h>
19#include <linux/module.h>
20#include <linux/of_address.h>
21#include <linux/of_irq.h>
22#include <linux/of_platform.h>
23#include <linux/of_reserved_mem.h>
24#include <linux/sched.h>
25#include <linux/sizes.h>
26#include <linux/dma-mapping.h>
27
28#include "mtk_vpu.h"
29
30
31
32
33
34
35
/* Timeouts (ms) for VPU boot handshake and IPI acknowledgement. */
#define INIT_TIMEOUT_MS 2000U
#define IPI_TIMEOUT_MS 2000U
/* Length of the firmware version string reported by the VPU. */
#define VPU_FW_VER_LEN 16

/* Sizes of the on-chip program/data tightly-coupled memories (TCM). */
#define VPU_PTCM_SIZE (96 * SZ_1K)
#define VPU_DTCM_SIZE (32 * SZ_1K)
/* Offset of the DTCM inside the "tcm" MMIO resource. */
#define VPU_DTCM_OFFSET 0x18000UL
/* Sizes of the extended (DRAM-backed) program/data memories. */
#define VPU_EXT_P_SIZE SZ_1M
#define VPU_EXT_D_SIZE SZ_4M
/* Maximum acceptable firmware image sizes: TCM part + extended part. */
#define VPU_P_FW_SIZE (VPU_PTCM_SIZE + VPU_EXT_P_SIZE)
#define VPU_D_FW_SIZE (VPU_DTCM_SIZE + VPU_EXT_D_SIZE)

/* Payload size of one IPI share buffer (struct share_obj::share_buf). */
#define SHARE_BUF_SIZE 48

/* Firmware image file names requested via request_firmware(). */
#define VPU_P_FW "vpu_p.bin"
#define VPU_D_FW "vpu_d.bin"

/* VPU configuration register offsets (relative to the "cfg_reg" resource). */
#define VPU_RESET 0x0
#define VPU_TCM_CFG 0x0008
#define VPU_PMEM_EXT0_ADDR 0x000C
#define VPU_PMEM_EXT1_ADDR 0x0010
#define VPU_TO_HOST 0x001C
#define VPU_DMEM_EXT0_ADDR 0x0014
#define VPU_DMEM_EXT1_ADDR 0x0018
#define HOST_TO_VPU 0x0024
#define VPU_PC_REG 0x0060
#define VPU_WDT_REG 0x0084

/* Bit set in VPU_TO_HOST for an IPC interrupt (clear means watchdog). */
#define VPU_IPC_INT BIT(8)
71
72
73
74
75
76
77
78
/* Firmware image selector: program image (P_FW) or data image (D_FW). */
enum vpu_fw_type {
	P_FW,
	D_FW,
};
83
84
85
86
87
88
89
90
/**
 * struct vpu_mem - extended memory region shared with the VPU
 * @va: kernel virtual address of the DMA-coherent buffer
 * @pa: device (DMA) address of the buffer
 */
struct vpu_mem {
	void *va;
	dma_addr_t pa;
};
95
96
97
98
99
100
101
102
/**
 * struct vpu_regs - mapped VPU hardware resources
 * @tcm: mapping of the tightly-coupled memory ("tcm" resource)
 * @cfg: mapping of the configuration registers ("cfg_reg" resource)
 * @irq: VPU-to-host interrupt number
 */
struct vpu_regs {
	void __iomem *tcm;
	void __iomem *cfg;
	int irq;
};
108
109
110
111
112
113
114
/**
 * struct vpu_wdt_handler - one registered watchdog-reset callback
 * @reset_func: called after a VPU watchdog timeout reset
 * @priv: opaque pointer handed back to @reset_func
 */
struct vpu_wdt_handler {
	void (*reset_func)(void *);
	void *priv;
};
119
120
121
122
123
124
125
126
/**
 * struct vpu_wdt - VPU watchdog bookkeeping
 * @handler: per-client reset callbacks, indexed by enum rst_id
 * @ws: work item running vpu_wdt_reset_func()
 * @wq: singlethread workqueue the reset work is queued on
 */
struct vpu_wdt {
	struct vpu_wdt_handler handler[VPU_RST_MAX];
	struct work_struct ws;
	struct workqueue_struct *wq;
};
132
133
134
135
136
137
138
139
140
141
142
143
/**
 * struct vpu_run - VPU boot-up status reported via IPI_VPU_INIT
 * @signaled: set by vpu_init_ipi_handler() when the VPU has booted
 * @fw_ver: firmware version string from the VPU
 * @dec_capability: decoder capability bits from the VPU
 * @enc_capability: encoder capability bits from the VPU
 * @wq: waitqueue vpu_load_firmware() sleeps on until @signaled
 */
struct vpu_run {
	u32 signaled;
	char fw_ver[VPU_FW_VER_LEN];
	unsigned int dec_capability;
	unsigned int enc_capability;
	wait_queue_head_t wq;
};
151
152
153
154
155
156
157
158
/**
 * struct vpu_ipi_desc - one registered IPI message handler
 * @handler: callback invoked from vpu_ipi_handler() on a matching id
 * @name: human-readable name supplied at registration
 * @priv: opaque pointer handed back to @handler
 */
struct vpu_ipi_desc {
	ipi_handler_t handler;
	const char *name;
	void *priv;
};
164
165
166
167
168
169
170
171
172
/**
 * struct share_obj - message exchanged with the VPU through DTCM
 * @id: IPI id of the message (s32; written by the VPU on receive path)
 * @len: number of valid bytes in @share_buf
 * @share_buf: message payload
 *
 * Two of these live back-to-back at the start of the DTCM (see
 * vpu_ipi_init()): the receive buffer followed by the send buffer.
 */
struct share_obj {
	s32 id;
	u32 len;
	unsigned char share_buf[SHARE_BUF_SIZE];
};
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
/**
 * struct mtk_vpu - driver state for one VPU instance
 * @extmem: extended program/data memories, indexed by enum vpu_fw_type
 * @reg: mapped TCM and configuration registers plus irq number
 * @run: VPU boot-up status
 * @wdt: watchdog state and reset work
 * @ipi_desc: registered IPI handlers, indexed by enum ipi_id
 * @recv_buf: VPU-to-host share buffer (in DTCM)
 * @send_buf: host-to-VPU share buffer (in DTCM)
 * @dev: VPU platform device
 * @clk: "main" VPU clock (prepared at probe)
 * @fw_loaded: true once both firmware images are downloaded
 * @enable_4GB: true when >2 GiB of RAM forces the 4GB address mode
 * @vpu_mutex: protects firmware load, send buffer and wdt refcount
 * @wdt_refcnt: nesting count of vpu_clock_enable() users (wdt enable bit)
 * @ack_wq: waitqueue for IPI acknowledgements
 * @ipi_id_ack: per-id flag set when the VPU acks a sent message
 */
struct mtk_vpu {
	struct vpu_mem extmem[2];
	struct vpu_regs reg;
	struct vpu_run run;
	struct vpu_wdt wdt;
	struct vpu_ipi_desc ipi_desc[IPI_MAX];
	struct share_obj *recv_buf;
	struct share_obj *send_buf;
	struct device *dev;
	struct clk *clk;
	bool fw_loaded;
	bool enable_4GB;
	struct mutex vpu_mutex;
	u32 wdt_refcnt;
	wait_queue_head_t ack_wq;
	bool ipi_id_ack[IPI_MAX];
};
225
/* Write @val to the VPU configuration register at @offset. */
static inline void vpu_cfg_writel(struct mtk_vpu *vpu, u32 val, u32 offset)
{
	writel(val, vpu->reg.cfg + offset);
}
230
/* Read the VPU configuration register at @offset. */
static inline u32 vpu_cfg_readl(struct mtk_vpu *vpu, u32 offset)
{
	return readl(vpu->reg.cfg + offset);
}
235
/* True when the VPU is released from reset (bit 0 of VPU_RESET set). */
static inline bool vpu_running(struct mtk_vpu *vpu)
{
	return vpu_cfg_readl(vpu, VPU_RESET) & BIT(0);
}
240
/*
 * Drop one reference on the VPU clock.  When the last user goes away,
 * the VPU watchdog is disabled first (bit 31 of VPU_WDT_REG) so it
 * cannot fire while the clock is off.  Pairs with vpu_clock_enable().
 */
static void vpu_clock_disable(struct mtk_vpu *vpu)
{
	/* Disable VPU watchdog */
	mutex_lock(&vpu->vpu_mutex);
	if (!--vpu->wdt_refcnt)
		vpu_cfg_writel(vpu,
			       vpu_cfg_readl(vpu, VPU_WDT_REG) & ~(1L << 31),
			       VPU_WDT_REG);
	mutex_unlock(&vpu->vpu_mutex);

	clk_disable(vpu->clk);
}
253
/*
 * Take one reference on the VPU clock; the first user also enables the
 * VPU watchdog (bit 31 of VPU_WDT_REG).  The clock must already be
 * prepared (done once in probe).  Returns 0 or the clk_enable() error.
 */
static int vpu_clock_enable(struct mtk_vpu *vpu)
{
	int ret;

	ret = clk_enable(vpu->clk);
	if (ret)
		return ret;
	/* Enable VPU watchdog */
	mutex_lock(&vpu->vpu_mutex);
	if (!vpu->wdt_refcnt++)
		vpu_cfg_writel(vpu,
			       vpu_cfg_readl(vpu, VPU_WDT_REG) | (1L << 31),
			       VPU_WDT_REG);
	mutex_unlock(&vpu->vpu_mutex);

	return ret;
}
271
272int vpu_ipi_register(struct platform_device *pdev,
273 enum ipi_id id, ipi_handler_t handler,
274 const char *name, void *priv)
275{
276 struct mtk_vpu *vpu = platform_get_drvdata(pdev);
277 struct vpu_ipi_desc *ipi_desc;
278
279 if (!vpu) {
280 dev_err(&pdev->dev, "vpu device in not ready\n");
281 return -EPROBE_DEFER;
282 }
283
284 if (id >= 0 && id < IPI_MAX && handler) {
285 ipi_desc = vpu->ipi_desc;
286 ipi_desc[id].name = name;
287 ipi_desc[id].handler = handler;
288 ipi_desc[id].priv = priv;
289 return 0;
290 }
291
292 dev_err(&pdev->dev, "register vpu ipi id %d with invalid arguments\n",
293 id);
294 return -EINVAL;
295}
296EXPORT_SYMBOL_GPL(vpu_ipi_register);
297
/*
 * vpu_ipi_send - send one message to the VPU and wait for its ack
 * @pdev: VPU platform device
 * @id: IPI id of the message (must be above IPI_VPU_INIT)
 * @buf: message payload
 * @len: payload length, at most SHARE_BUF_SIZE
 *
 * Copies the payload into the shared send buffer in DTCM, kicks the
 * HOST_TO_VPU doorbell and sleeps until the VPU acknowledges via the
 * interrupt path (vpu_ipi_handler() sets ipi_id_ack[id]).
 * Returns 0 on success, -EINVAL for bad arguments or a stopped VPU,
 * -EIO on either timeout.
 */
int vpu_ipi_send(struct platform_device *pdev,
		 enum ipi_id id, void *buf,
		 unsigned int len)
{
	struct mtk_vpu *vpu = platform_get_drvdata(pdev);
	struct share_obj *send_obj = vpu->send_buf;
	unsigned long timeout;
	int ret = 0;

	if (id <= IPI_VPU_INIT || id >= IPI_MAX ||
	    len > sizeof(send_obj->share_buf) || !buf) {
		dev_err(vpu->dev, "failed to send ipi message\n");
		return -EINVAL;
	}

	ret = vpu_clock_enable(vpu);
	if (ret) {
		dev_err(vpu->dev, "failed to enable vpu clock\n");
		return ret;
	}
	if (!vpu_running(vpu)) {
		dev_err(vpu->dev, "vpu_ipi_send: VPU is not running\n");
		ret = -EINVAL;
		goto clock_disable;
	}

	mutex_lock(&vpu->vpu_mutex);

	/* Wait until the VPU has consumed the previous message. */
	timeout = jiffies + msecs_to_jiffies(IPI_TIMEOUT_MS);
	do {
		if (time_after(jiffies, timeout)) {
			dev_err(vpu->dev, "vpu_ipi_send: IPI timeout!\n");
			ret = -EIO;
			goto mut_unlock;
		}
	} while (vpu_cfg_readl(vpu, HOST_TO_VPU));

	memcpy((void *)send_obj->share_buf, buf, len);
	send_obj->len = len;
	send_obj->id = id;

	vpu->ipi_id_ack[id] = false;
	/* Ring the doorbell: the VPU polls/interrupts on HOST_TO_VPU. */
	vpu_cfg_writel(vpu, 0x1, HOST_TO_VPU);

	mutex_unlock(&vpu->vpu_mutex);

	/* Wait for the VPU's acknowledgement, delivered via the irq path. */
	timeout = msecs_to_jiffies(IPI_TIMEOUT_MS);
	ret = wait_event_timeout(vpu->ack_wq, vpu->ipi_id_ack[id], timeout);
	vpu->ipi_id_ack[id] = false;
	if (ret == 0) {
		dev_err(vpu->dev, "vpu ipi %d ack time out !", id);
		ret = -EIO;
		goto clock_disable;
	}
	vpu_clock_disable(vpu);

	return 0;

mut_unlock:
	mutex_unlock(&vpu->vpu_mutex);
clock_disable:
	vpu_clock_disable(vpu);

	return ret;
}
EXPORT_SYMBOL_GPL(vpu_ipi_send);
367
368static void vpu_wdt_reset_func(struct work_struct *ws)
369{
370 struct vpu_wdt *wdt = container_of(ws, struct vpu_wdt, ws);
371 struct mtk_vpu *vpu = container_of(wdt, struct mtk_vpu, wdt);
372 struct vpu_wdt_handler *handler = wdt->handler;
373 int index, ret;
374
375 dev_info(vpu->dev, "vpu reset\n");
376 ret = vpu_clock_enable(vpu);
377 if (ret) {
378 dev_err(vpu->dev, "[VPU] wdt enables clock failed %d\n", ret);
379 return;
380 }
381 mutex_lock(&vpu->vpu_mutex);
382 vpu_cfg_writel(vpu, 0x0, VPU_RESET);
383 vpu->fw_loaded = false;
384 mutex_unlock(&vpu->vpu_mutex);
385 vpu_clock_disable(vpu);
386
387 for (index = 0; index < VPU_RST_MAX; index++) {
388 if (handler[index].reset_func) {
389 handler[index].reset_func(handler[index].priv);
390 dev_dbg(vpu->dev, "wdt handler func %d\n", index);
391 }
392 }
393}
394
395int vpu_wdt_reg_handler(struct platform_device *pdev,
396 void wdt_reset(void *),
397 void *priv, enum rst_id id)
398{
399 struct mtk_vpu *vpu = platform_get_drvdata(pdev);
400 struct vpu_wdt_handler *handler;
401
402 if (!vpu) {
403 dev_err(&pdev->dev, "vpu device in not ready\n");
404 return -EPROBE_DEFER;
405 }
406
407 handler = vpu->wdt.handler;
408
409 if (id >= 0 && id < VPU_RST_MAX && wdt_reset) {
410 dev_dbg(vpu->dev, "wdt register id %d\n", id);
411 mutex_lock(&vpu->vpu_mutex);
412 handler[id].reset_func = wdt_reset;
413 handler[id].priv = priv;
414 mutex_unlock(&vpu->vpu_mutex);
415 return 0;
416 }
417
418 dev_err(vpu->dev, "register vpu wdt handler failed\n");
419 return -EINVAL;
420}
421EXPORT_SYMBOL_GPL(vpu_wdt_reg_handler);
422
/* Return the decoder capability bits reported by the VPU at boot. */
unsigned int vpu_get_vdec_hw_capa(struct platform_device *pdev)
{
	struct mtk_vpu *vpu = platform_get_drvdata(pdev);

	return vpu->run.dec_capability;
}
EXPORT_SYMBOL_GPL(vpu_get_vdec_hw_capa);
430
/* Return the encoder capability bits reported by the VPU at boot. */
unsigned int vpu_get_venc_hw_capa(struct platform_device *pdev)
{
	struct mtk_vpu *vpu = platform_get_drvdata(pdev);

	return vpu->run.enc_capability;
}
EXPORT_SYMBOL_GPL(vpu_get_venc_hw_capa);
438
/*
 * vpu_mapping_dm_addr - translate a VPU data-memory address to a kernel VA
 * @pdev: VPU platform device
 * @dtcm_dmem_addr: address in the VPU's data-memory space
 *
 * Addresses below VPU_DTCM_SIZE map into the on-chip DTCM (inside the
 * "tcm" MMIO mapping, at VPU_DTCM_OFFSET); higher addresses map into the
 * extended data memory buffer.  Returns ERR_PTR(-EINVAL) for 0 or an
 * address beyond DTCM + extended memory.
 */
void *vpu_mapping_dm_addr(struct platform_device *pdev,
			  u32 dtcm_dmem_addr)
{
	struct mtk_vpu *vpu = platform_get_drvdata(pdev);

	if (!dtcm_dmem_addr ||
	    (dtcm_dmem_addr > (VPU_DTCM_SIZE + VPU_EXT_D_SIZE))) {
		dev_err(vpu->dev, "invalid virtual data memory address\n");
		return ERR_PTR(-EINVAL);
	}

	if (dtcm_dmem_addr < VPU_DTCM_SIZE)
		return (__force void *)(dtcm_dmem_addr + vpu->reg.tcm +
					VPU_DTCM_OFFSET);

	return vpu->extmem[D_FW].va + (dtcm_dmem_addr - VPU_DTCM_SIZE);
}
EXPORT_SYMBOL_GPL(vpu_mapping_dm_addr);
457
458struct platform_device *vpu_get_plat_device(struct platform_device *pdev)
459{
460 struct device *dev = &pdev->dev;
461 struct device_node *vpu_node;
462 struct platform_device *vpu_pdev;
463
464 vpu_node = of_parse_phandle(dev->of_node, "mediatek,vpu", 0);
465 if (!vpu_node) {
466 dev_err(dev, "can't get vpu node\n");
467 return NULL;
468 }
469
470 vpu_pdev = of_find_device_by_node(vpu_node);
471 if (WARN_ON(!vpu_pdev)) {
472 dev_err(dev, "vpu pdev failed\n");
473 of_node_put(vpu_node);
474 return NULL;
475 }
476
477 return vpu_pdev;
478}
479EXPORT_SYMBOL_GPL(vpu_get_plat_device);
480
481
/*
 * load_requested_vpu - fetch one firmware image and copy it to the VPU
 * @vpu: driver state
 * @vpu_fw: firmware handle (value is ignored; request_firmware() fills a
 *          local copy — the caller's pointer is not updated)
 * @fw_type: P_FW or D_FW, selects TCM region, size limits and file name
 *
 * The first tcm_size bytes go into the on-chip TCM (data images at
 * VPU_DTCM_OFFSET); any remainder goes into the extended memory buffer.
 * The VPU is put into reset before the copy.  Returns 0, a
 * request_firmware() error, or -EFBIG when the image exceeds the limit.
 */
static int load_requested_vpu(struct mtk_vpu *vpu,
			      const struct firmware *vpu_fw,
			      u8 fw_type)
{
	size_t tcm_size = fw_type ? VPU_DTCM_SIZE : VPU_PTCM_SIZE;
	size_t fw_size = fw_type ? VPU_D_FW_SIZE : VPU_P_FW_SIZE;
	char *fw_name = fw_type ? VPU_D_FW : VPU_P_FW;
	size_t dl_size = 0;
	size_t extra_fw_size = 0;
	void *dest;
	int ret;

	ret = request_firmware(&vpu_fw, fw_name, vpu->dev);
	if (ret < 0) {
		dev_err(vpu->dev, "Failed to load %s, %d\n", fw_name, ret);
		return ret;
	}
	dl_size = vpu_fw->size;
	if (dl_size > fw_size) {
		dev_err(vpu->dev, "fw %s size %zu is abnormal\n", fw_name,
			dl_size);
		release_firmware(vpu_fw);
		return -EFBIG;
	}
	dev_dbg(vpu->dev, "Downloaded fw %s size: %zu.\n",
		fw_name,
		dl_size);
	/* Hold the VPU in reset while its memory is rewritten. */
	vpu_cfg_writel(vpu, 0x0, VPU_RESET);

	/* Split the image: TCM part first, overflow into extended memory. */
	if (dl_size > tcm_size) {
		dev_dbg(vpu->dev, "fw size %zu > limited fw size %zu\n",
			dl_size, tcm_size);
		extra_fw_size = dl_size - tcm_size;
		dev_dbg(vpu->dev, "extra_fw_size %zu\n", extra_fw_size);
		dl_size = tcm_size;
	}
	dest = (__force void *)vpu->reg.tcm;
	if (fw_type == D_FW)
		dest += VPU_DTCM_OFFSET;
	memcpy(dest, vpu_fw->data, dl_size);
	/* Copy the rest (if any) into the DMA-coherent extended memory. */
	if (extra_fw_size > 0) {
		dest = vpu->extmem[fw_type].va;
		dev_dbg(vpu->dev, "download extended memory type %x\n",
			fw_type);
		memcpy(dest, vpu_fw->data + tcm_size, extra_fw_size);
	}

	release_firmware(vpu_fw);

	return 0;
}
536
537int vpu_load_firmware(struct platform_device *pdev)
538{
539 struct mtk_vpu *vpu;
540 struct device *dev = &pdev->dev;
541 struct vpu_run *run;
542 const struct firmware *vpu_fw = NULL;
543 int ret;
544
545 if (!pdev) {
546 dev_err(dev, "VPU platform device is invalid\n");
547 return -EINVAL;
548 }
549
550 vpu = platform_get_drvdata(pdev);
551 run = &vpu->run;
552
553 mutex_lock(&vpu->vpu_mutex);
554 if (vpu->fw_loaded) {
555 mutex_unlock(&vpu->vpu_mutex);
556 return 0;
557 }
558 mutex_unlock(&vpu->vpu_mutex);
559
560 ret = vpu_clock_enable(vpu);
561 if (ret) {
562 dev_err(dev, "enable clock failed %d\n", ret);
563 return ret;
564 }
565
566 mutex_lock(&vpu->vpu_mutex);
567
568 run->signaled = false;
569 dev_dbg(vpu->dev, "firmware request\n");
570
571 ret = load_requested_vpu(vpu, vpu_fw, P_FW);
572 if (ret < 0) {
573 dev_err(dev, "Failed to request %s, %d\n", VPU_P_FW, ret);
574 goto OUT_LOAD_FW;
575 }
576
577
578 ret = load_requested_vpu(vpu, vpu_fw, D_FW);
579 if (ret < 0) {
580 dev_err(dev, "Failed to request %s, %d\n", VPU_D_FW, ret);
581 goto OUT_LOAD_FW;
582 }
583
584 vpu->fw_loaded = true;
585
586 vpu_cfg_writel(vpu, 0x1, VPU_RESET);
587
588 ret = wait_event_interruptible_timeout(run->wq,
589 run->signaled,
590 msecs_to_jiffies(INIT_TIMEOUT_MS)
591 );
592 if (ret == 0) {
593 ret = -ETIME;
594 dev_err(dev, "wait vpu initialization timeout!\n");
595 goto OUT_LOAD_FW;
596 } else if (-ERESTARTSYS == ret) {
597 dev_err(dev, "wait vpu interrupted by a signal!\n");
598 goto OUT_LOAD_FW;
599 }
600
601 ret = 0;
602 dev_info(dev, "vpu is ready. Fw version %s\n", run->fw_ver);
603
604OUT_LOAD_FW:
605 mutex_unlock(&vpu->vpu_mutex);
606 vpu_clock_disable(vpu);
607
608 return ret;
609}
610EXPORT_SYMBOL_GPL(vpu_load_firmware);
611
612static void vpu_init_ipi_handler(void *data, unsigned int len, void *priv)
613{
614 struct mtk_vpu *vpu = (struct mtk_vpu *)priv;
615 struct vpu_run *run = (struct vpu_run *)data;
616
617 vpu->run.signaled = run->signaled;
618 strncpy(vpu->run.fw_ver, run->fw_ver, VPU_FW_VER_LEN);
619 vpu->run.dec_capability = run->dec_capability;
620 vpu->run.enc_capability = run->enc_capability;
621 wake_up_interruptible(&vpu->run.wq);
622}
623
624#ifdef CONFIG_DEBUG_FS
625static ssize_t vpu_debug_read(struct file *file, char __user *user_buf,
626 size_t count, loff_t *ppos)
627{
628 char buf[256];
629 unsigned int len;
630 unsigned int running, pc, vpu_to_host, host_to_vpu, wdt;
631 int ret;
632 struct device *dev = file->private_data;
633 struct mtk_vpu *vpu = dev_get_drvdata(dev);
634
635 ret = vpu_clock_enable(vpu);
636 if (ret) {
637 dev_err(vpu->dev, "[VPU] enable clock failed %d\n", ret);
638 return 0;
639 }
640
641
642 running = vpu_running(vpu);
643 pc = vpu_cfg_readl(vpu, VPU_PC_REG);
644 wdt = vpu_cfg_readl(vpu, VPU_WDT_REG);
645 host_to_vpu = vpu_cfg_readl(vpu, HOST_TO_VPU);
646 vpu_to_host = vpu_cfg_readl(vpu, VPU_TO_HOST);
647 vpu_clock_disable(vpu);
648
649 if (running) {
650 len = snprintf(buf, sizeof(buf), "VPU is running\n\n"
651 "FW Version: %s\n"
652 "PC: 0x%x\n"
653 "WDT: 0x%x\n"
654 "Host to VPU: 0x%x\n"
655 "VPU to Host: 0x%x\n",
656 vpu->run.fw_ver, pc, wdt,
657 host_to_vpu, vpu_to_host);
658 } else {
659 len = snprintf(buf, sizeof(buf), "VPU not running\n");
660 }
661
662 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
663}
664
/* Read-only debugfs interface backed by vpu_debug_read(). */
static const struct file_operations vpu_debug_fops = {
	.open = simple_open,
	.read = vpu_debug_read,
};
669#endif
670
/* Free the extended program (P_FW) or data (D_FW) memory buffer. */
static void vpu_free_ext_mem(struct mtk_vpu *vpu, u8 fw_type)
{
	struct device *dev = vpu->dev;
	size_t fw_ext_size = fw_type ? VPU_EXT_D_SIZE : VPU_EXT_P_SIZE;

	dma_free_coherent(dev, fw_ext_size, vpu->extmem[fw_type].va,
			  vpu->extmem[fw_type].pa);
}
679
/*
 * vpu_alloc_ext_mem - allocate the extended program/data memory and
 * program its address into the VPU
 * @vpu: driver state
 * @fw_type: P_FW or D_FW
 *
 * Allocates a DMA-coherent buffer and writes its (page-aligned) bus
 * address, shifted by 1 GiB when 4GB mode is active, into the matching
 * PMEM/DMEM EXT registers.  Returns 0 or -ENOMEM.
 */
static int vpu_alloc_ext_mem(struct mtk_vpu *vpu, u32 fw_type)
{
	struct device *dev = vpu->dev;
	size_t fw_ext_size = fw_type ? VPU_EXT_D_SIZE : VPU_EXT_P_SIZE;
	u32 vpu_ext_mem0 = fw_type ? VPU_DMEM_EXT0_ADDR : VPU_PMEM_EXT0_ADDR;
	u32 vpu_ext_mem1 = fw_type ? VPU_DMEM_EXT1_ADDR : VPU_PMEM_EXT1_ADDR;
	u32 offset_4gb = vpu->enable_4GB ? 0x40000000 : 0;

	vpu->extmem[fw_type].va = dma_alloc_coherent(dev,
						     fw_ext_size,
						     &vpu->extmem[fw_type].pa,
						     GFP_KERNEL);
	if (!vpu->extmem[fw_type].va) {
		dev_err(dev, "Failed to allocate the extended program memory\n");
		return -ENOMEM;
	}

	/* Disable extend0. Enable extend1 */
	vpu_cfg_writel(vpu, 0x1, vpu_ext_mem0);
	vpu_cfg_writel(vpu, (vpu->extmem[fw_type].pa & 0xFFFFF000) + offset_4gb,
		       vpu_ext_mem1);

	dev_info(dev, "%s extend memory phy=0x%llx virt=0x%p\n",
		 fw_type ? "Data" : "Program",
		 (unsigned long long)vpu->extmem[fw_type].pa,
		 vpu->extmem[fw_type].va);

	return 0;
}
709
710static void vpu_ipi_handler(struct mtk_vpu *vpu)
711{
712 struct share_obj *rcv_obj = vpu->recv_buf;
713 struct vpu_ipi_desc *ipi_desc = vpu->ipi_desc;
714
715 if (rcv_obj->id < IPI_MAX && ipi_desc[rcv_obj->id].handler) {
716 ipi_desc[rcv_obj->id].handler(rcv_obj->share_buf,
717 rcv_obj->len,
718 ipi_desc[rcv_obj->id].priv);
719 if (rcv_obj->id > IPI_VPU_INIT) {
720 vpu->ipi_id_ack[rcv_obj->id] = true;
721 wake_up(&vpu->ack_wq);
722 }
723 } else {
724 dev_err(vpu->dev, "No such ipi id = %d\n", rcv_obj->id);
725 }
726}
727
/*
 * Set up the IPI mailbox: clear any pending VPU-to-host interrupt and
 * place the receive/send share buffers back-to-back at the start of
 * the DTCM, zeroed.
 */
static int vpu_ipi_init(struct mtk_vpu *vpu)
{
	/* Disable VPU to host interrupt */
	vpu_cfg_writel(vpu, 0x0, VPU_TO_HOST);

	/* shared buffer initialization */
	vpu->recv_buf = (__force struct share_obj *)(vpu->reg.tcm +
						     VPU_DTCM_OFFSET);
	vpu->send_buf = vpu->recv_buf + 1;
	memset(vpu->recv_buf, 0, sizeof(struct share_obj));
	memset(vpu->send_buf, 0, sizeof(struct share_obj));

	return 0;
}
742
/*
 * VPU-to-host interrupt handler.
 *
 * Reads VPU_TO_HOST to distinguish an IPC message (VPU_IPC_INT set,
 * dispatched to vpu_ipi_handler()) from a watchdog timeout (handled by
 * queueing the wdt reset work), then acknowledges by clearing
 * VPU_TO_HOST.
 */
static irqreturn_t vpu_irq_handler(int irq, void *priv)
{
	struct mtk_vpu *vpu = priv;
	u32 vpu_to_host;
	int ret;

	/*
	 * NOTE(review): only the enable step is done here; the clock was
	 * prepared once at probe so this is presumably safe in interrupt
	 * context — confirm against the clk implementation.
	 */
	ret = clk_enable(vpu->clk);
	if (ret) {
		dev_err(vpu->dev, "[VPU] enable clock failed %d\n", ret);
		return IRQ_NONE;
	}
	vpu_to_host = vpu_cfg_readl(vpu, VPU_TO_HOST);
	if (vpu_to_host & VPU_IPC_INT) {
		vpu_ipi_handler(vpu);
	} else {
		dev_err(vpu->dev, "vpu watchdog timeout! 0x%x", vpu_to_host);
		queue_work(vpu->wdt.wq, &vpu->wdt.ws);
	}

	/* VPU won't send another interrupt until we clear VPU_TO_HOST. */
	vpu_cfg_writel(vpu, 0x0, VPU_TO_HOST);
	clk_disable(vpu->clk);

	return IRQ_HANDLED;
}
773
#ifdef CONFIG_DEBUG_FS
/* debugfs node "mtk_vpu"; created in probe, removed in probe-error/remove. */
static struct dentry *vpu_debugfs;
#endif
777static int mtk_vpu_probe(struct platform_device *pdev)
778{
779 struct mtk_vpu *vpu;
780 struct device *dev;
781 struct resource *res;
782 int ret = 0;
783
784 dev_dbg(&pdev->dev, "initialization\n");
785
786 dev = &pdev->dev;
787 vpu = devm_kzalloc(dev, sizeof(*vpu), GFP_KERNEL);
788 if (!vpu)
789 return -ENOMEM;
790
791 vpu->dev = &pdev->dev;
792 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcm");
793 vpu->reg.tcm = devm_ioremap_resource(dev, res);
794 if (IS_ERR((__force void *)vpu->reg.tcm))
795 return PTR_ERR((__force void *)vpu->reg.tcm);
796
797 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_reg");
798 vpu->reg.cfg = devm_ioremap_resource(dev, res);
799 if (IS_ERR((__force void *)vpu->reg.cfg))
800 return PTR_ERR((__force void *)vpu->reg.cfg);
801
802
803 vpu->clk = devm_clk_get(dev, "main");
804 if (IS_ERR(vpu->clk)) {
805 dev_err(dev, "get vpu clock failed\n");
806 return PTR_ERR(vpu->clk);
807 }
808
809 platform_set_drvdata(pdev, vpu);
810
811 ret = clk_prepare(vpu->clk);
812 if (ret) {
813 dev_err(dev, "prepare vpu clock failed\n");
814 return ret;
815 }
816
817
818 vpu->wdt.wq = create_singlethread_workqueue("vpu_wdt");
819 if (!vpu->wdt.wq) {
820 dev_err(dev, "initialize wdt workqueue failed\n");
821 return -ENOMEM;
822 }
823 INIT_WORK(&vpu->wdt.ws, vpu_wdt_reset_func);
824 mutex_init(&vpu->vpu_mutex);
825
826 ret = vpu_clock_enable(vpu);
827 if (ret) {
828 dev_err(dev, "enable vpu clock failed\n");
829 goto workqueue_destroy;
830 }
831
832 dev_dbg(dev, "vpu ipi init\n");
833 ret = vpu_ipi_init(vpu);
834 if (ret) {
835 dev_err(dev, "Failed to init ipi\n");
836 goto disable_vpu_clk;
837 }
838
839
840 ret = vpu_ipi_register(pdev, IPI_VPU_INIT, vpu_init_ipi_handler,
841 "vpu_init", vpu);
842 if (ret) {
843 dev_err(dev, "Failed to register IPI_VPU_INIT\n");
844 goto vpu_mutex_destroy;
845 }
846
847#ifdef CONFIG_DEBUG_FS
848 vpu_debugfs = debugfs_create_file("mtk_vpu", S_IRUGO, NULL, (void *)dev,
849 &vpu_debug_fops);
850 if (!vpu_debugfs) {
851 ret = -ENOMEM;
852 goto cleanup_ipi;
853 }
854#endif
855
856
857 vpu_cfg_writel(vpu, 0x2, VPU_TCM_CFG);
858
859 vpu->enable_4GB = !!(totalram_pages > (SZ_2G >> PAGE_SHIFT));
860 dev_info(dev, "4GB mode %u\n", vpu->enable_4GB);
861
862 if (vpu->enable_4GB) {
863 ret = of_reserved_mem_device_init(dev);
864 if (ret)
865 dev_info(dev, "init reserved memory failed\n");
866
867 }
868
869 ret = vpu_alloc_ext_mem(vpu, D_FW);
870 if (ret) {
871 dev_err(dev, "Allocate DM failed\n");
872 goto remove_debugfs;
873 }
874
875 ret = vpu_alloc_ext_mem(vpu, P_FW);
876 if (ret) {
877 dev_err(dev, "Allocate PM failed\n");
878 goto free_d_mem;
879 }
880
881 init_waitqueue_head(&vpu->run.wq);
882 init_waitqueue_head(&vpu->ack_wq);
883
884 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
885 if (!res) {
886 dev_err(dev, "get IRQ resource failed.\n");
887 ret = -ENXIO;
888 goto free_p_mem;
889 }
890 vpu->reg.irq = platform_get_irq(pdev, 0);
891 ret = devm_request_irq(dev, vpu->reg.irq, vpu_irq_handler, 0,
892 pdev->name, vpu);
893 if (ret) {
894 dev_err(dev, "failed to request irq\n");
895 goto free_p_mem;
896 }
897
898 vpu_clock_disable(vpu);
899 dev_dbg(dev, "initialization completed\n");
900
901 return 0;
902
903free_p_mem:
904 vpu_free_ext_mem(vpu, P_FW);
905free_d_mem:
906 vpu_free_ext_mem(vpu, D_FW);
907remove_debugfs:
908 of_reserved_mem_device_release(dev);
909#ifdef CONFIG_DEBUG_FS
910 debugfs_remove(vpu_debugfs);
911cleanup_ipi:
912#endif
913 memset(vpu->ipi_desc, 0, sizeof(struct vpu_ipi_desc) * IPI_MAX);
914vpu_mutex_destroy:
915 mutex_destroy(&vpu->vpu_mutex);
916disable_vpu_clk:
917 vpu_clock_disable(vpu);
918workqueue_destroy:
919 destroy_workqueue(vpu->wdt.wq);
920
921 return ret;
922}
923
/* Device-tree match table: MT8173 is the only supported SoC here. */
static const struct of_device_id mtk_vpu_match[] = {
	{
		.compatible = "mediatek,mt8173-vpu",
	},
	{},
};
MODULE_DEVICE_TABLE(of, mtk_vpu_match);
931
/*
 * Driver removal: tear down debugfs, drain and destroy the watchdog
 * workqueue, free both extended memories and unprepare the clock.
 */
static int mtk_vpu_remove(struct platform_device *pdev)
{
	struct mtk_vpu *vpu = platform_get_drvdata(pdev);

#ifdef CONFIG_DEBUG_FS
	debugfs_remove(vpu_debugfs);
#endif
	if (vpu->wdt.wq) {
		/* make sure no reset work is still running */
		flush_workqueue(vpu->wdt.wq);
		destroy_workqueue(vpu->wdt.wq);
	}
	vpu_free_ext_mem(vpu, P_FW);
	vpu_free_ext_mem(vpu, D_FW);
	mutex_destroy(&vpu->vpu_mutex);
	clk_unprepare(vpu->clk);

	return 0;
}
950
/* Platform driver glue for the MT8173 VPU. */
static struct platform_driver mtk_vpu_driver = {
	.probe	= mtk_vpu_probe,
	.remove	= mtk_vpu_remove,
	.driver	= {
		.name	= "mtk_vpu",
		.of_match_table = mtk_vpu_match,
	},
};
959
960module_platform_driver(mtk_vpu_driver);
961
962MODULE_LICENSE("GPL v2");
963MODULE_DESCRIPTION("Mediatek Video Prosessor Unit driver");
964