// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016 MediaTek Inc.
 * Author: Houlong Wei <houlong.wei@mediatek.com>
 */
8#include <linux/clk.h>
9#include <linux/device.h>
10#include <linux/errno.h>
11#include <linux/interrupt.h>
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/of.h>
15#include <linux/of_address.h>
16#include <linux/of_platform.h>
17#include <linux/platform_device.h>
18#include <linux/pm_runtime.h>
19#include <linux/workqueue.h>
20#include <soc/mediatek/smi.h>
21
22#include "mtk_mdp_core.h"
23#include "mtk_mdp_m2m.h"
24#include "mtk_vpu.h"
25
26
/* Debug verbosity for the mtk_mdp_dbg() macros; tunable via module param. */
int mtk_mdp_dbg_level;
EXPORT_SYMBOL(mtk_mdp_dbg_level);

module_param(mtk_mdp_dbg_level, int, 0644);
31
/*
 * Match table for the MDP hardware sub-blocks found in the device tree.
 * The .data field carries the component type handed to mtk_mdp_comp_init().
 */
static const struct of_device_id mtk_mdp_comp_dt_ids[] = {
	{
		.compatible = "mediatek,mt8173-mdp-rdma",
		.data = (void *)MTK_MDP_RDMA
	}, {
		.compatible = "mediatek,mt8173-mdp-rsz",
		.data = (void *)MTK_MDP_RSZ
	}, {
		.compatible = "mediatek,mt8173-mdp-wdma",
		.data = (void *)MTK_MDP_WDMA
	}, {
		.compatible = "mediatek,mt8173-mdp-wrot",
		.data = (void *)MTK_MDP_WROT
	},
	{ },
};
48
/* Match table for the top-level MDP device node. */
static const struct of_device_id mtk_mdp_of_ids[] = {
	{ .compatible = "mediatek,mt8173-mdp", },
	{ },
};
MODULE_DEVICE_TABLE(of, mtk_mdp_of_ids);
54
55static void mtk_mdp_clock_on(struct mtk_mdp_dev *mdp)
56{
57 struct device *dev = &mdp->pdev->dev;
58 struct mtk_mdp_comp *comp_node;
59
60 list_for_each_entry(comp_node, &mdp->comp_list, node)
61 mtk_mdp_comp_clock_on(dev, comp_node);
62}
63
64static void mtk_mdp_clock_off(struct mtk_mdp_dev *mdp)
65{
66 struct device *dev = &mdp->pdev->dev;
67 struct mtk_mdp_comp *comp_node;
68
69 list_for_each_entry(comp_node, &mdp->comp_list, node)
70 mtk_mdp_comp_clock_off(dev, comp_node);
71}
72
73static void mtk_mdp_wdt_worker(struct work_struct *work)
74{
75 struct mtk_mdp_dev *mdp =
76 container_of(work, struct mtk_mdp_dev, wdt_work);
77 struct mtk_mdp_ctx *ctx;
78
79 mtk_mdp_err("Watchdog timeout");
80
81 list_for_each_entry(ctx, &mdp->ctx_list, list) {
82 mtk_mdp_dbg(0, "[%d] Change as state error", ctx->id);
83 mtk_mdp_ctx_state_lock_set(ctx, MTK_MDP_CTX_ERROR);
84 }
85}
86
87static void mtk_mdp_reset_handler(void *priv)
88{
89 struct mtk_mdp_dev *mdp = priv;
90
91 queue_work(mdp->wdt_wq, &mdp->wdt_work);
92}
93
/* Add @comp to the device's component list. No locking; probe-time only. */
void mtk_mdp_register_component(struct mtk_mdp_dev *mdp,
				struct mtk_mdp_comp *comp)
{
	list_add(&comp->node, &mdp->comp_list);
}
99
/* Remove @comp from the device's component list. Counterpart of register. */
void mtk_mdp_unregister_component(struct mtk_mdp_dev *mdp,
				  struct mtk_mdp_comp *comp)
{
	list_del(&comp->node);
}
105
106static int mtk_mdp_probe(struct platform_device *pdev)
107{
108 struct mtk_mdp_dev *mdp;
109 struct device *dev = &pdev->dev;
110 struct device_node *node, *parent;
111 struct mtk_mdp_comp *comp, *comp_temp;
112 int ret = 0;
113
114 mdp = devm_kzalloc(dev, sizeof(*mdp), GFP_KERNEL);
115 if (!mdp)
116 return -ENOMEM;
117
118 mdp->id = pdev->id;
119 mdp->pdev = pdev;
120 INIT_LIST_HEAD(&mdp->comp_list);
121 INIT_LIST_HEAD(&mdp->ctx_list);
122
123 mutex_init(&mdp->lock);
124 mutex_init(&mdp->vpulock);
125
126
127 node = of_get_next_child(dev->of_node, NULL);
128 if (node) {
129 of_node_put(node);
130 parent = dev->of_node;
131 dev_warn(dev, "device tree is out of date\n");
132 } else {
133 parent = dev->of_node->parent;
134 }
135
136
137 for_each_child_of_node(parent, node) {
138 const struct of_device_id *of_id;
139 enum mtk_mdp_comp_type comp_type;
140
141 of_id = of_match_node(mtk_mdp_comp_dt_ids, node);
142 if (!of_id)
143 continue;
144
145 if (!of_device_is_available(node)) {
146 dev_err(dev, "Skipping disabled component %pOF\n",
147 node);
148 continue;
149 }
150
151 comp_type = (enum mtk_mdp_comp_type)of_id->data;
152
153 comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
154 if (!comp) {
155 ret = -ENOMEM;
156 of_node_put(node);
157 goto err_comp;
158 }
159
160 ret = mtk_mdp_comp_init(dev, node, comp, comp_type);
161 if (ret) {
162 of_node_put(node);
163 goto err_comp;
164 }
165
166 mtk_mdp_register_component(mdp, comp);
167 }
168
169 mdp->job_wq = create_singlethread_workqueue(MTK_MDP_MODULE_NAME);
170 if (!mdp->job_wq) {
171 dev_err(&pdev->dev, "unable to alloc job workqueue\n");
172 ret = -ENOMEM;
173 goto err_alloc_job_wq;
174 }
175
176 mdp->wdt_wq = create_singlethread_workqueue("mdp_wdt_wq");
177 if (!mdp->wdt_wq) {
178 dev_err(&pdev->dev, "unable to alloc wdt workqueue\n");
179 ret = -ENOMEM;
180 goto err_alloc_wdt_wq;
181 }
182 INIT_WORK(&mdp->wdt_work, mtk_mdp_wdt_worker);
183
184 ret = v4l2_device_register(dev, &mdp->v4l2_dev);
185 if (ret) {
186 dev_err(&pdev->dev, "Failed to register v4l2 device\n");
187 ret = -EINVAL;
188 goto err_dev_register;
189 }
190
191 ret = mtk_mdp_register_m2m_device(mdp);
192 if (ret) {
193 v4l2_err(&mdp->v4l2_dev, "Failed to init mem2mem device\n");
194 goto err_m2m_register;
195 }
196
197 mdp->vpu_dev = vpu_get_plat_device(pdev);
198 ret = vpu_wdt_reg_handler(mdp->vpu_dev, mtk_mdp_reset_handler, mdp,
199 VPU_RST_MDP);
200 if (ret) {
201 dev_err(&pdev->dev, "Failed to register reset handler\n");
202 goto err_m2m_register;
203 }
204
205 platform_set_drvdata(pdev, mdp);
206
207 ret = vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
208 if (ret) {
209 dev_err(&pdev->dev, "Failed to set vb2 dma mag seg size\n");
210 goto err_m2m_register;
211 }
212
213 pm_runtime_enable(dev);
214 dev_dbg(dev, "mdp-%d registered successfully\n", mdp->id);
215
216 return 0;
217
218err_m2m_register:
219 v4l2_device_unregister(&mdp->v4l2_dev);
220
221err_dev_register:
222 destroy_workqueue(mdp->wdt_wq);
223
224err_alloc_wdt_wq:
225 destroy_workqueue(mdp->job_wq);
226
227err_alloc_job_wq:
228
229err_comp:
230 list_for_each_entry_safe(comp, comp_temp, &mdp->comp_list, node) {
231 mtk_mdp_unregister_component(mdp, comp);
232 mtk_mdp_comp_deinit(dev, comp);
233 }
234
235 dev_dbg(dev, "err %d\n", ret);
236 return ret;
237}
238
239static int mtk_mdp_remove(struct platform_device *pdev)
240{
241 struct mtk_mdp_dev *mdp = platform_get_drvdata(pdev);
242 struct mtk_mdp_comp *comp, *comp_temp;
243
244 pm_runtime_disable(&pdev->dev);
245 vb2_dma_contig_clear_max_seg_size(&pdev->dev);
246 mtk_mdp_unregister_m2m_device(mdp);
247 v4l2_device_unregister(&mdp->v4l2_dev);
248
249 flush_workqueue(mdp->wdt_wq);
250 destroy_workqueue(mdp->wdt_wq);
251
252 flush_workqueue(mdp->job_wq);
253 destroy_workqueue(mdp->job_wq);
254
255 list_for_each_entry_safe(comp, comp_temp, &mdp->comp_list, node) {
256 mtk_mdp_unregister_component(mdp, comp);
257 mtk_mdp_comp_deinit(&pdev->dev, comp);
258 }
259
260 dev_dbg(&pdev->dev, "%s driver unloaded\n", pdev->name);
261 return 0;
262}
263
264static int __maybe_unused mtk_mdp_pm_suspend(struct device *dev)
265{
266 struct mtk_mdp_dev *mdp = dev_get_drvdata(dev);
267
268 mtk_mdp_clock_off(mdp);
269
270 return 0;
271}
272
273static int __maybe_unused mtk_mdp_pm_resume(struct device *dev)
274{
275 struct mtk_mdp_dev *mdp = dev_get_drvdata(dev);
276
277 mtk_mdp_clock_on(mdp);
278
279 return 0;
280}
281
282static int __maybe_unused mtk_mdp_suspend(struct device *dev)
283{
284 if (pm_runtime_suspended(dev))
285 return 0;
286
287 return mtk_mdp_pm_suspend(dev);
288}
289
290static int __maybe_unused mtk_mdp_resume(struct device *dev)
291{
292 if (pm_runtime_suspended(dev))
293 return 0;
294
295 return mtk_mdp_pm_resume(dev);
296}
297
/* System sleep reuses the runtime PM handlers via the wrappers above. */
static const struct dev_pm_ops mtk_mdp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_mdp_suspend, mtk_mdp_resume)
	SET_RUNTIME_PM_OPS(mtk_mdp_pm_suspend, mtk_mdp_pm_resume, NULL)
};
302
/* Platform driver glue for the MDP master device. */
static struct platform_driver mtk_mdp_driver = {
	.probe		= mtk_mdp_probe,
	.remove		= mtk_mdp_remove,
	.driver = {
		.name	= MTK_MDP_MODULE_NAME,
		.pm	= &mtk_mdp_pm_ops,
		.of_match_table = mtk_mdp_of_ids,
	}
};
312
/* Standard module init/exit boilerplate generated by the macro below. */
module_platform_driver(mtk_mdp_driver);

MODULE_AUTHOR("Houlong Wei <houlong.wei@mediatek.com>");
MODULE_DESCRIPTION("Mediatek image processor driver");
MODULE_LICENSE("GPL v2");
318