1
2
3
4
5
6
7
8#include <linux/clk.h>
9#include <linux/interrupt.h>
10#include <linux/platform_device.h>
11#include <linux/pm_runtime.h>
12#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
13#include <linux/seq_file.h>
14#endif
15
16#include "hva.h"
17#include "hva-hw.h"
18
19
20#define HVA_HIF_REG_RST 0x0100U
21#define HVA_HIF_REG_RST_ACK 0x0104U
22#define HVA_HIF_REG_MIF_CFG 0x0108U
23#define HVA_HIF_REG_HEC_MIF_CFG 0x010CU
24#define HVA_HIF_REG_CFL 0x0110U
25#define HVA_HIF_FIFO_CMD 0x0114U
26#define HVA_HIF_FIFO_STS 0x0118U
27#define HVA_HIF_REG_SFL 0x011CU
28#define HVA_HIF_REG_IT_ACK 0x0120U
29#define HVA_HIF_REG_ERR_IT_ACK 0x0124U
30#define HVA_HIF_REG_LMI_ERR 0x0128U
31#define HVA_HIF_REG_EMI_ERR 0x012CU
32#define HVA_HIF_REG_HEC_MIF_ERR 0x0130U
33#define HVA_HIF_REG_HEC_STS 0x0134U
34#define HVA_HIF_REG_HVC_STS 0x0138U
35#define HVA_HIF_REG_HJE_STS 0x013CU
36#define HVA_HIF_REG_CNT 0x0140U
37#define HVA_HIF_REG_HEC_CHKSYN_DIS 0x0144U
38#define HVA_HIF_REG_CLK_GATING 0x0148U
39#define HVA_HIF_REG_VERSION 0x014CU
40#define HVA_HIF_REG_BSM 0x0150U
41
42
43#define VERSION_ID_MASK 0x0000FFFF
44
45
46#define BSM_CFG_VAL1 0x0003F000
47#define BSM_CFG_VAL2 0x003F0000
48
49
50#define MIF_CFG_VAL1 0x04460446
51#define MIF_CFG_VAL2 0x04460806
52#define MIF_CFG_VAL3 0x00000000
53
54
55#define HEC_MIF_CFG_VAL 0x000000C4
56
57
58#define CLK_GATING_HVC BIT(0)
59#define CLK_GATING_HEC BIT(1)
60#define CLK_GATING_HJE BIT(2)
61
62
63#define CLK_RATE 300000000
64
65
66#define AUTOSUSPEND_DELAY_MS 3
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
/*
 * Status codes reported by the HVA in the low byte of the status FIFO word.
 * Values H264_FRAME_SKIPPED and H264_SLICE_READY are treated as successful
 * completions by hva_hw_its_irq_thread(); all other non-zero codes flag a
 * hardware error on the owning context.
 */
enum hva_hw_error {
	NO_ERROR = 0x0,
	H264_BITSTREAM_OVERSIZE = 0x2,
	H264_FRAME_SKIPPED = 0x4,		/* not an error: frame dropped */
	H264_SLICE_LIMIT_SIZE = 0x5,
	H264_MAX_SLICE_NUMBER = 0x7,
	H264_SLICE_READY = 0x8,			/* not an error: slice done */
	TASK_LIST_FULL = 0xF0,
	UNKNOWN_COMMAND = 0xF1,
	WRONG_CODEC_OR_RESOLUTION = 0xF4,
	/*
	 * NOTE(review): codes >= 0x100 do not fit in the 8-bit status byte
	 * decoded by the interrupt thread -- presumably driver-internal
	 * values matching the error IRQ registers; confirm against hva.h
	 * users.
	 */
	NO_INT_COMPLETION = 0x100,
	LMI_ERR = 0x101,
	EMI_ERR = 0x102,
	HECMI_ERR = 0x103,
};
100
/*
 * Hard-IRQ half of the status interrupt: latch the status FIFO word and
 * the FIFO level into the device structure for the threaded handler,
 * acknowledge the interrupt, then defer to hva_hw_its_irq_thread().
 */
static irqreturn_t hva_hw_its_interrupt(int irq, void *data)
{
	struct hva_dev *hva = data;

	/* read status registers */
	hva->sts_reg = readl_relaxed(hva->regs + HVA_HIF_FIFO_STS);
	hva->sfl_reg = readl_relaxed(hva->regs + HVA_HIF_REG_SFL);

	/* acknowledge interrupt */
	writel_relaxed(0x1, hva->regs + HVA_HIF_REG_IT_ACK);

	return IRQ_WAKE_THREAD;
}
114
115static irqreturn_t hva_hw_its_irq_thread(int irq, void *arg)
116{
117 struct hva_dev *hva = arg;
118 struct device *dev = hva_to_dev(hva);
119 u32 status = hva->sts_reg & 0xFF;
120 u8 ctx_id = 0;
121 struct hva_ctx *ctx = NULL;
122
123 dev_dbg(dev, "%s %s: status: 0x%02x fifo level: 0x%02x\n",
124 HVA_PREFIX, __func__, hva->sts_reg & 0xFF, hva->sfl_reg & 0xF);
125
126
127
128
129
130 ctx_id = (hva->sts_reg & 0xFF00) >> 8;
131 if (ctx_id >= HVA_MAX_INSTANCES) {
132 dev_err(dev, "%s %s: bad context identifier: %d\n",
133 ctx->name, __func__, ctx_id);
134 ctx->hw_err = true;
135 goto out;
136 }
137
138 ctx = hva->instances[ctx_id];
139 if (!ctx)
140 goto out;
141
142 switch (status) {
143 case NO_ERROR:
144 dev_dbg(dev, "%s %s: no error\n",
145 ctx->name, __func__);
146 ctx->hw_err = false;
147 break;
148 case H264_SLICE_READY:
149 dev_dbg(dev, "%s %s: h264 slice ready\n",
150 ctx->name, __func__);
151 ctx->hw_err = false;
152 break;
153 case H264_FRAME_SKIPPED:
154 dev_dbg(dev, "%s %s: h264 frame skipped\n",
155 ctx->name, __func__);
156 ctx->hw_err = false;
157 break;
158 case H264_BITSTREAM_OVERSIZE:
159 dev_err(dev, "%s %s:h264 bitstream oversize\n",
160 ctx->name, __func__);
161 ctx->hw_err = true;
162 break;
163 case H264_SLICE_LIMIT_SIZE:
164 dev_err(dev, "%s %s: h264 slice limit size is reached\n",
165 ctx->name, __func__);
166 ctx->hw_err = true;
167 break;
168 case H264_MAX_SLICE_NUMBER:
169 dev_err(dev, "%s %s: h264 max slice number is reached\n",
170 ctx->name, __func__);
171 ctx->hw_err = true;
172 break;
173 case TASK_LIST_FULL:
174 dev_err(dev, "%s %s:task list full\n",
175 ctx->name, __func__);
176 ctx->hw_err = true;
177 break;
178 case UNKNOWN_COMMAND:
179 dev_err(dev, "%s %s: command not known\n",
180 ctx->name, __func__);
181 ctx->hw_err = true;
182 break;
183 case WRONG_CODEC_OR_RESOLUTION:
184 dev_err(dev, "%s %s: wrong codec or resolution\n",
185 ctx->name, __func__);
186 ctx->hw_err = true;
187 break;
188 default:
189 dev_err(dev, "%s %s: status not recognized\n",
190 ctx->name, __func__);
191 ctx->hw_err = true;
192 break;
193 }
194out:
195 complete(&hva->interrupt);
196
197 return IRQ_HANDLED;
198}
199
/*
 * Hard-IRQ half of the error interrupt: latch the status word, FIFO level
 * and the three memory-interface error registers for the threaded handler,
 * acknowledge the interrupt, then defer to hva_hw_err_irq_thread().
 *
 * NOTE(review): the acknowledge is written to HVA_HIF_REG_IT_ACK although
 * a dedicated HVA_HIF_REG_ERR_IT_ACK register is defined above and never
 * used -- confirm against the HVA datasheet which register clears this
 * interrupt line.
 */
static irqreturn_t hva_hw_err_interrupt(int irq, void *data)
{
	struct hva_dev *hva = data;

	/* read status registers */
	hva->sts_reg = readl_relaxed(hva->regs + HVA_HIF_FIFO_STS);
	hva->sfl_reg = readl_relaxed(hva->regs + HVA_HIF_REG_SFL);

	/* read error registers */
	hva->lmi_err_reg = readl_relaxed(hva->regs + HVA_HIF_REG_LMI_ERR);
	hva->emi_err_reg = readl_relaxed(hva->regs + HVA_HIF_REG_EMI_ERR);
	hva->hec_mif_err_reg = readl_relaxed(hva->regs +
					     HVA_HIF_REG_HEC_MIF_ERR);

	/* acknowledge interrupt */
	writel_relaxed(0x1, hva->regs + HVA_HIF_REG_IT_ACK);

	return IRQ_WAKE_THREAD;
}
219
220static irqreturn_t hva_hw_err_irq_thread(int irq, void *arg)
221{
222 struct hva_dev *hva = arg;
223 struct device *dev = hva_to_dev(hva);
224 u8 ctx_id = 0;
225 struct hva_ctx *ctx;
226
227 dev_dbg(dev, "%s status: 0x%02x fifo level: 0x%02x\n",
228 HVA_PREFIX, hva->sts_reg & 0xFF, hva->sfl_reg & 0xF);
229
230
231
232
233
234 ctx_id = (hva->sts_reg & 0xFF00) >> 8;
235 if (ctx_id >= HVA_MAX_INSTANCES) {
236 dev_err(dev, "%s bad context identifier: %d\n", HVA_PREFIX,
237 ctx_id);
238 goto out;
239 }
240
241 ctx = hva->instances[ctx_id];
242 if (!ctx)
243 goto out;
244
245 if (hva->lmi_err_reg) {
246 dev_err(dev, "%s local memory interface error: 0x%08x\n",
247 ctx->name, hva->lmi_err_reg);
248 ctx->hw_err = true;
249 }
250
251 if (hva->emi_err_reg) {
252 dev_err(dev, "%s external memory interface error: 0x%08x\n",
253 ctx->name, hva->emi_err_reg);
254 ctx->hw_err = true;
255 }
256
257 if (hva->hec_mif_err_reg) {
258 dev_err(dev, "%s hec memory interface error: 0x%08x\n",
259 ctx->name, hva->hec_mif_err_reg);
260 ctx->hw_err = true;
261 }
262out:
263 complete(&hva->interrupt);
264
265 return IRQ_HANDLED;
266}
267
268static unsigned long int hva_hw_get_ip_version(struct hva_dev *hva)
269{
270 struct device *dev = hva_to_dev(hva);
271 unsigned long int version;
272
273 if (pm_runtime_get_sync(dev) < 0) {
274 dev_err(dev, "%s failed to get pm_runtime\n", HVA_PREFIX);
275 mutex_unlock(&hva->protect_mutex);
276 return -EFAULT;
277 }
278
279 version = readl_relaxed(hva->regs + HVA_HIF_REG_VERSION) &
280 VERSION_ID_MASK;
281
282 pm_runtime_put_autosuspend(dev);
283
284 switch (version) {
285 case HVA_VERSION_V400:
286 dev_dbg(dev, "%s IP hardware version 0x%lx\n",
287 HVA_PREFIX, version);
288 break;
289 default:
290 dev_err(dev, "%s unknown IP hardware version 0x%lx\n",
291 HVA_PREFIX, version);
292 version = HVA_VERSION_UNKNOWN;
293 break;
294 }
295
296 return version;
297}
298
299int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva)
300{
301 struct device *dev = &pdev->dev;
302 struct resource *regs;
303 struct resource *esram;
304 int ret;
305
306 WARN_ON(!hva);
307
308
309 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
310 hva->regs = devm_ioremap_resource(dev, regs);
311 if (IS_ERR(hva->regs)) {
312 dev_err(dev, "%s failed to get regs\n", HVA_PREFIX);
313 return PTR_ERR(hva->regs);
314 }
315
316
317 esram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
318 if (!esram) {
319 dev_err(dev, "%s failed to get esram\n", HVA_PREFIX);
320 return -ENODEV;
321 }
322 hva->esram_addr = esram->start;
323 hva->esram_size = resource_size(esram);
324
325 dev_info(dev, "%s esram reserved for address: 0x%x size:%d\n",
326 HVA_PREFIX, hva->esram_addr, hva->esram_size);
327
328
329 hva->clk = devm_clk_get(dev, "clk_hva");
330 if (IS_ERR(hva->clk)) {
331 dev_err(dev, "%s failed to get clock\n", HVA_PREFIX);
332 return PTR_ERR(hva->clk);
333 }
334
335 ret = clk_prepare(hva->clk);
336 if (ret < 0) {
337 dev_err(dev, "%s failed to prepare clock\n", HVA_PREFIX);
338 hva->clk = ERR_PTR(-EINVAL);
339 return ret;
340 }
341
342
343 ret = platform_get_irq(pdev, 0);
344 if (ret < 0) {
345 dev_err(dev, "%s failed to get status IRQ\n", HVA_PREFIX);
346 goto err_clk;
347 }
348 hva->irq_its = ret;
349
350 ret = devm_request_threaded_irq(dev, hva->irq_its, hva_hw_its_interrupt,
351 hva_hw_its_irq_thread,
352 IRQF_ONESHOT,
353 "hva_its_irq", hva);
354 if (ret) {
355 dev_err(dev, "%s failed to install status IRQ 0x%x\n",
356 HVA_PREFIX, hva->irq_its);
357 goto err_clk;
358 }
359 disable_irq(hva->irq_its);
360
361
362 ret = platform_get_irq(pdev, 1);
363 if (ret < 0) {
364 dev_err(dev, "%s failed to get error IRQ\n", HVA_PREFIX);
365 goto err_clk;
366 }
367 hva->irq_err = ret;
368
369 ret = devm_request_threaded_irq(dev, hva->irq_err, hva_hw_err_interrupt,
370 hva_hw_err_irq_thread,
371 IRQF_ONESHOT,
372 "hva_err_irq", hva);
373 if (ret) {
374 dev_err(dev, "%s failed to install error IRQ 0x%x\n",
375 HVA_PREFIX, hva->irq_err);
376 goto err_clk;
377 }
378 disable_irq(hva->irq_err);
379
380
381 mutex_init(&hva->protect_mutex);
382
383
384 init_completion(&hva->interrupt);
385
386
387 pm_runtime_set_autosuspend_delay(dev, AUTOSUSPEND_DELAY_MS);
388 pm_runtime_use_autosuspend(dev);
389 pm_runtime_set_suspended(dev);
390 pm_runtime_enable(dev);
391
392 ret = pm_runtime_get_sync(dev);
393 if (ret < 0) {
394 dev_err(dev, "%s failed to set PM\n", HVA_PREFIX);
395 goto err_clk;
396 }
397
398
399 hva->ip_version = hva_hw_get_ip_version(hva);
400
401 if (hva->ip_version == HVA_VERSION_UNKNOWN) {
402 ret = -EINVAL;
403 goto err_pm;
404 }
405
406 dev_info(dev, "%s found hva device (version 0x%lx)\n", HVA_PREFIX,
407 hva->ip_version);
408
409 return 0;
410
411err_pm:
412 pm_runtime_put(dev);
413err_clk:
414 if (hva->clk)
415 clk_unprepare(hva->clk);
416
417 return ret;
418}
419
/*
 * hva_hw_remove - teardown counterpart of hva_hw_probe
 * @hva: device context
 *
 * Disables both interrupt lines before dropping the runtime-PM reference
 * taken at probe time and disabling runtime PM. IRQ resources and the
 * register mapping are devm-managed and released by the driver core.
 */
void hva_hw_remove(struct hva_dev *hva)
{
	struct device *dev = hva_to_dev(hva);

	disable_irq(hva->irq_its);
	disable_irq(hva->irq_err);

	pm_runtime_put_autosuspend(dev);
	pm_runtime_disable(dev);
}
430
431int hva_hw_runtime_suspend(struct device *dev)
432{
433 struct hva_dev *hva = dev_get_drvdata(dev);
434
435 clk_disable_unprepare(hva->clk);
436
437 return 0;
438}
439
440int hva_hw_runtime_resume(struct device *dev)
441{
442 struct hva_dev *hva = dev_get_drvdata(dev);
443
444 if (clk_prepare_enable(hva->clk)) {
445 dev_err(hva->dev, "%s failed to prepare hva clk\n",
446 HVA_PREFIX);
447 return -EINVAL;
448 }
449
450 if (clk_set_rate(hva->clk, CLK_RATE)) {
451 dev_err(dev, "%s failed to set clock frequency\n",
452 HVA_PREFIX);
453 return -EINVAL;
454 }
455
456 return 0;
457}
458
/*
 * hva_hw_execute_task - submit one encode task to the hardware and wait
 * @ctx:  client context owning the task
 * @cmd:  hw command type (only H264_ENC is handled)
 * @task: buffer whose physical address is the task descriptor for the IP
 *
 * Serializes hardware access with hva->protect_mutex, enables the two IRQ
 * lines, wakes the IP via runtime PM, ungates the encoder clock, programs
 * the configuration registers, then pushes the command word and the task
 * descriptor address into the command FIFO and waits (up to 2 s) for the
 * IRQ threads to complete hva->interrupt.
 *
 * Returns 0 on success, -EFAULT on PM failure, unknown command, timeout,
 * or when the IRQ threads flagged a hardware error on the context.
 */
int hva_hw_execute_task(struct hva_ctx *ctx, enum hva_hw_cmd_type cmd,
			struct hva_buffer *task)
{
	struct hva_dev *hva = ctx_to_hdev(ctx);
	struct device *dev = hva_to_dev(hva);
	u8 client_id = ctx->id;
	int ret;
	u32 reg = 0;

	mutex_lock(&hva->protect_mutex);

	/* enable irqs (they are requested disabled at probe time) */
	enable_irq(hva->irq_its);
	enable_irq(hva->irq_err);

	if (pm_runtime_get_sync(dev) < 0) {
		dev_err(dev, "%s failed to get pm_runtime\n", ctx->name);
		ctx->sys_errors++;
		ret = -EFAULT;
		goto out;
	}

	/* ungate the clock branch matching the command type */
	reg = readl_relaxed(hva->regs + HVA_HIF_REG_CLK_GATING);
	switch (cmd) {
	case H264_ENC:
		reg |= CLK_GATING_HVC;
		break;
	default:
		dev_dbg(dev, "%s unknown command 0x%x\n", ctx->name, cmd);
		ctx->encode_errors++;
		ret = -EFAULT;
		goto out;
	}
	writel_relaxed(reg, hva->regs + HVA_HIF_REG_CLK_GATING);

	dev_dbg(dev, "%s %s: write configuration registers\n", ctx->name,
		__func__);

	/* byte swap config */
	writel_relaxed(BSM_CFG_VAL1, hva->regs + HVA_HIF_REG_BSM);

	/*
	 * memory interface config
	 * NOTE(review): MIF_CFG_VAL3 (0) is written while VAL1/VAL2 defined
	 * above are unused -- presumably the default config; confirm against
	 * the HVA datasheet
	 */
	writel_relaxed(MIF_CFG_VAL3, hva->regs + HVA_HIF_REG_MIF_CFG);
	writel_relaxed(HEC_MIF_CFG_VAL, hva->regs + HVA_HIF_REG_HEC_MIF_CFG);

	/*
	 * push the task: first FIFO word is the command with the client id
	 * in bits 15:8 (echoed back in the status word decoded by the IRQ
	 * threads), second word is the task descriptor physical address
	 */
	dev_dbg(dev, "%s %s: send task (cmd: %d, task_desc: %pad)\n",
		ctx->name, __func__, cmd + (client_id << 8), &task->paddr);
	writel_relaxed(cmd + (client_id << 8), hva->regs + HVA_HIF_FIFO_CMD);
	writel_relaxed(task->paddr, hva->regs + HVA_HIF_FIFO_CMD);

	if (!wait_for_completion_timeout(&hva->interrupt,
					 msecs_to_jiffies(2000))) {
		dev_err(dev, "%s %s: time out on completion\n", ctx->name,
			__func__);
		ctx->encode_errors++;
		ret = -EFAULT;
		goto out;
	}

	/* hw_err was set/cleared by the IRQ threads for this context */
	ret = ctx->hw_err ? -EFAULT : 0;

	ctx->encode_errors += ctx->hw_err ? 1 : 0;

out:
	disable_irq(hva->irq_its);
	disable_irq(hva->irq_err);

	/* re-gate the clock branch enabled above */
	switch (cmd) {
	case H264_ENC:
		reg &= ~CLK_GATING_HVC;
		writel_relaxed(reg, hva->regs + HVA_HIF_REG_CLK_GATING);
		break;
	default:
		dev_dbg(dev, "%s unknown command 0x%x\n", ctx->name, cmd);
	}

	/* balances pm_runtime_get_sync(): its counter rises even on failure */
	pm_runtime_put_autosuspend(dev);
	mutex_unlock(&hva->protect_mutex);

	return ret;
}
547
548#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
549#define DUMP(reg) seq_printf(s, "%-30s: 0x%08X\n",\
550 #reg, readl_relaxed(hva->regs + reg))
551
552void hva_hw_dump_regs(struct hva_dev *hva, struct seq_file *s)
553{
554 struct device *dev = hva_to_dev(hva);
555
556 mutex_lock(&hva->protect_mutex);
557
558 if (pm_runtime_get_sync(dev) < 0) {
559 seq_puts(s, "Cannot wake up IP\n");
560 mutex_unlock(&hva->protect_mutex);
561 return;
562 }
563
564 seq_printf(s, "Registers:\nReg @ = 0x%p\n", hva->regs);
565
566 DUMP(HVA_HIF_REG_RST);
567 DUMP(HVA_HIF_REG_RST_ACK);
568 DUMP(HVA_HIF_REG_MIF_CFG);
569 DUMP(HVA_HIF_REG_HEC_MIF_CFG);
570 DUMP(HVA_HIF_REG_CFL);
571 DUMP(HVA_HIF_REG_SFL);
572 DUMP(HVA_HIF_REG_LMI_ERR);
573 DUMP(HVA_HIF_REG_EMI_ERR);
574 DUMP(HVA_HIF_REG_HEC_MIF_ERR);
575 DUMP(HVA_HIF_REG_HEC_STS);
576 DUMP(HVA_HIF_REG_HVC_STS);
577 DUMP(HVA_HIF_REG_HJE_STS);
578 DUMP(HVA_HIF_REG_CNT);
579 DUMP(HVA_HIF_REG_HEC_CHKSYN_DIS);
580 DUMP(HVA_HIF_REG_CLK_GATING);
581 DUMP(HVA_HIF_REG_VERSION);
582
583 pm_runtime_put_autosuspend(dev);
584 mutex_unlock(&hva->protect_mutex);
585}
586#endif
587