1
2
3
4
5
6
7
8#include <linux/clk.h>
9#include <linux/interrupt.h>
10#include <linux/platform_device.h>
11#include <linux/pm_runtime.h>
12#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
13#include <linux/seq_file.h>
14#endif
15
16#include "hva.h"
17#include "hva-hw.h"
18
19
/* HVA hardware interface (HIF) register offsets */
#define HVA_HIF_REG_RST 0x0100U
#define HVA_HIF_REG_RST_ACK 0x0104U
#define HVA_HIF_REG_MIF_CFG 0x0108U
#define HVA_HIF_REG_HEC_MIF_CFG 0x010CU
#define HVA_HIF_REG_CFL 0x0110U
#define HVA_HIF_FIFO_CMD 0x0114U
#define HVA_HIF_FIFO_STS 0x0118U
#define HVA_HIF_REG_SFL 0x011CU
#define HVA_HIF_REG_IT_ACK 0x0120U
#define HVA_HIF_REG_ERR_IT_ACK 0x0124U
#define HVA_HIF_REG_LMI_ERR 0x0128U
#define HVA_HIF_REG_EMI_ERR 0x012CU
#define HVA_HIF_REG_HEC_MIF_ERR 0x0130U
#define HVA_HIF_REG_HEC_STS 0x0134U
#define HVA_HIF_REG_HVC_STS 0x0138U
#define HVA_HIF_REG_HJE_STS 0x013CU
#define HVA_HIF_REG_CNT 0x0140U
#define HVA_HIF_REG_HEC_CHKSYN_DIS 0x0144U
#define HVA_HIF_REG_CLK_GATING 0x0148U
#define HVA_HIF_REG_VERSION 0x014CU
#define HVA_HIF_REG_BSM 0x0150U

/* HVA_HIF_REG_VERSION: IP version is held in the low 16 bits */
#define VERSION_ID_MASK 0x0000FFFF

/* HVA_HIF_REG_BSM configuration values */
#define BSM_CFG_VAL1 0x0003F000
#define BSM_CFG_VAL2 0x003F0000

/* HVA_HIF_REG_MIF_CFG (memory interface) configuration values */
#define MIF_CFG_VAL1 0x04460446
#define MIF_CFG_VAL2 0x04460806
#define MIF_CFG_VAL3 0x00000000

/* HVA_HIF_REG_HEC_MIF_CFG configuration value */
#define HEC_MIF_CFG_VAL 0x000000C4

/* HVA_HIF_REG_CLK_GATING: per-core clock (un)gating bits */
#define CLK_GATING_HVC BIT(0)
#define CLK_GATING_HEC BIT(1)
#define CLK_GATING_HJE BIT(2)

/* hva clock rate in Hz (300 MHz), set at runtime resume */
#define CLK_RATE 300000000

/* delay before runtime autosuspend, in milliseconds */
#define AUTOSUSPEND_DELAY_MS 3
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
/*
 * enum hva_hw_error - status/error codes reported by the encoder hardware
 *
 * hva_hw_its_irq_thread() compares the low byte of the latched status
 * FIFO word against these values. Codes 0x100 and above do not fit in
 * that byte and are never matched there; NOTE(review): presumably they
 * are used on another error-reporting path — confirm.
 */
enum hva_hw_error {
	NO_ERROR = 0x0,
	H264_BITSTREAM_OVERSIZE = 0x2,
	H264_FRAME_SKIPPED = 0x4,
	H264_SLICE_LIMIT_SIZE = 0x5,
	H264_MAX_SLICE_NUMBER = 0x7,
	H264_SLICE_READY = 0x8,
	TASK_LIST_FULL = 0xF0,
	UNKNOWN_COMMAND = 0xF1,
	WRONG_CODEC_OR_RESOLUTION = 0xF4,
	NO_INT_COMPLETION = 0x100,
	LMI_ERR = 0x101,
	EMI_ERR = 0x102,
	HECMI_ERR = 0x103,
};
100
/*
 * hva_hw_its_interrupt - status interrupt top-half
 * @irq: interrupt number
 * @data: pointer to the hva device (registered in hva_hw_probe())
 *
 * Latches the status FIFO and FIFO-level registers into the hva device
 * for the threaded handler, acknowledges the interrupt, then wakes
 * hva_hw_its_irq_thread().
 */
static irqreturn_t hva_hw_its_interrupt(int irq, void *data)
{
	struct hva_dev *hva = data;

	/* read status registers for the bottom-half */
	hva->sts_reg = readl_relaxed(hva->regs + HVA_HIF_FIFO_STS);
	hva->sfl_reg = readl_relaxed(hva->regs + HVA_HIF_REG_SFL);

	/* acknowledge interrupt */
	writel_relaxed(0x1, hva->regs + HVA_HIF_REG_IT_ACK);

	return IRQ_WAKE_THREAD;
}
114
115static irqreturn_t hva_hw_its_irq_thread(int irq, void *arg)
116{
117 struct hva_dev *hva = arg;
118 struct device *dev = hva_to_dev(hva);
119 u32 status = hva->sts_reg & 0xFF;
120 u8 ctx_id = 0;
121 struct hva_ctx *ctx = NULL;
122
123 dev_dbg(dev, "%s %s: status: 0x%02x fifo level: 0x%02x\n",
124 HVA_PREFIX, __func__, hva->sts_reg & 0xFF, hva->sfl_reg & 0xF);
125
126
127
128
129
130 ctx_id = (hva->sts_reg & 0xFF00) >> 8;
131 if (ctx_id >= HVA_MAX_INSTANCES) {
132 dev_err(dev, "%s %s: bad context identifier: %d\n",
133 HVA_PREFIX, __func__, ctx_id);
134 goto out;
135 }
136
137 ctx = hva->instances[ctx_id];
138 if (!ctx)
139 goto out;
140
141 switch (status) {
142 case NO_ERROR:
143 dev_dbg(dev, "%s %s: no error\n",
144 ctx->name, __func__);
145 ctx->hw_err = false;
146 break;
147 case H264_SLICE_READY:
148 dev_dbg(dev, "%s %s: h264 slice ready\n",
149 ctx->name, __func__);
150 ctx->hw_err = false;
151 break;
152 case H264_FRAME_SKIPPED:
153 dev_dbg(dev, "%s %s: h264 frame skipped\n",
154 ctx->name, __func__);
155 ctx->hw_err = false;
156 break;
157 case H264_BITSTREAM_OVERSIZE:
158 dev_err(dev, "%s %s:h264 bitstream oversize\n",
159 ctx->name, __func__);
160 ctx->hw_err = true;
161 break;
162 case H264_SLICE_LIMIT_SIZE:
163 dev_err(dev, "%s %s: h264 slice limit size is reached\n",
164 ctx->name, __func__);
165 ctx->hw_err = true;
166 break;
167 case H264_MAX_SLICE_NUMBER:
168 dev_err(dev, "%s %s: h264 max slice number is reached\n",
169 ctx->name, __func__);
170 ctx->hw_err = true;
171 break;
172 case TASK_LIST_FULL:
173 dev_err(dev, "%s %s:task list full\n",
174 ctx->name, __func__);
175 ctx->hw_err = true;
176 break;
177 case UNKNOWN_COMMAND:
178 dev_err(dev, "%s %s: command not known\n",
179 ctx->name, __func__);
180 ctx->hw_err = true;
181 break;
182 case WRONG_CODEC_OR_RESOLUTION:
183 dev_err(dev, "%s %s: wrong codec or resolution\n",
184 ctx->name, __func__);
185 ctx->hw_err = true;
186 break;
187 default:
188 dev_err(dev, "%s %s: status not recognized\n",
189 ctx->name, __func__);
190 ctx->hw_err = true;
191 break;
192 }
193out:
194 complete(&hva->interrupt);
195
196 return IRQ_HANDLED;
197}
198
/*
 * hva_hw_err_interrupt - error interrupt top-half
 * @irq: interrupt number
 * @data: pointer to the hva device
 *
 * Latches the status/fifo-level registers and the three memory
 * interface error registers into the hva device for the threaded
 * handler, acknowledges the interrupt, then wakes
 * hva_hw_err_irq_thread().
 *
 * NOTE(review): the acknowledge writes HVA_HIF_REG_IT_ACK, not
 * HVA_HIF_REG_ERR_IT_ACK (defined above but unused here) — confirm
 * against the HVA datasheet.
 */
static irqreturn_t hva_hw_err_interrupt(int irq, void *data)
{
	struct hva_dev *hva = data;

	/* read status registers for the bottom-half */
	hva->sts_reg = readl_relaxed(hva->regs + HVA_HIF_FIFO_STS);
	hva->sfl_reg = readl_relaxed(hva->regs + HVA_HIF_REG_SFL);

	/* latch memory interface error registers */
	hva->lmi_err_reg = readl_relaxed(hva->regs + HVA_HIF_REG_LMI_ERR);
	hva->emi_err_reg = readl_relaxed(hva->regs + HVA_HIF_REG_EMI_ERR);
	hva->hec_mif_err_reg = readl_relaxed(hva->regs +
					     HVA_HIF_REG_HEC_MIF_ERR);

	/* acknowledge interrupt */
	writel_relaxed(0x1, hva->regs + HVA_HIF_REG_IT_ACK);

	return IRQ_WAKE_THREAD;
}
218
/*
 * hva_hw_err_irq_thread - error interrupt bottom-half
 * @irq: interrupt number
 * @arg: pointer to the hva device
 *
 * Identifies the encoding context from the second byte of the latched
 * status word, logs any non-zero latched memory interface error
 * register (local, external, HEC), flags the context with hw_err, and
 * completes hva->interrupt to unblock hva_hw_execute_task().
 */
static irqreturn_t hva_hw_err_irq_thread(int irq, void *arg)
{
	struct hva_dev *hva = arg;
	struct device *dev = hva_to_dev(hva);
	u8 ctx_id = 0;
	struct hva_ctx *ctx;

	dev_dbg(dev, "%s status: 0x%02x fifo level: 0x%02x\n",
		HVA_PREFIX, hva->sts_reg & 0xFF, hva->sfl_reg & 0xF);

	/*
	 * the second byte of the latched status word identifies the
	 * instance (client) this error relates to
	 */
	ctx_id = (hva->sts_reg & 0xFF00) >> 8;
	if (ctx_id >= HVA_MAX_INSTANCES) {
		dev_err(dev, "%s bad context identifier: %d\n", HVA_PREFIX,
			ctx_id);
		goto out;
	}

	ctx = hva->instances[ctx_id];
	if (!ctx)
		goto out;

	/* report each latched memory interface error and flag the context */
	if (hva->lmi_err_reg) {
		dev_err(dev, "%s local memory interface error: 0x%08x\n",
			ctx->name, hva->lmi_err_reg);
		ctx->hw_err = true;
	}

	if (hva->emi_err_reg) {
		dev_err(dev, "%s external memory interface error: 0x%08x\n",
			ctx->name, hva->emi_err_reg);
		ctx->hw_err = true;
	}

	if (hva->hec_mif_err_reg) {
		dev_err(dev, "%s hec memory interface error: 0x%08x\n",
			ctx->name, hva->hec_mif_err_reg);
		ctx->hw_err = true;
	}
out:
	/* unblock the task submitter even on unidentified contexts */
	complete(&hva->interrupt);

	return IRQ_HANDLED;
}
266
267static unsigned long int hva_hw_get_ip_version(struct hva_dev *hva)
268{
269 struct device *dev = hva_to_dev(hva);
270 unsigned long int version;
271
272 if (pm_runtime_resume_and_get(dev) < 0) {
273 dev_err(dev, "%s failed to get pm_runtime\n", HVA_PREFIX);
274 mutex_unlock(&hva->protect_mutex);
275 return -EFAULT;
276 }
277
278 version = readl_relaxed(hva->regs + HVA_HIF_REG_VERSION) &
279 VERSION_ID_MASK;
280
281 pm_runtime_put_autosuspend(dev);
282
283 switch (version) {
284 case HVA_VERSION_V400:
285 dev_dbg(dev, "%s IP hardware version 0x%lx\n",
286 HVA_PREFIX, version);
287 break;
288 default:
289 dev_err(dev, "%s unknown IP hardware version 0x%lx\n",
290 HVA_PREFIX, version);
291 version = HVA_VERSION_UNKNOWN;
292 break;
293 }
294
295 return version;
296}
297
298int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva)
299{
300 struct device *dev = &pdev->dev;
301 struct resource *regs;
302 struct resource *esram;
303 int ret;
304
305 WARN_ON(!hva);
306
307
308 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
309 hva->regs = devm_ioremap_resource(dev, regs);
310 if (IS_ERR(hva->regs)) {
311 dev_err(dev, "%s failed to get regs\n", HVA_PREFIX);
312 return PTR_ERR(hva->regs);
313 }
314
315
316 esram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
317 if (!esram) {
318 dev_err(dev, "%s failed to get esram\n", HVA_PREFIX);
319 return -ENODEV;
320 }
321 hva->esram_addr = esram->start;
322 hva->esram_size = resource_size(esram);
323
324 dev_info(dev, "%s esram reserved for address: 0x%x size:%d\n",
325 HVA_PREFIX, hva->esram_addr, hva->esram_size);
326
327
328 hva->clk = devm_clk_get(dev, "clk_hva");
329 if (IS_ERR(hva->clk)) {
330 dev_err(dev, "%s failed to get clock\n", HVA_PREFIX);
331 return PTR_ERR(hva->clk);
332 }
333
334 ret = clk_prepare(hva->clk);
335 if (ret < 0) {
336 dev_err(dev, "%s failed to prepare clock\n", HVA_PREFIX);
337 hva->clk = ERR_PTR(-EINVAL);
338 return ret;
339 }
340
341
342 ret = platform_get_irq(pdev, 0);
343 if (ret < 0)
344 goto err_clk;
345 hva->irq_its = ret;
346
347 ret = devm_request_threaded_irq(dev, hva->irq_its, hva_hw_its_interrupt,
348 hva_hw_its_irq_thread,
349 IRQF_ONESHOT,
350 "hva_its_irq", hva);
351 if (ret) {
352 dev_err(dev, "%s failed to install status IRQ 0x%x\n",
353 HVA_PREFIX, hva->irq_its);
354 goto err_clk;
355 }
356 disable_irq(hva->irq_its);
357
358
359 ret = platform_get_irq(pdev, 1);
360 if (ret < 0)
361 goto err_clk;
362 hva->irq_err = ret;
363
364 ret = devm_request_threaded_irq(dev, hva->irq_err, hva_hw_err_interrupt,
365 hva_hw_err_irq_thread,
366 IRQF_ONESHOT,
367 "hva_err_irq", hva);
368 if (ret) {
369 dev_err(dev, "%s failed to install error IRQ 0x%x\n",
370 HVA_PREFIX, hva->irq_err);
371 goto err_clk;
372 }
373 disable_irq(hva->irq_err);
374
375
376 mutex_init(&hva->protect_mutex);
377
378
379 init_completion(&hva->interrupt);
380
381
382 pm_runtime_set_autosuspend_delay(dev, AUTOSUSPEND_DELAY_MS);
383 pm_runtime_use_autosuspend(dev);
384 pm_runtime_set_suspended(dev);
385 pm_runtime_enable(dev);
386
387 ret = pm_runtime_resume_and_get(dev);
388 if (ret < 0) {
389 dev_err(dev, "%s failed to set PM\n", HVA_PREFIX);
390 goto err_clk;
391 }
392
393
394 hva->ip_version = hva_hw_get_ip_version(hva);
395
396 if (hva->ip_version == HVA_VERSION_UNKNOWN) {
397 ret = -EINVAL;
398 goto err_pm;
399 }
400
401 dev_info(dev, "%s found hva device (version 0x%lx)\n", HVA_PREFIX,
402 hva->ip_version);
403
404 return 0;
405
406err_pm:
407 pm_runtime_put(dev);
408err_clk:
409 if (hva->clk)
410 clk_unprepare(hva->clk);
411
412 return ret;
413}
414
/*
 * hva_hw_remove - tear down what hva_hw_probe() set up
 * @hva: hva device
 *
 * Masks both IRQ lines, drops the runtime PM reference held since
 * probe and disables runtime PM.
 *
 * NOTE(review): the clock prepared in hva_hw_probe() is not unprepared
 * here — verify it is balanced elsewhere or this leaks a prepare count.
 */
void hva_hw_remove(struct hva_dev *hva)
{
	struct device *dev = hva_to_dev(hva);

	disable_irq(hva->irq_its);
	disable_irq(hva->irq_err);

	pm_runtime_put_autosuspend(dev);
	pm_runtime_disable(dev);
}
425
/*
 * hva_hw_runtime_suspend - runtime PM suspend callback
 * @dev: hva device
 *
 * Disables and unprepares the hva clock; re-enabled by
 * hva_hw_runtime_resume().
 */
int hva_hw_runtime_suspend(struct device *dev)
{
	struct hva_dev *hva = dev_get_drvdata(dev);

	clk_disable_unprepare(hva->clk);

	return 0;
}
434
435int hva_hw_runtime_resume(struct device *dev)
436{
437 struct hva_dev *hva = dev_get_drvdata(dev);
438
439 if (clk_prepare_enable(hva->clk)) {
440 dev_err(hva->dev, "%s failed to prepare hva clk\n",
441 HVA_PREFIX);
442 return -EINVAL;
443 }
444
445 if (clk_set_rate(hva->clk, CLK_RATE)) {
446 dev_err(dev, "%s failed to set clock frequency\n",
447 HVA_PREFIX);
448 clk_disable_unprepare(hva->clk);
449 return -EINVAL;
450 }
451
452 return 0;
453}
454
455int hva_hw_execute_task(struct hva_ctx *ctx, enum hva_hw_cmd_type cmd,
456 struct hva_buffer *task)
457{
458 struct hva_dev *hva = ctx_to_hdev(ctx);
459 struct device *dev = hva_to_dev(hva);
460 u8 client_id = ctx->id;
461 int ret;
462 u32 reg = 0;
463 bool got_pm = false;
464
465 mutex_lock(&hva->protect_mutex);
466
467
468 enable_irq(hva->irq_its);
469 enable_irq(hva->irq_err);
470
471 if (pm_runtime_resume_and_get(dev) < 0) {
472 dev_err(dev, "%s failed to get pm_runtime\n", ctx->name);
473 ctx->sys_errors++;
474 ret = -EFAULT;
475 goto out;
476 }
477 got_pm = true;
478
479 reg = readl_relaxed(hva->regs + HVA_HIF_REG_CLK_GATING);
480 switch (cmd) {
481 case H264_ENC:
482 reg |= CLK_GATING_HVC;
483 break;
484 default:
485 dev_dbg(dev, "%s unknown command 0x%x\n", ctx->name, cmd);
486 ctx->encode_errors++;
487 ret = -EFAULT;
488 goto out;
489 }
490 writel_relaxed(reg, hva->regs + HVA_HIF_REG_CLK_GATING);
491
492 dev_dbg(dev, "%s %s: write configuration registers\n", ctx->name,
493 __func__);
494
495
496 writel_relaxed(BSM_CFG_VAL1, hva->regs + HVA_HIF_REG_BSM);
497
498
499 writel_relaxed(MIF_CFG_VAL3, hva->regs + HVA_HIF_REG_MIF_CFG);
500 writel_relaxed(HEC_MIF_CFG_VAL, hva->regs + HVA_HIF_REG_HEC_MIF_CFG);
501
502
503
504
505
506
507
508 dev_dbg(dev, "%s %s: send task (cmd: %d, task_desc: %pad)\n",
509 ctx->name, __func__, cmd + (client_id << 8), &task->paddr);
510 writel_relaxed(cmd + (client_id << 8), hva->regs + HVA_HIF_FIFO_CMD);
511 writel_relaxed(task->paddr, hva->regs + HVA_HIF_FIFO_CMD);
512
513 if (!wait_for_completion_timeout(&hva->interrupt,
514 msecs_to_jiffies(2000))) {
515 dev_err(dev, "%s %s: time out on completion\n", ctx->name,
516 __func__);
517 ctx->encode_errors++;
518 ret = -EFAULT;
519 goto out;
520 }
521
522
523 ret = ctx->hw_err ? -EFAULT : 0;
524
525 ctx->encode_errors += ctx->hw_err ? 1 : 0;
526
527out:
528 disable_irq(hva->irq_its);
529 disable_irq(hva->irq_err);
530
531 switch (cmd) {
532 case H264_ENC:
533 reg &= ~CLK_GATING_HVC;
534 writel_relaxed(reg, hva->regs + HVA_HIF_REG_CLK_GATING);
535 break;
536 default:
537 dev_dbg(dev, "%s unknown command 0x%x\n", ctx->name, cmd);
538 }
539
540 if (got_pm)
541 pm_runtime_put_autosuspend(dev);
542 mutex_unlock(&hva->protect_mutex);
543
544 return ret;
545}
546
#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
/* print one register: symbolic name and current raw value */
#define DUMP(reg) seq_printf(s, "%-30s: 0x%08X\n",\
			     #reg, readl_relaxed(hva->regs + reg))

/*
 * hva_hw_dump_regs - dump the hardware registers to a debugfs seq_file
 * @hva: hva device
 * @s: seq_file to print into
 *
 * Takes hva->protect_mutex and powers the IP up before reading; prints
 * only an error message if the IP cannot be woken.
 */
void hva_hw_dump_regs(struct hva_dev *hva, struct seq_file *s)
{
	struct device *dev = hva_to_dev(hva);

	mutex_lock(&hva->protect_mutex);

	if (pm_runtime_resume_and_get(dev) < 0) {
		seq_puts(s, "Cannot wake up IP\n");
		mutex_unlock(&hva->protect_mutex);
		return;
	}

	seq_printf(s, "Registers:\nReg @ = 0x%p\n", hva->regs);

	DUMP(HVA_HIF_REG_RST);
	DUMP(HVA_HIF_REG_RST_ACK);
	DUMP(HVA_HIF_REG_MIF_CFG);
	DUMP(HVA_HIF_REG_HEC_MIF_CFG);
	DUMP(HVA_HIF_REG_CFL);
	DUMP(HVA_HIF_REG_SFL);
	DUMP(HVA_HIF_REG_LMI_ERR);
	DUMP(HVA_HIF_REG_EMI_ERR);
	DUMP(HVA_HIF_REG_HEC_MIF_ERR);
	DUMP(HVA_HIF_REG_HEC_STS);
	DUMP(HVA_HIF_REG_HVC_STS);
	DUMP(HVA_HIF_REG_HJE_STS);
	DUMP(HVA_HIF_REG_CNT);
	DUMP(HVA_HIF_REG_HEC_CHKSYN_DIS);
	DUMP(HVA_HIF_REG_CLK_GATING);
	DUMP(HVA_HIF_REG_VERSION);

	pm_runtime_put_autosuspend(dev);
	mutex_unlock(&hva->protect_mutex);
}
#endif
586