1
2
3
4
5
6
7
8#include <linux/clk.h>
9#include <linux/interrupt.h>
10#include <linux/platform_device.h>
11#include <linux/pm_runtime.h>
12#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
13#include <linux/seq_file.h>
14#endif
15
16#include "hva.h"
17#include "hva-hw.h"
18
19
/* HVA hardware interface register offsets (from the register bank base) */
#define HVA_HIF_REG_RST 0x0100U
#define HVA_HIF_REG_RST_ACK 0x0104U
#define HVA_HIF_REG_MIF_CFG 0x0108U
#define HVA_HIF_REG_HEC_MIF_CFG 0x010CU
#define HVA_HIF_REG_CFL 0x0110U
#define HVA_HIF_FIFO_CMD 0x0114U
#define HVA_HIF_FIFO_STS 0x0118U
#define HVA_HIF_REG_SFL 0x011CU
#define HVA_HIF_REG_IT_ACK 0x0120U
#define HVA_HIF_REG_ERR_IT_ACK 0x0124U
#define HVA_HIF_REG_LMI_ERR 0x0128U
#define HVA_HIF_REG_EMI_ERR 0x012CU
#define HVA_HIF_REG_HEC_MIF_ERR 0x0130U
#define HVA_HIF_REG_HEC_STS 0x0134U
#define HVA_HIF_REG_HVC_STS 0x0138U
#define HVA_HIF_REG_HJE_STS 0x013CU
#define HVA_HIF_REG_CNT 0x0140U
#define HVA_HIF_REG_HEC_CHKSYN_DIS 0x0144U
#define HVA_HIF_REG_CLK_GATING 0x0148U
#define HVA_HIF_REG_VERSION 0x014CU
#define HVA_HIF_REG_BSM 0x0150U

/* mask to extract the version identifier from HVA_HIF_REG_VERSION */
#define VERSION_ID_MASK 0x0000FFFF

/* values written to HVA_HIF_REG_BSM (only VAL1 is used below) */
#define BSM_CFG_VAL1 0x0003F000
#define BSM_CFG_VAL2 0x003F0000

/* values for HVA_HIF_REG_MIF_CFG (only VAL3 is used below) */
#define MIF_CFG_VAL1 0x04460446
#define MIF_CFG_VAL2 0x04460806
#define MIF_CFG_VAL3 0x00000000

/* value for HVA_HIF_REG_HEC_MIF_CFG */
#define HEC_MIF_CFG_VAL 0x000000C4

/* clock gating bits in HVA_HIF_REG_CLK_GATING (HVC/HEC/HJE blocks) */
#define CLK_GATING_HVC BIT(0)
#define CLK_GATING_HEC BIT(1)
#define CLK_GATING_HJE BIT(2)

/* IP clock rate in Hz requested at runtime resume (300 MHz) */
#define CLK_RATE 300000000

/* runtime PM autosuspend delay, in milliseconds */
#define AUTOSUSPEND_DELAY_MS 3
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
/*
 * enum hva_hw_error - task status / error codes reported by the hardware
 * in the low byte of the status FIFO (decoded in hva_hw_its_irq_thread)
 */
enum hva_hw_error {
	NO_ERROR = 0x0,			/* task completed without error */
	H264_BITSTREAM_OVERSIZE = 0x2,	/* h264 bitstream oversize */
	H264_FRAME_SKIPPED = 0x4,	/* h264 frame skipped */
	H264_SLICE_LIMIT_SIZE = 0x5,	/* h264 slice limit size reached */
	H264_MAX_SLICE_NUMBER = 0x7,	/* h264 max slice number reached */
	H264_SLICE_READY = 0x8,		/* h264 slice ready (not an error) */
	TASK_LIST_FULL = 0xF0,		/* hardware task list full */
	UNKNOWN_COMMAND = 0xF1,		/* command not recognized by the IP */
	WRONG_CODEC_OR_RESOLUTION = 0xF4, /* wrong codec or resolution */
	NO_INT_COMPLETION = 0x100,	/* no interrupt completion */
	LMI_ERR = 0x101,		/* local memory interface error */
	EMI_ERR = 0x102,		/* external memory interface error */
	HECMI_ERR = 0x103,		/* HEC memory interface error */
};
100
101static irqreturn_t hva_hw_its_interrupt(int irq, void *data)
102{
103 struct hva_dev *hva = data;
104
105
106 hva->sts_reg = readl_relaxed(hva->regs + HVA_HIF_FIFO_STS);
107 hva->sfl_reg = readl_relaxed(hva->regs + HVA_HIF_REG_SFL);
108
109
110 writel_relaxed(0x1, hva->regs + HVA_HIF_REG_IT_ACK);
111
112 return IRQ_WAKE_THREAD;
113}
114
115static irqreturn_t hva_hw_its_irq_thread(int irq, void *arg)
116{
117 struct hva_dev *hva = arg;
118 struct device *dev = hva_to_dev(hva);
119 u32 status = hva->sts_reg & 0xFF;
120 u8 ctx_id = 0;
121 struct hva_ctx *ctx = NULL;
122
123 dev_dbg(dev, "%s %s: status: 0x%02x fifo level: 0x%02x\n",
124 HVA_PREFIX, __func__, hva->sts_reg & 0xFF, hva->sfl_reg & 0xF);
125
126
127
128
129
130 ctx_id = (hva->sts_reg & 0xFF00) >> 8;
131 if (ctx_id >= HVA_MAX_INSTANCES) {
132 dev_err(dev, "%s %s: bad context identifier: %d\n",
133 ctx->name, __func__, ctx_id);
134 ctx->hw_err = true;
135 goto out;
136 }
137
138 ctx = hva->instances[ctx_id];
139 if (!ctx)
140 goto out;
141
142 switch (status) {
143 case NO_ERROR:
144 dev_dbg(dev, "%s %s: no error\n",
145 ctx->name, __func__);
146 ctx->hw_err = false;
147 break;
148 case H264_SLICE_READY:
149 dev_dbg(dev, "%s %s: h264 slice ready\n",
150 ctx->name, __func__);
151 ctx->hw_err = false;
152 break;
153 case H264_FRAME_SKIPPED:
154 dev_dbg(dev, "%s %s: h264 frame skipped\n",
155 ctx->name, __func__);
156 ctx->hw_err = false;
157 break;
158 case H264_BITSTREAM_OVERSIZE:
159 dev_err(dev, "%s %s:h264 bitstream oversize\n",
160 ctx->name, __func__);
161 ctx->hw_err = true;
162 break;
163 case H264_SLICE_LIMIT_SIZE:
164 dev_err(dev, "%s %s: h264 slice limit size is reached\n",
165 ctx->name, __func__);
166 ctx->hw_err = true;
167 break;
168 case H264_MAX_SLICE_NUMBER:
169 dev_err(dev, "%s %s: h264 max slice number is reached\n",
170 ctx->name, __func__);
171 ctx->hw_err = true;
172 break;
173 case TASK_LIST_FULL:
174 dev_err(dev, "%s %s:task list full\n",
175 ctx->name, __func__);
176 ctx->hw_err = true;
177 break;
178 case UNKNOWN_COMMAND:
179 dev_err(dev, "%s %s: command not known\n",
180 ctx->name, __func__);
181 ctx->hw_err = true;
182 break;
183 case WRONG_CODEC_OR_RESOLUTION:
184 dev_err(dev, "%s %s: wrong codec or resolution\n",
185 ctx->name, __func__);
186 ctx->hw_err = true;
187 break;
188 default:
189 dev_err(dev, "%s %s: status not recognized\n",
190 ctx->name, __func__);
191 ctx->hw_err = true;
192 break;
193 }
194out:
195 complete(&hva->interrupt);
196
197 return IRQ_HANDLED;
198}
199
200static irqreturn_t hva_hw_err_interrupt(int irq, void *data)
201{
202 struct hva_dev *hva = data;
203
204
205 hva->sts_reg = readl_relaxed(hva->regs + HVA_HIF_FIFO_STS);
206 hva->sfl_reg = readl_relaxed(hva->regs + HVA_HIF_REG_SFL);
207
208
209 hva->lmi_err_reg = readl_relaxed(hva->regs + HVA_HIF_REG_LMI_ERR);
210 hva->emi_err_reg = readl_relaxed(hva->regs + HVA_HIF_REG_EMI_ERR);
211 hva->hec_mif_err_reg = readl_relaxed(hva->regs +
212 HVA_HIF_REG_HEC_MIF_ERR);
213
214
215 writel_relaxed(0x1, hva->regs + HVA_HIF_REG_IT_ACK);
216
217 return IRQ_WAKE_THREAD;
218}
219
220static irqreturn_t hva_hw_err_irq_thread(int irq, void *arg)
221{
222 struct hva_dev *hva = arg;
223 struct device *dev = hva_to_dev(hva);
224 u8 ctx_id = 0;
225 struct hva_ctx *ctx;
226
227 dev_dbg(dev, "%s status: 0x%02x fifo level: 0x%02x\n",
228 HVA_PREFIX, hva->sts_reg & 0xFF, hva->sfl_reg & 0xF);
229
230
231
232
233
234 ctx_id = (hva->sts_reg & 0xFF00) >> 8;
235 if (ctx_id >= HVA_MAX_INSTANCES) {
236 dev_err(dev, "%s bad context identifier: %d\n", HVA_PREFIX,
237 ctx_id);
238 goto out;
239 }
240
241 ctx = hva->instances[ctx_id];
242 if (!ctx)
243 goto out;
244
245 if (hva->lmi_err_reg) {
246 dev_err(dev, "%s local memory interface error: 0x%08x\n",
247 ctx->name, hva->lmi_err_reg);
248 ctx->hw_err = true;
249 }
250
251 if (hva->emi_err_reg) {
252 dev_err(dev, "%s external memory interface error: 0x%08x\n",
253 ctx->name, hva->emi_err_reg);
254 ctx->hw_err = true;
255 }
256
257 if (hva->hec_mif_err_reg) {
258 dev_err(dev, "%s hec memory interface error: 0x%08x\n",
259 ctx->name, hva->hec_mif_err_reg);
260 ctx->hw_err = true;
261 }
262out:
263 complete(&hva->interrupt);
264
265 return IRQ_HANDLED;
266}
267
268static unsigned long int hva_hw_get_ip_version(struct hva_dev *hva)
269{
270 struct device *dev = hva_to_dev(hva);
271 unsigned long int version;
272
273 if (pm_runtime_get_sync(dev) < 0) {
274 dev_err(dev, "%s failed to get pm_runtime\n", HVA_PREFIX);
275 pm_runtime_put_noidle(dev);
276 mutex_unlock(&hva->protect_mutex);
277 return -EFAULT;
278 }
279
280 version = readl_relaxed(hva->regs + HVA_HIF_REG_VERSION) &
281 VERSION_ID_MASK;
282
283 pm_runtime_put_autosuspend(dev);
284
285 switch (version) {
286 case HVA_VERSION_V400:
287 dev_dbg(dev, "%s IP hardware version 0x%lx\n",
288 HVA_PREFIX, version);
289 break;
290 default:
291 dev_err(dev, "%s unknown IP hardware version 0x%lx\n",
292 HVA_PREFIX, version);
293 version = HVA_VERSION_UNKNOWN;
294 break;
295 }
296
297 return version;
298}
299
/*
 * hva_hw_probe - acquire the hardware resources of the HVA IP
 * @pdev: platform device
 * @hva:  driver context to fill in
 *
 * Maps the register bank, records the esram region (address/size only,
 * not ioremapped), prepares the HVA clock, installs the status and
 * error threaded interrupt handlers (left masked until a task is
 * executed), initializes the protection mutex and completion, enables
 * runtime PM and finally checks the IP version.
 *
 * Returns 0 on success or a negative errno.
 */
int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva)
{
	struct device *dev = &pdev->dev;
	struct resource *regs;
	struct resource *esram;
	int ret;

	WARN_ON(!hva);

	/* get and map the register bank (first MEM resource) */
	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hva->regs = devm_ioremap_resource(dev, regs);
	if (IS_ERR(hva->regs)) {
		dev_err(dev, "%s failed to get regs\n", HVA_PREFIX);
		return PTR_ERR(hva->regs);
	}

	/* get the esram region (second MEM resource); only address/size kept */
	esram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!esram) {
		dev_err(dev, "%s failed to get esram\n", HVA_PREFIX);
		return -ENODEV;
	}
	hva->esram_addr = esram->start;
	hva->esram_size = resource_size(esram);

	dev_info(dev, "%s esram reserved for address: 0x%x size:%d\n",
		 HVA_PREFIX, hva->esram_addr, hva->esram_size);

	/* get the HVA clock */
	hva->clk = devm_clk_get(dev, "clk_hva");
	if (IS_ERR(hva->clk)) {
		dev_err(dev, "%s failed to get clock\n", HVA_PREFIX);
		return PTR_ERR(hva->clk);
	}

	ret = clk_prepare(hva->clk);
	if (ret < 0) {
		dev_err(dev, "%s failed to prepare clock\n", HVA_PREFIX);
		/*
		 * NOTE(review): an ERR_PTR is non-NULL, so the
		 * `if (hva->clk)` test at err_clk would not skip it —
		 * harmless here because this path returns directly,
		 * but confirm no caller unprepares hva->clk afterwards
		 */
		hva->clk = ERR_PTR(-EINVAL);
		return ret;
	}

	/* get the status interrupt; handler stays masked until first task */
	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto err_clk;
	hva->irq_its = ret;

	ret = devm_request_threaded_irq(dev, hva->irq_its, hva_hw_its_interrupt,
					hva_hw_its_irq_thread,
					IRQF_ONESHOT,
					"hva_its_irq", hva);
	if (ret) {
		dev_err(dev, "%s failed to install status IRQ 0x%x\n",
			HVA_PREFIX, hva->irq_its);
		goto err_clk;
	}
	disable_irq(hva->irq_its);

	/* get the error interrupt; also masked until first task */
	ret = platform_get_irq(pdev, 1);
	if (ret < 0)
		goto err_clk;
	hva->irq_err = ret;

	ret = devm_request_threaded_irq(dev, hva->irq_err, hva_hw_err_interrupt,
					hva_hw_err_irq_thread,
					IRQF_ONESHOT,
					"hva_err_irq", hva);
	if (ret) {
		dev_err(dev, "%s failed to install error IRQ 0x%x\n",
			HVA_PREFIX, hva->irq_err);
		goto err_clk;
	}
	disable_irq(hva->irq_err);

	/* mutex serializing hardware accesses (see hva_hw_execute_task) */
	mutex_init(&hva->protect_mutex);

	/* completion signalled by the interrupt threads */
	init_completion(&hva->interrupt);

	/* runtime PM with autosuspend; device starts suspended */
	pm_runtime_set_autosuspend_delay(dev, AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_suspended(dev);
	pm_runtime_enable(dev);

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "%s failed to set PM\n", HVA_PREFIX);
		goto err_pm;
	}

	/* check IP hardware version */
	hva->ip_version = hva_hw_get_ip_version(hva);

	if (hva->ip_version == HVA_VERSION_UNKNOWN) {
		ret = -EINVAL;
		goto err_pm;
	}

	dev_info(dev, "%s found hva device (version 0x%lx)\n", HVA_PREFIX,
		 hva->ip_version);

	return 0;

err_pm:
	/*
	 * NOTE(review): runtime PM stays enabled on this error path (no
	 * pm_runtime_disable) — confirm the caller compensates on failure
	 */
	pm_runtime_put(dev);
err_clk:
	if (hva->clk)
		clk_unprepare(hva->clk);

	return ret;
}
416
417void hva_hw_remove(struct hva_dev *hva)
418{
419 struct device *dev = hva_to_dev(hva);
420
421 disable_irq(hva->irq_its);
422 disable_irq(hva->irq_err);
423
424 pm_runtime_put_autosuspend(dev);
425 pm_runtime_disable(dev);
426}
427
428int hva_hw_runtime_suspend(struct device *dev)
429{
430 struct hva_dev *hva = dev_get_drvdata(dev);
431
432 clk_disable_unprepare(hva->clk);
433
434 return 0;
435}
436
437int hva_hw_runtime_resume(struct device *dev)
438{
439 struct hva_dev *hva = dev_get_drvdata(dev);
440
441 if (clk_prepare_enable(hva->clk)) {
442 dev_err(hva->dev, "%s failed to prepare hva clk\n",
443 HVA_PREFIX);
444 return -EINVAL;
445 }
446
447 if (clk_set_rate(hva->clk, CLK_RATE)) {
448 dev_err(dev, "%s failed to set clock frequency\n",
449 HVA_PREFIX);
450 clk_disable_unprepare(hva->clk);
451 return -EINVAL;
452 }
453
454 return 0;
455}
456
/*
 * hva_hw_execute_task - send a task to the hardware and wait for it
 * @ctx:  client context
 * @cmd:  command type (only H264_ENC is handled)
 * @task: task descriptor buffer; its physical address is pushed to the
 *        command FIFO
 *
 * Serializes hardware access with protect_mutex, unmasks the two
 * interrupt lines, wakes the IP through runtime PM, ungates the codec
 * clock, writes the static configuration registers, pushes the command
 * word and the task descriptor address into the command FIFO, then
 * waits up to 2 s for the interrupt thread to complete hva->interrupt.
 *
 * Returns 0 on success, -EFAULT on PM failure, unknown command,
 * timeout or hardware-reported error.
 */
int hva_hw_execute_task(struct hva_ctx *ctx, enum hva_hw_cmd_type cmd,
			struct hva_buffer *task)
{
	struct hva_dev *hva = ctx_to_hdev(ctx);
	struct device *dev = hva_to_dev(hva);
	u8 client_id = ctx->id;
	int ret;
	u32 reg = 0;

	mutex_lock(&hva->protect_mutex);

	/* unmask the interrupts for the duration of the task */
	enable_irq(hva->irq_its);
	enable_irq(hva->irq_err);

	if (pm_runtime_get_sync(dev) < 0) {
		dev_err(dev, "%s failed to get pm_runtime\n", ctx->name);
		ctx->sys_errors++;
		ret = -EFAULT;
		goto out;
	}

	/* ungate the clock of the block used by this command */
	reg = readl_relaxed(hva->regs + HVA_HIF_REG_CLK_GATING);
	switch (cmd) {
	case H264_ENC:
		reg |= CLK_GATING_HVC;
		break;
	default:
		dev_dbg(dev, "%s unknown command 0x%x\n", ctx->name, cmd);
		ctx->encode_errors++;
		ret = -EFAULT;
		goto out;
	}
	writel_relaxed(reg, hva->regs + HVA_HIF_REG_CLK_GATING);

	dev_dbg(dev, "%s %s: write configuration registers\n", ctx->name,
		__func__);

	/* BSM configuration */
	writel_relaxed(BSM_CFG_VAL1, hva->regs + HVA_HIF_REG_BSM);

	/* memory interface configuration */
	writel_relaxed(MIF_CFG_VAL3, hva->regs + HVA_HIF_REG_MIF_CFG);
	writel_relaxed(HEC_MIF_CFG_VAL, hva->regs + HVA_HIF_REG_HEC_MIF_CFG);

	/*
	 * command word layout (from the code below): command type in the
	 * low byte, client identifier in bits 15..8; the second FIFO write
	 * is the physical address of the task descriptor
	 */
	dev_dbg(dev, "%s %s: send task (cmd: %d, task_desc: %pad)\n",
		ctx->name, __func__, cmd + (client_id << 8), &task->paddr);
	writel_relaxed(cmd + (client_id << 8), hva->regs + HVA_HIF_FIFO_CMD);
	writel_relaxed(task->paddr, hva->regs + HVA_HIF_FIFO_CMD);

	if (!wait_for_completion_timeout(&hva->interrupt,
					 msecs_to_jiffies(2000))) {
		dev_err(dev, "%s %s: time out on completion\n", ctx->name,
			__func__);
		ctx->encode_errors++;
		ret = -EFAULT;
		goto out;
	}

	/* ctx->hw_err has been set by the interrupt threads */
	ret = ctx->hw_err ? -EFAULT : 0;

	ctx->encode_errors += ctx->hw_err ? 1 : 0;

out:
	disable_irq(hva->irq_its);
	disable_irq(hva->irq_err);

	/*
	 * re-gate the codec clock
	 * NOTE(review): when the PM get above fails, reg is still 0 here,
	 * so this write clears the whole gating register — confirm intended
	 */
	switch (cmd) {
	case H264_ENC:
		reg &= ~CLK_GATING_HVC;
		writel_relaxed(reg, hva->regs + HVA_HIF_REG_CLK_GATING);
		break;
	default:
		dev_dbg(dev, "%s unknown command 0x%x\n", ctx->name, cmd);
	}

	pm_runtime_put_autosuspend(dev);
	mutex_unlock(&hva->protect_mutex);

	return ret;
}
545
546#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
/* dump helper: print a register's symbolic name and current value */
#define DUMP(reg) seq_printf(s, "%-30s: 0x%08X\n",\
			     #reg, readl_relaxed(hva->regs + reg))

/*
 * hva_hw_dump_regs - dump the hardware registers to a debugfs seq_file
 * @hva: driver context
 * @s:   destination seq_file
 *
 * Takes protect_mutex to serialize with task execution and wakes up
 * the IP through runtime PM before reading the registers.
 */
void hva_hw_dump_regs(struct hva_dev *hva, struct seq_file *s)
{
	struct device *dev = hva_to_dev(hva);

	mutex_lock(&hva->protect_mutex);

	if (pm_runtime_get_sync(dev) < 0) {
		seq_puts(s, "Cannot wake up IP\n");
		/* balance the failed get before bailing out */
		pm_runtime_put_noidle(dev);
		mutex_unlock(&hva->protect_mutex);
		return;
	}

	seq_printf(s, "Registers:\nReg @ = 0x%p\n", hva->regs);

	DUMP(HVA_HIF_REG_RST);
	DUMP(HVA_HIF_REG_RST_ACK);
	DUMP(HVA_HIF_REG_MIF_CFG);
	DUMP(HVA_HIF_REG_HEC_MIF_CFG);
	DUMP(HVA_HIF_REG_CFL);
	DUMP(HVA_HIF_REG_SFL);
	DUMP(HVA_HIF_REG_LMI_ERR);
	DUMP(HVA_HIF_REG_EMI_ERR);
	DUMP(HVA_HIF_REG_HEC_MIF_ERR);
	DUMP(HVA_HIF_REG_HEC_STS);
	DUMP(HVA_HIF_REG_HVC_STS);
	DUMP(HVA_HIF_REG_HJE_STS);
	DUMP(HVA_HIF_REG_CNT);
	DUMP(HVA_HIF_REG_HEC_CHKSYN_DIS);
	DUMP(HVA_HIF_REG_CLK_GATING);
	DUMP(HVA_HIF_REG_VERSION);

	pm_runtime_put_autosuspend(dev);
	mutex_unlock(&hva->protect_mutex);
}
585#endif
586