#include "mixer.h"
#include "regs-mixer.h"
#include "regs-vp.h"

#include <linux/delay.h>

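/*
 * Low-level accessors for the video processor (VP) and mixer (MXR)
 * register windows; the *_write_mask() variants update only the bits
 * selected by @mask and preserve the others.
 */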
static inline u32 vp_read(struct mxr_device *mdev, u32 reg_id)
{
        return readl(mdev->res.vp_regs + reg_id);
}

static inline void vp_write(struct mxr_device *mdev, u32 reg_id, u32 val)
{
        writel(val, mdev->res.vp_regs + reg_id);
}

static inline void vp_write_mask(struct mxr_device *mdev, u32 reg_id,
        u32 val, u32 mask)
{
        u32 old = vp_read(mdev, reg_id);

        val = (val & mask) | (old & ~mask);
        writel(val, mdev->res.vp_regs + reg_id);
}

static inline u32 mxr_read(struct mxr_device *mdev, u32 reg_id)
{
        return readl(mdev->res.mxr_regs + reg_id);
}

static inline void mxr_write(struct mxr_device *mdev, u32 reg_id, u32 val)
{
        writel(val, mdev->res.mxr_regs + reg_id);
}

static inline void mxr_write_mask(struct mxr_device *mdev, u32 reg_id,
        u32 val, u32 mask)
{
        u32 old = mxr_read(mdev, reg_id);

        val = (val & mask) | (old & ~mask);
        writel(val, mdev->res.mxr_regs + reg_id);
}

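/*
 * Enable/disable latching of shadowed registers on VSYNC.  Register
 * updates below are bracketed by a disable/enable pair so that a
 * partially written configuration is never applied to the hardware.
 */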
void mxr_vsync_set_update(struct mxr_device *mdev, int en)
{
        mxr_write_mask(mdev, MXR_STATUS, en ? MXR_STATUS_SYNC_ENABLE : 0,
                MXR_STATUS_SYNC_ENABLE);
        vp_write(mdev, VP_SHADOW_UPDATE, en ? VP_SHADOW_UPDATE_ENABLE : 0);
}

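/*
 * Soft-reset the Video Processor and poll (up to 100 x 10 ms) until
 * the hardware reports that the reset has completed.
 */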
static void __mxr_reg_vp_reset(struct mxr_device *mdev)
{
        int tries;

        vp_write(mdev, VP_SRESET, VP_SRESET_PROCESSING);
        for (tries = 100; tries; --tries) {
                /* wait until VP_SRESET_PROCESSING returns to normal state */
                if (~vp_read(mdev, VP_SRESET) & VP_SRESET_PROCESSING)
                        break;
                mdelay(10);
        }
        WARN(tries == 0, "failed to reset Video Processor\n");
}

static void mxr_reg_vp_default_filter(struct mxr_device *mdev);

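/*
 * Program a sane default state: RGB888 output, 16-beat DMA bursts,
 * default layer priorities, mid-gray background, opaque premultiplied
 * graphic layers, default VP scaler filters and all interrupts enabled.
 */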
void mxr_reg_reset(struct mxr_device *mdev)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&mdev->reg_slock, flags);
        mxr_vsync_set_update(mdev, MXR_DISABLE);

        /* set output in RGB888 mode */
        mxr_write(mdev, MXR_CFG, MXR_CFG_OUT_RGB888);

        /* 16-beat burst in DMA */
        mxr_write_mask(mdev, MXR_STATUS, MXR_STATUS_16_BURST,
                MXR_STATUS_BURST_MASK);

        /* default layer priority: graphic layer 1 above the video
         * layer, video layer above graphic layer 0 */
        val = MXR_LAYER_CFG_GRP0_VAL(1);
        val |= MXR_LAYER_CFG_VP_VAL(2);
        val |= MXR_LAYER_CFG_GRP1_VAL(3);
        mxr_write(mdev, MXR_LAYER_CFG, val);

        /* use mid-gray background color */
        mxr_write(mdev, MXR_BG_COLOR0, 0x808080);
        mxr_write(mdev, MXR_BG_COLOR1, 0x808080);
        mxr_write(mdev, MXR_BG_COLOR2, 0x808080);

        /* setting graphical layers */
        val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no color key */
        val |= MXR_GRP_CFG_BLEND_PRE_MUL; /* premultiplied alpha */
        val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* fully opaque */

        /* apply the same configuration to both layers */
        mxr_write(mdev, MXR_GRAPHIC_CFG(0), val);
        mxr_write(mdev, MXR_GRAPHIC_CFG(1), val);

        /* configuration of Video Processor registers */
        __mxr_reg_vp_reset(mdev);
        mxr_reg_vp_default_filter(mdev);

        /* enable all interrupts */
        mxr_write_mask(mdev, MXR_INT_EN, ~0, MXR_INT_EN_ALL);

        mxr_vsync_set_update(mdev, MXR_ENABLE);
        spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

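/*
 * Configure pixel format and source/destination geometry of graphic
 * layer @idx.  Shadow updates are blocked while writing so the whole
 * configuration is latched atomically on the next VSYNC.
 */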
void mxr_reg_graph_format(struct mxr_device *mdev, int idx,
        const struct mxr_format *fmt, const struct mxr_geometry *geo)
{
        u32 val;
        unsigned long flags;

        spin_lock_irqsave(&mdev->reg_slock, flags);
        mxr_vsync_set_update(mdev, MXR_DISABLE);

        /* setup format */
        mxr_write_mask(mdev, MXR_GRAPHIC_CFG(idx),
                MXR_GRP_CFG_FORMAT_VAL(fmt->cookie), MXR_GRP_CFG_FORMAT_MASK);

        /* setup geometry */
        mxr_write(mdev, MXR_GRAPHIC_SPAN(idx), geo->src.full_width);
        val = MXR_GRP_WH_WIDTH(geo->src.width);
        val |= MXR_GRP_WH_HEIGHT(geo->src.height);
        val |= MXR_GRP_WH_H_SCALE(geo->x_ratio);
        val |= MXR_GRP_WH_V_SCALE(geo->y_ratio);
        mxr_write(mdev, MXR_GRAPHIC_WH(idx), val);

        /* setup offsets in source image */
        val = MXR_GRP_SXY_SX(geo->src.x_offset);
        val |= MXR_GRP_SXY_SY(geo->src.y_offset);
        mxr_write(mdev, MXR_GRAPHIC_SXY(idx), val);

        /* setup offsets in display image */
        val = MXR_GRP_DXY_DX(geo->dst.x_offset);
        val |= MXR_GRP_DXY_DY(geo->dst.y_offset);
        mxr_write(mdev, MXR_GRAPHIC_DXY(idx), val);

        mxr_vsync_set_update(mdev, MXR_ENABLE);
        spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

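/*
 * Configure the video processor input format and scaling geometry.
 * The chroma plane is programmed at half the luma height (4:2:0
 * input); interlaced destinations use per-field height and offset.
 */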
void mxr_reg_vp_format(struct mxr_device *mdev,
        const struct mxr_format *fmt, const struct mxr_geometry *geo)
{
        unsigned long flags;

        spin_lock_irqsave(&mdev->reg_slock, flags);
        mxr_vsync_set_update(mdev, MXR_DISABLE);

        vp_write_mask(mdev, VP_MODE, fmt->cookie, VP_MODE_FMT_MASK);

        /* setting size of input image */
        vp_write(mdev, VP_IMG_SIZE_Y, VP_IMG_HSIZE(geo->src.full_width) |
                VP_IMG_VSIZE(geo->src.full_height));
        /* chroma plane has half the luma height */
        vp_write(mdev, VP_IMG_SIZE_C, VP_IMG_HSIZE(geo->src.full_width) |
                VP_IMG_VSIZE(geo->src.full_height / 2));

        vp_write(mdev, VP_SRC_WIDTH, geo->src.width);
        vp_write(mdev, VP_SRC_HEIGHT, geo->src.height);
        vp_write(mdev, VP_SRC_H_POSITION,
                VP_SRC_H_POSITION_VAL(geo->src.x_offset));
        vp_write(mdev, VP_SRC_V_POSITION, geo->src.y_offset);

        vp_write(mdev, VP_DST_WIDTH, geo->dst.width);
        vp_write(mdev, VP_DST_H_POSITION, geo->dst.x_offset);
        if (geo->dst.field == V4L2_FIELD_INTERLACED) {
                vp_write(mdev, VP_DST_HEIGHT, geo->dst.height / 2);
                vp_write(mdev, VP_DST_V_POSITION, geo->dst.y_offset / 2);
        } else {
                vp_write(mdev, VP_DST_HEIGHT, geo->dst.height);
                vp_write(mdev, VP_DST_V_POSITION, geo->dst.y_offset);
        }

        vp_write(mdev, VP_H_RATIO, geo->x_ratio);
        vp_write(mdev, VP_V_RATIO, geo->y_ratio);

        vp_write(mdev, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);

        mxr_vsync_set_update(mdev, MXR_ENABLE);
        spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

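/*
 * Set the framebuffer address of graphic layer @idx; a zero address
 * disables the layer, a non-zero one enables it.
 */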
void mxr_reg_graph_buffer(struct mxr_device *mdev, int idx, dma_addr_t addr)
{
        u32 val = addr ? ~0 : 0;
        unsigned long flags;

        spin_lock_irqsave(&mdev->reg_slock, flags);
        mxr_vsync_set_update(mdev, MXR_DISABLE);

        if (idx == 0)
                mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_GRP0_ENABLE);
        else
                mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_GRP1_ENABLE);
        mxr_write(mdev, MXR_GRAPHIC_BASE(idx), addr);

        mxr_vsync_set_update(mdev, MXR_ENABLE);
        spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

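/*
 * Set luma/chroma addresses for the top and bottom fields of the video
 * layer.  A zero top-field luma address disables both the layer and
 * the video processor.
 */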
void mxr_reg_vp_buffer(struct mxr_device *mdev,
        dma_addr_t luma_addr[2], dma_addr_t chroma_addr[2])
{
        u32 val = luma_addr[0] ? ~0 : 0;
        unsigned long flags;

        spin_lock_irqsave(&mdev->reg_slock, flags);
        mxr_vsync_set_update(mdev, MXR_DISABLE);

        mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_VP_ENABLE);
        vp_write_mask(mdev, VP_ENABLE, val, VP_ENABLE_ON);

        vp_write(mdev, VP_TOP_Y_PTR, luma_addr[0]);
        vp_write(mdev, VP_TOP_C_PTR, chroma_addr[0]);
        vp_write(mdev, VP_BOT_Y_PTR, luma_addr[1]);
        vp_write(mdev, VP_BOT_C_PTR, chroma_addr[1]);

        mxr_vsync_set_update(mdev, MXR_ENABLE);
        spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

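/*
 * Advance the buffer queue of a layer on VSYNC: the buffer previously
 * shadowed by the hardware is completed, the pending update buffer
 * becomes the shadow and the next enqueued buffer (if any) is set up
 * as the new update buffer.
 */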
static void mxr_irq_layer_handle(struct mxr_layer *layer)
{
        struct list_head *head;
        struct mxr_buffer *done;

        /* skip non-existing layer */
        if (layer == NULL)
                return;

        head = &layer->enq_list;

        spin_lock(&layer->enq_slock);
        if (layer->state == MXR_LAYER_IDLE)
                goto done;

        done = layer->shadow_buf;
        layer->shadow_buf = layer->update_buf;

        if (list_empty(head)) {
                if (layer->state != MXR_LAYER_STREAMING)
                        layer->update_buf = NULL;
        } else {
                struct mxr_buffer *next;

                next = list_first_entry(head, struct mxr_buffer, list);
                list_del(&next->list);
                layer->update_buf = next;
        }

        layer->ops.buffer_set(layer, layer->update_buf);

        if (done && done != layer->shadow_buf)
                vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);

done:
        spin_unlock(&layer->enq_slock);
}

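/*
 * Mixer interrupt handler: records VSYNC events, toggles the field
 * flag when scanning interlaced, acknowledges the interrupt and then
 * advances the per-layer buffer queues.
 */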
irqreturn_t mxr_irq_handler(int irq, void *dev_data)
{
        struct mxr_device *mdev = dev_data;
        u32 i, val;

        spin_lock(&mdev->reg_slock);
        val = mxr_read(mdev, MXR_INT_STATUS);

        /* wake up processes waiting for VSYNC */
        if (val & MXR_INT_STATUS_VSYNC) {
                set_bit(MXR_EVENT_VSYNC, &mdev->event_flags);
                /* toggle TOP field event if working in interlaced mode */
                if (~mxr_read(mdev, MXR_CFG) & MXR_CFG_SCAN_PROGRASSIVE)
                        change_bit(MXR_EVENT_TOP, &mdev->event_flags);
                wake_up(&mdev->event_queue);
                /* ack VSYNC: replace the status bit with its clear flag */
                val &= ~MXR_INT_STATUS_VSYNC;
                val |= MXR_INT_CLEAR_VSYNC;
        }

        /* clear interrupts */
        mxr_write(mdev, MXR_INT_STATUS, val);

        spin_unlock(&mdev->reg_slock);

        /* leave on a non-VSYNC event */
        if (~val & MXR_INT_CLEAR_VSYNC)
                return IRQ_HANDLED;
        /* in interlaced mode, update the layers only on the TOP field */
        if (!test_bit(MXR_EVENT_TOP, &mdev->event_flags))
                return IRQ_HANDLED;
        for (i = 0; i < MXR_MAX_LAYERS; ++i)
                mxr_irq_layer_handle(mdev->layer[i]);
        return IRQ_HANDLED;
}

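/* Select the mixer output destination: SDO for cookie 0, HDMI otherwise. */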
void mxr_reg_s_output(struct mxr_device *mdev, int cookie)
{
        u32 val;

        val = cookie == 0 ? MXR_CFG_DST_SDO : MXR_CFG_DST_HDMI;
        mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_DST_MASK);
}

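/* Start the mixer and arm the TOP-field event used by the IRQ handler. */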
void mxr_reg_streamon(struct mxr_device *mdev)
{
        unsigned long flags;

        spin_lock_irqsave(&mdev->reg_slock, flags);

        /* single write -> no need to block vsync update */

        /* start MIXER */
        mxr_write_mask(mdev, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);
        set_bit(MXR_EVENT_TOP, &mdev->event_flags);

        spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

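/* Stop the mixer. */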
void mxr_reg_streamoff(struct mxr_device *mdev)
{
        unsigned long flags;

        spin_lock_irqsave(&mdev->reg_slock, flags);

        /* single write -> no need to block vsync update */

        /* stop MIXER */
        mxr_write_mask(mdev, MXR_STATUS, 0, MXR_STATUS_REG_RUN);

        spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

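/*
 * Wait for the next VSYNC interrupt; returns 0 on success or -ETIME
 * if no VSYNC arrives within one second.
 */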
int mxr_reg_wait4vsync(struct mxr_device *mdev)
{
        long ret;

        clear_bit(MXR_EVENT_VSYNC, &mdev->event_flags);

        ret = wait_event_timeout(mdev->event_queue,
                test_bit(MXR_EVENT_VSYNC, &mdev->event_flags),
                msecs_to_jiffies(1000));
        if (ret > 0)
                return 0;
        if (ret < 0)
                return ret;
        mxr_warn(mdev, "no vsync detected - timeout\n");
        return -ETIME;
}

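/*
 * Derive the mixer output configuration (colorspace, scan mode and
 * SD/HD resolution class) from the media bus format negotiated with
 * the output encoder.
 */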
void mxr_reg_set_mbus_fmt(struct mxr_device *mdev,
        struct v4l2_mbus_framefmt *fmt)
{
        u32 val = 0;
        unsigned long flags;

        spin_lock_irqsave(&mdev->reg_slock, flags);
        mxr_vsync_set_update(mdev, MXR_DISABLE);

        /* choosing colorspace according to the profile */
        if (fmt->colorspace == V4L2_COLORSPACE_JPEG)
                val |= MXR_CFG_OUT_YUV444;
        else
                val |= MXR_CFG_OUT_RGB888;

        /* choosing between interlaced and progressive mode */
        if (fmt->field == V4L2_FIELD_INTERLACED)
                val |= MXR_CFG_SCAN_INTERLACE;
        else
                val |= MXR_CFG_SCAN_PROGRASSIVE;

        /* choosing between proper HD and SD mode */
        if (fmt->height == 480)
                val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
        else if (fmt->height == 576)
                val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
        else if (fmt->height == 720)
                val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
        else if (fmt->height == 1080)
                val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
        else
                WARN(1, "unrecognized mbus height %u!\n", fmt->height);

        mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_SCAN_MASK |
                MXR_CFG_OUT_MASK);

        val = (fmt->field == V4L2_FIELD_INTERLACED) ? ~0 : 0;
        vp_write_mask(mdev, VP_MODE, val,
                VP_MODE_LINE_SKIP | VP_MODE_FIELD_ID_AUTO_TOGGLING);

        mxr_vsync_set_update(mdev, MXR_ENABLE);
        spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

void mxr_reg_graph_layer_stream(struct mxr_device *mdev, int idx, int en)
{
        /* no extra actions need to be done */
}

void mxr_reg_vp_layer_stream(struct mxr_device *mdev, int en)
{
        /* no extra actions need to be done */
}

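/*
 * Default polyphase scaler coefficients for the video processor,
 * stored as signed 8-bit values in u8 arrays: 8-tap horizontal and
 * 4-tap vertical luma filters plus a 4-tap horizontal chroma filter.
 */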
static const u8 filter_y_horiz_tap8[] = {
        0, -1, -1, -1, -1, -1, -1, -1,
        -1, -1, -1, -1, -1, 0, 0, 0,
        0, 2, 4, 5, 6, 6, 6, 6,
        6, 5, 5, 4, 3, 2, 1, 1,
        0, -6, -12, -16, -18, -20, -21, -20,
        -20, -18, -16, -13, -10, -8, -5, -2,
        127, 126, 125, 121, 114, 107, 99, 89,
        79, 68, 57, 46, 35, 25, 16, 8,
};

static const u8 filter_y_vert_tap4[] = {
        0, -3, -6, -8, -8, -8, -8, -7,
        -6, -5, -4, -3, -2, -1, -1, 0,
        127, 126, 124, 118, 111, 102, 92, 81,
        70, 59, 48, 37, 27, 19, 11, 5,
        0, 5, 11, 19, 27, 37, 48, 59,
        70, 81, 92, 102, 111, 118, 124, 126,
        0, 0, -1, -1, -2, -3, -4, -5,
        -6, -7, -8, -8, -8, -8, -6, -3,
};

static const u8 filter_cr_horiz_tap4[] = {
        0, -3, -6, -8, -8, -8, -8, -7,
        -6, -5, -4, -3, -2, -1, -1, 0,
        127, 126, 124, 118, 111, 102, 92, 81,
        70, 59, 48, 37, 27, 19, 11, 5,
};

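/*
 * Load a coefficient table into consecutive VP filter registers,
 * packing four table bytes per 32-bit write, first byte in the most
 * significant position.
 */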
static inline void mxr_reg_vp_filter_set(struct mxr_device *mdev,
        int reg_id, const u8 *data, unsigned int size)
{
        /* tables must be a multiple of 4 bytes */
        BUG_ON(size & 3);
        for (; size; size -= 4, reg_id += 4, data += 4) {
                u32 val = (data[0] << 24) | (data[1] << 16) |
                        (data[2] << 8) | data[3];
                vp_write(mdev, reg_id, val);
        }
}

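/* Program the default scaler coefficient tables into the VP. */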
static void mxr_reg_vp_default_filter(struct mxr_device *mdev)
{
        mxr_reg_vp_filter_set(mdev, VP_POLY8_Y0_LL,
                filter_y_horiz_tap8, sizeof(filter_y_horiz_tap8));
        mxr_reg_vp_filter_set(mdev, VP_POLY4_Y0_LL,
                filter_y_vert_tap4, sizeof(filter_y_vert_tap4));
        mxr_reg_vp_filter_set(mdev, VP_POLY4_C0_LL,
                filter_cr_horiz_tap4, sizeof(filter_cr_horiz_tap4));
}

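/* Debug helpers: dump the mixer and video processor register state. */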
static void mxr_reg_mxr_dump(struct mxr_device *mdev)
{
#define DUMPREG(reg_id) \
do { \
        mxr_dbg(mdev, #reg_id " = %08x\n", \
                (u32)readl(mdev->res.mxr_regs + reg_id)); \
} while (0)

        DUMPREG(MXR_STATUS);
        DUMPREG(MXR_CFG);
        DUMPREG(MXR_INT_EN);
        DUMPREG(MXR_INT_STATUS);

        DUMPREG(MXR_LAYER_CFG);
        DUMPREG(MXR_VIDEO_CFG);

        DUMPREG(MXR_GRAPHIC0_CFG);
        DUMPREG(MXR_GRAPHIC0_BASE);
        DUMPREG(MXR_GRAPHIC0_SPAN);
        DUMPREG(MXR_GRAPHIC0_WH);
        DUMPREG(MXR_GRAPHIC0_SXY);
        DUMPREG(MXR_GRAPHIC0_DXY);

        DUMPREG(MXR_GRAPHIC1_CFG);
        DUMPREG(MXR_GRAPHIC1_BASE);
        DUMPREG(MXR_GRAPHIC1_SPAN);
        DUMPREG(MXR_GRAPHIC1_WH);
        DUMPREG(MXR_GRAPHIC1_SXY);
        DUMPREG(MXR_GRAPHIC1_DXY);
#undef DUMPREG
}

static void mxr_reg_vp_dump(struct mxr_device *mdev)
{
#define DUMPREG(reg_id) \
do { \
        mxr_dbg(mdev, #reg_id " = %08x\n", \
                (u32)readl(mdev->res.vp_regs + reg_id)); \
} while (0)

        DUMPREG(VP_ENABLE);
        DUMPREG(VP_SRESET);
        DUMPREG(VP_SHADOW_UPDATE);
        DUMPREG(VP_FIELD_ID);
        DUMPREG(VP_MODE);
        DUMPREG(VP_IMG_SIZE_Y);
        DUMPREG(VP_IMG_SIZE_C);
        DUMPREG(VP_PER_RATE_CTRL);
        DUMPREG(VP_TOP_Y_PTR);
        DUMPREG(VP_BOT_Y_PTR);
        DUMPREG(VP_TOP_C_PTR);
        DUMPREG(VP_BOT_C_PTR);
        DUMPREG(VP_ENDIAN_MODE);
        DUMPREG(VP_SRC_H_POSITION);
        DUMPREG(VP_SRC_V_POSITION);
        DUMPREG(VP_SRC_WIDTH);
        DUMPREG(VP_SRC_HEIGHT);
        DUMPREG(VP_DST_H_POSITION);
        DUMPREG(VP_DST_V_POSITION);
        DUMPREG(VP_DST_WIDTH);
        DUMPREG(VP_DST_HEIGHT);
        DUMPREG(VP_H_RATIO);
        DUMPREG(VP_V_RATIO);
#undef DUMPREG
}

void mxr_reg_dump(struct mxr_device *mdev)
{
        mxr_reg_mxr_dump(mdev);
        mxr_reg_vp_dump(mdev);
}