1
2
3
4
5
6
7
8
9
10
11
12#include <linux/interrupt.h>
13#include <linux/io.h>
14#include <linux/ioctl.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/delay.h>
18#include <linux/pm_runtime.h>
19#include <linux/slab.h>
20#include <linux/videodev2.h>
21#include <linux/of_device.h>
22#include <linux/of_graph.h>
23
24#include <media/v4l2-fwnode.h>
25#include <media/v4l2-async.h>
26#include <media/v4l2-common.h>
27#include <media/v4l2-ctrls.h>
28#include <media/v4l2-device.h>
29#include <media/v4l2-event.h>
30#include <media/v4l2-ioctl.h>
31#include <media/v4l2-fh.h>
32#include <media/videobuf2-core.h>
33#include <media/videobuf2-dma-contig.h>
34#include "cal_regs.h"
35
#define CAL_MODULE_NAME "cal"

/* Maximum frame size accepted by the capture path (clamped in try_fmt) */
#define MAX_WIDTH 1920
#define MAX_HEIGHT 1200

#define CAL_VERSION "0.1.0"

MODULE_DESCRIPTION("TI CAL driver");
MODULE_AUTHOR("Benoit Parrot, <bparrot@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(CAL_VERSION);

/* First /dev/videoX node number to request; -1 lets the V4L2 core pick */
static unsigned video_nr = -1;
module_param(video_nr, uint, 0644);
MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect");

/* Verbosity for the cal_dbg()/ctx_dbg() helpers below (0 = quiet) */
static unsigned debug;
module_param(debug, uint, 0644);
MODULE_PARM_DESC(debug, "activates debug info");


/* Default frame period: 30000/1001 ~ 29.97 fps */
static const struct v4l2_fract
	tpf_default = {.numerator = 1001, .denominator = 30000};
59
/*
 * Logging helpers.  The cal_* variants take a struct cal_dev, the ctx_*
 * variants take a struct cal_ctx; both route through the v4l2 logging
 * functions using the embedded v4l2_dev.  "debug" is the module param.
 */
#define cal_dbg(level, caldev, fmt, arg...)	\
	v4l2_dbg(level, debug, &caldev->v4l2_dev, fmt, ##arg)
#define cal_info(caldev, fmt, arg...)	\
	v4l2_info(&caldev->v4l2_dev, fmt, ##arg)
#define cal_err(caldev, fmt, arg...)	\
	v4l2_err(&caldev->v4l2_dev, fmt, ##arg)

#define ctx_dbg(level, ctx, fmt, arg...)	\
	v4l2_dbg(level, debug, &ctx->v4l2_dev, fmt, ##arg)
#define ctx_info(ctx, fmt, arg...)	\
	v4l2_info(&ctx->v4l2_dev, fmt, ##arg)
#define ctx_err(ctx, fmt, arg...)	\
	v4l2_err(&ctx->v4l2_dev, fmt, ##arg)
73
/* Number of selectable inputs per context */
#define CAL_NUM_INPUT 1
/* Number of capture contexts (one per CSI-2 port) */
#define CAL_NUM_CONTEXT 2

/*
 * Bytes per line for a pixel count and per-pixel byte depth, aligned up
 * to 16 bytes.  Arguments are parenthesized so callers may safely pass
 * expressions (e.g. "depth >> 3").
 */
#define bytes_per_line(pixel, bpp) (ALIGN((pixel) * (bpp), 16))

/*
 * MMIO accessors.  "dev" may be any of the mapped register windows
 * (cal_dev, cm_data, cc_data) — each has a "base" member.
 */
#define reg_read(dev, offset) ioread32((dev)->base + (offset))
#define reg_write(dev, offset, val) iowrite32(val, (dev)->base + (offset))

/* Read a register and extract the field selected by "mask" */
#define reg_read_field(dev, offset, mask) get_field(reg_read(dev, offset), \
						    mask)
/*
 * Read-modify-write a single register field.  Wrapped in
 * do { } while (0) so the macro acts as one statement and is safe in
 * unbraced if/else bodies (the original bare { } block was not).
 */
#define reg_write_field(dev, offset, field, mask) do {	\
	u32 val = reg_read(dev, offset);		\
	set_field(&val, field, mask);			\
	reg_write(dev, offset, val); } while (0)
88
89
90
91
92
93
/* Pairing of a V4L2 pixel format with its media-bus code and bus depth */
struct cal_fmt {
	u32	fourcc;		/* V4L2_PIX_FMT_* four-character code */
	u32	code;		/* matching MEDIA_BUS_FMT_* code */
	u8	depth;		/* bits per pixel on the bus */
};
99
/*
 * All pixel formats this driver can expose, keyed by fourcc and
 * media-bus code.  At probe time the subset actually supported by the
 * attached sensor is collected into ctx->active_fmt.
 */
static struct cal_fmt cal_formats[] = {
	/* Packed YUV 4:2:2 */
	{
		.fourcc		= V4L2_PIX_FMT_YUYV,
		.code		= MEDIA_BUS_FMT_YUYV8_2X8,
		.depth		= 16,
	}, {
		.fourcc		= V4L2_PIX_FMT_UYVY,
		.code		= MEDIA_BUS_FMT_UYVY8_2X8,
		.depth		= 16,
	}, {
		.fourcc		= V4L2_PIX_FMT_YVYU,
		.code		= MEDIA_BUS_FMT_YVYU8_2X8,
		.depth		= 16,
	}, {
		.fourcc		= V4L2_PIX_FMT_VYUY,
		.code		= MEDIA_BUS_FMT_VYUY8_2X8,
		.depth		= 16,
	},
	/* RGB variants */
	{
		.fourcc		= V4L2_PIX_FMT_RGB565,
		.code		= MEDIA_BUS_FMT_RGB565_2X8_LE,
		.depth		= 16,
	}, {
		.fourcc		= V4L2_PIX_FMT_RGB565X,
		.code		= MEDIA_BUS_FMT_RGB565_2X8_BE,
		.depth		= 16,
	}, {
		.fourcc		= V4L2_PIX_FMT_RGB555,
		.code		= MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
		.depth		= 16,
	}, {
		.fourcc		= V4L2_PIX_FMT_RGB555X,
		.code		= MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
		.depth		= 16,
	}, {
		.fourcc		= V4L2_PIX_FMT_RGB24,
		.code		= MEDIA_BUS_FMT_RGB888_2X12_LE,
		.depth		= 24,
	}, {
		.fourcc		= V4L2_PIX_FMT_BGR24,
		.code		= MEDIA_BUS_FMT_RGB888_2X12_BE,
		.depth		= 24,
	}, {
		.fourcc		= V4L2_PIX_FMT_RGB32,
		.code		= MEDIA_BUS_FMT_ARGB8888_1X32,
		.depth		= 32,
	},
	/* Raw Bayer, 8-bit */
	{
		.fourcc		= V4L2_PIX_FMT_SBGGR8,
		.code		= MEDIA_BUS_FMT_SBGGR8_1X8,
		.depth		= 8,
	}, {
		.fourcc		= V4L2_PIX_FMT_SGBRG8,
		.code		= MEDIA_BUS_FMT_SGBRG8_1X8,
		.depth		= 8,
	}, {
		.fourcc		= V4L2_PIX_FMT_SGRBG8,
		.code		= MEDIA_BUS_FMT_SGRBG8_1X8,
		.depth		= 8,
	}, {
		.fourcc		= V4L2_PIX_FMT_SRGGB8,
		.code		= MEDIA_BUS_FMT_SRGGB8_1X8,
		.depth		= 8,
	},
	/* Raw Bayer, 10-bit (stored in 16-bit containers) */
	{
		.fourcc		= V4L2_PIX_FMT_SBGGR10,
		.code		= MEDIA_BUS_FMT_SBGGR10_1X10,
		.depth		= 16,
	}, {
		.fourcc		= V4L2_PIX_FMT_SGBRG10,
		.code		= MEDIA_BUS_FMT_SGBRG10_1X10,
		.depth		= 16,
	}, {
		.fourcc		= V4L2_PIX_FMT_SGRBG10,
		.code		= MEDIA_BUS_FMT_SGRBG10_1X10,
		.depth		= 16,
	}, {
		.fourcc		= V4L2_PIX_FMT_SRGGB10,
		.code		= MEDIA_BUS_FMT_SRGGB10_1X10,
		.depth		= 16,
	},
	/* Raw Bayer, 12-bit (stored in 16-bit containers) */
	{
		.fourcc		= V4L2_PIX_FMT_SBGGR12,
		.code		= MEDIA_BUS_FMT_SBGGR12_1X12,
		.depth		= 16,
	}, {
		.fourcc		= V4L2_PIX_FMT_SGBRG12,
		.code		= MEDIA_BUS_FMT_SGBRG12_1X12,
		.depth		= 16,
	}, {
		.fourcc		= V4L2_PIX_FMT_SGRBG12,
		.code		= MEDIA_BUS_FMT_SGRBG12_1X12,
		.depth		= 16,
	}, {
		.fourcc		= V4L2_PIX_FMT_SRGGB12,
		.code		= MEDIA_BUS_FMT_SRGGB12_1X12,
		.depth		= 16,
	},
};
195
196
197static char *fourcc_to_str(u32 fmt)
198{
199 static char code[5];
200
201 code[0] = (unsigned char)(fmt & 0xff);
202 code[1] = (unsigned char)((fmt >> 8) & 0xff);
203 code[2] = (unsigned char)((fmt >> 16) & 0xff);
204 code[3] = (unsigned char)((fmt >> 24) & 0xff);
205 code[4] = '\0';
206
207 return code;
208}
209
210
/* One capture buffer: vb2 buffer plus our queue linkage */
struct cal_buffer {
	/* Must be first: vb2 core casts between the two */
	struct vb2_v4l2_buffer	vb;
	struct list_head	list;	/* entry on cal_dmaqueue.active */
	const struct cal_fmt	*fmt;
};
217
/* Per-context DMA queue state, protected by cal_ctx.slock */
struct cal_dmaqueue {
	struct list_head	active;	/* buffers queued for capture */

	/* Counters for framerate bookkeeping */
	int			frame;
	int			ini_jiffies;
};
225
/* Control Module handle: the CAMERRX_CONTROL register window */
struct cm_data {
	void __iomem		*base;	/* mapped camerrx_control regs */
	struct resource		*res;

	unsigned int		camerrx_control;

	struct platform_device	*pdev;
};
234
/* Camera Core (CSI-2 PHY) handle, one per port */
struct cc_data {
	void __iomem		*base;	/* mapped cal_rx_coreN regs */
	struct resource		*res;

	struct platform_device	*pdev;
};
241
242
243
244
245
/*
 * Per-instance state for the whole CAL IP: top-level MMIO window, IRQ,
 * the Control Module helper, and one PHY + capture context per port.
 */
struct cal_dev {
	int			irq;	/* CAL interrupt line */
	void __iomem		*base;	/* CAL top-level registers */
	struct resource		*res;
	struct platform_device	*pdev;
	struct v4l2_device	v4l2_dev;

	/* Control Module handle (camerrx clock/lane routing) */
	struct cm_data		*cm;
	/* Camera Core module handles, one per CSI-2 PHY */
	struct cc_data		*cc[CAL_NUM_CSI2_PORTS];

	/* Capture contexts, indexed by port - 1 */
	struct cal_ctx		*ctx[CAL_NUM_CONTEXT];
};
260
261
262
263
/*
 * Per-port capture context: V4L2 device/node state, the bound sensor
 * subdev, current formats, and the vb2 queue plus DMA bookkeeping.
 */
struct cal_ctx {
	struct v4l2_device	v4l2_dev;
	struct v4l2_ctrl_handler ctrl_handler;
	struct video_device	vdev;
	struct v4l2_async_notifier notifier;
	struct v4l2_subdev	*sensor;	/* bound camera subdev */
	/* parsed DT endpoint (CSI-2 lane/polarity info) */
	struct v4l2_fwnode_endpoint	endpoint;

	struct v4l2_async_subdev asd;
	struct v4l2_async_subdev *asd_list[1];

	struct v4l2_fh		fh;
	struct cal_dev		*dev;	/* parent CAL instance */
	struct cc_data		*cc;	/* this port's PHY */

	/* serializes ioctls / file operations */
	struct mutex		mutex;
	/* protects vidq and the cur_frm/next_frm pointers (IRQ context) */
	spinlock_t		slock;

	unsigned long		jiffies;

	struct cal_dmaqueue	vidq;

	/* currently selected input (always 0, CAL_NUM_INPUT == 1) */
	int			input;

	/* active format: driver descriptor, V4L2 view, media-bus view */
	const struct cal_fmt	*fmt;
	struct v4l2_format	v_fmt;
	struct v4l2_mbus_framefmt	m_fmt;

	/* formats from cal_formats[] the attached sensor can produce */
	struct cal_fmt		*active_fmt[ARRAY_SIZE(cal_formats)];
	int			num_active_fmt;

	struct v4l2_fract	timeperframe;
	unsigned int		sequence;	/* frame sequence counter */
	unsigned int		external_rate;	/* sensor pixel rate, Hz */
	struct vb2_queue	vb_vidq;
	unsigned int		seq_count;
	unsigned int		csi2_port;	/* 1 or 2 */
	unsigned int		virtual_channel;

	/* buffer currently being filled by the write DMA */
	struct cal_buffer	*cur_frm;
	/* buffer programmed to be filled next */
	struct cal_buffer	*next_frm;
};
316
317static const struct cal_fmt *find_format_by_pix(struct cal_ctx *ctx,
318 u32 pixelformat)
319{
320 const struct cal_fmt *fmt;
321 unsigned int k;
322
323 for (k = 0; k < ctx->num_active_fmt; k++) {
324 fmt = ctx->active_fmt[k];
325 if (fmt->fourcc == pixelformat)
326 return fmt;
327 }
328
329 return NULL;
330}
331
332static const struct cal_fmt *find_format_by_code(struct cal_ctx *ctx,
333 u32 code)
334{
335 const struct cal_fmt *fmt;
336 unsigned int k;
337
338 for (k = 0; k < ctx->num_active_fmt; k++) {
339 fmt = ctx->active_fmt[k];
340 if (fmt->code == code)
341 return fmt;
342 }
343
344 return NULL;
345}
346
/* Recover the owning context from its embedded async notifier */
static inline struct cal_ctx *notifier_to_ctx(struct v4l2_async_notifier *n)
{
	return container_of(n, struct cal_ctx, notifier);
}
351
352static inline int get_field(u32 value, u32 mask)
353{
354 return (value & mask) >> __ffs(mask);
355}
356
357static inline void set_field(u32 *valp, u32 field, u32 mask)
358{
359 u32 val = *valp;
360
361 val &= ~mask;
362 val |= (field << __ffs(mask)) & mask;
363 *valp = val;
364}
365
366
367
368
369static struct cm_data *cm_create(struct cal_dev *dev)
370{
371 struct platform_device *pdev = dev->pdev;
372 struct cm_data *cm;
373
374 cm = devm_kzalloc(&pdev->dev, sizeof(*cm), GFP_KERNEL);
375 if (!cm)
376 return ERR_PTR(-ENOMEM);
377
378 cm->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
379 "camerrx_control");
380 cm->base = devm_ioremap_resource(&pdev->dev, cm->res);
381 if (IS_ERR(cm->base)) {
382 cal_err(dev, "failed to ioremap\n");
383 return ERR_CAST(cm->base);
384 }
385
386 cal_dbg(1, dev, "ioresource %s at %pa - %pa\n",
387 cm->res->name, &cm->res->start, &cm->res->end);
388
389 return cm;
390}
391
392static void camerarx_phy_enable(struct cal_ctx *ctx)
393{
394 u32 val;
395
396 if (!ctx->dev->cm->base) {
397 ctx_err(ctx, "cm not mapped\n");
398 return;
399 }
400
401 val = reg_read(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL);
402 if (ctx->csi2_port == 1) {
403 set_field(&val, 1, CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK);
404 set_field(&val, 0, CM_CAMERRX_CTRL_CSI0_CAMMODE_MASK);
405
406 set_field(&val, 0xf, CM_CAMERRX_CTRL_CSI0_LANEENABLE_MASK);
407 set_field(&val, 1, CM_CAMERRX_CTRL_CSI0_MODE_MASK);
408 } else if (ctx->csi2_port == 2) {
409 set_field(&val, 1, CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK);
410 set_field(&val, 0, CM_CAMERRX_CTRL_CSI1_CAMMODE_MASK);
411
412 set_field(&val, 0x3, CM_CAMERRX_CTRL_CSI1_LANEENABLE_MASK);
413 set_field(&val, 1, CM_CAMERRX_CTRL_CSI1_MODE_MASK);
414 }
415 reg_write(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL, val);
416}
417
418static void camerarx_phy_disable(struct cal_ctx *ctx)
419{
420 u32 val;
421
422 if (!ctx->dev->cm->base) {
423 ctx_err(ctx, "cm not mapped\n");
424 return;
425 }
426
427 val = reg_read(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL);
428 if (ctx->csi2_port == 1)
429 set_field(&val, 0x0, CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK);
430 else if (ctx->csi2_port == 2)
431 set_field(&val, 0x0, CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK);
432 reg_write(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL, val);
433}
434
435
436
437
438static struct cc_data *cc_create(struct cal_dev *dev, unsigned int core)
439{
440 struct platform_device *pdev = dev->pdev;
441 struct cc_data *cc;
442
443 cc = devm_kzalloc(&pdev->dev, sizeof(*cc), GFP_KERNEL);
444 if (!cc)
445 return ERR_PTR(-ENOMEM);
446
447 cc->res = platform_get_resource_byname(pdev,
448 IORESOURCE_MEM,
449 (core == 0) ?
450 "cal_rx_core0" :
451 "cal_rx_core1");
452 cc->base = devm_ioremap_resource(&pdev->dev, cc->res);
453 if (IS_ERR(cc->base)) {
454 cal_err(dev, "failed to ioremap\n");
455 return ERR_CAST(cc->base);
456 }
457
458 cal_dbg(1, dev, "ioresource %s at %pa - %pa\n",
459 cc->res->name, &cc->res->start, &cc->res->end);
460
461 return cc;
462}
463
464
465
466
467static void cal_get_hwinfo(struct cal_dev *dev)
468{
469 u32 revision = 0;
470 u32 hwinfo = 0;
471
472 revision = reg_read(dev, CAL_HL_REVISION);
473 cal_dbg(3, dev, "CAL_HL_REVISION = 0x%08x (expecting 0x40000200)\n",
474 revision);
475
476 hwinfo = reg_read(dev, CAL_HL_HWINFO);
477 cal_dbg(3, dev, "CAL_HL_HWINFO = 0x%08x (expecting 0xA3C90469)\n",
478 hwinfo);
479}
480
/* Power up the CAL via runtime PM; returns pm_runtime_get_sync() status */
static inline int cal_runtime_get(struct cal_dev *dev)
{
	return pm_runtime_get_sync(&dev->pdev->dev);
}
485
/* Drop the runtime-PM reference taken by cal_runtime_get() */
static inline void cal_runtime_put(struct cal_dev *dev)
{
	pm_runtime_put_sync(&dev->pdev->dev);
}
490
/*
 * Dump all mapped register windows (CAL core, each active CSI-2 core,
 * and CAMERRX control) to the kernel log.  Debug aid only; the device
 * must be powered when this is called.
 */
static void cal_quickdump_regs(struct cal_dev *dev)
{
	cal_info(dev, "CAL Registers @ 0x%pa:\n", &dev->res->start);
	print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
		       (__force const void *)dev->base,
		       resource_size(dev->res), false);

	if (dev->ctx[0]) {
		cal_info(dev, "CSI2 Core 0 Registers @ %pa:\n",
			 &dev->ctx[0]->cc->res->start);
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
			       (__force const void *)dev->ctx[0]->cc->base,
			       resource_size(dev->ctx[0]->cc->res),
			       false);
	}

	if (dev->ctx[1]) {
		cal_info(dev, "CSI2 Core 1 Registers @ %pa:\n",
			 &dev->ctx[1]->cc->res->start);
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
			       (__force const void *)dev->ctx[1]->cc->base,
			       resource_size(dev->ctx[1]->cc->res),
			       false);
	}

	cal_info(dev, "CAMERRX_Control Registers @ %pa:\n",
		 &dev->cm->res->start);
	print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
		       (__force const void *)dev->cm->base,
		       resource_size(dev->cm->res), false);
}
522
523
524
525
/*
 * Enable this port's interrupts.  Status line 2 is handled by cal_irq()
 * as frame completion, line 3 as frame start (see cal_irq below).
 */
static void enable_irqs(struct cal_ctx *ctx)
{
	reg_write_field(ctx->dev,
			CAL_HL_IRQENABLE_SET(2),
			CAL_HL_IRQ_ENABLE,
			CAL_HL_IRQ_MASK(ctx->csi2_port));

	reg_write_field(ctx->dev,
			CAL_HL_IRQENABLE_SET(3),
			CAL_HL_IRQ_ENABLE,
			CAL_HL_IRQ_MASK(ctx->csi2_port));

	/*
	 * Enable the upper VC IRQ bits.  NOTE(review): instance 1 is
	 * used regardless of ctx->csi2_port — confirm whether port 2
	 * should program its own CAL_CSI2_VC_IRQENABLE instance.
	 */
	reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0xFF000000);
}
541
/* Disable everything enable_irqs() turned on for this port */
static void disable_irqs(struct cal_ctx *ctx)
{
	reg_write_field(ctx->dev,
			CAL_HL_IRQENABLE_CLR(2),
			CAL_HL_IRQ_CLEAR,
			CAL_HL_IRQ_MASK(ctx->csi2_port));

	reg_write_field(ctx->dev,
			CAL_HL_IRQENABLE_CLR(3),
			CAL_HL_IRQ_CLEAR,
			CAL_HL_IRQ_MASK(ctx->csi2_port));

	/* see NOTE in enable_irqs() about the fixed instance number */
	reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0);
}
557
/*
 * Bring up the CSI-2 receiver for this context's port: program the
 * stop-state detection timing, power up the complex IO (polling for the
 * ON state), and configure the global CAL DMA arbitration settings.
 */
static void csi2_init(struct cal_ctx *ctx)
{
	int i;
	u32 val;

	/* Stop-state timing: force RX mode, x16 counter multiplier,
	 * counter value 407 — TODO confirm values against the TRM */
	val = reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port));
	set_field(&val, CAL_GEN_ENABLE,
		  CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK);
	set_field(&val, CAL_GEN_ENABLE,
		  CAL_CSI2_TIMING_STOP_STATE_X16_IO1_MASK);
	set_field(&val, CAL_GEN_DISABLE,
		  CAL_CSI2_TIMING_STOP_STATE_X4_IO1_MASK);
	set_field(&val, 407, CAL_CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK);
	reg_write(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port), val);
	ctx_dbg(3, ctx, "CAL_CSI2_TIMING(%d) = 0x%08x\n", ctx->csi2_port,
		reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port)));

	/* Take the complex IO out of reset and command it ON, then poll
	 * (up to ~10 ms) for the power status to report ON */
	val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
	set_field(&val, CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_OPERATIONAL,
		  CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK);
	set_field(&val, CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ON,
		  CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_MASK);
	reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port), val);
	for (i = 0; i < 10; i++) {
		if (reg_read_field(ctx->dev,
				   CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port),
				   CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_MASK) ==
		    CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_ON)
			break;
		usleep_range(1000, 1100);
	}
	/* Note: a timeout here is not treated as fatal, only logged */
	ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x\n", ctx->csi2_port,
		reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port)));

	/* Global DMA control: 128-byte bursts, non-posted writes,
	 * max tag count and MFLAG thresholds */
	val = reg_read(ctx->dev, CAL_CTRL);
	set_field(&val, CAL_CTRL_BURSTSIZE_BURST128, CAL_CTRL_BURSTSIZE_MASK);
	set_field(&val, 0xF, CAL_CTRL_TAGCNT_MASK);
	set_field(&val, CAL_CTRL_POSTED_WRITES_NONPOSTED,
		  CAL_CTRL_POSTED_WRITES_MASK);
	set_field(&val, 0xFF, CAL_CTRL_MFLAGL_MASK);
	set_field(&val, 0xFF, CAL_CTRL_MFLAGH_MASK);
	reg_write(ctx->dev, CAL_CTRL, val);
	ctx_dbg(3, ctx, "CAL_CTRL = 0x%08x\n", reg_read(ctx->dev, CAL_CTRL));
}
602
603static void csi2_lane_config(struct cal_ctx *ctx)
604{
605 u32 val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
606 u32 lane_mask = CAL_CSI2_COMPLEXIO_CFG_CLOCK_POSITION_MASK;
607 u32 polarity_mask = CAL_CSI2_COMPLEXIO_CFG_CLOCK_POL_MASK;
608 struct v4l2_fwnode_bus_mipi_csi2 *mipi_csi2 =
609 &ctx->endpoint.bus.mipi_csi2;
610 int lane;
611
612 set_field(&val, mipi_csi2->clock_lane + 1, lane_mask);
613 set_field(&val, mipi_csi2->lane_polarities[0], polarity_mask);
614 for (lane = 0; lane < mipi_csi2->num_data_lanes; lane++) {
615
616
617
618
619 lane_mask <<= 4;
620 polarity_mask <<= 4;
621 set_field(&val, mipi_csi2->data_lanes[lane] + 1, lane_mask);
622 set_field(&val, mipi_csi2->lane_polarities[lane + 1],
623 polarity_mask);
624 }
625
626 reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port), val);
627 ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x\n",
628 ctx->csi2_port, val);
629}
630
/* Enable the PPI interface: start accepting data from the PHY */
static void csi2_ppi_enable(struct cal_ctx *ctx)
{
	reg_write_field(ctx->dev, CAL_CSI2_PPI_CTRL(ctx->csi2_port),
			CAL_GEN_ENABLE, CAL_CSI2_PPI_CTRL_IF_EN_MASK);
}
636
/* Disable the PPI interface: stop accepting data from the PHY */
static void csi2_ppi_disable(struct cal_ctx *ctx)
{
	reg_write_field(ctx->dev, CAL_CSI2_PPI_CTRL(ctx->csi2_port),
			CAL_GEN_DISABLE, CAL_CSI2_PPI_CTRL_IF_EN_MASK);
}
642
/*
 * Configure CSI-2 processing context 0 for this port: route it to the
 * port's own CPORT, select the virtual channel, and pack at line
 * granularity with pixel-level interrupt attribution.
 */
static void csi2_ctx_config(struct cal_ctx *ctx)
{
	u32 val;

	val = reg_read(ctx->dev, CAL_CSI2_CTX0(ctx->csi2_port));
	set_field(&val, ctx->csi2_port, CAL_CSI2_CTX_CPORT_MASK);
	/*
	 * DT = 0x1: presumably "accept any data type" (filter disabled)
	 * — TODO confirm against the CAL TRM DT field encoding.
	 */
	set_field(&val, 0x1, CAL_CSI2_CTX_DT_MASK);
	/* Virtual channel to capture, from the DT endpoint */
	set_field(&val, ctx->virtual_channel, CAL_CSI2_CTX_VC_MASK);
	/* LINES = 0: line count not constrained by the context */
	set_field(&val, 0, CAL_CSI2_CTX_LINES_MASK);
	set_field(&val, CAL_CSI2_CTX_ATT_PIX, CAL_CSI2_CTX_ATT_MASK);
	set_field(&val, CAL_CSI2_CTX_PACK_MODE_LINE,
		  CAL_CSI2_CTX_PACK_MODE_MASK);
	reg_write(ctx->dev, CAL_CSI2_CTX0(ctx->csi2_port), val);
	ctx_dbg(3, ctx, "CAL_CSI2_CTX0(%d) = 0x%08x\n", ctx->csi2_port,
		reg_read(ctx->dev, CAL_CSI2_CTX0(ctx->csi2_port)));
}
669
/*
 * Configure the pixel-processing unit for this port: 8-bit extract and
 * pack with DPCM decode/encode bypassed (i.e. pass-through), then
 * enable the unit.
 */
static void pix_proc_config(struct cal_ctx *ctx)
{
	u32 val;

	val = reg_read(ctx->dev, CAL_PIX_PROC(ctx->csi2_port));
	set_field(&val, CAL_PIX_PROC_EXTRACT_B8, CAL_PIX_PROC_EXTRACT_MASK);
	set_field(&val, CAL_PIX_PROC_DPCMD_BYPASS, CAL_PIX_PROC_DPCMD_MASK);
	set_field(&val, CAL_PIX_PROC_DPCME_BYPASS, CAL_PIX_PROC_DPCME_MASK);
	set_field(&val, CAL_PIX_PROC_PACK_B8, CAL_PIX_PROC_PACK_MASK);
	set_field(&val, ctx->csi2_port, CAL_PIX_PROC_CPORT_MASK);
	set_field(&val, CAL_GEN_ENABLE, CAL_PIX_PROC_EN_MASK);
	reg_write(ctx->dev, CAL_PIX_PROC(ctx->csi2_port), val);
	ctx_dbg(3, ctx, "CAL_PIX_PROC(%d) = 0x%08x\n", ctx->csi2_port,
		reg_read(ctx->dev, CAL_PIX_PROC(ctx->csi2_port)));
}
685
/*
 * Configure the write DMA for this port.  "width" is the line width in
 * bytes (bytesperline).  The line offset is programmed as width / 16
 * and the transfer size as width / 8 — presumably 128-bit (16-byte) and
 * 64-bit (8-byte) units respectively; TODO confirm against the TRM.
 */
static void cal_wr_dma_config(struct cal_ctx *ctx,
			      unsigned int width)
{
	u32 val;

	/* Constant-address mode, linear pattern, stall reads while busy */
	val = reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port));
	set_field(&val, ctx->csi2_port, CAL_WR_DMA_CTRL_CPORT_MASK);
	set_field(&val, CAL_WR_DMA_CTRL_DTAG_PIX_DAT,
		  CAL_WR_DMA_CTRL_DTAG_MASK);
	set_field(&val, CAL_WR_DMA_CTRL_MODE_CONST,
		  CAL_WR_DMA_CTRL_MODE_MASK);
	set_field(&val, CAL_WR_DMA_CTRL_PATTERN_LINEAR,
		  CAL_WR_DMA_CTRL_PATTERN_MASK);
	set_field(&val, CAL_GEN_ENABLE, CAL_WR_DMA_CTRL_STALL_RD_MASK);
	reg_write(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port), val);
	ctx_dbg(3, ctx, "CAL_WR_DMA_CTRL(%d) = 0x%08x\n", ctx->csi2_port,
		reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port)));

	/* Line-to-line offset */
	reg_write_field(ctx->dev,
			CAL_WR_DMA_OFST(ctx->csi2_port),
			(width / 16),
			CAL_WR_DMA_OFST_MASK);
	ctx_dbg(3, ctx, "CAL_WR_DMA_OFST(%d) = 0x%08x\n", ctx->csi2_port,
		reg_read(ctx->dev, CAL_WR_DMA_OFST(ctx->csi2_port)));

	val = reg_read(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port));
	/* XSKIP = 0: write every pixel, no horizontal decimation */
	set_field(&val, 0, CAL_WR_DMA_XSIZE_XSKIP_MASK);
	/* Horizontal transfer size per line */
	set_field(&val, (width / 8), CAL_WR_DMA_XSIZE_MASK);
	reg_write(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port), val);
	ctx_dbg(3, ctx, "CAL_WR_DMA_XSIZE(%d) = 0x%08x\n", ctx->csi2_port,
		reg_read(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port)));
}
728
/* Point the write DMA at a new destination buffer (physical address) */
static void cal_wr_dma_addr(struct cal_ctx *ctx, unsigned int dmaaddr)
{
	reg_write(ctx->dev, CAL_WR_DMA_ADDR(ctx->csi2_port), dmaaddr);
}
733
734
735
736
737#define TCLK_TERM 0
738#define TCLK_MISS 1
739#define TCLK_SETTLE 14
740#define THS_SETTLE 15
741
742static void csi2_phy_config(struct cal_ctx *ctx)
743{
744 unsigned int reg0, reg1;
745 unsigned int ths_term, ths_settle;
746 unsigned int ddrclkperiod_us;
747
748
749
750
751 ddrclkperiod_us = ctx->external_rate / 2000000;
752 ddrclkperiod_us = 1000000 / ddrclkperiod_us;
753 ctx_dbg(1, ctx, "ddrclkperiod_us: %d\n", ddrclkperiod_us);
754
755 ths_term = 20000 / ddrclkperiod_us;
756 ths_term = (ths_term >= 2) ? ths_term - 2 : ths_term;
757 ctx_dbg(1, ctx, "ths_term: %d (0x%02x)\n", ths_term, ths_term);
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774 ths_settle = THS_SETTLE;
775 ctx_dbg(1, ctx, "ths_settle: %d (0x%02x)\n", ths_settle, ths_settle);
776
777 reg0 = reg_read(ctx->cc, CAL_CSI2_PHY_REG0);
778 set_field(®0, CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_DISABLE,
779 CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_MASK);
780 set_field(®0, ths_term, CAL_CSI2_PHY_REG0_THS_TERM_MASK);
781 set_field(®0, ths_settle, CAL_CSI2_PHY_REG0_THS_SETTLE_MASK);
782
783 ctx_dbg(1, ctx, "CSI2_%d_REG0 = 0x%08x\n", (ctx->csi2_port - 1), reg0);
784 reg_write(ctx->cc, CAL_CSI2_PHY_REG0, reg0);
785
786 reg1 = reg_read(ctx->cc, CAL_CSI2_PHY_REG1);
787 set_field(®1, TCLK_TERM, CAL_CSI2_PHY_REG1_TCLK_TERM_MASK);
788 set_field(®1, 0xb8, CAL_CSI2_PHY_REG1_DPHY_HS_SYNC_PATTERN_MASK);
789 set_field(®1, TCLK_MISS, CAL_CSI2_PHY_REG1_CTRLCLK_DIV_FACTOR_MASK);
790 set_field(®1, TCLK_SETTLE, CAL_CSI2_PHY_REG1_TCLK_SETTLE_MASK);
791
792 ctx_dbg(1, ctx, "CSI2_%d_REG1 = 0x%08x\n", (ctx->csi2_port - 1), reg1);
793 reg_write(ctx->cc, CAL_CSI2_PHY_REG1, reg1);
794}
795
/*
 * Cache the sensor's pixel rate (V4L2_CID_PIXEL_RATE) into
 * ctx->external_rate for the PHY timing calculations.
 *
 * Returns 0 on success, -ENODEV with no bound sensor, -EPIPE if the
 * sensor does not expose a pixel-rate control.
 * Note: the s64 control value is truncated to unsigned int here.
 */
static int cal_get_external_info(struct cal_ctx *ctx)
{
	struct v4l2_ctrl *ctrl;

	if (!ctx->sensor)
		return -ENODEV;

	ctrl = v4l2_ctrl_find(ctx->sensor->ctrl_handler, V4L2_CID_PIXEL_RATE);
	if (!ctrl) {
		ctx_err(ctx, "no pixel rate control in subdev: %s\n",
			ctx->sensor->name);
		return -EPIPE;
	}

	ctx->external_rate = v4l2_ctrl_g_ctrl_int64(ctrl);
	ctx_dbg(3, ctx, "sensor Pixel Rate: %d\n", ctx->external_rate);

	return 0;
}
815
/*
 * Pop the next queued buffer and program it into the write DMA.
 * Caller must hold ctx->slock and ensure the active list is non-empty
 * (see cal_irq); the buffer becomes ctx->next_frm.
 */
static inline void cal_schedule_next_buffer(struct cal_ctx *ctx)
{
	struct cal_dmaqueue *dma_q = &ctx->vidq;
	struct cal_buffer *buf;
	unsigned long addr;

	buf = list_entry(dma_q->active.next, struct cal_buffer, list);
	ctx->next_frm = buf;
	list_del(&buf->list);

	addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
	cal_wr_dma_addr(ctx, addr);
}
829
/*
 * Hand the just-completed frame back to vb2: stamp time, field and
 * sequence number, mark it DONE, and advance cur_frm to the buffer the
 * DMA is now filling.  Called from IRQ context.
 */
static inline void cal_process_buffer_complete(struct cal_ctx *ctx)
{
	ctx->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns();
	ctx->cur_frm->vb.field = ctx->m_fmt.field;
	ctx->cur_frm->vb.sequence = ctx->sequence++;

	vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
	ctx->cur_frm = ctx->next_frm;
}
839
/*
 * IRQ status test helpers.  The "irq" argument is now parenthesized so
 * expressions can be passed safely (standard macro hygiene).
 */
#define isvcirqset(irq, vc, ff) ((irq) & \
	(CAL_CSI2_VC_IRQENABLE_ ##ff ##_IRQ_##vc ##_MASK))

#define isportirqset(irq, port) ((irq) & CAL_HL_IRQ_MASK(port))
844
/*
 * CAL interrupt handler.  Two status lines are serviced per port:
 *   - line 2: a frame finished; complete cur_frm if a new buffer had
 *     been programmed (cur_frm != next_frm).
 *   - line 3: a frame started; if more buffers are queued and none is
 *     pending, program the next one into the DMA under slock.
 * Status bits are acknowledged by writing them back before processing.
 */
static irqreturn_t cal_irq(int irq_cal, void *data)
{
	struct cal_dev *dev = (struct cal_dev *)data;
	struct cal_ctx *ctx;
	struct cal_dmaqueue *dma_q;
	u32 irqst2, irqst3;

	irqst2 = reg_read(dev, CAL_HL_IRQSTATUS(2));
	if (irqst2) {
		/* ack the pending bits */
		reg_write(dev, CAL_HL_IRQSTATUS(2), irqst2);

		if (isportirqset(irqst2, 1)) {
			ctx = dev->ctx[0];

			if (ctx->cur_frm != ctx->next_frm)
				cal_process_buffer_complete(ctx);
		}

		if (isportirqset(irqst2, 2)) {
			ctx = dev->ctx[1];

			if (ctx->cur_frm != ctx->next_frm)
				cal_process_buffer_complete(ctx);
		}
	}

	irqst3 = reg_read(dev, CAL_HL_IRQSTATUS(3));
	if (irqst3) {
		/* ack the pending bits */
		reg_write(dev, CAL_HL_IRQSTATUS(3), irqst3);

		if (isportirqset(irqst3, 1)) {
			ctx = dev->ctx[0];
			dma_q = &ctx->vidq;

			spin_lock(&ctx->slock);
			if (!list_empty(&dma_q->active) &&
			    ctx->cur_frm == ctx->next_frm)
				cal_schedule_next_buffer(ctx);
			spin_unlock(&ctx->slock);
		}

		if (isportirqset(irqst3, 2)) {
			ctx = dev->ctx[1];
			dma_q = &ctx->vidq;

			spin_lock(&ctx->slock);
			if (!list_empty(&dma_q->active) &&
			    ctx->cur_frm == ctx->next_frm)
				cal_schedule_next_buffer(ctx);
			spin_unlock(&ctx->slock);
		}
	}

	return IRQ_HANDLED;
}
906
907
908
909
910static int cal_querycap(struct file *file, void *priv,
911 struct v4l2_capability *cap)
912{
913 struct cal_ctx *ctx = video_drvdata(file);
914
915 strscpy(cap->driver, CAL_MODULE_NAME, sizeof(cap->driver));
916 strscpy(cap->card, CAL_MODULE_NAME, sizeof(cap->card));
917
918 snprintf(cap->bus_info, sizeof(cap->bus_info),
919 "platform:%s", ctx->v4l2_dev.name);
920 cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
921 V4L2_CAP_READWRITE;
922 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
923 return 0;
924}
925
926static int cal_enum_fmt_vid_cap(struct file *file, void *priv,
927 struct v4l2_fmtdesc *f)
928{
929 struct cal_ctx *ctx = video_drvdata(file);
930 const struct cal_fmt *fmt = NULL;
931
932 if (f->index >= ctx->num_active_fmt)
933 return -EINVAL;
934
935 fmt = ctx->active_fmt[f->index];
936
937 f->pixelformat = fmt->fourcc;
938 f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
939 return 0;
940}
941
942static int __subdev_get_format(struct cal_ctx *ctx,
943 struct v4l2_mbus_framefmt *fmt)
944{
945 struct v4l2_subdev_format sd_fmt;
946 struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
947 int ret;
948
949 sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
950 sd_fmt.pad = 0;
951
952 ret = v4l2_subdev_call(ctx->sensor, pad, get_fmt, NULL, &sd_fmt);
953 if (ret)
954 return ret;
955
956 *fmt = *mbus_fmt;
957
958 ctx_dbg(1, ctx, "%s %dx%d code:%04X\n", __func__,
959 fmt->width, fmt->height, fmt->code);
960
961 return 0;
962}
963
964static int __subdev_set_format(struct cal_ctx *ctx,
965 struct v4l2_mbus_framefmt *fmt)
966{
967 struct v4l2_subdev_format sd_fmt;
968 struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
969 int ret;
970
971 sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
972 sd_fmt.pad = 0;
973 *mbus_fmt = *fmt;
974
975 ret = v4l2_subdev_call(ctx->sensor, pad, set_fmt, NULL, &sd_fmt);
976 if (ret)
977 return ret;
978
979 ctx_dbg(1, ctx, "%s %dx%d code:%04X\n", __func__,
980 fmt->width, fmt->height, fmt->code);
981
982 return 0;
983}
984
985static int cal_calc_format_size(struct cal_ctx *ctx,
986 const struct cal_fmt *fmt,
987 struct v4l2_format *f)
988{
989 if (!fmt) {
990 ctx_dbg(3, ctx, "No cal_fmt provided!\n");
991 return -EINVAL;
992 }
993
994 v4l_bound_align_image(&f->fmt.pix.width, 48, MAX_WIDTH, 2,
995 &f->fmt.pix.height, 32, MAX_HEIGHT, 0, 0);
996 f->fmt.pix.bytesperline = bytes_per_line(f->fmt.pix.width,
997 fmt->depth >> 3);
998 f->fmt.pix.sizeimage = f->fmt.pix.height *
999 f->fmt.pix.bytesperline;
1000
1001 ctx_dbg(3, ctx, "%s: fourcc: %s size: %dx%d bpl:%d img_size:%d\n",
1002 __func__, fourcc_to_str(f->fmt.pix.pixelformat),
1003 f->fmt.pix.width, f->fmt.pix.height,
1004 f->fmt.pix.bytesperline, f->fmt.pix.sizeimage);
1005
1006 return 0;
1007}
1008
/* VIDIOC_G_FMT: return the currently configured capture format */
static int cal_g_fmt_vid_cap(struct file *file, void *priv,
			     struct v4l2_format *f)
{
	struct cal_ctx *ctx = video_drvdata(file);

	*f = ctx->v_fmt;

	return 0;
}
1018
/*
 * VIDIOC_TRY_FMT: validate a requested format without applying it.
 * Unknown fourccs fall back to the first active format; the requested
 * size is matched against the sensor's enumerated frame sizes (exact
 * max match or within a continuous range), else the current size is
 * kept.  Field and colorspace are forced to the current values.
 */
static int cal_try_fmt_vid_cap(struct file *file, void *priv,
			       struct v4l2_format *f)
{
	struct cal_ctx *ctx = video_drvdata(file);
	const struct cal_fmt *fmt;
	struct v4l2_subdev_frame_size_enum fse;
	int ret, found;

	fmt = find_format_by_pix(ctx, f->fmt.pix.pixelformat);
	if (!fmt) {
		ctx_dbg(3, ctx, "Fourcc format (0x%08x) not found.\n",
			f->fmt.pix.pixelformat);

		/* Just get the first one enumerated */
		fmt = ctx->active_fmt[0];
		f->fmt.pix.pixelformat = fmt->fourcc;
	}

	f->fmt.pix.field = ctx->v_fmt.fmt.pix.field;

	/* check for/find a valid width/height */
	ret = 0;
	found = false;
	fse.pad = 0;
	fse.code = fmt->code;
	fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	for (fse.index = 0; ; fse.index++) {
		ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_size,
				       NULL, &fse);
		if (ret)
			break;

		if ((f->fmt.pix.width == fse.max_width) &&
		    (f->fmt.pix.height == fse.max_height)) {
			found = true;
			break;
		} else if ((f->fmt.pix.width >= fse.min_width) &&
			   (f->fmt.pix.width <= fse.max_width) &&
			   (f->fmt.pix.height >= fse.min_height) &&
			   (f->fmt.pix.height <= fse.max_height)) {
			found = true;
			break;
		}
	}

	if (!found) {
		/* use the current size as a fallback */
		f->fmt.pix.width = ctx->v_fmt.fmt.pix.width;
		f->fmt.pix.height = ctx->v_fmt.fmt.pix.height;
	}

	/* That we have a valid format, let's fixup the remaining fields */
	f->fmt.pix.colorspace = ctx->v_fmt.fmt.pix.colorspace;
	return cal_calc_format_size(ctx, fmt, f);
}
1077
/*
 * VIDIOC_S_FMT: validate via try_fmt, push the format to the sensor,
 * and cache the resulting driver/V4L2/media-bus views of it.  Fails
 * with -EBUSY while buffers are in flight, and -EINVAL if the sensor
 * substitutes a different media-bus code.
 */
static int cal_s_fmt_vid_cap(struct file *file, void *priv,
			     struct v4l2_format *f)
{
	struct cal_ctx *ctx = video_drvdata(file);
	struct vb2_queue *q = &ctx->vb_vidq;
	const struct cal_fmt *fmt;
	struct v4l2_mbus_framefmt mbus_fmt;
	int ret;

	if (vb2_is_busy(q)) {
		ctx_dbg(3, ctx, "%s device busy\n", __func__);
		return -EBUSY;
	}

	ret = cal_try_fmt_vid_cap(file, priv, f);
	if (ret < 0)
		return ret;

	/* try_fmt guarantees the fourcc is one of active_fmt */
	fmt = find_format_by_pix(ctx, f->fmt.pix.pixelformat);

	v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, fmt->code);

	ret = __subdev_set_format(ctx, &mbus_fmt);
	if (ret)
		return ret;

	/* Just double check nothing has gone wrong */
	if (mbus_fmt.code != fmt->code) {
		ctx_dbg(3, ctx,
			"%s subdev changed format on us, this should not happen\n",
			__func__);
		return -EINVAL;
	}

	v4l2_fill_pix_format(&ctx->v_fmt.fmt.pix, &mbus_fmt);
	ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	ctx->v_fmt.fmt.pix.pixelformat = fmt->fourcc;
	cal_calc_format_size(ctx, fmt, &ctx->v_fmt);
	ctx->fmt = fmt;
	ctx->m_fmt = mbus_fmt;
	*f = ctx->v_fmt;

	return 0;
}
1122
1123static int cal_enum_framesizes(struct file *file, void *fh,
1124 struct v4l2_frmsizeenum *fsize)
1125{
1126 struct cal_ctx *ctx = video_drvdata(file);
1127 const struct cal_fmt *fmt;
1128 struct v4l2_subdev_frame_size_enum fse;
1129 int ret;
1130
1131
1132 fmt = find_format_by_pix(ctx, fsize->pixel_format);
1133 if (!fmt) {
1134 ctx_dbg(3, ctx, "Invalid pixel code: %x\n",
1135 fsize->pixel_format);
1136 return -EINVAL;
1137 }
1138
1139 fse.index = fsize->index;
1140 fse.pad = 0;
1141 fse.code = fmt->code;
1142
1143 ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_size, NULL, &fse);
1144 if (ret)
1145 return ret;
1146
1147 ctx_dbg(1, ctx, "%s: index: %d code: %x W:[%d,%d] H:[%d,%d]\n",
1148 __func__, fse.index, fse.code, fse.min_width, fse.max_width,
1149 fse.min_height, fse.max_height);
1150
1151 fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
1152 fsize->discrete.width = fse.max_width;
1153 fsize->discrete.height = fse.max_height;
1154
1155 return 0;
1156}
1157
1158static int cal_enum_input(struct file *file, void *priv,
1159 struct v4l2_input *inp)
1160{
1161 if (inp->index >= CAL_NUM_INPUT)
1162 return -EINVAL;
1163
1164 inp->type = V4L2_INPUT_TYPE_CAMERA;
1165 sprintf(inp->name, "Camera %u", inp->index);
1166 return 0;
1167}
1168
/* VIDIOC_G_INPUT: report the selected input index */
static int cal_g_input(struct file *file, void *priv, unsigned int *i)
{
	struct cal_ctx *ctx = video_drvdata(file);

	*i = ctx->input;
	return 0;
}
1176
/* VIDIOC_S_INPUT: select an input; only index 0 exists */
static int cal_s_input(struct file *file, void *priv, unsigned int i)
{
	struct cal_ctx *ctx = video_drvdata(file);

	if (i >= CAL_NUM_INPUT)
		return -EINVAL;

	ctx->input = i;
	return 0;
}
1187
1188
/*
 * VIDIOC_ENUM_FRAMEINTERVALS: forward to the sensor for the media-bus
 * code matching the requested fourcc and size.
 */
static int cal_enum_frameintervals(struct file *file, void *priv,
				   struct v4l2_frmivalenum *fival)
{
	struct cal_ctx *ctx = video_drvdata(file);
	const struct cal_fmt *fmt;
	struct v4l2_subdev_frame_interval_enum fie = {
		.index = fival->index,
		.width = fival->width,
		.height = fival->height,
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	int ret;

	fmt = find_format_by_pix(ctx, fival->pixel_format);
	if (!fmt)
		return -EINVAL;

	fie.code = fmt->code;
	ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_interval,
			       NULL, &fie);
	if (ret)
		return ret;
	fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
	fival->discrete = fie.interval;

	return 0;
}
1216
1217
1218
1219
/*
 * vb2 queue_setup: require at least 3 buffers total and a single plane
 * of at least sizeimage bytes.  Honors a caller-provided larger plane
 * size; rejects a smaller one.
 */
static int cal_queue_setup(struct vb2_queue *vq,
			   unsigned int *nbuffers, unsigned int *nplanes,
			   unsigned int sizes[], struct device *alloc_devs[])
{
	struct cal_ctx *ctx = vb2_get_drv_priv(vq);
	unsigned size = ctx->v_fmt.fmt.pix.sizeimage;

	/* top up to a minimum of 3 buffers overall */
	if (vq->num_buffers + *nbuffers < 3)
		*nbuffers = 3 - vq->num_buffers;

	if (*nplanes) {
		if (sizes[0] < size)
			return -EINVAL;
		size = sizes[0];
	}

	*nplanes = 1;
	sizes[0] = size;

	ctx_dbg(3, ctx, "nbuffers=%d, size=%d\n", *nbuffers, sizes[0]);

	return 0;
}
1243
1244static int cal_buffer_prepare(struct vb2_buffer *vb)
1245{
1246 struct cal_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
1247 struct cal_buffer *buf = container_of(vb, struct cal_buffer,
1248 vb.vb2_buf);
1249 unsigned long size;
1250
1251 if (WARN_ON(!ctx->fmt))
1252 return -EINVAL;
1253
1254 size = ctx->v_fmt.fmt.pix.sizeimage;
1255 if (vb2_plane_size(vb, 0) < size) {
1256 ctx_err(ctx,
1257 "data will not fit into plane (%lu < %lu)\n",
1258 vb2_plane_size(vb, 0), size);
1259 return -EINVAL;
1260 }
1261
1262 vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
1263 return 0;
1264}
1265
1266static void cal_buffer_queue(struct vb2_buffer *vb)
1267{
1268 struct cal_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
1269 struct cal_buffer *buf = container_of(vb, struct cal_buffer,
1270 vb.vb2_buf);
1271 struct cal_dmaqueue *vidq = &ctx->vidq;
1272 unsigned long flags = 0;
1273
1274
1275 spin_lock_irqsave(&ctx->slock, flags);
1276 list_add_tail(&buf->list, &vidq->active);
1277 spin_unlock_irqrestore(&ctx->slock, flags);
1278}
1279
1280static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
1281{
1282 struct cal_ctx *ctx = vb2_get_drv_priv(vq);
1283 struct cal_dmaqueue *dma_q = &ctx->vidq;
1284 struct cal_buffer *buf, *tmp;
1285 unsigned long addr = 0;
1286 unsigned long flags;
1287 int ret;
1288
1289 spin_lock_irqsave(&ctx->slock, flags);
1290 if (list_empty(&dma_q->active)) {
1291 spin_unlock_irqrestore(&ctx->slock, flags);
1292 ctx_dbg(3, ctx, "buffer queue is empty\n");
1293 return -EIO;
1294 }
1295
1296 buf = list_entry(dma_q->active.next, struct cal_buffer, list);
1297 ctx->cur_frm = buf;
1298 ctx->next_frm = buf;
1299 list_del(&buf->list);
1300 spin_unlock_irqrestore(&ctx->slock, flags);
1301
1302 addr = vb2_dma_contig_plane_dma_addr(&ctx->cur_frm->vb.vb2_buf, 0);
1303 ctx->sequence = 0;
1304
1305 ret = cal_get_external_info(ctx);
1306 if (ret < 0)
1307 goto err;
1308
1309 cal_runtime_get(ctx->dev);
1310
1311 enable_irqs(ctx);
1312 camerarx_phy_enable(ctx);
1313 csi2_init(ctx);
1314 csi2_phy_config(ctx);
1315 csi2_lane_config(ctx);
1316 csi2_ctx_config(ctx);
1317 pix_proc_config(ctx);
1318 cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline);
1319 cal_wr_dma_addr(ctx, addr);
1320 csi2_ppi_enable(ctx);
1321
1322 ret = v4l2_subdev_call(ctx->sensor, video, s_stream, 1);
1323 if (ret) {
1324 ctx_err(ctx, "stream on failed in subdev\n");
1325 cal_runtime_put(ctx->dev);
1326 goto err;
1327 }
1328
1329 if (debug >= 4)
1330 cal_quickdump_regs(ctx->dev);
1331
1332 return 0;
1333
1334err:
1335 list_for_each_entry_safe(buf, tmp, &dma_q->active, list) {
1336 list_del(&buf->list);
1337 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
1338 }
1339 return ret;
1340}
1341
1342static void cal_stop_streaming(struct vb2_queue *vq)
1343{
1344 struct cal_ctx *ctx = vb2_get_drv_priv(vq);
1345 struct cal_dmaqueue *dma_q = &ctx->vidq;
1346 struct cal_buffer *buf, *tmp;
1347 unsigned long flags;
1348
1349 if (v4l2_subdev_call(ctx->sensor, video, s_stream, 0))
1350 ctx_err(ctx, "stream off failed in subdev\n");
1351
1352 csi2_ppi_disable(ctx);
1353 disable_irqs(ctx);
1354
1355
1356 spin_lock_irqsave(&ctx->slock, flags);
1357 list_for_each_entry_safe(buf, tmp, &dma_q->active, list) {
1358 list_del(&buf->list);
1359 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
1360 }
1361
1362 if (ctx->cur_frm == ctx->next_frm) {
1363 vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_ERROR);
1364 } else {
1365 vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_ERROR);
1366 vb2_buffer_done(&ctx->next_frm->vb.vb2_buf,
1367 VB2_BUF_STATE_ERROR);
1368 }
1369 ctx->cur_frm = NULL;
1370 ctx->next_frm = NULL;
1371 spin_unlock_irqrestore(&ctx->slock, flags);
1372
1373 cal_runtime_put(ctx->dev);
1374}
1375
/* videobuf2 queue operations for the CAL capture queue. */
static const struct vb2_ops cal_video_qops = {
	.queue_setup		= cal_queue_setup,
	.buf_prepare		= cal_buffer_prepare,
	.buf_queue		= cal_buffer_queue,
	.start_streaming	= cal_start_streaming,
	.stop_streaming		= cal_stop_streaming,
	/* Standard vb2 helpers: drop/reacquire q->lock around blocking waits. */
	.wait_prepare		= vb2_ops_wait_prepare,
	.wait_finish		= vb2_ops_wait_finish,
};
1385
/* V4L2 file operations — all delegated to v4l2/vb2 framework helpers. */
static const struct v4l2_file_operations cal_fops = {
	.owner		= THIS_MODULE,
	.open           = v4l2_fh_open,
	.release        = vb2_fop_release,
	.read           = vb2_fop_read,
	.poll		= vb2_fop_poll,
	.unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
	.mmap           = vb2_fop_mmap,
};
1395
/*
 * V4L2 ioctl dispatch table: format/input/frame-size enumeration is
 * handled by the driver; buffer management is delegated to vb2 helpers.
 */
static const struct v4l2_ioctl_ops cal_ioctl_ops = {
	.vidioc_querycap      = cal_querycap,
	.vidioc_enum_fmt_vid_cap  = cal_enum_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap     = cal_g_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap   = cal_try_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap     = cal_s_fmt_vid_cap,
	.vidioc_enum_framesizes   = cal_enum_framesizes,
	.vidioc_reqbufs       = vb2_ioctl_reqbufs,
	.vidioc_create_bufs   = vb2_ioctl_create_bufs,
	.vidioc_prepare_buf   = vb2_ioctl_prepare_buf,
	.vidioc_querybuf      = vb2_ioctl_querybuf,
	.vidioc_qbuf          = vb2_ioctl_qbuf,
	.vidioc_dqbuf         = vb2_ioctl_dqbuf,
	.vidioc_enum_input    = cal_enum_input,
	.vidioc_g_input       = cal_g_input,
	.vidioc_s_input       = cal_s_input,
	.vidioc_enum_frameintervals = cal_enum_frameintervals,
	.vidioc_streamon      = vb2_ioctl_streamon,
	.vidioc_streamoff     = vb2_ioctl_streamoff,
	.vidioc_log_status    = v4l2_ctrl_log_status,
	.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
1419
/*
 * Template video_device; copied into each context by cal_complete_ctx()
 * before registration (release is a no-op since vdev is embedded).
 */
static const struct video_device cal_videodev = {
	.name		= CAL_MODULE_NAME,
	.fops		= &cal_fops,
	.ioctl_ops	= &cal_ioctl_ops,
	.minor		= -1, /* let the core pick a minor */
	.release	= video_device_release_empty,
};
1427
1428
1429
1430
1431
/* Forward declaration: defined below, used by the async bound callback. */
static int cal_complete_ctx(struct cal_ctx *ctx);
1433
1434static int cal_async_bound(struct v4l2_async_notifier *notifier,
1435 struct v4l2_subdev *subdev,
1436 struct v4l2_async_subdev *asd)
1437{
1438 struct cal_ctx *ctx = notifier_to_ctx(notifier);
1439 struct v4l2_subdev_mbus_code_enum mbus_code;
1440 int ret = 0;
1441 int i, j, k;
1442
1443 if (ctx->sensor) {
1444 ctx_info(ctx, "Rejecting subdev %s (Already set!!)",
1445 subdev->name);
1446 return 0;
1447 }
1448
1449 ctx->sensor = subdev;
1450 ctx_dbg(1, ctx, "Using sensor %s for capture\n", subdev->name);
1451
1452
1453 ctx->num_active_fmt = 0;
1454 for (j = 0, i = 0; ret != -EINVAL; ++j) {
1455 struct cal_fmt *fmt;
1456
1457 memset(&mbus_code, 0, sizeof(mbus_code));
1458 mbus_code.index = j;
1459 ret = v4l2_subdev_call(subdev, pad, enum_mbus_code,
1460 NULL, &mbus_code);
1461 if (ret)
1462 continue;
1463
1464 ctx_dbg(2, ctx,
1465 "subdev %s: code: %04x idx: %d\n",
1466 subdev->name, mbus_code.code, j);
1467
1468 for (k = 0; k < ARRAY_SIZE(cal_formats); k++) {
1469 fmt = &cal_formats[k];
1470
1471 if (mbus_code.code == fmt->code) {
1472 ctx->active_fmt[i] = fmt;
1473 ctx_dbg(2, ctx,
1474 "matched fourcc: %s: code: %04x idx: %d\n",
1475 fourcc_to_str(fmt->fourcc),
1476 fmt->code, i);
1477 ctx->num_active_fmt = ++i;
1478 }
1479 }
1480 }
1481
1482 if (i == 0) {
1483 ctx_err(ctx, "No suitable format reported by subdev %s\n",
1484 subdev->name);
1485 return -EINVAL;
1486 }
1487
1488 cal_complete_ctx(ctx);
1489
1490 return 0;
1491}
1492
1493static int cal_async_complete(struct v4l2_async_notifier *notifier)
1494{
1495 struct cal_ctx *ctx = notifier_to_ctx(notifier);
1496 const struct cal_fmt *fmt;
1497 struct v4l2_mbus_framefmt mbus_fmt;
1498 int ret;
1499
1500 ret = __subdev_get_format(ctx, &mbus_fmt);
1501 if (ret)
1502 return ret;
1503
1504 fmt = find_format_by_code(ctx, mbus_fmt.code);
1505 if (!fmt) {
1506 ctx_dbg(3, ctx, "mbus code format (0x%08x) not found.\n",
1507 mbus_fmt.code);
1508 return -EINVAL;
1509 }
1510
1511
1512 v4l2_fill_pix_format(&ctx->v_fmt.fmt.pix, &mbus_fmt);
1513 ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1514 ctx->v_fmt.fmt.pix.pixelformat = fmt->fourcc;
1515 cal_calc_format_size(ctx, fmt, &ctx->v_fmt);
1516 ctx->fmt = fmt;
1517 ctx->m_fmt = mbus_fmt;
1518
1519 return 0;
1520}
1521
/* Callbacks invoked by the v4l2-async framework when the sensor appears. */
static const struct v4l2_async_notifier_operations cal_async_ops = {
	.bound = cal_async_bound,
	.complete = cal_async_complete,
};
1526
1527static int cal_complete_ctx(struct cal_ctx *ctx)
1528{
1529 struct video_device *vfd;
1530 struct vb2_queue *q;
1531 int ret;
1532
1533 ctx->timeperframe = tpf_default;
1534 ctx->external_rate = 192000000;
1535
1536
1537 spin_lock_init(&ctx->slock);
1538 mutex_init(&ctx->mutex);
1539
1540
1541 q = &ctx->vb_vidq;
1542 q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1543 q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
1544 q->drv_priv = ctx;
1545 q->buf_struct_size = sizeof(struct cal_buffer);
1546 q->ops = &cal_video_qops;
1547 q->mem_ops = &vb2_dma_contig_memops;
1548 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1549 q->lock = &ctx->mutex;
1550 q->min_buffers_needed = 3;
1551 q->dev = ctx->v4l2_dev.dev;
1552
1553 ret = vb2_queue_init(q);
1554 if (ret)
1555 return ret;
1556
1557
1558 INIT_LIST_HEAD(&ctx->vidq.active);
1559
1560 vfd = &ctx->vdev;
1561 *vfd = cal_videodev;
1562 vfd->v4l2_dev = &ctx->v4l2_dev;
1563 vfd->queue = q;
1564
1565
1566
1567
1568
1569 vfd->lock = &ctx->mutex;
1570 video_set_drvdata(vfd, ctx);
1571
1572 ret = video_register_device(vfd, VFL_TYPE_GRABBER, video_nr);
1573 if (ret < 0)
1574 return ret;
1575
1576 v4l2_info(&ctx->v4l2_dev, "V4L2 device registered as %s\n",
1577 video_device_node_name(vfd));
1578
1579 return 0;
1580}
1581
1582static struct device_node *
1583of_get_next_port(const struct device_node *parent,
1584 struct device_node *prev)
1585{
1586 struct device_node *port = NULL;
1587
1588 if (!parent)
1589 return NULL;
1590
1591 if (!prev) {
1592 struct device_node *ports;
1593
1594
1595
1596
1597 ports = of_get_child_by_name(parent, "ports");
1598 if (ports)
1599 parent = ports;
1600
1601 port = of_get_child_by_name(parent, "port");
1602
1603
1604 of_node_put(ports);
1605 } else {
1606 struct device_node *ports;
1607
1608 ports = of_get_parent(prev);
1609 if (!ports)
1610 return NULL;
1611
1612 do {
1613 port = of_get_next_child(ports, prev);
1614 if (!port) {
1615 of_node_put(ports);
1616 return NULL;
1617 }
1618 prev = port;
1619 } while (of_node_cmp(port->name, "port") != 0);
1620 }
1621
1622 return port;
1623}
1624
1625static struct device_node *
1626of_get_next_endpoint(const struct device_node *parent,
1627 struct device_node *prev)
1628{
1629 struct device_node *ep = NULL;
1630
1631 if (!parent)
1632 return NULL;
1633
1634 do {
1635 ep = of_get_next_child(parent, prev);
1636 if (!ep)
1637 return NULL;
1638 prev = ep;
1639 } while (of_node_cmp(ep->name, "endpoint") != 0);
1640
1641 return ep;
1642}
1643
1644static int of_cal_create_instance(struct cal_ctx *ctx, int inst)
1645{
1646 struct platform_device *pdev = ctx->dev->pdev;
1647 struct device_node *ep_node, *port, *remote_ep,
1648 *sensor_node, *parent;
1649 struct v4l2_fwnode_endpoint *endpoint;
1650 struct v4l2_async_subdev *asd;
1651 u32 regval = 0;
1652 int ret, index, found_port = 0, lane;
1653
1654 parent = pdev->dev.of_node;
1655
1656 asd = &ctx->asd;
1657 endpoint = &ctx->endpoint;
1658
1659 ep_node = NULL;
1660 port = NULL;
1661 remote_ep = NULL;
1662 sensor_node = NULL;
1663 ret = -EINVAL;
1664
1665 ctx_dbg(3, ctx, "Scanning Port node for csi2 port: %d\n", inst);
1666 for (index = 0; index < CAL_NUM_CSI2_PORTS; index++) {
1667 port = of_get_next_port(parent, port);
1668 if (!port) {
1669 ctx_dbg(1, ctx, "No port node found for csi2 port:%d\n",
1670 index);
1671 goto cleanup_exit;
1672 }
1673
1674
1675 of_property_read_u32(port, "reg", ®val);
1676 ctx_dbg(3, ctx, "port:%d inst:%d <reg>:%d\n",
1677 index, inst, regval);
1678 if ((regval == inst) && (index == inst)) {
1679 found_port = 1;
1680 break;
1681 }
1682 }
1683
1684 if (!found_port) {
1685 ctx_dbg(1, ctx, "No port node matches csi2 port:%d\n",
1686 inst);
1687 goto cleanup_exit;
1688 }
1689
1690 ctx_dbg(3, ctx, "Scanning sub-device for csi2 port: %d\n",
1691 inst);
1692
1693 ep_node = of_get_next_endpoint(port, ep_node);
1694 if (!ep_node) {
1695 ctx_dbg(3, ctx, "can't get next endpoint\n");
1696 goto cleanup_exit;
1697 }
1698
1699 sensor_node = of_graph_get_remote_port_parent(ep_node);
1700 if (!sensor_node) {
1701 ctx_dbg(3, ctx, "can't get remote parent\n");
1702 goto cleanup_exit;
1703 }
1704 asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
1705 asd->match.fwnode = of_fwnode_handle(sensor_node);
1706
1707 remote_ep = of_graph_get_remote_endpoint(ep_node);
1708 if (!remote_ep) {
1709 ctx_dbg(3, ctx, "can't get remote-endpoint\n");
1710 goto cleanup_exit;
1711 }
1712 v4l2_fwnode_endpoint_parse(of_fwnode_handle(remote_ep), endpoint);
1713
1714 if (endpoint->bus_type != V4L2_MBUS_CSI2) {
1715 ctx_err(ctx, "Port:%d sub-device %s is not a CSI2 device\n",
1716 inst, sensor_node->name);
1717 goto cleanup_exit;
1718 }
1719
1720
1721 ctx->virtual_channel = endpoint->base.id;
1722
1723 ctx_dbg(3, ctx, "Port:%d v4l2-endpoint: CSI2\n", inst);
1724 ctx_dbg(3, ctx, "Virtual Channel=%d\n", ctx->virtual_channel);
1725 ctx_dbg(3, ctx, "flags=0x%08x\n", endpoint->bus.mipi_csi2.flags);
1726 ctx_dbg(3, ctx, "clock_lane=%d\n", endpoint->bus.mipi_csi2.clock_lane);
1727 ctx_dbg(3, ctx, "num_data_lanes=%d\n",
1728 endpoint->bus.mipi_csi2.num_data_lanes);
1729 ctx_dbg(3, ctx, "data_lanes= <\n");
1730 for (lane = 0; lane < endpoint->bus.mipi_csi2.num_data_lanes; lane++)
1731 ctx_dbg(3, ctx, "\t%d\n",
1732 endpoint->bus.mipi_csi2.data_lanes[lane]);
1733 ctx_dbg(3, ctx, "\t>\n");
1734
1735 ctx_dbg(1, ctx, "Port: %d found sub-device %s\n",
1736 inst, sensor_node->name);
1737
1738 ctx->asd_list[0] = asd;
1739 ctx->notifier.subdevs = ctx->asd_list;
1740 ctx->notifier.num_subdevs = 1;
1741 ctx->notifier.ops = &cal_async_ops;
1742 ret = v4l2_async_notifier_register(&ctx->v4l2_dev,
1743 &ctx->notifier);
1744 if (ret) {
1745 ctx_err(ctx, "Error registering async notifier\n");
1746 ret = -EINVAL;
1747 }
1748
1749cleanup_exit:
1750 if (remote_ep)
1751 of_node_put(remote_ep);
1752 if (sensor_node)
1753 of_node_put(sensor_node);
1754 if (ep_node)
1755 of_node_put(ep_node);
1756 if (port)
1757 of_node_put(port);
1758
1759 return ret;
1760}
1761
1762static struct cal_ctx *cal_create_instance(struct cal_dev *dev, int inst)
1763{
1764 struct cal_ctx *ctx;
1765 struct v4l2_ctrl_handler *hdl;
1766 int ret;
1767
1768 ctx = devm_kzalloc(&dev->pdev->dev, sizeof(*ctx), GFP_KERNEL);
1769 if (!ctx)
1770 return NULL;
1771
1772
1773 ctx->dev = dev;
1774
1775 snprintf(ctx->v4l2_dev.name, sizeof(ctx->v4l2_dev.name),
1776 "%s-%03d", CAL_MODULE_NAME, inst);
1777 ret = v4l2_device_register(&dev->pdev->dev, &ctx->v4l2_dev);
1778 if (ret)
1779 goto err_exit;
1780
1781 hdl = &ctx->ctrl_handler;
1782 ret = v4l2_ctrl_handler_init(hdl, 11);
1783 if (ret) {
1784 ctx_err(ctx, "Failed to init ctrl handler\n");
1785 goto unreg_dev;
1786 }
1787 ctx->v4l2_dev.ctrl_handler = hdl;
1788
1789
1790 ctx->cc = dev->cc[inst];
1791
1792
1793 ctx->csi2_port = inst + 1;
1794
1795 ret = of_cal_create_instance(ctx, inst);
1796 if (ret) {
1797 ret = -EINVAL;
1798 goto free_hdl;
1799 }
1800 return ctx;
1801
1802free_hdl:
1803 v4l2_ctrl_handler_free(hdl);
1804unreg_dev:
1805 v4l2_device_unregister(&ctx->v4l2_dev);
1806err_exit:
1807 return NULL;
1808}
1809
1810static int cal_probe(struct platform_device *pdev)
1811{
1812 struct cal_dev *dev;
1813 int ret;
1814 int irq;
1815
1816 dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
1817 if (!dev)
1818 return -ENOMEM;
1819
1820
1821 strscpy(dev->v4l2_dev.name, CAL_MODULE_NAME,
1822 sizeof(dev->v4l2_dev.name));
1823
1824
1825 dev->pdev = pdev;
1826
1827 dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1828 "cal_top");
1829 dev->base = devm_ioremap_resource(&pdev->dev, dev->res);
1830 if (IS_ERR(dev->base))
1831 return PTR_ERR(dev->base);
1832
1833 cal_dbg(1, dev, "ioresource %s at %pa - %pa\n",
1834 dev->res->name, &dev->res->start, &dev->res->end);
1835
1836 irq = platform_get_irq(pdev, 0);
1837 cal_dbg(1, dev, "got irq# %d\n", irq);
1838 ret = devm_request_irq(&pdev->dev, irq, cal_irq, 0, CAL_MODULE_NAME,
1839 dev);
1840 if (ret)
1841 return ret;
1842
1843 platform_set_drvdata(pdev, dev);
1844
1845 dev->cm = cm_create(dev);
1846 if (IS_ERR(dev->cm))
1847 return PTR_ERR(dev->cm);
1848
1849 dev->cc[0] = cc_create(dev, 0);
1850 if (IS_ERR(dev->cc[0]))
1851 return PTR_ERR(dev->cc[0]);
1852
1853 dev->cc[1] = cc_create(dev, 1);
1854 if (IS_ERR(dev->cc[1]))
1855 return PTR_ERR(dev->cc[1]);
1856
1857 dev->ctx[0] = NULL;
1858 dev->ctx[1] = NULL;
1859
1860 dev->ctx[0] = cal_create_instance(dev, 0);
1861 dev->ctx[1] = cal_create_instance(dev, 1);
1862 if (!dev->ctx[0] && !dev->ctx[1]) {
1863 cal_err(dev, "Neither port is configured, no point in staying up\n");
1864 return -ENODEV;
1865 }
1866
1867 pm_runtime_enable(&pdev->dev);
1868
1869 ret = cal_runtime_get(dev);
1870 if (ret)
1871 goto runtime_disable;
1872
1873
1874 cal_get_hwinfo(dev);
1875
1876 cal_runtime_put(dev);
1877
1878 return 0;
1879
1880runtime_disable:
1881 pm_runtime_disable(&pdev->dev);
1882 return ret;
1883}
1884
1885static int cal_remove(struct platform_device *pdev)
1886{
1887 struct cal_dev *dev =
1888 (struct cal_dev *)platform_get_drvdata(pdev);
1889 struct cal_ctx *ctx;
1890 int i;
1891
1892 cal_dbg(1, dev, "Removing %s\n", CAL_MODULE_NAME);
1893
1894 cal_runtime_get(dev);
1895
1896 for (i = 0; i < CAL_NUM_CONTEXT; i++) {
1897 ctx = dev->ctx[i];
1898 if (ctx) {
1899 ctx_dbg(1, ctx, "unregistering %s\n",
1900 video_device_node_name(&ctx->vdev));
1901 camerarx_phy_disable(ctx);
1902 v4l2_async_notifier_unregister(&ctx->notifier);
1903 v4l2_ctrl_handler_free(&ctx->ctrl_handler);
1904 v4l2_device_unregister(&ctx->v4l2_dev);
1905 video_unregister_device(&ctx->vdev);
1906 }
1907 }
1908
1909 cal_runtime_put(dev);
1910 pm_runtime_disable(&pdev->dev);
1911
1912 return 0;
1913}
1914
#if defined(CONFIG_OF)
/* Device tree match table: the CAL block as found on DRA72x SoCs. */
static const struct of_device_id cal_of_match[] = {
	{ .compatible = "ti,dra72-cal", },
	{},
};
MODULE_DEVICE_TABLE(of, cal_of_match);
#endif
1922
/* Platform driver glue; module init/exit generated below. */
static struct platform_driver cal_pdrv = {
	.probe		= cal_probe,
	.remove		= cal_remove,
	.driver		= {
		.name	= CAL_MODULE_NAME,
		.of_match_table = of_match_ptr(cal_of_match),
	},
};

module_platform_driver(cal_pdrv);
1933