1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include <linux/clk.h>
19#include <linux/completion.h>
20#include <linux/interrupt.h>
21#include <linux/kernel.h>
22#include <linux/of.h>
23#include <linux/platform_device.h>
24#include <linux/regulator/consumer.h>
25#include <media/media-entity.h>
26#include <media/v4l2-device.h>
27#include <media/v4l2-subdev.h>
28
29#include "camss-csid.h"
30#include "camss.h"
31
#define MSM_CSID_NAME "msm_csid"

/* CSID hardware register offsets */
#define CAMSS_CSID_HW_VERSION 0x0
#define CAMSS_CSID_CORE_CTRL_0 0x004
#define CAMSS_CSID_CORE_CTRL_1 0x008
#define CAMSS_CSID_RST_CMD 0x00c
/* Per-virtual-channel CID lookup table and per-CID configuration */
#define CAMSS_CSID_CID_LUT_VC_n(n) (0x010 + 0x4 * (n))
#define CAMSS_CSID_CID_n_CFG(n) (0x020 + 0x4 * (n))
#define CAMSS_CSID_IRQ_CLEAR_CMD 0x060
#define CAMSS_CSID_IRQ_MASK 0x064
#define CAMSS_CSID_IRQ_STATUS 0x068
/* Test generator control and per-data-type configuration */
#define CAMSS_CSID_TG_CTRL 0x0a0
#define CAMSS_CSID_TG_CTRL_DISABLE 0xa06436
#define CAMSS_CSID_TG_CTRL_ENABLE 0xa06437
#define CAMSS_CSID_TG_VC_CFG 0x0a4
#define CAMSS_CSID_TG_VC_CFG_H_BLANKING 0x3ff
#define CAMSS_CSID_TG_VC_CFG_V_BLANKING 0x7f
#define CAMSS_CSID_TG_DT_n_CGG_0(n) (0x0ac + 0xc * (n))
#define CAMSS_CSID_TG_DT_n_CGG_1(n) (0x0b0 + 0xc * (n))
#define CAMSS_CSID_TG_DT_n_CGG_2(n) (0x0b4 + 0xc * (n))

/* MIPI CSI-2 data type codes (written into the CID LUT / TG registers) */
#define DATA_TYPE_EMBEDDED_DATA_8BIT 0x12
#define DATA_TYPE_YUV422_8BIT 0x1e
#define DATA_TYPE_RAW_6BIT 0x28
#define DATA_TYPE_RAW_8BIT 0x2a
#define DATA_TYPE_RAW_10BIT 0x2b
#define DATA_TYPE_RAW_12BIT 0x2c

/* CSID decode format register field values */
#define DECODE_FORMAT_UNCOMPRESSED_6_BIT 0x0
#define DECODE_FORMAT_UNCOMPRESSED_8_BIT 0x1
#define DECODE_FORMAT_UNCOMPRESSED_10_BIT 0x2
#define DECODE_FORMAT_UNCOMPRESSED_12_BIT 0x3

/* Time to wait for the reset-done interrupt in csid_reset() */
#define CSID_RESET_TIMEOUT_MS 500
66
/*
 * struct csid_fmts - CSID media bus format information
 * @code: V4L2 media bus format code
 * @data_type: MIPI CSI-2 data type
 * @decode_format: CSID hardware decode format
 * @bpp: Bits per pixel
 * @spp: Samples per pixel
 */
struct csid_fmts {
	u32 code;
	u8 data_type;
	u8 decode_format;
	u8 bpp;
	u8 spp;
};
74
/*
 * Supported input formats. Entry 0 (UYVY8) doubles as the fallback
 * returned by csid_get_fmt_entry() for unknown codes.
 */
static const struct csid_fmts csid_input_fmts[] = {
	{
		MEDIA_BUS_FMT_UYVY8_2X8,
		DATA_TYPE_YUV422_8BIT,
		DECODE_FORMAT_UNCOMPRESSED_8_BIT,
		8,
		2,
	},
	{
		MEDIA_BUS_FMT_VYUY8_2X8,
		DATA_TYPE_YUV422_8BIT,
		DECODE_FORMAT_UNCOMPRESSED_8_BIT,
		8,
		2,
	},
	{
		MEDIA_BUS_FMT_YUYV8_2X8,
		DATA_TYPE_YUV422_8BIT,
		DECODE_FORMAT_UNCOMPRESSED_8_BIT,
		8,
		2,
	},
	{
		MEDIA_BUS_FMT_YVYU8_2X8,
		DATA_TYPE_YUV422_8BIT,
		DECODE_FORMAT_UNCOMPRESSED_8_BIT,
		8,
		2,
	},
	{
		MEDIA_BUS_FMT_SBGGR8_1X8,
		DATA_TYPE_RAW_8BIT,
		DECODE_FORMAT_UNCOMPRESSED_8_BIT,
		8,
		1,
	},
	{
		MEDIA_BUS_FMT_SGBRG8_1X8,
		DATA_TYPE_RAW_8BIT,
		DECODE_FORMAT_UNCOMPRESSED_8_BIT,
		8,
		1,
	},
	{
		MEDIA_BUS_FMT_SGRBG8_1X8,
		DATA_TYPE_RAW_8BIT,
		DECODE_FORMAT_UNCOMPRESSED_8_BIT,
		8,
		1,
	},
	{
		MEDIA_BUS_FMT_SRGGB8_1X8,
		DATA_TYPE_RAW_8BIT,
		DECODE_FORMAT_UNCOMPRESSED_8_BIT,
		8,
		1,
	},
	{
		MEDIA_BUS_FMT_SBGGR10_1X10,
		DATA_TYPE_RAW_10BIT,
		DECODE_FORMAT_UNCOMPRESSED_10_BIT,
		10,
		1,
	},
	{
		MEDIA_BUS_FMT_SGBRG10_1X10,
		DATA_TYPE_RAW_10BIT,
		DECODE_FORMAT_UNCOMPRESSED_10_BIT,
		10,
		1,
	},
	{
		MEDIA_BUS_FMT_SGRBG10_1X10,
		DATA_TYPE_RAW_10BIT,
		DECODE_FORMAT_UNCOMPRESSED_10_BIT,
		10,
		1,
	},
	{
		MEDIA_BUS_FMT_SRGGB10_1X10,
		DATA_TYPE_RAW_10BIT,
		DECODE_FORMAT_UNCOMPRESSED_10_BIT,
		10,
		1,
	},
	{
		MEDIA_BUS_FMT_SBGGR12_1X12,
		DATA_TYPE_RAW_12BIT,
		DECODE_FORMAT_UNCOMPRESSED_12_BIT,
		12,
		1,
	},
	{
		MEDIA_BUS_FMT_SGBRG12_1X12,
		DATA_TYPE_RAW_12BIT,
		DECODE_FORMAT_UNCOMPRESSED_12_BIT,
		12,
		1,
	},
	{
		MEDIA_BUS_FMT_SGRBG12_1X12,
		DATA_TYPE_RAW_12BIT,
		DECODE_FORMAT_UNCOMPRESSED_12_BIT,
		12,
		1,
	},
	{
		MEDIA_BUS_FMT_SRGGB12_1X12,
		DATA_TYPE_RAW_12BIT,
		DECODE_FORMAT_UNCOMPRESSED_12_BIT,
		12,
		1,
	}
};
189
190static const struct csid_fmts *csid_get_fmt_entry(u32 code)
191{
192 unsigned int i;
193
194 for (i = 0; i < ARRAY_SIZE(csid_input_fmts); i++)
195 if (code == csid_input_fmts[i].code)
196 return &csid_input_fmts[i];
197
198 WARN(1, "Unknown format\n");
199
200 return &csid_input_fmts[0];
201}
202
203
204
205
206
207
208
209
/*
 * csid_isr - CSID module interrupt handler
 * @irq: Interrupt line
 * @dev: CSID device
 *
 * Return IRQ_HANDLED on handled interrupt
 */
static irqreturn_t csid_isr(int irq, void *dev)
{
	struct csid_device *csid = dev;
	u32 value;

	/* Read all pending interrupt bits, then acknowledge them */
	value = readl_relaxed(csid->base + CAMSS_CSID_IRQ_STATUS);
	writel_relaxed(value, csid->base + CAMSS_CSID_IRQ_CLEAR_CMD);

	/* Bit 11 reports reset completion - wake up csid_reset() */
	if ((value >> 11) & 0x1)
		complete(&csid->reset_complete);

	return IRQ_HANDLED;
}
223
224
225
226
227
228static int csid_set_clock_rates(struct csid_device *csid)
229{
230 struct device *dev = to_device_index(csid, csid->id);
231 u32 pixel_clock;
232 int i, j;
233 int ret;
234
235 ret = camss_get_pixel_clock(&csid->subdev.entity, &pixel_clock);
236 if (ret)
237 pixel_clock = 0;
238
239 for (i = 0; i < csid->nclocks; i++) {
240 struct camss_clock *clock = &csid->clock[i];
241
242 if (!strcmp(clock->name, "csi0") ||
243 !strcmp(clock->name, "csi1")) {
244 u8 bpp = csid_get_fmt_entry(
245 csid->fmt[MSM_CSIPHY_PAD_SINK].code)->bpp;
246 u8 num_lanes = csid->phy.lane_cnt;
247 u64 min_rate = pixel_clock * bpp / (2 * num_lanes * 4);
248 long rate;
249
250 camss_add_clock_margin(&min_rate);
251
252 for (j = 0; j < clock->nfreqs; j++)
253 if (min_rate < clock->freq[j])
254 break;
255
256 if (j == clock->nfreqs) {
257 dev_err(dev,
258 "Pixel clock is too high for CSID\n");
259 return -EINVAL;
260 }
261
262
263
264 if (min_rate == 0)
265 j = clock->nfreqs - 1;
266
267 rate = clk_round_rate(clock->clk, clock->freq[j]);
268 if (rate < 0) {
269 dev_err(dev, "clk round rate failed: %ld\n",
270 rate);
271 return -EINVAL;
272 }
273
274 ret = clk_set_rate(clock->clk, rate);
275 if (ret < 0) {
276 dev_err(dev, "clk set rate failed: %d\n", ret);
277 return ret;
278 }
279 }
280 }
281
282 return 0;
283}
284
285
286
287
288
289
290
291static int csid_reset(struct csid_device *csid)
292{
293 unsigned long time;
294
295 reinit_completion(&csid->reset_complete);
296
297 writel_relaxed(0x7fff, csid->base + CAMSS_CSID_RST_CMD);
298
299 time = wait_for_completion_timeout(&csid->reset_complete,
300 msecs_to_jiffies(CSID_RESET_TIMEOUT_MS));
301 if (!time) {
302 dev_err(to_device_index(csid, csid->id),
303 "CSID reset timeout\n");
304 return -EIO;
305 }
306
307 return 0;
308}
309
310
311
312
313
314
315
316
/*
 * csid_set_power - Power on/off CSID module
 * @sd: CSID V4L2 subdevice
 * @on: Requested power state
 *
 * Power-up sequence: regulator -> clock rates -> clocks -> irq -> reset.
 * Each failure path unwinds exactly what was acquired before it.
 *
 * Return 0 on success or a negative error code otherwise
 */
static int csid_set_power(struct v4l2_subdev *sd, int on)
{
	struct csid_device *csid = v4l2_get_subdevdata(sd);
	struct device *dev = to_device_index(csid, csid->id);
	int ret;

	if (on) {
		u32 hw_version;

		ret = regulator_enable(csid->vdda);
		if (ret < 0)
			return ret;

		ret = csid_set_clock_rates(csid);
		if (ret < 0) {
			regulator_disable(csid->vdda);
			return ret;
		}

		ret = camss_enable_clocks(csid->nclocks, csid->clock, dev);
		if (ret < 0) {
			regulator_disable(csid->vdda);
			return ret;
		}

		/* IRQ must be live before reset: the ISR completes the reset */
		enable_irq(csid->irq);

		ret = csid_reset(csid);
		if (ret < 0) {
			/* Unwind in reverse order of acquisition */
			disable_irq(csid->irq);
			camss_disable_clocks(csid->nclocks, csid->clock);
			regulator_disable(csid->vdda);
			return ret;
		}

		hw_version = readl_relaxed(csid->base + CAMSS_CSID_HW_VERSION);
		dev_dbg(dev, "CSID HW Version = 0x%08x\n", hw_version);
	} else {
		/* Power down in reverse order of power up */
		disable_irq(csid->irq);
		camss_disable_clocks(csid->nclocks, csid->clock);
		ret = regulator_disable(csid->vdda);
	}

	return ret;
}
362
363
364
365
366
367
368
369
370
371
/*
 * csid_set_stream - Enable/disable streaming on CSID module
 * @sd: CSID V4L2 subdevice
 * @enable: Requested streaming state
 *
 * Programs either the test generator or the CSI-PHY input path, then
 * maps the stream's data type to a CID via the per-VC lookup table.
 *
 * Return 0 on success or a negative error code otherwise
 */
static int csid_set_stream(struct v4l2_subdev *sd, int enable)
{
	struct csid_device *csid = v4l2_get_subdevdata(sd);
	struct csid_testgen_config *tg = &csid->testgen;
	u32 val;

	if (enable) {
		u8 vc = 0; /* only virtual channel 0 is used */
		u8 cid = vc * 4; /* 4 CIDs per virtual channel */
		u8 dt, dt_shift, df;
		int ret;

		/* Apply pending control values (e.g. test pattern) first */
		ret = v4l2_ctrl_handler_setup(&csid->ctrls);
		if (ret < 0) {
			dev_err(to_device_index(csid, csid->id),
				"could not sync v4l2 controls: %d\n", ret);
			return ret;
		}

		/* Without the test generator a linked CSIPHY is required */
		if (!tg->enabled &&
		    !media_entity_remote_pad(&csid->pads[MSM_CSID_PAD_SINK]))
			return -ENOLINK;

		dt = csid_get_fmt_entry(csid->fmt[MSM_CSID_PAD_SRC].code)->
								data_type;

		if (tg->enabled) {
			/* Config Test Generator */
			struct v4l2_mbus_framefmt *f =
					&csid->fmt[MSM_CSID_PAD_SRC];
			u8 bpp = csid_get_fmt_entry(f->code)->bpp;
			u8 spp = csid_get_fmt_entry(f->code)->spp;
			u32 num_bytes_per_line = f->width * bpp * spp / 8;
			u32 num_lines = f->height;

			/* 8-bit V blank and 11-bit H blank fields
			 * (field widths per register layout - TODO confirm
			 * against hardware documentation)
			 */
			val = ((CAMSS_CSID_TG_VC_CFG_V_BLANKING & 0xff) << 24) |
			      ((CAMSS_CSID_TG_VC_CFG_H_BLANKING & 0x7ff) << 13);
			writel_relaxed(val, csid->base + CAMSS_CSID_TG_VC_CFG);

			/* Frame geometry for generated data type 0 */
			val = ((num_bytes_per_line & 0x1fff) << 16) |
			      (num_lines & 0x1fff);
			writel_relaxed(val, csid->base +
				       CAMSS_CSID_TG_DT_n_CGG_0(0));

			/* Generated MIPI data type */
			val = dt;
			writel_relaxed(val, csid->base +
				       CAMSS_CSID_TG_DT_n_CGG_1(0));

			/* Generated payload pattern, see csid_set_test_pattern() */
			val = tg->payload_mode;
			writel_relaxed(val, csid->base +
				       CAMSS_CSID_TG_DT_n_CGG_2(0));
		} else {
			struct csid_phy_config *phy = &csid->phy;

			/* Lane count and lane assignment from the CSIPHY link */
			val = phy->lane_cnt - 1;
			val |= phy->lane_assign << 4;

			writel_relaxed(val,
				       csid->base + CAMSS_CSID_CORE_CTRL_0);

			/* Select input CSIPHY; 0x9 enable bits - TODO confirm */
			val = phy->csiphy_id << 17;
			val |= 0x9;

			writel_relaxed(val,
				       csid->base + CAMSS_CSID_CORE_CTRL_1);
		}

		/* Map the data type to our CID in the VC lookup table
		 * (one byte per CID, 4 CIDs per 32-bit LUT register)
		 */
		dt_shift = (cid % 4) * 8;
		df = csid_get_fmt_entry(csid->fmt[MSM_CSID_PAD_SINK].code)->
								decode_format;

		val = readl_relaxed(csid->base + CAMSS_CSID_CID_LUT_VC_n(vc));
		val &= ~(0xff << dt_shift);
		val |= dt << dt_shift;
		writel_relaxed(val, csid->base + CAMSS_CSID_CID_LUT_VC_n(vc));

		/* Decode format plus enable bits for this CID */
		val = (df << 4) | 0x3;
		writel_relaxed(val, csid->base + CAMSS_CSID_CID_n_CFG(cid));

		if (tg->enabled) {
			val = CAMSS_CSID_TG_CTRL_ENABLE;
			writel_relaxed(val, csid->base + CAMSS_CSID_TG_CTRL);
		}
	} else {
		if (tg->enabled) {
			val = CAMSS_CSID_TG_CTRL_DISABLE;
			writel_relaxed(val, csid->base + CAMSS_CSID_TG_CTRL);
		}
	}

	return 0;
}
471
472
473
474
475
476
477
478
479
480
481static struct v4l2_mbus_framefmt *
482__csid_get_format(struct csid_device *csid,
483 struct v4l2_subdev_pad_config *cfg,
484 unsigned int pad,
485 enum v4l2_subdev_format_whence which)
486{
487 if (which == V4L2_SUBDEV_FORMAT_TRY)
488 return v4l2_subdev_get_try_format(&csid->subdev, cfg, pad);
489
490 return &csid->fmt[pad];
491}
492
493
494
495
496
497
498
499
500
501static void csid_try_format(struct csid_device *csid,
502 struct v4l2_subdev_pad_config *cfg,
503 unsigned int pad,
504 struct v4l2_mbus_framefmt *fmt,
505 enum v4l2_subdev_format_whence which)
506{
507 unsigned int i;
508
509 switch (pad) {
510 case MSM_CSID_PAD_SINK:
511
512
513 for (i = 0; i < ARRAY_SIZE(csid_input_fmts); i++)
514 if (fmt->code == csid_input_fmts[i].code)
515 break;
516
517
518 if (i >= ARRAY_SIZE(csid_input_fmts))
519 fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
520
521 fmt->width = clamp_t(u32, fmt->width, 1, 8191);
522 fmt->height = clamp_t(u32, fmt->height, 1, 8191);
523
524 fmt->field = V4L2_FIELD_NONE;
525 fmt->colorspace = V4L2_COLORSPACE_SRGB;
526
527 break;
528
529 case MSM_CSID_PAD_SRC:
530 if (csid->testgen_mode->cur.val == 0) {
531
532
533 struct v4l2_mbus_framefmt format;
534
535 format = *__csid_get_format(csid, cfg,
536 MSM_CSID_PAD_SINK, which);
537 *fmt = format;
538 } else {
539
540
541
542 for (i = 0; i < ARRAY_SIZE(csid_input_fmts); i++)
543 if (csid_input_fmts[i].code == fmt->code)
544 break;
545
546
547 if (i >= ARRAY_SIZE(csid_input_fmts))
548 fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
549
550 fmt->width = clamp_t(u32, fmt->width, 1, 8191);
551 fmt->height = clamp_t(u32, fmt->height, 1, 8191);
552
553 fmt->field = V4L2_FIELD_NONE;
554 }
555 break;
556 }
557
558 fmt->colorspace = V4L2_COLORSPACE_SRGB;
559}
560
561
562
563
564
565
566
567
568static int csid_enum_mbus_code(struct v4l2_subdev *sd,
569 struct v4l2_subdev_pad_config *cfg,
570 struct v4l2_subdev_mbus_code_enum *code)
571{
572 struct csid_device *csid = v4l2_get_subdevdata(sd);
573 struct v4l2_mbus_framefmt *format;
574
575 if (code->pad == MSM_CSID_PAD_SINK) {
576 if (code->index >= ARRAY_SIZE(csid_input_fmts))
577 return -EINVAL;
578
579 code->code = csid_input_fmts[code->index].code;
580 } else {
581 if (csid->testgen_mode->cur.val == 0) {
582 if (code->index > 0)
583 return -EINVAL;
584
585 format = __csid_get_format(csid, cfg, MSM_CSID_PAD_SINK,
586 code->which);
587
588 code->code = format->code;
589 } else {
590 if (code->index >= ARRAY_SIZE(csid_input_fmts))
591 return -EINVAL;
592
593 code->code = csid_input_fmts[code->index].code;
594 }
595 }
596
597 return 0;
598}
599
600
601
602
603
604
605
606
607static int csid_enum_frame_size(struct v4l2_subdev *sd,
608 struct v4l2_subdev_pad_config *cfg,
609 struct v4l2_subdev_frame_size_enum *fse)
610{
611 struct csid_device *csid = v4l2_get_subdevdata(sd);
612 struct v4l2_mbus_framefmt format;
613
614 if (fse->index != 0)
615 return -EINVAL;
616
617 format.code = fse->code;
618 format.width = 1;
619 format.height = 1;
620 csid_try_format(csid, cfg, fse->pad, &format, fse->which);
621 fse->min_width = format.width;
622 fse->min_height = format.height;
623
624 if (format.code != fse->code)
625 return -EINVAL;
626
627 format.code = fse->code;
628 format.width = -1;
629 format.height = -1;
630 csid_try_format(csid, cfg, fse->pad, &format, fse->which);
631 fse->max_width = format.width;
632 fse->max_height = format.height;
633
634 return 0;
635}
636
637
638
639
640
641
642
643
644
645static int csid_get_format(struct v4l2_subdev *sd,
646 struct v4l2_subdev_pad_config *cfg,
647 struct v4l2_subdev_format *fmt)
648{
649 struct csid_device *csid = v4l2_get_subdevdata(sd);
650 struct v4l2_mbus_framefmt *format;
651
652 format = __csid_get_format(csid, cfg, fmt->pad, fmt->which);
653 if (format == NULL)
654 return -EINVAL;
655
656 fmt->format = *format;
657
658 return 0;
659}
660
661
662
663
664
665
666
667
668
669static int csid_set_format(struct v4l2_subdev *sd,
670 struct v4l2_subdev_pad_config *cfg,
671 struct v4l2_subdev_format *fmt)
672{
673 struct csid_device *csid = v4l2_get_subdevdata(sd);
674 struct v4l2_mbus_framefmt *format;
675
676 format = __csid_get_format(csid, cfg, fmt->pad, fmt->which);
677 if (format == NULL)
678 return -EINVAL;
679
680 csid_try_format(csid, cfg, fmt->pad, &fmt->format, fmt->which);
681 *format = fmt->format;
682
683
684 if (fmt->pad == MSM_CSID_PAD_SINK) {
685 format = __csid_get_format(csid, cfg, MSM_CSID_PAD_SRC,
686 fmt->which);
687
688 *format = fmt->format;
689 csid_try_format(csid, cfg, MSM_CSID_PAD_SRC, format,
690 fmt->which);
691 }
692
693 return 0;
694}
695
696
697
698
699
700
701
702
703
704
705static int csid_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
706{
707 struct v4l2_subdev_format format = {
708 .pad = MSM_CSID_PAD_SINK,
709 .which = fh ? V4L2_SUBDEV_FORMAT_TRY :
710 V4L2_SUBDEV_FORMAT_ACTIVE,
711 .format = {
712 .code = MEDIA_BUS_FMT_UYVY8_2X8,
713 .width = 1920,
714 .height = 1080
715 }
716 };
717
718 return csid_set_format(sd, fh ? fh->pad : NULL, &format);
719}
720
/*
 * Test pattern menu for the V4L2_CID_TEST_PATTERN control.
 * Indices must stay in sync with the switch in csid_set_test_pattern().
 */
static const char * const csid_test_pattern_menu[] = {
	"Disabled",
	"Incrementing",
	"Alternating 0x55/0xAA",
	"All Zeros 0x00",
	"All Ones 0xFF",
	"Pseudo-random Data",
};
729
730
731
732
733
734
735
736
737static int csid_set_test_pattern(struct csid_device *csid, s32 value)
738{
739 struct csid_testgen_config *tg = &csid->testgen;
740
741
742 if (value && media_entity_remote_pad(&csid->pads[MSM_CSID_PAD_SINK]))
743 return -EBUSY;
744
745 tg->enabled = !!value;
746
747 switch (value) {
748 case 1:
749 tg->payload_mode = CSID_PAYLOAD_MODE_INCREMENTING;
750 break;
751 case 2:
752 tg->payload_mode = CSID_PAYLOAD_MODE_ALTERNATING_55_AA;
753 break;
754 case 3:
755 tg->payload_mode = CSID_PAYLOAD_MODE_ALL_ZEROES;
756 break;
757 case 4:
758 tg->payload_mode = CSID_PAYLOAD_MODE_ALL_ONES;
759 break;
760 case 5:
761 tg->payload_mode = CSID_PAYLOAD_MODE_RANDOM;
762 break;
763 }
764
765 return 0;
766}
767
768
769
770
771
772
773
774static int csid_s_ctrl(struct v4l2_ctrl *ctrl)
775{
776 struct csid_device *csid = container_of(ctrl->handler,
777 struct csid_device, ctrls);
778 int ret = -EINVAL;
779
780 switch (ctrl->id) {
781 case V4L2_CID_TEST_PATTERN:
782 ret = csid_set_test_pattern(csid, ctrl->val);
783 break;
784 }
785
786 return ret;
787}
788
/* V4L2 control operations for the CSID subdevice */
static const struct v4l2_ctrl_ops csid_ctrl_ops = {
	.s_ctrl = csid_s_ctrl,
};
792
793
794
795
796
797
798
799
800
/*
 * msm_csid_subdev_init - Initialize CSID device structure and resources
 * @csid: CSID device
 * @res: CSID module resources table
 * @id: CSID module id
 *
 * Maps registers, requests the (initially disabled) IRQ, looks up
 * clocks and their supported rates, and gets the vdda regulator.
 * All resources are devm-managed, so there is no teardown path here.
 *
 * Return 0 on success or a negative error code otherwise
 */
int msm_csid_subdev_init(struct csid_device *csid,
			 const struct resources *res, u8 id)
{
	struct device *dev = to_device_index(csid, id);
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *r;
	int i, j;
	int ret;

	csid->id = id;

	/* Memory */

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]);
	csid->base = devm_ioremap_resource(dev, r);
	if (IS_ERR(csid->base)) {
		dev_err(dev, "could not map memory\n");
		return PTR_ERR(csid->base);
	}

	/* Interrupt */

	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
					 res->interrupt[0]);
	if (!r) {
		dev_err(dev, "missing IRQ\n");
		return -EINVAL;
	}

	csid->irq = r->start;
	snprintf(csid->irq_name, sizeof(csid->irq_name), "%s_%s%d",
		 dev_name(dev), MSM_CSID_NAME, csid->id);
	ret = devm_request_irq(dev, csid->irq, csid_isr,
			       IRQF_TRIGGER_RISING, csid->irq_name, csid);
	if (ret < 0) {
		dev_err(dev, "request_irq failed: %d\n", ret);
		return ret;
	}

	/* Keep the IRQ off until the module is powered (csid_set_power) */
	disable_irq(csid->irq);

	/* Clocks */

	/* res->clock is a NULL-terminated array of clock names */
	csid->nclocks = 0;
	while (res->clock[csid->nclocks])
		csid->nclocks++;

	csid->clock = devm_kcalloc(dev, csid->nclocks, sizeof(*csid->clock),
				   GFP_KERNEL);
	if (!csid->clock)
		return -ENOMEM;

	for (i = 0; i < csid->nclocks; i++) {
		struct camss_clock *clock = &csid->clock[i];

		clock->clk = devm_clk_get(dev, res->clock[i]);
		if (IS_ERR(clock->clk))
			return PTR_ERR(clock->clk);

		clock->name = res->clock[i];

		/* res->clock_rate[i] is a zero-terminated rate list */
		clock->nfreqs = 0;
		while (res->clock_rate[i][clock->nfreqs])
			clock->nfreqs++;

		if (!clock->nfreqs) {
			clock->freq = NULL;
			continue;
		}

		clock->freq = devm_kcalloc(dev,
					   clock->nfreqs,
					   sizeof(*clock->freq),
					   GFP_KERNEL);
		if (!clock->freq)
			return -ENOMEM;

		for (j = 0; j < clock->nfreqs; j++)
			clock->freq[j] = res->clock_rate[i][j];
	}

	/* Regulator */

	csid->vdda = devm_regulator_get(dev, res->regulator[0]);
	if (IS_ERR(csid->vdda)) {
		dev_err(dev, "could not get regulator\n");
		return PTR_ERR(csid->vdda);
	}

	init_completion(&csid->reset_complete);

	return 0;
}
894
895
896
897
898
899
900void msm_csid_get_csid_id(struct media_entity *entity, u8 *id)
901{
902 struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
903 struct csid_device *csid = v4l2_get_subdevdata(sd);
904
905 *id = csid->id;
906}
907
908
909
910
911
912
913
914static u32 csid_get_lane_assign(struct csiphy_lanes_cfg *lane_cfg)
915{
916 u32 lane_assign = 0;
917 int i;
918
919 for (i = 0; i < lane_cfg->num_data; i++)
920 lane_assign |= lane_cfg->data[i].pos << (i * 4);
921
922 return lane_assign;
923}
924
925
926
927
928
929
930
931
932
933
/*
 * csid_link_setup - Setup CSID connections
 * @entity: Pointer to media entity structure
 * @local: Pointer to local pad
 * @remote: Pointer to remote pad
 * @flags: Link flags
 *
 * When the sink link to a CSIPHY is enabled, capture the PHY's id and
 * lane configuration and reset the source pad format.
 *
 * Return 0 on success or a negative error code otherwise
 */
static int csid_link_setup(struct media_entity *entity,
			   const struct media_pad *local,
			   const struct media_pad *remote, u32 flags)
{
	/* Only a single enabled link per pad is allowed */
	if (flags & MEDIA_LNK_FL_ENABLED)
		if (media_entity_remote_pad(local))
			return -EBUSY;

	if ((local->flags & MEDIA_PAD_FL_SINK) &&
	    (flags & MEDIA_LNK_FL_ENABLED)) {
		struct v4l2_subdev *sd;
		struct csid_device *csid;
		struct csiphy_device *csiphy;
		struct csiphy_lanes_cfg *lane_cfg;
		struct v4l2_subdev_format format = { 0 };

		sd = media_entity_to_v4l2_subdev(entity);
		csid = v4l2_get_subdevdata(sd);

		/* If test generator is enabled, do not allow
		 * a link from CSIPHY to CSID
		 */
		if (csid->testgen_mode->cur.val != 0)
			return -EBUSY;

		sd = media_entity_to_v4l2_subdev(remote->entity);
		csiphy = v4l2_get_subdevdata(sd);

		/* If the CSIPHY has no CSI-2 configuration (no sensor
		 * linked), do not allow a link from CSIPHY to CSID
		 */
		if (!csiphy->cfg.csi2)
			return -EPERM;

		csid->phy.csiphy_id = csiphy->id;

		lane_cfg = &csiphy->cfg.csi2->lane_cfg;
		csid->phy.lane_cnt = lane_cfg->num_data;
		csid->phy.lane_assign = csid_get_lane_assign(lane_cfg);

		/* Reset the active source pad format (propagated from sink) */
		format.pad = MSM_CSID_PAD_SRC;
		format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
		csid_set_format(&csid->subdev, NULL, &format);
	}

	return 0;
}
980
/* V4L2 subdevice core operations */
static const struct v4l2_subdev_core_ops csid_core_ops = {
	.s_power = csid_set_power,
};

/* V4L2 subdevice video operations */
static const struct v4l2_subdev_video_ops csid_video_ops = {
	.s_stream = csid_set_stream,
};

/* V4L2 subdevice pad operations */
static const struct v4l2_subdev_pad_ops csid_pad_ops = {
	.enum_mbus_code = csid_enum_mbus_code,
	.enum_frame_size = csid_enum_frame_size,
	.get_fmt = csid_get_format,
	.set_fmt = csid_set_format,
};

static const struct v4l2_subdev_ops csid_v4l2_ops = {
	.core = &csid_core_ops,
	.video = &csid_video_ops,
	.pad = &csid_pad_ops,
};

/* .open initializes TRY formats for a newly opened file handle */
static const struct v4l2_subdev_internal_ops csid_v4l2_internal_ops = {
	.open = csid_init_formats,
};

static const struct media_entity_operations csid_media_ops = {
	.link_setup = csid_link_setup,
	.link_validate = v4l2_subdev_link_validate,
};
1010
1011
1012
1013
1014
1015
1016
1017
/*
 * msm_csid_register_entity - Register subdev node for CSID module
 * @csid: CSID device
 * @v4l2_dev: V4L2 device
 *
 * Initializes the subdevice, creates the test pattern control,
 * sets default formats, initializes media pads and registers the
 * subdevice. Cleans up controls/entity on failure.
 *
 * Return 0 on success or a negative error code otherwise
 */
int msm_csid_register_entity(struct csid_device *csid,
			     struct v4l2_device *v4l2_dev)
{
	struct v4l2_subdev *sd = &csid->subdev;
	struct media_pad *pads = csid->pads;
	struct device *dev = to_device_index(csid, csid->id);
	int ret;

	v4l2_subdev_init(sd, &csid_v4l2_ops);
	sd->internal_ops = &csid_v4l2_internal_ops;
	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
	snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d",
		 MSM_CSID_NAME, csid->id);
	v4l2_set_subdevdata(sd, csid);

	ret = v4l2_ctrl_handler_init(&csid->ctrls, 1);
	if (ret < 0) {
		dev_err(dev, "Failed to init ctrl handler: %d\n", ret);
		return ret;
	}

	csid->testgen_mode = v4l2_ctrl_new_std_menu_items(&csid->ctrls,
				&csid_ctrl_ops, V4L2_CID_TEST_PATTERN,
				ARRAY_SIZE(csid_test_pattern_menu) - 1, 0, 0,
				csid_test_pattern_menu);

	if (csid->ctrls.error) {
		dev_err(dev, "Failed to init ctrl: %d\n", csid->ctrls.error);
		ret = csid->ctrls.error;
		goto free_ctrl;
	}

	csid->subdev.ctrl_handler = &csid->ctrls;

	ret = csid_init_formats(sd, NULL);
	if (ret < 0) {
		dev_err(dev, "Failed to init format: %d\n", ret);
		goto free_ctrl;
	}

	pads[MSM_CSID_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
	pads[MSM_CSID_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE;

	/* NOTE(review): MEDIA_ENT_F_IO_V4L marks an I/O entity; a
	 * processing entity function may describe the CSID better -
	 * confirm against the media controller entity function list.
	 */
	sd->entity.function = MEDIA_ENT_F_IO_V4L;
	sd->entity.ops = &csid_media_ops;
	ret = media_entity_pads_init(&sd->entity, MSM_CSID_PADS_NUM, pads);
	if (ret < 0) {
		dev_err(dev, "Failed to init media entity: %d\n", ret);
		goto free_ctrl;
	}

	ret = v4l2_device_register_subdev(v4l2_dev, sd);
	if (ret < 0) {
		dev_err(dev, "Failed to register subdev: %d\n", ret);
		goto media_cleanup;
	}

	return 0;

media_cleanup:
	media_entity_cleanup(&sd->entity);
free_ctrl:
	v4l2_ctrl_handler_free(&csid->ctrls);

	return ret;
}
1084
1085
1086
1087
1088
/*
 * msm_csid_unregister_entity - Unregister CSID module subdev node
 * @csid: CSID device
 *
 * Undoes msm_csid_register_entity() in reverse order.
 */
void msm_csid_unregister_entity(struct csid_device *csid)
{
	v4l2_device_unregister_subdev(&csid->subdev);
	media_entity_cleanup(&csid->subdev.entity);
	v4l2_ctrl_handler_free(&csid->ctrls);
}
1095