1
2
3
4
5
6
7
8
9
10
11
12
13
14#include <linux/clk.h>
15#include <linux/completion.h>
16#include <linux/delay.h>
17#include <linux/dmaengine.h>
18#include <linux/init.h>
19#include <linux/interrupt.h>
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/of_graph.h>
25#include <linux/pinctrl/consumer.h>
26#include <linux/platform_device.h>
27#include <linux/pm_runtime.h>
28#include <linux/reset.h>
29#include <linux/videodev2.h>
30
31#include <media/v4l2-ctrls.h>
32#include <media/v4l2-dev.h>
33#include <media/v4l2-device.h>
34#include <media/v4l2-event.h>
35#include <media/v4l2-fwnode.h>
36#include <media/v4l2-image-sizes.h>
37#include <media/v4l2-ioctl.h>
38#include <media/v4l2-rect.h>
39#include <media/videobuf2-dma-contig.h>
40
#define DRV_NAME "stm32-dcmi"

/* DCMI hardware register offsets */
#define DCMI_CR 0x00		/* Control Register */
#define DCMI_SR 0x04		/* Status Register */
#define DCMI_RIS 0x08		/* Raw Interrupt Status register */
#define DCMI_IER 0x0C		/* Interrupt Enable Register */
#define DCMI_MIS 0x10		/* Masked Interrupt Status register */
#define DCMI_ICR 0x14		/* Interrupt Clear Register */
#define DCMI_ESCR 0x18		/* Embedded Synchronization Code Register */
#define DCMI_ESUR 0x1C		/* Embedded Synchronization Unmask Register */
#define DCMI_CWSTRT 0x20	/* Crop Window STaRT */
#define DCMI_CWSIZE 0x24	/* Crop Window SIZE */
#define DCMI_DR 0x28		/* Data Register (DMA source) */
#define DCMI_IDR 0x2C		/* IDentifier Register */

/* Bits definition for control register (DCMI_CR) */
#define CR_CAPTURE BIT(0)	/* Capture enable */
#define CR_CM BIT(1)		/* Capture mode (set for JPEG here) */
#define CR_CROP BIT(2)		/* Crop feature enable */
#define CR_JPEG BIT(3)		/* JPEG format */
#define CR_ESS BIT(4)		/* Embedded synchronization select */
#define CR_PCKPOL BIT(5)	/* Pixel clock polarity */
#define CR_HSPOL BIT(6)		/* Horizontal sync polarity */
#define CR_VSPOL BIT(7)		/* Vertical sync polarity */
#define CR_FCRC_0 BIT(8)	/* Frame capture rate control, bit 0 */
#define CR_FCRC_1 BIT(9)	/* Frame capture rate control, bit 1 */
#define CR_EDM_0 BIT(10)	/* Extended data mode, bit 0 (bus width) */
#define CR_EDM_1 BIT(11)	/* Extended data mode, bit 1 (bus width) */
#define CR_ENABLE BIT(14)	/* DCMI interface enable */

/* Bits definition for status register (DCMI_SR) */
#define SR_HSYNC BIT(0)
#define SR_VSYNC BIT(1)
#define SR_FNE BIT(2)		/* FIFO not empty */

/*
 * Bits definition for interrupt registers
 * (DCMI_RIS, DCMI_IER, DCMI_MIS, DCMI_ICR)
 */
#define IT_FRAME BIT(0)		/* Capture complete */
#define IT_OVR BIT(1)		/* Overrun */
#define IT_ERR BIT(2)		/* Synchronization error */
#define IT_VSYNC BIT(3)
#define IT_LINE BIT(4)

/* Streaming state machine, guarded by stm32_dcmi.irqlock */
enum state {
	STOPPED = 0,		/* Not streaming */
	WAIT_FOR_BUFFER,	/* Streaming but no buffer queued yet */
	RUNNING,		/* Capture in progress */
};

/* Supported frame geometry limits */
#define MIN_WIDTH 16U
#define MAX_WIDTH 2592U
#define MIN_HEIGHT 16U
#define MAX_HEIGHT 2592U

/* Generic wait timeout, in milliseconds (used outside this chunk) */
#define TIMEOUT_MS 1000

/* Number of tolerated overruns before counting them as errors */
#define OVERRUN_ERROR_THRESHOLD 3
101
/* Remote (sensor) entity discovered through the OF graph */
struct dcmi_graph_entity {
	struct device_node *node;	/* DT node of the remote subdevice */

	struct v4l2_async_subdev asd;	/* Async match descriptor */
	struct v4l2_subdev *subdev;	/* Bound sensor subdevice */
};
108
/* Mapping between a V4L2 pixel format and a media bus code */
struct dcmi_format {
	u32 fourcc;	/* V4L2 pixel format (V4L2_PIX_FMT_*) */
	u32 mbus_code;	/* Matching media bus code (MEDIA_BUS_FMT_*) */
	u8 bpp;		/* Bytes per pixel */
};

/* A discrete frame size supported by the sensor */
struct dcmi_framesize {
	u32 width;
	u32 height;
};

/* Driver-specific video buffer, embedding the vb2 buffer */
struct dcmi_buf {
	struct vb2_v4l2_buffer vb;	/* Must be first (container_of) */
	bool prepared;			/* DMA address/size cached once */
	dma_addr_t paddr;		/* DMA address of plane 0 */
	size_t size;			/* Plane 0 size in bytes */
	struct list_head list;		/* Entry in stm32_dcmi.buffers */
};
127
/* Per-device state of the STM32 DCMI bridge */
struct stm32_dcmi {
	/* Protects 'buffers', 'active', 'state' and 'misr' */
	spinlock_t irqlock;
	struct device *dev;
	void __iomem *regs;		/* Mapped DCMI registers */
	struct resource *res;		/* Register resource; phys base used as DMA source */
	struct reset_control *rstc;
	int sequence;			/* Capture frame sequence counter */
	struct list_head buffers;	/* Queued buffers awaiting capture */
	struct dcmi_buf *active;	/* Buffer currently being filled by DMA */

	struct v4l2_device v4l2_dev;
	struct video_device *vdev;
	struct v4l2_async_notifier notifier;
	struct dcmi_graph_entity entity;	/* Connected sensor */
	struct v4l2_format fmt;		/* Current capture format */
	struct v4l2_rect crop;		/* Active crop rectangle (valid when do_crop) */
	bool do_crop;			/* Hardware cropping enabled */

	/*
	 * Formats/frame sizes usable with the sensor — presumably filled at
	 * probe/bound time (not in view); verify against the rest of the file.
	 */
	const struct dcmi_format **sd_formats;
	unsigned int num_of_sd_formats;
	const struct dcmi_format *sd_format;	/* Format currently set on sensor */
	struct dcmi_framesize *sd_framesizes;
	unsigned int num_of_sd_framesizes;
	struct dcmi_framesize sd_framesize;	/* Frame size requested from sensor */
	struct v4l2_rect sd_bounds;		/* Sensor crop bounds */

	/* Serializes open/release and format changes */
	struct mutex lock;
	struct vb2_queue queue;

	struct v4l2_fwnode_bus_parallel bus;	/* Parallel bus config from DT */
	struct completion complete;
	struct clk *mclk;
	enum state state;			/* See enum state above */
	struct dma_chan *dma_chan;
	dma_cookie_t dma_cookie;		/* Cookie of the in-flight DMA transfer */
	u32 misr;				/* Masked irq status latched for the threaded handler */
	int errors_count;
	int overrun_count;
	int buffers_count;

	/* Serializes DMA prep/submit against dmaengine_terminate_all() */
	struct mutex dma_lock;
};
173
/* Retrieve the driver state embedding a given async notifier */
static inline struct stm32_dcmi *notifier_to_dcmi(struct v4l2_async_notifier *n)
{
	return container_of(n, struct stm32_dcmi, notifier);
}
178
179static inline u32 reg_read(void __iomem *base, u32 reg)
180{
181 return readl_relaxed(base + reg);
182}
183
184static inline void reg_write(void __iomem *base, u32 reg, u32 val)
185{
186 writel_relaxed(val, base + reg);
187}
188
189static inline void reg_set(void __iomem *base, u32 reg, u32 mask)
190{
191 reg_write(base, reg, reg_read(base, reg) | mask);
192}
193
194static inline void reg_clear(void __iomem *base, u32 reg, u32 mask)
195{
196 reg_write(base, reg, reg_read(base, reg) & ~mask);
197}
198
199static int dcmi_start_capture(struct stm32_dcmi *dcmi, struct dcmi_buf *buf);
200
201static void dcmi_buffer_done(struct stm32_dcmi *dcmi,
202 struct dcmi_buf *buf,
203 size_t bytesused,
204 int err)
205{
206 struct vb2_v4l2_buffer *vbuf;
207
208 if (!buf)
209 return;
210
211 list_del_init(&buf->list);
212
213 vbuf = &buf->vb;
214
215 vbuf->sequence = dcmi->sequence++;
216 vbuf->field = V4L2_FIELD_NONE;
217 vbuf->vb2_buf.timestamp = ktime_get_ns();
218 vb2_set_plane_payload(&vbuf->vb2_buf, 0, bytesused);
219 vb2_buffer_done(&vbuf->vb2_buf,
220 err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
221 dev_dbg(dcmi->dev, "buffer[%d] done seq=%d, bytesused=%zu\n",
222 vbuf->vb2_buf.index, vbuf->sequence, bytesused);
223
224 dcmi->buffers_count++;
225 dcmi->active = NULL;
226}
227
/*
 * Start capture of the next queued buffer, or defer until one is queued.
 * Must be called with irqlock NOT held (dcmi_start_capture sleeps in the
 * DMA API).
 */
static int dcmi_restart_capture(struct stm32_dcmi *dcmi)
{
	struct dcmi_buf *buf;

	spin_lock_irq(&dcmi->irqlock);

	/* A restart is only meaningful while streaming */
	if (dcmi->state != RUNNING) {
		spin_unlock_irq(&dcmi->irqlock);
		return -EINVAL;
	}

	/* No buffer available: dcmi_buf_queue() will resume capture */
	if (list_empty(&dcmi->buffers)) {
		dev_dbg(dcmi->dev, "Capture restart is deferred to next buffer queueing\n");
		dcmi->state = WAIT_FOR_BUFFER;
		spin_unlock_irq(&dcmi->irqlock);
		return 0;
	}
	buf = list_entry(dcmi->buffers.next, struct dcmi_buf, list);
	dcmi->active = buf;

	spin_unlock_irq(&dcmi->irqlock);

	/* Program DMA and enable capture for this buffer */
	return dcmi_start_capture(dcmi, buf);
}
253
/*
 * DMA completion callback: on DMA_COMPLETE, hand the active buffer back
 * to vb2 and chain the next capture. Other statuses are logged; DMA_ERROR
 * additionally fails the active buffer.
 */
static void dcmi_dma_callback(void *param)
{
	struct stm32_dcmi *dcmi = (struct stm32_dcmi *)param;
	struct dma_tx_state state;
	enum dma_status status;
	struct dcmi_buf *buf = dcmi->active;

	spin_lock_irq(&dcmi->irqlock);

	/* Check DMA status */
	status = dmaengine_tx_status(dcmi->dma_chan, dcmi->dma_cookie, &state);

	switch (status) {
	case DMA_IN_PROGRESS:
		dev_dbg(dcmi->dev, "%s: Received DMA_IN_PROGRESS\n", __func__);
		break;
	case DMA_PAUSED:
		dev_err(dcmi->dev, "%s: Received DMA_PAUSED\n", __func__);
		break;
	case DMA_ERROR:
		dev_err(dcmi->dev, "%s: Received DMA_ERROR\n", __func__);

		/* Return the failed buffer to vb2 in ERROR state */
		dcmi_buffer_done(dcmi, buf, 0, -EIO);
		break;
	case DMA_COMPLETE:
		dev_dbg(dcmi->dev, "%s: Received DMA_COMPLETE\n", __func__);

		/* Buffer is fully filled: complete it with full payload */
		dcmi_buffer_done(dcmi, buf, buf->size, 0);

		spin_unlock_irq(&dcmi->irqlock);

		/* Restart capture: must run with irqlock released */
		if (dcmi_restart_capture(dcmi))
			dev_err(dcmi->dev, "%s: Cannot restart capture on DMA complete\n",
				__func__);
		return;
	default:
		dev_err(dcmi->dev, "%s: Received unknown status\n", __func__);
		break;
	}

	spin_unlock_irq(&dcmi->irqlock);
}
299
/*
 * Configure the DMA channel and submit a DEV_TO_MEM transfer filling
 * @buf from the DCMI data register. Returns 0 or a negative errno.
 */
static int dcmi_start_dma(struct stm32_dcmi *dcmi,
			  struct dcmi_buf *buf)
{
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_slave_config config;
	int ret;

	memset(&config, 0, sizeof(config));

	/* DMA source is the physical address of the DCMI data register */
	config.src_addr = (dma_addr_t)dcmi->res->start + DCMI_DR;
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.dst_maxburst = 4;

	/* Configure DMA channel */
	ret = dmaengine_slave_config(dcmi->dma_chan, &config);
	if (ret < 0) {
		dev_err(dcmi->dev, "%s: DMA channel config failed (%d)\n",
			__func__, ret);
		return ret;
	}

	/*
	 * dma_lock keeps descriptor preparation and submission atomic with
	 * respect to dmaengine_terminate_all() in dcmi_stop_streaming().
	 */
	mutex_lock(&dcmi->dma_lock);

	/* Prepare a DMA transaction */
	desc = dmaengine_prep_slave_single(dcmi->dma_chan, buf->paddr,
					   buf->size,
					   DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dcmi->dev, "%s: DMA dmaengine_prep_slave_single failed for buffer phy=%pad size=%zu\n",
			__func__, &buf->paddr, buf->size);
		mutex_unlock(&dcmi->dma_lock);
		return -EINVAL;
	}

	/* Set completion callback routine for notification */
	desc->callback = dcmi_dma_callback;
	desc->callback_param = dcmi;

	/* Push current DMA transaction in the pending queue */
	dcmi->dma_cookie = dmaengine_submit(desc);
	if (dma_submit_error(dcmi->dma_cookie)) {
		dev_err(dcmi->dev, "%s: DMA submission failed\n", __func__);
		mutex_unlock(&dcmi->dma_lock);
		return -ENXIO;
	}

	mutex_unlock(&dcmi->dma_lock);

	dma_async_issue_pending(dcmi->dma_chan);

	return 0;
}
359
360static int dcmi_start_capture(struct stm32_dcmi *dcmi, struct dcmi_buf *buf)
361{
362 int ret;
363
364 if (!buf)
365 return -EINVAL;
366
367 ret = dcmi_start_dma(dcmi, buf);
368 if (ret) {
369 dcmi->errors_count++;
370 return ret;
371 }
372
373
374 reg_set(dcmi->regs, DCMI_CR, CR_CAPTURE);
375
376 return 0;
377}
378
/*
 * Program the DCMI crop window registers from dcmi->crop and enable
 * cropping.
 * NOTE(review): left/width are doubled here — the window appears to be
 * programmed in pixel-clock cycles rather than pixels (two cycles per
 * pixel for the formats in use); confirm against the STM32 reference
 * manual, DCMI chapter.
 */
static void dcmi_set_crop(struct stm32_dcmi *dcmi)
{
	u32 size, start;

	/* Crop resolution: CWSIZE = (height - 1) << 16 | (2 * width - 1) */
	size = ((dcmi->crop.height - 1) << 16) |
	       ((dcmi->crop.width << 1) - 1);
	reg_write(dcmi->regs, DCMI_CWSIZE, size);

	/* Crop start point: CWSTRT = top << 16 | 2 * left */
	start = ((dcmi->crop.top) << 16) |
		((dcmi->crop.left << 1));
	reg_write(dcmi->regs, DCMI_CWSTRT, start);

	dev_dbg(dcmi->dev, "Cropping to %ux%u@%u:%u\n",
		dcmi->crop.width, dcmi->crop.height,
		dcmi->crop.left, dcmi->crop.top);

	/* Enable crop */
	reg_set(dcmi->regs, DCMI_CR, CR_CROP);
}
400
/*
 * Handle end of a JPEG frame (IT_FRAME): the compressed size is not
 * known in advance, so derive it from the DMA residue, complete the
 * active buffer, abort the remaining DMA transfer and restart capture.
 * Called with irqlock NOT held.
 */
static void dcmi_process_jpeg(struct stm32_dcmi *dcmi)
{
	struct dma_tx_state state;
	enum dma_status status;
	struct dcmi_buf *buf = dcmi->active;

	if (!buf)
		return;

	/*
	 * Wait for any in-flight DMA completion callback to finish before
	 * inspecting the channel state.
	 */
	dmaengine_synchronize(dcmi->dma_chan);

	/* Received size = buffer size - DMA residue */
	status = dmaengine_tx_status(dcmi->dma_chan, dcmi->dma_cookie, &state);
	if (status != DMA_ERROR && state.residue < buf->size) {
		/* Return the JPEG buffer to vb2 with its actual payload */
		dcmi_buffer_done(dcmi, buf, buf->size - state.residue, 0);
	} else {
		dcmi->errors_count++;
		dev_err(dcmi->dev, "%s: Cannot get JPEG size from DMA\n",
			__func__);
		/* Return the buffer to vb2 in ERROR state */
		dcmi_buffer_done(dcmi, buf, 0, -EIO);
	}

	/* Abort the current DMA transaction before starting a new one */
	dmaengine_terminate_all(dcmi->dma_chan);

	/* Restart capture */
	if (dcmi_restart_capture(dcmi))
		dev_err(dcmi->dev, "%s: Cannot restart capture on JPEG received\n",
			__func__);
}
444
/*
 * Threaded IRQ handler: processes the interrupt status latched in
 * dcmi->misr by the hard IRQ handler.
 */
static irqreturn_t dcmi_irq_thread(int irq, void *arg)
{
	struct stm32_dcmi *dcmi = arg;

	spin_lock_irq(&dcmi->irqlock);

	/* Overruns are tolerated up to a threshold before counting as errors */
	if (dcmi->misr & IT_OVR) {
		dcmi->overrun_count++;
		if (dcmi->overrun_count > OVERRUN_ERROR_THRESHOLD)
			dcmi->errors_count++;
	}
	if (dcmi->misr & IT_ERR)
		dcmi->errors_count++;

	/* In JPEG mode, IT_FRAME marks end of frame: finalize the buffer */
	if (dcmi->sd_format->fourcc == V4L2_PIX_FMT_JPEG &&
	    dcmi->misr & IT_FRAME) {
		/* dcmi_process_jpeg() must run with irqlock released */
		spin_unlock_irq(&dcmi->irqlock);
		dcmi_process_jpeg(dcmi);
		return IRQ_HANDLED;
	}

	spin_unlock_irq(&dcmi->irqlock);
	return IRQ_HANDLED;
}
470
/*
 * Hard IRQ handler: latch the masked interrupt status for the threaded
 * handler, acknowledge the interrupts, then wake the thread.
 */
static irqreturn_t dcmi_irq_callback(int irq, void *arg)
{
	struct stm32_dcmi *dcmi = arg;
	unsigned long flags;

	spin_lock_irqsave(&dcmi->irqlock, flags);

	dcmi->misr = reg_read(dcmi->regs, DCMI_MIS);

	/* Clear interrupt */
	reg_set(dcmi->regs, DCMI_ICR, IT_FRAME | IT_OVR | IT_ERR);

	spin_unlock_irqrestore(&dcmi->irqlock, flags);

	return IRQ_WAKE_THREAD;
}
487
488static int dcmi_queue_setup(struct vb2_queue *vq,
489 unsigned int *nbuffers,
490 unsigned int *nplanes,
491 unsigned int sizes[],
492 struct device *alloc_devs[])
493{
494 struct stm32_dcmi *dcmi = vb2_get_drv_priv(vq);
495 unsigned int size;
496
497 size = dcmi->fmt.fmt.pix.sizeimage;
498
499
500 if (*nplanes)
501 return sizes[0] < size ? -EINVAL : 0;
502
503 *nplanes = 1;
504 sizes[0] = size;
505
506 dev_dbg(dcmi->dev, "Setup queue, count=%d, size=%d\n",
507 *nbuffers, size);
508
509 return 0;
510}
511
512static int dcmi_buf_init(struct vb2_buffer *vb)
513{
514 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
515 struct dcmi_buf *buf = container_of(vbuf, struct dcmi_buf, vb);
516
517 INIT_LIST_HEAD(&buf->list);
518
519 return 0;
520}
521
522static int dcmi_buf_prepare(struct vb2_buffer *vb)
523{
524 struct stm32_dcmi *dcmi = vb2_get_drv_priv(vb->vb2_queue);
525 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
526 struct dcmi_buf *buf = container_of(vbuf, struct dcmi_buf, vb);
527 unsigned long size;
528
529 size = dcmi->fmt.fmt.pix.sizeimage;
530
531 if (vb2_plane_size(vb, 0) < size) {
532 dev_err(dcmi->dev, "%s data will not fit into plane (%lu < %lu)\n",
533 __func__, vb2_plane_size(vb, 0), size);
534 return -EINVAL;
535 }
536
537 vb2_set_plane_payload(vb, 0, size);
538
539 if (!buf->prepared) {
540
541 buf->paddr =
542 vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
543 buf->size = vb2_plane_size(&buf->vb.vb2_buf, 0);
544 buf->prepared = true;
545
546 vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->size);
547
548 dev_dbg(dcmi->dev, "buffer[%d] phy=%pad size=%zu\n",
549 vb->index, &buf->paddr, buf->size);
550 }
551
552 return 0;
553}
554
/*
 * vb2 .buf_queue: add the buffer to the capture list; if streaming was
 * stalled waiting for a buffer, resume capture with this one.
 */
static void dcmi_buf_queue(struct vb2_buffer *vb)
{
	struct stm32_dcmi *dcmi = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct dcmi_buf *buf = container_of(vbuf, struct dcmi_buf, vb);

	spin_lock_irq(&dcmi->irqlock);

	/* Enqueue to video buffers list */
	list_add_tail(&buf->list, &dcmi->buffers);

	if (dcmi->state == WAIT_FOR_BUFFER) {
		dcmi->state = RUNNING;
		dcmi->active = buf;

		dev_dbg(dcmi->dev, "Starting capture on buffer[%d] queued\n",
			buf->vb.vb2_buf.index);

		/* dcmi_start_capture() must run with irqlock released */
		spin_unlock_irq(&dcmi->irqlock);
		if (dcmi_start_capture(dcmi, buf))
			dev_err(dcmi->dev, "%s: Cannot restart capture on overflow or error\n",
				__func__);
		return;
	}

	spin_unlock_irq(&dcmi->irqlock);
}
582
583static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
584{
585 struct stm32_dcmi *dcmi = vb2_get_drv_priv(vq);
586 struct dcmi_buf *buf, *node;
587 u32 val = 0;
588 int ret;
589
590 ret = pm_runtime_get_sync(dcmi->dev);
591 if (ret < 0) {
592 dev_err(dcmi->dev, "%s: Failed to start streaming, cannot get sync (%d)\n",
593 __func__, ret);
594 goto err_release_buffers;
595 }
596
597
598 ret = v4l2_subdev_call(dcmi->entity.subdev, video, s_stream, 1);
599 if (ret && ret != -ENOIOCTLCMD) {
600 dev_err(dcmi->dev, "%s: Failed to start streaming, subdev streamon error",
601 __func__);
602 goto err_pm_put;
603 }
604
605 spin_lock_irq(&dcmi->irqlock);
606
607
608 switch (dcmi->bus.bus_width) {
609 case 14:
610 val |= CR_EDM_0 | CR_EDM_1;
611 break;
612 case 12:
613 val |= CR_EDM_1;
614 break;
615 case 10:
616 val |= CR_EDM_0;
617 break;
618 default:
619
620 break;
621 }
622
623
624 if (dcmi->bus.flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
625 val |= CR_VSPOL;
626
627
628 if (dcmi->bus.flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
629 val |= CR_HSPOL;
630
631
632 if (dcmi->bus.flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
633 val |= CR_PCKPOL;
634
635 reg_write(dcmi->regs, DCMI_CR, val);
636
637
638 if (dcmi->do_crop)
639 dcmi_set_crop(dcmi);
640
641
642 if (dcmi->sd_format->fourcc == V4L2_PIX_FMT_JPEG)
643 reg_set(dcmi->regs, DCMI_CR, CR_CM);
644
645
646 reg_set(dcmi->regs, DCMI_CR, CR_ENABLE);
647
648 dcmi->sequence = 0;
649 dcmi->errors_count = 0;
650 dcmi->overrun_count = 0;
651 dcmi->buffers_count = 0;
652
653
654
655
656
657 if (list_empty(&dcmi->buffers)) {
658 dev_dbg(dcmi->dev, "Start streaming is deferred to next buffer queueing\n");
659 dcmi->state = WAIT_FOR_BUFFER;
660 spin_unlock_irq(&dcmi->irqlock);
661 return 0;
662 }
663
664 buf = list_entry(dcmi->buffers.next, struct dcmi_buf, list);
665 dcmi->active = buf;
666
667 dcmi->state = RUNNING;
668
669 dev_dbg(dcmi->dev, "Start streaming, starting capture\n");
670
671 spin_unlock_irq(&dcmi->irqlock);
672 ret = dcmi_start_capture(dcmi, buf);
673 if (ret) {
674 dev_err(dcmi->dev, "%s: Start streaming failed, cannot start capture\n",
675 __func__);
676 goto err_subdev_streamoff;
677 }
678
679
680 if (dcmi->sd_format->fourcc == V4L2_PIX_FMT_JPEG)
681 reg_set(dcmi->regs, DCMI_IER, IT_FRAME | IT_OVR | IT_ERR);
682 else
683 reg_set(dcmi->regs, DCMI_IER, IT_OVR | IT_ERR);
684
685 return 0;
686
687err_subdev_streamoff:
688 v4l2_subdev_call(dcmi->entity.subdev, video, s_stream, 0);
689
690err_pm_put:
691 pm_runtime_put(dcmi->dev);
692
693err_release_buffers:
694 spin_lock_irq(&dcmi->irqlock);
695
696
697
698
699 list_for_each_entry_safe(buf, node, &dcmi->buffers, list) {
700 list_del_init(&buf->list);
701 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
702 }
703 dcmi->active = NULL;
704 spin_unlock_irq(&dcmi->irqlock);
705
706 return ret;
707}
708
/*
 * vb2 .stop_streaming: stop the sensor stream, disable the DCMI,
 * return every queued buffer to vb2 in ERROR state, abort the DMA
 * transfer and drop the runtime PM reference.
 */
static void dcmi_stop_streaming(struct vb2_queue *vq)
{
	struct stm32_dcmi *dcmi = vb2_get_drv_priv(vq);
	struct dcmi_buf *buf, *node;
	int ret;

	/* Disable stream on the sub device */
	ret = v4l2_subdev_call(dcmi->entity.subdev, video, s_stream, 0);
	if (ret && ret != -ENOIOCTLCMD)
		dev_err(dcmi->dev, "%s: Failed to stop streaming, subdev streamoff error (%d)\n",
			__func__, ret);

	spin_lock_irq(&dcmi->irqlock);

	/* Disable interruptions */
	reg_clear(dcmi->regs, DCMI_IER, IT_FRAME | IT_OVR | IT_ERR);

	/* Disable DCMI */
	reg_clear(dcmi->regs, DCMI_CR, CR_ENABLE);

	/* Return all queued buffers to vb2 in ERROR state */
	list_for_each_entry_safe(buf, node, &dcmi->buffers, list) {
		list_del_init(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
	}

	dcmi->active = NULL;
	dcmi->state = STOPPED;

	spin_unlock_irq(&dcmi->irqlock);

	/* Stop all pending DMA operations (serialized against prep/submit) */
	mutex_lock(&dcmi->dma_lock);
	dmaengine_terminate_all(dcmi->dma_chan);
	mutex_unlock(&dcmi->dma_lock);

	pm_runtime_put(dcmi->dev);

	if (dcmi->errors_count)
		dev_warn(dcmi->dev, "Some errors found while streaming: errors=%d (overrun=%d), buffers=%d\n",
			 dcmi->errors_count, dcmi->overrun_count,
			 dcmi->buffers_count);
	dev_dbg(dcmi->dev, "Stop streaming, errors=%d (overrun=%d), buffers=%d\n",
		dcmi->errors_count, dcmi->overrun_count,
		dcmi->buffers_count);
}
755
/* videobuf2 queue operations */
static const struct vb2_ops dcmi_video_qops = {
	.queue_setup = dcmi_queue_setup,
	.buf_init = dcmi_buf_init,
	.buf_prepare = dcmi_buf_prepare,
	.buf_queue = dcmi_buf_queue,
	.start_streaming = dcmi_start_streaming,
	.stop_streaming = dcmi_stop_streaming,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
};
766
767static int dcmi_g_fmt_vid_cap(struct file *file, void *priv,
768 struct v4l2_format *fmt)
769{
770 struct stm32_dcmi *dcmi = video_drvdata(file);
771
772 *fmt = dcmi->fmt;
773
774 return 0;
775}
776
777static const struct dcmi_format *find_format_by_fourcc(struct stm32_dcmi *dcmi,
778 unsigned int fourcc)
779{
780 unsigned int num_formats = dcmi->num_of_sd_formats;
781 const struct dcmi_format *fmt;
782 unsigned int i;
783
784 for (i = 0; i < num_formats; i++) {
785 fmt = dcmi->sd_formats[i];
786 if (fmt->fourcc == fourcc)
787 return fmt;
788 }
789
790 return NULL;
791}
792
793static void __find_outer_frame_size(struct stm32_dcmi *dcmi,
794 struct v4l2_pix_format *pix,
795 struct dcmi_framesize *framesize)
796{
797 struct dcmi_framesize *match = NULL;
798 unsigned int i;
799 unsigned int min_err = UINT_MAX;
800
801 for (i = 0; i < dcmi->num_of_sd_framesizes; i++) {
802 struct dcmi_framesize *fsize = &dcmi->sd_framesizes[i];
803 int w_err = (fsize->width - pix->width);
804 int h_err = (fsize->height - pix->height);
805 int err = w_err + h_err;
806
807 if (w_err >= 0 && h_err >= 0 && err < min_err) {
808 min_err = err;
809 match = fsize;
810 }
811 }
812 if (!match)
813 match = &dcmi->sd_framesizes[0];
814
815 *framesize = *match;
816}
817
/*
 * Negotiate a capture format with the sensor without applying it.
 * On success, @f->fmt.pix holds the adjusted format; @sd_format and
 * @sd_framesize (both optional) receive the matching sensor format
 * and the frame size to request from the sensor.
 * NOTE: updates dcmi->crop as a side effect when cropping is active.
 */
static int dcmi_try_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f,
			const struct dcmi_format **sd_format,
			struct dcmi_framesize *sd_framesize)
{
	const struct dcmi_format *sd_fmt;
	struct dcmi_framesize sd_fsize;
	struct v4l2_pix_format *pix = &f->fmt.pix;
	struct v4l2_subdev_pad_config pad_cfg;
	struct v4l2_subdev_format format = {
		.which = V4L2_SUBDEV_FORMAT_TRY,
	};
	bool do_crop;
	int ret;

	sd_fmt = find_format_by_fourcc(dcmi, pix->pixelformat);
	if (!sd_fmt) {
		if (!dcmi->num_of_sd_formats)
			return -ENODATA;

		/* Fall back to the last enumerated format */
		sd_fmt = dcmi->sd_formats[dcmi->num_of_sd_formats - 1];
		pix->pixelformat = sd_fmt->fourcc;
	}

	/* Limit to hardware capabilities */
	pix->width = clamp(pix->width, MIN_WIDTH, MAX_WIDTH);
	pix->height = clamp(pix->height, MIN_HEIGHT, MAX_HEIGHT);

	/* No crop if JPEG is requested */
	do_crop = dcmi->do_crop && (pix->pixelformat != V4L2_PIX_FMT_JPEG);

	if (do_crop && dcmi->num_of_sd_framesizes) {
		struct dcmi_framesize outer_sd_fsize;

		/*
		 * Crop is mapped onto a sensor frame: request the
		 * smallest sensor frame size enclosing the crop window.
		 */
		__find_outer_frame_size(dcmi, pix, &outer_sd_fsize);
		pix->width = outer_sd_fsize.width;
		pix->height = outer_sd_fsize.height;
	}

	/* Ask the sensor what it can actually do (TRY, not applied) */
	v4l2_fill_mbus_format(&format.format, pix, sd_fmt->mbus_code);
	ret = v4l2_subdev_call(dcmi->entity.subdev, pad, set_fmt,
			       &pad_cfg, &format);
	if (ret < 0)
		return ret;

	/* Update pix with the sensor's adjusted values */
	v4l2_fill_pix_format(pix, &format.format);

	/* Save the resolution the sensor settled on */
	sd_fsize.width = pix->width;
	sd_fsize.height = pix->height;

	if (do_crop) {
		struct v4l2_rect c = dcmi->crop;
		struct v4l2_rect max_rect;

		/*
		 * Adjust the crop window to remain inside the (possibly
		 * changed) sensor resolution.
		 */
		max_rect.top = 0;
		max_rect.left = 0;
		max_rect.width = pix->width;
		max_rect.height = pix->height;
		v4l2_rect_map_inside(&c, &max_rect);
		c.top = clamp_t(s32, c.top, 0, pix->height - c.height);
		c.left = clamp_t(s32, c.left, 0, pix->width - c.width);
		dcmi->crop = c;

		/* The captured frame is the crop window */
		pix->width = dcmi->crop.width;
		pix->height = dcmi->crop.height;
	}

	pix->field = V4L2_FIELD_NONE;
	pix->bytesperline = pix->width * sd_fmt->bpp;
	pix->sizeimage = pix->bytesperline * pix->height;

	if (sd_format)
		*sd_format = sd_fmt;
	if (sd_framesize)
		*sd_framesize = sd_fsize;

	return 0;
}
905
/*
 * Apply a capture format: negotiate it with dcmi_try_fmt(), then set
 * the resulting mbus format on the sensor (ACTIVE) and record the new
 * format/frame size in the driver state.
 */
static int dcmi_set_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f)
{
	struct v4l2_subdev_format format = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	const struct dcmi_format *sd_format;
	struct dcmi_framesize sd_framesize;
	struct v4l2_mbus_framefmt *mf = &format.format;
	struct v4l2_pix_format *pix = &f->fmt.pix;
	int ret;

	/*
	 * Try the format first: this adjusts pix to what the hardware
	 * can do and yields the sensor format/frame size to apply.
	 */
	ret = dcmi_try_fmt(dcmi, f, &sd_format, &sd_framesize);
	if (ret)
		return ret;

	/* Disable crop if JPEG is requested */
	if (pix->pixelformat == V4L2_PIX_FMT_JPEG)
		dcmi->do_crop = false;

	/* Apply the negotiated format on the sensor (full sensor frame) */
	v4l2_fill_mbus_format(mf, pix,
			      sd_format->mbus_code);
	mf->width = sd_framesize.width;
	mf->height = sd_framesize.height;

	ret = v4l2_subdev_call(dcmi->entity.subdev, pad,
			       set_fmt, NULL, &format);
	if (ret < 0)
		return ret;

	dev_dbg(dcmi->dev, "Sensor format set to 0x%x %ux%u\n",
		mf->code, mf->width, mf->height);
	dev_dbg(dcmi->dev, "Buffer format set to %4.4s %ux%u\n",
		(char *)&pix->pixelformat,
		pix->width, pix->height);

	dcmi->fmt = *f;
	dcmi->sd_format = sd_format;
	dcmi->sd_framesize = sd_framesize;

	return 0;
}
954
955static int dcmi_s_fmt_vid_cap(struct file *file, void *priv,
956 struct v4l2_format *f)
957{
958 struct stm32_dcmi *dcmi = video_drvdata(file);
959
960 if (vb2_is_streaming(&dcmi->queue))
961 return -EBUSY;
962
963 return dcmi_set_fmt(dcmi, f);
964}
965
966static int dcmi_try_fmt_vid_cap(struct file *file, void *priv,
967 struct v4l2_format *f)
968{
969 struct stm32_dcmi *dcmi = video_drvdata(file);
970
971 return dcmi_try_fmt(dcmi, f, NULL, NULL);
972}
973
974static int dcmi_enum_fmt_vid_cap(struct file *file, void *priv,
975 struct v4l2_fmtdesc *f)
976{
977 struct stm32_dcmi *dcmi = video_drvdata(file);
978
979 if (f->index >= dcmi->num_of_sd_formats)
980 return -EINVAL;
981
982 f->pixelformat = dcmi->sd_formats[f->index]->fourcc;
983 return 0;
984}
985
986static int dcmi_get_sensor_format(struct stm32_dcmi *dcmi,
987 struct v4l2_pix_format *pix)
988{
989 struct v4l2_subdev_format fmt = {
990 .which = V4L2_SUBDEV_FORMAT_ACTIVE,
991 };
992 int ret;
993
994 ret = v4l2_subdev_call(dcmi->entity.subdev, pad, get_fmt, NULL, &fmt);
995 if (ret)
996 return ret;
997
998 v4l2_fill_pix_format(pix, &fmt.format);
999
1000 return 0;
1001}
1002
1003static int dcmi_set_sensor_format(struct stm32_dcmi *dcmi,
1004 struct v4l2_pix_format *pix)
1005{
1006 const struct dcmi_format *sd_fmt;
1007 struct v4l2_subdev_format format = {
1008 .which = V4L2_SUBDEV_FORMAT_TRY,
1009 };
1010 struct v4l2_subdev_pad_config pad_cfg;
1011 int ret;
1012
1013 sd_fmt = find_format_by_fourcc(dcmi, pix->pixelformat);
1014 if (!sd_fmt) {
1015 if (!dcmi->num_of_sd_formats)
1016 return -ENODATA;
1017
1018 sd_fmt = dcmi->sd_formats[dcmi->num_of_sd_formats - 1];
1019 pix->pixelformat = sd_fmt->fourcc;
1020 }
1021
1022 v4l2_fill_mbus_format(&format.format, pix, sd_fmt->mbus_code);
1023 ret = v4l2_subdev_call(dcmi->entity.subdev, pad, set_fmt,
1024 &pad_cfg, &format);
1025 if (ret < 0)
1026 return ret;
1027
1028 return 0;
1029}
1030
/*
 * Determine the sensor crop bounds @r, trying three sources in order:
 * 1) the sensor's CROP_BOUNDS selection target,
 * 2) the largest enumerated frame size (by pixel count),
 * 3) the sensor's current active format.
 */
static int dcmi_get_sensor_bounds(struct stm32_dcmi *dcmi,
				  struct v4l2_rect *r)
{
	struct v4l2_subdev_selection bounds = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.target = V4L2_SEL_TGT_CROP_BOUNDS,
	};
	unsigned int max_width, max_height, max_pixsize;
	struct v4l2_pix_format pix;
	unsigned int i;
	int ret;

	/*
	 * Ask the sensor directly; only fall through to the other
	 * strategies when the op is not implemented (-ENOIOCTLCMD).
	 */
	ret = v4l2_subdev_call(dcmi->entity.subdev, pad, get_selection,
			       NULL, &bounds);
	if (!ret)
		*r = bounds.r;
	if (ret != -ENOIOCTLCMD)
		return ret;

	/*
	 * Fallback 1: use the largest frame size (by area) among the
	 * enumerated sensor frame sizes.
	 */
	max_width = 0;
	max_height = 0;
	max_pixsize = 0;
	for (i = 0; i < dcmi->num_of_sd_framesizes; i++) {
		struct dcmi_framesize *fsize = &dcmi->sd_framesizes[i];
		unsigned int pixsize = fsize->width * fsize->height;

		if (pixsize > max_pixsize) {
			max_pixsize = pixsize;
			max_width = fsize->width;
			max_height = fsize->height;
		}
	}
	if (max_pixsize > 0) {
		r->top = 0;
		r->left = 0;
		r->width = max_width;
		r->height = max_height;
		return 0;
	}

	/*
	 * Fallback 2: no frame sizes known either; use the sensor's
	 * current resolution as the bounds.
	 */
	ret = dcmi_get_sensor_format(dcmi, &pix);
	if (ret)
		return ret;

	r->top = 0;
	r->left = 0;
	r->width = pix.width;
	r->height = pix.height;

	return 0;
}
1094
1095static int dcmi_g_selection(struct file *file, void *fh,
1096 struct v4l2_selection *s)
1097{
1098 struct stm32_dcmi *dcmi = video_drvdata(file);
1099
1100 if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1101 return -EINVAL;
1102
1103 switch (s->target) {
1104 case V4L2_SEL_TGT_CROP_DEFAULT:
1105 case V4L2_SEL_TGT_CROP_BOUNDS:
1106 s->r = dcmi->sd_bounds;
1107 return 0;
1108 case V4L2_SEL_TGT_CROP:
1109 if (dcmi->do_crop) {
1110 s->r = dcmi->crop;
1111 } else {
1112 s->r.top = 0;
1113 s->r.left = 0;
1114 s->r.width = dcmi->fmt.fmt.pix.width;
1115 s->r.height = dcmi->fmt.fmt.pix.height;
1116 }
1117 break;
1118 default:
1119 return -EINVAL;
1120 }
1121
1122 return 0;
1123}
1124
/*
 * VIDIOC_S_SELECTION: set (or disable) the crop window. The requested
 * rectangle is clamped within the sensor frame; if it equals the full
 * sensor bounds, cropping is disabled instead.
 */
static int dcmi_s_selection(struct file *file, void *priv,
			    struct v4l2_selection *s)
{
	struct stm32_dcmi *dcmi = video_drvdata(file);
	struct v4l2_rect r = s->r;
	struct v4l2_rect max_rect;
	struct v4l2_pix_format pix;

	if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
	    s->target != V4L2_SEL_TGT_CROP)
		return -EINVAL;

	/* Probe what the sensor yields at full-bounds resolution */
	pix.pixelformat = dcmi->fmt.fmt.pix.pixelformat;
	pix.width = dcmi->sd_bounds.width;
	pix.height = dcmi->sd_bounds.height;
	dcmi_set_sensor_format(dcmi, &pix);

	/*
	 * Clamp the crop rectangle inside the (possibly adjusted)
	 * sensor frame.
	 */
	max_rect.top = 0;
	max_rect.left = 0;
	max_rect.width = pix.width;
	max_rect.height = pix.height;
	v4l2_rect_map_inside(&r, &max_rect);
	r.top = clamp_t(s32, r.top, 0, pix.height - r.height);
	r.left = clamp_t(s32, r.left, 0, pix.width - r.width);

	if (!(r.top == dcmi->sd_bounds.top &&
	      r.left == dcmi->sd_bounds.left &&
	      r.width == dcmi->sd_bounds.width &&
	      r.height == dcmi->sd_bounds.height)) {
		/* Crop if request is different than sensor resolution */
		dcmi->do_crop = true;
		dcmi->crop = r;
		dev_dbg(dcmi->dev, "s_selection: crop %ux%u@(%u,%u) from %ux%u\n",
			r.width, r.height, r.left, r.top,
			pix.width, pix.height);
	} else {
		/* Disable crop */
		dcmi->do_crop = false;
		dev_dbg(dcmi->dev, "s_selection: crop is disabled\n");
	}

	s->r = r;
	return 0;
}
1175
1176static int dcmi_querycap(struct file *file, void *priv,
1177 struct v4l2_capability *cap)
1178{
1179 strscpy(cap->driver, DRV_NAME, sizeof(cap->driver));
1180 strscpy(cap->card, "STM32 Camera Memory Interface",
1181 sizeof(cap->card));
1182 strscpy(cap->bus_info, "platform:dcmi", sizeof(cap->bus_info));
1183 return 0;
1184}
1185
1186static int dcmi_enum_input(struct file *file, void *priv,
1187 struct v4l2_input *i)
1188{
1189 if (i->index != 0)
1190 return -EINVAL;
1191
1192 i->type = V4L2_INPUT_TYPE_CAMERA;
1193 strscpy(i->name, "Camera", sizeof(i->name));
1194 return 0;
1195}
1196
/* VIDIOC_G_INPUT: only input 0 exists */
static int dcmi_g_input(struct file *file, void *priv, unsigned int *i)
{
	*i = 0;
	return 0;
}
1202
1203static int dcmi_s_input(struct file *file, void *priv, unsigned int i)
1204{
1205 if (i > 0)
1206 return -EINVAL;
1207 return 0;
1208}
1209
1210static int dcmi_enum_framesizes(struct file *file, void *fh,
1211 struct v4l2_frmsizeenum *fsize)
1212{
1213 struct stm32_dcmi *dcmi = video_drvdata(file);
1214 const struct dcmi_format *sd_fmt;
1215 struct v4l2_subdev_frame_size_enum fse = {
1216 .index = fsize->index,
1217 .which = V4L2_SUBDEV_FORMAT_ACTIVE,
1218 };
1219 int ret;
1220
1221 sd_fmt = find_format_by_fourcc(dcmi, fsize->pixel_format);
1222 if (!sd_fmt)
1223 return -EINVAL;
1224
1225 fse.code = sd_fmt->mbus_code;
1226
1227 ret = v4l2_subdev_call(dcmi->entity.subdev, pad, enum_frame_size,
1228 NULL, &fse);
1229 if (ret)
1230 return ret;
1231
1232 fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
1233 fsize->discrete.width = fse.max_width;
1234 fsize->discrete.height = fse.max_height;
1235
1236 return 0;
1237}
1238
1239static int dcmi_g_parm(struct file *file, void *priv,
1240 struct v4l2_streamparm *p)
1241{
1242 struct stm32_dcmi *dcmi = video_drvdata(file);
1243
1244 return v4l2_g_parm_cap(video_devdata(file), dcmi->entity.subdev, p);
1245}
1246
1247static int dcmi_s_parm(struct file *file, void *priv,
1248 struct v4l2_streamparm *p)
1249{
1250 struct stm32_dcmi *dcmi = video_drvdata(file);
1251
1252 return v4l2_s_parm_cap(video_devdata(file), dcmi->entity.subdev, p);
1253}
1254
1255static int dcmi_enum_frameintervals(struct file *file, void *fh,
1256 struct v4l2_frmivalenum *fival)
1257{
1258 struct stm32_dcmi *dcmi = video_drvdata(file);
1259 const struct dcmi_format *sd_fmt;
1260 struct v4l2_subdev_frame_interval_enum fie = {
1261 .index = fival->index,
1262 .width = fival->width,
1263 .height = fival->height,
1264 .which = V4L2_SUBDEV_FORMAT_ACTIVE,
1265 };
1266 int ret;
1267
1268 sd_fmt = find_format_by_fourcc(dcmi, fival->pixel_format);
1269 if (!sd_fmt)
1270 return -EINVAL;
1271
1272 fie.code = sd_fmt->mbus_code;
1273
1274 ret = v4l2_subdev_call(dcmi->entity.subdev, pad,
1275 enum_frame_interval, NULL, &fie);
1276 if (ret)
1277 return ret;
1278
1279 fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
1280 fival->discrete = fie.interval;
1281
1282 return 0;
1283}
1284
/* Device tree match table */
static const struct of_device_id stm32_dcmi_of_match[] = {
	{ .compatible = "st,stm32-dcmi"},
	{ },
};
MODULE_DEVICE_TABLE(of, stm32_dcmi_of_match);
1290
/*
 * V4L2 .open: on first open, power up the sensor and re-apply the
 * current format; later opens just register a file handle.
 */
static int dcmi_open(struct file *file)
{
	struct stm32_dcmi *dcmi = video_drvdata(file);
	struct v4l2_subdev *sd = dcmi->entity.subdev;
	int ret;

	if (mutex_lock_interruptible(&dcmi->lock))
		return -ERESTARTSYS;

	ret = v4l2_fh_open(file);
	if (ret < 0)
		goto unlock;

	/* Only the first file handle powers up the sensor */
	if (!v4l2_fh_is_singular_file(file))
		goto fh_rel;

	ret = v4l2_subdev_call(sd, core, s_power, 1);
	if (ret < 0 && ret != -ENOIOCTLCMD)
		goto fh_rel;

	/* Re-apply the current format; power back down on failure */
	ret = dcmi_set_fmt(dcmi, &dcmi->fmt);
	if (ret)
		v4l2_subdev_call(sd, core, s_power, 0);
fh_rel:
	/* ret may be >= 0 here (s_power -ENOIOCTLCMD is not an error) */
	if (ret)
		v4l2_fh_release(file);
unlock:
	mutex_unlock(&dcmi->lock);
	return ret;
}
1321
/*
 * V4L2 .release: on last close, power down the sensor after releasing
 * the vb2 file handle.
 */
static int dcmi_release(struct file *file)
{
	struct stm32_dcmi *dcmi = video_drvdata(file);
	struct v4l2_subdev *sd = dcmi->entity.subdev;
	bool fh_singular;
	int ret;

	mutex_lock(&dcmi->lock);

	/* Check singularity BEFORE the release drops the file handle */
	fh_singular = v4l2_fh_is_singular_file(file);

	ret = _vb2_fop_release(file, NULL);

	/* Last close: power down the sensor */
	if (fh_singular)
		v4l2_subdev_call(sd, core, s_power, 0);

	mutex_unlock(&dcmi->lock);

	return ret;
}
1342
/* V4L2 ioctl operations of the DCMI video device */
static const struct v4l2_ioctl_ops dcmi_ioctl_ops = {
	.vidioc_querycap = dcmi_querycap,

	/* Format negotiation and selection (crop) */
	.vidioc_try_fmt_vid_cap = dcmi_try_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap = dcmi_g_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap = dcmi_s_fmt_vid_cap,
	.vidioc_enum_fmt_vid_cap = dcmi_enum_fmt_vid_cap,
	.vidioc_g_selection = dcmi_g_selection,
	.vidioc_s_selection = dcmi_s_selection,

	/* Input selection (single camera input) */
	.vidioc_enum_input = dcmi_enum_input,
	.vidioc_g_input = dcmi_g_input,
	.vidioc_s_input = dcmi_s_input,

	/* Streaming parameters (frame rate), forwarded to the sensor */
	.vidioc_g_parm = dcmi_g_parm,
	.vidioc_s_parm = dcmi_s_parm,

	/* Frame size / interval enumeration, forwarded to the sensor */
	.vidioc_enum_framesizes = dcmi_enum_framesizes,
	.vidioc_enum_frameintervals = dcmi_enum_frameintervals,

	/* Buffer management: standard videobuf2 helpers */
	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,

	/* Logging and event subscription (control events) */
	.vidioc_log_status = v4l2_ctrl_log_status,
	.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
1377
/* File operations of the DCMI video device (vb2-backed) */
static const struct v4l2_file_operations dcmi_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = dcmi_open,
	.release = dcmi_release,
	.poll = vb2_fop_poll,
	.mmap = vb2_fop_mmap,
#ifndef CONFIG_MMU
	/* NOMMU systems need help placing the mapping */
	.get_unmapped_area = vb2_fop_get_unmapped_area,
#endif
	.read = vb2_fop_read,
};
1390
1391static int dcmi_set_default_fmt(struct stm32_dcmi *dcmi)
1392{
1393 struct v4l2_format f = {
1394 .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
1395 .fmt.pix = {
1396 .width = CIF_WIDTH,
1397 .height = CIF_HEIGHT,
1398 .field = V4L2_FIELD_NONE,
1399 .pixelformat = dcmi->sd_formats[0]->fourcc,
1400 },
1401 };
1402 int ret;
1403
1404 ret = dcmi_try_fmt(dcmi, &f, NULL, NULL);
1405 if (ret)
1406 return ret;
1407 dcmi->sd_format = dcmi->sd_formats[0];
1408 dcmi->fmt = f;
1409 return 0;
1410}
1411
/*
 * Pixel formats the DCMI bridge can capture, with the matching media
 * bus code expected from the sensor and the bytes-per-pixel factor
 * used for buffer size computation.
 */
static const struct dcmi_format dcmi_formats[] = {
	{
		.fourcc = V4L2_PIX_FMT_RGB565,
		.mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE,
		.bpp = 2,
	}, {
		.fourcc = V4L2_PIX_FMT_YUYV,
		.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
		.bpp = 2,
	}, {
		.fourcc = V4L2_PIX_FMT_UYVY,
		.mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
		.bpp = 2,
	}, {
		.fourcc = V4L2_PIX_FMT_JPEG,
		.mbus_code = MEDIA_BUS_FMT_JPEG_1X8,
		.bpp = 1,
	},
};
1431
1432static int dcmi_formats_init(struct stm32_dcmi *dcmi)
1433{
1434 const struct dcmi_format *sd_fmts[ARRAY_SIZE(dcmi_formats)];
1435 unsigned int num_fmts = 0, i, j;
1436 struct v4l2_subdev *subdev = dcmi->entity.subdev;
1437 struct v4l2_subdev_mbus_code_enum mbus_code = {
1438 .which = V4L2_SUBDEV_FORMAT_ACTIVE,
1439 };
1440
1441 while (!v4l2_subdev_call(subdev, pad, enum_mbus_code,
1442 NULL, &mbus_code)) {
1443 for (i = 0; i < ARRAY_SIZE(dcmi_formats); i++) {
1444 if (dcmi_formats[i].mbus_code != mbus_code.code)
1445 continue;
1446
1447
1448 for (j = 0; j < num_fmts; j++)
1449 if (sd_fmts[j]->fourcc ==
1450 dcmi_formats[i].fourcc)
1451
1452 break;
1453 if (j == num_fmts)
1454
1455 sd_fmts[num_fmts++] = dcmi_formats + i;
1456 }
1457 mbus_code.index++;
1458 }
1459
1460 if (!num_fmts)
1461 return -ENXIO;
1462
1463 dcmi->num_of_sd_formats = num_fmts;
1464 dcmi->sd_formats = devm_kcalloc(dcmi->dev,
1465 num_fmts, sizeof(struct dcmi_format *),
1466 GFP_KERNEL);
1467 if (!dcmi->sd_formats) {
1468 dev_err(dcmi->dev, "Could not allocate memory\n");
1469 return -ENOMEM;
1470 }
1471
1472 memcpy(dcmi->sd_formats, sd_fmts,
1473 num_fmts * sizeof(struct dcmi_format *));
1474 dcmi->sd_format = dcmi->sd_formats[0];
1475
1476 return 0;
1477}
1478
1479static int dcmi_framesizes_init(struct stm32_dcmi *dcmi)
1480{
1481 unsigned int num_fsize = 0;
1482 struct v4l2_subdev *subdev = dcmi->entity.subdev;
1483 struct v4l2_subdev_frame_size_enum fse = {
1484 .which = V4L2_SUBDEV_FORMAT_ACTIVE,
1485 .code = dcmi->sd_format->mbus_code,
1486 };
1487 unsigned int ret;
1488 unsigned int i;
1489
1490
1491 while (!v4l2_subdev_call(subdev, pad, enum_frame_size,
1492 NULL, &fse))
1493 fse.index++;
1494
1495 num_fsize = fse.index;
1496 if (!num_fsize)
1497 return 0;
1498
1499 dcmi->num_of_sd_framesizes = num_fsize;
1500 dcmi->sd_framesizes = devm_kcalloc(dcmi->dev, num_fsize,
1501 sizeof(struct dcmi_framesize),
1502 GFP_KERNEL);
1503 if (!dcmi->sd_framesizes) {
1504 dev_err(dcmi->dev, "Could not allocate memory\n");
1505 return -ENOMEM;
1506 }
1507
1508
1509 dev_dbg(dcmi->dev, "Sensor supports %u frame sizes:\n", num_fsize);
1510 for (i = 0; i < dcmi->num_of_sd_framesizes; i++) {
1511 fse.index = i;
1512 ret = v4l2_subdev_call(subdev, pad, enum_frame_size,
1513 NULL, &fse);
1514 if (ret)
1515 return ret;
1516 dcmi->sd_framesizes[fse.index].width = fse.max_width;
1517 dcmi->sd_framesizes[fse.index].height = fse.max_height;
1518 dev_dbg(dcmi->dev, "%ux%u\n", fse.max_width, fse.max_height);
1519 }
1520
1521 return 0;
1522}
1523
/*
 * Async notifier .complete callback: runs once the sensor subdev has
 * been bound. Inherits the sensor's controls, builds the format and
 * frame-size lists, picks a default format, then registers the video
 * device so userspace can finally open it.
 */
static int dcmi_graph_notify_complete(struct v4l2_async_notifier *notifier)
{
	struct stm32_dcmi *dcmi = notifier_to_dcmi(notifier);
	int ret;

	/* Expose the sensor's controls through the DCMI video node */
	dcmi->vdev->ctrl_handler = dcmi->entity.subdev->ctrl_handler;
	ret = dcmi_formats_init(dcmi);
	if (ret) {
		dev_err(dcmi->dev, "No supported mediabus format found\n");
		return ret;
	}

	ret = dcmi_framesizes_init(dcmi);
	if (ret) {
		dev_err(dcmi->dev, "Could not initialize framesizes\n");
		return ret;
	}

	ret = dcmi_get_sensor_bounds(dcmi, &dcmi->sd_bounds);
	if (ret) {
		dev_err(dcmi->dev, "Could not get sensor bounds\n");
		return ret;
	}

	ret = dcmi_set_default_fmt(dcmi);
	if (ret) {
		dev_err(dcmi->dev, "Could not set default format\n");
		return ret;
	}

	ret = video_register_device(dcmi->vdev, VFL_TYPE_GRABBER, -1);
	if (ret) {
		dev_err(dcmi->dev, "Failed to register video device\n");
		return ret;
	}

	dev_dbg(dcmi->dev, "Device registered as %s\n",
		video_device_node_name(dcmi->vdev));
	return 0;
}
1564
/*
 * Async notifier .unbind callback: the sensor subdev is going away, so
 * take the video device away from userspace.
 */
static void dcmi_graph_notify_unbind(struct v4l2_async_notifier *notifier,
				     struct v4l2_subdev *sd,
				     struct v4l2_async_subdev *asd)
{
	struct stm32_dcmi *dcmi = notifier_to_dcmi(notifier);

	dev_dbg(dcmi->dev, "Removing %s\n", video_device_node_name(dcmi->vdev));

	/* Safe even if the device was never registered */
	video_unregister_device(dcmi->vdev);
}
1576
/*
 * Async notifier .bound callback: remember the sensor subdev so the
 * rest of the driver can forward calls to it.
 */
static int dcmi_graph_notify_bound(struct v4l2_async_notifier *notifier,
				   struct v4l2_subdev *subdev,
				   struct v4l2_async_subdev *asd)
{
	struct stm32_dcmi *dcmi = notifier_to_dcmi(notifier);

	dev_dbg(dcmi->dev, "Subdev %s bound\n", subdev->name);

	dcmi->entity.subdev = subdev;

	return 0;
}
1589
/* Async notifier callbacks tracking the sensor subdev lifecycle */
static const struct v4l2_async_notifier_operations dcmi_graph_notify_ops = {
	.bound = dcmi_graph_notify_bound,
	.unbind = dcmi_graph_notify_unbind,
	.complete = dcmi_graph_notify_complete,
};
1595
/*
 * Walk the DT graph: take the first endpoint of the DCMI node, resolve
 * its remote port parent (the sensor node) and record it as the async
 * subdev fwnode match. The reference on the remote node is kept in
 * dcmi->entity.node and released on error/teardown by the caller.
 */
static int dcmi_graph_parse(struct stm32_dcmi *dcmi, struct device_node *node)
{
	struct device_node *ep = NULL;
	struct device_node *remote;

	ep = of_graph_get_next_endpoint(node, ep);
	if (!ep)
		return -EINVAL;

	remote = of_graph_get_remote_port_parent(ep);
	of_node_put(ep);
	if (!remote)
		return -EINVAL;

	/* Remote node holds the sensor to be matched asynchronously */
	dcmi->entity.node = remote;
	dcmi->entity.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
	dcmi->entity.asd.match.fwnode = of_fwnode_handle(remote);
	return 0;
}
1616
/*
 * Parse the DT graph and register an async notifier waiting for the
 * remote sensor subdev. On success the notifier owns its cleanup; on
 * failure every resource acquired here is released before returning.
 */
static int dcmi_graph_init(struct stm32_dcmi *dcmi)
{
	int ret;

	/* Resolve the remote sensor node from the DT graph */
	ret = dcmi_graph_parse(dcmi, dcmi->dev->of_node);
	if (ret < 0) {
		dev_err(dcmi->dev, "Failed to parse graph\n");
		return ret;
	}

	v4l2_async_notifier_init(&dcmi->notifier);

	ret = v4l2_async_notifier_add_subdev(&dcmi->notifier,
					     &dcmi->entity.asd);
	if (ret) {
		dev_err(dcmi->dev, "Failed to add subdev notifier\n");
		/* Drop the reference taken by dcmi_graph_parse() */
		of_node_put(dcmi->entity.node);
		return ret;
	}

	dcmi->notifier.ops = &dcmi_graph_notify_ops;

	ret = v4l2_async_notifier_register(&dcmi->v4l2_dev, &dcmi->notifier);
	if (ret < 0) {
		dev_err(dcmi->dev, "Failed to register notifier\n");
		v4l2_async_notifier_cleanup(&dcmi->notifier);
		return ret;
	}

	return 0;
}
1649
1650static int dcmi_probe(struct platform_device *pdev)
1651{
1652 struct device_node *np = pdev->dev.of_node;
1653 const struct of_device_id *match = NULL;
1654 struct v4l2_fwnode_endpoint ep = { .bus_type = 0 };
1655 struct stm32_dcmi *dcmi;
1656 struct vb2_queue *q;
1657 struct dma_chan *chan;
1658 struct clk *mclk;
1659 int irq;
1660 int ret = 0;
1661
1662 match = of_match_device(of_match_ptr(stm32_dcmi_of_match), &pdev->dev);
1663 if (!match) {
1664 dev_err(&pdev->dev, "Could not find a match in devicetree\n");
1665 return -ENODEV;
1666 }
1667
1668 dcmi = devm_kzalloc(&pdev->dev, sizeof(struct stm32_dcmi), GFP_KERNEL);
1669 if (!dcmi)
1670 return -ENOMEM;
1671
1672 dcmi->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
1673 if (IS_ERR(dcmi->rstc)) {
1674 dev_err(&pdev->dev, "Could not get reset control\n");
1675 return PTR_ERR(dcmi->rstc);
1676 }
1677
1678
1679 np = of_graph_get_next_endpoint(np, NULL);
1680 if (!np) {
1681 dev_err(&pdev->dev, "Could not find the endpoint\n");
1682 of_node_put(np);
1683 return -ENODEV;
1684 }
1685
1686 ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(np), &ep);
1687 of_node_put(np);
1688 if (ret) {
1689 dev_err(&pdev->dev, "Could not parse the endpoint\n");
1690 return ret;
1691 }
1692
1693 if (ep.bus_type == V4L2_MBUS_CSI2_DPHY) {
1694 dev_err(&pdev->dev, "CSI bus not supported\n");
1695 return -ENODEV;
1696 }
1697 dcmi->bus.flags = ep.bus.parallel.flags;
1698 dcmi->bus.bus_width = ep.bus.parallel.bus_width;
1699 dcmi->bus.data_shift = ep.bus.parallel.data_shift;
1700
1701 irq = platform_get_irq(pdev, 0);
1702 if (irq <= 0) {
1703 if (irq != -EPROBE_DEFER)
1704 dev_err(&pdev->dev, "Could not get irq\n");
1705 return irq ? irq : -ENXIO;
1706 }
1707
1708 dcmi->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1709 if (!dcmi->res) {
1710 dev_err(&pdev->dev, "Could not get resource\n");
1711 return -ENODEV;
1712 }
1713
1714 dcmi->regs = devm_ioremap_resource(&pdev->dev, dcmi->res);
1715 if (IS_ERR(dcmi->regs)) {
1716 dev_err(&pdev->dev, "Could not map registers\n");
1717 return PTR_ERR(dcmi->regs);
1718 }
1719
1720 ret = devm_request_threaded_irq(&pdev->dev, irq, dcmi_irq_callback,
1721 dcmi_irq_thread, IRQF_ONESHOT,
1722 dev_name(&pdev->dev), dcmi);
1723 if (ret) {
1724 dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
1725 return ret;
1726 }
1727
1728 mclk = devm_clk_get(&pdev->dev, "mclk");
1729 if (IS_ERR(mclk)) {
1730 if (PTR_ERR(mclk) != -EPROBE_DEFER)
1731 dev_err(&pdev->dev, "Unable to get mclk\n");
1732 return PTR_ERR(mclk);
1733 }
1734
1735 chan = dma_request_slave_channel(&pdev->dev, "tx");
1736 if (!chan) {
1737 dev_info(&pdev->dev, "Unable to request DMA channel, defer probing\n");
1738 return -EPROBE_DEFER;
1739 }
1740
1741 spin_lock_init(&dcmi->irqlock);
1742 mutex_init(&dcmi->lock);
1743 mutex_init(&dcmi->dma_lock);
1744 init_completion(&dcmi->complete);
1745 INIT_LIST_HEAD(&dcmi->buffers);
1746
1747 dcmi->dev = &pdev->dev;
1748 dcmi->mclk = mclk;
1749 dcmi->state = STOPPED;
1750 dcmi->dma_chan = chan;
1751
1752 q = &dcmi->queue;
1753
1754
1755 ret = v4l2_device_register(&pdev->dev, &dcmi->v4l2_dev);
1756 if (ret)
1757 goto err_dma_release;
1758
1759 dcmi->vdev = video_device_alloc();
1760 if (!dcmi->vdev) {
1761 ret = -ENOMEM;
1762 goto err_device_unregister;
1763 }
1764
1765
1766 dcmi->vdev->fops = &dcmi_fops;
1767 dcmi->vdev->v4l2_dev = &dcmi->v4l2_dev;
1768 dcmi->vdev->queue = &dcmi->queue;
1769 strscpy(dcmi->vdev->name, KBUILD_MODNAME, sizeof(dcmi->vdev->name));
1770 dcmi->vdev->release = video_device_release;
1771 dcmi->vdev->ioctl_ops = &dcmi_ioctl_ops;
1772 dcmi->vdev->lock = &dcmi->lock;
1773 dcmi->vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
1774 V4L2_CAP_READWRITE;
1775 video_set_drvdata(dcmi->vdev, dcmi);
1776
1777
1778 q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1779 q->io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF;
1780 q->lock = &dcmi->lock;
1781 q->drv_priv = dcmi;
1782 q->buf_struct_size = sizeof(struct dcmi_buf);
1783 q->ops = &dcmi_video_qops;
1784 q->mem_ops = &vb2_dma_contig_memops;
1785 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1786 q->min_buffers_needed = 2;
1787 q->dev = &pdev->dev;
1788
1789 ret = vb2_queue_init(q);
1790 if (ret < 0) {
1791 dev_err(&pdev->dev, "Failed to initialize vb2 queue\n");
1792 goto err_device_release;
1793 }
1794
1795 ret = dcmi_graph_init(dcmi);
1796 if (ret < 0)
1797 goto err_device_release;
1798
1799
1800 ret = reset_control_assert(dcmi->rstc);
1801 if (ret) {
1802 dev_err(&pdev->dev, "Failed to assert the reset line\n");
1803 goto err_cleanup;
1804 }
1805
1806 usleep_range(3000, 5000);
1807
1808 ret = reset_control_deassert(dcmi->rstc);
1809 if (ret) {
1810 dev_err(&pdev->dev, "Failed to deassert the reset line\n");
1811 goto err_cleanup;
1812 }
1813
1814 dev_info(&pdev->dev, "Probe done\n");
1815
1816 platform_set_drvdata(pdev, dcmi);
1817
1818 pm_runtime_enable(&pdev->dev);
1819
1820 return 0;
1821
1822err_cleanup:
1823 v4l2_async_notifier_cleanup(&dcmi->notifier);
1824err_device_release:
1825 video_device_release(dcmi->vdev);
1826err_device_unregister:
1827 v4l2_device_unregister(&dcmi->v4l2_dev);
1828err_dma_release:
1829 dma_release_channel(dcmi->dma_chan);
1830
1831 return ret;
1832}
1833
/*
 * Remove: tear down in reverse probe order. The video device itself is
 * unregistered by the notifier's unbind callback during
 * v4l2_async_notifier_unregister(); devm handles IRQ/regs/clock.
 */
static int dcmi_remove(struct platform_device *pdev)
{
	struct stm32_dcmi *dcmi = platform_get_drvdata(pdev);

	pm_runtime_disable(&pdev->dev);

	v4l2_async_notifier_unregister(&dcmi->notifier);
	v4l2_async_notifier_cleanup(&dcmi->notifier);
	v4l2_device_unregister(&dcmi->v4l2_dev);

	dma_release_channel(dcmi->dma_chan);

	return 0;
}
1848
/* Runtime PM: gate the sensor master clock while the device is idle */
static __maybe_unused int dcmi_runtime_suspend(struct device *dev)
{
	struct stm32_dcmi *dcmi = dev_get_drvdata(dev);

	clk_disable_unprepare(dcmi->mclk);

	return 0;
}
1857
/* Runtime PM: re-enable the sensor master clock before use */
static __maybe_unused int dcmi_runtime_resume(struct device *dev)
{
	struct stm32_dcmi *dcmi = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(dcmi->mclk);
	if (ret)
		dev_err(dev, "%s: Failed to prepare_enable clock\n", __func__);

	return ret;
}
1869
/*
 * System sleep: force runtime suspend (gates mclk), then move the pins
 * to their sleep state.
 * NOTE(review): both return values are ignored and 0 is always
 * returned — confirm suspend is meant to be best-effort here.
 */
static __maybe_unused int dcmi_suspend(struct device *dev)
{
	/* disable clock */
	pm_runtime_force_suspend(dev);

	/* change pinctrl state */
	pinctrl_pm_select_sleep_state(dev);

	return 0;
}
1880
/*
 * System wake: restore the default pin state first, then force runtime
 * resume (re-enables mclk). Mirror image of dcmi_suspend().
 */
static __maybe_unused int dcmi_resume(struct device *dev)
{
	/* restore pinctl default state */
	pinctrl_pm_select_default_state(dev);

	/* clock enable */
	pm_runtime_force_resume(dev);

	return 0;
}
1891
/* System-sleep and runtime PM operations */
static const struct dev_pm_ops dcmi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dcmi_suspend, dcmi_resume)
	SET_RUNTIME_PM_OPS(dcmi_runtime_suspend,
			   dcmi_runtime_resume, NULL)
};
1897
/* Platform driver glue: DT match table plus PM callbacks */
static struct platform_driver stm32_dcmi_driver = {
	.probe = dcmi_probe,
	.remove = dcmi_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = of_match_ptr(stm32_dcmi_of_match),
		.pm = &dcmi_pm_ops,
	},
};
1907
1908module_platform_driver(stm32_dcmi_driver);
1909
1910MODULE_AUTHOR("Yannick Fertre <yannick.fertre@st.com>");
1911MODULE_AUTHOR("Hugues Fruchet <hugues.fruchet@st.com>");
1912MODULE_DESCRIPTION("STMicroelectronics STM32 Digital Camera Memory Interface driver");
1913MODULE_LICENSE("GPL");
1914MODULE_SUPPORTED_DEVICE("video");
1915