linux/drivers/staging/media/ipu3/ipu3.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 - 2018 Intel Corporation
 * Copyright 2017 Google LLC
 *
 * Based on Intel IPU4 driver.
 *
 */

#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>

#include "ipu3.h"
#include "ipu3-dmamap.h"
#include "ipu3-mmu.h"

#define IMGU_PCI_ID                     0x1919
#define IMGU_PCI_BAR                    0
#define IMGU_DMA_MASK                   DMA_BIT_MASK(39)
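/*
 * Depth of the per-queue dummy buffer ring below. The 2 + 2 split is not
 * documented here, but presumably covers two buffers queued to the CSS
 * plus two spares.
 */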
#define IMGU_MAX_QUEUE_DEPTH            (2 + 2)

/*
 * Pre-allocated buffer sizes for the IMGU dummy buffers. These values
 * should be tuned large enough to avoid buffer reallocation while
 * streaming, in order to reduce streaming latency.
 */
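/*
 * The OUT and VF sizes below presumably cover a worst-case frame
 * (4160x3120 for OUT, 1920x1080 for VF) at 12 bits per pixel, i.e. a
 * 4:2:0 layout with 1.5 bytes per pixel.
 */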
#define CSS_QUEUE_IN_BUF_SIZE           0
#define CSS_QUEUE_PARAMS_BUF_SIZE       0
#define CSS_QUEUE_OUT_BUF_SIZE          (4160 * 3120 * 12 / 8)
#define CSS_QUEUE_VF_BUF_SIZE           (1920 * 1080 * 12 / 8)
#define CSS_QUEUE_STAT_3A_BUF_SIZE      sizeof(struct ipu3_uapi_stats_3a)

static const size_t css_queue_buf_size_map[IPU3_CSS_QUEUES] = {
        [IPU3_CSS_QUEUE_IN] = CSS_QUEUE_IN_BUF_SIZE,
        [IPU3_CSS_QUEUE_PARAMS] = CSS_QUEUE_PARAMS_BUF_SIZE,
        [IPU3_CSS_QUEUE_OUT] = CSS_QUEUE_OUT_BUF_SIZE,
        [IPU3_CSS_QUEUE_VF] = CSS_QUEUE_VF_BUF_SIZE,
        [IPU3_CSS_QUEUE_STAT_3A] = CSS_QUEUE_STAT_3A_BUF_SIZE,
};

static const struct imgu_node_mapping imgu_node_map[IMGU_NODE_NUM] = {
        [IMGU_NODE_IN] = {IPU3_CSS_QUEUE_IN, "input"},
        [IMGU_NODE_PARAMS] = {IPU3_CSS_QUEUE_PARAMS, "parameters"},
        [IMGU_NODE_OUT] = {IPU3_CSS_QUEUE_OUT, "output"},
        [IMGU_NODE_VF] = {IPU3_CSS_QUEUE_VF, "viewfinder"},
        [IMGU_NODE_STAT_3A] = {IPU3_CSS_QUEUE_STAT_3A, "3a stat"},
};

unsigned int imgu_node_to_queue(unsigned int node)
{
        return imgu_node_map[node].css_queue;
}

unsigned int imgu_map_node(struct imgu_device *imgu, unsigned int css_queue)
{
        unsigned int i;

        for (i = 0; i < IMGU_NODE_NUM; i++)
                if (imgu_node_map[i].css_queue == css_queue)
                        break;

        return i;
}

/**************** Dummy buffers ****************/

static void imgu_dummybufs_cleanup(struct imgu_device *imgu, unsigned int pipe)
{
        unsigned int i;
        struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

        for (i = 0; i < IPU3_CSS_QUEUES; i++)
                imgu_dmamap_free(imgu,
                                 &imgu_pipe->queues[i].dmap);
}

static int imgu_dummybufs_preallocate(struct imgu_device *imgu,
                                      unsigned int pipe)
{
        unsigned int i;
        size_t size;
        struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

        for (i = 0; i < IPU3_CSS_QUEUES; i++) {
                size = css_queue_buf_size_map[i];
                /*
                 * Do not enable dummy buffers for the master queue;
                 * always require that real buffers from the user are
                 * available.
                 */
                if (i == IMGU_QUEUE_MASTER || size == 0)
                        continue;

                if (!imgu_dmamap_alloc(imgu,
                                       &imgu_pipe->queues[i].dmap, size)) {
                        imgu_dummybufs_cleanup(imgu, pipe);
                        return -ENOMEM;
                }
        }

        return 0;
}

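/*
 * Resize each enabled queue's pre-allocated dummy buffer to the
 * currently negotiated format and (re)initialize all
 * IMGU_MAX_QUEUE_DEPTH CSS buffer handles of the queue, which share the
 * queue's single DMA mapping.
 */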
static int imgu_dummybufs_init(struct imgu_device *imgu, unsigned int pipe)
{
        const struct v4l2_pix_format_mplane *mpix;
        const struct v4l2_meta_format   *meta;
        unsigned int i, k, node;
        size_t size;
        struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

        /* Allocate a dummy buffer for each queue where a buffer is optional */
        for (i = 0; i < IPU3_CSS_QUEUES; i++) {
                node = imgu_map_node(imgu, i);
                if (!imgu_pipe->queue_enabled[node] || i == IMGU_QUEUE_MASTER)
                        continue;

                if (!imgu_pipe->nodes[IMGU_NODE_VF].enabled &&
                    i == IPU3_CSS_QUEUE_VF)
                        /*
                         * Do not enable dummy buffers for VF if it is not
                         * requested by the user.
                         */
                        continue;

                meta = &imgu_pipe->nodes[node].vdev_fmt.fmt.meta;
                mpix = &imgu_pipe->nodes[node].vdev_fmt.fmt.pix_mp;

                if (node == IMGU_NODE_STAT_3A || node == IMGU_NODE_PARAMS)
                        size = meta->buffersize;
                else
                        size = mpix->plane_fmt[0].sizeimage;

                if (imgu_css_dma_buffer_resize(imgu,
                                               &imgu_pipe->queues[i].dmap,
                                               size)) {
                        imgu_dummybufs_cleanup(imgu, pipe);
                        return -ENOMEM;
                }

                for (k = 0; k < IMGU_MAX_QUEUE_DEPTH; k++)
                        imgu_css_buf_init(&imgu_pipe->queues[i].dummybufs[k], i,
                                          imgu_pipe->queues[i].dmap.daddr);
        }

        return 0;
}

/* May be called from atomic context */
static struct imgu_css_buffer *imgu_dummybufs_get(struct imgu_device *imgu,
                                                   int queue, unsigned int pipe)
{
        unsigned int i;
        struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

        /* Dummy buffers are not allocated for the master queue */
        if (queue == IPU3_CSS_QUEUE_IN)
                return NULL;

        if (WARN_ON(!imgu_pipe->queues[queue].dmap.vaddr))
                /* The buffer must have been pre-allocated; do not allocate here */
                return NULL;

        for (i = 0; i < IMGU_MAX_QUEUE_DEPTH; i++)
                if (imgu_css_buf_state(&imgu_pipe->queues[queue].dummybufs[i]) !=
                        IPU3_CSS_BUFFER_QUEUED)
                        break;

        if (i == IMGU_MAX_QUEUE_DEPTH)
                return NULL;

        imgu_css_buf_init(&imgu_pipe->queues[queue].dummybufs[i], queue,
                          imgu_pipe->queues[queue].dmap.daddr);

        return &imgu_pipe->queues[queue].dummybufs[i];
}

/* Check if given buffer is a dummy buffer */
static bool imgu_dummybufs_check(struct imgu_device *imgu,
                                 struct imgu_css_buffer *buf,
                                 unsigned int pipe)
{
        unsigned int i;
        struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

        for (i = 0; i < IMGU_MAX_QUEUE_DEPTH; i++)
                if (buf == &imgu_pipe->queues[buf->queue].dummybufs[i])
                        break;

        return i < IMGU_MAX_QUEUE_DEPTH;
}

static void imgu_buffer_done(struct imgu_device *imgu, struct vb2_buffer *vb,
                             enum vb2_buffer_state state)
{
        mutex_lock(&imgu->lock);
        imgu_v4l2_buffer_done(vb, state);
        mutex_unlock(&imgu->lock);
}

static struct imgu_css_buffer *imgu_queue_getbuf(struct imgu_device *imgu,
                                                 unsigned int node,
                                                 unsigned int pipe)
{
        struct imgu_buffer *buf;
        struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

        if (WARN_ON(node >= IMGU_NODE_NUM))
                return NULL;

        /* Find first free buffer from the node */
        list_for_each_entry(buf, &imgu_pipe->nodes[node].buffers, vid_buf.list) {
                if (imgu_css_buf_state(&buf->css_buf) == IPU3_CSS_BUFFER_NEW)
                        return &buf->css_buf;
        }

        /* There were no free buffers, try to return a dummy buffer */
        return imgu_dummybufs_get(imgu, imgu_node_map[node].css_queue, pipe);
}

/*
 * Queue as many buffers to the CSS as possible. If not all buffers fit
 * into the CSS buffer queues, they remain unqueued and will be queued
 * later.
 */
int imgu_queue_buffers(struct imgu_device *imgu, bool initial, unsigned int pipe)
{
        unsigned int node;
        int r = 0;
        struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

        if (!imgu_css_is_streaming(&imgu->css))
                return 0;

        dev_dbg(&imgu->pci_dev->dev, "Queue buffers to pipe %d", pipe);
        mutex_lock(&imgu->lock);

        if (!imgu_css_pipe_queue_empty(&imgu->css, pipe)) {
                mutex_unlock(&imgu->lock);
                return 0;
        }

        /* A buffer set is queued to the FW only when an input buffer is ready */
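        /*
         * Iterate the nodes in descending order, wrapping around, for as
         * long as an input buffer is available; assuming IMGU_NODE_IN is
         * the lowest-numbered node, it is visited last in each round, so
         * a full buffer set is in place by the time the input buffer
         * that triggers processing is queued.
         */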
        for (node = IMGU_NODE_NUM - 1;
             imgu_queue_getbuf(imgu, IMGU_NODE_IN, pipe);
             node = node ? node - 1 : IMGU_NODE_NUM - 1) {
                if (node == IMGU_NODE_VF &&
                    !imgu_pipe->nodes[IMGU_NODE_VF].enabled) {
                        dev_warn(&imgu->pci_dev->dev,
                                 "Vf not enabled, ignore queue");
                        continue;
                } else if (node == IMGU_NODE_PARAMS &&
                           imgu_pipe->nodes[node].enabled) {
                        struct vb2_buffer *vb;
                        struct imgu_vb2_buffer *ivb;

                        /* No parameters for this frame */
                        if (list_empty(&imgu_pipe->nodes[node].buffers))
                                continue;

                        ivb = list_first_entry(&imgu_pipe->nodes[node].buffers,
                                               struct imgu_vb2_buffer, list);
                        list_del(&ivb->list);
                        vb = &ivb->vbb.vb2_buf;
                        r = imgu_css_set_parameters(&imgu->css, pipe,
                                                    vb2_plane_vaddr(vb, 0));
                        if (r) {
                                vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
                                dev_warn(&imgu->pci_dev->dev,
                                         "set parameters failed.");
                                continue;
                        }

                        vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
                        dev_dbg(&imgu->pci_dev->dev,
                                "queue user parameters %d to css.", vb->index);
                } else if (imgu_pipe->queue_enabled[node]) {
                        struct imgu_css_buffer *buf =
                                imgu_queue_getbuf(imgu, node, pipe);
                        struct imgu_buffer *ibuf = NULL;
                        bool dummy;

                        if (!buf)
                                break;

                        r = imgu_css_buf_queue(&imgu->css, pipe, buf);
                        if (r)
                                break;
                        dummy = imgu_dummybufs_check(imgu, buf, pipe);
                        if (!dummy)
                                ibuf = container_of(buf, struct imgu_buffer,
                                                    css_buf);
                        dev_dbg(&imgu->pci_dev->dev,
                                "queue %s %s buffer %u to css da: 0x%08x\n",
                                dummy ? "dummy" : "user",
                                imgu_node_map[node].name,
                                dummy ? 0 : ibuf->vid_buf.vbb.vb2_buf.index,
                                (u32)buf->daddr);
                }
        }
        mutex_unlock(&imgu->lock);

        if (r && r != -EBUSY)
                goto failed;

        return 0;

failed:
        /*
         * On error, mark all buffers that are not yet queued to the CSS
         * as failed.
         */
        dev_err(&imgu->pci_dev->dev,
                "failed to queue buffer to CSS on queue %i (%d)\n",
                node, r);

        if (initial)
                /* If we were called from streamon(), no need to finish bufs */
                return r;

        for (node = 0; node < IMGU_NODE_NUM; node++) {
                struct imgu_buffer *buf, *buf0;

                if (!imgu_pipe->queue_enabled[node])
                        continue;       /* Skip disabled queues */

                mutex_lock(&imgu->lock);
                list_for_each_entry_safe(buf, buf0,
                                         &imgu_pipe->nodes[node].buffers,
                                         vid_buf.list) {
                        if (imgu_css_buf_state(&buf->css_buf) ==
                            IPU3_CSS_BUFFER_QUEUED)
                                continue;       /* Was already queued, skip */

                        imgu_v4l2_buffer_done(&buf->vid_buf.vbb.vb2_buf,
                                              VB2_BUF_STATE_ERROR);
                }
                mutex_unlock(&imgu->lock);
        }

        return r;
}

static int imgu_powerup(struct imgu_device *imgu)
{
        int r;
        unsigned int pipe;
        unsigned int freq = 200;
        struct v4l2_mbus_framefmt *fmt;

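        /*
         * The default and high frequency values used here (200 and 450)
         * are presumably the IMGU clock rates in MHz.
         */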
        /* For input larger than 2048x1152, ask the imgu to work at a higher frequency */
        for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
                fmt = &imgu->imgu_pipe[pipe].nodes[IMGU_NODE_IN].pad_fmt;
                dev_dbg(&imgu->pci_dev->dev, "pipe %u input format = %ux%u",
                        pipe, fmt->width, fmt->height);
                if ((fmt->width * fmt->height) >= (2048 * 1152))
                        freq = 450;
        }

        r = imgu_css_set_powerup(&imgu->pci_dev->dev, imgu->base, freq);
        if (r)
                return r;

        imgu_mmu_resume(imgu->mmu);
        return 0;
}

static void imgu_powerdown(struct imgu_device *imgu)
{
        imgu_mmu_suspend(imgu->mmu);
        imgu_css_set_powerdown(&imgu->pci_dev->dev, imgu->base);
}

int imgu_s_stream(struct imgu_device *imgu, int enable)
{
        struct device *dev = &imgu->pci_dev->dev;
        int r, pipe;

        if (!enable) {
                /* Stop streaming */
                dev_dbg(dev, "stream off\n");
                /* Block new buffers from being queued to the CSS. */
                atomic_set(&imgu->qbuf_barrier, 1);
                imgu_css_stop_streaming(&imgu->css);
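                /*
                 * Wait for any in-flight interrupt handler to finish so
                 * that no new buffers are queued to the CSS behind the
                 * barrier before powering down.
                 */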
                synchronize_irq(imgu->pci_dev->irq);
                atomic_set(&imgu->qbuf_barrier, 0);
                imgu_powerdown(imgu);
                pm_runtime_put(&imgu->pci_dev->dev);

                return 0;
        }

        /* Set Power */
        r = pm_runtime_resume_and_get(dev);
        if (r < 0) {
                dev_err(dev, "failed to set imgu power\n");
                return r;
        }

        r = imgu_powerup(imgu);
        if (r) {
                dev_err(dev, "failed to power up imgu\n");
                pm_runtime_put(dev);
                return r;
        }

        /* Start CSS streaming */
        r = imgu_css_start_streaming(&imgu->css);
        if (r) {
                dev_err(dev, "failed to start css streaming (%d)", r);
                goto fail_start_streaming;
        }

        for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
                /* Initialize dummy buffers */
                r = imgu_dummybufs_init(imgu, pipe);
                if (r) {
                        dev_err(dev, "failed to initialize dummy buffers (%d)", r);
                        goto fail_dummybufs;
                }

                /* Queue as many buffers from the queue as possible */
                r = imgu_queue_buffers(imgu, true, pipe);
                if (r) {
                        dev_err(dev, "failed to queue initial buffers (%d)", r);
                        goto fail_queueing;
                }
        }

        return 0;
fail_queueing:
        for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM)
                imgu_dummybufs_cleanup(imgu, pipe);
fail_dummybufs:
        imgu_css_stop_streaming(&imgu->css);
fail_start_streaming:
        pm_runtime_put(dev);

        return r;
}

static int imgu_video_nodes_init(struct imgu_device *imgu)
{
        struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES] = { NULL };
        struct v4l2_rect *rects[IPU3_CSS_RECTS] = { NULL };
        struct imgu_media_pipe *imgu_pipe;
        unsigned int i, j;
        int r;

        imgu->buf_struct_size = sizeof(struct imgu_buffer);

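        /*
         * Collect pointers to each pipe's pix_mp formats for
         * imgu_css_fmt_set() below. Note that fmts[] is overwritten on
         * each pipe iteration, so the last pipe's pointers are the ones
         * used when the initial formats are set.
         */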
        for (j = 0; j < IMGU_MAX_PIPE_NUM; j++) {
                imgu_pipe = &imgu->imgu_pipe[j];

                for (i = 0; i < IMGU_NODE_NUM; i++) {
                        imgu_pipe->nodes[i].name = imgu_node_map[i].name;
                        imgu_pipe->nodes[i].output = i < IMGU_QUEUE_FIRST_INPUT;
                        imgu_pipe->nodes[i].enabled = false;

                        if (i != IMGU_NODE_PARAMS && i != IMGU_NODE_STAT_3A)
                                fmts[imgu_node_map[i].css_queue] =
                                        &imgu_pipe->nodes[i].vdev_fmt.fmt.pix_mp;
                        atomic_set(&imgu_pipe->nodes[i].sequence, 0);
                }
        }

        r = imgu_v4l2_register(imgu);
        if (r)
                return r;

        /* Set the initial formats and initialize the video node formats */
        for (j = 0; j < IMGU_MAX_PIPE_NUM; j++) {
                imgu_pipe = &imgu->imgu_pipe[j];

                rects[IPU3_CSS_RECT_EFFECTIVE] = &imgu_pipe->imgu_sd.rect.eff;
                rects[IPU3_CSS_RECT_BDS] = &imgu_pipe->imgu_sd.rect.bds;
                imgu_css_fmt_set(&imgu->css, fmts, rects, j);

                /* Pre-allocate dummy buffers */
                r = imgu_dummybufs_preallocate(imgu, j);
                if (r) {
                        dev_err(&imgu->pci_dev->dev,
                                "failed to pre-allocate dummy buffers (%d)", r);
                        goto out_cleanup;
                }
        }

        return 0;

out_cleanup:
        for (j = 0; j < IMGU_MAX_PIPE_NUM; j++)
                imgu_dummybufs_cleanup(imgu, j);

        imgu_v4l2_unregister(imgu);

        return r;
}

static void imgu_video_nodes_exit(struct imgu_device *imgu)
{
        int i;

        for (i = 0; i < IMGU_MAX_PIPE_NUM; i++)
                imgu_dummybufs_cleanup(imgu, i);

        imgu_v4l2_unregister(imgu);
}

/**************** PCI interface ****************/

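/*
 * Interrupt handling is split in two: imgu_isr() runs in hard IRQ
 * context and only acknowledges the interrupt, while this threaded
 * handler dequeues completed buffers from the CSS, completes them in
 * vb2 and then tries to queue more buffers.
 */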
static irqreturn_t imgu_isr_threaded(int irq, void *imgu_ptr)
{
        struct imgu_device *imgu = imgu_ptr;
        struct imgu_media_pipe *imgu_pipe;
        int p;

        /* Dequeue / queue buffers */
        do {
                u64 ns = ktime_get_ns();
                struct imgu_css_buffer *b;
                struct imgu_buffer *buf = NULL;
                unsigned int node, pipe;
                bool dummy;

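                /*
                 * Retry the dequeue for as long as the CSS reports
                 * -EAGAIN; any other error (or -EBUSY for "all done")
                 * ends the outer loop below.
                 */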
                do {
                        mutex_lock(&imgu->lock);
                        b = imgu_css_buf_dequeue(&imgu->css);
                        mutex_unlock(&imgu->lock);
                } while (PTR_ERR(b) == -EAGAIN);

                if (IS_ERR(b)) {
                        if (PTR_ERR(b) != -EBUSY)       /* All done */
                                dev_err(&imgu->pci_dev->dev,
                                        "failed to dequeue buffers (%ld)\n",
                                        PTR_ERR(b));
                        break;
                }

                node = imgu_map_node(imgu, b->queue);
                pipe = b->pipe;
                dummy = imgu_dummybufs_check(imgu, b, pipe);
                if (!dummy)
                        buf = container_of(b, struct imgu_buffer, css_buf);
                dev_dbg(&imgu->pci_dev->dev,
                        "dequeue %s %s buffer %d daddr 0x%x from css\n",
                        dummy ? "dummy" : "user",
                        imgu_node_map[node].name,
                        dummy ? 0 : buf->vid_buf.vbb.vb2_buf.index,
                        (u32)b->daddr);

                if (dummy)
                        /* It was a dummy buffer, skip it */
                        continue;

                /* Fill the vb2 buffer entries and tell vb2 the buffer is ready */
                imgu_pipe = &imgu->imgu_pipe[pipe];
                if (!imgu_pipe->nodes[node].output) {
                        buf->vid_buf.vbb.vb2_buf.timestamp = ns;
                        buf->vid_buf.vbb.field = V4L2_FIELD_NONE;
                        buf->vid_buf.vbb.sequence =
                                atomic_inc_return(
                                &imgu_pipe->nodes[node].sequence);
                        dev_dbg(&imgu->pci_dev->dev, "vb2 buffer sequence %d",
                                buf->vid_buf.vbb.sequence);
                }
                imgu_buffer_done(imgu, &buf->vid_buf.vbb.vb2_buf,
                                 imgu_css_buf_state(&buf->css_buf) ==
                                                    IPU3_CSS_BUFFER_DONE ?
                                                    VB2_BUF_STATE_DONE :
                                                    VB2_BUF_STATE_ERROR);
                mutex_lock(&imgu->lock);
                if (imgu_css_queue_empty(&imgu->css))
                        wake_up_all(&imgu->buf_drain_wq);
                mutex_unlock(&imgu->lock);
        } while (1);

        /*
         * Try to queue more buffers to the CSS. qbuf_barrier is used to
         * prevent new buffers from being queued to the CSS.
         */
        if (!atomic_read(&imgu->qbuf_barrier))
                for_each_set_bit(p, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM)
                        imgu_queue_buffers(imgu, false, p);

        return IRQ_HANDLED;
}

static irqreturn_t imgu_isr(int irq, void *imgu_ptr)
{
        struct imgu_device *imgu = imgu_ptr;

        /* Acknowledge the interrupt */
        if (imgu_css_irq_ack(&imgu->css) < 0)
                return IRQ_NONE;

        return IRQ_WAKE_THREAD;
}

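/*
 * Enable MSI, then turn on memory space access and bus mastering and
 * disable legacy INTx interrupts in the PCI command register.
 */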
static int imgu_pci_config_setup(struct pci_dev *dev)
{
        u16 pci_command;
        int r = pci_enable_msi(dev);

        if (r) {
                dev_err(&dev->dev, "failed to enable MSI (%d)\n", r);
                return r;
        }

        pci_read_config_word(dev, PCI_COMMAND, &pci_command);
        pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
                        PCI_COMMAND_INTX_DISABLE;
        pci_write_config_word(dev, PCI_COMMAND, pci_command);

        return 0;
}

static int imgu_pci_probe(struct pci_dev *pci_dev,
                          const struct pci_device_id *id)
{
        struct imgu_device *imgu;
        phys_addr_t phys;
        unsigned long phys_len;
        void __iomem *const *iomap;
        int r;

        imgu = devm_kzalloc(&pci_dev->dev, sizeof(*imgu), GFP_KERNEL);
        if (!imgu)
                return -ENOMEM;

        imgu->pci_dev = pci_dev;

        r = pcim_enable_device(pci_dev);
        if (r) {
                dev_err(&pci_dev->dev, "failed to enable device (%d)\n", r);
                return r;
        }

        dev_info(&pci_dev->dev, "device 0x%x (rev: 0x%x)\n",
                 pci_dev->device, pci_dev->revision);

        phys = pci_resource_start(pci_dev, IMGU_PCI_BAR);
        phys_len = pci_resource_len(pci_dev, IMGU_PCI_BAR);

        r = pcim_iomap_regions(pci_dev, 1 << IMGU_PCI_BAR, pci_name(pci_dev));
        if (r) {
                dev_err(&pci_dev->dev, "failed to remap I/O memory (%d)\n", r);
                return r;
        }
        dev_info(&pci_dev->dev, "physical base address %pap, %lu bytes\n",
                 &phys, phys_len);

        iomap = pcim_iomap_table(pci_dev);
        if (!iomap) {
                dev_err(&pci_dev->dev, "failed to iomap table\n");
                return -ENODEV;
        }

        imgu->base = iomap[IMGU_PCI_BAR];

        pci_set_drvdata(pci_dev, imgu);

        pci_set_master(pci_dev);

        r = dma_coerce_mask_and_coherent(&pci_dev->dev, IMGU_DMA_MASK);
        if (r) {
                dev_err(&pci_dev->dev, "failed to set DMA mask (%d)\n", r);
                return -ENODEV;
        }

        r = imgu_pci_config_setup(pci_dev);
        if (r)
                return r;

        mutex_init(&imgu->lock);
        mutex_init(&imgu->streaming_lock);
        atomic_set(&imgu->qbuf_barrier, 0);
        init_waitqueue_head(&imgu->buf_drain_wq);

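        /*
         * Power up the CSS before initializing the MMU; the MMU
         * registers live in the IMGU MMIO space and are presumably only
         * accessible while the CSS is powered up.
         */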
        r = imgu_css_set_powerup(&pci_dev->dev, imgu->base, 200);
        if (r) {
                dev_err(&pci_dev->dev,
                        "failed to power up CSS (%d)\n", r);
                goto out_mutex_destroy;
        }

        imgu->mmu = imgu_mmu_init(&pci_dev->dev, imgu->base);
        if (IS_ERR(imgu->mmu)) {
                r = PTR_ERR(imgu->mmu);
                dev_err(&pci_dev->dev, "failed to initialize MMU (%d)\n", r);
                goto out_css_powerdown;
        }

        r = imgu_dmamap_init(imgu);
        if (r) {
                dev_err(&pci_dev->dev,
                        "failed to initialize DMA mapping (%d)\n", r);
                goto out_mmu_exit;
        }

        /* ISP programming */
        r = imgu_css_init(&pci_dev->dev, &imgu->css, imgu->base, phys_len);
        if (r) {
                dev_err(&pci_dev->dev, "failed to initialize CSS (%d)\n", r);
                goto out_dmamap_exit;
        }

        /* v4l2 sub-device registration */
        r = imgu_video_nodes_init(imgu);
        if (r) {
                dev_err(&pci_dev->dev, "failed to create V4L2 devices (%d)\n",
                        r);
                goto out_css_cleanup;
        }

        r = devm_request_threaded_irq(&pci_dev->dev, pci_dev->irq,
                                      imgu_isr, imgu_isr_threaded,
                                      IRQF_SHARED, IMGU_NAME, imgu);
        if (r) {
                dev_err(&pci_dev->dev, "failed to request IRQ (%d)\n", r);
                goto out_video_exit;
        }

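        /*
         * Release the runtime PM reference held during probe and allow
         * runtime PM, so the device can be suspended when idle.
         */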
        pm_runtime_put_noidle(&pci_dev->dev);
        pm_runtime_allow(&pci_dev->dev);

        return 0;

out_video_exit:
        imgu_video_nodes_exit(imgu);
out_css_cleanup:
        imgu_css_cleanup(&imgu->css);
out_dmamap_exit:
        imgu_dmamap_exit(imgu);
out_mmu_exit:
        imgu_mmu_exit(imgu->mmu);
out_css_powerdown:
        imgu_css_set_powerdown(&pci_dev->dev, imgu->base);
out_mutex_destroy:
        mutex_destroy(&imgu->streaming_lock);
        mutex_destroy(&imgu->lock);

        return r;
}

static void imgu_pci_remove(struct pci_dev *pci_dev)
{
        struct imgu_device *imgu = pci_get_drvdata(pci_dev);

        pm_runtime_forbid(&pci_dev->dev);
        pm_runtime_get_noresume(&pci_dev->dev);

        imgu_video_nodes_exit(imgu);
        imgu_css_cleanup(&imgu->css);
        imgu_css_set_powerdown(&pci_dev->dev, imgu->base);
        imgu_dmamap_exit(imgu);
        imgu_mmu_exit(imgu->mmu);
        mutex_destroy(&imgu->streaming_lock);
        mutex_destroy(&imgu->lock);
}

static int __maybe_unused imgu_suspend(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct imgu_device *imgu = pci_get_drvdata(pci_dev);

        dev_dbg(dev, "enter %s\n", __func__);
        imgu->suspend_in_stream = imgu_css_is_streaming(&imgu->css);
        if (!imgu->suspend_in_stream)
                goto out;
        /* Block new buffers from being queued to the CSS. */
        atomic_set(&imgu->qbuf_barrier, 1);
        /*
         * Wait for the currently running IRQ handler to finish so that
         * no new buffers will be queued to the FW afterwards.
         */
        synchronize_irq(pci_dev->irq);
        /* Wait until all buffers in CSS are done. */
        if (!wait_event_timeout(imgu->buf_drain_wq,
            imgu_css_queue_empty(&imgu->css), msecs_to_jiffies(1000)))
                dev_err(dev, "wait buffer drain timeout.\n");

        imgu_css_stop_streaming(&imgu->css);
        atomic_set(&imgu->qbuf_barrier, 0);
        imgu_powerdown(imgu);
        pm_runtime_force_suspend(dev);
out:
        dev_dbg(dev, "leave %s\n", __func__);
        return 0;
}

static int __maybe_unused imgu_resume(struct device *dev)
{
        struct imgu_device *imgu = dev_get_drvdata(dev);
        int r = 0;
        unsigned int pipe;

        dev_dbg(dev, "enter %s\n", __func__);

        if (!imgu->suspend_in_stream)
                goto out;

        pm_runtime_force_resume(dev);

        r = imgu_powerup(imgu);
        if (r) {
                dev_err(dev, "failed to power up imgu\n");
                goto out;
        }

        /* Start CSS streaming */
        r = imgu_css_start_streaming(&imgu->css);
        if (r) {
                dev_err(dev, "failed to resume css streaming (%d)", r);
                goto out;
        }

        for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
                r = imgu_queue_buffers(imgu, true, pipe);
                if (r)
                        dev_err(dev, "failed to queue buffers to pipe %d (%d)",
                                pipe, r);
        }

out:
        dev_dbg(dev, "leave %s\n", __func__);

        return r;
}

/*
 * The PCI runtime PM framework checks for the existence of driver
 * runtime PM callbacks. Provide a dummy callback here to keep runtime
 * PM from entering an error state.
 */
static __maybe_unused int imgu_rpm_dummy_cb(struct device *dev)
{
        return 0;
}

static const struct dev_pm_ops imgu_pm_ops = {
        SET_RUNTIME_PM_OPS(&imgu_rpm_dummy_cb, &imgu_rpm_dummy_cb, NULL)
        SET_SYSTEM_SLEEP_PM_OPS(&imgu_suspend, &imgu_resume)
};

static const struct pci_device_id imgu_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, IMGU_PCI_ID) },
        { 0, }
};

MODULE_DEVICE_TABLE(pci, imgu_pci_tbl);

static struct pci_driver imgu_pci_driver = {
        .name = IMGU_NAME,
        .id_table = imgu_pci_tbl,
        .probe = imgu_pci_probe,
        .remove = imgu_pci_remove,
        .driver = {
                .pm = &imgu_pm_ops,
        },
};

module_pci_driver(imgu_pci_driver);

MODULE_AUTHOR("Tuukka Toivonen <tuukka.toivonen@intel.com>");
MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
MODULE_AUTHOR("Jian Xu Zheng <jian.xu.zheng@intel.com>");
MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel ipu3_imgu PCI driver");