linux/drivers/staging/media/ipu3/ipu3.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 - 2018 Intel Corporation
 * Copyright 2017 Google LLC
 *
 * Based on Intel IPU4 driver.
 *
 */

#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>

#include "ipu3.h"
#include "ipu3-dmamap.h"
#include "ipu3-mmu.h"

#define IMGU_PCI_ID                     0x1919
#define IMGU_PCI_BAR                    0
#define IMGU_DMA_MASK                   DMA_BIT_MASK(39)
#define IMGU_MAX_QUEUE_DEPTH            (2 + 2)

/*
 * Pre-allocated buffer sizes for IMGU dummy buffers. These values
 * should be tuned large enough to avoid buffer reallocation while
 * streaming, in order to keep streaming latency low.
 */
#define CSS_QUEUE_IN_BUF_SIZE           0
#define CSS_QUEUE_PARAMS_BUF_SIZE       0
#define CSS_QUEUE_OUT_BUF_SIZE          (4160 * 3120 * 12 / 8)
#define CSS_QUEUE_VF_BUF_SIZE           (1920 * 1080 * 12 / 8)
#define CSS_QUEUE_STAT_3A_BUF_SIZE      sizeof(struct ipu3_uapi_stats_3a)

static const size_t css_queue_buf_size_map[IPU3_CSS_QUEUES] = {
        [IPU3_CSS_QUEUE_IN] = CSS_QUEUE_IN_BUF_SIZE,
        [IPU3_CSS_QUEUE_PARAMS] = CSS_QUEUE_PARAMS_BUF_SIZE,
        [IPU3_CSS_QUEUE_OUT] = CSS_QUEUE_OUT_BUF_SIZE,
        [IPU3_CSS_QUEUE_VF] = CSS_QUEUE_VF_BUF_SIZE,
        [IPU3_CSS_QUEUE_STAT_3A] = CSS_QUEUE_STAT_3A_BUF_SIZE,
};

static const struct imgu_node_mapping imgu_node_map[IMGU_NODE_NUM] = {
        [IMGU_NODE_IN] = {IPU3_CSS_QUEUE_IN, "input"},
        [IMGU_NODE_PARAMS] = {IPU3_CSS_QUEUE_PARAMS, "parameters"},
        [IMGU_NODE_OUT] = {IPU3_CSS_QUEUE_OUT, "output"},
        [IMGU_NODE_VF] = {IPU3_CSS_QUEUE_VF, "viewfinder"},
        [IMGU_NODE_STAT_3A] = {IPU3_CSS_QUEUE_STAT_3A, "3a stat"},
};

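/* Return the CSS queue corresponding to the given video node */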
unsigned int imgu_node_to_queue(unsigned int node)
{
        return imgu_node_map[node].css_queue;
}

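/*
 * Return the video node corresponding to the given CSS queue, or
 * IMGU_NODE_NUM if no node maps to the queue.
 */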
unsigned int imgu_map_node(struct imgu_device *imgu, unsigned int css_queue)
{
        unsigned int i;

        for (i = 0; i < IMGU_NODE_NUM; i++)
                if (imgu_node_map[i].css_queue == css_queue)
                        break;

        return i;
}

/**************** Dummy buffers ****************/

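/* Free the dummy buffers allocated for every CSS queue of a pipe */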
static void imgu_dummybufs_cleanup(struct imgu_device *imgu, unsigned int pipe)
{
        unsigned int i;
        struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

        for (i = 0; i < IPU3_CSS_QUEUES; i++)
                imgu_dmamap_free(imgu,
                                 &imgu_pipe->queues[i].dmap);
}

static int imgu_dummybufs_preallocate(struct imgu_device *imgu,
                                      unsigned int pipe)
{
        unsigned int i;
        size_t size;
        struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

        for (i = 0; i < IPU3_CSS_QUEUES; i++) {
                size = css_queue_buf_size_map[i];
                /*
                 * Do not allocate dummy buffers for the master queue;
                 * real buffers from the user must always be available
                 * there.
                 */
                if (i == IMGU_QUEUE_MASTER || size == 0)
                        continue;

                if (!imgu_dmamap_alloc(imgu,
                                       &imgu_pipe->queues[i].dmap, size)) {
                        imgu_dummybufs_cleanup(imgu, pipe);
                        return -ENOMEM;
                }
        }

        return 0;
}

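/*
 * Resize the pre-allocated dummy buffers to match the currently
 * configured formats and initialize the per-queue dummy CSS buffers.
 */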
static int imgu_dummybufs_init(struct imgu_device *imgu, unsigned int pipe)
{
        const struct v4l2_pix_format_mplane *mpix;
        const struct v4l2_meta_format   *meta;
        unsigned int i, k, node;
        size_t size;
        struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

        /* Allocate a dummy buffer for each queue where buffers are optional */
        for (i = 0; i < IPU3_CSS_QUEUES; i++) {
                node = imgu_map_node(imgu, i);
                if (!imgu_pipe->queue_enabled[node] || i == IMGU_QUEUE_MASTER)
                        continue;

                if (!imgu_pipe->nodes[IMGU_NODE_VF].enabled &&
                    i == IPU3_CSS_QUEUE_VF)
                        /*
                         * Do not enable dummy buffers for VF if it is not
                         * requested by the user.
                         */
                        continue;

                meta = &imgu_pipe->nodes[node].vdev_fmt.fmt.meta;
                mpix = &imgu_pipe->nodes[node].vdev_fmt.fmt.pix_mp;

                if (node == IMGU_NODE_STAT_3A || node == IMGU_NODE_PARAMS)
                        size = meta->buffersize;
                else
                        size = mpix->plane_fmt[0].sizeimage;

                if (imgu_css_dma_buffer_resize(imgu,
                                               &imgu_pipe->queues[i].dmap,
                                               size)) {
                        imgu_dummybufs_cleanup(imgu, pipe);
                        return -ENOMEM;
                }

                for (k = 0; k < IMGU_MAX_QUEUE_DEPTH; k++)
                        imgu_css_buf_init(&imgu_pipe->queues[i].dummybufs[k], i,
                                          imgu_pipe->queues[i].dmap.daddr);
        }

        return 0;
}

/* May be called from atomic context */
static struct imgu_css_buffer *imgu_dummybufs_get(struct imgu_device *imgu,
                                                   int queue, unsigned int pipe)
{
        unsigned int i;
        struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

        /* Dummy buffers are not allocated for the master queue */
        if (queue == IPU3_CSS_QUEUE_IN)
                return NULL;

        if (WARN_ON(!imgu_pipe->queues[queue].dmap.vaddr))
                /* Buffer should not be allocated here */
                return NULL;

        for (i = 0; i < IMGU_MAX_QUEUE_DEPTH; i++)
                if (imgu_css_buf_state(&imgu_pipe->queues[queue].dummybufs[i]) !=
                        IPU3_CSS_BUFFER_QUEUED)
                        break;

        if (i == IMGU_MAX_QUEUE_DEPTH)
                return NULL;

        imgu_css_buf_init(&imgu_pipe->queues[queue].dummybufs[i], queue,
                          imgu_pipe->queues[queue].dmap.daddr);

        return &imgu_pipe->queues[queue].dummybufs[i];
}

/* Check if given buffer is a dummy buffer */
static bool imgu_dummybufs_check(struct imgu_device *imgu,
                                 struct imgu_css_buffer *buf,
                                 unsigned int pipe)
{
        unsigned int i;
        struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

        for (i = 0; i < IMGU_MAX_QUEUE_DEPTH; i++)
                if (buf == &imgu_pipe->queues[buf->queue].dummybufs[i])
                        break;

        return i < IMGU_MAX_QUEUE_DEPTH;
}

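/* Complete a vb2 buffer while holding the device lock */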
static void imgu_buffer_done(struct imgu_device *imgu, struct vb2_buffer *vb,
                             enum vb2_buffer_state state)
{
        mutex_lock(&imgu->lock);
        imgu_v4l2_buffer_done(vb, state);
        mutex_unlock(&imgu->lock);
}

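/*
 * Return the first buffer of the node that is not yet queued to CSS, or
 * a dummy buffer if the node has no free buffers.
 */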
static struct imgu_css_buffer *imgu_queue_getbuf(struct imgu_device *imgu,
                                                 unsigned int node,
                                                 unsigned int pipe)
{
        struct imgu_buffer *buf;
        struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

        if (WARN_ON(node >= IMGU_NODE_NUM))
                return NULL;

        /* Find first free buffer from the node */
        list_for_each_entry(buf, &imgu_pipe->nodes[node].buffers, vid_buf.list) {
                if (imgu_css_buf_state(&buf->css_buf) == IPU3_CSS_BUFFER_NEW)
                        return &buf->css_buf;
        }

        /* There were no free buffers, try to return a dummy buffer */
        return imgu_dummybufs_get(imgu, imgu_node_map[node].css_queue, pipe);
}

/*
 * Queue as many buffers to CSS as possible. If not all buffers fit into
 * the CSS buffer queues, they remain unqueued and will be queued later.
 */
int imgu_queue_buffers(struct imgu_device *imgu, bool initial, unsigned int pipe)
{
        unsigned int node;
        int r = 0;
        struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

        if (!imgu_css_is_streaming(&imgu->css))
                return 0;

        dev_dbg(&imgu->pci_dev->dev, "Queue buffers to pipe %d", pipe);
        mutex_lock(&imgu->lock);

        if (!imgu_css_pipe_queue_empty(&imgu->css, pipe)) {
                mutex_unlock(&imgu->lock);
                return 0;
        }

        /* A buffer set is queued to the FW only when an input buffer is ready */
        for (node = IMGU_NODE_NUM - 1;
             imgu_queue_getbuf(imgu, IMGU_NODE_IN, pipe);
             node = node ? node - 1 : IMGU_NODE_NUM - 1) {
                if (node == IMGU_NODE_VF &&
                    !imgu_pipe->nodes[IMGU_NODE_VF].enabled) {
                        dev_warn(&imgu->pci_dev->dev,
                                 "Vf not enabled, ignore queue");
                        continue;
                } else if (node == IMGU_NODE_PARAMS &&
                           imgu_pipe->nodes[node].enabled) {
                        struct vb2_buffer *vb;
                        struct imgu_vb2_buffer *ivb;

                        /* No parameters for this frame */
                        if (list_empty(&imgu_pipe->nodes[node].buffers))
                                continue;

                        ivb = list_first_entry(&imgu_pipe->nodes[node].buffers,
                                               struct imgu_vb2_buffer, list);
                        list_del(&ivb->list);
                        vb = &ivb->vbb.vb2_buf;
                        r = imgu_css_set_parameters(&imgu->css, pipe,
                                                    vb2_plane_vaddr(vb, 0));
                        if (r) {
                                vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
                                dev_warn(&imgu->pci_dev->dev,
                                         "set parameters failed.");
                                continue;
                        }

                        vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
                        dev_dbg(&imgu->pci_dev->dev,
                                "queue user parameters %d to css.", vb->index);
                } else if (imgu_pipe->queue_enabled[node]) {
                        struct imgu_css_buffer *buf =
                                imgu_queue_getbuf(imgu, node, pipe);
                        struct imgu_buffer *ibuf = NULL;
                        bool dummy;

                        if (!buf)
                                break;

                        r = imgu_css_buf_queue(&imgu->css, pipe, buf);
                        if (r)
                                break;
                        dummy = imgu_dummybufs_check(imgu, buf, pipe);
                        if (!dummy)
                                ibuf = container_of(buf, struct imgu_buffer,
                                                    css_buf);
                        dev_dbg(&imgu->pci_dev->dev,
                                "queue %s %s buffer %u to css da: 0x%08x\n",
                                dummy ? "dummy" : "user",
                                imgu_node_map[node].name,
                                dummy ? 0 : ibuf->vid_buf.vbb.vb2_buf.index,
                                (u32)buf->daddr);
                }
        }
        mutex_unlock(&imgu->lock);

        if (r && r != -EBUSY)
                goto failed;

        return 0;

failed:
        /*
         * On error, mark as failed all buffers that have not yet been
         * queued to CSS
         */
        dev_err(&imgu->pci_dev->dev,
                "failed to queue buffer to CSS on queue %i (%d)\n",
                node, r);

        if (initial)
                /* If we were called from streamon(), no need to finish bufs */
                return r;

        for (node = 0; node < IMGU_NODE_NUM; node++) {
                struct imgu_buffer *buf, *buf0;

                if (!imgu_pipe->queue_enabled[node])
                        continue;       /* Skip disabled queues */

                mutex_lock(&imgu->lock);
                list_for_each_entry_safe(buf, buf0,
                                         &imgu_pipe->nodes[node].buffers,
                                         vid_buf.list) {
                        if (imgu_css_buf_state(&buf->css_buf) ==
                            IPU3_CSS_BUFFER_QUEUED)
                                continue;       /* Was already queued, skip */

                        imgu_v4l2_buffer_done(&buf->vid_buf.vbb.vb2_buf,
                                              VB2_BUF_STATE_ERROR);
                }
                mutex_unlock(&imgu->lock);
        }

        return r;
}

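/* Power up the IMGU and resume its MMU */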
static int imgu_powerup(struct imgu_device *imgu)
{
        int r;
        unsigned int pipe;
        unsigned int freq = 200;
        struct v4l2_mbus_framefmt *fmt;

        /* With inputs of 2048x1152 or larger, ask imgu to work at high freq */
        for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
                fmt = &imgu->imgu_pipe[pipe].nodes[IMGU_NODE_IN].pad_fmt;
                dev_dbg(&imgu->pci_dev->dev, "pipe %u input format = %ux%u",
                        pipe, fmt->width, fmt->height);
                if ((fmt->width * fmt->height) >= (2048 * 1152))
                        freq = 450;
        }

        r = imgu_css_set_powerup(&imgu->pci_dev->dev, imgu->base, freq);
        if (r)
                return r;

        imgu_mmu_resume(imgu->mmu);
        return 0;
}

static void imgu_powerdown(struct imgu_device *imgu)
{
        imgu_mmu_suspend(imgu->mmu);
        imgu_css_set_powerdown(&imgu->pci_dev->dev, imgu->base);
}

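/*
 * Start or stop streaming: power the IMGU up or down, start or stop CSS
 * streaming and, when starting, prime the CSS queues with buffers.
 */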
int imgu_s_stream(struct imgu_device *imgu, int enable)
{
        struct device *dev = &imgu->pci_dev->dev;
        int r, pipe;

        if (!enable) {
                /* Stop streaming */
                dev_dbg(dev, "stream off\n");
                /* Block new buffers from being queued to CSS */
                atomic_set(&imgu->qbuf_barrier, 1);
                imgu_css_stop_streaming(&imgu->css);
                synchronize_irq(imgu->pci_dev->irq);
                atomic_set(&imgu->qbuf_barrier, 0);
                imgu_powerdown(imgu);
                pm_runtime_put(&imgu->pci_dev->dev);

                return 0;
        }

        /* Set Power */
        r = pm_runtime_get_sync(dev);
        if (r < 0) {
                dev_err(dev, "failed to set imgu power\n");
                pm_runtime_put(dev);
                return r;
        }

        r = imgu_powerup(imgu);
        if (r) {
                dev_err(dev, "failed to power up imgu\n");
                pm_runtime_put(dev);
                return r;
        }

        /* Start CSS streaming */
        r = imgu_css_start_streaming(&imgu->css);
        if (r) {
                dev_err(dev, "failed to start css streaming (%d)", r);
                goto fail_start_streaming;
        }

        for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
                /* Initialize dummy buffers */
                r = imgu_dummybufs_init(imgu, pipe);
                if (r) {
                        dev_err(dev, "failed to initialize dummy buffers (%d)", r);
                        goto fail_dummybufs;
                }

                /* Queue as many buffers from queue as possible */
                r = imgu_queue_buffers(imgu, true, pipe);
                if (r) {
                        dev_err(dev, "failed to queue initial buffers (%d)", r);
                        goto fail_queueing;
                }
        }

        return 0;
fail_queueing:
        for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM)
                imgu_dummybufs_cleanup(imgu, pipe);
fail_dummybufs:
        imgu_css_stop_streaming(&imgu->css);
fail_start_streaming:
        pm_runtime_put(dev);

        return r;
}

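/*
 * Initialize the per-pipe video nodes, register them with V4L2 and
 * pre-allocate the dummy buffers.
 */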
static int imgu_video_nodes_init(struct imgu_device *imgu)
{
        struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES] = { NULL };
        struct v4l2_rect *rects[IPU3_CSS_RECTS] = { NULL };
        struct imgu_media_pipe *imgu_pipe;
        unsigned int i, j;
        int r;

        imgu->buf_struct_size = sizeof(struct imgu_buffer);

        for (j = 0; j < IMGU_MAX_PIPE_NUM; j++) {
                imgu_pipe = &imgu->imgu_pipe[j];

                for (i = 0; i < IMGU_NODE_NUM; i++) {
                        imgu_pipe->nodes[i].name = imgu_node_map[i].name;
                        imgu_pipe->nodes[i].output = i < IMGU_QUEUE_FIRST_INPUT;
                        imgu_pipe->nodes[i].enabled = false;

                        if (i != IMGU_NODE_PARAMS && i != IMGU_NODE_STAT_3A)
                                fmts[imgu_node_map[i].css_queue] =
                                        &imgu_pipe->nodes[i].vdev_fmt.fmt.pix_mp;
                        atomic_set(&imgu_pipe->nodes[i].sequence, 0);
                }
        }

        r = imgu_v4l2_register(imgu);
        if (r)
                return r;

        /* Set initial formats and initialize formats of video nodes */
        for (j = 0; j < IMGU_MAX_PIPE_NUM; j++) {
                imgu_pipe = &imgu->imgu_pipe[j];

                rects[IPU3_CSS_RECT_EFFECTIVE] = &imgu_pipe->imgu_sd.rect.eff;
                rects[IPU3_CSS_RECT_BDS] = &imgu_pipe->imgu_sd.rect.bds;
                imgu_css_fmt_set(&imgu->css, fmts, rects, j);

                /* Pre-allocate dummy buffers */
                r = imgu_dummybufs_preallocate(imgu, j);
                if (r) {
                        dev_err(&imgu->pci_dev->dev,
                                "failed to pre-allocate dummy buffers (%d)", r);
                        goto out_cleanup;
                }
        }

        return 0;

out_cleanup:
        for (j = 0; j < IMGU_MAX_PIPE_NUM; j++)
                imgu_dummybufs_cleanup(imgu, j);

        imgu_v4l2_unregister(imgu);

        return r;
}

static void imgu_video_nodes_exit(struct imgu_device *imgu)
{
        int i;

        for (i = 0; i < IMGU_MAX_PIPE_NUM; i++)
                imgu_dummybufs_cleanup(imgu, i);

        imgu_v4l2_unregister(imgu);
}

/**************** PCI interface ****************/

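/*
 * Threaded interrupt handler: dequeue completed buffers from CSS, return
 * user buffers to vb2 and queue more buffers to CSS when possible.
 */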
static irqreturn_t imgu_isr_threaded(int irq, void *imgu_ptr)
{
        struct imgu_device *imgu = imgu_ptr;
        struct imgu_media_pipe *imgu_pipe;
        int p;

        /* Dequeue / queue buffers */
        do {
                u64 ns = ktime_get_ns();
                struct imgu_css_buffer *b;
                struct imgu_buffer *buf = NULL;
                unsigned int node, pipe;
                bool dummy;

                do {
                        mutex_lock(&imgu->lock);
                        b = imgu_css_buf_dequeue(&imgu->css);
                        mutex_unlock(&imgu->lock);
                } while (PTR_ERR(b) == -EAGAIN);

                if (IS_ERR(b)) {
                        if (PTR_ERR(b) != -EBUSY)       /* All done */
                                dev_err(&imgu->pci_dev->dev,
                                        "failed to dequeue buffers (%ld)\n",
                                        PTR_ERR(b));
                        break;
                }

                node = imgu_map_node(imgu, b->queue);
                pipe = b->pipe;
                dummy = imgu_dummybufs_check(imgu, b, pipe);
                if (!dummy)
                        buf = container_of(b, struct imgu_buffer, css_buf);
                dev_dbg(&imgu->pci_dev->dev,
                        "dequeue %s %s buffer %d daddr 0x%x from css\n",
                        dummy ? "dummy" : "user",
                        imgu_node_map[node].name,
                        dummy ? 0 : buf->vid_buf.vbb.vb2_buf.index,
                        (u32)b->daddr);

                if (dummy)
                        /* It was a dummy buffer, skip it */
                        continue;

                /* Fill the vb2 buffer entries and signal that it is ready */
                imgu_pipe = &imgu->imgu_pipe[pipe];
                if (!imgu_pipe->nodes[node].output) {
                        buf->vid_buf.vbb.vb2_buf.timestamp = ns;
                        buf->vid_buf.vbb.field = V4L2_FIELD_NONE;
                        buf->vid_buf.vbb.sequence =
                                atomic_inc_return(
                                &imgu_pipe->nodes[node].sequence);
                        dev_dbg(&imgu->pci_dev->dev, "vb2 buffer sequence %d",
                                buf->vid_buf.vbb.sequence);
                }
                imgu_buffer_done(imgu, &buf->vid_buf.vbb.vb2_buf,
                                 imgu_css_buf_state(&buf->css_buf) ==
                                                    IPU3_CSS_BUFFER_DONE ?
                                                    VB2_BUF_STATE_DONE :
                                                    VB2_BUF_STATE_ERROR);
                mutex_lock(&imgu->lock);
                if (imgu_css_queue_empty(&imgu->css))
                        wake_up_all(&imgu->buf_drain_wq);
                mutex_unlock(&imgu->lock);
        } while (1);

        /*
         * Try to queue more buffers to CSS. qbuf_barrier is used to
         * prevent new buffers from being queued to CSS.
         */
        if (!atomic_read(&imgu->qbuf_barrier))
                for_each_set_bit(p, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM)
                        imgu_queue_buffers(imgu, false, p);

        return IRQ_HANDLED;
}

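/*
 * Hard interrupt handler: on the shared IRQ line, return IRQ_NONE unless
 * the interrupt was raised by the IMGU, then wake the threaded handler.
 */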
static irqreturn_t imgu_isr(int irq, void *imgu_ptr)
{
        struct imgu_device *imgu = imgu_ptr;

        /* Acknowledge the interrupt */
        if (imgu_css_irq_ack(&imgu->css) < 0)
                return IRQ_NONE;

        return IRQ_WAKE_THREAD;
}

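/* Enable MSI, memory space access and bus mastering; disable legacy INTx */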
static int imgu_pci_config_setup(struct pci_dev *dev)
{
        u16 pci_command;
        int r = pci_enable_msi(dev);

        if (r) {
                dev_err(&dev->dev, "failed to enable MSI (%d)\n", r);
                return r;
        }

        pci_read_config_word(dev, PCI_COMMAND, &pci_command);
        pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
                        PCI_COMMAND_INTX_DISABLE;
        pci_write_config_word(dev, PCI_COMMAND, pci_command);

        return 0;
}

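/*
 * Probe: map the IMGU MMIO BAR, power up CSS, set up the MMU, DMA
 * mapping and CSS, register the video nodes and request the IRQ.
 */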
static int imgu_pci_probe(struct pci_dev *pci_dev,
                          const struct pci_device_id *id)
{
        struct imgu_device *imgu;
        phys_addr_t phys;
        unsigned long phys_len;
        void __iomem *const *iomap;
        int r;

        imgu = devm_kzalloc(&pci_dev->dev, sizeof(*imgu), GFP_KERNEL);
        if (!imgu)
                return -ENOMEM;

        imgu->pci_dev = pci_dev;

        r = pcim_enable_device(pci_dev);
        if (r) {
                dev_err(&pci_dev->dev, "failed to enable device (%d)\n", r);
                return r;
        }

        dev_info(&pci_dev->dev, "device 0x%x (rev: 0x%x)\n",
                 pci_dev->device, pci_dev->revision);

        phys = pci_resource_start(pci_dev, IMGU_PCI_BAR);
        phys_len = pci_resource_len(pci_dev, IMGU_PCI_BAR);

        r = pcim_iomap_regions(pci_dev, 1 << IMGU_PCI_BAR, pci_name(pci_dev));
        if (r) {
                dev_err(&pci_dev->dev, "failed to remap I/O memory (%d)\n", r);
                return r;
        }
        dev_info(&pci_dev->dev, "physical base address %pap, %lu bytes\n",
                 &phys, phys_len);

        iomap = pcim_iomap_table(pci_dev);
        if (!iomap) {
                dev_err(&pci_dev->dev, "failed to iomap table\n");
                return -ENODEV;
        }

        imgu->base = iomap[IMGU_PCI_BAR];

        pci_set_drvdata(pci_dev, imgu);

        pci_set_master(pci_dev);

        r = dma_coerce_mask_and_coherent(&pci_dev->dev, IMGU_DMA_MASK);
        if (r) {
                dev_err(&pci_dev->dev, "failed to set DMA mask (%d)\n", r);
                return -ENODEV;
        }

        r = imgu_pci_config_setup(pci_dev);
        if (r)
                return r;

        mutex_init(&imgu->lock);
        mutex_init(&imgu->streaming_lock);
        atomic_set(&imgu->qbuf_barrier, 0);
        init_waitqueue_head(&imgu->buf_drain_wq);

        r = imgu_css_set_powerup(&pci_dev->dev, imgu->base, 200);
        if (r) {
                dev_err(&pci_dev->dev,
                        "failed to power up CSS (%d)\n", r);
                goto out_mutex_destroy;
        }

        imgu->mmu = imgu_mmu_init(&pci_dev->dev, imgu->base);
        if (IS_ERR(imgu->mmu)) {
                r = PTR_ERR(imgu->mmu);
                dev_err(&pci_dev->dev, "failed to initialize MMU (%d)\n", r);
                goto out_css_powerdown;
        }

        r = imgu_dmamap_init(imgu);
        if (r) {
                dev_err(&pci_dev->dev,
                        "failed to initialize DMA mapping (%d)\n", r);
                goto out_mmu_exit;
        }

        /* ISP programming */
        r = imgu_css_init(&pci_dev->dev, &imgu->css, imgu->base, phys_len);
        if (r) {
                dev_err(&pci_dev->dev, "failed to initialize CSS (%d)\n", r);
                goto out_dmamap_exit;
        }

        /* v4l2 sub-device registration */
        r = imgu_video_nodes_init(imgu);
        if (r) {
                dev_err(&pci_dev->dev, "failed to create V4L2 devices (%d)\n",
                        r);
                goto out_css_cleanup;
        }

        r = devm_request_threaded_irq(&pci_dev->dev, pci_dev->irq,
                                      imgu_isr, imgu_isr_threaded,
                                      IRQF_SHARED, IMGU_NAME, imgu);
        if (r) {
                dev_err(&pci_dev->dev, "failed to request IRQ (%d)\n", r);
                goto out_video_exit;
        }

        pm_runtime_put_noidle(&pci_dev->dev);
        pm_runtime_allow(&pci_dev->dev);

        return 0;

out_video_exit:
        imgu_video_nodes_exit(imgu);
out_css_cleanup:
        imgu_css_cleanup(&imgu->css);
out_dmamap_exit:
        imgu_dmamap_exit(imgu);
out_mmu_exit:
        imgu_mmu_exit(imgu->mmu);
out_css_powerdown:
        imgu_css_set_powerdown(&pci_dev->dev, imgu->base);
out_mutex_destroy:
        mutex_destroy(&imgu->streaming_lock);
        mutex_destroy(&imgu->lock);

        return r;
}

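/* Undo imgu_pci_probe(): tear down in the reverse order of creation */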
static void imgu_pci_remove(struct pci_dev *pci_dev)
{
        struct imgu_device *imgu = pci_get_drvdata(pci_dev);

        pm_runtime_forbid(&pci_dev->dev);
        pm_runtime_get_noresume(&pci_dev->dev);

        imgu_video_nodes_exit(imgu);
        imgu_css_cleanup(&imgu->css);
        imgu_css_set_powerdown(&pci_dev->dev, imgu->base);
        imgu_dmamap_exit(imgu);
        imgu_mmu_exit(imgu->mmu);
        mutex_destroy(&imgu->streaming_lock);
        mutex_destroy(&imgu->lock);
}

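/*
 * System suspend: if streaming, drain the buffers queued to CSS, stop
 * streaming and power the IMGU down; streaming restarts on resume.
 */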
static int __maybe_unused imgu_suspend(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct imgu_device *imgu = pci_get_drvdata(pci_dev);

        dev_dbg(dev, "enter %s\n", __func__);
        imgu->suspend_in_stream = imgu_css_is_streaming(&imgu->css);
        if (!imgu->suspend_in_stream)
                goto out;
        /* Block new buffers from being queued to CSS */
        atomic_set(&imgu->qbuf_barrier, 1);
        /*
         * Wait for the currently running irq handler to finish so that
         * no new buffers will be queued to the fw later.
         */
        synchronize_irq(pci_dev->irq);
        /* Wait until all buffers in CSS are done. */
        if (!wait_event_timeout(imgu->buf_drain_wq,
            imgu_css_queue_empty(&imgu->css), msecs_to_jiffies(1000)))
                dev_err(dev, "wait buffer drain timeout.\n");

        imgu_css_stop_streaming(&imgu->css);
        atomic_set(&imgu->qbuf_barrier, 0);
        imgu_powerdown(imgu);
        pm_runtime_force_suspend(dev);
out:
        dev_dbg(dev, "leave %s\n", __func__);
        return 0;
}

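/*
 * System resume: if streaming was active at suspend time, power the
 * IMGU back up, restart CSS streaming and requeue buffers to the pipes.
 */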
static int __maybe_unused imgu_resume(struct device *dev)
{
        struct imgu_device *imgu = dev_get_drvdata(dev);
        int r = 0;
        unsigned int pipe;

        dev_dbg(dev, "enter %s\n", __func__);

        if (!imgu->suspend_in_stream)
                goto out;

        pm_runtime_force_resume(dev);

        r = imgu_powerup(imgu);
        if (r) {
                dev_err(dev, "failed to power up imgu\n");
                goto out;
        }

        /* Start CSS streaming */
        r = imgu_css_start_streaming(&imgu->css);
        if (r) {
                dev_err(dev, "failed to resume css streaming (%d)", r);
                goto out;
        }

        for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
                r = imgu_queue_buffers(imgu, true, pipe);
                if (r)
                        dev_err(dev, "failed to queue buffers to pipe %d (%d)",
                                pipe, r);
        }

out:
        dev_dbg(dev, "leave %s\n", __func__);

        return r;
}

/*
 * The PCI runtime PM framework checks for the existence of driver
 * runtime PM callbacks. Provide a dummy callback here to keep runtime
 * PM from entering an error state.
 */
static __maybe_unused int imgu_rpm_dummy_cb(struct device *dev)
{
        return 0;
}

static const struct dev_pm_ops imgu_pm_ops = {
        SET_RUNTIME_PM_OPS(&imgu_rpm_dummy_cb, &imgu_rpm_dummy_cb, NULL)
        SET_SYSTEM_SLEEP_PM_OPS(&imgu_suspend, &imgu_resume)
};

static const struct pci_device_id imgu_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, IMGU_PCI_ID) },
        { 0, }
};

MODULE_DEVICE_TABLE(pci, imgu_pci_tbl);

static struct pci_driver imgu_pci_driver = {
        .name = IMGU_NAME,
        .id_table = imgu_pci_tbl,
        .probe = imgu_pci_probe,
        .remove = imgu_pci_remove,
        .driver = {
                .pm = &imgu_pm_ops,
        },
};

module_pci_driver(imgu_pci_driver);

MODULE_AUTHOR("Tuukka Toivonen <tuukka.toivonen@intel.com>");
MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
MODULE_AUTHOR("Jian Xu Zheng <jian.xu.zheng@intel.com>");
MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel ipu3_imgu PCI driver");