/* linux/drivers/media/pci/netup_unidvb/netup_unidvb_core.c */
   1/*
   2 * netup_unidvb_core.c
   3 *
   4 * Main module for NetUP Universal Dual DVB-CI
   5 *
   6 * Copyright (C) 2014 NetUP Inc.
   7 * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
   8 * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
   9 *
  10 * This program is free software; you can redistribute it and/or modify
  11 * it under the terms of the GNU General Public License as published by
  12 * the Free Software Foundation; either version 2 of the License, or
  13 * (at your option) any later version.
  14 *
  15 * This program is distributed in the hope that it will be useful,
  16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  18 * GNU General Public License for more details.
  19 */
  20
  21#include <linux/init.h>
  22#include <linux/module.h>
  23#include <linux/moduleparam.h>
  24#include <linux/kmod.h>
  25#include <linux/kernel.h>
  26#include <linux/slab.h>
  27#include <linux/interrupt.h>
  28#include <linux/delay.h>
  29#include <linux/list.h>
  30#include <media/videobuf2-vmalloc.h>
  31
  32#include "netup_unidvb.h"
  33#include "cxd2841er.h"
  34#include "horus3a.h"
  35#include "ascot2e.h"
  36#include "lnbh25.h"
  37
  38static int spi_enable;
  39module_param(spi_enable, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
  40
  41MODULE_DESCRIPTION("Driver for NetUP Dual Universal DVB CI PCIe card");
  42MODULE_AUTHOR("info@netup.ru");
  43MODULE_VERSION(NETUP_UNIDVB_VERSION);
  44MODULE_LICENSE("GPL");
  45
  46DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
  47
  48/* Avalon-MM PCI-E registers */
  49#define AVL_PCIE_IENR           0x50
  50#define AVL_PCIE_ISR            0x40
  51#define AVL_IRQ_ENABLE          0x80
  52#define AVL_IRQ_ASSERTED        0x80
  53/* GPIO registers */
  54#define GPIO_REG_IO             0x4880
  55#define GPIO_REG_IO_TOGGLE      0x4882
  56#define GPIO_REG_IO_SET         0x4884
  57#define GPIO_REG_IO_CLEAR       0x4886
  58/* GPIO bits */
  59#define GPIO_FEA_RESET          (1 << 0)
  60#define GPIO_FEB_RESET          (1 << 1)
  61#define GPIO_RFA_CTL            (1 << 2)
  62#define GPIO_RFB_CTL            (1 << 3)
  63#define GPIO_FEA_TU_RESET       (1 << 4)
  64#define GPIO_FEB_TU_RESET       (1 << 5)
  65/* DMA base address */
  66#define NETUP_DMA0_ADDR         0x4900
  67#define NETUP_DMA1_ADDR         0x4940
  68/* 8 DMA blocks * 128 packets * 188 bytes*/
  69#define NETUP_DMA_BLOCKS_COUNT  8
  70#define NETUP_DMA_PACKETS_COUNT 128
  71/* DMA status bits */
  72#define BIT_DMA_RUN             1
  73#define BIT_DMA_ERROR           2
  74#define BIT_DMA_IRQ             0x200
  75
/**
 * struct netup_dma_regs - the map of DMA module registers
 * @ctrlstat_set:	Control register, write to set control bits
 * @ctrlstat_clear:	Control register, write to clear control bits
 * @start_addr_lo:	DMA ring buffer start address, lower part
 * @start_addr_hi:	DMA ring buffer start address, higher part
 * @size:		DMA ring buffer size register
 *			Bits [0-7]:	DMA packet size, 188 bytes
 *			Bits [16-23]:	packets count in block, 128 packets
 *			Bits [24-31]:	blocks count, 8 blocks
 *			NOTE(review): netup_unidvb_dma_init() writes the
 *			packet count at shift 8, not 16 - confirm which of
 *			the two (comment or code) matches the hardware
 * @timeout:		DMA timeout in units of 8ns
 *			For example, value of 375000000 equals to 3 sec
 * @curr_addr_lo:	Current ring buffer head address, lower part
 * @curr_addr_hi:	Current ring buffer head address, higher part
 * @stat_pkt_received:	Statistic register, not tested
 * @stat_pkt_accepted:	Statistic register, not tested
 * @stat_pkt_overruns:	Statistic register, not tested
 * @stat_pkt_underruns:	Statistic register, not tested
 * @stat_fifo_overruns:	Statistic register, not tested
 *
 * The struct is packed: it is overlaid directly onto the MMIO region
 * (see netup_unidvb_dma_init()), so the field layout must match the
 * hardware register map byte for byte.
 */
struct netup_dma_regs {
	__le32	ctrlstat_set;
	__le32	ctrlstat_clear;
	__le32	start_addr_lo;
	__le32	start_addr_hi;
	__le32	size;
	__le32	timeout;
	__le32	curr_addr_lo;
	__le32	curr_addr_hi;
	__le32	stat_pkt_received;
	__le32	stat_pkt_accepted;
	__le32	stat_pkt_overruns;
	__le32	stat_pkt_underruns;
	__le32	stat_fifo_overruns;
} __packed __aligned(1);
 111
/*
 * struct netup_unidvb_buffer - driver wrapper around a vb2 capture buffer
 * @vb:   embedded videobuf2 buffer
 * @list: links the buffer into struct netup_dma's free_buffers list
 * @size: bytes of TS data copied into the buffer so far
 */
struct netup_unidvb_buffer {
	struct vb2_buffer	vb;
	struct list_head	list;
	u32			size;
};
 117
 118static int netup_unidvb_tuner_ctrl(void *priv, int is_dvb_tc);
 119static void netup_unidvb_queue_cleanup(struct netup_dma *dma);
 120
/* Sony CXD2841ER demodulator config, shared by all three delivery systems */
static struct cxd2841er_config demod_config = {
	.i2c_addr = 0xc8
};
 124
/* Sony HORUS3A DVB-S/S2 tuner config; set_tuner_priv is filled in per
 * DMA channel by netup_unidvb_dvb_init() */
static struct horus3a_config horus3a_conf = {
	.i2c_address = 0xc0,
	.xtal_freq_mhz = 16,
	.set_tuner_callback = netup_unidvb_tuner_ctrl
};
 130
/* Sony ASCOT2E DVB-T/C tuner config; set_tuner_priv is filled in per
 * DMA channel by netup_unidvb_dvb_init() */
static struct ascot2e_config ascot2e_conf = {
	.i2c_address = 0xc2,
	.set_tuner_callback = netup_unidvb_tuner_ctrl
};
 135
/* LNBH25 LNB power controller (SEC) config */
static struct lnbh25_config lnbh25_conf = {
	.i2c_address = 0x10,
	.data2_config = LNBH25_TEN | LNBH25_EXTM
};
 140
 141static int netup_unidvb_tuner_ctrl(void *priv, int is_dvb_tc)
 142{
 143        u8 reg, mask;
 144        struct netup_dma *dma = priv;
 145        struct netup_unidvb_dev *ndev;
 146
 147        if (!priv)
 148                return -EINVAL;
 149        ndev = dma->ndev;
 150        dev_dbg(&ndev->pci_dev->dev, "%s(): num %d is_dvb_tc %d\n",
 151                __func__, dma->num, is_dvb_tc);
 152        reg = readb(ndev->bmmio0 + GPIO_REG_IO);
 153        mask = (dma->num == 0) ? GPIO_RFA_CTL : GPIO_RFB_CTL;
 154        if (!is_dvb_tc)
 155                reg |= mask;
 156        else
 157                reg &= ~mask;
 158        writeb(reg, ndev->bmmio0 + GPIO_REG_IO);
 159        return 0;
 160}
 161
 162static void netup_unidvb_dev_enable(struct netup_unidvb_dev *ndev)
 163{
 164        u16 gpio_reg;
 165
 166        /* enable PCI-E interrupts */
 167        writel(AVL_IRQ_ENABLE, ndev->bmmio0 + AVL_PCIE_IENR);
 168        /* unreset frontends bits[0:1] */
 169        writeb(0x00, ndev->bmmio0 + GPIO_REG_IO);
 170        msleep(100);
 171        gpio_reg =
 172                GPIO_FEA_RESET | GPIO_FEB_RESET |
 173                GPIO_FEA_TU_RESET | GPIO_FEB_TU_RESET |
 174                GPIO_RFA_CTL | GPIO_RFB_CTL;
 175        writeb(gpio_reg, ndev->bmmio0 + GPIO_REG_IO);
 176        dev_dbg(&ndev->pci_dev->dev,
 177                "%s(): AVL_PCIE_IENR 0x%x GPIO_REG_IO 0x%x\n",
 178                __func__, readl(ndev->bmmio0 + AVL_PCIE_IENR),
 179                (int)readb(ndev->bmmio0 + GPIO_REG_IO));
 180
 181}
 182
 183static void netup_unidvb_dma_enable(struct netup_dma *dma, int enable)
 184{
 185        u32 irq_mask = (dma->num == 0 ?
 186                NETUP_UNIDVB_IRQ_DMA1 : NETUP_UNIDVB_IRQ_DMA2);
 187
 188        dev_dbg(&dma->ndev->pci_dev->dev,
 189                "%s(): DMA%d enable %d\n", __func__, dma->num, enable);
 190        if (enable) {
 191                writel(BIT_DMA_RUN, &dma->regs->ctrlstat_set);
 192                writew(irq_mask,
 193                        (u16 *)(dma->ndev->bmmio0 + REG_IMASK_SET));
 194        } else {
 195                writel(BIT_DMA_RUN, &dma->regs->ctrlstat_clear);
 196                writew(irq_mask,
 197                        (u16 *)(dma->ndev->bmmio0 + REG_IMASK_CLEAR));
 198        }
 199}
 200
/*
 * netup_dma_interrupt - per-channel DMA interrupt handler
 * @dma: DMA channel that raised the interrupt
 *
 * Reads the hardware's current write position in the DMA ring buffer,
 * computes how many new bytes arrived since the previous interrupt and
 * schedules the worker that copies them into vb2 buffers. Called from
 * netup_unidvb_isr(); always returns IRQ_HANDLED.
 */
static irqreturn_t netup_dma_interrupt(struct netup_dma *dma)
{
	u64 addr_curr;
	u32 size;
	unsigned long flags;
	struct device *dev = &dma->ndev->pci_dev->dev;

	spin_lock_irqsave(&dma->lock, flags);
	/* current write pointer; OR back the high bits that were masked
	 * off when the ring base was programmed (netup_unidvb_dma_init) */
	addr_curr = ((u64)readl(&dma->regs->curr_addr_hi) << 32) |
		(u64)readl(&dma->regs->curr_addr_lo) | dma->high_addr;
	/* clear IRQ */
	writel(BIT_DMA_IRQ, &dma->regs->ctrlstat_clear);
	/* sanity check: pointer must lie inside our half of the ring */
	if (addr_curr < dma->addr_phys ||
			addr_curr > dma->addr_phys +  dma->ring_buffer_size) {
		if (addr_curr != 0) {
			dev_err(dev,
				"%s(): addr 0x%llx not from 0x%llx:0x%llx\n",
				__func__, addr_curr, (u64)dma->addr_phys,
				(u64)(dma->addr_phys + dma->ring_buffer_size));
		}
		goto irq_handled;
	}
	/* bytes produced since the last IRQ, accounting for wrap-around */
	size = (addr_curr >= dma->addr_last) ?
		(u32)(addr_curr - dma->addr_last) :
		(u32)(dma->ring_buffer_size - (dma->addr_last - addr_curr));
	if (dma->data_size != 0) {
		/* the worker has not drained the previous chunk yet */
		printk_ratelimited("%s(): lost interrupt, data size %d\n",
			__func__, dma->data_size);
		dma->data_size += size;
	}
	if (dma->data_size == 0 || dma->data_size > dma->ring_buffer_size) {
		/* fresh chunk (or overflow): restart from the last mark */
		dma->data_size = size;
		dma->data_offset = (u32)(dma->addr_last - dma->addr_phys);
	}
	dma->addr_last = addr_curr;
	queue_work(dma->ndev->wq, &dma->work);
irq_handled:
	spin_unlock_irqrestore(&dma->lock, flags);
	return IRQ_HANDLED;
}
 242
/*
 * netup_unidvb_isr - top-level PCI interrupt handler
 * @irq:	interrupt line (unused)
 * @dev_id:	the card's struct pci_dev
 *
 * Masks PCI-E interrupt delivery, reads the interrupt status register
 * and dispatches to the I2C/SPI/DMA/CI sub-handler matching the first
 * asserted status bit, then re-enables delivery. Only one source is
 * serviced per invocation; remaining bits re-trigger the ISR.
 */
static irqreturn_t netup_unidvb_isr(int irq, void *dev_id)
{
	struct pci_dev *pci_dev = (struct pci_dev *)dev_id;
	struct netup_unidvb_dev *ndev = pci_get_drvdata(pci_dev);
	u32 reg40, reg_isr;
	irqreturn_t iret = IRQ_NONE;

	/* disable interrupts while we dispatch */
	writel(0, ndev->bmmio0 + AVL_PCIE_IENR);
	/* check IRQ source */
	reg40 = readl(ndev->bmmio0 + AVL_PCIE_ISR);
	if ((reg40 & AVL_IRQ_ASSERTED) != 0) {
		/* IRQ is being signaled */
		reg_isr = readw(ndev->bmmio0 + REG_ISR);
		if (reg_isr & NETUP_UNIDVB_IRQ_I2C0) {
			iret = netup_i2c_interrupt(&ndev->i2c[0]);
		} else if (reg_isr & NETUP_UNIDVB_IRQ_I2C1) {
			iret = netup_i2c_interrupt(&ndev->i2c[1]);
		} else if (reg_isr & NETUP_UNIDVB_IRQ_SPI) {
			iret = netup_spi_interrupt(ndev->spi);
		} else if (reg_isr & NETUP_UNIDVB_IRQ_DMA1) {
			iret = netup_dma_interrupt(&ndev->dma[0]);
		} else if (reg_isr & NETUP_UNIDVB_IRQ_DMA2) {
			iret = netup_dma_interrupt(&ndev->dma[1]);
		} else if (reg_isr & NETUP_UNIDVB_IRQ_CI) {
			iret = netup_ci_interrupt(ndev);
		} else {
			dev_err(&pci_dev->dev,
				"%s(): unknown interrupt 0x%x\n",
				__func__, reg_isr);
		}
	}
	/* re-enable interrupts */
	writel(AVL_IRQ_ENABLE, ndev->bmmio0 + AVL_PCIE_IENR);
	return iret;
}
 279
 280static int netup_unidvb_queue_setup(struct vb2_queue *vq,
 281                                    const struct v4l2_format *fmt,
 282                                    unsigned int *nbuffers,
 283                                    unsigned int *nplanes,
 284                                    unsigned int sizes[],
 285                                    void *alloc_ctxs[])
 286{
 287        struct netup_dma *dma = vb2_get_drv_priv(vq);
 288
 289        dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);
 290
 291        *nplanes = 1;
 292        if (vq->num_buffers + *nbuffers < VIDEO_MAX_FRAME)
 293                *nbuffers = VIDEO_MAX_FRAME - vq->num_buffers;
 294        sizes[0] = PAGE_ALIGN(NETUP_DMA_PACKETS_COUNT * 188);
 295        dev_dbg(&dma->ndev->pci_dev->dev, "%s() nbuffers=%d sizes[0]=%d\n",
 296                __func__, *nbuffers, sizes[0]);
 297        return 0;
 298}
 299
 300static int netup_unidvb_buf_prepare(struct vb2_buffer *vb)
 301{
 302        struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
 303        struct netup_unidvb_buffer *buf = container_of(vb,
 304                                struct netup_unidvb_buffer, vb);
 305
 306        dev_dbg(&dma->ndev->pci_dev->dev, "%s(): buf 0x%p\n", __func__, buf);
 307        buf->size = 0;
 308        return 0;
 309}
 310
 311static void netup_unidvb_buf_queue(struct vb2_buffer *vb)
 312{
 313        unsigned long flags;
 314        struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
 315        struct netup_unidvb_buffer *buf = container_of(vb,
 316                                struct netup_unidvb_buffer, vb);
 317
 318        dev_dbg(&dma->ndev->pci_dev->dev, "%s(): %p\n", __func__, buf);
 319        spin_lock_irqsave(&dma->lock, flags);
 320        list_add_tail(&buf->list, &dma->free_buffers);
 321        spin_unlock_irqrestore(&dma->lock, flags);
 322        mod_timer(&dma->timeout, jiffies + msecs_to_jiffies(1000));
 323}
 324
 325static int netup_unidvb_start_streaming(struct vb2_queue *q, unsigned int count)
 326{
 327        struct netup_dma *dma = vb2_get_drv_priv(q);
 328
 329        dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);
 330        netup_unidvb_dma_enable(dma, 1);
 331        return 0;
 332}
 333
 334static void netup_unidvb_stop_streaming(struct vb2_queue *q)
 335{
 336        struct netup_dma *dma = vb2_get_drv_priv(q);
 337
 338        dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);
 339        netup_unidvb_dma_enable(dma, 0);
 340        netup_unidvb_queue_cleanup(dma);
 341}
 342
/* videobuf2 queue operations shared by all TS capture queues */
static struct vb2_ops dvb_qops = {
	.queue_setup		= netup_unidvb_queue_setup,
	.buf_prepare		= netup_unidvb_buf_prepare,
	.buf_queue		= netup_unidvb_buf_queue,
	.start_streaming	= netup_unidvb_start_streaming,
	.stop_streaming		= netup_unidvb_stop_streaming,
};
 350
 351static int netup_unidvb_queue_init(struct netup_dma *dma,
 352                                   struct vb2_queue *vb_queue)
 353{
 354        int res;
 355
 356        /* Init videobuf2 queue structure */
 357        vb_queue->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 358        vb_queue->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
 359        vb_queue->drv_priv = dma;
 360        vb_queue->buf_struct_size = sizeof(struct netup_unidvb_buffer);
 361        vb_queue->ops = &dvb_qops;
 362        vb_queue->mem_ops = &vb2_vmalloc_memops;
 363        vb_queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 364        res = vb2_queue_init(vb_queue);
 365        if (res != 0) {
 366                dev_err(&dma->ndev->pci_dev->dev,
 367                        "%s(): vb2_queue_init failed (%d)\n", __func__, res);
 368        }
 369        return res;
 370}
 371
 372static int netup_unidvb_dvb_init(struct netup_unidvb_dev *ndev,
 373                                 int num)
 374{
 375        struct vb2_dvb_frontend *fe0, *fe1, *fe2;
 376
 377        if (num < 0 || num > 1) {
 378                dev_dbg(&ndev->pci_dev->dev,
 379                        "%s(): unable to init DVB bus %d\n", __func__, num);
 380                return -ENODEV;
 381        }
 382        mutex_init(&ndev->frontends[num].lock);
 383        INIT_LIST_HEAD(&ndev->frontends[num].felist);
 384        if (vb2_dvb_alloc_frontend(&ndev->frontends[num], 1) == NULL ||
 385                vb2_dvb_alloc_frontend(
 386                        &ndev->frontends[num], 2) == NULL ||
 387                vb2_dvb_alloc_frontend(
 388                        &ndev->frontends[num], 3) == NULL) {
 389                dev_dbg(&ndev->pci_dev->dev,
 390                        "%s(): unable to to alllocate vb2_dvb_frontend\n",
 391                        __func__);
 392                return -ENOMEM;
 393        }
 394        fe0 = vb2_dvb_get_frontend(&ndev->frontends[num], 1);
 395        fe1 = vb2_dvb_get_frontend(&ndev->frontends[num], 2);
 396        fe2 = vb2_dvb_get_frontend(&ndev->frontends[num], 3);
 397        if (fe0 == NULL || fe1 == NULL || fe2 == NULL) {
 398                dev_dbg(&ndev->pci_dev->dev,
 399                        "%s(): frontends has not been allocated\n", __func__);
 400                return -EINVAL;
 401        }
 402        netup_unidvb_queue_init(&ndev->dma[num], &fe0->dvb.dvbq);
 403        netup_unidvb_queue_init(&ndev->dma[num], &fe1->dvb.dvbq);
 404        netup_unidvb_queue_init(&ndev->dma[num], &fe2->dvb.dvbq);
 405        fe0->dvb.name = "netup_fe0";
 406        fe1->dvb.name = "netup_fe1";
 407        fe2->dvb.name = "netup_fe2";
 408        fe0->dvb.frontend = dvb_attach(cxd2841er_attach_s,
 409                &demod_config, &ndev->i2c[num].adap);
 410        if (fe0->dvb.frontend == NULL) {
 411                dev_dbg(&ndev->pci_dev->dev,
 412                        "%s(): unable to attach DVB-S/S2 frontend\n",
 413                        __func__);
 414                goto frontend_detach;
 415        }
 416        horus3a_conf.set_tuner_priv = &ndev->dma[num];
 417        if (!dvb_attach(horus3a_attach, fe0->dvb.frontend,
 418                        &horus3a_conf, &ndev->i2c[num].adap)) {
 419                dev_dbg(&ndev->pci_dev->dev,
 420                        "%s(): unable to attach DVB-S/S2 tuner frontend\n",
 421                        __func__);
 422                goto frontend_detach;
 423        }
 424        if (!dvb_attach(lnbh25_attach, fe0->dvb.frontend,
 425                        &lnbh25_conf, &ndev->i2c[num].adap)) {
 426                dev_dbg(&ndev->pci_dev->dev,
 427                        "%s(): unable to attach SEC frontend\n", __func__);
 428                goto frontend_detach;
 429        }
 430        /* DVB-T/T2 frontend */
 431        fe1->dvb.frontend = dvb_attach(cxd2841er_attach_t,
 432                &demod_config, &ndev->i2c[num].adap);
 433        if (fe1->dvb.frontend == NULL) {
 434                dev_dbg(&ndev->pci_dev->dev,
 435                        "%s(): unable to attach DVB-T frontend\n", __func__);
 436                goto frontend_detach;
 437        }
 438        fe1->dvb.frontend->id = 1;
 439        ascot2e_conf.set_tuner_priv = &ndev->dma[num];
 440        if (!dvb_attach(ascot2e_attach, fe1->dvb.frontend,
 441                        &ascot2e_conf, &ndev->i2c[num].adap)) {
 442                dev_dbg(&ndev->pci_dev->dev,
 443                        "%s(): unable to attach DVB-T tuner frontend\n",
 444                        __func__);
 445                goto frontend_detach;
 446        }
 447        /* DVB-C/C2 frontend */
 448        fe2->dvb.frontend = dvb_attach(cxd2841er_attach_c,
 449                                &demod_config, &ndev->i2c[num].adap);
 450        if (fe2->dvb.frontend == NULL) {
 451                dev_dbg(&ndev->pci_dev->dev,
 452                        "%s(): unable to attach DVB-C frontend\n", __func__);
 453                goto frontend_detach;
 454        }
 455        fe2->dvb.frontend->id = 2;
 456        if (!dvb_attach(ascot2e_attach, fe2->dvb.frontend,
 457                        &ascot2e_conf, &ndev->i2c[num].adap)) {
 458                dev_dbg(&ndev->pci_dev->dev,
 459                        "%s(): unable to attach DVB-T/C tuner frontend\n",
 460                        __func__);
 461                goto frontend_detach;
 462        }
 463
 464        if (vb2_dvb_register_bus(&ndev->frontends[num],
 465                        THIS_MODULE, NULL,
 466                        &ndev->pci_dev->dev, adapter_nr, 1)) {
 467                dev_dbg(&ndev->pci_dev->dev,
 468                        "%s(): unable to register DVB bus %d\n",
 469                        __func__, num);
 470                goto frontend_detach;
 471        }
 472        dev_info(&ndev->pci_dev->dev, "DVB init done, num=%d\n", num);
 473        return 0;
 474frontend_detach:
 475        vb2_dvb_dealloc_frontends(&ndev->frontends[num]);
 476        return -EINVAL;
 477}
 478
 479static void netup_unidvb_dvb_fini(struct netup_unidvb_dev *ndev, int num)
 480{
 481        if (num < 0 || num > 1) {
 482                dev_err(&ndev->pci_dev->dev,
 483                        "%s(): unable to unregister DVB bus %d\n",
 484                        __func__, num);
 485                return;
 486        }
 487        vb2_dvb_unregister_bus(&ndev->frontends[num]);
 488        dev_info(&ndev->pci_dev->dev,
 489                "%s(): DVB bus %d unregistered\n", __func__, num);
 490}
 491
 492static int netup_unidvb_dvb_setup(struct netup_unidvb_dev *ndev)
 493{
 494        int res;
 495
 496        res = netup_unidvb_dvb_init(ndev, 0);
 497        if (res)
 498                return res;
 499        res = netup_unidvb_dvb_init(ndev, 1);
 500        if (res) {
 501                netup_unidvb_dvb_fini(ndev, 0);
 502                return res;
 503        }
 504        return 0;
 505}
 506
/*
 * netup_unidvb_ring_copy - move captured TS data from the DMA ring buffer
 * @dma:	DMA channel to drain
 * @buf:	destination vb2 buffer (capacity 128 * 188 bytes)
 *
 * Copies at most the free space remaining in @buf out of the ring. When
 * the pending data wraps past the end of the ring the copy is done in
 * two memcpy_fromio() passes (tail of ring, then head). Updates
 * @buf->size and the channel's data_offset/data_size bookkeeping.
 * Caller must hold dma->lock (see netup_unidvb_dma_worker()).
 *
 * Returns 0 on success, -EINVAL when the vb2 plane has no vaddr.
 */
static int netup_unidvb_ring_copy(struct netup_dma *dma,
				  struct netup_unidvb_buffer *buf)
{
	u32 copy_bytes, ring_bytes;
	u32 buff_bytes = NETUP_DMA_PACKETS_COUNT * 188 - buf->size;
	u8 *p = vb2_plane_vaddr(&buf->vb, 0);
	struct netup_unidvb_dev *ndev = dma->ndev;

	if (p == NULL) {
		dev_err(&ndev->pci_dev->dev,
			"%s(): buffer is NULL\n", __func__);
		return -EINVAL;
	}
	p += buf->size;
	/* first pass: data between data_offset and the end of the ring */
	if (dma->data_offset + dma->data_size > dma->ring_buffer_size) {
		ring_bytes = dma->ring_buffer_size - dma->data_offset;
		copy_bytes = (ring_bytes > buff_bytes) ?
			buff_bytes : ring_bytes;
		memcpy_fromio(p, dma->addr_virt + dma->data_offset, copy_bytes);
		p += copy_bytes;
		buf->size += copy_bytes;
		buff_bytes -= copy_bytes;
		dma->data_size -= copy_bytes;
		dma->data_offset += copy_bytes;
		if (dma->data_offset == dma->ring_buffer_size)
			dma->data_offset = 0;
	}
	/* second pass: remaining (non-wrapping) data, if @buf has room */
	if (buff_bytes > 0) {
		ring_bytes = dma->data_size;
		copy_bytes = (ring_bytes > buff_bytes) ?
				buff_bytes : ring_bytes;
		memcpy_fromio(p, dma->addr_virt + dma->data_offset, copy_bytes);
		buf->size += copy_bytes;
		dma->data_size -= copy_bytes;
		dma->data_offset += copy_bytes;
		if (dma->data_offset == dma->ring_buffer_size)
			dma->data_offset = 0;
	}
	return 0;
}
 547
/*
 * netup_unidvb_dma_worker - drain the DMA ring into vb2 buffers
 * @work: work_struct embedded in struct netup_dma
 *
 * Scheduled by netup_dma_interrupt(). Copies dma->data_size bytes from
 * the ring into buffers taken from the free list; a buffer is completed
 * (timestamped and handed to vb2 as DONE) once it holds a full DMA block
 * of 128 * 188 bytes. Any data left when the free list runs dry is
 * discarded: data_size is reset to 0 on every exit path.
 */
static void netup_unidvb_dma_worker(struct work_struct *work)
{
	struct netup_dma *dma = container_of(work, struct netup_dma, work);
	struct netup_unidvb_dev *ndev = dma->ndev;
	struct netup_unidvb_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&dma->lock, flags);
	if (dma->data_size == 0) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): data_size == 0\n", __func__);
		goto work_done;
	}
	while (dma->data_size > 0) {
		if (list_empty(&dma->free_buffers)) {
			dev_dbg(&ndev->pci_dev->dev,
				"%s(): no free buffers\n", __func__);
			goto work_done;
		}
		buf = list_first_entry(&dma->free_buffers,
			struct netup_unidvb_buffer, list);
		if (buf->size >= NETUP_DMA_PACKETS_COUNT * 188) {
			/* should not happen: a full buffer is removed from
			 * the free list as soon as it fills up below */
			dev_dbg(&ndev->pci_dev->dev,
				"%s(): buffer overflow, size %d\n",
				__func__, buf->size);
			goto work_done;
		}
		if (netup_unidvb_ring_copy(dma, buf))
			goto work_done;
		if (buf->size == NETUP_DMA_PACKETS_COUNT * 188) {
			/* buffer filled completely - hand it to vb2 */
			list_del(&buf->list);
			dev_dbg(&ndev->pci_dev->dev,
				"%s(): buffer %p done, size %d\n",
				__func__, buf, buf->size);
			v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
			vb2_set_plane_payload(&buf->vb, 0, buf->size);
			vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
		}
	}
work_done:
	dma->data_size = 0;
	spin_unlock_irqrestore(&dma->lock, flags);
}
 591
 592static void netup_unidvb_queue_cleanup(struct netup_dma *dma)
 593{
 594        struct netup_unidvb_buffer *buf;
 595        unsigned long flags;
 596
 597        spin_lock_irqsave(&dma->lock, flags);
 598        while (!list_empty(&dma->free_buffers)) {
 599                buf = list_first_entry(&dma->free_buffers,
 600                        struct netup_unidvb_buffer, list);
 601                list_del(&buf->list);
 602                vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
 603        }
 604        spin_unlock_irqrestore(&dma->lock, flags);
 605}
 606
 607static void netup_unidvb_dma_timeout(unsigned long data)
 608{
 609        struct netup_dma *dma = (struct netup_dma *)data;
 610        struct netup_unidvb_dev *ndev = dma->ndev;
 611
 612        dev_dbg(&ndev->pci_dev->dev, "%s()\n", __func__);
 613        netup_unidvb_queue_cleanup(dma);
 614}
 615
/*
 * netup_unidvb_dma_init - initialize one DMA channel
 * @ndev:	device context
 * @num:	channel number, 0 or 1
 *
 * Gives the channel half of the device's DMA area, programs the ring
 * geometry (8 blocks * 128 packets * 188 bytes), base address and
 * hardware timeout into the channel registers and clears any pending
 * DMA IRQ. Returns 0 on success or -ENODEV for an invalid @num.
 */
static int netup_unidvb_dma_init(struct netup_unidvb_dev *ndev, int num)
{
	struct netup_dma *dma;
	struct device *dev = &ndev->pci_dev->dev;

	if (num < 0 || num > 1) {
		dev_err(dev, "%s(): unable to register DMA%d\n",
			__func__, num);
		return -ENODEV;
	}
	dma = &ndev->dma[num];
	dev_info(dev, "%s(): starting DMA%d\n", __func__, num);
	dma->num = num;
	dma->ndev = ndev;
	spin_lock_init(&dma->lock);
	INIT_WORK(&dma->work, netup_unidvb_dma_worker);
	INIT_LIST_HEAD(&dma->free_buffers);
	/* NOTE(review): function/data are assigned before init_timer();
	 * this assumes init_timer() leaves them intact - setup_timer()
	 * would make the intent explicit */
	dma->timeout.function = netup_unidvb_dma_timeout;
	dma->timeout.data = (unsigned long)dma;
	init_timer(&dma->timeout);
	/* each channel owns half of the shared DMA area */
	dma->ring_buffer_size = ndev->dma_size / 2;
	dma->addr_virt = ndev->dma_virt + dma->ring_buffer_size * num;
	dma->addr_phys = (dma_addr_t)((u64)ndev->dma_phys +
		dma->ring_buffer_size * num);
	dev_info(dev, "%s(): DMA%d buffer virt/phys 0x%p/0x%llx size %d\n",
		__func__, num, dma->addr_virt,
		(unsigned long long)dma->addr_phys,
		dma->ring_buffer_size);
	memset_io(dma->addr_virt, 0, dma->ring_buffer_size);
	dma->addr_last = dma->addr_phys;
	/* the two top address bits are written to reg 0x1000 below and
	 * OR-ed back into the pointer by netup_dma_interrupt() */
	dma->high_addr = (u32)(dma->addr_phys & 0xC0000000);
	dma->regs = (struct netup_dma_regs *)(num == 0 ?
		ndev->bmmio0 + NETUP_DMA0_ADDR :
		ndev->bmmio0 + NETUP_DMA1_ADDR);
	/* ring geometry: blocks | packets per block | packet size */
	writel((NETUP_DMA_BLOCKS_COUNT << 24) |
		(NETUP_DMA_PACKETS_COUNT << 8) | 188, &dma->regs->size);
	writel((u32)(dma->addr_phys & 0x3FFFFFFF), &dma->regs->start_addr_lo);
	writel(0, &dma->regs->start_addr_hi);
	writel(dma->high_addr, ndev->bmmio0 + 0x1000);
	/* 375000000 * 8ns = 3 sec hardware DMA timeout */
	writel(375000000, &dma->regs->timeout);
	msleep(1000);
	writel(BIT_DMA_IRQ, &dma->regs->ctrlstat_clear);
	return 0;
}
 660
 661static void netup_unidvb_dma_fini(struct netup_unidvb_dev *ndev, int num)
 662{
 663        struct netup_dma *dma;
 664
 665        if (num < 0 || num > 1)
 666                return;
 667        dev_dbg(&ndev->pci_dev->dev, "%s(): num %d\n", __func__, num);
 668        dma = &ndev->dma[num];
 669        netup_unidvb_dma_enable(dma, 0);
 670        msleep(50);
 671        cancel_work_sync(&dma->work);
 672        del_timer(&dma->timeout);
 673}
 674
 675static int netup_unidvb_dma_setup(struct netup_unidvb_dev *ndev)
 676{
 677        int res;
 678
 679        res = netup_unidvb_dma_init(ndev, 0);
 680        if (res)
 681                return res;
 682        res = netup_unidvb_dma_init(ndev, 1);
 683        if (res) {
 684                netup_unidvb_dma_fini(ndev, 0);
 685                return res;
 686        }
 687        netup_unidvb_dma_enable(&ndev->dma[0], 0);
 688        netup_unidvb_dma_enable(&ndev->dma[1], 0);
 689        return 0;
 690}
 691
 692static int netup_unidvb_ci_setup(struct netup_unidvb_dev *ndev,
 693                                 struct pci_dev *pci_dev)
 694{
 695        int res;
 696
 697        writew(NETUP_UNIDVB_IRQ_CI, ndev->bmmio0 + REG_IMASK_SET);
 698        res = netup_unidvb_ci_register(ndev, 0, pci_dev);
 699        if (res)
 700                return res;
 701        res = netup_unidvb_ci_register(ndev, 1, pci_dev);
 702        if (res)
 703                netup_unidvb_ci_unregister(ndev, 0);
 704        return res;
 705}
 706
 707static int netup_unidvb_request_mmio(struct pci_dev *pci_dev)
 708{
 709        if (!request_mem_region(pci_resource_start(pci_dev, 0),
 710                        pci_resource_len(pci_dev, 0), NETUP_UNIDVB_NAME)) {
 711                dev_err(&pci_dev->dev,
 712                        "%s(): unable to request MMIO bar 0 at 0x%llx\n",
 713                        __func__,
 714                        (unsigned long long)pci_resource_start(pci_dev, 0));
 715                return -EBUSY;
 716        }
 717        if (!request_mem_region(pci_resource_start(pci_dev, 1),
 718                        pci_resource_len(pci_dev, 1), NETUP_UNIDVB_NAME)) {
 719                dev_err(&pci_dev->dev,
 720                        "%s(): unable to request MMIO bar 1 at 0x%llx\n",
 721                        __func__,
 722                        (unsigned long long)pci_resource_start(pci_dev, 1));
 723                release_mem_region(pci_resource_start(pci_dev, 0),
 724                        pci_resource_len(pci_dev, 0));
 725                return -EBUSY;
 726        }
 727        return 0;
 728}
 729
 730static int netup_unidvb_request_modules(struct device *dev)
 731{
 732        static const char * const modules[] = {
 733                "lnbh25", "ascot2e", "horus3a", "cxd2841er", NULL
 734        };
 735        const char * const *curr_mod = modules;
 736        int err;
 737
 738        while (*curr_mod != NULL) {
 739                err = request_module(*curr_mod);
 740                if (err) {
 741                        dev_warn(dev, "request_module(%s) failed: %d\n",
 742                                *curr_mod, err);
 743                }
 744                ++curr_mod;
 745        }
 746        return 0;
 747}
 748
 749static int netup_unidvb_initdev(struct pci_dev *pci_dev,
 750                                const struct pci_device_id *pci_id)
 751{
 752        u8 board_revision;
 753        u16 board_vendor;
 754        struct netup_unidvb_dev *ndev;
 755        int old_firmware = 0;
 756
 757        netup_unidvb_request_modules(&pci_dev->dev);
 758
 759        /* Check card revision */
 760        if (pci_dev->revision != NETUP_PCI_DEV_REVISION) {
 761                dev_err(&pci_dev->dev,
 762                        "netup_unidvb: expected card revision %d, got %d\n",
 763                        NETUP_PCI_DEV_REVISION, pci_dev->revision);
 764                dev_err(&pci_dev->dev,
 765                        "Please upgrade firmware!\n");
 766                dev_err(&pci_dev->dev,
 767                        "Instructions on http://www.netup.tv\n");
 768                old_firmware = 1;
 769                spi_enable = 1;
 770        }
 771
 772        /* allocate device context */
 773        ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
 774
 775        if (!ndev)
 776                goto dev_alloc_err;
 777        memset(ndev, 0, sizeof(*ndev));
 778        ndev->old_fw = old_firmware;
 779        ndev->wq = create_singlethread_workqueue(NETUP_UNIDVB_NAME);
 780        if (!ndev->wq) {
 781                dev_err(&pci_dev->dev,
 782                        "%s(): unable to create workqueue\n", __func__);
 783                goto wq_create_err;
 784        }
 785        ndev->pci_dev = pci_dev;
 786        ndev->pci_bus = pci_dev->bus->number;
 787        ndev->pci_slot = PCI_SLOT(pci_dev->devfn);
 788        ndev->pci_func = PCI_FUNC(pci_dev->devfn);
 789        ndev->board_num = ndev->pci_bus*10 + ndev->pci_slot;
 790        pci_set_drvdata(pci_dev, ndev);
 791        /* PCI init */
 792        dev_info(&pci_dev->dev, "%s(): PCI device (%d). Bus:0x%x Slot:0x%x\n",
 793                __func__, ndev->board_num, ndev->pci_bus, ndev->pci_slot);
 794
 795        if (pci_enable_device(pci_dev)) {
 796                dev_err(&pci_dev->dev, "%s(): pci_enable_device failed\n",
 797                        __func__);
 798                goto pci_enable_err;
 799        }
 800        /* read PCI info */
 801        pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &board_revision);
 802        pci_read_config_word(pci_dev, PCI_VENDOR_ID, &board_vendor);
 803        if (board_vendor != NETUP_VENDOR_ID) {
 804                dev_err(&pci_dev->dev, "%s(): unknown board vendor 0x%x",
 805                        __func__, board_vendor);
 806                goto pci_detect_err;
 807        }
 808        dev_info(&pci_dev->dev,
 809                "%s(): board vendor 0x%x, revision 0x%x\n",
 810                __func__, board_vendor, board_revision);
 811        pci_set_master(pci_dev);
 812        if (!pci_dma_supported(pci_dev, 0xffffffff)) {
 813                dev_err(&pci_dev->dev,
 814                        "%s(): 32bit PCI DMA is not supported\n", __func__);
 815                goto pci_detect_err;
 816        }
 817        dev_info(&pci_dev->dev, "%s(): using 32bit PCI DMA\n", __func__);
 818        /* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */
 819        pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL,
 820                PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
 821                PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
 822        /* Adjust PCIe completion timeout. */
 823        pcie_capability_clear_and_set_word(pci_dev,
 824                PCI_EXP_DEVCTL2, 0xf, 0x2);
 825
 826        if (netup_unidvb_request_mmio(pci_dev)) {
 827                dev_err(&pci_dev->dev,
 828                        "%s(): unable to request MMIO regions\n", __func__);
 829                goto pci_detect_err;
 830        }
 831        ndev->lmmio0 = ioremap(pci_resource_start(pci_dev, 0),
 832                pci_resource_len(pci_dev, 0));
 833        if (!ndev->lmmio0) {
 834                dev_err(&pci_dev->dev,
 835                        "%s(): unable to remap MMIO bar 0\n", __func__);
 836                goto pci_bar0_error;
 837        }
 838        ndev->lmmio1 = ioremap(pci_resource_start(pci_dev, 1),
 839                pci_resource_len(pci_dev, 1));
 840        if (!ndev->lmmio1) {
 841                dev_err(&pci_dev->dev,
 842                        "%s(): unable to remap MMIO bar 1\n", __func__);
 843                goto pci_bar1_error;
 844        }
 845        ndev->bmmio0 = (u8 __iomem *)ndev->lmmio0;
 846        ndev->bmmio1 = (u8 __iomem *)ndev->lmmio1;
 847        dev_info(&pci_dev->dev,
 848                "%s(): PCI MMIO at 0x%p (%d); 0x%p (%d); IRQ %d",
 849                __func__,
 850                ndev->lmmio0, (u32)pci_resource_len(pci_dev, 0),
 851                ndev->lmmio1, (u32)pci_resource_len(pci_dev, 1),
 852                pci_dev->irq);
 853        if (request_irq(pci_dev->irq, netup_unidvb_isr, IRQF_SHARED,
 854                        "netup_unidvb", pci_dev) < 0) {
 855                dev_err(&pci_dev->dev,
 856                        "%s(): can't get IRQ %d\n", __func__, pci_dev->irq);
 857                goto irq_request_err;
 858        }
 859        ndev->dma_size = 2 * 188 *
 860                NETUP_DMA_BLOCKS_COUNT * NETUP_DMA_PACKETS_COUNT;
 861        ndev->dma_virt = dma_alloc_coherent(&pci_dev->dev,
 862                ndev->dma_size, &ndev->dma_phys, GFP_KERNEL);
 863        if (!ndev->dma_virt) {
 864                dev_err(&pci_dev->dev, "%s(): unable to allocate DMA buffer\n",
 865                        __func__);
 866                goto dma_alloc_err;
 867        }
 868        netup_unidvb_dev_enable(ndev);
 869        if (spi_enable && netup_spi_init(ndev)) {
 870                dev_warn(&pci_dev->dev,
 871                        "netup_unidvb: SPI flash setup failed\n");
 872                goto spi_setup_err;
 873        }
 874        if (old_firmware) {
 875                dev_err(&pci_dev->dev,
 876                        "netup_unidvb: card initialization was incomplete\n");
 877                return 0;
 878        }
 879        if (netup_i2c_register(ndev)) {
 880                dev_err(&pci_dev->dev, "netup_unidvb: I2C setup failed\n");
 881                goto i2c_setup_err;
 882        }
 883        /* enable I2C IRQs */
 884        writew(NETUP_UNIDVB_IRQ_I2C0 | NETUP_UNIDVB_IRQ_I2C1,
 885                ndev->bmmio0 + REG_IMASK_SET);
 886        usleep_range(5000, 10000);
 887        if (netup_unidvb_dvb_setup(ndev)) {
 888                dev_err(&pci_dev->dev, "netup_unidvb: DVB setup failed\n");
 889                goto dvb_setup_err;
 890        }
 891        if (netup_unidvb_ci_setup(ndev, pci_dev)) {
 892                dev_err(&pci_dev->dev, "netup_unidvb: CI setup failed\n");
 893                goto ci_setup_err;
 894        }
 895        if (netup_unidvb_dma_setup(ndev)) {
 896                dev_err(&pci_dev->dev, "netup_unidvb: DMA setup failed\n");
 897                goto dma_setup_err;
 898        }
 899        dev_info(&pci_dev->dev,
 900                "netup_unidvb: device has been initialized\n");
 901        return 0;
 902dma_setup_err:
 903        netup_unidvb_ci_unregister(ndev, 0);
 904        netup_unidvb_ci_unregister(ndev, 1);
 905ci_setup_err:
 906        netup_unidvb_dvb_fini(ndev, 0);
 907        netup_unidvb_dvb_fini(ndev, 1);
 908dvb_setup_err:
 909        netup_i2c_unregister(ndev);
 910i2c_setup_err:
 911        if (ndev->spi)
 912                netup_spi_release(ndev);
 913spi_setup_err:
 914        dma_free_coherent(&pci_dev->dev, ndev->dma_size,
 915                        ndev->dma_virt, ndev->dma_phys);
 916dma_alloc_err:
 917        free_irq(pci_dev->irq, pci_dev);
 918irq_request_err:
 919        iounmap(ndev->lmmio1);
 920pci_bar1_error:
 921        iounmap(ndev->lmmio0);
 922pci_bar0_error:
 923        release_mem_region(pci_resource_start(pci_dev, 0),
 924                pci_resource_len(pci_dev, 0));
 925        release_mem_region(pci_resource_start(pci_dev, 1),
 926                pci_resource_len(pci_dev, 1));
 927pci_detect_err:
 928        pci_disable_device(pci_dev);
 929pci_enable_err:
 930        pci_set_drvdata(pci_dev, NULL);
 931        destroy_workqueue(ndev->wq);
 932wq_create_err:
 933        kfree(ndev);
 934dev_alloc_err:
 935        dev_err(&pci_dev->dev,
 936                "%s(): failed to initizalize device\n", __func__);
 937        return -EIO;
 938}
 939
/*
 * netup_unidvb_finidev - PCI remove callback
 * @pci_dev:	PCI device being removed
 *
 * Tears the card down in reverse probe order: subunits first, then board
 * interrupts are masked, the DMA buffer, IRQ and MMIO mappings are
 * released, and finally the PCI device and device context are dropped.
 */
static void netup_unidvb_finidev(struct pci_dev *pci_dev)
{
	struct netup_unidvb_dev *ndev = pci_get_drvdata(pci_dev);

	dev_info(&pci_dev->dev, "%s(): trying to stop device\n", __func__);
	/* With old firmware, probe skipped DMA/CI/DVB/I2C registration,
	 * so those subunits must only be unregistered for a full init. */
	if (!ndev->old_fw) {
		netup_unidvb_dma_fini(ndev, 0);
		netup_unidvb_dma_fini(ndev, 1);
		netup_unidvb_ci_unregister(ndev, 0);
		netup_unidvb_ci_unregister(ndev, 1);
		netup_unidvb_dvb_fini(ndev, 0);
		netup_unidvb_dvb_fini(ndev, 1);
		netup_i2c_unregister(ndev);
	}
	if (ndev->spi)
		netup_spi_release(ndev);
	/* mask all board interrupt sources before freeing the IRQ line */
	writew(0xffff, ndev->bmmio0 + REG_IMASK_CLEAR);
	dma_free_coherent(&ndev->pci_dev->dev, ndev->dma_size,
			ndev->dma_virt, ndev->dma_phys);
	free_irq(pci_dev->irq, pci_dev);
	iounmap(ndev->lmmio0);
	iounmap(ndev->lmmio1);
	release_mem_region(pci_resource_start(pci_dev, 0),
		pci_resource_len(pci_dev, 0));
	release_mem_region(pci_resource_start(pci_dev, 1),
		pci_resource_len(pci_dev, 1));
	pci_disable_device(pci_dev);
	pci_set_drvdata(pci_dev, NULL);
	destroy_workqueue(ndev->wq);
	kfree(ndev);
	dev_info(&pci_dev->dev,
		"%s(): device has been successfully stopped\n", __func__);
}
 973
 974
 975static struct pci_device_id netup_unidvb_pci_tbl[] = {
 976        { PCI_DEVICE(0x1b55, 0x18f6) },
 977        { 0, }
 978};
 979MODULE_DEVICE_TABLE(pci, netup_unidvb_pci_tbl);
 980
 981static struct pci_driver netup_unidvb_pci_driver = {
 982        .name     = "netup_unidvb",
 983        .id_table = netup_unidvb_pci_tbl,
 984        .probe    = netup_unidvb_initdev,
 985        .remove   = netup_unidvb_finidev,
 986        .suspend  = NULL,
 987        .resume   = NULL,
 988};
 989
 990static int __init netup_unidvb_init(void)
 991{
 992        return pci_register_driver(&netup_unidvb_pci_driver);
 993}
 994
 995static void __exit netup_unidvb_fini(void)
 996{
 997        pci_unregister_driver(&netup_unidvb_pci_driver);
 998}
 999
1000module_init(netup_unidvb_init);
1001module_exit(netup_unidvb_fini);
1002