linux/drivers/staging/crystalhd/crystalhd_hw.c
   1/***************************************************************************
   2 * Copyright (c) 2005-2009, Broadcom Corporation.
   3 *
   4 *  Name: crystalhd_hw.c
   5 *
   6 *  Description:
   7 *              BCM70010 Linux driver HW layer.
   8 *
   9 **********************************************************************
  10 * This file is part of the crystalhd device driver.
  11 *
  12 * This driver is free software; you can redistribute it and/or modify
  13 * it under the terms of the GNU General Public License as published by
  14 * the Free Software Foundation, version 2 of the License.
  15 *
  16 * This driver is distributed in the hope that it will be useful,
  17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  19 * GNU General Public License for more details.
  20 *
  21 * You should have received a copy of the GNU General Public License
  22 * along with this driver.  If not, see <http://www.gnu.org/licenses/>.
  23 **********************************************************************/
  24
  25#include <linux/pci.h>
  26#include <linux/slab.h>
  27#include <linux/delay.h>
  28#include "crystalhd_hw.h"
  29
  30/* Functions internal to this file */
  31
  32static void crystalhd_enable_uarts(struct crystalhd_adp *adp)
  33{
  34        bc_dec_reg_wr(adp, UartSelectA, BSVS_UART_STREAM);
  35        bc_dec_reg_wr(adp, UartSelectB, BSVS_UART_DEC_OUTER);
  36}
  37
  38
  39static void crystalhd_start_dram(struct crystalhd_adp *adp)
  40{
  41        bc_dec_reg_wr(adp, SDRAM_PARAM, ((40 / 5 - 1) <<  0) |
  42        /* tras (40ns tras)/(5ns period) -1 ((15/5 - 1) <<  4) | // trcd */
  43                      ((15 / 5 - 1) <<  7) |    /* trp */
  44                      ((10 / 5 - 1) << 10) |    /* trrd */
  45                      ((15 / 5 + 1) << 12) |    /* twr */
  46                      ((2 + 1) << 16) |         /* twtr */
  47                      ((70 / 5 - 2) << 19) |    /* trfc */
  48                      (0 << 23));
  49
  50        bc_dec_reg_wr(adp, SDRAM_PRECHARGE, 0);
  51        bc_dec_reg_wr(adp, SDRAM_EXT_MODE, 2);
  52        bc_dec_reg_wr(adp, SDRAM_MODE, 0x132);
  53        bc_dec_reg_wr(adp, SDRAM_PRECHARGE, 0);
  54        bc_dec_reg_wr(adp, SDRAM_REFRESH, 0);
  55        bc_dec_reg_wr(adp, SDRAM_REFRESH, 0);
  56        bc_dec_reg_wr(adp, SDRAM_MODE, 0x32);
  57        /* setting the refresh rate here */
  58        bc_dec_reg_wr(adp, SDRAM_REF_PARAM, ((1 << 12) | 96));
  59}
  60
  61
  62static bool crystalhd_bring_out_of_rst(struct crystalhd_adp *adp)
  63{
  64        union link_misc_perst_deco_ctrl rst_deco_cntrl;
  65        union link_misc_perst_clk_ctrl rst_clk_cntrl;
  66        uint32_t temp;
  67
  68        /*
  69         * Link clocks: MISC_PERST_CLOCK_CTRL Clear PLL power down bit,
  70         * delay to allow PLL to lock Clear alternate clock, stop clock bits
  71         */
  72        rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
  73        rst_clk_cntrl.pll_pwr_dn = 0;
  74        crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
  75        msleep_interruptible(50);
  76
  77        rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
  78        rst_clk_cntrl.stop_core_clk = 0;
  79        rst_clk_cntrl.sel_alt_clk = 0;
  80
  81        crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
  82        msleep_interruptible(50);
  83
  84        /*
  85         * Bus Arbiter Timeout: GISB_ARBITER_TIMER
  86         * Set internal bus arbiter timeout to 40us based on core clock speed
  87         * (63MHz * 40us = 0x9D8)
  88         */
  89        crystalhd_reg_wr(adp, GISB_ARBITER_TIMER, 0x9D8);
  90
  91        /*
  92         * Decoder clocks: MISC_PERST_DECODER_CTRL
  93         * Enable clocks while 7412 reset is asserted, delay
  94         * De-assert 7412 reset
  95         */
  96        rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_DECODER_CTRL);
  97        rst_deco_cntrl.stop_bcm_7412_clk = 0;
  98        rst_deco_cntrl.bcm7412_rst = 1;
  99        crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL, rst_deco_cntrl.whole_reg);
 100        msleep_interruptible(10);
 101
 102        rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_DECODER_CTRL);
 103        rst_deco_cntrl.bcm7412_rst = 0;
 104        crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL, rst_deco_cntrl.whole_reg);
 105        msleep_interruptible(50);
 106
 107        /* Write 0 to OTP_CONTENT_MISC to disable all secure modes */
 108        crystalhd_reg_wr(adp, OTP_CONTENT_MISC, 0);
 109
 110        /* Clear bit 29 of 0x404 */
 111        temp = crystalhd_reg_rd(adp, PCIE_TL_TRANSACTION_CONFIGURATION);
 112        temp &= ~BC_BIT(29);
 113        crystalhd_reg_wr(adp, PCIE_TL_TRANSACTION_CONFIGURATION, temp);
 114
 115        /* 2.5V regulator must be set to 2.6 volts (+6%) */
 116        /* FIXME: jarod: what's the point of this reg read? */
 117        temp = crystalhd_reg_rd(adp, MISC_PERST_VREG_CTRL);
 118        crystalhd_reg_wr(adp, MISC_PERST_VREG_CTRL, 0xF3);
 119
 120        return true;
 121}
 122
 123static bool crystalhd_put_in_reset(struct crystalhd_adp *adp)
 124{
 125        union link_misc_perst_deco_ctrl rst_deco_cntrl;
 126        union link_misc_perst_clk_ctrl  rst_clk_cntrl;
 127        uint32_t                  temp;
 128
 129        /*
 130         * Decoder clocks: MISC_PERST_DECODER_CTRL
 131         * Assert 7412 reset, delay
 132         * Assert 7412 stop clock
 133         */
 134        rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_DECODER_CTRL);
 135        rst_deco_cntrl.stop_bcm_7412_clk = 1;
 136        crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL, rst_deco_cntrl.whole_reg);
 137        msleep_interruptible(50);
 138
 139        /* Bus Arbiter Timeout: GISB_ARBITER_TIMER
 140         * Set internal bus arbiter timeout to 40us based on core clock speed
 141         * (6.75MHz * 40us = 0x10E)
 142         */
 143        crystalhd_reg_wr(adp, GISB_ARBITER_TIMER, 0x10E);
 144
 145        /* Link clocks: MISC_PERST_CLOCK_CTRL
 146         * Stop core clk, delay
 147         * Set alternate clk, delay, set PLL power down
 148         */
 149        rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
 150        rst_clk_cntrl.stop_core_clk = 1;
 151        rst_clk_cntrl.sel_alt_clk = 1;
 152        crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
 153        msleep_interruptible(50);
 154
 155        rst_clk_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_CLOCK_CTRL);
 156        rst_clk_cntrl.pll_pwr_dn = 1;
 157        crystalhd_reg_wr(adp, MISC_PERST_CLOCK_CTRL, rst_clk_cntrl.whole_reg);
 158
 159        /*
 160         * Read and restore the Transaction Configuration Register
 161         * after core reset
 162         */
 163        temp = crystalhd_reg_rd(adp, PCIE_TL_TRANSACTION_CONFIGURATION);
 164
 165        /*
 166         * Link core soft reset: MISC3_RESET_CTRL
 167         * - Write BIT[0]=1 and read it back for core reset to take place
 168         */
 169        crystalhd_reg_wr(adp, MISC3_RESET_CTRL, 1);
 170        rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC3_RESET_CTRL);
 171        msleep_interruptible(50);
 172
 173        /* restore the transaction configuration register */
 174        crystalhd_reg_wr(adp, PCIE_TL_TRANSACTION_CONFIGURATION, temp);
 175
 176        return true;
 177}
 178
 179static void crystalhd_disable_interrupts(struct crystalhd_adp *adp)
 180{
 181        union intr_mask_reg   intr_mask;
 182        intr_mask.whole_reg = crystalhd_reg_rd(adp, INTR_INTR_MSK_STS_REG);
 183        intr_mask.mask_pcie_err = 1;
 184        intr_mask.mask_pcie_rbusmast_err = 1;
 185        intr_mask.mask_pcie_rgr_bridge   = 1;
 186        intr_mask.mask_rx_done = 1;
 187        intr_mask.mask_rx_err  = 1;
 188        intr_mask.mask_tx_done = 1;
 189        intr_mask.mask_tx_err  = 1;
 190        crystalhd_reg_wr(adp, INTR_INTR_MSK_SET_REG, intr_mask.whole_reg);
 191
 192        return;
 193}
 194
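    /*
     * The interrupt block exposes a set/clear pair for the same mask bits:
     * writing a bit to INTR_INTR_MSK_SET_REG masks (disables) that source,
     * while writing the same bit to INTR_INTR_MSK_CLR_REG unmasks it. The
     * two routines here therefore build identical masks and differ only in
     * the register they write.
     */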
 195static void crystalhd_enable_interrupts(struct crystalhd_adp *adp)
 196{
 197        union intr_mask_reg   intr_mask;
 198        intr_mask.whole_reg = crystalhd_reg_rd(adp, INTR_INTR_MSK_STS_REG);
 199        intr_mask.mask_pcie_err = 1;
 200        intr_mask.mask_pcie_rbusmast_err = 1;
 201        intr_mask.mask_pcie_rgr_bridge   = 1;
 202        intr_mask.mask_rx_done = 1;
 203        intr_mask.mask_rx_err  = 1;
 204        intr_mask.mask_tx_done = 1;
 205        intr_mask.mask_tx_err  = 1;
 206        crystalhd_reg_wr(adp, INTR_INTR_MSK_CLR_REG, intr_mask.whole_reg);
 207
 208        return;
 209}
 210
 211static void crystalhd_clear_errors(struct crystalhd_adp *adp)
 212{
 213        uint32_t reg;
 214
 215        /* FIXME: jarod: wouldn't we want to write a 0 to the reg? Or does the write clear the bits specified? */
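            /*
             * The read-then-write-back pattern below suggests these are
             * write-1-to-clear status registers (writing the set bits back
             * acknowledges them); this is inferred from usage here, not
             * from a register specification.
             */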
 216        reg = crystalhd_reg_rd(adp, MISC1_Y_RX_ERROR_STATUS);
 217        if (reg)
 218                crystalhd_reg_wr(adp, MISC1_Y_RX_ERROR_STATUS, reg);
 219
 220        reg = crystalhd_reg_rd(adp, MISC1_UV_RX_ERROR_STATUS);
 221        if (reg)
 222                crystalhd_reg_wr(adp, MISC1_UV_RX_ERROR_STATUS, reg);
 223
 224        reg = crystalhd_reg_rd(adp, MISC1_TX_DMA_ERROR_STATUS);
 225        if (reg)
 226                crystalhd_reg_wr(adp, MISC1_TX_DMA_ERROR_STATUS, reg);
 227}
 228
 229static void crystalhd_clear_interrupts(struct crystalhd_adp *adp)
 230{
 231        uint32_t intr_sts = crystalhd_reg_rd(adp, INTR_INTR_STATUS);
 232
 233        if (intr_sts) {
 234                crystalhd_reg_wr(adp, INTR_INTR_CLR_REG, intr_sts);
 235
 236                /* Write End Of Interrupt for PCIE */
 237                crystalhd_reg_wr(adp, INTR_EOI_CTRL, 1);
 238        }
 239}
 240
 241static void crystalhd_soft_rst(struct crystalhd_adp *adp)
 242{
 243        uint32_t val;
 244
 245        /* Assert c011 soft reset*/
 246        bc_dec_reg_wr(adp, DecHt_HostSwReset, 0x00000001);
 247        msleep_interruptible(50);
 248
 249        /* Release c011 soft reset*/
 250        bc_dec_reg_wr(adp, DecHt_HostSwReset, 0x00000000);
 251
 252        /* Disable Stuffing..*/
 253        val = crystalhd_reg_rd(adp, MISC2_GLOBAL_CTRL);
 254        val |= BC_BIT(8);
 255        crystalhd_reg_wr(adp, MISC2_GLOBAL_CTRL, val);
 256}
 257
 258static bool crystalhd_load_firmware_config(struct crystalhd_adp *adp)
 259{
 260        uint32_t i = 0, reg;
 261
 262        crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (BC_DRAM_FW_CFG_ADDR >> 19));
 263
 264        crystalhd_reg_wr(adp, AES_CMD, 0);
 265        crystalhd_reg_wr(adp, AES_CONFIG_INFO, (BC_DRAM_FW_CFG_ADDR & 0x7FFFF));
 266        crystalhd_reg_wr(adp, AES_CMD, 0x1);
 267
 268        /* FIXME: jarod: I've seen this fail, and introducing extra delays helps... */
 269        for (i = 0; i < 100; ++i) {
 270                reg = crystalhd_reg_rd(adp, AES_STATUS);
 271                if (reg & 0x1)
 272                        return true;
 273                msleep_interruptible(10);
 274        }
 275
 276        return false;
 277}
 278
 279
 280static bool crystalhd_start_device(struct crystalhd_adp *adp)
 281{
 282        uint32_t dbg_options, glb_cntrl = 0, reg_pwrmgmt = 0;
 283
 284        BCMLOG(BCMLOG_INFO, "Starting BCM70012 Device\n");
 285
 286        reg_pwrmgmt = crystalhd_reg_rd(adp, PCIE_DLL_DATA_LINK_CONTROL);
 287        reg_pwrmgmt &= ~ASPM_L1_ENABLE;
 288
 289        crystalhd_reg_wr(adp, PCIE_DLL_DATA_LINK_CONTROL, reg_pwrmgmt);
 290
 291        if (!crystalhd_bring_out_of_rst(adp)) {
 292                BCMLOG_ERR("Failed To Bring Link Out Of Reset\n");
 293                return false;
 294        }
 295
 296        crystalhd_disable_interrupts(adp);
 297
 298        crystalhd_clear_errors(adp);
 299
 300        crystalhd_clear_interrupts(adp);
 301
 302        crystalhd_enable_interrupts(adp);
 303
 304        /* Enable the option for getting the total no. of DWORDS
 305         * that have been transferred by the RXDMA engine
 306         */
 307        dbg_options = crystalhd_reg_rd(adp, MISC1_DMA_DEBUG_OPTIONS_REG);
 308        dbg_options |= 0x10;
 309        crystalhd_reg_wr(adp, MISC1_DMA_DEBUG_OPTIONS_REG, dbg_options);
 310
 311        /* Enable PCI Global Control options */
 312        glb_cntrl = crystalhd_reg_rd(adp, MISC2_GLOBAL_CTRL);
 313        glb_cntrl |= 0x100;
 314        glb_cntrl |= 0x8000;
 315        crystalhd_reg_wr(adp, MISC2_GLOBAL_CTRL, glb_cntrl);
 316
 317        crystalhd_enable_interrupts(adp);
 318
 319        crystalhd_soft_rst(adp);
 320        crystalhd_start_dram(adp);
 321        crystalhd_enable_uarts(adp);
 322
 323        return true;
 324}
 325
 326static bool crystalhd_stop_device(struct crystalhd_adp *adp)
 327{
 328        uint32_t reg;
 329
 330        BCMLOG(BCMLOG_INFO, "Stopping BCM70012 Device\n");
 331        /* Clear and disable interrupts */
 332        crystalhd_disable_interrupts(adp);
 333        crystalhd_clear_errors(adp);
 334        crystalhd_clear_interrupts(adp);
 335
 336        if (!crystalhd_put_in_reset(adp))
 337                BCMLOG_ERR("Failed to Put Link To Reset State\n");
 338
 339        reg = crystalhd_reg_rd(adp, PCIE_DLL_DATA_LINK_CONTROL);
 340        reg |= ASPM_L1_ENABLE;
 341        crystalhd_reg_wr(adp, PCIE_DLL_DATA_LINK_CONTROL, reg);
 342
 343        /* Set PCI Clk Req */
 344        reg = crystalhd_reg_rd(adp, PCIE_CLK_REQ_REG);
 345        reg |= PCI_CLK_REQ_ENABLE;
 346        crystalhd_reg_wr(adp, PCIE_CLK_REQ_REG, reg);
 347
 348        return true;
 349}
 350
 351static struct crystalhd_rx_dma_pkt *crystalhd_hw_alloc_rx_pkt(struct crystalhd_hw *hw)
 352{
 353        unsigned long flags = 0;
 354        struct crystalhd_rx_dma_pkt *temp = NULL;
 355
 356        if (!hw)
 357                return NULL;
 358
 359        spin_lock_irqsave(&hw->lock, flags);
 360        temp = hw->rx_pkt_pool_head;
 361        if (temp) {
 362                hw->rx_pkt_pool_head = hw->rx_pkt_pool_head->next;
 363                temp->dio_req = NULL;
 364                temp->pkt_tag = 0;
 365                temp->flags = 0;
 366        }
 367        spin_unlock_irqrestore(&hw->lock, flags);
 368
 369        return temp;
 370}
 371
 372static void crystalhd_hw_free_rx_pkt(struct crystalhd_hw *hw,
 373                                   struct crystalhd_rx_dma_pkt *pkt)
 374{
 375        unsigned long flags = 0;
 376
 377        if (!hw || !pkt)
 378                return;
 379
 380        spin_lock_irqsave(&hw->lock, flags);
 381        pkt->next = hw->rx_pkt_pool_head;
 382        hw->rx_pkt_pool_head = pkt;
 383        spin_unlock_irqrestore(&hw->lock, flags);
 384}
 385
 386/*
 387 * Call back from TX - IOQ deletion.
 388 *
 389 * This routine will release the TX DMA rings allocated
 390 * during the DMA ring setup interface.
 391 *
 392 * Memory is allocated on a per-DMA-ring basis. This is just
 393 * a placeholder to be able to create the dio queues.
 394 */
 395static void crystalhd_tx_desc_rel_call_back(void *context, void *data)
 396{
 397}
 398
 399/*
 400 * Rx Packet release callback..
 401 *
 402 * Release all user-mapped capture buffers and our DMA packets
 403 * back to our free pool. The actual cleanup of the DMA
 404 * ring descriptors happens during DMA ring release.
 405 */
 406static void crystalhd_rx_pkt_rel_call_back(void *context, void *data)
 407{
 408        struct crystalhd_hw *hw = (struct crystalhd_hw *)context;
 409        struct crystalhd_rx_dma_pkt *pkt = (struct crystalhd_rx_dma_pkt *)data;
 410
 411        if (!pkt || !hw) {
 412                BCMLOG_ERR("Invalid arg - %p %p\n", hw, pkt);
 413                return;
 414        }
 415
 416        if (pkt->dio_req)
 417                crystalhd_unmap_dio(hw->adp, pkt->dio_req);
 418        else
 419                BCMLOG_ERR("Missing dio_req: 0x%x\n", pkt->pkt_tag);
 420
 421        crystalhd_hw_free_rx_pkt(hw, pkt);
 422}
 423
 424#define crystalhd_hw_delete_ioq(adp, q)         \
 425        if (q) {                                \
 426                crystalhd_delete_dioq(adp, q);  \
 427                q = NULL;                       \
 428        }
 429
 430static void crystalhd_hw_delete_ioqs(struct crystalhd_hw *hw)
 431{
 432        if (!hw)
 433                return;
 434
 435        BCMLOG(BCMLOG_DBG, "Deleting IOQs\n");
 436        crystalhd_hw_delete_ioq(hw->adp, hw->tx_actq);
 437        crystalhd_hw_delete_ioq(hw->adp, hw->tx_freeq);
 438        crystalhd_hw_delete_ioq(hw->adp, hw->rx_actq);
 439        crystalhd_hw_delete_ioq(hw->adp, hw->rx_freeq);
 440        crystalhd_hw_delete_ioq(hw->adp, hw->rx_rdyq);
 441}
 442
 443#define crystalhd_hw_create_ioq(sts, hw, q, cb)                 \
 444do {                                                            \
 445        sts = crystalhd_create_dioq(hw->adp, &q, cb, hw);       \
 446        if (sts != BC_STS_SUCCESS)                              \
 447                goto hw_create_ioq_err;                         \
 448} while (0)
 449
 450/*
 451 * Create IOQs..
 452 *
 453 * TX - Active & Free
 454 * RX - Active, Ready and Free.
 455 */
 456static enum BC_STATUS crystalhd_hw_create_ioqs(struct crystalhd_hw   *hw)
 457{
 458        enum BC_STATUS   sts = BC_STS_SUCCESS;
 459
 460        if (!hw) {
 461                BCMLOG_ERR("Invalid Arg!!\n");
 462                return BC_STS_INV_ARG;
 463        }
 464
 465        crystalhd_hw_create_ioq(sts, hw, hw->tx_freeq,
 466                              crystalhd_tx_desc_rel_call_back);
 467        crystalhd_hw_create_ioq(sts, hw, hw->tx_actq,
 468                              crystalhd_tx_desc_rel_call_back);
 469
 470        crystalhd_hw_create_ioq(sts, hw, hw->rx_freeq,
 471                              crystalhd_rx_pkt_rel_call_back);
 472        crystalhd_hw_create_ioq(sts, hw, hw->rx_rdyq,
 473                              crystalhd_rx_pkt_rel_call_back);
 474        crystalhd_hw_create_ioq(sts, hw, hw->rx_actq,
 475                              crystalhd_rx_pkt_rel_call_back);
 476
 477        return sts;
 478
 479hw_create_ioq_err:
 480        crystalhd_hw_delete_ioqs(hw);
 481
 482        return sts;
 483}
 484
 485
 486static bool crystalhd_code_in_full(struct crystalhd_adp *adp, uint32_t needed_sz,
 487                                 bool b_188_byte_pkts,  uint8_t flags)
 488{
 489        uint32_t base, end, writep, readp;
 490        uint32_t cpbSize, cpbFullness, fifoSize;
 491
 492        if (flags & 0x02) { /* ASF Bit is set */
 493                base   = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Base);
 494                end    = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2End);
 495                writep = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Wrptr);
 496                readp  = bc_dec_reg_rd(adp, REG_Dec_TsAudCDB2Rdptr);
 497        } else if (b_188_byte_pkts) { /*Encrypted 188 byte packets*/
 498                base   = bc_dec_reg_rd(adp, REG_Dec_TsUser0Base);
 499                end    = bc_dec_reg_rd(adp, REG_Dec_TsUser0End);
 500                writep = bc_dec_reg_rd(adp, REG_Dec_TsUser0Wrptr);
 501                readp  = bc_dec_reg_rd(adp, REG_Dec_TsUser0Rdptr);
 502        } else {
 503                base   = bc_dec_reg_rd(adp, REG_DecCA_RegCinBase);
 504                end    = bc_dec_reg_rd(adp, REG_DecCA_RegCinEnd);
 505                writep = bc_dec_reg_rd(adp, REG_DecCA_RegCinWrPtr);
 506                readp  = bc_dec_reg_rd(adp, REG_DecCA_RegCinRdPtr);
 507        }
 508
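            /*
             * The coded buffer is a circular region [base, end): fullness is
             * the read-to-write distance, wrapping at 'end'. Worked example
             * with hypothetical values: base=0x1000, end=0x2000,
             * writep=0x1100, readp=0x1F00 -> fullness = 0x1000 - 0xE00 =
             * 0x200, leaving 0xE00 bytes of free space.
             */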
 509        cpbSize = end - base;
 510        if (writep >= readp)
 511                cpbFullness = writep - readp;
 512        else
 513                cpbFullness = (end - base) - (readp - writep);
 514
 515        fifoSize = cpbSize - cpbFullness;
 516
 517        if (fifoSize < BC_INFIFO_THRESHOLD)
 518                return true;
 519
 520        if (needed_sz > (fifoSize - BC_INFIFO_THRESHOLD))
 521                return true;
 522
 523        return false;
 524}
 525
 526static enum BC_STATUS crystalhd_hw_tx_req_complete(struct crystalhd_hw *hw,
 527                                            uint32_t list_id, enum BC_STATUS cs)
 528{
 529        struct tx_dma_pkt *tx_req;
 530
 531        if (!hw || !list_id) {
 532                BCMLOG_ERR("Invalid Arg..\n");
 533                return BC_STS_INV_ARG;
 534        }
 535
 536        hw->pwr_lock--;
 537
 538        tx_req = (struct tx_dma_pkt *)crystalhd_dioq_find_and_fetch(hw->tx_actq, list_id);
 539        if (!tx_req) {
 540                if (cs != BC_STS_IO_USER_ABORT)
 541                        BCMLOG_ERR("Find and Fetch Did not find req\n");
 542                return BC_STS_NO_DATA;
 543        }
 544
 545        if (tx_req->call_back) {
 546                tx_req->call_back(tx_req->dio_req, tx_req->cb_event, cs);
 547                tx_req->dio_req   = NULL;
 548                tx_req->cb_event  = NULL;
 549                tx_req->call_back = NULL;
 550        } else {
 551                BCMLOG(BCMLOG_DBG, "Missing Tx Callback - %X\n",
 552                       tx_req->list_tag);
 553        }
 554
 555        /* Now put the tx_list back in the FreeQ */
 556        tx_req->list_tag = 0;
 557
 558        return crystalhd_dioq_add(hw->tx_freeq, tx_req, false, 0);
 559}
 560
 561static bool crystalhd_tx_list0_handler(struct crystalhd_hw *hw, uint32_t err_sts)
 562{
 563        uint32_t err_mask, tmp;
 564        unsigned long flags = 0;
 565
 566        err_mask = MISC1_TX_DMA_ERROR_STATUS_TX_L0_DESC_TX_ABORT_ERRORS_MASK |
 567                MISC1_TX_DMA_ERROR_STATUS_TX_L0_DMA_DATA_TX_ABORT_ERRORS_MASK |
 568                MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK;
 569
 570        if (!(err_sts & err_mask))
 571                return false;
 572
 573        BCMLOG_ERR("Error on Tx-L0 %x\n", err_sts);
 574
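            /*
             * A FIFO-full error by itself does not require re-arming the
             * lists, so it is stripped from the working mask; any remaining
             * abort error forces the TX post index back to list 0.
             */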
 575        tmp = err_mask;
 576
 577        if (err_sts & MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK)
 578                tmp &= ~MISC1_TX_DMA_ERROR_STATUS_TX_L0_FIFO_FULL_ERRORS_MASK;
 579
 580        if (tmp) {
 581                spin_lock_irqsave(&hw->lock, flags);
 582                /* reset list index.*/
 583                hw->tx_list_post_index = 0;
 584                spin_unlock_irqrestore(&hw->lock, flags);
 585        }
 586
 587        tmp = err_sts & err_mask;
 588        crystalhd_reg_wr(hw->adp, MISC1_TX_DMA_ERROR_STATUS, tmp);
 589
 590        return true;
 591}
 592
 593static bool crystalhd_tx_list1_handler(struct crystalhd_hw *hw, uint32_t err_sts)
 594{
 595        uint32_t err_mask, tmp;
 596        unsigned long flags = 0;
 597
 598        err_mask = MISC1_TX_DMA_ERROR_STATUS_TX_L1_DESC_TX_ABORT_ERRORS_MASK |
 599                MISC1_TX_DMA_ERROR_STATUS_TX_L1_DMA_DATA_TX_ABORT_ERRORS_MASK |
 600                MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK;
 601
 602        if (!(err_sts & err_mask))
 603                return false;
 604
 605        BCMLOG_ERR("Error on Tx-L1 %x\n", err_sts);
 606
 607        tmp = err_mask;
 608
 609        if (err_sts & MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK)
 610                tmp &= ~MISC1_TX_DMA_ERROR_STATUS_TX_L1_FIFO_FULL_ERRORS_MASK;
 611
 612        if (tmp) {
 613                spin_lock_irqsave(&hw->lock, flags);
 614                /* reset list index.*/
 615                hw->tx_list_post_index = 0;
 616                spin_unlock_irqrestore(&hw->lock, flags);
 617        }
 618
 619        tmp = err_sts & err_mask;
 620        crystalhd_reg_wr(hw->adp, MISC1_TX_DMA_ERROR_STATUS, tmp);
 621
 622        return true;
 623}
 624
 625static void crystalhd_tx_isr(struct crystalhd_hw *hw, uint32_t int_sts)
 626{
 627        uint32_t err_sts;
 628
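            /*
             * Each TX list completion is identified by tag: list 0 maps to
             * tx_ioq_tag_seed + 0 and list 1 to tx_ioq_tag_seed + 1, which
             * crystalhd_hw_tx_req_complete() uses to find the request.
             */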
 629        if (int_sts & INTR_INTR_STATUS_L0_TX_DMA_DONE_INTR_MASK)
 630                crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 0,
 631                                           BC_STS_SUCCESS);
 632
 633        if (int_sts & INTR_INTR_STATUS_L1_TX_DMA_DONE_INTR_MASK)
 634                crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 1,
 635                                           BC_STS_SUCCESS);
 636
 637        if (!(int_sts & (INTR_INTR_STATUS_L0_TX_DMA_ERR_INTR_MASK |
 638                        INTR_INTR_STATUS_L1_TX_DMA_ERR_INTR_MASK))) {
 639                        /* No error mask set.. */
 640                        return;
 641        }
 642
 643        /* Handle Tx errors. */
 644        err_sts = crystalhd_reg_rd(hw->adp, MISC1_TX_DMA_ERROR_STATUS);
 645
 646        if (crystalhd_tx_list0_handler(hw, err_sts))
 647                crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 0,
 648                                           BC_STS_ERROR);
 649
 650        if (crystalhd_tx_list1_handler(hw, err_sts))
 651                crystalhd_hw_tx_req_complete(hw, hw->tx_ioq_tag_seed + 1,
 652                                           BC_STS_ERROR);
 653
 654        hw->stats.tx_errors++;
 655}
 656
 657static void crystalhd_hw_dump_desc(struct dma_descriptor *p_dma_desc,
 658                                 uint32_t ul_desc_index, uint32_t cnt)
 659{
 660        uint32_t ix, ll = 0;
 661
 662        if (!p_dma_desc || !cnt)
 663                return;
 664
 665        /* FIXME: jarod: perhaps a modparam desc_debug to enable this, rather than
 666         * setting ll (log level, I presume) to non-zero? */
 667        if (!ll)
 668                return;
 669
 670        for (ix = ul_desc_index; ix < (ul_desc_index + cnt); ix++) {
 671                BCMLOG(ll, "%s[%d] Buff[%x:%x] Next:[%x:%x] XferSz:%x Intr:%x,Last:%x\n",
 672                       ((p_dma_desc[ix].dma_dir) ? "TDesc" : "RDesc"),
 673                       ix,
 674                       p_dma_desc[ix].buff_addr_high,
 675                       p_dma_desc[ix].buff_addr_low,
 676                       p_dma_desc[ix].next_desc_addr_high,
 677                       p_dma_desc[ix].next_desc_addr_low,
 678                       p_dma_desc[ix].xfer_size,
 679                       p_dma_desc[ix].intr_enable,
 680                       p_dma_desc[ix].last_rec_indicator);
 681        }
 682
 683}
 684
 685static enum BC_STATUS crystalhd_hw_fill_desc(struct crystalhd_dio_req *ioreq,
 686                                      struct dma_descriptor *desc,
 687                                      dma_addr_t desc_paddr_base,
 688                                      uint32_t sg_cnt, uint32_t sg_st_ix,
 689                                      uint32_t sg_st_off, uint32_t xfr_sz)
 690{
 691        uint32_t count = 0, ix = 0, sg_ix = 0, len = 0, last_desc_ix = 0;
 692        dma_addr_t desc_phy_addr = desc_paddr_base;
 693        union addr_64 addr_temp;
 694
 695        if (!ioreq || !desc || !desc_paddr_base || !xfr_sz ||
 696            (!sg_cnt && !ioreq->uinfo.dir_tx)) {
 697                BCMLOG_ERR("Invalid Args\n");
 698                return BC_STS_INV_ARG;
 699        }
 700
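            /*
             * Walk the scatter/gather list: each element becomes one DMA
             * descriptor (lengths must be DWORD multiples) and descriptors
             * are chained by storing the physical address of the next one.
             * The last-record and interrupt-enable bits are set on the final
             * descriptor after the loop.
             */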
 701        for (ix = 0; ix < sg_cnt; ix++) {
 702
 703                /* Setup SGLE index. */
 704                sg_ix = ix + sg_st_ix;
 705
 706                /* Get SGLE length */
 707                len = crystalhd_get_sgle_len(ioreq, sg_ix);
 708                if (len % 4) {
 709                        BCMLOG_ERR(" len in sg %d %d %d\n", len, sg_ix, sg_cnt);
 710                        return BC_STS_NOT_IMPL;
 711                }
 712                /* Setup DMA desc with Phy addr & Length at current index. */
 713                addr_temp.full_addr = crystalhd_get_sgle_paddr(ioreq, sg_ix);
 714                if (sg_ix == sg_st_ix) {
 715                        addr_temp.full_addr += sg_st_off;
 716                        len -= sg_st_off;
 717                }
 718                memset(&desc[ix], 0, sizeof(desc[ix]));
 719                desc[ix].buff_addr_low  = addr_temp.low_part;
 720                desc[ix].buff_addr_high = addr_temp.high_part;
 721                desc[ix].dma_dir        = ioreq->uinfo.dir_tx;
 722
 723                /* Chain DMA descriptor.  */
 724                addr_temp.full_addr = desc_phy_addr + sizeof(struct dma_descriptor);
 725                desc[ix].next_desc_addr_low = addr_temp.low_part;
 726                desc[ix].next_desc_addr_high = addr_temp.high_part;
 727
 728                if ((count + len) > xfr_sz)
 729                        len = xfr_sz - count;
 730
 731                /* Debug.. */
 732                if ((!len) || (len > crystalhd_get_sgle_len(ioreq, sg_ix))) {
 733                        BCMLOG_ERR("inv-len(%x) Ix(%d) count:%x xfr_sz:%x sg_cnt:%d\n",
 734                                   len, ix, count, xfr_sz, sg_cnt);
 735                        return BC_STS_ERROR;
 736                }
 737                /* Length expects Multiple of 4 */
 738                desc[ix].xfer_size = (len / 4);
 739
 740                crystalhd_hw_dump_desc(desc, ix, 1);
 741
 742                count += len;
 743                desc_phy_addr += sizeof(struct dma_descriptor);
 744        }
 745
 746        last_desc_ix = ix - 1;
 747
 748        if (ioreq->fb_size) {
 749                memset(&desc[ix], 0, sizeof(desc[ix]));
 750                addr_temp.full_addr     = ioreq->fb_pa;
 751                desc[ix].buff_addr_low  = addr_temp.low_part;
 752                desc[ix].buff_addr_high = addr_temp.high_part;
 753                desc[ix].dma_dir        = ioreq->uinfo.dir_tx;
 754                desc[ix].xfer_size      = 1;
 755                desc[ix].fill_bytes     = 4 - ioreq->fb_size;
 756                count += ioreq->fb_size;
 757                last_desc_ix++;
 758        }
 759
 760        /* setup last descriptor..*/
 761        desc[last_desc_ix].last_rec_indicator  = 1;
 762        desc[last_desc_ix].next_desc_addr_low  = 0;
 763        desc[last_desc_ix].next_desc_addr_high = 0;
 764        desc[last_desc_ix].intr_enable = 1;
 765
 766        crystalhd_hw_dump_desc(desc, last_desc_ix, 1);
 767
 768        if (count != xfr_sz) {
 769                BCMLOG_ERR("internal error sz curr:%x exp:%x\n", count, xfr_sz);
 770                return BC_STS_ERROR;
 771        }
 772
 773        return BC_STS_SUCCESS;
 774}
 775
 776static enum BC_STATUS crystalhd_xlat_sgl_to_dma_desc(struct crystalhd_dio_req *ioreq,
 777                                              struct dma_desc_mem *pdesc_mem,
 778                                              uint32_t *uv_desc_index)
 779{
 780        struct dma_descriptor *desc = NULL;
 781        dma_addr_t desc_paddr_base = 0;
 782        uint32_t sg_cnt = 0, sg_st_ix = 0, sg_st_off = 0;
 783        uint32_t xfr_sz = 0;
 784        enum BC_STATUS sts = BC_STS_SUCCESS;
 785
 786        /* Check params.. */
 787        if (!ioreq || !pdesc_mem || !uv_desc_index) {
 788                BCMLOG_ERR("Invalid Args\n");
 789                return BC_STS_INV_ARG;
 790        }
 791
 792        if (!pdesc_mem->sz || !pdesc_mem->pdma_desc_start ||
 793            !ioreq->sg || (!ioreq->sg_cnt && !ioreq->uinfo.dir_tx)) {
 794                BCMLOG_ERR("Invalid Args\n");
 795                return BC_STS_INV_ARG;
 796        }
 797
 798        if ((ioreq->uinfo.dir_tx) && (ioreq->uinfo.uv_offset)) {
 799                BCMLOG_ERR("UV offset for TX??\n");
 800                return BC_STS_INV_ARG;
 801
 802        }
 803
 804        desc = pdesc_mem->pdma_desc_start;
 805        desc_paddr_base = pdesc_mem->phy_addr;
 806
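            /*
             * RX buffers carry a luma (Y) plane and, optionally, a chroma
             * (UV) plane; uv_offset/uv_sg_ix mark where the split falls in
             * the user buffer. Build the Y descriptor chain first, then a
             * separate UV chain further below. TX buffers always use a
             * single chain for the whole transfer.
             */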
 807        if (ioreq->uinfo.dir_tx || (ioreq->uinfo.uv_offset == 0)) {
 808                sg_cnt = ioreq->sg_cnt;
 809                xfr_sz = ioreq->uinfo.xfr_len;
 810        } else {
 811                sg_cnt = ioreq->uinfo.uv_sg_ix + 1;
 812                xfr_sz = ioreq->uinfo.uv_offset;
 813        }
 814
 815        sts = crystalhd_hw_fill_desc(ioreq, desc, desc_paddr_base, sg_cnt,
 816                                   sg_st_ix, sg_st_off, xfr_sz);
 817
 818        if ((sts != BC_STS_SUCCESS) || !ioreq->uinfo.uv_offset)
 819                return sts;
 820
 821        /* Prepare for UV mapping.. */
 822        desc = &pdesc_mem->pdma_desc_start[sg_cnt];
 823        desc_paddr_base = pdesc_mem->phy_addr +
 824                          (sg_cnt * sizeof(struct dma_descriptor));
 825
 826        /* Done with desc addr.. now update sg stuff.*/
 827        sg_cnt    = ioreq->sg_cnt - ioreq->uinfo.uv_sg_ix;
 828        xfr_sz    = ioreq->uinfo.xfr_len - ioreq->uinfo.uv_offset;
 829        sg_st_ix  = ioreq->uinfo.uv_sg_ix;
 830        sg_st_off = ioreq->uinfo.uv_sg_off;
 831
 832        sts = crystalhd_hw_fill_desc(ioreq, desc, desc_paddr_base, sg_cnt,
 833                                   sg_st_ix, sg_st_off, xfr_sz);
 834        if (sts != BC_STS_SUCCESS)
 835                return sts;
 836
 837        *uv_desc_index = sg_st_ix;
 838
 839        return sts;
 840}
 841
 842static void crystalhd_start_tx_dma_engine(struct crystalhd_hw *hw)
 843{
 844        uint32_t dma_cntrl;
 845
 846        dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS);
 847        if (!(dma_cntrl & DMA_START_BIT)) {
 848                dma_cntrl |= DMA_START_BIT;
 849                crystalhd_reg_wr(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS,
 850                               dma_cntrl);
 851        }
 852
 853        return;
 854}
 855
 856/* _CHECK_THIS_
 857 *
 858 * Verify if the Stop generates a completion interrupt or not.
 859 * if it does not generate an interrupt, then add polling here.
 860 */
 861static enum BC_STATUS crystalhd_stop_tx_dma_engine(struct crystalhd_hw *hw)
 862{
 863        uint32_t dma_cntrl, cnt = 30;
 864        uint32_t l1 = 1, l2 = 1;
 865        unsigned long flags = 0;
 866
 867        dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS);
 868
 869        BCMLOG(BCMLOG_DBG, "Stopping TX DMA Engine..\n");
 870
 871        /* Nothing to do if the DMA start bit is already clear */
 872        if (!(dma_cntrl & DMA_START_BIT)) {
 873                BCMLOG(BCMLOG_DBG, "Already Stopped\n");
 874                return BC_STS_SUCCESS;
 875        }
 876
 877        crystalhd_disable_interrupts(hw->adp);
 878
 879        /* Issue stop to HW */
 880        /* This bit when set gave problems. Please check*/
 881        dma_cntrl &= ~DMA_START_BIT;
 882        crystalhd_reg_wr(hw->adp, MISC1_TX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
 883
 884        BCMLOG(BCMLOG_DBG, "Cleared the DMA Start bit\n");
 885
 886        /* Poll for 3 seconds (30 * 100ms) on both the lists.. */
 887        while ((l1 || l2) && cnt) {
 888
 889                if (l1) {
 890                        l1 = crystalhd_reg_rd(hw->adp, MISC1_TX_FIRST_DESC_L_ADDR_LIST0);
 891                        l1 &= DMA_START_BIT;
 892                }
 893
 894                if (l2) {
 895                        l2 = crystalhd_reg_rd(hw->adp, MISC1_TX_FIRST_DESC_L_ADDR_LIST1);
 896                        l2 &= DMA_START_BIT;
 897                }
 898
 899                msleep_interruptible(100);
 900
 901                cnt--;
 902        }
 903
 904        if (!cnt) {
 905                BCMLOG_ERR("Failed to stop TX DMA.. l1 %d, l2 %d\n", l1, l2);
 906                crystalhd_enable_interrupts(hw->adp);
 907                return BC_STS_ERROR;
 908        }
 909
 910        spin_lock_irqsave(&hw->lock, flags);
 911        hw->tx_list_post_index = 0;
 912        spin_unlock_irqrestore(&hw->lock, flags);
 913        BCMLOG(BCMLOG_DBG, "stopped TX DMA..\n");
 914        crystalhd_enable_interrupts(hw->adp);
 915
 916        return BC_STS_SUCCESS;
 917}
 918
 919static uint32_t crystalhd_get_pib_avail_cnt(struct crystalhd_hw *hw)
 920{
 921        /*
 922         * The PIB delivery queue keeps its read and write pointers in
 923         * the 0th and 1st locations of the circular list.
 924         */
 925        uint32_t Q_addr;
 926        uint32_t pib_cnt, r_offset, w_offset;
 927
 928        Q_addr = hw->pib_del_Q_addr;
 929
 930        /* Get the Read Pointer */
 931        crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);
 932
 933        /* Get the Write Pointer */
 934        crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);
 935
 936        if (r_offset == w_offset)
 937                return 0;       /* Queue is empty */
 938
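            /*
             * Ring entries occupy slots MIN_PIB_Q_DEPTH through
             * MAX_PIB_Q_DEPTH - 1, so when the write pointer has wrapped
             * behind the read pointer the count is the distance around the
             * ring, i.e. (MAX - MIN) - (r_offset - w_offset).
             */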
 939        if (w_offset > r_offset)
 940                pib_cnt = w_offset - r_offset;
 941        else
 942                pib_cnt = (w_offset + MAX_PIB_Q_DEPTH) -
 943                          (r_offset + MIN_PIB_Q_DEPTH);
 944
 945        if (pib_cnt > MAX_PIB_Q_DEPTH) {
 946                BCMLOG_ERR("Invalid PIB Count (%u)\n", pib_cnt);
 947                return 0;
 948        }
 949
 950        return pib_cnt;
 951}
 952
 953static uint32_t crystalhd_get_addr_from_pib_Q(struct crystalhd_hw *hw)
 954{
 955        uint32_t Q_addr;
 956        uint32_t addr_entry, r_offset, w_offset;
 957
 958        Q_addr = hw->pib_del_Q_addr;
 959
 960        /* Get the Read Pointer 0Th Location is Read Pointer */
 961        crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);
 962
 963        /* Get the Write Pointer 1st Location is Write pointer */
 964        crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);
 965
 966        /* Queue is empty */
 967        if (r_offset == w_offset)
 968                return 0;
 969
 970        if ((r_offset < MIN_PIB_Q_DEPTH) || (r_offset >= MAX_PIB_Q_DEPTH))
 971                return 0;
 972
 973        /* Get the Actual Address of the PIB */
 974        crystalhd_mem_rd(hw->adp, Q_addr + (r_offset * sizeof(uint32_t)),
 975                       1, &addr_entry);
 976
 977        /* Increment the Read Pointer */
 978        r_offset++;
 979
 980        if (MAX_PIB_Q_DEPTH == r_offset)
 981                r_offset = MIN_PIB_Q_DEPTH;
 982
 983        /* Write back the read pointer to its location */
 984        crystalhd_mem_wr(hw->adp, Q_addr, 1, &r_offset);
 985
 986        return addr_entry;
 987}
 988
 989static bool crystalhd_rel_addr_to_pib_Q(struct crystalhd_hw *hw, uint32_t addr_to_rel)
 990{
 991        uint32_t Q_addr;
 992        uint32_t r_offset, w_offset, n_offset;
 993
 994        Q_addr = hw->pib_rel_Q_addr;
 995
 996        /* Get the Read Pointer */
 997        crystalhd_mem_rd(hw->adp, Q_addr, 1, &r_offset);
 998
 999        /* Get the Write Pointer */
1000        crystalhd_mem_rd(hw->adp, Q_addr + sizeof(uint32_t), 1, &w_offset);
1001
1002        if ((r_offset < MIN_PIB_Q_DEPTH) ||
1003            (r_offset >= MAX_PIB_Q_DEPTH))
1004                return false;
1005
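            /*
             * Advance the write pointer with wrap-around; if the next slot
             * would collide with the read pointer the release queue is full,
             * so bail out rather than overwrite an unread entry.
             */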
1006        n_offset = w_offset + 1;
1007
1008        if (MAX_PIB_Q_DEPTH == n_offset)
1009                n_offset = MIN_PIB_Q_DEPTH;
1010
1011        if (r_offset == n_offset)
1012                return false; /* should never happen */
1013
1014        /* Write the DRAM ADDR to the Queue at Next Offset */
1015        crystalhd_mem_wr(hw->adp, Q_addr + (w_offset * sizeof(uint32_t)),
1016                       1, &addr_to_rel);
1017
1018        /* Put the New value of the write pointer in Queue */
1019        crystalhd_mem_wr(hw->adp, Q_addr + sizeof(uint32_t), 1, &n_offset);
1020
1021        return true;
1022}
1023
1024static void cpy_pib_to_app(struct c011_pib *src_pib, struct BC_PIC_INFO_BLOCK *dst_pib)
1025{
1026        if (!src_pib || !dst_pib) {
1027                BCMLOG_ERR("Invalid Arguments\n");
1028                return;
1029        }
1030
1031        dst_pib->timeStamp           = 0;
1032        dst_pib->picture_number      = src_pib->ppb.picture_number;
1033        dst_pib->width               = src_pib->ppb.width;
1034        dst_pib->height              = src_pib->ppb.height;
1035        dst_pib->chroma_format       = src_pib->ppb.chroma_format;
1036        dst_pib->pulldown            = src_pib->ppb.pulldown;
1037        dst_pib->flags               = src_pib->ppb.flags;
1038        dst_pib->sess_num            = src_pib->ptsStcOffset;
1039        dst_pib->aspect_ratio        = src_pib->ppb.aspect_ratio;
1040        dst_pib->colour_primaries     = src_pib->ppb.colour_primaries;
1041        dst_pib->picture_meta_payload = src_pib->ppb.picture_meta_payload;
1042        dst_pib->frame_rate           = src_pib->resolution;
1043        return;
1044}
1045
1046static void crystalhd_hw_proc_pib(struct crystalhd_hw *hw)
1047{
1048        unsigned int cnt;
1049        struct c011_pib src_pib;
1050        uint32_t pib_addr, pib_cnt;
1051        struct BC_PIC_INFO_BLOCK *AppPib;
1052        struct crystalhd_rx_dma_pkt *rx_pkt = NULL;
1053
1054        pib_cnt = crystalhd_get_pib_avail_cnt(hw);
1055
1056        if (!pib_cnt)
1057                return;
1058
1059        for (cnt = 0; cnt < pib_cnt; cnt++) {
1060
1061                pib_addr = crystalhd_get_addr_from_pib_Q(hw);
1062                crystalhd_mem_rd(hw->adp, pib_addr, sizeof(struct c011_pib) / 4,
1063                               (uint32_t *)&src_pib);
1064
1065                if (src_pib.bFormatChange) {
1066                        rx_pkt = (struct crystalhd_rx_dma_pkt *)crystalhd_dioq_fetch(hw->rx_freeq);
1067                        if (!rx_pkt)
1068                                return;
1069                        rx_pkt->flags = 0;
1070                        rx_pkt->flags |= COMP_FLAG_PIB_VALID | COMP_FLAG_FMT_CHANGE;
1071                        AppPib = &rx_pkt->pib;
1072                        cpy_pib_to_app(&src_pib, AppPib);
1073
1074                        BCMLOG(BCMLOG_DBG,
1075                               "App PIB:%x %x %x %x %x %x %x %x %x %x\n",
1076                               rx_pkt->pib.picture_number,
1077                               rx_pkt->pib.aspect_ratio,
1078                               rx_pkt->pib.chroma_format,
1079                               rx_pkt->pib.colour_primaries,
1080                               rx_pkt->pib.frame_rate,
1081                               rx_pkt->pib.height,
1082                               rx_pkt->pib.height,
1083                               rx_pkt->pib.n_drop,
1084                               rx_pkt->pib.pulldown,
1085                               rx_pkt->pib.ycom);
1086
1087                        crystalhd_dioq_add(hw->rx_rdyq, (void *)rx_pkt, true, rx_pkt->pkt_tag);
1088
1089                }
1090
1091                crystalhd_rel_addr_to_pib_Q(hw, pib_addr);
1092        }
1093}
1094
1095static void crystalhd_start_rx_dma_engine(struct crystalhd_hw *hw)
1096{
1097        uint32_t        dma_cntrl;
1098
1099        dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
1100        if (!(dma_cntrl & DMA_START_BIT)) {
1101                dma_cntrl |= DMA_START_BIT;
1102                crystalhd_reg_wr(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1103        }
1104
1105        dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
1106        if (!(dma_cntrl & DMA_START_BIT)) {
1107                dma_cntrl |= DMA_START_BIT;
1108                crystalhd_reg_wr(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1109        }
1110
1111        return;
1112}
1113
1114static void crystalhd_stop_rx_dma_engine(struct crystalhd_hw *hw)
1115{
1116        uint32_t dma_cntrl = 0, count = 30;
1117        uint32_t l0y = 1, l0uv = 1, l1y = 1, l1uv = 1;
1118
1119        dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
1120        if ((dma_cntrl & DMA_START_BIT)) {
1121                dma_cntrl &= ~DMA_START_BIT;
1122                crystalhd_reg_wr(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1123        }
1124
1125        dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
1126        if ((dma_cntrl & DMA_START_BIT)) {
1127                dma_cntrl &= ~DMA_START_BIT;
1128                crystalhd_reg_wr(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1129        }
1130
1131        /* Poll for 3 seconds (30 * 100ms) on both the lists.. */
1132        while ((l0y || l0uv || l1y || l1uv) && count) {
1133
1134                if (l0y) {
1135                        l0y = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST0);
1136                        l0y &= DMA_START_BIT;
1137                        if (!l0y)
1138                                hw->rx_list_sts[0] &= ~rx_waiting_y_intr;
1139                }
1140
1141                if (l1y) {
1142                        l1y = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST1);
1143                        l1y &= DMA_START_BIT;
1144                        if (!l1y)
1145                                hw->rx_list_sts[1] &= ~rx_waiting_y_intr;
1146                }
1147
1148                if (l0uv) {
1149                        l0uv = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST0);
1150                        l0uv &= DMA_START_BIT;
1151                        if (!l0uv)
1152                                hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;
1153                }
1154
1155                if (l1uv) {
1156                        l1uv = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST1);
1157                        l1uv &= DMA_START_BIT;
1158                        if (!l1uv)
1159                                hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
1160                }
1161                msleep_interruptible(100);
1162                count--;
1163        }
1164
1165        hw->rx_list_post_index = 0;
1166
1167        BCMLOG(BCMLOG_SSTEP, "Capture Stop: %d List0:Sts:%x List1:Sts:%x\n",
1168               count, hw->rx_list_sts[0], hw->rx_list_sts[1]);
1169}
1170
1171static enum BC_STATUS crystalhd_hw_prog_rxdma(struct crystalhd_hw *hw, struct crystalhd_rx_dma_pkt *rx_pkt)
1172{
1173        uint32_t y_low_addr_reg, y_high_addr_reg;
1174        uint32_t uv_low_addr_reg, uv_high_addr_reg;
1175        union addr_64 desc_addr;
1176        unsigned long flags;
1177
1178        if (!hw || !rx_pkt) {
1179                BCMLOG_ERR("Invalid Arguments\n");
1180                return BC_STS_INV_ARG;
1181        }
1182
1183        if (hw->rx_list_post_index >= DMA_ENGINE_CNT) {
1184                BCMLOG_ERR("List Out Of bounds %x\n", hw->rx_list_post_index);
1185                return BC_STS_INV_ARG;
1186        }
1187
1188        spin_lock_irqsave(&hw->rx_lock, flags);
1189        /* FIXME: jarod: sts_free is an enum for 0, in crystalhd_hw.h... yuk... */
1190        if (sts_free != hw->rx_list_sts[hw->rx_list_post_index]) {
1191                spin_unlock_irqrestore(&hw->rx_lock, flags);
1192                return BC_STS_BUSY;
1193        }
1194
1195        if (!hw->rx_list_post_index) {
1196                y_low_addr_reg   = MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST0;
1197                y_high_addr_reg  = MISC1_Y_RX_FIRST_DESC_U_ADDR_LIST0;
1198                uv_low_addr_reg  = MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST0;
1199                uv_high_addr_reg = MISC1_UV_RX_FIRST_DESC_U_ADDR_LIST0;
1200        } else {
1201                y_low_addr_reg   = MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST1;
1202                y_high_addr_reg  = MISC1_Y_RX_FIRST_DESC_U_ADDR_LIST1;
1203                uv_low_addr_reg  = MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST1;
1204                uv_high_addr_reg = MISC1_UV_RX_FIRST_DESC_U_ADDR_LIST1;
1205        }
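            /*
             * Tag the packet with its list index (rx_pkt_tag_seed + list) so
             * the ISR can find it again, record which planes still owe a
             * completion interrupt, and ping-pong the post index between the
             * two hardware lists.
             */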
1206        rx_pkt->pkt_tag = hw->rx_pkt_tag_seed + hw->rx_list_post_index;
1207        hw->rx_list_sts[hw->rx_list_post_index] |= rx_waiting_y_intr;
1208        if (rx_pkt->uv_phy_addr)
1209                hw->rx_list_sts[hw->rx_list_post_index] |= rx_waiting_uv_intr;
1210        hw->rx_list_post_index = (hw->rx_list_post_index + 1) % DMA_ENGINE_CNT;
1211        spin_unlock_irqrestore(&hw->rx_lock, flags);
1212
1213        crystalhd_dioq_add(hw->rx_actq, (void *)rx_pkt, false, rx_pkt->pkt_tag);
1214
1215        crystalhd_start_rx_dma_engine(hw);
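            /*
             * Bit 0 is set in the low descriptor-address writes below; it
             * appears to act as the list-valid/start flag for the RX DMA
             * engine (an assumption based on usage here, not documentation).
             */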
1216        /* Program the Y descriptor */
1217        desc_addr.full_addr = rx_pkt->desc_mem.phy_addr;
1218        crystalhd_reg_wr(hw->adp, y_high_addr_reg, desc_addr.high_part);
1219        crystalhd_reg_wr(hw->adp, y_low_addr_reg, desc_addr.low_part | 0x01);
1220
1221        if (rx_pkt->uv_phy_addr) {
1222                /* Program the UV descriptor */
1223                desc_addr.full_addr = rx_pkt->uv_phy_addr;
1224                crystalhd_reg_wr(hw->adp, uv_high_addr_reg, desc_addr.high_part);
1225                crystalhd_reg_wr(hw->adp, uv_low_addr_reg, desc_addr.low_part | 0x01);
1226        }
1227
1228        return BC_STS_SUCCESS;
1229}
1230
1231static enum BC_STATUS crystalhd_hw_post_cap_buff(struct crystalhd_hw *hw,
1232                                          struct crystalhd_rx_dma_pkt *rx_pkt)
1233{
1234        enum BC_STATUS sts = crystalhd_hw_prog_rxdma(hw, rx_pkt);
1235
1236        if (sts == BC_STS_BUSY)
1237                crystalhd_dioq_add(hw->rx_freeq, (void *)rx_pkt,
1238                                 false, rx_pkt->pkt_tag);
1239
1240        return sts;
1241}
1242
1243static void crystalhd_get_dnsz(struct crystalhd_hw *hw, uint32_t list_index,
1244                             uint32_t *y_dw_dnsz, uint32_t *uv_dw_dnsz)
1245{
1246        uint32_t y_dn_sz_reg, uv_dn_sz_reg;
1247
1248        if (!list_index) {
1249                y_dn_sz_reg  = MISC1_Y_RX_LIST0_CUR_BYTE_CNT;
1250                uv_dn_sz_reg = MISC1_UV_RX_LIST0_CUR_BYTE_CNT;
1251        } else {
1252                y_dn_sz_reg  = MISC1_Y_RX_LIST1_CUR_BYTE_CNT;
1253                uv_dn_sz_reg = MISC1_UV_RX_LIST1_CUR_BYTE_CNT;
1254        }
1255
1256        *y_dw_dnsz  = crystalhd_reg_rd(hw->adp, y_dn_sz_reg);
1257        *uv_dw_dnsz = crystalhd_reg_rd(hw->adp, uv_dn_sz_reg);
1258}
1259
1260/*
1261 * This function should be called only after making sure that the two DMA
1262 * lists are free. It does not check whether the DMAs are active before
1263 * turning them off.
1264 */
1265static void crystalhd_hw_finalize_pause(struct crystalhd_hw *hw)
1266{
1267        uint32_t dma_cntrl, aspm;
1268
1269        hw->stop_pending = 0;
1270
1271        dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
1272        if (dma_cntrl & DMA_START_BIT) {
1273                dma_cntrl &= ~DMA_START_BIT;
1274                crystalhd_reg_wr(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1275        }
1276
1277        dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
1278        if (dma_cntrl & DMA_START_BIT) {
1279                dma_cntrl &= ~DMA_START_BIT;
1280                crystalhd_reg_wr(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
1281        }
1282        hw->rx_list_post_index = 0;
1283
1284        aspm = crystalhd_reg_rd(hw->adp, PCIE_DLL_DATA_LINK_CONTROL);
1285        aspm |= ASPM_L1_ENABLE;
1286        /* NAREN BCMLOG(BCMLOG_INFO, "aspm on\n"); */
1287        crystalhd_reg_wr(hw->adp, PCIE_DLL_DATA_LINK_CONTROL, aspm);
1288}
1289
1290static enum BC_STATUS crystalhd_rx_pkt_done(struct crystalhd_hw *hw, uint32_t list_index,
1291                                     enum BC_STATUS comp_sts)
1292{
1293        struct crystalhd_rx_dma_pkt *rx_pkt = NULL;
1294        uint32_t y_dw_dnsz, uv_dw_dnsz;
1295        enum BC_STATUS sts = BC_STS_SUCCESS;
1296
1297        if (!hw || list_index >= DMA_ENGINE_CNT) {
1298                BCMLOG_ERR("Invalid Arguments\n");
1299                return BC_STS_INV_ARG;
1300        }
1301
1302        rx_pkt = crystalhd_dioq_find_and_fetch(hw->rx_actq,
1303                                             hw->rx_pkt_tag_seed + list_index);
1304        if (!rx_pkt) {
1305                BCMLOG_ERR("Act-Q:PostIx:%x L0Sts:%x L1Sts:%x current L:%x tag:%x comp:%x\n",
1306                           hw->rx_list_post_index, hw->rx_list_sts[0],
1307                           hw->rx_list_sts[1], list_index,
1308                           hw->rx_pkt_tag_seed + list_index, comp_sts);
1309                return BC_STS_INV_ARG;
1310        }
1311
1312        if (comp_sts == BC_STS_SUCCESS) {
1313                crystalhd_get_dnsz(hw, list_index, &y_dw_dnsz, &uv_dw_dnsz);
1314                rx_pkt->dio_req->uinfo.y_done_sz = y_dw_dnsz;
1315                rx_pkt->flags = COMP_FLAG_DATA_VALID;
1316                if (rx_pkt->uv_phy_addr)
1317                        rx_pkt->dio_req->uinfo.uv_done_sz = uv_dw_dnsz;
1318                crystalhd_dioq_add(hw->rx_rdyq, rx_pkt, true,
1319                                hw->rx_pkt_tag_seed + list_index);
1320                return sts;
1321        }
1322
1323        /* Check if we can post this DIO again. */
1324        return crystalhd_hw_post_cap_buff(hw, rx_pkt);
1325}
1326
1327static bool crystalhd_rx_list0_handler(struct crystalhd_hw *hw, uint32_t int_sts,
1328                                     uint32_t y_err_sts, uint32_t uv_err_sts)
1329{
1330        uint32_t tmp;
1331        enum list_sts tmp_lsts;
1332
1333        if (!(y_err_sts & GET_Y0_ERR_MSK) && !(uv_err_sts & GET_UV0_ERR_MSK))
1334                return false;
1335
1336        tmp_lsts = hw->rx_list_sts[0];
1337
1338        /* Y0 - DMA */
1339        tmp = y_err_sts & GET_Y0_ERR_MSK;
1340        if (int_sts & INTR_INTR_STATUS_L0_Y_RX_DMA_DONE_INTR_MASK)
1341                hw->rx_list_sts[0] &= ~rx_waiting_y_intr;
1342
1343        if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK) {
1344                hw->rx_list_sts[0] &= ~rx_waiting_y_intr;
1345                tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK;
1346        }
1347
1348        if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK) {
1349                hw->rx_list_sts[0] &= ~rx_y_mask;
1350                hw->rx_list_sts[0] |= rx_y_error;
1351                tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK;
1352        }
1353
1354        if (tmp) {
1355                hw->rx_list_sts[0] &= ~rx_y_mask;
1356                hw->rx_list_sts[0] |= rx_y_error;
1357                hw->rx_list_post_index = 0;
1358        }
1359
1360        /* UV0 - DMA */
1361        tmp = uv_err_sts & GET_UV0_ERR_MSK;
1362        if (int_sts & INTR_INTR_STATUS_L0_UV_RX_DMA_DONE_INTR_MASK)
1363                hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;
1364
1365        if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK) {
1366                hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;
1367                tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK;
1368        }
1369
1370        if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK) {
1371                hw->rx_list_sts[0] &= ~rx_uv_mask;
1372                hw->rx_list_sts[0] |= rx_uv_error;
1373                tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK;
1374        }
1375
1376        if (tmp) {
1377                hw->rx_list_sts[0] &= ~rx_uv_mask;
1378                hw->rx_list_sts[0] |= rx_uv_error;
1379                hw->rx_list_post_index = 0;
1380        }
1381
1382        if (y_err_sts & GET_Y0_ERR_MSK) {
1383                tmp = y_err_sts & GET_Y0_ERR_MSK;
1384                crystalhd_reg_wr(hw->adp, MISC1_Y_RX_ERROR_STATUS, tmp);
1385        }
1386
1387        if (uv_err_sts & GET_UV0_ERR_MSK) {
1388                tmp = uv_err_sts & GET_UV0_ERR_MSK;
1389                crystalhd_reg_wr(hw->adp, MISC1_UV_RX_ERROR_STATUS, tmp);
1390        }
1391
1392        return (tmp_lsts != hw->rx_list_sts[0]);
1393}
1394
1395static bool crystalhd_rx_list1_handler(struct crystalhd_hw *hw, uint32_t int_sts,
1396                                     uint32_t y_err_sts, uint32_t uv_err_sts)
1397{
1398        uint32_t tmp;
1399        enum list_sts tmp_lsts;
1400
1401        if (!(y_err_sts & GET_Y1_ERR_MSK) && !(uv_err_sts & GET_UV1_ERR_MSK))
1402                return false;
1403
1404        tmp_lsts = hw->rx_list_sts[1];
1405
1406        /* Y1 - DMA */
1407        tmp = y_err_sts & GET_Y1_ERR_MSK;
1408        if (int_sts & INTR_INTR_STATUS_L1_Y_RX_DMA_DONE_INTR_MASK)
1409                hw->rx_list_sts[1] &= ~rx_waiting_y_intr;
1410
1411        if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK) {
1412                hw->rx_list_sts[1] &= ~rx_waiting_y_intr;
1413                tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK;
1414        }
1415
1416        if (y_err_sts & MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK) {
1417                /* Add retry-support..*/
1418                hw->rx_list_sts[1] &= ~rx_y_mask;
1419                hw->rx_list_sts[1] |= rx_y_error;
1420                tmp &= ~MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK;
1421        }
1422
1423        if (tmp) {
1424                hw->rx_list_sts[1] &= ~rx_y_mask;
1425                hw->rx_list_sts[1] |= rx_y_error;
1426                hw->rx_list_post_index = 0;
1427        }
1428
1429        /* UV1 - DMA */
1430        tmp = uv_err_sts & GET_UV1_ERR_MSK;
1431        if (int_sts & INTR_INTR_STATUS_L1_UV_RX_DMA_DONE_INTR_MASK)
1432                hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
1433
1434        if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK) {
1435                hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
1436                tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK;
1437        }
1438
1439        if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK) {
1440                /* Add retry-support*/
1441                hw->rx_list_sts[1] &= ~rx_uv_mask;
1442                hw->rx_list_sts[1] |= rx_uv_error;
1443                tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK;
1444        }
1445
1446        if (tmp) {
1447                hw->rx_list_sts[1] &= ~rx_uv_mask;
1448                hw->rx_list_sts[1] |= rx_uv_error;
1449                hw->rx_list_post_index = 0;
1450        }
1451
1452        if (y_err_sts & GET_Y1_ERR_MSK) {
1453                tmp = y_err_sts & GET_Y1_ERR_MSK;
1454                crystalhd_reg_wr(hw->adp, MISC1_Y_RX_ERROR_STATUS, tmp);
1455        }
1456
1457        if (uv_err_sts & GET_UV1_ERR_MSK) {
1458                tmp = uv_err_sts & GET_UV1_ERR_MSK;
1459                crystalhd_reg_wr(hw->adp, MISC1_UV_RX_ERROR_STATUS, tmp);
1460        }
1461
1462        return (tmp_lsts != hw->rx_list_sts[1]);
1463}
1464
1465
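/*
 * RX interrupt service: snapshot the Y/UV error status registers, run
 * the per-list handlers under rx_lock, complete any packet whose list
 * reached a terminal state, and then either restart capture or finish
 * a pending pause once both lists are free again.
 */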
1466static void crystalhd_rx_isr(struct crystalhd_hw *hw, uint32_t intr_sts)
1467{
1468        unsigned long flags;
1469        uint32_t i, list_avail = 0;
1470        enum BC_STATUS comp_sts = BC_STS_NO_DATA;
1471        uint32_t y_err_sts, uv_err_sts, y_dn_sz = 0, uv_dn_sz = 0;
1472        bool ret = false;
1473
1474        if (!hw) {
1475                BCMLOG_ERR("Invalid Arguments\n");
1476                return;
1477        }
1478
1479        if (!(intr_sts & GET_RX_INTR_MASK))
1480                return;
1481
1482        y_err_sts = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_ERROR_STATUS);
1483        uv_err_sts = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_ERROR_STATUS);
1484
1485        for (i = 0; i < DMA_ENGINE_CNT; i++) {
1486                /* Update States..*/
1487                spin_lock_irqsave(&hw->rx_lock, flags);
1488                if (i == 0)
1489                        ret = crystalhd_rx_list0_handler(hw, intr_sts, y_err_sts, uv_err_sts);
1490                else
1491                        ret = crystalhd_rx_list1_handler(hw, intr_sts, y_err_sts, uv_err_sts);
1492                if (ret) {
1493                        switch (hw->rx_list_sts[i]) {
1494                        case sts_free:
1495                                comp_sts = BC_STS_SUCCESS;
1496                                list_avail = 1;
1497                                break;
1498                        case rx_y_error:
1499                        case rx_uv_error:
1500                        case rx_sts_error:
1501                                /* We got an error on Y, UV, or both. */
1502                                hw->stats.rx_errors++;
1503                                crystalhd_get_dnsz(hw, i, &y_dn_sz, &uv_dn_sz);
1504                                /* FIXME: jarod: this is where my mini pci-e card is tripping up */
1505                                BCMLOG(BCMLOG_DBG, "list_index:%x rx[%d] Y:%x "
1506                                       "UV:%x Int:%x YDnSz:%x UVDnSz:%x\n",
1507                                       i, hw->stats.rx_errors, y_err_sts,
1508                                       uv_err_sts, intr_sts, y_dn_sz, uv_dn_sz);
1509                                hw->rx_list_sts[i] = sts_free;
1510                                comp_sts = BC_STS_ERROR;
1511                                break;
1512                        default:
1513                                /* Wait for completion..*/
1514                                comp_sts = BC_STS_NO_DATA;
1515                                break;
1516                        }
1517                }
1518                spin_unlock_irqrestore(&hw->rx_lock, flags);
1519
1520                /* handle completion...*/
1521                if (comp_sts != BC_STS_NO_DATA) {
1522                        crystalhd_rx_pkt_done(hw, i, comp_sts);
1523                        comp_sts = BC_STS_NO_DATA;
1524                }
1525        }
1526
1527        if (list_avail) {
1528                if (hw->stop_pending) {
1529                        if ((hw->rx_list_sts[0] == sts_free) &&
1530                            (hw->rx_list_sts[1] == sts_free))
1531                                crystalhd_hw_finalize_pause(hw);
1532                } else {
1533                        crystalhd_hw_start_capture(hw);
1534                }
1535        }
1536}
1537
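/*
 * Post-process firmware responses that carry state the driver needs:
 * CHAN_START_VIDEO returns the picture-info delivery/release queue
 * addresses, and after eCMD_C011_INIT the firmware configuration is
 * pushed to the device.
 */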
1538static enum BC_STATUS crystalhd_fw_cmd_post_proc(struct crystalhd_hw *hw,
1539                                          struct BC_FW_CMD *fw_cmd)
1540{
1541        enum BC_STATUS sts = BC_STS_SUCCESS;
1542        struct dec_rsp_channel_start_video *st_rsp = NULL;
1543
1544        switch (fw_cmd->cmd[0]) {
1545        case eCMD_C011_DEC_CHAN_START_VIDEO:
1546                st_rsp = (struct dec_rsp_channel_start_video *)fw_cmd->rsp;
1547                hw->pib_del_Q_addr = st_rsp->picInfoDeliveryQ;
1548                hw->pib_rel_Q_addr = st_rsp->picInfoReleaseQ;
1549                BCMLOG(BCMLOG_DBG, "DelQAddr:%x RelQAddr:%x\n",
1550                       hw->pib_del_Q_addr, hw->pib_rel_Q_addr);
1551                break;
1552        case eCMD_C011_INIT:
1553                if (!(crystalhd_load_firmware_config(hw->adp))) {
1554                        BCMLOG_ERR("Failed to load firmware config.\n");
1555                        sts = BC_STS_FW_AUTH_FAILED;
1556                }
1557                break;
1558        default:
1559                break;
1560        }
1561        return sts;
1562}
1563
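/*
 * Power-down helper for suspend: pulse the 7412 decoder reset, idle
 * the DDR (precharge all banks and drop CKE), reset the audio block
 * and power down the decoder PLLs.
 */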
1564static enum BC_STATUS crystalhd_put_ddr2sleep(struct crystalhd_hw *hw)
1565{
1566        uint32_t reg;
1567        union link_misc_perst_decoder_ctrl rst_cntrl_reg;
1568
1569        /* Pulse reset pin of 7412 (MISC_PERST_DECODER_CTRL) */
1570        rst_cntrl_reg.whole_reg = crystalhd_reg_rd(hw->adp, MISC_PERST_DECODER_CTRL);
1571
1572        rst_cntrl_reg.bcm_7412_rst = 1;
1573        crystalhd_reg_wr(hw->adp, MISC_PERST_DECODER_CTRL, rst_cntrl_reg.whole_reg);
1574        msleep_interruptible(50);
1575
1576        rst_cntrl_reg.bcm_7412_rst = 0;
1577        crystalhd_reg_wr(hw->adp, MISC_PERST_DECODER_CTRL, rst_cntrl_reg.whole_reg);
1578
1579        /* Close all banks, put DDR in idle */
1580        bc_dec_reg_wr(hw->adp, SDRAM_PRECHARGE, 0);
1581
1582        /* Set bit 25 (drop CKE pin of DDR) */
1583        reg = bc_dec_reg_rd(hw->adp, SDRAM_PARAM);
1584        reg |= 0x02000000;
1585        bc_dec_reg_wr(hw->adp, SDRAM_PARAM, reg);
1586
1587        /* Reset the audio block */
1588        bc_dec_reg_wr(hw->adp, AUD_DSP_MISC_SOFT_RESET, 0x1);
1589
1590        /* Power down Raptor PLL */
1591        reg = bc_dec_reg_rd(hw->adp, DecHt_PllCCtl);
1592        reg |= 0x00008000;
1593        bc_dec_reg_wr(hw->adp, DecHt_PllCCtl, reg);
1594
1595        /* Power down all Audio PLL */
1596        bc_dec_reg_wr(hw->adp, AIO_MISC_PLL_RESET, 0x1);
1597
1598        /* Power down video clock (75MHz) */
1599        reg = bc_dec_reg_rd(hw->adp, DecHt_PllECtl);
1600        reg |= 0x00008000;
1601        bc_dec_reg_wr(hw->adp, DecHt_PllECtl, reg);
1602
1603        /* Power down video clock (75MHz) */
1604        reg = bc_dec_reg_rd(hw->adp, DecHt_PllDCtl);
1605        reg |= 0x00008000;
1606        bc_dec_reg_wr(hw->adp, DecHt_PllDCtl, reg);
1607
1608        /* Power down core clock (200MHz) */
1609        reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);
1610        reg |= 0x00008000;
1611        bc_dec_reg_wr(hw->adp, DecHt_PllACtl, reg);
1612
1613        /* Power down core clock (200MHz) */
1614        reg = bc_dec_reg_rd(hw->adp, DecHt_PllBCtl);
1615        reg |= 0x00008000;
1616        bc_dec_reg_wr(hw->adp, DecHt_PllBCtl, reg);
1617
1618        return BC_STS_SUCCESS;
1619}
1620
1621/************************************************
1622** Externally visible HW interface
1623*************************************************/
1624
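/*
 * Download the boot firmware over the DCI interface: check that the
 * OTP is programmed, request a download and poll DCI_STATUS for the
 * ready bit, stream the image into decoder DRAM starting at
 * BC_FWIMG_ST_ADDR, write the trailing signature words (byte-swapped,
 * highest signature register first), then trigger verification, wait
 * for it to finish, and flag DCI_CMD to complete the sequence.
 */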
1625enum BC_STATUS crystalhd_download_fw(struct crystalhd_adp *adp, void *buffer, uint32_t sz)
1626{
1627        uint32_t reg_data, cnt, *temp_buff;
1628        uint32_t fw_sig_len = 36;
1629        uint32_t dram_offset = BC_FWIMG_ST_ADDR, sig_reg;
1630
1631        BCMLOG_ENTER;
1632
1633        if (!adp || !buffer || !sz) {
1634                BCMLOG_ERR("Invalid Params.\n");
1635                return BC_STS_INV_ARG;
1636        }
1637
1638        reg_data = crystalhd_reg_rd(adp, OTP_CMD);
1639        if (!(reg_data & 0x02)) {
1640                BCMLOG_ERR("Invalid hw config.. otp not programmed\n");
1641                return BC_STS_ERROR;
1642        }
1643
1644        reg_data = 0;
1645        crystalhd_reg_wr(adp, DCI_CMD, 0);
1646        reg_data |= BC_BIT(0);
1647        crystalhd_reg_wr(adp, DCI_CMD, reg_data);
1648
1649        reg_data = 0;
1650        cnt = 1000;
1651        msleep_interruptible(10);
1652
1653        while (reg_data != BC_BIT(4)) {
1654                reg_data = crystalhd_reg_rd(adp, DCI_STATUS);
1655                reg_data &= BC_BIT(4);
1656                if (--cnt == 0) {
1657                        BCMLOG_ERR("Firmware Download RDY Timeout.\n");
1658                        return BC_STS_TIMEOUT;
1659                }
1660        }
1661
1662        msleep_interruptible(10);
1663        /* Program the DRAM load address into DCI_FIRMWARE_ADDR, then stream the image */
1664        crystalhd_reg_wr(adp, DCI_FIRMWARE_ADDR, dram_offset);
1665        temp_buff = (uint32_t *)buffer;
1666        for (cnt = 0; cnt < (sz - fw_sig_len); cnt += 4) {
1667                crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (dram_offset >> 19));
1668                crystalhd_reg_wr(adp, DCI_FIRMWARE_DATA, *temp_buff);
1669                dram_offset += 4;
1670                temp_buff++;
1671        }
1672        msleep_interruptible(10);
1673
1674        temp_buff++;
1675
1676        sig_reg = (uint32_t)DCI_SIGNATURE_DATA_7;
1677        for (cnt = 0; cnt < 8; cnt++) {
1678                uint32_t swapped_data = *temp_buff;
1679                swapped_data = bswap_32_1(swapped_data);
1680                crystalhd_reg_wr(adp, sig_reg, swapped_data);
1681                sig_reg -= 4;
1682                temp_buff++;
1683        }
1684        msleep_interruptible(10);
1685
1686        reg_data = 0;
1687        reg_data |= BC_BIT(1);
1688        crystalhd_reg_wr(adp, DCI_CMD, reg_data);
1689        msleep_interruptible(10);
1690
1691        reg_data = 0;
1692        reg_data = crystalhd_reg_rd(adp, DCI_STATUS);
1693
1694        if ((reg_data & BC_BIT(9)) == BC_BIT(9)) {
1695                cnt = 1000;
1696                while ((reg_data & BC_BIT(0)) != BC_BIT(0)) {
1697                        reg_data = crystalhd_reg_rd(adp, DCI_STATUS);
1698                        reg_data &= BC_BIT(0);
1699                        if (!(--cnt))
1700                                break;
1701                        msleep_interruptible(10);
1702                }
1703                reg_data = 0;
1704                reg_data = crystalhd_reg_rd(adp, DCI_CMD);
1705                reg_data |= BC_BIT(4);
1706                crystalhd_reg_wr(adp, DCI_CMD, reg_data);
1707
1708        } else {
1709                BCMLOG_ERR("F/w Signature mismatch\n");
1710                return BC_STS_FW_AUTH_FAILED;
1711        }
1712
1713        BCMLOG(BCMLOG_INFO, "Firmware Downloaded Successfully\n");
1714        return BC_STS_SUCCESS;
1715}
1716
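/*
 * Issue a firmware command through the mailbox: the command block is
 * written to TS_Host2CpuSnd, its address is rung into Hst2CpuMbx1, and
 * the caller sleeps until the ISR sees the firmware-response interrupt
 * and sets fwcmd_evt_sts.  The response address comes back in
 * Cpu2HstMbx1.  pwr_lock is held across the command so the core clock
 * cannot be changed mid-transaction.
 */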
1717enum BC_STATUS crystalhd_do_fw_cmd(struct crystalhd_hw *hw,
1718                                struct BC_FW_CMD *fw_cmd)
1719{
1720        uint32_t cnt = 0, cmd_res_addr;
1721        uint32_t *cmd_buff, *res_buff;
1722        wait_queue_head_t fw_cmd_event;
1723        int rc = 0;
1724        enum BC_STATUS sts;
1725
1726        crystalhd_create_event(&fw_cmd_event);
1727
1728        BCMLOG_ENTER;
1729
1730        if (!hw || !fw_cmd) {
1731                BCMLOG_ERR("Invalid Arguments\n");
1732                return BC_STS_INV_ARG;
1733        }
1734
1735        cmd_buff = fw_cmd->cmd;
1736        res_buff = fw_cmd->rsp;
1737
1738        if (!cmd_buff || !res_buff) {
1739                BCMLOG_ERR("Invalid Parameters for F/W Command\n");
1740                return BC_STS_INV_ARG;
1741        }
1742
1743        hw->pwr_lock++;
1744
1745        hw->fwcmd_evt_sts = 0;
1746        hw->pfw_cmd_event = &fw_cmd_event;
1747
1748        /* Write the command into device memory */
1749        crystalhd_mem_wr(hw->adp, TS_Host2CpuSnd, FW_CMD_BUFF_SZ, cmd_buff);
1750
1751        /* Dummy read to flush the write through the memory arbitrator */
1752        crystalhd_mem_rd(hw->adp, TS_Host2CpuSnd, 1, &cnt);
1753
1754        /* Write the command address to mailbox */
1755        bc_dec_reg_wr(hw->adp, Hst2CpuMbx1, TS_Host2CpuSnd);
1756        msleep_interruptible(50);
1757
1758        crystalhd_wait_on_event(&fw_cmd_event, hw->fwcmd_evt_sts, 20000, rc, 0);
1759
1760        if (!rc) {
1761                sts = BC_STS_SUCCESS;
1762        } else if (rc == -EBUSY) {
1763                BCMLOG_ERR("Firmware command T/O\n");
1764                sts = BC_STS_TIMEOUT;
1765        } else if (rc == -EINTR) {
1766                BCMLOG(BCMLOG_DBG, "FwCmd Wait Signal int.\n");
1767                sts = BC_STS_IO_USER_ABORT;
1768        } else {
1769                BCMLOG_ERR("FwCmd IO Error.\n");
1770                sts = BC_STS_IO_ERROR;
1771        }
1772
1773        if (sts != BC_STS_SUCCESS) {
1774                BCMLOG_ERR("FwCmd Failed.\n");
1775                hw->pwr_lock--;
1776                return sts;
1777        }
1778
1779        /* Get the response address */
1780        cmd_res_addr = bc_dec_reg_rd(hw->adp, Cpu2HstMbx1);
1781
1782        /* Read the response */
1783        crystalhd_mem_rd(hw->adp, cmd_res_addr, FW_CMD_BUFF_SZ, res_buff);
1784
1785        hw->pwr_lock--;
1786
1787        if (res_buff[2] != C011_RET_SUCCESS) {
1788                BCMLOG_ERR("res_buff[2] != C011_RET_SUCCESS\n");
1789                return BC_STS_FW_CMD_ERR;
1790        }
1791
1792        sts = crystalhd_fw_cmd_post_proc(hw, fw_cmd);
1793        if (sts != BC_STS_SUCCESS)
1794                BCMLOG_ERR("crystalhd_fw_cmd_post_proc Failed.\n");
1795
1796        return sts;
1797}
1798
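/*
 * Top-level interrupt handler: read the decoder (stream) and link
 * interrupt status, wake any firmware-command waiter, process picture
 * information (PIB) interrupts, dispatch the RX and TX DMA handlers,
 * and finally clear the link interrupts and issue the EOI.
 */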
1799bool crystalhd_hw_interrupt(struct crystalhd_adp *adp, struct crystalhd_hw *hw)
1800{
1801        uint32_t intr_sts = 0;
1802        uint32_t deco_intr = 0;
1803        bool rc = false;
1804
1805        if (!adp || !hw || !hw->dev_started)
1806                return rc;
1807
1808        hw->stats.num_interrupts++;
1809        hw->pwr_lock++;
1810
1811        deco_intr = bc_dec_reg_rd(adp, Stream2Host_Intr_Sts);
1812        intr_sts  = crystalhd_reg_rd(adp, INTR_INTR_STATUS);
1813
1814        if (intr_sts) {
1815                /* Let the system know we processed this interrupt */
1816                rc = true;
1817                hw->stats.dev_interrupts++;
1818        }
1819
1820        if (deco_intr && (deco_intr != 0xdeaddead)) {
1821
1822                if (deco_intr & 0x80000000) {
1823                        /*Set the Event and the status flag*/
1824                        if (hw->pfw_cmd_event) {
1825                                hw->fwcmd_evt_sts = 1;
1826                                crystalhd_set_event(hw->pfw_cmd_event);
1827                        }
1828                }
1829
1830                if (deco_intr & BC_BIT(1))
1831                        crystalhd_hw_proc_pib(hw);
1832
1833                bc_dec_reg_wr(adp, Stream2Host_Intr_Sts, deco_intr);
1834                /* FIXME: jarod: No udelay? might this be the real reason mini pci-e cards were stalling out? */
1835                bc_dec_reg_wr(adp, Stream2Host_Intr_Sts, 0);
1836                rc = true;
1837        }
1838
1839        /* Rx interrupts */
1840        crystalhd_rx_isr(hw, intr_sts);
1841
1842        /* Tx interrupts*/
1843        crystalhd_tx_isr(hw, intr_sts);
1844
1845        /* Clear interrupts */
1846        if (rc) {
1847                if (intr_sts)
1848                        crystalhd_reg_wr(adp, INTR_INTR_CLR_REG, intr_sts);
1849
1850                crystalhd_reg_wr(adp, INTR_EOI_CTRL, 1);
1851        }
1852
1853        hw->pwr_lock--;
1854
1855        return rc;
1856}
1857
1858enum BC_STATUS crystalhd_hw_open(struct crystalhd_hw *hw, struct crystalhd_adp *adp)
1859{
1860        if (!hw || !adp) {
1861                BCMLOG_ERR("Invalid Arguments\n");
1862                return BC_STS_INV_ARG;
1863        }
1864
1865        if (hw->dev_started)
1866                return BC_STS_SUCCESS;
1867
1868        memset(hw, 0, sizeof(struct crystalhd_hw));
1869
1870        hw->adp = adp;
1871        spin_lock_init(&hw->lock);
1872        spin_lock_init(&hw->rx_lock);
1873        /* FIXME: jarod: what are these magic numbers?!? */
1874        hw->tx_ioq_tag_seed = 0x70023070;
1875        hw->rx_pkt_tag_seed = 0x70029070;
1876
1877        hw->stop_pending = 0;
1878        crystalhd_start_device(hw->adp);
1879        hw->dev_started = true;
1880
1881        /* set initial core clock  */
1882        hw->core_clock_mhz = CLOCK_PRESET;
1883        hw->prev_n = 0;
1884        hw->pwr_lock = 0;
1885        crystalhd_hw_set_core_clock(hw);
1886
1887        return BC_STS_SUCCESS;
1888}
1889
1890enum BC_STATUS crystalhd_hw_close(struct crystalhd_hw *hw)
1891{
1892        if (!hw) {
1893                BCMLOG_ERR("Invalid Arguments\n");
1894                return BC_STS_INV_ARG;
1895        }
1896
1897        if (!hw->dev_started)
1898                return BC_STS_SUCCESS;
1899
1900        /* Device stop and DDR sleep happen in here */
1901        crystalhd_hw_suspend(hw);
1902        hw->dev_started = false;
1903
1904        return BC_STS_SUCCESS;
1905}
1906
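/*
 * Allocate the DMA descriptor rings: create the I/O queues, give each
 * of the BC_TX_LIST_CNT TX lists a descriptor block and park it on
 * tx_freeq, then build BC_RX_LIST_CNT RX packets (descriptor block
 * plus tag) and return them to the free pool via
 * crystalhd_hw_free_rx_pkt().
 */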
1907enum BC_STATUS crystalhd_hw_setup_dma_rings(struct crystalhd_hw *hw)
1908{
1909        unsigned int i;
1910        void *mem;
1911        size_t mem_len;
1912        dma_addr_t phy_addr;
1913        enum BC_STATUS sts = BC_STS_SUCCESS;
1914        struct crystalhd_rx_dma_pkt *rpkt;
1915
1916        if (!hw || !hw->adp) {
1917                BCMLOG_ERR("Invalid Arguments\n");
1918                return BC_STS_INV_ARG;
1919        }
1920
1921        sts = crystalhd_hw_create_ioqs(hw);
1922        if (sts != BC_STS_SUCCESS) {
1923                BCMLOG_ERR("Failed to create IOQs..\n");
1924                return sts;
1925        }
1926
1927        mem_len = BC_LINK_MAX_SGLS * sizeof(struct dma_descriptor);
1928
1929        for (i = 0; i < BC_TX_LIST_CNT; i++) {
1930                mem = bc_kern_dma_alloc(hw->adp, mem_len, &phy_addr);
1931                if (mem) {
1932                        memset(mem, 0, mem_len);
1933                } else {
1934                        BCMLOG_ERR("Insufficient Memory For TX\n");
1935                        crystalhd_hw_free_dma_rings(hw);
1936                        return BC_STS_INSUFF_RES;
1937                }
1938                /* tx_pkt_pool -- statically allocated descriptor memory */
1939                hw->tx_pkt_pool[i].desc_mem.pdma_desc_start = mem;
1940                hw->tx_pkt_pool[i].desc_mem.phy_addr = phy_addr;
1941                hw->tx_pkt_pool[i].desc_mem.sz = BC_LINK_MAX_SGLS *
1942                                                 sizeof(struct dma_descriptor);
1943                hw->tx_pkt_pool[i].list_tag = 0;
1944
1945                /* Add TX dma requests to Free Queue..*/
1946                sts = crystalhd_dioq_add(hw->tx_freeq,
1947                                       &hw->tx_pkt_pool[i], false, 0);
1948                if (sts != BC_STS_SUCCESS) {
1949                        crystalhd_hw_free_dma_rings(hw);
1950                        return sts;
1951                }
1952        }
1953
1954        for (i = 0; i < BC_RX_LIST_CNT; i++) {
1955                rpkt = kzalloc(sizeof(*rpkt), GFP_KERNEL);
1956                if (!rpkt) {
1957                        BCMLOG_ERR("Insufficient Memory For RX\n");
1958                        crystalhd_hw_free_dma_rings(hw);
1959                        return BC_STS_INSUFF_RES;
1960                }
1961
1962                mem = bc_kern_dma_alloc(hw->adp, mem_len, &phy_addr);
1963                if (mem) {
1964                        memset(mem, 0, mem_len);
1965                } else {
1966                        BCMLOG_ERR("Insufficient Memory For RX\n");
1967                        crystalhd_hw_free_dma_rings(hw);
1968                        return BC_STS_INSUFF_RES;
1969                }
1970                rpkt->desc_mem.pdma_desc_start = mem;
1971                rpkt->desc_mem.phy_addr = phy_addr;
1972                rpkt->desc_mem.sz  = BC_LINK_MAX_SGLS * sizeof(struct dma_descriptor);
1973                rpkt->pkt_tag = hw->rx_pkt_tag_seed + i;
1974                crystalhd_hw_free_rx_pkt(hw, rpkt);
1975        }
1976
1977        return BC_STS_SUCCESS;
1978}
1979
1980enum BC_STATUS crystalhd_hw_free_dma_rings(struct crystalhd_hw *hw)
1981{
1982        unsigned int i;
1983        struct crystalhd_rx_dma_pkt *rpkt = NULL;
1984
1985        if (!hw || !hw->adp) {
1986                BCMLOG_ERR("Invalid Arguments\n");
1987                return BC_STS_INV_ARG;
1988        }
1989
1990        /* Delete all IOQs.. */
1991        crystalhd_hw_delete_ioqs(hw);
1992
1993        for (i = 0; i < BC_TX_LIST_CNT; i++) {
1994                if (hw->tx_pkt_pool[i].desc_mem.pdma_desc_start) {
1995                        bc_kern_dma_free(hw->adp,
1996                                hw->tx_pkt_pool[i].desc_mem.sz,
1997                                hw->tx_pkt_pool[i].desc_mem.pdma_desc_start,
1998                                hw->tx_pkt_pool[i].desc_mem.phy_addr);
1999
2000                        hw->tx_pkt_pool[i].desc_mem.pdma_desc_start = NULL;
2001                }
2002        }
2003
2004        BCMLOG(BCMLOG_DBG, "Releasing RX Pkt pool\n");
2005        do {
2006                rpkt = crystalhd_hw_alloc_rx_pkt(hw);
2007                if (!rpkt)
2008                        break;
2009                bc_kern_dma_free(hw->adp, rpkt->desc_mem.sz,
2010                                 rpkt->desc_mem.pdma_desc_start,
2011                                 rpkt->desc_mem.phy_addr);
2012                kfree(rpkt);
2013        } while (rpkt);
2014
2015        return BC_STS_SUCCESS;
2016}
2017
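/*
 * Post a TX (decode input) request: translate the user scatter list
 * into a DMA descriptor chain, alternate between the two hardware TX
 * lists, queue the packet on tx_actq, and kick the DMA engine by
 * writing the first descriptor address with the valid bit set.
 */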
2018enum BC_STATUS crystalhd_hw_post_tx(struct crystalhd_hw *hw, struct crystalhd_dio_req *ioreq,
2019                             hw_comp_callback call_back,
2020                             wait_queue_head_t *cb_event, uint32_t *list_id,
2021                             uint8_t data_flags)
2022{
2023        struct tx_dma_pkt *tx_dma_packet = NULL;
2024        uint32_t first_desc_u_addr, first_desc_l_addr;
2025        uint32_t low_addr, high_addr;
2026        union addr_64 desc_addr;
2027        enum BC_STATUS sts, add_sts;
2028        uint32_t dummy_index = 0;
2029        unsigned long flags;
2030        bool rc;
2031
2032        if (!hw || !ioreq || !call_back || !cb_event || !list_id) {
2033                BCMLOG_ERR("Invalid Arguments\n");
2034                return BC_STS_INV_ARG;
2035        }
2036
2037        /*
2038         * Since we hit code in busy condition very frequently,
2039         * we will check the code in status first before
2040         * checking the availability of free elem.
2041         *
2042         * This will avoid the Q fetch/add in normal condition.
2043         */
2044        rc = crystalhd_code_in_full(hw->adp, ioreq->uinfo.xfr_len,
2045                                  false, data_flags);
2046        if (rc) {
2047                hw->stats.cin_busy++;
2048                return BC_STS_BUSY;
2049        }
2050
2051        /* Get a list from TxFreeQ */
2052        tx_dma_packet = (struct tx_dma_pkt *)crystalhd_dioq_fetch(hw->tx_freeq);
2053        if (!tx_dma_packet) {
2054                BCMLOG_ERR("No empty elements..\n");
2055                return BC_STS_ERR_USAGE;
2056        }
2057
2058        sts = crystalhd_xlat_sgl_to_dma_desc(ioreq,
2059                                           &tx_dma_packet->desc_mem,
2060                                           &dummy_index);
2061        if (sts != BC_STS_SUCCESS) {
2062                add_sts = crystalhd_dioq_add(hw->tx_freeq, tx_dma_packet,
2063                                           false, 0);
2064                if (add_sts != BC_STS_SUCCESS)
2065                        BCMLOG_ERR("double fault..\n");
2066
2067                return sts;
2068        }
2069
2070        hw->pwr_lock++;
2071
2072        desc_addr.full_addr = tx_dma_packet->desc_mem.phy_addr;
2073        low_addr = desc_addr.low_part;
2074        high_addr = desc_addr.high_part;
2075
2076        tx_dma_packet->call_back = call_back;
2077        tx_dma_packet->cb_event  = cb_event;
2078        tx_dma_packet->dio_req   = ioreq;
2079
2080        spin_lock_irqsave(&hw->lock, flags);
2081
2082        if (hw->tx_list_post_index == 0) {
2083                first_desc_u_addr = MISC1_TX_FIRST_DESC_U_ADDR_LIST0;
2084                first_desc_l_addr = MISC1_TX_FIRST_DESC_L_ADDR_LIST0;
2085        } else {
2086                first_desc_u_addr = MISC1_TX_FIRST_DESC_U_ADDR_LIST1;
2087                first_desc_l_addr = MISC1_TX_FIRST_DESC_L_ADDR_LIST1;
2088        }
2089
2090        *list_id = tx_dma_packet->list_tag = hw->tx_ioq_tag_seed +
2091                                             hw->tx_list_post_index;
2092
2093        hw->tx_list_post_index = (hw->tx_list_post_index + 1) % DMA_ENGINE_CNT;
2094
2095        spin_unlock_irqrestore(&hw->lock, flags);
2096
2097
2098        /* Insert in Active Q..*/
2099        crystalhd_dioq_add(hw->tx_actq, tx_dma_packet, false,
2100                         tx_dma_packet->list_tag);
2101
2102        /*
2103         * Interrupt will come as soon as you write
2104         * the valid bit. So be ready for that. All
2105         * the initialization should happen before that.
2106         */
2107        crystalhd_start_tx_dma_engine(hw);
2108        crystalhd_reg_wr(hw->adp, first_desc_u_addr, desc_addr.high_part);
2109
2110        crystalhd_reg_wr(hw->adp, first_desc_l_addr, desc_addr.low_part | 0x01);
2111                                        /* Be sure we set the valid bit ^^^^ */
2112
2113        return BC_STS_SUCCESS;
2114}
2115
2116/*
2117 * This is a force cancel and we are racing with ISR.
2118 *
2119 * Will try to remove the req from ActQ before ISR gets it.
2120 * If ISR gets it first then the completion happens in the
2121 * normal path and we will return _STS_NO_DATA from here.
2122 *
2123 * FIX_ME: Not Tested the actual condition..
2124 */
2125enum BC_STATUS crystalhd_hw_cancel_tx(struct crystalhd_hw *hw, uint32_t list_id)
2126{
2127        if (!hw || !list_id) {
2128                BCMLOG_ERR("Invalid Arguments\n");
2129                return BC_STS_INV_ARG;
2130        }
2131
2132        crystalhd_stop_tx_dma_engine(hw);
2133        crystalhd_hw_tx_req_complete(hw, list_id, BC_STS_IO_USER_ABORT);
2134
2135        return BC_STS_SUCCESS;
2136}
2137
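/*
 * Prepare a user capture buffer for RX DMA: build its descriptor
 * chain, remember where the UV descriptors begin (the hardware DMAs Y
 * and UV through separate lists), and either post it to the hardware
 * right away or park it on rx_freeq for a later capture start.
 */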
2138enum BC_STATUS crystalhd_hw_add_cap_buffer(struct crystalhd_hw *hw,
2139                                    struct crystalhd_dio_req *ioreq, bool en_post)
2140{
2141        struct crystalhd_rx_dma_pkt *rpkt;
2142        uint32_t tag, uv_desc_ix = 0;
2143        enum BC_STATUS sts;
2144
2145        if (!hw || !ioreq) {
2146                BCMLOG_ERR("Invalid Arguments\n");
2147                return BC_STS_INV_ARG;
2148        }
2149
2150        rpkt = crystalhd_hw_alloc_rx_pkt(hw);
2151        if (!rpkt) {
2152                BCMLOG_ERR("Insufficient resources\n");
2153                return BC_STS_INSUFF_RES;
2154        }
2155
2156        rpkt->dio_req = ioreq;
2157        tag = rpkt->pkt_tag;
2158
2159        sts = crystalhd_xlat_sgl_to_dma_desc(ioreq, &rpkt->desc_mem, &uv_desc_ix);
2160        if (sts != BC_STS_SUCCESS)
2161                return sts;
2162
2163        rpkt->uv_phy_addr = 0;
2164
2165        /* Store the UV descriptor start address in the rx packet for posting */
2166        if (uv_desc_ix)
2167                rpkt->uv_phy_addr = rpkt->desc_mem.phy_addr +
2168                                    (sizeof(struct dma_descriptor) * (uv_desc_ix + 1));
2169
2170        if (en_post)
2171                sts = crystalhd_hw_post_cap_buff(hw, rpkt);
2172        else
2173                sts = crystalhd_dioq_add(hw->rx_freeq, rpkt, false, tag);
2174
2175        return sts;
2176}
2177
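/*
 * Hand a completed capture buffer back to the caller: wait on rx_rdyq
 * for a finished packet, copy out the picture information block when
 * the PIB-valid flag is set, return the dio request and recycle the
 * rx packet wrapper.
 */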
2178enum BC_STATUS crystalhd_hw_get_cap_buffer(struct crystalhd_hw *hw,
2179                                    struct BC_PIC_INFO_BLOCK *pib,
2180                                    struct crystalhd_dio_req **ioreq)
2181{
2182        struct crystalhd_rx_dma_pkt *rpkt;
2183        uint32_t timeout = BC_PROC_OUTPUT_TIMEOUT / 1000;
2184        uint32_t sig_pending = 0;
2185
2186
2187        if (!hw || !ioreq || !pib) {
2188                BCMLOG_ERR("Invalid Arguments\n");
2189                return BC_STS_INV_ARG;
2190        }
2191
2192        rpkt = crystalhd_dioq_fetch_wait(hw->rx_rdyq, timeout, &sig_pending);
2193        if (!rpkt) {
2194                if (sig_pending) {
2195                        BCMLOG(BCMLOG_INFO, "wait on frame interrupted, sig_pending %d\n", sig_pending);
2196                        return BC_STS_IO_USER_ABORT;
2197                } else {
2198                        return BC_STS_TIMEOUT;
2199                }
2200        }
2201
2202        rpkt->dio_req->uinfo.comp_flags = rpkt->flags;
2203
2204        if (rpkt->flags & COMP_FLAG_PIB_VALID)
2205                memcpy(pib, &rpkt->pib, sizeof(*pib));
2206
2207        *ioreq = rpkt->dio_req;
2208
2209        crystalhd_hw_free_rx_pkt(hw, rpkt);
2210
2211        return BC_STS_SUCCESS;
2212}
2213
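/*
 * Arm capture by posting one free RX buffer to each of the two DMA
 * lists; the RX ISR calls this again to re-arm the lists as packets
 * complete.
 */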
2214enum BC_STATUS crystalhd_hw_start_capture(struct crystalhd_hw *hw)
2215{
2216        struct crystalhd_rx_dma_pkt *rx_pkt;
2217        enum BC_STATUS sts;
2218        uint32_t i;
2219
2220        if (!hw) {
2221                BCMLOG_ERR("Invalid Arguments\n");
2222                return BC_STS_INV_ARG;
2223        }
2224
2225        /* This is the start of capture: post a buffer to both DMA lists. */
2226        for (i = 0; i < DMA_ENGINE_CNT; i++) {
2227                rx_pkt = crystalhd_dioq_fetch(hw->rx_freeq);
2228                if (!rx_pkt)
2229                        return BC_STS_NO_DATA;
2230                sts = crystalhd_hw_post_cap_buff(hw, rx_pkt);
2231                if (sts != BC_STS_SUCCESS)
2232                        break;
2233
2234        }
2235
2236        return BC_STS_SUCCESS;
2237}
2238
2239enum BC_STATUS crystalhd_hw_stop_capture(struct crystalhd_hw *hw)
2240{
2241        void *temp = NULL;
2242
2243        if (!hw) {
2244                BCMLOG_ERR("Invalid Arguments\n");
2245                return BC_STS_INV_ARG;
2246        }
2247
2248        crystalhd_stop_rx_dma_engine(hw);
2249
2250        do {
2251                temp = crystalhd_dioq_fetch(hw->rx_freeq);
2252                if (temp)
2253                        crystalhd_rx_pkt_rel_call_back(hw, temp);
2254        } while (temp);
2255
2256        return BC_STS_SUCCESS;
2257}
2258
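/*
 * Pause is asynchronous: mark stop_pending and finalize immediately if
 * both RX lists are already idle, otherwise the RX ISR finalizes the
 * pause once the in-flight captures finish.
 */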
2259enum BC_STATUS crystalhd_hw_pause(struct crystalhd_hw *hw)
2260{
2261        hw->stats.pause_cnt++;
2262        hw->stop_pending = 1;
2263
2264        if ((hw->rx_list_sts[0] == sts_free) &&
2265            (hw->rx_list_sts[1] == sts_free))
2266                crystalhd_hw_finalize_pause(hw);
2267
2268        return BC_STS_SUCCESS;
2269}
2270
2271enum BC_STATUS crystalhd_hw_unpause(struct crystalhd_hw *hw)
2272{
2273        enum BC_STATUS sts;
2274        uint32_t aspm;
2275
2276        hw->stop_pending = 0;
2277
2278        aspm = crystalhd_reg_rd(hw->adp, PCIE_DLL_DATA_LINK_CONTROL);
2279        aspm &= ~ASPM_L1_ENABLE;
2280/* NAREN BCMLOG(BCMLOG_INFO, "aspm off\n"); */
2281        crystalhd_reg_wr(hw->adp, PCIE_DLL_DATA_LINK_CONTROL, aspm);
2282
2283        sts = crystalhd_hw_start_capture(hw);
2284        return sts;
2285}
2286
2287enum BC_STATUS crystalhd_hw_suspend(struct crystalhd_hw *hw)
2288{
2289        enum BC_STATUS sts;
2290
2291        if (!hw) {
2292                BCMLOG_ERR("Invalid Arguments\n");
2293                return BC_STS_INV_ARG;
2294        }
2295
2296        sts = crystalhd_put_ddr2sleep(hw);
2297        if (sts != BC_STS_SUCCESS) {
2298                BCMLOG_ERR("Failed to Put DDR To Sleep!!\n");
2299                return BC_STS_ERROR;
2300        }
2301
2302        if (!crystalhd_stop_device(hw->adp)) {
2303                BCMLOG_ERR("Failed to Stop Device!!\n");
2304                return BC_STS_ERROR;
2305        }
2306
2307        return BC_STS_SUCCESS;
2308}
2309
2310void crystalhd_hw_stats(struct crystalhd_hw *hw, struct crystalhd_hw_stats *stats)
2311{
2312        if (!hw) {
2313                BCMLOG_ERR("Invalid Arguments\n");
2314                return;
2315        }
2316
2317        /* If called with NULL stats, it's a request to zero out the stats */
2318        if (!stats) {
2319                memset(&hw->stats, 0, sizeof(hw->stats));
2320                return;
2321        }
2322
2323        hw->stats.freeq_count = crystalhd_dioq_count(hw->rx_freeq);
2324        hw->stats.rdyq_count  = crystalhd_dioq_count(hw->rx_rdyq);
2325        memcpy(stats, &hw->stats, sizeof(*stats));
2326}
2327
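/*
 * Reprogram the decoder core clock PLL.  The divider is
 * n = core_clock_mhz / 5 and the VCO range bits are picked from
 * n * 27; the DRAM refresh interval is rescaled to the new clock
 * before the PLL is written, and the lock bit (0x00020000) is polled
 * afterwards.  As a worked example (hypothetical 175 MHz target):
 * n = 35, n * 27 = 945 so vco_mg = 2, and refresh_reg = 7 * 175 / 16 = 76.
 */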
2328enum BC_STATUS crystalhd_hw_set_core_clock(struct crystalhd_hw *hw)
2329{
2330        uint32_t reg, n, i;
2331        uint32_t vco_mg, refresh_reg;
2332
2333        if (!hw) {
2334                BCMLOG_ERR("Invalid Arguments\n");
2335                return BC_STS_INV_ARG;
2336        }
2337
2338        /* FIXME: jarod: wha? */
2339        /*n = (hw->core_clock_mhz * 3) / 20 + 1; */
2340        n = hw->core_clock_mhz / 5;
2341
2342        if (n == hw->prev_n)
2343                return BC_STS_CLK_NOCHG;
2344
2345        if (hw->pwr_lock > 0) {
2346                /* BCMLOG(BCMLOG_INFO,"pwr_lock is %u\n", hw->pwr_lock) */
2347                return BC_STS_CLK_NOCHG;
2348        }
2349
2350        i = n * 27;
2351        if (i < 560)
2352                vco_mg = 0;
2353        else if (i < 900)
2354                vco_mg = 1;
2355        else if (i < 1030)
2356                vco_mg = 2;
2357        else
2358                vco_mg = 3;
2359
2360        reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);
2361
2362        reg &= 0xFFFFCFC0;
2363        reg |= n;
2364        reg |= vco_mg << 12;
2365
2366        BCMLOG(BCMLOG_INFO, "clock is moving to %d with n %d with vco_mg %d\n",
2367               hw->core_clock_mhz, n, vco_mg);
2368
2369        /* Change the DRAM refresh rate to accommodate the new frequency */
2370        /* refresh reg = ((refresh_rate * clock_rate)/16) - 1; rounding up*/
2371        refresh_reg = (7 * hw->core_clock_mhz / 16);
2372        bc_dec_reg_wr(hw->adp, SDRAM_REF_PARAM, ((1 << 12) | refresh_reg));
2373
2374        bc_dec_reg_wr(hw->adp, DecHt_PllACtl, reg);
2375
2377
2378        for (i = 0; i < 10; i++) {
2379                reg = bc_dec_reg_rd(hw->adp, DecHt_PllACtl);
2380
2381                if (reg & 0x00020000) {
2382                        hw->prev_n = n;
2383                        /* FIXME: jarod: outputting a random "C" is... confusing... */
2384                        BCMLOG(BCMLOG_INFO, "C");
2385                        return BC_STS_SUCCESS;
2386                } else {
2387                        msleep_interruptible(10);
2388                }
2389        }
2390        BCMLOG(BCMLOG_INFO, "clk change failed\n");
2391        return BC_STS_CLK_NOCHG;
2392}
2393