linux/drivers/net/ethernet/cavium/liquidio/request_manager.c
/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 **********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"
#include "cn23xx_vf_device.h"

struct iq_post_status {
	int status;
	int index;
};

static void check_db_timeout(struct work_struct *work);
static void  __check_db_timeout(struct octeon_device *oct, u64 iq_no);

static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *);

static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
{
	struct octeon_instr_queue *iq =
	    (struct octeon_instr_queue *)oct->instr_queue[iq_no];
	return iq->iqcmd_64B;
}

#define IQ_INSTR_MODE_32B(oct, iq_no)  (!IQ_INSTR_MODE_64B(oct, iq_no))

/* Define this to return a request status compatible with old code */
/*#define OCTEON_USE_OLD_REQ_STATUS*/

/* Return 0 on success, 1 on failure */
int octeon_init_instr_queue(struct octeon_device *oct,
			    union oct_txpciq txpciq,
			    u32 num_descs)
{
	struct octeon_instr_queue *iq;
	struct octeon_iq_config *conf = NULL;
	u32 iq_no = (u32)txpciq.s.q_no;
	u32 q_size;
	struct cavium_wq *db_wq;
	int numa_node = dev_to_node(&oct->pci_dev->dev);

	if (OCTEON_CN6XXX(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn6xxx)));
	else if (OCTEON_CN23XX_PF(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn23xx_pf)));
	else if (OCTEON_CN23XX_VF(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn23xx_vf)));

	if (!conf) {
		dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n",
			oct->chip_id);
		return 1;
	}

	q_size = (u32)conf->instr_type * num_descs;

	iq = oct->instr_queue[iq_no];

	iq->oct_dev = oct;

	iq->base_addr = lio_dma_alloc(oct, q_size, &iq->base_addr_dma);
	if (!iq->base_addr) {
		dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
			iq_no);
		return 1;
	}

	iq->max_count = num_descs;

	/* Initialize a list to hold requests that have been posted to Octeon
	 * but have not yet been fetched by Octeon
	 */
	iq->request_list = vmalloc_node((sizeof(*iq->request_list) * num_descs),
					numa_node);
	if (!iq->request_list)
		iq->request_list =
			vmalloc(array_size(num_descs,
					   sizeof(*iq->request_list)));
	if (!iq->request_list) {
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
			iq_no);
		return 1;
	}

	memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);

	dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
		iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);

	iq->txpciq.u64 = txpciq.u64;
	iq->fill_threshold = (u32)conf->db_min;
	iq->fill_cnt = 0;
	iq->host_write_index = 0;
	iq->octeon_read_index = 0;
	iq->flush_index = 0;
	iq->last_db_time = 0;
	iq->do_auto_flush = 1;
	iq->db_timeout = (u32)conf->db_timeout;
	atomic_set(&iq->instr_pending, 0);

	/* Initialize the spinlock for this instruction queue */
	spin_lock_init(&iq->lock);
	spin_lock_init(&iq->post_lock);

	spin_lock_init(&iq->iq_flush_running_lock);

	oct->io_qmask.iq |= BIT_ULL(iq_no);

	/* Set the 32B/64B mode for each input queue */
	oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
	iq->iqcmd_64B = (conf->instr_type == 64);

	oct->fn_list.setup_iq_regs(oct, iq_no);

	oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db",
						     WQ_MEM_RECLAIM,
						     0);
	if (!oct->check_db_wq[iq_no].wq) {
		vfree(iq->request_list);
		iq->request_list = NULL;
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
			iq_no);
		return 1;
	}

	db_wq = &oct->check_db_wq[iq_no];

	INIT_DELAYED_WORK(&db_wq->wk.work, check_db_timeout);
	db_wq->wk.ctxptr = oct;
	db_wq->wk.ctxul = iq_no;
	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));

	return 0;
}

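/* Free the resources of the given instruction queue: cancel the doorbell
 * check work, release the descriptor ring DMA memory and the request list,
 * and clear the queue from the device's IQ mask.
 * Returns 0 on success, 1 if the queue had no descriptor ring to free.
 */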
int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
{
	u64 desc_size = 0, q_size;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work);
	destroy_workqueue(oct->check_db_wq[iq_no].wq);

	if (OCTEON_CN6XXX(oct))
		desc_size =
		    CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn6xxx));
	else if (OCTEON_CN23XX_PF(oct))
		desc_size =
		    CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn23xx_pf));
	else if (OCTEON_CN23XX_VF(oct))
		desc_size =
		    CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn23xx_vf));

	vfree(iq->request_list);

	if (iq->base_addr) {
		q_size = iq->max_count * desc_size;
		lio_dma_free(oct, (u32)q_size, iq->base_addr,
			     iq->base_addr_dma);
		oct->io_qmask.iq &= ~(1ULL << iq_no);
		vfree(oct->instr_queue[iq_no]);
		oct->instr_queue[iq_no] = NULL;
		oct->num_iqs--;
		return 0;
	}
	return 1;
}

/* Return 0 on success, 1 on failure */
int octeon_setup_iq(struct octeon_device *oct,
		    int ifidx,
		    int q_index,
		    union oct_txpciq txpciq,
		    u32 num_descs,
		    void *app_ctx)
{
	u32 iq_no = (u32)txpciq.s.q_no;
	int numa_node = dev_to_node(&oct->pci_dev->dev);

	if (oct->instr_queue[iq_no]) {
		dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n",
			iq_no);
		oct->instr_queue[iq_no]->txpciq.u64 = txpciq.u64;
		oct->instr_queue[iq_no]->app_ctx = app_ctx;
		return 0;
	}
	oct->instr_queue[iq_no] =
	    vmalloc_node(sizeof(struct octeon_instr_queue), numa_node);
	if (!oct->instr_queue[iq_no])
		oct->instr_queue[iq_no] =
		    vmalloc(sizeof(struct octeon_instr_queue));
	if (!oct->instr_queue[iq_no])
		return 1;

	memset(oct->instr_queue[iq_no], 0,
	       sizeof(struct octeon_instr_queue));

	oct->instr_queue[iq_no]->q_index = q_index;
	oct->instr_queue[iq_no]->app_ctx = app_ctx;
	oct->instr_queue[iq_no]->ifidx = ifidx;

	if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
		vfree(oct->instr_queue[iq_no]);
		oct->instr_queue[iq_no] = NULL;
		return 1;
	}

	oct->num_iqs++;
	if (oct->fn_list.enable_io_queues(oct))
		return 1;

	return 0;
}

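/* Wait for Octeon to fetch all pending instructions from every active
 * instruction queue, running the doorbell-timeout check on any queue that
 * still has work pending.  Gives up after roughly 1000 retries.
 * Returns the number of instructions still pending, or 0 if all queues
 * drained.
 */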
int lio_wait_for_instr_fetch(struct octeon_device *oct)
{
	int i, retry = 1000, pending, instr_cnt = 0;

	do {
		instr_cnt = 0;

		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			pending =
			    atomic_read(&oct->instr_queue[i]->instr_pending);
			if (pending)
				__check_db_timeout(oct, i);
			instr_cnt += pending;
		}

		if (instr_cnt == 0)
			break;

		schedule_timeout_uninterruptible(1);

	} while (retry-- && instr_cnt);

	return instr_cnt;
}

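/* Write the accumulated fill count to the queue's doorbell register (only
 * while the device is running), then reset the fill count and record the
 * time of this doorbell ring.
 */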
static inline void
ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
{
	if (atomic_read(&oct->status) == OCT_DEV_RUNNING) {
		writel(iq->fill_cnt, iq->doorbell_reg);
		/* make sure doorbell write goes through */
		iq->fill_cnt = 0;
		iq->last_db_time = jiffies;
		return;
	}
}

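/* Ring the doorbell of the given instruction queue, if it has unannounced
 * commands, while holding the queue's post_lock.
 */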
void
octeon_ring_doorbell_locked(struct octeon_device *oct, u32 iq_no)
{
	struct octeon_instr_queue *iq;

	iq = oct->instr_queue[iq_no];
	spin_lock(&iq->post_lock);
	if (iq->fill_cnt)
		ring_doorbell(oct, iq);
	spin_unlock(&iq->post_lock);
}

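/* Copy a 32B or 64B command into the descriptor ring slot at the current
 * host write index.
 */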
static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
				      u8 *cmd)
{
	u8 *iqptr, cmdsize;

	cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
	iqptr = iq->base_addr + (cmdsize * iq->host_write_index);

	memcpy(iqptr, cmd, cmdsize);
}

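/* Place a command in the next free slot of the instruction queue.
 * Returns the slot index and a status of IQ_SEND_OK, IQ_SEND_STOP (queue
 * is about to fill up) or IQ_SEND_FAILED (queue is full).
 */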
static inline struct iq_post_status
__post_command2(struct octeon_instr_queue *iq, u8 *cmd)
{
	struct iq_post_status st;

	st.status = IQ_SEND_OK;

	/* This ensures that the read index does not wrap around to the same
	 * position if the queue gets full before Octeon can fetch any
	 * instructions.
	 */
	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) {
		st.status = IQ_SEND_FAILED;
		st.index = -1;
		return st;
	}

	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 2))
		st.status = IQ_SEND_STOP;

	__copy_cmd_into_iq(iq, cmd);

	/* "index" is returned, host_write_index is modified. */
	st.index = iq->host_write_index;
	iq->host_write_index = incr_index(iq->host_write_index, 1,
					  iq->max_count);
	iq->fill_cnt++;

	/* Flush the command into memory. We need to be sure the data is in
	 * memory before indicating that the instruction is pending.
	 */
	wmb();

	atomic_inc(&iq->instr_pending);

	return st;
}

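/* Register the function used to free buffers of the given request type
 * once their instructions have been fetched by Octeon.
 * Returns 0 on success, -EINVAL for an invalid request type.
 */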
int
octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
				void (*fn)(void *))
{
	if (reqtype > REQTYPE_LAST) {
		dev_err(&oct->pci_dev->dev, "%s: Invalid reqtype: %d\n",
			__func__, reqtype);
		return -EINVAL;
	}

	reqtype_free_fn[oct->octeon_id][reqtype] = fn;

	return 0;
}

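/* Record the buffer and request type for the descriptor at @idx so they
 * can be completed once Octeon has fetched the instruction.
 */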
static inline void
__add_to_request_list(struct octeon_instr_queue *iq,
		      int idx, void *buf, int reqtype)
{
	iq->request_list[idx].buf = buf;
	iq->request_list[idx].reqtype = reqtype;
}

/* Can only run in process context */
int
lio_process_iq_request_list(struct octeon_device *oct,
			    struct octeon_instr_queue *iq, u32 napi_budget)
{
	struct cavium_wq *cwq = &oct->dma_comp_wq;
	int reqtype;
	void *buf;
	u32 old = iq->flush_index;
	u32 inst_count = 0;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct octeon_soft_command *sc;
	struct octeon_instr_irh *irh;
	unsigned long flags;

	while (old != iq->octeon_read_index) {
		reqtype = iq->request_list[old].reqtype;
		buf     = iq->request_list[old].buf;

		if (reqtype == REQTYPE_NONE)
			goto skip_this;

		octeon_update_tx_completion_counters(buf, reqtype, &pkts_compl,
						     &bytes_compl);

		switch (reqtype) {
		case REQTYPE_NORESP_NET:
		case REQTYPE_NORESP_NET_SG:
		case REQTYPE_RESP_NET_SG:
			reqtype_free_fn[oct->octeon_id][reqtype](buf);
			break;
		case REQTYPE_RESP_NET:
		case REQTYPE_SOFT_COMMAND:
			sc = buf;

			if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct))
				irh = (struct octeon_instr_irh *)
					&sc->cmd.cmd3.irh;
			else
				irh = (struct octeon_instr_irh *)
					&sc->cmd.cmd2.irh;
			if (irh->rflag) {
				/* We're expecting a response from Octeon.
				 * Add sc to the ordered soft command
				 * response list; lio_process_ordered_list()
				 * will process it when the response arrives.
				 */
				spin_lock_irqsave
					(&oct->response_list
					 [OCTEON_ORDERED_SC_LIST].lock,
					 flags);
				atomic_inc(&oct->response_list
					[OCTEON_ORDERED_SC_LIST].
					pending_req_count);
				list_add_tail(&sc->node, &oct->response_list
					[OCTEON_ORDERED_SC_LIST].head);
				spin_unlock_irqrestore
					(&oct->response_list
					 [OCTEON_ORDERED_SC_LIST].lock,
					 flags);
			} else {
				if (sc->callback) {
					/* This callback must not sleep */
					sc->callback(oct, OCTEON_REQUEST_DONE,
						     sc->callback_arg);
				}
			}
			break;
		default:
			dev_err(&oct->pci_dev->dev,
				"%s Unknown reqtype: %d buf: %p at idx %d\n",
				__func__, reqtype, buf, old);
		}

		iq->request_list[old].buf = NULL;
		iq->request_list[old].reqtype = 0;

 skip_this:
		inst_count++;
		old = incr_index(old, 1, iq->max_count);

		if ((napi_budget) && (inst_count >= napi_budget))
			break;
	}
	if (bytes_compl)
		octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl,
						   bytes_compl);
	iq->flush_index = old;

	if (atomic_read(&oct->response_list
			[OCTEON_ORDERED_SC_LIST].pending_req_count))
		queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(1));

	return inst_count;
}

/* Can only be called from process context */
int
octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
		u32 napi_budget)
{
	u32 inst_processed = 0;
	u32 tot_inst_processed = 0;
	int tx_done = 1;

	if (!spin_trylock(&iq->iq_flush_running_lock))
		return tx_done;

	spin_lock_bh(&iq->lock);

	iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq);

	do {
		/* Process any outstanding IQ packets. */
		if (iq->flush_index == iq->octeon_read_index)
			break;

		if (napi_budget)
			inst_processed =
				lio_process_iq_request_list(oct, iq,
							    napi_budget -
							    tot_inst_processed);
		else
			inst_processed =
				lio_process_iq_request_list(oct, iq, 0);

		if (inst_processed) {
			atomic_sub(inst_processed, &iq->instr_pending);
			iq->stats.instr_processed += inst_processed;
		}

		tot_inst_processed += inst_processed;
	} while (tot_inst_processed < napi_budget);

	if (napi_budget && (tot_inst_processed >= napi_budget))
		tx_done = 0;

	iq->last_db_time = jiffies;

	spin_unlock_bh(&iq->lock);

	spin_unlock(&iq->iq_flush_running_lock);

	return tx_done;
}

/* Process instruction queue after timeout.
 * This routine gets called from a workqueue or when removing the module.
 */
static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
{
	struct octeon_instr_queue *iq;
	u64 next_time;

	if (!oct)
		return;

	iq = oct->instr_queue[iq_no];
	if (!iq)
		return;

	/* Return immediately if no work is pending */
	if (!atomic_read(&iq->instr_pending))
		return;
	/* If jiffies - last_db_time < db_timeout, do nothing */
	next_time = iq->last_db_time + iq->db_timeout;
	if (!time_after(jiffies, (unsigned long)next_time))
		return;
	iq->last_db_time = jiffies;

	/* Flush the instruction queue */
	octeon_flush_iq(oct, iq, 0);

	lio_enable_irq(NULL, iq);
}

/* Called from a per-queue delayed work item at regular intervals to check
 * the instruction queue for commands to be posted and for commands that
 * were fetched by Octeon.
 */
static void check_db_timeout(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
	u64 iq_no = wk->ctxul;
	struct cavium_wq *db_wq = &oct->check_db_wq[iq_no];
	u32 delay = 10;

	__check_db_timeout(oct, iq_no);
	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(delay));
}

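/* Post a command to the given instruction queue under iq->post_lock.
 * On success the buffer and request type are recorded in the request list,
 * and the doorbell is rung if the fill count reached MAX_OCTEON_FILL_COUNT,
 * the caller forced it, octeon_report_sent_bytes_to_bql() reported a stopped
 * queue, or the queue is about to fill up.
 * Returns IQ_SEND_OK, IQ_SEND_STOP or IQ_SEND_FAILED.
 */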
int
octeon_send_command(struct octeon_device *oct, u32 iq_no,
		    u32 force_db, void *cmd, void *buf,
		    u32 datasize, u32 reqtype)
{
	int xmit_stopped;
	struct iq_post_status st;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	/* Get the lock and prevent other tasks and tx interrupt handler from
	 * running.
	 */
	spin_lock_bh(&iq->post_lock);

	st = __post_command2(iq, cmd);

	if (st.status != IQ_SEND_FAILED) {
		xmit_stopped = octeon_report_sent_bytes_to_bql(buf, reqtype);
		__add_to_request_list(iq, st.index, buf, reqtype);
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);

		if (iq->fill_cnt >= MAX_OCTEON_FILL_COUNT || force_db ||
		    xmit_stopped || st.status == IQ_SEND_STOP)
			ring_doorbell(oct, iq);
	} else {
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
	}

	spin_unlock_bh(&iq->post_lock);

	/* This is only done here to expedite packets being flushed
	 * for cases where there are no IQ completion interrupts.
	 */

	return st.status;
}

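/* Fill in the instruction headers (IH3/PKI_IH3 for CN23XX, IH2 otherwise)
 * and the IRH of a soft command with the given opcode, subcode and
 * opcode/subcode-specific parameters (ossp).
 */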
void
octeon_prepare_soft_command(struct octeon_device *oct,
			    struct octeon_soft_command *sc,
			    u8 opcode,
			    u8 subcode,
			    u32 irh_ossp,
			    u64 ossp0,
			    u64 ossp1)
{
	struct octeon_config *oct_cfg;
	struct octeon_instr_ih2 *ih2;
	struct octeon_instr_ih3 *ih3;
	struct octeon_instr_pki_ih3 *pki_ih3;
	struct octeon_instr_irh *irh;
	struct octeon_instr_rdp *rdp;

	WARN_ON(opcode > 15);
	WARN_ON(subcode > 127);

	oct_cfg = octeon_get_conf(oct);

	if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
		ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;

		ih3->pkind = oct->instr_queue[sc->iq_no]->txpciq.s.pkind;

		pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;

		pki_ih3->w           = 1;
		pki_ih3->raw         = 1;
		pki_ih3->utag        = 1;
		pki_ih3->uqpg        =
			oct->instr_queue[sc->iq_no]->txpciq.s.use_qpg;
		pki_ih3->utt         = 1;
		pki_ih3->tag     = LIO_CONTROL;
		pki_ih3->tagtype = ATOMIC_TAG;
		pki_ih3->qpg         =
			oct->instr_queue[sc->iq_no]->txpciq.s.ctrl_qpg;

		pki_ih3->pm          = 0x7;
		pki_ih3->sl          = 8;

		if (sc->datasize)
			ih3->dlengsz = sc->datasize;

		irh            = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
		irh->opcode    = opcode;
		irh->subcode   = subcode;

		/* opcode/subcode specific parameters (ossp) */
		irh->ossp       = irh_ossp;
		sc->cmd.cmd3.ossp[0] = ossp0;
		sc->cmd.cmd3.ossp[1] = ossp1;

		if (sc->rdatasize) {
			rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
			rdp->pcie_port = oct->pcie_port;
			rdp->rlen      = sc->rdatasize;

			irh->rflag =  1;
			/*PKI IH3*/
			/* pki_ih3 irh+ossp[0]+ossp[1]+rdp+rptr = 48 bytes */
			ih3->fsz    = LIO_SOFTCMDRESP_IH3;
		} else {
			irh->rflag =  0;
			/*PKI IH3*/
			/* pki_h3 + irh + ossp[0] + ossp[1] = 32 bytes */
			ih3->fsz    = LIO_PCICMD_O3;
		}

	} else {
		ih2          = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
		ih2->tagtype = ATOMIC_TAG;
		ih2->tag     = LIO_CONTROL;
		ih2->raw     = 1;
		ih2->grp     = CFG_GET_CTRL_Q_GRP(oct_cfg);

		if (sc->datasize) {
			ih2->dlengsz = sc->datasize;
			ih2->rs = 1;
		}

		irh            = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
		irh->opcode    = opcode;
		irh->subcode   = subcode;

		/* opcode/subcode specific parameters (ossp) */
		irh->ossp       = irh_ossp;
		sc->cmd.cmd2.ossp[0] = ossp0;
		sc->cmd.cmd2.ossp[1] = ossp1;

		if (sc->rdatasize) {
			rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
			rdp->pcie_port = oct->pcie_port;
			rdp->rlen      = sc->rdatasize;

			irh->rflag =  1;
			/* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
			ih2->fsz   = LIO_SOFTCMDRESP_IH2;
		} else {
			irh->rflag =  0;
			/* irh + ossp[0] + ossp[1] = 24 bytes */
			ih2->fsz   = LIO_PCICMD_O2;
		}
	}
}

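/* Attach the DMA data and response pointers to a prepared soft command,
 * initialize its completion word, and post it to its instruction queue
 * with the doorbell forced.
 * Returns the status from octeon_send_command().
 */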
int octeon_send_soft_command(struct octeon_device *oct,
			     struct octeon_soft_command *sc)
{
	struct octeon_instr_ih2 *ih2;
	struct octeon_instr_ih3 *ih3;
	struct octeon_instr_irh *irh;
	u32 len;

	if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
		ih3 =  (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
		if (ih3->dlengsz) {
			WARN_ON(!sc->dmadptr);
			sc->cmd.cmd3.dptr = sc->dmadptr;
		}
		irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
		if (irh->rflag) {
			WARN_ON(!sc->dmarptr);
			WARN_ON(!sc->status_word);
			*sc->status_word = COMPLETION_WORD_INIT;
			sc->cmd.cmd3.rptr = sc->dmarptr;
		}
		len = (u32)ih3->dlengsz;
	} else {
		ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
		if (ih2->dlengsz) {
			WARN_ON(!sc->dmadptr);
			sc->cmd.cmd2.dptr = sc->dmadptr;
		}
		irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
		if (irh->rflag) {
			WARN_ON(!sc->dmarptr);
			WARN_ON(!sc->status_word);
			*sc->status_word = COMPLETION_WORD_INIT;
			sc->cmd.cmd2.rptr = sc->dmarptr;
		}
		len = (u32)ih2->dlengsz;
	}

	if (sc->wait_time)
		sc->timeout = jiffies + sc->wait_time;

	return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
				    len, REQTYPE_SOFT_COMMAND));
}

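/* Pre-allocate MAX_SOFT_COMMAND_BUFFERS DMA-coherent soft command buffers
 * and link them into the device's soft command buffer pool.
 * Returns 0 on success, 1 if any allocation fails (the pool is freed).
 */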
int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
{
	int i;
	u64 dma_addr;
	struct octeon_soft_command *sc;

	INIT_LIST_HEAD(&oct->sc_buf_pool.head);
	spin_lock_init(&oct->sc_buf_pool.lock);
	atomic_set(&oct->sc_buf_pool.alloc_buf_count, 0);

	for (i = 0; i < MAX_SOFT_COMMAND_BUFFERS; i++) {
		sc = (struct octeon_soft_command *)
			lio_dma_alloc(oct,
				      SOFT_COMMAND_BUFFER_SIZE,
				      (dma_addr_t *)&dma_addr);
		if (!sc) {
			octeon_free_sc_buffer_pool(oct);
			return 1;
		}

		sc->dma_addr = dma_addr;
		sc->size = SOFT_COMMAND_BUFFER_SIZE;

		list_add_tail(&sc->node, &oct->sc_buf_pool.head);
	}

	return 0;
}

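/* Release every buffer remaining in the soft command buffer pool back to
 * the DMA allocator.  Always returns 0.
 */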
int octeon_free_sc_buffer_pool(struct octeon_device *oct)
{
	struct list_head *tmp, *tmp2;
	struct octeon_soft_command *sc;

	spin_lock_bh(&oct->sc_buf_pool.lock);

	list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
		list_del(tmp);

		sc = (struct octeon_soft_command *)tmp;

		lio_dma_free(oct, sc->size, sc, sc->dma_addr);
	}

	INIT_LIST_HEAD(&oct->sc_buf_pool.head);

	spin_unlock_bh(&oct->sc_buf_pool.lock);

	return 0;
}

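/* Take a buffer from the soft command buffer pool and lay out the context,
 * data and response areas inside it (data and rdata start on 128-byte
 * boundaries; the last 8 bytes of rdata hold the status word).
 * Returns the soft command, or NULL if the pool is empty.
 */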
struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
						      u32 datasize,
						      u32 rdatasize,
						      u32 ctxsize)
{
	u64 dma_addr;
	u32 size;
	u32 offset = sizeof(struct octeon_soft_command);
	struct octeon_soft_command *sc = NULL;
	struct list_head *tmp;

	WARN_ON((offset + datasize + rdatasize + ctxsize) >
	       SOFT_COMMAND_BUFFER_SIZE);

	spin_lock_bh(&oct->sc_buf_pool.lock);

	if (list_empty(&oct->sc_buf_pool.head)) {
		spin_unlock_bh(&oct->sc_buf_pool.lock);
		return NULL;
	}

	list_for_each(tmp, &oct->sc_buf_pool.head)
		break;

	list_del(tmp);

	atomic_inc(&oct->sc_buf_pool.alloc_buf_count);

	spin_unlock_bh(&oct->sc_buf_pool.lock);

	sc = (struct octeon_soft_command *)tmp;

	dma_addr = sc->dma_addr;
	size = sc->size;

	memset(sc, 0, sc->size);

	sc->dma_addr = dma_addr;
	sc->size = size;

	if (ctxsize) {
		sc->ctxptr = (u8 *)sc + offset;
		sc->ctxsize = ctxsize;
	}

	/* Start data at 128 byte boundary */
	offset = (offset + ctxsize + 127) & 0xffffff80;

	if (datasize) {
		sc->virtdptr = (u8 *)sc + offset;
		sc->dmadptr = dma_addr + offset;
		sc->datasize = datasize;
	}

	/* Start rdata at 128 byte boundary */
	offset = (offset + datasize + 127) & 0xffffff80;

	if (rdatasize) {
		WARN_ON(rdatasize < 16);
		sc->virtrptr = (u8 *)sc + offset;
		sc->dmarptr = dma_addr + offset;
		sc->rdatasize = rdatasize;
		sc->status_word = (u64 *)((u8 *)(sc->virtrptr) + rdatasize - 8);
	}

	return sc;
}

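/* Return a soft command buffer to the soft command buffer pool. */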
void octeon_free_soft_command(struct octeon_device *oct,
			      struct octeon_soft_command *sc)
{
	spin_lock_bh(&oct->sc_buf_pool.lock);

	list_add_tail(&sc->node, &oct->sc_buf_pool.head);

	atomic_dec(&oct->sc_buf_pool.alloc_buf_count);

	spin_unlock_bh(&oct->sc_buf_pool.lock);
}