linux/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"
#include "cn23xx_vf_device.h"

struct niclist {
	struct list_head list;
	void *ptr;
};

struct __dispatch {
	struct list_head list;
	struct octeon_recv_info *rinfo;
	octeon_dispatch_fn_t disp_fn;
};

/** Get the argument that the user set when registering dispatch
 *  function for a given opcode/subcode.
 *  @param  octeon_dev - the octeon device pointer.
 *  @param  opcode     - the opcode for which the dispatch argument
 *                       is to be checked.
 *  @param  subcode    - the subcode for which the dispatch argument
 *                       is to be checked.
 *  @return  Success: void * (argument to the dispatch function)
 *  @return  Failure: NULL
 */
void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev,
			      u16 opcode, u16 subcode)
{
	int idx;
	struct list_head *dispatch;
	void *fn_arg = NULL;
	u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);

	idx = combined_opcode & OCTEON_OPCODE_MASK;

	spin_lock_bh(&octeon_dev->dispatch.lock);

	if (octeon_dev->dispatch.count == 0) {
		spin_unlock_bh(&octeon_dev->dispatch.lock);
		return NULL;
	}

	if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) {
		fn_arg = octeon_dev->dispatch.dlist[idx].arg;
	} else {
		list_for_each(dispatch,
			      &octeon_dev->dispatch.dlist[idx].list) {
			if (((struct octeon_dispatch *)dispatch)->opcode ==
			    combined_opcode) {
				fn_arg = ((struct octeon_dispatch *)
					  dispatch)->arg;
				break;
			}
		}
	}

	spin_unlock_bh(&octeon_dev->dispatch.lock);
	return fn_arg;
}
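
/* Illustrative sketch (editor's note, not part of this file): how a
 * consumer arranges for octeon_get_dispatch_arg() to find its argument.
 * The dispatch table is keyed by OPCODE_SUBCODE(opcode, subcode) masked
 * with OCTEON_OPCODE_MASK; octeon_register_dispatch_fn() (elsewhere in
 * this driver) stores the handler and its argument under that key.  The
 * handler signature follows octeon_dispatch_fn_t; my_handler, my_ctx,
 * opcode and subcode below are placeholders.
 */
#if 0	/* example only; kept out of the build */
static int my_handler(struct octeon_recv_info *rinfo, void *arg)
{
	struct my_ctx *ctx = arg;	/* the argument given at registration */

	/* ... consume rinfo->recv_pkt, then release it ... */
	octeon_free_recv_info(rinfo);
	return 0;
}

	/* At init time; octeon_get_dispatch_arg(oct, opcode, subcode)
	 * will then return my_ctx for this opcode/subcode pair.
	 */
	octeon_register_dispatch_fn(oct, opcode, subcode, my_handler, my_ctx);
#endif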

/** Check for packets on Droq. This function should be called with lock held.
 *  @param  droq - Droq on which count is checked.
 *  @return Returns packet count.
 */
u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq)
{
	u32 pkt_count = 0;
	u32 last_count;

	pkt_count = readl(droq->pkts_sent_reg);

	last_count = pkt_count - droq->pkt_count;
	droq->pkt_count = pkt_count;

	/* The sent-packet count register is written back at NAPI
	 * interrupt-enable time or at the end of the droq tasklet.
	 */
	if (last_count)
		atomic_add(last_count, &droq->pkts_pending);

	return last_count;
}
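
/* Worked example of the unsigned arithmetic above (editor's note): the
 * hardware counter and droq->pkt_count are both u32, so the subtraction
 * stays correct across 2^32 rollover.  If the previous snapshot was
 * 0xFFFFFFFE and the register now reads 0x00000003, then
 * last_count = 0x00000003 - 0xFFFFFFFE = 5 (mod 2^32), i.e. the five
 * packets that arrived across the rollover.
 */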

static void octeon_droq_compute_max_packet_bufs(struct octeon_droq *droq)
{
	u32 count = 0;

	/* max_empty_descs is the maximum number of descriptors that may be
	 * left without buffers.  If the empty descriptor count rises beyond
	 * this value, we can no longer safely receive a 64K packet from
	 * Octeon (64K is the maximum packet size Octeon sends).
	 */
	droq->max_empty_descs = 0;

	do {
		droq->max_empty_descs++;
		count += droq->buffer_size;
	} while (count < (64 * 1024));

	droq->max_empty_descs = droq->max_count - droq->max_empty_descs;
}
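
/* Worked example (editor's note): with buffer_size = 2048, the loop
 * above runs 32 times before count reaches 64K (32 * 2048 = 65536), so
 * a maximum-size packet needs 32 filled descriptors.  With a
 * hypothetical max_count = 1024, max_empty_descs = 1024 - 32 = 992:
 * once more than 992 descriptors sit empty, fewer than 32 filled ones
 * remain and a 64K packet could not be landed.
 */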

static void octeon_droq_reset_indices(struct octeon_droq *droq)
{
	droq->read_idx = 0;
	droq->write_idx = 0;
	droq->refill_idx = 0;
	droq->refill_count = 0;
	atomic_set(&droq->pkts_pending, 0);
}

static void
octeon_droq_destroy_ring_buffers(struct octeon_device *oct,
				 struct octeon_droq *droq)
{
	u32 i;
	struct octeon_skb_page_info *pg_info;

	for (i = 0; i < droq->max_count; i++) {
		pg_info = &droq->recv_buf_list[i].pg_info;
		if (!pg_info)
			continue;

		if (pg_info->dma)
			lio_unmap_ring(oct->pci_dev,
				       (u64)pg_info->dma);
		pg_info->dma = 0;

		if (pg_info->page)
			recv_buffer_destroy(droq->recv_buf_list[i].buffer,
					    pg_info);

		droq->recv_buf_list[i].buffer = NULL;
	}

	octeon_droq_reset_indices(droq);
}

static int
octeon_droq_setup_ring_buffers(struct octeon_device *oct,
			       struct octeon_droq *droq)
{
	u32 i;
	void *buf;
	struct octeon_droq_desc *desc_ring = droq->desc_ring;

	for (i = 0; i < droq->max_count; i++) {
		buf = recv_buffer_alloc(oct, &droq->recv_buf_list[i].pg_info);

		if (!buf) {
			dev_err(&oct->pci_dev->dev, "%s buffer alloc failed\n",
				__func__);
			droq->stats.rx_alloc_failure++;
			return -ENOMEM;
		}

		droq->recv_buf_list[i].buffer = buf;
		droq->recv_buf_list[i].data = get_rbd(buf);
		desc_ring[i].info_ptr = 0;
		desc_ring[i].buffer_ptr =
			lio_map_ring(droq->recv_buf_list[i].buffer);
	}

	octeon_droq_reset_indices(droq);

	octeon_droq_compute_max_packet_bufs(droq);

	return 0;
}

int octeon_delete_droq(struct octeon_device *oct, u32 q_no)
{
	struct octeon_droq *droq = oct->droq[q_no];

	dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);

	octeon_droq_destroy_ring_buffers(oct, droq);
	vfree(droq->recv_buf_list);

	if (droq->desc_ring)
		lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
			     droq->desc_ring, droq->desc_ring_dma);

	memset(droq, 0, OCT_DROQ_SIZE);
	oct->io_qmask.oq &= ~(1ULL << q_no);
	vfree(oct->droq[q_no]);
	oct->droq[q_no] = NULL;
	oct->num_oqs--;

	return 0;
}

int octeon_init_droq(struct octeon_device *oct,
		     u32 q_no,
		     u32 num_descs,
		     u32 desc_size,
		     void *app_ctx)
{
	struct octeon_droq *droq;
	u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0;
	u32 c_pkts_per_intr = 0, c_refill_threshold = 0;
	int numa_node = dev_to_node(&oct->pci_dev->dev);

	dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);

	droq = oct->droq[q_no];
	memset(droq, 0, OCT_DROQ_SIZE);

	droq->oct_dev = oct;
	droq->q_no = q_no;
	if (app_ctx)
		droq->app_ctx = app_ctx;
	else
		droq->app_ctx = (void *)(size_t)q_no;

	c_num_descs = num_descs;
	c_buf_size = desc_size;
	if (OCTEON_CN6XXX(oct)) {
		struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);

		c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x);
		c_refill_threshold =
			(u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
	} else if (OCTEON_CN23XX_PF(oct)) {
		struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf);

		c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf23);
		c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf23);
	} else if (OCTEON_CN23XX_VF(oct)) {
		struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_vf);

		c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf23);
		c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf23);
	} else {
		return 1;
	}

	droq->max_count = c_num_descs;
	droq->buffer_size = c_buf_size;

	desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE;
	droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
					(dma_addr_t *)&droq->desc_ring_dma);

	if (!droq->desc_ring) {
		dev_err(&oct->pci_dev->dev,
			"Output queue %d ring alloc failed\n", q_no);
		return 1;
	}

	dev_dbg(&oct->pci_dev->dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
		q_no, droq->desc_ring, droq->desc_ring_dma);
	dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no,
		droq->max_count);

	droq->recv_buf_list = vzalloc_node(array_size(droq->max_count, OCT_DROQ_RECVBUF_SIZE),
					   numa_node);
	if (!droq->recv_buf_list)
		droq->recv_buf_list = vzalloc(array_size(droq->max_count, OCT_DROQ_RECVBUF_SIZE));
	if (!droq->recv_buf_list) {
		dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n");
		goto init_droq_fail;
	}

	if (octeon_droq_setup_ring_buffers(oct, droq))
		goto init_droq_fail;

	droq->pkts_per_intr = c_pkts_per_intr;
	droq->refill_threshold = c_refill_threshold;

	dev_dbg(&oct->pci_dev->dev, "DROQ INIT: max_empty_descs: %d\n",
		droq->max_empty_descs);

	INIT_LIST_HEAD(&droq->dispatch_list);

	/* For 56xx Pass1, this function won't be called, so no checks. */
	oct->fn_list.setup_oq_regs(oct, q_no);

	oct->io_qmask.oq |= BIT_ULL(q_no);

	return 0;

init_droq_fail:
	octeon_delete_droq(oct, q_no);
	return 1;
}

/* octeon_create_recv_info
 * Parameters:
 *  octeon_dev - pointer to the octeon device structure
 *  droq       - droq in which the packet arrived.
 *  buf_cnt    - number of buffers used by the packet.
 *  idx        - index in the descriptor ring of the first buffer in the
 *               packet.
 * Description:
 *  Allocates a struct octeon_recv_info and copies the buffer addresses
 *  for the packet data into the recv_pkt space, which starts at an 8B
 *  offset from the recv_info.  Flags the descriptors for refill later.
 *  If the available descriptors fall below the threshold needed to
 *  receive a 64K packet, new buffers are allocated before the recv_pkt
 *  is created.  This routine is called in interrupt context.
 * Returns:
 *  Success: pointer to struct octeon_recv_info
 *  Failure: NULL.
 */
static inline struct octeon_recv_info *octeon_create_recv_info(
		struct octeon_device *octeon_dev,
		struct octeon_droq *droq,
		u32 buf_cnt,
		u32 idx)
{
	struct octeon_droq_info *info;
	struct octeon_recv_pkt *recv_pkt;
	struct octeon_recv_info *recv_info;
	u32 i, bytes_left;
	struct octeon_skb_page_info *pg_info;

	info = (struct octeon_droq_info *)droq->recv_buf_list[idx].data;

	recv_info = octeon_alloc_recv_info(sizeof(struct __dispatch));
	if (!recv_info)
		return NULL;

	recv_pkt = recv_info->recv_pkt;
	recv_pkt->rh = info->rh;
	recv_pkt->length = (u32)info->length;
	recv_pkt->buffer_count = (u16)buf_cnt;
	recv_pkt->octeon_id = (u16)octeon_dev->octeon_id;

	i = 0;
	bytes_left = (u32)info->length;

	while (buf_cnt) {
		pg_info = &droq->recv_buf_list[idx].pg_info;

		lio_unmap_ring(octeon_dev->pci_dev,
			       (u64)pg_info->dma);
		pg_info->page = NULL;
		pg_info->dma = 0;

		recv_pkt->buffer_size[i] =
			(bytes_left >=
			 droq->buffer_size) ? droq->buffer_size : bytes_left;

		recv_pkt->buffer_ptr[i] = droq->recv_buf_list[idx].buffer;
		droq->recv_buf_list[idx].buffer = NULL;

		idx = incr_index(idx, 1, droq->max_count);
		bytes_left -= droq->buffer_size;
		i++;
		buf_cnt--;
	}

	return recv_info;
}

/* If we were not able to refill all buffers, try to move around
 * the buffers that were not dispatched.
 */
static inline u32
octeon_droq_refill_pullup_descs(struct octeon_droq *droq,
				struct octeon_droq_desc *desc_ring)
{
	u32 desc_refilled = 0;
	u32 refill_index = droq->refill_idx;

	while (refill_index != droq->read_idx) {
		if (droq->recv_buf_list[refill_index].buffer) {
			droq->recv_buf_list[droq->refill_idx].buffer =
				droq->recv_buf_list[refill_index].buffer;
			droq->recv_buf_list[droq->refill_idx].data =
				droq->recv_buf_list[refill_index].data;
			desc_ring[droq->refill_idx].buffer_ptr =
				desc_ring[refill_index].buffer_ptr;
			droq->recv_buf_list[refill_index].buffer = NULL;
			desc_ring[refill_index].buffer_ptr = 0;
			do {
				droq->refill_idx = incr_index(droq->refill_idx,
							      1,
							      droq->max_count);
				desc_refilled++;
				droq->refill_count--;
			} while (droq->recv_buf_list[droq->refill_idx].buffer);
		}
		refill_index = incr_index(refill_index, 1, droq->max_count);
	}
	return desc_refilled;
}

/* octeon_droq_refill
 * Parameters:
 *  droq       - droq in which descriptors require new buffers.
 * Description:
 *  Called during normal DROQ processing in interrupt mode or by the poll
 *  thread to refill the descriptors from which buffers were dispatched
 *  to upper layers. Attempts to allocate new buffers. If that fails, moves
 *  up buffers (that were not dispatched) to form a contiguous ring.
 * Returns:
 *  Number of descriptors refilled.
 */
static u32
octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
{
	struct octeon_droq_desc *desc_ring;
	void *buf = NULL;
	u8 *data;
	u32 desc_refilled = 0;
	struct octeon_skb_page_info *pg_info;

	desc_ring = droq->desc_ring;

	while (droq->refill_count && (desc_refilled < droq->max_count)) {
		/* If a valid buffer exists (happens if there is no dispatch),
		 * reuse the buffer, else allocate.
		 */
		if (!droq->recv_buf_list[droq->refill_idx].buffer) {
			pg_info =
				&droq->recv_buf_list[droq->refill_idx].pg_info;
			/* Either recycle the existing pages or go for
			 * new page alloc
			 */
			if (pg_info->page)
				buf = recv_buffer_reuse(octeon_dev, pg_info);
			else
				buf = recv_buffer_alloc(octeon_dev, pg_info);
			/* If a buffer could not be allocated, no point in
			 * continuing
			 */
			if (!buf) {
				droq->stats.rx_alloc_failure++;
				break;
			}
			droq->recv_buf_list[droq->refill_idx].buffer =
				buf;
			data = get_rbd(buf);
		} else {
			data = get_rbd(droq->recv_buf_list
				       [droq->refill_idx].buffer);
		}

		droq->recv_buf_list[droq->refill_idx].data = data;

		desc_ring[droq->refill_idx].buffer_ptr =
			lio_map_ring(droq->recv_buf_list[
				     droq->refill_idx].buffer);

		droq->refill_idx = incr_index(droq->refill_idx, 1,
					      droq->max_count);
		desc_refilled++;
		droq->refill_count--;
	}

	if (droq->refill_count)
		desc_refilled +=
			octeon_droq_refill_pullup_descs(droq, desc_ring);

	/* If droq->refill_count is still non-zero here, it will not change
	 * in the pull-up pass above: that pass only moves existing buffers
	 * to close gaps in the ring, so the same number of descriptors
	 * still need fresh buffers.
	 */
	return desc_refilled;
}
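
/* Worked example of the two refill passes (editor's note): suppose
 * slots 5..8 need refill (refill_idx = 5, refill_count = 4, read_idx
 * = 9).  Pass one allocates buffers for slots 5 and 6, then fails on 7
 * (refill_count = 2).  The pull-up pass scans 7..8: slot 7 is empty
 * (its buffer was dispatched), but slot 8 still holds an undispatched
 * buffer (e.g. from a dropped packet), so that buffer and its
 * descriptor pointer move down into slot 7 and count as one more
 * refilled descriptor.  The ring is again contiguous with the single
 * empty slot at 8, and refill_count = 1: pull-up closes gaps but adds
 * no buffers.
 */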

/** Check whether we can allocate packet buffers to get out of an
 *  out-of-memory condition.
 *  @param  droq - Droq being checked.
 *  @return 1 if the refill is still below the minimum credit level,
 *          so the caller should reschedule this check.
 */
int octeon_retry_droq_refill(struct octeon_droq *droq)
{
	struct octeon_device *oct = droq->oct_dev;
	int desc_refilled, reschedule = 1;
	u32 pkts_credit;

	pkts_credit = readl(droq->pkts_credit_reg);
	desc_refilled = octeon_droq_refill(oct, droq);
	if (desc_refilled) {
		/* Flush the droq descriptor data to memory to be sure
		 * that when we update the credits the data in memory
		 * is accurate.
		 */
		wmb();
		writel(desc_refilled, droq->pkts_credit_reg);

		if (pkts_credit + desc_refilled >= CN23XX_SLI_DEF_BP)
			reschedule = 0;
	}

	return reschedule;
}

static inline u32
octeon_droq_get_bufcount(u32 buf_size, u32 total_len)
{
	return DIV_ROUND_UP(total_len, buf_size);
}
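
/* Example (editor's note): with buf_size = 2048, a 5000-byte packet
 * spans DIV_ROUND_UP(5000, 2048) = 3 ring buffers; a 2048-byte packet
 * spans exactly 1.
 */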

static int
octeon_droq_dispatch_pkt(struct octeon_device *oct,
			 struct octeon_droq *droq,
			 union octeon_rh *rh,
			 struct octeon_droq_info *info)
{
	u32 cnt;
	octeon_dispatch_fn_t disp_fn;
	struct octeon_recv_info *rinfo;

	cnt = octeon_droq_get_bufcount(droq->buffer_size, (u32)info->length);

	disp_fn = octeon_get_dispatch(oct, (u16)rh->r.opcode,
				      (u16)rh->r.subcode);
	if (disp_fn) {
		rinfo = octeon_create_recv_info(oct, droq, cnt, droq->read_idx);
		if (rinfo) {
			struct __dispatch *rdisp = rinfo->rsvd;

			rdisp->rinfo = rinfo;
			rdisp->disp_fn = disp_fn;
			rinfo->recv_pkt->rh = *rh;
			list_add_tail(&rdisp->list,
				      &droq->dispatch_list);
		} else {
			droq->stats.dropped_nomem++;
		}
	} else {
		dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function (opcode %u/%u)\n",
			(unsigned int)rh->r.opcode,
			(unsigned int)rh->r.subcode);
		droq->stats.dropped_nodispatch++;
	}

	return cnt;
}

static inline void octeon_droq_drop_packets(struct octeon_device *oct,
					    struct octeon_droq *droq,
					    u32 cnt)
{
	u32 i = 0, buf_cnt;
	struct octeon_droq_info *info;

	for (i = 0; i < cnt; i++) {
		info = (struct octeon_droq_info *)
			droq->recv_buf_list[droq->read_idx].data;
		octeon_swap_8B_data((u64 *)info, 2);

		if (info->length) {
			info->length += OCTNET_FRM_LENGTH_SIZE;
			droq->stats.bytes_received += info->length;
			buf_cnt = octeon_droq_get_bufcount(droq->buffer_size,
							   (u32)info->length);
		} else {
			dev_err(&oct->pci_dev->dev, "DROQ: In drop: pkt with len 0\n");
			buf_cnt = 1;
		}

		droq->read_idx = incr_index(droq->read_idx, buf_cnt,
					    droq->max_count);
		droq->refill_count += buf_cnt;
	}
}

static u32
octeon_droq_fast_process_packets(struct octeon_device *oct,
				 struct octeon_droq *droq,
				 u32 pkts_to_process)
{
	u32 pkt, total_len = 0, pkt_count, retval;
	struct octeon_droq_info *info;
	union octeon_rh *rh;

	pkt_count = pkts_to_process;

	for (pkt = 0; pkt < pkt_count; pkt++) {
		u32 pkt_len = 0;
		struct sk_buff *nicbuf = NULL;
		struct octeon_skb_page_info *pg_info;
		void *buf;

		info = (struct octeon_droq_info *)
			droq->recv_buf_list[droq->read_idx].data;
		octeon_swap_8B_data((u64 *)info, 2);

		if (!info->length) {
			dev_err(&oct->pci_dev->dev,
				"DROQ[%d] idx: %d len:0, pkt_cnt: %d\n",
				droq->q_no, droq->read_idx, pkt_count);
			print_hex_dump_bytes("", DUMP_PREFIX_ADDRESS,
					     (u8 *)info,
					     OCT_DROQ_INFO_SIZE);
			break;
		}

		/* Len of resp hdr is included in the received data len. */
		rh = &info->rh;

		info->length += OCTNET_FRM_LENGTH_SIZE;
		rh->r_dh.len += (ROUNDUP8(OCT_DROQ_INFO_SIZE) / sizeof(u64));
		total_len += (u32)info->length;
		if (opcode_slow_path(rh)) {
			u32 buf_cnt;

			buf_cnt = octeon_droq_dispatch_pkt(oct, droq, rh, info);
			droq->read_idx = incr_index(droq->read_idx,
						    buf_cnt, droq->max_count);
			droq->refill_count += buf_cnt;
		} else {
			if (info->length <= droq->buffer_size) {
				pkt_len = (u32)info->length;
				nicbuf = droq->recv_buf_list[
					droq->read_idx].buffer;
				pg_info = &droq->recv_buf_list[
					droq->read_idx].pg_info;
				if (recv_buffer_recycle(oct, pg_info))
					pg_info->page = NULL;
				droq->recv_buf_list[droq->read_idx].buffer =
					NULL;

				droq->read_idx = incr_index(droq->read_idx, 1,
							    droq->max_count);
				droq->refill_count++;
			} else {
				nicbuf = octeon_fast_packet_alloc((u32)
								  info->length);
				pkt_len = 0;
				/* nicbuf allocation can fail. We'll handle it
				 * inside the loop.
				 */
				while (pkt_len < info->length) {
					int cpy_len, idx = droq->read_idx;

					cpy_len = ((pkt_len + droq->buffer_size)
						   > info->length) ?
						((u32)info->length - pkt_len) :
						droq->buffer_size;

					if (nicbuf) {
						octeon_fast_packet_next(droq,
									nicbuf,
									cpy_len,
									idx);
						buf = droq->recv_buf_list[
							idx].buffer;
						recv_buffer_fast_free(buf);
						droq->recv_buf_list[idx].buffer
							= NULL;
					} else {
						droq->stats.rx_alloc_failure++;
					}

					pkt_len += cpy_len;
					droq->read_idx =
						incr_index(droq->read_idx, 1,
							   droq->max_count);
					droq->refill_count++;
				}
			}

			if (nicbuf) {
				if (droq->ops.fptr) {
					droq->ops.fptr(oct->octeon_id,
						       nicbuf, pkt_len,
						       rh, &droq->napi,
						       droq->ops.farg);
				} else {
					recv_buffer_free(nicbuf);
				}
			}
		}

		if (droq->refill_count >= droq->refill_threshold) {
			int desc_refilled = octeon_droq_refill(oct, droq);

			if (desc_refilled) {
				/* Flush the droq descriptor data to memory to
				 * be sure that when we update the credits the
				 * data in memory is accurate.
				 */
				wmb();
				writel(desc_refilled, droq->pkts_credit_reg);
			}
		}
	}			/* for (each packet)... */

	/* Update receive statistics for the packets processed above. */
	droq->stats.pkts_received += pkt;
	droq->stats.bytes_received += total_len;

	retval = pkt;
	if ((droq->ops.drop_on_max) && (pkts_to_process - pkt)) {
		octeon_droq_drop_packets(oct, droq, (pkts_to_process - pkt));

		droq->stats.dropped_toomany += (pkts_to_process - pkt);
		retval = pkts_to_process;
	}

	atomic_sub(retval, &droq->pkts_pending);

	if (droq->refill_count >= droq->refill_threshold &&
	    readl(droq->pkts_credit_reg) < CN23XX_SLI_DEF_BP) {
		octeon_droq_check_hw_for_pkts(droq);

		/* Make sure there are no pkts_pending */
		if (!atomic_read(&droq->pkts_pending))
			octeon_schedule_rxq_oom_work(oct, droq);
	}

	return retval;
}

int
octeon_droq_process_packets(struct octeon_device *oct,
			    struct octeon_droq *droq,
			    u32 budget)
{
	u32 pkt_count = 0;
	struct list_head *tmp, *tmp2;

	octeon_droq_check_hw_for_pkts(droq);
	pkt_count = atomic_read(&droq->pkts_pending);

	if (!pkt_count)
		return 0;

	if (pkt_count > budget)
		pkt_count = budget;

	octeon_droq_fast_process_packets(oct, droq, pkt_count);

	list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
		struct __dispatch *rdisp = (struct __dispatch *)tmp;

		list_del(tmp);
		rdisp->disp_fn(rdisp->rinfo,
			       octeon_get_dispatch_arg
			       (oct,
				(u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
				(u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
	}

	/* If there are packets still pending, schedule the tasklet again. */
	if (atomic_read(&droq->pkts_pending))
		return 1;

	return 0;
}

/*
 * Utility function to poll for packets. octeon_droq_check_hw_for_pkts
 * must be called before calling this routine.
 */
int
octeon_droq_process_poll_pkts(struct octeon_device *oct,
			      struct octeon_droq *droq, u32 budget)
{
	struct list_head *tmp, *tmp2;
	u32 pkts_available = 0, pkts_processed = 0;
	u32 total_pkts_processed = 0;

	if (budget > droq->max_count)
		budget = droq->max_count;

	while (total_pkts_processed < budget) {
		octeon_droq_check_hw_for_pkts(droq);

		pkts_available = min((budget - total_pkts_processed),
				     (u32)(atomic_read(&droq->pkts_pending)));

		if (pkts_available == 0)
			break;

		pkts_processed =
			octeon_droq_fast_process_packets(oct, droq,
							 pkts_available);

		total_pkts_processed += pkts_processed;
	}

	list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
		struct __dispatch *rdisp = (struct __dispatch *)tmp;

		list_del(tmp);
		rdisp->disp_fn(rdisp->rinfo,
			       octeon_get_dispatch_arg
			       (oct,
				(u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
				(u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
	}

	return total_pkts_processed;
}
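
/* Illustrative sketch (editor's note, not the driver's actual NAPI
 * handler, which lives elsewhere in this driver): how a poll routine
 * could drive the entry points above; example_napi_poll is a
 * placeholder name.
 */
#if 0	/* example only; kept out of the build */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	struct octeon_droq *droq =
		container_of(napi, struct octeon_droq, napi);
	struct octeon_device *oct = droq->oct_dev;
	int work_done;

	work_done = octeon_droq_process_poll_pkts(oct, droq, budget);

	if (work_done < budget) {
		/* Under budget: stop polling and re-arm the interrupt. */
		napi_complete_done(napi, work_done);
		octeon_enable_irq(oct, droq->q_no);
	}
	return work_done;
}
#endif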

/* Enable Pkt Interrupt */
int
octeon_enable_irq(struct octeon_device *oct, u32 q_no)
{
	switch (oct->chip_id) {
	case OCTEON_CN66XX:
	case OCTEON_CN68XX: {
		struct octeon_cn6xxx *cn6xxx =
			(struct octeon_cn6xxx *)oct->chip;
		unsigned long flags;
		u32 value;

		spin_lock_irqsave
			(&cn6xxx->lock_for_droq_int_enb_reg, flags);
		value = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
		value |= (1 << q_no);
		octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB, value);
		value = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
		value |= (1 << q_no);
		octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB, value);

		/* don't bother flushing the enables */

		spin_unlock_irqrestore
			(&cn6xxx->lock_for_droq_int_enb_reg, flags);
	}
		break;
	case OCTEON_CN23XX_PF_VID:
	case OCTEON_CN23XX_VF_VID:
		lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]);
		break;
	default:
		dev_err(&oct->pci_dev->dev, "%s Unknown Chip\n", __func__);
		return 1;
	}

	return 0;
}

int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no,
			     struct octeon_droq_ops *ops)
{
	struct octeon_config *oct_cfg = NULL;
	struct octeon_droq *droq;

	oct_cfg = octeon_get_conf(oct);

	if (!oct_cfg)
		return -EINVAL;

	if (!ops) {
		dev_err(&oct->pci_dev->dev, "%s: droq_ops pointer is NULL\n",
			__func__);
		return -EINVAL;
	}

	if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
		dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
			__func__, q_no, (CFG_GET_OQ_MAX_Q(oct_cfg) - 1));
		return -EINVAL;
	}

	droq = oct->droq[q_no];
	memcpy(&droq->ops, ops, sizeof(struct octeon_droq_ops));

	return 0;
}
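
/* Illustrative sketch (editor's note): registering a receive callback
 * for a queue.  The fields mirror how this file uses droq->ops (fptr,
 * farg, drop_on_max); my_rx_callback and my_priv are placeholders, and
 * fptr's argument list follows the call site in
 * octeon_droq_fast_process_packets().
 */
#if 0	/* example only; kept out of the build */
	struct octeon_droq_ops ops;

	memset(&ops, 0, sizeof(ops));
	ops.fptr = my_rx_callback;	/* (octeon_id, skb, len, rh, napi, arg) */
	ops.farg = my_priv;
	ops.drop_on_max = 1;		/* drop excess packets rather than leave them pending */
	octeon_register_droq_ops(oct, q_no, &ops);
#endif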

int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no)
{
	struct octeon_config *oct_cfg = NULL;
	struct octeon_droq *droq;

	oct_cfg = octeon_get_conf(oct);

	if (!oct_cfg)
		return -EINVAL;

	if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
		dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
			__func__, q_no, CFG_GET_OQ_MAX_Q(oct_cfg) - 1);
		return -EINVAL;
	}

	droq = oct->droq[q_no];

	if (!droq) {
		dev_info(&oct->pci_dev->dev,
			 "Droq id (%d) not available.\n", q_no);
		return 0;
	}

	droq->ops.fptr = NULL;
	droq->ops.farg = NULL;
	droq->ops.drop_on_max = 0;

	return 0;
}

int octeon_create_droq(struct octeon_device *oct,
		       u32 q_no, u32 num_descs,
		       u32 desc_size, void *app_ctx)
{
	struct octeon_droq *droq;
	int numa_node = dev_to_node(&oct->pci_dev->dev);

	if (oct->droq[q_no]) {
		dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n",
			q_no);
		return 1;
	}

	/* Allocate the data structure for the new droq. */
	droq = vmalloc_node(sizeof(*droq), numa_node);
	if (!droq)
		droq = vmalloc(sizeof(*droq));
	if (!droq)
		return -1;

	memset(droq, 0, sizeof(struct octeon_droq));

	/* Disable packet output for this Q */
	octeon_set_droq_pkt_op(oct, q_no, 0);
	oct->droq[q_no] = droq;

	/* Initialize the Droq */
	if (octeon_init_droq(oct, q_no, num_descs, desc_size, app_ctx)) {
		vfree(oct->droq[q_no]);
		oct->droq[q_no] = NULL;
		return -1;
	}

	oct->num_oqs++;

	dev_dbg(&oct->pci_dev->dev, "%s: Total number of OQ: %d\n", __func__,
		oct->num_oqs);

	/* Global Droq register settings are not required here, as the
	 * settings are done for all 32 Droqs at the same time.
	 */
	return 0;
}
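
/* Illustrative sketch (editor's note): the create/delete lifecycle as
 * a caller of this file would use it.  q_no and ctx are placeholders;
 * note that desc_size doubles as the receive buffer size inside
 * octeon_init_droq().
 */
#if 0	/* example only; kept out of the build */
	/* Create output queue q_no with 1024 descriptors and 2KB buffers. */
	if (octeon_create_droq(oct, q_no, 1024, 2048, ctx))
		return -ENOMEM;

	/* ... hook up a receive callback, run traffic ... */

	/* Tear down: unmaps and frees ring buffers and the descriptor ring. */
	octeon_unregister_droq_ops(oct, q_no);
	octeon_delete_droq(oct, q_no);
#endif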