/* linux/drivers/net/ethernet/cavium/liquidio/octeon_droq.c */
/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"
#include "cn23xx_vf_device.h"

struct niclist {
	struct list_head list;
	void *ptr;
};

struct __dispatch {
	struct list_head list;
	struct octeon_recv_info *rinfo;
	octeon_dispatch_fn_t disp_fn;
};

/** Get the argument that the user set when registering dispatch
 *  function for a given opcode/subcode.
 *  @param  octeon_dev - the octeon device pointer.
 *  @param  opcode     - the opcode for which the dispatch argument
 *                       is to be checked.
 *  @param  subcode    - the subcode for which the dispatch argument
 *                       is to be checked.
 *  @return  Success: void * (argument to the dispatch function)
 *  @return  Failure: NULL
 */
void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev,
			      u16 opcode, u16 subcode)
{
	int idx;
	struct list_head *dispatch;
	void *fn_arg = NULL;
	u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);

	idx = combined_opcode & OCTEON_OPCODE_MASK;

	spin_lock_bh(&octeon_dev->dispatch.lock);

	if (octeon_dev->dispatch.count == 0) {
		spin_unlock_bh(&octeon_dev->dispatch.lock);
		return NULL;
	}

	if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) {
		fn_arg = octeon_dev->dispatch.dlist[idx].arg;
	} else {
		list_for_each(dispatch,
			      &octeon_dev->dispatch.dlist[idx].list) {
			if (((struct octeon_dispatch *)dispatch)->opcode ==
			    combined_opcode) {
				fn_arg = ((struct octeon_dispatch *)
					  dispatch)->arg;
				break;
			}
		}
	}

	spin_unlock_bh(&octeon_dev->dispatch.lock);
	return fn_arg;
}
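
/* Usage sketch (illustrative; handler and arg names are hypothetical):
 * the value returned here is whatever was supplied when the dispatch
 * function was registered, presumably via octeon_register_dispatch_fn()
 * from octeon_device.c:
 *
 *	octeon_register_dispatch_fn(oct, opcode, subcode,
 *				    my_disp_fn, my_arg);
 *	...
 *	arg = octeon_get_dispatch_arg(oct, opcode, subcode);
 *	// arg == my_arg
 */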

/** Check for packets on Droq. This function should be called with lock held.
 *  @param  droq - Droq on which count is checked.
 *  @return Returns packet count.
 */
u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq)
{
	u32 pkt_count = 0;
	u32 last_count;

	pkt_count = readl(droq->pkts_sent_reg);

	last_count = pkt_count - droq->pkt_count;
	droq->pkt_count = pkt_count;

	/* We shall write to the counts register at NAPI irq enable or at
	 * the end of the droq tasklet.
	 */
	if (last_count)
		atomic_add(last_count, &droq->pkts_pending);

	return last_count;
}
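
/* Note on the subtraction above: pkt_count and droq->pkt_count are both
 * u32, so the delta stays correct across hardware counter wrap-around.
 * Worked example (illustrative values): if the previous reading was
 * 0xfffffffa and the register now reads 0x00000010, then
 * last_count = 0x10 - 0xfffffffa = 0x16 = 22 packets (mod 2^32).
 */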

static void octeon_droq_compute_max_packet_bufs(struct octeon_droq *droq)
{
	u32 count = 0;

	/* max_empty_descs is the max. no. of descs that can have no buffers.
	 * If the empty desc count goes beyond this value, we cannot safely
	 * read in a 64K packet sent by Octeon
	 * (64K is max pkt size from Octeon)
	 */
	droq->max_empty_descs = 0;

	do {
		droq->max_empty_descs++;
		count += droq->buffer_size;
	} while (count < (64 * 1024));

	droq->max_empty_descs = droq->max_count - droq->max_empty_descs;
}
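
/* Worked example for the computation above (illustrative numbers): with
 * buffer_size = 2048 the loop runs 32 times (32 * 2048 = 65536 >= 64K),
 * so max_empty_descs = max_count - 32.  As long as no more than
 * max_count - 32 descriptors are awaiting refill, 32 filled descriptors
 * remain, enough to land a maximum-size 64K packet.
 */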

static void octeon_droq_reset_indices(struct octeon_droq *droq)
{
	droq->read_idx = 0;
	droq->write_idx = 0;
	droq->refill_idx = 0;
	droq->refill_count = 0;
	atomic_set(&droq->pkts_pending, 0);
}

static void
octeon_droq_destroy_ring_buffers(struct octeon_device *oct,
				 struct octeon_droq *droq)
{
	u32 i;
	struct octeon_skb_page_info *pg_info;

	for (i = 0; i < droq->max_count; i++) {
		pg_info = &droq->recv_buf_list[i].pg_info;
		if (!pg_info)
			continue;

		if (pg_info->dma)
			lio_unmap_ring(oct->pci_dev,
				       (u64)pg_info->dma);
		pg_info->dma = 0;

		if (pg_info->page)
			recv_buffer_destroy(droq->recv_buf_list[i].buffer,
					    pg_info);

		droq->recv_buf_list[i].buffer = NULL;
	}

	octeon_droq_reset_indices(droq);
}

static int
octeon_droq_setup_ring_buffers(struct octeon_device *oct,
			       struct octeon_droq *droq)
{
	u32 i;
	void *buf;
	struct octeon_droq_desc *desc_ring = droq->desc_ring;

	for (i = 0; i < droq->max_count; i++) {
		buf = recv_buffer_alloc(oct, &droq->recv_buf_list[i].pg_info);

		if (!buf) {
			dev_err(&oct->pci_dev->dev, "%s buffer alloc failed\n",
				__func__);
			droq->stats.rx_alloc_failure++;
			return -ENOMEM;
		}

		droq->recv_buf_list[i].buffer = buf;
		droq->recv_buf_list[i].data = get_rbd(buf);
		desc_ring[i].info_ptr = 0;
		desc_ring[i].buffer_ptr =
			lio_map_ring(droq->recv_buf_list[i].buffer);
	}

	octeon_droq_reset_indices(droq);

	octeon_droq_compute_max_packet_bufs(droq);

	return 0;
}

int octeon_delete_droq(struct octeon_device *oct, u32 q_no)
{
	struct octeon_droq *droq = oct->droq[q_no];

	dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);

	octeon_droq_destroy_ring_buffers(oct, droq);
	vfree(droq->recv_buf_list);

	if (droq->desc_ring)
		lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
			     droq->desc_ring, droq->desc_ring_dma);

	memset(droq, 0, OCT_DROQ_SIZE);
	oct->io_qmask.oq &= ~(1ULL << q_no);
	vfree(oct->droq[q_no]);
	oct->droq[q_no] = NULL;
	oct->num_oqs--;

	return 0;
}

int octeon_init_droq(struct octeon_device *oct,
		     u32 q_no,
		     u32 num_descs,
		     u32 desc_size,
		     void *app_ctx)
{
	struct octeon_droq *droq;
	u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0;
	u32 c_pkts_per_intr = 0, c_refill_threshold = 0;
	int numa_node = dev_to_node(&oct->pci_dev->dev);

	dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);

	droq = oct->droq[q_no];
	memset(droq, 0, OCT_DROQ_SIZE);

	droq->oct_dev = oct;
	droq->q_no = q_no;
	if (app_ctx)
		droq->app_ctx = app_ctx;
	else
		droq->app_ctx = (void *)(size_t)q_no;

	c_num_descs = num_descs;
	c_buf_size = desc_size;
	if (OCTEON_CN6XXX(oct)) {
		struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);

		c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x);
		c_refill_threshold =
			(u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
	} else if (OCTEON_CN23XX_PF(oct)) {
		struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf);

		c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf23);
		c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf23);
	} else if (OCTEON_CN23XX_VF(oct)) {
		struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_vf);

		c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf23);
		c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf23);
	} else {
		return 1;
	}

	droq->max_count = c_num_descs;
	droq->buffer_size = c_buf_size;

	desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE;
	droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
					(dma_addr_t *)&droq->desc_ring_dma);

	if (!droq->desc_ring) {
		dev_err(&oct->pci_dev->dev,
			"Output queue %d ring alloc failed\n", q_no);
		return 1;
	}

	dev_dbg(&oct->pci_dev->dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
		q_no, droq->desc_ring, droq->desc_ring_dma);
	dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no,
		droq->max_count);

	droq->recv_buf_list = (struct octeon_recv_buffer *)
	      vzalloc_node(array_size(droq->max_count, OCT_DROQ_RECVBUF_SIZE),
			   numa_node);
	if (!droq->recv_buf_list)
		droq->recv_buf_list = (struct octeon_recv_buffer *)
		      vzalloc(array_size(droq->max_count,
					 OCT_DROQ_RECVBUF_SIZE));
	if (!droq->recv_buf_list) {
		dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n");
		goto init_droq_fail;
	}

	if (octeon_droq_setup_ring_buffers(oct, droq))
		goto init_droq_fail;

	droq->pkts_per_intr = c_pkts_per_intr;
	droq->refill_threshold = c_refill_threshold;

	dev_dbg(&oct->pci_dev->dev, "DROQ INIT: max_empty_descs: %d\n",
		droq->max_empty_descs);

	spin_lock_init(&droq->lock);

	INIT_LIST_HEAD(&droq->dispatch_list);

	/* For 56xx Pass1, this function won't be called, so no checks. */
	oct->fn_list.setup_oq_regs(oct, q_no);

	oct->io_qmask.oq |= BIT_ULL(q_no);

	return 0;

init_droq_fail:
	octeon_delete_droq(oct, q_no);
	return 1;
}

/* octeon_create_recv_info
 * Parameters:
 *  octeon_dev - pointer to the octeon device structure
 *  droq       - droq in which the packet arrived.
 *  buf_cnt    - no. of buffers used by the packet.
 *  idx        - index in the descriptor for the first buffer in the packet.
 * Description:
 *  Allocates a recv_info_t and copies the buffer addresses for packet data
 *  into the recv_pkt space which starts at an 8B offset from recv_info_t.
 *  Flags the descriptors for refill later. If available descriptors go
 *  below the threshold to receive a 64K pkt, new buffers are first allocated
 *  before the recv_pkt_t is created.
 *  This routine will be called in interrupt context.
 * Returns:
 *  Success: Pointer to recv_info_t
 *  Failure: NULL.
 * Locks:
 *  The droq->lock is held when this routine is called.
 */
static inline struct octeon_recv_info *octeon_create_recv_info(
		struct octeon_device *octeon_dev,
		struct octeon_droq *droq,
		u32 buf_cnt,
		u32 idx)
{
	struct octeon_droq_info *info;
	struct octeon_recv_pkt *recv_pkt;
	struct octeon_recv_info *recv_info;
	u32 i, bytes_left;
	struct octeon_skb_page_info *pg_info;

	info = (struct octeon_droq_info *)droq->recv_buf_list[idx].data;

	recv_info = octeon_alloc_recv_info(sizeof(struct __dispatch));
	if (!recv_info)
		return NULL;

	recv_pkt = recv_info->recv_pkt;
	recv_pkt->rh = info->rh;
	recv_pkt->length = (u32)info->length;
	recv_pkt->buffer_count = (u16)buf_cnt;
	recv_pkt->octeon_id = (u16)octeon_dev->octeon_id;

	i = 0;
	bytes_left = (u32)info->length;

	while (buf_cnt) {
		pg_info = &droq->recv_buf_list[idx].pg_info;

		lio_unmap_ring(octeon_dev->pci_dev,
			       (u64)pg_info->dma);
		pg_info->page = NULL;
		pg_info->dma = 0;

		recv_pkt->buffer_size[i] =
			(bytes_left >=
			 droq->buffer_size) ? droq->buffer_size : bytes_left;

		recv_pkt->buffer_ptr[i] = droq->recv_buf_list[idx].buffer;
		droq->recv_buf_list[idx].buffer = NULL;

		idx = incr_index(idx, 1, droq->max_count);
		bytes_left -= droq->buffer_size;
		i++;
		buf_cnt--;
	}

	return recv_info;
}
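
/* Example of the buffer_size[] split in the loop above (illustrative
 * numbers): a 5000-byte packet with droq->buffer_size = 2048 occupies
 * buf_cnt = 3 descriptors and records
 * recv_pkt->buffer_size[] = { 2048, 2048, 904 }.
 */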

/* If we were not able to refill all buffers, try to move around
 * the buffers that were not dispatched.
 */
static inline u32
octeon_droq_refill_pullup_descs(struct octeon_droq *droq,
				struct octeon_droq_desc *desc_ring)
{
	u32 desc_refilled = 0;
	u32 refill_index = droq->refill_idx;

	while (refill_index != droq->read_idx) {
		if (droq->recv_buf_list[refill_index].buffer) {
			droq->recv_buf_list[droq->refill_idx].buffer =
				droq->recv_buf_list[refill_index].buffer;
			droq->recv_buf_list[droq->refill_idx].data =
				droq->recv_buf_list[refill_index].data;
			desc_ring[droq->refill_idx].buffer_ptr =
				desc_ring[refill_index].buffer_ptr;
			droq->recv_buf_list[refill_index].buffer = NULL;
			desc_ring[refill_index].buffer_ptr = 0;
			do {
				droq->refill_idx = incr_index(droq->refill_idx,
							      1,
							      droq->max_count);
				desc_refilled++;
				droq->refill_count--;
			} while (droq->recv_buf_list[droq->refill_idx].buffer);
		}
		refill_index = incr_index(refill_index, 1, droq->max_count);
	}				/* while */
	return desc_refilled;
}
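
/* Example of the pullup (hypothetical ring state, refill_idx = 2,
 * read_idx = 6):
 *
 *	before: [2] empty  [3] bufA  [4] empty  [5] bufB
 *	after:  [2] bufA   [3] bufB  [4] empty  [5] empty
 *
 * Descriptors 2 and 3 are refilled without any new allocation;
 * refill_count drops from 4 to 2 for the still-empty slots 4 and 5.
 */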

/* octeon_droq_refill
 * Parameters:
 *  droq       - droq in which descriptors require new buffers.
 * Description:
 *  Called during normal DROQ processing in interrupt mode or by the poll
 *  thread to refill the descriptors from which buffers were dispatched
 *  to upper layers. Attempts to allocate new buffers. If that fails, moves
 *  up buffers (that were not dispatched) to form a contiguous ring.
 * Returns:
 *  No. of descriptors refilled.
 * Locks:
 *  This routine is called with droq->lock held.
 */
static u32
octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
{
	struct octeon_droq_desc *desc_ring;
	void *buf = NULL;
	u8 *data;
	u32 desc_refilled = 0;
	struct octeon_skb_page_info *pg_info;

	desc_ring = droq->desc_ring;

	while (droq->refill_count && (desc_refilled < droq->max_count)) {
		/* If a valid buffer exists (happens if there is no dispatch),
		 * reuse the buffer, else allocate.
		 */
		if (!droq->recv_buf_list[droq->refill_idx].buffer) {
			pg_info =
				&droq->recv_buf_list[droq->refill_idx].pg_info;
			/* Either recycle the existing pages or go for
			 * new page alloc
			 */
			if (pg_info->page)
				buf = recv_buffer_reuse(octeon_dev, pg_info);
			else
				buf = recv_buffer_alloc(octeon_dev, pg_info);
			/* If a buffer could not be allocated, no point in
			 * continuing
			 */
			if (!buf) {
				droq->stats.rx_alloc_failure++;
				break;
			}
			droq->recv_buf_list[droq->refill_idx].buffer = buf;
			data = get_rbd(buf);
		} else {
			data = get_rbd(droq->recv_buf_list
				       [droq->refill_idx].buffer);
		}

		droq->recv_buf_list[droq->refill_idx].data = data;

		desc_ring[droq->refill_idx].buffer_ptr =
			lio_map_ring(droq->recv_buf_list[
				     droq->refill_idx].buffer);

		droq->refill_idx = incr_index(droq->refill_idx, 1,
					      droq->max_count);
		desc_refilled++;
		droq->refill_count--;
	}

	if (droq->refill_count)
		desc_refilled +=
			octeon_droq_refill_pullup_descs(droq, desc_ring);

	/* If droq->refill_count is still non-zero here, note that pass two
	 * (the pullup above) cannot change it: buffers were only moved to
	 * close the gap in the ring, so the same number of descriptors
	 * still needs fresh buffers.
	 */
	return desc_refilled;
}

/** Check whether we can allocate buffers to recover from an out-of-memory
 *  condition.
 *  @param  droq - Droq being checked.
 */
void octeon_droq_check_oom(struct octeon_droq *droq)
{
	int desc_refilled;
	struct octeon_device *oct = droq->oct_dev;

	if (readl(droq->pkts_credit_reg) <= CN23XX_SLI_DEF_BP) {
		spin_lock_bh(&droq->lock);
		desc_refilled = octeon_droq_refill(oct, droq);
		if (desc_refilled) {
			/* Flush the droq descriptor data to memory to be sure
			 * that when we update the credits the data in memory
			 * is accurate.
			 */
			wmb();
			writel(desc_refilled, droq->pkts_credit_reg);
		}
		spin_unlock_bh(&droq->lock);
	}
}

static inline u32
octeon_droq_get_bufcount(u32 buf_size, u32 total_len)
{
	return ((total_len + buf_size - 1) / buf_size);
}
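
/* Worked example (illustrative numbers): a 9000-byte packet received
 * into 2048-byte buffers needs (9000 + 2048 - 1) / 2048 = 5 buffers.
 */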

static int
octeon_droq_dispatch_pkt(struct octeon_device *oct,
			 struct octeon_droq *droq,
			 union octeon_rh *rh,
			 struct octeon_droq_info *info)
{
	u32 cnt;
	octeon_dispatch_fn_t disp_fn;
	struct octeon_recv_info *rinfo;

	cnt = octeon_droq_get_bufcount(droq->buffer_size, (u32)info->length);

	disp_fn = octeon_get_dispatch(oct, (u16)rh->r.opcode,
				      (u16)rh->r.subcode);
	if (disp_fn) {
		rinfo = octeon_create_recv_info(oct, droq, cnt, droq->read_idx);
		if (rinfo) {
			struct __dispatch *rdisp = rinfo->rsvd;

			rdisp->rinfo = rinfo;
			rdisp->disp_fn = disp_fn;
			rinfo->recv_pkt->rh = *rh;
			list_add_tail(&rdisp->list,
				      &droq->dispatch_list);
		} else {
			droq->stats.dropped_nomem++;
		}
	} else {
		dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function (opcode %u/%u)\n",
			(unsigned int)rh->r.opcode,
			(unsigned int)rh->r.subcode);
		droq->stats.dropped_nodispatch++;
	}

	return cnt;
}

static inline void octeon_droq_drop_packets(struct octeon_device *oct,
					    struct octeon_droq *droq,
					    u32 cnt)
{
	u32 i = 0, buf_cnt;
	struct octeon_droq_info *info;

	for (i = 0; i < cnt; i++) {
		info = (struct octeon_droq_info *)
			droq->recv_buf_list[droq->read_idx].data;
		octeon_swap_8B_data((u64 *)info, 2);

		if (info->length) {
			info->length += OCTNET_FRM_LENGTH_SIZE;
			droq->stats.bytes_received += info->length;
			buf_cnt = octeon_droq_get_bufcount(droq->buffer_size,
							   (u32)info->length);
		} else {
			dev_err(&oct->pci_dev->dev, "DROQ: In drop: pkt with len 0\n");
			buf_cnt = 1;
		}

		droq->read_idx = incr_index(droq->read_idx, buf_cnt,
					    droq->max_count);
		droq->refill_count += buf_cnt;
	}
}

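/* Fast-path processing of received packets.  Each packet takes one of
 * three routes below: (1) slow-path opcodes are queued on
 * droq->dispatch_list via octeon_droq_dispatch_pkt(); (2) a packet that
 * fits in a single buffer hands that buffer directly to the registered
 * droq->ops.fptr callback; (3) a packet spanning several buffers is
 * copied piecewise into a freshly allocated skb.
 */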
static u32
octeon_droq_fast_process_packets(struct octeon_device *oct,
				 struct octeon_droq *droq,
				 u32 pkts_to_process)
{
	struct octeon_droq_info *info;
	union octeon_rh *rh;
	u32 pkt, total_len = 0, pkt_count;

	pkt_count = pkts_to_process;

	for (pkt = 0; pkt < pkt_count; pkt++) {
		u32 pkt_len = 0;
		struct sk_buff *nicbuf = NULL;
		struct octeon_skb_page_info *pg_info;
		void *buf;

		info = (struct octeon_droq_info *)
			droq->recv_buf_list[droq->read_idx].data;
		octeon_swap_8B_data((u64 *)info, 2);

		if (!info->length) {
			dev_err(&oct->pci_dev->dev,
				"DROQ[%d] idx: %d len:0, pkt_cnt: %d\n",
				droq->q_no, droq->read_idx, pkt_count);
			print_hex_dump_bytes("", DUMP_PREFIX_ADDRESS,
					     (u8 *)info,
					     OCT_DROQ_INFO_SIZE);
			break;
		}

		/* Len of resp hdr is included in the received data len. */
		rh = &info->rh;

		info->length += OCTNET_FRM_LENGTH_SIZE;
		rh->r_dh.len += (ROUNDUP8(OCT_DROQ_INFO_SIZE) / sizeof(u64));
		total_len += (u32)info->length;
		if (opcode_slow_path(rh)) {
			u32 buf_cnt;

			buf_cnt = octeon_droq_dispatch_pkt(oct, droq, rh, info);
			droq->read_idx = incr_index(droq->read_idx,
						    buf_cnt, droq->max_count);
			droq->refill_count += buf_cnt;
		} else {
			if (info->length <= droq->buffer_size) {
				pkt_len = (u32)info->length;
				nicbuf = droq->recv_buf_list[
					droq->read_idx].buffer;
				pg_info = &droq->recv_buf_list[
					droq->read_idx].pg_info;
				if (recv_buffer_recycle(oct, pg_info))
					pg_info->page = NULL;
				droq->recv_buf_list[droq->read_idx].buffer =
					NULL;

				droq->read_idx = incr_index(droq->read_idx, 1,
							    droq->max_count);
				droq->refill_count++;
			} else {
				nicbuf = octeon_fast_packet_alloc((u32)
								  info->length);
				pkt_len = 0;
				/* nicbuf allocation can fail. We'll handle it
				 * inside the loop.
				 */
				while (pkt_len < info->length) {
					int cpy_len, idx = droq->read_idx;

					cpy_len = ((pkt_len + droq->buffer_size)
						   > info->length) ?
						((u32)info->length - pkt_len) :
						droq->buffer_size;

					if (nicbuf) {
						octeon_fast_packet_next(droq,
									nicbuf,
									cpy_len,
									idx);
						buf = droq->recv_buf_list[
							idx].buffer;
						recv_buffer_fast_free(buf);
						droq->recv_buf_list[idx].buffer
							= NULL;
					} else {
						droq->stats.rx_alloc_failure++;
					}

					pkt_len += cpy_len;
					droq->read_idx =
						incr_index(droq->read_idx, 1,
							   droq->max_count);
					droq->refill_count++;
				}
			}

			if (nicbuf) {
				if (droq->ops.fptr) {
					droq->ops.fptr(oct->octeon_id,
						       nicbuf, pkt_len,
						       rh, &droq->napi,
						       droq->ops.farg);
				} else {
					recv_buffer_free(nicbuf);
				}
			}
		}

		if (droq->refill_count >= droq->refill_threshold) {
			int desc_refilled = octeon_droq_refill(oct, droq);

			/* Flush the droq descriptor data to memory to be sure
			 * that when we update the credits the data in memory
			 * is accurate.
			 */
			wmb();
			writel((desc_refilled), droq->pkts_credit_reg);
		}
	}				/* for (each packet)... */

	/* Update the stats with the number of packets and bytes received. */
	droq->stats.pkts_received += pkt;
	droq->stats.bytes_received += total_len;

	if ((droq->ops.drop_on_max) && (pkts_to_process - pkt)) {
		octeon_droq_drop_packets(oct, droq, (pkts_to_process - pkt));

		droq->stats.dropped_toomany += (pkts_to_process - pkt);
		return pkts_to_process;
	}

	return pkt;
}

int
octeon_droq_process_packets(struct octeon_device *oct,
			    struct octeon_droq *droq,
			    u32 budget)
{
	u32 pkt_count = 0, pkts_processed = 0;
	struct list_head *tmp, *tmp2;

	/* Grab the droq lock */
	spin_lock(&droq->lock);

	octeon_droq_check_hw_for_pkts(droq);
	pkt_count = atomic_read(&droq->pkts_pending);

	if (!pkt_count) {
		spin_unlock(&droq->lock);
		return 0;
	}

	if (pkt_count > budget)
		pkt_count = budget;

	pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count);

	atomic_sub(pkts_processed, &droq->pkts_pending);

	/* Release the spin lock */
	spin_unlock(&droq->lock);

	list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
		struct __dispatch *rdisp = (struct __dispatch *)tmp;

		list_del(tmp);
		rdisp->disp_fn(rdisp->rinfo,
			       octeon_get_dispatch_arg
			       (oct,
				(u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
				(u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
	}

	/* If there are packets still pending, schedule the tasklet again */
	if (atomic_read(&droq->pkts_pending))
		return 1;

	return 0;
}
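
/* Usage sketch (illustrative, not part of this file): a caller such as a
 * droq tasklet would typically reschedule itself while this routine
 * reports pending packets, e.g.:
 *
 *	if (octeon_droq_process_packets(oct, droq, budget))
 *		tasklet_schedule(&my_droq_tasklet);
 *
 * where my_droq_tasklet is a hypothetical tasklet owned by the caller.
 */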

/**
 * Utility function to poll for packets. octeon_droq_check_hw_for_pkts must
 * be called before calling this routine.
 */
int
octeon_droq_process_poll_pkts(struct octeon_device *oct,
			      struct octeon_droq *droq, u32 budget)
{
	struct list_head *tmp, *tmp2;
	u32 pkts_available = 0, pkts_processed = 0;
	u32 total_pkts_processed = 0;

	if (budget > droq->max_count)
		budget = droq->max_count;

	spin_lock(&droq->lock);

	while (total_pkts_processed < budget) {
		octeon_droq_check_hw_for_pkts(droq);

		pkts_available = min((budget - total_pkts_processed),
				     (u32)(atomic_read(&droq->pkts_pending)));

		if (pkts_available == 0)
			break;

		pkts_processed =
			octeon_droq_fast_process_packets(oct, droq,
							 pkts_available);

		atomic_sub(pkts_processed, &droq->pkts_pending);

		total_pkts_processed += pkts_processed;
	}

	spin_unlock(&droq->lock);

	list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
		struct __dispatch *rdisp = (struct __dispatch *)tmp;

		list_del(tmp);
		rdisp->disp_fn(rdisp->rinfo,
			       octeon_get_dispatch_arg
			       (oct,
				(u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
				(u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
	}

	return total_pkts_processed;
}

/* Enable Pkt Interrupt */
int
octeon_enable_irq(struct octeon_device *oct, u32 q_no)
{
	switch (oct->chip_id) {
	case OCTEON_CN66XX:
	case OCTEON_CN68XX: {
		struct octeon_cn6xxx *cn6xxx =
			(struct octeon_cn6xxx *)oct->chip;
		unsigned long flags;
		u32 value;

		spin_lock_irqsave
			(&cn6xxx->lock_for_droq_int_enb_reg, flags);
		value = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
		value |= (1 << q_no);
		octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB, value);
		value = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
		value |= (1 << q_no);
		octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB, value);

		/* don't bother flushing the enables */

		spin_unlock_irqrestore
			(&cn6xxx->lock_for_droq_int_enb_reg, flags);
	}
		break;
	case OCTEON_CN23XX_PF_VID:
	case OCTEON_CN23XX_VF_VID:
		lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]);
		break;
	default:
		dev_err(&oct->pci_dev->dev, "%s Unknown Chip\n", __func__);
		return 1;
	}

	return 0;
}

int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no,
			     struct octeon_droq_ops *ops)
{
	struct octeon_droq *droq;
	unsigned long flags;
	struct octeon_config *oct_cfg = NULL;

	oct_cfg = octeon_get_conf(oct);

	if (!oct_cfg)
		return -EINVAL;

	if (!ops) {
		dev_err(&oct->pci_dev->dev, "%s: droq_ops pointer is NULL\n",
			__func__);
		return -EINVAL;
	}

	if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
		dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
			__func__, q_no, (oct->num_oqs - 1));
		return -EINVAL;
	}

	droq = oct->droq[q_no];

	spin_lock_irqsave(&droq->lock, flags);

	memcpy(&droq->ops, ops, sizeof(struct octeon_droq_ops));

	spin_unlock_irqrestore(&droq->lock, flags);

	return 0;
}
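
/* Usage sketch (hypothetical handler and context names): a protocol
 * module fills in struct octeon_droq_ops and registers it before
 * enabling the queue:
 *
 *	struct octeon_droq_ops ops = {
 *		.fptr = my_rx_fn,	// invoked per packet from fast path
 *		.farg = my_ctx,
 *		.drop_on_max = 1,	// drop what exceeds the budget
 *	};
 *
 *	octeon_register_droq_ops(oct, q_no, &ops);
 */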

int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no)
{
	unsigned long flags;
	struct octeon_droq *droq;
	struct octeon_config *oct_cfg = NULL;

	oct_cfg = octeon_get_conf(oct);

	if (!oct_cfg)
		return -EINVAL;

	if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
		dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
			__func__, q_no, oct->num_oqs - 1);
		return -EINVAL;
	}

	droq = oct->droq[q_no];

	if (!droq) {
		dev_info(&oct->pci_dev->dev,
			 "Droq id (%d) not available.\n", q_no);
		return 0;
	}

	spin_lock_irqsave(&droq->lock, flags);

	droq->ops.fptr = NULL;
	droq->ops.farg = NULL;
	droq->ops.drop_on_max = 0;

	spin_unlock_irqrestore(&droq->lock, flags);

	return 0;
}

int octeon_create_droq(struct octeon_device *oct,
		       u32 q_no, u32 num_descs,
		       u32 desc_size, void *app_ctx)
{
	struct octeon_droq *droq;
	int numa_node = dev_to_node(&oct->pci_dev->dev);

	if (oct->droq[q_no]) {
		dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n",
			q_no);
		return 1;
	}

	/* Allocate the data structure for the new droq. */
	droq = vmalloc_node(sizeof(*droq), numa_node);
	if (!droq)
		droq = vmalloc(sizeof(*droq));
	if (!droq)
		return -1;

	memset(droq, 0, sizeof(struct octeon_droq));

	/* Disable the pkt o/p for this Q */
	octeon_set_droq_pkt_op(oct, q_no, 0);
	oct->droq[q_no] = droq;

	/* Initialize the Droq */
	if (octeon_init_droq(oct, q_no, num_descs, desc_size, app_ctx)) {
		vfree(oct->droq[q_no]);
		oct->droq[q_no] = NULL;
		return -1;
	}

	oct->num_oqs++;

	dev_dbg(&oct->pci_dev->dev, "%s: Total number of OQ: %d\n", __func__,
		oct->num_oqs);

	/* Global Droq register settings */

	/* Not required as of now, as settings are done for all 32 Droqs at
	 * the same time.
	 */
	return 0;
}
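
/* Usage sketch (illustrative literal values): create output queue 0 with
 * 1024 descriptors of 2048 bytes each and no private context:
 *
 *	if (octeon_create_droq(oct, 0, 1024, 2048, NULL))
 *		dev_err(&oct->pci_dev->dev, "DROQ 0 creation failed\n");
 */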