uboot/drivers/net/octeontx/nicvf_queues.c
// SPDX-License-Identifier:    GPL-2.0
/*
 * Copyright (C) 2018 Marvell International Ltd.
 */

#include <cpu_func.h>
#include <dm/device.h>
#include <malloc.h>
#include <net.h>
#include <phy.h>
#include <linux/delay.h>

#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"
#include "nicvf_queues.h"

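/* Poll a register field until it reads 'val', giving up after ~20ms */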
static int nicvf_poll_reg(struct nicvf *nic, int qidx,
                          u64 reg, int bit_pos, int bits, int val)
{
        u64 bit_mask;
        u64 reg_val;
        int timeout = 10;

        bit_mask = (1ULL << bits) - 1;
        bit_mask = (bit_mask << bit_pos);

        while (timeout) {
                reg_val = nicvf_queue_reg_read(nic, reg, qidx);
                if (((reg_val & bit_mask) >> bit_pos) == val)
                        return 0;
                udelay(2000);
                timeout--;
        }
        printf("Poll on reg 0x%llx failed\n", reg);
        return 1;
}

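/* Allocate descriptor ring memory and align the base to 'align_bytes' */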
static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
                                  int q_len, int desc_size, int align_bytes)
{
        dmem->q_len = q_len;
        dmem->size = (desc_size * q_len) + align_bytes;
        /* Save address, need it while freeing */
        dmem->unalign_base = calloc(1, dmem->size);
        dmem->dma = (uintptr_t)dmem->unalign_base;

        if (!dmem->unalign_base)
                return -1;

        /* Align memory address for 'align_bytes' */
        dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
        dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);

        return 0;
}

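/* Free descriptor ring memory allocated by nicvf_alloc_q_desc_mem() */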
static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
{
        if (!dmem)
                return;

        free(dmem->unalign_base);

        dmem->unalign_base = NULL;
        dmem->base = NULL;
}

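/* Convert a hardware receive buffer pointer to a packet pointer (1:1 here) */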
static void *nicvf_rb_ptr_to_pkt(struct nicvf *nic, uintptr_t rb_ptr)
{
        return (void *)rb_ptr;
}

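/* Allocate an RBDR ring and its receive buffers, then seed every
 * descriptor with a buffer address.
 */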
static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
                           int ring_len, int buf_size)
{
        int idx;
        uintptr_t rbuf;
        struct rbdr_entry_t *desc;

        if (nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
                                   sizeof(struct rbdr_entry_t),
                                   NICVF_RCV_BUF_ALIGN_BYTES)) {
                printf("Unable to allocate memory for rcv buffer ring\n");
                return -1;
        }

        rbdr->desc = rbdr->dmem.base;
        /* Buffer size has to be in multiples of 128 bytes */
        rbdr->dma_size = buf_size;
        rbdr->enable = true;
        rbdr->thresh = RBDR_THRESH;

        debug("%s: %d: allocating %lld bytes for rcv buffers\n",
              __func__, __LINE__,
              ring_len * buf_size + NICVF_RCV_BUF_ALIGN_BYTES);
        rbdr->buf_mem = (uintptr_t)calloc(1, ring_len * buf_size
                                                + NICVF_RCV_BUF_ALIGN_BYTES);

        if (!rbdr->buf_mem) {
                printf("Unable to allocate memory for rcv buffers\n");
                return -1;
        }

        rbdr->buffers = NICVF_ALIGNED_ADDR(rbdr->buf_mem,
                                           NICVF_RCV_BUF_ALIGN_BYTES);

        debug("%s: %d: rbdr->buf_mem: %lx, rbdr->buffers: %lx\n",
              __func__, __LINE__, rbdr->buf_mem, rbdr->buffers);

        for (idx = 0; idx < ring_len; idx++) {
                rbuf = rbdr->buffers + DMA_BUFFER_LEN * idx;
                desc = GET_RBDR_DESC(rbdr, idx);
                desc->buf_addr = rbuf >> NICVF_RCV_BUF_ALIGN;
                flush_dcache_range((uintptr_t)desc,
                                   (uintptr_t)desc + sizeof(*desc));
        }
        return 0;
}

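/* Free the receive buffers and the RBDR ring memory */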
static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
        if (!rbdr)
                return;

        rbdr->enable = false;
        if (!rbdr->dmem.base)
                return;

        debug("%s: %d: rbdr->buf_mem: %p\n", __func__,
              __LINE__, (void *)rbdr->buf_mem);
        free((void *)rbdr->buf_mem);

        /* Free RBDR ring */
        nicvf_free_q_desc_mem(nic, &rbdr->dmem);
}

/* Refill receive buffer descriptors with new buffers.
 * This runs in softirq context.
 */
void nicvf_refill_rbdr(struct nicvf *nic)
{
        struct queue_set *qs = nic->qs;
        int rbdr_idx = qs->rbdr_cnt;
        unsigned long qcount, head, tail, rb_cnt;
        struct rbdr *rbdr;

        if (!rbdr_idx)
                return;
        rbdr_idx--;
        rbdr = &qs->rbdr[rbdr_idx];
        /* Check if it's enabled */
        if (!rbdr->enable) {
                printf("Receive queue %d is disabled\n", rbdr_idx);
                return;
        }

        /* Check if valid descs reached or crossed threshold level */
        qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
        head = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, rbdr_idx);
        tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx);

        qcount &= 0x7FFFF;

        rb_cnt = qs->rbdr_len - qcount - 1;

        debug("%s: %d: qcount: %lu, head: %lx, tail: %lx, rb_cnt: %lu\n",
              __func__, __LINE__, qcount, head, tail, rb_cnt);

        /* Notify HW */
        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, rbdr_idx, rb_cnt);

        asm volatile ("dsb sy");
}

/* TBD: how to handle full packets received in CQ
 * i.e. conversion of buffers into SKBs
 */
static int nicvf_init_cmp_queue(struct nicvf *nic,
                                struct cmp_queue *cq, int q_len)
{
        if (nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len,
                                   CMP_QUEUE_DESC_SIZE,
                                   NICVF_CQ_BASE_ALIGN_BYTES)) {
                printf("Unable to allocate memory for completion queue\n");
                return -1;
        }
        cq->desc = cq->dmem.base;
        if (!pass1_silicon(nic->rev_id, nic->nicpf->hw->model_id))
                cq->thresh = CMP_QUEUE_CQE_THRESH;
        else
                cq->thresh = 0;
        cq->intr_timer_thresh = CMP_QUEUE_TIMER_THRESH;

        return 0;
}

static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
{
        if (!cq)
                return;
        if (!cq->dmem.base)
                return;

        nicvf_free_q_desc_mem(nic, &cq->dmem);
}

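/* Allocate send queue descriptor memory and initialize SQ bookkeeping */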
static int nicvf_init_snd_queue(struct nicvf *nic,
                                struct snd_queue *sq, int q_len)
{
        if (nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len,
                                   SND_QUEUE_DESC_SIZE,
                                   NICVF_SQ_BASE_ALIGN_BYTES)) {
                printf("Unable to allocate memory for send queue\n");
                return -1;
        }

        sq->desc = sq->dmem.base;
        sq->skbuff = calloc(q_len, sizeof(u64));
        sq->head = 0;
        sq->tail = 0;
        sq->free_cnt = q_len - 1;
        sq->thresh = SND_QUEUE_THRESH;

        return 0;
}

static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
        if (!sq)
                return;
        if (!sq->dmem.base)
                return;

        debug("%s: %d\n", __func__, __LINE__);
        free(sq->skbuff);

        nicvf_free_q_desc_mem(nic, &sq->dmem);
}

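/* Disable a send queue, wait for it to stop, then reset it */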
static void nicvf_reclaim_snd_queue(struct nicvf *nic,
                                    struct queue_set *qs, int qidx)
{
        /* Disable send queue */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
        /* Check if SQ is stopped */
        if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
                return;
        /* Reset send queue */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
}

static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
                                    struct queue_set *qs, int qidx)
{
        union nic_mbx mbx = {};

        /* Make sure all packets in the pipeline are written back into mem */
        mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
        nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
                                    struct queue_set *qs, int qidx)
{
        /* Disable timer threshold (doesn't get reset upon CQ reset) */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
        /* Disable completion queue */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
        /* Reset completion queue */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
}

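/* Disable and reset an RBDR, waiting for its FIFO and prefetch buffer
 * to drain.
 */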
static void nicvf_reclaim_rbdr(struct nicvf *nic,
                               struct rbdr *rbdr, int qidx)
{
        u64 tmp, fifo_state;
        int timeout = 10;

        /* Save head and tail pointers for freeing up buffers */
        rbdr->head = nicvf_queue_reg_read(nic,
                                          NIC_QSET_RBDR_0_1_HEAD,
                                          qidx) >> 3;
        rbdr->tail = nicvf_queue_reg_read(nic,
                                          NIC_QSET_RBDR_0_1_TAIL,
                                          qidx) >> 3;

        /* If RBDR FIFO is in 'FAIL' state then do a reset first
         * before reclaiming.
         */
        fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
        if (((fifo_state >> 62) & 0x03) == 0x3)
                nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
                                      qidx, NICVF_RBDR_RESET);

        /* Disable RBDR */
        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
        if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
                return;
        while (1) {
                tmp = nicvf_queue_reg_read(nic,
                                           NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
                                           qidx);
                if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
                        break;
                mdelay(2000);
                timeout--;
                if (!timeout) {
                        printf("Failed polling on prefetch status\n");
                        return;
                }
        }
        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
                              qidx, NICVF_RBDR_RESET);

        if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
                return;
        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
        if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
                return;
}

/* Configures receive queue */
static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
                                   int qidx, bool enable)
{
        union nic_mbx mbx = {};
        struct rcv_queue *rq;
        union {
                struct rq_cfg s;
                u64    u;
        } rq_cfg;

        rq = &qs->rq[qidx];
        rq->enable = enable;

        /* Disable receive queue */
        nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);

        if (!rq->enable) {
                nicvf_reclaim_rcv_queue(nic, qs, qidx);
                return;
        }

        rq->cq_qs = qs->vnic_id;
        rq->cq_idx = qidx;
        rq->start_rbdr_qs = qs->vnic_id;
        rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
        rq->cont_rbdr_qs = qs->vnic_id;
        rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
        /* All writes of RBDR data to be loaded into L2 cache as well */
        rq->caching = 1;

        /* Send a mailbox msg to PF to config RQ */
        mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
        mbx.rq.qs_num = qs->vnic_id;
        mbx.rq.rq_num = qidx;
        mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
                          (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
                          (rq->cont_qs_rbdr_idx << 8) |
                          (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
        nicvf_send_msg_to_pf(nic, &mbx);

        mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
        mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0);
        nicvf_send_msg_to_pf(nic, &mbx);

        /* RQ drop config
         * Enable CQ drop to reserve sufficient CQEs for all tx packets
         */
        mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
        mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
        nicvf_send_msg_to_pf(nic, &mbx);
        nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);

        /* Enable Receive queue */
        rq_cfg.u = 0;
        rq_cfg.s.ena = 1;
        rq_cfg.s.tcp_ena = 0;
        nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, rq_cfg.u);
}

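/* Configures completion queue */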
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
                            int qidx, bool enable)
{
        struct cmp_queue *cq;
        union {
                u64 u;
                struct cq_cfg s;
        } cq_cfg;

        cq = &qs->cq[qidx];
        cq->enable = enable;

        if (!cq->enable) {
                nicvf_reclaim_cmp_queue(nic, qs, qidx);
                return;
        }

        /* Reset completion queue */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);

        /* Set completion queue base address */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
                              qidx, (u64)(cq->dmem.phys_base));

        /* Enable Completion queue */
        cq_cfg.u = 0;
        cq_cfg.s.ena = 1;
        cq_cfg.s.reset = 0;
        cq_cfg.s.caching = 0;
        cq_cfg.s.qsize = CMP_QSIZE;
        cq_cfg.s.avg_con = 0;
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, cq_cfg.u);

        /* Set threshold value for interrupt generation */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx,
                              cq->intr_timer_thresh);
}

/* Configures transmit queue */
static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
                                   int qidx, bool enable)
{
        union nic_mbx mbx = {};
        struct snd_queue *sq;

        union {
                struct sq_cfg s;
                u64 u;
        } sq_cfg;

        sq = &qs->sq[qidx];
        sq->enable = enable;

        if (!sq->enable) {
                nicvf_reclaim_snd_queue(nic, qs, qidx);
                return;
        }

        /* Reset send queue */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);

        sq->cq_qs = qs->vnic_id;
        sq->cq_idx = qidx;

        /* Send a mailbox msg to PF to config SQ */
        mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
        mbx.sq.qs_num = qs->vnic_id;
        mbx.sq.sq_num = qidx;
        mbx.sq.sqs_mode = nic->sqs_mode;
        mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
        nicvf_send_msg_to_pf(nic, &mbx);

        /* Set queue base address */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
                              qidx, (u64)(sq->dmem.phys_base));

        /* Enable send queue & set queue size */
        sq_cfg.u = 0;
        sq_cfg.s.ena = 1;
        sq_cfg.s.reset = 0;
        sq_cfg.s.ldwb = 0;
        sq_cfg.s.qsize = SND_QSIZE;
        sq_cfg.s.tstmp_bgx_intf = 0;
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg.u);

        /* Set threshold value for interrupt generation */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
}

/* Configures receive buffer descriptor ring */
static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
                              int qidx, bool enable)
{
        struct rbdr *rbdr;
        union {
                struct rbdr_cfg s;
                u64 u;
        } rbdr_cfg;

        rbdr = &qs->rbdr[qidx];
        nicvf_reclaim_rbdr(nic, rbdr, qidx);
        if (!enable)
                return;

        /* Set descriptor base address */
        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
                              qidx, (u64)(rbdr->dmem.phys_base));

        /* Enable RBDR & set queue size */
        /* Buffer size should be in multiples of 128 bytes */
        rbdr_cfg.u = 0;
        rbdr_cfg.s.ena = 1;
        rbdr_cfg.s.reset = 0;
        rbdr_cfg.s.ldwb = 0;
        rbdr_cfg.s.qsize = RBDR_SIZE;
        rbdr_cfg.s.avg_con = 0;
        rbdr_cfg.s.lines = rbdr->dma_size / 128;
        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
                              qidx, rbdr_cfg.u);

        /* Notify HW */
        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
                              qidx, qs->rbdr_len - 1);

        /* Set threshold value for interrupt generation */
        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
                              qidx, rbdr->thresh - 1);
}

/* Requests PF to assign and enable Qset */
void nicvf_qset_config(struct nicvf *nic, bool enable)
{
        union nic_mbx mbx = {};
        struct queue_set *qs = nic->qs;
        struct qs_cfg *qs_cfg;

        if (!qs) {
                printf("Qset is still not allocated, don't init queues\n");
                return;
        }

        qs->enable = enable;
        qs->vnic_id = nic->vf_id;

        /* Send a mailbox msg to PF to config Qset */
        mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
        mbx.qs.num = qs->vnic_id;
#ifdef VNIC_MULTI_QSET_SUPPORT
        mbx.qs.sqs_count = nic->sqs_count;
#endif

        mbx.qs.cfg = 0;
        qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
        if (qs->enable) {
                qs_cfg->ena = 1;
#ifdef __BIG_ENDIAN
                qs_cfg->be = 1;
#endif
                qs_cfg->vnic = qs->vnic_id;
        }
        nicvf_send_msg_to_pf(nic, &mbx);
}

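/* Free RBDR, completion and send queue resources of this Qset */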
static void nicvf_free_resources(struct nicvf *nic)
{
        int qidx;
        struct queue_set *qs = nic->qs;

        /* Free receive buffer descriptor ring */
        for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
                nicvf_free_rbdr(nic, &qs->rbdr[qidx]);

        /* Free completion queue */
        for (qidx = 0; qidx < qs->cq_cnt; qidx++)
                nicvf_free_cmp_queue(nic, &qs->cq[qidx]);

        /* Free send queue */
        for (qidx = 0; qidx < qs->sq_cnt; qidx++)
                nicvf_free_snd_queue(nic, &qs->sq[qidx]);
}

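/* Allocate RBDR, send and completion queue resources of this Qset */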
static int nicvf_alloc_resources(struct nicvf *nic)
{
        int qidx;
        struct queue_set *qs = nic->qs;

        /* Alloc receive buffer descriptor ring */
        for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
                if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
                                    DMA_BUFFER_LEN))
                        goto alloc_fail;
        }

        /* Alloc send queue */
        for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
                if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
                        goto alloc_fail;
        }

        /* Alloc completion queue */
        for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
                if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
                        goto alloc_fail;
        }

        return 0;
alloc_fail:
        nicvf_free_resources(nic);
        return -1;
}

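/* Allocate the Qset and set default counts and lengths for its queues */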
int nicvf_set_qset_resources(struct nicvf *nic)
{
        struct queue_set *qs;

        qs = calloc(1, sizeof(struct queue_set));
        if (!qs)
                return -1;
        nic->qs = qs;

        /* Set count of each queue */
        qs->rbdr_cnt = RBDR_CNT;
        qs->rq_cnt = 1;
        qs->sq_cnt = SND_QUEUE_CNT;
        qs->cq_cnt = CMP_QUEUE_CNT;

        /* Set queue lengths */
        qs->rbdr_len = RCV_BUF_COUNT;
        qs->sq_len = SND_QUEUE_LEN;
        qs->cq_len = CMP_QUEUE_LEN;

        nic->rx_queues = qs->rq_cnt;
        nic->tx_queues = qs->sq_cnt;

        return 0;
}

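/* Bring all queues of the Qset up (enable) or down (disable) in the
 * required order.
 */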
int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
{
        bool disable = false;
        struct queue_set *qs = nic->qs;
        int qidx;

        if (!qs)
                return 0;

        if (enable) {
                if (nicvf_alloc_resources(nic))
                        return -1;

                for (qidx = 0; qidx < qs->sq_cnt; qidx++)
                        nicvf_snd_queue_config(nic, qs, qidx, enable);
                for (qidx = 0; qidx < qs->cq_cnt; qidx++)
                        nicvf_cmp_queue_config(nic, qs, qidx, enable);
                for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
                        nicvf_rbdr_config(nic, qs, qidx, enable);
                for (qidx = 0; qidx < qs->rq_cnt; qidx++)
                        nicvf_rcv_queue_config(nic, qs, qidx, enable);
        } else {
                for (qidx = 0; qidx < qs->rq_cnt; qidx++)
                        nicvf_rcv_queue_config(nic, qs, qidx, disable);
                for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
                        nicvf_rbdr_config(nic, qs, qidx, disable);
                for (qidx = 0; qidx < qs->sq_cnt; qidx++)
                        nicvf_snd_queue_config(nic, qs, qidx, disable);
                for (qidx = 0; qidx < qs->cq_cnt; qidx++)
                        nicvf_cmp_queue_config(nic, qs, qidx, disable);

                nicvf_free_resources(nic);
        }

        return 0;
}

/* Get free descriptors from the SQ
 * returns the first descriptor number
 */
static int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
{
        int qentry;

        qentry = sq->tail;
        sq->free_cnt -= desc_cnt;
        sq->tail += desc_cnt;
        sq->tail &= (sq->dmem.q_len - 1);

        return qentry;
}

/* Free descriptors back to SQ for future use */
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{
        sq->free_cnt += desc_cnt;
        sq->head += desc_cnt;
        sq->head &= (sq->dmem.q_len - 1);
}

static int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
{
        qentry++;
        qentry &= (sq->dmem.q_len - 1);
        return qentry;
}

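/* Enable a send queue */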
void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
{
        u64 sq_cfg;

        sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
        sq_cfg |= NICVF_SQ_EN;
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
        /* Ring doorbell so that H/W restarts processing SQEs */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
}

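/* Disable a send queue */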
void nicvf_sq_disable(struct nicvf *nic, int qidx)
{
        u64 sq_cfg;

        sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
        sq_cfg &= ~NICVF_SQ_EN;
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
}

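/* Walk the SQ from the SW head to the HW head and release descriptors
 * that HW has already consumed.
 */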
void nicvf_sq_free_used_descs(struct udevice *dev, struct snd_queue *sq,
                              int qidx)
{
        u64 head;
        struct nicvf *nic = dev_get_priv(dev);
        struct sq_hdr_subdesc *hdr;

        head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;

        while (sq->head != head) {
                hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
                if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
                        nicvf_put_sq_desc(sq, 1);
                        continue;
                }
                nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
        }
}

/* Get the number of SQ descriptors needed to xmit this packet */
static int nicvf_sq_subdesc_required(struct nicvf *nic)
{
        int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;

        return subdesc_cnt;
}

/* Add SQ HEADER subdescriptor.
 * First subdescriptor for every send descriptor.
 */
static inline void
nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
                         int subdesc_cnt, void *pkt, size_t pkt_len)
{
        struct sq_hdr_subdesc *hdr;

        hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
        sq->skbuff[qentry] = (uintptr_t)pkt;

        memset(hdr, 0, SND_QUEUE_DESC_SIZE);
        hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
        /* Enable notification via CQE after processing SQE */
        hdr->post_cqe = 1;
        /* Number of subdescriptors following this one */
        hdr->subdesc_cnt = subdesc_cnt;
        hdr->tot_len = pkt_len;

        flush_dcache_range((uintptr_t)hdr,
                           (uintptr_t)hdr + sizeof(struct sq_hdr_subdesc));
}

/* SQ GATHER subdescriptor
 * Must follow HDR descriptor
 */
static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
                                               size_t size, uintptr_t data)
{
        struct sq_gather_subdesc *gather;

        qentry &= (sq->dmem.q_len - 1);
        gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);

        memset(gather, 0, SND_QUEUE_DESC_SIZE);
        gather->subdesc_type = SQ_DESC_TYPE_GATHER;
        gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
        gather->size = size;
        gather->addr = data;

        flush_dcache_range((uintptr_t)gather,
                           (uintptr_t)gather + sizeof(struct sq_gather_subdesc));
}

/* Append a packet to the SQ for transmission; returns 1 on success, 0 on failure */
int nicvf_sq_append_pkt(struct nicvf *nic, void *pkt, size_t pkt_size)
{
        int subdesc_cnt;
        int sq_num = 0, qentry;
        struct queue_set *qs;
        struct snd_queue *sq;

        qs = nic->qs;
        sq = &qs->sq[sq_num];

        subdesc_cnt = nicvf_sq_subdesc_required(nic);
        if (subdesc_cnt > sq->free_cnt)
                goto append_fail;

        qentry = nicvf_get_sq_desc(sq, subdesc_cnt);

        /* Add SQ header subdesc */
        nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
                                 pkt, pkt_size);

        /* Add SQ gather subdescs */
        qentry = nicvf_get_nxt_sqentry(sq, qentry);
        nicvf_sq_add_gather_subdesc(sq, qentry, pkt_size, (uintptr_t)(pkt));

        flush_dcache_range((uintptr_t)pkt,
                           (uintptr_t)pkt + pkt_size);

        /* Make sure all memory stores are done before ringing doorbell */
        asm volatile ("dsb sy");

        /* Inform HW to xmit new packet */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
                              sq_num, subdesc_cnt);
        return 1;

append_fail:
        printf("Not enough SQ descriptors to xmit pkt\n");
        return 0;
}

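/* Remap a fragment index for the u16 rb_lens[] layout on big-endian */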
static unsigned int frag_num(unsigned int i)
{
#ifdef __BIG_ENDIAN
        return (i & ~3) + 3 - (i & 3);
#else
        return i;
#endif
}

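/* Reassemble a received packet from its receive buffer fragments into
 * one freshly allocated buffer.
 */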
void *nicvf_get_rcv_pkt(struct nicvf *nic, void *cq_desc, size_t *pkt_len)
{
        int frag;
        int payload_len = 0, tot_len;
        void *pkt = NULL, *pkt_buf = NULL, *buffer;
        struct cqe_rx_t *cqe_rx;
        struct rbdr *rbdr;
        struct rcv_queue *rq;
        struct queue_set *qs = nic->qs;
        u16 *rb_lens = NULL;
        u64 *rb_ptrs = NULL;

        cqe_rx = (struct cqe_rx_t *)cq_desc;

        rq = &qs->rq[cqe_rx->rq_idx];
        rbdr = &qs->rbdr[rq->start_qs_rbdr_idx];
        rb_lens = cq_desc + (3 * sizeof(u64)); /* Use offsetof */
        /* On all chips other than 88xx pass1, CQE_RX2_S is added to
         * CQE_RX at word6, hence buffer pointers move by one word.
         *
         * Use the existing 'hw_tso' flag, which is set for all chips
         * except 88xx pass1, instead of an additional cache line
         * access (or miss) from reading the PCI dev's revision.
         */
        if (!nic->hw_tso)
                rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
        else
                rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64));

        /*
         * Figure out packet length to create packet buffer
         */
        for (frag = 0; frag < cqe_rx->rb_cnt; frag++)
                payload_len += rb_lens[frag_num(frag)];
        *pkt_len = payload_len;
        /* Round up size to 8 byte multiple */
        tot_len = (payload_len & (~0x7)) + 8;
        buffer = calloc(1, tot_len);
        if (!buffer) {
                printf("%s - Failed to allocate packet buffer\n", __func__);
                return NULL;
        }
        pkt_buf = buffer;
        debug("total pkt buf %p len %ld tot_len %d\n", pkt_buf, *pkt_len,
              tot_len);
        for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
                payload_len = rb_lens[frag_num(frag)];

                invalidate_dcache_range((uintptr_t)(*rb_ptrs),
                                        (uintptr_t)(*rb_ptrs) + rbdr->dma_size);

                /* First fragment */
                *rb_ptrs = *rb_ptrs - cqe_rx->align_pad;

                pkt = nicvf_rb_ptr_to_pkt(nic, *rb_ptrs);

                invalidate_dcache_range((uintptr_t)pkt,
                                        (uintptr_t)pkt + payload_len);

                if (cqe_rx->align_pad)
                        pkt += cqe_rx->align_pad;
                debug("pkt_buf %p, pkt %p payload_len %d\n", pkt_buf, pkt,
                      payload_len);
                memcpy(buffer, pkt, payload_len);
                buffer += payload_len;
                /* Next buffer pointer */
                rb_ptrs++;
        }
        return pkt_buf;
}

/* Clear interrupt */
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
{
        u64 reg_val = 0;

        switch (int_type) {
        case NICVF_INTR_CQ:
                reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
                break;
        case NICVF_INTR_SQ:
                reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
                break;
        case NICVF_INTR_RBDR:
                reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
                break;
        case NICVF_INTR_PKT_DROP:
                reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
                break;
        case NICVF_INTR_TCP_TIMER:
                reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
                break;
        case NICVF_INTR_MBOX:
                reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
                break;
        case NICVF_INTR_QS_ERR:
                reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT);
                break;
        default:
                printf("Failed to clear interrupt: unknown type\n");
                break;
        }

        nicvf_reg_write(nic, NIC_VF_INT, reg_val);
}

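/* Read RQ octet and packet counters from hardware */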
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
{
        struct rcv_queue *rq;

#define GET_RQ_STATS(reg) \
        nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
                            (rq_idx << NIC_Q_NUM_SHIFT) | ((reg) << 3))

        rq = &nic->qs->rq[rq_idx];
        rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
        rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
}

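/* Read SQ octet and packet counters from hardware */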
void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
{
        struct snd_queue *sq;

#define GET_SQ_STATS(reg) \
        nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
                            (sq_idx << NIC_Q_NUM_SHIFT) | ((reg) << 3))

        sq = &nic->qs->sq[sq_idx];
        sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
        sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
}

/* Check for errors in a receive completion queue entry */
int nicvf_check_cqe_rx_errs(struct nicvf *nic,
                            struct cmp_queue *cq, void *cq_desc)
{
        struct cqe_rx_t *cqe_rx;
        struct cmp_queue_stats *stats = &cq->stats;

        cqe_rx = (struct cqe_rx_t *)cq_desc;
        if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
                stats->rx.errop.good++;
                return 0;
        }

        switch (cqe_rx->err_level) {
        case CQ_ERRLVL_MAC:
                stats->rx.errlvl.mac_errs++;
                break;
        case CQ_ERRLVL_L2:
                stats->rx.errlvl.l2_errs++;
                break;
        case CQ_ERRLVL_L3:
                stats->rx.errlvl.l3_errs++;
                break;
        case CQ_ERRLVL_L4:
                stats->rx.errlvl.l4_errs++;
                break;
        }

        switch (cqe_rx->err_opcode) {
        case CQ_RX_ERROP_RE_PARTIAL:
                stats->rx.errop.partial_pkts++;
                break;
        case CQ_RX_ERROP_RE_JABBER:
                stats->rx.errop.jabber_errs++;
                break;
        case CQ_RX_ERROP_RE_FCS:
                stats->rx.errop.fcs_errs++;
                break;
        case CQ_RX_ERROP_RE_TERMINATE:
                stats->rx.errop.terminate_errs++;
                break;
        case CQ_RX_ERROP_RE_RX_CTL:
                stats->rx.errop.bgx_rx_errs++;
                break;
        case CQ_RX_ERROP_PREL2_ERR:
                stats->rx.errop.prel2_errs++;
                break;
        case CQ_RX_ERROP_L2_FRAGMENT:
                stats->rx.errop.l2_frags++;
                break;
        case CQ_RX_ERROP_L2_OVERRUN:
                stats->rx.errop.l2_overruns++;
                break;
        case CQ_RX_ERROP_L2_PFCS:
                stats->rx.errop.l2_pfcs++;
                break;
        case CQ_RX_ERROP_L2_PUNY:
                stats->rx.errop.l2_puny++;
                break;
        case CQ_RX_ERROP_L2_MAL:
                stats->rx.errop.l2_hdr_malformed++;
                break;
        case CQ_RX_ERROP_L2_OVERSIZE:
                stats->rx.errop.l2_oversize++;
                break;
        case CQ_RX_ERROP_L2_UNDERSIZE:
                stats->rx.errop.l2_undersize++;
                break;
        case CQ_RX_ERROP_L2_LENMISM:
                stats->rx.errop.l2_len_mismatch++;
                break;
        case CQ_RX_ERROP_L2_PCLP:
                stats->rx.errop.l2_pclp++;
                break;
        case CQ_RX_ERROP_IP_NOT:
                stats->rx.errop.non_ip++;
                break;
        case CQ_RX_ERROP_IP_CSUM_ERR:
                stats->rx.errop.ip_csum_err++;
                break;
        case CQ_RX_ERROP_IP_MAL:
                stats->rx.errop.ip_hdr_malformed++;
                break;
        case CQ_RX_ERROP_IP_MALD:
                stats->rx.errop.ip_payload_malformed++;
                break;
        case CQ_RX_ERROP_IP_HOP:
                stats->rx.errop.ip_hop_errs++;
                break;
        case CQ_RX_ERROP_L3_ICRC:
                stats->rx.errop.l3_icrc_errs++;
                break;
        case CQ_RX_ERROP_L3_PCLP:
                stats->rx.errop.l3_pclp++;
                break;
        case CQ_RX_ERROP_L4_MAL:
                stats->rx.errop.l4_malformed++;
                break;
        case CQ_RX_ERROP_L4_CHK:
                stats->rx.errop.l4_csum_errs++;
                break;
        case CQ_RX_ERROP_UDP_LEN:
                stats->rx.errop.udp_len_err++;
                break;
        case CQ_RX_ERROP_L4_PORT:
                stats->rx.errop.bad_l4_port++;
                break;
        case CQ_RX_ERROP_TCP_FLAG:
                stats->rx.errop.bad_tcp_flag++;
                break;
        case CQ_RX_ERROP_TCP_OFFSET:
                stats->rx.errop.tcp_offset_errs++;
                break;
        case CQ_RX_ERROP_L4_PCLP:
                stats->rx.errop.l4_pclp++;
                break;
        case CQ_RX_ERROP_RBDR_TRUNC:
                stats->rx.errop.pkt_truncated++;
                break;
        }

        return 1;
}

/* Check for errors in a send completion queue entry */
int nicvf_check_cqe_tx_errs(struct nicvf *nic,
                            struct cmp_queue *cq, void *cq_desc)
{
        struct cqe_send_t *cqe_tx;
        struct cmp_queue_stats *stats = &cq->stats;

        cqe_tx = (struct cqe_send_t *)cq_desc;
        switch (cqe_tx->send_status) {
        case CQ_TX_ERROP_GOOD:
                stats->tx.good++;
                return 0;
        case CQ_TX_ERROP_DESC_FAULT:
                stats->tx.desc_fault++;
                break;
        case CQ_TX_ERROP_HDR_CONS_ERR:
                stats->tx.hdr_cons_err++;
                break;
        case CQ_TX_ERROP_SUBDC_ERR:
                stats->tx.subdesc_err++;
                break;
        case CQ_TX_ERROP_IMM_SIZE_OFLOW:
                stats->tx.imm_size_oflow++;
                break;
        case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
                stats->tx.data_seq_err++;
                break;
        case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
                stats->tx.mem_seq_err++;
                break;
        case CQ_TX_ERROP_LOCK_VIOL:
                stats->tx.lock_viol++;
                break;
        case CQ_TX_ERROP_DATA_FAULT:
                stats->tx.data_fault++;
                break;
        case CQ_TX_ERROP_TSTMP_CONFLICT:
                stats->tx.tstmp_conflict++;
                break;
        case CQ_TX_ERROP_TSTMP_TIMEOUT:
                stats->tx.tstmp_timeout++;
                break;
        case CQ_TX_ERROP_MEM_FAULT:
                stats->tx.mem_fault++;
                break;
        case CQ_TX_ERROP_CK_OVERLAP:
                stats->tx.csum_overlap++;
                break;
        case CQ_TX_ERROP_CK_OFLOW:
                stats->tx.csum_overflow++;
                break;
        }

        return 1;
}