/* linux/include/linux/qed/qed_chain.h */
   1/* QLogic qed NIC Driver
   2 * Copyright (c) 2015-2017  QLogic Corporation
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 */
  32
  33#ifndef _QED_CHAIN_H
  34#define _QED_CHAIN_H
  35
  36#include <linux/types.h>
  37#include <asm/byteorder.h>
  38#include <linux/kernel.h>
  39#include <linux/list.h>
  40#include <linux/slab.h>
  41#include <linux/qed/common_hsi.h>
  42
/* Layout of the chain's pages */
enum qed_chain_mode {
	/* Each Page contains a next pointer at its end */
	QED_CHAIN_MODE_NEXT_PTR,

	/* Chain is a single page; a next pointer is not required */
	QED_CHAIN_MODE_SINGLE,

	/* Page pointers are located in a side list */
	QED_CHAIN_MODE_PBL,
};
  53
/* Direction in which the driver uses the chain; determines the chain's
 * initial fill level on reset (see qed_chain_reset()).
 */
enum qed_chain_use_mode {
	QED_CHAIN_USE_TO_PRODUCE,		/* Chain starts empty */
	QED_CHAIN_USE_TO_CONSUME,		/* Chain starts full */
	QED_CHAIN_USE_TO_CONSUME_PRODUCE,	/* Chain starts empty */
};
  59
/* Width of the chain's producer/consumer counters */
enum qed_chain_cnt_type {
	/* The chain's size/prod/cons are kept in 16-bit variables */
	QED_CHAIN_CNT_TYPE_U16,

	/* The chain's size/prod/cons are kept in 32-bit variables */
	QED_CHAIN_CNT_TYPE_U32,
};
  67
/* Next-pointer element placed after the usable elements of each page in a
 * QED_CHAIN_MODE_NEXT_PTR chain (see qed_chain_init_next_ptr_elem()).
 */
struct qed_chain_next {
	struct regpair	next_phys;	/* DMA address of the next page */
	void		*next_virt;	/* Virtual address of the next page */
};
  72
/* PBL producer/consumer page indices for chains with 16-bit counters */
struct qed_chain_pbl_u16 {
	u16 prod_page_idx;
	u16 cons_page_idx;
};
  77
/* PBL producer/consumer page indices for chains with 32-bit counters */
struct qed_chain_pbl_u32 {
	u32 prod_page_idx;
	u32 cons_page_idx;
};
  82
struct qed_chain_u16 {
	/* Cyclic index of next element to produce/consume */
	u16 prod_idx;
	u16 cons_idx;
};
  88
struct qed_chain_u32 {
	/* Cyclic index of next element to produce/consume */
	u32 prod_idx;
	u32 cons_idx;
};
  94
struct qed_chain {
	/* Fastpath portion of the chain - required for commands such
	 * as produce / consume.
	 */

	/* Point to next element to produce/consume */
	void *p_prod_elem;
	void *p_cons_elem;

	/* Fastpath portions of the PBL [if exists] */
	struct {
		/* Table for keeping the virtual addresses of the chain pages,
		 * respectively to the physical addresses in the pbl table.
		 */
		void **pp_virt_addr_tbl;

		/* Producer/consumer page indices; active member depends on
		 * cnt_type.
		 */
		union {
			struct qed_chain_pbl_u16 u16;
			struct qed_chain_pbl_u32 u32;
		} c;
	} pbl;

	/* Cyclic prod/cons indices; active member depends on cnt_type */
	union {
		struct qed_chain_u16 chain16;
		struct qed_chain_u32 chain32;
	} u;

	/* Capacity counts only usable elements */
	u32 capacity;
	u32 page_cnt;

	/* Chain page layout; see enum qed_chain_mode */
	enum qed_chain_mode mode;

	/* Elements information for fast calculations */
	u16 elem_per_page;
	u16 elem_per_page_mask;
	u16 elem_size;
	u16 next_page_mask;
	u16 usable_per_page;
	u8 elem_unusable;

	/* enum qed_chain_cnt_type, stored as a u8 */
	u8 cnt_type;

	/* Slowpath of the chain - required for initialization and destruction,
	 * but isn't involved in regular functionality.
	 */

	/* Base address of a pre-allocated buffer for pbl */
	struct {
		dma_addr_t p_phys_table;
		void *p_virt_table;
	} pbl_sp;

	/* Address of first page of the chain - the address is required
	 * for fastpath operation [consume/produce] but only for the SINGLE
	 * flavour which isn't considered fastpath [== SPQ].
	 */
	void *p_virt_addr;
	dma_addr_t p_phys_addr;

	/* Total number of elements [for entire chain] */
	u32 size;

	/* enum qed_chain_use_mode, stored as a u8 */
	u8 intended_use;
};
 159
/* Size in bytes of a single PBL entry and of a single chain page */
#define QED_CHAIN_PBL_ENTRY_SIZE        (8)
#define QED_CHAIN_PAGE_SIZE             (0x1000)

/* Total number of elements that fit in a single chain page */
#define ELEMS_PER_PAGE(elem_size)       (QED_CHAIN_PAGE_SIZE / (elem_size))

/* Number of elements per page taken up by the next-pointer element
 * (rounded up to whole elements); zero for non-linked chain modes.
 */
#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)         \
	(((mode) == QED_CHAIN_MODE_NEXT_PTR) ?           \
	 (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / \
		   (elem_size))) : 0)

/* Number of elements per page actually available to the user */
#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
	((u32)(ELEMS_PER_PAGE(elem_size) -     \
	       UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))

/* Number of pages required to hold elem_cnt usable elements */
#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
	DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))

/* True when the chain's counters are 16-bit/32-bit wide, respectively */
#define is_chain_u16(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
#define is_chain_u32(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)
 178
 179/* Accessors */
 180static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain)
 181{
 182        return p_chain->u.chain16.prod_idx;
 183}
 184
 185static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain)
 186{
 187        return p_chain->u.chain16.cons_idx;
 188}
 189
 190static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain)
 191{
 192        return p_chain->u.chain32.cons_idx;
 193}
 194
/* Return the number of free (producible) elements in a 16-bit-counter chain */
static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
{
	u16 used;

	/* prod - cons, with an extra 0x10000 so the subtraction also works
	 * when prod has wrapped past cons.
	 */
	used = (u16) (((u32)0x10000 +
		       (u32)p_chain->u.chain16.prod_idx) -
		      (u32)p_chain->u.chain16.cons_idx);
	if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
		/* Discount the per-page next-pointer elements the indices
		 * skipped over between cons and prod.
		 */
		used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
		    p_chain->u.chain16.cons_idx / p_chain->elem_per_page;

	return (u16)(p_chain->capacity - used);
}
 208
/* Return the number of free (producible) elements in a 32-bit-counter chain */
static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain)
{
	u32 used;

	/* prod - cons, with an extra 2^32 so the subtraction also works
	 * when prod has wrapped past cons.
	 */
	used = (u32) (((u64)0x100000000ULL +
		       (u64)p_chain->u.chain32.prod_idx) -
		      (u64)p_chain->u.chain32.cons_idx);
	if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
		/* Discount the per-page next-pointer elements the indices
		 * skipped over between cons and prod.
		 */
		used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
		    p_chain->u.chain32.cons_idx / p_chain->elem_per_page;

	return p_chain->capacity - used;
}
 222
 223static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain)
 224{
 225        return p_chain->usable_per_page;
 226}
 227
 228static inline u8 qed_chain_get_unusable_per_page(struct qed_chain *p_chain)
 229{
 230        return p_chain->elem_unusable;
 231}
 232
 233static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain)
 234{
 235        return p_chain->page_cnt;
 236}
 237
 238static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
 239{
 240        return p_chain->pbl_sp.p_phys_table;
 241}
 242
/**
 * @brief qed_chain_advance_page -
 *
 * Advance the next element across pages for a linked chain
 *
 * @param p_chain
 * @param p_next_elem   in/out: current element pointer; updated to the first
 *                      element of the next page
 * @param idx_to_inc    points to the chain's element index; must be a u16 or
 *                      u32 matching p_chain->cnt_type
 * @param page_to_inc   points to the chain's PBL page index (u16/u32 per
 *                      cnt_type); only dereferenced in PBL mode
 */
static inline void
qed_chain_advance_page(struct qed_chain *p_chain,
		       void **p_next_elem, void *idx_to_inc, void *page_to_inc)
{
	struct qed_chain_next *p_next = NULL;
	u32 page_index = 0;

	switch (p_chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		/* Follow the page's trailing next-pointer and account for
		 * the unusable (next-pointer) elements in the index.
		 */
		p_next = *p_next_elem;
		*p_next_elem = p_next->next_virt;
		if (is_chain_u16(p_chain))
			*(u16 *)idx_to_inc += p_chain->elem_unusable;
		else
			*(u32 *)idx_to_inc += p_chain->elem_unusable;
		break;
	case QED_CHAIN_MODE_SINGLE:
		/* A single-page chain simply wraps back to its start */
		*p_next_elem = p_chain->p_virt_addr;
		break;

	case QED_CHAIN_MODE_PBL:
		/* Increment (and wrap) the page index, then look up the new
		 * page's virtual address in the side table.
		 */
		if (is_chain_u16(p_chain)) {
			if (++(*(u16 *)page_to_inc) == p_chain->page_cnt)
				*(u16 *)page_to_inc = 0;
			page_index = *(u16 *)page_to_inc;
		} else {
			if (++(*(u32 *)page_to_inc) == p_chain->page_cnt)
				*(u32 *)page_to_inc = 0;
			page_index = *(u32 *)page_to_inc;
		}
		*p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index];
	}
}
 286
/* True when the 16-bit index sits on the first unusable element of a page */
#define is_unusable_idx(p, idx) \
	(((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

/* 32-bit counterpart of is_unusable_idx() */
#define is_unusable_idx_u32(p, idx) \
	(((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

/* True when the next 16-bit index would be the first unusable element */
#define is_unusable_next_idx(p, idx)                             \
	((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \
	 (p)->usable_per_page)

/* 32-bit counterpart of is_unusable_next_idx() */
#define is_unusable_next_idx_u32(p, idx)                         \
	((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) == \
	 (p)->usable_per_page)

/* If the given index field ('prod_idx' or 'cons_idx') points at a page's
 * unusable region, advance it past the unusable elements.
 */
#define test_and_skip(p, idx)                                              \
	do {                                            \
		if (is_chain_u16(p)) {                                     \
			if (is_unusable_idx(p, idx))                       \
				(p)->u.chain16.idx += (p)->elem_unusable;  \
		} else {                                                   \
			if (is_unusable_idx_u32(p, idx))                   \
				(p)->u.chain32.idx += (p)->elem_unusable;  \
		}                                       \
	} while (0)
 310
 311/**
 312 * @brief qed_chain_return_produced -
 313 *
 314 * A chain in which the driver "Produces" elements should use this API
 315 * to indicate previous produced elements are now consumed.
 316 *
 317 * @param p_chain
 318 */
 319static inline void qed_chain_return_produced(struct qed_chain *p_chain)
 320{
 321        if (is_chain_u16(p_chain))
 322                p_chain->u.chain16.cons_idx++;
 323        else
 324                p_chain->u.chain32.cons_idx++;
 325        test_and_skip(p_chain, cons_idx);
 326}
 327
/**
 * @brief qed_chain_produce -
 *
 * A chain in which the driver "Produces" elements should use this to get
 * a pointer to the next element which can be "Produced". It's driver
 * responsibility to validate that the chain has room for a new element.
 *
 * @param p_chain
 *
 * @return void*, a pointer to the next element
 */
static inline void *qed_chain_produce(struct qed_chain *p_chain)
{
	void *p_ret = NULL, *p_prod_idx, *p_prod_page_idx;

	if (is_chain_u16(p_chain)) {
		/* About to step past the page's usable region - advance to
		 * the next page first.
		 */
		if ((p_chain->u.chain16.prod_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_prod_idx = &p_chain->u.chain16.prod_idx;
			p_prod_page_idx = &p_chain->pbl.c.u16.prod_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
					       p_prod_idx, p_prod_page_idx);
		}
		p_chain->u.chain16.prod_idx++;
	} else {
		if ((p_chain->u.chain32.prod_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_prod_idx = &p_chain->u.chain32.prod_idx;
			p_prod_page_idx = &p_chain->pbl.c.u32.prod_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
					       p_prod_idx, p_prod_page_idx);
		}
		p_chain->u.chain32.prod_idx++;
	}

	/* Return the current element and step the pointer to the next one */
	p_ret = p_chain->p_prod_elem;
	p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
					p_chain->elem_size);

	return p_ret;
}
 369
/**
 * @brief qed_chain_get_capacity -
 *
 * Get the maximum number of usable elements (BDs) in the chain
 *
 * @param p_chain
 *
 * @return u32, the chain's capacity
 */
static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
{
	return p_chain->capacity;
}
 384
 385/**
 386 * @brief qed_chain_recycle_consumed -
 387 *
 388 * Returns an element which was previously consumed;
 389 * Increments producers so they could be written to FW.
 390 *
 391 * @param p_chain
 392 */
 393static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
 394{
 395        test_and_skip(p_chain, prod_idx);
 396        if (is_chain_u16(p_chain))
 397                p_chain->u.chain16.prod_idx++;
 398        else
 399                p_chain->u.chain32.prod_idx++;
 400}
 401
/**
 * @brief qed_chain_consume -
 *
 * A Chain in which the driver utilizes data written by a different source
 * (i.e., FW) should use this to access passed buffers.
 *
 * @param p_chain
 *
 * @return void*, a pointer to the next buffer written
 */
static inline void *qed_chain_consume(struct qed_chain *p_chain)
{
	void *p_ret = NULL, *p_cons_idx, *p_cons_page_idx;

	if (is_chain_u16(p_chain)) {
		/* About to step past the page's usable region - advance to
		 * the next page first.
		 */
		if ((p_chain->u.chain16.cons_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_cons_idx = &p_chain->u.chain16.cons_idx;
			p_cons_page_idx = &p_chain->pbl.c.u16.cons_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
					       p_cons_idx, p_cons_page_idx);
		}
		p_chain->u.chain16.cons_idx++;
	} else {
		if ((p_chain->u.chain32.cons_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_cons_idx = &p_chain->u.chain32.cons_idx;
			p_cons_page_idx = &p_chain->pbl.c.u32.cons_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
					       p_cons_idx, p_cons_page_idx);
		}
		p_chain->u.chain32.cons_idx++;
	}

	/* Return the current element and step the pointer to the next one */
	p_ret = p_chain->p_cons_elem;
	p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
					p_chain->elem_size);

	return p_ret;
}
 442
/**
 * @brief qed_chain_reset - Resets the chain to its start state
 *
 * @param p_chain pointer to a previously allocated chain
 */
static inline void qed_chain_reset(struct qed_chain *p_chain)
{
	u32 i;

	/* Rewind prod/cons and the element pointers to the chain's start */
	if (is_chain_u16(p_chain)) {
		p_chain->u.chain16.prod_idx = 0;
		p_chain->u.chain16.cons_idx = 0;
	} else {
		p_chain->u.chain32.prod_idx = 0;
		p_chain->u.chain32.cons_idx = 0;
	}
	p_chain->p_cons_elem = p_chain->p_virt_addr;
	p_chain->p_prod_elem = p_chain->p_virt_addr;

	if (p_chain->mode == QED_CHAIN_MODE_PBL) {
		/* Use (page_cnt - 1) as a reset value for the prod/cons page's
		 * indices, to avoid unnecessary page advancing on the first
		 * call to qed_chain_produce/consume. Instead, the indices
		 * will be advanced to page_cnt and then will be wrapped to 0.
		 */
		u32 reset_val = p_chain->page_cnt - 1;

		if (is_chain_u16(p_chain)) {
			p_chain->pbl.c.u16.prod_page_idx = (u16)reset_val;
			p_chain->pbl.c.u16.cons_page_idx = (u16)reset_val;
		} else {
			p_chain->pbl.c.u32.prod_page_idx = reset_val;
			p_chain->pbl.c.u32.cons_page_idx = reset_val;
		}
	}

	switch (p_chain->intended_use) {
	case QED_CHAIN_USE_TO_CONSUME:
		/* produce empty elements so a consume-only chain starts full */
		for (i = 0; i < p_chain->capacity; i++)
			qed_chain_recycle_consumed(p_chain);
		break;

	case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
	case QED_CHAIN_USE_TO_PRODUCE:
	default:
		/* Do nothing - these chains start empty */
		break;
	}
}
 493
/**
 * @brief qed_chain_init_params - Initializes a basic chain struct
 *
 * The chain's buffers are attached separately, via qed_chain_init_mem()
 * and - for PBL chains - qed_chain_init_pbl_mem().
 *
 * @param p_chain
 * @param page_cnt      number of pages in the allocated buffer
 * @param elem_size     size of each element in the chain
 * @param intended_use  produce/consume direction; see enum qed_chain_use_mode
 * @param mode          chain page layout; see enum qed_chain_mode
 * @param cnt_type      width of the prod/cons counters (16/32-bit)
 */
static inline void qed_chain_init_params(struct qed_chain *p_chain,
					 u32 page_cnt,
					 u8 elem_size,
					 enum qed_chain_use_mode intended_use,
					 enum qed_chain_mode mode,
					 enum qed_chain_cnt_type cnt_type)
{
	/* chain fixed parameters */
	p_chain->p_virt_addr = NULL;
	p_chain->p_phys_addr = 0;
	p_chain->elem_size	= elem_size;
	p_chain->intended_use = (u8)intended_use;
	p_chain->mode		= mode;
	p_chain->cnt_type = (u8)cnt_type;

	/* Derived per-page element accounting */
	p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
	p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
	p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
	p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
	p_chain->next_page_mask = (p_chain->usable_per_page &
				   p_chain->elem_per_page_mask);

	p_chain->page_cnt = page_cnt;
	p_chain->capacity = p_chain->usable_per_page * page_cnt;
	p_chain->size = p_chain->elem_per_page * page_cnt;

	p_chain->pbl_sp.p_phys_table = 0;
	p_chain->pbl_sp.p_virt_table = NULL;
	p_chain->pbl.pp_virt_addr_tbl = NULL;
}
 535
 536/**
 537 * @brief qed_chain_init_mem -
 538 *
 539 * Initalizes a basic chain struct with its chain buffers
 540 *
 541 * @param p_chain
 542 * @param p_virt_addr   virtual address of allocated buffer's beginning
 543 * @param p_phys_addr   physical address of allocated buffer's beginning
 544 *
 545 */
 546static inline void qed_chain_init_mem(struct qed_chain *p_chain,
 547                                      void *p_virt_addr, dma_addr_t p_phys_addr)
 548{
 549        p_chain->p_virt_addr = p_virt_addr;
 550        p_chain->p_phys_addr = p_phys_addr;
 551}
 552
 553/**
 554 * @brief qed_chain_init_pbl_mem -
 555 *
 556 * Initalizes a basic chain struct with its pbl buffers
 557 *
 558 * @param p_chain
 559 * @param p_virt_pbl    pointer to a pre allocated side table which will hold
 560 *                      virtual page addresses.
 561 * @param p_phys_pbl    pointer to a pre-allocated side table which will hold
 562 *                      physical page addresses.
 563 * @param pp_virt_addr_tbl
 564 *                      pointer to a pre-allocated side table which will hold
 565 *                      the virtual addresses of the chain pages.
 566 *
 567 */
 568static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain,
 569                                          void *p_virt_pbl,
 570                                          dma_addr_t p_phys_pbl,
 571                                          void **pp_virt_addr_tbl)
 572{
 573        p_chain->pbl_sp.p_phys_table = p_phys_pbl;
 574        p_chain->pbl_sp.p_virt_table = p_virt_pbl;
 575        p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
 576}
 577
/**
 * @brief qed_chain_init_next_ptr_elem -
 *
 * Initializes a next pointer element
 *
 * @param p_chain
 * @param p_virt_curr   virtual address of a chain page of which the next
 *                      pointer element is initialized
 * @param p_virt_next   virtual address of the next chain page
 * @param p_phys_next   physical address of the next chain page
 *
 */
static inline void
qed_chain_init_next_ptr_elem(struct qed_chain *p_chain,
			     void *p_virt_curr,
			     void *p_virt_next, dma_addr_t p_phys_next)
{
	struct qed_chain_next *p_next;
	u32 size;

	/* The next-pointer element sits right after the page's usable
	 * elements.
	 */
	size = p_chain->elem_size * p_chain->usable_per_page;
	p_next = (struct qed_chain_next *)((u8 *)p_virt_curr + size);

	/* Store the next page's DMA address as a little-endian regpair */
	DMA_REGPAIR_LE(p_next->next_phys, p_phys_next);

	p_next->next_virt = p_virt_next;
}
 605
/**
 * @brief qed_chain_get_last_elem -
 *
 * Returns a pointer to the last element of the chain
 *
 * @param p_chain
 *
 * @return void*, NULL when the chain's buffer was not yet attached
 */
static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
{
	struct qed_chain_next *p_next = NULL;
	void *p_virt_addr = NULL;
	u32 size, last_page_idx;

	if (!p_chain->p_virt_addr)
		goto out;

	switch (p_chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		/* Walk the next-pointers until the chain wraps back to its
		 * first page.
		 */
		size = p_chain->elem_size * p_chain->usable_per_page;
		p_virt_addr = p_chain->p_virt_addr;
		p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + size);
		while (p_next->next_virt != p_chain->p_virt_addr) {
			p_virt_addr = p_next->next_virt;
			p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
							   size);
		}
		break;
	case QED_CHAIN_MODE_SINGLE:
		p_virt_addr = p_chain->p_virt_addr;
		break;
	case QED_CHAIN_MODE_PBL:
		last_page_idx = p_chain->page_cnt - 1;
		p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx];
		break;
	}
	/* p_virt_addr points at this stage to the last page of the chain */
	size = p_chain->elem_size * (p_chain->usable_per_page - 1);
	p_virt_addr = (u8 *)p_virt_addr + size;
out:
	return p_virt_addr;
}
 649
 650/**
 651 * @brief qed_chain_set_prod - sets the prod to the given value
 652 *
 653 * @param prod_idx
 654 * @param p_prod_elem
 655 */
 656static inline void qed_chain_set_prod(struct qed_chain *p_chain,
 657                                      u32 prod_idx, void *p_prod_elem)
 658{
 659        if (is_chain_u16(p_chain))
 660                p_chain->u.chain16.prod_idx = (u16) prod_idx;
 661        else
 662                p_chain->u.chain32.prod_idx = prod_idx;
 663        p_chain->p_prod_elem = p_prod_elem;
 664}
 665
 666/**
 667 * @brief qed_chain_pbl_zero_mem - set chain memory to 0
 668 *
 669 * @param p_chain
 670 */
 671static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
 672{
 673        u32 i, page_cnt;
 674
 675        if (p_chain->mode != QED_CHAIN_MODE_PBL)
 676                return;
 677
 678        page_cnt = qed_chain_get_page_cnt(p_chain);
 679
 680        for (i = 0; i < page_cnt; i++)
 681                memset(p_chain->pbl.pp_virt_addr_tbl[i], 0,
 682                       QED_CHAIN_PAGE_SIZE);
 683}
 684
 685#endif
 686