linux/drivers/net/ethernet/qlogic/qed/qed_cxt.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_rdma.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

/* QM constants */
#define QM_PQ_ELEMENT_SIZE      4 /* in bytes */

/* Doorbell-Queue constants */
#define DQ_RANGE_SHIFT          4
#define DQ_RANGE_ALIGN          BIT(DQ_RANGE_SHIFT)

/* Searcher constants */
#define SRC_MIN_NUM_ELEMS 256

/* Timers constants */
#define TM_SHIFT        7
#define TM_ALIGN        BIT(TM_SHIFT)
#define TM_ELEM_SIZE    4

#define ILT_DEFAULT_HW_P_SIZE   4

#define ILT_PAGE_IN_BYTES(hw_p_size)    (1U << ((hw_p_size) + 12))
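/* e.g. the default hw_p_size of ILT_DEFAULT_HW_P_SIZE (4) gives
 * 1U << (4 + 12) == 64K pages, matching the per-client default set in
 * qed_cxt_mngr_alloc().
 */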
#define ILT_CFG_REG(cli, reg)   PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET

/* ILT entry structure */
#define ILT_ENTRY_PHY_ADDR_MASK         (~0ULL >> 12)
#define ILT_ENTRY_PHY_ADDR_SHIFT        0
#define ILT_ENTRY_VALID_MASK            0x1ULL
#define ILT_ENTRY_VALID_SHIFT           52
#define ILT_ENTRY_IN_REGS               2
#define ILT_REG_SIZE_IN_BYTES           4
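/* The two 32-bit registers of an ILT entry form one 64-bit value: the
 * page address is stored 4K-aligned (physical address >> 12) in bits
 * 0-51, and bit 52 is the valid bit - hence the 52-bit address mask and
 * the valid shift above.
 */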

/* connection context union */
union conn_context {
        struct e4_core_conn_context core_ctx;
        struct e4_eth_conn_context eth_ctx;
        struct e4_iscsi_conn_context iscsi_ctx;
        struct e4_fcoe_conn_context fcoe_ctx;
        struct e4_roce_conn_context roce_ctx;
};

/* TYPE-0 task context - iSCSI, FCOE */
union type0_task_context {
        struct e4_iscsi_task_context iscsi_ctx;
        struct e4_fcoe_task_context fcoe_ctx;
};

/* TYPE-1 task context - ROCE */
union type1_task_context {
        struct e4_rdma_task_context roce_ctx;
};

struct src_ent {
        __u8                            opaque[56];
        __be64                          next;
};
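
/* A searcher T2 entry is 64 bytes: 56 opaque bytes owned by the
 * hardware plus a big-endian 'next' pointer that the driver uses to
 * chain the entries into a free list (see qed_cxt_src_t2_alloc()).
 */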

#define CDUT_SEG_ALIGNMENT              3 /* in 4k chunks */
#define CDUT_SEG_ALIGNMENT_IN_BYTES     BIT(CDUT_SEG_ALIGNMENT + 12)

#define CONN_CXT_SIZE(p_hwfn) \
        ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)

#define SRQ_CXT_SIZE (sizeof(struct rdma_srq_context))
#define XRC_SRQ_CXT_SIZE (sizeof(struct rdma_xrc_srq_context))

#define TYPE0_TASK_CXT_SIZE(p_hwfn) \
        ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)

/* Alignment is inherent to the type1_task_context structure */
#define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)

static bool src_proto(enum protocol_type type)
{
        return type == PROTOCOLID_TCP_ULP ||
               type == PROTOCOLID_FCOE ||
               type == PROTOCOLID_IWARP;
}

static bool tm_cid_proto(enum protocol_type type)
{
        return type == PROTOCOLID_TCP_ULP ||
               type == PROTOCOLID_FCOE ||
               type == PROTOCOLID_ROCE ||
               type == PROTOCOLID_IWARP;
}

static bool tm_tid_proto(enum protocol_type type)
{
        return type == PROTOCOLID_FCOE;
}

/* counts the iids for the CDU/CDUC ILT client configuration */
struct qed_cdu_iids {
        u32 pf_cids;
        u32 per_vf_cids;
};

static void qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr,
                             struct qed_cdu_iids *iids)
{
        u32 type;

        for (type = 0; type < MAX_CONN_TYPES; type++) {
                iids->pf_cids += p_mngr->conn_cfg[type].cid_count;
                iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
        }
}

/* counts the iids for the Searcher block configuration */
struct qed_src_iids {
        u32 pf_cids;
        u32 per_vf_cids;
};

static void qed_cxt_src_iids(struct qed_cxt_mngr *p_mngr,
                             struct qed_src_iids *iids)
{
        u32 i;

        for (i = 0; i < MAX_CONN_TYPES; i++) {
                if (!src_proto(i))
                        continue;

                iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
                iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
        }

        /* In addition, count the L2 filtering (arfs) filters */
        iids->pf_cids += p_mngr->arfs_count;
}

/* counts the iids for the Timers block configuration */
struct qed_tm_iids {
        u32 pf_cids;
        u32 pf_tids[NUM_TASK_PF_SEGMENTS];      /* per segment */
        u32 pf_tids_total;
        u32 per_vf_cids;
        u32 per_vf_tids;
};

static void qed_cxt_tm_iids(struct qed_hwfn *p_hwfn,
                            struct qed_cxt_mngr *p_mngr,
                            struct qed_tm_iids *iids)
{
        bool tm_vf_required = false;
        bool tm_required = false;
        int i, j;

        /* Timers is a special case -> we don't count how many cids
         * require timers but what's the max cid that will be used by
         * the timer block. Therefore we traverse in reverse order, and
         * once we hit a protocol that requires the timers memory, we
         * sum all the protocols up to that one.
         */
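        /* For example, if only the highest-indexed protocol with a
         * non-zero cid_count requires timers, the cids of all
         * lower-indexed protocols are still summed, because the timer
         * block is indexed by cid and must cover every cid up to the
         * maximal one in use.
         */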
        for (i = MAX_CONN_TYPES - 1; i >= 0; i--) {
                struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];

                if (tm_cid_proto(i) || tm_required) {
                        if (p_cfg->cid_count)
                                tm_required = true;

                        iids->pf_cids += p_cfg->cid_count;
                }

                if (tm_cid_proto(i) || tm_vf_required) {
                        if (p_cfg->cids_per_vf)
                                tm_vf_required = true;

                        iids->per_vf_cids += p_cfg->cids_per_vf;
                }

                if (tm_tid_proto(i)) {
                        struct qed_tid_seg *segs = p_cfg->tid_seg;

                        /* for each segment there is at most one
                         * protocol for which count is not 0.
                         */
                        for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
                                iids->pf_tids[j] += segs[j].count;

                        /* The last array element is for the VFs. As for PF
                         * segments there can be only one protocol for
                         * which this value is not 0.
                         */
                        iids->per_vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
                }
        }

        iids->pf_cids = roundup(iids->pf_cids, TM_ALIGN);
        iids->per_vf_cids = roundup(iids->per_vf_cids, TM_ALIGN);
        iids->per_vf_tids = roundup(iids->per_vf_tids, TM_ALIGN);

        for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) {
                iids->pf_tids[j] = roundup(iids->pf_tids[j], TM_ALIGN);
                iids->pf_tids_total += iids->pf_tids[j];
        }
}

static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
                            struct qed_qm_iids *iids)
{
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        struct qed_tid_seg *segs;
        u32 vf_cids = 0, type, j;
        u32 vf_tids = 0;

        for (type = 0; type < MAX_CONN_TYPES; type++) {
                iids->cids += p_mngr->conn_cfg[type].cid_count;
                vf_cids += p_mngr->conn_cfg[type].cids_per_vf;

                segs = p_mngr->conn_cfg[type].tid_seg;
                /* for each segment there is at most one
                 * protocol for which count is not 0.
                 */
                for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
                        iids->tids += segs[j].count;

                /* The last array element is for the VFs. As for PF
                 * segments there can be only one protocol for
                 * which this value is not 0.
                 */
                vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
        }

        iids->vf_cids = vf_cids;
        iids->tids += vf_tids * p_mngr->vf_count;

        DP_VERBOSE(p_hwfn, QED_MSG_ILT,
                   "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n",
                   iids->cids, iids->vf_cids, iids->tids, vf_tids);
}

static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
                                                u32 seg)
{
        struct qed_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr;
        u32 i;

        /* Find the protocol with tid count > 0 for this segment.
         * Note: there can only be one and this is already validated.
         */
        for (i = 0; i < MAX_CONN_TYPES; i++)
                if (p_cfg->conn_cfg[i].tid_seg[seg].count)
                        return &p_cfg->conn_cfg[i].tid_seg[seg];
        return NULL;
}

static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn,
                                  u32 num_srqs, u32 num_xrc_srqs)
{
        struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;

        p_mgr->srq_count = num_srqs;
        p_mgr->xrc_srq_count = num_xrc_srqs;
}

u32 qed_cxt_get_ilt_page_size(struct qed_hwfn *p_hwfn,
                              enum ilt_clients ilt_client)
{
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        struct qed_ilt_client_cfg *p_cli = &p_mngr->clients[ilt_client];

        return ILT_PAGE_IN_BYTES(p_cli->p_size.val);
}

static u32 qed_cxt_xrc_srqs_per_page(struct qed_hwfn *p_hwfn)
{
        u32 page_size;

        page_size = qed_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM);
        return page_size / XRC_SRQ_CXT_SIZE;
}

u32 qed_cxt_get_total_srq_count(struct qed_hwfn *p_hwfn)
{
        struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
        u32 total_srqs;

        total_srqs = p_mgr->srq_count + p_mgr->xrc_srq_count;

        return total_srqs;
}

/* set the iids count per protocol */
static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
                                        enum protocol_type type,
                                        u32 cid_count, u32 vf_cid_cnt)
{
        struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
        struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];

        p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
        p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN);

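        /* RoCE supports dynamic (on-demand) ILT allocation, so round
         * the cid count up to span complete ILT pages while preserving
         * the doorbell-queue range alignment (see also
         * qed_ilt_get_dynamic_line_cnt()).
         */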
        if (type == PROTOCOLID_ROCE) {
                u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
                u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
                u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
                u32 align = elems_per_page * DQ_RANGE_ALIGN;

                p_conn->cid_count = roundup(p_conn->cid_count, align);
        }
}

u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
                                enum protocol_type type, u32 *vf_cid)
{
        if (vf_cid)
                *vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;

        return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
}

u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
                                enum protocol_type type)
{
        return p_hwfn->p_cxt_mngr->acquired[type].start_cid;
}

u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
                                enum protocol_type type)
{
        u32 cnt = 0;
        int i;

        for (i = 0; i < TASK_SEGMENTS; i++)
                cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count;

        return cnt;
}

static void qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
                                        enum protocol_type proto,
                                        u8 seg,
                                        u8 seg_type, u32 count, bool has_fl)
{
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        struct qed_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];

        p_seg->count = count;
        p_seg->has_fl_mem = has_fl;
        p_seg->type = seg_type;
}

static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
                                 struct qed_ilt_cli_blk *p_blk,
                                 u32 start_line, u32 total_size, u32 elem_size)
{
        u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);

        /* verify that it's called only once for each block */
        if (p_blk->total_size)
                return;

        p_blk->total_size = total_size;
        p_blk->real_size_in_page = 0;
        if (elem_size)
                p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
        p_blk->start_line = start_line;
}

static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
                                 struct qed_ilt_client_cfg *p_cli,
                                 struct qed_ilt_cli_blk *p_blk,
                                 u32 *p_line, enum ilt_clients client_id)
{
        if (!p_blk->total_size)
                return;

        if (!p_cli->active)
                p_cli->first.val = *p_line;

        p_cli->active = true;
        *p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
        p_cli->last.val = *p_line - 1;

        DP_VERBOSE(p_hwfn, QED_MSG_ILT,
                   "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
                   client_id, p_cli->first.val,
                   p_cli->last.val, p_blk->total_size,
                   p_blk->real_size_in_page, p_blk->start_line);
}

static u32 qed_ilt_get_dynamic_line_cnt(struct qed_hwfn *p_hwfn,
                                        enum ilt_clients ilt_client)
{
        u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count;
        struct qed_ilt_client_cfg *p_cli;
        u32 lines_to_skip = 0;
        u32 cxts_per_p;

        if (ilt_client == ILT_CLI_CDUC) {
                p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];

                cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /
                    (u32) CONN_CXT_SIZE(p_hwfn);

                lines_to_skip = cid_count / cxts_per_p;
        }

        return lines_to_skip;
}
static struct qed_ilt_client_cfg *qed_cxt_set_cli(struct qed_ilt_client_cfg
                                                  *p_cli)
{
        p_cli->active = false;
        p_cli->first.val = 0;
        p_cli->last.val = 0;
        return p_cli;
}

static struct qed_ilt_cli_blk *qed_cxt_set_blk(struct qed_ilt_cli_blk *p_blk)
{
        p_blk->total_size = 0;
        return p_blk;
}

static void qed_cxt_ilt_blk_reset(struct qed_hwfn *p_hwfn)
{
        struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
        u32 cli_idx, blk_idx;

        for (cli_idx = 0; cli_idx < MAX_ILT_CLIENTS; cli_idx++) {
                for (blk_idx = 0; blk_idx < ILT_CLI_PF_BLOCKS; blk_idx++)
                        clients[cli_idx].pf_blks[blk_idx].total_size = 0;

                for (blk_idx = 0; blk_idx < ILT_CLI_VF_BLOCKS; blk_idx++)
                        clients[cli_idx].vf_blks[blk_idx].total_size = 0;
        }
}

int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
{
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        u32 curr_line, total, i, task_size, line;
        struct qed_ilt_client_cfg *p_cli;
        struct qed_ilt_cli_blk *p_blk;
        struct qed_cdu_iids cdu_iids;
        struct qed_src_iids src_iids;
        struct qed_qm_iids qm_iids;
        struct qed_tm_iids tm_iids;
        struct qed_tid_seg *p_seg;

        memset(&qm_iids, 0, sizeof(qm_iids));
        memset(&cdu_iids, 0, sizeof(cdu_iids));
        memset(&src_iids, 0, sizeof(src_iids));
        memset(&tm_iids, 0, sizeof(tm_iids));

        p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);

        /* Reset all ILT blocks at the beginning of ILT computing in order
         * to prevent memory allocation for irrelevant blocks afterwards.
         */
        qed_cxt_ilt_blk_reset(p_hwfn);

        DP_VERBOSE(p_hwfn, QED_MSG_ILT,
                   "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
                   p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);

        /* CDUC */
        p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUC]);

        curr_line = p_mngr->pf_start_line;

        /* CDUC PF */
        p_cli->pf_total_lines = 0;

        /* get the counters for the CDUC and QM clients */
        qed_cxt_cdu_iids(p_mngr, &cdu_iids);

        p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUC_BLK]);

        total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);

        qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
                             total, CONN_CXT_SIZE(p_hwfn));

        qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
        p_cli->pf_total_lines = curr_line - p_blk->start_line;

        p_blk->dynamic_line_cnt = qed_ilt_get_dynamic_line_cnt(p_hwfn,
                                                               ILT_CLI_CDUC);

        /* CDUC VF */
        p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUC_BLK]);
        total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);

        qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
                             total, CONN_CXT_SIZE(p_hwfn));

        qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
        p_cli->vf_total_lines = curr_line - p_blk->start_line;

        for (i = 1; i < p_mngr->vf_count; i++)
                qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                     ILT_CLI_CDUC);

        /* CDUT PF */
        p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUT]);
        p_cli->first.val = curr_line;

        /* first the 'working' task memory */
        for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
                p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
                if (!p_seg || p_seg->count == 0)
                        continue;

                p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUT_SEG_BLK(i)]);
                total = p_seg->count * p_mngr->task_type_size[p_seg->type];
                qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
                                     p_mngr->task_type_size[p_seg->type]);

                qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                     ILT_CLI_CDUT);
        }

        /* next the 'init' task memory (forced load memory) */
        for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
                p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
                if (!p_seg || p_seg->count == 0)
                        continue;

                p_blk =
                    qed_cxt_set_blk(&p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]);

                if (!p_seg->has_fl_mem) {
                        /* The segment is active (total size of 'working'
                         * memory is > 0) but has no FL (forced-load, Init)
                         * memory. Thus:
                         *
                         * 1.   The total-size in the corresponding FL block of
                         *      the ILT client is set to 0 - no ILT lines are
                         *      provisioned and no ILT memory allocated.
                         *
                         * 2.   The start-line of said block is set to the
                         *      start line of the matching working memory
                         *      block in the ILT client. This is later used to
                         *      configure the CDU segment offset registers and
                         *      results in FL commands for TIDs of this
                         *      segment behaving as regular load commands
                         *      (loading TIDs from the working memory).
                         */
                        line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line;

                        qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
                        continue;
                }
                total = p_seg->count * p_mngr->task_type_size[p_seg->type];

                qed_ilt_cli_blk_fill(p_cli, p_blk,
                                     curr_line, total,
                                     p_mngr->task_type_size[p_seg->type]);

                qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                     ILT_CLI_CDUT);
        }
        p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line;

        /* CDUT VF */
        p_seg = qed_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);
        if (p_seg && p_seg->count) {
                /* Strictly speaking we need to iterate over all VF
                 * task segment types, but a VF has only 1 segment
                 */

                /* 'working' memory */
                total = p_seg->count * p_mngr->task_type_size[p_seg->type];

                p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUT_SEG_BLK(0)]);
                qed_ilt_cli_blk_fill(p_cli, p_blk,
                                     curr_line, total,
                                     p_mngr->task_type_size[p_seg->type]);

                qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                     ILT_CLI_CDUT);

                /* 'init' memory */
                p_blk =
                    qed_cxt_set_blk(&p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]);
                if (!p_seg->has_fl_mem) {
                        /* see comment above */
                        line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
                        qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
                } else {
                        task_size = p_mngr->task_type_size[p_seg->type];
                        qed_ilt_cli_blk_fill(p_cli, p_blk,
                                             curr_line, total, task_size);
                        qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                             ILT_CLI_CDUT);
                }
                p_cli->vf_total_lines = curr_line -
                    p_cli->vf_blks[0].start_line;

                /* Now for the rest of the VFs */
                for (i = 1; i < p_mngr->vf_count; i++) {
                        p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
                        qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                             ILT_CLI_CDUT);

                        p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
                        qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                             ILT_CLI_CDUT);
                }
        }

        /* QM */
        p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_QM]);
        p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);

        qed_cxt_qm_iids(p_hwfn, &qm_iids);
        total = qed_qm_pf_mem_size(qm_iids.cids,
                                   qm_iids.vf_cids, qm_iids.tids,
                                   p_hwfn->qm_info.num_pqs,
                                   p_hwfn->qm_info.num_vf_pqs);

        DP_VERBOSE(p_hwfn,
                   QED_MSG_ILT,
                   "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n",
                   qm_iids.cids,
                   qm_iids.vf_cids,
                   qm_iids.tids,
                   p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);

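        /* qed_qm_pf_mem_size() reports the size in 4K pages, hence the
         * conversion to bytes (* 0x1000) for the ILT block fill.
         */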
        qed_ilt_cli_blk_fill(p_cli, p_blk,
                             curr_line, total * 0x1000,
                             QM_PQ_ELEMENT_SIZE);

        qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
        p_cli->pf_total_lines = curr_line - p_blk->start_line;

        /* SRC */
        p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_SRC]);
        qed_cxt_src_iids(p_mngr, &src_iids);

        /* Both the PF and VFs searcher connections are stored in the per PF
         * database. Thus sum the PF searcher cids and all the VFs searcher
         * cids.
         */
        total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
        if (total) {
                u32 local_max = max_t(u32, total,
                                      SRC_MIN_NUM_ELEMS);

                total = roundup_pow_of_two(local_max);

                p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
                qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
                                     total * sizeof(struct src_ent),
                                     sizeof(struct src_ent));

                qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                     ILT_CLI_SRC);
                p_cli->pf_total_lines = curr_line - p_blk->start_line;
        }

        /* TM PF */
        p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TM]);
        qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
        total = tm_iids.pf_cids + tm_iids.pf_tids_total;
        if (total) {
                p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
                qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
                                     total * TM_ELEM_SIZE, TM_ELEM_SIZE);

                qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                     ILT_CLI_TM);
                p_cli->pf_total_lines = curr_line - p_blk->start_line;
        }

        /* TM VF */
        total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
        if (total) {
                p_blk = qed_cxt_set_blk(&p_cli->vf_blks[0]);
                qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
                                     total * TM_ELEM_SIZE, TM_ELEM_SIZE);

                qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                     ILT_CLI_TM);

                p_cli->vf_total_lines = curr_line - p_blk->start_line;
                for (i = 1; i < p_mngr->vf_count; i++)
                        qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                             ILT_CLI_TM);
        }

        /* TSDM (SRQ CONTEXT) */
        total = qed_cxt_get_total_srq_count(p_hwfn);

        if (total) {
                p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]);
                p_blk = qed_cxt_set_blk(&p_cli->pf_blks[SRQ_BLK]);
                qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
                                     total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);

                qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                     ILT_CLI_TSDM);
                p_cli->pf_total_lines = curr_line - p_blk->start_line;
        }

        *line_count = curr_line - p_hwfn->p_cxt_mngr->pf_start_line;

        if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
            RESC_NUM(p_hwfn, QED_ILT))
                return -EINVAL;

        return 0;
}

u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines)
{
        struct qed_ilt_client_cfg *p_cli;
        u32 excess_lines, available_lines;
        struct qed_cxt_mngr *p_mngr;
        u32 ilt_page_size, elem_size;
        struct qed_tid_seg *p_seg;
        int i;

        available_lines = RESC_NUM(p_hwfn, QED_ILT);
        excess_lines = used_lines - available_lines;

        if (!excess_lines)
                return 0;

        if (!QED_IS_RDMA_PERSONALITY(p_hwfn))
                return 0;

        p_mngr = p_hwfn->p_cxt_mngr;
        p_cli = &p_mngr->clients[ILT_CLI_CDUT];
        ilt_page_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);

        for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
                p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
                if (!p_seg || p_seg->count == 0)
                        continue;

                elem_size = p_mngr->task_type_size[p_seg->type];
                if (!elem_size)
                        continue;

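                /* Each excess ILT line would have held
                 * ilt_page_size / elem_size tasks of this segment's
                 * type, so report the excess in task elements.
                 */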
                return (ilt_page_size / elem_size) * excess_lines;
        }

        DP_NOTICE(p_hwfn, "failed computing excess ILT lines\n");
        return 0;
}

static void qed_cxt_src_t2_free(struct qed_hwfn *p_hwfn)
{
        struct qed_src_t2 *p_t2 = &p_hwfn->p_cxt_mngr->src_t2;
        u32 i;

        if (!p_t2 || !p_t2->dma_mem)
                return;

        for (i = 0; i < p_t2->num_pages; i++)
                if (p_t2->dma_mem[i].virt_addr)
                        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                          p_t2->dma_mem[i].size,
                                          p_t2->dma_mem[i].virt_addr,
                                          p_t2->dma_mem[i].phys_addr);

        kfree(p_t2->dma_mem);
        p_t2->dma_mem = NULL;
}

static int
qed_cxt_t2_alloc_pages(struct qed_hwfn *p_hwfn,
                       struct qed_src_t2 *p_t2, u32 total_size, u32 page_size)
{
        void **p_virt;
        u32 size, i;

        if (!p_t2 || !p_t2->dma_mem)
                return -EINVAL;

        for (i = 0; i < p_t2->num_pages; i++) {
                size = min_t(u32, total_size, page_size);
                p_virt = &p_t2->dma_mem[i].virt_addr;

                *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                             size,
                                             &p_t2->dma_mem[i].phys_addr,
                                             GFP_KERNEL);
                if (!p_t2->dma_mem[i].virt_addr)
                        return -ENOMEM;

                memset(*p_virt, 0, size);
                p_t2->dma_mem[i].size = size;
                total_size -= size;
        }

        return 0;
}

static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        u32 conn_num, total_size, ent_per_page, psz, i;
        struct phys_mem_desc *p_t2_last_page;
        struct qed_ilt_client_cfg *p_src;
        struct qed_src_iids src_iids;
        struct qed_src_t2 *p_t2;
        int rc;

        memset(&src_iids, 0, sizeof(src_iids));

        /* if the SRC ILT client is inactive - there are no connections
         * requiring the searcher - leave.
         */
        p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC];
        if (!p_src->active)
                return 0;

        qed_cxt_src_iids(p_mngr, &src_iids);
        conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
        total_size = conn_num * sizeof(struct src_ent);

        /* use the same page size as the SRC ILT client */
        psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
        p_t2 = &p_mngr->src_t2;
        p_t2->num_pages = DIV_ROUND_UP(total_size, psz);

        /* allocate t2 */
        p_t2->dma_mem = kcalloc(p_t2->num_pages, sizeof(struct phys_mem_desc),
                                GFP_KERNEL);
        if (!p_t2->dma_mem) {
                DP_NOTICE(p_hwfn, "Failed to allocate t2 table\n");
                rc = -ENOMEM;
                goto t2_fail;
        }

        rc = qed_cxt_t2_alloc_pages(p_hwfn, p_t2, total_size, psz);
        if (rc)
                goto t2_fail;

        /* Set the t2 pointers */

        /* entries per page - must be a power of two */
        ent_per_page = psz / sizeof(struct src_ent);

        p_t2->first_free = (u64)p_t2->dma_mem[0].phys_addr;

        p_t2_last_page = &p_t2->dma_mem[(conn_num - 1) / ent_per_page];
        p_t2->last_free = (u64)p_t2_last_page->phys_addr +
            ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);

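        /* Chain all entries into a single free list: within each page
         * every entry points to the next one, the last entry of a page
         * points to the first entry of the following page, and the very
         * last entry's 'next' is left 0 to terminate the list.
         */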
        for (i = 0; i < p_t2->num_pages; i++) {
                u32 ent_num = min_t(u32,
                                    ent_per_page,
                                    conn_num);
                struct src_ent *entries = p_t2->dma_mem[i].virt_addr;
                u64 p_ent_phys = (u64)p_t2->dma_mem[i].phys_addr, val;
                u32 j;

                for (j = 0; j < ent_num - 1; j++) {
                        val = p_ent_phys + (j + 1) * sizeof(struct src_ent);
                        entries[j].next = cpu_to_be64(val);
                }

                if (i < p_t2->num_pages - 1)
                        val = (u64)p_t2->dma_mem[i + 1].phys_addr;
                else
                        val = 0;
                entries[j].next = cpu_to_be64(val);

                conn_num -= ent_num;
        }

        return 0;

t2_fail:
        qed_cxt_src_t2_free(p_hwfn);
        return rc;
}

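/* Iterate over only the active ILT clients. The dangling 'else' makes
 * the statement following a macro invocation the loop body for active
 * clients, so the macro can be used like a regular for_each helper.
 */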
#define for_each_ilt_valid_client(pos, clients) \
        for (pos = 0; pos < MAX_ILT_CLIENTS; pos++)     \
                if (!clients[pos].active) {     \
                        continue;               \
                } else                          \

/* Total number of ILT lines used by this PF */
static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
{
        u32 size = 0;
        u32 i;

        for_each_ilt_valid_client(i, ilt_clients)
            size += (ilt_clients[i].last.val - ilt_clients[i].first.val + 1);

        return size;
}

static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
{
        struct qed_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        u32 ilt_size, i;

        ilt_size = qed_cxt_ilt_shadow_size(p_cli);

        for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
                struct phys_mem_desc *p_dma = &p_mngr->ilt_shadow[i];

                if (p_dma->virt_addr)
                        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                          p_dma->size, p_dma->virt_addr,
                                          p_dma->phys_addr);
                p_dma->virt_addr = NULL;
        }
        kfree(p_mngr->ilt_shadow);
}

static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
                             struct qed_ilt_cli_blk *p_blk,
                             enum ilt_clients ilt_client,
                             u32 start_line_offset)
{
        struct phys_mem_desc *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
        u32 lines, line, sz_left, lines_to_skip = 0;

        /* Special handling for RoCE that supports dynamic allocation */
        if (QED_IS_RDMA_PERSONALITY(p_hwfn) &&
            ((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM))
                return 0;

        lines_to_skip = p_blk->dynamic_line_cnt;

        if (!p_blk->total_size)
                return 0;

        sz_left = p_blk->total_size;
        lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip;
        line = p_blk->start_line + start_line_offset -
            p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip;

        for (; lines; lines--) {
                dma_addr_t p_phys;
                void *p_virt;
                u32 size;

                size = min_t(u32, sz_left, p_blk->real_size_in_page);
                p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size,
                                            &p_phys, GFP_KERNEL);
                if (!p_virt)
                        return -ENOMEM;

                ilt_shadow[line].phys_addr = p_phys;
                ilt_shadow[line].virt_addr = p_virt;
                ilt_shadow[line].size = size;

                DP_VERBOSE(p_hwfn, QED_MSG_ILT,
                           "ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n",
                           line, (u64)p_phys, p_virt, size);

                sz_left -= size;
                line++;
        }

        return 0;
}

static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        struct qed_ilt_client_cfg *clients = p_mngr->clients;
        struct qed_ilt_cli_blk *p_blk;
        u32 size, i, j, k;
        int rc;

        size = qed_cxt_ilt_shadow_size(clients);
        p_mngr->ilt_shadow = kcalloc(size, sizeof(struct phys_mem_desc),
                                     GFP_KERNEL);
        if (!p_mngr->ilt_shadow) {
                rc = -ENOMEM;
                goto ilt_shadow_fail;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_ILT,
                   "Allocated 0x%x bytes for ilt shadow\n",
                   (u32)(size * sizeof(struct phys_mem_desc)));

        for_each_ilt_valid_client(i, clients) {
                for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
                        p_blk = &clients[i].pf_blks[j];
                        rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
                        if (rc)
                                goto ilt_shadow_fail;
                }
                for (k = 0; k < p_mngr->vf_count; k++) {
                        for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
                                u32 lines = clients[i].vf_total_lines * k;

                                p_blk = &clients[i].vf_blks[j];
                                rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines);
                                if (rc)
                                        goto ilt_shadow_fail;
                        }
                }
        }

        return 0;

ilt_shadow_fail:
        qed_ilt_shadow_free(p_hwfn);
        return rc;
}

static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
{
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        u32 type, vf;

        for (type = 0; type < MAX_CONN_TYPES; type++) {
                kfree(p_mngr->acquired[type].cid_map);
                p_mngr->acquired[type].max_count = 0;
                p_mngr->acquired[type].start_cid = 0;

                for (vf = 0; vf < MAX_NUM_VFS; vf++) {
                        kfree(p_mngr->acquired_vf[type][vf].cid_map);
                        p_mngr->acquired_vf[type][vf].max_count = 0;
                        p_mngr->acquired_vf[type][vf].start_cid = 0;
                }
        }
}

static int
qed_cid_map_alloc_single(struct qed_hwfn *p_hwfn,
                         u32 type,
                         u32 cid_start,
                         u32 cid_count, struct qed_cid_acquired_map *p_map)
{
        u32 size;

        if (!cid_count)
                return 0;

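        /* One bit per cid, rounded up to a whole number of unsigned
         * longs so the map is safe to use with the bitmap helpers.
         */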
        size = DIV_ROUND_UP(cid_count,
                            sizeof(unsigned long) * BITS_PER_BYTE) *
               sizeof(unsigned long);
        p_map->cid_map = kzalloc(size, GFP_KERNEL);
        if (!p_map->cid_map)
                return -ENOMEM;

        p_map->max_count = cid_count;
        p_map->start_cid = cid_start;

        DP_VERBOSE(p_hwfn, QED_MSG_CXT,
                   "Type %08x start: %08x count %08x\n",
                   type, p_map->start_cid, p_map->max_count);

        return 0;
}

static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        u32 start_cid = 0, vf_start_cid = 0;
        u32 type, vf;

        for (type = 0; type < MAX_CONN_TYPES; type++) {
                struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[type];
                struct qed_cid_acquired_map *p_map;

                /* Handle PF maps */
                p_map = &p_mngr->acquired[type];
                if (qed_cid_map_alloc_single(p_hwfn, type, start_cid,
                                             p_cfg->cid_count, p_map))
                        goto cid_map_fail;

                /* Handle VF maps */
                for (vf = 0; vf < MAX_NUM_VFS; vf++) {
                        p_map = &p_mngr->acquired_vf[type][vf];
                        if (qed_cid_map_alloc_single(p_hwfn, type,
                                                     vf_start_cid,
                                                     p_cfg->cids_per_vf, p_map))
                                goto cid_map_fail;
                }

                start_cid += p_cfg->cid_count;
                vf_start_cid += p_cfg->cids_per_vf;
        }

        return 0;

cid_map_fail:
        qed_cid_map_free(p_hwfn);
        return -ENOMEM;
}

int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_ilt_client_cfg *clients;
        struct qed_cxt_mngr *p_mngr;
        u32 i;

        p_mngr = kzalloc(sizeof(*p_mngr), GFP_KERNEL);
        if (!p_mngr)
                return -ENOMEM;

        /* Initialize ILT client registers */
        clients = p_mngr->clients;
        clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
        clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
        clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);

        clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
        clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
        clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);

        clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT);
        clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT);
        clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE);

        clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT);
        clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT);
        clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE);

        clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT);
        clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT);
        clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE);

        clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
        clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
        clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);

        /* default ILT page size for all clients is 64K */
        for (i = 0; i < MAX_ILT_CLIENTS; i++)
                p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;

        p_mngr->conn_ctx_size = CONN_CXT_SIZE(p_hwfn);

        /* Initialize task sizes */
        p_mngr->task_type_size[0] = TYPE0_TASK_CXT_SIZE(p_hwfn);
        p_mngr->task_type_size[1] = TYPE1_TASK_CXT_SIZE(p_hwfn);

        if (p_hwfn->cdev->p_iov_info) {
                p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;
                p_mngr->first_vf_in_pf =
                        p_hwfn->cdev->p_iov_info->first_vf_in_pf;
        }

        /* Initialize the dynamic ILT allocation mutex */
        mutex_init(&p_mngr->mutex);

        /* Set the cxt mngr pointer prior to further allocations */
        p_hwfn->p_cxt_mngr = p_mngr;

        return 0;
}

int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
{
        int rc;

        /* Allocate the ILT shadow table */
        rc = qed_ilt_shadow_alloc(p_hwfn);
        if (rc)
                goto tables_alloc_fail;

        /* Allocate the T2 table */
        rc = qed_cxt_src_t2_alloc(p_hwfn);
        if (rc)
                goto tables_alloc_fail;

        /* Allocate and initialize the acquired cids bitmaps */
        rc = qed_cid_map_alloc(p_hwfn);
        if (rc)
                goto tables_alloc_fail;

        return 0;

tables_alloc_fail:
        qed_cxt_mngr_free(p_hwfn);
        return rc;
}

void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
{
        if (!p_hwfn->p_cxt_mngr)
                return;

        qed_cid_map_free(p_hwfn);
        qed_cxt_src_t2_free(p_hwfn);
        qed_ilt_shadow_free(p_hwfn);
        kfree(p_hwfn->p_cxt_mngr);

        p_hwfn->p_cxt_mngr = NULL;
}

void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
{
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        struct qed_cid_acquired_map *p_map;
        struct qed_conn_type_cfg *p_cfg;
        int type;
        u32 len;

        /* Reset acquired cids */
        for (type = 0; type < MAX_CONN_TYPES; type++) {
                u32 vf;

                p_cfg = &p_mngr->conn_cfg[type];
                if (p_cfg->cid_count) {
                        p_map = &p_mngr->acquired[type];
                        len = DIV_ROUND_UP(p_map->max_count,
                                           sizeof(unsigned long) *
                                           BITS_PER_BYTE) *
                              sizeof(unsigned long);
                        memset(p_map->cid_map, 0, len);
                }

                if (!p_cfg->cids_per_vf)
                        continue;

                for (vf = 0; vf < MAX_NUM_VFS; vf++) {
                        p_map = &p_mngr->acquired_vf[type][vf];
                        len = DIV_ROUND_UP(p_map->max_count,
                                           sizeof(unsigned long) *
                                           BITS_PER_BYTE) *
                              sizeof(unsigned long);
                        memset(p_map->cid_map, 0, len);
                }
        }
}

/* CDU Common */
#define CDUC_CXT_SIZE_SHIFT \
        CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT

#define CDUC_CXT_SIZE_MASK \
        (CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)

#define CDUC_BLOCK_WASTE_SHIFT \
        CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT

#define CDUC_BLOCK_WASTE_MASK \
        (CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)

#define CDUC_NCIB_SHIFT \
        CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT

#define CDUC_NCIB_MASK \
        (CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)

#define CDUT_TYPE0_CXT_SIZE_SHIFT \
        CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT

#define CDUT_TYPE0_CXT_SIZE_MASK                \
        (CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >> \
         CDUT_TYPE0_CXT_SIZE_SHIFT)

#define CDUT_TYPE0_BLOCK_WASTE_SHIFT \
        CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT

#define CDUT_TYPE0_BLOCK_WASTE_MASK                    \
        (CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >> \
         CDUT_TYPE0_BLOCK_WASTE_SHIFT)

#define CDUT_TYPE0_NCIB_SHIFT \
        CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT

#define CDUT_TYPE0_NCIB_MASK                             \
        (CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >> \
         CDUT_TYPE0_NCIB_SHIFT)

#define CDUT_TYPE1_CXT_SIZE_SHIFT \
        CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT

#define CDUT_TYPE1_CXT_SIZE_MASK                \
        (CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >> \
         CDUT_TYPE1_CXT_SIZE_SHIFT)

#define CDUT_TYPE1_BLOCK_WASTE_SHIFT \
        CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT

#define CDUT_TYPE1_BLOCK_WASTE_MASK                    \
        (CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >> \
         CDUT_TYPE1_BLOCK_WASTE_SHIFT)

#define CDUT_TYPE1_NCIB_SHIFT \
        CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT

#define CDUT_TYPE1_NCIB_MASK                             \
        (CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >> \
         CDUT_TYPE1_NCIB_SHIFT)

static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
{
        u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;

        /* CDUC - connection configuration */
        page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
        cxt_size = CONN_CXT_SIZE(p_hwfn);
        elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
        block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
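        /* Illustrative numbers only (not the real context size): a
         * hypothetical 320-byte connection context in a 64K ILT page
         * would give elems_per_page = 204 and block_waste = 256 bytes.
         */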
1319
1320        SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
1321        SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
1322        SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
1323        STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);
1324
1325        /* CDUT - type-0 tasks configuration */
1326        page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val;
1327        cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0];
1328        elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
1329        block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
1330
1331        /* cxt size and block-waste are multipes of 8 */
1332        cdu_params = 0;
1333        SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3));
1334        SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3));
1335        SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page);
1336        STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params);
1337
1338        /* CDUT - type-1 tasks configuration */
1339        cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1];
1340        elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
1341        block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
1342
1343        /* cxt size and block-waste are multipes of 8 */
1344        cdu_params = 0;
1345        SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3));
1346        SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3));
1347        SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page);
1348        STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params);
1349}
1350
1351/* CDU PF */
1352#define CDU_SEG_REG_TYPE_SHIFT          CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT
1353#define CDU_SEG_REG_TYPE_MASK           0x1
1354#define CDU_SEG_REG_OFFSET_SHIFT        0
1355#define CDU_SEG_REG_OFFSET_MASK         CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK
1356
1357static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
1358{
1359        struct qed_ilt_client_cfg *p_cli;
1360        struct qed_tid_seg *p_seg;
1361        u32 cdu_seg_params, offset;
1362        int i;
1363
1364        static const u32 rt_type_offset_arr[] = {
1365                CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET,
1366                CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET,
1367                CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET,
1368                CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET
1369        };
1370
1371        static const u32 rt_type_offset_fl_arr[] = {
1372                CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET,
1373                CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET,
1374                CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET,
1375                CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET
1376        };
1377
1378        p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
1379
1380        /* There are initializations only for CDUT during pf Phase */
1381        for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
1382                /* Segment 0 */
1383                p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
1384                if (!p_seg)
1385                        continue;
1386
1387                /* Note: start_line is already adjusted for the CDU
1388                 * segment register granularity, so we just need to
1389                 * divide. Adjustment is implicit as we assume ILT
1390                 * Page size is larger than 32K!
1391                 */
1392                offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
1393                          (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line -
1394                           p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
1395
1396                cdu_seg_params = 0;
1397                SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
1398                SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
1399                STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params);
1400
1401                offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
1402                          (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line -
1403                           p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
1404
1405                cdu_seg_params = 0;
1406                SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
1407                SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
1408                STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params);
1409        }
1410}
1411
1412void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
1413                    struct qed_ptt *p_ptt, bool is_pf_loading)
1414{
1415        struct qed_qm_info *qm_info = &p_hwfn->qm_info;
1416        struct qed_qm_pf_rt_init_params params;
1417        struct qed_qm_iids iids;
1418
1419        memset(&iids, 0, sizeof(iids));
1420        qed_cxt_qm_iids(p_hwfn, &iids);
1421
1422        memset(&params, 0, sizeof(params));
1423        params.port_id = p_hwfn->port_id;
1424        params.pf_id = p_hwfn->rel_pf_id;
1425        params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
1426        params.is_pf_loading = is_pf_loading;
1427        params.num_pf_cids = iids.cids;
1428        params.num_vf_cids = iids.vf_cids;
1429        params.num_tids = iids.tids;
1430        params.start_pq = qm_info->start_pq;
1431        params.num_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs;
1432        params.num_vf_pqs = qm_info->num_vf_pqs;
1433        params.start_vport = qm_info->start_vport;
1434        params.num_vports = qm_info->num_vports;
1435        params.pf_wfq = qm_info->pf_wfq;
1436        params.pf_rl = qm_info->pf_rl;
1437        params.pq_params = qm_info->qm_pq_params;
1438        params.vport_params = qm_info->qm_vport_params;
1439
1440        qed_qm_pf_rt_init(p_hwfn, p_ptt, &params);
1441}
1442
1443/* CM PF */
1444static void qed_cm_init_pf(struct qed_hwfn *p_hwfn)
1445{
1446        /* XCM pure-LB queue */
1447        STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
1448                     qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB));
1449}
1450
1451/* DQ PF */
1452static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
1453{
1454        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1455        u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;
1456
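            /* Each DORQ max-ICID bound is cumulative: it is the running sum
             * of this and all preceding connection types' CID ranges,
             * expressed in DQ_RANGE_ALIGN-sized units via the shift.
             */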
1457        dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
1458        STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
1459
1460        dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT);
1461        STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid);
1462
1463        dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
1464        STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
1465
1466        dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT);
1467        STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid);
1468
1469        dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
1470        STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
1471
1472        dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT);
1473        STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid);
1474
1475        dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
1476        STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
1477
1478        dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT);
1479        STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid);
1480
1481        dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
1482        STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
1483
1484        dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT);
1485        STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid);
1486
1487        dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
1488        STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
1489
1490        dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
1491        STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);
1492
1493        /* Connection types 6 & 7 are not in use, yet they must still be
1494         * configured with the highest possible CID bounds. Leaving them
1495         * unconfigured means the defaults are used, and with a large
1496         * number of CIDs a bug may occur if those defaults are smaller
1497         * than dq_pf_max_cid / dq_vf_max_cid.
1498         */
1499        STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
1500        STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);
1501
1502        STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
1503        STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
1504}
1505
1506static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
1507{
1508        struct qed_ilt_client_cfg *ilt_clients;
1509        int i;
1510
1511        ilt_clients = p_hwfn->p_cxt_mngr->clients;
1512        for_each_ilt_valid_client(i, ilt_clients) {
1513                STORE_RT_REG(p_hwfn,
1514                             ilt_clients[i].first.reg,
1515                             ilt_clients[i].first.val);
1516                STORE_RT_REG(p_hwfn,
1517                             ilt_clients[i].last.reg, ilt_clients[i].last.val);
1518                STORE_RT_REG(p_hwfn,
1519                             ilt_clients[i].p_size.reg,
1520                             ilt_clients[i].p_size.val);
1521        }
1522}
1523
1524static void qed_ilt_vf_bounds_init(struct qed_hwfn *p_hwfn)
1525{
1526        struct qed_ilt_client_cfg *p_cli;
1527        u32 blk_factor;
1528
1529        /* For simplicity we set the 'block' to be an ILT page */
1530        if (p_hwfn->cdev->p_iov_info) {
1531                struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
1532
1533                STORE_RT_REG(p_hwfn,
1534                             PSWRQ2_REG_VF_BASE_RT_OFFSET,
1535                             p_iov->first_vf_in_pf);
1536                STORE_RT_REG(p_hwfn,
1537                             PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
1538                             p_iov->first_vf_in_pf + p_iov->total_vfs);
1539        }
1540
1541        p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
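            /* The BLOCKS_FACTOR value appears to encode the log2 of the
             * block (ILT page) size in KB; e.g. a 64K page gives
             * ilog2(64K >> 10) = 6.
             */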
1542        blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1543        if (p_cli->active) {
1544                STORE_RT_REG(p_hwfn,
1545                             PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET,
1546                             blk_factor);
1547                STORE_RT_REG(p_hwfn,
1548                             PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1549                             p_cli->pf_total_lines);
1550                STORE_RT_REG(p_hwfn,
1551                             PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
1552                             p_cli->vf_total_lines);
1553        }
1554
1555        p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
1556        blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1557        if (p_cli->active) {
1558                STORE_RT_REG(p_hwfn,
1559                             PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET,
1560                             blk_factor);
1561                STORE_RT_REG(p_hwfn,
1562                             PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1563                             p_cli->pf_total_lines);
1564                STORE_RT_REG(p_hwfn,
1565                             PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET,
1566                             p_cli->vf_total_lines);
1567        }
1568
1569        p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM];
1570        blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1571        if (p_cli->active) {
1572                STORE_RT_REG(p_hwfn,
1573                             PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor);
1574                STORE_RT_REG(p_hwfn,
1575                             PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1576                             p_cli->pf_total_lines);
1577                STORE_RT_REG(p_hwfn,
1578                             PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET,
1579                             p_cli->vf_total_lines);
1580        }
1581}
1582
1583/* ILT (PSWRQ2) PF */
1584static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
1585{
1586        struct qed_ilt_client_cfg *clients;
1587        struct qed_cxt_mngr *p_mngr;
1588        struct phys_mem_desc *p_shdw;
1589        u32 line, rt_offst, i;
1590
1591        qed_ilt_bounds_init(p_hwfn);
1592        qed_ilt_vf_bounds_init(p_hwfn);
1593
1594        p_mngr = p_hwfn->p_cxt_mngr;
1595        p_shdw = p_mngr->ilt_shadow;
1596        clients = p_hwfn->p_cxt_mngr->clients;
1597
1598        for_each_ilt_valid_client(i, clients) {
1599                /* The client's first val and the RT array are absolute,
1600                 * while ILT shadow lines are relative to pf_start_line.
1601                 */
1602                line = clients[i].first.val - p_mngr->pf_start_line;
1603                rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
1604                           clients[i].first.val * ILT_ENTRY_IN_REGS;
1605
1606                for (; line <= clients[i].last.val - p_mngr->pf_start_line;
1607                     line++, rt_offst += ILT_ENTRY_IN_REGS) {
1608                        u64 ilt_hw_entry = 0;
1609
1610                        /* virt_addr could be NULL in case of dynamic
1611                         * allocation
1612                         */
1613                        if (p_shdw[line].virt_addr) {
1614                                SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
1615                                SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
1616                                          (p_shdw[line].phys_addr >> 12));
1617
1618                                DP_VERBOSE(p_hwfn, QED_MSG_ILT,
1619                                           "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
1620                                           rt_offst, line, i,
1621                                           (u64)(p_shdw[line].phys_addr >> 12));
1622                        }
1623
1624                        STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
1625                }
1626        }
1627}
1628
1629/* SRC (Searcher) PF */
1630static void qed_src_init_pf(struct qed_hwfn *p_hwfn)
1631{
1632        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1633        u32 rounded_conn_num, conn_num, conn_max;
1634        struct qed_src_iids src_iids;
1635
1636        memset(&src_iids, 0, sizeof(src_iids));
1637        qed_cxt_src_iids(p_mngr, &src_iids);
1638        conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
1639        if (!conn_num)
1640                return;
1641
1642        conn_max = max_t(u32, conn_num, SRC_MIN_NUM_ELEMS);
1643        rounded_conn_num = roundup_pow_of_two(conn_max);
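            /* e.g. conn_num = 700 -> conn_max = 700, rounded up to 1024,
             * so the searcher hash is configured with ilog2(1024) = 10 bits.
             */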
1644
1645        STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, conn_num);
1646        STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET,
1647                     ilog2(rounded_conn_num));
1648
1649        STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
1650                         p_hwfn->p_cxt_mngr->src_t2.first_free);
1651        STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
1652                         p_hwfn->p_cxt_mngr->src_t2.last_free);
1653}
1654
1655/* Timers PF */
1656#define TM_CFG_NUM_IDS_SHIFT            0
1657#define TM_CFG_NUM_IDS_MASK             0xFFFFULL
1658#define TM_CFG_PRE_SCAN_OFFSET_SHIFT    16
1659#define TM_CFG_PRE_SCAN_OFFSET_MASK     0x1FFULL
1660#define TM_CFG_PARENT_PF_SHIFT          25
1661#define TM_CFG_PARENT_PF_MASK           0x7ULL
1662
1663#define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT  30
1664#define TM_CFG_CID_PRE_SCAN_ROWS_MASK   0x1FFULL
1665
1666#define TM_CFG_TID_OFFSET_SHIFT         30
1667#define TM_CFG_TID_OFFSET_MASK          0x7FFFFULL
1668#define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT  49
1669#define TM_CFG_TID_PRE_SCAN_ROWS_MASK   0x1FFULL
1670
1671static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
1672{
1673        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1674        u32 active_seg_mask = 0, tm_offset, rt_reg;
1675        struct qed_tm_iids tm_iids;
1676        u64 cfg_word;
1677        u8 i;
1678
1679        memset(&tm_iids, 0, sizeof(tm_iids));
1680        qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
1681
1682        /* @@@TBD No pre-scan for now */
1683
1684        /* Note: We assume consecutive VFs for a PF */
1685        for (i = 0; i < p_mngr->vf_count; i++) {
1686                cfg_word = 0;
1687                SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
1688                SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1689                SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
1690                SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);
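                    /* cfg_word is 64 bits wide, so it spans
                     * sizeof(cfg_word) / sizeof(u32) = 2 RT registers.
                     */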
1691                rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
1692                    (sizeof(cfg_word) / sizeof(u32)) *
1693                    (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
1694                STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1695        }
1696
1697        cfg_word = 0;
1698        SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids);
1699        SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1700        SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);       /* n/a for PF */
1701        SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);       /* scan all */
1702
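            /* Connection config entries are laid out VFs first, then PFs,
             * hence the NUM_OF_VFS() offset for this PF's entry.
             */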
1703        rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
1704            (sizeof(cfg_word) / sizeof(u32)) *
1705            (NUM_OF_VFS(p_hwfn->cdev) + p_hwfn->rel_pf_id);
1706        STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1707
1708        /* enable scan */
1709        STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
1710                     tm_iids.pf_cids ? 0x1 : 0x0);
1711
1712        /* @@@TBD how to enable the scan for the VFs */
1713
1714        tm_offset = tm_iids.per_vf_cids;
1715
1716        /* Note: We assume consecutive VFs for a PF */
1717        for (i = 0; i < p_mngr->vf_count; i++) {
1718                cfg_word = 0;
1719                SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids);
1720                SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1721                SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
1722                SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
1723                SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
1724
1725                rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
1726                    (sizeof(cfg_word) / sizeof(u32)) *
1727                    (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
1728
1729                STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1730        }
1731
1732        tm_offset = tm_iids.pf_cids;
1733        for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
1734                cfg_word = 0;
1735                SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]);
1736                SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1737                SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);
1738                SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
1739                SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
1740
1741                rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
1742                    (sizeof(cfg_word) / sizeof(u32)) *
1743                    (NUM_OF_VFS(p_hwfn->cdev) +
1744                     p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
1745
1746                STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1747                active_seg_mask |= (tm_iids.pf_tids[i] ? BIT(i) : 0);
1748
1749                tm_offset += tm_iids.pf_tids[i];
1750        }
1751
1752        if (QED_IS_RDMA_PERSONALITY(p_hwfn))
1753                active_seg_mask = 0;
1754
1755        STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);
1756
1757        /* @@@TBD how to enable the scan for the VFs */
1758}
1759
1760static void qed_prs_init_common(struct qed_hwfn *p_hwfn)
1761{
1762        if ((p_hwfn->hw_info.personality == QED_PCI_FCOE) &&
1763            p_hwfn->pf_params.fcoe_pf_params.is_target)
1764                STORE_RT_REG(p_hwfn,
1765                             PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET, 0);
1766}
1767
1768static void qed_prs_init_pf(struct qed_hwfn *p_hwfn)
1769{
1770        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1771        struct qed_conn_type_cfg *p_fcoe;
1772        struct qed_tid_seg *p_tid;
1773
1774        p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE];
1775
1776        /* If FCoE is active, set the MAX OX_ID (tid) in the Parser */
1777        if (!p_fcoe->cid_count)
1778                return;
1779
1780        p_tid = &p_fcoe->tid_seg[QED_CXT_FCOE_TID_SEG];
1781        if (p_hwfn->pf_params.fcoe_pf_params.is_target) {
1782                STORE_RT_REG_AGG(p_hwfn,
1783                                 PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET,
1784                                 p_tid->count);
1785        } else {
1786                STORE_RT_REG_AGG(p_hwfn,
1787                                 PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET,
1788                                 p_tid->count);
1789        }
1790}
1791
1792void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
1793{
1794        qed_cdu_init_common(p_hwfn);
1795        qed_prs_init_common(p_hwfn);
1796}
1797
1798void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1799{
1800        qed_qm_init_pf(p_hwfn, p_ptt, true);
1801        qed_cm_init_pf(p_hwfn);
1802        qed_dq_init_pf(p_hwfn);
1803        qed_cdu_init_pf(p_hwfn);
1804        qed_ilt_init_pf(p_hwfn);
1805        qed_src_init_pf(p_hwfn);
1806        qed_tm_init_pf(p_hwfn);
1807        qed_prs_init_pf(p_hwfn);
1808}
1809
1810int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
1811                         enum protocol_type type, u32 *p_cid, u8 vfid)
1812{
1813        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1814        struct qed_cid_acquired_map *p_map;
1815        u32 rel_cid;
1816
1817        if (type >= MAX_CONN_TYPES) {
1818                DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
1819                return -EINVAL;
1820        }
1821
1822        if (vfid >= MAX_NUM_VFS && vfid != QED_CXT_PF_CID) {
1823                DP_NOTICE(p_hwfn, "VF [%02x] is out of range\n", vfid);
1824                return -EINVAL;
1825        }
1826
1827        /* Determine the right map to take this CID from */
1828        if (vfid == QED_CXT_PF_CID)
1829                p_map = &p_mngr->acquired[type];
1830        else
1831                p_map = &p_mngr->acquired_vf[type][vfid];
1832
1833        if (!p_map->cid_map) {
1834                DP_NOTICE(p_hwfn, "CID map for protocol %d not allocated\n", type);
1835                return -EINVAL;
1836        }
1837
1838        rel_cid = find_first_zero_bit(p_map->cid_map, p_map->max_count);
1839
1840        if (rel_cid >= p_map->max_count) {
1841                DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", type);
1842                return -EINVAL;
1843        }
1844
1845        __set_bit(rel_cid, p_map->cid_map);
1846
1847        *p_cid = rel_cid + p_map->start_cid;
1848
1849        DP_VERBOSE(p_hwfn, QED_MSG_CXT,
1850                   "Acquired cid 0x%08x [rel. %08x] vfid %02x type %d\n",
1851                   *p_cid, rel_cid, vfid, type);
1852
1853        return 0;
1854}
1855
1856int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
1857                        enum protocol_type type, u32 *p_cid)
1858{
1859        return _qed_cxt_acquire_cid(p_hwfn, type, p_cid, QED_CXT_PF_CID);
1860}
1861
1862static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
1863                                      u32 cid,
1864                                      u8 vfid,
1865                                      enum protocol_type *p_type,
1866                                      struct qed_cid_acquired_map **pp_map)
1867{
1868        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1869        u32 rel_cid;
1870
1871        /* Iterate over protocols and find matching cid range */
1872        for (*p_type = 0; *p_type < MAX_CONN_TYPES; (*p_type)++) {
1873                if (vfid == QED_CXT_PF_CID)
1874                        *pp_map = &p_mngr->acquired[*p_type];
1875                else
1876                        *pp_map = &p_mngr->acquired_vf[*p_type][vfid];
1877
1878                if (!((*pp_map)->cid_map))
1879                        continue;
1880                if (cid >= (*pp_map)->start_cid &&
1881                    cid < (*pp_map)->start_cid + (*pp_map)->max_count)
1882                        break;
1883        }
1884
1885        if (*p_type == MAX_CONN_TYPES) {
1886                DP_NOTICE(p_hwfn, "Invalid CID %d vfid %02x\n", cid, vfid);
1887                goto fail;
1888        }
1889
1890        rel_cid = cid - (*pp_map)->start_cid;
1891        if (!test_bit(rel_cid, (*pp_map)->cid_map)) {
1892                DP_NOTICE(p_hwfn, "CID %d [vfid %02x] not acquired\n",
1893                          cid, vfid);
1894                goto fail;
1895        }
1896
1897        return true;
1898fail:
1899        *p_type = MAX_CONN_TYPES;
1900        *pp_map = NULL;
1901        return false;
1902}
1903
1904void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid)
1905{
1906        struct qed_cid_acquired_map *p_map = NULL;
1907        enum protocol_type type;
1908        bool b_acquired;
1909        u32 rel_cid;
1910
1911        if (vfid != QED_CXT_PF_CID && vfid >= MAX_NUM_VFS) {
1912                DP_NOTICE(p_hwfn,
1913                          "Trying to return incorrect CID belonging to VF %02x\n",
1914                          vfid);
1915                return;
1916        }
1917
1918        /* Test acquired and find matching per-protocol map */
1919        b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, vfid,
1920                                               &type, &p_map);
1921
1922        if (!b_acquired)
1923                return;
1924
1925        rel_cid = cid - p_map->start_cid;
1926        clear_bit(rel_cid, p_map->cid_map);
1927
1928        DP_VERBOSE(p_hwfn, QED_MSG_CXT,
1929                   "Released CID 0x%08x [rel. %08x] vfid %02x type %d\n",
1930                   cid, rel_cid, vfid, type);
1931}
1932
1933void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid)
1934{
1935        _qed_cxt_release_cid(p_hwfn, cid, QED_CXT_PF_CID);
1936}
1937
1938int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
1939{
1940        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1941        struct qed_cid_acquired_map *p_map = NULL;
1942        u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
1943        enum protocol_type type;
1944        bool b_acquired;
1945
1946        /* Test acquired and find matching per-protocol map */
1947        b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid,
1948                                               QED_CXT_PF_CID, &type, &p_map);
1949
1950        if (!b_acquired)
1951                return -EINVAL;
1952
1953        /* set the protocol type */
1954        p_info->type = type;
1955
1956        /* compute context virtual pointer */
1957        hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
1958
1959        conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
1960        cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
1961        line = p_info->iid / cxts_per_p;
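            /* e.g. with a 64K ILT page and a 320B connection context
             * (illustrative sizes), cxts_per_p = 204, so iid 500 maps to
             * ILT line 2, slot 92 within that page.
             */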
1962
1963        /* Make sure context is allocated (dynamic allocation) */
1964        if (!p_mngr->ilt_shadow[line].virt_addr)
1965                return -EINVAL;
1966
1967        p_info->p_cxt = p_mngr->ilt_shadow[line].virt_addr +
1968                        (p_info->iid % cxts_per_p) * conn_cxt_size;
1969
1970        DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT),
1971                   "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
1972                   p_info->iid / cxts_per_p, p_info->p_cxt, p_info->iid);
1973
1974        return 0;
1975}
1976
1977static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
1978                                   struct qed_rdma_pf_params *p_params,
1979                                   u32 num_tasks)
1980{
1981        u32 num_cons, num_qps;
1982        enum protocol_type proto;
1983
1984        if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) {
1985                DP_VERBOSE(p_hwfn, QED_MSG_SP,
1986                           "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n");
1987                p_hwfn->hw_info.personality = QED_PCI_ETH_ROCE;
1988        }
1989
1990        switch (p_hwfn->hw_info.personality) {
1991        case QED_PCI_ETH_IWARP:
1992                /* Each QP requires one connection */
1993                num_cons = min_t(u32, IWARP_MAX_QPS, p_params->num_qps);
1994                proto = PROTOCOLID_IWARP;
1995                break;
1996        case QED_PCI_ETH_ROCE:
1997                num_qps = min_t(u32, ROCE_MAX_QPS, p_params->num_qps);
1998                num_cons = num_qps * 2; /* each QP requires two connections */
1999                proto = PROTOCOLID_ROCE;
2000                break;
2001        default:
2002                return;
2003        }
2004
2005        if (num_cons && num_tasks) {
2006                u32 num_srqs, num_xrc_srqs;
2007
2008                qed_cxt_set_proto_cid_count(p_hwfn, proto, num_cons, 0);
2009
2010                /* Deliberately passing ROCE for the task id, since
2011                 * iWARP and RoCE share the task-id space.
2012                 */
2013                qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ROCE,
2014                                            QED_CXT_ROCE_TID_SEG, 1,
2015                                            num_tasks, false);
2016
2017                num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs);
2018
2019                /* XRC SRQs populate a single ILT page */
2020                num_xrc_srqs = qed_cxt_xrc_srqs_per_page(p_hwfn);
2021
2022                qed_cxt_set_srq_count(p_hwfn, num_srqs, num_xrc_srqs);
2023        } else {
2024                DP_INFO(p_hwfn->cdev,
2025                        "RDMA personality used without setting params!\n");
2026        }
2027}
2028
2029int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
2030{
2031        /* Set the number of required CORE connections */
2032        u32 core_cids = 1; /* SPQ */
2033
2034        if (p_hwfn->using_ll2)
2035                core_cids += 4;
2036        qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
2037
2038        switch (p_hwfn->hw_info.personality) {
2039        case QED_PCI_ETH_RDMA:
2040        case QED_PCI_ETH_IWARP:
2041        case QED_PCI_ETH_ROCE:
2042        {
2043                qed_rdma_set_pf_params(p_hwfn,
2044                                       &p_hwfn->pf_params.rdma_pf_params,
2045                                       rdma_tasks);
2046
2047                /* No need for break since RoCE coexists with Ethernet */
2048        }
2049                fallthrough;
2050        case QED_PCI_ETH:
2051        {
2052                struct qed_eth_pf_params *p_params =
2053                    &p_hwfn->pf_params.eth_pf_params;
2054
2055                if (!p_params->num_vf_cons)
2056                        p_params->num_vf_cons =
2057                            ETH_PF_PARAMS_VF_CONS_DEFAULT;
2058                qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
2059                                            p_params->num_cons,
2060                                            p_params->num_vf_cons);
2061                p_hwfn->p_cxt_mngr->arfs_count = p_params->num_arfs_filters;
2062                break;
2063        }
2064        case QED_PCI_FCOE:
2065        {
2066                struct qed_fcoe_pf_params *p_params;
2067
2068                p_params = &p_hwfn->pf_params.fcoe_pf_params;
2069
2070                if (p_params->num_cons && p_params->num_tasks) {
2071                        qed_cxt_set_proto_cid_count(p_hwfn,
2072                                                    PROTOCOLID_FCOE,
2073                                                    p_params->num_cons,
2074                                                    0);
2075                        qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_FCOE,
2076                                                    QED_CXT_FCOE_TID_SEG, 0,
2077                                                    p_params->num_tasks, true);
2078                } else {
2079                        DP_INFO(p_hwfn->cdev,
2080                                "FCoE personality used without setting params!\n");
2081                }
2082                break;
2083        }
2084        case QED_PCI_ISCSI:
2085        {
2086                struct qed_iscsi_pf_params *p_params;
2087
2088                p_params = &p_hwfn->pf_params.iscsi_pf_params;
2089
2090                if (p_params->num_cons && p_params->num_tasks) {
2091                        qed_cxt_set_proto_cid_count(p_hwfn,
2092                                                    PROTOCOLID_TCP_ULP,
2093                                                    p_params->num_cons,
2094                                                    0);
2095                        qed_cxt_set_proto_tid_count(p_hwfn,
2096                                                    PROTOCOLID_TCP_ULP,
2097                                                    QED_CXT_TCP_ULP_TID_SEG,
2098                                                    0,
2099                                                    p_params->num_tasks,
2100                                                    true);
2101                } else {
2102                        DP_INFO(p_hwfn->cdev,
2103                                "iSCSI personality used without setting params!\n");
2104                }
2105                break;
2106        }
2107        case QED_PCI_NVMETCP:
2108        {
2109                struct qed_nvmetcp_pf_params *p_params;
2110
2111                p_params = &p_hwfn->pf_params.nvmetcp_pf_params;
2112
2113                if (p_params->num_cons && p_params->num_tasks) {
2114                        qed_cxt_set_proto_cid_count(p_hwfn,
2115                                                    PROTOCOLID_TCP_ULP,
2116                                                    p_params->num_cons,
2117                                                    0);
2118                        qed_cxt_set_proto_tid_count(p_hwfn,
2119                                                    PROTOCOLID_TCP_ULP,
2120                                                    QED_CXT_TCP_ULP_TID_SEG,
2121                                                    0,
2122                                                    p_params->num_tasks,
2123                                                    true);
2124                } else {
2125                        DP_INFO(p_hwfn->cdev,
2126                                "NVMeTCP personality used without setting params!\n");
2127                }
2128                break;
2129        }
2130        default:
2131                return -EINVAL;
2132        }
2133
2134        return 0;
2135}
2136
2137int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
2138                             struct qed_tid_mem *p_info)
2139{
2140        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
2141        u32 proto, seg, total_lines, i, shadow_line;
2142        struct qed_ilt_client_cfg *p_cli;
2143        struct qed_ilt_cli_blk *p_fl_seg;
2144        struct qed_tid_seg *p_seg_info;
2145
2146        /* Verify the personality */
2147        switch (p_hwfn->hw_info.personality) {
2148        case QED_PCI_FCOE:
2149                proto = PROTOCOLID_FCOE;
2150                seg = QED_CXT_FCOE_TID_SEG;
2151                break;
2152        case QED_PCI_ISCSI:
2153        case QED_PCI_NVMETCP:
2154                proto = PROTOCOLID_TCP_ULP;
2155                seg = QED_CXT_TCP_ULP_TID_SEG;
2156                break;
2157        default:
2158                return -EINVAL;
2159        }
2160
2161        p_cli = &p_mngr->clients[ILT_CLI_CDUT];
2162        if (!p_cli->active)
2163                return -EINVAL;
2164
2165        p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
2166        if (!p_seg_info->has_fl_mem)
2167                return -EINVAL;
2168
2169        p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
2170        total_lines = DIV_ROUND_UP(p_fl_seg->total_size,
2171                                   p_fl_seg->real_size_in_page);
2172
2173        for (i = 0; i < total_lines; i++) {
2174                shadow_line = i + p_fl_seg->start_line -
2175                    p_hwfn->p_cxt_mngr->pf_start_line;
2176                p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].virt_addr;
2177        }
2178        p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
2179            p_fl_seg->real_size_in_page;
2180        p_info->tid_size = p_mngr->task_type_size[p_seg_info->type];
2181        p_info->num_tids_per_block = p_fl_seg->real_size_in_page /
2182            p_info->tid_size;
2183
2184        return 0;
2185}
2186
2187/* This function is very RoCE-oriented; if another protocol needs this
2188 * feature in the future, the function will have to be made more generic.
2189 */
2190int
2191qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
2192                          enum qed_cxt_elem_type elem_type, u32 iid)
2193{
2194        u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line;
2195        struct tdif_task_context *tdif_context;
2196        struct qed_ilt_client_cfg *p_cli;
2197        struct qed_ilt_cli_blk *p_blk;
2198        struct qed_ptt *p_ptt;
2199        dma_addr_t p_phys;
2200        u64 ilt_hw_entry;
2201        void *p_virt;
2202        u32 flags1;
2203        int rc = 0;
2204
2205        switch (elem_type) {
2206        case QED_ELEM_CXT:
2207                p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
2208                elem_size = CONN_CXT_SIZE(p_hwfn);
2209                p_blk = &p_cli->pf_blks[CDUC_BLK];
2210                break;
2211        case QED_ELEM_SRQ:
2212                /* The first ILT page is not used for regular SRQs. Skip it. */
2213                iid += p_hwfn->p_cxt_mngr->xrc_srq_count;
2214                p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
2215                elem_size = SRQ_CXT_SIZE;
2216                p_blk = &p_cli->pf_blks[SRQ_BLK];
2217                break;
2218        case QED_ELEM_XRC_SRQ:
2219                p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
2220                elem_size = XRC_SRQ_CXT_SIZE;
2221                p_blk = &p_cli->pf_blks[SRQ_BLK];
2222                break;
2223        case QED_ELEM_TASK:
2224                p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2225                elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
2226                p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
2227                break;
2228        default:
2229                DP_NOTICE(p_hwfn, "Invalid elem type = %d\n", elem_type);
2230                return -EINVAL;
2231        }
2232
2233        /* Calculate line in ilt */
2234        hw_p_size = p_cli->p_size.val;
2235        elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
2236        line = p_blk->start_line + (iid / elems_per_p);
2237        shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line;
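            /* e.g. with a 64K ILT page and 64B elements (illustrative
             * sizes), elems_per_p = 1024, so iid 1500 lands on the block's
             * second page: line = start_line + 1.
             */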
2238
2239        /* If the line is already allocated, do nothing; otherwise allocate
2240         * it and write it to the PSWRQ2 registers.
2241         * This section can run in parallel from different contexts, and
2242         * thus mutex protection is needed.
2243         */
2244
2245        mutex_lock(&p_hwfn->p_cxt_mngr->mutex);
2246
2247        if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr)
2248                goto out0;
2249
2250        p_ptt = qed_ptt_acquire(p_hwfn);
2251        if (!p_ptt) {
2252                DP_NOTICE(p_hwfn,
2253                          "QED_TIME_OUT on ptt acquire - dynamic allocation\n");
2254                rc = -EBUSY;
2255                goto out0;
2256        }
2257
2258        p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
2259                                    p_blk->real_size_in_page, &p_phys,
2260                                    GFP_KERNEL);
2261        if (!p_virt) {
2262                rc = -ENOMEM;
2263                goto out1;
2264        }
2265
2266        /* configuration of refTagMask to 0xF is required for RoCE DIF MR only,
2267         * to compensate for a HW bug, but it is configured even if DIF is not
2268         * enabled. This is harmless and allows us to avoid a dedicated API. We
2269         * configure the field for all of the contexts on the newly allocated
2270         * page.
2271         */
2272        if (elem_type == QED_ELEM_TASK) {
2273                u32 elem_i;
2274                u8 *elem_start = (u8 *)p_virt;
2275                union type1_task_context *elem;
2276
2277                for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
2278                        elem = (union type1_task_context *)elem_start;
2279                        tdif_context = &elem->roce_ctx.tdif_context;
2280
2281                        flags1 = le32_to_cpu(tdif_context->flags1);
2282                        SET_FIELD(flags1, TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf);
2283                        tdif_context->flags1 = cpu_to_le32(flags1);
2284
2285                        elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
2286                }
2287        }
2288
2289        p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr = p_virt;
2290        p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr = p_phys;
2291        p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
2292            p_blk->real_size_in_page;
2293
2294        /* compute absolute offset */
2295        reg_offset = PSWRQ2_REG_ILT_MEMORY +
2296            (line * ILT_REG_SIZE_IN_BYTES * ILT_ENTRY_IN_REGS);
2297
2298        ilt_hw_entry = 0;
2299        SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
2300        SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
2301                  (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr
2302                   >> 12));
2303
2304        /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
2305        qed_dmae_host2grc(p_hwfn, p_ptt, (u64) (uintptr_t)&ilt_hw_entry,
2306                          reg_offset, sizeof(ilt_hw_entry) / sizeof(u32),
2307                          NULL);
2308
2309        if (elem_type == QED_ELEM_CXT) {
2310                u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
2311                    elems_per_p;
2312
2313                /* Update the relevant register in the parser */
2314                qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF,
2315                       last_cid_allocated - 1);
2316
2317                if (!p_hwfn->b_rdma_enabled_in_prs) {
2318                        /* Enable RDMA search */
2319                        qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
2320                        p_hwfn->b_rdma_enabled_in_prs = true;
2321                }
2322        }
2323
2324out1:
2325        qed_ptt_release(p_hwfn, p_ptt);
2326out0:
2327        mutex_unlock(&p_hwfn->p_cxt_mngr->mutex);
2328
2329        return rc;
2330}
2331
2332/* This function is very RoCE-oriented; if another protocol needs this
2333 * feature in the future, the function will have to be made more generic.
2334 */
2335static int
2336qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn,
2337                       enum qed_cxt_elem_type elem_type,
2338                       u32 start_iid, u32 count)
2339{
2340        u32 start_line, end_line, shadow_start_line, shadow_end_line;
2341        u32 reg_offset, elem_size, hw_p_size, elems_per_p;
2342        struct qed_ilt_client_cfg *p_cli;
2343        struct qed_ilt_cli_blk *p_blk;
2344        u32 end_iid = start_iid + count;
2345        struct qed_ptt *p_ptt;
2346        u64 ilt_hw_entry = 0;
2347        u32 i;
2348
2349        switch (elem_type) {
2350        case QED_ELEM_CXT:
2351                p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
2352                elem_size = CONN_CXT_SIZE(p_hwfn);
2353                p_blk = &p_cli->pf_blks[CDUC_BLK];
2354                break;
2355        case QED_ELEM_SRQ:
2356                p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
2357                elem_size = SRQ_CXT_SIZE;
2358                p_blk = &p_cli->pf_blks[SRQ_BLK];
2359                break;
2360        case QED_ELEM_XRC_SRQ:
2361                p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
2362                elem_size = XRC_SRQ_CXT_SIZE;
2363                p_blk = &p_cli->pf_blks[SRQ_BLK];
2364                break;
2365        case QED_ELEM_TASK:
2366                p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2367                elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
2368                p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
2369                break;
2370        default:
2371                DP_NOTICE(p_hwfn, "Invalid elem type = %d\n", elem_type);
2372                return -EINVAL;
2373        }
2374
2375        /* Calculate line in ilt */
2376        hw_p_size = p_cli->p_size.val;
2377        elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
2378        start_line = p_blk->start_line + (start_iid / elems_per_p);
2379        end_line = p_blk->start_line + (end_iid / elems_per_p);
2380        if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p))
2381                end_line--;
2382
2383        shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line;
2384        shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line;
2385
2386        p_ptt = qed_ptt_acquire(p_hwfn);
2387        if (!p_ptt) {
2388                DP_NOTICE(p_hwfn,
2389                          "QED_TIME_OUT on ptt acquire - dynamic allocation\n");
2390                return -EBUSY;
2391        }
2392
2393        for (i = shadow_start_line; i < shadow_end_line; i++) {
2394                if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr)
2395                        continue;
2396
2397                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
2398                                  p_hwfn->p_cxt_mngr->ilt_shadow[i].size,
2399                                  p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr,
2400                                  p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr);
2401
2402                p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr = NULL;
2403                p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr = 0;
2404                p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
2405
2406                /* compute absolute offset */
2407                reg_offset = PSWRQ2_REG_ILT_MEMORY +
2408                    ((start_line++) * ILT_REG_SIZE_IN_BYTES *
2409                     ILT_ENTRY_IN_REGS);
2410
2411                /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a
2412                 * wide-bus.
2413                 */
2414                qed_dmae_host2grc(p_hwfn, p_ptt,
2415                                  (u64) (uintptr_t) &ilt_hw_entry,
2416                                  reg_offset,
2417                                  sizeof(ilt_hw_entry) / sizeof(u32),
2418                                  NULL);
2419        }
2420
2421        qed_ptt_release(p_hwfn, p_ptt);
2422
2423        return 0;
2424}
2425
2426int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto)
2427{
2428        int rc;
2429        u32 cid;
2430
2431        /* Free Connection CXT */
2432        rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_CXT,
2433                                    qed_cxt_get_proto_cid_start(p_hwfn,
2434                                                                proto),
2435                                    qed_cxt_get_proto_cid_count(p_hwfn,
2436                                                                proto, &cid));
2437
2438        if (rc)
2439                return rc;
2440
2441        /* Free Task CXT (intentionally RoCE, as the task id is shared
2442         * between RoCE and iWARP)
2443         */
2444        proto = PROTOCOLID_ROCE;
2445        rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0,
2446                                    qed_cxt_get_proto_tid_count(p_hwfn, proto));
2447        if (rc)
2448                return rc;
2449
2450        /* Free TSDM CXT */
2451        rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_XRC_SRQ, 0,
2452                                    p_hwfn->p_cxt_mngr->xrc_srq_count);
            if (rc)
                    return rc;
2453
2454        rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ,
2455                                    p_hwfn->p_cxt_mngr->xrc_srq_count,
2456                                    p_hwfn->p_cxt_mngr->srq_count);
2457
2458        return rc;
2459}
2460
2461int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
2462                         u32 tid, u8 ctx_type, void **pp_task_ctx)
2463{
2464        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
2465        struct qed_ilt_client_cfg *p_cli;
2466        struct qed_tid_seg *p_seg_info;
2467        struct qed_ilt_cli_blk *p_seg;
2468        u32 num_tids_per_block;
2469        u32 tid_size, ilt_idx;
2470        u32 total_lines;
2471        u32 proto, seg;
2472
2473        /* Verify the personality */
2474        switch (p_hwfn->hw_info.personality) {
2475        case QED_PCI_FCOE:
2476                proto = PROTOCOLID_FCOE;
2477                seg = QED_CXT_FCOE_TID_SEG;
2478                break;
2479        case QED_PCI_ISCSI:
2480        case QED_PCI_NVMETCP:
2481                proto = PROTOCOLID_TCP_ULP;
2482                seg = QED_CXT_TCP_ULP_TID_SEG;
2483                break;
2484        default:
2485                return -EINVAL;
2486        }
2487
2488        p_cli = &p_mngr->clients[ILT_CLI_CDUT];
2489        if (!p_cli->active)
2490                return -EINVAL;
2491
2492        p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
2493
2494        if (ctx_type == QED_CTX_WORKING_MEM) {
2495                p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)];
2496        } else if (ctx_type == QED_CTX_FL_MEM) {
2497                if (!p_seg_info->has_fl_mem)
2498                        return -EINVAL;
2499                p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
2500        } else {
2501                return -EINVAL;
2502        }
2503        total_lines = DIV_ROUND_UP(p_seg->total_size, p_seg->real_size_in_page);
2504        tid_size = p_mngr->task_type_size[p_seg_info->type];
2505        num_tids_per_block = p_seg->real_size_in_page / tid_size;
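            /* e.g. a page holding 512 tids of 128B each (illustrative
             * sizes): tid 1000 resolves to the segment's second ILT line,
             * slot 488 within that page.
             */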
2506
2507        if (tid / num_tids_per_block >= total_lines)
2508                return -EINVAL;
2509
2510        ilt_idx = tid / num_tids_per_block + p_seg->start_line -
2511                  p_mngr->pf_start_line;
2512        *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].virt_addr +
2513                       (tid % num_tids_per_block) * tid_size;
2514
2515        return 0;
2516}
2517
2518static u16 qed_blk_calculate_pages(struct qed_ilt_cli_blk *p_blk)
2519{
2520        if (p_blk->real_size_in_page == 0)
2521                return 0;
2522
2523        return DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
2524}
2525
2526u16 qed_get_cdut_num_pf_init_pages(struct qed_hwfn *p_hwfn)
2527{
2528        struct qed_ilt_client_cfg *p_cli;
2529        struct qed_ilt_cli_blk *p_blk;
2530        u16 i, pages = 0;
2531
2532        p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2533        for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
2534                p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
2535                pages += qed_blk_calculate_pages(p_blk);
2536        }
2537
2538        return pages;
2539}
2540
2541u16 qed_get_cdut_num_vf_init_pages(struct qed_hwfn *p_hwfn)
2542{
2543        struct qed_ilt_client_cfg *p_cli;
2544        struct qed_ilt_cli_blk *p_blk;
2545        u16 i, pages = 0;
2546
2547        p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2548        for (i = 0; i < NUM_TASK_VF_SEGMENTS; i++) {
2549                p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(i, VF)];
2550                pages += qed_blk_calculate_pages(p_blk);
2551        }
2552
2553        return pages;
2554}
2555
2556u16 qed_get_cdut_num_pf_work_pages(struct qed_hwfn *p_hwfn)
2557{
2558        struct qed_ilt_client_cfg *p_cli;
2559        struct qed_ilt_cli_blk *p_blk;
2560        u16 i, pages = 0;
2561
2562        p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2563        for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
2564                p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
2565                pages += qed_blk_calculate_pages(p_blk);
2566        }
2567
2568        return pages;
2569}
2570
2571u16 qed_get_cdut_num_vf_work_pages(struct qed_hwfn *p_hwfn)
2572{
2573        struct qed_ilt_client_cfg *p_cli;
2574        struct qed_ilt_cli_blk *p_blk;
2575        u16 pages = 0, i;
2576
2577        p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2578        for (i = 0; i < NUM_TASK_VF_SEGMENTS; i++) {
2579                p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(i)];
2580                pages += qed_blk_calculate_pages(p_blk);
2581        }
2582
2583        return pages;
2584}
2585