linux/drivers/net/ethernet/qlogic/qed/qed_cxt.c
   1/* QLogic qed NIC Driver
   2 * Copyright (c) 2015 QLogic Corporation
   3 *
   4 * This software is available under the terms of the GNU General Public License
   5 * (GPL) Version 2, available from the file COPYING in the main directory of
   6 * this source tree.
   7 */
   8
   9#include <linux/types.h>
  10#include <linux/bitops.h>
  11#include <linux/dma-mapping.h>
  12#include <linux/errno.h>
  13#include <linux/kernel.h>
  14#include <linux/list.h>
  15#include <linux/log2.h>
  16#include <linux/pci.h>
  17#include <linux/slab.h>
  18#include <linux/string.h>
  20#include "qed.h"
  21#include "qed_cxt.h"
  22#include "qed_dev_api.h"
  23#include "qed_hsi.h"
  24#include "qed_hw.h"
  25#include "qed_init_ops.h"
  26#include "qed_reg_addr.h"
  27#include "qed_sriov.h"
  28
  29/* Max number of connection types in HW (DQ/CDU etc.) */
  30#define MAX_CONN_TYPES          PROTOCOLID_COMMON
  31#define NUM_TASK_TYPES          2
  32#define NUM_TASK_PF_SEGMENTS    4
  33#define NUM_TASK_VF_SEGMENTS    1
  34
  35/* QM constants */
  36#define QM_PQ_ELEMENT_SIZE      4 /* in bytes */
  37
  38/* Doorbell-Queue constants */
  39#define DQ_RANGE_SHIFT          4
  40#define DQ_RANGE_ALIGN          BIT(DQ_RANGE_SHIFT)
  41
  42/* Searcher constants */
  43#define SRC_MIN_NUM_ELEMS 256
  44
  45/* Timers constants */
  46#define TM_SHIFT        7
  47#define TM_ALIGN        BIT(TM_SHIFT)
  48#define TM_ELEM_SIZE    4
  49
   50/* With RDMA enabled use a 64K ILT page to cover RoCE's maximum of 256K tasks. */
  51#define ILT_DEFAULT_HW_P_SIZE   (IS_ENABLED(CONFIG_QED_RDMA) ? 4 : 3)
  52
  53#define ILT_PAGE_IN_BYTES(hw_p_size)    (1U << ((hw_p_size) + 12))
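/* E.g. the default hw_p_size of 3 yields 1U << (3 + 12) = 32K pages,
 * and the RDMA value of 4 yields 64K pages.
 */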
  54#define ILT_CFG_REG(cli, reg)   PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
  55
  56/* ILT entry structure */
  57#define ILT_ENTRY_PHY_ADDR_MASK         0x000FFFFFFFFFFFULL
  58#define ILT_ENTRY_PHY_ADDR_SHIFT        0
  59#define ILT_ENTRY_VALID_MASK            0x1ULL
  60#define ILT_ENTRY_VALID_SHIFT           52
  61#define ILT_ENTRY_IN_REGS               2
  62#define ILT_REG_SIZE_IN_BYTES           4
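
/* Hedged sketch (illustration only, not called by the driver): how the
 * definitions above compose a 64-bit ILT entry before it is written out
 * as ILT_ENTRY_IN_REGS (2) registers of ILT_REG_SIZE_IN_BYTES (4) bytes
 * each. SET_FIELD() is the common qed bit-field helper; the shadow
 * page's physical address is assumed to be programmed in 4K units,
 * hence the '>> 12' (compare qed_ilt_init_pf() below).
 */
static inline u64 qed_ilt_entry_example(dma_addr_t p_phys)
{
        u64 ilt_hw_entry = 0;

        SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
        SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR, (u64)p_phys >> 12);

        return ilt_hw_entry;
}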
  63
  64/* connection context union */
  65union conn_context {
  66        struct core_conn_context core_ctx;
  67        struct eth_conn_context eth_ctx;
  68        struct iscsi_conn_context iscsi_ctx;
  69        struct roce_conn_context roce_ctx;
  70};
  71
  72/* TYPE-0 task context - iSCSI */
  73union type0_task_context {
  74        struct iscsi_task_context iscsi_ctx;
  75};
  76
  77/* TYPE-1 task context - ROCE */
  78union type1_task_context {
  79        struct rdma_task_context roce_ctx;
  80};
  81
  82struct src_ent {
  83        u8 opaque[56];
  84        u64 next;
  85};
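
/* sizeof(struct src_ent) is 64 (56 opaque bytes + an 8-byte pointer),
 * so e.g. a 32K ILT page holds exactly 512 entries. 'next' holds the
 * big-endian physical address of the following entry; see the
 * cpu_to_be64() chaining in qed_cxt_src_t2_alloc() below.
 */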
  86
  87#define CDUT_SEG_ALIGNMET 3     /* in 4k chunks */
  88#define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12))
  89
  90#define CONN_CXT_SIZE(p_hwfn) \
  91        ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
  92
  93#define SRQ_CXT_SIZE (sizeof(struct rdma_srq_context))
  94
  95#define TYPE0_TASK_CXT_SIZE(p_hwfn) \
  96        ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)
  97
  98/* Alignment is inherent to the type1_task_context structure */
  99#define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)
 100
  101/* PF per-protocol configuration object */
 102#define TASK_SEGMENTS   (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
 103#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
 104
 105struct qed_tid_seg {
 106        u32 count;
 107        u8 type;
 108        bool has_fl_mem;
 109};
 110
 111struct qed_conn_type_cfg {
 112        u32 cid_count;
 113        u32 cid_start;
 114        u32 cids_per_vf;
 115        struct qed_tid_seg tid_seg[TASK_SEGMENTS];
 116};
 117
 118/* ILT Client configuration, Per connection type (protocol) resources. */
 119#define ILT_CLI_PF_BLOCKS       (1 + NUM_TASK_PF_SEGMENTS * 2)
 120#define ILT_CLI_VF_BLOCKS       (1 + NUM_TASK_VF_SEGMENTS * 2)
 121#define CDUC_BLK                (0)
 122#define SRQ_BLK                 (0)
 123#define CDUT_SEG_BLK(n)         (1 + (u8)(n))
 124#define CDUT_FL_SEG_BLK(n, X)   (1 + (n) + NUM_TASK_ ## X ## _SEGMENTS)
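
/* Example with NUM_TASK_PF_SEGMENTS = 4: a PF client has 1 + 4 * 2 = 9
 * blocks. Index 0 is the single connection block (CDUC_BLK, reused as
 * SRQ_BLK by TSDM), indices 1..4 are the 'working' task segments
 * (CDUT_SEG_BLK(0..3)) and indices 5..8 the matching forced-load
 * segments (CDUT_FL_SEG_BLK(0..3, PF)).
 */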
 125
 126enum ilt_clients {
 127        ILT_CLI_CDUC,
 128        ILT_CLI_CDUT,
 129        ILT_CLI_QM,
 130        ILT_CLI_TM,
 131        ILT_CLI_SRC,
 132        ILT_CLI_TSDM,
 133        ILT_CLI_MAX
 134};
 135
 136struct ilt_cfg_pair {
 137        u32 reg;
 138        u32 val;
 139};
 140
 141struct qed_ilt_cli_blk {
 142        u32 total_size; /* 0 means not active */
 143        u32 real_size_in_page;
 144        u32 start_line;
 145        u32 dynamic_line_cnt;
 146};
 147
 148struct qed_ilt_client_cfg {
 149        bool active;
 150
 151        /* ILT boundaries */
 152        struct ilt_cfg_pair first;
 153        struct ilt_cfg_pair last;
 154        struct ilt_cfg_pair p_size;
 155
 156        /* ILT client blocks for PF */
 157        struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
 158        u32 pf_total_lines;
 159
 160        /* ILT client blocks for VFs */
 161        struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
 162        u32 vf_total_lines;
 163};
 164
 165/* Per Path -
 166 *      ILT shadow table
 167 *      Protocol acquired CID lists
 168 *      PF start line in ILT
 169 */
 170struct qed_dma_mem {
 171        dma_addr_t p_phys;
 172        void *p_virt;
 173        size_t size;
 174};
 175
 176struct qed_cid_acquired_map {
 177        u32             start_cid;
 178        u32             max_count;
 179        unsigned long   *cid_map;
 180};
 181
 182struct qed_cxt_mngr {
  183        /* Per-protocol configuration */
 184        struct qed_conn_type_cfg        conn_cfg[MAX_CONN_TYPES];
 185
 186        /* computed ILT structure */
 187        struct qed_ilt_client_cfg       clients[ILT_CLI_MAX];
 188
 189        /* Task type sizes */
 190        u32 task_type_size[NUM_TASK_TYPES];
 191
 192        /* total number of VFs for this hwfn -
 193         * ALL VFs are symmetric in terms of HW resources
 194         */
 195        u32                             vf_count;
 196
 197        /* total number of SRQ's for this hwfn */
 198        u32 srq_count;
 199
 200        /* Acquired CIDs */
 201        struct qed_cid_acquired_map     acquired[MAX_CONN_TYPES];
 202
 203        /* ILT  shadow table */
 204        struct qed_dma_mem              *ilt_shadow;
 205        u32                             pf_start_line;
 206
 207        /* Mutex for a dynamic ILT allocation */
 208        struct mutex mutex;
 209
 210        /* SRC T2 */
 211        struct qed_dma_mem *t2;
 212        u32 t2_num_pages;
 213        u64 first_free;
 214        u64 last_free;
  215};

  216static bool src_proto(enum protocol_type type)
 217{
 218        return type == PROTOCOLID_ISCSI ||
 219               type == PROTOCOLID_ROCE;
 220}
 221
 222static bool tm_cid_proto(enum protocol_type type)
 223{
 224        return type == PROTOCOLID_ISCSI ||
 225               type == PROTOCOLID_ROCE;
 226}
 227
 228/* counts the iids for the CDU/CDUC ILT client configuration */
 229struct qed_cdu_iids {
 230        u32 pf_cids;
 231        u32 per_vf_cids;
 232};
 233
 234static void qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr,
 235                             struct qed_cdu_iids *iids)
 236{
 237        u32 type;
 238
 239        for (type = 0; type < MAX_CONN_TYPES; type++) {
 240                iids->pf_cids += p_mngr->conn_cfg[type].cid_count;
 241                iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
 242        }
 243}
 244
 245/* counts the iids for the Searcher block configuration */
 246struct qed_src_iids {
 247        u32 pf_cids;
 248        u32 per_vf_cids;
 249};
 250
 251static void qed_cxt_src_iids(struct qed_cxt_mngr *p_mngr,
 252                             struct qed_src_iids *iids)
 253{
 254        u32 i;
 255
 256        for (i = 0; i < MAX_CONN_TYPES; i++) {
 257                if (!src_proto(i))
 258                        continue;
 259
 260                iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
 261                iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
 262        }
 263}
 264
 265/* counts the iids for the Timers block configuration */
 266struct qed_tm_iids {
 267        u32 pf_cids;
 268        u32 pf_tids[NUM_TASK_PF_SEGMENTS];      /* per segment */
 269        u32 pf_tids_total;
 270        u32 per_vf_cids;
 271        u32 per_vf_tids;
 272};
 273
 274static void qed_cxt_tm_iids(struct qed_cxt_mngr *p_mngr,
 275                            struct qed_tm_iids *iids)
 276{
 277        u32 i, j;
 278
 279        for (i = 0; i < MAX_CONN_TYPES; i++) {
 280                struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];
 281
 282                if (tm_cid_proto(i)) {
 283                        iids->pf_cids += p_cfg->cid_count;
 284                        iids->per_vf_cids += p_cfg->cids_per_vf;
 285                }
 286        }
 287
 288        iids->pf_cids = roundup(iids->pf_cids, TM_ALIGN);
 289        iids->per_vf_cids = roundup(iids->per_vf_cids, TM_ALIGN);
 290        iids->per_vf_tids = roundup(iids->per_vf_tids, TM_ALIGN);
 291
 292        for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) {
 293                iids->pf_tids[j] = roundup(iids->pf_tids[j], TM_ALIGN);
 294                iids->pf_tids_total += iids->pf_tids[j];
 295        }
 296}
 297
 298static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
 299                            struct qed_qm_iids *iids)
 300{
 301        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
 302        struct qed_tid_seg *segs;
 303        u32 vf_cids = 0, type, j;
 304        u32 vf_tids = 0;
 305
 306        for (type = 0; type < MAX_CONN_TYPES; type++) {
 307                iids->cids += p_mngr->conn_cfg[type].cid_count;
 308                vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
 309
 310                segs = p_mngr->conn_cfg[type].tid_seg;
 311                /* for each segment there is at most one
 312                 * protocol for which count is not 0.
 313                 */
 314                for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
 315                        iids->tids += segs[j].count;
 316
  317                /* The last array element is for the VFs. As for PF
 318                 * segments there can be only one protocol for
 319                 * which this value is not 0.
 320                 */
 321                vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
 322        }
 323
 324        iids->vf_cids += vf_cids * p_mngr->vf_count;
 325        iids->tids += vf_tids * p_mngr->vf_count;
 326
 327        DP_VERBOSE(p_hwfn, QED_MSG_ILT,
 328                   "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n",
 329                   iids->cids, iids->vf_cids, iids->tids, vf_tids);
 330}
 331
 332static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
 333                                                u32 seg)
 334{
 335        struct qed_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr;
 336        u32 i;
 337
 338        /* Find the protocol with tid count > 0 for this segment.
 339         * Note: there can only be one and this is already validated.
 340         */
 341        for (i = 0; i < MAX_CONN_TYPES; i++)
 342                if (p_cfg->conn_cfg[i].tid_seg[seg].count)
 343                        return &p_cfg->conn_cfg[i].tid_seg[seg];
 344        return NULL;
 345}
 346
 347static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
 348{
 349        struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
 350
 351        p_mgr->srq_count = num_srqs;
 352}
 353
 354static u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
 355{
 356        struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
 357
 358        return p_mgr->srq_count;
 359}
 360
 361/* set the iids count per protocol */
 362static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
 363                                        enum protocol_type type,
 364                                        u32 cid_count, u32 vf_cid_cnt)
 365{
 366        struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
 367        struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];
 368
 369        p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
 370        p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN);
 371
 372        if (type == PROTOCOLID_ROCE) {
 373                u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
 374                u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
 375                u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
 376
 377                p_conn->cid_count = roundup(p_conn->cid_count, elems_per_page);
 378        }
 379}
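
/* Hypothetical illustration of the RoCE alignment above: with a 64K
 * CDUC page and a (made-up) 512-byte connection context,
 * elems_per_page = 65536 / 512 = 128, so a requested cid_count of 1000
 * is rounded up to 1024. Each ILT page then belongs wholly to RoCE,
 * which suits the dynamic CDUC line allocation (see
 * qed_ilt_get_dynamic_line_cnt() below).
 */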
 380
 381u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
 382                                enum protocol_type type, u32 *vf_cid)
 383{
 384        if (vf_cid)
 385                *vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;
 386
 387        return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
 388}
 389
 390u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
 391                                enum protocol_type type)
 392{
 393        return p_hwfn->p_cxt_mngr->acquired[type].start_cid;
 394}
 395
 396u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
 397                                enum protocol_type type)
 398{
 399        u32 cnt = 0;
 400        int i;
 401
 402        for (i = 0; i < TASK_SEGMENTS; i++)
 403                cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count;
 404
 405        return cnt;
 406}
 407
 408static void qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
 409                                        enum protocol_type proto,
 410                                        u8 seg,
 411                                        u8 seg_type, u32 count, bool has_fl)
 412{
 413        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
 414        struct qed_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];
 415
 416        p_seg->count = count;
 417        p_seg->has_fl_mem = has_fl;
 418        p_seg->type = seg_type;
 419}
 420
 421static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
 422                                 struct qed_ilt_cli_blk *p_blk,
 423                                 u32 start_line, u32 total_size, u32 elem_size)
 424{
 425        u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
 426
  427        /* verify that it's called only once for each block */
 428        if (p_blk->total_size)
 429                return;
 430
 431        p_blk->total_size = total_size;
 432        p_blk->real_size_in_page = 0;
 433        if (elem_size)
 434                p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
 435        p_blk->start_line = start_line;
 436}
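
/* For instance, with a 32K ILT page: 64-byte elements fit exactly
 * (real_size_in_page = 512 * 64 = 32768), while hypothetical 96-byte
 * elements give 341 * 96 = 32736, leaving 32 unusable bytes per page.
 * Elements never straddle a page boundary.
 */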
 437
 438static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
 439                                 struct qed_ilt_client_cfg *p_cli,
 440                                 struct qed_ilt_cli_blk *p_blk,
 441                                 u32 *p_line, enum ilt_clients client_id)
 442{
 443        if (!p_blk->total_size)
 444                return;
 445
 446        if (!p_cli->active)
 447                p_cli->first.val = *p_line;
 448
 449        p_cli->active = true;
 450        *p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
 451        p_cli->last.val = *p_line - 1;
 452
 453        DP_VERBOSE(p_hwfn, QED_MSG_ILT,
 454                   "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
 455                   client_id, p_cli->first.val,
 456                   p_cli->last.val, p_blk->total_size,
 457                   p_blk->real_size_in_page, p_blk->start_line);
 458}
 459
 460static u32 qed_ilt_get_dynamic_line_cnt(struct qed_hwfn *p_hwfn,
 461                                        enum ilt_clients ilt_client)
 462{
 463        u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count;
 464        struct qed_ilt_client_cfg *p_cli;
 465        u32 lines_to_skip = 0;
 466        u32 cxts_per_p;
 467
 468        if (ilt_client == ILT_CLI_CDUC) {
 469                p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
 470
 471                cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /
 472                    (u32) CONN_CXT_SIZE(p_hwfn);
 473
 474                lines_to_skip = cid_count / cxts_per_p;
 475        }
 476
 477        return lines_to_skip;
 478}
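
/* Made-up example: if a CDUC page holds 128 connection contexts and
 * RoCE declared 1024 cids, the first 1024 / 128 = 8 CDUC lines are not
 * pre-allocated (qed_ilt_blk_alloc() skips them via dynamic_line_cnt)
 * and are instead populated on demand for RoCE.
 */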
 479
 480int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
 481{
 482        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
 483        u32 curr_line, total, i, task_size, line;
 484        struct qed_ilt_client_cfg *p_cli;
 485        struct qed_ilt_cli_blk *p_blk;
 486        struct qed_cdu_iids cdu_iids;
 487        struct qed_src_iids src_iids;
 488        struct qed_qm_iids qm_iids;
 489        struct qed_tm_iids tm_iids;
 490        struct qed_tid_seg *p_seg;
 491
 492        memset(&qm_iids, 0, sizeof(qm_iids));
 493        memset(&cdu_iids, 0, sizeof(cdu_iids));
 494        memset(&src_iids, 0, sizeof(src_iids));
 495        memset(&tm_iids, 0, sizeof(tm_iids));
 496
 497        p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);
 498
 499        DP_VERBOSE(p_hwfn, QED_MSG_ILT,
 500                   "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
 501                   p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
 502
 503        /* CDUC */
 504        p_cli = &p_mngr->clients[ILT_CLI_CDUC];
 505        curr_line = p_mngr->pf_start_line;
 506
 507        /* CDUC PF */
 508        p_cli->pf_total_lines = 0;
 509
 510        /* get the counters for the CDUC and QM clients  */
 511        qed_cxt_cdu_iids(p_mngr, &cdu_iids);
 512
 513        p_blk = &p_cli->pf_blks[CDUC_BLK];
 514
 515        total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);
 516
 517        qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
 518                             total, CONN_CXT_SIZE(p_hwfn));
 519
 520        qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
 521        p_cli->pf_total_lines = curr_line - p_blk->start_line;
 522
 523        p_blk->dynamic_line_cnt = qed_ilt_get_dynamic_line_cnt(p_hwfn,
 524                                                               ILT_CLI_CDUC);
 525
 526        /* CDUC VF */
 527        p_blk = &p_cli->vf_blks[CDUC_BLK];
 528        total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);
 529
 530        qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
 531                             total, CONN_CXT_SIZE(p_hwfn));
 532
 533        qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
 534        p_cli->vf_total_lines = curr_line - p_blk->start_line;
 535
 536        for (i = 1; i < p_mngr->vf_count; i++)
 537                qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
 538                                     ILT_CLI_CDUC);
 539
 540        /* CDUT PF */
 541        p_cli = &p_mngr->clients[ILT_CLI_CDUT];
 542        p_cli->first.val = curr_line;
 543
 544        /* first the 'working' task memory */
 545        for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
 546                p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
 547                if (!p_seg || p_seg->count == 0)
 548                        continue;
 549
 550                p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
 551                total = p_seg->count * p_mngr->task_type_size[p_seg->type];
 552                qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
 553                                     p_mngr->task_type_size[p_seg->type]);
 554
 555                qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
 556                                     ILT_CLI_CDUT);
 557        }
 558
 559        /* next the 'init' task memory (forced load memory) */
 560        for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
 561                p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
 562                if (!p_seg || p_seg->count == 0)
 563                        continue;
 564
 565                p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
 566
 567                if (!p_seg->has_fl_mem) {
  568                        /* The segment is active (total size of 'working'
  569                         * memory is > 0) but has no FL (forced-load, Init)
  570                         * memory. Thus:
  571                         *
  572                         * 1.   The total-size in the corresponding FL block
  573                         *      of the ILT client is set to 0 - no ILT lines
  574                         *      are provisioned and no ILT memory allocated.
  575                         *
  576                         * 2.   The start-line of said block is set to the
  577                         *      start line of the matching working memory
  578                         *      block in the ILT client. This is later used to
  579                         *      configure the CDU segment offset registers and
  580                         *      results in an FL command for TIDs of this
  581                         *      segment behaving as a regular load command
 582                         *      (loading TIDs from the working memory).
 583                         */
 584                        line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line;
 585
 586                        qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
 587                        continue;
 588                }
 589                total = p_seg->count * p_mngr->task_type_size[p_seg->type];
 590
 591                qed_ilt_cli_blk_fill(p_cli, p_blk,
 592                                     curr_line, total,
 593                                     p_mngr->task_type_size[p_seg->type]);
 594
 595                qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
 596                                     ILT_CLI_CDUT);
 597        }
 598        p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line;
 599
 600        /* CDUT VF */
 601        p_seg = qed_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);
 602        if (p_seg && p_seg->count) {
  603                /* Strictly speaking we need to iterate over all VF
 604                 * task segment types, but a VF has only 1 segment
 605                 */
 606
 607                /* 'working' memory */
 608                total = p_seg->count * p_mngr->task_type_size[p_seg->type];
 609
 610                p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
 611                qed_ilt_cli_blk_fill(p_cli, p_blk,
 612                                     curr_line, total,
 613                                     p_mngr->task_type_size[p_seg->type]);
 614
 615                qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
 616                                     ILT_CLI_CDUT);
 617
 618                /* 'init' memory */
 619                p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
 620                if (!p_seg->has_fl_mem) {
 621                        /* see comment above */
 622                        line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
 623                        qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
 624                } else {
 625                        task_size = p_mngr->task_type_size[p_seg->type];
 626                        qed_ilt_cli_blk_fill(p_cli, p_blk,
 627                                             curr_line, total, task_size);
 628                        qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
 629                                             ILT_CLI_CDUT);
 630                }
 631                p_cli->vf_total_lines = curr_line -
 632                    p_cli->vf_blks[0].start_line;
 633
 634                /* Now for the rest of the VFs */
 635                for (i = 1; i < p_mngr->vf_count; i++) {
 636                        p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
 637                        qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
 638                                             ILT_CLI_CDUT);
 639
 640                        p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
 641                        qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
 642                                             ILT_CLI_CDUT);
 643                }
 644        }
 645
 646        /* QM */
 647        p_cli = &p_mngr->clients[ILT_CLI_QM];
 648        p_blk = &p_cli->pf_blks[0];
 649
 650        qed_cxt_qm_iids(p_hwfn, &qm_iids);
 651        total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
 652                                   qm_iids.vf_cids, qm_iids.tids,
 653                                   p_hwfn->qm_info.num_pqs,
 654                                   p_hwfn->qm_info.num_vf_pqs);
 655
 656        DP_VERBOSE(p_hwfn,
 657                   QED_MSG_ILT,
 658                   "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n",
 659                   qm_iids.cids,
 660                   qm_iids.vf_cids,
 661                   qm_iids.tids,
 662                   p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);
 663
 664        qed_ilt_cli_blk_fill(p_cli, p_blk,
 665                             curr_line, total * 0x1000,
 666                             QM_PQ_ELEMENT_SIZE);
 667
 668        qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
 669        p_cli->pf_total_lines = curr_line - p_blk->start_line;
 670
 671        /* SRC */
 672        p_cli = &p_mngr->clients[ILT_CLI_SRC];
 673        qed_cxt_src_iids(p_mngr, &src_iids);
 674
 675        /* Both the PF and VFs searcher connections are stored in the per PF
 676         * database. Thus sum the PF searcher cids and all the VFs searcher
 677         * cids.
 678         */
 679        total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
 680        if (total) {
 681                u32 local_max = max_t(u32, total,
 682                                      SRC_MIN_NUM_ELEMS);
 683
 684                total = roundup_pow_of_two(local_max);
 685
 686                p_blk = &p_cli->pf_blks[0];
 687                qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
 688                                     total * sizeof(struct src_ent),
 689                                     sizeof(struct src_ent));
 690
 691                qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
 692                                     ILT_CLI_SRC);
 693                p_cli->pf_total_lines = curr_line - p_blk->start_line;
 694        }
 695
 696        /* TM PF */
 697        p_cli = &p_mngr->clients[ILT_CLI_TM];
 698        qed_cxt_tm_iids(p_mngr, &tm_iids);
 699        total = tm_iids.pf_cids + tm_iids.pf_tids_total;
 700        if (total) {
 701                p_blk = &p_cli->pf_blks[0];
 702                qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
 703                                     total * TM_ELEM_SIZE, TM_ELEM_SIZE);
 704
 705                qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
 706                                     ILT_CLI_TM);
 707                p_cli->pf_total_lines = curr_line - p_blk->start_line;
 708        }
 709
 710        /* TM VF */
 711        total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
 712        if (total) {
 713                p_blk = &p_cli->vf_blks[0];
 714                qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
 715                                     total * TM_ELEM_SIZE, TM_ELEM_SIZE);
 716
 717                qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
 718                                     ILT_CLI_TM);
 719                p_cli->pf_total_lines = curr_line - p_blk->start_line;
 720
 721                for (i = 1; i < p_mngr->vf_count; i++)
 722                        qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
 723                                             ILT_CLI_TM);
 724        }
 725
 726        /* TSDM (SRQ CONTEXT) */
 727        total = qed_cxt_get_srq_count(p_hwfn);
 728
 729        if (total) {
 730                p_cli = &p_mngr->clients[ILT_CLI_TSDM];
 731                p_blk = &p_cli->pf_blks[SRQ_BLK];
 732                qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
 733                                     total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);
 734
 735                qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
 736                                     ILT_CLI_TSDM);
 737                p_cli->pf_total_lines = curr_line - p_blk->start_line;
 738        }
 739
 740        if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
 741            RESC_NUM(p_hwfn, QED_ILT)) {
 742                DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
 743                       curr_line - p_hwfn->p_cxt_mngr->pf_start_line);
 744                return -EINVAL;
 745        }
 746
 747        return 0;
 748}
 749
 750static void qed_cxt_src_t2_free(struct qed_hwfn *p_hwfn)
 751{
 752        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
 753        u32 i;
 754
 755        if (!p_mngr->t2)
 756                return;
 757
 758        for (i = 0; i < p_mngr->t2_num_pages; i++)
 759                if (p_mngr->t2[i].p_virt)
 760                        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 761                                          p_mngr->t2[i].size,
 762                                          p_mngr->t2[i].p_virt,
 763                                          p_mngr->t2[i].p_phys);
 764
 765        kfree(p_mngr->t2);
 766        p_mngr->t2 = NULL;
 767}
 768
 769static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
 770{
 771        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
 772        u32 conn_num, total_size, ent_per_page, psz, i;
 773        struct qed_ilt_client_cfg *p_src;
 774        struct qed_src_iids src_iids;
 775        struct qed_dma_mem *p_t2;
 776        int rc;
 777
 778        memset(&src_iids, 0, sizeof(src_iids));
 779
  780        /* if the SRC ILT client is inactive - there are no connections
  781         * requiring the searcher, leave.
  782         */
 783        p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC];
 784        if (!p_src->active)
 785                return 0;
 786
 787        qed_cxt_src_iids(p_mngr, &src_iids);
 788        conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
 789        total_size = conn_num * sizeof(struct src_ent);
 790
 791        /* use the same page size as the SRC ILT client */
 792        psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
 793        p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);
 794
 795        /* allocate t2 */
 796        p_mngr->t2 = kcalloc(p_mngr->t2_num_pages, sizeof(struct qed_dma_mem),
 797                             GFP_KERNEL);
 798        if (!p_mngr->t2) {
 799                rc = -ENOMEM;
 800                goto t2_fail;
 801        }
 802
 803        /* allocate t2 pages */
 804        for (i = 0; i < p_mngr->t2_num_pages; i++) {
 805                u32 size = min_t(u32, total_size, psz);
 806                void **p_virt = &p_mngr->t2[i].p_virt;
 807
 808                *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
 809                                             size,
 810                                             &p_mngr->t2[i].p_phys, GFP_KERNEL);
 811                if (!p_mngr->t2[i].p_virt) {
 812                        rc = -ENOMEM;
 813                        goto t2_fail;
 814                }
 815                memset(*p_virt, 0, size);
 816                p_mngr->t2[i].size = size;
 817                total_size -= size;
 818        }
 819
 820        /* Set the t2 pointers */
 821
 822        /* entries per page - must be a power of two */
 823        ent_per_page = psz / sizeof(struct src_ent);
 824
 825        p_mngr->first_free = (u64) p_mngr->t2[0].p_phys;
 826
 827        p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page];
 828        p_mngr->last_free = (u64) p_t2->p_phys +
 829            ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);
 830
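        /* Chain being built below, e.g. with a hypothetical 32K page
         * (512 entries per page): entry j of page i points at entry
         * j + 1 of the same page, the last entry of page i points at
         * the first entry of page i + 1, and the final entry's next is
         * 0. first_free/last_free above record the physical head and
         * tail of this chain.
         */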
 831        for (i = 0; i < p_mngr->t2_num_pages; i++) {
 832                u32 ent_num = min_t(u32,
 833                                    ent_per_page,
 834                                    conn_num);
 835                struct src_ent *entries = p_mngr->t2[i].p_virt;
 836                u64 p_ent_phys = (u64) p_mngr->t2[i].p_phys, val;
 837                u32 j;
 838
 839                for (j = 0; j < ent_num - 1; j++) {
 840                        val = p_ent_phys + (j + 1) * sizeof(struct src_ent);
 841                        entries[j].next = cpu_to_be64(val);
 842                }
 843
 844                if (i < p_mngr->t2_num_pages - 1)
 845                        val = (u64) p_mngr->t2[i + 1].p_phys;
 846                else
 847                        val = 0;
 848                entries[j].next = cpu_to_be64(val);
 849
 850                conn_num -= ent_num;
 851        }
 852
 853        return 0;
 854
 855t2_fail:
 856        qed_cxt_src_t2_free(p_hwfn);
 857        return rc;
 858}
 859
 860#define for_each_ilt_valid_client(pos, clients) \
 861        for (pos = 0; pos < ILT_CLI_MAX; pos++) \
 862                if (!clients[pos].active) {     \
 863                        continue;               \
 864                } else                          \
 865
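/* The dangling 'else' above lets the macro head a plain statement or
 * block while silently skipping inactive clients, e.g.:
 *
 *      for_each_ilt_valid_client(i, clients)
 *              size += clients[i].last.val - clients[i].first.val + 1;
 */
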
 866/* Total number of ILT lines used by this PF */
 867static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
 868{
 869        u32 size = 0;
 870        u32 i;
 871
 872        for_each_ilt_valid_client(i, ilt_clients)
 873            size += (ilt_clients[i].last.val - ilt_clients[i].first.val + 1);
 874
 875        return size;
 876}
 877
 878static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
 879{
 880        struct qed_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
 881        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
 882        u32 ilt_size, i;
 883
 884        ilt_size = qed_cxt_ilt_shadow_size(p_cli);
 885
 886        for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
 887                struct qed_dma_mem *p_dma = &p_mngr->ilt_shadow[i];
 888
 889                if (p_dma->p_virt)
 890                        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 891                                          p_dma->size, p_dma->p_virt,
 892                                          p_dma->p_phys);
 893                p_dma->p_virt = NULL;
 894        }
 895        kfree(p_mngr->ilt_shadow);
 896}
 897
 898static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
 899                             struct qed_ilt_cli_blk *p_blk,
 900                             enum ilt_clients ilt_client,
 901                             u32 start_line_offset)
 902{
 903        struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
 904        u32 lines, line, sz_left, lines_to_skip = 0;
 905
 906        /* Special handling for RoCE that supports dynamic allocation */
 907        if ((p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) &&
 908            ((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM))
 909                return 0;
 910
 911        lines_to_skip = p_blk->dynamic_line_cnt;
 912
 913        if (!p_blk->total_size)
 914                return 0;
 915
 916        sz_left = p_blk->total_size;
 917        lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip;
 918        line = p_blk->start_line + start_line_offset -
 919            p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip;
 920
 921        for (; lines; lines--) {
 922                dma_addr_t p_phys;
 923                void *p_virt;
 924                u32 size;
 925
 926                size = min_t(u32, sz_left, p_blk->real_size_in_page);
 927                p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
 928                                            size, &p_phys, GFP_KERNEL);
 929                if (!p_virt)
 930                        return -ENOMEM;
 931                memset(p_virt, 0, size);
 932
 933                ilt_shadow[line].p_phys = p_phys;
 934                ilt_shadow[line].p_virt = p_virt;
 935                ilt_shadow[line].size = size;
 936
 937                DP_VERBOSE(p_hwfn, QED_MSG_ILT,
 938                           "ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n",
 939                            line, (u64)p_phys, p_virt, size);
 940
 941                sz_left -= size;
 942                line++;
 943        }
 944
 945        return 0;
 946}
 947
 948static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
 949{
 950        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
 951        struct qed_ilt_client_cfg *clients = p_mngr->clients;
 952        struct qed_ilt_cli_blk *p_blk;
 953        u32 size, i, j, k;
 954        int rc;
 955
 956        size = qed_cxt_ilt_shadow_size(clients);
 957        p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
 958                                     GFP_KERNEL);
 959        if (!p_mngr->ilt_shadow) {
 960                rc = -ENOMEM;
 961                goto ilt_shadow_fail;
 962        }
 963
 964        DP_VERBOSE(p_hwfn, QED_MSG_ILT,
 965                   "Allocated 0x%x bytes for ilt shadow\n",
 966                   (u32)(size * sizeof(struct qed_dma_mem)));
 967
 968        for_each_ilt_valid_client(i, clients) {
 969                for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
 970                        p_blk = &clients[i].pf_blks[j];
 971                        rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
 972                        if (rc)
 973                                goto ilt_shadow_fail;
 974                }
 975                for (k = 0; k < p_mngr->vf_count; k++) {
 976                        for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
 977                                u32 lines = clients[i].vf_total_lines * k;
 978
 979                                p_blk = &clients[i].vf_blks[j];
 980                                rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines);
 981                                if (rc)
 982                                        goto ilt_shadow_fail;
 983                        }
 984                }
 985        }
 986
 987        return 0;
 988
 989ilt_shadow_fail:
 990        qed_ilt_shadow_free(p_hwfn);
 991        return rc;
 992}
 993
 994static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
 995{
 996        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
 997        u32 type;
 998
 999        for (type = 0; type < MAX_CONN_TYPES; type++) {
1000                kfree(p_mngr->acquired[type].cid_map);
1001                p_mngr->acquired[type].max_count = 0;
1002                p_mngr->acquired[type].start_cid = 0;
1003        }
1004}
1005
1006static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn)
1007{
1008        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1009        u32 start_cid = 0;
1010        u32 type;
1011
1012        for (type = 0; type < MAX_CONN_TYPES; type++) {
1013                u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
1014                u32 size;
1015
1016                if (cid_cnt == 0)
1017                        continue;
1018
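                /* Bitmap sizing example: an unsigned long covers 64
                 * cids on a 64-bit build, so a hypothetical cid_cnt of
                 * 1000 needs DIV_ROUND_UP(1000, 64) = 16 longs, i.e. a
                 * 128-byte map.
                 */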
1019                size = DIV_ROUND_UP(cid_cnt,
1020                                    sizeof(unsigned long) * BITS_PER_BYTE) *
1021                       sizeof(unsigned long);
1022                p_mngr->acquired[type].cid_map = kzalloc(size, GFP_KERNEL);
1023                if (!p_mngr->acquired[type].cid_map)
1024                        goto cid_map_fail;
1025
1026                p_mngr->acquired[type].max_count = cid_cnt;
1027                p_mngr->acquired[type].start_cid = start_cid;
1028
1029                p_hwfn->p_cxt_mngr->conn_cfg[type].cid_start = start_cid;
1030
1031                DP_VERBOSE(p_hwfn, QED_MSG_CXT,
1032                           "Type %08x start: %08x count %08x\n",
1033                           type, p_mngr->acquired[type].start_cid,
1034                           p_mngr->acquired[type].max_count);
1035                start_cid += cid_cnt;
1036        }
1037
1038        return 0;
1039
1040cid_map_fail:
1041        qed_cid_map_free(p_hwfn);
1042        return -ENOMEM;
1043}
1044
1045int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
1046{
1047        struct qed_ilt_client_cfg *clients;
1048        struct qed_cxt_mngr *p_mngr;
1049        u32 i;
1050
1051        p_mngr = kzalloc(sizeof(*p_mngr), GFP_KERNEL);
1052        if (!p_mngr)
1053                return -ENOMEM;
1054
1055        /* Initialize ILT client registers */
1056        clients = p_mngr->clients;
1057        clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
1058        clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
1059        clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
1060
1061        clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
1062        clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
1063        clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);
1064
1065        clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT);
1066        clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT);
1067        clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE);
1068
1069        clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT);
1070        clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT);
1071        clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE);
1072
1073        clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT);
1074        clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT);
1075        clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE);
1076
1077        clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
1078        clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
1079        clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
 1080        /* default ILT page size for all clients is 32K (64K with RDMA) */
1081        for (i = 0; i < ILT_CLI_MAX; i++)
1082                p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
1083
1084        /* Initialize task sizes */
1085        p_mngr->task_type_size[0] = TYPE0_TASK_CXT_SIZE(p_hwfn);
1086        p_mngr->task_type_size[1] = TYPE1_TASK_CXT_SIZE(p_hwfn);
1087
1088        if (p_hwfn->cdev->p_iov_info)
1089                p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;
1090        /* Initialize the dynamic ILT allocation mutex */
1091        mutex_init(&p_mngr->mutex);
1092
 1093        /* Set the cxt manager pointer prior to further allocations */
1094        p_hwfn->p_cxt_mngr = p_mngr;
1095
1096        return 0;
1097}
1098
1099int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
1100{
1101        int rc;
1102
1103        /* Allocate the ILT shadow table */
1104        rc = qed_ilt_shadow_alloc(p_hwfn);
1105        if (rc)
1106                goto tables_alloc_fail;
1107
1108        /* Allocate the T2  table */
1109        rc = qed_cxt_src_t2_alloc(p_hwfn);
1110        if (rc)
1111                goto tables_alloc_fail;
1112
1113        /* Allocate and initialize the acquired cids bitmaps */
1114        rc = qed_cid_map_alloc(p_hwfn);
1115        if (rc)
1116                goto tables_alloc_fail;
1117
1118        return 0;
1119
1120tables_alloc_fail:
1121        qed_cxt_mngr_free(p_hwfn);
1122        return rc;
1123}
1124
1125void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
1126{
1127        if (!p_hwfn->p_cxt_mngr)
1128                return;
1129
1130        qed_cid_map_free(p_hwfn);
1131        qed_cxt_src_t2_free(p_hwfn);
1132        qed_ilt_shadow_free(p_hwfn);
1133        kfree(p_hwfn->p_cxt_mngr);
1134
1135        p_hwfn->p_cxt_mngr = NULL;
1136}
1137
1138void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
1139{
1140        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1141        int type;
1142
1143        /* Reset acquired cids */
1144        for (type = 0; type < MAX_CONN_TYPES; type++) {
1145                u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
1146
1147                if (cid_cnt == 0)
1148                        continue;
1149
1150                memset(p_mngr->acquired[type].cid_map, 0,
1151                       DIV_ROUND_UP(cid_cnt,
1152                                    sizeof(unsigned long) * BITS_PER_BYTE) *
1153                       sizeof(unsigned long));
1154        }
1155}
1156
1157/* CDU Common */
1158#define CDUC_CXT_SIZE_SHIFT \
1159        CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT
1160
1161#define CDUC_CXT_SIZE_MASK \
1162        (CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)
1163
1164#define CDUC_BLOCK_WASTE_SHIFT \
1165        CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT
1166
1167#define CDUC_BLOCK_WASTE_MASK \
1168        (CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)
1169
1170#define CDUC_NCIB_SHIFT \
1171        CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT
1172
1173#define CDUC_NCIB_MASK \
1174        (CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)
1175
1176#define CDUT_TYPE0_CXT_SIZE_SHIFT \
1177        CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT
1178
1179#define CDUT_TYPE0_CXT_SIZE_MASK                \
1180        (CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >> \
1181         CDUT_TYPE0_CXT_SIZE_SHIFT)
1182
1183#define CDUT_TYPE0_BLOCK_WASTE_SHIFT \
1184        CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT
1185
1186#define CDUT_TYPE0_BLOCK_WASTE_MASK                    \
1187        (CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >> \
1188         CDUT_TYPE0_BLOCK_WASTE_SHIFT)
1189
1190#define CDUT_TYPE0_NCIB_SHIFT \
1191        CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT
1192
1193#define CDUT_TYPE0_NCIB_MASK                             \
1194        (CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >> \
1195         CDUT_TYPE0_NCIB_SHIFT)
1196
1197#define CDUT_TYPE1_CXT_SIZE_SHIFT \
1198        CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT
1199
1200#define CDUT_TYPE1_CXT_SIZE_MASK                \
1201        (CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >> \
1202         CDUT_TYPE1_CXT_SIZE_SHIFT)
1203
1204#define CDUT_TYPE1_BLOCK_WASTE_SHIFT \
1205        CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT
1206
1207#define CDUT_TYPE1_BLOCK_WASTE_MASK                    \
1208        (CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >> \
1209         CDUT_TYPE1_BLOCK_WASTE_SHIFT)
1210
1211#define CDUT_TYPE1_NCIB_SHIFT \
1212        CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT
1213
1214#define CDUT_TYPE1_NCIB_MASK                             \
1215        (CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >> \
1216         CDUT_TYPE1_NCIB_SHIFT)
1217
1218static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
1219{
1220        u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;
1221
1222        /* CDUC - connection configuration */
1223        page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
1224        cxt_size = CONN_CXT_SIZE(p_hwfn);
1225        elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
1226        block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
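        /* E.g. with a 32K page and a hypothetical 320-byte context:
         * elems_per_page = 102 and block_waste = 32768 - 102 * 320 = 128.
         */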
1227
1228        SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
1229        SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
1230        SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
1231        STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);
1232
1233        /* CDUT - type-0 tasks configuration */
1234        page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val;
1235        cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0];
1236        elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
1237        block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
1238
 1239        /* cxt size and block-waste are multiples of 8 */
1240        cdu_params = 0;
1241        SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3));
1242        SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3));
1243        SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page);
1244        STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params);
1245
1246        /* CDUT - type-1 tasks configuration */
1247        cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1];
1248        elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
1249        block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
1250
 1251        /* cxt size and block-waste are multiples of 8 */
1252        cdu_params = 0;
1253        SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3));
1254        SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3));
1255        SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page);
1256        STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params);
1257}
1258
1259/* CDU PF */
1260#define CDU_SEG_REG_TYPE_SHIFT          CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT
1261#define CDU_SEG_REG_TYPE_MASK           0x1
1262#define CDU_SEG_REG_OFFSET_SHIFT        0
1263#define CDU_SEG_REG_OFFSET_MASK         CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK
1264
1265static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
1266{
1267        struct qed_ilt_client_cfg *p_cli;
1268        struct qed_tid_seg *p_seg;
1269        u32 cdu_seg_params, offset;
1270        int i;
1271
1272        static const u32 rt_type_offset_arr[] = {
1273                CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET,
1274                CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET,
1275                CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET,
1276                CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET
1277        };
1278
1279        static const u32 rt_type_offset_fl_arr[] = {
1280                CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET,
1281                CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET,
1282                CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET,
1283                CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET
1284        };
1285
1286        p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
1287
 1288        /* Only the CDUT client requires initialization during the PF phase */
1289        for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
 1290                /* Segment 'i' */
1291                p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
1292                if (!p_seg)
1293                        continue;
1294
1295                /* Note: start_line is already adjusted for the CDU
1296                 * segment register granularity, so we just need to
1297                 * divide. Adjustment is implicit as we assume ILT
 1298                 * Page size is at least 32K!
1299                 */
1300                offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
1301                          (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line -
1302                           p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
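                /* E.g. with a hypothetical 64K CDUT page and a segment
                 * starting 2 lines into the client: offset =
                 * (65536 * 2) / 32768 = 4 CDU granularity units of 32K.
                 */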
1303
1304                cdu_seg_params = 0;
1305                SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
1306                SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
1307                STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params);
1308
1309                offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
1310                          (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line -
1311                           p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
1312
1313                cdu_seg_params = 0;
1314                SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
1315                SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
1316                STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params);
1317        }
1318}
1319
1320void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
1321{
1322        struct qed_qm_pf_rt_init_params params;
1323        struct qed_qm_info *qm_info = &p_hwfn->qm_info;
1324        struct qed_qm_iids iids;
1325
1326        memset(&iids, 0, sizeof(iids));
1327        qed_cxt_qm_iids(p_hwfn, &iids);
1328
1329        memset(&params, 0, sizeof(params));
1330        params.port_id = p_hwfn->port_id;
1331        params.pf_id = p_hwfn->rel_pf_id;
1332        params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
1333        params.is_first_pf = p_hwfn->first_on_engine;
1334        params.num_pf_cids = iids.cids;
1335        params.num_vf_cids = iids.vf_cids;
1336        params.start_pq = qm_info->start_pq;
1337        params.num_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs;
1338        params.num_vf_pqs = qm_info->num_vf_pqs;
1339        params.start_vport = qm_info->start_vport;
1340        params.num_vports = qm_info->num_vports;
1341        params.pf_wfq = qm_info->pf_wfq;
1342        params.pf_rl = qm_info->pf_rl;
1343        params.pq_params = qm_info->qm_pq_params;
1344        params.vport_params = qm_info->qm_vport_params;
1345
1346        qed_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, &params);
1347}
1348
1349/* CM PF */
1350static int qed_cm_init_pf(struct qed_hwfn *p_hwfn)
1351{
1352        union qed_qm_pq_params pq_params;
1353        u16 pq;
1354
1355        /* XCM pure-LB queue */
1356        memset(&pq_params, 0, sizeof(pq_params));
1357        pq_params.core.tc = LB_TC;
1358        pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
1359        STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, pq);
1360
1361        return 0;
1362}
1363
1364/* DQ PF */
1365static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
1366{
1367        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1368        u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;
1369
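        /* The DORQ range registers below are cumulative: ICID_n holds
         * the running total of cid counts for connection types 0..n in
         * DQ_RANGE_SHIFT (16-cid) units. E.g., hypothetically 4096
         * type-0 cids and 2048 type-1 cids give ICID_0 = 256 and
         * ICID_1 = 384.
         */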
1370        dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
1371        STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
1372
1373        dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT);
1374        STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid);
1375
1376        dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
1377        STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
1378
1379        dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT);
1380        STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid);
1381
1382        dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
1383        STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
1384
1385        dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT);
1386        STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid);
1387
1388        dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
1389        STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
1390
1391        dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT);
1392        STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid);
1393
1394        dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
1395        STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
1396
1397        dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT);
1398        STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid);
1399
1400        dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
1401        STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
1402
1403        dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
1404        STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);
1405
 1406        /* Connection types 6 & 7 are not in use, yet they must be configured
 1407         * as the highest possible connection. Leaving them unconfigured means
 1408         * the defaults would be used, and with a large number of cids a bug
 1409         * could occur if the defaults are smaller than dq_pf_max_cid /
 1410         * dq_vf_max_cid.
 1411         */
1412        STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
1413        STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);
1414
1415        STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
1416        STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
1417}
1418
1419static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
1420{
1421        struct qed_ilt_client_cfg *ilt_clients;
1422        int i;
1423
1424        ilt_clients = p_hwfn->p_cxt_mngr->clients;
1425        for_each_ilt_valid_client(i, ilt_clients) {
1426                STORE_RT_REG(p_hwfn,
1427                             ilt_clients[i].first.reg,
1428                             ilt_clients[i].first.val);
1429                STORE_RT_REG(p_hwfn,
1430                             ilt_clients[i].last.reg, ilt_clients[i].last.val);
1431                STORE_RT_REG(p_hwfn,
1432                             ilt_clients[i].p_size.reg,
1433                             ilt_clients[i].p_size.val);
1434        }
1435}
1436
1437static void qed_ilt_vf_bounds_init(struct qed_hwfn *p_hwfn)
1438{
1439        struct qed_ilt_client_cfg *p_cli;
1440        u32 blk_factor;
1441
1442        /* For simplicity we set the 'block' to be an ILT page */
1443        if (p_hwfn->cdev->p_iov_info) {
1444                struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
1445
1446                STORE_RT_REG(p_hwfn,
1447                             PSWRQ2_REG_VF_BASE_RT_OFFSET,
1448                             p_iov->first_vf_in_pf);
1449                STORE_RT_REG(p_hwfn,
1450                             PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
1451                             p_iov->first_vf_in_pf + p_iov->total_vfs);
1452        }
1453
1454        p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
1455        blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1456        if (p_cli->active) {
1457                STORE_RT_REG(p_hwfn,
1458                             PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET,
1459                             blk_factor);
1460                STORE_RT_REG(p_hwfn,
1461                             PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1462                             p_cli->pf_total_lines);
1463                STORE_RT_REG(p_hwfn,
1464                             PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
1465                             p_cli->vf_total_lines);
1466        }
1467
1468        p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
1469        blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1470        if (p_cli->active) {
1471                STORE_RT_REG(p_hwfn,
1472                             PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET,
1473                             blk_factor);
1474                STORE_RT_REG(p_hwfn,
1475                             PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1476                             p_cli->pf_total_lines);
1477                STORE_RT_REG(p_hwfn,
1478                             PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET,
1479                             p_cli->vf_total_lines);
1480        }
1481
1482        p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM];
1483        blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1484        if (p_cli->active) {
1485                STORE_RT_REG(p_hwfn,
1486                             PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor);
1487                STORE_RT_REG(p_hwfn,
1488                             PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1489                             p_cli->pf_total_lines);
1490                STORE_RT_REG(p_hwfn,
1491                             PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET,
1492                             p_cli->vf_total_lines);
1493        }
1494}
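
/* Worked example (editorial): with the RDMA default page-size value of 4
 * (ILT_DEFAULT_HW_P_SIZE), ILT_PAGE_IN_BYTES(4) = 1 << (4 + 12) = 64K, so
 * blk_factor = ilog2(65536 >> 10) = ilog2(64) = 6; the 'blocks factor' is
 * thus the ILT page size expressed in 1K units, as a power of two.
 */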
1495
1496/* ILT (PSWRQ2) PF */
1497static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
1498{
1499        struct qed_ilt_client_cfg *clients;
1500        struct qed_cxt_mngr *p_mngr;
1501        struct qed_dma_mem *p_shdw;
1502        u32 line, rt_offst, i;
1503
1504        qed_ilt_bounds_init(p_hwfn);
1505        qed_ilt_vf_bounds_init(p_hwfn);
1506
1507        p_mngr = p_hwfn->p_cxt_mngr;
1508        p_shdw = p_mngr->ilt_shadow;
1509        clients = p_hwfn->p_cxt_mngr->clients;
1510
1511        for_each_ilt_valid_client(i, clients) {
1512                /* The client's first val and the RT array are absolute,
1513                 * while the ILT shadow's lines are relative.
1514                 */
1515                line = clients[i].first.val - p_mngr->pf_start_line;
1516                rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
1517                           clients[i].first.val * ILT_ENTRY_IN_REGS;
1518
1519                for (; line <= clients[i].last.val - p_mngr->pf_start_line;
1520                     line++, rt_offst += ILT_ENTRY_IN_REGS) {
1521                        u64 ilt_hw_entry = 0;
1522
1523                        /* p_virt could be NULL in case of dynamic
1524                         * allocation.
1525                         */
1526                        if (p_shdw[line].p_virt) {
1527                                SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
1528                                SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
1529                                          (p_shdw[line].p_phys >> 12));
1530
1531                                DP_VERBOSE(p_hwfn, QED_MSG_ILT,
1532                                           "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
1533                                           rt_offst, line, i,
1534                                           (u64)(p_shdw[line].p_phys >> 12));
1535                        }
1536
1537                        STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
1538                }
1539        }
1540}
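
/* Worked example (editorial): for a shadow page at physical address
 * 0x1f00000, the entry built in the loop above is
 *
 *	u64 ilt_hw_entry = 0;
 *
 *	SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
 *	SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR, 0x1f00000ULL >> 12);
 *
 * i.e. 0x0010000000001f00: the 4K page number in bits 0..51 and the valid
 * bit at bit 52, written out over ILT_ENTRY_IN_REGS (2) RT registers.
 */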
1541
1542/* SRC (Searcher) PF */
1543static void qed_src_init_pf(struct qed_hwfn *p_hwfn)
1544{
1545        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1546        u32 rounded_conn_num, conn_num, conn_max;
1547        struct qed_src_iids src_iids;
1548
1549        memset(&src_iids, 0, sizeof(src_iids));
1550        qed_cxt_src_iids(p_mngr, &src_iids);
1551        conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
1552        if (!conn_num)
1553                return;
1554
1555        conn_max = max_t(u32, conn_num, SRC_MIN_NUM_ELEMS);
1556        rounded_conn_num = roundup_pow_of_two(conn_max);
1557
1558        STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, conn_num);
1559        STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET,
1560                     ilog2(rounded_conn_num));
1561
1562        STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
1563                         p_hwfn->p_cxt_mngr->first_free);
1564        STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
1565                         p_hwfn->p_cxt_mngr->last_free);
1566}
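
/* Worked example (editorial): for a hypothetical conn_num of 300,
 * conn_max = max(300, SRC_MIN_NUM_ELEMS) = 300 and rounded_conn_num =
 * roundup_pow_of_two(300) = 512, so the searcher is configured with
 * ilog2(512) = 9 hash bits.
 */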
1567
1568/* Timers PF */
1569#define TM_CFG_NUM_IDS_SHIFT            0
1570#define TM_CFG_NUM_IDS_MASK             0xFFFFULL
1571#define TM_CFG_PRE_SCAN_OFFSET_SHIFT    16
1572#define TM_CFG_PRE_SCAN_OFFSET_MASK     0x1FFULL
1573#define TM_CFG_PARENT_PF_SHIFT          25
1574#define TM_CFG_PARENT_PF_MASK           0x7ULL
1575
1576#define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT  30
1577#define TM_CFG_CID_PRE_SCAN_ROWS_MASK   0x1FFULL
1578
1579#define TM_CFG_TID_OFFSET_SHIFT         30
1580#define TM_CFG_TID_OFFSET_MASK          0x7FFFFULL
1581#define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT  49
1582#define TM_CFG_TID_PRE_SCAN_ROWS_MASK   0x1FFULL
1583
1584static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
1585{
1586        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1587        u32 active_seg_mask = 0, tm_offset, rt_reg;
1588        struct qed_tm_iids tm_iids;
1589        u64 cfg_word;
1590        u8 i;
1591
1592        memset(&tm_iids, 0, sizeof(tm_iids));
1593        qed_cxt_tm_iids(p_mngr, &tm_iids);
1594
1595        /* @@@TBD No pre-scan for now */
1596
1597        /* Note: We assume consecutive VFs for a PF */
1598        for (i = 0; i < p_mngr->vf_count; i++) {
1599                cfg_word = 0;
1600                SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
1601                SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1602                SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
1603                SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);
1604                rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
1605                    (sizeof(cfg_word) / sizeof(u32)) *
1606                    (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
1607                STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1608        }
1609
1610        cfg_word = 0;
1611        SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids);
1612        SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1613        SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);       /* n/a for PF */
1614        SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);       /* scan all   */
1615
1616        rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
1617            (sizeof(cfg_word) / sizeof(u32)) *
1618            (NUM_OF_VFS(p_hwfn->cdev) + p_hwfn->rel_pf_id);
1619        STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1620
1621        /* enable scan */
1622        STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
1623                     tm_iids.pf_cids ? 0x1 : 0x0);
1624
1625        /* @@@TBD how to enable the scan for the VFs */
1626
1627        tm_offset = tm_iids.per_vf_cids;
1628
1629        /* Note: We assume consecutive VFs for a PF */
1630        for (i = 0; i < p_mngr->vf_count; i++) {
1631                cfg_word = 0;
1632                SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids);
1633                SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1634                SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
1635                SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
1636                SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
1637
1638                rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
1639                    (sizeof(cfg_word) / sizeof(u32)) *
1640                    (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
1641
1642                STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1643        }
1644
1645        tm_offset = tm_iids.pf_cids;
1646        for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
1647                cfg_word = 0;
1648                SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]);
1649                SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1650                SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);
1651                SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
1652                SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
1653
1654                rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
1655                    (sizeof(cfg_word) / sizeof(u32)) *
1656                    (NUM_OF_VFS(p_hwfn->cdev) +
1657                     p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
1658
1659                STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1660                active_seg_mask |= (tm_iids.pf_tids[i] ? BIT(i) : 0);
1661
1662                tm_offset += tm_iids.pf_tids[i];
1663        }
1664
1665        if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE)
1666                active_seg_mask = 0;
1667
1668        STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);
1669
1670        /* @@@TBD how to enable the scan for the VFs */
1671}
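
/* Editorial sketch: each cfg_word above is a 64-bit value packed with
 * SET_FIELD() according to the TM_CFG_* masks/shifts. For a VF connection
 * entry with a hypothetical 0x40 ids parented to PF 2:
 *
 *	u64 cfg_word = 0;
 *
 *	SET_FIELD(cfg_word, TM_CFG_NUM_IDS, 0x40);	// bits 0..15
 *	SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 2);	// bits 25..27
 *
 * yields cfg_word == 0x04000040. RT registers are 32 bits wide, so each
 * word takes sizeof(cfg_word) / sizeof(u32) == 2 consecutive RT entries,
 * which is the stride used when computing rt_reg above.
 */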
1672
1673void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
1674{
1675        qed_cdu_init_common(p_hwfn);
1676}
1677
1678void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
1679{
1680        qed_qm_init_pf(p_hwfn);
1681        qed_cm_init_pf(p_hwfn);
1682        qed_dq_init_pf(p_hwfn);
1683        qed_cdu_init_pf(p_hwfn);
1684        qed_ilt_init_pf(p_hwfn);
1685        qed_src_init_pf(p_hwfn);
1686        qed_tm_init_pf(p_hwfn);
1687}
1688
1689int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
1690                        enum protocol_type type, u32 *p_cid)
1691{
1692        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1693        u32 rel_cid;
1694
1695        if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) {
1696                DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
1697                return -EINVAL;
1698        }
1699
1700        rel_cid = find_first_zero_bit(p_mngr->acquired[type].cid_map,
1701                                      p_mngr->acquired[type].max_count);
1702
1703        if (rel_cid >= p_mngr->acquired[type].max_count) {
1704                DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", type);
1705                return -EINVAL;
1706        }
1707
1708        __set_bit(rel_cid, p_mngr->acquired[type].cid_map);
1709
1710        *p_cid = rel_cid + p_mngr->acquired[type].start_cid;
1711
1712        return 0;
1713}
1714
1715static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
1716                                      u32 cid, enum protocol_type *p_type)
1717{
1718        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1719        struct qed_cid_acquired_map *p_map;
1720        enum protocol_type p;
1721        u32 rel_cid;
1722
1723        /* Iterate over protocols and find matching cid range */
1724        for (p = 0; p < MAX_CONN_TYPES; p++) {
1725                p_map = &p_mngr->acquired[p];
1726
1727                if (!p_map->cid_map)
1728                        continue;
1729                if (cid >= p_map->start_cid &&
1730                    cid < p_map->start_cid + p_map->max_count)
1731                        break;
1732        }
1733        *p_type = p;
1734
1735        if (p == MAX_CONN_TYPES) {
1736                DP_NOTICE(p_hwfn, "Invalid CID %d\n", cid);
1737                return false;
1738        }
1739
1740        rel_cid = cid - p_map->start_cid;
1741        if (!test_bit(rel_cid, p_map->cid_map)) {
1742                DP_NOTICE(p_hwfn, "CID %d not acquired\n", cid);
1743                return false;
1744        }
1745        return true;
1746}
1747
1748void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid)
1749{
1750        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1751        enum protocol_type type;
1752        bool b_acquired;
1753        u32 rel_cid;
1754
1755        /* Test acquired and find matching per-protocol map */
1756        b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, &type);
1757
1758        if (!b_acquired)
1759                return;
1760
1761        rel_cid = cid - p_mngr->acquired[type].start_cid;
1762        __clear_bit(rel_cid, p_mngr->acquired[type].cid_map);
1763}
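
/* Usage sketch (editorial): acquire/release form a per-protocol bitmap
 * allocator; a hypothetical caller pairs them as
 *
 *	u32 cid;
 *
 *	if (qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid))
 *		return -EINVAL;
 *	// ... use the absolute cid ...
 *	qed_cxt_release_cid(p_hwfn, cid);
 *
 * The cid handed out is absolute (start_cid + bit index), which is why
 * release subtracts start_cid again before clearing the bitmap bit.
 */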
1764
1765int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
1766{
1767        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1768        u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
1769        enum protocol_type type;
1770        bool b_acquired;
1771
1772        /* Test acquired and find matching per-protocol map */
1773        b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type);
1774
1775        if (!b_acquired)
1776                return -EINVAL;
1777
1778        /* set the protocol type */
1779        p_info->type = type;
1780
1781        /* compute context virtual pointer */
1782        hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
1783
1784        conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
1785        cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
1786        line = p_info->iid / cxts_per_p;
1787
1788        /* Make sure context is allocated (dynamic allocation) */
1789        if (!p_mngr->ilt_shadow[line].p_virt)
1790                return -EINVAL;
1791
1792        p_info->p_cxt = p_mngr->ilt_shadow[line].p_virt +
1793                        p_info->iid % cxts_per_p * conn_cxt_size;
1794
1795        DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT),
1796                   "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
1797                   p_info->iid / cxts_per_p, p_info->p_cxt, p_info->iid);
1798
1799        return 0;
1800}
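
/* Worked example (editorial): assuming a 64K ILT page (hw_p_size == 4)
 * and a hypothetical CONN_CXT_SIZE of 512 bytes, cxts_per_p = 65536 / 512
 * = 128. For iid 300 the context then sits on shadow line 300 / 128 = 2,
 * at byte offset (300 % 128) * 512 = 22528 within that page.
 */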
1801
1802static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
1803                                   struct qed_rdma_pf_params *p_params)
1804{
1805        u32 num_cons, num_tasks, num_qps, num_mrs, num_srqs;
1806        enum protocol_type proto;
1807
1808        num_mrs = min_t(u32, RDMA_MAX_TIDS, p_params->num_mrs);
1809        num_tasks = num_mrs;    /* each mr uses a single task id */
1810        num_srqs = min_t(u32, 32 * 1024, p_params->num_srqs);
1811
1812        switch (p_hwfn->hw_info.personality) {
1813        case QED_PCI_ETH_ROCE:
1814                num_qps = min_t(u32, ROCE_MAX_QPS, p_params->num_qps);
1815                num_cons = num_qps * 2; /* each QP requires two connections */
1816                proto = PROTOCOLID_ROCE;
1817                break;
1818        default:
1819                return;
1820        }
1821
1822        if (num_cons && num_tasks) {
1823                qed_cxt_set_proto_cid_count(p_hwfn, proto, num_cons, 0);
1824
1825                /* Deliberately passing PROTOCOLID_ROCE for the task id,
1826                 * since iWARP and RoCE share the task id space.
1827                 */
1828                qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ROCE,
1829                                            QED_CXT_ROCE_TID_SEG, 1,
1830                                            num_tasks, false);
1831                qed_cxt_set_srq_count(p_hwfn, num_srqs);
1832        } else {
1833                DP_INFO(p_hwfn->cdev,
1834                        "RDMA personality used without setting params!\n");
1835        }
1836}
1837
1838int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
1839{
1840        /* Set the number of required CORE connections */
1841        u32 core_cids = 1; /* SPQ */
1842
1843        if (p_hwfn->using_ll2)
1844                core_cids += 4;
1845        qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
1846
1847        switch (p_hwfn->hw_info.personality) {
1848        case QED_PCI_ETH_ROCE:
1849        {
1850                qed_rdma_set_pf_params(p_hwfn,
1851                                       &p_hwfn->
1852                                       pf_params.rdma_pf_params);
1853                /* no need for break since RoCE coexists with Ethernet */
1854        }
1855        case QED_PCI_ETH:
1856        {
1857                struct qed_eth_pf_params *p_params =
1858                    &p_hwfn->pf_params.eth_pf_params;
1859
1860                qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
1861                                            p_params->num_cons, 1);
1862                break;
1863        }
1864        case QED_PCI_ISCSI:
1865        {
1866                struct qed_iscsi_pf_params *p_params;
1867
1868                p_params = &p_hwfn->pf_params.iscsi_pf_params;
1869
1870                if (p_params->num_cons && p_params->num_tasks) {
1871                        qed_cxt_set_proto_cid_count(p_hwfn,
1872                                                    PROTOCOLID_ISCSI,
1873                                                    p_params->num_cons,
1874                                                    0);
1875
1876                        qed_cxt_set_proto_tid_count(p_hwfn,
1877                                                    PROTOCOLID_ISCSI,
1878                                                    QED_CXT_ISCSI_TID_SEG,
1879                                                    0,
1880                                                    p_params->num_tasks,
1881                                                    true);
1882                } else {
1883                        DP_INFO(p_hwfn->cdev,
1884                                "Iscsi personality used without setting params!\n");
1885                }
1886                break;
1887        }
1888        default:
1889                return -EINVAL;
1890        }
1891
1892        return 0;
1893}
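
/* Usage sketch (editorial): the protocol driver populates
 * p_hwfn->pf_params before this runs, e.g. with hypothetical numbers for
 * a RoCE personality:
 *
 *	p_hwfn->pf_params.rdma_pf_params.num_qps = 1024;
 *	p_hwfn->pf_params.rdma_pf_params.num_mrs = 4096;
 *	p_hwfn->pf_params.rdma_pf_params.num_srqs = 128;
 *
 * qed_cxt_set_pf_params() then converts those requests into per-protocol
 * cid/tid counts via qed_cxt_set_proto_cid_count() and
 * qed_cxt_set_proto_tid_count().
 */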
1894
1895int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
1896                             struct qed_tid_mem *p_info)
1897{
1898        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1899        u32 proto, seg, total_lines, i, shadow_line;
1900        struct qed_ilt_client_cfg *p_cli;
1901        struct qed_ilt_cli_blk *p_fl_seg;
1902        struct qed_tid_seg *p_seg_info;
1903
1904        /* Verify the personality */
1905        switch (p_hwfn->hw_info.personality) {
1906        case QED_PCI_ISCSI:
1907                proto = PROTOCOLID_ISCSI;
1908                seg = QED_CXT_ISCSI_TID_SEG;
1909                break;
1910        default:
1911                return -EINVAL;
1912        }
1913
1914        p_cli = &p_mngr->clients[ILT_CLI_CDUT];
1915        if (!p_cli->active)
1916                return -EINVAL;
1917
1918        p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
1919        if (!p_seg_info->has_fl_mem)
1920                return -EINVAL;
1921
1922        p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
1923        total_lines = DIV_ROUND_UP(p_fl_seg->total_size,
1924                                   p_fl_seg->real_size_in_page);
1925
1926        for (i = 0; i < total_lines; i++) {
1927                shadow_line = i + p_fl_seg->start_line -
1928                    p_hwfn->p_cxt_mngr->pf_start_line;
1929                p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].p_virt;
1930        }
1931        p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
1932            p_fl_seg->real_size_in_page;
1933        p_info->tid_size = p_mngr->task_type_size[p_seg_info->type];
1934        p_info->num_tids_per_block = p_fl_seg->real_size_in_page /
1935            p_info->tid_size;
1936
1937        return 0;
1938}
1939
1940/* This function is very RoCE oriented; if another protocol wants this
1941 * feature in the future, the function will need to be made more generic.
1942 */
1943int
1944qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
1945                          enum qed_cxt_elem_type elem_type, u32 iid)
1946{
1947        u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line;
1948        struct qed_ilt_client_cfg *p_cli;
1949        struct qed_ilt_cli_blk *p_blk;
1950        struct qed_ptt *p_ptt;
1951        dma_addr_t p_phys;
1952        u64 ilt_hw_entry;
1953        void *p_virt;
1954        int rc = 0;
1955
1956        switch (elem_type) {
1957        case QED_ELEM_CXT:
1958                p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
1959                elem_size = CONN_CXT_SIZE(p_hwfn);
1960                p_blk = &p_cli->pf_blks[CDUC_BLK];
1961                break;
1962        case QED_ELEM_SRQ:
1963                p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
1964                elem_size = SRQ_CXT_SIZE;
1965                p_blk = &p_cli->pf_blks[SRQ_BLK];
1966                break;
1967        case QED_ELEM_TASK:
1968                p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
1969                elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
1970                p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
1971                break;
1972        default:
1973                DP_NOTICE(p_hwfn, "Invalid elem type = %d\n", elem_type);
1974                return -EINVAL;
1975        }
1976
1977        /* Calculate line in ilt */
1978        hw_p_size = p_cli->p_size.val;
1979        elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
1980        line = p_blk->start_line + (iid / elems_per_p);
1981        shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line;
1982
1983        /* If line is already allocated, do nothing, otherwise allocate it and
1984         * write it to the PSWRQ2 registers.
1985         * This section can be run in parallel from different contexts and thus
1986         * a mutex protection is needed.
1987         */
1988
1989        mutex_lock(&p_hwfn->p_cxt_mngr->mutex);
1990
1991        if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt)
1992                goto out0;
1993
1994        p_ptt = qed_ptt_acquire(p_hwfn);
1995        if (!p_ptt) {
1996                DP_NOTICE(p_hwfn,
1997                          "QED_TIME_OUT on ptt acquire - dynamic allocation");
1998                rc = -EBUSY;
1999                goto out0;
2000        }
2001
2002        p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
2003                                    p_blk->real_size_in_page,
2004                                    &p_phys, GFP_KERNEL);
2005        if (!p_virt) {
2006                rc = -ENOMEM;
2007                goto out1;
2008        }
2009        memset(p_virt, 0, p_blk->real_size_in_page);
2010
2011        /* configuration of refTagMask to 0xF is required for RoCE DIF MR only,
2012         * to compensate for a HW bug, but it is configured even if DIF is not
2013         * enabled. This is harmless and allows us to avoid a dedicated API. We
2014         * configure the field for all of the contexts on the newly allocated
2015         * page.
2016         */
2017        if (elem_type == QED_ELEM_TASK) {
2018                u32 elem_i;
2019                u8 *elem_start = (u8 *)p_virt;
2020                union type1_task_context *elem;
2021
2022                for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
2023                        elem = (union type1_task_context *)elem_start;
2024                        SET_FIELD(elem->roce_ctx.tdif_context.flags1,
2025                                  TDIF_TASK_CONTEXT_REFTAGMASK, 0xf);
2026                        elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
2027                }
2028        }
2029
2030        p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt = p_virt;
2031        p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys = p_phys;
2032        p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
2033            p_blk->real_size_in_page;
2034
2035        /* compute absolute offset */
2036        reg_offset = PSWRQ2_REG_ILT_MEMORY +
2037            (line * ILT_REG_SIZE_IN_BYTES * ILT_ENTRY_IN_REGS);
2038
2039        ilt_hw_entry = 0;
2040        SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
2041        SET_FIELD(ilt_hw_entry,
2042                  ILT_ENTRY_PHY_ADDR,
2043                  (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys >> 12));
2044
2045        /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
2046        qed_dmae_host2grc(p_hwfn, p_ptt, (u64) (uintptr_t)&ilt_hw_entry,
2047                          reg_offset, sizeof(ilt_hw_entry) / sizeof(u32), 0);
2048
2049        if (elem_type == QED_ELEM_CXT) {
2050                u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
2051                    elems_per_p;
2052
2053                /* Update the relevant register in the parser */
2054                qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF,
2055                       last_cid_allocated - 1);
2056
2057                if (!p_hwfn->b_rdma_enabled_in_prs) {
2058                        /* Enable RoCE search */
2059                        qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
2060                        p_hwfn->b_rdma_enabled_in_prs = true;
2061                }
2062        }
2063
2064out1:
2065        qed_ptt_release(p_hwfn, p_ptt);
2066out0:
2067        mutex_unlock(&p_hwfn->p_cxt_mngr->mutex);
2068
2069        return rc;
2070}
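
/* Usage sketch (editorial): a RoCE flow would back a cid on demand before
 * first use, e.g.
 *
 *	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, cid);
 *	if (rc)
 *		return rc;
 *
 * The call is effectively idempotent per ILT line: if the page backing
 * this iid was already allocated (possibly for a neighbouring iid on the
 * same line), it returns 0 under the mutex without touching the HW.
 */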
2071
2072/* This function is very RoCE oriented; if another protocol wants this
2073 * feature in the future, the function will need to be made more generic.
2074 */
2075static int
2076qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn,
2077                       enum qed_cxt_elem_type elem_type,
2078                       u32 start_iid, u32 count)
2079{
2080        u32 start_line, end_line, shadow_start_line, shadow_end_line;
2081        u32 reg_offset, elem_size, hw_p_size, elems_per_p;
2082        struct qed_ilt_client_cfg *p_cli;
2083        struct qed_ilt_cli_blk *p_blk;
2084        u32 end_iid = start_iid + count;
2085        struct qed_ptt *p_ptt;
2086        u64 ilt_hw_entry = 0;
2087        u32 i;
2088
2089        switch (elem_type) {
2090        case QED_ELEM_CXT:
2091                p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
2092                elem_size = CONN_CXT_SIZE(p_hwfn);
2093                p_blk = &p_cli->pf_blks[CDUC_BLK];
2094                break;
2095        case QED_ELEM_SRQ:
2096                p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
2097                elem_size = SRQ_CXT_SIZE;
2098                p_blk = &p_cli->pf_blks[SRQ_BLK];
2099                break;
2100        case QED_ELEM_TASK:
2101                p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2102                elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
2103                p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
2104                break;
2105        default:
2106                DP_NOTICE(p_hwfn, "Invalid elem type = %d\n", elem_type);
2107                return -EINVAL;
2108        }
2109
2110        /* Calculate line in ilt */
2111        hw_p_size = p_cli->p_size.val;
2112        elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
2113        start_line = p_blk->start_line + (start_iid / elems_per_p);
2114        end_line = p_blk->start_line + (end_iid / elems_per_p);
2115        if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p))
2116                end_line--;
2117
2118        shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line;
2119        shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line;
2120
2121        p_ptt = qed_ptt_acquire(p_hwfn);
2122        if (!p_ptt) {
2123                DP_NOTICE(p_hwfn,
2124                          "QED_TIME_OUT on ptt acquire - dynamic allocation");
2125                return -EBUSY;
2126        }
2127
2128        for (i = shadow_start_line; i < shadow_end_line; i++) {
2129                if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt)
2130                        continue;
2131
2132                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
2133                                  p_hwfn->p_cxt_mngr->ilt_shadow[i].size,
2134                                  p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt,
2135                                  p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys);
2136
2137                p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = NULL;
2138                p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0;
2139                p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
2140
2141                /* compute absolute offset */
2142                reg_offset = PSWRQ2_REG_ILT_MEMORY +
2143                    ((start_line++) * ILT_REG_SIZE_IN_BYTES *
2144                     ILT_ENTRY_IN_REGS);
2145
2146                /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a
2147                 * wide-bus.
2148                 */
2149                qed_dmae_host2grc(p_hwfn, p_ptt,
2150                                  (u64) (uintptr_t) &ilt_hw_entry,
2151                                  reg_offset,
2152                                  sizeof(ilt_hw_entry) / sizeof(u32),
2153                                  0);
2154        }
2155
2156        qed_ptt_release(p_hwfn, p_ptt);
2157
2158        return 0;
2159}
2160
2161int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto)
2162{
2163        int rc;
2164        u32 cid;
2165
2166        /* Free Connection CXT */
2167        rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_CXT,
2168                                    qed_cxt_get_proto_cid_start(p_hwfn,
2169                                                                proto),
2170                                    qed_cxt_get_proto_cid_count(p_hwfn,
2171                                                                proto, &cid));
2172
2173        if (rc)
2174                return rc;
2175
2176        /* Free Task CXT */
2177        rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0,
2178                                    qed_cxt_get_proto_tid_count(p_hwfn, proto));
2179        if (rc)
2180                return rc;
2181
2182        /* Free TSDM CXT */
2183        rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ, 0,
2184                                    qed_cxt_get_srq_count(p_hwfn));
2185
2186        return rc;
2187}
2188
2189int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
2190                         u32 tid, u8 ctx_type, void **pp_task_ctx)
2191{
2192        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
2193        struct qed_ilt_client_cfg *p_cli;
2194        struct qed_ilt_cli_blk *p_seg;
2195        struct qed_tid_seg *p_seg_info;
2196        u32 proto, seg;
2197        u32 total_lines;
2198        u32 tid_size, ilt_idx;
2199        u32 num_tids_per_block;
2200
2201        /* Verify the personality */
2202        switch (p_hwfn->hw_info.personality) {
2203        case QED_PCI_ISCSI:
2204                proto = PROTOCOLID_ISCSI;
2205                seg = QED_CXT_ISCSI_TID_SEG;
2206                break;
2207        default:
2208                return -EINVAL;
2209        }
2210
2211        p_cli = &p_mngr->clients[ILT_CLI_CDUT];
2212        if (!p_cli->active)
2213                return -EINVAL;
2214
2215        p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
2216
2217        if (ctx_type == QED_CTX_WORKING_MEM) {
2218                p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)];
2219        } else if (ctx_type == QED_CTX_FL_MEM) {
2220                if (!p_seg_info->has_fl_mem)
2221                        return -EINVAL;
2222                p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
2223        } else {
2224                return -EINVAL;
2225        }
2226        total_lines = DIV_ROUND_UP(p_seg->total_size, p_seg->real_size_in_page);
2227        tid_size = p_mngr->task_type_size[p_seg_info->type];
2228        num_tids_per_block = p_seg->real_size_in_page / tid_size;
2229
2230        if (total_lines <= tid / num_tids_per_block)
2231                return -EINVAL;
2232
2233        ilt_idx = tid / num_tids_per_block + p_seg->start_line -
2234                  p_mngr->pf_start_line;
2235        *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].p_virt +
2236                       (tid % num_tids_per_block) * tid_size;
2237
2238        return 0;
2239}
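
/* Worked example (editorial): with a hypothetical 128-byte task context
 * and a real_size_in_page of 0x8000, num_tids_per_block = 0x8000 / 128 =
 * 256; tid 1000 then maps to relative line 1000 / 256 = 3 and byte offset
 * (1000 % 256) * 128 = 29696 within that shadow block.
 */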
2240