linux/drivers/scsi/bnx2fc/bnx2fc_hwi.c
/* bnx2fc_hwi.c: QLogic Linux FCoE offload driver.
 * This file contains the code for the low level functions that interact
 * with the 57712 FCoE firmware.
 *
 * Copyright (c) 2008-2013 Broadcom Corporation
 * Copyright (c) 2014-2016 QLogic Corporation
 * Copyright (c) 2016-2017 Cavium Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"

DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);

static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
                                        struct fcoe_kcqe *new_cqe_kcqe);
static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
                                        struct fcoe_kcqe *ofld_kcqe);
static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
                                                struct fcoe_kcqe *ofld_kcqe);
static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
                                        struct fcoe_kcqe *destroy_kcqe);

int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
{
        struct fcoe_kwqe_stat stat_req;
        struct kwqe *kwqe_arr[2];
        int num_kwqes = 1;
        int rc = 0;

        memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat));
        stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT;
        stat_req.hdr.flags =
                (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

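        /*
         * The 64-bit stats buffer DMA address is passed to the firmware
         * as two 32-bit halves (lo/hi), as with all DMA addresses in
         * these KWQEs.
         */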
        stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma;
        stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32);

        kwqe_arr[0] = (struct kwqe *) &stat_req;

        if (hba->cnic && hba->cnic->submit_kwqes)
                rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

        return rc;
}

/**
 * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w
 *
 * @hba:        adapter structure pointer
 *
 * Send down FCoE firmware init KWQEs which initiate the initial handshake
 *      with the f/w.
 *
 */
int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
{
        struct fcoe_kwqe_init1 fcoe_init1;
        struct fcoe_kwqe_init2 fcoe_init2;
        struct fcoe_kwqe_init3 fcoe_init3;
        struct kwqe *kwqe_arr[3];
        int num_kwqes = 3;
        int rc = 0;

        if (!hba->cnic) {
                printk(KERN_ERR PFX "hba->cnic NULL during fcoe fw init\n");
                return -ENODEV;
        }

        /* fill init1 KWQE */
        memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1));
        fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1;
        fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
                                        FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

        fcoe_init1.num_tasks = hba->max_tasks;
        fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
        fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
        fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
        fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX;
        fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
        fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32);
        fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma;
        fcoe_init1.task_list_pbl_addr_hi =
                                (u32) ((u64) hba->task_ctx_bd_dma >> 32);
        fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU;

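        /* Pass the host page size to the firmware as a log2 value */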
        fcoe_init1.flags = (PAGE_SHIFT <<
                                FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT);

        fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG;

        /* fill init2 KWQE */
        memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2));
        fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2;
        fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
                                        FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

        fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION;
        fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION;


        fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
        fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
                                           ((u64) hba->hash_tbl_pbl_dma >> 32);

        fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma;
        fcoe_init2.t2_hash_tbl_addr_hi = (u32)
                                          ((u64) hba->t2_hash_tbl_dma >> 32);

        fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma;
        fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32)
                                        ((u64) hba->t2_hash_tbl_ptr_dma >> 32);

        fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS;

        /* fill init3 KWQE */
        memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3));
        fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3;
        fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
                                        FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
        fcoe_init3.error_bit_map_lo = 0xffffffff;
        fcoe_init3.error_bit_map_hi = 0xffffffff;

        /*
         * enable both cached connection and cached tasks
         * 0 = none, 1 = cached connection, 2 = cached tasks, 3 = both
         */
        fcoe_init3.perf_config = 3;

        kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
        kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
        kwqe_arr[2] = (struct kwqe *) &fcoe_init3;

        if (hba->cnic && hba->cnic->submit_kwqes)
                rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

        return rc;
}
int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba)
{
        struct fcoe_kwqe_destroy fcoe_destroy;
        struct kwqe *kwqe_arr[2];
        int num_kwqes = 1;
        int rc = -1;

        /* fill destroy KWQE */
        memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy));
        fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY;
        fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
                                        FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
        kwqe_arr[0] = (struct kwqe *) &fcoe_destroy;

        if (hba->cnic && hba->cnic->submit_kwqes)
                rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
        return rc;
}

/**
 * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process
 *
 * @port:               port structure pointer
 * @tgt:                bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
                                        struct bnx2fc_rport *tgt)
{
        struct fc_lport *lport = port->lport;
        struct bnx2fc_interface *interface = port->priv;
        struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
        struct bnx2fc_hba *hba = interface->hba;
        struct kwqe *kwqe_arr[4];
        struct fcoe_kwqe_conn_offload1 ofld_req1;
        struct fcoe_kwqe_conn_offload2 ofld_req2;
        struct fcoe_kwqe_conn_offload3 ofld_req3;
        struct fcoe_kwqe_conn_offload4 ofld_req4;
        struct fc_rport_priv *rdata = tgt->rdata;
        struct fc_rport *rport = tgt->rport;
        int num_kwqes = 4;
        u32 port_id;
        int rc = 0;
        u16 conn_id;

        /* Initialize offload request 1 structure */
        memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1));

        ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1;
        ofld_req1.hdr.flags =
                (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);


        conn_id = (u16)tgt->fcoe_conn_id;
        ofld_req1.fcoe_conn_id = conn_id;


        ofld_req1.sq_addr_lo = (u32) tgt->sq_dma;
        ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32);

        ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma;
        ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32);

        ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma;
        ofld_req1.rq_first_pbe_addr_hi =
                                (u32)((u64) tgt->rq_dma >> 32);

        ofld_req1.rq_prod = 0x8000;

        /* Initialize offload request 2 structure */
        memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2));

        ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2;
        ofld_req2.hdr.flags =
                (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

        ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size;

        ofld_req2.cq_addr_lo = (u32) tgt->cq_dma;
        ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32);

        ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma;
        ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32);

        ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma;
        ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32);

        /* Initialize offload request 3 structure */
        memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3));

        ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3;
        ofld_req3.hdr.flags =
                (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

        ofld_req3.vlan_tag = interface->vlan_id <<
                                FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
        ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;

        port_id = fc_host_port_id(lport->host);
        if (port_id == 0) {
                BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n");
                return -EINVAL;
        }

        /*
         * Store s_id of the initiator for further reference. This will
         * be used during disable/destroy during linkdown processing as
         * when the lport is reset, the port_id also is reset to 0
         */
        tgt->sid = port_id;
        ofld_req3.s_id[0] = (port_id & 0x000000FF);
        ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8;
        ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16;

        port_id = rport->port_id;
        ofld_req3.d_id[0] = (port_id & 0x000000FF);
        ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8;
        ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16;

        ofld_req3.tx_total_conc_seqs = rdata->max_seq;

        ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq;
        ofld_req3.rx_max_fc_pay_len  = lport->mfs;

        ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS;
        ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS;
        ofld_req3.rx_open_seqs_exch_c3 = 1;

        ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma;
        ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32);

        /* set mul_n_port_ids supported flag to 0, until it is supported */
        ofld_req3.flags = 0;
        /*
        ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) <<
                            FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT);
        */
        /* Info from PLOGI response */
        ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) <<
                             FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT);

        ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
                             FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);

        /*
         * Info from PRLI response, this info is used for sequence level error
         * recovery support
         */
        if (tgt->dev_type == TYPE_TAPE) {
                ofld_req3.flags |= 1 <<
                                    FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT;
                ofld_req3.flags |= (((rdata->flags & FC_RP_FLAGS_REC_SUPPORTED)
                                    ? 1 : 0) <<
                                    FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT);
        }

        /* vlan flag */
        ofld_req3.flags |= (interface->vlan_enabled <<
                            FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);

        /* C2_VALID and ACK flags are not set as they are not supported */


        /* Initialize offload request 4 structure */
        memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4));
        ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4;
        ofld_req4.hdr.flags =
                (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

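        /* e_d_tov is in milliseconds; scale it down to the firmware's timer units */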
        ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;


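        /*
         * MAC addresses are programmed as three 16-bit words (lo/mid/hi)
         * with the octets in reverse order relative to the stored address.
         */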
        ofld_req4.src_mac_addr_lo[0] =  port->data_src_addr[5];
                                                        /* local mac */
        ofld_req4.src_mac_addr_lo[1] =  port->data_src_addr[4];
        ofld_req4.src_mac_addr_mid[0] =  port->data_src_addr[3];
        ofld_req4.src_mac_addr_mid[1] =  port->data_src_addr[2];
        ofld_req4.src_mac_addr_hi[0] =  port->data_src_addr[1];
        ofld_req4.src_mac_addr_hi[1] =  port->data_src_addr[0];
        ofld_req4.dst_mac_addr_lo[0] =  ctlr->dest_addr[5];
                                                        /* fcf mac */
        ofld_req4.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
        ofld_req4.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
        ofld_req4.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
        ofld_req4.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
        ofld_req4.dst_mac_addr_hi[1] = ctlr->dest_addr[0];

        ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
        ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);

        ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma;
        ofld_req4.confq_pbl_base_addr_hi =
                                        (u32)((u64) tgt->confq_pbl_dma >> 32);

        kwqe_arr[0] = (struct kwqe *) &ofld_req1;
        kwqe_arr[1] = (struct kwqe *) &ofld_req2;
        kwqe_arr[2] = (struct kwqe *) &ofld_req3;
        kwqe_arr[3] = (struct kwqe *) &ofld_req4;

        if (hba->cnic && hba->cnic->submit_kwqes)
                rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

        return rc;
}

/**
 * bnx2fc_send_session_enable_req - initiates FCoE Session enablement
 *
 * @port:               port structure pointer
 * @tgt:                bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_enable_req(struct fcoe_port *port,
                                        struct bnx2fc_rport *tgt)
{
        struct kwqe *kwqe_arr[2];
        struct bnx2fc_interface *interface = port->priv;
        struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
        struct bnx2fc_hba *hba = interface->hba;
        struct fcoe_kwqe_conn_enable_disable enbl_req;
        struct fc_lport *lport = port->lport;
        struct fc_rport *rport = tgt->rport;
        int num_kwqes = 1;
        int rc = 0;
        u32 port_id;

        memset(&enbl_req, 0x00,
               sizeof(struct fcoe_kwqe_conn_enable_disable));
        enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN;
        enbl_req.hdr.flags =
                (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

        enbl_req.src_mac_addr_lo[0] =  port->data_src_addr[5];
                                                        /* local mac */
        enbl_req.src_mac_addr_lo[1] =  port->data_src_addr[4];
        enbl_req.src_mac_addr_mid[0] =  port->data_src_addr[3];
        enbl_req.src_mac_addr_mid[1] =  port->data_src_addr[2];
        enbl_req.src_mac_addr_hi[0] =  port->data_src_addr[1];
        enbl_req.src_mac_addr_hi[1] =  port->data_src_addr[0];
        memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);

        enbl_req.dst_mac_addr_lo[0] =  ctlr->dest_addr[5];
        enbl_req.dst_mac_addr_lo[1] =  ctlr->dest_addr[4];
        enbl_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
        enbl_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
        enbl_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
        enbl_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];

        port_id = fc_host_port_id(lport->host);
        if (port_id != tgt->sid) {
                printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x,"
                                "sid = 0x%x\n", port_id, tgt->sid);
                port_id = tgt->sid;
        }
        enbl_req.s_id[0] = (port_id & 0x000000FF);
        enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
        enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16;

        port_id = rport->port_id;
        enbl_req.d_id[0] = (port_id & 0x000000FF);
        enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
        enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
        enbl_req.vlan_tag = interface->vlan_id <<
                                FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
        enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
        enbl_req.vlan_flag = interface->vlan_enabled;
        enbl_req.context_id = tgt->context_id;
        enbl_req.conn_id = tgt->fcoe_conn_id;

        kwqe_arr[0] = (struct kwqe *) &enbl_req;

        if (hba->cnic && hba->cnic->submit_kwqes)
                rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
        return rc;
}

/**
 * bnx2fc_send_session_disable_req - initiates FCoE Session disable
 *
 * @port:               port structure pointer
 * @tgt:                bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_disable_req(struct fcoe_port *port,
                                    struct bnx2fc_rport *tgt)
{
        struct bnx2fc_interface *interface = port->priv;
        struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
        struct bnx2fc_hba *hba = interface->hba;
        struct fcoe_kwqe_conn_enable_disable disable_req;
        struct kwqe *kwqe_arr[2];
        struct fc_rport *rport = tgt->rport;
        int num_kwqes = 1;
        int rc = 0;
        u32 port_id;

        memset(&disable_req, 0x00,
               sizeof(struct fcoe_kwqe_conn_enable_disable));
        disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN;
        disable_req.hdr.flags =
                (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

        disable_req.src_mac_addr_lo[0] =  tgt->src_addr[5];
        disable_req.src_mac_addr_lo[1] =  tgt->src_addr[4];
        disable_req.src_mac_addr_mid[0] =  tgt->src_addr[3];
        disable_req.src_mac_addr_mid[1] =  tgt->src_addr[2];
        disable_req.src_mac_addr_hi[0] =  tgt->src_addr[1];
        disable_req.src_mac_addr_hi[1] =  tgt->src_addr[0];

        disable_req.dst_mac_addr_lo[0] =  ctlr->dest_addr[5];
        disable_req.dst_mac_addr_lo[1] =  ctlr->dest_addr[4];
        disable_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
        disable_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
        disable_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
        disable_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];

        port_id = tgt->sid;
        disable_req.s_id[0] = (port_id & 0x000000FF);
        disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
        disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16;


        port_id = rport->port_id;
        disable_req.d_id[0] = (port_id & 0x000000FF);
        disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
        disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
        disable_req.context_id = tgt->context_id;
        disable_req.conn_id = tgt->fcoe_conn_id;
        disable_req.vlan_tag = interface->vlan_id <<
                                FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
        disable_req.vlan_tag |=
                        3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
        disable_req.vlan_flag = interface->vlan_enabled;

        kwqe_arr[0] = (struct kwqe *) &disable_req;

        if (hba->cnic && hba->cnic->submit_kwqes)
                rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

        return rc;
}

/**
 * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy
 *
 * @hba:                adapter structure pointer
 * @tgt:                bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
                                        struct bnx2fc_rport *tgt)
{
        struct fcoe_kwqe_conn_destroy destroy_req;
        struct kwqe *kwqe_arr[2];
        int num_kwqes = 1;
        int rc = 0;

        memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy));
        destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN;
        destroy_req.hdr.flags =
                (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

        destroy_req.context_id = tgt->context_id;
        destroy_req.conn_id = tgt->fcoe_conn_id;

        kwqe_arr[0] = (struct kwqe *) &destroy_req;

        if (hba->cnic && hba->cnic->submit_kwqes)
                rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

        return rc;
}

static bool is_valid_lport(struct bnx2fc_hba *hba, struct fc_lport *lport)
{
        struct bnx2fc_lport *blport;

        spin_lock_bh(&hba->hba_lock);
        list_for_each_entry(blport, &hba->vports, list) {
                if (blport->lport == lport) {
                        spin_unlock_bh(&hba->hba_lock);
                        return true;
                }
        }
        spin_unlock_bh(&hba->hba_lock);
        return false;

}


static void bnx2fc_unsol_els_work(struct work_struct *work)
{
        struct bnx2fc_unsol_els *unsol_els;
        struct fc_lport *lport;
        struct bnx2fc_hba *hba;
        struct fc_frame *fp;

        unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
        lport = unsol_els->lport;
        fp = unsol_els->fp;
        hba = unsol_els->hba;
        if (is_valid_lport(hba, lport))
                fc_exch_recv(lport, fp);
        kfree(unsol_els);
}

void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
                                   unsigned char *buf,
                                   u32 frame_len, u16 l2_oxid)
{
        struct fcoe_port *port = tgt->port;
        struct fc_lport *lport = port->lport;
        struct bnx2fc_interface *interface = port->priv;
        struct bnx2fc_unsol_els *unsol_els;
        struct fc_frame_header *fh;
        struct fc_frame *fp;
        struct sk_buff *skb;
        u32 payload_len;
        u32 crc;
        u8 op;


        unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC);
        if (!unsol_els) {
                BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n");
                return;
        }

        BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n",
                l2_oxid, frame_len);

        payload_len = frame_len - sizeof(struct fc_frame_header);

        fp = fc_frame_alloc(lport, payload_len);
        if (!fp) {
                printk(KERN_ERR PFX "fc_frame_alloc failure\n");
                kfree(unsol_els);
                return;
        }

        fh = (struct fc_frame_header *) fc_frame_header_get(fp);
        /* Copy FC Frame header and payload into the frame */
        memcpy(fh, buf, frame_len);

        if (l2_oxid != FC_XID_UNKNOWN)
                fh->fh_ox_id = htons(l2_oxid);

        skb = fp_skb(fp);

        if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) ||
            (fh->fh_r_ctl == FC_RCTL_ELS_REP)) {

                if (fh->fh_type == FC_TYPE_ELS) {
                        op = fc_frame_payload_op(fp);
                        if ((op == ELS_TEST) || (op == ELS_ESTC) ||
                            (op == ELS_FAN) || (op == ELS_CSU)) {
                                /*
                                 * No need to reply for these
                                 * ELS requests
                                 */
                                printk(KERN_ERR PFX "dropping ELS 0x%x\n", op);
                                kfree_skb(skb);
                                kfree(unsol_els);
                                return;
                        }
                }
                crc = fcoe_fc_crc(fp);
                fc_frame_init(fp);
                fr_dev(fp) = lport;
                fr_sof(fp) = FC_SOF_I3;
                fr_eof(fp) = FC_EOF_T;
                fr_crc(fp) = cpu_to_le32(~crc);
                unsol_els->lport = lport;
                unsol_els->hba = interface->hba;
                unsol_els->fp = fp;
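                /* Hand the frame to libfc (fc_exch_recv) from worker context */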
                INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
                queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
        } else {
                BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl);
                kfree_skb(skb);
                kfree(unsol_els);
        }
}

static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
{
        u8 num_rq;
        struct fcoe_err_report_entry *err_entry;
        unsigned char *rq_data;
        unsigned char *buf = NULL, *buf1;
        int i;
        u16 xid;
        u32 frame_len, len;
        struct bnx2fc_cmd *io_req = NULL;
        struct fcoe_task_ctx_entry *task, *task_page;
        struct bnx2fc_interface *interface = tgt->port->priv;
        struct bnx2fc_hba *hba = interface->hba;
        int task_idx, index;
        int rc = 0;
        u64 err_warn_bit_map;
        u8 err_warn = 0xff;


        BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
        switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) {
        case FCOE_UNSOLICITED_FRAME_CQE_TYPE:
                frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >>
                             FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT;

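                /* Number of RQ buffers (rounded up) needed to hold the frame */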
                num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ;

                spin_lock_bh(&tgt->tgt_lock);
                rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq);
                spin_unlock_bh(&tgt->tgt_lock);

                if (rq_data) {
                        buf = rq_data;
                } else {
                        buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ),
                                              GFP_ATOMIC);

                        if (!buf1) {
                                BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n");
                                break;
                        }

                        for (i = 0; i < num_rq; i++) {
                                spin_lock_bh(&tgt->tgt_lock);
                                rq_data = (unsigned char *)
                                           bnx2fc_get_next_rqe(tgt, 1);
                                spin_unlock_bh(&tgt->tgt_lock);
                                len = BNX2FC_RQ_BUF_SZ;
                                memcpy(buf1, rq_data, len);
                                buf1 += len;
                        }
                }
                bnx2fc_process_l2_frame_compl(tgt, buf, frame_len,
                                              FC_XID_UNKNOWN);

                if (buf != rq_data)
                        kfree(buf);
                spin_lock_bh(&tgt->tgt_lock);
                bnx2fc_return_rqe(tgt, num_rq);
                spin_unlock_bh(&tgt->tgt_lock);
                break;

        case FCOE_ERROR_DETECTION_CQE_TYPE:
                /*
                 * In case of error reporting CQE a single RQ entry
                 * is consumed.
                 */
                spin_lock_bh(&tgt->tgt_lock);
                num_rq = 1;
                err_entry = (struct fcoe_err_report_entry *)
                             bnx2fc_get_next_rqe(tgt, 1);
                xid = err_entry->fc_hdr.ox_id;
                BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
                BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
                        err_entry->data.err_warn_bitmap_hi,
                        err_entry->data.err_warn_bitmap_lo);
                BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
                        err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);


                if (xid > hba->max_xid) {
                        BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
                                   xid);
                        goto ret_err_rqe;
                }

                task_idx = xid / BNX2FC_TASKS_PER_PAGE;
                index = xid % BNX2FC_TASKS_PER_PAGE;
                task_page = (struct fcoe_task_ctx_entry *)
                                        hba->task_ctx[task_idx];
                task = &(task_page[index]);

                io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
                if (!io_req)
                        goto ret_err_rqe;

                if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
                        printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
                        goto ret_err_rqe;
                }

                if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
                                       &io_req->req_flags)) {
                        BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
                                            "progress.. ignore unsol err\n");
                        goto ret_err_rqe;
                }

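                /* Report the lowest bit set in the 64-bit err_warn bitmap */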
                err_warn_bit_map = (u64)
                        ((u64)err_entry->data.err_warn_bitmap_hi << 32) |
                        (u64)err_entry->data.err_warn_bitmap_lo;
                for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
                        if (err_warn_bit_map & (u64)((u64)1 << i)) {
                                err_warn = i;
                                break;
                        }
                }

                /*
                 * If ABTS is already in progress, and FW error is
                 * received after that, do not cancel the timeout_work
                 * and let the error recovery continue by explicitly
                 * logging out the target, when the ABTS eventually
                 * times out.
                 */
                if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
                        printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
                                            "in ABTS processing\n", xid);
                        goto ret_err_rqe;
                }
                BNX2FC_TGT_DBG(tgt, "err = 0x%x\n", err_warn);
                if (tgt->dev_type != TYPE_TAPE)
                        goto skip_rec;
                switch (err_warn) {
                case FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION:
                case FCOE_ERROR_CODE_DATA_OOO_RO:
                case FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT:
                case FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET:
                case FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ:
                case FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET:
                        BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n",
                                   xid);
                        memcpy(&io_req->err_entry, err_entry,
                               sizeof(struct fcoe_err_report_entry));
                        if (!test_bit(BNX2FC_FLAG_SRR_SENT,
                                      &io_req->req_flags)) {
                                spin_unlock_bh(&tgt->tgt_lock);
                                rc = bnx2fc_send_rec(io_req);
                                spin_lock_bh(&tgt->tgt_lock);

                                if (rc)
                                        goto skip_rec;
                        } else
                                printk(KERN_ERR PFX "SRR in progress\n");
                        goto ret_err_rqe;
                        break;
                default:
                        break;
                }

skip_rec:
                set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags);
                /*
                 * Cancel the timeout_work, as we received IO
                 * completion with FW error.
                 */
                if (cancel_delayed_work(&io_req->timeout_work))
                        kref_put(&io_req->refcount, bnx2fc_cmd_release);

                rc = bnx2fc_initiate_abts(io_req);
                if (rc != SUCCESS) {
                        printk(KERN_ERR PFX "err_warn: initiate_abts "
                                "failed xid = 0x%x. issue cleanup\n",
                                io_req->xid);
                        bnx2fc_initiate_cleanup(io_req);
                }
ret_err_rqe:
                bnx2fc_return_rqe(tgt, 1);
                spin_unlock_bh(&tgt->tgt_lock);
                break;

        case FCOE_WARNING_DETECTION_CQE_TYPE:
                /*
                 * In case of a warning reporting CQE, a single RQ entry
                 * is consumed.
                 */
                spin_lock_bh(&tgt->tgt_lock);
                num_rq = 1;
                err_entry = (struct fcoe_err_report_entry *)
                             bnx2fc_get_next_rqe(tgt, 1);
                xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
                BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
                BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x",
                        err_entry->data.err_warn_bitmap_hi,
                        err_entry->data.err_warn_bitmap_lo);
                BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
                        err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);

                if (xid > hba->max_xid) {
                        BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid);
                        goto ret_warn_rqe;
                }

                err_warn_bit_map = (u64)
                        ((u64)err_entry->data.err_warn_bitmap_hi << 32) |
                        (u64)err_entry->data.err_warn_bitmap_lo;
                for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
                        if (err_warn_bit_map & (u64) (1 << i)) {
                                err_warn = i;
                                break;
                        }
                }
                BNX2FC_TGT_DBG(tgt, "warn = 0x%x\n", err_warn);

                task_idx = xid / BNX2FC_TASKS_PER_PAGE;
                index = xid % BNX2FC_TASKS_PER_PAGE;
                task_page = (struct fcoe_task_ctx_entry *)
                             interface->hba->task_ctx[task_idx];
                task = &(task_page[index]);
                io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
                if (!io_req)
                        goto ret_warn_rqe;

                if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
                        printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
                        goto ret_warn_rqe;
                }

                memcpy(&io_req->err_entry, err_entry,
                       sizeof(struct fcoe_err_report_entry));

                if (err_warn == FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION)
                        /* REC_TOV is not a warning code */
                        BUG_ON(1);
                else
                        BNX2FC_TGT_DBG(tgt, "Unsolicited warning\n");
ret_warn_rqe:
                bnx2fc_return_rqe(tgt, 1);
                spin_unlock_bh(&tgt->tgt_lock);
                break;

        default:
                printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n");
                break;
        }
}

void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
{
        struct fcoe_task_ctx_entry *task;
        struct fcoe_task_ctx_entry *task_page;
        struct fcoe_port *port = tgt->port;
        struct bnx2fc_interface *interface = port->priv;
        struct bnx2fc_hba *hba = interface->hba;
        struct bnx2fc_cmd *io_req;
        int task_idx, index;
        u16 xid;
        u8  cmd_type;
        u8 rx_state = 0;
        u8 num_rq;

        spin_lock_bh(&tgt->tgt_lock);
        xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
        if (xid >= hba->max_tasks) {
                printk(KERN_ERR PFX "ERROR:xid out of range\n");
                spin_unlock_bh(&tgt->tgt_lock);
                return;
        }
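        /* Locate the firmware task context and the driver command for this XID */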
        task_idx = xid / BNX2FC_TASKS_PER_PAGE;
        index = xid % BNX2FC_TASKS_PER_PAGE;
        task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
        task = &(task_page[index]);

        num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
                   FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
                   FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);

        io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];

        if (io_req == NULL) {
                printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n");
                spin_unlock_bh(&tgt->tgt_lock);
                return;
        }

        /* Timestamp IO completion time */
        cmd_type = io_req->cmd_type;

        rx_state = ((task->rxwr_txrd.var_ctx.rx_flags &
                    FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE) >>
                    FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT);

        /* Process other IO completion types */
        switch (cmd_type) {
        case BNX2FC_SCSI_CMD:
                if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
                        bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
                        spin_unlock_bh(&tgt->tgt_lock);
                        return;
                }

                if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
                        bnx2fc_process_abts_compl(io_req, task, num_rq);
                else if (rx_state ==
                         FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
                        bnx2fc_process_cleanup_compl(io_req, task, num_rq);
                else
                        printk(KERN_ERR PFX "Invalid rx state - %d\n",
                                rx_state);
                break;

        case BNX2FC_TASK_MGMT_CMD:
                BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
                bnx2fc_process_tm_compl(io_req, task, num_rq);
                break;

        case BNX2FC_ABTS:
                /*
                 * ABTS request received by firmware. ABTS response
                 * will be delivered to the task belonging to the IO
                 * that was aborted
                 */
                BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n");
                kref_put(&io_req->refcount, bnx2fc_cmd_release);
                break;

        case BNX2FC_ELS:
                if (rx_state == FCOE_TASK_RX_STATE_COMPLETED)
                        bnx2fc_process_els_compl(io_req, task, num_rq);
                else if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
                        bnx2fc_process_abts_compl(io_req, task, num_rq);
                else if (rx_state ==
                         FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
                        bnx2fc_process_cleanup_compl(io_req, task, num_rq);
                else
                        printk(KERN_ERR PFX "Invalid rx state =  %d\n",
                                rx_state);
                break;

        case BNX2FC_CLEANUP:
                BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n");
                kref_put(&io_req->refcount, bnx2fc_cmd_release);
                break;

        case BNX2FC_SEQ_CLEANUP:
                BNX2FC_IO_DBG(io_req, "cq_compl(0x%x) - seq cleanup resp\n",
                              io_req->xid);
                bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state);
                kref_put(&io_req->refcount, bnx2fc_cmd_release);
                break;

        default:
                printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
                break;
        }
        spin_unlock_bh(&tgt->tgt_lock);
}

void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
{
        struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
        u32 msg;

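        /*
         * Publish the new CQ consumer index (with the current toggle bit)
         * through the per-connection doorbell to re-arm the CQ.
         */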
        wmb();
        rx_db->doorbell_cq_cons = tgt->cq_cons_idx | (tgt->cq_curr_toggle_bit <<
                        FCOE_CQE_TOGGLE_BIT_SHIFT);
        msg = *((u32 *)rx_db);
        writel(cpu_to_le32(msg), tgt->ctx_base);
        mmiowb();

}

static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
{
        struct bnx2fc_work *work;
        work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
        if (!work)
                return NULL;

        INIT_LIST_HEAD(&work->list);
        work->tgt = tgt;
        work->wqe = wqe;
        return work;
}

/* Pending work request completion */
static void bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
{
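        /*
         * Spread completions across the per-CPU I/O threads based on the
         * WQE value; fall back to inline processing if no thread is available.
         */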
        unsigned int cpu = wqe % num_possible_cpus();
        struct bnx2fc_percpu_s *fps;
        struct bnx2fc_work *work;

        fps = &per_cpu(bnx2fc_percpu, cpu);
        spin_lock_bh(&fps->fp_work_lock);
        if (fps->iothread) {
                work = bnx2fc_alloc_work(tgt, wqe);
                if (work) {
                        list_add_tail(&work->list, &fps->work_list);
                        wake_up_process(fps->iothread);
                        spin_unlock_bh(&fps->fp_work_lock);
                        return;
                }
        }
        spin_unlock_bh(&fps->fp_work_lock);
        bnx2fc_process_cq_compl(tgt, wqe);
}

int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
{
        struct fcoe_cqe *cq;
        u32 cq_cons;
        struct fcoe_cqe *cqe;
        u32 num_free_sqes = 0;
        u32 num_cqes = 0;
        u16 wqe;

        /*
         * cq_lock is a low contention lock used to protect
         * the CQ data structure from being freed up during
         * the upload operation
         */
        spin_lock_bh(&tgt->cq_lock);

        if (!tgt->cq) {
                printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n");
                spin_unlock_bh(&tgt->cq_lock);
                return 0;
        }
        cq = tgt->cq;
        cq_cons = tgt->cq_cons_idx;
        cqe = &cq[cq_cons];

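        /*
         * A CQE is new when its toggle bit matches tgt->cq_curr_toggle_bit;
         * the driver flips its copy of the bit each time the ring wraps.
         */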
        while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
               (tgt->cq_curr_toggle_bit <<
               FCOE_CQE_TOGGLE_BIT_SHIFT)) {

                /* new entry on the cq */
                if (wqe & FCOE_CQE_CQE_TYPE) {
                        /* Unsolicited event notification */
                        bnx2fc_process_unsol_compl(tgt, wqe);
                } else {
                        bnx2fc_pending_work(tgt, wqe);
                        num_free_sqes++;
                }
                cqe++;
                tgt->cq_cons_idx++;
                num_cqes++;

                if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
                        tgt->cq_cons_idx = 0;
                        cqe = cq;
                        tgt->cq_curr_toggle_bit =
                                1 - tgt->cq_curr_toggle_bit;
                }
        }
        if (num_cqes) {
                /* Arm CQ only if doorbell is mapped */
                if (tgt->ctx_base)
                        bnx2fc_arm_cq(tgt);
                atomic_add(num_free_sqes, &tgt->free_sqes);
        }
        spin_unlock_bh(&tgt->cq_lock);
        return 0;
}

/**
 * bnx2fc_fastpath_notification - process global event queue (KCQ)
 *
 * @hba:                adapter structure pointer
 * @new_cqe_kcqe:       pointer to newly DMA'd KCQ entry
 *
 * Fast path event notification handler
 */
static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
                                        struct fcoe_kcqe *new_cqe_kcqe)
{
        u32 conn_id = new_cqe_kcqe->fcoe_conn_id;
        struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];

        if (!tgt) {
                printk(KERN_ERR PFX "conn_id 0x%x not valid\n", conn_id);
                return;
        }

        bnx2fc_process_new_cqes(tgt);
}

/**
 * bnx2fc_process_ofld_cmpl - process FCoE session offload completion
 *
 * @hba:        adapter structure pointer
 * @ofld_kcqe:  connection offload kcqe pointer
 *
 * handle session offload completion, enable the session if offload is
 * successful.
 */
static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
                                        struct fcoe_kcqe *ofld_kcqe)
{
        struct bnx2fc_rport             *tgt;
        struct fcoe_port                *port;
        struct bnx2fc_interface         *interface;
        u32                             conn_id;
        u32                             context_id;

        conn_id = ofld_kcqe->fcoe_conn_id;
        context_id = ofld_kcqe->fcoe_conn_context_id;
        tgt = hba->tgt_ofld_list[conn_id];
        if (!tgt) {
                printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n");
                return;
        }
        BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
                ofld_kcqe->fcoe_conn_context_id);
        port = tgt->port;
        interface = tgt->port->priv;
        if (hba != interface->hba) {
                printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n");
                goto ofld_cmpl_err;
        }
        /*
         * cnic has allocated a context_id for this session; use this
         * while enabling the session.
         */
        tgt->context_id = context_id;
        if (ofld_kcqe->completion_status) {
                if (ofld_kcqe->completion_status ==
                                FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) {
                        printk(KERN_ERR PFX "unable to allocate FCoE context "
                                "resources\n");
                        set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
                }
        } else {
                /* FW offload request successfully completed */
                set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
        }
ofld_cmpl_err:
        set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
        wake_up_interruptible(&tgt->ofld_wait);
}

/**
 * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion
 *
 * @hba:        adapter structure pointer
 * @ofld_kcqe:  connection offload kcqe pointer
 *
 * handle session enable completion, mark the rport as ready
 */

static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
                                                struct fcoe_kcqe *ofld_kcqe)
{
        struct bnx2fc_rport             *tgt;
        struct bnx2fc_interface         *interface;
        u32                             conn_id;
        u32                             context_id;

        context_id = ofld_kcqe->fcoe_conn_context_id;
        conn_id = ofld_kcqe->fcoe_conn_id;
        tgt = hba->tgt_ofld_list[conn_id];
        if (!tgt) {
                printk(KERN_ERR PFX "ERROR:enbl_cmpl: No pending ofld req\n");
                return;
        }

        BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n",
                ofld_kcqe->fcoe_conn_context_id);

        /*
         * context_id should be the same for this target during offload
         * and enable
         */
        if (tgt->context_id != context_id) {
                printk(KERN_ERR PFX "context id mis-match\n");
                return;
        }
        interface = tgt->port->priv;
        if (hba != interface->hba) {
                printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
                goto enbl_cmpl_err;
        }
        if (!ofld_kcqe->completion_status)
                /* enable successful - rport ready for issuing IOs */
                set_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);

enbl_cmpl_err:
        set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
        wake_up_interruptible(&tgt->ofld_wait);
}

static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
                                        struct fcoe_kcqe *disable_kcqe)
{

        struct bnx2fc_rport             *tgt;
        u32                             conn_id;

        conn_id = disable_kcqe->fcoe_conn_id;
        tgt = hba->tgt_ofld_list[conn_id];
        if (!tgt) {
                printk(KERN_ERR PFX "ERROR: disable_cmpl: No disable req\n");
                return;
        }

        BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id);

        if (disable_kcqe->completion_status) {
                printk(KERN_ERR PFX "Disable failed with cmpl status %d\n",
                        disable_kcqe->completion_status);
                set_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags);
                set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
                wake_up_interruptible(&tgt->upld_wait);
        } else {
                /* disable successful */
                BNX2FC_TGT_DBG(tgt, "disable successful\n");
                clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
                clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
                set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
                set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
                wake_up_interruptible(&tgt->upld_wait);
        }
}

static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
                                        struct fcoe_kcqe *destroy_kcqe)
{
        struct bnx2fc_rport             *tgt;
        u32                             conn_id;

        conn_id = destroy_kcqe->fcoe_conn_id;
        tgt = hba->tgt_ofld_list[conn_id];
        if (!tgt) {
                printk(KERN_ERR PFX "destroy_cmpl: No destroy req\n");
                return;
        }

        BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);

        if (destroy_kcqe->completion_status) {
                printk(KERN_ERR PFX "Destroy conn failed, cmpl status %d\n",
                        destroy_kcqe->completion_status);
                return;
        } else {
                /* destroy successful */
                BNX2FC_TGT_DBG(tgt, "upload successful\n");
                clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
                set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags);
                set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
                wake_up_interruptible(&tgt->upld_wait);
        }
}

static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
{
        switch (err_code) {
        case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE:
                printk(KERN_ERR PFX "init_failure due to invalid opcode\n");
                break;

        case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE:
                printk(KERN_ERR PFX "init failed due to ctx alloc failure\n");
                break;

        case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
                printk(KERN_ERR PFX "init_failure due to NIC error\n");
                break;
        case FCOE_KCQE_COMPLETION_STATUS_ERROR:
                printk(KERN_ERR PFX "init failure due to compl status err\n");
                break;
        case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION:
                printk(KERN_ERR PFX "init failure due to HSI mismatch\n");
                break;
        default:
                printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
        }
}

/**
 * bnx2fc_indicate_kcqe - process KCQE
 *
 * @context:    adapter structure pointer
 * @kcq:        kcqe pointer
 * @num_cqe:    Number of completion queue elements
 *
 * Generic KCQ event handler
 */
1313void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
1314                                        u32 num_cqe)
1315{
1316        struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
1317        int i = 0;
1318        struct fcoe_kcqe *kcqe = NULL;
1319
1320        while (i < num_cqe) {
1321                kcqe = (struct fcoe_kcqe *) kcq[i++];
1322
1323                switch (kcqe->op_code) {
1324                case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION:
1325                        bnx2fc_fastpath_notification(hba, kcqe);
1326                        break;
1327
1328                case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
1329                        bnx2fc_process_ofld_cmpl(hba, kcqe);
1330                        break;
1331
1332                case FCOE_KCQE_OPCODE_ENABLE_CONN:
1333                        bnx2fc_process_enable_conn_cmpl(hba, kcqe);
1334                        break;
1335
1336                case FCOE_KCQE_OPCODE_INIT_FUNC:
1337                        if (kcqe->completion_status !=
1338                                        FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
1339                                bnx2fc_init_failure(hba,
1340                                                kcqe->completion_status);
1341                        } else {
1342                                set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
1343                                bnx2fc_get_link_state(hba);
1344                                printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n",
1345                                        (u8)hba->pcidev->bus->number);
1346                        }
1347                        break;
1348
1349                case FCOE_KCQE_OPCODE_DESTROY_FUNC:
1350                        if (kcqe->completion_status !=
1351                                        FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
1353                                printk(KERN_ERR PFX "DESTROY failed\n");
1354                        } else {
1355                                printk(KERN_INFO PFX "DESTROY success\n");
1356                        }
1357                        set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
1358                        wake_up_interruptible(&hba->destroy_wait);
1359                        break;
1360
1361                case FCOE_KCQE_OPCODE_DISABLE_CONN:
1362                        bnx2fc_process_conn_disable_cmpl(hba, kcqe);
1363                        break;
1364
1365                case FCOE_KCQE_OPCODE_DESTROY_CONN:
1366                        bnx2fc_process_conn_destroy_cmpl(hba, kcqe);
1367                        break;
1368
1369                case FCOE_KCQE_OPCODE_STAT_FUNC:
1370                        if (kcqe->completion_status !=
1371                            FCOE_KCQE_COMPLETION_STATUS_SUCCESS)
1372                                printk(KERN_ERR PFX "STAT failed\n");
1373                        complete(&hba->stat_req_done);
1374                        break;
1375
1376                case FCOE_KCQE_OPCODE_FCOE_ERROR:
1377                        /* fall thru */
1378                default:
1379                        printk(KERN_ERR PFX "unknown opcode 0x%x\n",
1380                                                                kcqe->op_code);
1381                }
1382        }
1383}
1384
1385void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
1386{
1387        struct fcoe_sqe *sqe;
1388
1389        sqe = &tgt->sq[tgt->sq_prod_idx];
1390
1391        /* Fill SQ WQE */
1392        sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT;
1393        sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;
1394
1395        /* Advance SQ Prod Idx */
1396        if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) {
1397                tgt->sq_prod_idx = 0;
1398                tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit;
1399        }
1400}
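/*
 * A minimal usage sketch, assuming the tgt_lock convention used by the I/O
 * submission paths: a task is queued on the send queue and the doorbell is
 * then rung while the rport lock is held.
 *
 *	spin_lock_bh(&tgt->tgt_lock);
 *	bnx2fc_add_2_sq(tgt, xid);	(fill the next SQ WQE with this xid)
 *	bnx2fc_ring_doorbell(tgt);	(publish the new producer index)
 *	spin_unlock_bh(&tgt->tgt_lock);
 *
 * The toggle bit flips every time sq_prod_idx wraps past BNX2FC_SQ_WQES_MAX,
 * presumably so the firmware can distinguish freshly written WQEs from stale
 * ones.
 */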
1401
1402void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
1403{
1404        struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
1405        u32 msg;
1406
1407        wmb();
1408        sq_db->prod = tgt->sq_prod_idx |
1409                                (tgt->sq_curr_toggle_bit << 15);
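        /*
         * The set-producer doorbell fits in 32 bits (a small header plus
         * the 16-bit producer value written above, with the toggle in
         * bit 15), so it can be read back as a single u32 and posted to
         * the mapped doorbell window with one MMIO write.
         */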
1410        msg = *((u32 *)sq_db);
1411        writel(cpu_to_le32(msg), tgt->ctx_base);
1412        mmiowb();
1414}
1415
1416int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
1417{
1418        u32 context_id = tgt->context_id;
1419        struct fcoe_port *port = tgt->port;
1420        u32 reg_off;
1421        resource_size_t reg_base;
1422        struct bnx2fc_interface *interface = port->priv;
1423        struct bnx2fc_hba *hba = interface->hba;
1424
1425        reg_base = pci_resource_start(hba->pcidev,
1426                                        BNX2X_DOORBELL_PCI_BAR);
1427        reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF);
1428        tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
1429        if (!tgt->ctx_base)
1430                return -ENOMEM;
1431        return 0;
1432}
1433
1434char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items)
1435{
1436        char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ);
1437
1438        if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX)
1439                return NULL;
1440
1441        tgt->rq_cons_idx += num_items;
1442
1443        if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX)
1444                tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX;
1445
1446        return buf;
1447}
1448
1449void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
1450{
1451        /* return the rq buffer */
1452        u32 next_prod_idx = tgt->rq_prod_idx + num_items;
1453        if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) {
1454                /* Wrap around RQ */
1455                next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX;
1456        }
1457        tgt->rq_prod_idx = next_prod_idx;
1458        tgt->conn_db->rq_prod = tgt->rq_prod_idx;
1459}
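/*
 * Worked example of the wrap handling above (16 is assumed for
 * BNX2FC_RQ_WQES_MAX purely for illustration): the producer index is a
 * free-running value whose low 15 bits index the ring while bit 15 in
 * effect serves as a sequence bit.  With rq_prod_idx == 0x000f and
 * num_items == 1, next_prod_idx becomes 0x0010; its low 15 bits equal the
 * ring size, so 0x8000 - 16 is added, giving 0x8000: the ring index wraps
 * back to 0 and bit 15 flips.
 */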
1460
1461void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
1462                                  struct fcoe_task_ctx_entry *task,
1463                                  struct bnx2fc_cmd *orig_io_req,
1464                                  u32 offset)
1465{
1466        struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd;
1467        struct bnx2fc_rport *tgt = seq_clnp_req->tgt;
1468        struct bnx2fc_interface *interface = tgt->port->priv;
1469        struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl;
1470        struct fcoe_task_ctx_entry *orig_task;
1471        struct fcoe_task_ctx_entry *task_page;
1472        struct fcoe_ext_mul_sges_ctx *sgl;
1473        u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP;
1474        u8 orig_task_type;
1475        u16 orig_xid = orig_io_req->xid;
1476        u32 context_id = tgt->context_id;
1477        u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma;
1478        u32 orig_offset = offset;
1479        int bd_count;
1480        int orig_task_idx, index;
1481        int i;
1482
1483        memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1484
1485        if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
1486                orig_task_type = FCOE_TASK_TYPE_WRITE;
1487        else
1488                orig_task_type = FCOE_TASK_TYPE_READ;
1489
1490        /* Tx flags */
1491        task->txwr_rxrd.const_ctx.tx_flags =
1492                                FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP <<
1493                                FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1494        /* init flags */
1495        task->txwr_rxrd.const_ctx.init_flags = task_type <<
1496                                FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1497        task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1498                                FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1499        task->rxwr_txrd.const_ctx.init_flags = context_id <<
1500                                FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1503
1504        task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
1505
1506        task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0;
1507        task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset;
1508
1509        bd_count = orig_io_req->bd_tbl->bd_valid;
1510
1511        /* obtain the appropriate bd entry from relative offset */
1512        for (i = 0; i < bd_count; i++) {
1513                if (offset < bd[i].buf_len)
1514                        break;
1515                offset -= bd[i].buf_len;
1516        }
1517        phys_addr += (i * sizeof(struct fcoe_bd_ctx));
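        /*
         * At this point 'i' names the BD that contains the requested
         * offset, 'offset' has been reduced to the remainder inside that
         * BD, and phys_addr points at bd[i] within the BD table.  For
         * example, with two 4096-byte buffers and an offset of 5000 the
         * loop leaves i == 1 and offset == 904.
         */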
1518
1519        if (orig_task_type == FCOE_TASK_TYPE_WRITE) {
1520                task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1521                                (u32)phys_addr;
1522                task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1523                                (u32)((u64)phys_addr >> 32);
1524                task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
1525                                bd_count;
1526                task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off =
1527                                offset; /* adjusted offset */
1528                task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i;
1529        } else {
1530                orig_task_idx = orig_xid / BNX2FC_TASKS_PER_PAGE;
1531                index = orig_xid % BNX2FC_TASKS_PER_PAGE;
1532
1533                task_page = (struct fcoe_task_ctx_entry *)
1534                             interface->hba->task_ctx[orig_task_idx];
1535                orig_task = &(task_page[index]);
1536
1537                /* Multiple SGEs were used for this IO */
1538                sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1539                sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr;
1540                sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)phys_addr >> 32);
1541                sgl->mul_sgl.sgl_size = bd_count;
1542                sgl->mul_sgl.cur_sge_off = offset; /* adjusted offset */
1543                sgl->mul_sgl.cur_sge_idx = i;
1544
1545                memset(&task->rxwr_only.rx_seq_ctx, 0,
1546                       sizeof(struct fcoe_rx_seq_ctx));
1547                task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset;
1548                task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset;
1549        }
1550}

1551void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
1552                              struct fcoe_task_ctx_entry *task,
1553                              u16 orig_xid)
1554{
1555        u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP;
1556        struct bnx2fc_rport *tgt = io_req->tgt;
1557        u32 context_id = tgt->context_id;
1558
1559        memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1560
1561        /* Tx Write Rx Read */
1562        /* init flags */
1563        task->txwr_rxrd.const_ctx.init_flags = task_type <<
1564                                FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1565        task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1566                                FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1567        if (tgt->dev_type == TYPE_TAPE)
1568                task->txwr_rxrd.const_ctx.init_flags |=
1569                                FCOE_TASK_DEV_TYPE_TAPE <<
1570                                FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1571        else
1572                task->txwr_rxrd.const_ctx.init_flags |=
1573                                FCOE_TASK_DEV_TYPE_DISK <<
1574                                FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1575        task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
1576
1577        /* Tx flags */
1578        task->txwr_rxrd.const_ctx.tx_flags =
1579                                FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
1580                                FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1581
1582        /* Rx Read Tx Write */
1583        task->rxwr_txrd.const_ctx.init_flags = context_id <<
1584                                FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1585        task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
1586                                FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
1587}
1588
1589void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
1590                                struct fcoe_task_ctx_entry *task)
1591{
1592        struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
1593        struct bnx2fc_rport *tgt = io_req->tgt;
1594        struct fc_frame_header *fc_hdr;
1595        struct fcoe_ext_mul_sges_ctx *sgl;
1596        u8 task_type = 0;
1597        u64 *hdr;
1598        u64 temp_hdr[3];
1599        u32 context_id;
1600
1602        /* Obtain task_type */
1603        if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) ||
1604            (io_req->cmd_type == BNX2FC_ELS)) {
1605                task_type = FCOE_TASK_TYPE_MIDPATH;
1606        } else if (io_req->cmd_type == BNX2FC_ABTS) {
1607                task_type = FCOE_TASK_TYPE_ABTS;
1608        }
1609
1610        memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1611
1612        /* Setup the task from io_req for easy reference */
1613        io_req->task = task;
1614
1615        BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n",
1616                io_req->cmd_type, task_type);
1617
1618        /* Tx only */
1619        if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
1620            (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
1621                task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1622                                (u32)mp_req->mp_req_bd_dma;
1623                task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1624                                (u32)((u64)mp_req->mp_req_bd_dma >> 32);
1625                task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1;
1626        }
1627
1628        /* Tx Write Rx Read */
1629        /* init flags */
1630        task->txwr_rxrd.const_ctx.init_flags = task_type <<
1631                                FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1632        if (tgt->dev_type == TYPE_TAPE)
1633                task->txwr_rxrd.const_ctx.init_flags |=
1634                                FCOE_TASK_DEV_TYPE_TAPE <<
1635                                FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1636        else
1637                task->txwr_rxrd.const_ctx.init_flags |=
1638                                FCOE_TASK_DEV_TYPE_DISK <<
1639                                FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1640        task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1641                                FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1642
1643        /* tx flags */
1644        task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_INIT <<
1645                                FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1646
1647        /* Rx Write Tx Read */
1648        task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
1649
1650        /* rx flags */
1651        task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
1652                                FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
1653
1654        context_id = tgt->context_id;
1655        task->rxwr_txrd.const_ctx.init_flags = context_id <<
1656                                FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1657
1658        fc_hdr = &(mp_req->req_fc_hdr);
1659        if (task_type == FCOE_TASK_TYPE_MIDPATH) {
1660                fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
1661                fc_hdr->fh_rx_id = htons(0xffff);
1662                task->rxwr_txrd.var_ctx.rx_id = 0xffff;
1663        } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
1664                fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
1665        }
1666
1667        /* Fill FC Header into middle path buffer */
1668        hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr;
1669        memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
1670        hdr[0] = cpu_to_be64(temp_hdr[0]);
1671        hdr[1] = cpu_to_be64(temp_hdr[1]);
1672        hdr[2] = cpu_to_be64(temp_hdr[2]);
1673
1674        /* Rx Only */
1675        if (task_type == FCOE_TASK_TYPE_MIDPATH) {
1676                sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1677
1678                sgl->mul_sgl.cur_sge_addr.lo = (u32)mp_req->mp_resp_bd_dma;
1679                sgl->mul_sgl.cur_sge_addr.hi =
1680                                (u32)((u64)mp_req->mp_resp_bd_dma >> 32);
1681                sgl->mul_sgl.sgl_size = 1;
1682        }
1683}
1684
1685void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
1686                             struct fcoe_task_ctx_entry *task)
1687{
1688        u8 task_type;
1689        struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1690        struct io_bdt *bd_tbl = io_req->bd_tbl;
1691        struct bnx2fc_rport *tgt = io_req->tgt;
1692        struct fcoe_cached_sge_ctx *cached_sge;
1693        struct fcoe_ext_mul_sges_ctx *sgl;
1694        int dev_type = tgt->dev_type;
1695        u64 *fcp_cmnd;
1696        u64 tmp_fcp_cmnd[4];
1697        u32 context_id;
1698        int cnt, i;
1699        int bd_count;
1700
1701        memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1702
1703        /* Setup the task from io_req for easy reference */
1704        io_req->task = task;
1705
1706        if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
1707                task_type = FCOE_TASK_TYPE_WRITE;
1708        else
1709                task_type = FCOE_TASK_TYPE_READ;
1710
1711        /* Tx only */
1712        bd_count = bd_tbl->bd_valid;
1713        cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge;
1714        if (task_type == FCOE_TASK_TYPE_WRITE) {
1715                if ((dev_type == TYPE_DISK) && (bd_count == 1)) {
1716                        struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
1717
1718                        task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.lo =
1719                        cached_sge->cur_buf_addr.lo =
1720                                        fcoe_bd_tbl->buf_addr_lo;
1721                        task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.hi =
1722                        cached_sge->cur_buf_addr.hi =
1723                                        fcoe_bd_tbl->buf_addr_hi;
1724                        task->txwr_only.sgl_ctx.cached_sge.cur_buf_rem =
1725                        cached_sge->cur_buf_rem =
1726                                        fcoe_bd_tbl->buf_len;
1727
1728                        task->txwr_rxrd.const_ctx.init_flags |= 1 <<
1729                                FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
1730                } else {
1731                        task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1732                                        (u32)bd_tbl->bd_tbl_dma;
1733                        task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1734                                        (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1735                        task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
1736                                        bd_tbl->bd_valid;
1737                }
1738        }
1739
1740        /*Tx Write Rx Read */
1741        /* Init state to NORMAL */
1742        task->txwr_rxrd.const_ctx.init_flags |= task_type <<
1743                                FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1744        if (dev_type == TYPE_TAPE) {
1745                task->txwr_rxrd.const_ctx.init_flags |=
1746                                FCOE_TASK_DEV_TYPE_TAPE <<
1747                                FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1748                io_req->rec_retry = 0;
1750        } else
1751                task->txwr_rxrd.const_ctx.init_flags |=
1752                                FCOE_TASK_DEV_TYPE_DISK <<
1753                                FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1754        task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1755                                FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1756        /* tx flags */
1757        task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
1758                                FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1759
1760        /* Set initial seq counter */
1761        task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1;
1762
1763        /* Fill FCP_CMND IU */
1764        fcp_cmnd = (u64 *)
1765                    task->txwr_rxrd.union_ctx.fcp_cmd.opaque;
1766        bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
1767
1768        /* swap fcp_cmnd */
1769        cnt = sizeof(struct fcp_cmnd) / sizeof(u64);
1770
1771        for (i = 0; i < cnt; i++) {
1772                *fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
1773                fcp_cmnd++;
1774        }
1775
1776        /* Rx Write Tx Read */
1777        task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
1778
1779        context_id = tgt->context_id;
1780        task->rxwr_txrd.const_ctx.init_flags = context_id <<
1781                                FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1782
1783        /* rx flags */
1784        /* Set state to "waiting for the first packet" */
1785        task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
1786                                FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
1787
1788        task->rxwr_txrd.var_ctx.rx_id = 0xffff;
1789
1790        /* Rx Only */
1791        if (task_type != FCOE_TASK_TYPE_READ)
1792                return;
1793
1794        sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1795        bd_count = bd_tbl->bd_valid;
1796
1797        if (dev_type == TYPE_DISK) {
1798                if (bd_count == 1) {
1800                        struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
1801
1802                        cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
1803                        cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
1804                        cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
1805                        task->txwr_rxrd.const_ctx.init_flags |= 1 <<
1806                                FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
1807                } else if (bd_count == 2) {
1808                        struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
1809
1810                        cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
1811                        cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
1812                        cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
1813
1814                        fcoe_bd_tbl++;
1815                        cached_sge->second_buf_addr.lo =
1816                                                 fcoe_bd_tbl->buf_addr_lo;
1817                        cached_sge->second_buf_addr.hi =
1818                                                fcoe_bd_tbl->buf_addr_hi;
1819                        cached_sge->second_buf_rem = fcoe_bd_tbl->buf_len;
1820                        task->txwr_rxrd.const_ctx.init_flags |= 1 <<
1821                                FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
1822                } else {
1824                        sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
1825                        sgl->mul_sgl.cur_sge_addr.hi =
1826                                        (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1827                        sgl->mul_sgl.sgl_size = bd_count;
1828                }
1829        } else {
1830                sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
1831                sgl->mul_sgl.cur_sge_addr.hi =
1832                                (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1833                sgl->mul_sgl.sgl_size = bd_count;
1834        }
1835}
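/*
 * Note on the read path above: for disk devices with only one or two
 * scatter/gather elements the SGEs are copied directly into the cached SGE
 * area of the task context and the CACHED_SGE init flag is set, presumably
 * sparing the chip a fetch of the external BD table; longer lists, and tape
 * devices, always use the multiple-SGL descriptor that points at the DMA'd
 * BD table instead.
 */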
1836
1837/**
1838 * bnx2fc_setup_task_ctx - allocate and map task context
1839 *
1840 * @hba:        pointer to adapter structure
1841 *
1842 * Allocate memory for the task context array and the associated BD
1843 * table to be used by the firmware.
1844 *
1845 */
1846int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
1847{
1848        int rc = 0;
1849        struct regpair *task_ctx_bdt;
1850        dma_addr_t addr;
1851        int task_ctx_arr_sz;
1852        int i;
1853
1854        /*
1855         * Allocate task context bd table. A page size of bd table
1856         * can map 256 buffers. Each buffer contains 32 task context
1857         * entries. Hence the limit with one page is 8192 task context
1858         * entries.
1859         */
1860        hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
1861                                                  PAGE_SIZE,
1862                                                  &hba->task_ctx_bd_dma,
1863                                                  GFP_KERNEL);
1864        if (!hba->task_ctx_bd_tbl) {
1865                printk(KERN_ERR PFX "unable to allocate task context BDT\n");
1866                rc = -ENOMEM;
1867                goto out;
1868        }
1869        memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);
1870
1871        /*
1872         * Allocate task_ctx which is an array of pointers pointing to
1873         * a page containing 32 task contexts
1874         */
1875        task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE);
1876        hba->task_ctx = kzalloc((task_ctx_arr_sz * sizeof(void *)),
1877                                 GFP_KERNEL);
1878        if (!hba->task_ctx) {
1879                printk(KERN_ERR PFX "unable to allocate task context array\n");
1880                rc = -ENOMEM;
1881                goto out1;
1882        }
1883
1884        /*
1885         * Allocate task_ctx_dma which is an array of dma addresses
1886         */
1887        hba->task_ctx_dma = kmalloc((task_ctx_arr_sz *
1888                                        sizeof(dma_addr_t)), GFP_KERNEL);
1889        if (!hba->task_ctx_dma) {
1890                printk(KERN_ERR PFX "unable to alloc context mapping array\n");
1891                rc = -ENOMEM;
1892                goto out2;
1893        }
1894
1895        task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
1896        for (i = 0; i < task_ctx_arr_sz; i++) {
1898                hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
1899                                                      PAGE_SIZE,
1900                                                      &hba->task_ctx_dma[i],
1901                                                      GFP_KERNEL);
1902                if (!hba->task_ctx[i]) {
1903                        printk(KERN_ERR PFX "unable to alloc task context\n");
1904                        rc = -ENOMEM;
1905                        goto out3;
1906                }
1907                memset(hba->task_ctx[i], 0, PAGE_SIZE);
1908                addr = (u64)hba->task_ctx_dma[i];
1909                task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
1910                task_ctx_bdt->lo = cpu_to_le32((u32)addr);
1911                task_ctx_bdt++;
1912        }
1913        return 0;
1914
1915out3:
1916        for (i = 0; i < task_ctx_arr_sz; i++) {
1917                if (hba->task_ctx[i]) {
1919                        dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1920                                hba->task_ctx[i], hba->task_ctx_dma[i]);
1921                        hba->task_ctx[i] = NULL;
1922                }
1923        }
1924
1925        kfree(hba->task_ctx_dma);
1926        hba->task_ctx_dma = NULL;
1927out2:
1928        kfree(hba->task_ctx);
1929        hba->task_ctx = NULL;
1930out1:
1931        dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1932                        hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma);
1933        hba->task_ctx_bd_tbl = NULL;
1934out:
1935        return rc;
1936}
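/*
 * A sketch of how a task context allocated here is later looked up from an
 * xid, using the same page/index split seen in bnx2fc_init_seq_cleanup_task()
 * above:
 *
 *	task_idx  = xid / BNX2FC_TASKS_PER_PAGE;
 *	index     = xid % BNX2FC_TASKS_PER_PAGE;
 *	task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
 *	task      = &task_page[index];
 */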
1937
1938void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
1939{
1940        int task_ctx_arr_sz;
1941        int i;
1942
1943        if (hba->task_ctx_bd_tbl) {
1944                dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1945                                    hba->task_ctx_bd_tbl,
1946                                    hba->task_ctx_bd_dma);
1947                hba->task_ctx_bd_tbl = NULL;
1948        }
1949
1950        task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE);
1951        if (hba->task_ctx) {
1952                for (i = 0; i < task_ctx_arr_sz; i++) {
1953                        if (hba->task_ctx[i]) {
1954                                dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1955                                                    hba->task_ctx[i],
1956                                                    hba->task_ctx_dma[i]);
1957                                hba->task_ctx[i] = NULL;
1958                        }
1959                }
1960                kfree(hba->task_ctx);
1961                hba->task_ctx = NULL;
1962        }
1963
1964        kfree(hba->task_ctx_dma);
1965        hba->task_ctx_dma = NULL;
1966}
1967
1968static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
1969{
1970        int i;
1971        int segment_count;
1972        u32 *pbl;
1973
1974        if (hba->hash_tbl_segments) {
1976                pbl = hba->hash_tbl_pbl;
1977                if (pbl) {
1978                        segment_count = hba->hash_tbl_segment_count;
1979                        for (i = 0; i < segment_count; ++i) {
1980                                dma_addr_t dma_address;
1981
1982                                dma_address = le32_to_cpu(*pbl);
1983                                ++pbl;
1984                                dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
1985                                ++pbl;
1986                                dma_free_coherent(&hba->pcidev->dev,
1987                                                  BNX2FC_HASH_TBL_CHUNK_SIZE,
1988                                                  hba->hash_tbl_segments[i],
1989                                                  dma_address);
1990                        }
1991                }
1992
1993                kfree(hba->hash_tbl_segments);
1994                hba->hash_tbl_segments = NULL;
1995        }
1996
1997        if (hba->hash_tbl_pbl) {
1998                dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1999                                    hba->hash_tbl_pbl,
2000                                    hba->hash_tbl_pbl_dma);
2001                hba->hash_tbl_pbl = NULL;
2002        }
2003}
2004
2005static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
2006{
2007        int i;
2008        int hash_table_size;
2009        int segment_count;
2010        int segment_array_size;
2011        int dma_segment_array_size;
2012        dma_addr_t *dma_segment_array;
2013        u32 *pbl;
2014
2015        hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
2016                sizeof(struct fcoe_hash_table_entry);
2017
2018        segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1;
2019        segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE;
2020        hba->hash_tbl_segment_count = segment_count;
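        /*
         * The computation above is an open-coded DIV_ROUND_UP: the hash
         * table is split into as many BNX2FC_HASH_TBL_CHUNK_SIZE segments
         * as needed, rounding up.  For instance, a 100 KiB table with
         * (hypothetical) 64 KiB chunks yields segment_count == 2.
         */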
2021
2022        segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments);
2023        hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL);
2024        if (!hba->hash_tbl_segments) {
2025                printk(KERN_ERR PFX "hash table pointers alloc failed\n");
2026                return -ENOMEM;
2027        }
2028        dma_segment_array_size = segment_count * sizeof(*dma_segment_array);
2029        dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
2030        if (!dma_segment_array) {
2031                printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
2032                goto cleanup_ht;
2033        }
2034
2035        for (i = 0; i < segment_count; ++i) {
2036                hba->hash_tbl_segments[i] =
2037                        dma_alloc_coherent(&hba->pcidev->dev,
2038                                           BNX2FC_HASH_TBL_CHUNK_SIZE,
2039                                           &dma_segment_array[i],
2040                                           GFP_KERNEL);
2041                if (!hba->hash_tbl_segments[i]) {
2042                        printk(KERN_ERR PFX "hash segment alloc failed\n");
2043                        goto cleanup_dma;
2044                }
2045                memset(hba->hash_tbl_segments[i], 0,
2046                       BNX2FC_HASH_TBL_CHUNK_SIZE);
2047        }
2048
2049        hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev,
2050                                               PAGE_SIZE,
2051                                               &hba->hash_tbl_pbl_dma,
2052                                               GFP_KERNEL);
2053        if (!hba->hash_tbl_pbl) {
2054                printk(KERN_ERR PFX "hash table pbl alloc failed\n");
2055                goto cleanup_dma;
2056        }
2057        memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);
2058
2059        pbl = hba->hash_tbl_pbl;
2060        for (i = 0; i < segment_count; ++i) {
2061                u64 paddr = dma_segment_array[i];
2062                *pbl = cpu_to_le32((u32) paddr);
2063                ++pbl;
2064                *pbl = cpu_to_le32((u32) (paddr >> 32));
2065                ++pbl;
2066        }
2078        kfree(dma_segment_array);
2079        return 0;
2080
2081cleanup_dma:
2082        for (i = 0; i < segment_count; ++i) {
2083                if (hba->hash_tbl_segments[i])
2084                        dma_free_coherent(&hba->pcidev->dev,
2085                                            BNX2FC_HASH_TBL_CHUNK_SIZE,
2086                                            hba->hash_tbl_segments[i],
2087                                            dma_segment_array[i]);
2088        }
2089
2090        kfree(dma_segment_array);
2091
2092cleanup_ht:
2093        kfree(hba->hash_tbl_segments);
2094        hba->hash_tbl_segments = NULL;
2095        return -ENOMEM;
2096}
2097
2098/**
2099 * bnx2fc_setup_fw_resc - Allocate and map the hash tables, dummy buffer and stats buffer
2100 *
2101 * @hba:        Pointer to adapter structure
2102 *
2103 */
2104int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
2105{
2106        u64 addr;
2107        u32 mem_size;
2108        int i;
2109
2110        if (bnx2fc_allocate_hash_table(hba))
2111                return -ENOMEM;
2112
2113        mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
2114        hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
2115                                                  &hba->t2_hash_tbl_ptr_dma,
2116                                                  GFP_KERNEL);
2117        if (!hba->t2_hash_tbl_ptr) {
2118                printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
2119                bnx2fc_free_fw_resc(hba);
2120                return -ENOMEM;
2121        }
2122        memset(hba->t2_hash_tbl_ptr, 0x00, mem_size);
2123
2124        mem_size = BNX2FC_NUM_MAX_SESS *
2125                                sizeof(struct fcoe_t2_hash_table_entry);
2126        hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
2127                                              &hba->t2_hash_tbl_dma,
2128                                              GFP_KERNEL);
2129        if (!hba->t2_hash_tbl) {
2130                printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
2131                bnx2fc_free_fw_resc(hba);
2132                return -ENOMEM;
2133        }
2134        memset(hba->t2_hash_tbl, 0x00, mem_size);
2135        for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
2136                addr = (unsigned long) hba->t2_hash_tbl_dma +
2137                         ((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
2138                hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff;
2139                hba->t2_hash_tbl[i].next.hi = addr >> 32;
2140        }
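        /*
         * Each T2 hash entry's 'next' pointer is set to the DMA address of
         * the entry that follows it, so the table starts out as one linked
         * chain (presumably a free list for the firmware); note that the
         * last entry ends up pointing one element past the end of the
         * allocation.
         */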
2141
2142        hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
2143                                               PAGE_SIZE, &hba->dummy_buf_dma,
2144                                               GFP_KERNEL);
2145        if (!hba->dummy_buffer) {
2146                printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n");
2147                bnx2fc_free_fw_resc(hba);
2148                return -ENOMEM;
2149        }
2150
2151        hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev,
2152                                               PAGE_SIZE,
2153                                               &hba->stats_buf_dma,
2154                                               GFP_KERNEL);
2155        if (!hba->stats_buffer) {
2156                printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
2157                bnx2fc_free_fw_resc(hba);
2158                return -ENOMEM;
2159        }
2160        memset(hba->stats_buffer, 0x00, PAGE_SIZE);
2161
2162        return 0;
2163}
2164
2165void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba)
2166{
2167        u32 mem_size;
2168
2169        if (hba->stats_buffer) {
2170                dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
2171                                  hba->stats_buffer, hba->stats_buf_dma);
2172                hba->stats_buffer = NULL;
2173        }
2174
2175        if (hba->dummy_buffer) {
2176                dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
2177                                  hba->dummy_buffer, hba->dummy_buf_dma);
2178                hba->dummy_buffer = NULL;
2179        }
2180
2181        if (hba->t2_hash_tbl_ptr) {
2182                mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
2183                dma_free_coherent(&hba->pcidev->dev, mem_size,
2184                                    hba->t2_hash_tbl_ptr,
2185                                    hba->t2_hash_tbl_ptr_dma);
2186                hba->t2_hash_tbl_ptr = NULL;
2187        }
2188
2189        if (hba->t2_hash_tbl) {
2190                mem_size = BNX2FC_NUM_MAX_SESS *
2191                            sizeof(struct fcoe_t2_hash_table_entry);
2192                dma_free_coherent(&hba->pcidev->dev, mem_size,
2193                                    hba->t2_hash_tbl, hba->t2_hash_tbl_dma);
2194                hba->t2_hash_tbl = NULL;
2195        }
2196        bnx2fc_free_hash_table(hba);
2197}
2198