linux/drivers/scsi/bnx2fc/bnx2fc_hwi.c
   1/* bnx2fc_hwi.c: QLogic Linux FCoE offload driver.
   2 * This file contains the low level functions that interact
   3 * with 57712 FCoE firmware.
   4 *
   5 * Copyright (c) 2008-2013 Broadcom Corporation
   6 * Copyright (c) 2014-2016 QLogic Corporation
   7 * Copyright (c) 2016-2017 Cavium Inc.
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License as published by
  11 * the Free Software Foundation.
  12 *
  13 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
  14 */
  15
  16#include "bnx2fc.h"
  17
  18DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
  19
  20static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
  21                                        struct fcoe_kcqe *new_cqe_kcqe);
  22static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
  23                                        struct fcoe_kcqe *ofld_kcqe);
  24static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
  25                                                struct fcoe_kcqe *ofld_kcqe);
  26static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
  27static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
  28                                        struct fcoe_kcqe *destroy_kcqe);
  29
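/**
 * bnx2fc_send_stat_req - sends the statistics request KWQE to the f/w
 *
 * @hba:                adapter structure pointer
 *
 * Passes the DMA address of the statistics buffer (hba->stats_buf_dma)
 * to the firmware so that it can fill in the FCoE statistics.
 */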
  30int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
  31{
  32        struct fcoe_kwqe_stat stat_req;
  33        struct kwqe *kwqe_arr[2];
  34        int num_kwqes = 1;
  35        int rc = 0;
  36
  37        memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat));
  38        stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT;
  39        stat_req.hdr.flags =
  40                (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
  41
  42        stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma;
  43        stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32);
  44
  45        kwqe_arr[0] = (struct kwqe *) &stat_req;
  46
  47        if (hba->cnic && hba->cnic->submit_kwqes)
  48                rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
  49
  50        return rc;
  51}
  52
  53/**
  54 * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w
  55 *
  56 * @hba:        adapter structure pointer
  57 *
   58 * Send down FCoE firmware init KWQEs which initiate the initial handshake
  59 *      with the f/w.
  60 *
  61 */
  62int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
  63{
  64        struct fcoe_kwqe_init1 fcoe_init1;
  65        struct fcoe_kwqe_init2 fcoe_init2;
  66        struct fcoe_kwqe_init3 fcoe_init3;
  67        struct kwqe *kwqe_arr[3];
  68        int num_kwqes = 3;
  69        int rc = 0;
  70
  71        if (!hba->cnic) {
  72                printk(KERN_ERR PFX "hba->cnic NULL during fcoe fw init\n");
  73                return -ENODEV;
  74        }
  75
  76        /* fill init1 KWQE */
  77        memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1));
  78        fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1;
  79        fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
  80                                        FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
  81
  82        fcoe_init1.num_tasks = hba->max_tasks;
  83        fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
  84        fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
  85        fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
  86        fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX;
  87        fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
  88        fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32);
  89        fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma;
  90        fcoe_init1.task_list_pbl_addr_hi =
  91                                (u32) ((u64) hba->task_ctx_bd_dma >> 32);
  92        fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU;
  93
  94        fcoe_init1.flags = (PAGE_SHIFT <<
  95                                FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT);
  96
  97        fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG;
  98
  99        /* fill init2 KWQE */
 100        memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2));
 101        fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2;
 102        fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
 103                                        FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
 104
 105        fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION;
 106        fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION;
 107
 108
 109        fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
 110        fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
 111                                           ((u64) hba->hash_tbl_pbl_dma >> 32);
 112
 113        fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma;
 114        fcoe_init2.t2_hash_tbl_addr_hi = (u32)
 115                                          ((u64) hba->t2_hash_tbl_dma >> 32);
 116
 117        fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma;
 118        fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32)
 119                                        ((u64) hba->t2_hash_tbl_ptr_dma >> 32);
 120
 121        fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS;
 122
 123        /* fill init3 KWQE */
 124        memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3));
 125        fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3;
 126        fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
 127                                        FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
 128        fcoe_init3.error_bit_map_lo = 0xffffffff;
 129        fcoe_init3.error_bit_map_hi = 0xffffffff;
 130
 131        /*
 132         * enable both cached connection and cached tasks
 133         * 0 = none, 1 = cached connection, 2 = cached tasks, 3 = both
 134         */
 135        fcoe_init3.perf_config = 3;
 136
 137        kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
 138        kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
 139        kwqe_arr[2] = (struct kwqe *) &fcoe_init3;
 140
 141        if (hba->cnic && hba->cnic->submit_kwqes)
 142                rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
 143
 144        return rc;
 145}
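
/**
 * bnx2fc_send_fw_fcoe_destroy_msg - tears down the FCoE function in the f/w
 *
 * @hba:                adapter structure pointer
 *
 * Sends the destroy KWQE, the counterpart of bnx2fc_send_fw_fcoe_init_msg(),
 * to release the firmware resources set up at init time.
 */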
 146int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba)
 147{
 148        struct fcoe_kwqe_destroy fcoe_destroy;
 149        struct kwqe *kwqe_arr[2];
 150        int num_kwqes = 1;
 151        int rc = -1;
 152
 153        /* fill destroy KWQE */
 154        memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy));
 155        fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY;
 156        fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
 157                                        FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
 158        kwqe_arr[0] = (struct kwqe *) &fcoe_destroy;
 159
 160        if (hba->cnic && hba->cnic->submit_kwqes)
 161                rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
 162        return rc;
 163}
 164
 165/**
 166 * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process
 167 *
 168 * @port:               port structure pointer
 169 * @tgt:                bnx2fc_rport structure pointer
 170 */
 171int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
 172                                        struct bnx2fc_rport *tgt)
 173{
 174        struct fc_lport *lport = port->lport;
 175        struct bnx2fc_interface *interface = port->priv;
 176        struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
 177        struct bnx2fc_hba *hba = interface->hba;
 178        struct kwqe *kwqe_arr[4];
 179        struct fcoe_kwqe_conn_offload1 ofld_req1;
 180        struct fcoe_kwqe_conn_offload2 ofld_req2;
 181        struct fcoe_kwqe_conn_offload3 ofld_req3;
 182        struct fcoe_kwqe_conn_offload4 ofld_req4;
 183        struct fc_rport_priv *rdata = tgt->rdata;
 184        struct fc_rport *rport = tgt->rport;
 185        int num_kwqes = 4;
 186        u32 port_id;
 187        int rc = 0;
 188        u16 conn_id;
 189
 190        /* Initialize offload request 1 structure */
 191        memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1));
 192
 193        ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1;
 194        ofld_req1.hdr.flags =
 195                (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
 196
 197
 198        conn_id = (u16)tgt->fcoe_conn_id;
 199        ofld_req1.fcoe_conn_id = conn_id;
 200
 201
 202        ofld_req1.sq_addr_lo = (u32) tgt->sq_dma;
 203        ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32);
 204
 205        ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma;
 206        ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32);
 207
 208        ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma;
 209        ofld_req1.rq_first_pbe_addr_hi =
 210                                (u32)((u64) tgt->rq_dma >> 32);
 211
 212        ofld_req1.rq_prod = 0x8000;
 213
 214        /* Initialize offload request 2 structure */
 215        memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2));
 216
 217        ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2;
 218        ofld_req2.hdr.flags =
 219                (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
 220
 221        ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size;
 222
 223        ofld_req2.cq_addr_lo = (u32) tgt->cq_dma;
 224        ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32);
 225
 226        ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma;
 227        ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32);
 228
 229        ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma;
 230        ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32);
 231
 232        /* Initialize offload request 3 structure */
 233        memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3));
 234
 235        ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3;
 236        ofld_req3.hdr.flags =
 237                (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
 238
 239        ofld_req3.vlan_tag = interface->vlan_id <<
 240                                FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
 241        ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;
 242
 243        port_id = fc_host_port_id(lport->host);
 244        if (port_id == 0) {
 245                BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n");
 246                return -EINVAL;
 247        }
 248
 249        /*
 250         * Store s_id of the initiator for further reference. This will
  251         * be used during disable/destroy during linkdown processing,
  252         * since the port_id is also reset to 0 when the lport is reset.
 253         */
 254        tgt->sid = port_id;
 255        ofld_req3.s_id[0] = (port_id & 0x000000FF);
 256        ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8;
 257        ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16;
 258
 259        port_id = rport->port_id;
 260        ofld_req3.d_id[0] = (port_id & 0x000000FF);
 261        ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8;
 262        ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16;
 263
 264        ofld_req3.tx_total_conc_seqs = rdata->max_seq;
 265
 266        ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq;
 267        ofld_req3.rx_max_fc_pay_len  = lport->mfs;
 268
 269        ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS;
 270        ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS;
 271        ofld_req3.rx_open_seqs_exch_c3 = 1;
 272
 273        ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma;
 274        ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32);
 275
 276        /* set mul_n_port_ids supported flag to 0, until it is supported */
 277        ofld_req3.flags = 0;
 278        /*
 279        ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) <<
 280                            FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT);
 281        */
 282        /* Info from PLOGI response */
 283        ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) <<
 284                             FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT);
 285
 286        ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
 287                             FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);
 288
 289        /*
 290         * Info from PRLI response, this info is used for sequence level error
 291         * recovery support
 292         */
 293        if (tgt->dev_type == TYPE_TAPE) {
 294                ofld_req3.flags |= 1 <<
 295                                    FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT;
 296                ofld_req3.flags |= (((rdata->flags & FC_RP_FLAGS_REC_SUPPORTED)
 297                                    ? 1 : 0) <<
 298                                    FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT);
 299        }
 300
 301        /* vlan flag */
 302        ofld_req3.flags |= (interface->vlan_enabled <<
 303                            FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);
 304
 305        /* C2_VALID and ACK flags are not set as they are not supported */
 306
 307
 308        /* Initialize offload request 4 structure */
 309        memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4));
 310        ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4;
 311        ofld_req4.hdr.flags =
 312                (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
 313
 314        ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;
 315
 316
 317        ofld_req4.src_mac_addr_lo[0] =  port->data_src_addr[5];
 318                                                        /* local mac */
 319        ofld_req4.src_mac_addr_lo[1] =  port->data_src_addr[4];
 320        ofld_req4.src_mac_addr_mid[0] =  port->data_src_addr[3];
 321        ofld_req4.src_mac_addr_mid[1] =  port->data_src_addr[2];
 322        ofld_req4.src_mac_addr_hi[0] =  port->data_src_addr[1];
 323        ofld_req4.src_mac_addr_hi[1] =  port->data_src_addr[0];
 324        ofld_req4.dst_mac_addr_lo[0] =  ctlr->dest_addr[5];
 325                                                        /* fcf mac */
 326        ofld_req4.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
 327        ofld_req4.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
 328        ofld_req4.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
 329        ofld_req4.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
 330        ofld_req4.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
 331
 332        ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
 333        ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
 334
 335        ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma;
 336        ofld_req4.confq_pbl_base_addr_hi =
 337                                        (u32)((u64) tgt->confq_pbl_dma >> 32);
 338
 339        kwqe_arr[0] = (struct kwqe *) &ofld_req1;
 340        kwqe_arr[1] = (struct kwqe *) &ofld_req2;
 341        kwqe_arr[2] = (struct kwqe *) &ofld_req3;
 342        kwqe_arr[3] = (struct kwqe *) &ofld_req4;
 343
 344        if (hba->cnic && hba->cnic->submit_kwqes)
 345                rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
 346
 347        return rc;
 348}
 349
 350/**
 351 * bnx2fc_send_session_enable_req - initiates FCoE Session enablement
 352 *
 353 * @port:               port structure pointer
 354 * @tgt:                bnx2fc_rport structure pointer
 355 */
 356int bnx2fc_send_session_enable_req(struct fcoe_port *port,
 357                                        struct bnx2fc_rport *tgt)
 358{
 359        struct kwqe *kwqe_arr[2];
 360        struct bnx2fc_interface *interface = port->priv;
 361        struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
 362        struct bnx2fc_hba *hba = interface->hba;
 363        struct fcoe_kwqe_conn_enable_disable enbl_req;
 364        struct fc_lport *lport = port->lport;
 365        struct fc_rport *rport = tgt->rport;
 366        int num_kwqes = 1;
 367        int rc = 0;
 368        u32 port_id;
 369
 370        memset(&enbl_req, 0x00,
 371               sizeof(struct fcoe_kwqe_conn_enable_disable));
 372        enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN;
 373        enbl_req.hdr.flags =
 374                (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
 375
 376        enbl_req.src_mac_addr_lo[0] =  port->data_src_addr[5];
 377                                                        /* local mac */
 378        enbl_req.src_mac_addr_lo[1] =  port->data_src_addr[4];
 379        enbl_req.src_mac_addr_mid[0] =  port->data_src_addr[3];
 380        enbl_req.src_mac_addr_mid[1] =  port->data_src_addr[2];
 381        enbl_req.src_mac_addr_hi[0] =  port->data_src_addr[1];
 382        enbl_req.src_mac_addr_hi[1] =  port->data_src_addr[0];
 383        memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);
 384
 385        enbl_req.dst_mac_addr_lo[0] =  ctlr->dest_addr[5];
 386        enbl_req.dst_mac_addr_lo[1] =  ctlr->dest_addr[4];
 387        enbl_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
 388        enbl_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
 389        enbl_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
 390        enbl_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
 391
 392        port_id = fc_host_port_id(lport->host);
 393        if (port_id != tgt->sid) {
 394                printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x,"
 395                                "sid = 0x%x\n", port_id, tgt->sid);
 396                port_id = tgt->sid;
 397        }
 398        enbl_req.s_id[0] = (port_id & 0x000000FF);
 399        enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
 400        enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16;
 401
 402        port_id = rport->port_id;
 403        enbl_req.d_id[0] = (port_id & 0x000000FF);
 404        enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
 405        enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
 406        enbl_req.vlan_tag = interface->vlan_id <<
 407                                FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
 408        enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
 409        enbl_req.vlan_flag = interface->vlan_enabled;
 410        enbl_req.context_id = tgt->context_id;
 411        enbl_req.conn_id = tgt->fcoe_conn_id;
 412
 413        kwqe_arr[0] = (struct kwqe *) &enbl_req;
 414
 415        if (hba->cnic && hba->cnic->submit_kwqes)
 416                rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
 417        return rc;
 418}
 419
 420/**
 421 * bnx2fc_send_session_disable_req - initiates FCoE Session disable
 422 *
 423 * @port:               port structure pointer
 424 * @tgt:                bnx2fc_rport structure pointer
 425 */
 426int bnx2fc_send_session_disable_req(struct fcoe_port *port,
 427                                    struct bnx2fc_rport *tgt)
 428{
 429        struct bnx2fc_interface *interface = port->priv;
 430        struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
 431        struct bnx2fc_hba *hba = interface->hba;
 432        struct fcoe_kwqe_conn_enable_disable disable_req;
 433        struct kwqe *kwqe_arr[2];
 434        struct fc_rport *rport = tgt->rport;
 435        int num_kwqes = 1;
 436        int rc = 0;
 437        u32 port_id;
 438
 439        memset(&disable_req, 0x00,
 440               sizeof(struct fcoe_kwqe_conn_enable_disable));
 441        disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN;
 442        disable_req.hdr.flags =
 443                (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
 444
 445        disable_req.src_mac_addr_lo[0] =  tgt->src_addr[5];
 446        disable_req.src_mac_addr_lo[1] =  tgt->src_addr[4];
 447        disable_req.src_mac_addr_mid[0] =  tgt->src_addr[3];
 448        disable_req.src_mac_addr_mid[1] =  tgt->src_addr[2];
 449        disable_req.src_mac_addr_hi[0] =  tgt->src_addr[1];
 450        disable_req.src_mac_addr_hi[1] =  tgt->src_addr[0];
 451
 452        disable_req.dst_mac_addr_lo[0] =  ctlr->dest_addr[5];
 453        disable_req.dst_mac_addr_lo[1] =  ctlr->dest_addr[4];
 454        disable_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
 455        disable_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
 456        disable_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
 457        disable_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
 458
 459        port_id = tgt->sid;
 460        disable_req.s_id[0] = (port_id & 0x000000FF);
 461        disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
 462        disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16;
 463
 464
 465        port_id = rport->port_id;
 466        disable_req.d_id[0] = (port_id & 0x000000FF);
 467        disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
 468        disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
 469        disable_req.context_id = tgt->context_id;
 470        disable_req.conn_id = tgt->fcoe_conn_id;
 471        disable_req.vlan_tag = interface->vlan_id <<
 472                                FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
 473        disable_req.vlan_tag |=
 474                        3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
 475        disable_req.vlan_flag = interface->vlan_enabled;
 476
 477        kwqe_arr[0] = (struct kwqe *) &disable_req;
 478
 479        if (hba->cnic && hba->cnic->submit_kwqes)
 480                rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
 481
 482        return rc;
 483}
 484
 485/**
 486 * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy
 487 *
  488 * @hba:                adapter structure pointer
 489 * @tgt:                bnx2fc_rport structure pointer
 490 */
 491int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
 492                                        struct bnx2fc_rport *tgt)
 493{
 494        struct fcoe_kwqe_conn_destroy destroy_req;
 495        struct kwqe *kwqe_arr[2];
 496        int num_kwqes = 1;
 497        int rc = 0;
 498
 499        memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy));
 500        destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN;
 501        destroy_req.hdr.flags =
 502                (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
 503
 504        destroy_req.context_id = tgt->context_id;
 505        destroy_req.conn_id = tgt->fcoe_conn_id;
 506
 507        kwqe_arr[0] = (struct kwqe *) &destroy_req;
 508
 509        if (hba->cnic && hba->cnic->submit_kwqes)
 510                rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
 511
 512        return rc;
 513}
 514
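/*
 * is_valid_lport - returns true if @lport is still registered on hba->vports;
 * the list is walked under hba_lock.
 */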
 515static bool is_valid_lport(struct bnx2fc_hba *hba, struct fc_lport *lport)
 516{
 517        struct bnx2fc_lport *blport;
 518
 519        spin_lock_bh(&hba->hba_lock);
 520        list_for_each_entry(blport, &hba->vports, list) {
 521                if (blport->lport == lport) {
 522                        spin_unlock_bh(&hba->hba_lock);
 523                        return true;
 524                }
 525        }
 526        spin_unlock_bh(&hba->hba_lock);
 527        return false;
 528
 529}
 530
 531
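/*
 * bnx2fc_unsol_els_work - work handler for unsolicited ELS frames: hands the
 * frame to libfc via fc_exch_recv() if the lport is still valid, then frees
 * the work item.
 */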
 532static void bnx2fc_unsol_els_work(struct work_struct *work)
 533{
 534        struct bnx2fc_unsol_els *unsol_els;
 535        struct fc_lport *lport;
 536        struct bnx2fc_hba *hba;
 537        struct fc_frame *fp;
 538
 539        unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
 540        lport = unsol_els->lport;
 541        fp = unsol_els->fp;
 542        hba = unsol_els->hba;
 543        if (is_valid_lport(hba, lport))
 544                fc_exch_recv(lport, fp);
 545        kfree(unsol_els);
 546}
 547
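/**
 * bnx2fc_process_l2_frame_compl - handles an unsolicited L2 (ELS) frame
 *
 * @tgt:                bnx2fc_rport structure pointer
 * @buf:                received frame (FC header followed by the payload)
 * @frame_len:          length of the frame in bytes
 * @l2_oxid:            OX_ID to patch into the header, or FC_XID_UNKNOWN
 *
 * Copies the frame into a freshly allocated fc_frame and queues it on
 * bnx2fc_wq so that it is delivered to libfc in process context. ELS
 * requests that need no reply (TEST, ESTC, FAN, CSU) are dropped.
 */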
 548void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
 549                                   unsigned char *buf,
 550                                   u32 frame_len, u16 l2_oxid)
 551{
 552        struct fcoe_port *port = tgt->port;
 553        struct fc_lport *lport = port->lport;
 554        struct bnx2fc_interface *interface = port->priv;
 555        struct bnx2fc_unsol_els *unsol_els;
 556        struct fc_frame_header *fh;
 557        struct fc_frame *fp;
 558        struct sk_buff *skb;
 559        u32 payload_len;
 560        u32 crc;
 561        u8 op;
 562
 563
 564        unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC);
 565        if (!unsol_els) {
 566                BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n");
 567                return;
 568        }
 569
 570        BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n",
 571                l2_oxid, frame_len);
 572
 573        payload_len = frame_len - sizeof(struct fc_frame_header);
 574
 575        fp = fc_frame_alloc(lport, payload_len);
 576        if (!fp) {
 577                printk(KERN_ERR PFX "fc_frame_alloc failure\n");
 578                kfree(unsol_els);
 579                return;
 580        }
 581
 582        fh = (struct fc_frame_header *) fc_frame_header_get(fp);
 583        /* Copy FC Frame header and payload into the frame */
 584        memcpy(fh, buf, frame_len);
 585
 586        if (l2_oxid != FC_XID_UNKNOWN)
 587                fh->fh_ox_id = htons(l2_oxid);
 588
 589        skb = fp_skb(fp);
 590
 591        if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) ||
 592            (fh->fh_r_ctl == FC_RCTL_ELS_REP)) {
 593
 594                if (fh->fh_type == FC_TYPE_ELS) {
 595                        op = fc_frame_payload_op(fp);
 596                        if ((op == ELS_TEST) || (op == ELS_ESTC) ||
 597                            (op == ELS_FAN) || (op == ELS_CSU)) {
 598                                /*
 599                                 * No need to reply for these
 600                                 * ELS requests
 601                                 */
 602                                printk(KERN_ERR PFX "dropping ELS 0x%x\n", op);
 603                                kfree_skb(skb);
 604                                kfree(unsol_els);
 605                                return;
 606                        }
 607                }
 608                crc = fcoe_fc_crc(fp);
 609                fc_frame_init(fp);
 610                fr_dev(fp) = lport;
 611                fr_sof(fp) = FC_SOF_I3;
 612                fr_eof(fp) = FC_EOF_T;
 613                fr_crc(fp) = cpu_to_le32(~crc);
 614                unsol_els->lport = lport;
 615                unsol_els->hba = interface->hba;
 616                unsol_els->fp = fp;
 617                INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
 618                queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
 619        } else {
 620                BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl);
 621                kfree_skb(skb);
 622                kfree(unsol_els);
 623        }
 624}
 625
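/*
 * bnx2fc_process_unsol_compl - handles unsolicited CQEs from the firmware:
 * unsolicited L2 frames, error detection and warning detection events.
 * Error events on outstanding SCSI commands trigger REC (for tape devices)
 * or ABTS/cleanup based error recovery.
 */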
 626static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
 627{
 628        u8 num_rq;
 629        struct fcoe_err_report_entry *err_entry;
 630        unsigned char *rq_data;
 631        unsigned char *buf = NULL, *buf1;
 632        int i;
 633        u16 xid;
 634        u32 frame_len, len;
 635        struct bnx2fc_cmd *io_req = NULL;
 636        struct fcoe_task_ctx_entry *task, *task_page;
 637        struct bnx2fc_interface *interface = tgt->port->priv;
 638        struct bnx2fc_hba *hba = interface->hba;
 639        int task_idx, index;
 640        int rc = 0;
 641        u64 err_warn_bit_map;
 642        u8 err_warn = 0xff;
 643
 644
 645        BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
 646        switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) {
 647        case FCOE_UNSOLICITED_FRAME_CQE_TYPE:
 648                frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >>
 649                             FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT;
 650
 651                num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ;
 652
 653                spin_lock_bh(&tgt->tgt_lock);
 654                rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq);
 655                spin_unlock_bh(&tgt->tgt_lock);
 656
 657                if (rq_data) {
 658                        buf = rq_data;
 659                } else {
 660                        buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ),
 661                                              GFP_ATOMIC);
 662
 663                        if (!buf1) {
 664                                BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n");
 665                                break;
 666                        }
 667
 668                        for (i = 0; i < num_rq; i++) {
 669                                spin_lock_bh(&tgt->tgt_lock);
 670                                rq_data = (unsigned char *)
 671                                           bnx2fc_get_next_rqe(tgt, 1);
 672                                spin_unlock_bh(&tgt->tgt_lock);
 673                                len = BNX2FC_RQ_BUF_SZ;
 674                                memcpy(buf1, rq_data, len);
 675                                buf1 += len;
 676                        }
 677                }
 678                bnx2fc_process_l2_frame_compl(tgt, buf, frame_len,
 679                                              FC_XID_UNKNOWN);
 680
 681                if (buf != rq_data)
 682                        kfree(buf);
 683                spin_lock_bh(&tgt->tgt_lock);
 684                bnx2fc_return_rqe(tgt, num_rq);
 685                spin_unlock_bh(&tgt->tgt_lock);
 686                break;
 687
 688        case FCOE_ERROR_DETECTION_CQE_TYPE:
 689                /*
 690                 * In case of error reporting CQE a single RQ entry
 691                 * is consumed.
 692                 */
 693                spin_lock_bh(&tgt->tgt_lock);
 694                num_rq = 1;
 695                err_entry = (struct fcoe_err_report_entry *)
 696                             bnx2fc_get_next_rqe(tgt, 1);
 697                xid = err_entry->fc_hdr.ox_id;
 698                BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
 699                BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
 700                        err_entry->data.err_warn_bitmap_hi,
 701                        err_entry->data.err_warn_bitmap_lo);
 702                BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
 703                        err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
 704
 705
 706                if (xid > hba->max_xid) {
 707                        BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
 708                                   xid);
 709                        goto ret_err_rqe;
 710                }
 711
 712                task_idx = xid / BNX2FC_TASKS_PER_PAGE;
 713                index = xid % BNX2FC_TASKS_PER_PAGE;
 714                task_page = (struct fcoe_task_ctx_entry *)
 715                                        hba->task_ctx[task_idx];
 716                task = &(task_page[index]);
 717
 718                io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
 719                if (!io_req)
 720                        goto ret_err_rqe;
 721
 722                if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
 723                        printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
 724                        goto ret_err_rqe;
 725                }
 726
 727                if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
 728                                       &io_req->req_flags)) {
 729                        BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
 730                                            "progress.. ignore unsol err\n");
 731                        goto ret_err_rqe;
 732                }
 733
 734                err_warn_bit_map = (u64)
 735                        ((u64)err_entry->data.err_warn_bitmap_hi << 32) |
 736                        (u64)err_entry->data.err_warn_bitmap_lo;
 737                for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
 738                        if (err_warn_bit_map & (u64)((u64)1 << i)) {
 739                                err_warn = i;
 740                                break;
 741                        }
 742                }
 743
 744                /*
 745                 * If ABTS is already in progress, and FW error is
 746                 * received after that, do not cancel the timeout_work
 747                 * and let the error recovery continue by explicitly
 748                 * logging out the target, when the ABTS eventually
 749                 * times out.
 750                 */
 751                if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
 752                        printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
 753                                            "in ABTS processing\n", xid);
 754                        goto ret_err_rqe;
 755                }
 756                BNX2FC_TGT_DBG(tgt, "err = 0x%x\n", err_warn);
 757                if (tgt->dev_type != TYPE_TAPE)
 758                        goto skip_rec;
 759                switch (err_warn) {
 760                case FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION:
 761                case FCOE_ERROR_CODE_DATA_OOO_RO:
 762                case FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT:
 763                case FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET:
 764                case FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ:
 765                case FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET:
 766                        BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n",
 767                                   xid);
 768                        memcpy(&io_req->err_entry, err_entry,
 769                               sizeof(struct fcoe_err_report_entry));
 770                        if (!test_bit(BNX2FC_FLAG_SRR_SENT,
 771                                      &io_req->req_flags)) {
 772                                spin_unlock_bh(&tgt->tgt_lock);
 773                                rc = bnx2fc_send_rec(io_req);
 774                                spin_lock_bh(&tgt->tgt_lock);
 775
 776                                if (rc)
 777                                        goto skip_rec;
 778                        } else
 779                                printk(KERN_ERR PFX "SRR in progress\n");
 780                        goto ret_err_rqe;
 781                        break;
 782                default:
 783                        break;
 784                }
 785
 786skip_rec:
 787                set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags);
 788                /*
 789                 * Cancel the timeout_work, as we received IO
 790                 * completion with FW error.
 791                 */
 792                if (cancel_delayed_work(&io_req->timeout_work))
 793                        kref_put(&io_req->refcount, bnx2fc_cmd_release);
 794
 795                rc = bnx2fc_initiate_abts(io_req);
 796                if (rc != SUCCESS) {
 797                        printk(KERN_ERR PFX "err_warn: initiate_abts "
 798                                "failed xid = 0x%x. issue cleanup\n",
 799                                io_req->xid);
 800                        bnx2fc_initiate_cleanup(io_req);
 801                }
 802ret_err_rqe:
 803                bnx2fc_return_rqe(tgt, 1);
 804                spin_unlock_bh(&tgt->tgt_lock);
 805                break;
 806
 807        case FCOE_WARNING_DETECTION_CQE_TYPE:
 808                /*
  809                 * In case of warning reporting CQE a single RQ entry
  810                 * is consumed.
 811                 */
 812                spin_lock_bh(&tgt->tgt_lock);
 813                num_rq = 1;
 814                err_entry = (struct fcoe_err_report_entry *)
 815                             bnx2fc_get_next_rqe(tgt, 1);
 816                xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
 817                BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
 818                BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x",
 819                        err_entry->data.err_warn_bitmap_hi,
 820                        err_entry->data.err_warn_bitmap_lo);
 821                BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
 822                        err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
 823
 824                if (xid > hba->max_xid) {
 825                        BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid);
 826                        goto ret_warn_rqe;
 827                }
 828
 829                err_warn_bit_map = (u64)
 830                        ((u64)err_entry->data.err_warn_bitmap_hi << 32) |
 831                        (u64)err_entry->data.err_warn_bitmap_lo;
 832                for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
 833                        if (err_warn_bit_map & ((u64)1 << i)) {
 834                                err_warn = i;
 835                                break;
 836                        }
 837                }
 838                BNX2FC_TGT_DBG(tgt, "warn = 0x%x\n", err_warn);
 839
 840                task_idx = xid / BNX2FC_TASKS_PER_PAGE;
 841                index = xid % BNX2FC_TASKS_PER_PAGE;
 842                task_page = (struct fcoe_task_ctx_entry *)
 843                             interface->hba->task_ctx[task_idx];
 844                task = &(task_page[index]);
 845                io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
 846                if (!io_req)
 847                        goto ret_warn_rqe;
 848
 849                if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
 850                        printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
 851                        goto ret_warn_rqe;
 852                }
 853
 854                memcpy(&io_req->err_entry, err_entry,
 855                       sizeof(struct fcoe_err_report_entry));
 856
 857                if (err_warn == FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION)
 858                        /* REC_TOV is not a warning code */
 859                        BUG_ON(1);
 860                else
 861                        BNX2FC_TGT_DBG(tgt, "Unsolicited warning\n");
 862ret_warn_rqe:
 863                bnx2fc_return_rqe(tgt, 1);
 864                spin_unlock_bh(&tgt->tgt_lock);
 865                break;
 866
 867        default:
 868                printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n");
 869                break;
 870        }
 871}
 872
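/**
 * bnx2fc_process_cq_compl - processes one CQE for a completed task
 *
 * @tgt:                bnx2fc_rport structure pointer
 * @wqe:                CQE carrying the task id (xid)
 *
 * Looks up the task context and io_req for the xid and dispatches to the
 * appropriate completion handler based on the command type and rx state.
 */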
 873void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
 874{
 875        struct fcoe_task_ctx_entry *task;
 876        struct fcoe_task_ctx_entry *task_page;
 877        struct fcoe_port *port = tgt->port;
 878        struct bnx2fc_interface *interface = port->priv;
 879        struct bnx2fc_hba *hba = interface->hba;
 880        struct bnx2fc_cmd *io_req;
 881        int task_idx, index;
 882        u16 xid;
 883        u8  cmd_type;
 884        u8 rx_state = 0;
 885        u8 num_rq;
 886
 887        spin_lock_bh(&tgt->tgt_lock);
 888        xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
 889        if (xid >= hba->max_tasks) {
 890                printk(KERN_ERR PFX "ERROR:xid out of range\n");
 891                spin_unlock_bh(&tgt->tgt_lock);
 892                return;
 893        }
 894        task_idx = xid / BNX2FC_TASKS_PER_PAGE;
 895        index = xid % BNX2FC_TASKS_PER_PAGE;
 896        task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
 897        task = &(task_page[index]);
 898
 899        num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
 900                   FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
 901                   FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);
 902
 903        io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
 904
 905        if (io_req == NULL) {
 906                printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n");
 907                spin_unlock_bh(&tgt->tgt_lock);
 908                return;
 909        }
 910
 911        /* Timestamp IO completion time */
 912        cmd_type = io_req->cmd_type;
 913
 914        rx_state = ((task->rxwr_txrd.var_ctx.rx_flags &
 915                    FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE) >>
 916                    FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT);
 917
 918        /* Process other IO completion types */
 919        switch (cmd_type) {
 920        case BNX2FC_SCSI_CMD:
 921                if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
 922                        bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
 923                        spin_unlock_bh(&tgt->tgt_lock);
 924                        return;
 925                }
 926
 927                if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
 928                        bnx2fc_process_abts_compl(io_req, task, num_rq);
 929                else if (rx_state ==
 930                         FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
 931                        bnx2fc_process_cleanup_compl(io_req, task, num_rq);
 932                else
 933                        printk(KERN_ERR PFX "Invalid rx state - %d\n",
 934                                rx_state);
 935                break;
 936
 937        case BNX2FC_TASK_MGMT_CMD:
 938                BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
 939                bnx2fc_process_tm_compl(io_req, task, num_rq);
 940                break;
 941
 942        case BNX2FC_ABTS:
 943                /*
 944                 * ABTS request received by firmware. ABTS response
 945                 * will be delivered to the task belonging to the IO
 946                 * that was aborted
 947                 */
 948                BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n");
 949                kref_put(&io_req->refcount, bnx2fc_cmd_release);
 950                break;
 951
 952        case BNX2FC_ELS:
 953                if (rx_state == FCOE_TASK_RX_STATE_COMPLETED)
 954                        bnx2fc_process_els_compl(io_req, task, num_rq);
 955                else if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
 956                        bnx2fc_process_abts_compl(io_req, task, num_rq);
 957                else if (rx_state ==
 958                         FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
 959                        bnx2fc_process_cleanup_compl(io_req, task, num_rq);
 960                else
 961                        printk(KERN_ERR PFX "Invalid rx state =  %d\n",
 962                                rx_state);
 963                break;
 964
 965        case BNX2FC_CLEANUP:
 966                BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n");
 967                kref_put(&io_req->refcount, bnx2fc_cmd_release);
 968                break;
 969
 970        case BNX2FC_SEQ_CLEANUP:
 971                BNX2FC_IO_DBG(io_req, "cq_compl(0x%x) - seq cleanup resp\n",
 972                              io_req->xid);
 973                bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state);
 974                kref_put(&io_req->refcount, bnx2fc_cmd_release);
 975                break;
 976
 977        default:
 978                printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
 979                break;
 980        }
 981        spin_unlock_bh(&tgt->tgt_lock);
 982}
 983
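/*
 * bnx2fc_arm_cq - writes the current CQ consumer index and toggle bit to the
 * connection's RX doorbell so that the firmware keeps posting CQEs.
 */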
 984void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
 985{
 986        struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
 987        u32 msg;
 988
 989        wmb();
 990        rx_db->doorbell_cq_cons = tgt->cq_cons_idx | (tgt->cq_curr_toggle_bit <<
 991                        FCOE_CQE_TOGGLE_BIT_SHIFT);
 992        msg = *((u32 *)rx_db);
 993        writel(cpu_to_le32(msg), tgt->ctx_base);
 994
 995}
 996
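/*
 * bnx2fc_alloc_work - allocates (GFP_ATOMIC) and initializes a bnx2fc_work
 * item carrying @tgt and @wqe for deferred CQE processing; returns NULL on
 * allocation failure.
 */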
 997static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
 998{
 999        struct bnx2fc_work *work;
1000        work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
1001        if (!work)
1002                return NULL;
1003
1004        INIT_LIST_HEAD(&work->list);
1005        work->tgt = tgt;
1006        work->wqe = wqe;
1007        return work;
1008}
1009
1010/* Pending work request completion */
1011static void bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
1012{
1013        unsigned int cpu = wqe % num_possible_cpus();
1014        struct bnx2fc_percpu_s *fps;
1015        struct bnx2fc_work *work;
1016
1017        fps = &per_cpu(bnx2fc_percpu, cpu);
1018        spin_lock_bh(&fps->fp_work_lock);
1019        if (fps->iothread) {
1020                work = bnx2fc_alloc_work(tgt, wqe);
1021                if (work) {
1022                        list_add_tail(&work->list, &fps->work_list);
1023                        wake_up_process(fps->iothread);
1024                        spin_unlock_bh(&fps->fp_work_lock);
1025                        return;
1026                }
1027        }
1028        spin_unlock_bh(&fps->fp_work_lock);
1029        bnx2fc_process_cq_compl(tgt, wqe);
1030}
1031
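/**
 * bnx2fc_process_new_cqes - drains new entries from the connection's CQ
 *
 * @tgt:                bnx2fc_rport structure pointer
 *
 * Walks the CQ while the toggle bit matches, handing unsolicited CQEs to
 * bnx2fc_process_unsol_compl() and task completions to bnx2fc_pending_work(),
 * then re-arms the CQ (if the doorbell is mapped) and credits the freed SQEs.
 */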
1032int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
1033{
1034        struct fcoe_cqe *cq;
1035        u32 cq_cons;
1036        struct fcoe_cqe *cqe;
1037        u32 num_free_sqes = 0;
1038        u32 num_cqes = 0;
1039        u16 wqe;
1040
1041        /*
1042         * cq_lock is a low contention lock used to protect
1043         * the CQ data structure from being freed up during
1044         * the upload operation
1045         */
1046        spin_lock_bh(&tgt->cq_lock);
1047
1048        if (!tgt->cq) {
1049                printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n");
1050                spin_unlock_bh(&tgt->cq_lock);
1051                return 0;
1052        }
1053        cq = tgt->cq;
1054        cq_cons = tgt->cq_cons_idx;
1055        cqe = &cq[cq_cons];
1056
1057        while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
1058               (tgt->cq_curr_toggle_bit <<
1059               FCOE_CQE_TOGGLE_BIT_SHIFT)) {
1060
1061                /* new entry on the cq */
1062                if (wqe & FCOE_CQE_CQE_TYPE) {
1063                        /* Unsolicited event notification */
1064                        bnx2fc_process_unsol_compl(tgt, wqe);
1065                } else {
1066                        bnx2fc_pending_work(tgt, wqe);
1067                        num_free_sqes++;
1068                }
1069                cqe++;
1070                tgt->cq_cons_idx++;
1071                num_cqes++;
1072
1073                if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
1074                        tgt->cq_cons_idx = 0;
1075                        cqe = cq;
1076                        tgt->cq_curr_toggle_bit =
1077                                1 - tgt->cq_curr_toggle_bit;
1078                }
1079        }
1080        if (num_cqes) {
1081                /* Arm CQ only if doorbell is mapped */
1082                if (tgt->ctx_base)
1083                        bnx2fc_arm_cq(tgt);
1084                atomic_add(num_free_sqes, &tgt->free_sqes);
1085        }
1086        spin_unlock_bh(&tgt->cq_lock);
1087        return 0;
1088}
1089
1090/**
1091 * bnx2fc_fastpath_notification - process global event queue (KCQ)
1092 *
1093 * @hba:                adapter structure pointer
1094 * @new_cqe_kcqe:       pointer to newly DMA'd KCQ entry
1095 *
1096 * Fast path event notification handler
1097 */
1098static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
1099                                        struct fcoe_kcqe *new_cqe_kcqe)
1100{
1101        u32 conn_id = new_cqe_kcqe->fcoe_conn_id;
1102        struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];
1103
1104        if (!tgt) {
1105                printk(KERN_ERR PFX "conn_id 0x%x not valid\n", conn_id);
1106                return;
1107        }
1108
1109        bnx2fc_process_new_cqes(tgt);
1110}
1111
1112/**
1113 * bnx2fc_process_ofld_cmpl - process FCoE session offload completion
1114 *
1115 * @hba:        adapter structure pointer
1116 * @ofld_kcqe:  connection offload kcqe pointer
1117 *
1118 * handle session offload completion, enable the session if offload is
1119 * successful.
1120 */
1121static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
1122                                        struct fcoe_kcqe *ofld_kcqe)
1123{
1124        struct bnx2fc_rport             *tgt;
1125        struct fcoe_port                *port;
1126        struct bnx2fc_interface         *interface;
1127        u32                             conn_id;
1128        u32                             context_id;
1129
1130        conn_id = ofld_kcqe->fcoe_conn_id;
1131        context_id = ofld_kcqe->fcoe_conn_context_id;
1132        tgt = hba->tgt_ofld_list[conn_id];
1133        if (!tgt) {
1134                printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n");
1135                return;
1136        }
1137        BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
1138                ofld_kcqe->fcoe_conn_context_id);
1139        port = tgt->port;
1140        interface = tgt->port->priv;
1141        if (hba != interface->hba) {
1142                printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n");
1143                goto ofld_cmpl_err;
1144        }
1145        /*
1146         * cnic has allocated a context_id for this session; use this
1147         * while enabling the session.
1148         */
1149        tgt->context_id = context_id;
1150        if (ofld_kcqe->completion_status) {
1151                if (ofld_kcqe->completion_status ==
1152                                FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) {
1153                        printk(KERN_ERR PFX "unable to allocate FCoE context "
1154                                "resources\n");
1155                        set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
1156                }
1157        } else {
1158                /* FW offload request successfully completed */
1159                set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
1160        }
1161ofld_cmpl_err:
1162        set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
1163        wake_up_interruptible(&tgt->ofld_wait);
1164}
1165
1166/**
1167 * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion
1168 *
1169 * @hba:        adapter structure pointer
1170 * @ofld_kcqe:  connection offload kcqe pointer
1171 *
1172 * handle session enable completion, mark the rport as ready
1173 */
1174
1175static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
1176                                                struct fcoe_kcqe *ofld_kcqe)
1177{
1178        struct bnx2fc_rport             *tgt;
1179        struct bnx2fc_interface         *interface;
1180        u32                             conn_id;
1181        u32                             context_id;
1182
1183        context_id = ofld_kcqe->fcoe_conn_context_id;
1184        conn_id = ofld_kcqe->fcoe_conn_id;
1185        tgt = hba->tgt_ofld_list[conn_id];
1186        if (!tgt) {
1187                printk(KERN_ERR PFX "ERROR:enbl_cmpl: No pending ofld req\n");
1188                return;
1189        }
1190
1191        BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n",
1192                ofld_kcqe->fcoe_conn_context_id);
1193
1194        /*
1195         * context_id should be the same for this target during offload
1196         * and enable
1197         */
1198        if (tgt->context_id != context_id) {
1199                printk(KERN_ERR PFX "context id mis-match\n");
1200                return;
1201        }
1202        interface = tgt->port->priv;
1203        if (hba != interface->hba) {
1204                printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
1205                goto enbl_cmpl_err;
1206        }
1207        if (!ofld_kcqe->completion_status)
1208                /* enable successful - rport ready for issuing IOs */
1209                set_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
1210
1211enbl_cmpl_err:
1212        set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
1213        wake_up_interruptible(&tgt->ofld_wait);
1214}
1215
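/*
 * bnx2fc_process_conn_disable_cmpl - handles the DISABLE_CONN KCQE: on
 * success clears the offloaded/enabled flags and marks the session disabled;
 * in either case wakes up the upload waiter.
 */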
1216static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
1217                                        struct fcoe_kcqe *disable_kcqe)
1218{
1219
1220        struct bnx2fc_rport             *tgt;
1221        u32                             conn_id;
1222
1223        conn_id = disable_kcqe->fcoe_conn_id;
1224        tgt = hba->tgt_ofld_list[conn_id];
1225        if (!tgt) {
1226                printk(KERN_ERR PFX "ERROR: disable_cmpl: No disable req\n");
1227                return;
1228        }
1229
1230        BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id);
1231
1232        if (disable_kcqe->completion_status) {
1233                printk(KERN_ERR PFX "Disable failed with cmpl status %d\n",
1234                        disable_kcqe->completion_status);
1235                set_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags);
1236                set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
1237                wake_up_interruptible(&tgt->upld_wait);
1238        } else {
1239                /* disable successful */
1240                BNX2FC_TGT_DBG(tgt, "disable successful\n");
1241                clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
1242                clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
1243                set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
1244                set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
1245                wake_up_interruptible(&tgt->upld_wait);
1246        }
1247}
1248
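/*
 * bnx2fc_process_conn_destroy_cmpl - handles the DESTROY_CONN KCQE: on
 * success marks the session destroyed and wakes up the upload waiter.
 */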
1249static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
1250                                        struct fcoe_kcqe *destroy_kcqe)
1251{
1252        struct bnx2fc_rport             *tgt;
1253        u32                             conn_id;
1254
1255        conn_id = destroy_kcqe->fcoe_conn_id;
1256        tgt = hba->tgt_ofld_list[conn_id];
1257        if (!tgt) {
1258                printk(KERN_ERR PFX "destroy_cmpl: No destroy req\n");
1259                return;
1260        }
1261
1262        BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);
1263
1264        if (destroy_kcqe->completion_status) {
1265                printk(KERN_ERR PFX "Destroy conn failed, cmpl status %d\n",
1266                        destroy_kcqe->completion_status);
1267                return;
1268        } else {
1269                /* destroy successful */
1270                BNX2FC_TGT_DBG(tgt, "upload successful\n");
1271                clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
1272                set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags);
1273                set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
1274                wake_up_interruptible(&tgt->upld_wait);
1275        }
1276}
1277
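/*
 * bnx2fc_init_failure - logs the reason for an FCoE init (INIT_FUNC) failure
 * based on the completion status reported by the firmware.
 */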
1278static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
1279{
1280        switch (err_code) {
1281        case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE:
1282                printk(KERN_ERR PFX "init_failure due to invalid opcode\n");
1283                break;
1284
1285        case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE:
1286                printk(KERN_ERR PFX "init failed due to ctx alloc failure\n");
1287                break;
1288
1289        case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
1290                printk(KERN_ERR PFX "init_failure due to NIC error\n");
1291                break;
1292        case FCOE_KCQE_COMPLETION_STATUS_ERROR:
1293                printk(KERN_ERR PFX "init failure due to compl status err\n");
1294                break;
1295        case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION:
1296                printk(KERN_ERR PFX "init failure due to HSI mismatch\n");
1297                break;
1298        default:
1299                printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
1300        }
1301}
1302
1303/**
 1304 * bnx2fc_indicate_kcqe - process KCQE
 1305 *
 1306 * @context:    adapter structure pointer
 1307 * @kcq:        kcqe pointer
 1308 * @num_cqe:    Number of completion queue elements
1309 *
1310 * Generic KCQ event handler
1311 */
1312void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
1313                                        u32 num_cqe)
1314{
1315        struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
1316        int i = 0;
1317        struct fcoe_kcqe *kcqe = NULL;
1318
1319        while (i < num_cqe) {
1320                kcqe = (struct fcoe_kcqe *) kcq[i++];
1321
1322                switch (kcqe->op_code) {
1323                case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION:
1324                        bnx2fc_fastpath_notification(hba, kcqe);
1325                        break;
1326
1327                case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
1328                        bnx2fc_process_ofld_cmpl(hba, kcqe);
1329                        break;
1330
1331                case FCOE_KCQE_OPCODE_ENABLE_CONN:
1332                        bnx2fc_process_enable_conn_cmpl(hba, kcqe);
1333                        break;
1334
1335                case FCOE_KCQE_OPCODE_INIT_FUNC:
1336                        if (kcqe->completion_status !=
1337                                        FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
1338                                bnx2fc_init_failure(hba,
1339                                                kcqe->completion_status);
1340                        } else {
1341                                set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
1342                                bnx2fc_get_link_state(hba);
1343                                printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n",
1344                                        (u8)hba->pcidev->bus->number);
1345                        }
1346                        break;
1347
1348                case FCOE_KCQE_OPCODE_DESTROY_FUNC:
1349                        if (kcqe->completion_status !=
1350                                        FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
1351
1352                                printk(KERN_ERR PFX "DESTROY failed\n");
1353                        } else {
1354                                printk(KERN_ERR PFX "DESTROY success\n");
1355                        }
1356                        set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
1357                        wake_up_interruptible(&hba->destroy_wait);
1358                        break;
1359
1360                case FCOE_KCQE_OPCODE_DISABLE_CONN:
1361                        bnx2fc_process_conn_disable_cmpl(hba, kcqe);
1362                        break;
1363
1364                case FCOE_KCQE_OPCODE_DESTROY_CONN:
1365                        bnx2fc_process_conn_destroy_cmpl(hba, kcqe);
1366                        break;
1367
1368                case FCOE_KCQE_OPCODE_STAT_FUNC:
1369                        if (kcqe->completion_status !=
1370                            FCOE_KCQE_COMPLETION_STATUS_SUCCESS)
1371                                printk(KERN_ERR PFX "STAT failed\n");
1372                        complete(&hba->stat_req_done);
1373                        break;
1374
1375                case FCOE_KCQE_OPCODE_FCOE_ERROR:
1376                        /* fall thru */
1377                default:
1378                        printk(KERN_ERR PFX "unknown opcode 0x%x\n",
1379                                                                kcqe->op_code);
1380                }
1381        }
1382}
1383
1384void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
1385{
1386        struct fcoe_sqe *sqe;
1387
1388        sqe = &tgt->sq[tgt->sq_prod_idx];
1389
1390        /* Fill SQ WQE */
1391        sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT;
1392        sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;
1393
1394        /* Advance SQ Prod Idx */
1395        if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) {
1396                tgt->sq_prod_idx = 0;
1397                tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit;
1398        }
1399}
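
As an aside, the producer-index/toggle-bit pattern used above can be illustrated with a small standalone sketch (userspace C, not driver code). QUEUE_WQES_MAX below is an assumed stand-in for BNX2FC_SQ_WQES_MAX; the real size comes from bnx2fc.h.

/* Standalone sketch: producer index with a toggle bit that flips on wrap. */
#include <stdio.h>

#define QUEUE_WQES_MAX 256   /* assumed size for illustration */

struct sq_state {
	unsigned int prod_idx;
	unsigned int toggle;
};

static void sq_advance(struct sq_state *sq)
{
	if (++sq->prod_idx == QUEUE_WQES_MAX) {
		sq->prod_idx = 0;
		sq->toggle = 1 - sq->toggle;   /* flip on every wrap */
	}
}

int main(void)
{
	struct sq_state sq = { 0, 1 };
	unsigned int i;

	for (i = 0; i < 2 * QUEUE_WQES_MAX + 3; i++)
		sq_advance(&sq);

	/* After two full wraps plus three entries: prod_idx = 3, toggle back to 1. */
	printf("prod_idx=%u toggle=%u\n", sq.prod_idx, sq.toggle);
	return 0;
}

The toggle bit lets the consumer tell freshly written entries from stale ones left over from the previous pass around the ring, without a separate count.
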
1400
1401void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
1402{
1403        struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
1404        u32 msg;
1405
1406        wmb();
1407        sq_db->prod = tgt->sq_prod_idx |
1408                                (tgt->sq_curr_toggle_bit << 15);
1409        msg = *((u32 *)sq_db);
1410        writel(cpu_to_le32(msg), tgt->ctx_base);
1411
1412}
1413
1414int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
1415{
1416        u32 context_id = tgt->context_id;
1417        struct fcoe_port *port = tgt->port;
1418        u32 reg_off;
1419        resource_size_t reg_base;
1420        struct bnx2fc_interface *interface = port->priv;
1421        struct bnx2fc_hba *hba = interface->hba;
1422
1423        reg_base = pci_resource_start(hba->pcidev,
1424                                        BNX2X_DOORBELL_PCI_BAR);
1425        reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF);
1426        tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
1427        if (!tgt->ctx_base)
1428                return -ENOMEM;
1429        return 0;
1430}
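
The mapping above derives a per-connection doorbell offset from the context id. A minimal sketch of that arithmetic follows (userspace C, not driver code); DB_SHIFT is an assumed value standing in for BNX2X_DB_SHIFT, which the driver takes from the cnic headers.

/* Standalone sketch: per-connection doorbell offset arithmetic. */
#include <stdio.h>
#include <stdint.h>

#define DB_SHIFT 3   /* assumed: each doorbell cell is 1 << 3 = 8 bytes */

static uint32_t doorbell_offset(uint32_t context_id)
{
	/* Only the low 17 bits of the context id select the cell. */
	return (1u << DB_SHIFT) * (context_id & 0x1FFFF);
}

int main(void)
{
	printf("cid 0x10    -> offset 0x%x\n", doorbell_offset(0x10));
	printf("cid 0x20010 -> offset 0x%x\n", doorbell_offset(0x20010));
	return 0;
}
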
1431
1432char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items)
1433{
1434        char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ);
1435
1436        if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX)
1437                return NULL;
1438
1439        tgt->rq_cons_idx += num_items;
1440
1441        if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX)
1442                tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX;
1443
1444        return buf;
1445}
1446
1447void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
1448{
1449        /* return the rq buffer */
1450        u32 next_prod_idx = tgt->rq_prod_idx + num_items;
1451        if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) {
1452                /* Wrap around RQ */
1453                next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX;
1454        }
1455        tgt->rq_prod_idx = next_prod_idx;
1456        tgt->conn_db->rq_prod = tgt->rq_prod_idx;
1457}
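
The RQ producer update above keeps the entry count in the low 15 bits and uses bit 15 as a generation/toggle bit: adding (0x8000 - BNX2FC_RQ_WQES_MAX) at the wrap point zeroes the low bits and flips bit 15 in one step. A standalone sketch (not driver code) with an assumed ring size:

/* Standalone sketch: RQ producer index wrap with a bit-15 toggle. */
#include <stdio.h>
#include <stdint.h>

#define RQ_WQES_MAX 16   /* assumed stand-in for BNX2FC_RQ_WQES_MAX */

static uint16_t rq_return(uint16_t prod_idx, uint8_t num_items)
{
	uint32_t next = prod_idx + num_items;

	if ((next & 0x7fff) == RQ_WQES_MAX)
		next += 0x8000 - RQ_WQES_MAX;   /* wrap low bits, flip bit 15 */
	return (uint16_t)next;
}

int main(void)
{
	uint16_t prod = 0;
	int i;

	for (i = 0; i < 40; i++)
		prod = rq_return(prod, 1);

	/* 40 returns on a 16-entry ring: low bits = 40 % 16 = 8,
	 * bit 15 toggled twice, i.e. back to 0 -> 0x0008. */
	printf("prod_idx = 0x%04x\n", prod);
	return 0;
}
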
1458
1459void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
1460                                  struct fcoe_task_ctx_entry *task,
1461                                  struct bnx2fc_cmd *orig_io_req,
1462                                  u32 offset)
1463{
1464        struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd;
1465        struct bnx2fc_rport *tgt = seq_clnp_req->tgt;
1466        struct bnx2fc_interface *interface = tgt->port->priv;
1467        struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl;
1468        struct fcoe_task_ctx_entry *orig_task;
1469        struct fcoe_task_ctx_entry *task_page;
1470        struct fcoe_ext_mul_sges_ctx *sgl;
1471        u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP;
1472        u8 orig_task_type;
1473        u16 orig_xid = orig_io_req->xid;
1474        u32 context_id = tgt->context_id;
1475        u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma;
1476        u32 orig_offset = offset;
1477        int bd_count;
1478        int orig_task_idx, index;
1479        int i;
1480
1481        memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1482
1483        if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
1484                orig_task_type = FCOE_TASK_TYPE_WRITE;
1485        else
1486                orig_task_type = FCOE_TASK_TYPE_READ;
1487
1488        /* Tx flags */
1489        task->txwr_rxrd.const_ctx.tx_flags =
1490                                FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP <<
1491                                FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1492        /* init flags */
1493        task->txwr_rxrd.const_ctx.init_flags = task_type <<
1494                                FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1495        task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1496                                FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1497        task->rxwr_txrd.const_ctx.init_flags = context_id <<
1498                                FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1501
1502        task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
1503
1504        task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0;
1505        task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset;
1506
1507        bd_count = orig_io_req->bd_tbl->bd_valid;
1508
1509        /* obtain the appropriate bd entry from relative offset */
1510        for (i = 0; i < bd_count; i++) {
1511                if (offset < bd[i].buf_len)
1512                        break;
1513                offset -= bd[i].buf_len;
1514        }
1515        phys_addr += (i * sizeof(struct fcoe_bd_ctx));
1516
1517        if (orig_task_type == FCOE_TASK_TYPE_WRITE) {
1518                task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1519                                (u32)phys_addr;
1520                task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1521                                (u32)((u64)phys_addr >> 32);
1522                task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
1523                                bd_count;
1524                task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off =
1525                                offset; /* adjusted offset */
1526                task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i;
1527        } else {
1528                orig_task_idx = orig_xid / BNX2FC_TASKS_PER_PAGE;
1529                index = orig_xid % BNX2FC_TASKS_PER_PAGE;
1530
1531                task_page = (struct fcoe_task_ctx_entry *)
1532                             interface->hba->task_ctx[orig_task_idx];
1533                orig_task = &(task_page[index]);
1534
1535                /* Multiple SGEs were used for this IO */
1536                sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1537                sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr;
1538                sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)phys_addr >> 32);
1539                sgl->mul_sgl.sgl_size = bd_count;
1540                sgl->mul_sgl.cur_sge_off = offset; /* adjusted offset */
1541                sgl->mul_sgl.cur_sge_idx = i;
1542
1543                memset(&task->rxwr_only.rx_seq_ctx, 0,
1544                       sizeof(struct fcoe_rx_seq_ctx));
1545                task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset;
1546                task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset;
1547        }
1548}
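
The loop above that locates the BD entry covering a relative offset is easy to get wrong off-by-one; the standalone sketch below (userspace C, not driver code) shows the same walk with a simplified stand-in for struct fcoe_bd_ctx.

/* Standalone sketch: find the BD entry that contains a byte offset,
 * plus the residual offset inside that entry. */
#include <stdio.h>
#include <stdint.h>

struct bd {
	uint32_t buf_len;
};

static int bd_for_offset(const struct bd *tbl, int bd_count,
			 uint32_t offset, uint32_t *sge_off)
{
	int i;

	for (i = 0; i < bd_count; i++) {
		if (offset < tbl[i].buf_len)
			break;
		offset -= tbl[i].buf_len;
	}
	*sge_off = offset;
	return i;	/* index of the BD that covers the offset */
}

int main(void)
{
	struct bd tbl[] = { { 4096 }, { 4096 }, { 2048 } };
	uint32_t sge_off;
	int idx = bd_for_offset(tbl, 3, 5000, &sge_off);

	/* 5000 falls 904 bytes into the second BD: idx = 1, sge_off = 904. */
	printf("idx=%d sge_off=%u\n", idx, sge_off);
	return 0;
}
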
1549void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
1550                              struct fcoe_task_ctx_entry *task,
1551                              u16 orig_xid)
1552{
1553        u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP;
1554        struct bnx2fc_rport *tgt = io_req->tgt;
1555        u32 context_id = tgt->context_id;
1556
1557        memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1558
1559        /* Tx Write Rx Read */
1560        /* init flags */
1561        task->txwr_rxrd.const_ctx.init_flags = task_type <<
1562                                FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1563        task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1564                                FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1565        if (tgt->dev_type == TYPE_TAPE)
1566                task->txwr_rxrd.const_ctx.init_flags |=
1567                                FCOE_TASK_DEV_TYPE_TAPE <<
1568                                FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1569        else
1570                task->txwr_rxrd.const_ctx.init_flags |=
1571                                FCOE_TASK_DEV_TYPE_DISK <<
1572                                FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1573        task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
1574
1575        /* Tx flags */
1576        task->txwr_rxrd.const_ctx.tx_flags =
1577                                FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
1578                                FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1579
1580        /* Rx Read Tx Write */
1581        task->rxwr_txrd.const_ctx.init_flags = context_id <<
1582                                FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1583        task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
1584                                FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
1585}
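
The init_flags words above are built by OR-ing small fields at firmware-defined shift positions. The sketch below (userspace C, not driver code) shows the same packing pattern; the field positions and values are assumed for illustration only, the real shifts come from the firmware HSI headers.

/* Standalone sketch: packing several small fields into one flags word. */
#include <stdio.h>
#include <stdint.h>

#define TASK_TYPE_SHIFT		0	/* assumed positions */
#define DEV_TYPE_SHIFT		4
#define CLASS_TYPE_SHIFT	6

int main(void)
{
	uint16_t task_type  = 5;	/* e.g. an "exchange cleanup" code */
	uint16_t dev_type   = 1;	/* e.g. tape */
	uint16_t class_type = 3;	/* FC class 3 */
	uint16_t init_flags = 0;

	init_flags |= task_type  << TASK_TYPE_SHIFT;
	init_flags |= dev_type   << DEV_TYPE_SHIFT;
	init_flags |= class_type << CLASS_TYPE_SHIFT;

	printf("init_flags = 0x%04x\n", init_flags);
	return 0;
}
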
1586
1587void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
1588                                struct fcoe_task_ctx_entry *task)
1589{
1590        struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
1591        struct bnx2fc_rport *tgt = io_req->tgt;
1592        struct fc_frame_header *fc_hdr;
1593        struct fcoe_ext_mul_sges_ctx *sgl;
1594        u8 task_type = 0;
1595        u64 *hdr;
1596        u64 temp_hdr[3];
1597        u32 context_id;
1598
1599
1600        /* Obtain task_type */
1601        if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) ||
1602            (io_req->cmd_type == BNX2FC_ELS)) {
1603                task_type = FCOE_TASK_TYPE_MIDPATH;
1604        } else if (io_req->cmd_type == BNX2FC_ABTS) {
1605                task_type = FCOE_TASK_TYPE_ABTS;
1606        }
1607
1608        memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1609
1610        /* Setup the task from io_req for easy reference */
1611        io_req->task = task;
1612
1613        BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n",
1614                io_req->cmd_type, task_type);
1615
1616        /* Tx only */
1617        if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
1618            (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
1619                task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1620                                (u32)mp_req->mp_req_bd_dma;
1621                task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1622                                (u32)((u64)mp_req->mp_req_bd_dma >> 32);
1623                task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1;
1624        }
1625
1626        /* Tx Write Rx Read */
1627        /* init flags */
1628        task->txwr_rxrd.const_ctx.init_flags = task_type <<
1629                                FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1630        if (tgt->dev_type == TYPE_TAPE)
1631                task->txwr_rxrd.const_ctx.init_flags |=
1632                                FCOE_TASK_DEV_TYPE_TAPE <<
1633                                FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1634        else
1635                task->txwr_rxrd.const_ctx.init_flags |=
1636                                FCOE_TASK_DEV_TYPE_DISK <<
1637                                FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1638        task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1639                                FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1640
1641        /* tx flags */
1642        task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_INIT <<
1643                                FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1644
1645        /* Rx Write Tx Read */
1646        task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
1647
1648        /* rx flags */
1649        task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
1650                                FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
1651
1652        context_id = tgt->context_id;
1653        task->rxwr_txrd.const_ctx.init_flags = context_id <<
1654                                FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1655
1656        fc_hdr = &(mp_req->req_fc_hdr);
1657        if (task_type == FCOE_TASK_TYPE_MIDPATH) {
1658                fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
1659                fc_hdr->fh_rx_id = htons(0xffff);
1660                task->rxwr_txrd.var_ctx.rx_id = 0xffff;
1661        } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
1662                fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
1663        }
1664
1665        /* Fill FC Header into middle path buffer */
1666        hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr;
1667        memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
1668        hdr[0] = cpu_to_be64(temp_hdr[0]);
1669        hdr[1] = cpu_to_be64(temp_hdr[1]);
1670        hdr[2] = cpu_to_be64(temp_hdr[2]);
1671
1672        /* Rx Only */
1673        if (task_type == FCOE_TASK_TYPE_MIDPATH) {
1674                sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1675
1676                sgl->mul_sgl.cur_sge_addr.lo = (u32)mp_req->mp_resp_bd_dma;
1677                sgl->mul_sgl.cur_sge_addr.hi =
1678                                (u32)((u64)mp_req->mp_resp_bd_dma >> 32);
1679                sgl->mul_sgl.sgl_size = 1;
1680        }
1681}
1682
1683void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
1684                             struct fcoe_task_ctx_entry *task)
1685{
1686        u8 task_type;
1687        struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1688        struct io_bdt *bd_tbl = io_req->bd_tbl;
1689        struct bnx2fc_rport *tgt = io_req->tgt;
1690        struct fcoe_cached_sge_ctx *cached_sge;
1691        struct fcoe_ext_mul_sges_ctx *sgl;
1692        int dev_type = tgt->dev_type;
1693        u64 *fcp_cmnd;
1694        u64 tmp_fcp_cmnd[4];
1695        u32 context_id;
1696        int cnt, i;
1697        int bd_count;
1698
1699        memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1700
1701        /* Setup the task from io_req for easy reference */
1702        io_req->task = task;
1703
1704        if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
1705                task_type = FCOE_TASK_TYPE_WRITE;
1706        else
1707                task_type = FCOE_TASK_TYPE_READ;
1708
1709        /* Tx only */
1710        bd_count = bd_tbl->bd_valid;
1711        cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge;
1712        if (task_type == FCOE_TASK_TYPE_WRITE) {
1713                if ((dev_type == TYPE_DISK) && (bd_count == 1)) {
1714                        struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
1715
1716                        task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.lo =
1717                        cached_sge->cur_buf_addr.lo =
1718                                        fcoe_bd_tbl->buf_addr_lo;
1719                        task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.hi =
1720                        cached_sge->cur_buf_addr.hi =
1721                                        fcoe_bd_tbl->buf_addr_hi;
1722                        task->txwr_only.sgl_ctx.cached_sge.cur_buf_rem =
1723                        cached_sge->cur_buf_rem =
1724                                        fcoe_bd_tbl->buf_len;
1725
1726                        task->txwr_rxrd.const_ctx.init_flags |= 1 <<
1727                                FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
1728                } else {
1729                        task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1730                                        (u32)bd_tbl->bd_tbl_dma;
1731                        task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1732                                        (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1733                        task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
1734                                        bd_tbl->bd_valid;
1735                }
1736        }
1737
1738        /* Tx Write Rx Read */
1739        /* Init state to NORMAL */
1740        task->txwr_rxrd.const_ctx.init_flags |= task_type <<
1741                                FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1742        if (dev_type == TYPE_TAPE) {
1743                task->txwr_rxrd.const_ctx.init_flags |=
1744                                FCOE_TASK_DEV_TYPE_TAPE <<
1745                                FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1746                io_req->rec_retry = 0;
1748        } else
1749                task->txwr_rxrd.const_ctx.init_flags |=
1750                                FCOE_TASK_DEV_TYPE_DISK <<
1751                                FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1752        task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1753                                FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1754        /* tx flags */
1755        task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
1756                                FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1757
1758        /* Set initial seq counter */
1759        task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1;
1760
1761        /* Fill FCP_CMND IU */
1762        fcp_cmnd = (u64 *)
1763                    task->txwr_rxrd.union_ctx.fcp_cmd.opaque;
1764        bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
1765
1766        /* swap fcp_cmnd */
1767        cnt = sizeof(struct fcp_cmnd) / sizeof(u64);
1768
1769        for (i = 0; i < cnt; i++) {
1770                *fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
1771                fcp_cmnd++;
1772        }
1773
1774        /* Rx Write Tx Read */
1775        task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
1776
1777        context_id = tgt->context_id;
1778        task->rxwr_txrd.const_ctx.init_flags = context_id <<
1779                                FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1780
1781        /* rx flags */
1782        /* Set state to "waiting for the first packet" */
1783        task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
1784                                FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
1785
1786        task->rxwr_txrd.var_ctx.rx_id = 0xffff;
1787
1788        /* Rx Only */
1789        if (task_type != FCOE_TASK_TYPE_READ)
1790                return;
1791
1792        sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1793        bd_count = bd_tbl->bd_valid;
1794
1795        if (dev_type == TYPE_DISK) {
1796                if (bd_count == 1) {
1797
1798                        struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
1799
1800                        cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
1801                        cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
1802                        cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
1803                        task->txwr_rxrd.const_ctx.init_flags |= 1 <<
1804                                FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
1805                } else if (bd_count == 2) {
1806                        struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
1807
1808                        cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
1809                        cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
1810                        cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
1811
1812                        fcoe_bd_tbl++;
1813                        cached_sge->second_buf_addr.lo =
1814                                                 fcoe_bd_tbl->buf_addr_lo;
1815                        cached_sge->second_buf_addr.hi =
1816                                                fcoe_bd_tbl->buf_addr_hi;
1817                        cached_sge->second_buf_rem = fcoe_bd_tbl->buf_len;
1818                        task->txwr_rxrd.const_ctx.init_flags |= 1 <<
1819                                FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
1820                } else {
1821
1822                        sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
1823                        sgl->mul_sgl.cur_sge_addr.hi =
1824                                        (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1825                        sgl->mul_sgl.sgl_size = bd_count;
1826                }
1827        } else {
1828                sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
1829                sgl->mul_sgl.cur_sge_addr.hi =
1830                                (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1831                sgl->mul_sgl.sgl_size = bd_count;
1832        }
1833}
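
The FCP_CMND IU above is copied into the task context as big-endian 64-bit words. A standalone sketch of that byte-swapping step (userspace C, not driver code) is below; it uses htobe64() from endian.h in place of the kernel's cpu_to_be64(), and a fixed-size dummy structure standing in for struct fcp_cmnd.

/* Standalone sketch: copy a small structure out as big-endian qwords. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_cmnd {		/* assumed 32-byte stand-in for struct fcp_cmnd */
	uint8_t bytes[32];
};

static void copy_swapped(uint64_t *dst, const struct fake_cmnd *cmd)
{
	uint64_t tmp[sizeof(*cmd) / sizeof(uint64_t)];
	size_t i;

	memcpy(tmp, cmd, sizeof(tmp));
	for (i = 0; i < sizeof(tmp) / sizeof(tmp[0]); i++)
		dst[i] = htobe64(tmp[i]);	/* each qword goes out big-endian */
}

int main(void)
{
	struct fake_cmnd cmd;
	uint64_t out[4];
	size_t i;

	for (i = 0; i < sizeof(cmd.bytes); i++)
		cmd.bytes[i] = (uint8_t)i;
	copy_swapped(out, &cmd);
	printf("first qword: 0x%016llx\n", (unsigned long long)out[0]);
	return 0;
}
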
1834
1835/**
1836 * bnx2fc_setup_task_ctx - allocate and map task context
1837 *
1838 * @hba:        pointer to adapter structure
1839 *
1840 * allocate memory for task context, and associated BD table to be used
1841 * by firmware
1842 *
1843 */
1844int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
1845{
1846        int rc = 0;
1847        struct regpair *task_ctx_bdt;
1848        dma_addr_t addr;
1849        int task_ctx_arr_sz;
1850        int i;
1851
1852        /*
1853         * Allocate the task context BD table. One page of the BD table
1854         * can map 256 buffers, and each buffer contains 32 task context
1855         * entries, so a single page covers up to 8192 task context
1856         * entries.
1857         */
1858        hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
1859                                                  PAGE_SIZE,
1860                                                  &hba->task_ctx_bd_dma,
1861                                                  GFP_KERNEL);
1862        if (!hba->task_ctx_bd_tbl) {
1863                printk(KERN_ERR PFX "unable to allocate task context BDT\n");
1864                rc = -1;
1865                goto out;
1866        }
1867
1868        /*
1869         * Allocate task_ctx which is an array of pointers pointing to
1870         * a page containing 32 task contexts
1871         */
1872        task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE);
1873        hba->task_ctx = kzalloc((task_ctx_arr_sz * sizeof(void *)),
1874                                 GFP_KERNEL);
1875        if (!hba->task_ctx) {
1876                printk(KERN_ERR PFX "unable to allocate task context array\n");
1877                rc = -1;
1878                goto out1;
1879        }
1880
1881        /*
1882         * Allocate task_ctx_dma which is an array of dma addresses
1883         */
1884        hba->task_ctx_dma = kmalloc((task_ctx_arr_sz *
1885                                        sizeof(dma_addr_t)), GFP_KERNEL);
1886        if (!hba->task_ctx_dma) {
1887                printk(KERN_ERR PFX "unable to alloc context mapping array\n");
1888                rc = -1;
1889                goto out2;
1890        }
1891
1892        task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
1893        for (i = 0; i < task_ctx_arr_sz; i++) {
1894
1895                hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
1896                                                      PAGE_SIZE,
1897                                                      &hba->task_ctx_dma[i],
1898                                                      GFP_KERNEL);
1899                if (!hba->task_ctx[i]) {
1900                        printk(KERN_ERR PFX "unable to alloc task context\n");
1901                        rc = -1;
1902                        goto out3;
1903                }
1904                addr = (u64)hba->task_ctx_dma[i];
1905                task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
1906                task_ctx_bdt->lo = cpu_to_le32((u32)addr);
1907                task_ctx_bdt++;
1908        }
1909        return 0;
1910
1911out3:
1912        for (i = 0; i < task_ctx_arr_sz; i++) {
1913                if (hba->task_ctx[i]) {
1914
1915                        dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1916                                hba->task_ctx[i], hba->task_ctx_dma[i]);
1917                        hba->task_ctx[i] = NULL;
1918                }
1919        }
1920
1921        kfree(hba->task_ctx_dma);
1922        hba->task_ctx_dma = NULL;
1923out2:
1924        kfree(hba->task_ctx);
1925        hba->task_ctx = NULL;
1926out1:
1927        dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1928                        hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma);
1929        hba->task_ctx_bd_tbl = NULL;
1930out:
1931        return rc;
1932}
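
The sizing arithmetic behind the allocation above can be sketched in isolation (userspace C, not driver code). The page size, task context entry size, and max_tasks value below are assumed for illustration; the real figures come from the firmware HSI headers and hba->max_tasks.

/* Standalone sketch: how many task-context pages the array needs. */
#include <stdio.h>

#define PAGE_SZ		4096
#define TASK_CTX_SZ	128	/* assumed sizeof(struct fcoe_task_ctx_entry) */

int main(void)
{
	int tasks_per_page = PAGE_SZ / TASK_CTX_SZ;		/* 32 with these sizes */
	int max_tasks      = 2048;				/* assumed hba->max_tasks */
	int task_ctx_pages = max_tasks / tasks_per_page;	/* entries in hba->task_ctx[] */

	printf("tasks/page=%d, task context pages=%d\n",
	       tasks_per_page, task_ctx_pages);
	return 0;
}
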
1933
1934void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
1935{
1936        int task_ctx_arr_sz;
1937        int i;
1938
1939        if (hba->task_ctx_bd_tbl) {
1940                dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1941                                    hba->task_ctx_bd_tbl,
1942                                    hba->task_ctx_bd_dma);
1943                hba->task_ctx_bd_tbl = NULL;
1944        }
1945
1946        task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE);
1947        if (hba->task_ctx) {
1948                for (i = 0; i < task_ctx_arr_sz; i++) {
1949                        if (hba->task_ctx[i]) {
1950                                dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1951                                                    hba->task_ctx[i],
1952                                                    hba->task_ctx_dma[i]);
1953                                hba->task_ctx[i] = NULL;
1954                        }
1955                }
1956                kfree(hba->task_ctx);
1957                hba->task_ctx = NULL;
1958        }
1959
1960        kfree(hba->task_ctx_dma);
1961        hba->task_ctx_dma = NULL;
1962}
1963
1964static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
1965{
1966        int i;
1967        int segment_count;
1968        u32 *pbl;
1969
1970        if (hba->hash_tbl_segments) {
1971
1972                pbl = hba->hash_tbl_pbl;
1973                if (pbl) {
1974                        segment_count = hba->hash_tbl_segment_count;
1975                        for (i = 0; i < segment_count; ++i) {
1976                                dma_addr_t dma_address;
1977
1978                                dma_address = le32_to_cpu(*pbl);
1979                                ++pbl;
1980                                dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
1981                                ++pbl;
1982                                dma_free_coherent(&hba->pcidev->dev,
1983                                                  BNX2FC_HASH_TBL_CHUNK_SIZE,
1984                                                  hba->hash_tbl_segments[i],
1985                                                  dma_address);
1986                        }
1987                }
1988
1989                kfree(hba->hash_tbl_segments);
1990                hba->hash_tbl_segments = NULL;
1991        }
1992
1993        if (hba->hash_tbl_pbl) {
1994                dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1995                                    hba->hash_tbl_pbl,
1996                                    hba->hash_tbl_pbl_dma);
1997                hba->hash_tbl_pbl = NULL;
1998        }
1999}
2000
2001static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
2002{
2003        int i;
2004        int hash_table_size;
2005        int segment_count;
2006        int segment_array_size;
2007        int dma_segment_array_size;
2008        dma_addr_t *dma_segment_array;
2009        u32 *pbl;
2010
2011        hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
2012                sizeof(struct fcoe_hash_table_entry);
2013
2014        segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1;
2015        segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE;
2016        hba->hash_tbl_segment_count = segment_count;
2017
2018        segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments);
2019        hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL);
2020        if (!hba->hash_tbl_segments) {
2021                printk(KERN_ERR PFX "hash table pointers alloc failed\n");
2022                return -ENOMEM;
2023        }
2024        dma_segment_array_size = segment_count * sizeof(*dma_segment_array);
2025        dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
2026        if (!dma_segment_array) {
2027                printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
2028                goto cleanup_ht;
2029        }
2030
2031        for (i = 0; i < segment_count; ++i) {
2032                hba->hash_tbl_segments[i] = dma_alloc_coherent(&hba->pcidev->dev,
2033                                                               BNX2FC_HASH_TBL_CHUNK_SIZE,
2034                                                               &dma_segment_array[i],
2035                                                               GFP_KERNEL);
2036                if (!hba->hash_tbl_segments[i]) {
2037                        printk(KERN_ERR PFX "hash segment alloc failed\n");
2038                        goto cleanup_dma;
2039                }
2040        }
2041
2042        hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
2043                                               &hba->hash_tbl_pbl_dma,
2044                                               GFP_KERNEL);
2045        if (!hba->hash_tbl_pbl) {
2046                printk(KERN_ERR PFX "hash table pbl alloc failed\n");
2047                goto cleanup_dma;
2048        }
2049
2050        pbl = hba->hash_tbl_pbl;
2051        for (i = 0; i < segment_count; ++i) {
2052                u64 paddr = dma_segment_array[i];
2053                *pbl = cpu_to_le32((u32) paddr);
2054                ++pbl;
2055                *pbl = cpu_to_le32((u32) (paddr >> 32));
2056                ++pbl;
2057        }
2058        pbl = hba->hash_tbl_pbl;
2059        i = 0;
2060        while (*pbl && *(pbl + 1)) {
2061                u32 lo;
2062                u32 hi;
2063                lo = *pbl;
2064                ++pbl;
2065                hi = *pbl;
2066                ++pbl;
2067                ++i;
2068        }
2069        kfree(dma_segment_array);
2070        return 0;
2071
2072cleanup_dma:
2073        for (i = 0; i < segment_count; ++i) {
2074                if (hba->hash_tbl_segments[i])
2075                        dma_free_coherent(&hba->pcidev->dev,
2076                                            BNX2FC_HASH_TBL_CHUNK_SIZE,
2077                                            hba->hash_tbl_segments[i],
2078                                            dma_segment_array[i]);
2079        }
2080
2081        kfree(dma_segment_array);
2082
2083cleanup_ht:
2084        kfree(hba->hash_tbl_segments);
2085        hba->hash_tbl_segments = NULL;
2086        return -ENOMEM;
2087}
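
Two pieces of arithmetic above are worth calling out: the ceiling division that sizes the segment array, and the lo/hi 32-bit split of each segment's DMA address into the page buffer list (PBL). A standalone sketch (userspace C, not driver code) with assumed sizes; the driver additionally converts the PBL words to little-endian with cpu_to_le32().

/* Standalone sketch: segment count round-up and PBL lo/hi fill. */
#include <stdio.h>
#include <stdint.h>

#define CHUNK_SIZE 0x8000u	/* assumed BNX2FC_HASH_TBL_CHUNK_SIZE */

int main(void)
{
	uint32_t hash_table_size = 0x24000;	/* assumed total size in bytes */
	uint32_t segment_count =
		(hash_table_size + CHUNK_SIZE - 1) / CHUNK_SIZE;  /* round up */
	uint64_t seg_dma = 0x123458000ULL;	/* example 64-bit DMA address */
	uint32_t pbl[2];

	pbl[0] = (uint32_t)seg_dma;		/* low 32 bits  */
	pbl[1] = (uint32_t)(seg_dma >> 32);	/* high 32 bits */

	printf("segments=%u, pbl[0]=0x%08x pbl[1]=0x%08x\n",
	       segment_count, pbl[0], pbl[1]);
	return 0;
}
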
2088
2089/**
2090 * bnx2fc_setup_fw_resc - Allocate and map hash table and dummy buffer
2091 *
2092 * @hba:        Pointer to adapter structure
2093 *
2094 */
2095int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
2096{
2097        u64 addr;
2098        u32 mem_size;
2099        int i;
2100
2101        if (bnx2fc_allocate_hash_table(hba))
2102                return -ENOMEM;
2103
2104        mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
2105        hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
2106                                                  &hba->t2_hash_tbl_ptr_dma,
2107                                                  GFP_KERNEL);
2108        if (!hba->t2_hash_tbl_ptr) {
2109                printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
2110                bnx2fc_free_fw_resc(hba);
2111                return -ENOMEM;
2112        }
2113
2114        mem_size = BNX2FC_NUM_MAX_SESS *
2115                                sizeof(struct fcoe_t2_hash_table_entry);
2116        hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
2117                                              &hba->t2_hash_tbl_dma,
2118                                              GFP_KERNEL);
2119        if (!hba->t2_hash_tbl) {
2120                printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
2121                bnx2fc_free_fw_resc(hba);
2122                return -ENOMEM;
2123        }
2124        for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
2125                addr = (unsigned long) hba->t2_hash_tbl_dma +
2126                         ((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
2127                hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff;
2128                hba->t2_hash_tbl[i].next.hi = addr >> 32;
2129        }
2130
2131        hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
2132                                               PAGE_SIZE, &hba->dummy_buf_dma,
2133                                               GFP_KERNEL);
2134        if (!hba->dummy_buffer) {
2135                printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n");
2136                bnx2fc_free_fw_resc(hba);
2137                return -ENOMEM;
2138        }
2139
2140        hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
2141                                               &hba->stats_buf_dma,
2142                                               GFP_KERNEL);
2143        if (!hba->stats_buffer) {
2144                printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
2145                bnx2fc_free_fw_resc(hba);
2146                return -ENOMEM;
2147        }
2148
2149        return 0;
2150}
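
The t2 hash table loop above chains each entry to the next by storing the DMA address of entry i+1 in entry i's "next" pointer, split into lo/hi halves. A standalone sketch of that chaining (userspace C, not driver code); the entry size, session count, and base address are assumed for illustration.

/* Standalone sketch: linking hash-table entries through DMA "next" pointers. */
#include <stdio.h>
#include <stdint.h>

#define NUM_SESS	4	/* assumed stand-in for BNX2FC_NUM_MAX_SESS */
#define T2_ENTRY_SZ	64	/* assumed sizeof(struct fcoe_t2_hash_table_entry) */

struct t2_next {
	uint32_t lo;
	uint32_t hi;
};

int main(void)
{
	uint64_t base_dma = 0x7f0000000ULL;	/* example table DMA base */
	struct t2_next next[NUM_SESS];
	int i;

	for (i = 0; i < NUM_SESS; i++) {
		uint64_t addr = base_dma + (uint64_t)(i + 1) * T2_ENTRY_SZ;

		next[i].lo = (uint32_t)(addr & 0xffffffff);
		next[i].hi = (uint32_t)(addr >> 32);
	}

	printf("entry 0 -> next at 0x%08x%08x\n", next[0].hi, next[0].lo);
	return 0;
}
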
2151
2152void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba)
2153{
2154        u32 mem_size;
2155
2156        if (hba->stats_buffer) {
2157                dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
2158                                  hba->stats_buffer, hba->stats_buf_dma);
2159                hba->stats_buffer = NULL;
2160        }
2161
2162        if (hba->dummy_buffer) {
2163                dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
2164                                  hba->dummy_buffer, hba->dummy_buf_dma);
2165                hba->dummy_buffer = NULL;
2166        }
2167
2168        if (hba->t2_hash_tbl_ptr) {
2169                mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
2170                dma_free_coherent(&hba->pcidev->dev, mem_size,
2171                                    hba->t2_hash_tbl_ptr,
2172                                    hba->t2_hash_tbl_ptr_dma);
2173                hba->t2_hash_tbl_ptr = NULL;
2174        }
2175
2176        if (hba->t2_hash_tbl) {
2177                mem_size = BNX2FC_NUM_MAX_SESS *
2178                            sizeof(struct fcoe_t2_hash_table_entry);
2179                dma_free_coherent(&hba->pcidev->dev, mem_size,
2180                                    hba->t2_hash_tbl, hba->t2_hash_tbl_dma);
2181                hba->t2_hash_tbl = NULL;
2182        }
2183        bnx2fc_free_hash_table(hba);
2184}
2185