   1/* bnx2fc_hwi.c: Broadcom NetXtreme II Linux FCoE offload driver.
   2 * This file contains the code for the low level functions that
   3 * interact with the 57712 FCoE firmware.
   4 *
   5 * Copyright (c) 2008 - 2013 Broadcom Corporation
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License as published by
   9 * the Free Software Foundation.
  10 *
  11 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
  12 */
  13
  14#include "bnx2fc.h"
  15
  16DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
  17
  18static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
  19                                        struct fcoe_kcqe *new_cqe_kcqe);
  20static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
  21                                        struct fcoe_kcqe *ofld_kcqe);
  22static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
  23                                                struct fcoe_kcqe *ofld_kcqe);
  24static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
  25static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
  26                                        struct fcoe_kcqe *destroy_kcqe);
  27
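/**
 * bnx2fc_send_stat_req - sends a statistics request KWQE to the firmware
 *
 * @hba:        adapter structure pointer
 *
 * Builds a FCOE_KWQE_OPCODE_STAT request pointing at the DMA-mapped
 * statistics buffer and submits it through the cnic interface.
 */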
  28int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
  29{
  30        struct fcoe_kwqe_stat stat_req;
  31        struct kwqe *kwqe_arr[2];
  32        int num_kwqes = 1;
  33        int rc = 0;
  34
  35        memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat));
  36        stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT;
  37        stat_req.hdr.flags =
  38                (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
  39
  40        stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma;
  41        stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32);
  42
  43        kwqe_arr[0] = (struct kwqe *) &stat_req;
  44
  45        if (hba->cnic && hba->cnic->submit_kwqes)
  46                rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
  47
  48        return rc;
  49}
  50
  51/**
  52 * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w
  53 *
  54 * @hba:        adapter structure pointer
  55 *
   56 * Send down FCoE firmware init KWQEs which initiate the initial handshake
   57 *      with the f/w.
  58 *
  59 */
  60int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
  61{
  62        struct fcoe_kwqe_init1 fcoe_init1;
  63        struct fcoe_kwqe_init2 fcoe_init2;
  64        struct fcoe_kwqe_init3 fcoe_init3;
  65        struct kwqe *kwqe_arr[3];
  66        int num_kwqes = 3;
  67        int rc = 0;
  68
  69        if (!hba->cnic) {
  70                printk(KERN_ERR PFX "hba->cnic NULL during fcoe fw init\n");
  71                return -ENODEV;
  72        }
  73
  74        /* fill init1 KWQE */
  75        memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1));
  76        fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1;
  77        fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
  78                                        FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
  79
  80        fcoe_init1.num_tasks = hba->max_tasks;
  81        fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
  82        fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
  83        fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
  84        fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX;
  85        fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
  86        fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32);
  87        fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma;
  88        fcoe_init1.task_list_pbl_addr_hi =
  89                                (u32) ((u64) hba->task_ctx_bd_dma >> 32);
  90        fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU;
  91
  92        fcoe_init1.flags = (PAGE_SHIFT <<
  93                                FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT);
  94
  95        fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG;
  96
  97        /* fill init2 KWQE */
  98        memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2));
  99        fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2;
 100        fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
 101                                        FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
 102
 103        fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION;
 104        fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION;
 105
 106
 107        fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
 108        fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
 109                                           ((u64) hba->hash_tbl_pbl_dma >> 32);
 110
 111        fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma;
 112        fcoe_init2.t2_hash_tbl_addr_hi = (u32)
 113                                          ((u64) hba->t2_hash_tbl_dma >> 32);
 114
 115        fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma;
 116        fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32)
 117                                        ((u64) hba->t2_hash_tbl_ptr_dma >> 32);
 118
 119        fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS;
 120
 121        /* fill init3 KWQE */
 122        memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3));
 123        fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3;
 124        fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
 125                                        FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
 126        fcoe_init3.error_bit_map_lo = 0xffffffff;
 127        fcoe_init3.error_bit_map_hi = 0xffffffff;
 128
 129        /*
 130         * enable both cached connection and cached tasks
 131         * 0 = none, 1 = cached connection, 2 = cached tasks, 3 = both
 132         */
 133        fcoe_init3.perf_config = 3;
 134
 135        kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
 136        kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
 137        kwqe_arr[2] = (struct kwqe *) &fcoe_init3;
 138
 139        if (hba->cnic && hba->cnic->submit_kwqes)
 140                rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
 141
 142        return rc;
 143}
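
/**
 * bnx2fc_send_fw_fcoe_destroy_msg - tears down the FCoE firmware function
 *
 * @hba:        adapter structure pointer
 *
 * Sends a single FCOE_KWQE_OPCODE_DESTROY KWQE, undoing the initialization
 * performed by bnx2fc_send_fw_fcoe_init_msg().
 */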
 144int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba)
 145{
 146        struct fcoe_kwqe_destroy fcoe_destroy;
 147        struct kwqe *kwqe_arr[2];
 148        int num_kwqes = 1;
 149        int rc = -1;
 150
 151        /* fill destroy KWQE */
 152        memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy));
 153        fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY;
 154        fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
 155                                        FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
 156        kwqe_arr[0] = (struct kwqe *) &fcoe_destroy;
 157
 158        if (hba->cnic && hba->cnic->submit_kwqes)
 159                rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
 160        return rc;
 161}
 162
 163/**
 164 * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process
 165 *
 166 * @port:               port structure pointer
 167 * @tgt:                bnx2fc_rport structure pointer
 168 */
 169int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
 170                                        struct bnx2fc_rport *tgt)
 171{
 172        struct fc_lport *lport = port->lport;
 173        struct bnx2fc_interface *interface = port->priv;
 174        struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
 175        struct bnx2fc_hba *hba = interface->hba;
 176        struct kwqe *kwqe_arr[4];
 177        struct fcoe_kwqe_conn_offload1 ofld_req1;
 178        struct fcoe_kwqe_conn_offload2 ofld_req2;
 179        struct fcoe_kwqe_conn_offload3 ofld_req3;
 180        struct fcoe_kwqe_conn_offload4 ofld_req4;
 181        struct fc_rport_priv *rdata = tgt->rdata;
 182        struct fc_rport *rport = tgt->rport;
 183        int num_kwqes = 4;
 184        u32 port_id;
 185        int rc = 0;
 186        u16 conn_id;
 187
 188        /* Initialize offload request 1 structure */
 189        memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1));
 190
 191        ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1;
 192        ofld_req1.hdr.flags =
 193                (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
 194
 195
 196        conn_id = (u16)tgt->fcoe_conn_id;
 197        ofld_req1.fcoe_conn_id = conn_id;
 198
 199
 200        ofld_req1.sq_addr_lo = (u32) tgt->sq_dma;
 201        ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32);
 202
 203        ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma;
 204        ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32);
 205
 206        ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma;
 207        ofld_req1.rq_first_pbe_addr_hi =
 208                                (u32)((u64) tgt->rq_dma >> 32);
 209
 210        ofld_req1.rq_prod = 0x8000;
 211
 212        /* Initialize offload request 2 structure */
 213        memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2));
 214
 215        ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2;
 216        ofld_req2.hdr.flags =
 217                (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
 218
 219        ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size;
 220
 221        ofld_req2.cq_addr_lo = (u32) tgt->cq_dma;
 222        ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32);
 223
 224        ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma;
 225        ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32);
 226
 227        ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma;
 228        ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32);
 229
 230        /* Initialize offload request 3 structure */
 231        memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3));
 232
 233        ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3;
 234        ofld_req3.hdr.flags =
 235                (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
 236
 237        ofld_req3.vlan_tag = interface->vlan_id <<
 238                                FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
 239        ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;
 240
 241        port_id = fc_host_port_id(lport->host);
 242        if (port_id == 0) {
 243                BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n");
 244                return -EINVAL;
 245        }
 246
 247        /*
 248         * Store s_id of the initiator for further reference. This will
 249         * be used during disable/destroy during linkdown processing as
 250         * when the lport is reset, the port_id also is reset to 0
 251         */
 252        tgt->sid = port_id;
 253        ofld_req3.s_id[0] = (port_id & 0x000000FF);
 254        ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8;
 255        ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16;
 256
 257        port_id = rport->port_id;
 258        ofld_req3.d_id[0] = (port_id & 0x000000FF);
 259        ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8;
 260        ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16;
 261
 262        ofld_req3.tx_total_conc_seqs = rdata->max_seq;
 263
 264        ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq;
 265        ofld_req3.rx_max_fc_pay_len  = lport->mfs;
 266
 267        ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS;
 268        ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS;
 269        ofld_req3.rx_open_seqs_exch_c3 = 1;
 270
 271        ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma;
 272        ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32);
 273
 274        /* set mul_n_port_ids supported flag to 0, until it is supported */
 275        ofld_req3.flags = 0;
 276        /*
 277        ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) <<
 278                            FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT);
 279        */
 280        /* Info from PLOGI response */
 281        ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) <<
 282                             FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT);
 283
 284        ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
 285                             FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);
 286
 287        /*
 288         * Info from PRLI response, this info is used for sequence level error
 289         * recovery support
 290         */
 291        if (tgt->dev_type == TYPE_TAPE) {
 292                ofld_req3.flags |= 1 <<
 293                                    FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT;
 294                ofld_req3.flags |= (((rdata->flags & FC_RP_FLAGS_REC_SUPPORTED)
 295                                    ? 1 : 0) <<
 296                                    FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT);
 297        }
 298
 299        /* vlan flag */
 300        ofld_req3.flags |= (interface->vlan_enabled <<
 301                            FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);
 302
 303        /* C2_VALID and ACK flags are not set as they are not supported */
 304
 305
 306        /* Initialize offload request 4 structure */
 307        memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4));
 308        ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4;
 309        ofld_req4.hdr.flags =
 310                (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
 311
 312        ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;
 313
 314
 315        ofld_req4.src_mac_addr_lo[0] =  port->data_src_addr[5];
 316                                                        /* local mac */
 317        ofld_req4.src_mac_addr_lo[1] =  port->data_src_addr[4];
 318        ofld_req4.src_mac_addr_mid[0] =  port->data_src_addr[3];
 319        ofld_req4.src_mac_addr_mid[1] =  port->data_src_addr[2];
 320        ofld_req4.src_mac_addr_hi[0] =  port->data_src_addr[1];
 321        ofld_req4.src_mac_addr_hi[1] =  port->data_src_addr[0];
 322        ofld_req4.dst_mac_addr_lo[0] =  ctlr->dest_addr[5];
 323                                                        /* fcf mac */
 324        ofld_req4.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
 325        ofld_req4.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
 326        ofld_req4.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
 327        ofld_req4.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
 328        ofld_req4.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
 329
 330        ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
 331        ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
 332
 333        ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma;
 334        ofld_req4.confq_pbl_base_addr_hi =
 335                                        (u32)((u64) tgt->confq_pbl_dma >> 32);
 336
 337        kwqe_arr[0] = (struct kwqe *) &ofld_req1;
 338        kwqe_arr[1] = (struct kwqe *) &ofld_req2;
 339        kwqe_arr[2] = (struct kwqe *) &ofld_req3;
 340        kwqe_arr[3] = (struct kwqe *) &ofld_req4;
 341
 342        if (hba->cnic && hba->cnic->submit_kwqes)
 343                rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
 344
 345        return rc;
 346}
 347
 348/**
 349 * bnx2fc_send_session_enable_req - initiates FCoE Session enablement
 350 *
 351 * @port:               port structure pointer
 352 * @tgt:                bnx2fc_rport structure pointer
 353 */
 354int bnx2fc_send_session_enable_req(struct fcoe_port *port,
 355                                        struct bnx2fc_rport *tgt)
 356{
 357        struct kwqe *kwqe_arr[2];
 358        struct bnx2fc_interface *interface = port->priv;
 359        struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
 360        struct bnx2fc_hba *hba = interface->hba;
 361        struct fcoe_kwqe_conn_enable_disable enbl_req;
 362        struct fc_lport *lport = port->lport;
 363        struct fc_rport *rport = tgt->rport;
 364        int num_kwqes = 1;
 365        int rc = 0;
 366        u32 port_id;
 367
 368        memset(&enbl_req, 0x00,
 369               sizeof(struct fcoe_kwqe_conn_enable_disable));
 370        enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN;
 371        enbl_req.hdr.flags =
 372                (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
 373
 374        enbl_req.src_mac_addr_lo[0] =  port->data_src_addr[5];
 375                                                        /* local mac */
 376        enbl_req.src_mac_addr_lo[1] =  port->data_src_addr[4];
 377        enbl_req.src_mac_addr_mid[0] =  port->data_src_addr[3];
 378        enbl_req.src_mac_addr_mid[1] =  port->data_src_addr[2];
 379        enbl_req.src_mac_addr_hi[0] =  port->data_src_addr[1];
 380        enbl_req.src_mac_addr_hi[1] =  port->data_src_addr[0];
 381        memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);
 382
 383        enbl_req.dst_mac_addr_lo[0] =  ctlr->dest_addr[5];
 384        enbl_req.dst_mac_addr_lo[1] =  ctlr->dest_addr[4];
 385        enbl_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
 386        enbl_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
 387        enbl_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
 388        enbl_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
 389
 390        port_id = fc_host_port_id(lport->host);
 391        if (port_id != tgt->sid) {
 392                printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x,"
 393                                "sid = 0x%x\n", port_id, tgt->sid);
 394                port_id = tgt->sid;
 395        }
 396        enbl_req.s_id[0] = (port_id & 0x000000FF);
 397        enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
 398        enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16;
 399
 400        port_id = rport->port_id;
 401        enbl_req.d_id[0] = (port_id & 0x000000FF);
 402        enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
 403        enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
 404        enbl_req.vlan_tag = interface->vlan_id <<
 405                                FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
 406        enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
 407        enbl_req.vlan_flag = interface->vlan_enabled;
 408        enbl_req.context_id = tgt->context_id;
 409        enbl_req.conn_id = tgt->fcoe_conn_id;
 410
 411        kwqe_arr[0] = (struct kwqe *) &enbl_req;
 412
 413        if (hba->cnic && hba->cnic->submit_kwqes)
 414                rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
 415        return rc;
 416}
 417
 418/**
 419 * bnx2fc_send_session_disable_req - initiates FCoE Session disable
 420 *
 421 * @port:               port structure pointer
 422 * @tgt:                bnx2fc_rport structure pointer
 423 */
 424int bnx2fc_send_session_disable_req(struct fcoe_port *port,
 425                                    struct bnx2fc_rport *tgt)
 426{
 427        struct bnx2fc_interface *interface = port->priv;
 428        struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
 429        struct bnx2fc_hba *hba = interface->hba;
 430        struct fcoe_kwqe_conn_enable_disable disable_req;
 431        struct kwqe *kwqe_arr[2];
 432        struct fc_rport *rport = tgt->rport;
 433        int num_kwqes = 1;
 434        int rc = 0;
 435        u32 port_id;
 436
 437        memset(&disable_req, 0x00,
 438               sizeof(struct fcoe_kwqe_conn_enable_disable));
 439        disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN;
 440        disable_req.hdr.flags =
 441                (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
 442
 443        disable_req.src_mac_addr_lo[0] =  tgt->src_addr[5];
 444        disable_req.src_mac_addr_lo[1] =  tgt->src_addr[4];
 445        disable_req.src_mac_addr_mid[0] =  tgt->src_addr[3];
 446        disable_req.src_mac_addr_mid[1] =  tgt->src_addr[2];
 447        disable_req.src_mac_addr_hi[0] =  tgt->src_addr[1];
 448        disable_req.src_mac_addr_hi[1] =  tgt->src_addr[0];
 449
 450        disable_req.dst_mac_addr_lo[0] =  ctlr->dest_addr[5];
 451        disable_req.dst_mac_addr_lo[1] =  ctlr->dest_addr[4];
 452        disable_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
 453        disable_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
 454        disable_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
 455        disable_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
 456
 457        port_id = tgt->sid;
 458        disable_req.s_id[0] = (port_id & 0x000000FF);
 459        disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
 460        disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16;
 461
 462
 463        port_id = rport->port_id;
 464        disable_req.d_id[0] = (port_id & 0x000000FF);
 465        disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
 466        disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
 467        disable_req.context_id = tgt->context_id;
 468        disable_req.conn_id = tgt->fcoe_conn_id;
 469        disable_req.vlan_tag = interface->vlan_id <<
 470                                FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
 471        disable_req.vlan_tag |=
 472                        3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
 473        disable_req.vlan_flag = interface->vlan_enabled;
 474
 475        kwqe_arr[0] = (struct kwqe *) &disable_req;
 476
 477        if (hba->cnic && hba->cnic->submit_kwqes)
 478                rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
 479
 480        return rc;
 481}
 482
 483/**
 484 * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy
 485 *
  486 * @hba:                adapter structure pointer
 487 * @tgt:                bnx2fc_rport structure pointer
 488 */
 489int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
 490                                        struct bnx2fc_rport *tgt)
 491{
 492        struct fcoe_kwqe_conn_destroy destroy_req;
 493        struct kwqe *kwqe_arr[2];
 494        int num_kwqes = 1;
 495        int rc = 0;
 496
 497        memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy));
 498        destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN;
 499        destroy_req.hdr.flags =
 500                (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
 501
 502        destroy_req.context_id = tgt->context_id;
 503        destroy_req.conn_id = tgt->fcoe_conn_id;
 504
 505        kwqe_arr[0] = (struct kwqe *) &destroy_req;
 506
 507        if (hba->cnic && hba->cnic->submit_kwqes)
 508                rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
 509
 510        return rc;
 511}
 512
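/*
 * is_valid_lport - check whether @lport is still registered with @hba.
 * Walks hba->vports under hba_lock and returns true only if a match is
 * found.
 */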
 513static bool is_valid_lport(struct bnx2fc_hba *hba, struct fc_lport *lport)
 514{
 515        struct bnx2fc_lport *blport;
 516
 517        spin_lock_bh(&hba->hba_lock);
 518        list_for_each_entry(blport, &hba->vports, list) {
 519                if (blport->lport == lport) {
 520                        spin_unlock_bh(&hba->hba_lock);
 521                        return true;
 522                }
 523        }
 524        spin_unlock_bh(&hba->hba_lock);
 525        return false;
 526
 527}
 528
 529
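/*
 * Work handler for unsolicited ELS frames: pass the frame to libfc via
 * fc_exch_recv() if the lport is still valid, then free the work item.
 */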
 530static void bnx2fc_unsol_els_work(struct work_struct *work)
 531{
 532        struct bnx2fc_unsol_els *unsol_els;
 533        struct fc_lport *lport;
 534        struct bnx2fc_hba *hba;
 535        struct fc_frame *fp;
 536
 537        unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
 538        lport = unsol_els->lport;
 539        fp = unsol_els->fp;
 540        hba = unsol_els->hba;
 541        if (is_valid_lport(hba, lport))
 542                fc_exch_recv(lport, fp);
 543        kfree(unsol_els);
 544}
 545
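/**
 * bnx2fc_process_l2_frame_compl - handles an unsolicited L2 (ELS) frame
 *
 * @tgt:        bnx2fc_rport structure pointer
 * @buf:        buffer holding the received FC frame (header + payload)
 * @frame_len:  total frame length in bytes
 * @l2_oxid:    OX_ID to stamp into the frame header, or FC_XID_UNKNOWN
 *
 * Copies the frame into a newly allocated fc_frame and queues it on
 * bnx2fc_wq so it is delivered to libfc in process context. Frames for
 * ELS commands that need no reply (TEST, ESTC, FAN, CSU) are dropped.
 */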
 546void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
 547                                   unsigned char *buf,
 548                                   u32 frame_len, u16 l2_oxid)
 549{
 550        struct fcoe_port *port = tgt->port;
 551        struct fc_lport *lport = port->lport;
 552        struct bnx2fc_interface *interface = port->priv;
 553        struct bnx2fc_unsol_els *unsol_els;
 554        struct fc_frame_header *fh;
 555        struct fc_frame *fp;
 556        struct sk_buff *skb;
 557        u32 payload_len;
 558        u32 crc;
 559        u8 op;
 560
 561
 562        unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC);
 563        if (!unsol_els) {
 564                BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n");
 565                return;
 566        }
 567
 568        BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n",
 569                l2_oxid, frame_len);
 570
 571        payload_len = frame_len - sizeof(struct fc_frame_header);
 572
 573        fp = fc_frame_alloc(lport, payload_len);
 574        if (!fp) {
 575                printk(KERN_ERR PFX "fc_frame_alloc failure\n");
 576                kfree(unsol_els);
 577                return;
 578        }
 579
 580        fh = (struct fc_frame_header *) fc_frame_header_get(fp);
 581        /* Copy FC Frame header and payload into the frame */
 582        memcpy(fh, buf, frame_len);
 583
 584        if (l2_oxid != FC_XID_UNKNOWN)
 585                fh->fh_ox_id = htons(l2_oxid);
 586
 587        skb = fp_skb(fp);
 588
 589        if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) ||
 590            (fh->fh_r_ctl == FC_RCTL_ELS_REP)) {
 591
 592                if (fh->fh_type == FC_TYPE_ELS) {
 593                        op = fc_frame_payload_op(fp);
 594                        if ((op == ELS_TEST) || (op == ELS_ESTC) ||
 595                            (op == ELS_FAN) || (op == ELS_CSU)) {
 596                                /*
 597                                 * No need to reply for these
 598                                 * ELS requests
 599                                 */
 600                                printk(KERN_ERR PFX "dropping ELS 0x%x\n", op);
 601                                kfree_skb(skb);
 602                                kfree(unsol_els);
 603                                return;
 604                        }
 605                }
 606                crc = fcoe_fc_crc(fp);
 607                fc_frame_init(fp);
 608                fr_dev(fp) = lport;
 609                fr_sof(fp) = FC_SOF_I3;
 610                fr_eof(fp) = FC_EOF_T;
 611                fr_crc(fp) = cpu_to_le32(~crc);
 612                unsol_els->lport = lport;
 613                unsol_els->hba = interface->hba;
 614                unsol_els->fp = fp;
 615                INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
 616                queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
 617        } else {
 618                BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl);
 619                kfree_skb(skb);
 620                kfree(unsol_els);
 621        }
 622}
 623
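/*
 * bnx2fc_process_unsol_compl - handle unsolicited CQEs: L2 frame receive,
 * error detection and warning detection events reported by the firmware
 * for this rport.
 */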
 624static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
 625{
 626        u8 num_rq;
 627        struct fcoe_err_report_entry *err_entry;
 628        unsigned char *rq_data;
 629        unsigned char *buf = NULL, *buf1;
 630        int i;
 631        u16 xid;
 632        u32 frame_len, len;
 633        struct bnx2fc_cmd *io_req = NULL;
 634        struct fcoe_task_ctx_entry *task, *task_page;
 635        struct bnx2fc_interface *interface = tgt->port->priv;
 636        struct bnx2fc_hba *hba = interface->hba;
 637        int task_idx, index;
 638        int rc = 0;
 639        u64 err_warn_bit_map;
 640        u8 err_warn = 0xff;
 641
 642
 643        BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
 644        switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) {
 645        case FCOE_UNSOLICITED_FRAME_CQE_TYPE:
 646                frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >>
 647                             FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT;
 648
 649                num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ;
 650
 651                spin_lock_bh(&tgt->tgt_lock);
 652                rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq);
 653                spin_unlock_bh(&tgt->tgt_lock);
 654
 655                if (rq_data) {
 656                        buf = rq_data;
 657                } else {
 658                        buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ),
 659                                              GFP_ATOMIC);
 660
 661                        if (!buf1) {
 662                                BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n");
 663                                break;
 664                        }
 665
 666                        for (i = 0; i < num_rq; i++) {
 667                                spin_lock_bh(&tgt->tgt_lock);
 668                                rq_data = (unsigned char *)
 669                                           bnx2fc_get_next_rqe(tgt, 1);
 670                                spin_unlock_bh(&tgt->tgt_lock);
 671                                len = BNX2FC_RQ_BUF_SZ;
 672                                memcpy(buf1, rq_data, len);
 673                                buf1 += len;
 674                        }
 675                }
 676                bnx2fc_process_l2_frame_compl(tgt, buf, frame_len,
 677                                              FC_XID_UNKNOWN);
 678
 679                if (buf != rq_data)
 680                        kfree(buf);
 681                spin_lock_bh(&tgt->tgt_lock);
 682                bnx2fc_return_rqe(tgt, num_rq);
 683                spin_unlock_bh(&tgt->tgt_lock);
 684                break;
 685
 686        case FCOE_ERROR_DETECTION_CQE_TYPE:
 687                /*
 688                 * In case of error reporting CQE a single RQ entry
 689                 * is consumed.
 690                 */
 691                spin_lock_bh(&tgt->tgt_lock);
 692                num_rq = 1;
 693                err_entry = (struct fcoe_err_report_entry *)
 694                             bnx2fc_get_next_rqe(tgt, 1);
 695                xid = err_entry->fc_hdr.ox_id;
 696                BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
 697                BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
 698                        err_entry->data.err_warn_bitmap_hi,
 699                        err_entry->data.err_warn_bitmap_lo);
 700                BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
 701                        err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
 702
 703
 704                if (xid > hba->max_xid) {
 705                        BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
 706                                   xid);
 707                        goto ret_err_rqe;
 708                }
 709
 710                task_idx = xid / BNX2FC_TASKS_PER_PAGE;
 711                index = xid % BNX2FC_TASKS_PER_PAGE;
 712                task_page = (struct fcoe_task_ctx_entry *)
 713                                        hba->task_ctx[task_idx];
 714                task = &(task_page[index]);
 715
 716                io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
 717                if (!io_req)
 718                        goto ret_err_rqe;
 719
 720                if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
 721                        printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
 722                        goto ret_err_rqe;
 723                }
 724
 725                if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
 726                                       &io_req->req_flags)) {
 727                        BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
 728                                            "progress.. ignore unsol err\n");
 729                        goto ret_err_rqe;
 730                }
 731
 732                err_warn_bit_map = (u64)
 733                        ((u64)err_entry->data.err_warn_bitmap_hi << 32) |
 734                        (u64)err_entry->data.err_warn_bitmap_lo;
 735                for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
 736                        if (err_warn_bit_map & (u64)((u64)1 << i)) {
 737                                err_warn = i;
 738                                break;
 739                        }
 740                }
 741
 742                /*
 743                 * If ABTS is already in progress, and FW error is
 744                 * received after that, do not cancel the timeout_work
 745                 * and let the error recovery continue by explicitly
 746                 * logging out the target, when the ABTS eventually
 747                 * times out.
 748                 */
 749                if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
 750                        printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
 751                                            "in ABTS processing\n", xid);
 752                        goto ret_err_rqe;
 753                }
 754                BNX2FC_TGT_DBG(tgt, "err = 0x%x\n", err_warn);
 755                if (tgt->dev_type != TYPE_TAPE)
 756                        goto skip_rec;
 757                switch (err_warn) {
 758                case FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION:
 759                case FCOE_ERROR_CODE_DATA_OOO_RO:
 760                case FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT:
 761                case FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET:
 762                case FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ:
 763                case FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET:
 764                        BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n",
 765                                   xid);
 766                        memcpy(&io_req->err_entry, err_entry,
 767                               sizeof(struct fcoe_err_report_entry));
 768                        if (!test_bit(BNX2FC_FLAG_SRR_SENT,
 769                                      &io_req->req_flags)) {
 770                                spin_unlock_bh(&tgt->tgt_lock);
 771                                rc = bnx2fc_send_rec(io_req);
 772                                spin_lock_bh(&tgt->tgt_lock);
 773
 774                                if (rc)
 775                                        goto skip_rec;
 776                        } else
 777                                printk(KERN_ERR PFX "SRR in progress\n");
 778                        goto ret_err_rqe;
 779                        break;
 780                default:
 781                        break;
 782                }
 783
 784skip_rec:
 785                set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags);
 786                /*
 787                 * Cancel the timeout_work, as we received IO
 788                 * completion with FW error.
 789                 */
 790                if (cancel_delayed_work(&io_req->timeout_work))
 791                        kref_put(&io_req->refcount, bnx2fc_cmd_release);
 792
 793                rc = bnx2fc_initiate_abts(io_req);
 794                if (rc != SUCCESS) {
 795                        printk(KERN_ERR PFX "err_warn: initiate_abts "
 796                                "failed xid = 0x%x. issue cleanup\n",
 797                                io_req->xid);
 798                        bnx2fc_initiate_cleanup(io_req);
 799                }
 800ret_err_rqe:
 801                bnx2fc_return_rqe(tgt, 1);
 802                spin_unlock_bh(&tgt->tgt_lock);
 803                break;
 804
 805        case FCOE_WARNING_DETECTION_CQE_TYPE:
 806                /*
  807                 * In case of warning reporting CQE a single RQ entry
  808                 * is consumed.
 809                 */
 810                spin_lock_bh(&tgt->tgt_lock);
 811                num_rq = 1;
 812                err_entry = (struct fcoe_err_report_entry *)
 813                             bnx2fc_get_next_rqe(tgt, 1);
 814                xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
 815                BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
 816                BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x",
 817                        err_entry->data.err_warn_bitmap_hi,
 818                        err_entry->data.err_warn_bitmap_lo);
 819                BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
 820                        err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
 821
 822                if (xid > hba->max_xid) {
 823                        BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid);
 824                        goto ret_warn_rqe;
 825                }
 826
 827                err_warn_bit_map = (u64)
 828                        ((u64)err_entry->data.err_warn_bitmap_hi << 32) |
 829                        (u64)err_entry->data.err_warn_bitmap_lo;
 830                for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
 831                        if (err_warn_bit_map & (u64) (1 << i)) {
 832                                err_warn = i;
 833                                break;
 834                        }
 835                }
 836                BNX2FC_TGT_DBG(tgt, "warn = 0x%x\n", err_warn);
 837
 838                task_idx = xid / BNX2FC_TASKS_PER_PAGE;
 839                index = xid % BNX2FC_TASKS_PER_PAGE;
 840                task_page = (struct fcoe_task_ctx_entry *)
 841                             interface->hba->task_ctx[task_idx];
 842                task = &(task_page[index]);
 843                io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
 844                if (!io_req)
 845                        goto ret_warn_rqe;
 846
 847                if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
 848                        printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
 849                        goto ret_warn_rqe;
 850                }
 851
 852                memcpy(&io_req->err_entry, err_entry,
 853                       sizeof(struct fcoe_err_report_entry));
 854
 855                if (err_warn == FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION)
 856                        /* REC_TOV is not a warning code */
 857                        BUG_ON(1);
 858                else
 859                        BNX2FC_TGT_DBG(tgt, "Unsolicited warning\n");
 860ret_warn_rqe:
 861                bnx2fc_return_rqe(tgt, 1);
 862                spin_unlock_bh(&tgt->tgt_lock);
 863                break;
 864
 865        default:
 866                printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n");
 867                break;
 868        }
 869}
 870
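/**
 * bnx2fc_process_cq_compl - processes a single work completion CQE
 *
 * @tgt:        bnx2fc_rport structure pointer
 * @wqe:        CQE read from the connection's completion queue
 *
 * Looks up the task and io_req for the completed XID and dispatches to
 * the SCSI, task management, ABTS, ELS or cleanup completion handler.
 */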
 871void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
 872{
 873        struct fcoe_task_ctx_entry *task;
 874        struct fcoe_task_ctx_entry *task_page;
 875        struct fcoe_port *port = tgt->port;
 876        struct bnx2fc_interface *interface = port->priv;
 877        struct bnx2fc_hba *hba = interface->hba;
 878        struct bnx2fc_cmd *io_req;
 879        int task_idx, index;
 880        u16 xid;
 881        u8  cmd_type;
 882        u8 rx_state = 0;
 883        u8 num_rq;
 884
 885        spin_lock_bh(&tgt->tgt_lock);
 886        xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
 887        if (xid >= hba->max_tasks) {
 888                printk(KERN_ERR PFX "ERROR:xid out of range\n");
 889                spin_unlock_bh(&tgt->tgt_lock);
 890                return;
 891        }
 892        task_idx = xid / BNX2FC_TASKS_PER_PAGE;
 893        index = xid % BNX2FC_TASKS_PER_PAGE;
 894        task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
 895        task = &(task_page[index]);
 896
 897        num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
 898                   FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
 899                   FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);
 900
 901        io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
 902
 903        if (io_req == NULL) {
 904                printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n");
 905                spin_unlock_bh(&tgt->tgt_lock);
 906                return;
 907        }
 908
 909        /* Timestamp IO completion time */
 910        cmd_type = io_req->cmd_type;
 911
 912        rx_state = ((task->rxwr_txrd.var_ctx.rx_flags &
 913                    FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE) >>
 914                    FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT);
 915
 916        /* Process other IO completion types */
 917        switch (cmd_type) {
 918        case BNX2FC_SCSI_CMD:
 919                if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
 920                        bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
 921                        spin_unlock_bh(&tgt->tgt_lock);
 922                        return;
 923                }
 924
 925                if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
 926                        bnx2fc_process_abts_compl(io_req, task, num_rq);
 927                else if (rx_state ==
 928                         FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
 929                        bnx2fc_process_cleanup_compl(io_req, task, num_rq);
 930                else
 931                        printk(KERN_ERR PFX "Invalid rx state - %d\n",
 932                                rx_state);
 933                break;
 934
 935        case BNX2FC_TASK_MGMT_CMD:
 936                BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
 937                bnx2fc_process_tm_compl(io_req, task, num_rq);
 938                break;
 939
 940        case BNX2FC_ABTS:
 941                /*
 942                 * ABTS request received by firmware. ABTS response
 943                 * will be delivered to the task belonging to the IO
 944                 * that was aborted
 945                 */
 946                BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n");
 947                kref_put(&io_req->refcount, bnx2fc_cmd_release);
 948                break;
 949
 950        case BNX2FC_ELS:
 951                if (rx_state == FCOE_TASK_RX_STATE_COMPLETED)
 952                        bnx2fc_process_els_compl(io_req, task, num_rq);
 953                else if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
 954                        bnx2fc_process_abts_compl(io_req, task, num_rq);
 955                else if (rx_state ==
 956                         FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
 957                        bnx2fc_process_cleanup_compl(io_req, task, num_rq);
 958                else
 959                        printk(KERN_ERR PFX "Invalid rx state =  %d\n",
 960                                rx_state);
 961                break;
 962
 963        case BNX2FC_CLEANUP:
 964                BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n");
 965                kref_put(&io_req->refcount, bnx2fc_cmd_release);
 966                break;
 967
 968        case BNX2FC_SEQ_CLEANUP:
 969                BNX2FC_IO_DBG(io_req, "cq_compl(0x%x) - seq cleanup resp\n",
 970                              io_req->xid);
 971                bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state);
 972                kref_put(&io_req->refcount, bnx2fc_cmd_release);
 973                break;
 974
 975        default:
 976                printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
 977                break;
 978        }
 979        spin_unlock_bh(&tgt->tgt_lock);
 980}
 981
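/*
 * bnx2fc_arm_cq - re-arm the CQ by writing the current consumer index and
 * toggle bit to the connection's rx doorbell.
 */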
 982void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
 983{
 984        struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
 985        u32 msg;
 986
 987        wmb();
 988        rx_db->doorbell_cq_cons = tgt->cq_cons_idx | (tgt->cq_curr_toggle_bit <<
 989                        FCOE_CQE_TOGGLE_BIT_SHIFT);
 990        msg = *((u32 *)rx_db);
 991        writel(cpu_to_le32(msg), tgt->ctx_base);
 992        mmiowb();
 993
 994}
 995
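/* Allocate a work item so the CQE can be handled by the per-CPU I/O thread */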
 996struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
 997{
 998        struct bnx2fc_work *work;
 999        work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
1000        if (!work)
1001                return NULL;
1002
1003        INIT_LIST_HEAD(&work->list);
1004        work->tgt = tgt;
1005        work->wqe = wqe;
1006        return work;
1007}
1008
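/*
 * bnx2fc_process_new_cqes - drain new CQEs for this connection, queueing
 * them to the per-CPU I/O threads (or processing them inline), then re-arm
 * the CQ and credit back the freed SQEs.
 */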
1009int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
1010{
1011        struct fcoe_cqe *cq;
1012        u32 cq_cons;
1013        struct fcoe_cqe *cqe;
1014        u32 num_free_sqes = 0;
1015        u32 num_cqes = 0;
1016        u16 wqe;
1017
1018        /*
1019         * cq_lock is a low contention lock used to protect
1020         * the CQ data structure from being freed up during
1021         * the upload operation
1022         */
1023        spin_lock_bh(&tgt->cq_lock);
1024
1025        if (!tgt->cq) {
1026                printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n");
1027                spin_unlock_bh(&tgt->cq_lock);
1028                return 0;
1029        }
1030        cq = tgt->cq;
1031        cq_cons = tgt->cq_cons_idx;
1032        cqe = &cq[cq_cons];
1033
1034        while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
1035               (tgt->cq_curr_toggle_bit <<
1036               FCOE_CQE_TOGGLE_BIT_SHIFT)) {
1037
1038                /* new entry on the cq */
1039                if (wqe & FCOE_CQE_CQE_TYPE) {
1040                        /* Unsolicited event notification */
1041                        bnx2fc_process_unsol_compl(tgt, wqe);
1042                } else {
1043                        /* Pending work request completion */
1044                        struct bnx2fc_work *work = NULL;
1045                        struct bnx2fc_percpu_s *fps = NULL;
1046                        unsigned int cpu = wqe % num_possible_cpus();
1047
1048                        fps = &per_cpu(bnx2fc_percpu, cpu);
1049                        spin_lock_bh(&fps->fp_work_lock);
1050                        if (unlikely(!fps->iothread))
1051                                goto unlock;
1052
1053                        work = bnx2fc_alloc_work(tgt, wqe);
1054                        if (work)
1055                                list_add_tail(&work->list,
1056                                              &fps->work_list);
1057unlock:
1058                        spin_unlock_bh(&fps->fp_work_lock);
1059
1060                        /* Pending work request completion */
1061                        if (fps->iothread && work)
1062                                wake_up_process(fps->iothread);
1063                        else
1064                                bnx2fc_process_cq_compl(tgt, wqe);
1065                        num_free_sqes++;
1066                }
1067                cqe++;
1068                tgt->cq_cons_idx++;
1069                num_cqes++;
1070
1071                if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
1072                        tgt->cq_cons_idx = 0;
1073                        cqe = cq;
1074                        tgt->cq_curr_toggle_bit =
1075                                1 - tgt->cq_curr_toggle_bit;
1076                }
1077        }
1078        if (num_cqes) {
1079                /* Arm CQ only if doorbell is mapped */
1080                if (tgt->ctx_base)
1081                        bnx2fc_arm_cq(tgt);
1082                atomic_add(num_free_sqes, &tgt->free_sqes);
1083        }
1084        spin_unlock_bh(&tgt->cq_lock);
1085        return 0;
1086}
1087
1088/**
1089 * bnx2fc_fastpath_notification - process global event queue (KCQ)
1090 *
1091 * @hba:                adapter structure pointer
1092 * @new_cqe_kcqe:       pointer to newly DMA'd KCQ entry
1093 *
1094 * Fast path event notification handler
1095 */
1096static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
1097                                        struct fcoe_kcqe *new_cqe_kcqe)
1098{
1099        u32 conn_id = new_cqe_kcqe->fcoe_conn_id;
1100        struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];
1101
1102        if (!tgt) {
1103                printk(KERN_ERR PFX "conn_id 0x%x not valid\n", conn_id);
1104                return;
1105        }
1106
1107        bnx2fc_process_new_cqes(tgt);
1108}
1109
1110/**
1111 * bnx2fc_process_ofld_cmpl - process FCoE session offload completion
1112 *
1113 * @hba:        adapter structure pointer
1114 * @ofld_kcqe:  connection offload kcqe pointer
1115 *
1116 * handle session offload completion, enable the session if offload is
1117 * successful.
1118 */
1119static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
1120                                        struct fcoe_kcqe *ofld_kcqe)
1121{
1122        struct bnx2fc_rport             *tgt;
1123        struct fcoe_port                *port;
1124        struct bnx2fc_interface         *interface;
1125        u32                             conn_id;
1126        u32                             context_id;
1127
1128        conn_id = ofld_kcqe->fcoe_conn_id;
1129        context_id = ofld_kcqe->fcoe_conn_context_id;
1130        tgt = hba->tgt_ofld_list[conn_id];
1131        if (!tgt) {
1132                printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n");
1133                return;
1134        }
1135        BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
1136                ofld_kcqe->fcoe_conn_context_id);
1137        port = tgt->port;
1138        interface = tgt->port->priv;
1139        if (hba != interface->hba) {
1140                printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n");
1141                goto ofld_cmpl_err;
1142        }
1143        /*
1144         * cnic has allocated a context_id for this session; use this
1145         * while enabling the session.
1146         */
1147        tgt->context_id = context_id;
1148        if (ofld_kcqe->completion_status) {
1149                if (ofld_kcqe->completion_status ==
1150                                FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) {
1151                        printk(KERN_ERR PFX "unable to allocate FCoE context "
1152                                "resources\n");
1153                        set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
1154                }
1155        } else {
1156                /* FW offload request successfully completed */
1157                set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
1158        }
1159ofld_cmpl_err:
1160        set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
1161        wake_up_interruptible(&tgt->ofld_wait);
1162}
1163
1164/**
1165 * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion
1166 *
1167 * @hba:        adapter structure pointer
1168 * @ofld_kcqe:  connection offload kcqe pointer
1169 *
1170 * handle session enable completion, mark the rport as ready
1171 */
1172
1173static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
1174                                                struct fcoe_kcqe *ofld_kcqe)
1175{
1176        struct bnx2fc_rport             *tgt;
1177        struct bnx2fc_interface         *interface;
1178        u32                             conn_id;
1179        u32                             context_id;
1180
1181        context_id = ofld_kcqe->fcoe_conn_context_id;
1182        conn_id = ofld_kcqe->fcoe_conn_id;
1183        tgt = hba->tgt_ofld_list[conn_id];
1184        if (!tgt) {
1185                printk(KERN_ERR PFX "ERROR:enbl_cmpl: No pending ofld req\n");
1186                return;
1187        }
1188
1189        BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n",
1190                ofld_kcqe->fcoe_conn_context_id);
1191
1192        /*
1193         * context_id should be the same for this target during offload
1194         * and enable
1195         */
1196        if (tgt->context_id != context_id) {
1197                printk(KERN_ERR PFX "context id mis-match\n");
1198                return;
1199        }
1200        interface = tgt->port->priv;
1201        if (hba != interface->hba) {
1202                printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
1203                goto enbl_cmpl_err;
1204        }
1205        if (!ofld_kcqe->completion_status)
1206                /* enable successful - rport ready for issuing IOs */
1207                set_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
1208
1209enbl_cmpl_err:
1210        set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
1211        wake_up_interruptible(&tgt->ofld_wait);
1212}
1213
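/*
 * bnx2fc_process_conn_disable_cmpl - process FCoE session disable completion;
 * update the rport flags and wake up the waiter on upld_wait.
 */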
1214static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
1215                                        struct fcoe_kcqe *disable_kcqe)
1216{
1217
1218        struct bnx2fc_rport             *tgt;
1219        u32                             conn_id;
1220
1221        conn_id = disable_kcqe->fcoe_conn_id;
1222        tgt = hba->tgt_ofld_list[conn_id];
1223        if (!tgt) {
1224                printk(KERN_ERR PFX "ERROR: disable_cmpl: No disable req\n");
1225                return;
1226        }
1227
1228        BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id);
1229
1230        if (disable_kcqe->completion_status) {
1231                printk(KERN_ERR PFX "Disable failed with cmpl status %d\n",
1232                        disable_kcqe->completion_status);
1233                set_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags);
1234                set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
1235                wake_up_interruptible(&tgt->upld_wait);
1236        } else {
1237                /* disable successful */
1238                BNX2FC_TGT_DBG(tgt, "disable successful\n");
1239                clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
1240                clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
1241                set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
1242                set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
1243                wake_up_interruptible(&tgt->upld_wait);
1244        }
1245}
1246
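/*
 * bnx2fc_process_conn_destroy_cmpl - process FCoE connection destroy
 * completion and wake up the upload waiter on success.
 */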
1247static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
1248                                        struct fcoe_kcqe *destroy_kcqe)
1249{
1250        struct bnx2fc_rport             *tgt;
1251        u32                             conn_id;
1252
1253        conn_id = destroy_kcqe->fcoe_conn_id;
1254        tgt = hba->tgt_ofld_list[conn_id];
1255        if (!tgt) {
1256                printk(KERN_ERR PFX "destroy_cmpl: No destroy req\n");
1257                return;
1258        }
1259
1260        BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);
1261
1262        if (destroy_kcqe->completion_status) {
1263                printk(KERN_ERR PFX "Destroy conn failed, cmpl status %d\n",
1264                        destroy_kcqe->completion_status);
1265                return;
1266        } else {
1267                /* destroy successful */
1268                BNX2FC_TGT_DBG(tgt, "upload successful\n");
1269                clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
1270                set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags);
1271                set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
1272                wake_up_interruptible(&tgt->upld_wait);
1273        }
1274}
1275
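/* Log the reason the firmware rejected the FCoE init request */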
1276static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
1277{
1278        switch (err_code) {
1279        case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE:
1280                printk(KERN_ERR PFX "init_failure due to invalid opcode\n");
1281                break;
1282
1283        case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE:
1284                printk(KERN_ERR PFX "init failed due to ctx alloc failure\n");
1285                break;
1286
1287        case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
1288                printk(KERN_ERR PFX "init_failure due to NIC error\n");
1289                break;
1290        case FCOE_KCQE_COMPLETION_STATUS_ERROR:
1291                printk(KERN_ERR PFX "init failure due to compl status err\n");
1292                break;
1293        case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION:
1294                printk(KERN_ERR PFX "init failure due to HSI mismatch\n");
1295                break;
1296        default:
1297                printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
1298        }
1299}
1300
1301/**
 1302 * bnx2fc_indicate_kcqe - process KCQE
 1303 *
 1304 * @context:    adapter structure pointer
 1305 * @kcq:        array of kcqe pointers
1306 * @num_cqe:    Number of completion queue elements
1307 *
1308 * Generic KCQ event handler
1309 */
1310void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
1311                                        u32 num_cqe)
1312{
1313        struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
1314        int i = 0;
1315        struct fcoe_kcqe *kcqe = NULL;
1316
1317        while (i < num_cqe) {
1318                kcqe = (struct fcoe_kcqe *) kcq[i++];
1319
1320                switch (kcqe->op_code) {
1321                case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION:
1322                        bnx2fc_fastpath_notification(hba, kcqe);
1323                        break;
1324
1325                case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
1326                        bnx2fc_process_ofld_cmpl(hba, kcqe);
1327                        break;
1328
1329                case FCOE_KCQE_OPCODE_ENABLE_CONN:
1330                        bnx2fc_process_enable_conn_cmpl(hba, kcqe);
1331                        break;
1332
1333                case FCOE_KCQE_OPCODE_INIT_FUNC:
1334                        if (kcqe->completion_status !=
1335                                        FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
1336                                bnx2fc_init_failure(hba,
1337                                                kcqe->completion_status);
1338                        } else {
1339                                set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
1340                                bnx2fc_get_link_state(hba);
1341                                printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n",
1342                                        (u8)hba->pcidev->bus->number);
1343                        }
1344                        break;
1345
1346                case FCOE_KCQE_OPCODE_DESTROY_FUNC:
1347                        if (kcqe->completion_status !=
1348                                        FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
1349
1350                                printk(KERN_ERR PFX "DESTROY failed\n");
1351                        } else {
1352                                printk(KERN_INFO PFX "DESTROY success\n");
1353                        }
1354                        set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
1355                        wake_up_interruptible(&hba->destroy_wait);
1356                        break;
1357
1358                case FCOE_KCQE_OPCODE_DISABLE_CONN:
1359                        bnx2fc_process_conn_disable_cmpl(hba, kcqe);
1360                        break;
1361
1362                case FCOE_KCQE_OPCODE_DESTROY_CONN:
1363                        bnx2fc_process_conn_destroy_cmpl(hba, kcqe);
1364                        break;
1365
1366                case FCOE_KCQE_OPCODE_STAT_FUNC:
1367                        if (kcqe->completion_status !=
1368                            FCOE_KCQE_COMPLETION_STATUS_SUCCESS)
1369                                printk(KERN_ERR PFX "STAT failed\n");
1370                        complete(&hba->stat_req_done);
1371                        break;
1372
1373                case FCOE_KCQE_OPCODE_FCOE_ERROR:
1374                        /* fall thru */
1375                default:
1376                        printk(KERN_ERR PFX "unknown opcode 0x%x\n",
1377                                                                kcqe->op_code);
1378                }
1379        }
1380}
1381
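/**
 * bnx2fc_add_2_sq - post a work request to the connection's send queue
 *
 * @tgt:        offloaded rport the SQE is posted for
 * @xid:        task id of the request being posted
 *
 * Fill the next SQ WQE with the task id and the current toggle bit, then
 * advance the producer index, flipping the toggle bit whenever the ring
 * wraps.
 */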
1382void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
1383{
1384        struct fcoe_sqe *sqe;
1385
1386        sqe = &tgt->sq[tgt->sq_prod_idx];
1387
1388        /* Fill SQ WQE */
1389        sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT;
1390        sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;
1391
1392        /* Advance SQ Prod Idx */
1393        if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) {
1394                tgt->sq_prod_idx = 0;
1395                tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit;
1396        }
1397}
1398
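/**
 * bnx2fc_ring_doorbell - notify the firmware of new send queue entries
 *
 * @tgt:        offloaded rport whose doorbell is rung
 *
 * Build the doorbell message (SQ producer index with the current toggle
 * bit in bit 15) and write it to the connection's mapped doorbell
 * register. The wmb() ensures the SQEs are visible in memory before the
 * doorbell write.
 */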
1399void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
1400{
1401        struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
1402        u32 msg;
1403
1404        wmb();
1405        sq_db->prod = tgt->sq_prod_idx |
1406                                (tgt->sq_curr_toggle_bit << 15);
1407        msg = *((u32 *)sq_db);
1408        writel(cpu_to_le32(msg), tgt->ctx_base);
1409        mmiowb();
1410
1411}
1412
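/**
 * bnx2fc_map_doorbell - map the per-connection doorbell register
 *
 * @tgt:        offloaded rport
 *
 * Compute the doorbell offset within the doorbell PCI BAR from the
 * connection context id and ioremap 4 bytes of it into tgt->ctx_base.
 */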
1413int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
1414{
1415        u32 context_id = tgt->context_id;
1416        struct fcoe_port *port = tgt->port;
1417        u32 reg_off;
1418        resource_size_t reg_base;
1419        struct bnx2fc_interface *interface = port->priv;
1420        struct bnx2fc_hba *hba = interface->hba;
1421
1422        reg_base = pci_resource_start(hba->pcidev,
1423                                        BNX2X_DOORBELL_PCI_BAR);
1424        reg_off = BNX2FC_5771X_DB_PAGE_SIZE *
1425                        (context_id & 0x1FFFF) + DPM_TRIGER_TYPE;
1426        tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
1427        if (!tgt->ctx_base)
1428                return -ENOMEM;
1429        return 0;
1430}
1431
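/**
 * bnx2fc_get_next_rqe - get the next receive queue buffer(s)
 *
 * @tgt:        offloaded rport
 * @num_items:  number of RQ entries to consume
 *
 * Return a pointer to the RQ buffer at the current consumer index and
 * advance the consumer index, wrapping at BNX2FC_RQ_WQES_MAX. Returns
 * NULL if the request would run past the end of the ring.
 */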
1432char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items)
1433{
1434        char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ);
1435
1436        if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX)
1437                return NULL;
1438
1439        tgt->rq_cons_idx += num_items;
1440
1441        if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX)
1442                tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX;
1443
1444        return buf;
1445}
1446
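/**
 * bnx2fc_return_rqe - return receive queue buffer(s) to the firmware
 *
 * @tgt:        offloaded rport
 * @num_items:  number of RQ entries to return
 *
 * Advance the RQ producer index; when the low 15 bits reach
 * BNX2FC_RQ_WQES_MAX they wrap back to zero (with the wrap reflected in
 * bit 15), and the new value is written to tgt->conn_db->rq_prod.
 */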
1447void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
1448{
1449        /* return the rq buffer */
1450        u32 next_prod_idx = tgt->rq_prod_idx + num_items;
1451        if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) {
1452                /* Wrap around RQ */
1453                next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX;
1454        }
1455        tgt->rq_prod_idx = next_prod_idx;
1456        tgt->conn_db->rq_prod = tgt->rq_prod_idx;
1457}
1458
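/**
 * bnx2fc_init_seq_cleanup_task - initialize a sequence cleanup task context
 *
 * @seq_clnp_req:       sequence cleanup request
 * @task:               task context entry to initialize
 * @orig_io_req:        I/O request whose sequence is being cleaned up
 * @offset:             relative offset into the original I/O to resume from
 *
 * Walk the original request's BD list to find the entry covering @offset
 * and program the task SGL (write) or rx sequence context (read)
 * accordingly.
 */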
1459void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
1460                                  struct fcoe_task_ctx_entry *task,
1461                                  struct bnx2fc_cmd *orig_io_req,
1462                                  u32 offset)
1463{
1464        struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd;
1465        struct bnx2fc_rport *tgt = seq_clnp_req->tgt;
1466        struct bnx2fc_interface *interface = tgt->port->priv;
1467        struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl;
1468        struct fcoe_task_ctx_entry *orig_task;
1469        struct fcoe_task_ctx_entry *task_page;
1470        struct fcoe_ext_mul_sges_ctx *sgl;
1471        u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP;
1472        u8 orig_task_type;
1473        u16 orig_xid = orig_io_req->xid;
1474        u32 context_id = tgt->context_id;
1475        u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma;
1476        u32 orig_offset = offset;
1477        int bd_count;
1478        int orig_task_idx, index;
1479        int i;
1480
1481        memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1482
1483        if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
1484                orig_task_type = FCOE_TASK_TYPE_WRITE;
1485        else
1486                orig_task_type = FCOE_TASK_TYPE_READ;
1487
1488        /* Tx flags */
1489        task->txwr_rxrd.const_ctx.tx_flags =
1490                                FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP <<
1491                                FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1492        /* init flags */
1493        task->txwr_rxrd.const_ctx.init_flags = task_type <<
1494                                FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1495        task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1496                                FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1497        task->rxwr_txrd.const_ctx.init_flags = context_id <<
1498                                FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1501
1502        task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
1503
1504        task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0;
1505        task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset;
1506
1507        bd_count = orig_io_req->bd_tbl->bd_valid;
1508
1509        /* obtain the appropriate bd entry from relative offset */
1510        for (i = 0; i < bd_count; i++) {
1511                if (offset < bd[i].buf_len)
1512                        break;
1513                offset -= bd[i].buf_len;
1514        }
1515        phys_addr += (i * sizeof(struct fcoe_bd_ctx));
1516
1517        if (orig_task_type == FCOE_TASK_TYPE_WRITE) {
1518                task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1519                                (u32)phys_addr;
1520                task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1521                                (u32)((u64)phys_addr >> 32);
1522                task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
1523                                bd_count;
1524                task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off =
1525                                offset; /* adjusted offset */
1526                task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i;
1527        } else {
1528                orig_task_idx = orig_xid / BNX2FC_TASKS_PER_PAGE;
1529                index = orig_xid % BNX2FC_TASKS_PER_PAGE;
1530
1531                task_page = (struct fcoe_task_ctx_entry *)
1532                             interface->hba->task_ctx[orig_task_idx];
1533                orig_task = &(task_page[index]);
1534
1535                /* Multiple SGEs were used for this IO */
1536                sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1537                sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr;
1538                sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)phys_addr >> 32);
1539                sgl->mul_sgl.sgl_size = bd_count;
1540                sgl->mul_sgl.cur_sge_off = offset; /* adjusted offset */
1541                sgl->mul_sgl.cur_sge_idx = i;
1542
1543                memset(&task->rxwr_only.rx_seq_ctx, 0,
1544                       sizeof(struct fcoe_rx_seq_ctx));
1545                task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset;
1546                task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset;
1547        }
1548}
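
/**
 * bnx2fc_init_cleanup_task - initialize an exchange cleanup task context
 *
 * @io_req:     cleanup command being posted
 * @task:       task context entry to initialize
 * @orig_xid:   xid of the task being cleaned up
 */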
1549void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
1550                              struct fcoe_task_ctx_entry *task,
1551                              u16 orig_xid)
1552{
1553        u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP;
1554        struct bnx2fc_rport *tgt = io_req->tgt;
1555        u32 context_id = tgt->context_id;
1556
1557        memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1558
1559        /* Tx Write Rx Read */
1560        /* init flags */
1561        task->txwr_rxrd.const_ctx.init_flags = task_type <<
1562                                FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1563        task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1564                                FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1565        if (tgt->dev_type == TYPE_TAPE)
1566                task->txwr_rxrd.const_ctx.init_flags |=
1567                                FCOE_TASK_DEV_TYPE_TAPE <<
1568                                FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1569        else
1570                task->txwr_rxrd.const_ctx.init_flags |=
1571                                FCOE_TASK_DEV_TYPE_DISK <<
1572                                FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1573        task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
1574
1575        /* Tx flags */
1576        task->txwr_rxrd.const_ctx.tx_flags =
1577                                FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
1578                                FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1579
1580        /* Rx Read Tx Write */
1581        task->rxwr_txrd.const_ctx.init_flags = context_id <<
1582                                FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1583        task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
1584                                FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
1585}
1586
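/**
 * bnx2fc_init_mp_task - initialize a middle path (TM/ELS) task context
 *
 * @io_req:     middle path request
 * @task:       task context entry to initialize
 *
 * Program the request and response single-entry SGLs from the mp_req
 * buffers and copy the FC header into the task in big-endian format.
 */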
1587void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
1588                                struct fcoe_task_ctx_entry *task)
1589{
1590        struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
1591        struct bnx2fc_rport *tgt = io_req->tgt;
1592        struct fc_frame_header *fc_hdr;
1593        struct fcoe_ext_mul_sges_ctx *sgl;
1594        u8 task_type = 0;
1595        u64 *hdr;
1596        u64 temp_hdr[3];
1597        u32 context_id;
1598
1599
1600        /* Obtain task_type */
1601        if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) ||
1602            (io_req->cmd_type == BNX2FC_ELS)) {
1603                task_type = FCOE_TASK_TYPE_MIDPATH;
1604        } else if (io_req->cmd_type == BNX2FC_ABTS) {
1605                task_type = FCOE_TASK_TYPE_ABTS;
1606        }
1607
1608        memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1609
1610        /* Setup the task from io_req for easy reference */
1611        io_req->task = task;
1612
1613        BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n",
1614                io_req->cmd_type, task_type);
1615
1616        /* Tx only */
1617        if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
1618            (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
1619                task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1620                                (u32)mp_req->mp_req_bd_dma;
1621                task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1622                                (u32)((u64)mp_req->mp_req_bd_dma >> 32);
1623                task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1;
1624        }
1625
1626        /* Tx Write Rx Read */
1627        /* init flags */
1628        task->txwr_rxrd.const_ctx.init_flags = task_type <<
1629                                FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1630        if (tgt->dev_type == TYPE_TAPE)
1631                task->txwr_rxrd.const_ctx.init_flags |=
1632                                FCOE_TASK_DEV_TYPE_TAPE <<
1633                                FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1634        else
1635                task->txwr_rxrd.const_ctx.init_flags |=
1636                                FCOE_TASK_DEV_TYPE_DISK <<
1637                                FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1638        task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1639                                FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1640
1641        /* tx flags */
1642        task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_INIT <<
1643                                FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1644
1645        /* Rx Write Tx Read */
1646        task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
1647
1648        /* rx flags */
1649        task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
1650                                FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
1651
1652        context_id = tgt->context_id;
1653        task->rxwr_txrd.const_ctx.init_flags = context_id <<
1654                                FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1655
1656        fc_hdr = &(mp_req->req_fc_hdr);
1657        if (task_type == FCOE_TASK_TYPE_MIDPATH) {
1658                fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
1659                fc_hdr->fh_rx_id = cpu_to_be16(0xffff);
1660                task->rxwr_txrd.var_ctx.rx_id = 0xffff;
1661        } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
1662                fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
1663        }
1664
1665        /* Fill FC Header into middle path buffer */
1666        hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr;
1667        memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
1668        hdr[0] = cpu_to_be64(temp_hdr[0]);
1669        hdr[1] = cpu_to_be64(temp_hdr[1]);
1670        hdr[2] = cpu_to_be64(temp_hdr[2]);
1671
1672        /* Rx Only */
1673        if (task_type == FCOE_TASK_TYPE_MIDPATH) {
1674                sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1675
1676                sgl->mul_sgl.cur_sge_addr.lo = (u32)mp_req->mp_resp_bd_dma;
1677                sgl->mul_sgl.cur_sge_addr.hi =
1678                                (u32)((u64)mp_req->mp_resp_bd_dma >> 32);
1679                sgl->mul_sgl.sgl_size = 1;
1680        }
1681}
1682
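/**
 * bnx2fc_init_task - initialize a SCSI read/write task context
 *
 * @io_req:     SCSI command
 * @task:       task context entry to initialize
 *
 * Pick the task type from the data direction, program either a cached
 * SGE (small disk I/Os) or a multi-SGL from the BD table, and build the
 * byte-swapped FCP_CMND IU into the task.
 */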
1683void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
1684                             struct fcoe_task_ctx_entry *task)
1685{
1686        u8 task_type;
1687        struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1688        struct io_bdt *bd_tbl = io_req->bd_tbl;
1689        struct bnx2fc_rport *tgt = io_req->tgt;
1690        struct fcoe_cached_sge_ctx *cached_sge;
1691        struct fcoe_ext_mul_sges_ctx *sgl;
1692        int dev_type = tgt->dev_type;
1693        u64 *fcp_cmnd;
1694        u64 tmp_fcp_cmnd[4];
1695        u32 context_id;
1696        int cnt, i;
1697        int bd_count;
1698
1699        memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1700
1701        /* Setup the task from io_req for easy reference */
1702        io_req->task = task;
1703
1704        if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
1705                task_type = FCOE_TASK_TYPE_WRITE;
1706        else
1707                task_type = FCOE_TASK_TYPE_READ;
1708
1709        /* Tx only */
1710        bd_count = bd_tbl->bd_valid;
1711        cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge;
1712        if (task_type == FCOE_TASK_TYPE_WRITE) {
1713                if ((dev_type == TYPE_DISK) && (bd_count == 1)) {
1714                        struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
1715
1716                        task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.lo =
1717                        cached_sge->cur_buf_addr.lo =
1718                                        fcoe_bd_tbl->buf_addr_lo;
1719                        task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.hi =
1720                        cached_sge->cur_buf_addr.hi =
1721                                        fcoe_bd_tbl->buf_addr_hi;
1722                        task->txwr_only.sgl_ctx.cached_sge.cur_buf_rem =
1723                        cached_sge->cur_buf_rem =
1724                                        fcoe_bd_tbl->buf_len;
1725
1726                        task->txwr_rxrd.const_ctx.init_flags |= 1 <<
1727                                FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
1728                } else {
1729                        task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1730                                        (u32)bd_tbl->bd_tbl_dma;
1731                        task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1732                                        (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1733                        task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
1734                                        bd_tbl->bd_valid;
1735                }
1736        }
1737
1738        /* Tx Write Rx Read */
1739        /* Init state to NORMAL */
1740        task->txwr_rxrd.const_ctx.init_flags |= task_type <<
1741                                FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1742        if (dev_type == TYPE_TAPE) {
1743                task->txwr_rxrd.const_ctx.init_flags |=
1744                                FCOE_TASK_DEV_TYPE_TAPE <<
1745                                FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1746                io_req->rec_retry = 0;
1748        } else
1749                task->txwr_rxrd.const_ctx.init_flags |=
1750                                FCOE_TASK_DEV_TYPE_DISK <<
1751                                FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1752        task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1753                                FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1754        /* tx flags */
1755        task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
1756                                FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1757
1758        /* Set initial seq counter */
1759        task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1;
1760
1761        /* Fill FCP_CMND IU */
1762        fcp_cmnd = (u64 *)
1763                    task->txwr_rxrd.union_ctx.fcp_cmd.opaque;
1764        bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
1765
1766        /* swap fcp_cmnd */
1767        cnt = sizeof(struct fcp_cmnd) / sizeof(u64);
1768
1769        for (i = 0; i < cnt; i++) {
1770                *fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
1771                fcp_cmnd++;
1772        }
1773
1774        /* Rx Write Tx Read */
1775        task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
1776
1777        context_id = tgt->context_id;
1778        task->rxwr_txrd.const_ctx.init_flags = context_id <<
1779                                FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1780
1781        /* rx flags */
1782        /* Set state to "waiting for the first packet" */
1783        task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
1784                                FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
1785
1786        task->rxwr_txrd.var_ctx.rx_id = 0xffff;
1787
1788        /* Rx Only */
1789        if (task_type != FCOE_TASK_TYPE_READ)
1790                return;
1791
1792        sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1793        bd_count = bd_tbl->bd_valid;
1794
1795        if (dev_type == TYPE_DISK) {
1796                if (bd_count == 1) {
1797
1798                        struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
1799
1800                        cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
1801                        cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
1802                        cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
1803                        task->txwr_rxrd.const_ctx.init_flags |= 1 <<
1804                                FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
1805                } else if (bd_count == 2) {
1806                        struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
1807
1808                        cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
1809                        cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
1810                        cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
1811
1812                        fcoe_bd_tbl++;
1813                        cached_sge->second_buf_addr.lo =
1814                                                 fcoe_bd_tbl->buf_addr_lo;
1815                        cached_sge->second_buf_addr.hi =
1816                                                fcoe_bd_tbl->buf_addr_hi;
1817                        cached_sge->second_buf_rem = fcoe_bd_tbl->buf_len;
1818                        task->txwr_rxrd.const_ctx.init_flags |= 1 <<
1819                                FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
1820                } else {
1821
1822                        sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
1823                        sgl->mul_sgl.cur_sge_addr.hi =
1824                                        (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1825                        sgl->mul_sgl.sgl_size = bd_count;
1826                }
1827        } else {
1828                sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
1829                sgl->mul_sgl.cur_sge_addr.hi =
1830                                (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1831                sgl->mul_sgl.sgl_size = bd_count;
1832        }
1833}
1834
1835/**
1836 * bnx2fc_setup_task_ctx - allocate and map task context
1837 *
1838 * @hba:        pointer to adapter structure
1839 *
1840 * Allocate memory for the task contexts and the associated BD table to
1841 * be used by the firmware.
1842 *
1843 */
1844int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
1845{
1846        int rc = 0;
1847        struct regpair *task_ctx_bdt;
1848        dma_addr_t addr;
1849        int task_ctx_arr_sz;
1850        int i;
1851
1852        /*
1853         * Allocate task context bd table. A page size of bd table
1854         * can map 256 buffers. Each buffer contains 32 task context
1855         * entries. Hence the limit with one page is 8192 task context
1856         * entries.
1857         */
1858        hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
1859                                                  PAGE_SIZE,
1860                                                  &hba->task_ctx_bd_dma,
1861                                                  GFP_KERNEL);
1862        if (!hba->task_ctx_bd_tbl) {
1863                printk(KERN_ERR PFX "unable to allocate task context BDT\n");
1864                rc = -ENOMEM;
1865                goto out;
1866        }
1867        memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);
1868
1869        /*
1870         * Allocate task_ctx which is an array of pointers pointing to
1871         * a page containing 32 task contexts
1872         */
1873        task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE);
1874        hba->task_ctx = kzalloc((task_ctx_arr_sz * sizeof(void *)),
1875                                 GFP_KERNEL);
1876        if (!hba->task_ctx) {
1877                printk(KERN_ERR PFX "unable to allocate task context array\n");
1878                rc = -ENOMEM;
1879                goto out1;
1880        }
1881
1882        /*
1883         * Allocate task_ctx_dma which is an array of dma addresses
1884         */
1885        hba->task_ctx_dma = kmalloc((task_ctx_arr_sz *
1886                                        sizeof(dma_addr_t)), GFP_KERNEL);
1887        if (!hba->task_ctx_dma) {
1888                printk(KERN_ERR PFX "unable to alloc context mapping array\n");
1889                rc = -ENOMEM;
1890                goto out2;
1891        }
1892
1893        task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
1894        for (i = 0; i < task_ctx_arr_sz; i++) {
1895
1896                hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
1897                                                      PAGE_SIZE,
1898                                                      &hba->task_ctx_dma[i],
1899                                                      GFP_KERNEL);
1900                if (!hba->task_ctx[i]) {
1901                        printk(KERN_ERR PFX "unable to alloc task context\n");
1902                        rc = -ENOMEM;
1903                        goto out3;
1904                }
1905                memset(hba->task_ctx[i], 0, PAGE_SIZE);
1906                addr = (u64)hba->task_ctx_dma[i];
1907                task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
1908                task_ctx_bdt->lo = cpu_to_le32((u32)addr);
1909                task_ctx_bdt++;
1910        }
1911        return 0;
1912
1913out3:
1914        for (i = 0; i < task_ctx_arr_sz; i++) {
1915                if (hba->task_ctx[i]) {
1916
1917                        dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1918                                hba->task_ctx[i], hba->task_ctx_dma[i]);
1919                        hba->task_ctx[i] = NULL;
1920                }
1921        }
1922
1923        kfree(hba->task_ctx_dma);
1924        hba->task_ctx_dma = NULL;
1925out2:
1926        kfree(hba->task_ctx);
1927        hba->task_ctx = NULL;
1928out1:
1929        dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1930                        hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma);
1931        hba->task_ctx_bd_tbl = NULL;
1932out:
1933        return rc;
1934}
1935
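/**
 * bnx2fc_free_task_ctx - free task context resources
 *
 * @hba:        pointer to adapter structure
 *
 * Free the per-page task context buffers, the DMA address array and the
 * task context BD table allocated by bnx2fc_setup_task_ctx().
 */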
1936void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
1937{
1938        int task_ctx_arr_sz;
1939        int i;
1940
1941        if (hba->task_ctx_bd_tbl) {
1942                dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1943                                    hba->task_ctx_bd_tbl,
1944                                    hba->task_ctx_bd_dma);
1945                hba->task_ctx_bd_tbl = NULL;
1946        }
1947
1948        task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE);
1949        if (hba->task_ctx) {
1950                for (i = 0; i < task_ctx_arr_sz; i++) {
1951                        if (hba->task_ctx[i]) {
1952                                dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1953                                                    hba->task_ctx[i],
1954                                                    hba->task_ctx_dma[i]);
1955                                hba->task_ctx[i] = NULL;
1956                        }
1957                }
1958                kfree(hba->task_ctx);
1959                hba->task_ctx = NULL;
1960        }
1961
1962        kfree(hba->task_ctx_dma);
1963        hba->task_ctx_dma = NULL;
1964}
1965
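/*
 * Free the session hash table: walk the PBL to recover each segment's
 * DMA address, free the segments, the segment pointer array and the PBL
 * page itself.
 */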
1966static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
1967{
1968        int i;
1969        int segment_count;
1971        u32 *pbl;
1972
1973        segment_count = hba->hash_tbl_segment_count;
1976
1977        pbl = hba->hash_tbl_pbl;
1978        for (i = 0; i < segment_count; ++i) {
1979                dma_addr_t dma_address;
1980
1981                dma_address = le32_to_cpu(*pbl);
1982                ++pbl;
1983                dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
1984                ++pbl;
1985                dma_free_coherent(&hba->pcidev->dev,
1986                                  BNX2FC_HASH_TBL_CHUNK_SIZE,
1987                                  hba->hash_tbl_segments[i],
1988                                  dma_address);
1989
1990        }
1991
1992        if (hba->hash_tbl_pbl) {
1993                dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1994                                    hba->hash_tbl_pbl,
1995                                    hba->hash_tbl_pbl_dma);
1996                hba->hash_tbl_pbl = NULL;
1997        }

        /* Free the array of segment pointers as well */
        kfree(hba->hash_tbl_segments);
        hba->hash_tbl_segments = NULL;
1998}
1999
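/*
 * Allocate the session hash table in BNX2FC_HASH_TBL_CHUNK_SIZE DMA
 * segments and build a page-sized PBL that holds each segment's DMA
 * address as a lo/hi 32-bit pair for the firmware.
 */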
2000static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
2001{
2002        int i;
2003        int hash_table_size;
2004        int segment_count;
2005        int segment_array_size;
2006        int dma_segment_array_size;
2007        dma_addr_t *dma_segment_array;
2008        u32 *pbl;
2009
2010        hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
2011                sizeof(struct fcoe_hash_table_entry);
2012
2013        segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1;
2014        segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE;
2015        hba->hash_tbl_segment_count = segment_count;
2016
2017        segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments);
2018        hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL);
2019        if (!hba->hash_tbl_segments) {
2020                printk(KERN_ERR PFX "hash table pointers alloc failed\n");
2021                return -ENOMEM;
2022        }
2023        dma_segment_array_size = segment_count * sizeof(*dma_segment_array);
2024        dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
2025        if (!dma_segment_array) {
2026                printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
                kfree(hba->hash_tbl_segments);
                hba->hash_tbl_segments = NULL;
2027                return -ENOMEM;
2028        }
2029
2030        for (i = 0; i < segment_count; ++i) {
2031                hba->hash_tbl_segments[i] =
2032                        dma_alloc_coherent(&hba->pcidev->dev,
2033                                           BNX2FC_HASH_TBL_CHUNK_SIZE,
2034                                           &dma_segment_array[i],
2035                                           GFP_KERNEL);
2036                if (!hba->hash_tbl_segments[i]) {
2037                        printk(KERN_ERR PFX "hash segment alloc failed\n");
2038                        while (--i >= 0) {
2039                                dma_free_coherent(&hba->pcidev->dev,
2040                                                    BNX2FC_HASH_TBL_CHUNK_SIZE,
2041                                                    hba->hash_tbl_segments[i],
2042                                                    dma_segment_array[i]);
2043                                hba->hash_tbl_segments[i] = NULL;
2044                        }
2045                        kfree(dma_segment_array);
                        kfree(hba->hash_tbl_segments);
                        hba->hash_tbl_segments = NULL;
2046                        return -ENOMEM;
2047                }
2048                memset(hba->hash_tbl_segments[i], 0,
2049                       BNX2FC_HASH_TBL_CHUNK_SIZE);
2050        }
2051
2052        hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev,
2053                                               PAGE_SIZE,
2054                                               &hba->hash_tbl_pbl_dma,
2055                                               GFP_KERNEL);
2056        if (!hba->hash_tbl_pbl) {
2057                printk(KERN_ERR PFX "hash table pbl alloc failed\n");
                /* free the already-allocated segments before bailing out */
                for (i = 0; i < segment_count; ++i) {
                        dma_free_coherent(&hba->pcidev->dev,
                                            BNX2FC_HASH_TBL_CHUNK_SIZE,
                                            hba->hash_tbl_segments[i],
                                            dma_segment_array[i]);
                        hba->hash_tbl_segments[i] = NULL;
                }
                kfree(hba->hash_tbl_segments);
                hba->hash_tbl_segments = NULL;
2058                kfree(dma_segment_array);
2059                return -ENOMEM;
2060        }
2061        memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);
2062
2063        pbl = hba->hash_tbl_pbl;
2064        for (i = 0; i < segment_count; ++i) {
2065                u64 paddr = dma_segment_array[i];
2066                *pbl = cpu_to_le32((u32) paddr);
2067                ++pbl;
2068                *pbl = cpu_to_le32((u32) (paddr >> 32));
2069                ++pbl;
2070        }
2071        pbl = hba->hash_tbl_pbl;
2072        i = 0;
2073        while (*pbl && *(pbl + 1)) {
2074                u32 lo;
2075                u32 hi;
2076                lo = *pbl;
2077                ++pbl;
2078                hi = *pbl;
2079                ++pbl;
2080                ++i;
2081        }
2082        kfree(dma_segment_array);
2083        return 0;
2084}
2085
2086/**
2087 * bnx2fc_setup_fw_resc - Allocate and map hash tables, dummy and stats buffers
2088 *
2089 * @hba:        Pointer to adapter structure
2090 *
2091 */
2092int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
2093{
2094        u64 addr;
2095        u32 mem_size;
2096        int i;
2097
2098        if (bnx2fc_allocate_hash_table(hba))
2099                return -ENOMEM;
2100
2101        mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
2102        hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
2103                                                  &hba->t2_hash_tbl_ptr_dma,
2104                                                  GFP_KERNEL);
2105        if (!hba->t2_hash_tbl_ptr) {
2106                printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
2107                bnx2fc_free_fw_resc(hba);
2108                return -ENOMEM;
2109        }
2110        memset(hba->t2_hash_tbl_ptr, 0x00, mem_size);
2111
2112        mem_size = BNX2FC_NUM_MAX_SESS *
2113                                sizeof(struct fcoe_t2_hash_table_entry);
2114        hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
2115                                              &hba->t2_hash_tbl_dma,
2116                                              GFP_KERNEL);
2117        if (!hba->t2_hash_tbl) {
2118                printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
2119                bnx2fc_free_fw_resc(hba);
2120                return -ENOMEM;
2121        }
2122        memset(hba->t2_hash_tbl, 0x00, mem_size);
2123        for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
2124                addr = (u64) hba->t2_hash_tbl_dma +
2125                         ((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
2126                hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff;
2127                hba->t2_hash_tbl[i].next.hi = addr >> 32;
2128        }
2129
2130        hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
2131                                               PAGE_SIZE, &hba->dummy_buf_dma,
2132                                               GFP_KERNEL);
2133        if (!hba->dummy_buffer) {
2134                printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n");
2135                bnx2fc_free_fw_resc(hba);
2136                return -ENOMEM;
2137        }
2138
2139        hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev,
2140                                               PAGE_SIZE,
2141                                               &hba->stats_buf_dma,
2142                                               GFP_KERNEL);
2143        if (!hba->stats_buffer) {
2144                printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
2145                bnx2fc_free_fw_resc(hba);
2146                return -ENOMEM;
2147        }
2148        memset(hba->stats_buffer, 0x00, PAGE_SIZE);
2149
2150        return 0;
2151}
2152
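/**
 * bnx2fc_free_fw_resc - free firmware resources
 *
 * @hba:        pointer to adapter structure
 *
 * Release the stats buffer, dummy buffer, T2 hash tables and the session
 * hash table set up by bnx2fc_setup_fw_resc().
 */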
2153void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba)
2154{
2155        u32 mem_size;
2156
2157        if (hba->stats_buffer) {
2158                dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
2159                                  hba->stats_buffer, hba->stats_buf_dma);
2160                hba->stats_buffer = NULL;
2161        }
2162
2163        if (hba->dummy_buffer) {
2164                dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
2165                                  hba->dummy_buffer, hba->dummy_buf_dma);
2166                hba->dummy_buffer = NULL;
2167        }
2168
2169        if (hba->t2_hash_tbl_ptr) {
2170                mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
2171                dma_free_coherent(&hba->pcidev->dev, mem_size,
2172                                    hba->t2_hash_tbl_ptr,
2173                                    hba->t2_hash_tbl_ptr_dma);
2174                hba->t2_hash_tbl_ptr = NULL;
2175        }
2176
2177        if (hba->t2_hash_tbl) {
2178                mem_size = BNX2FC_NUM_MAX_SESS *
2179                            sizeof(struct fcoe_t2_hash_table_entry);
2180                dma_free_coherent(&hba->pcidev->dev, mem_size,
2181                                    hba->t2_hash_tbl, hba->t2_hash_tbl_dma);
2182                hba->t2_hash_tbl = NULL;
2183        }
2184        bnx2fc_free_hash_table(hba);
2185}
2186