linux/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
   1/*
   2 * QLogic qlcnic NIC Driver
   3 * Copyright (c) 2009-2013 QLogic Corporation
   4 *
   5 * See LICENSE.qlcnic for copyright and licensing details.
   6 */
   7
   8#include "qlcnic_sriov.h"
   9#include "qlcnic.h"
  10#include "qlcnic_83xx_hw.h"
  11#include <linux/types.h>
  12
  13#define QLC_BC_COMMAND  0
  14#define QLC_BC_RESPONSE 1
  15
  16#define QLC_MBOX_RESP_TIMEOUT           (10 * HZ)
  17#define QLC_MBOX_CH_FREE_TIMEOUT        (10 * HZ)
  18
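/* Bit positions in the back-channel event word received from firmware:
 * QLC_BC_MSG (message pending), QLC_BC_CFREE (channel free) and
 * QLC_BC_FLR (function level reset). Bits 4-11 of the same word carry the
 * target PCI function id, see qlcnic_sriov_target_func_id().
 */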
  19#define QLC_BC_MSG              0
  20#define QLC_BC_CFREE            1
  21#define QLC_BC_FLR              2
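/* Each back-channel mailbox frame carries a 16-byte qlcnic_bc_hdr plus up
 * to QLC_BC_PAYLOAD_SZ bytes of payload; larger requests and responses are
 * split into multiple fragments and reassembled using the header's
 * seq_id/frag_num fields.
 */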
  22#define QLC_BC_HDR_SZ           16
  23#define QLC_BC_PAYLOAD_SZ       (1024 - QLC_BC_HDR_SZ)
  24
  25#define QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF            2048
  26#define QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF      512
  27
  28#define QLC_83XX_VF_RESET_FAIL_THRESH   8
  29#define QLC_BC_CMD_MAX_RETRY_CNT        5
  30
  31static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *);
  32static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
  33static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
  34static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *);
  35static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *);
  36static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *,
  37                                  struct qlcnic_cmd_args *);
  38static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
  39
  40static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
  41        .read_crb                       = qlcnic_83xx_read_crb,
  42        .write_crb                      = qlcnic_83xx_write_crb,
  43        .read_reg                       = qlcnic_83xx_rd_reg_indirect,
  44        .write_reg                      = qlcnic_83xx_wrt_reg_indirect,
  45        .get_mac_address                = qlcnic_83xx_get_mac_address,
  46        .setup_intr                     = qlcnic_83xx_setup_intr,
  47        .alloc_mbx_args                 = qlcnic_83xx_alloc_mbx_args,
  48        .mbx_cmd                        = qlcnic_sriov_vf_mbx_op,
  49        .get_func_no                    = qlcnic_83xx_get_func_no,
  50        .api_lock                       = qlcnic_83xx_cam_lock,
  51        .api_unlock                     = qlcnic_83xx_cam_unlock,
  52        .process_lb_rcv_ring_diag       = qlcnic_83xx_process_rcv_ring_diag,
  53        .create_rx_ctx                  = qlcnic_83xx_create_rx_ctx,
  54        .create_tx_ctx                  = qlcnic_83xx_create_tx_ctx,
  55        .del_rx_ctx                     = qlcnic_83xx_del_rx_ctx,
  56        .del_tx_ctx                     = qlcnic_83xx_del_tx_ctx,
  57        .setup_link_event               = qlcnic_83xx_setup_link_event,
  58        .get_nic_info                   = qlcnic_83xx_get_nic_info,
  59        .get_pci_info                   = qlcnic_83xx_get_pci_info,
  60        .set_nic_info                   = qlcnic_83xx_set_nic_info,
  61        .change_macvlan                 = qlcnic_83xx_sre_macaddr_change,
  62        .napi_enable                    = qlcnic_83xx_napi_enable,
  63        .napi_disable                   = qlcnic_83xx_napi_disable,
  64        .config_intr_coal               = qlcnic_83xx_config_intr_coal,
  65        .config_rss                     = qlcnic_83xx_config_rss,
  66        .config_hw_lro                  = qlcnic_83xx_config_hw_lro,
  67        .config_promisc_mode            = qlcnic_83xx_nic_set_promisc,
  68        .change_l2_filter               = qlcnic_83xx_change_l2_filter,
  69        .get_board_info                 = qlcnic_83xx_get_port_info,
  70        .free_mac_list                  = qlcnic_sriov_vf_free_mac_list,
  71};
  72
  73static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
  74        .config_bridged_mode    = qlcnic_config_bridged_mode,
  75        .config_led             = qlcnic_config_led,
  76        .cancel_idc_work        = qlcnic_sriov_vf_cancel_fw_work,
  77        .napi_add               = qlcnic_83xx_napi_add,
  78        .napi_del               = qlcnic_83xx_napi_del,
  79        .shutdown               = qlcnic_sriov_vf_shutdown,
  80        .resume                 = qlcnic_sriov_vf_resume,
  81        .config_ipaddr          = qlcnic_83xx_config_ipaddr,
  82        .clear_legacy_intr      = qlcnic_83xx_clear_legacy_intr,
  83};
  84
  85static const struct qlcnic_mailbox_metadata qlcnic_sriov_bc_mbx_tbl[] = {
  86        {QLCNIC_BC_CMD_CHANNEL_INIT, 2, 2},
  87        {QLCNIC_BC_CMD_CHANNEL_TERM, 2, 2},
  88        {QLCNIC_BC_CMD_GET_ACL, 3, 14},
  89        {QLCNIC_BC_CMD_CFG_GUEST_VLAN, 2, 2},
  90};
  91
  92static inline bool qlcnic_sriov_bc_msg_check(u32 val)
  93{
  94        return (val & (1 << QLC_BC_MSG)) ? true : false;
  95}
  96
  97static inline bool qlcnic_sriov_channel_free_check(u32 val)
  98{
  99        return (val & (1 << QLC_BC_CFREE)) ? true : false;
 100}
 101
 102static inline bool qlcnic_sriov_flr_check(u32 val)
 103{
 104        return (val & (1 << QLC_BC_FLR)) ? true : false;
 105}
 106
 107static inline u8 qlcnic_sriov_target_func_id(u32 val)
 108{
 109        return (val >> 4) & 0xff;
 110}
 111
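/* Compute the virtual function's PCI function id for VF 'vf_id' from the
 * PF's SR-IOV capability (VF offset and stride). Not applicable on a VF,
 * where 0 is returned.
 */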
 112static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id)
 113{
 114        struct pci_dev *dev = adapter->pdev;
 115        int pos;
 116        u16 stride, offset;
 117
 118        if (qlcnic_sriov_vf_check(adapter))
 119                return 0;
 120
 121        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
 122        pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
 123        pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
 124
 125        return (dev->devfn + offset + stride * vf_id) & 0xff;
 126}
 127
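/* Allocate the SR-IOV state for 'num_vfs' VFs: per-VF info structures, the
 * bc-trans and async workqueues, and (when called on the PF) a vport per
 * VF with a randomly generated MAC address.
 */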
 128int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
 129{
 130        struct qlcnic_sriov *sriov;
 131        struct qlcnic_back_channel *bc;
 132        struct workqueue_struct *wq;
 133        struct qlcnic_vport *vp;
 134        struct qlcnic_vf_info *vf;
 135        int err, i;
 136
 137        if (!qlcnic_sriov_enable_check(adapter))
 138                return -EIO;
 139
 140        sriov  = kzalloc(sizeof(struct qlcnic_sriov), GFP_KERNEL);
 141        if (!sriov)
 142                return -ENOMEM;
 143
 144        adapter->ahw->sriov = sriov;
 145        sriov->num_vfs = num_vfs;
 146        bc = &sriov->bc;
 147        sriov->vf_info = kzalloc(sizeof(struct qlcnic_vf_info) *
 148                                 num_vfs, GFP_KERNEL);
 149        if (!sriov->vf_info) {
 150                err = -ENOMEM;
 151                goto qlcnic_free_sriov;
 152        }
 153
 154        wq = create_singlethread_workqueue("bc-trans");
 155        if (wq == NULL) {
 156                err = -ENOMEM;
 157                dev_err(&adapter->pdev->dev,
 158                        "Cannot create bc-trans workqueue\n");
 159                goto qlcnic_free_vf_info;
 160        }
 161
 162        bc->bc_trans_wq = wq;
 163
 164        wq = create_singlethread_workqueue("async");
 165        if (wq == NULL) {
 166                err = -ENOMEM;
 167                dev_err(&adapter->pdev->dev, "Cannot create async workqueue\n");
 168                goto qlcnic_destroy_trans_wq;
 169        }
 170
 171        bc->bc_async_wq =  wq;
 172        INIT_LIST_HEAD(&bc->async_list);
 173
 174        for (i = 0; i < num_vfs; i++) {
 175                vf = &sriov->vf_info[i];
 176                vf->adapter = adapter;
 177                vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
 178                mutex_init(&vf->send_cmd_lock);
 179                INIT_LIST_HEAD(&vf->rcv_act.wait_list);
 180                INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
 181                spin_lock_init(&vf->rcv_act.lock);
 182                spin_lock_init(&vf->rcv_pend.lock);
 183                init_completion(&vf->ch_free_cmpl);
 184
 185                INIT_WORK(&vf->trans_work, qlcnic_sriov_process_bc_cmd);
 186
 187                if (qlcnic_sriov_pf_check(adapter)) {
 188                        vp = kzalloc(sizeof(struct qlcnic_vport), GFP_KERNEL);
 189                        if (!vp) {
 190                                err = -ENOMEM;
 191                                goto qlcnic_destroy_async_wq;
 192                        }
 193                        sriov->vf_info[i].vp = vp;
 194                        vp->max_tx_bw = MAX_BW;
 195                        vp->spoofchk = true;
 196                        random_ether_addr(vp->mac);
 197                        dev_info(&adapter->pdev->dev,
 198                                 "MAC Address %pM is configured for VF %d\n",
 199                                 vp->mac, i);
 200                }
 201        }
 202
 203        return 0;
 204
 205qlcnic_destroy_async_wq:
 206        destroy_workqueue(bc->bc_async_wq);
 207
 208qlcnic_destroy_trans_wq:
 209        destroy_workqueue(bc->bc_trans_wq);
 210
 211qlcnic_free_vf_info:
 212        kfree(sriov->vf_info);
 213
 214qlcnic_free_sriov:
 215        kfree(adapter->ahw->sriov);
 216        return err;
 217}
 218
 219void qlcnic_sriov_cleanup_list(struct qlcnic_trans_list *t_list)
 220{
 221        struct qlcnic_bc_trans *trans;
 222        struct qlcnic_cmd_args cmd;
 223        unsigned long flags;
 224
 225        spin_lock_irqsave(&t_list->lock, flags);
 226
 227        while (!list_empty(&t_list->wait_list)) {
 228                trans = list_first_entry(&t_list->wait_list,
 229                                         struct qlcnic_bc_trans, list);
 230                list_del(&trans->list);
 231                t_list->count--;
 232                cmd.req.arg = (u32 *)trans->req_pay;
 233                cmd.rsp.arg = (u32 *)trans->rsp_pay;
 234                qlcnic_free_mbx_args(&cmd);
 235                qlcnic_sriov_cleanup_transaction(trans);
 236        }
 237
 238        spin_unlock_irqrestore(&t_list->lock, flags);
 239}
 240
 241void __qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
 242{
 243        struct qlcnic_sriov *sriov = adapter->ahw->sriov;
 244        struct qlcnic_back_channel *bc = &sriov->bc;
 245        struct qlcnic_vf_info *vf;
 246        int i;
 247
 248        if (!qlcnic_sriov_enable_check(adapter))
 249                return;
 250
 251        qlcnic_sriov_cleanup_async_list(bc);
 252        destroy_workqueue(bc->bc_async_wq);
 253
 254        for (i = 0; i < sriov->num_vfs; i++) {
 255                vf = &sriov->vf_info[i];
 256                qlcnic_sriov_cleanup_list(&vf->rcv_pend);
 257                cancel_work_sync(&vf->trans_work);
 258                qlcnic_sriov_cleanup_list(&vf->rcv_act);
 259        }
 260
 261        destroy_workqueue(bc->bc_trans_wq);
 262
 263        for (i = 0; i < sriov->num_vfs; i++)
 264                kfree(sriov->vf_info[i].vp);
 265
 266        kfree(sriov->vf_info);
 267        kfree(adapter->ahw->sriov);
 268}
 269
 270static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter *adapter)
 271{
 272        qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
 273        qlcnic_sriov_cfg_bc_intr(adapter, 0);
 274        __qlcnic_sriov_cleanup(adapter);
 275}
 276
 277void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
 278{
 279        if (qlcnic_sriov_pf_check(adapter))
 280                qlcnic_sriov_pf_cleanup(adapter);
 281
 282        if (qlcnic_sriov_vf_check(adapter))
 283                qlcnic_sriov_vf_cleanup(adapter);
 284}
 285
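/* Post one back-channel fragment to firmware through the host mailbox.
 * Rough register layout, as programmed below:
 *   MBX_HOST[0]: 0x31 | (header + payload word count << 16) |
 *                (fw_hal_version << 29)
 *   MBX_HOST[1]: 0x1 | (1 << 4) | (pci_func << 5, PF only)
 *   MBX_HOST[2..]: qlcnic_bc_hdr words followed by payload words
 * Returns QLCNIC_RCODE_SUCCESS, a mailbox error code, or
 * QLCNIC_RCODE_TIMEOUT if the poll times out.
 */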
 286static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
 287                                    u32 *pay, u8 pci_func, u8 size)
 288{
 289        u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val, wait_time = 0;
 290        struct qlcnic_hardware_context *ahw = adapter->ahw;
 291        unsigned long flags;
 292        u16 opcode;
 293        u8 mbx_err_code;
 294        int i, j;
 295
 296        opcode = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;
 297
 298        if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
 299                dev_info(&adapter->pdev->dev,
 300                         "Mailbox cmd attempted, 0x%x\n", opcode);
 301                dev_info(&adapter->pdev->dev, "Mailbox detached\n");
 302                return 0;
 303        }
 304
 305        spin_lock_irqsave(&ahw->mbx_lock, flags);
 306
 307        mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
 308        if (mbx_val) {
 309                QLCDB(adapter, DRV, "Mailbox cmd attempted, 0x%x\n", opcode);
 310                spin_unlock_irqrestore(&ahw->mbx_lock, flags);
 311                return QLCNIC_RCODE_TIMEOUT;
 312        }
 313        /* Fill in mailbox registers */
 314        val = size + (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
 315        mbx_cmd = 0x31 | (val << 16) | (adapter->ahw->fw_hal_version << 29);
 316
 317        writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
 318        mbx_cmd = 0x1 | (1 << 4);
 319
 320        if (qlcnic_sriov_pf_check(adapter))
 321                mbx_cmd |= (pci_func << 5);
 322
 323        writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 1));
 324        for (i = 2, j = 0; j < (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
 325                        i++, j++) {
 326                writel(*(hdr++), QLCNIC_MBX_HOST(ahw, i));
 327        }
 328        for (j = 0; j < size; j++, i++)
 329                writel(*(pay++), QLCNIC_MBX_HOST(ahw, i));
 330
 331        /* Signal FW about the impending command */
 332        QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
 333
  334        /* Wait for the mailbox cmd to complete; AENs that arrive while
  335         * waiting are processed in-line below. If more than 5 seconds
  336         * expire we can assume something is wrong.
  337         */
 338poll:
 339        rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time);
 340        if (rsp != QLCNIC_RCODE_TIMEOUT) {
 341                /* Get the FW response data */
 342                fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
 343                if (fw_data &  QLCNIC_MBX_ASYNC_EVENT) {
 344                        __qlcnic_83xx_process_aen(adapter);
 345                        goto poll;
 346                }
 347                mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
 348                rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
 349                opcode = QLCNIC_MBX_RSP(fw_data);
 350
 351                switch (mbx_err_code) {
 352                case QLCNIC_MBX_RSP_OK:
 353                case QLCNIC_MBX_PORT_RSP_OK:
 354                        rsp = QLCNIC_RCODE_SUCCESS;
 355                        break;
 356                default:
 357                        if (opcode == QLCNIC_CMD_CONFIG_MAC_VLAN) {
 358                                rsp = qlcnic_83xx_mac_rcode(adapter);
 359                                if (!rsp)
 360                                        goto out;
 361                        }
 362                        dev_err(&adapter->pdev->dev,
 363                                "MBX command 0x%x failed with err:0x%x\n",
 364                                opcode, mbx_err_code);
 365                        rsp = mbx_err_code;
 366                        break;
 367                }
 368                goto out;
 369        }
 370
 371        dev_err(&adapter->pdev->dev, "MBX command 0x%x timed out\n",
 372                QLCNIC_MBX_RSP(mbx_cmd));
 373        rsp = QLCNIC_RCODE_TIMEOUT;
 374out:
 375        /* clear fw mbx control register */
 376        QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
 377        spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
 378        return rsp;
 379}
 380
 381static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter)
 382{
 383        adapter->num_rxd = QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF;
 384        adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
 385        adapter->num_jumbo_rxd = QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF;
 386        adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
 387        adapter->num_txd = MAX_CMD_DESCRIPTORS;
 388        adapter->max_rds_rings = MAX_RDS_RINGS;
 389}
 390
 391int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *adapter,
 392                                   struct qlcnic_info *npar_info, u16 vport_id)
 393{
 394        struct device *dev = &adapter->pdev->dev;
 395        struct qlcnic_cmd_args cmd;
 396        int err;
 397        u32 status;
 398
 399        err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
 400        if (err)
 401                return err;
 402
 403        cmd.req.arg[1] = vport_id << 16 | 0x1;
 404        err = qlcnic_issue_cmd(adapter, &cmd);
 405        if (err) {
 406                dev_err(&adapter->pdev->dev,
 407                        "Failed to get vport info, err=%d\n", err);
 408                qlcnic_free_mbx_args(&cmd);
 409                return err;
 410        }
 411
 412        status = cmd.rsp.arg[2] & 0xffff;
 413        if (status & BIT_0)
 414                npar_info->min_tx_bw = MSW(cmd.rsp.arg[2]);
 415        if (status & BIT_1)
 416                npar_info->max_tx_bw = LSW(cmd.rsp.arg[3]);
 417        if (status & BIT_2)
 418                npar_info->max_tx_ques = MSW(cmd.rsp.arg[3]);
 419        if (status & BIT_3)
 420                npar_info->max_tx_mac_filters = LSW(cmd.rsp.arg[4]);
 421        if (status & BIT_4)
 422                npar_info->max_rx_mcast_mac_filters = MSW(cmd.rsp.arg[4]);
 423        if (status & BIT_5)
 424                npar_info->max_rx_ucast_mac_filters = LSW(cmd.rsp.arg[5]);
 425        if (status & BIT_6)
 426                npar_info->max_rx_ip_addr = MSW(cmd.rsp.arg[5]);
 427        if (status & BIT_7)
 428                npar_info->max_rx_lro_flow = LSW(cmd.rsp.arg[6]);
 429        if (status & BIT_8)
 430                npar_info->max_rx_status_rings = MSW(cmd.rsp.arg[6]);
 431        if (status & BIT_9)
 432                npar_info->max_rx_buf_rings = LSW(cmd.rsp.arg[7]);
 433
 434        npar_info->max_rx_ques = MSW(cmd.rsp.arg[7]);
 435        npar_info->max_tx_vlan_keys = LSW(cmd.rsp.arg[8]);
 436        npar_info->max_local_ipv6_addrs = MSW(cmd.rsp.arg[8]);
 437        npar_info->max_remote_ipv6_addrs = LSW(cmd.rsp.arg[9]);
 438
 439        dev_info(dev, "\n\tmin_tx_bw: %d, max_tx_bw: %d max_tx_ques: %d,\n"
 440                 "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n"
 441                 "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n"
 442                 "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n"
 443                 "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n"
 444                 "\tlocal_ipv6_addr: %d, remote_ipv6_addr: %d\n",
 445                 npar_info->min_tx_bw, npar_info->max_tx_bw,
 446                 npar_info->max_tx_ques, npar_info->max_tx_mac_filters,
 447                 npar_info->max_rx_mcast_mac_filters,
 448                 npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr,
 449                 npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings,
 450                 npar_info->max_rx_buf_rings, npar_info->max_rx_ques,
 451                 npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs,
 452                 npar_info->max_remote_ipv6_addrs);
 453
 454        qlcnic_free_mbx_args(&cmd);
 455        return err;
 456}
 457
 458static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter,
 459                                      struct qlcnic_cmd_args *cmd)
 460{
 461        adapter->rx_pvid = (cmd->rsp.arg[1] >> 16) & 0xffff;
 462        adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
 463        return 0;
 464}
 465
 466static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
 467                                            struct qlcnic_cmd_args *cmd)
 468{
 469        struct qlcnic_sriov *sriov = adapter->ahw->sriov;
 470        int i, num_vlans;
 471        u16 *vlans;
 472
 473        if (sriov->allowed_vlans)
 474                return 0;
 475
 476        sriov->any_vlan = cmd->rsp.arg[2] & 0xf;
 477        if (!sriov->any_vlan)
 478                return 0;
 479
 480        sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16;
 481        num_vlans = sriov->num_allowed_vlans;
 482        sriov->allowed_vlans = kzalloc(sizeof(u16) * num_vlans, GFP_KERNEL);
 483        if (!sriov->allowed_vlans)
 484                return -ENOMEM;
 485
 486        vlans = (u16 *)&cmd->rsp.arg[3];
 487        for (i = 0; i < num_vlans; i++)
 488                sriov->allowed_vlans[i] = vlans[i];
 489
 490        return 0;
 491}
 492
 493static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
 494{
 495        struct qlcnic_sriov *sriov = adapter->ahw->sriov;
 496        struct qlcnic_cmd_args cmd;
 497        int ret;
 498
 499        ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
 500        if (ret)
 501                return ret;
 502
 503        ret = qlcnic_issue_cmd(adapter, &cmd);
 504        if (ret) {
 505                dev_err(&adapter->pdev->dev, "Failed to get ACL, err=%d\n",
 506                        ret);
 507        } else {
 508                sriov->vlan_mode = cmd.rsp.arg[1] & 0x3;
 509                switch (sriov->vlan_mode) {
 510                case QLC_GUEST_VLAN_MODE:
 511                        ret = qlcnic_sriov_set_guest_vlan_mode(adapter, &cmd);
 512                        break;
 513                case QLC_PVID_MODE:
 514                        ret = qlcnic_sriov_set_pvid_mode(adapter, &cmd);
 515                        break;
 516                }
 517        }
 518
 519        qlcnic_free_mbx_args(&cmd);
 520        return ret;
 521}
 522
 523static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
 524{
 525        struct qlcnic_info nic_info;
 526        struct qlcnic_hardware_context *ahw = adapter->ahw;
 527        int err;
 528
 529        err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0);
 530        if (err)
 531                return err;
 532
 533        err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
 534        if (err)
 535                return -EIO;
 536
 537        err = qlcnic_sriov_get_vf_acl(adapter);
 538        if (err)
 539                return err;
 540
 541        if (qlcnic_83xx_get_port_info(adapter))
 542                return -EIO;
 543
 544        qlcnic_sriov_vf_cfg_buff_desc(adapter);
 545        adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
 546        dev_info(&adapter->pdev->dev, "HAL Version: %d\n",
 547                 adapter->ahw->fw_hal_version);
 548
 549        ahw->physical_port = (u8) nic_info.phys_port;
 550        ahw->switch_mode = nic_info.switch_mode;
 551        ahw->max_mtu = nic_info.max_mtu;
 552        ahw->op_mode = nic_info.op_mode;
 553        ahw->capabilities = nic_info.capabilities;
 554        return 0;
 555}
 556
 557static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
 558                                 int pci_using_dac)
 559{
 560        int err;
 561
 562        INIT_LIST_HEAD(&adapter->vf_mc_list);
 563        if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
 564                dev_warn(&adapter->pdev->dev,
 565                         "Device does not support MSI interrupts\n");
 566
 567        err = qlcnic_setup_intr(adapter, 1);
 568        if (err) {
 569                dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
 570                goto err_out_disable_msi;
 571        }
 572
 573        err = qlcnic_83xx_setup_mbx_intr(adapter);
 574        if (err)
 575                goto err_out_disable_msi;
 576
 577        err = qlcnic_sriov_init(adapter, 1);
 578        if (err)
 579                goto err_out_disable_mbx_intr;
 580
 581        err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
 582        if (err)
 583                goto err_out_cleanup_sriov;
 584
 585        err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
 586        if (err)
 587                goto err_out_disable_bc_intr;
 588
 589        err = qlcnic_sriov_vf_init_driver(adapter);
 590        if (err)
 591                goto err_out_send_channel_term;
 592
 593        err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
 594        if (err)
 595                goto err_out_send_channel_term;
 596
 597        pci_set_drvdata(adapter->pdev, adapter);
 598        dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
 599                 adapter->netdev->name);
 600        qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
 601                             adapter->ahw->idc.delay);
 602        return 0;
 603
 604err_out_send_channel_term:
 605        qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
 606
 607err_out_disable_bc_intr:
 608        qlcnic_sriov_cfg_bc_intr(adapter, 0);
 609
 610err_out_cleanup_sriov:
 611        __qlcnic_sriov_cleanup(adapter);
 612
 613err_out_disable_mbx_intr:
 614        qlcnic_83xx_free_mbx_intr(adapter);
 615
 616err_out_disable_msi:
 617        qlcnic_teardown_intr(adapter);
 618        return err;
 619}
 620
 621static int qlcnic_sriov_check_dev_ready(struct qlcnic_adapter *adapter)
 622{
 623        u32 state;
 624
 625        do {
 626                msleep(20);
 627                if (++adapter->fw_fail_cnt > QLC_BC_CMD_MAX_RETRY_CNT)
 628                        return -EIO;
 629                state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
 630        } while (state != QLC_83XX_IDC_DEV_READY);
 631
 632        return 0;
 633}
 634
 635int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac)
 636{
 637        struct qlcnic_hardware_context *ahw = adapter->ahw;
 638        int err;
 639
 640        spin_lock_init(&ahw->mbx_lock);
 641        set_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
 642        set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status);
 643        ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
 644        ahw->reset_context = 0;
 645        adapter->fw_fail_cnt = 0;
 646        ahw->msix_supported = 1;
 647        adapter->need_fw_reset = 0;
 648        adapter->flags |= QLCNIC_TX_INTR_SHARED;
 649
 650        err = qlcnic_sriov_check_dev_ready(adapter);
 651        if (err)
 652                return err;
 653
 654        err = qlcnic_sriov_setup_vf(adapter, pci_using_dac);
 655        if (err)
 656                return err;
 657
 658        if (qlcnic_read_mac_addr(adapter))
 659                dev_warn(&adapter->pdev->dev, "failed to read mac addr\n");
 660
 661        INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
 662
 663        clear_bit(__QLCNIC_RESETTING, &adapter->state);
 664        return 0;
 665}
 666
 667void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *adapter)
 668{
 669        struct qlcnic_hardware_context *ahw = adapter->ahw;
 670
 671        ahw->op_mode = QLCNIC_SRIOV_VF_FUNC;
 672        dev_info(&adapter->pdev->dev,
 673                 "HAL Version: %d Non Privileged SRIOV function\n",
 674                 ahw->fw_hal_version);
 675        adapter->nic_ops = &qlcnic_sriov_vf_ops;
 676        set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
 677        return;
 678}
 679
 680void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *ahw)
 681{
 682        ahw->hw_ops             = &qlcnic_sriov_vf_hw_ops;
 683        ahw->reg_tbl            = (u32 *)qlcnic_83xx_reg_tbl;
 684        ahw->ext_reg_tbl        = (u32 *)qlcnic_83xx_ext_reg_tbl;
 685}
 686
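/* Payload size of fragment 'curr_frag' (zero based) for a message of
 * 'real_pay_size' bytes: a full QLC_BC_PAYLOAD_SZ for every fragment but
 * the last, and the remainder for the last one. For example, a 2100-byte
 * payload with QLC_BC_PAYLOAD_SZ of 1008 is carried as fragments of
 * 1008, 1008 and 84 bytes.
 */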
 687static u32 qlcnic_sriov_get_bc_paysize(u32 real_pay_size, u8 curr_frag)
 688{
 689        u32 pay_size;
 690
 691        pay_size = real_pay_size / ((curr_frag + 1) * QLC_BC_PAYLOAD_SZ);
 692
 693        if (pay_size)
 694                pay_size = QLC_BC_PAYLOAD_SZ;
 695        else
 696                pay_size = real_pay_size % QLC_BC_PAYLOAD_SZ;
 697
 698        return pay_size;
 699}
 700
 701int qlcnic_sriov_func_to_index(struct qlcnic_adapter *adapter, u8 pci_func)
 702{
 703        struct qlcnic_vf_info *vf_info = adapter->ahw->sriov->vf_info;
 704        u8 i;
 705
 706        if (qlcnic_sriov_vf_check(adapter))
 707                return 0;
 708
 709        for (i = 0; i < adapter->ahw->sriov->num_vfs; i++) {
 710                if (vf_info[i].pci_func == pci_func)
 711                        return i;
 712        }
 713
 714        return -EINVAL;
 715}
 716
 717static inline int qlcnic_sriov_alloc_bc_trans(struct qlcnic_bc_trans **trans)
 718{
 719        *trans = kzalloc(sizeof(struct qlcnic_bc_trans), GFP_ATOMIC);
 720        if (!*trans)
 721                return -ENOMEM;
 722
 723        init_completion(&(*trans)->resp_cmpl);
 724        return 0;
 725}
 726
 727static inline int qlcnic_sriov_alloc_bc_msg(struct qlcnic_bc_hdr **hdr,
 728                                            u32 size)
 729{
 730        *hdr = kzalloc(sizeof(struct qlcnic_bc_hdr) * size, GFP_ATOMIC);
 731        if (!*hdr)
 732                return -ENOMEM;
 733
 734        return 0;
 735}
 736
 737static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type)
 738{
 739        const struct qlcnic_mailbox_metadata *mbx_tbl;
 740        int i, size;
 741
 742        mbx_tbl = qlcnic_sriov_bc_mbx_tbl;
 743        size = ARRAY_SIZE(qlcnic_sriov_bc_mbx_tbl);
 744
 745        for (i = 0; i < size; i++) {
 746                if (type == mbx_tbl[i].cmd) {
 747                        mbx->op_type = QLC_BC_CMD;
 748                        mbx->req.num = mbx_tbl[i].in_args;
 749                        mbx->rsp.num = mbx_tbl[i].out_args;
 750                        mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
 751                                               GFP_ATOMIC);
 752                        if (!mbx->req.arg)
 753                                return -ENOMEM;
 754                        mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32),
 755                                               GFP_ATOMIC);
 756                        if (!mbx->rsp.arg) {
 757                                kfree(mbx->req.arg);
 758                                mbx->req.arg = NULL;
 759                                return -ENOMEM;
 760                        }
 761                        memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
 762                        memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
 763                        mbx->req.arg[0] = (type | (mbx->req.num << 16) |
 764                                           (3 << 29));
 765                        mbx->rsp.arg[0] = (type & 0xffff) | mbx->rsp.num << 16;
 766                        return 0;
 767                }
 768        }
 769        return -EINVAL;
 770}
 771
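/* Build the fragment headers for a back-channel transaction. For a command
 * (QLC_BC_COMMAND) the request/response payloads come from the caller's
 * cmd args and the fragment count is the payload size divided by
 * QLC_BC_PAYLOAD_SZ, rounded up; for a response the cmd args are pointed
 * at the transaction's payload buffers instead. Every fragment header
 * carries the sequence id, fragment number and command opcode.
 */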
 772static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
 773                                       struct qlcnic_cmd_args *cmd,
 774                                       u16 seq, u8 msg_type)
 775{
 776        struct qlcnic_bc_hdr *hdr;
 777        int i;
 778        u32 num_regs, bc_pay_sz;
 779        u16 remainder;
 780        u8 cmd_op, num_frags, t_num_frags;
 781
 782        bc_pay_sz = QLC_BC_PAYLOAD_SZ;
 783        if (msg_type == QLC_BC_COMMAND) {
 784                trans->req_pay = (struct qlcnic_bc_payload *)cmd->req.arg;
 785                trans->rsp_pay = (struct qlcnic_bc_payload *)cmd->rsp.arg;
 786                num_regs = cmd->req.num;
 787                trans->req_pay_size = (num_regs * 4);
 788                num_regs = cmd->rsp.num;
 789                trans->rsp_pay_size = (num_regs * 4);
 790                cmd_op = cmd->req.arg[0] & 0xff;
 791                remainder = (trans->req_pay_size) % (bc_pay_sz);
 792                num_frags = (trans->req_pay_size) / (bc_pay_sz);
 793                if (remainder)
 794                        num_frags++;
 795                t_num_frags = num_frags;
 796                if (qlcnic_sriov_alloc_bc_msg(&trans->req_hdr, num_frags))
 797                        return -ENOMEM;
 798                remainder = (trans->rsp_pay_size) % (bc_pay_sz);
 799                num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
 800                if (remainder)
 801                        num_frags++;
 802                if (qlcnic_sriov_alloc_bc_msg(&trans->rsp_hdr, num_frags))
 803                        return -ENOMEM;
 804                num_frags  = t_num_frags;
 805                hdr = trans->req_hdr;
 806        }  else {
 807                cmd->req.arg = (u32 *)trans->req_pay;
 808                cmd->rsp.arg = (u32 *)trans->rsp_pay;
 809                cmd_op = cmd->req.arg[0] & 0xff;
 810                remainder = (trans->rsp_pay_size) % (bc_pay_sz);
 811                num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
 812                if (remainder)
 813                        num_frags++;
 814                cmd->req.num = trans->req_pay_size / 4;
 815                cmd->rsp.num = trans->rsp_pay_size / 4;
 816                hdr = trans->rsp_hdr;
 817                cmd->op_type = trans->req_hdr->op_type;
 818        }
 819
 820        trans->trans_id = seq;
 821        trans->cmd_id = cmd_op;
 822        for (i = 0; i < num_frags; i++) {
 823                hdr[i].version = 2;
 824                hdr[i].msg_type = msg_type;
 825                hdr[i].op_type = cmd->op_type;
 826                hdr[i].num_cmds = 1;
 827                hdr[i].num_frags = num_frags;
 828                hdr[i].frag_num = i + 1;
 829                hdr[i].cmd_op = cmd_op;
 830                hdr[i].seq_id = seq;
 831        }
 832        return 0;
 833}
 834
 835static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *trans)
 836{
 837        if (!trans)
 838                return;
 839        kfree(trans->req_hdr);
 840        kfree(trans->rsp_hdr);
 841        kfree(trans);
 842}
 843
 844static int qlcnic_sriov_clear_trans(struct qlcnic_vf_info *vf,
 845                                    struct qlcnic_bc_trans *trans, u8 type)
 846{
 847        struct qlcnic_trans_list *t_list;
 848        unsigned long flags;
 849        int ret = 0;
 850
 851        if (type == QLC_BC_RESPONSE) {
 852                t_list = &vf->rcv_act;
 853                spin_lock_irqsave(&t_list->lock, flags);
 854                t_list->count--;
 855                list_del(&trans->list);
 856                if (t_list->count > 0)
 857                        ret = 1;
 858                spin_unlock_irqrestore(&t_list->lock, flags);
 859        }
 860        if (type == QLC_BC_COMMAND) {
 861                while (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
 862                        msleep(100);
 863                vf->send_cmd = NULL;
 864                clear_bit(QLC_BC_VF_SEND, &vf->state);
 865        }
 866        return ret;
 867}
 868
 869static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov *sriov,
 870                                         struct qlcnic_vf_info *vf,
 871                                         work_func_t func)
 872{
 873        if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
 874            vf->adapter->need_fw_reset)
 875                return;
 876
 877        queue_work(sriov->bc.bc_trans_wq, &vf->trans_work);
 878}
 879
 880static inline void qlcnic_sriov_wait_for_resp(struct qlcnic_bc_trans *trans)
 881{
 882        struct completion *cmpl = &trans->resp_cmpl;
 883
 884        if (wait_for_completion_timeout(cmpl, QLC_MBOX_RESP_TIMEOUT))
 885                trans->trans_state = QLC_END;
 886        else
 887                trans->trans_state = QLC_ABORT;
 888
 889        return;
 890}
 891
 892static void qlcnic_sriov_handle_multi_frags(struct qlcnic_bc_trans *trans,
 893                                            u8 type)
 894{
 895        if (type == QLC_BC_RESPONSE) {
 896                trans->curr_rsp_frag++;
 897                if (trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
 898                        trans->trans_state = QLC_INIT;
 899                else
 900                        trans->trans_state = QLC_END;
 901        } else {
 902                trans->curr_req_frag++;
 903                if (trans->curr_req_frag < trans->req_hdr->num_frags)
 904                        trans->trans_state = QLC_INIT;
 905                else
 906                        trans->trans_state = QLC_WAIT_FOR_RESP;
 907        }
 908}
 909
 910static void qlcnic_sriov_wait_for_channel_free(struct qlcnic_bc_trans *trans,
 911                                               u8 type)
 912{
 913        struct qlcnic_vf_info *vf = trans->vf;
 914        struct completion *cmpl = &vf->ch_free_cmpl;
 915
 916        if (!wait_for_completion_timeout(cmpl, QLC_MBOX_CH_FREE_TIMEOUT)) {
 917                trans->trans_state = QLC_ABORT;
 918                return;
 919        }
 920
 921        clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
 922        qlcnic_sriov_handle_multi_frags(trans, type);
 923}
 924
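/* Copy a received back-channel fragment out of the firmware mailbox
 * registers: the qlcnic_bc_hdr words first, then 'size' bytes of payload.
 */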
 925static void qlcnic_sriov_pull_bc_msg(struct qlcnic_adapter *adapter,
 926                                     u32 *hdr, u32 *pay, u32 size)
 927{
 928        struct qlcnic_hardware_context *ahw = adapter->ahw;
 929        u32 fw_mbx;
 930        u8 i, max = 2, hdr_size, j;
 931
 932        hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
 933        max = (size / sizeof(u32)) + hdr_size;
 934
 935        fw_mbx = readl(QLCNIC_MBX_FW(ahw, 0));
 936        for (i = 2, j = 0; j < hdr_size; i++, j++)
 937                *(hdr++) = readl(QLCNIC_MBX_FW(ahw, i));
 938        for (; j < max; i++, j++)
 939                *(pay++) = readl(QLCNIC_MBX_FW(ahw, i));
 940}
 941
 942static int __qlcnic_sriov_issue_bc_post(struct qlcnic_vf_info *vf)
 943{
 944        int ret = -EBUSY;
 945        u32 timeout = 10000;
 946
 947        do {
 948                if (!test_and_set_bit(QLC_BC_VF_CHANNEL, &vf->state)) {
 949                        ret = 0;
 950                        break;
 951                }
 952                mdelay(1);
 953        } while (--timeout);
 954
 955        return ret;
 956}
 957
 958static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type)
 959{
 960        struct qlcnic_vf_info *vf = trans->vf;
 961        u32 pay_size, hdr_size;
 962        u32 *hdr, *pay;
 963        int ret;
 964        u8 pci_func = trans->func_id;
 965
 966        if (__qlcnic_sriov_issue_bc_post(vf))
 967                return -EBUSY;
 968
 969        if (type == QLC_BC_COMMAND) {
 970                hdr = (u32 *)(trans->req_hdr + trans->curr_req_frag);
 971                pay = (u32 *)(trans->req_pay + trans->curr_req_frag);
 972                hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
 973                pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
 974                                                       trans->curr_req_frag);
 975                pay_size = (pay_size / sizeof(u32));
 976        } else {
 977                hdr = (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag);
 978                pay = (u32 *)(trans->rsp_pay + trans->curr_rsp_frag);
 979                hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
 980                pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
 981                                                       trans->curr_rsp_frag);
 982                pay_size = (pay_size / sizeof(u32));
 983        }
 984
 985        ret = qlcnic_sriov_post_bc_msg(vf->adapter, hdr, pay,
 986                                       pci_func, pay_size);
 987        return ret;
 988}
 989
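/* Drive a back-channel transaction through its states: QLC_INIT posts the
 * current fragment, QLC_WAIT_FOR_CHANNEL_FREE waits for the channel-free
 * event before moving on to the next fragment, then to QLC_WAIT_FOR_RESP
 * (commands) or QLC_END (responses); QLC_END/QLC_ABORT terminate the loop.
 * An FLR or a pending FW reset forces QLC_ABORT.
 */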
 990static int __qlcnic_sriov_send_bc_msg(struct qlcnic_bc_trans *trans,
 991                                      struct qlcnic_vf_info *vf, u8 type)
 992{
 993        bool flag = true;
 994        int err = -EIO;
 995
 996        while (flag) {
 997                if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
 998                    vf->adapter->need_fw_reset)
 999                        trans->trans_state = QLC_ABORT;
1000
1001                switch (trans->trans_state) {
1002                case QLC_INIT:
1003                        trans->trans_state = QLC_WAIT_FOR_CHANNEL_FREE;
1004                        if (qlcnic_sriov_issue_bc_post(trans, type))
1005                                trans->trans_state = QLC_ABORT;
1006                        break;
1007                case QLC_WAIT_FOR_CHANNEL_FREE:
1008                        qlcnic_sriov_wait_for_channel_free(trans, type);
1009                        break;
1010                case QLC_WAIT_FOR_RESP:
1011                        qlcnic_sriov_wait_for_resp(trans);
1012                        break;
1013                case QLC_END:
1014                        err = 0;
1015                        flag = false;
1016                        break;
1017                case QLC_ABORT:
1018                        err = -EIO;
1019                        flag = false;
1020                        clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
1021                        break;
1022                default:
1023                        err = -EIO;
1024                        flag = false;
1025                }
1026        }
1027        return err;
1028}
1029
1030static int qlcnic_sriov_send_bc_cmd(struct qlcnic_adapter *adapter,
1031                                    struct qlcnic_bc_trans *trans, int pci_func)
1032{
1033        struct qlcnic_vf_info *vf;
1034        int err, index = qlcnic_sriov_func_to_index(adapter, pci_func);
1035
1036        if (index < 0)
1037                return -EIO;
1038
1039        vf = &adapter->ahw->sriov->vf_info[index];
1040        trans->vf = vf;
1041        trans->func_id = pci_func;
1042
1043        if (!test_bit(QLC_BC_VF_STATE, &vf->state)) {
1044                if (qlcnic_sriov_pf_check(adapter))
1045                        return -EIO;
1046                if (qlcnic_sriov_vf_check(adapter) &&
1047                    trans->cmd_id != QLCNIC_BC_CMD_CHANNEL_INIT)
1048                        return -EIO;
1049        }
1050
1051        mutex_lock(&vf->send_cmd_lock);
1052        vf->send_cmd = trans;
1053        err = __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_COMMAND);
1054        qlcnic_sriov_clear_trans(vf, trans, QLC_BC_COMMAND);
1055        mutex_unlock(&vf->send_cmd_lock);
1056        return err;
1057}
1058
1059static void __qlcnic_sriov_process_bc_cmd(struct qlcnic_adapter *adapter,
1060                                          struct qlcnic_bc_trans *trans,
1061                                          struct qlcnic_cmd_args *cmd)
1062{
1063#ifdef CONFIG_QLCNIC_SRIOV
1064        if (qlcnic_sriov_pf_check(adapter)) {
1065                qlcnic_sriov_pf_process_bc_cmd(adapter, trans, cmd);
1066                return;
1067        }
1068#endif
1069        cmd->rsp.arg[0] |= (0x9 << 25);
1070        return;
1071}
1072
1073static void qlcnic_sriov_process_bc_cmd(struct work_struct *work)
1074{
1075        struct qlcnic_vf_info *vf = container_of(work, struct qlcnic_vf_info,
1076                                                 trans_work);
1077        struct qlcnic_bc_trans *trans = NULL;
1078        struct qlcnic_adapter *adapter  = vf->adapter;
1079        struct qlcnic_cmd_args cmd;
1080        u8 req;
1081
1082        if (adapter->need_fw_reset)
1083                return;
1084
1085        if (test_bit(QLC_BC_VF_FLR, &vf->state))
1086                return;
1087
1088        trans = list_first_entry(&vf->rcv_act.wait_list,
1089                                 struct qlcnic_bc_trans, list);
1090        adapter = vf->adapter;
1091
1092        if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, trans->req_hdr->seq_id,
1093                                        QLC_BC_RESPONSE))
1094                goto cleanup_trans;
1095
1096        __qlcnic_sriov_process_bc_cmd(adapter, trans, &cmd);
1097        trans->trans_state = QLC_INIT;
1098        __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_RESPONSE);
1099
1100cleanup_trans:
1101        qlcnic_free_mbx_args(&cmd);
1102        req = qlcnic_sriov_clear_trans(vf, trans, QLC_BC_RESPONSE);
1103        qlcnic_sriov_cleanup_transaction(trans);
1104        if (req)
1105                qlcnic_sriov_schedule_bc_cmd(adapter->ahw->sriov, vf,
1106                                             qlcnic_sriov_process_bc_cmd);
1107}
1108
1109static void qlcnic_sriov_handle_bc_resp(struct qlcnic_bc_hdr *hdr,
1110                                        struct qlcnic_vf_info *vf)
1111{
1112        struct qlcnic_bc_trans *trans;
1113        u32 pay_size;
1114
1115        if (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
1116                return;
1117
1118        trans = vf->send_cmd;
1119
1120        if (trans == NULL)
1121                goto clear_send;
1122
1123        if (trans->trans_id != hdr->seq_id)
1124                goto clear_send;
1125
1126        pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
1127                                               trans->curr_rsp_frag);
1128        qlcnic_sriov_pull_bc_msg(vf->adapter,
1129                                 (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag),
1130                                 (u32 *)(trans->rsp_pay + trans->curr_rsp_frag),
1131                                 pay_size);
1132        if (++trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
1133                goto clear_send;
1134
1135        complete(&trans->resp_cmpl);
1136
1137clear_send:
1138        clear_bit(QLC_BC_VF_SEND, &vf->state);
1139}
1140
1141int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
1142                                struct qlcnic_vf_info *vf,
1143                                struct qlcnic_bc_trans *trans)
1144{
1145        struct qlcnic_trans_list *t_list = &vf->rcv_act;
1146
1147        t_list->count++;
1148        list_add_tail(&trans->list, &t_list->wait_list);
1149        if (t_list->count == 1)
1150                qlcnic_sriov_schedule_bc_cmd(sriov, vf,
1151                                             qlcnic_sriov_process_bc_cmd);
1152        return 0;
1153}
1154
1155static int qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
1156                                     struct qlcnic_vf_info *vf,
1157                                     struct qlcnic_bc_trans *trans)
1158{
1159        struct qlcnic_trans_list *t_list = &vf->rcv_act;
1160
1161        spin_lock(&t_list->lock);
1162
1163        __qlcnic_sriov_add_act_list(sriov, vf, trans);
1164
1165        spin_unlock(&t_list->lock);
1166        return 0;
1167}
1168
1169static void qlcnic_sriov_handle_pending_trans(struct qlcnic_sriov *sriov,
1170                                              struct qlcnic_vf_info *vf,
1171                                              struct qlcnic_bc_hdr *hdr)
1172{
1173        struct qlcnic_bc_trans *trans = NULL;
1174        struct list_head *node;
1175        u32 pay_size, curr_frag;
1176        u8 found = 0, active = 0;
1177
1178        spin_lock(&vf->rcv_pend.lock);
1179        if (vf->rcv_pend.count > 0) {
1180                list_for_each(node, &vf->rcv_pend.wait_list) {
1181                        trans = list_entry(node, struct qlcnic_bc_trans, list);
1182                        if (trans->trans_id == hdr->seq_id) {
1183                                found = 1;
1184                                break;
1185                        }
1186                }
1187        }
1188
1189        if (found) {
1190                curr_frag = trans->curr_req_frag;
1191                pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
1192                                                       curr_frag);
1193                qlcnic_sriov_pull_bc_msg(vf->adapter,
1194                                         (u32 *)(trans->req_hdr + curr_frag),
1195                                         (u32 *)(trans->req_pay + curr_frag),
1196                                         pay_size);
1197                trans->curr_req_frag++;
1198                if (trans->curr_req_frag >= hdr->num_frags) {
1199                        vf->rcv_pend.count--;
1200                        list_del(&trans->list);
1201                        active = 1;
1202                }
1203        }
1204        spin_unlock(&vf->rcv_pend.lock);
1205
1206        if (active)
1207                if (qlcnic_sriov_add_act_list(sriov, vf, trans))
1208                        qlcnic_sriov_cleanup_transaction(trans);
1209
1210        return;
1211}
1212
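/* Handle an incoming back-channel command: the first fragment allocates a
 * transaction and its mailbox args, follow-on fragments are matched
 * against the pending list by seq_id. Once all fragments have arrived the
 * transaction moves to the active list and is processed by
 * qlcnic_sriov_process_bc_cmd().
 */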
1213static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov,
1214                                       struct qlcnic_bc_hdr *hdr,
1215                                       struct qlcnic_vf_info *vf)
1216{
1217        struct qlcnic_bc_trans *trans;
1218        struct qlcnic_adapter *adapter = vf->adapter;
1219        struct qlcnic_cmd_args cmd;
1220        u32 pay_size;
1221        int err;
1222        u8 cmd_op;
1223
1224        if (adapter->need_fw_reset)
1225                return;
1226
1227        if (!test_bit(QLC_BC_VF_STATE, &vf->state) &&
1228            hdr->op_type != QLC_BC_CMD &&
1229            hdr->cmd_op != QLCNIC_BC_CMD_CHANNEL_INIT)
1230                return;
1231
1232        if (hdr->frag_num > 1) {
1233                qlcnic_sriov_handle_pending_trans(sriov, vf, hdr);
1234                return;
1235        }
1236
1237        cmd_op = hdr->cmd_op;
1238        if (qlcnic_sriov_alloc_bc_trans(&trans))
1239                return;
1240
1241        if (hdr->op_type == QLC_BC_CMD)
1242                err = qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op);
1243        else
1244                err = qlcnic_alloc_mbx_args(&cmd, adapter, cmd_op);
1245
1246        if (err) {
1247                qlcnic_sriov_cleanup_transaction(trans);
1248                return;
1249        }
1250
1251        cmd.op_type = hdr->op_type;
1252        if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, hdr->seq_id,
1253                                        QLC_BC_COMMAND)) {
1254                qlcnic_free_mbx_args(&cmd);
1255                qlcnic_sriov_cleanup_transaction(trans);
1256                return;
1257        }
1258
1259        pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
1260                                         trans->curr_req_frag);
1261        qlcnic_sriov_pull_bc_msg(vf->adapter,
1262                                 (u32 *)(trans->req_hdr + trans->curr_req_frag),
1263                                 (u32 *)(trans->req_pay + trans->curr_req_frag),
1264                                 pay_size);
1265        trans->func_id = vf->pci_func;
1266        trans->vf = vf;
1267        trans->trans_id = hdr->seq_id;
1268        trans->curr_req_frag++;
1269
1270        if (qlcnic_sriov_soft_flr_check(adapter, trans, vf))
1271                return;
1272
1273        if (trans->curr_req_frag == trans->req_hdr->num_frags) {
1274                if (qlcnic_sriov_add_act_list(sriov, vf, trans)) {
1275                        qlcnic_free_mbx_args(&cmd);
1276                        qlcnic_sriov_cleanup_transaction(trans);
1277                }
1278        } else {
1279                spin_lock(&vf->rcv_pend.lock);
1280                list_add_tail(&trans->list, &vf->rcv_pend.wait_list);
1281                vf->rcv_pend.count++;
1282                spin_unlock(&vf->rcv_pend.lock);
1283        }
1284}
1285
1286static void qlcnic_sriov_handle_msg_event(struct qlcnic_sriov *sriov,
1287                                          struct qlcnic_vf_info *vf)
1288{
1289        struct qlcnic_bc_hdr hdr;
1290        u32 *ptr = (u32 *)&hdr;
1291        u8 msg_type, i;
1292
1293        for (i = 2; i < 6; i++)
1294                ptr[i - 2] = readl(QLCNIC_MBX_FW(vf->adapter->ahw, i));
1295        msg_type = hdr.msg_type;
1296
1297        switch (msg_type) {
1298        case QLC_BC_COMMAND:
1299                qlcnic_sriov_handle_bc_cmd(sriov, &hdr, vf);
1300                break;
1301        case QLC_BC_RESPONSE:
1302                qlcnic_sriov_handle_bc_resp(&hdr, vf);
1303                break;
1304        }
1305}
1306
1307static void qlcnic_sriov_handle_flr_event(struct qlcnic_sriov *sriov,
1308                                          struct qlcnic_vf_info *vf)
1309{
1310        struct qlcnic_adapter *adapter = vf->adapter;
1311
1312        if (qlcnic_sriov_pf_check(adapter))
1313                qlcnic_sriov_pf_handle_flr(sriov, vf);
1314        else
1315                dev_err(&adapter->pdev->dev,
1316                        "Invalid event to VF. VF should not get FLR event\n");
1317}
1318
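/* Dispatch a back-channel event AEN: look up the VF by the target function
 * id encoded in the event word, then complete a pending channel-free wait,
 * handle an FLR notification (PF only), or pull in a BC message.
 */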
1319void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *adapter, u32 event)
1320{
1321        struct qlcnic_vf_info *vf;
1322        struct qlcnic_sriov *sriov;
1323        int index;
1324        u8 pci_func;
1325
1326        sriov = adapter->ahw->sriov;
1327        pci_func = qlcnic_sriov_target_func_id(event);
1328        index = qlcnic_sriov_func_to_index(adapter, pci_func);
1329
1330        if (index < 0)
1331                return;
1332
1333        vf = &sriov->vf_info[index];
1334        vf->pci_func = pci_func;
1335
1336        if (qlcnic_sriov_channel_free_check(event))
1337                complete(&vf->ch_free_cmpl);
1338
1339        if (qlcnic_sriov_flr_check(event)) {
1340                qlcnic_sriov_handle_flr_event(sriov, vf);
1341                return;
1342        }
1343
1344        if (qlcnic_sriov_bc_msg_check(event))
1345                qlcnic_sriov_handle_msg_event(sriov, vf);
1346}
1347
1348int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable)
1349{
1350        struct qlcnic_cmd_args cmd;
1351        int err;
1352
1353        if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
1354                return 0;
1355
1356        if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_BC_EVENT_SETUP))
1357                return -ENOMEM;
1358
1359        if (enable)
1360                cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);
1361
1362        err = qlcnic_83xx_mbx_op(adapter, &cmd);
1363
1364        if (err != QLCNIC_RCODE_SUCCESS) {
1365                dev_err(&adapter->pdev->dev,
1366                        "Failed to %s bc events, err=%d\n",
1367                        (enable ? "enable" : "disable"), err);
1368        }
1369
1370        qlcnic_free_mbx_args(&cmd);
1371        return err;
1372}
1373
1374static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
1375                                     struct qlcnic_bc_trans *trans)
1376{
1377        u8 max = QLC_BC_CMD_MAX_RETRY_CNT;
1378        u32 state;
1379
1380        state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
1381        if (state == QLC_83XX_IDC_DEV_READY) {
1382                msleep(20);
1383                clear_bit(QLC_BC_VF_CHANNEL, &trans->vf->state);
1384                trans->trans_state = QLC_INIT;
1385                if (++adapter->fw_fail_cnt > max)
1386                        return -EIO;
1387                else
1388                        return 0;
1389        }
1390
1391        return -EIO;
1392}
1393
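/* VF mailbox entry point (hw_ops->mbx_cmd): wrap the command in a
 * back-channel transaction and send it to the PF as QLC_BC_COMMAND.
 * CHANNEL_INIT commands are retried after an adapter reset; on timeout
 * the mailbox is marked not ready and a FW reset is requested.
 */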
1394static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *adapter,
1395                                  struct qlcnic_cmd_args *cmd)
1396{
1397        struct qlcnic_hardware_context *ahw = adapter->ahw;
1398        struct device *dev = &adapter->pdev->dev;
1399        struct qlcnic_bc_trans *trans;
1400        int err;
1401        u32 rsp_data, opcode, mbx_err_code, rsp;
1402        u16 seq = ++adapter->ahw->sriov->bc.trans_counter;
1403        u8 func = ahw->pci_func;
1404
1405        rsp = qlcnic_sriov_alloc_bc_trans(&trans);
1406        if (rsp)
1407                return rsp;
1408
1409        rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND);
1410        if (rsp)
1411                goto cleanup_transaction;
1412
1413retry:
1414        if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
1415                rsp = -EIO;
1416                QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n",
1417                      QLCNIC_MBX_RSP(cmd->req.arg[0]), func);
1418                goto err_out;
1419        }
1420
1421        err = qlcnic_sriov_send_bc_cmd(adapter, trans, func);
1422        if (err) {
1423                dev_err(dev, "MBX command 0x%x timed out for VF %d\n",
1424                        (cmd->req.arg[0] & 0xffff), func);
1425                rsp = QLCNIC_RCODE_TIMEOUT;
1426
 1427                /* After an adapter reset the PF driver may take some time to
 1428                 * respond to the VF's request. Retry up to the maximum retry count.
 1429                 */
1430                if ((trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) &&
1431                    !qlcnic_sriov_retry_bc_cmd(adapter, trans))
1432                        goto retry;
1433
1434                goto err_out;
1435        }
1436
1437        rsp_data = cmd->rsp.arg[0];
1438        mbx_err_code = QLCNIC_MBX_STATUS(rsp_data);
1439        opcode = QLCNIC_MBX_RSP(cmd->req.arg[0]);
1440
1441        if ((mbx_err_code == QLCNIC_MBX_RSP_OK) ||
1442            (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) {
1443                rsp = QLCNIC_RCODE_SUCCESS;
1444        } else {
1445                rsp = mbx_err_code;
1446                if (!rsp)
1447                        rsp = 1;
1448                dev_err(dev,
1449                        "MBX command 0x%x failed with err:0x%x for VF %d\n",
1450                        opcode, mbx_err_code, func);
1451        }
1452
1453err_out:
1454        if (rsp == QLCNIC_RCODE_TIMEOUT) {
1455                ahw->reset_context = 1;
1456                adapter->need_fw_reset = 1;
1457                clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
1458        }
1459
1460cleanup_transaction:
1461        qlcnic_sriov_cleanup_transaction(trans);
1462        return rsp;
1463}
1464
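    /* Send a CHANNEL_INIT or CHANNEL_TERM request to the PF and track the
     * resulting channel state in vf->state. A response with bits 31:25 set
     * to 2 is returned as-is; its meaning is defined by the PF/firmware.
     */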
1465int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
1466{
1467        struct qlcnic_cmd_args cmd;
1468        struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
1469        int ret;
1470
1471        if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op))
1472                return -ENOMEM;
1473
1474        ret = qlcnic_issue_cmd(adapter, &cmd);
1475        if (ret) {
1476                dev_err(&adapter->pdev->dev,
1477                        "Failed bc channel %s %d\n", cmd_op ? "term" : "init",
1478                        ret);
1479                goto out;
1480        }
1481
1482        cmd_op = (cmd.rsp.arg[0] & 0xff);
1483        /* exit through 'out' so the mbx args allocated above are freed */
1484        if (cmd.rsp.arg[0] >> 25 == 2) {
                    ret = 2;
                    goto out;
            }
1485        if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
1486                set_bit(QLC_BC_VF_STATE, &vf->state);
1487        else
1488                clear_bit(QLC_BC_VF_STATE, &vf->state);
1489
1490out:
1491        qlcnic_free_mbx_args(&cmd);
1492        return ret;
1493}
1494
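    /* Move the addresses collected on adapter->vf_mc_list to a private list
     * under the netdev address lock, then program each one into the
     * firmware filter tagged with the given VLAN.
     */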
1495void qlcnic_vf_add_mc_list(struct net_device *netdev, u16 vlan)
1496{
1497        struct qlcnic_adapter *adapter = netdev_priv(netdev);
1498        struct qlcnic_mac_list_s *cur;
1499        struct list_head *head, tmp_list;
1500
1501        INIT_LIST_HEAD(&tmp_list);
1502        head = &adapter->vf_mc_list;
1503        netif_addr_lock_bh(netdev);
1504
1505        while (!list_empty(head)) {
1506                cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
1507                list_move(&cur->list, &tmp_list);
1508        }
1509
1510        netif_addr_unlock_bh(netdev);
1511
1512        while (!list_empty(&tmp_list)) {
1513                cur = list_entry((&tmp_list)->next,
1514                                 struct qlcnic_mac_list_s, list);
1515                qlcnic_nic_add_mac(adapter, cur->mac_addr, vlan);
1516                list_del(&cur->list);
1517                kfree(cur);
1518        }
1519}
1520
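    /* Cancel and free every pending entry on the back-channel async work
     * list; cancel_work_sync() guarantees a running handler has finished
     * before its entry is freed.
     */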
1521void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
1522{
1523        struct list_head *head = &bc->async_list;
1524        struct qlcnic_async_work_list *entry;
1525
1526        while (!list_empty(head)) {
1527                entry = list_entry(head->next, struct qlcnic_async_work_list,
1528                                   list);
1529                cancel_work_sync(&entry->work);
1530                list_del(&entry->list);
1531                kfree(entry);
1532        }
1533}
1534
1535static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
1536{
1537        struct qlcnic_adapter *adapter = netdev_priv(netdev);
1538        u16 vlan;
1539
1540        if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
1541                return;
1542
1543        vlan = adapter->ahw->sriov->vlan;
1544        __qlcnic_set_multi(netdev, vlan);
1545}
1546
1547static void qlcnic_sriov_handle_async_multi(struct work_struct *work)
1548{
1549        struct qlcnic_async_work_list *entry;
1550        struct net_device *netdev;
1551
1552        entry = container_of(work, struct qlcnic_async_work_list, work);
1553        netdev = (struct net_device *)entry->ptr;
1554
1555        qlcnic_sriov_vf_set_multi(netdev);
1557}
1558
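    /* Return an async work entry whose work is no longer pending, or
     * allocate a new one (GFP_ATOMIC) and add it to the back-channel list.
     * Returns NULL if allocation fails.
     */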
1559static struct qlcnic_async_work_list *
1560qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
1561{
1562        struct list_head *node;
1563        struct qlcnic_async_work_list *entry = NULL;
1564        u8 empty = 0;
1565
1566        list_for_each(node, &bc->async_list) {
1567                entry = list_entry(node, struct qlcnic_async_work_list, list);
1568                if (!work_pending(&entry->work)) {
1569                        empty = 1;
1570                        break;
1571                }
1572        }
1573
1574        if (!empty) {
1575                entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1576                if (!entry)
1578                        return NULL;
1579                list_add_tail(&entry->list, &bc->async_list);
1580        }
1581
1582        return entry;
1583}
1584
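    /* Queue 'func' on the back-channel async workqueue using a free (or
     * newly allocated) work-list entry; the request is silently dropped if
     * no entry can be obtained.
     */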
1585static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel *bc,
1586                                                work_func_t func, void *data)
1587{
1588        struct qlcnic_async_work_list *entry = NULL;
1589
1590        entry = qlcnic_sriov_get_free_node_async_work(bc);
1591        if (!entry)
1592                return;
1593
1594        entry->ptr = data;
1595        INIT_WORK(&entry->work, func);
1596        queue_work(bc->bc_async_wq, &entry->work);
1597}
1598
1599void qlcnic_sriov_vf_schedule_multi(struct net_device *netdev)
1600{
1602        struct qlcnic_adapter *adapter = netdev_priv(netdev);
1603        struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
1604
1605        if (adapter->need_fw_reset)
1606                return;
1607
1608        qlcnic_sriov_schedule_bc_async_work(bc, qlcnic_sriov_handle_async_multi,
1609                                            netdev);
1610}
1611
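    /* Bring the VF back up after a firmware/adapter reset: mark the mailbox
     * ready, re-enable mailbox interrupts, reconfigure back-channel event
     * interrupts, re-initialize the channel and the driver. Steps already
     * completed are unwound on failure.
     */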
1612static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
1613{
1614        int err;
1615
1616        set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
1617        qlcnic_83xx_enable_mbx_intrpt(adapter);
1618
1619        err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
1620        if (err)
1621                return err;
1622
1623        err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
1624        if (err)
1625                goto err_out_cleanup_bc_intr;
1626
1627        err = qlcnic_sriov_vf_init_driver(adapter);
1628        if (err)
1629                goto err_out_term_channel;
1630
1631        return 0;
1632
1633err_out_term_channel:
1634        qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
1635
1636err_out_cleanup_bc_intr:
1637        qlcnic_sriov_cfg_bc_intr(adapter, 0);
1638        return err;
1639}
1640
1641static void qlcnic_sriov_vf_attach(struct qlcnic_adapter *adapter)
1642{
1643        struct net_device *netdev = adapter->netdev;
1644
1645        if (netif_running(netdev)) {
1646                if (!qlcnic_up(adapter, netdev))
1647                        qlcnic_restore_indev_addr(netdev, NETDEV_UP);
1648        }
1649
1650        netif_device_attach(netdev);
1651}
1652
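    /* Quiesce the VF: disable the mailbox interrupt, detach the netdev,
     * bring the interface down if it was running and reset the first
     * num_msix - 1 interrupt table entries.
     */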
1653static void qlcnic_sriov_vf_detach(struct qlcnic_adapter *adapter)
1654{
1655        struct qlcnic_hardware_context *ahw = adapter->ahw;
1656        struct qlcnic_intrpt_config *intr_tbl = ahw->intr_tbl;
1657        struct net_device *netdev = adapter->netdev;
1658        u8 i, max_ints = ahw->num_msix - 1;
1659
1660        qlcnic_83xx_disable_mbx_intr(adapter);
1661        netif_device_detach(netdev);
1662        if (netif_running(netdev))
1663                qlcnic_down(adapter, netdev);
1664
1665        for (i = 0; i < max_ints; i++) {
1666                intr_tbl[i].id = i;
1667                intr_tbl[i].enabled = 0;
1668                intr_tbl[i].src = 0;
1669        }
1670        ahw->reset_context = 0;
1671}
1672
1673static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter *adapter)
1674{
1675        struct qlcnic_hardware_context *ahw = adapter->ahw;
1676        struct device *dev = &adapter->pdev->dev;
1677        struct qlc_83xx_idc *idc = &ahw->idc;
1678        u8 func = ahw->pci_func;
1679        u32 state;
1680
1681        if ((idc->prev_state == QLC_83XX_IDC_DEV_NEED_RESET) ||
1682            (idc->prev_state == QLC_83XX_IDC_DEV_INIT)) {
1683                if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
1684                        qlcnic_sriov_vf_attach(adapter);
1685                        adapter->fw_fail_cnt = 0;
1686                        dev_info(dev,
1687                                 "%s: Reinitialization of VF 0x%x done after FW reset\n",
1688                                 __func__, func);
1689                } else {
1690                        dev_err(dev,
1691                                "%s: Reinitialization of VF 0x%x failed after FW reset\n",
1692                                __func__, func);
1693                        state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
1694                        dev_info(dev, "Current state 0x%x after FW reset\n",
1695                                 state);
1696                }
1697        }
1698
1699        return 0;
1700}
1701
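    /* Handle a context-reset request while the device remains in the READY
     * state. The first two attempts only flag a firmware reset and wait to
     * see whether the firmware has actually failed; beyond
     * QLC_83XX_VF_RESET_FAIL_THRESH resets the VF is shut down, otherwise
     * it is detached and re-initialized.
     */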
1702static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
1703{
1704        struct qlcnic_hardware_context *ahw = adapter->ahw;
1705        struct device *dev = &adapter->pdev->dev;
1706        struct qlc_83xx_idc *idc = &ahw->idc;
1707        u8 func = ahw->pci_func;
1708        u32 state;
1709
1710        adapter->reset_ctx_cnt++;
1711
1712        /* Skip the context reset and check if FW is hung */
1713        if (adapter->reset_ctx_cnt < 3) {
1714                adapter->need_fw_reset = 1;
1715                clear_bit(QLC_83XX_MBX_READY, &idc->status);
1716                dev_info(dev,
1717                         "Resetting context, wait here to check if FW is in failed state\n");
1718                return 0;
1719        }
1720
1721        /* If the number of context resets exceeds the threshold,
1722         * fail the VF and shut its interface down.
1723         */
1724        if (adapter->reset_ctx_cnt > QLC_83XX_VF_RESET_FAIL_THRESH) {
1725                clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
1726                adapter->tx_timeo_cnt = 0;
1727                adapter->fw_fail_cnt = 0;
1728                adapter->reset_ctx_cnt = 0;
1729                qlcnic_sriov_vf_detach(adapter);
1730                dev_err(dev,
1731                        "Device context resets have exceeded the threshold, device interface will be shutdown\n");
1732                return -EIO;
1733        }
1734
1735        dev_info(dev, "Resetting context of VF 0x%x\n", func);
1736        dev_info(dev, "%s: Context reset count %d for VF 0x%x\n",
1737                 __func__, adapter->reset_ctx_cnt, func);
1738        set_bit(__QLCNIC_RESETTING, &adapter->state);
1739        adapter->need_fw_reset = 1;
1740        clear_bit(QLC_83XX_MBX_READY, &idc->status);
1741        qlcnic_sriov_vf_detach(adapter);
1742        adapter->need_fw_reset = 0;
1743
1744        if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
1745                qlcnic_sriov_vf_attach(adapter);
1746                adapter->tx_timeo_cnt = 0;
1747                adapter->reset_ctx_cnt = 0;
1748                adapter->fw_fail_cnt = 0;
1749                dev_info(dev, "Done resetting context for VF 0x%x\n", func);
1750        } else {
1751                dev_err(dev, "%s: Reinitialization of VF 0x%x failed\n",
1752                        __func__, func);
1753                state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
1754                dev_info(dev, "%s: Current state 0x%x\n", __func__, state);
1755        }
1756
1757        return 0;
1758}
1759
1760static int qlcnic_sriov_vf_idc_ready_state(struct qlcnic_adapter *adapter)
1761{
1762        struct qlcnic_hardware_context *ahw = adapter->ahw;
1763        int ret = 0;
1764
1765        if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY)
1766                ret = qlcnic_sriov_vf_handle_dev_ready(adapter);
1767        else if (ahw->reset_context)
1768                ret = qlcnic_sriov_vf_handle_context_reset(adapter);
1769
1770        clear_bit(__QLCNIC_RESETTING, &adapter->state);
1771        return ret;
1772}
1773
1774static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter *adapter)
1775{
1776        struct qlc_83xx_idc *idc = &adapter->ahw->idc;
1777
1778        dev_err(&adapter->pdev->dev, "Device is in failed state\n");
1779        if (idc->prev_state == QLC_83XX_IDC_DEV_READY)
1780                qlcnic_sriov_vf_detach(adapter);
1781
1782        clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
1783        clear_bit(__QLCNIC_RESETTING, &adapter->state);
1784        return -EIO;
1785}
1786
1787static int
1788qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
1789{
1790        struct qlc_83xx_idc *idc = &adapter->ahw->idc;
1791
1792        dev_info(&adapter->pdev->dev, "Device is in quiescent state\n");
1793        if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
1794                set_bit(__QLCNIC_RESETTING, &adapter->state);
1795                adapter->tx_timeo_cnt = 0;
1796                adapter->reset_ctx_cnt = 0;
1797                clear_bit(QLC_83XX_MBX_READY, &idc->status);
1798                qlcnic_sriov_vf_detach(adapter);
1799        }
1800
1801        return 0;
1802}
1803
1804static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter)
1805{
1806        struct qlc_83xx_idc *idc = &adapter->ahw->idc;
1807        u8 func = adapter->ahw->pci_func;
1808
1809        if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
1810                dev_err(&adapter->pdev->dev,
1811                        "Firmware hang detected by VF 0x%x\n", func);
1812                set_bit(__QLCNIC_RESETTING, &adapter->state);
1813                adapter->tx_timeo_cnt = 0;
1814                adapter->reset_ctx_cnt = 0;
1815                clear_bit(QLC_83XX_MBX_READY, &idc->status);
1816                qlcnic_sriov_vf_detach(adapter);
1817        }
1818        return 0;
1819}
1820
1821static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter *adapter)
1822{
1823        dev_err(&adapter->pdev->dev, "%s: Device in unknown state\n", __func__);
1824        return 0;
1825}
1826
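    /* Delayed-work handler that polls the 83xx IDC device-state register
     * and dispatches to the per-state handlers above, rescheduling itself
     * after idc->delay as long as the module stays loaded and the current
     * state was handled without error.
     */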
1827static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
1828{
1829        struct qlcnic_adapter *adapter;
1830        struct qlc_83xx_idc *idc;
1831        int ret = 0;
1832
1833        adapter = container_of(work, struct qlcnic_adapter, fw_work.work);
1834        idc = &adapter->ahw->idc;
1835        idc->curr_state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
1836
1837        switch (idc->curr_state) {
1838        case QLC_83XX_IDC_DEV_READY:
1839                ret = qlcnic_sriov_vf_idc_ready_state(adapter);
1840                break;
1841        case QLC_83XX_IDC_DEV_NEED_RESET:
1842        case QLC_83XX_IDC_DEV_INIT:
1843                ret = qlcnic_sriov_vf_idc_init_reset_state(adapter);
1844                break;
1845        case QLC_83XX_IDC_DEV_NEED_QUISCENT:
1846                ret = qlcnic_sriov_vf_idc_need_quiescent_state(adapter);
1847                break;
1848        case QLC_83XX_IDC_DEV_FAILED:
1849                ret = qlcnic_sriov_vf_idc_failed_state(adapter);
1850                break;
1851        case QLC_83XX_IDC_DEV_QUISCENT:
1852                break;
1853        default:
1854                ret = qlcnic_sriov_vf_idc_unknown_state(adapter);
1855        }
1856
1857        idc->prev_state = idc->curr_state;
1858        if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status))
1859                qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
1860                                     idc->delay);
1861}
1862
1863static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *adapter)
1864{
1865        while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1866                msleep(20);
1867
1868        clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
1869        clear_bit(__QLCNIC_RESETTING, &adapter->state);
1870        cancel_delayed_work_sync(&adapter->fw_work);
1871}
1872
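    /* Validate a guest-VLAN add/delete request: guest VLAN mode must be
     * enabled, only one VLAN may be configured at a time, and when an
     * allowed-VLAN list is in effect the requested ID must appear on it.
     */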
1873static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_sriov *sriov,
1874                                          u16 vid, u8 enable)
1875{
1876        u16 vlan = sriov->vlan;
1877        u8 allowed = 0;
1878        int i;
1879
1880        if (sriov->vlan_mode != QLC_GUEST_VLAN_MODE)
1881                return -EINVAL;
1882
1883        if (enable) {
1884                if (vlan)
1885                        return -EINVAL;
1886
1887                if (sriov->any_vlan) {
1888                        for (i = 0; i < sriov->num_allowed_vlans; i++) {
1889                                if (sriov->allowed_vlans[i] == vid)
1890                                        allowed = 1;
1891                        }
1892
1893                        if (!allowed)
1894                                return -EINVAL;
1895                }
1896        } else {
1897                if (!vlan || vlan != vid)
1898                        return -EINVAL;
1899        }
1900
1901        return 0;
1902}
1903
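    /* Configure a guest VLAN for the VF: validate the request, send
     * CFG_GUEST_VLAN to the PF over the back channel and, on success, drop
     * the current MAC list and reprogram the multicast filters with the new
     * VLAN. VLAN ID 0 is accepted without any action.
     */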
1904int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
1905                                   u16 vid, u8 enable)
1906{
1907        struct qlcnic_sriov *sriov = adapter->ahw->sriov;
1908        struct qlcnic_cmd_args cmd;
1909        int ret;
1910
1911        if (vid == 0)
1912                return 0;
1913
1914        ret = qlcnic_sriov_validate_vlan_cfg(sriov, vid, enable);
1915        if (ret)
1916                return ret;
1917
1918        ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd,
1919                                             QLCNIC_BC_CMD_CFG_GUEST_VLAN);
1920        if (ret)
1921                return ret;
1922
1923        cmd.req.arg[1] = (enable & 1) | vid << 16;
1924
1925        qlcnic_sriov_cleanup_async_list(&sriov->bc);
1926        ret = qlcnic_issue_cmd(adapter, &cmd);
1927        if (ret) {
1928                dev_err(&adapter->pdev->dev,
1929                        "Failed to configure guest VLAN, err=%d\n", ret);
1930        } else {
1931                qlcnic_free_mac_list(adapter);
1932
1933                if (enable)
1934                        sriov->vlan = vid;
1935                else
1936                        sriov->vlan = 0;
1937
1938                qlcnic_sriov_vf_set_multi(adapter->netdev);
1939        }
1940
1941        qlcnic_free_mbx_args(&cmd);
1942        return ret;
1943}
1944
1945static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *adapter)
1946{
1947        struct list_head *head = &adapter->mac_list;
1948        struct qlcnic_mac_list_s *cur;
1949        u16 vlan;
1950
1951        vlan = adapter->ahw->sriov->vlan;
1952
1953        while (!list_empty(head)) {
1954                cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
1955                qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
1956                                          vlan, QLCNIC_MAC_DEL);
1957                list_del(&cur->list);
1958                kfree(cur);
1959        }
1960}
1961
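    /* Shutdown/suspend path for the VF: stop the IDC poll work, bring the
     * interface down, terminate the back channel, disable back-channel and
     * mailbox interrupts, then save the PCI state.
     */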
1962int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
1963{
1964        struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1965        struct net_device *netdev = adapter->netdev;
1966        int retval;
1967
1968        netif_device_detach(netdev);
1969        qlcnic_cancel_idc_work(adapter);
1970
1971        if (netif_running(netdev))
1972                qlcnic_down(adapter, netdev);
1973
1974        qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
1975        qlcnic_sriov_cfg_bc_intr(adapter, 0);
1976        qlcnic_83xx_disable_mbx_intr(adapter);
1977        cancel_delayed_work_sync(&adapter->idc_aen_work);
1978
1979        retval = pci_save_state(pdev);
1980        if (retval)
1981                return retval;
1982
1983        return 0;
1984}
1985
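    /* Resume path for the VF: re-enable mailbox and back-channel
     * interrupts, re-initialize the channel, bring the interface back up if
     * it was running and restart the IDC state-poll work.
     */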
1986int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
1987{
1988        struct qlc_83xx_idc *idc = &adapter->ahw->idc;
1989        struct net_device *netdev = adapter->netdev;
1990        int err;
1991
1992        set_bit(QLC_83XX_MODULE_LOADED, &idc->status);
1993        qlcnic_83xx_enable_mbx_intrpt(adapter);
1994        err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
1995        if (err)
1996                return err;
1997
1998        err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
1999        if (!err) {
2000                if (netif_running(netdev)) {
2001                        err = qlcnic_up(adapter, netdev);
2002                        if (!err)
2003                                qlcnic_restore_indev_addr(netdev, NETDEV_UP);
2004                }
2005        }
2006
2007        netif_device_attach(netdev);
2008        qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
2009                             idc->delay);
2010        return err;
2011}
2012