linux/drivers/net/ethernet/ibm/ibmvnic.c
   1/**************************************************************************/
   2/*                                                                        */
   3/*  IBM System i and System p Virtual NIC Device Driver                   */
   4/*  Copyright (C) 2014 IBM Corp.                                          */
   5/*  Santiago Leon (santi_leon@yahoo.com)                                  */
   6/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
   7/*  John Allen (jallen@linux.vnet.ibm.com)                                */
   8/*                                                                        */
   9/*  This program is free software; you can redistribute it and/or modify  */
  10/*  it under the terms of the GNU General Public License as published by  */
  11/*  the Free Software Foundation; either version 2 of the License, or     */
  12/*  (at your option) any later version.                                   */
  13/*                                                                        */
  14/*  This program is distributed in the hope that it will be useful,       */
  15/*  but WITHOUT ANY WARRANTY; without even the implied warranty of        */
  16/*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         */
  17/*  GNU General Public License for more details.                          */
  18/*                                                                        */
  19/*  You should have received a copy of the GNU General Public License     */
  20/*  along with this program.                                              */
  21/*                                                                        */
  22/* This module contains the implementation of a virtual ethernet device   */
  23/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
  24/* option of the RS/6000 Platform Architecture to interface with virtual  */
  25/* ethernet NICs that are presented to the partition by the hypervisor.   */
   26/*                                                                        */
  27/* Messages are passed between the VNIC driver and the VNIC server using  */
  28/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
  29/* issue and receive commands that initiate communication with the server */
  30/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
  31/* are used by the driver to notify the server that a packet is           */
  32/* ready for transmission or that a buffer has been added to receive a    */
  33/* packet. Subsequently, sCRQs are used by the server to notify the       */
  34/* driver that a packet transmission has been completed or that a packet  */
  35/* has been received and placed in a waiting buffer.                      */
  36/*                                                                        */
  37/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
  38/* which skbs are DMA mapped and immediately unmapped when the transmit   */
  39/* or receive has been completed, the VNIC driver is required to use      */
  40/* "long term mapping". This entails that large, continuous DMA mapped    */
  41/* buffers are allocated on driver initialization and these buffers are   */
  42/* then continuously reused to pass skbs to and from the VNIC server.     */
  43/*                                                                        */
  44/**************************************************************************/
  45
  46#include <linux/module.h>
  47#include <linux/moduleparam.h>
  48#include <linux/types.h>
  49#include <linux/errno.h>
  50#include <linux/completion.h>
  51#include <linux/ioport.h>
  52#include <linux/dma-mapping.h>
  53#include <linux/kernel.h>
  54#include <linux/netdevice.h>
  55#include <linux/etherdevice.h>
  56#include <linux/skbuff.h>
  57#include <linux/init.h>
  58#include <linux/delay.h>
  59#include <linux/mm.h>
  60#include <linux/ethtool.h>
  61#include <linux/proc_fs.h>
  62#include <linux/in.h>
  63#include <linux/ip.h>
  64#include <linux/ipv6.h>
  65#include <linux/irq.h>
  66#include <linux/kthread.h>
  67#include <linux/seq_file.h>
  68#include <linux/debugfs.h>
  69#include <linux/interrupt.h>
  70#include <net/net_namespace.h>
  71#include <asm/hvcall.h>
  72#include <linux/atomic.h>
  73#include <asm/vio.h>
  74#include <asm/iommu.h>
  75#include <linux/uaccess.h>
  76#include <asm/firmware.h>
  77#include <linux/workqueue.h>
  78
  79#include "ibmvnic.h"
  80
  81static const char ibmvnic_driver_name[] = "ibmvnic";
  82static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
  83
  84MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
  85MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
  86MODULE_LICENSE("GPL");
  87MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
  88
  89static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
  90static int ibmvnic_remove(struct vio_dev *);
  91static void release_sub_crqs(struct ibmvnic_adapter *);
  92static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *);
  93static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
  94static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
  95static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
  96static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
  97static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
  98                       union sub_crq *sub_crq);
  99static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
 100static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
 101static int enable_scrq_irq(struct ibmvnic_adapter *,
 102                           struct ibmvnic_sub_crq_queue *);
 103static int disable_scrq_irq(struct ibmvnic_adapter *,
 104                            struct ibmvnic_sub_crq_queue *);
 105static int pending_scrq(struct ibmvnic_adapter *,
 106                        struct ibmvnic_sub_crq_queue *);
 107static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
 108                                        struct ibmvnic_sub_crq_queue *);
  109static int ibmvnic_poll(struct napi_struct *napi, int budget);
 110static void send_map_query(struct ibmvnic_adapter *adapter);
 111static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
 112static void send_request_unmap(struct ibmvnic_adapter *, u8);
 113
 114struct ibmvnic_stat {
 115        char name[ETH_GSTRING_LEN];
 116        int offset;
 117};
 118
 119#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
 120                             offsetof(struct ibmvnic_statistics, stat))
 121#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
 122
 123static const struct ibmvnic_stat ibmvnic_stats[] = {
 124        {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
 125        {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
 126        {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
 127        {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
 128        {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
 129        {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
 130        {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
 131        {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
 132        {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
 133        {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
 134        {"align_errors", IBMVNIC_STAT_OFF(align_errors)},
 135        {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
 136        {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
 137        {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
 138        {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
 139        {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
 140        {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
 141        {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
 142        {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
 143        {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
 144        {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
 145        {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
 146};
 147
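     /* Thin wrapper around the H_REG_SUB_CRQ hcall: registers a sub-CRQ
      * message area with the hypervisor and returns the new queue number
      * and its virtual IRQ through the hcall return buffer.
      */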
 148static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
 149                          unsigned long length, unsigned long *number,
 150                          unsigned long *irq)
 151{
 152        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
 153        long rc;
 154
 155        rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
 156        *number = retbuf[0];
 157        *irq = retbuf[1];
 158
 159        return rc;
 160}
 161
 162/* net_device_ops functions */
 163
 164static void init_rx_pool(struct ibmvnic_adapter *adapter,
 165                         struct ibmvnic_rx_pool *rx_pool, int num, int index,
 166                         int buff_size, int active)
 167{
 168        netdev_dbg(adapter->netdev,
 169                   "Initializing rx_pool %d, %d buffs, %d bytes each\n",
 170                   index, num, buff_size);
 171        rx_pool->size = num;
 172        rx_pool->index = index;
 173        rx_pool->buff_size = buff_size;
 174        rx_pool->active = active;
 175}
 176
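     /* Allocate a coherent DMA buffer for long term mapping, assign it the
      * adapter's next map id, and register it with the VNIC server via
      * send_request_map(), waiting on the fw_done completion before returning.
      */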
 177static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
 178                                struct ibmvnic_long_term_buff *ltb, int size)
 179{
 180        struct device *dev = &adapter->vdev->dev;
 181
 182        ltb->size = size;
 183        ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
 184                                       GFP_KERNEL);
 185
 186        if (!ltb->buff) {
 187                dev_err(dev, "Couldn't alloc long term buffer\n");
 188                return -ENOMEM;
 189        }
 190        ltb->map_id = adapter->map_id;
 191        adapter->map_id++;
 192
 193        init_completion(&adapter->fw_done);
 194        send_request_map(adapter, ltb->addr,
 195                         ltb->size, ltb->map_id);
 196        wait_for_completion(&adapter->fw_done);
 197        return 0;
 198}
 199
 200static void free_long_term_buff(struct ibmvnic_adapter *adapter,
 201                                struct ibmvnic_long_term_buff *ltb)
 202{
 203        struct device *dev = &adapter->vdev->dev;
 204
 205        dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
 206        if (!adapter->failover)
 207                send_request_unmap(adapter, ltb->map_id);
 208}
 209
 210static int alloc_rx_pool(struct ibmvnic_adapter *adapter,
 211                         struct ibmvnic_rx_pool *pool)
 212{
 213        struct device *dev = &adapter->vdev->dev;
 214        int i;
 215
 216        pool->free_map = kcalloc(pool->size, sizeof(int), GFP_KERNEL);
 217        if (!pool->free_map)
 218                return -ENOMEM;
 219
 220        pool->rx_buff = kcalloc(pool->size, sizeof(struct ibmvnic_rx_buff),
 221                                GFP_KERNEL);
 222
 223        if (!pool->rx_buff) {
 224                dev_err(dev, "Couldn't alloc rx buffers\n");
 225                kfree(pool->free_map);
 226                return -ENOMEM;
 227        }
 228
 229        if (alloc_long_term_buff(adapter, &pool->long_term_buff,
 230                                 pool->size * pool->buff_size)) {
 231                kfree(pool->free_map);
 232                kfree(pool->rx_buff);
 233                return -ENOMEM;
 234        }
 235
 236        for (i = 0; i < pool->size; ++i)
 237                pool->free_map[i] = i;
 238
 239        atomic_set(&pool->available, 0);
 240        pool->next_alloc = 0;
 241        pool->next_free = 0;
 242
 243        return 0;
 244}
 245
 246static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 247                              struct ibmvnic_rx_pool *pool)
 248{
 249        int count = pool->size - atomic_read(&pool->available);
 250        struct device *dev = &adapter->vdev->dev;
 251        int buffers_added = 0;
 252        unsigned long lpar_rc;
 253        union sub_crq sub_crq;
 254        struct sk_buff *skb;
 255        unsigned int offset;
 256        dma_addr_t dma_addr;
 257        unsigned char *dst;
 258        u64 *handle_array;
 259        int shift = 0;
 260        int index;
 261        int i;
 262
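             /* The rx-add sub-CRQ handles sit in the login response buffer at
              * off_rxadd_subcrqs; one handle per rx pool, indexed by pool->index.
              */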
 263        handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
 264                                      be32_to_cpu(adapter->login_rsp_buf->
 265                                      off_rxadd_subcrqs));
 266
 267        for (i = 0; i < count; ++i) {
 268                skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
 269                if (!skb) {
 270                        dev_err(dev, "Couldn't replenish rx buff\n");
 271                        adapter->replenish_no_mem++;
 272                        break;
 273                }
 274
 275                index = pool->free_map[pool->next_free];
 276
 277                if (pool->rx_buff[index].skb)
 278                        dev_err(dev, "Inconsistent free_map!\n");
 279
 280                /* Copy the skb to the long term mapped DMA buffer */
 281                offset = index * pool->buff_size;
 282                dst = pool->long_term_buff.buff + offset;
 283                memset(dst, 0, pool->buff_size);
 284                dma_addr = pool->long_term_buff.addr + offset;
 285                pool->rx_buff[index].data = dst;
 286
 287                pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
 288                pool->rx_buff[index].dma = dma_addr;
 289                pool->rx_buff[index].skb = skb;
 290                pool->rx_buff[index].pool_index = pool->index;
 291                pool->rx_buff[index].size = pool->buff_size;
 292
 293                memset(&sub_crq, 0, sizeof(sub_crq));
 294                sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
 295                sub_crq.rx_add.correlator =
 296                    cpu_to_be64((u64)&pool->rx_buff[index]);
 297                sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
 298                sub_crq.rx_add.map_id = pool->long_term_buff.map_id;
 299
 300                /* The length field of the sCRQ is defined to be 24 bits so the
 301                 * buffer size needs to be left shifted by a byte before it is
 302                 * converted to big endian to prevent the last byte from being
 303                 * truncated.
 304                 */
 305#ifdef __LITTLE_ENDIAN__
 306                shift = 8;
 307#endif
 308                sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
 309
 310                lpar_rc = send_subcrq(adapter, handle_array[pool->index],
 311                                      &sub_crq);
 312                if (lpar_rc != H_SUCCESS)
 313                        goto failure;
 314
 315                buffers_added++;
 316                adapter->replenish_add_buff_success++;
 317                pool->next_free = (pool->next_free + 1) % pool->size;
 318        }
 319        atomic_add(buffers_added, &pool->available);
 320        return;
 321
 322failure:
 323        dev_info(dev, "replenish pools failure\n");
 324        pool->free_map[pool->next_free] = index;
 325        pool->rx_buff[index].skb = NULL;
 326        if (!dma_mapping_error(dev, dma_addr))
 327                dma_unmap_single(dev, dma_addr, pool->buff_size,
 328                                 DMA_FROM_DEVICE);
 329
 330        dev_kfree_skb_any(skb);
 331        adapter->replenish_add_buff_failure++;
 332        atomic_add(buffers_added, &pool->available);
 333}
 334
 335static void replenish_pools(struct ibmvnic_adapter *adapter)
 336{
 337        int i;
 338
 339        if (adapter->migrated)
 340                return;
 341
 342        adapter->replenish_task_cycles++;
 343        for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
 344             i++) {
 345                if (adapter->rx_pool[i].active)
 346                        replenish_rx_pool(adapter, &adapter->rx_pool[i]);
 347        }
 348}
 349
 350static void free_rx_pool(struct ibmvnic_adapter *adapter,
 351                         struct ibmvnic_rx_pool *pool)
 352{
 353        int i;
 354
 355        kfree(pool->free_map);
 356        pool->free_map = NULL;
 357
 358        if (!pool->rx_buff)
 359                return;
 360
 361        for (i = 0; i < pool->size; i++) {
 362                if (pool->rx_buff[i].skb) {
 363                        dev_kfree_skb_any(pool->rx_buff[i].skb);
 364                        pool->rx_buff[i].skb = NULL;
 365                }
 366        }
 367        kfree(pool->rx_buff);
 368        pool->rx_buff = NULL;
 369}
 370
 371static int ibmvnic_open(struct net_device *netdev)
 372{
 373        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 374        struct device *dev = &adapter->vdev->dev;
 375        struct ibmvnic_tx_pool *tx_pool;
 376        union ibmvnic_crq crq;
 377        int rxadd_subcrqs;
 378        u64 *size_array;
 379        int tx_subcrqs;
 380        int i, j;
 381
 382        rxadd_subcrqs =
 383            be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
 384        tx_subcrqs =
 385            be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
 386        size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
 387                                  be32_to_cpu(adapter->login_rsp_buf->
 388                                              off_rxadd_buff_size));
 389        adapter->map_id = 1;
 390        adapter->napi = kcalloc(adapter->req_rx_queues,
 391                                sizeof(struct napi_struct), GFP_KERNEL);
 392        if (!adapter->napi)
 393                goto alloc_napi_failed;
 394        for (i = 0; i < adapter->req_rx_queues; i++) {
 395                netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
 396                               NAPI_POLL_WEIGHT);
 397                napi_enable(&adapter->napi[i]);
 398        }
 399        adapter->rx_pool =
 400            kcalloc(rxadd_subcrqs, sizeof(struct ibmvnic_rx_pool), GFP_KERNEL);
 401
 402        if (!adapter->rx_pool)
 403                goto rx_pool_arr_alloc_failed;
 404        send_map_query(adapter);
 405        for (i = 0; i < rxadd_subcrqs; i++) {
 406                init_rx_pool(adapter, &adapter->rx_pool[i],
 407                             adapter->req_rx_add_entries_per_subcrq, i,
 408                             be64_to_cpu(size_array[i]), 1);
 409                if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
 410                        dev_err(dev, "Couldn't alloc rx pool\n");
 411                        goto rx_pool_alloc_failed;
 412                }
 413        }
 414        adapter->tx_pool =
 415            kcalloc(tx_subcrqs, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
 416
 417        if (!adapter->tx_pool)
 418                goto tx_pool_arr_alloc_failed;
 419        for (i = 0; i < tx_subcrqs; i++) {
 420                tx_pool = &adapter->tx_pool[i];
 421                tx_pool->tx_buff =
 422                    kcalloc(adapter->req_tx_entries_per_subcrq,
 423                            sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
 424                if (!tx_pool->tx_buff)
 425                        goto tx_pool_alloc_failed;
 426
 427                if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
 428                                         adapter->req_tx_entries_per_subcrq *
 429                                         adapter->req_mtu))
 430                        goto tx_ltb_alloc_failed;
 431
 432                tx_pool->free_map =
 433                    kcalloc(adapter->req_tx_entries_per_subcrq,
 434                            sizeof(int), GFP_KERNEL);
 435                if (!tx_pool->free_map)
 436                        goto tx_fm_alloc_failed;
 437
 438                for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
 439                        tx_pool->free_map[j] = j;
 440
 441                tx_pool->consumer_index = 0;
 442                tx_pool->producer_index = 0;
 443        }
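             /* Allocate and DMA-map the TX bounce buffer. */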
 444        adapter->bounce_buffer_size =
 445            (netdev->mtu + ETH_HLEN - 1) / PAGE_SIZE + 1;
 446        adapter->bounce_buffer = kmalloc(adapter->bounce_buffer_size,
 447                                         GFP_KERNEL);
 448        if (!adapter->bounce_buffer)
 449                goto bounce_alloc_failed;
 450
 451        adapter->bounce_buffer_dma = dma_map_single(dev, adapter->bounce_buffer,
 452                                                    adapter->bounce_buffer_size,
 453                                                    DMA_TO_DEVICE);
 454        if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
 455                dev_err(dev, "Couldn't map tx bounce buffer\n");
 456                goto bounce_map_failed;
 457        }
 458        replenish_pools(adapter);
 459
 460        /* We're ready to receive frames, enable the sub-crq interrupts and
 461         * set the logical link state to up
 462         */
 463        for (i = 0; i < adapter->req_rx_queues; i++)
 464                enable_scrq_irq(adapter, adapter->rx_scrq[i]);
 465
 466        for (i = 0; i < adapter->req_tx_queues; i++)
 467                enable_scrq_irq(adapter, adapter->tx_scrq[i]);
 468
 469        memset(&crq, 0, sizeof(crq));
 470        crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
 471        crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
 472        crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
 473        ibmvnic_send_crq(adapter, &crq);
 474
 475        netif_tx_start_all_queues(netdev);
 476
 477        return 0;
 478
 479bounce_map_failed:
 480        kfree(adapter->bounce_buffer);
 481bounce_alloc_failed:
 482        i = tx_subcrqs - 1;
 483        kfree(adapter->tx_pool[i].free_map);
 484tx_fm_alloc_failed:
 485        free_long_term_buff(adapter, &adapter->tx_pool[i].long_term_buff);
 486tx_ltb_alloc_failed:
 487        kfree(adapter->tx_pool[i].tx_buff);
 488tx_pool_alloc_failed:
 489        for (j = 0; j < i; j++) {
 490                kfree(adapter->tx_pool[j].tx_buff);
 491                free_long_term_buff(adapter,
 492                                    &adapter->tx_pool[j].long_term_buff);
 493                kfree(adapter->tx_pool[j].free_map);
 494        }
 495        kfree(adapter->tx_pool);
 496        adapter->tx_pool = NULL;
 497tx_pool_arr_alloc_failed:
 498        i = rxadd_subcrqs;
 499rx_pool_alloc_failed:
 500        for (j = 0; j < i; j++) {
 501                free_rx_pool(adapter, &adapter->rx_pool[j]);
 502                free_long_term_buff(adapter,
 503                                    &adapter->rx_pool[j].long_term_buff);
 504        }
 505        kfree(adapter->rx_pool);
 506        adapter->rx_pool = NULL;
 507rx_pool_arr_alloc_failed:
 508        for (i = 0; i < adapter->req_rx_queues; i++)
 509                napi_disable(&adapter->napi[i]);
 510alloc_napi_failed:
 511        return -ENOMEM;
 512}
 513
 514static int ibmvnic_close(struct net_device *netdev)
 515{
 516        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 517        struct device *dev = &adapter->vdev->dev;
 518        union ibmvnic_crq crq;
 519        int i;
 520
 521        adapter->closing = true;
 522
 523        for (i = 0; i < adapter->req_rx_queues; i++)
 524                napi_disable(&adapter->napi[i]);
 525
 526        if (!adapter->failover)
 527                netif_tx_stop_all_queues(netdev);
 528
 529        if (adapter->bounce_buffer) {
 530                if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
  531                        dma_unmap_single(dev,
  532                                         adapter->bounce_buffer_dma,
  533                                         adapter->bounce_buffer_size,
  534                                         DMA_TO_DEVICE);
 535                        adapter->bounce_buffer_dma = DMA_ERROR_CODE;
 536                }
 537                kfree(adapter->bounce_buffer);
 538                adapter->bounce_buffer = NULL;
 539        }
 540
 541        memset(&crq, 0, sizeof(crq));
 542        crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
 543        crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
 544        crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_DN;
 545        ibmvnic_send_crq(adapter, &crq);
 546
 547        for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
 548             i++) {
 549                kfree(adapter->tx_pool[i].tx_buff);
 550                free_long_term_buff(adapter,
 551                                    &adapter->tx_pool[i].long_term_buff);
 552                kfree(adapter->tx_pool[i].free_map);
 553        }
 554        kfree(adapter->tx_pool);
 555        adapter->tx_pool = NULL;
 556
 557        for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
 558             i++) {
 559                free_rx_pool(adapter, &adapter->rx_pool[i]);
 560                free_long_term_buff(adapter,
 561                                    &adapter->rx_pool[i].long_term_buff);
 562        }
 563        kfree(adapter->rx_pool);
 564        adapter->rx_pool = NULL;
 565
 566        adapter->closing = false;
 567
 568        return 0;
 569}
 570
 571/**
 572 * build_hdr_data - creates L2/L3/L4 header data buffer
 573 * @hdr_field - bitfield determining needed headers
 574 * @skb - socket buffer
 575 * @hdr_len - array of header lengths
  576 * @hdr_data - buffer to write the header data into
 577 *
 578 * Reads hdr_field to determine which headers are needed by firmware.
 579 * Builds a buffer containing these headers.  Saves individual header
 580 * lengths and total buffer length to be used to build descriptors.
 581 */
 582static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
 583                          int *hdr_len, u8 *hdr_data)
 584{
 585        int len = 0;
 586        u8 *hdr;
 587
 588        hdr_len[0] = sizeof(struct ethhdr);
 589
 590        if (skb->protocol == htons(ETH_P_IP)) {
 591                hdr_len[1] = ip_hdr(skb)->ihl * 4;
 592                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
 593                        hdr_len[2] = tcp_hdrlen(skb);
 594                else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
 595                        hdr_len[2] = sizeof(struct udphdr);
 596        } else if (skb->protocol == htons(ETH_P_IPV6)) {
 597                hdr_len[1] = sizeof(struct ipv6hdr);
 598                if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
 599                        hdr_len[2] = tcp_hdrlen(skb);
 600                else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
 601                        hdr_len[2] = sizeof(struct udphdr);
 602        }
 603
 604        memset(hdr_data, 0, 120);
 605        if ((hdr_field >> 6) & 1) {
 606                hdr = skb_mac_header(skb);
 607                memcpy(hdr_data, hdr, hdr_len[0]);
 608                len += hdr_len[0];
 609        }
 610
 611        if ((hdr_field >> 5) & 1) {
 612                hdr = skb_network_header(skb);
 613                memcpy(hdr_data + len, hdr, hdr_len[1]);
 614                len += hdr_len[1];
 615        }
 616
 617        if ((hdr_field >> 4) & 1) {
 618                hdr = skb_transport_header(skb);
 619                memcpy(hdr_data + len, hdr, hdr_len[2]);
 620                len += hdr_len[2];
 621        }
 622        return len;
 623}
 624
 625/**
 626 * create_hdr_descs - create header and header extension descriptors
 627 * @hdr_field - bitfield determining needed headers
 628 * @data - buffer containing header data
 629 * @len - length of data buffer
 630 * @hdr_len - array of individual header lengths
 631 * @scrq_arr - descriptor array
 632 *
 633 * Creates header and, if needed, header extension descriptors and
 634 * places them in a descriptor array, scrq_arr
 635 */
 636
 637static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
 638                             union sub_crq *scrq_arr)
 639{
 640        union sub_crq hdr_desc;
 641        int tmp_len = len;
 642        u8 *data, *cur;
 643        int tmp;
 644
 645        while (tmp_len > 0) {
 646                cur = hdr_data + len - tmp_len;
 647
 648                memset(&hdr_desc, 0, sizeof(hdr_desc));
 649                if (cur != hdr_data) {
 650                        data = hdr_desc.hdr_ext.data;
 651                        tmp = tmp_len > 29 ? 29 : tmp_len;
 652                        hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
 653                        hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
 654                        hdr_desc.hdr_ext.len = tmp;
 655                } else {
 656                        data = hdr_desc.hdr.data;
 657                        tmp = tmp_len > 24 ? 24 : tmp_len;
 658                        hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
 659                        hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
 660                        hdr_desc.hdr.len = tmp;
 661                        hdr_desc.hdr.l2_len = (u8)hdr_len[0];
 662                        hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
 663                        hdr_desc.hdr.l4_len = (u8)hdr_len[2];
 664                        hdr_desc.hdr.flag = hdr_field << 1;
 665                }
 666                memcpy(data, cur, tmp);
 667                tmp_len -= tmp;
 668                *scrq_arr = hdr_desc;
 669                scrq_arr++;
 670        }
 671}
 672
 673/**
 674 * build_hdr_descs_arr - build a header descriptor array
  675 * @txbuff - tx buffer containing the skb and the indirect descriptor array
  676 * @num_entries - pointer to the number of descriptors to be sent; updated
  677 *               with the count of header descriptors added
 678 * @hdr_field - bit field determining which headers will be sent
 679 *
 680 * This function will build a TX descriptor array with applicable
 681 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 682 */
 683
 684static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
 685                                int *num_entries, u8 hdr_field)
 686{
 687        int hdr_len[3] = {0, 0, 0};
 688        int tot_len, len;
 689        u8 *hdr_data = txbuff->hdr_data;
 690
 691        tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
 692                                 txbuff->hdr_data);
 693        len = tot_len;
 694        len -= 24;
 695        if (len > 0)
  696                *num_entries += len % 29 ? len / 29 + 1 : len / 29;
 697        create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
 698                         txbuff->indir_arr + 1);
 699}
 700
 701static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 702{
 703        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 704        int queue_num = skb_get_queue_mapping(skb);
 705        u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
 706        struct device *dev = &adapter->vdev->dev;
 707        struct ibmvnic_tx_buff *tx_buff = NULL;
 708        struct ibmvnic_sub_crq_queue *tx_scrq;
 709        struct ibmvnic_tx_pool *tx_pool;
 710        unsigned int tx_send_failed = 0;
 711        unsigned int tx_map_failed = 0;
 712        unsigned int tx_dropped = 0;
 713        unsigned int tx_packets = 0;
 714        unsigned int tx_bytes = 0;
 715        dma_addr_t data_dma_addr;
 716        struct netdev_queue *txq;
 717        bool used_bounce = false;
 718        unsigned long lpar_rc;
 719        union sub_crq tx_crq;
 720        unsigned int offset;
 721        int num_entries = 1;
 722        unsigned char *dst;
 723        u64 *handle_array;
 724        int index = 0;
 725        int ret = 0;
 726
 727        tx_pool = &adapter->tx_pool[queue_num];
 728        tx_scrq = adapter->tx_scrq[queue_num];
 729        txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
 730        handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
 731                                   be32_to_cpu(adapter->login_rsp_buf->
 732                                               off_txsubm_subcrqs));
 733        if (adapter->migrated) {
 734                tx_send_failed++;
 735                tx_dropped++;
 736                ret = NETDEV_TX_BUSY;
 737                goto out;
 738        }
 739
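             /* Copy the skb data into the next free slot of the long term
              * mapped TX buffer; the descriptor built below points the VNIC
              * server at this offset instead of DMA-mapping the skb itself.
              */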
 740        index = tx_pool->free_map[tx_pool->consumer_index];
 741        offset = index * adapter->req_mtu;
 742        dst = tx_pool->long_term_buff.buff + offset;
 743        memset(dst, 0, adapter->req_mtu);
 744        skb_copy_from_linear_data(skb, dst, skb->len);
 745        data_dma_addr = tx_pool->long_term_buff.addr + offset;
 746
 747        tx_pool->consumer_index =
 748            (tx_pool->consumer_index + 1) %
 749                adapter->req_tx_entries_per_subcrq;
 750
 751        tx_buff = &tx_pool->tx_buff[index];
 752        tx_buff->skb = skb;
 753        tx_buff->data_dma[0] = data_dma_addr;
 754        tx_buff->data_len[0] = skb->len;
 755        tx_buff->index = index;
 756        tx_buff->pool_index = queue_num;
 757        tx_buff->last_frag = true;
 758        tx_buff->used_bounce = used_bounce;
 759
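             /* Build a version 1 TX descriptor referencing the long term
              * buffer slot; checksum, VLAN, and protocol flags are filled in
              * below from the skb.
              */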
 760        memset(&tx_crq, 0, sizeof(tx_crq));
 761        tx_crq.v1.first = IBMVNIC_CRQ_CMD;
 762        tx_crq.v1.type = IBMVNIC_TX_DESC;
 763        tx_crq.v1.n_crq_elem = 1;
 764        tx_crq.v1.n_sge = 1;
 765        tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
 766        tx_crq.v1.correlator = cpu_to_be32(index);
 767        tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
 768        tx_crq.v1.sge_len = cpu_to_be32(skb->len);
 769        tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
 770
 771        if (adapter->vlan_header_insertion) {
 772                tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
 773                tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
 774        }
 775
 776        if (skb->protocol == htons(ETH_P_IP)) {
 777                if (ip_hdr(skb)->version == 4)
 778                        tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
 779                else if (ip_hdr(skb)->version == 6)
 780                        tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
 781
 782                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
 783                        tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
  784                else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
 785                        tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
 786        }
 787
 788        if (skb->ip_summed == CHECKSUM_PARTIAL) {
 789                tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
 790                hdrs += 2;
 791        }
 792        /* determine if l2/3/4 headers are sent to firmware */
 793        if ((*hdrs >> 7) & 1 &&
 794            (skb->protocol == htons(ETH_P_IP) ||
 795             skb->protocol == htons(ETH_P_IPV6))) {
 796                build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
 797                tx_crq.v1.n_crq_elem = num_entries;
 798                tx_buff->indir_arr[0] = tx_crq;
 799                tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
 800                                                    sizeof(tx_buff->indir_arr),
 801                                                    DMA_TO_DEVICE);
 802                if (dma_mapping_error(dev, tx_buff->indir_dma)) {
 803                        if (!firmware_has_feature(FW_FEATURE_CMO))
 804                                dev_err(dev, "tx: unable to map descriptor array\n");
 805                        tx_map_failed++;
 806                        tx_dropped++;
 807                        ret = NETDEV_TX_BUSY;
 808                        goto out;
 809                }
 810                lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
 811                                               (u64)tx_buff->indir_dma,
 812                                               (u64)num_entries);
 813        } else {
 814                lpar_rc = send_subcrq(adapter, handle_array[queue_num],
 815                                      &tx_crq);
 816        }
 817        if (lpar_rc != H_SUCCESS) {
 818                dev_err(dev, "tx failed with code %ld\n", lpar_rc);
 819
 820                if (tx_pool->consumer_index == 0)
 821                        tx_pool->consumer_index =
 822                                adapter->req_tx_entries_per_subcrq - 1;
 823                else
 824                        tx_pool->consumer_index--;
 825
 826                tx_send_failed++;
 827                tx_dropped++;
 828                ret = NETDEV_TX_BUSY;
 829                goto out;
 830        }
 831
 832        atomic_inc(&tx_scrq->used);
 833
 834        if (atomic_read(&tx_scrq->used) >= adapter->req_tx_entries_per_subcrq) {
 835                netdev_info(netdev, "Stopping queue %d\n", queue_num);
 836                netif_stop_subqueue(netdev, queue_num);
 837        }
 838
 839        tx_packets++;
 840        tx_bytes += skb->len;
 841        txq->trans_start = jiffies;
 842        ret = NETDEV_TX_OK;
 843
 844out:
 845        netdev->stats.tx_dropped += tx_dropped;
 846        netdev->stats.tx_bytes += tx_bytes;
 847        netdev->stats.tx_packets += tx_packets;
 848        adapter->tx_send_failed += tx_send_failed;
 849        adapter->tx_map_failed += tx_map_failed;
 850
 851        return ret;
 852}
 853
 854static void ibmvnic_set_multi(struct net_device *netdev)
 855{
 856        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 857        struct netdev_hw_addr *ha;
 858        union ibmvnic_crq crq;
 859
 860        memset(&crq, 0, sizeof(crq));
 861        crq.request_capability.first = IBMVNIC_CRQ_CMD;
 862        crq.request_capability.cmd = REQUEST_CAPABILITY;
 863
 864        if (netdev->flags & IFF_PROMISC) {
 865                if (!adapter->promisc_supported)
 866                        return;
 867        } else {
 868                if (netdev->flags & IFF_ALLMULTI) {
 869                        /* Accept all multicast */
 870                        memset(&crq, 0, sizeof(crq));
 871                        crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
 872                        crq.multicast_ctrl.cmd = MULTICAST_CTRL;
 873                        crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
 874                        ibmvnic_send_crq(adapter, &crq);
 875                } else if (netdev_mc_empty(netdev)) {
 876                        /* Reject all multicast */
 877                        memset(&crq, 0, sizeof(crq));
 878                        crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
 879                        crq.multicast_ctrl.cmd = MULTICAST_CTRL;
 880                        crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
 881                        ibmvnic_send_crq(adapter, &crq);
 882                } else {
 883                        /* Accept one or more multicast(s) */
 884                        netdev_for_each_mc_addr(ha, netdev) {
 885                                memset(&crq, 0, sizeof(crq));
 886                                crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
 887                                crq.multicast_ctrl.cmd = MULTICAST_CTRL;
 888                                crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
 889                                ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
 890                                                ha->addr);
 891                                ibmvnic_send_crq(adapter, &crq);
 892                        }
 893                }
 894        }
 895}
 896
 897static int ibmvnic_set_mac(struct net_device *netdev, void *p)
 898{
 899        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 900        struct sockaddr *addr = p;
 901        union ibmvnic_crq crq;
 902
 903        if (!is_valid_ether_addr(addr->sa_data))
 904                return -EADDRNOTAVAIL;
 905
 906        memset(&crq, 0, sizeof(crq));
 907        crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
 908        crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
 909        ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
 910        ibmvnic_send_crq(adapter, &crq);
 911        /* netdev->dev_addr is changed in handle_change_mac_rsp function */
 912        return 0;
 913}
 914
 915static void ibmvnic_tx_timeout(struct net_device *dev)
 916{
 917        struct ibmvnic_adapter *adapter = netdev_priv(dev);
 918        int rc;
 919
 920        /* Adapter timed out, resetting it */
 921        release_sub_crqs(adapter);
 922        rc = ibmvnic_reset_crq(adapter);
 923        if (rc)
 924                dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n");
 925        else
 926                ibmvnic_send_crq_init(adapter);
 927}
 928
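     /* Return an rx buffer to its pool: clear its skb pointer, put the buffer
      * index back on the free map, and decrement the pool's available count.
      */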
 929static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
 930                                  struct ibmvnic_rx_buff *rx_buff)
 931{
 932        struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
 933
 934        rx_buff->skb = NULL;
 935
 936        pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
 937        pool->next_alloc = (pool->next_alloc + 1) % pool->size;
 938
 939        atomic_dec(&pool->available);
 940}
 941
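     /* NAPI poll: drain completed entries from this rx sub-CRQ, copy each
      * frame out of the long term buffer into its waiting skb, pass it up via
      * napi_gro_receive(), and re-enable the queue interrupt when under budget.
      */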
 942static int ibmvnic_poll(struct napi_struct *napi, int budget)
 943{
 944        struct net_device *netdev = napi->dev;
 945        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 946        int scrq_num = (int)(napi - adapter->napi);
 947        int frames_processed = 0;
 948restart_poll:
 949        while (frames_processed < budget) {
 950                struct sk_buff *skb;
 951                struct ibmvnic_rx_buff *rx_buff;
 952                union sub_crq *next;
 953                u32 length;
 954                u16 offset;
 955                u8 flags = 0;
 956
 957                if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
 958                        break;
 959                next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
 960                rx_buff =
 961                    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
 962                                                          rx_comp.correlator);
 963                /* do error checking */
 964                if (next->rx_comp.rc) {
 965                        netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
 966                        /* free the entry */
 967                        next->rx_comp.first = 0;
 968                        remove_buff_from_pool(adapter, rx_buff);
 969                        break;
 970                }
 971
 972                length = be32_to_cpu(next->rx_comp.len);
 973                offset = be16_to_cpu(next->rx_comp.off_frame_data);
 974                flags = next->rx_comp.flags;
 975                skb = rx_buff->skb;
 976                skb_copy_to_linear_data(skb, rx_buff->data + offset,
 977                                        length);
 978                skb->vlan_tci = be16_to_cpu(next->rx_comp.vlan_tci);
 979                /* free the entry */
 980                next->rx_comp.first = 0;
 981                remove_buff_from_pool(adapter, rx_buff);
 982
 983                skb_put(skb, length);
 984                skb->protocol = eth_type_trans(skb, netdev);
 985
 986                if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
 987                    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
 988                        skb->ip_summed = CHECKSUM_UNNECESSARY;
 989                }
 990
 991                length = skb->len;
 992                napi_gro_receive(napi, skb); /* send it up */
 993                netdev->stats.rx_packets++;
 994                netdev->stats.rx_bytes += length;
 995                frames_processed++;
 996        }
 997        replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
 998
 999        if (frames_processed < budget) {
1000                enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
1001                napi_complete_done(napi, frames_processed);
1002                if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
1003                    napi_reschedule(napi)) {
1004                        disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
1005                        goto restart_poll;
1006                }
1007        }
1008        return frames_processed;
1009}
1010
1011#ifdef CONFIG_NET_POLL_CONTROLLER
1012static void ibmvnic_netpoll_controller(struct net_device *dev)
1013{
1014        struct ibmvnic_adapter *adapter = netdev_priv(dev);
1015        int i;
1016
 1017        replenish_pools(adapter);
1018        for (i = 0; i < adapter->req_rx_queues; i++)
1019                ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
1020                                     adapter->rx_scrq[i]);
1021}
1022#endif
1023
1024static const struct net_device_ops ibmvnic_netdev_ops = {
1025        .ndo_open               = ibmvnic_open,
1026        .ndo_stop               = ibmvnic_close,
1027        .ndo_start_xmit         = ibmvnic_xmit,
1028        .ndo_set_rx_mode        = ibmvnic_set_multi,
1029        .ndo_set_mac_address    = ibmvnic_set_mac,
1030        .ndo_validate_addr      = eth_validate_addr,
1031        .ndo_tx_timeout         = ibmvnic_tx_timeout,
1032#ifdef CONFIG_NET_POLL_CONTROLLER
1033        .ndo_poll_controller    = ibmvnic_netpoll_controller,
1034#endif
1035};
1036
1037/* ethtool functions */
1038
1039static int ibmvnic_get_link_ksettings(struct net_device *netdev,
1040                                      struct ethtool_link_ksettings *cmd)
1041{
1042        u32 supported, advertising;
1043
1044        supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
1045                          SUPPORTED_FIBRE);
1046        advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
1047                            ADVERTISED_FIBRE);
1048        cmd->base.speed = SPEED_1000;
1049        cmd->base.duplex = DUPLEX_FULL;
1050        cmd->base.port = PORT_FIBRE;
1051        cmd->base.phy_address = 0;
1052        cmd->base.autoneg = AUTONEG_ENABLE;
1053
1054        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1055                                                supported);
1056        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1057                                                advertising);
1058
1059        return 0;
1060}
1061
1062static void ibmvnic_get_drvinfo(struct net_device *dev,
1063                                struct ethtool_drvinfo *info)
1064{
1065        strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
1066        strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
1067}
1068
1069static u32 ibmvnic_get_msglevel(struct net_device *netdev)
1070{
1071        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1072
1073        return adapter->msg_enable;
1074}
1075
1076static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
1077{
1078        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1079
1080        adapter->msg_enable = data;
1081}
1082
1083static u32 ibmvnic_get_link(struct net_device *netdev)
1084{
1085        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1086
1087        /* Don't need to send a query because we request a logical link up at
1088         * init and then we wait for link state indications
1089         */
1090        return adapter->logical_link_state;
1091}
1092
1093static void ibmvnic_get_ringparam(struct net_device *netdev,
1094                                  struct ethtool_ringparam *ring)
1095{
1096        ring->rx_max_pending = 0;
1097        ring->tx_max_pending = 0;
1098        ring->rx_mini_max_pending = 0;
1099        ring->rx_jumbo_max_pending = 0;
1100        ring->rx_pending = 0;
1101        ring->tx_pending = 0;
1102        ring->rx_mini_pending = 0;
1103        ring->rx_jumbo_pending = 0;
1104}
1105
1106static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1107{
1108        int i;
1109
1110        if (stringset != ETH_SS_STATS)
1111                return;
1112
1113        for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
1114                memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
1115}
1116
1117static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
1118{
1119        switch (sset) {
1120        case ETH_SS_STATS:
1121                return ARRAY_SIZE(ibmvnic_stats);
1122        default:
1123                return -EOPNOTSUPP;
1124        }
1125}
1126
1127static void ibmvnic_get_ethtool_stats(struct net_device *dev,
1128                                      struct ethtool_stats *stats, u64 *data)
1129{
1130        struct ibmvnic_adapter *adapter = netdev_priv(dev);
1131        union ibmvnic_crq crq;
1132        int i;
1133
1134        memset(&crq, 0, sizeof(crq));
1135        crq.request_statistics.first = IBMVNIC_CRQ_CMD;
1136        crq.request_statistics.cmd = REQUEST_STATISTICS;
1137        crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
1138        crq.request_statistics.len =
1139            cpu_to_be32(sizeof(struct ibmvnic_statistics));
1140
1141        /* Wait for data to be written */
1142        init_completion(&adapter->stats_done);
1143        ibmvnic_send_crq(adapter, &crq);
1144        wait_for_completion(&adapter->stats_done);
1145
1146        for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
1147                data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
1148}
1149
1150static const struct ethtool_ops ibmvnic_ethtool_ops = {
1151        .get_drvinfo            = ibmvnic_get_drvinfo,
1152        .get_msglevel           = ibmvnic_get_msglevel,
1153        .set_msglevel           = ibmvnic_set_msglevel,
1154        .get_link               = ibmvnic_get_link,
1155        .get_ringparam          = ibmvnic_get_ringparam,
1156        .get_strings            = ibmvnic_get_strings,
1157        .get_sset_count         = ibmvnic_get_sset_count,
1158        .get_ethtool_stats      = ibmvnic_get_ethtool_stats,
1159        .get_link_ksettings     = ibmvnic_get_link_ksettings,
1160};
1161
1162/* Routines for managing CRQs/sCRQs  */
1163
1164static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
1165                                  struct ibmvnic_sub_crq_queue *scrq)
1166{
1167        struct device *dev = &adapter->vdev->dev;
1168        long rc;
1169
1170        netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
1171
1172        /* Close the sub-crqs */
1173        do {
1174                rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
1175                                        adapter->vdev->unit_address,
1176                                        scrq->crq_num);
1177        } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
1178
1179        dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
1180                         DMA_BIDIRECTIONAL);
1181        free_pages((unsigned long)scrq->msgs, 2);
1182        kfree(scrq);
1183}
1184
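     /* Allocate a four-page sub-CRQ message area, DMA-map it, and register it
      * with the hypervisor through h_reg_sub_crq().
      */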
1185static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
1186                                                        *adapter)
1187{
1188        struct device *dev = &adapter->vdev->dev;
1189        struct ibmvnic_sub_crq_queue *scrq;
1190        int rc;
1191
1192        scrq = kmalloc(sizeof(*scrq), GFP_ATOMIC);
1193        if (!scrq)
1194                return NULL;
1195
 1196        scrq->msgs = (union sub_crq *)__get_free_pages(GFP_ATOMIC, 2);
 1197        if (!scrq->msgs) {
 1198                dev_warn(dev, "Couldn't allocate crq queue messages page\n");
 1199                goto zero_page_failed;
 1200        }
 1201        memset(scrq->msgs, 0, 4 * PAGE_SIZE);
1202
1203        scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
1204                                         DMA_BIDIRECTIONAL);
1205        if (dma_mapping_error(dev, scrq->msg_token)) {
1206                dev_warn(dev, "Couldn't map crq queue messages page\n");
1207                goto map_failed;
1208        }
1209
1210        rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
1211                           4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
1212
1213        if (rc == H_RESOURCE)
1214                rc = ibmvnic_reset_crq(adapter);
1215
1216        if (rc == H_CLOSED) {
1217                dev_warn(dev, "Partner adapter not ready, waiting.\n");
1218        } else if (rc) {
1219                dev_warn(dev, "Error %d registering sub-crq\n", rc);
1220                goto reg_failed;
1221        }
1222
1223        scrq->adapter = adapter;
1224        scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
1225        scrq->cur = 0;
1226        atomic_set(&scrq->used, 0);
1227        scrq->rx_skb_top = NULL;
1228        spin_lock_init(&scrq->lock);
1229
1230        netdev_dbg(adapter->netdev,
1231                   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
1232                   scrq->crq_num, scrq->hw_irq, scrq->irq);
1233
1234        return scrq;
1235
1236reg_failed:
1237        dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
1238                         DMA_BIDIRECTIONAL);
1239map_failed:
1240        free_pages((unsigned long)scrq->msgs, 2);
1241zero_page_failed:
1242        kfree(scrq);
1243
1244        return NULL;
1245}
1246
1247static void release_sub_crqs(struct ibmvnic_adapter *adapter)
1248{
1249        int i;
1250
1251        if (adapter->tx_scrq) {
1252                for (i = 0; i < adapter->req_tx_queues; i++)
1253                        if (adapter->tx_scrq[i]) {
1254                                free_irq(adapter->tx_scrq[i]->irq,
1255                                         adapter->tx_scrq[i]);
1256                                irq_dispose_mapping(adapter->tx_scrq[i]->irq);
1257                                release_sub_crq_queue(adapter,
1258                                                      adapter->tx_scrq[i]);
1259                        }
1260                kfree(adapter->tx_scrq);
1261                adapter->tx_scrq = NULL;
1262        }
1263
1264        if (adapter->rx_scrq) {
1265                for (i = 0; i < adapter->req_rx_queues; i++)
1266                        if (adapter->rx_scrq[i]) {
1267                                free_irq(adapter->rx_scrq[i]->irq,
1268                                         adapter->rx_scrq[i]);
1269                                irq_dispose_mapping(adapter->rx_scrq[i]->irq);
1270                                release_sub_crq_queue(adapter,
1271                                                      adapter->rx_scrq[i]);
1272                        }
1273                kfree(adapter->rx_scrq);
1274                adapter->rx_scrq = NULL;
1275        }
1276}
1277
1278static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
1279{
1280        int i;
1281
1282        if (adapter->tx_scrq) {
1283                for (i = 0; i < adapter->req_tx_queues; i++)
1284                        if (adapter->tx_scrq[i])
1285                                release_sub_crq_queue(adapter,
1286                                                      adapter->tx_scrq[i]);
1287                adapter->tx_scrq = NULL;
1288        }
1289
1290        if (adapter->rx_scrq) {
1291                for (i = 0; i < adapter->req_rx_queues; i++)
1292                        if (adapter->rx_scrq[i])
1293                                release_sub_crq_queue(adapter,
1294                                                      adapter->rx_scrq[i]);
1295                adapter->rx_scrq = NULL;
1296        }
1297}
1298
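     /* Sub-CRQ interrupts are masked and unmasked with the H_VIOCTL hcall
      * (H_DISABLE_VIO_INTERRUPT / H_ENABLE_VIO_INTERRUPT) on the queue's
      * hardware IRQ number.
      */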
1299static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
1300                            struct ibmvnic_sub_crq_queue *scrq)
1301{
1302        struct device *dev = &adapter->vdev->dev;
1303        unsigned long rc;
1304
1305        rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
1306                                H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
1307        if (rc)
1308                dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
1309                        scrq->hw_irq, rc);
1310        return rc;
1311}
1312
1313static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
1314                           struct ibmvnic_sub_crq_queue *scrq)
1315{
1316        struct device *dev = &adapter->vdev->dev;
1317        unsigned long rc;
1318
1319        if (scrq->hw_irq > 0x100000000ULL) {
1320                dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
1321                return 1;
1322        }
1323
1324        rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
1325                                H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
1326        if (rc)
1327                dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
1328                        scrq->hw_irq, rc);
1329        return rc;
1330}
1331
1332static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
1333                               struct ibmvnic_sub_crq_queue *scrq)
1334{
1335        struct device *dev = &adapter->vdev->dev;
1336        struct ibmvnic_tx_buff *txbuff;
1337        union sub_crq *next;
1338        int index;
1339        int i, j;
1340        u8 first;
1341
1342restart_loop:
1343        while (pending_scrq(adapter, scrq)) {
1344                unsigned int pool = scrq->pool_index;
1345
1346                next = ibmvnic_next_scrq(adapter, scrq);
1347                for (i = 0; i < next->tx_comp.num_comps; i++) {
1348                        if (next->tx_comp.rcs[i]) {
1349                                dev_err(dev, "tx error %x\n",
1350                                        next->tx_comp.rcs[i]);
1351                                continue;
1352                        }
1353                        index = be32_to_cpu(next->tx_comp.correlators[i]);
1354                        txbuff = &adapter->tx_pool[pool].tx_buff[index];
1355
1356                        for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
1357                                if (!txbuff->data_dma[j])
1358                                        continue;
1359
1360                                txbuff->data_dma[j] = 0;
1361                                txbuff->used_bounce = false;
1362                        }
1363                        /* if sub_crq was sent indirectly */
1364                        first = txbuff->indir_arr[0].generic.first;
1365                        if (first == IBMVNIC_CRQ_CMD) {
1366                                dma_unmap_single(dev, txbuff->indir_dma,
1367                                                 sizeof(txbuff->indir_arr),
1368                                                 DMA_TO_DEVICE);
1369                        }
1370
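                            /* Only the descriptor carrying the skb counts in
                             * scrq->used; once the queue drains to half of
                             * its entries or less, restart a stopped tx
                             * subqueue.
                             */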
1371                        if (txbuff->last_frag) {
1372                                atomic_dec(&scrq->used);
1373
1374                                if (atomic_read(&scrq->used) <=
1375                                    (adapter->req_tx_entries_per_subcrq / 2) &&
1376                                    netif_subqueue_stopped(adapter->netdev,
1377                                                           txbuff->skb)) {
1378                                        netif_wake_subqueue(adapter->netdev,
1379                                                            scrq->pool_index);
1380                                        netdev_dbg(adapter->netdev,
1381                                                   "Started queue %d\n",
1382                                                   scrq->pool_index);
1383                                }
1384
1385                                dev_kfree_skb_any(txbuff->skb);
1386                        }
1387
1388                        adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
1389                                                     producer_index] = index;
1390                        adapter->tx_pool[pool].producer_index =
1391                            (adapter->tx_pool[pool].producer_index + 1) %
1392                            adapter->req_tx_entries_per_subcrq;
1393                }
1394                /* mark this tx_comp entry as consumed */
1395                next->tx_comp.first = 0;
1396        }
1397
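            /* Re-enable the interrupt, then look again for entries that
             * arrived while it was disabled; otherwise a completion posted
             * in that window would not be processed until the next irq.
             */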
1398        enable_scrq_irq(adapter, scrq);
1399
1400        if (pending_scrq(adapter, scrq)) {
1401                disable_scrq_irq(adapter, scrq);
1402                goto restart_loop;
1403        }
1404
1405        return 0;
1406}
1407
1408static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
1409{
1410        struct ibmvnic_sub_crq_queue *scrq = instance;
1411        struct ibmvnic_adapter *adapter = scrq->adapter;
1412
1413        disable_scrq_irq(adapter, scrq);
1414        ibmvnic_complete_tx(adapter, scrq);
1415
1416        return IRQ_HANDLED;
1417}
1418
1419static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
1420{
1421        struct ibmvnic_sub_crq_queue *scrq = instance;
1422        struct ibmvnic_adapter *adapter = scrq->adapter;
1423
1424        if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
1425                disable_scrq_irq(adapter, scrq);
1426                __napi_schedule(&adapter->napi[scrq->scrq_num]);
1427        }
1428
1429        return IRQ_HANDLED;
1430}
1431
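    /* Map each sub-CRQ's hardware interrupt into the Linux irq space and
     * request the tx/rx handlers.  On failure, free and dispose whatever
     * was set up so far and release the sub-CRQs.
     */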
1432static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
1433{
1434        struct device *dev = &adapter->vdev->dev;
1435        struct ibmvnic_sub_crq_queue *scrq;
1436        int i = 0, j = 0;
1437        int rc = 0;
1438
1439        for (i = 0; i < adapter->req_tx_queues; i++) {
1440                scrq = adapter->tx_scrq[i];
1441                scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
1442
1443                if (!scrq->irq) {
1444                        rc = -EINVAL;
1445                        dev_err(dev, "Error mapping irq\n");
1446                        goto req_tx_irq_failed;
1447                }
1448
1449                rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
1450                                 0, "ibmvnic_tx", scrq);
1451
1452                if (rc) {
1453                        dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
1454                                scrq->irq, rc);
1455                        irq_dispose_mapping(scrq->irq);
1456                        goto req_tx_irq_failed;
1457                }
1458        }
1459
1460        for (i = 0; i < adapter->req_rx_queues; i++) {
1461                scrq = adapter->rx_scrq[i];
1462                scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
1463                if (!scrq->irq) {
1464                        rc = -EINVAL;
1465                        dev_err(dev, "Error mapping irq\n");
1466                        goto req_rx_irq_failed;
1467                }
1468                rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
1469                                 0, "ibmvnic_rx", scrq);
1470                if (rc) {
1471                        dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
1472                                scrq->irq, rc);
1473                        irq_dispose_mapping(scrq->irq);
1474                        goto req_rx_irq_failed;
1475                }
1476        }
1477        return rc;
1478
1479req_rx_irq_failed:
1480        for (j = 0; j < i; j++) {
1481                free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
1482                irq_dispose_mapping(adapter->rx_scrq[j]->irq);
1483        }
1484        i = adapter->req_tx_queues;
1485req_tx_irq_failed:
1486        for (j = 0; j < i; j++) {
1487                free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
1488                irq_dispose_mapping(adapter->tx_scrq[j]->irq);
1489        }
1490        release_sub_crqs_no_irqs(adapter);
1491        return rc;
1492}
1493
1494static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
1495{
1496        struct device *dev = &adapter->vdev->dev;
1497        struct ibmvnic_sub_crq_queue **allqueues;
1498        int registered_queues = 0;
1499        union ibmvnic_crq crq;
1500        int total_queues;
1501        int more = 0;
1502        int i;
1503
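            /* On the first pass, derive the requested capabilities from the
             * queried values.  On a retry after a PARTIALSUCCESS response the
             * req_* fields already hold the server-adjusted values, so skip
             * this block and just rebuild the queues.
             */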
1504        if (!retry) {
1505                /* Sub-CRQ entries are 32 bytes long */
1506                int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
1507
1508                if (adapter->min_tx_entries_per_subcrq > entries_page ||
1509                    adapter->min_rx_add_entries_per_subcrq > entries_page) {
1510                        dev_err(dev, "Fatal, invalid entries per sub-crq\n");
1511                        goto allqueues_failed;
1512                }
1513
1514                /* Get the minimum between the queried max and the number
1515                 * of entries that fit in four pages (entries_page above)
1516                 */
1517                adapter->req_tx_entries_per_subcrq =
1518                    adapter->max_tx_entries_per_subcrq > entries_page ?
1519                    entries_page : adapter->max_tx_entries_per_subcrq;
1520                adapter->req_rx_add_entries_per_subcrq =
1521                    adapter->max_rx_add_entries_per_subcrq > entries_page ?
1522                    entries_page : adapter->max_rx_add_entries_per_subcrq;
1523
1524                adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
1525                adapter->req_rx_queues = adapter->opt_rx_comp_queues;
1526                adapter->req_rx_add_queues = adapter->max_rx_add_queues;
1527
1528                adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
1529        }
1530
1531        total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
1532
1533        allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_ATOMIC);
1534        if (!allqueues)
1535                goto allqueues_failed;
1536
1537        for (i = 0; i < total_queues; i++) {
1538                allqueues[i] = init_sub_crq_queue(adapter);
1539                if (!allqueues[i]) {
1540                        dev_warn(dev, "Couldn't allocate all sub-crqs\n");
1541                        break;
1542                }
1543                registered_queues++;
1544        }
1545
1546        /* Make sure we were able to register the minimum number of queues */
1547        if (registered_queues <
1548            adapter->min_tx_queues + adapter->min_rx_queues) {
1549                dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
1550                goto tx_failed;
1551        }
1552
1553        /* Spread the allocation shortfall between rx and tx queues */
1554        for (i = 0; i < total_queues - registered_queues + more ; i++) {
1555                netdev_dbg(adapter->netdev, "Reducing number of queues\n");
1556                switch (i % 3) {
1557                case 0:
1558                        if (adapter->req_rx_queues > adapter->min_rx_queues)
1559                                adapter->req_rx_queues--;
1560                        else
1561                                more++;
1562                        break;
1563                case 1:
1564                        if (adapter->req_tx_queues > adapter->min_tx_queues)
1565                                adapter->req_tx_queues--;
1566                        else
1567                                more++;
1568                        break;
1569                }
1570        }
1571
1572        adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
1573                                   sizeof(*adapter->tx_scrq), GFP_ATOMIC);
1574        if (!adapter->tx_scrq)
1575                goto tx_failed;
1576
1577        for (i = 0; i < adapter->req_tx_queues; i++) {
1578                adapter->tx_scrq[i] = allqueues[i];
1579                adapter->tx_scrq[i]->pool_index = i;
1580        }
1581
1582        adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
1583                                   sizeof(*adapter->rx_scrq), GFP_ATOMIC);
1584        if (!adapter->rx_scrq)
1585                goto rx_failed;
1586
1587        for (i = 0; i < adapter->req_rx_queues; i++) {
1588                adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
1589                adapter->rx_scrq[i]->scrq_num = i;
1590        }
1591
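            /* Request the chosen capability values from the server.  Each
             * REQUEST_CAPABILITY CRQ bumps running_cap_crqs so the response
             * handler can tell when the whole batch has been answered.
             */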
1592        memset(&crq, 0, sizeof(crq));
1593        crq.request_capability.first = IBMVNIC_CRQ_CMD;
1594        crq.request_capability.cmd = REQUEST_CAPABILITY;
1595
1596        crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
1597        crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
1598        atomic_inc(&adapter->running_cap_crqs);
1599        ibmvnic_send_crq(adapter, &crq);
1600
1601        crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
1602        crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
1603        atomic_inc(&adapter->running_cap_crqs);
1604        ibmvnic_send_crq(adapter, &crq);
1605
1606        crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
1607        crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
1608        atomic_inc(&adapter->running_cap_crqs);
1609        ibmvnic_send_crq(adapter, &crq);
1610
1611        crq.request_capability.capability =
1612            cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
1613        crq.request_capability.number =
1614            cpu_to_be64(adapter->req_tx_entries_per_subcrq);
1615        atomic_inc(&adapter->running_cap_crqs);
1616        ibmvnic_send_crq(adapter, &crq);
1617
1618        crq.request_capability.capability =
1619            cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
1620        crq.request_capability.number =
1621            cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
1622        atomic_inc(&adapter->running_cap_crqs);
1623        ibmvnic_send_crq(adapter, &crq);
1624
1625        crq.request_capability.capability = cpu_to_be16(REQ_MTU);
1626        crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
1627        atomic_inc(&adapter->running_cap_crqs);
1628        ibmvnic_send_crq(adapter, &crq);
1629
1630        if (adapter->netdev->flags & IFF_PROMISC) {
1631                if (adapter->promisc_supported) {
1632                        crq.request_capability.capability =
1633                            cpu_to_be16(PROMISC_REQUESTED);
1634                        crq.request_capability.number = cpu_to_be64(1);
1635                        atomic_inc(&adapter->running_cap_crqs);
1636                        ibmvnic_send_crq(adapter, &crq);
1637                }
1638        } else {
1639                crq.request_capability.capability =
1640                    cpu_to_be16(PROMISC_REQUESTED);
1641                crq.request_capability.number = cpu_to_be64(0);
1642                atomic_inc(&adapter->running_cap_crqs);
1643                ibmvnic_send_crq(adapter, &crq);
1644        }
1645
1646        kfree(allqueues);
1647
1648        return;
1649
1650rx_failed:
1651        kfree(adapter->tx_scrq);
1652        adapter->tx_scrq = NULL;
1653tx_failed:
1654        for (i = 0; i < registered_queues; i++)
1655                release_sub_crq_queue(adapter, allqueues[i]);
1656        kfree(allqueues);
1657allqueues_failed:
1658        ibmvnic_remove(adapter->vdev);
1659}
1660
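    /* An entry is pending when the valid (CMD_RSP) bit is set in its first
     * byte.  The check also returns true while the adapter is closing.
     */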
1661static int pending_scrq(struct ibmvnic_adapter *adapter,
1662                        struct ibmvnic_sub_crq_queue *scrq)
1663{
1664        union sub_crq *entry = &scrq->msgs[scrq->cur];
1665
1666        if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing)
1667                return 1;
1668        else
1669                return 0;
1670}
1671
1672static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
1673                                        struct ibmvnic_sub_crq_queue *scrq)
1674{
1675        union sub_crq *entry;
1676        unsigned long flags;
1677
1678        spin_lock_irqsave(&scrq->lock, flags);
1679        entry = &scrq->msgs[scrq->cur];
1680        if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
1681                if (++scrq->cur == scrq->size)
1682                        scrq->cur = 0;
1683        } else {
1684                entry = NULL;
1685        }
1686        spin_unlock_irqrestore(&scrq->lock, flags);
1687
1688        return entry;
1689}
1690
1691static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
1692{
1693        struct ibmvnic_crq_queue *queue = &adapter->crq;
1694        union ibmvnic_crq *crq;
1695
1696        crq = &queue->msgs[queue->cur];
1697        if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
1698                if (++queue->cur == queue->size)
1699                        queue->cur = 0;
1700        } else {
1701                crq = NULL;
1702        }
1703
1704        return crq;
1705}
1706
1707static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
1708                       union sub_crq *sub_crq)
1709{
1710        unsigned int ua = adapter->vdev->unit_address;
1711        struct device *dev = &adapter->vdev->dev;
1712        u64 *u64_crq = (u64 *)sub_crq;
1713        int rc;
1714
1715        netdev_dbg(adapter->netdev,
1716                   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
1717                   (unsigned long int)cpu_to_be64(remote_handle),
1718                   (unsigned long int)cpu_to_be64(u64_crq[0]),
1719                   (unsigned long int)cpu_to_be64(u64_crq[1]),
1720                   (unsigned long int)cpu_to_be64(u64_crq[2]),
1721                   (unsigned long int)cpu_to_be64(u64_crq[3]));
1722
1723        /* Make sure the hypervisor sees the complete request */
1724        mb();
1725
1726        rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
1727                                cpu_to_be64(remote_handle),
1728                                cpu_to_be64(u64_crq[0]),
1729                                cpu_to_be64(u64_crq[1]),
1730                                cpu_to_be64(u64_crq[2]),
1731                                cpu_to_be64(u64_crq[3]));
1732
1733        if (rc) {
1734                if (rc == H_CLOSED)
1735                        dev_warn(dev, "CRQ Queue closed\n");
1736                dev_err(dev, "Send error (rc=%d)\n", rc);
1737        }
1738
1739        return rc;
1740}
1741
1742static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
1743                                u64 remote_handle, u64 ioba, u64 num_entries)
1744{
1745        unsigned int ua = adapter->vdev->unit_address;
1746        struct device *dev = &adapter->vdev->dev;
1747        int rc;
1748
1749        /* Make sure the hypervisor sees the complete request */
1750        mb();
1751        rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
1752                                cpu_to_be64(remote_handle),
1753                                ioba, num_entries);
1754
1755        if (rc) {
1756                if (rc == H_CLOSED)
1757                        dev_warn(dev, "CRQ Queue closed\n");
1758                dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
1759        }
1760
1761        return rc;
1762}
1763
1764static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
1765                            union ibmvnic_crq *crq)
1766{
1767        unsigned int ua = adapter->vdev->unit_address;
1768        struct device *dev = &adapter->vdev->dev;
1769        u64 *u64_crq = (u64 *)crq;
1770        int rc;
1771
1772        netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
1773                   (unsigned long int)cpu_to_be64(u64_crq[0]),
1774                   (unsigned long int)cpu_to_be64(u64_crq[1]));
1775
1776        /* Make sure the hypervisor sees the complete request */
1777        mb();
1778
1779        rc = plpar_hcall_norets(H_SEND_CRQ, ua,
1780                                cpu_to_be64(u64_crq[0]),
1781                                cpu_to_be64(u64_crq[1]));
1782
1783        if (rc) {
1784                if (rc == H_CLOSED)
1785                        dev_warn(dev, "CRQ Queue closed\n");
1786                dev_warn(dev, "Send error (rc=%d)\n", rc);
1787        }
1788
1789        return rc;
1790}
1791
1792static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
1793{
1794        union ibmvnic_crq crq;
1795
1796        memset(&crq, 0, sizeof(crq));
1797        crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
1798        crq.generic.cmd = IBMVNIC_CRQ_INIT;
1799        netdev_dbg(adapter->netdev, "Sending CRQ init\n");
1800
1801        return ibmvnic_send_crq(adapter, &crq);
1802}
1803
1804static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter)
1805{
1806        union ibmvnic_crq crq;
1807
1808        memset(&crq, 0, sizeof(crq));
1809        crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
1810        crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
1811        netdev_dbg(adapter->netdev, "Sending CRQ init complete\n");
1812
1813        return ibmvnic_send_crq(adapter, &crq);
1814}
1815
1816static int send_version_xchg(struct ibmvnic_adapter *adapter)
1817{
1818        union ibmvnic_crq crq;
1819
1820        memset(&crq, 0, sizeof(crq));
1821        crq.version_exchange.first = IBMVNIC_CRQ_CMD;
1822        crq.version_exchange.cmd = VERSION_EXCHANGE;
1823        crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
1824
1825        return ibmvnic_send_crq(adapter, &crq);
1826}
1827
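    /* Build and map the login and login response buffers, list the
     * negotiated tx/rx sub-CRQ numbers in the login buffer, and send the
     * LOGIN CRQ, tracking it on the adapter's inflight command list.
     */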
1828static void send_login(struct ibmvnic_adapter *adapter)
1829{
1830        struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
1831        struct ibmvnic_login_buffer *login_buffer;
1832        struct ibmvnic_inflight_cmd *inflight_cmd;
1833        struct device *dev = &adapter->vdev->dev;
1834        dma_addr_t rsp_buffer_token;
1835        dma_addr_t buffer_token;
1836        size_t rsp_buffer_size;
1837        union ibmvnic_crq crq;
1838        unsigned long flags;
1839        size_t buffer_size;
1840        __be64 *tx_list_p;
1841        __be64 *rx_list_p;
1842        int i;
1843
1844        buffer_size =
1845            sizeof(struct ibmvnic_login_buffer) +
1846            sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);
1847
1848        login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
1849        if (!login_buffer)
1850                goto buf_alloc_failed;
1851
1852        buffer_token = dma_map_single(dev, login_buffer, buffer_size,
1853                                      DMA_TO_DEVICE);
1854        if (dma_mapping_error(dev, buffer_token)) {
1855                dev_err(dev, "Couldn't map login buffer\n");
1856                goto buf_map_failed;
1857        }
1858
1859        rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
1860                          sizeof(u64) * adapter->req_tx_queues +
1861                          sizeof(u64) * adapter->req_rx_queues +
1862                          sizeof(u64) * adapter->req_rx_queues +
1863                          sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
1864
1865        login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
1866        if (!login_rsp_buffer)
1867                goto buf_rsp_alloc_failed;
1868
1869        rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
1870                                          rsp_buffer_size, DMA_FROM_DEVICE);
1871        if (dma_mapping_error(dev, rsp_buffer_token)) {
1872                dev_err(dev, "Couldn't map login rsp buffer\n");
1873                goto buf_rsp_map_failed;
1874        }
1875        inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
1876        if (!inflight_cmd) {
1877                dev_err(dev, "Couldn't allocate inflight_cmd\n");
1878                goto inflight_alloc_failed;
1879        }
1880        adapter->login_buf = login_buffer;
1881        adapter->login_buf_token = buffer_token;
1882        adapter->login_buf_sz = buffer_size;
1883        adapter->login_rsp_buf = login_rsp_buffer;
1884        adapter->login_rsp_buf_token = rsp_buffer_token;
1885        adapter->login_rsp_buf_sz = rsp_buffer_size;
1886
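            /* The login buffer is a fixed header followed by the list of tx
             * sub-CRQ numbers and then the list of rx sub-CRQ numbers; the
             * offsets below describe that layout to the server.
             */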
1887        login_buffer->len = cpu_to_be32(buffer_size);
1888        login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
1889        login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
1890        login_buffer->off_txcomp_subcrqs =
1891            cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
1892        login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
1893        login_buffer->off_rxcomp_subcrqs =
1894            cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
1895                        sizeof(u64) * adapter->req_tx_queues);
1896        login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
1897        login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
1898
1899        tx_list_p = (__be64 *)((char *)login_buffer +
1900                                      sizeof(struct ibmvnic_login_buffer));
1901        rx_list_p = (__be64 *)((char *)login_buffer +
1902                                      sizeof(struct ibmvnic_login_buffer) +
1903                                      sizeof(u64) * adapter->req_tx_queues);
1904
1905        for (i = 0; i < adapter->req_tx_queues; i++) {
1906                if (adapter->tx_scrq[i]) {
1907                        tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
1908                                                   crq_num);
1909                }
1910        }
1911
1912        for (i = 0; i < adapter->req_rx_queues; i++) {
1913                if (adapter->rx_scrq[i]) {
1914                        rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
1915                                                   crq_num);
1916                }
1917        }
1918
1919        netdev_dbg(adapter->netdev, "Login Buffer:\n");
1920        for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
1921                netdev_dbg(adapter->netdev, "%016lx\n",
1922                           ((unsigned long int *)(adapter->login_buf))[i]);
1923        }
1924
1925        memset(&crq, 0, sizeof(crq));
1926        crq.login.first = IBMVNIC_CRQ_CMD;
1927        crq.login.cmd = LOGIN;
1928        crq.login.ioba = cpu_to_be32(buffer_token);
1929        crq.login.len = cpu_to_be32(buffer_size);
1930
1931        memcpy(&inflight_cmd->crq, &crq, sizeof(crq));
1932
1933        spin_lock_irqsave(&adapter->inflight_lock, flags);
1934        list_add_tail(&inflight_cmd->list, &adapter->inflight);
1935        spin_unlock_irqrestore(&adapter->inflight_lock, flags);
1936
1937        ibmvnic_send_crq(adapter, &crq);
1938
1939        return;
1940
1941inflight_alloc_failed:
1942        dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
1943                         DMA_FROM_DEVICE);
1944buf_rsp_map_failed:
1945        kfree(login_rsp_buffer);
1946buf_rsp_alloc_failed:
1947        dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
1948buf_map_failed:
1949        kfree(login_buffer);
1950buf_alloc_failed:
1951        return;
1952}
1953
1954static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
1955                             u32 len, u8 map_id)
1956{
1957        union ibmvnic_crq crq;
1958
1959        memset(&crq, 0, sizeof(crq));
1960        crq.request_map.first = IBMVNIC_CRQ_CMD;
1961        crq.request_map.cmd = REQUEST_MAP;
1962        crq.request_map.map_id = map_id;
1963        crq.request_map.ioba = cpu_to_be32(addr);
1964        crq.request_map.len = cpu_to_be32(len);
1965        ibmvnic_send_crq(adapter, &crq);
1966}
1967
1968static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
1969{
1970        union ibmvnic_crq crq;
1971
1972        memset(&crq, 0, sizeof(crq));
1973        crq.request_unmap.first = IBMVNIC_CRQ_CMD;
1974        crq.request_unmap.cmd = REQUEST_UNMAP;
1975        crq.request_unmap.map_id = map_id;
1976        ibmvnic_send_crq(adapter, &crq);
1977}
1978
1979static void send_map_query(struct ibmvnic_adapter *adapter)
1980{
1981        union ibmvnic_crq crq;
1982
1983        memset(&crq, 0, sizeof(crq));
1984        crq.query_map.first = IBMVNIC_CRQ_CMD;
1985        crq.query_map.cmd = QUERY_MAP;
1986        ibmvnic_send_crq(adapter, &crq);
1987}
1988
1989/* Send a series of CRQs requesting various capabilities of the VNIC server */
1990static void send_cap_queries(struct ibmvnic_adapter *adapter)
1991{
1992        union ibmvnic_crq crq;
1993
1994        atomic_set(&adapter->running_cap_crqs, 0);
1995        memset(&crq, 0, sizeof(crq));
1996        crq.query_capability.first = IBMVNIC_CRQ_CMD;
1997        crq.query_capability.cmd = QUERY_CAPABILITY;
1998
1999        crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
2000        atomic_inc(&adapter->running_cap_crqs);
2001        ibmvnic_send_crq(adapter, &crq);
2002
2003        crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
2004        atomic_inc(&adapter->running_cap_crqs);
2005        ibmvnic_send_crq(adapter, &crq);
2006
2007        crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
2008        atomic_inc(&adapter->running_cap_crqs);
2009        ibmvnic_send_crq(adapter, &crq);
2010
2011        crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
2012        atomic_inc(&adapter->running_cap_crqs);
2013        ibmvnic_send_crq(adapter, &crq);
2014
2015        crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
2016        atomic_inc(&adapter->running_cap_crqs);
2017        ibmvnic_send_crq(adapter, &crq);
2018
2019        crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
2020        atomic_inc(&adapter->running_cap_crqs);
2021        ibmvnic_send_crq(adapter, &crq);
2022
2023        crq.query_capability.capability =
2024            cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
2025        atomic_inc(&adapter->running_cap_crqs);
2026        ibmvnic_send_crq(adapter, &crq);
2027
2028        crq.query_capability.capability =
2029            cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
2030        atomic_inc(&adapter->running_cap_crqs);
2031        ibmvnic_send_crq(adapter, &crq);
2032
2033        crq.query_capability.capability =
2034            cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
2035        atomic_inc(&adapter->running_cap_crqs);
2036        ibmvnic_send_crq(adapter, &crq);
2037
2038        crq.query_capability.capability =
2039            cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
2040        atomic_inc(&adapter->running_cap_crqs);
2041        ibmvnic_send_crq(adapter, &crq);
2042
2043        crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
2044        atomic_inc(&adapter->running_cap_crqs);
2045        ibmvnic_send_crq(adapter, &crq);
2046
2047        crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
2048        atomic_inc(&adapter->running_cap_crqs);
2049        ibmvnic_send_crq(adapter, &crq);
2050
2051        crq.query_capability.capability = cpu_to_be16(MIN_MTU);
2052        atomic_inc(&adapter->running_cap_crqs);
2053        ibmvnic_send_crq(adapter, &crq);
2054
2055        crq.query_capability.capability = cpu_to_be16(MAX_MTU);
2056        atomic_inc(&adapter->running_cap_crqs);
2057        ibmvnic_send_crq(adapter, &crq);
2058
2059        crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
2060        atomic_inc(&adapter->running_cap_crqs);
2061        ibmvnic_send_crq(adapter, &crq);
2062
2063        crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
2064        atomic_inc(&adapter->running_cap_crqs);
2065        ibmvnic_send_crq(adapter, &crq);
2066
2067        crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
2068        atomic_inc(&adapter->running_cap_crqs);
2069        ibmvnic_send_crq(adapter, &crq);
2070
2071        crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
2072        atomic_inc(&adapter->running_cap_crqs);
2073        ibmvnic_send_crq(adapter, &crq);
2074
2075        crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
2076        atomic_inc(&adapter->running_cap_crqs);
2077        ibmvnic_send_crq(adapter, &crq);
2078
2079        crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
2080        atomic_inc(&adapter->running_cap_crqs);
2081        ibmvnic_send_crq(adapter, &crq);
2082
2083        crq.query_capability.capability =
2084                        cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
2085        atomic_inc(&adapter->running_cap_crqs);
2086        ibmvnic_send_crq(adapter, &crq);
2087
2088        crq.query_capability.capability =
2089                        cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
2090        atomic_inc(&adapter->running_cap_crqs);
2091        ibmvnic_send_crq(adapter, &crq);
2092
2093        crq.query_capability.capability =
2094                        cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
2095        atomic_inc(&adapter->running_cap_crqs);
2096        ibmvnic_send_crq(adapter, &crq);
2097
2098        crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
2099        atomic_inc(&adapter->running_cap_crqs);
2100        ibmvnic_send_crq(adapter, &crq);
2101}
2102
2103static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
2104{
2105        struct device *dev = &adapter->vdev->dev;
2106        struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
2107        union ibmvnic_crq crq;
2108        int i;
2109
2110        dma_unmap_single(dev, adapter->ip_offload_tok,
2111                         sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
2112
2113        netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
2114        for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
2115                netdev_dbg(adapter->netdev, "%016lx\n",
2116                           ((unsigned long int *)(buf))[i]);
2117
2118        netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
2119        netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
2120        netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
2121                   buf->tcp_ipv4_chksum);
2122        netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
2123                   buf->tcp_ipv6_chksum);
2124        netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
2125                   buf->udp_ipv4_chksum);
2126        netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
2127                   buf->udp_ipv6_chksum);
2128        netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
2129                   buf->large_tx_ipv4);
2130        netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
2131                   buf->large_tx_ipv6);
2132        netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
2133                   buf->large_rx_ipv4);
2134        netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
2135                   buf->large_rx_ipv6);
2136        netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
2137                   buf->max_ipv4_header_size);
2138        netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
2139                   buf->max_ipv6_header_size);
2140        netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
2141                   buf->max_tcp_header_size);
2142        netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
2143                   buf->max_udp_header_size);
2144        netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
2145                   buf->max_large_tx_size);
2146        netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
2147                   buf->max_large_rx_size);
2148        netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
2149                   buf->ipv6_extension_header);
2150        netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
2151                   buf->tcp_pseudosum_req);
2152        netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
2153                   buf->num_ipv6_ext_headers);
2154        netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
2155                   buf->off_ipv6_ext_headers);
2156
2157        adapter->ip_offload_ctrl_tok =
2158            dma_map_single(dev, &adapter->ip_offload_ctrl,
2159                           sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
2160
2161        if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
2162                dev_err(dev, "Couldn't map ip offload control buffer\n");
2163                return;
2164        }
2165
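            /* Echo the checksum offload capabilities the driver will use
             * back to the server in the control buffer.
             */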
2166        adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
2167        adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
2168        adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
2169        adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
2170        adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
2171
2172        /* large_tx/rx disabled for now, additional features needed */
2173        adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
2174        adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
2175        adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
2176        adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
2177
2178        adapter->netdev->features = NETIF_F_GSO;
2179
2180        if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
2181                adapter->netdev->features |= NETIF_F_IP_CSUM;
2182
2183        if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
2184                adapter->netdev->features |= NETIF_F_IPV6_CSUM;
2185
2186        if ((adapter->netdev->features &
2187            (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
2188                adapter->netdev->features |= NETIF_F_RXCSUM;
2189
2190        memset(&crq, 0, sizeof(crq));
2191        crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
2192        crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
2193        crq.control_ip_offload.len =
2194            cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
2195        crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
2196        ibmvnic_send_crq(adapter, &crq);
2197}
2198
2199static void handle_error_info_rsp(union ibmvnic_crq *crq,
2200                                  struct ibmvnic_adapter *adapter)
2201{
2202        struct device *dev = &adapter->vdev->dev;
2203        struct ibmvnic_error_buff *error_buff, *tmp;
2204        unsigned long flags;
2205        bool found = false;
2206        int i;
2207
2208        if (crq->request_error_rsp.rc.code) {
2209                dev_info(dev, "Request Error Rsp returned with rc=%x\n",
2210                         crq->request_error_rsp.rc.code);
2211                return;
2212        }
2213
2214        spin_lock_irqsave(&adapter->error_list_lock, flags);
2215        list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
2216                if (error_buff->error_id == crq->request_error_rsp.error_id) {
2217                        found = true;
2218                        list_del(&error_buff->list);
2219                        break;
2220                }
2221        spin_unlock_irqrestore(&adapter->error_list_lock, flags);
2222
2223        if (!found) {
2224                dev_err(dev, "Couldn't find error id %x\n",
2225                        be32_to_cpu(crq->request_error_rsp.error_id));
2226                return;
2227        }
2228
2229        dev_err(dev, "Detailed info for error id %x:",
2230                be32_to_cpu(crq->request_error_rsp.error_id));
2231
2232        for (i = 0; i < error_buff->len; i++) {
2233                pr_cont("%02x", (int)error_buff->buff[i]);
2234                if (i % 8 == 7)
2235                        pr_cont(" ");
2236        }
2237        pr_cont("\n");
2238
2239        dma_unmap_single(dev, error_buff->dma, error_buff->len,
2240                         DMA_FROM_DEVICE);
2241        kfree(error_buff->buff);
2242        kfree(error_buff);
2243}
2244
2245static void handle_dump_size_rsp(union ibmvnic_crq *crq,
2246                                 struct ibmvnic_adapter *adapter)
2247{
2248        int len = be32_to_cpu(crq->request_dump_size_rsp.len);
2249        struct ibmvnic_inflight_cmd *inflight_cmd;
2250        struct device *dev = &adapter->vdev->dev;
2251        union ibmvnic_crq newcrq;
2252        unsigned long flags;
2253
2254        /* allocate and map buffer */
2255        adapter->dump_data = kmalloc(len, GFP_KERNEL);
2256        if (!adapter->dump_data) {
2257                complete(&adapter->fw_done);
2258                return;
2259        }
2260
2261        adapter->dump_data_token = dma_map_single(dev, adapter->dump_data, len,
2262                                                  DMA_FROM_DEVICE);
2263
2264        if (dma_mapping_error(dev, adapter->dump_data_token)) {
2265                if (!firmware_has_feature(FW_FEATURE_CMO))
2266                        dev_err(dev, "Couldn't map dump data\n");
2267                kfree(adapter->dump_data);
2268                complete(&adapter->fw_done);
2269                return;
2270        }
2271
2272        inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
2273        if (!inflight_cmd) {
2274                dma_unmap_single(dev, adapter->dump_data_token, len,
2275                                 DMA_FROM_DEVICE);
2276                kfree(adapter->dump_data);
2277                complete(&adapter->fw_done);
2278                return;
2279        }
2280
2281        memset(&newcrq, 0, sizeof(newcrq));
2282        newcrq.request_dump.first = IBMVNIC_CRQ_CMD;
2283        newcrq.request_dump.cmd = REQUEST_DUMP;
2284        newcrq.request_dump.ioba = cpu_to_be32(adapter->dump_data_token);
2285        newcrq.request_dump.len = cpu_to_be32(adapter->dump_data_size);
2286
2287        memcpy(&inflight_cmd->crq, &newcrq, sizeof(newcrq));
2288
2289        spin_lock_irqsave(&adapter->inflight_lock, flags);
2290        list_add_tail(&inflight_cmd->list, &adapter->inflight);
2291        spin_unlock_irqrestore(&adapter->inflight_lock, flags);
2292
2293        ibmvnic_send_crq(adapter, &newcrq);
2294}
2295
2296static void handle_error_indication(union ibmvnic_crq *crq,
2297                                    struct ibmvnic_adapter *adapter)
2298{
2299        int detail_len = be32_to_cpu(crq->error_indication.detail_error_sz);
2300        struct ibmvnic_inflight_cmd *inflight_cmd;
2301        struct device *dev = &adapter->vdev->dev;
2302        struct ibmvnic_error_buff *error_buff;
2303        union ibmvnic_crq new_crq;
2304        unsigned long flags;
2305
2306        dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
2307                crq->error_indication.
2308                    flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
2309                be32_to_cpu(crq->error_indication.error_id),
2310                be16_to_cpu(crq->error_indication.error_cause));
2311
2312        error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
2313        if (!error_buff)
2314                return;
2315
2316        error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
2317        if (!error_buff->buff) {
2318                kfree(error_buff);
2319                return;
2320        }
2321
2322        error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
2323                                         DMA_FROM_DEVICE);
2324        if (dma_mapping_error(dev, error_buff->dma)) {
2325                if (!firmware_has_feature(FW_FEATURE_CMO))
2326                        dev_err(dev, "Couldn't map error buffer\n");
2327                kfree(error_buff->buff);
2328                kfree(error_buff);
2329                return;
2330        }
2331
2332        inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
2333        if (!inflight_cmd) {
2334                dma_unmap_single(dev, error_buff->dma, detail_len,
2335                                 DMA_FROM_DEVICE);
2336                kfree(error_buff->buff);
2337                kfree(error_buff);
2338                return;
2339        }
2340
2341        error_buff->len = detail_len;
2342        error_buff->error_id = crq->error_indication.error_id;
2343
2344        spin_lock_irqsave(&adapter->error_list_lock, flags);
2345        list_add_tail(&error_buff->list, &adapter->errors);
2346        spin_unlock_irqrestore(&adapter->error_list_lock, flags);
2347
2348        memset(&new_crq, 0, sizeof(new_crq));
2349        new_crq.request_error_info.first = IBMVNIC_CRQ_CMD;
2350        new_crq.request_error_info.cmd = REQUEST_ERROR_INFO;
2351        new_crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
2352        new_crq.request_error_info.len = cpu_to_be32(detail_len);
2353        new_crq.request_error_info.error_id = crq->error_indication.error_id;
2354
2355        memcpy(&inflight_cmd->crq, &new_crq, sizeof(new_crq));
2356
2357        spin_lock_irqsave(&adapter->inflight_lock, flags);
2358        list_add_tail(&inflight_cmd->list, &adapter->inflight);
2359        spin_unlock_irqrestore(&adapter->inflight_lock, flags);
2360
2361        ibmvnic_send_crq(adapter, &new_crq);
2362}
2363
2364static void handle_change_mac_rsp(union ibmvnic_crq *crq,
2365                                  struct ibmvnic_adapter *adapter)
2366{
2367        struct net_device *netdev = adapter->netdev;
2368        struct device *dev = &adapter->vdev->dev;
2369        long rc;
2370
2371        rc = crq->change_mac_addr_rsp.rc.code;
2372        if (rc) {
2373                dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
2374                return;
2375        }
2376        memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
2377               ETH_ALEN);
2378}
2379
2380static void handle_request_cap_rsp(union ibmvnic_crq *crq,
2381                                   struct ibmvnic_adapter *adapter)
2382{
2383        struct device *dev = &adapter->vdev->dev;
2384        u64 *req_value;
2385        char *name;
2386
2387        atomic_dec(&adapter->running_cap_crqs);
2388        switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
2389        case REQ_TX_QUEUES:
2390                req_value = &adapter->req_tx_queues;
2391                name = "tx";
2392                break;
2393        case REQ_RX_QUEUES:
2394                req_value = &adapter->req_rx_queues;
2395                name = "rx";
2396                break;
2397        case REQ_RX_ADD_QUEUES:
2398                req_value = &adapter->req_rx_add_queues;
2399                name = "rx_add";
2400                break;
2401        case REQ_TX_ENTRIES_PER_SUBCRQ:
2402                req_value = &adapter->req_tx_entries_per_subcrq;
2403                name = "tx_entries_per_subcrq";
2404                break;
2405        case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
2406                req_value = &adapter->req_rx_add_entries_per_subcrq;
2407                name = "rx_add_entries_per_subcrq";
2408                break;
2409        case REQ_MTU:
2410                req_value = &adapter->req_mtu;
2411                name = "mtu";
2412                break;
2413        case PROMISC_REQUESTED:
2414                req_value = &adapter->promisc;
2415                name = "promisc";
2416                break;
2417        default:
2418                dev_err(dev, "Got invalid cap request rsp %d\n",
2419                        be16_to_cpu(crq->request_capability.capability));
2420                return;
2421        }
2422
2423        switch (crq->request_capability_rsp.rc.code) {
2424        case SUCCESS:
2425                break;
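            /* The server granted less than was asked for and suggests a new
             * value in the response; adopt it, drop the sub-CRQs without
             * touching IRQs, and renegotiate with the adjusted value.
             */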
2426        case PARTIALSUCCESS:
2427                dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
2428                         *req_value,
2429                         (long int)be64_to_cpu(crq->request_capability_rsp.
2430                                               number), name);
2431                release_sub_crqs_no_irqs(adapter);
2432                *req_value = be64_to_cpu(crq->request_capability_rsp.number);
2433                init_sub_crqs(adapter, 1);
2434                return;
2435        default:
2436                dev_err(dev, "Error %d in request cap rsp\n",
2437                        crq->request_capability_rsp.rc.code);
2438                return;
2439        }
2440
2441        /* Done receiving requested capabilities, query IP offload support */
2442        if (atomic_read(&adapter->running_cap_crqs) == 0) {
2443                union ibmvnic_crq newcrq;
2444                int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
2445                struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
2446                    &adapter->ip_offload_buf;
2447
2448                adapter->wait_capability = false;
2449                adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
2450                                                         buf_sz,
2451                                                         DMA_FROM_DEVICE);
2452
2453                if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
2454                        if (!firmware_has_feature(FW_FEATURE_CMO))
2455                                dev_err(dev, "Couldn't map offload buffer\n");
2456                        return;
2457                }
2458
2459                memset(&newcrq, 0, sizeof(newcrq));
2460                newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
2461                newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
2462                newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
2463                newcrq.query_ip_offload.ioba =
2464                    cpu_to_be32(adapter->ip_offload_tok);
2465
2466                ibmvnic_send_crq(adapter, &newcrq);
2467        }
2468}
2469
2470static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
2471                            struct ibmvnic_adapter *adapter)
2472{
2473        struct device *dev = &adapter->vdev->dev;
2474        struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
2475        struct ibmvnic_login_buffer *login = adapter->login_buf;
2476        union ibmvnic_crq crq;
2477        int i;
2478
2479        dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
2480                         DMA_BIDIRECTIONAL);
2481        dma_unmap_single(dev, adapter->login_rsp_buf_token,
2482                         adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
2483
2484        /* If the number of queues requested can't be allocated by the
2485         * server, the login response will return with code 1. We will need
2486         * to resend the login buffer with fewer queues requested.
2487         */
2488        if (login_rsp_crq->generic.rc.code) {
2489                adapter->renegotiate = true;
2490                complete(&adapter->init_done);
2491                return 0;
2492        }
2493
2494        netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
2495        for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
2496                netdev_dbg(adapter->netdev, "%016lx\n",
2497                           ((unsigned long int *)(adapter->login_rsp_buf))[i]);
2498        }
2499
2500        /* Sanity checks */
2501        if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
2502            (be32_to_cpu(login->num_rxcomp_subcrqs) *
2503             adapter->req_rx_add_queues !=
2504             be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
2505                dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
2506                ibmvnic_remove(adapter->vdev);
2507                return -EIO;
2508        }
2509        complete(&adapter->init_done);
2510
2511        memset(&crq, 0, sizeof(crq));
2512        crq.request_ras_comp_num.first = IBMVNIC_CRQ_CMD;
2513        crq.request_ras_comp_num.cmd = REQUEST_RAS_COMP_NUM;
2514        ibmvnic_send_crq(adapter, &crq);
2515
2516        return 0;
2517}
2518
2519static void handle_request_map_rsp(union ibmvnic_crq *crq,
2520                                   struct ibmvnic_adapter *adapter)
2521{
2522        struct device *dev = &adapter->vdev->dev;
2523        u8 map_id = crq->request_map_rsp.map_id;
2524        int tx_subcrqs;
2525        int rx_subcrqs;
2526        long rc;
2527        int i;
2528
2529        tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
2530        rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
2531
2532        rc = crq->request_map_rsp.rc.code;
2533        if (rc) {
2534                dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
2535                adapter->map_id--;
2536                /* need to find and zero tx/rx_pool map_id */
2537                for (i = 0; i < tx_subcrqs; i++) {
2538                        if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
2539                                adapter->tx_pool[i].long_term_buff.map_id = 0;
2540                }
2541                for (i = 0; i < rx_subcrqs; i++) {
2542                        if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
2543                                adapter->rx_pool[i].long_term_buff.map_id = 0;
2544                }
2545        }
2546        complete(&adapter->fw_done);
2547}
2548
2549static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
2550                                     struct ibmvnic_adapter *adapter)
2551{
2552        struct device *dev = &adapter->vdev->dev;
2553        long rc;
2554
2555        rc = crq->request_unmap_rsp.rc.code;
2556        if (rc)
2557                dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
2558}
2559
2560static void handle_query_map_rsp(union ibmvnic_crq *crq,
2561                                 struct ibmvnic_adapter *adapter)
2562{
2563        struct net_device *netdev = adapter->netdev;
2564        struct device *dev = &adapter->vdev->dev;
2565        long rc;
2566
2567        rc = crq->query_map_rsp.rc.code;
2568        if (rc) {
2569                dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
2570                return;
2571        }
2572        netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
2573                   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
2574                   crq->query_map_rsp.free_pages);
2575}
2576
2577static void handle_query_cap_rsp(union ibmvnic_crq *crq,
2578                                 struct ibmvnic_adapter *adapter)
2579{
2580        struct net_device *netdev = adapter->netdev;
2581        struct device *dev = &adapter->vdev->dev;
2582        long rc;
2583
2584        atomic_dec(&adapter->running_cap_crqs);
2585        netdev_dbg(netdev, "Outstanding queries: %d\n",
2586                   atomic_read(&adapter->running_cap_crqs));
2587        rc = crq->query_capability.rc.code;
2588        if (rc) {
2589                dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
2590                goto out;
2591        }
2592
2593        switch (be16_to_cpu(crq->query_capability.capability)) {
2594        case MIN_TX_QUEUES:
2595                adapter->min_tx_queues =
2596                    be64_to_cpu(crq->query_capability.number);
2597                netdev_dbg(netdev, "min_tx_queues = %lld\n",
2598                           adapter->min_tx_queues);
2599                break;
2600        case MIN_RX_QUEUES:
2601                adapter->min_rx_queues =
2602                    be64_to_cpu(crq->query_capability.number);
2603                netdev_dbg(netdev, "min_rx_queues = %lld\n",
2604                           adapter->min_rx_queues);
2605                break;
2606        case MIN_RX_ADD_QUEUES:
2607                adapter->min_rx_add_queues =
2608                    be64_to_cpu(crq->query_capability.number);
2609                netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
2610                           adapter->min_rx_add_queues);
2611                break;
2612        case MAX_TX_QUEUES:
2613                adapter->max_tx_queues =
2614                    be64_to_cpu(crq->query_capability.number);
2615                netdev_dbg(netdev, "max_tx_queues = %lld\n",
2616                           adapter->max_tx_queues);
2617                break;
2618        case MAX_RX_QUEUES:
2619                adapter->max_rx_queues =
2620                    be64_to_cpu(crq->query_capability.number);
2621                netdev_dbg(netdev, "max_rx_queues = %lld\n",
2622                           adapter->max_rx_queues);
2623                break;
2624        case MAX_RX_ADD_QUEUES:
2625                adapter->max_rx_add_queues =
2626                    be64_to_cpu(crq->query_capability.number);
2627                netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
2628                           adapter->max_rx_add_queues);
2629                break;
2630        case MIN_TX_ENTRIES_PER_SUBCRQ:
2631                adapter->min_tx_entries_per_subcrq =
2632                    be64_to_cpu(crq->query_capability.number);
2633                netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
2634                           adapter->min_tx_entries_per_subcrq);
2635                break;
2636        case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
2637                adapter->min_rx_add_entries_per_subcrq =
2638                    be64_to_cpu(crq->query_capability.number);
2639                netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
2640                           adapter->min_rx_add_entries_per_subcrq);
2641                break;
2642        case MAX_TX_ENTRIES_PER_SUBCRQ:
2643                adapter->max_tx_entries_per_subcrq =
2644                    be64_to_cpu(crq->query_capability.number);
2645                netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
2646                           adapter->max_tx_entries_per_subcrq);
2647                break;
2648        case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
2649                adapter->max_rx_add_entries_per_subcrq =
2650                    be64_to_cpu(crq->query_capability.number);
2651                netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
2652                           adapter->max_rx_add_entries_per_subcrq);
2653                break;
2654        case TCP_IP_OFFLOAD:
2655                adapter->tcp_ip_offload =
2656                    be64_to_cpu(crq->query_capability.number);
2657                netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
2658                           adapter->tcp_ip_offload);
2659                break;
2660        case PROMISC_SUPPORTED:
2661                adapter->promisc_supported =
2662                    be64_to_cpu(crq->query_capability.number);
2663                netdev_dbg(netdev, "promisc_supported = %lld\n",
2664                           adapter->promisc_supported);
2665                break;
2666        case MIN_MTU:
2667                adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
2668                netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
2669                netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
2670                break;
2671        case MAX_MTU:
2672                adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
2673                netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
2674                netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
2675                break;
2676        case MAX_MULTICAST_FILTERS:
2677                adapter->max_multicast_filters =
2678                    be64_to_cpu(crq->query_capability.number);
2679                netdev_dbg(netdev, "max_multicast_filters = %lld\n",
2680                           adapter->max_multicast_filters);
2681                break;
2682        case VLAN_HEADER_INSERTION:
2683                adapter->vlan_header_insertion =
2684                    be64_to_cpu(crq->query_capability.number);
2685                if (adapter->vlan_header_insertion)
2686                        netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
2687                netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
2688                           adapter->vlan_header_insertion);
2689                break;
2690        case MAX_TX_SG_ENTRIES:
2691                adapter->max_tx_sg_entries =
2692                    be64_to_cpu(crq->query_capability.number);
2693                netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
2694                           adapter->max_tx_sg_entries);
2695                break;
2696        case RX_SG_SUPPORTED:
2697                adapter->rx_sg_supported =
2698                    be64_to_cpu(crq->query_capability.number);
2699                netdev_dbg(netdev, "rx_sg_supported = %lld\n",
2700                           adapter->rx_sg_supported);
2701                break;
2702        case OPT_TX_COMP_SUB_QUEUES:
2703                adapter->opt_tx_comp_sub_queues =
2704                    be64_to_cpu(crq->query_capability.number);
2705                netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
2706                           adapter->opt_tx_comp_sub_queues);
2707                break;
2708        case OPT_RX_COMP_QUEUES:
2709                adapter->opt_rx_comp_queues =
2710                    be64_to_cpu(crq->query_capability.number);
2711                netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
2712                           adapter->opt_rx_comp_queues);
2713                break;
2714        case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
2715                adapter->opt_rx_bufadd_q_per_rx_comp_q =
2716                    be64_to_cpu(crq->query_capability.number);
2717                netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
2718                           adapter->opt_rx_bufadd_q_per_rx_comp_q);
2719                break;
2720        case OPT_TX_ENTRIES_PER_SUBCRQ:
2721                adapter->opt_tx_entries_per_subcrq =
2722                    be64_to_cpu(crq->query_capability.number);
2723                netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
2724                           adapter->opt_tx_entries_per_subcrq);
2725                break;
2726        case OPT_RXBA_ENTRIES_PER_SUBCRQ:
2727                adapter->opt_rxba_entries_per_subcrq =
2728                    be64_to_cpu(crq->query_capability.number);
2729                netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
2730                           adapter->opt_rxba_entries_per_subcrq);
2731                break;
2732        case TX_RX_DESC_REQ:
2733                adapter->tx_rx_desc_req = crq->query_capability.number;
2734                netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
2735                           adapter->tx_rx_desc_req);
2736                break;
2737
2738        default:
2739                netdev_err(netdev, "Got invalid cap rsp %d\n",
2740                           be16_to_cpu(crq->query_capability.capability));
2741        }
2742
2743out:
2744        if (atomic_read(&adapter->running_cap_crqs) == 0) {
2745                adapter->wait_capability = false;
2746                /* We're done querying the capabilities, initialize sub-crqs */
2747                init_sub_crqs(adapter, 0);
2748        }
2749}
2750
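/* Handle a CONTROL_RAS response from the VNIC server.  Once the server
 * confirms the operation, mirror it into the cached firmware component
 * state so the debugfs attributes below report what firmware is actually
 * using.
 */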
2751static void handle_control_ras_rsp(union ibmvnic_crq *crq,
2752                                   struct ibmvnic_adapter *adapter)
2753{
2754        u8 correlator = crq->control_ras_rsp.correlator;
2755        struct device *dev = &adapter->vdev->dev;
2756        bool found = false;
2757        int i;
2758
2759        if (crq->control_ras_rsp.rc.code) {
2760                dev_warn(dev, "Control ras failed rc=%d\n",
2761                         crq->control_ras_rsp.rc.code);
2762                return;
2763        }
2764
2765        for (i = 0; i < adapter->ras_comp_num; i++) {
2766                if (adapter->ras_comps[i].correlator == correlator) {
2767                        found = true;
2768                        break;
2769                }
2770        }
2771
2772        if (!found) {
2773                dev_warn(dev, "Correlator not found on control_ras_rsp\n");
2774                return;
2775        }
2776
2777        switch (crq->control_ras_rsp.op) {
2778        case IBMVNIC_TRACE_LEVEL:
2779                adapter->ras_comps[i].trace_level = crq->control_ras.level;
2780                break;
2781        case IBMVNIC_ERROR_LEVEL:
2782                adapter->ras_comps[i].error_check_level =
2783                    crq->control_ras.level;
2784                break;
2785        case IBMVNIC_TRACE_PAUSE:
2786                adapter->ras_comp_int[i].paused = 1;
2787                break;
2788        case IBMVNIC_TRACE_RESUME:
2789                adapter->ras_comp_int[i].paused = 0;
2790                break;
2791        case IBMVNIC_TRACE_ON:
2792                adapter->ras_comps[i].trace_on = 1;
2793                break;
2794        case IBMVNIC_TRACE_OFF:
2795                adapter->ras_comps[i].trace_on = 0;
2796                break;
2797        case IBMVNIC_CHG_TRACE_BUFF_SZ:
2798                /* trace_buff_sz is 3 bytes, stuff it into an int */
2799                ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[0] = 0;
2800                ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[1] =
2801                    crq->control_ras_rsp.trace_buff_sz[0];
2802                ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[2] =
2803                    crq->control_ras_rsp.trace_buff_sz[1];
2804                ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[3] =
2805                    crq->control_ras_rsp.trace_buff_sz[2];
2806                break;
2807        default:
2808                dev_err(dev, "invalid op %d on control_ras_rsp",
2809                        crq->control_ras_rsp.op);
2810        }
2811}
2812
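/* debugfs read handler for a component's "trace" file.  A coherent DMA
 * buffer is allocated, the server is asked to fill it with the
 * COLLECT_FW_TRACE command, and the data is copied to user space once the
 * fw_done completion fires.
 */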
2813static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
2814                          loff_t *ppos)
2815{
2816        struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2817        struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2818        struct device *dev = &adapter->vdev->dev;
2819        struct ibmvnic_fw_trace_entry *trace;
2820        int num = ras_comp_int->num;
2821        union ibmvnic_crq crq;
2822        dma_addr_t trace_tok;
2823
2824        if (*ppos >= be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
2825                return 0;
2826
2827        trace = dma_alloc_coherent(dev,
2828                                   be32_to_cpu(adapter->ras_comps[num].trace_buff_size),
2829                                   &trace_tok, GFP_KERNEL);
2832        if (!trace) {
2833                dev_err(dev, "Couldn't alloc trace buffer\n");
2834                return 0;
2835        }
2836
2837        memset(&crq, 0, sizeof(crq));
2838        crq.collect_fw_trace.first = IBMVNIC_CRQ_CMD;
2839        crq.collect_fw_trace.cmd = COLLECT_FW_TRACE;
2840        crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator;
2841        crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok);
2842        crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size;
2843
2844        init_completion(&adapter->fw_done);
2845        ibmvnic_send_crq(adapter, &crq);
2846        wait_for_completion(&adapter->fw_done);
2847
2848        if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
2849                len =
2850                    be32_to_cpu(adapter->ras_comps[num].trace_buff_size) -
2851                    *ppos;
2852
2853        if (copy_to_user(user_buf, &((u8 *)trace)[*ppos], len))
                    len = 0; /* fault: report nothing read, but still free the buffer */
            else
                    *ppos += len;
2854
2855        dma_free_coherent(dev,
2856                          be32_to_cpu(adapter->ras_comps[num].trace_buff_size),
2857                          trace, trace_tok);
2859        return len;
2860}
2861
2862static const struct file_operations trace_ops = {
2863        .owner          = THIS_MODULE,
2864        .open           = simple_open,
2865        .read           = trace_read,
2866};
2867
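/* debugfs "paused" attribute: reading reports the cached pause state of a
 * component's firmware trace; writing a zero/non-zero value sends a
 * CONTROL_RAS command to resume or pause tracing.
 */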
2868static ssize_t paused_read(struct file *file, char __user *user_buf, size_t len,
2869                           loff_t *ppos)
2870{
2871        struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2872        struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2873        int num = ras_comp_int->num;
2874        char buff[5]; /*  1 or 0 plus \n and \0 */
2875        int size;
2876
2877        size = sprintf(buff, "%d\n", adapter->ras_comp_int[num].paused);
2878
2879        if (*ppos >= size)
2880                return 0;
2881
2882        if (copy_to_user(user_buf, buff, size))
                    return -EFAULT;
2883        *ppos += size;
2884        return size;
2885}
2886
2887static ssize_t paused_write(struct file *file, const char __user *user_buf,
2888                            size_t len, loff_t *ppos)
2889{
2890        struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2891        struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2892        int num = ras_comp_int->num;
2893        union ibmvnic_crq crq;
2894        unsigned long val;
2895        char buff[9] = {0}; /* decimal max int plus \n and \0 */
2896
2897        if (copy_from_user(buff, user_buf, min(len, sizeof(buff) - 1)))
                    return -EFAULT;
2898        if (kstrtoul(buff, 10, &val))
                    return -EINVAL;
2899
2900        adapter->ras_comp_int[num].paused = val ? 1 : 0;
2901
2902        memset(&crq, 0, sizeof(crq));
2903        crq.control_ras.first = IBMVNIC_CRQ_CMD;
2904        crq.control_ras.cmd = CONTROL_RAS;
2905        crq.control_ras.correlator = adapter->ras_comps[num].correlator;
2906        crq.control_ras.op = val ? IBMVNIC_TRACE_PAUSE : IBMVNIC_TRACE_RESUME;
2907        ibmvnic_send_crq(adapter, &crq);
2908
2909        return len;
2910}
2911
2912static const struct file_operations paused_ops = {
2913        .owner          = THIS_MODULE,
2914        .open           = simple_open,
2915        .read           = paused_read,
2916        .write          = paused_write,
2917};
2918
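/* debugfs "tracing" attribute: reading reports whether tracing is on for a
 * component; writing a zero/non-zero value sends a CONTROL_RAS command to
 * turn tracing off or on.
 */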
2919static ssize_t tracing_read(struct file *file, char __user *user_buf,
2920                            size_t len, loff_t *ppos)
2921{
2922        struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2923        struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2924        int num = ras_comp_int->num;
2925        char buff[5]; /*  1 or 0 plus \n and \0 */
2926        int size;
2927
2928        size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_on);
2929
2930        if (*ppos >= size)
2931                return 0;
2932
2933        if (copy_to_user(user_buf, buff, size))
                    return -EFAULT;
2934        *ppos += size;
2935        return size;
2936}
2937
2938static ssize_t tracing_write(struct file *file, const char __user *user_buf,
2939                             size_t len, loff_t *ppos)
2940{
2941        struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2942        struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2943        int num = ras_comp_int->num;
2944        union ibmvnic_crq crq;
2945        unsigned long val;
2946        char buff[9] = {0}; /* decimal max int plus \n and \0 */
2947
2948        if (copy_from_user(buff, user_buf, min(len, sizeof(buff) - 1)))
                    return -EFAULT;
2949        if (kstrtoul(buff, 10, &val))
                    return -EINVAL;
2950
2951        memset(&crq, 0, sizeof(crq));
2952        crq.control_ras.first = IBMVNIC_CRQ_CMD;
2953        crq.control_ras.cmd = CONTROL_RAS;
2954        crq.control_ras.correlator = adapter->ras_comps[num].correlator;
2955        crq.control_ras.op = val ? IBMVNIC_TRACE_ON : IBMVNIC_TRACE_OFF;
2956        ibmvnic_send_crq(adapter, &crq);

2957        return len;
2958}
2959
2960static const struct file_operations tracing_ops = {
2961        .owner          = THIS_MODULE,
2962        .open           = simple_open,
2963        .read           = tracing_read,
2964        .write          = tracing_write,
2965};
2966
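/* debugfs "error_level" attribute: reading reports a component's error
 * checking level; writes are clamped to 9 and sent to the server with a
 * CONTROL_RAS command.
 */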
2967static ssize_t error_level_read(struct file *file, char __user *user_buf,
2968                                size_t len, loff_t *ppos)
2969{
2970        struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2971        struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2972        int num = ras_comp_int->num;
2973        char buff[5]; /* decimal max char plus \n and \0 */
2974        int size;
2975
2976        size = sprintf(buff, "%d\n", adapter->ras_comps[num].error_check_level);
2977
2978        if (*ppos >= size)
2979                return 0;
2980
2981        if (copy_to_user(user_buf, buff, size))
                    return -EFAULT;
2982        *ppos += size;
2983        return size;
2984}
2985
2986static ssize_t error_level_write(struct file *file, const char __user *user_buf,
2987                                 size_t len, loff_t *ppos)
2988{
2989        struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2990        struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2991        int num = ras_comp_int->num;
2992        union ibmvnic_crq crq;
2993        unsigned long val;
2994        char buff[9] = {0}; /* decimal max int plus \n and \0 */
2995
2996        if (copy_from_user(buff, user_buf, min(len, sizeof(buff) - 1)))
                    return -EFAULT;
2997        if (kstrtoul(buff, 10, &val))
                    return -EINVAL;
2998
2999        if (val > 9)
3000                val = 9;
3001
3002        memset(&crq, 0, sizeof(crq));
3003        crq.control_ras.first = IBMVNIC_CRQ_CMD;
3004        crq.control_ras.cmd = CONTROL_RAS;
3005        crq.control_ras.correlator = adapter->ras_comps[num].correlator;
3006        crq.control_ras.op = IBMVNIC_ERROR_LEVEL;
3007        crq.control_ras.level = val;
3008        ibmvnic_send_crq(adapter, &crq);
3009
3010        return len;
3011}
3012
3013static const struct file_operations error_level_ops = {
3014        .owner          = THIS_MODULE,
3015        .open           = simple_open,
3016        .read           = error_level_read,
3017        .write          = error_level_write,
3018};
3019
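/* debugfs "trace_level" attribute: same pattern as error_level, but for a
 * component's firmware trace level.
 */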
3020static ssize_t trace_level_read(struct file *file, char __user *user_buf,
3021                                size_t len, loff_t *ppos)
3022{
3023        struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3024        struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3025        int num = ras_comp_int->num;
3026        char buff[5]; /* decimal max char plus \n and \0 */
3027        int size;
3028
3029        size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_level);
3030        if (*ppos >= size)
3031                return 0;
3032
3033        if (copy_to_user(user_buf, buff, size))
                    return -EFAULT;
3034        *ppos += size;
3035        return size;
3036}
3037
3038static ssize_t trace_level_write(struct file *file, const char __user *user_buf,
3039                                 size_t len, loff_t *ppos)
3040{
3041        struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3042        struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3043        union ibmvnic_crq crq;
3044        unsigned long val;
3045        char buff[9] = {0}; /* decimal max int plus \n and \0 */
3046
3047        if (copy_from_user(buff, user_buf, min(len, sizeof(buff) - 1)))
                    return -EFAULT;
3048        if (kstrtoul(buff, 10, &val))
                    return -EINVAL;
3049        if (val > 9)
3050                val = 9;
3051
3052        memset(&crq, 0, sizeof(crq));
3053        crq.control_ras.first = IBMVNIC_CRQ_CMD;
3054        crq.control_ras.cmd = CONTROL_RAS;
3055        crq.control_ras.correlator =
3056            adapter->ras_comps[ras_comp_int->num].correlator;
3057        crq.control_ras.op = IBMVNIC_TRACE_LEVEL;
3058        crq.control_ras.level = val;
3059        ibmvnic_send_crq(adapter, &crq);
3060
3061        return len;
3062}
3063
3064static const struct file_operations trace_level_ops = {
3065        .owner          = THIS_MODULE,
3066        .open           = simple_open,
3067        .read           = trace_level_read,
3068        .write          = trace_level_write,
3069};
3070
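/* debugfs "trace_buf_size" attribute: reading reports a component's trace
 * buffer size; writing requests a new size via CONTROL_RAS, packing the
 * value into the 3-byte trace_buff_sz field.
 */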
3071static ssize_t trace_buff_size_read(struct file *file, char __user *user_buf,
3072                                    size_t len, loff_t *ppos)
3073{
3074        struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3075        struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3076        int num = ras_comp_int->num;
3077        char buff[12]; /* decimal max u32 plus \n and \0 */
3078        int size;
3079
3080        size = sprintf(buff, "%u\n",
                           be32_to_cpu(adapter->ras_comps[num].trace_buff_size));
3081        if (*ppos >= size)
3082                return 0;
3083
3084        if (copy_to_user(user_buf, buff, size))
                    return -EFAULT;
3085        *ppos += size;
3086        return size;
3087}
3088
3089static ssize_t trace_buff_size_write(struct file *file,
3090                                     const char __user *user_buf, size_t len,
3091                                     loff_t *ppos)
3092{
3093        struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3094        struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3095        union ibmvnic_crq crq;
3096        unsigned long val;
3097        char buff[9] = {0}; /* decimal max int plus \n and \0 */
3098
3099        if (copy_from_user(buff, user_buf, min(len, sizeof(buff) - 1)))
                    return -EFAULT;
3100        if (kstrtoul(buff, 10, &val))
                    return -EINVAL;
3101
3102        memset(&crq, 0, sizeof(crq));
3103        crq.control_ras.first = IBMVNIC_CRQ_CMD;
3104        crq.control_ras.cmd = CONTROL_RAS;
3105        crq.control_ras.correlator =
3106            adapter->ras_comps[ras_comp_int->num].correlator;
3107        crq.control_ras.op = IBMVNIC_CHG_TRACE_BUFF_SZ;
3108        /* trace_buff_sz is 3 bytes, stuff an int into it */
3109        crq.control_ras.trace_buff_sz[0] = ((u8 *)(&val))[5];
3110        crq.control_ras.trace_buff_sz[1] = ((u8 *)(&val))[6];
3111        crq.control_ras.trace_buff_sz[2] = ((u8 *)(&val))[7];
3112        ibmvnic_send_crq(adapter, &crq);
3113
3114        return len;
3115}
3116
3117static const struct file_operations trace_size_ops = {
3118        .owner          = THIS_MODULE,
3119        .open           = simple_open,
3120        .read           = trace_buff_size_read,
3121        .write          = trace_buff_size_write,
3122};
3123
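/* The server has filled in the firmware component table; rebuild the
 * per-component debugfs tree.  With debugfs mounted in its usual place the
 * files end up at, for example (path shown for illustration only):
 *
 *   /sys/kernel/debug/ibmvnic_<unit-address>/ras_comps/<component>/trace_level
 */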
3124static void handle_request_ras_comps_rsp(union ibmvnic_crq *crq,
3125                                         struct ibmvnic_adapter *adapter)
3126{
3127        struct device *dev = &adapter->vdev->dev;
3128        struct dentry *dir_ent;
3129        struct dentry *ent;
3130        int i;
3131
3132        debugfs_remove_recursive(adapter->ras_comps_ent);
3133
3134        adapter->ras_comps_ent = debugfs_create_dir("ras_comps",
3135                                                    adapter->debugfs_dir);
3136        if (!adapter->ras_comps_ent || IS_ERR(adapter->ras_comps_ent)) {
3137                dev_info(dev, "debugfs create ras_comps dir failed\n");
3138                return;
3139        }
3140
3141        for (i = 0; i < adapter->ras_comp_num; i++) {
3142                dir_ent = debugfs_create_dir(adapter->ras_comps[i].name,
3143                                             adapter->ras_comps_ent);
3144                if (!dir_ent || IS_ERR(dir_ent)) {
3145                        dev_info(dev, "debugfs create %s dir failed\n",
3146                                 adapter->ras_comps[i].name);
3147                        continue;
3148                }
3149
3150                adapter->ras_comp_int[i].adapter = adapter;
3151                adapter->ras_comp_int[i].num = i;
3152                adapter->ras_comp_int[i].desc_blob.data =
3153                    &adapter->ras_comps[i].description;
3154                adapter->ras_comp_int[i].desc_blob.size =
3155                    sizeof(adapter->ras_comps[i].description);
3156
3157                /* Don't need to remember the dentries because the debugfs dir
3158                 * gets removed recursively
3159                 */
3160                ent = debugfs_create_blob("description", S_IRUGO, dir_ent,
3161                                          &adapter->ras_comp_int[i].desc_blob);
3162                ent = debugfs_create_file("trace_buf_size", S_IRUGO | S_IWUSR,
3163                                          dir_ent, &adapter->ras_comp_int[i],
3164                                          &trace_size_ops);
3165                ent = debugfs_create_file("trace_level",
3166                                          S_IRUGO |
3167                                          (adapter->ras_comps[i].trace_level !=
3168                                           0xFF  ? S_IWUSR : 0),
3169                                           dir_ent, &adapter->ras_comp_int[i],
3170                                           &trace_level_ops);
3171                ent = debugfs_create_file("error_level",
3172                                          S_IRUGO |
3173                                          (adapter->ras_comps[i].error_check_level !=
3174                                           0xFF ? S_IWUSR : 0),
3175                                          dir_ent, &adapter->ras_comp_int[i],
3176                                          &error_level_ops);
3178                ent = debugfs_create_file("tracing", S_IRUGO | S_IWUSR,
3179                                          dir_ent, &adapter->ras_comp_int[i],
3180                                          &tracing_ops);
3181                ent = debugfs_create_file("paused", S_IRUGO | S_IWUSR,
3182                                          dir_ent, &adapter->ras_comp_int[i],
3183                                          &paused_ops);
3184                ent = debugfs_create_file("trace", S_IRUGO, dir_ent,
3185                                          &adapter->ras_comp_int[i],
3186                                          &trace_ops);
3187        }
3188}
3189
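/* The server has reported how many firmware components it exposes.
 * Allocate the DMA buffer and per-component bookkeeping, then ask the
 * server to fill in the component table with a REQUEST_RAS_COMPS command.
 */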
3190static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,
3191                                            struct ibmvnic_adapter *adapter)
3192{
3193        int len = adapter->ras_comp_num * sizeof(struct ibmvnic_fw_component);
3194        struct device *dev = &adapter->vdev->dev;
3195        union ibmvnic_crq newcrq;
3196
3197        adapter->ras_comps = dma_alloc_coherent(dev, len,
3198                                                &adapter->ras_comps_tok,
3199                                                GFP_KERNEL);
3200        if (!adapter->ras_comps) {
3201                if (!firmware_has_feature(FW_FEATURE_CMO))
3202                        dev_err(dev, "Couldn't alloc fw comps buffer\n");
3203                return;
3204        }
3205
3206        adapter->ras_comp_int = kmalloc(adapter->ras_comp_num *
3207                                        sizeof(struct ibmvnic_fw_comp_internal),
3208                                        GFP_KERNEL);
3209        if (!adapter->ras_comp_int) {
3210                dma_free_coherent(dev, len, adapter->ras_comps,
3211                                  adapter->ras_comps_tok);
                    adapter->ras_comps = NULL;
                    return;
            }
3212
3213        memset(&newcrq, 0, sizeof(newcrq));
3214        newcrq.request_ras_comps.first = IBMVNIC_CRQ_CMD;
3215        newcrq.request_ras_comps.cmd = REQUEST_RAS_COMPS;
3216        newcrq.request_ras_comps.ioba = cpu_to_be32(adapter->ras_comps_tok);
3217        newcrq.request_ras_comps.len = cpu_to_be32(len);
3218        ibmvnic_send_crq(adapter, &newcrq);
3219}
3220
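/* Walk the list of commands that were sent to the server but will never
 * complete (e.g. across a transport event) and release the resources tied
 * to them: login buffers, pending error-info buffers, and any waiter
 * blocked on a firmware dump.
 */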
3221static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
3222{
3223        struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
3224        struct device *dev = &adapter->vdev->dev;
3225        struct ibmvnic_error_buff *error_buff, *tmp2;
3226        unsigned long flags;
3227        unsigned long flags2;
3228
3229        spin_lock_irqsave(&adapter->inflight_lock, flags);
3230        list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
3231                switch (inflight_cmd->crq.generic.cmd) {
3232                case LOGIN:
3233                        dma_unmap_single(dev, adapter->login_buf_token,
3234                                         adapter->login_buf_sz,
3235                                         DMA_BIDIRECTIONAL);
3236                        dma_unmap_single(dev, adapter->login_rsp_buf_token,
3237                                         adapter->login_rsp_buf_sz,
3238                                         DMA_BIDIRECTIONAL);
3239                        kfree(adapter->login_rsp_buf);
3240                        kfree(adapter->login_buf);
3241                        break;
3242                case REQUEST_DUMP:
3243                        complete(&adapter->fw_done);
3244                        break;
3245                case REQUEST_ERROR_INFO:
3246                        spin_lock_irqsave(&adapter->error_list_lock, flags2);
3247                        list_for_each_entry_safe(error_buff, tmp2,
3248                                                 &adapter->errors, list) {
3249                                dma_unmap_single(dev, error_buff->dma,
3250                                                 error_buff->len,
3251                                                 DMA_FROM_DEVICE);
3252                                kfree(error_buff->buff);
3253                                list_del(&error_buff->list);
3254                                kfree(error_buff);
3255                        }
3256                        spin_unlock_irqrestore(&adapter->error_list_lock,
3257                                               flags2);
3258                        break;
3259                }
3260                list_del(&inflight_cmd->list);
3261                kfree(inflight_cmd);
3262        }
3263        spin_unlock_irqrestore(&adapter->inflight_lock, flags);
3264}
3265
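/* Worker scheduled from the CRQ handler on transport events.  It drops any
 * in-flight commands and sub-CRQs; after a partition migration it also
 * re-enables the main CRQ and restarts the init handshake with the server.
 */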
3266static void ibmvnic_xport_event(struct work_struct *work)
3267{
3268        struct ibmvnic_adapter *adapter = container_of(work,
3269                                                       struct ibmvnic_adapter,
3270                                                       ibmvnic_xport);
3271        struct device *dev = &adapter->vdev->dev;
3272        long rc;
3273
3274        ibmvnic_free_inflight(adapter);
3275        release_sub_crqs(adapter);
3276        if (adapter->migrated) {
3277                rc = ibmvnic_reenable_crq_queue(adapter);
3278                if (rc)
3279                        dev_err(dev, "Error after enable rc=%ld\n", rc);
3280                adapter->migrated = false;
3281                rc = ibmvnic_send_crq_init(adapter);
3282                if (rc)
3283                        dev_err(dev, "Error sending init rc=%ld\n", rc);
3284        }
3285}
3286
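/* Central dispatcher for incoming CRQ messages.  Initialization and
 * transport events are handled directly; command responses are switched on
 * the command code and handed to the matching handler.
 */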
3287static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
3288                               struct ibmvnic_adapter *adapter)
3289{
3290        struct ibmvnic_generic_crq *gen_crq = &crq->generic;
3291        struct net_device *netdev = adapter->netdev;
3292        struct device *dev = &adapter->vdev->dev;
3293        long rc;
3294
3295        netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
3296                   ((unsigned long int *)crq)[0],
3297                   ((unsigned long int *)crq)[1]);
3298        switch (gen_crq->first) {
3299        case IBMVNIC_CRQ_INIT_RSP:
3300                switch (gen_crq->cmd) {
3301                case IBMVNIC_CRQ_INIT:
3302                        dev_info(dev, "Partner initialized\n");
3303                        /* Send back a response */
3304                        rc = ibmvnic_send_crq_init_complete(adapter);
3305                        if (!rc)
3306                                schedule_work(&adapter->vnic_crq_init);
3307                        else
3308                                dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
3309                        break;
3310                case IBMVNIC_CRQ_INIT_COMPLETE:
3311                        dev_info(dev, "Partner initialization complete\n");
3312                        send_version_xchg(adapter);
3313                        break;
3314                default:
3315                        dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
3316                }
3317                return;
3318        case IBMVNIC_CRQ_XPORT_EVENT:
3319                if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
3320                        dev_info(dev, "Re-enabling adapter\n");
3321                        adapter->migrated = true;
3322                        schedule_work(&adapter->ibmvnic_xport);
3323                } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
3324                        dev_info(dev, "Backing device failover detected\n");
3325                        netif_carrier_off(netdev);
3326                        adapter->failover = true;
3327                } else {
3328                        /* The adapter lost the connection */
3329                        dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
3330                                gen_crq->cmd);
3331                        schedule_work(&adapter->ibmvnic_xport);
3332                }
3333                return;
3334        case IBMVNIC_CRQ_CMD_RSP:
3335                break;
3336        default:
3337                dev_err(dev, "Got an invalid msg type 0x%02x\n",
3338                        gen_crq->first);
3339                return;
3340        }
3341
3342        switch (gen_crq->cmd) {
3343        case VERSION_EXCHANGE_RSP:
3344                rc = crq->version_exchange_rsp.rc.code;
3345                if (rc) {
3346                        dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
3347                        break;
3348                }
3349                dev_info(dev, "Partner protocol version is %d\n",
3350                         crq->version_exchange_rsp.version);
3351                if (be16_to_cpu(crq->version_exchange_rsp.version) <
3352                    ibmvnic_version)
3353                        ibmvnic_version =
3354                            be16_to_cpu(crq->version_exchange_rsp.version);
3355                send_cap_queries(adapter);
3356                break;
3357        case QUERY_CAPABILITY_RSP:
3358                handle_query_cap_rsp(crq, adapter);
3359                break;
3360        case QUERY_MAP_RSP:
3361                handle_query_map_rsp(crq, adapter);
3362                break;
3363        case REQUEST_MAP_RSP:
3364                handle_request_map_rsp(crq, adapter);
3365                break;
3366        case REQUEST_UNMAP_RSP:
3367                handle_request_unmap_rsp(crq, adapter);
3368                break;
3369        case REQUEST_CAPABILITY_RSP:
3370                handle_request_cap_rsp(crq, adapter);
3371                break;
3372        case LOGIN_RSP:
3373                netdev_dbg(netdev, "Got Login Response\n");
3374                handle_login_rsp(crq, adapter);
3375                break;
3376        case LOGICAL_LINK_STATE_RSP:
3377                netdev_dbg(netdev, "Got Logical Link State Response\n");
3378                adapter->logical_link_state =
3379                    crq->logical_link_state_rsp.link_state;
3380                break;
3381        case LINK_STATE_INDICATION:
3382                netdev_dbg(netdev, "Got Logical Link State Indication\n");
3383                adapter->phys_link_state =
3384                    crq->link_state_indication.phys_link_state;
3385                adapter->logical_link_state =
3386                    crq->link_state_indication.logical_link_state;
3387                break;
3388        case CHANGE_MAC_ADDR_RSP:
3389                netdev_dbg(netdev, "Got MAC address change Response\n");
3390                handle_change_mac_rsp(crq, adapter);
3391                break;
3392        case ERROR_INDICATION:
3393                netdev_dbg(netdev, "Got Error Indication\n");
3394                handle_error_indication(crq, adapter);
3395                break;
3396        case REQUEST_ERROR_RSP:
3397                netdev_dbg(netdev, "Got Error Detail Response\n");
3398                handle_error_info_rsp(crq, adapter);
3399                break;
3400        case REQUEST_STATISTICS_RSP:
3401                netdev_dbg(netdev, "Got Statistics Response\n");
3402                complete(&adapter->stats_done);
3403                break;
3404        case REQUEST_DUMP_SIZE_RSP:
3405                netdev_dbg(netdev, "Got Request Dump Size Response\n");
3406                handle_dump_size_rsp(crq, adapter);
3407                break;
3408        case REQUEST_DUMP_RSP:
3409                netdev_dbg(netdev, "Got Request Dump Response\n");
3410                complete(&adapter->fw_done);
3411                break;
3412        case QUERY_IP_OFFLOAD_RSP:
3413                netdev_dbg(netdev, "Got Query IP offload Response\n");
3414                handle_query_ip_offload_rsp(adapter);
3415                break;
3416        case MULTICAST_CTRL_RSP:
3417                netdev_dbg(netdev, "Got multicast control Response\n");
3418                break;
3419        case CONTROL_IP_OFFLOAD_RSP:
3420                netdev_dbg(netdev, "Got Control IP offload Response\n");
3421                dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
3422                                 sizeof(adapter->ip_offload_ctrl),
3423                                 DMA_TO_DEVICE);
3424                /* We're done with the queries, perform the login */
3425                send_login(adapter);
3426                break;
3427        case REQUEST_RAS_COMP_NUM_RSP:
3428                netdev_dbg(netdev, "Got Request RAS Comp Num Response\n");
3429                if (crq->request_ras_comp_num_rsp.rc.code == 10) {
3430                        netdev_dbg(netdev, "Request RAS Comp Num not supported\n");
3431                        break;
3432                }
3433                adapter->ras_comp_num =
3434                    be32_to_cpu(crq->request_ras_comp_num_rsp.num_components);
3435                handle_request_ras_comp_num_rsp(crq, adapter);
3436                break;
3437        case REQUEST_RAS_COMPS_RSP:
3438                netdev_dbg(netdev, "Got Request RAS Comps Response\n");
3439                handle_request_ras_comps_rsp(crq, adapter);
3440                break;
3441        case CONTROL_RAS_RSP:
3442                netdev_dbg(netdev, "Got Control RAS Response\n");
3443                handle_control_ras_rsp(crq, adapter);
3444                break;
3445        case COLLECT_FW_TRACE_RSP:
3446                netdev_dbg(netdev, "Got Collect firmware trace Response\n");
3447                complete(&adapter->fw_done);
3448                break;
3449        default:
3450                netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
3451                           gen_crq->cmd);
3452        }
3453}
3454
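/* The CRQ interrupt handler only masks the VIO interrupt and schedules the
 * tasklet below, which drains the queue, re-enables interrupts, and
 * re-checks for messages that raced with the enable.
 */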
3455static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
3456{
3457        struct ibmvnic_adapter *adapter = instance;
3458        unsigned long flags;
3459
3460        spin_lock_irqsave(&adapter->crq.lock, flags);
3461        vio_disable_interrupts(adapter->vdev);
3462        tasklet_schedule(&adapter->tasklet);
3463        spin_unlock_irqrestore(&adapter->crq.lock, flags);
3464        return IRQ_HANDLED;
3465}
3466
3467static void ibmvnic_tasklet(void *data)
3468{
3469        struct ibmvnic_adapter *adapter = data;
3470        struct ibmvnic_crq_queue *queue = &adapter->crq;
3471        struct vio_dev *vdev = adapter->vdev;
3472        union ibmvnic_crq *crq;
3473        unsigned long flags;
3474        bool done = false;
3475
3476        spin_lock_irqsave(&queue->lock, flags);
3477        vio_disable_interrupts(vdev);
3478        while (!done) {
3479                /* Pull all the valid messages off the CRQ */
3480                while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
3481                        ibmvnic_handle_crq(crq, adapter);
3482                        crq->generic.first = 0;
3483                }
3484                vio_enable_interrupts(vdev);
3485                crq = ibmvnic_next_crq(adapter);
3486                if (crq) {
3487                        vio_disable_interrupts(vdev);
3488                        ibmvnic_handle_crq(crq, adapter);
3489                        crq->generic.first = 0;
3490                } else {
3491                        /* remain in tasklet until all
3492                         * capability responses are received
3493                         */
3494                        if (!adapter->wait_capability)
3495                                done = true;
3496                }
3497        }
3498        /* if capability CRQs were sent in this tasklet, the next run of the
3499         * tasklet must wait until all responses are received
3500         */
3501        if (atomic_read(&adapter->running_cap_crqs) != 0)
3502                adapter->wait_capability = true;
3503        spin_unlock_irqrestore(&queue->lock, flags);
3504}
3505
3506static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
3507{
3508        struct vio_dev *vdev = adapter->vdev;
3509        int rc;
3510
3511        do {
3512                rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
3513        } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
3514
3515        if (rc)
3516                dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
3517
3518        return rc;
3519}
3520
3521static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
3522{
3523        struct ibmvnic_crq_queue *crq = &adapter->crq;
3524        struct device *dev = &adapter->vdev->dev;
3525        struct vio_dev *vdev = adapter->vdev;
3526        int rc;
3527
3528        /* Close the CRQ */
3529        do {
3530                rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3531        } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3532
3533        /* Clean out the queue */
3534        memset(crq->msgs, 0, PAGE_SIZE);
3535        crq->cur = 0;
3536
3537        /* And re-open it again */
3538        rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3539                                crq->msg_token, PAGE_SIZE);
3540
3541        if (rc == H_CLOSED)
3542                /* Adapter is good, but other end is not ready */
3543                dev_warn(dev, "Partner adapter not ready\n");
3544        else if (rc != 0)
3545                dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
3546
3547        return rc;
3548}
3549
3550static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter)
3551{
3552        struct ibmvnic_crq_queue *crq = &adapter->crq;
3553        struct vio_dev *vdev = adapter->vdev;
3554        long rc;
3555
3556        netdev_dbg(adapter->netdev, "Releasing CRQ\n");
3557        free_irq(vdev->irq, adapter);
3558        tasklet_kill(&adapter->tasklet);
3559        do {
3560                rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3561        } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3562
3563        dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
3564                         DMA_BIDIRECTIONAL);
3565        free_page((unsigned long)crq->msgs);
3566}
3567
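/* Allocate a page of CRQ messages, DMA-map it and register it with the
 * hypervisor (retrying via a CRQ reset if the old registration is still
 * around, e.g. after a kexec), then set up the tasklet and interrupt
 * handler used to service the queue.
 */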
3568static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter)
3569{
3570        struct ibmvnic_crq_queue *crq = &adapter->crq;
3571        struct device *dev = &adapter->vdev->dev;
3572        struct vio_dev *vdev = adapter->vdev;
3573        int rc, retrc = -ENOMEM;
3574
3575        crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
3576        /* Should we allocate more than one page? */
3577
3578        if (!crq->msgs)
3579                return -ENOMEM;
3580
3581        crq->size = PAGE_SIZE / sizeof(*crq->msgs);
3582        crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
3583                                        DMA_BIDIRECTIONAL);
3584        if (dma_mapping_error(dev, crq->msg_token))
3585                goto map_failed;
3586
3587        rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3588                                crq->msg_token, PAGE_SIZE);
3589
3590        if (rc == H_RESOURCE)
3591                /* maybe kexecing and resource is busy. try a reset */
3592                rc = ibmvnic_reset_crq(adapter);
3593        retrc = rc;
3594
3595        if (rc == H_CLOSED) {
3596                dev_warn(dev, "Partner adapter not ready\n");
3597        } else if (rc) {
3598                dev_warn(dev, "Error %d opening adapter\n", rc);
3599                goto reg_crq_failed;
3600        }
3601
3602        retrc = 0;
3603
3604        tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
3605                     (unsigned long)adapter);
3606
3607        netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
3608        rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
3609                         adapter);
3610        if (rc) {
3611                dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
3612                        vdev->irq, rc);
                    retrc = rc;
3613                goto req_irq_failed;
3614        }
3615
3616        rc = vio_enable_interrupts(vdev);
3617        if (rc) {
3618                dev_err(dev, "Error %d enabling interrupts\n", rc);
                    retrc = rc;
3619                goto req_irq_failed;
3620        }
3621
3622        crq->cur = 0;
3623        spin_lock_init(&crq->lock);
3624
3625        return retrc;
3626
3627req_irq_failed:
3628        tasklet_kill(&adapter->tasklet);
3629        do {
3630                rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3631        } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3632reg_crq_failed:
3633        dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
3634map_failed:
3635        free_page((unsigned long)crq->msgs);
3636        return retrc;
3637}
3638
3639/* debugfs for dump */
3640static int ibmvnic_dump_show(struct seq_file *seq, void *v)
3641{
3642        struct net_device *netdev = seq->private;
3643        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3644        struct device *dev = &adapter->vdev->dev;
3645        union ibmvnic_crq crq;
3646
3647        memset(&crq, 0, sizeof(crq));
3648        crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
3649        crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;
3650
3651        init_completion(&adapter->fw_done);
3652        ibmvnic_send_crq(adapter, &crq);
3653        wait_for_completion(&adapter->fw_done);
3654
3655        seq_write(seq, adapter->dump_data, adapter->dump_data_size);
3656
3657        dma_unmap_single(dev, adapter->dump_data_token, adapter->dump_data_size,
3658                         DMA_BIDIRECTIONAL);
3659
3660        kfree(adapter->dump_data);
3661
3662        return 0;
3663}
3664
3665static int ibmvnic_dump_open(struct inode *inode, struct file *file)
3666{
3667        return single_open(file, ibmvnic_dump_show, inode->i_private);
3668}
3669
3670static const struct file_operations ibmvnic_dump_ops = {
3671        .owner          = THIS_MODULE,
3672        .open           = ibmvnic_dump_open,
3673        .read           = seq_read,
3674        .llseek         = seq_lseek,
3675        .release        = single_release,
3676};
3677
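/* Worker run once the partner signals CRQ initialization.  It redoes the
 * version exchange and capability negotiation; on a failover the device is
 * re-opened if it was running, otherwise the net device is registered here
 * (the passive-init path used when ibmvnic_probe() timed out waiting for
 * the server).
 */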
3678static void handle_crq_init_rsp(struct work_struct *work)
3679{
3680        struct ibmvnic_adapter *adapter = container_of(work,
3681                                                       struct ibmvnic_adapter,
3682                                                       vnic_crq_init);
3683        struct device *dev = &adapter->vdev->dev;
3684        struct net_device *netdev = adapter->netdev;
3685        unsigned long timeout = msecs_to_jiffies(30000);
3686        bool restart = false;
3687        int rc;
3688
3689        if (adapter->failover) {
3690                release_sub_crqs(adapter);
3691                if (netif_running(netdev)) {
3692                        netif_tx_disable(netdev);
3693                        ibmvnic_close(netdev);
3694                        restart = true;
3695                }
3696        }
3697
3698        reinit_completion(&adapter->init_done);
3699        send_version_xchg(adapter);
3700        if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
3701                dev_err(dev, "Passive init timeout\n");
3702                goto task_failed;
3703        }
3704
3705        do {
3706                if (adapter->renegotiate) {
3707                        adapter->renegotiate = false;
3708                        release_sub_crqs_no_irqs(adapter);
3709
3710                        reinit_completion(&adapter->init_done);
3711                        send_cap_queries(adapter);
3712                        if (!wait_for_completion_timeout(&adapter->init_done,
3713                                                         timeout)) {
3714                                dev_err(dev, "Passive init timeout\n");
3715                                goto task_failed;
3716                        }
3717                }
3718        } while (adapter->renegotiate);
3719        rc = init_sub_crq_irqs(adapter);
3720
3721        if (rc)
3722                goto task_failed;
3723
3724        netdev->real_num_tx_queues = adapter->req_tx_queues;
3725        netdev->mtu = adapter->req_mtu - ETH_HLEN;
3726
3727        if (adapter->failover) {
3728                adapter->failover = false;
3729                if (restart) {
3730                        rc = ibmvnic_open(netdev);
3731                        if (rc)
3732                                goto restart_failed;
3733                }
3734                netif_carrier_on(netdev);
3735                return;
3736        }
3737
3738        rc = register_netdev(netdev);
3739        if (rc) {
3740                dev_err(dev,
3741                        "failed to register netdev rc=%d\n", rc);
3742                goto register_failed;
3743        }
3744        dev_info(dev, "ibmvnic registered\n");
3745
3746        return;
3747
3748restart_failed:
3749        dev_err(dev, "Failed to restart ibmvnic, rc=%d\n", rc);
3750register_failed:
3751        release_sub_crqs(adapter);
3752task_failed:
3753        dev_err(dev, "Passive initialization was not successful\n");
3754}
3755
3756static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
3757{
3758        unsigned long timeout = msecs_to_jiffies(30000);
3759        struct ibmvnic_adapter *adapter;
3760        struct net_device *netdev;
3761        unsigned char *mac_addr_p;
3762        struct dentry *ent;
3763        char buf[17]; /* debugfs name buf */
3764        int rc;
3765
3766        dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
3767                dev->unit_address);
3768
3769        mac_addr_p = (unsigned char *)vio_get_attribute(dev,
3770                                                        VETH_MAC_ADDR, NULL);
3771        if (!mac_addr_p) {
3772                dev_err(&dev->dev,
3773                        "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
3774                        __FILE__, __LINE__);
3775                return -EINVAL;
3776        }
3777
3778        netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
3779                                   IBMVNIC_MAX_TX_QUEUES);
3780        if (!netdev)
3781                return -ENOMEM;
3782
3783        adapter = netdev_priv(netdev);
3784        dev_set_drvdata(&dev->dev, netdev);
3785        adapter->vdev = dev;
3786        adapter->netdev = netdev;
3787        adapter->failover = false;
3788
3789        ether_addr_copy(adapter->mac_addr, mac_addr_p);
3790        ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
3791        netdev->irq = dev->irq;
3792        netdev->netdev_ops = &ibmvnic_netdev_ops;
3793        netdev->ethtool_ops = &ibmvnic_ethtool_ops;
3794        SET_NETDEV_DEV(netdev, &dev->dev);
3795
3796        INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
3797        INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event);
3798
3799        spin_lock_init(&adapter->stats_lock);
3800
3801        rc = ibmvnic_init_crq_queue(adapter);
3802        if (rc) {
3803                dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", rc);
3804                goto free_netdev;
3805        }
3806
3807        INIT_LIST_HEAD(&adapter->errors);
3808        INIT_LIST_HEAD(&adapter->inflight);
3809        spin_lock_init(&adapter->error_list_lock);
3810        spin_lock_init(&adapter->inflight_lock);
3811
3812        adapter->stats_token = dma_map_single(&dev->dev, &adapter->stats,
3813                                              sizeof(struct ibmvnic_statistics),
3814                                              DMA_FROM_DEVICE);
3815        if (dma_mapping_error(&dev->dev, adapter->stats_token)) {
3816                if (!firmware_has_feature(FW_FEATURE_CMO))
3817                        dev_err(&dev->dev, "Couldn't map stats buffer\n");
3818                rc = -ENOMEM;
3819                goto free_crq;
3820        }
3821
3822        snprintf(buf, sizeof(buf), "ibmvnic_%x", dev->unit_address);
3823        ent = debugfs_create_dir(buf, NULL);
3824        if (!ent || IS_ERR(ent)) {
3825                dev_info(&dev->dev, "debugfs create directory failed\n");
3826                adapter->debugfs_dir = NULL;
3827        } else {
3828                adapter->debugfs_dir = ent;
3829                ent = debugfs_create_file("dump", S_IRUGO, adapter->debugfs_dir,
3830                                          netdev, &ibmvnic_dump_ops);
3831                if (!ent || IS_ERR(ent)) {
3832                        dev_info(&dev->dev,
3833                                 "debugfs create dump file failed\n");
3834                        adapter->debugfs_dump = NULL;
3835                } else {
3836                        adapter->debugfs_dump = ent;
3837                }
3838        }
3839
3840        init_completion(&adapter->init_done);
3841        ibmvnic_send_crq_init(adapter);
3842        if (!wait_for_completion_timeout(&adapter->init_done, timeout))
3843                return 0;
3844
3845        do {
3846                if (adapter->renegotiate) {
3847                        adapter->renegotiate = false;
3848                        release_sub_crqs_no_irqs(adapter);
3849
3850                        reinit_completion(&adapter->init_done);
3851                        send_cap_queries(adapter);
3852                        if (!wait_for_completion_timeout(&adapter->init_done,
3853                                                         timeout))
3854                                return 0;
3855                }
3856        } while (adapter->renegotiate);
3857
3858        rc = init_sub_crq_irqs(adapter);
3859        if (rc) {
3860                dev_err(&dev->dev, "failed to initialize sub crq irqs\n");
3861                goto free_debugfs;
3862        }
3863
3864        netdev->real_num_tx_queues = adapter->req_tx_queues;
3865        netdev->mtu = adapter->req_mtu - ETH_HLEN;
3866
3867        rc = register_netdev(netdev);
3868        if (rc) {
3869                dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
3870                goto free_sub_crqs;
3871        }
3872        dev_info(&dev->dev, "ibmvnic registered\n");
3873
3874        return 0;
3875
3876free_sub_crqs:
3877        release_sub_crqs(adapter);
3878free_debugfs:
3879        if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
3880                debugfs_remove_recursive(adapter->debugfs_dir);
3881free_crq:
3882        ibmvnic_release_crq_queue(adapter);
3883free_netdev:
3884        free_netdev(netdev);
3885        return rc;
3886}
3887
3888static int ibmvnic_remove(struct vio_dev *dev)
3889{
3890        struct net_device *netdev = dev_get_drvdata(&dev->dev);
3891        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3892
3893        unregister_netdev(netdev);
3894
3895        release_sub_crqs(adapter);
3896
3897        ibmvnic_release_crq_queue(adapter);
3898
3899        if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
3900                debugfs_remove_recursive(adapter->debugfs_dir);
3901
3902        dma_unmap_single(&dev->dev, adapter->stats_token,
3903                         sizeof(struct ibmvnic_statistics), DMA_FROM_DEVICE);
3904
3905        if (adapter->ras_comps)
3906                dma_free_coherent(&dev->dev,
3907                                  adapter->ras_comp_num *
3908                                  sizeof(struct ibmvnic_fw_component),
3909                                  adapter->ras_comps, adapter->ras_comps_tok);
3910
3911        kfree(adapter->ras_comp_int);
3912
3913        free_netdev(netdev);
3914        dev_set_drvdata(&dev->dev, NULL);
3915
3916        return 0;
3917}
3918
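/* vio_driver.get_desired_dma callback: estimate how much IO address space
 * the adapter will map so the platform can size its DMA entitlement (CRQ
 * page, bounce buffer, statistics buffer, sub-CRQ queues and rx pools).
 */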
3919static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
3920{
3921        struct net_device *netdev = dev_get_drvdata(&vdev->dev);
3922        struct ibmvnic_adapter *adapter;
3923        struct iommu_table *tbl;
3924        unsigned long ret = 0;
3925        int i;
3926
3927        tbl = get_iommu_table_base(&vdev->dev);
3928
3929        /* netdev inits at probe time along with the structures we need below */
3930        if (!netdev)
3931                return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
3932
3933        adapter = netdev_priv(netdev);
3934
3935        ret += PAGE_SIZE; /* the crq message queue */
3936        ret += adapter->bounce_buffer_size;
3937        ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
3938
3939        for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
3940                ret += 4 * PAGE_SIZE; /* the scrq message queue */
3941
3942        for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
3943             i++)
3944                ret += adapter->rx_pool[i].size *
3945                    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
3946
3947        return ret;
3948}
3949
3950static int ibmvnic_resume(struct device *dev)
3951{
3952        struct net_device *netdev = dev_get_drvdata(dev);
3953        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3954        int i;
3955
3956        /* kick the interrupt handlers just in case we lost an interrupt */
3957        for (i = 0; i < adapter->req_rx_queues; i++)
3958                ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
3959                                     adapter->rx_scrq[i]);
3960
3961        return 0;
3962}
3963
3964static struct vio_device_id ibmvnic_device_table[] = {
3965        {"network", "IBM,vnic"},
3966        {"", "" }
3967};
3968MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
3969
3970static const struct dev_pm_ops ibmvnic_pm_ops = {
3971        .resume = ibmvnic_resume
3972};
3973
3974static struct vio_driver ibmvnic_driver = {
3975        .id_table       = ibmvnic_device_table,
3976        .probe          = ibmvnic_probe,
3977        .remove         = ibmvnic_remove,
3978        .get_desired_dma = ibmvnic_get_desired_dma,
3979        .name           = ibmvnic_driver_name,
3980        .pm             = &ibmvnic_pm_ops,
3981};
3982
3983/* module functions */
3984static int __init ibmvnic_module_init(void)
3985{
3986        pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
3987                IBMVNIC_DRIVER_VERSION);
3988
3989        return vio_register_driver(&ibmvnic_driver);
3990}
3991
3992static void __exit ibmvnic_module_exit(void)
3993{
3994        vio_unregister_driver(&ibmvnic_driver);
3995}
3996
3997module_init(ibmvnic_module_init);
3998module_exit(ibmvnic_module_exit);
3999