linux/drivers/net/ethernet/ibm/ibmveth.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IBM Power Virtual Ethernet Device Driver
 *
 * Copyright (C) IBM Corporation, 2003, 2010
 *
 * Authors: Dave Larson <larson1@us.ibm.com>
 *          Santiago Leon <santil@linux.vnet.ibm.com>
 *          Brian King <brking@linux.vnet.ibm.com>
 *          Robert Jennings <rcj@linux.vnet.ibm.com>
 *          Anton Blanchard <anton@au.ibm.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <asm/firmware.h>
#include <net/tcp.h>
#include <net/ip6_checksum.h>

#include "ibmveth.h"

static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);

static struct kobj_type ktype_veth_pool;

static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
#define ibmveth_driver_version "1.06"

MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);

static unsigned int tx_copybreak __read_mostly = 128;
module_param(tx_copybreak, uint, 0644);
MODULE_PARM_DESC(tx_copybreak,
        "Maximum size of packet that is copied to a new buffer on transmit");

static unsigned int rx_copybreak __read_mostly = 128;
module_param(rx_copybreak, uint, 0644);
MODULE_PARM_DESC(rx_copybreak,
        "Maximum size of packet that is copied to a new buffer on receive");

static unsigned int rx_flush __read_mostly = 0;
module_param(rx_flush, uint, 0644);
MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");

static bool old_large_send __read_mostly;
module_param(old_large_send, bool, 0444);
MODULE_PARM_DESC(old_large_send,
        "Use old large send method on firmware that supports the new method");

struct ibmveth_stat {
        char name[ETH_GSTRING_LEN];
        int offset;
};

#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))
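
/*
 * Illustrative expansion (not part of the original source): for the
 * "replenish_no_mem" entry below,
 * IBMVETH_GET_STAT(adapter, IBMVETH_STAT_OFF(replenish_no_mem))
 * dereferences the u64 at
 * (unsigned long)adapter + offsetof(struct ibmveth_adapter, replenish_no_mem),
 * i.e. it reads adapter->replenish_no_mem.
 */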

static struct ibmveth_stat ibmveth_stats[] = {
        { "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
        { "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
        { "replenish_add_buff_failure",
                        IBMVETH_STAT_OFF(replenish_add_buff_failure) },
        { "replenish_add_buff_success",
                        IBMVETH_STAT_OFF(replenish_add_buff_success) },
        { "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
        { "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
        { "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
        { "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
        { "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
        { "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
        { "tx_large_packets", IBMVETH_STAT_OFF(tx_large_packets) },
        { "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) },
        { "fw_enabled_large_send", IBMVETH_STAT_OFF(fw_large_send_support) }
};

/* simple methods of getting data from the current rxq entry */
static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
{
        return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off);
}

static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
{
        return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
                        IBMVETH_RXQ_TOGGLE_SHIFT;
}

static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
        return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
}
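
/*
 * How the toggle works (summary, for illustration): the driver starts with
 * rx_queue.toggle == 1 and flips it each time rx_queue.index wraps (see
 * ibmveth_rxq_recycle_buffer()/ibmveth_rxq_harvest_buffer() below). An entry
 * is only "pending" when the toggle bit firmware wrote into flags_off matches
 * the generation the driver expects, so stale entries left over from the
 * previous pass around the ring fail the comparison and are ignored.
 */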

static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
        return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
}

static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
        return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
}

static inline int ibmveth_rxq_large_packet(struct ibmveth_adapter *adapter)
{
        return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_LRG_PKT;
}

static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
        return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
}

static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
{
        return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
}

/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
                                     u32 pool_index, u32 pool_size,
                                     u32 buff_size, u32 pool_active)
{
        pool->size = pool_size;
        pool->index = pool_index;
        pool->buff_size = buff_size;
        pool->threshold = pool_size * 7 / 8;
        pool->active = pool_active;
}
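
/*
 * Example (illustrative): a pool sized at 512 buffers gets a replenish
 * threshold of 512 * 7 / 8 = 448, so the replenish task starts refilling
 * once more than 64 of its buffers are outstanding.
 */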

/* allocate and setup a buffer pool - called during open */
static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
        int i;

        pool->free_map = kmalloc_array(pool->size, sizeof(u16), GFP_KERNEL);

        if (!pool->free_map)
                return -1;

        pool->dma_addr = kcalloc(pool->size, sizeof(dma_addr_t), GFP_KERNEL);
        if (!pool->dma_addr) {
                kfree(pool->free_map);
                pool->free_map = NULL;
                return -1;
        }

        pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);

        if (!pool->skbuff) {
                kfree(pool->dma_addr);
                pool->dma_addr = NULL;

                kfree(pool->free_map);
                pool->free_map = NULL;
                return -1;
        }

        for (i = 0; i < pool->size; ++i)
                pool->free_map[i] = i;

        atomic_set(&pool->available, 0);
        pool->producer_index = 0;
        pool->consumer_index = 0;

        return 0;
}

static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
{
        unsigned long offset;

        for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
                asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
}
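
/*
 * Summary (for illustration): the loop above issues one PowerPC "dcbfl"
 * (data cache block flush) per SMP_CACHE_BYTES-sized cache line covering
 * [addr, addr + length), forcing the buffer contents out of the local
 * cache before the buffer is handed back for reuse (see the rx_flush
 * module parameter).
 */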

/* replenish the buffers for a pool.  note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
                                          struct ibmveth_buff_pool *pool)
{
        u32 i;
        u32 count = pool->size - atomic_read(&pool->available);
        u32 buffers_added = 0;
        struct sk_buff *skb;
        unsigned int free_index, index;
        u64 correlator;
        unsigned long lpar_rc;
        dma_addr_t dma_addr;

        mb();

        for (i = 0; i < count; ++i) {
                union ibmveth_buf_desc desc;

                skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);

                if (!skb) {
                        netdev_dbg(adapter->netdev,
                                   "replenish: unable to allocate skb\n");
                        adapter->replenish_no_mem++;
                        break;
                }

                free_index = pool->consumer_index;
                pool->consumer_index++;
                if (pool->consumer_index >= pool->size)
                        pool->consumer_index = 0;
                index = pool->free_map[free_index];

                BUG_ON(index == IBM_VETH_INVALID_MAP);
                BUG_ON(pool->skbuff[index] != NULL);

                dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
                                pool->buff_size, DMA_FROM_DEVICE);

                if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
                        goto failure;

                pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
                pool->dma_addr[index] = dma_addr;
                pool->skbuff[index] = skb;

                correlator = ((u64)pool->index << 32) | index;
                *(u64 *)skb->data = correlator;
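                /*
                 * Correlator layout (illustrative, not from the original
                 * source): pool index in the upper 32 bits, buffer index in
                 * the lower 32 bits, e.g. pool 2, buffer 10 encodes as
                 * 0x000000020000000a. Stashing it in the buffer itself lets
                 * the completion path map a returned buffer back to its
                 * pool slot and skb.
                 */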

                desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
                desc.fields.address = dma_addr;

                if (rx_flush) {
                        unsigned int len = min(pool->buff_size,
                                                adapter->netdev->mtu +
                                                IBMVETH_BUFF_OH);
                        ibmveth_flush_buffer(skb->data, len);
                }
                lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
                                                   desc.desc);

                if (lpar_rc != H_SUCCESS) {
                        goto failure;
                } else {
                        buffers_added++;
                        adapter->replenish_add_buff_success++;
                }
        }

        mb();
        atomic_add(buffers_added, &(pool->available));
        return;

failure:
        pool->free_map[free_index] = index;
        pool->skbuff[index] = NULL;
        if (pool->consumer_index == 0)
                pool->consumer_index = pool->size - 1;
        else
                pool->consumer_index--;
        if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
                dma_unmap_single(&adapter->vdev->dev,
                                 pool->dma_addr[index], pool->buff_size,
                                 DMA_FROM_DEVICE);
        dev_kfree_skb_any(skb);
        adapter->replenish_add_buff_failure++;

        mb();
        atomic_add(buffers_added, &(pool->available));
}

/*
 * The final 8 bytes of the buffer list is a counter of frames dropped
 * because there was not a buffer in the buffer list capable of holding
 * the frame.
 */
static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter)
{
        __be64 *p = adapter->buffer_list_addr + 4096 - 8;

        adapter->rx_no_buffer = be64_to_cpup(p);
}

/* replenish routine */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
        int i;

        adapter->replenish_task_cycles++;

        for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
                struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];

                if (pool->active &&
                    (atomic_read(&pool->available) < pool->threshold))
                        ibmveth_replenish_buffer_pool(adapter, pool);
        }

        ibmveth_update_rx_no_buffer(adapter);
}
/* empty and free a buffer pool - also used to do cleanup in error paths */
static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
                                     struct ibmveth_buff_pool *pool)
{
        int i;

        kfree(pool->free_map);
        pool->free_map = NULL;

        if (pool->skbuff && pool->dma_addr) {
                for (i = 0; i < pool->size; ++i) {
                        struct sk_buff *skb = pool->skbuff[i];
                        if (skb) {
                                dma_unmap_single(&adapter->vdev->dev,
                                                 pool->dma_addr[i],
                                                 pool->buff_size,
                                                 DMA_FROM_DEVICE);
                                dev_kfree_skb_any(skb);
                                pool->skbuff[i] = NULL;
                        }
                }
        }

        if (pool->dma_addr) {
                kfree(pool->dma_addr);
                pool->dma_addr = NULL;
        }

        if (pool->skbuff) {
                kfree(pool->skbuff);
                pool->skbuff = NULL;
        }
}

/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
                                            u64 correlator)
{
        unsigned int pool  = correlator >> 32;
        unsigned int index = correlator & 0xffffffffUL;
        unsigned int free_index;
        struct sk_buff *skb;

        BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
        BUG_ON(index >= adapter->rx_buff_pool[pool].size);

        skb = adapter->rx_buff_pool[pool].skbuff[index];

        BUG_ON(skb == NULL);

        adapter->rx_buff_pool[pool].skbuff[index] = NULL;

        dma_unmap_single(&adapter->vdev->dev,
                         adapter->rx_buff_pool[pool].dma_addr[index],
                         adapter->rx_buff_pool[pool].buff_size,
                         DMA_FROM_DEVICE);

        free_index = adapter->rx_buff_pool[pool].producer_index;
        adapter->rx_buff_pool[pool].producer_index++;
        if (adapter->rx_buff_pool[pool].producer_index >=
            adapter->rx_buff_pool[pool].size)
                adapter->rx_buff_pool[pool].producer_index = 0;
        adapter->rx_buff_pool[pool].free_map[free_index] = index;

        mb();

        atomic_dec(&(adapter->rx_buff_pool[pool].available));
}

/* get the current buffer on the rx queue */
static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
        u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
        unsigned int pool = correlator >> 32;
        unsigned int index = correlator & 0xffffffffUL;

        BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
        BUG_ON(index >= adapter->rx_buff_pool[pool].size);

        return adapter->rx_buff_pool[pool].skbuff[index];
}

/* recycle the current buffer on the rx queue */
static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
        u32 q_index = adapter->rx_queue.index;
        u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
        unsigned int pool = correlator >> 32;
        unsigned int index = correlator & 0xffffffffUL;
        union ibmveth_buf_desc desc;
        unsigned long lpar_rc;
        int ret = 1;

        BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
        BUG_ON(index >= adapter->rx_buff_pool[pool].size);

        if (!adapter->rx_buff_pool[pool].active) {
                ibmveth_rxq_harvest_buffer(adapter);
                ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
                goto out;
        }

        desc.fields.flags_len = IBMVETH_BUF_VALID |
                adapter->rx_buff_pool[pool].buff_size;
        desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

        lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

        if (lpar_rc != H_SUCCESS) {
                netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
                           "during recycle rc=%ld", lpar_rc);
                ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
                ret = 0;
        }

        if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
                adapter->rx_queue.index = 0;
                adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
        }

out:
        return ret;
}

static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
        ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);

        if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
                adapter->rx_queue.index = 0;
                adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
        }
}

static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
        union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
        int rc, try_again = 1;

        /*
         * After a kexec the adapter will still be open, so our attempt to
         * open it will fail. So if we get a failure we free the adapter and
         * try again, but only once.
         */
retry:
        rc = h_register_logical_lan(adapter->vdev->unit_address,
                                    adapter->buffer_list_dma, rxq_desc.desc,
                                    adapter->filter_list_dma, mac_address);

        if (rc != H_SUCCESS && try_again) {
                do {
                        rc = h_free_logical_lan(adapter->vdev->unit_address);
                } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

                try_again = 0;
                goto retry;
        }

        return rc;
}

static u64 ibmveth_encode_mac_addr(u8 *mac)
{
        int i;
        u64 encoded = 0;

        for (i = 0; i < ETH_ALEN; i++)
                encoded = (encoded << 8) | mac[i];

        return encoded;
}
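
/*
 * Worked example (illustrative): the MAC address 02:01:02:03:04:05 is
 * shifted in one octet at a time, first octet ending up most significant,
 * so it encodes as the u64 0x0000020102030405.
 */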

static int ibmveth_open(struct net_device *netdev)
{
        struct ibmveth_adapter *adapter = netdev_priv(netdev);
        u64 mac_address;
        int rxq_entries = 1;
        unsigned long lpar_rc;
        int rc;
        union ibmveth_buf_desc rxq_desc;
        int i;
        struct device *dev;

        netdev_dbg(netdev, "open starting\n");

        napi_enable(&adapter->napi);

        for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
                rxq_entries += adapter->rx_buff_pool[i].size;

        rc = -ENOMEM;
        adapter->buffer_list_addr = (void *)get_zeroed_page(GFP_KERNEL);
        if (!adapter->buffer_list_addr) {
                netdev_err(netdev, "unable to allocate list pages\n");
                goto out;
        }

        adapter->filter_list_addr = (void *)get_zeroed_page(GFP_KERNEL);
        if (!adapter->filter_list_addr) {
                netdev_err(netdev, "unable to allocate filter pages\n");
                goto out_free_buffer_list;
        }

        dev = &adapter->vdev->dev;

        adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
                                                rxq_entries;
        adapter->rx_queue.queue_addr =
                dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
                                   &adapter->rx_queue.queue_dma, GFP_KERNEL);
        if (!adapter->rx_queue.queue_addr)
                goto out_free_filter_list;

        adapter->buffer_list_dma = dma_map_single(dev,
                        adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, adapter->buffer_list_dma)) {
                netdev_err(netdev, "unable to map buffer list pages\n");
                goto out_free_queue_mem;
        }

        adapter->filter_list_dma = dma_map_single(dev,
                        adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, adapter->filter_list_dma)) {
                netdev_err(netdev, "unable to map filter list pages\n");
                goto out_unmap_buffer_list;
        }

        adapter->rx_queue.index = 0;
        adapter->rx_queue.num_slots = rxq_entries;
        adapter->rx_queue.toggle = 1;

        mac_address = ibmveth_encode_mac_addr(netdev->dev_addr);

        rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
                                        adapter->rx_queue.queue_len;
        rxq_desc.fields.address = adapter->rx_queue.queue_dma;

        netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
        netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
        netdev_dbg(netdev, "receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);

        h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

        lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

        if (lpar_rc != H_SUCCESS) {
                netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
                           lpar_rc);
                netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
                           "desc:0x%llx MAC:0x%llx\n",
                                     adapter->buffer_list_dma,
                                     adapter->filter_list_dma,
                                     rxq_desc.desc,
                                     mac_address);
                rc = -ENONET;
                goto out_unmap_filter_list;
        }

        for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
                if (!adapter->rx_buff_pool[i].active)
                        continue;
                if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
                        netdev_err(netdev, "unable to alloc pool\n");
                        adapter->rx_buff_pool[i].active = 0;
                        rc = -ENOMEM;
                        goto out_free_buffer_pools;
                }
        }

        netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
        rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
                         netdev);
        if (rc != 0) {
                netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
                           netdev->irq, rc);
                do {
                        lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
                } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

                goto out_free_buffer_pools;
        }

        rc = -ENOMEM;
        adapter->bounce_buffer =
            kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
        if (!adapter->bounce_buffer)
                goto out_free_irq;

        adapter->bounce_buffer_dma =
            dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
                           netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
                netdev_err(netdev, "unable to map bounce buffer\n");
                goto out_free_bounce_buffer;
        }

        netdev_dbg(netdev, "initial replenish cycle\n");
        ibmveth_interrupt(netdev->irq, netdev);

        netif_start_queue(netdev);

        netdev_dbg(netdev, "open complete\n");

        return 0;

out_free_bounce_buffer:
        kfree(adapter->bounce_buffer);
out_free_irq:
        free_irq(netdev->irq, netdev);
out_free_buffer_pools:
        while (--i >= 0) {
                if (adapter->rx_buff_pool[i].active)
                        ibmveth_free_buffer_pool(adapter,
                                                 &adapter->rx_buff_pool[i]);
        }
out_unmap_filter_list:
        dma_unmap_single(dev, adapter->filter_list_dma, 4096,
                         DMA_BIDIRECTIONAL);
out_unmap_buffer_list:
        dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
                         DMA_BIDIRECTIONAL);
out_free_queue_mem:
        dma_free_coherent(dev, adapter->rx_queue.queue_len,
                          adapter->rx_queue.queue_addr,
                          adapter->rx_queue.queue_dma);
out_free_filter_list:
        free_page((unsigned long)adapter->filter_list_addr);
out_free_buffer_list:
        free_page((unsigned long)adapter->buffer_list_addr);
out:
        napi_disable(&adapter->napi);
        return rc;
}

static int ibmveth_close(struct net_device *netdev)
{
        struct ibmveth_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->vdev->dev;
        long lpar_rc;
        int i;

        netdev_dbg(netdev, "close starting\n");

        napi_disable(&adapter->napi);

        if (!adapter->pool_config)
                netif_stop_queue(netdev);

        h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

        do {
                lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
        } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

        if (lpar_rc != H_SUCCESS) {
                netdev_err(netdev, "h_free_logical_lan failed with %lx, "
                           "continuing with close\n", lpar_rc);
        }

        free_irq(netdev->irq, netdev);

        ibmveth_update_rx_no_buffer(adapter);

        dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
                         DMA_BIDIRECTIONAL);
        free_page((unsigned long)adapter->buffer_list_addr);

        dma_unmap_single(dev, adapter->filter_list_dma, 4096,
                         DMA_BIDIRECTIONAL);
        free_page((unsigned long)adapter->filter_list_addr);

        dma_free_coherent(dev, adapter->rx_queue.queue_len,
                          adapter->rx_queue.queue_addr,
                          adapter->rx_queue.queue_dma);

        for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
                if (adapter->rx_buff_pool[i].active)
                        ibmveth_free_buffer_pool(adapter,
                                                 &adapter->rx_buff_pool[i]);

        dma_unmap_single(&adapter->vdev->dev, adapter->bounce_buffer_dma,
                         adapter->netdev->mtu + IBMVETH_BUFF_OH,
                         DMA_BIDIRECTIONAL);
        kfree(adapter->bounce_buffer);

        netdev_dbg(netdev, "close complete\n");

        return 0;
}

static int ibmveth_set_link_ksettings(struct net_device *dev,
                                      const struct ethtool_link_ksettings *cmd)
{
        struct ibmveth_adapter *adapter = netdev_priv(dev);

        return ethtool_virtdev_set_link_ksettings(dev, cmd,
                                                  &adapter->speed,
                                                  &adapter->duplex);
}

static int ibmveth_get_link_ksettings(struct net_device *dev,
                                      struct ethtool_link_ksettings *cmd)
{
        struct ibmveth_adapter *adapter = netdev_priv(dev);

        cmd->base.speed = adapter->speed;
        cmd->base.duplex = adapter->duplex;
        cmd->base.port = PORT_OTHER;

        return 0;
}

static void ibmveth_init_link_settings(struct net_device *dev)
{
        struct ibmveth_adapter *adapter = netdev_priv(dev);

        adapter->speed = SPEED_1000;
        adapter->duplex = DUPLEX_FULL;
}

static void netdev_get_drvinfo(struct net_device *dev,
                               struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, ibmveth_driver_name, sizeof(info->driver));
        strlcpy(info->version, ibmveth_driver_version, sizeof(info->version));
}

static netdev_features_t ibmveth_fix_features(struct net_device *dev,
        netdev_features_t features)
{
        /*
         * Since the ibmveth firmware interface does not have the
         * concept of separate tx/rx checksum offload enable, if rx
         * checksum is disabled we also have to disable tx checksum
         * offload. Once we disable rx checksum offload, we are no
         * longer allowed to send tx buffers that are not properly
         * checksummed.
         */

        if (!(features & NETIF_F_RXCSUM))
                features &= ~NETIF_F_CSUM_MASK;

        return features;
}

static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
{
        struct ibmveth_adapter *adapter = netdev_priv(dev);
        unsigned long set_attr, clr_attr, ret_attr;
        unsigned long set_attr6, clr_attr6;
        long ret, ret4, ret6;
        int rc1 = 0, rc2 = 0;
        int restart = 0;

        if (netif_running(dev)) {
                restart = 1;
                adapter->pool_config = 1;
                ibmveth_close(dev);
                adapter->pool_config = 0;
        }

        set_attr = 0;
        clr_attr = 0;
        set_attr6 = 0;
        clr_attr6 = 0;

        if (data) {
                set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
                set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
        } else {
                clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
                clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
        }

        ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

        if (ret == H_SUCCESS &&
            (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
                ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
                                         set_attr, &ret_attr);

                if (ret4 != H_SUCCESS) {
                        netdev_err(dev, "unable to change IPv4 checksum "
                                        "offload settings. %d rc=%ld\n",
                                        data, ret4);

                        h_illan_attributes(adapter->vdev->unit_address,
                                           set_attr, clr_attr, &ret_attr);

                        if (data == 1)
                                dev->features &= ~NETIF_F_IP_CSUM;

                } else {
                        adapter->fw_ipv4_csum_support = data;
                }

                ret6 = h_illan_attributes(adapter->vdev->unit_address,
                                         clr_attr6, set_attr6, &ret_attr);

                if (ret6 != H_SUCCESS) {
                        netdev_err(dev, "unable to change IPv6 checksum "
                                        "offload settings. %d rc=%ld\n",
                                        data, ret6);

                        h_illan_attributes(adapter->vdev->unit_address,
                                           set_attr6, clr_attr6, &ret_attr);

                        if (data == 1)
                                dev->features &= ~NETIF_F_IPV6_CSUM;

                } else
                        adapter->fw_ipv6_csum_support = data;

                if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
                        adapter->rx_csum = data;
                else
                        rc1 = -EIO;
        } else {
                rc1 = -EIO;
                netdev_err(dev, "unable to change checksum offload settings."
                                     " %d rc=%ld ret_attr=%lx\n", data, ret,
                                     ret_attr);
        }

        if (restart)
                rc2 = ibmveth_open(dev);

        return rc1 ? rc1 : rc2;
}

static int ibmveth_set_tso(struct net_device *dev, u32 data)
{
        struct ibmveth_adapter *adapter = netdev_priv(dev);
        unsigned long set_attr, clr_attr, ret_attr;
        long ret1, ret2;
        int rc1 = 0, rc2 = 0;
        int restart = 0;

        if (netif_running(dev)) {
                restart = 1;
                adapter->pool_config = 1;
                ibmveth_close(dev);
                adapter->pool_config = 0;
        }

        set_attr = 0;
        clr_attr = 0;

        if (data)
                set_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
        else
                clr_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;

        ret1 = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

        if (ret1 == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
            !old_large_send) {
                ret2 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
                                          set_attr, &ret_attr);

                if (ret2 != H_SUCCESS) {
                        netdev_err(dev, "unable to change tso settings. %d rc=%ld\n",
                                   data, ret2);

                        h_illan_attributes(adapter->vdev->unit_address,
                                           set_attr, clr_attr, &ret_attr);

                        if (data == 1)
                                dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
                        rc1 = -EIO;

                } else {
                        adapter->fw_large_send_support = data;
                        adapter->large_send = data;
                }
        } else {
                /* Older firmware version of large send offload does not
                 * support tcp6/ipv6
                 */
                if (data == 1) {
                        dev->features &= ~NETIF_F_TSO6;
                        netdev_info(dev, "TSO feature requires all partitions to have updated driver");
                }
                adapter->large_send = data;
        }

        if (restart)
                rc2 = ibmveth_open(dev);

        return rc1 ? rc1 : rc2;
}

static int ibmveth_set_features(struct net_device *dev,
        netdev_features_t features)
{
        struct ibmveth_adapter *adapter = netdev_priv(dev);
        int rx_csum = !!(features & NETIF_F_RXCSUM);
        int large_send = !!(features & (NETIF_F_TSO | NETIF_F_TSO6));
        int rc1 = 0, rc2 = 0;

        if (rx_csum != adapter->rx_csum) {
                rc1 = ibmveth_set_csum_offload(dev, rx_csum);
                if (rc1 && !adapter->rx_csum)
                        dev->features =
                                features & ~(NETIF_F_CSUM_MASK |
                                             NETIF_F_RXCSUM);
        }

        if (large_send != adapter->large_send) {
                rc2 = ibmveth_set_tso(dev, large_send);
                if (rc2 && !adapter->large_send)
                        dev->features =
                                features & ~(NETIF_F_TSO | NETIF_F_TSO6);
        }

        return rc1 ? rc1 : rc2;
}
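
/*
 * Usage note (illustrative, not from the original source): this hook is
 * reached through the generic netdev feature machinery, e.g. from ethtool:
 *
 *   ethtool -K eth0 rx on    # NETIF_F_RXCSUM  -> ibmveth_set_csum_offload()
 *   ethtool -K eth0 tso on   # NETIF_F_TSO(6)  -> ibmveth_set_tso()
 */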

static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        int i;

        if (stringset != ETH_SS_STATS)
                return;

        for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
                memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmveth_get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(ibmveth_stats);
        default:
                return -EOPNOTSUPP;
        }
}

static void ibmveth_get_ethtool_stats(struct net_device *dev,
                                      struct ethtool_stats *stats, u64 *data)
{
        int i;
        struct ibmveth_adapter *adapter = netdev_priv(dev);

        for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
                data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
}

static const struct ethtool_ops netdev_ethtool_ops = {
        .get_drvinfo                     = netdev_get_drvinfo,
        .get_link                        = ethtool_op_get_link,
        .get_strings                     = ibmveth_get_strings,
        .get_sset_count                  = ibmveth_get_sset_count,
        .get_ethtool_stats               = ibmveth_get_ethtool_stats,
        .get_link_ksettings              = ibmveth_get_link_ksettings,
        .set_link_ksettings              = ibmveth_set_link_ksettings,
};

static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        return -EOPNOTSUPP;
}

static int ibmveth_send(struct ibmveth_adapter *adapter,
                        union ibmveth_buf_desc *descs, unsigned long mss)
{
        unsigned long correlator;
        unsigned int retry_count;
        unsigned long ret;

        /*
         * The retry count sets a maximum for the number of broadcast and
         * multicast destinations within the system.
         */
        retry_count = 1024;
        correlator = 0;
        do {
                ret = h_send_logical_lan(adapter->vdev->unit_address,
                                             descs[0].desc, descs[1].desc,
                                             descs[2].desc, descs[3].desc,
                                             descs[4].desc, descs[5].desc,
                                             correlator, &correlator, mss,
                                             adapter->fw_large_send_support);
        } while ((ret == H_BUSY) && (retry_count--));

        if (ret != H_SUCCESS && ret != H_DROPPED) {
                netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
                           "with rc=%ld\n", ret);
                return 1;
        }

        return 0;
}

static int ibmveth_is_packet_unsupported(struct sk_buff *skb,
                                         struct net_device *netdev)
{
        struct ethhdr *ether_header;
        int ret = 0;

        ether_header = eth_hdr(skb);

        if (ether_addr_equal(ether_header->h_dest, netdev->dev_addr)) {
                netdev_dbg(netdev, "veth doesn't support loopback packets, dropping packet.\n");
                netdev->stats.tx_dropped++;
                ret = -EOPNOTSUPP;
        }

        if (!ether_addr_equal(ether_header->h_source, netdev->dev_addr)) {
                netdev_dbg(netdev, "source packet MAC address does not match veth device's, dropping packet.\n");
                netdev->stats.tx_dropped++;
                ret = -EOPNOTSUPP;
        }

        return ret;
}

static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
                                      struct net_device *netdev)
{
        struct ibmveth_adapter *adapter = netdev_priv(netdev);
        unsigned int desc_flags;
        union ibmveth_buf_desc descs[6];
        int last, i;
        int force_bounce = 0;
        dma_addr_t dma_addr;
        unsigned long mss = 0;

        if (ibmveth_is_packet_unsupported(skb, netdev))
                goto out;

        /* veth doesn't handle frag_list, so linearize the skb.
         * When GRO is enabled SKB's can have frag_list.
         */
        if (adapter->is_active_trunk &&
            skb_has_frag_list(skb) && __skb_linearize(skb)) {
                netdev->stats.tx_dropped++;
                goto out;
        }

        /*
         * veth handles a maximum of 6 segments including the header, so
         * we have to linearize the skb if there are more than this.
         */
        if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
                netdev->stats.tx_dropped++;
                goto out;
        }

        /* veth can't checksum offload UDP */
        if (skb->ip_summed == CHECKSUM_PARTIAL &&
            ((skb->protocol == htons(ETH_P_IP) &&
              ip_hdr(skb)->protocol != IPPROTO_TCP) ||
             (skb->protocol == htons(ETH_P_IPV6) &&
              ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
            skb_checksum_help(skb)) {

                netdev_err(netdev, "tx: failed to checksum packet\n");
                netdev->stats.tx_dropped++;
                goto out;
        }

        desc_flags = IBMVETH_BUF_VALID;

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                unsigned char *buf = skb_transport_header(skb) +
                                                skb->csum_offset;

                desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);

                /* Need to zero out the checksum */
                buf[0] = 0;
                buf[1] = 0;

                if (skb_is_gso(skb) && adapter->fw_large_send_support)
                        desc_flags |= IBMVETH_BUF_LRG_SND;
        }

retry_bounce:
        memset(descs, 0, sizeof(descs));

        /*
         * If a linear packet is below the rx threshold then
         * copy it into the static bounce buffer. This avoids the
         * cost of a TCE insert and remove.
         */
        if (force_bounce || (!skb_is_nonlinear(skb) &&
                                (skb->len < tx_copybreak))) {
                skb_copy_from_linear_data(skb, adapter->bounce_buffer,
                                          skb->len);

                descs[0].fields.flags_len = desc_flags | skb->len;
                descs[0].fields.address = adapter->bounce_buffer_dma;

                if (ibmveth_send(adapter, descs, 0)) {
                        adapter->tx_send_failed++;
                        netdev->stats.tx_dropped++;
                } else {
                        netdev->stats.tx_packets++;
                        netdev->stats.tx_bytes += skb->len;
                }

                goto out;
        }

        /* Map the header */
        dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
                                  skb_headlen(skb), DMA_TO_DEVICE);
        if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
                goto map_failed;

        descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
        descs[0].fields.address = dma_addr;

        /* Map the frags */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
                                            skb_frag_size(frag), DMA_TO_DEVICE);

                if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
                        goto map_failed_frags;

                descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag);
                descs[i+1].fields.address = dma_addr;
        }

        if (skb->ip_summed == CHECKSUM_PARTIAL && skb_is_gso(skb)) {
                if (adapter->fw_large_send_support) {
                        mss = (unsigned long)skb_shinfo(skb)->gso_size;
                        adapter->tx_large_packets++;
                } else if (!skb_is_gso_v6(skb)) {
                        /* Put -1 in the IP checksum to tell phyp it
                         * is a largesend packet. Put the mss in
                         * the TCP checksum.
                         */
                        ip_hdr(skb)->check = 0xffff;
                        tcp_hdr(skb)->check =
                                cpu_to_be16(skb_shinfo(skb)->gso_size);
                        adapter->tx_large_packets++;
                }
        }

        if (ibmveth_send(adapter, descs, mss)) {
                adapter->tx_send_failed++;
                netdev->stats.tx_dropped++;
        } else {
                netdev->stats.tx_packets++;
                netdev->stats.tx_bytes += skb->len;
        }

        dma_unmap_single(&adapter->vdev->dev,
                         descs[0].fields.address,
                         descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
                         DMA_TO_DEVICE);

        for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
                dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
                               descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
                               DMA_TO_DEVICE);

out:
        dev_consume_skb_any(skb);
        return NETDEV_TX_OK;

map_failed_frags:
        last = i+1;
        for (i = 1; i < last; i++)
                dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
                               descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
                               DMA_TO_DEVICE);

        dma_unmap_single(&adapter->vdev->dev,
                         descs[0].fields.address,
                         descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
                         DMA_TO_DEVICE);
map_failed:
        if (!firmware_has_feature(FW_FEATURE_CMO))
                netdev_err(netdev, "tx: unable to map xmit buffer\n");
        adapter->tx_map_failed++;
        if (skb_linearize(skb)) {
                netdev->stats.tx_dropped++;
                goto out;
        }
        force_bounce = 1;
        goto retry_bounce;
}

static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
{
        struct tcphdr *tcph;
        int offset = 0;
        int hdr_len;

        /* only TCP packets will be aggregated */
        if (skb->protocol == htons(ETH_P_IP)) {
                struct iphdr *iph = (struct iphdr *)skb->data;

                if (iph->protocol == IPPROTO_TCP) {
                        offset = iph->ihl * 4;
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
                } else {
                        return;
                }
        } else if (skb->protocol == htons(ETH_P_IPV6)) {
                struct ipv6hdr *iph6 = (struct ipv6hdr *)skb->data;

                if (iph6->nexthdr == IPPROTO_TCP) {
                        offset = sizeof(struct ipv6hdr);
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
                } else {
                        return;
                }
        } else {
                return;
        }
        /* if mss is not set through Large Packet bit/mss in rx buffer,
         * expect that the mss will be written to the tcp header checksum.
         */
        tcph = (struct tcphdr *)(skb->data + offset);
        if (lrg_pkt) {
                skb_shinfo(skb)->gso_size = mss;
        } else if (offset) {
                skb_shinfo(skb)->gso_size = ntohs(tcph->check);
                tcph->check = 0;
        }

        if (skb_shinfo(skb)->gso_size) {
                hdr_len = offset + tcph->doff * 4;
                skb_shinfo(skb)->gso_segs =
                                DIV_ROUND_UP(skb->len - hdr_len,
                                             skb_shinfo(skb)->gso_size);
        }
}
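
/*
 * Worked example (illustrative): for an aggregated TCP/IPv4 frame with
 * skb->len == 14480, a 20-byte IP header and a 20-byte TCP header
 * (hdr_len == 40), and gso_size == 1448, the helper sets
 * gso_segs = DIV_ROUND_UP(14480 - 40, 1448) = 10.
 */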
1261
1262static void ibmveth_rx_csum_helper(struct sk_buff *skb,
1263                                   struct ibmveth_adapter *adapter)
1264{
1265        struct iphdr *iph = NULL;
1266        struct ipv6hdr *iph6 = NULL;
1267        __be16 skb_proto = 0;
1268        u16 iphlen = 0;
1269        u16 iph_proto = 0;
1270        u16 tcphdrlen = 0;
1271
1272        skb_proto = be16_to_cpu(skb->protocol);
1273
1274        if (skb_proto == ETH_P_IP) {
1275                iph = (struct iphdr *)skb->data;
1276
1277                /* If the IP checksum is not offloaded and if the packet
1278                 *  is large send, the checksum must be rebuilt.
1279                 */
1280                if (iph->check == 0xffff) {
1281                        iph->check = 0;
1282                        iph->check = ip_fast_csum((unsigned char *)iph,
1283                                                  iph->ihl);
1284                }
1285
1286                iphlen = iph->ihl * 4;
1287                iph_proto = iph->protocol;
1288        } else if (skb_proto == ETH_P_IPV6) {
1289                iph6 = (struct ipv6hdr *)skb->data;
1290                iphlen = sizeof(struct ipv6hdr);
1291                iph_proto = iph6->nexthdr;
1292        }
1293
1294        /* In OVS environment, when a flow is not cached, specifically for a
1295         * new TCP connection, the first packet information is passed up
1296         * the user space for finding a flow. During this process, OVS computes
1297         * checksum on the first packet when CHECKSUM_PARTIAL flag is set.
1298         *
1299         * Given that we zeroed out TCP checksum field in transmit path
1300         * (refer ibmveth_start_xmit routine) as we set "no checksum bit",
1301         * OVS computed checksum will be incorrect w/o TCP pseudo checksum
1302         * in the packet. This leads to OVS dropping the packet and hence
1303         * TCP retransmissions are seen.
1304         *
1305         * So, re-compute TCP pseudo header checksum.
1306         */
1307        if (iph_proto == IPPROTO_TCP && adapter->is_active_trunk) {
1308                struct tcphdr *tcph = (struct tcphdr *)(skb->data + iphlen);
1309
1310                tcphdrlen = skb->len - iphlen;
1311
1312                /* Recompute TCP pseudo header checksum */
1313                if (skb_proto == ETH_P_IP)
1314                        tcph->check = ~csum_tcpudp_magic(iph->saddr,
1315                                        iph->daddr, tcphdrlen, iph_proto, 0);
1316                else if (skb_proto == ETH_P_IPV6)
1317                        tcph->check = ~csum_ipv6_magic(&iph6->saddr,
1318                                        &iph6->daddr, tcphdrlen, iph_proto, 0);
1319
1320                /* Setup SKB fields for checksum offload */
1321                skb_partial_csum_set(skb, iphlen,
1322                                     offsetof(struct tcphdr, check));
1323                skb_reset_network_header(skb);
1324        }
1325}
1326
1327static int ibmveth_poll(struct napi_struct *napi, int budget)
1328{
1329        struct ibmveth_adapter *adapter =
1330                        container_of(napi, struct ibmveth_adapter, napi);
1331        struct net_device *netdev = adapter->netdev;
1332        int frames_processed = 0;
1333        unsigned long lpar_rc;
1334        u16 mss = 0;
1335
1336        while (frames_processed < budget) {
1337                if (!ibmveth_rxq_pending_buffer(adapter))
1338                        break;
1339
1340                smp_rmb();
1341                if (!ibmveth_rxq_buffer_valid(adapter)) {
1342                        wmb(); /* suggested by larson1 */
1343                        adapter->rx_invalid_buffer++;
1344                        netdev_dbg(netdev, "recycling invalid buffer\n");
1345                        ibmveth_rxq_recycle_buffer(adapter);
1346                } else {
1347                        struct sk_buff *skb, *new_skb;
1348                        int length = ibmveth_rxq_frame_length(adapter);
1349                        int offset = ibmveth_rxq_frame_offset(adapter);
1350                        int csum_good = ibmveth_rxq_csum_good(adapter);
1351                        int lrg_pkt = ibmveth_rxq_large_packet(adapter);
1352
1353                        skb = ibmveth_rxq_get_buffer(adapter);
1354
1355                        /* if the large packet bit is set in the rx queue
1356                         * descriptor, the mss will be written by PHYP eight
1357                         * bytes from the start of the rx buffer, which is
1358                         * skb->data at this stage
1359                         */
1360                        if (lrg_pkt) {
1361                                __be64 *rxmss = (__be64 *)(skb->data + 8);
1362
1363                                mss = (u16)be64_to_cpu(*rxmss);
1364                        }
1365
1366                        new_skb = NULL;
1367                        if (length < rx_copybreak)
1368                                new_skb = netdev_alloc_skb(netdev, length);
1369
1370                        if (new_skb) {
1371                                skb_copy_to_linear_data(new_skb,
1372                                                        skb->data + offset,
1373                                                        length);
1374                                if (rx_flush)
1375                                        ibmveth_flush_buffer(skb->data,
1376                                                length + offset);
1377                                if (!ibmveth_rxq_recycle_buffer(adapter))
1378                                        kfree_skb(skb);
1379                                skb = new_skb;
1380                        } else {
1381                                ibmveth_rxq_harvest_buffer(adapter);
1382                                skb_reserve(skb, offset);
1383                        }
1384
1385                        skb_put(skb, length);
1386                        skb->protocol = eth_type_trans(skb, netdev);
1387
1388                        if (csum_good) {
1389                                skb->ip_summed = CHECKSUM_UNNECESSARY;
1390                                ibmveth_rx_csum_helper(skb, adapter);
1391                        }
1392
1393                        if (length > netdev->mtu + ETH_HLEN) {
1394                                ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
1395                                adapter->rx_large_packets++;
1396                        }
1397
1398                        napi_gro_receive(napi, skb);    /* send it up */
1399
1400                        netdev->stats.rx_packets++;
1401                        netdev->stats.rx_bytes += length;
1402                        frames_processed++;
1403                }
1404        }
1405
1406        ibmveth_replenish_task(adapter);
1407
1408        if (frames_processed < budget) {
1409                napi_complete_done(napi, frames_processed);
1410
1411                /* We think we are done - reenable interrupts,
1412                 * then check once more to make sure we are done.
1413                 */
1414                lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1415                                       VIO_IRQ_ENABLE);
1416
1417                BUG_ON(lpar_rc != H_SUCCESS);
1418
1419                if (ibmveth_rxq_pending_buffer(adapter) &&
1420                    napi_reschedule(napi)) {
1421                        lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1422                                               VIO_IRQ_DISABLE);
1423                }
1424        }
1425
1426        return frames_processed;
1427}
1428
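/* The device interrupt only kicks NAPI: it masks further interrupts with
 * h_vio_signal(VIO_IRQ_DISABLE) and defers all receive work to
 * ibmveth_poll(), which re-enables the interrupt and re-checks the queue
 * before completing, so a buffer arriving in that window is not lost.
 */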
1429static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
1430{
1431        struct net_device *netdev = dev_instance;
1432        struct ibmveth_adapter *adapter = netdev_priv(netdev);
1433        unsigned long lpar_rc;
1434
1435        if (napi_schedule_prep(&adapter->napi)) {
1436                lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1437                                       VIO_IRQ_DISABLE);
1438                BUG_ON(lpar_rc != H_SUCCESS);
1439                __napi_schedule(&adapter->napi);
1440        }
1441        return IRQ_HANDLED;
1442}
1443
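/* Program the firmware multicast filter. In promiscuous mode, or when the
 * multicast list exceeds the filter size reported at probe time, filtering
 * is disabled and all multicast frames are accepted; otherwise the filter
 * table is cleared and rebuilt from the current address list.
 */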
1444static void ibmveth_set_multicast_list(struct net_device *netdev)
1445{
1446        struct ibmveth_adapter *adapter = netdev_priv(netdev);
1447        unsigned long lpar_rc;
1448
1449        if ((netdev->flags & IFF_PROMISC) ||
1450            (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
1451                lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1452                                           IbmVethMcastEnableRecv |
1453                                           IbmVethMcastDisableFiltering,
1454                                           0);
1455                if (lpar_rc != H_SUCCESS) {
1456                        netdev_err(netdev, "h_multicast_ctrl rc=%ld when entering promisc mode\n",
1457                                   lpar_rc);
1458                }
1459        } else {
1460                struct netdev_hw_addr *ha;
1461                /* clear the filter table & disable filtering */
1462                lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1463                                           IbmVethMcastEnableRecv |
1464                                           IbmVethMcastDisableFiltering |
1465                                           IbmVethMcastClearFilterTable,
1466                                           0);
1467                if (lpar_rc != H_SUCCESS) {
1468                        netdev_err(netdev,
1469                                   "h_multicast_ctrl rc=%ld when attempting to clear filter table\n",
1470                                   lpar_rc);
1471                }
1472                /* add the addresses to the filter table */
1473                netdev_for_each_mc_addr(ha, netdev) {
1474                        u64 mcast_addr = ibmveth_encode_mac_addr(ha->addr);
1475
1477                        lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1478                                                   IbmVethMcastAddFilter,
1479                                                   mcast_addr);
1480                        if (lpar_rc != H_SUCCESS) {
1481                                netdev_err(netdev,
1482                                           "h_multicast_ctrl rc=%ld when adding an entry to the filter table\n",
1483                                           lpar_rc);
1484                        }
1485                }
1486
1487                /* re-enable filtering */
1488                lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1489                                           IbmVethMcastEnableFiltering,
1490                                           0);
1491                if (lpar_rc != H_SUCCESS) {
1492                        netdev_err(netdev, "h_multicast_ctrl rc=%ld when enabling filtering\n",
1493                                   lpar_rc);
1494                }
1495        }
1496}
1497
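/* An MTU change must find a buffer pool whose buffers can hold the new MTU
 * plus IBMVETH_BUFF_OH of overhead. If the interface is running it is
 * closed and reopened so the new set of active pools can be registered
 * with the hypervisor.
 */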
1498static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
1499{
1500        struct ibmveth_adapter *adapter = netdev_priv(dev);
1501        struct vio_dev *viodev = adapter->vdev;
1502        int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
1503        int i, rc;
1504        int need_restart = 0;
1505
1506        for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
1507                if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size)
1508                        break;
1509
1510        if (i == IBMVETH_NUM_BUFF_POOLS)
1511                return -EINVAL;
1512
1513        /* Deactivate all the buffer pools so that the next loop can activate
1514         * only the buffer pools necessary to hold the new MTU */
1515        if (netif_running(adapter->netdev)) {
1516                need_restart = 1;
1517                adapter->pool_config = 1;
1518                ibmveth_close(adapter->netdev);
1519                adapter->pool_config = 0;
1520        }
1521
1522        /* Activate pools in ascending size order until one can hold the new MTU */
1523        for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1524                adapter->rx_buff_pool[i].active = 1;
1525
1526                if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) {
1527                        dev->mtu = new_mtu;
1528                        vio_cmo_set_dev_desired(viodev,
1529                                                ibmveth_get_desired_dma
1530                                                (viodev));
1531                        if (need_restart)
1532                                return ibmveth_open(adapter->netdev);
1534                        return 0;
1535                }
1536        }
1537
1538        if (need_restart && (rc = ibmveth_open(adapter->netdev)))
1539                return rc;
1540
1541        return -EINVAL;
1542}
1543
1544#ifdef CONFIG_NET_POLL_CONTROLLER
1545static void ibmveth_poll_controller(struct net_device *dev)
1546{
1547        ibmveth_replenish_task(netdev_priv(dev));
1548        ibmveth_interrupt(dev->irq, dev);
1549}
1550#endif
1551
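/* Informally, the entitlement computed below is
 *
 *   BUFF_LIST + FILT_LIST + align(MTU)
 *     + sum over active pools of (size * align(buff_size))
 *     + align(rxq_entries * sizeof(struct ibmveth_rx_q_entry))
 *
 * where align() is IOMMU_PAGE_ALIGN() against this device's IOMMU table.
 */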
1552/**
1553 * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
1554 *
1555 * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
1556 *
1557 * Return value:
1558 *      Number of bytes of IO data the driver will need to perform well.
1559 */
1560static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
1561{
1562        struct net_device *netdev = dev_get_drvdata(&vdev->dev);
1563        struct ibmveth_adapter *adapter;
1564        struct iommu_table *tbl;
1565        unsigned long ret;
1566        int i;
1567        int rxqentries = 1;
1568
1569        tbl = get_iommu_table_base(&vdev->dev);
1570
1571        /* netdev inits at probe time along with the structures we need below */
1572        if (!netdev)
1573                return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl);
1574
1575        adapter = netdev_priv(netdev);
1576
1577        ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
1578        ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl);
1579
1580        for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1581                /* add the size of the active receive buffers */
1582                if (adapter->rx_buff_pool[i].active)
1583                        ret += adapter->rx_buff_pool[i].size *
1584                               IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].buff_size,
1585                                                tbl);
1587                rxqentries += adapter->rx_buff_pool[i].size;
1588        }
1589        /* add the size of the receive queue entries */
1590        ret += IOMMU_PAGE_ALIGN(
1591                rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl);
1592
1593        return ret;
1594}
1595
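/* The MAC address is owned by the hypervisor: change it there first with
 * h_change_logical_lan_mac() and mirror it into dev_addr only on success.
 */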
1596static int ibmveth_set_mac_addr(struct net_device *dev, void *p)
1597{
1598        struct ibmveth_adapter *adapter = netdev_priv(dev);
1599        struct sockaddr *addr = p;
1600        u64 mac_address;
1601        int rc;
1602
1603        if (!is_valid_ether_addr(addr->sa_data))
1604                return -EADDRNOTAVAIL;
1605
1606        mac_address = ibmveth_encode_mac_addr(addr->sa_data);
1607        rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address);
1608        if (rc) {
1609                netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc);
1610                return rc;
1611        }
1612
1613        ether_addr_copy(dev->dev_addr, addr->sa_data);
1614
1615        return 0;
1616}
1617
1618static const struct net_device_ops ibmveth_netdev_ops = {
1619        .ndo_open               = ibmveth_open,
1620        .ndo_stop               = ibmveth_close,
1621        .ndo_start_xmit         = ibmveth_start_xmit,
1622        .ndo_set_rx_mode        = ibmveth_set_multicast_list,
1623        .ndo_do_ioctl           = ibmveth_ioctl,
1624        .ndo_change_mtu         = ibmveth_change_mtu,
1625        .ndo_fix_features       = ibmveth_fix_features,
1626        .ndo_set_features       = ibmveth_set_features,
1627        .ndo_validate_addr      = eth_validate_addr,
1628        .ndo_set_mac_address    = ibmveth_set_mac_addr,
1629#ifdef CONFIG_NET_POLL_CONTROLLER
1630        .ndo_poll_controller    = ibmveth_poll_controller,
1631#endif
1632};
1633
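/* Probe: pull the MAC address and multicast filter size from the firmware
 * device attributes, allocate and initialise the netdev, derive the
 * checksum/TSO feature set from the illan attributes, create the rx buffer
 * pools and their sysfs controls, and register the netdev.
 */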
1634static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
1635{
1636        int rc, i, mac_len;
1637        struct net_device *netdev;
1638        struct ibmveth_adapter *adapter;
1639        unsigned char *mac_addr_p;
1640        __be32 *mcastFilterSize_p;
1641        long ret;
1642        unsigned long ret_attr;
1643
1644        dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
1645                dev->unit_address);
1646
1647        mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
1648                                                        &mac_len);
1649        if (!mac_addr_p) {
1650                dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
1651                return -EINVAL;
1652        }
1653        /* Workaround for old/broken pHyp: 8-byte attrs hold the MAC in the last 6 bytes */
1654        if (mac_len == 8) {
1655                mac_addr_p += 2;
1656        } else if (mac_len != 6) {
1657                dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n",
1658                        mac_len);
1659                return -EINVAL;
1660        }
1661
1662        mcastFilterSize_p = (__be32 *)vio_get_attribute(dev,
1663                                                        VETH_MCAST_FILTER_SIZE,
1664                                                        NULL);
1665        if (!mcastFilterSize_p) {
1666                dev_err(&dev->dev,
1667                        "Can't find VETH_MCAST_FILTER_SIZE attribute\n");
1668                return -EINVAL;
1669        }
1670
1671        netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
1672
1673        if (!netdev)
1674                return -ENOMEM;
1675
1676        adapter = netdev_priv(netdev);
1677        dev_set_drvdata(&dev->dev, netdev);
1678
1679        adapter->vdev = dev;
1680        adapter->netdev = netdev;
1681        adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
1682        adapter->pool_config = 0;
1683        ibmveth_init_link_settings(netdev);
1684
1685        netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
1686
1687        netdev->irq = dev->irq;
1688        netdev->netdev_ops = &ibmveth_netdev_ops;
1689        netdev->ethtool_ops = &netdev_ethtool_ops;
1690        SET_NETDEV_DEV(netdev, &dev->dev);
1691        netdev->hw_features = NETIF_F_SG;
1692        if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) {
1693                netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1694                                       NETIF_F_RXCSUM;
1695        }
1696
1697        netdev->features |= netdev->hw_features;
1698
1699        ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
1700
1701        /* Enable TSO by default only if firmware has the new large-send method and old_large_send is unset */
1702        if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
1703            !old_large_send) {
1704                netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
1705                netdev->features |= netdev->hw_features;
1706        } else {
1707                netdev->hw_features |= NETIF_F_TSO;
1708        }
1709
1710        adapter->is_active_trunk = false;
1711        if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK)) {
1712                adapter->is_active_trunk = true;
1713                netdev->hw_features |= NETIF_F_FRAGLIST;
1714                netdev->features |= NETIF_F_FRAGLIST;
1715        }
1716
1717        netdev->min_mtu = IBMVETH_MIN_MTU;
1718        netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH;
1719
1720        memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
1721
1722        if (firmware_has_feature(FW_FEATURE_CMO))
1723                memcpy(pool_count, pool_count_cmo, sizeof(pool_count));
1724
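        /* Create the rx buffer pools. Each pool is exposed as a sysfs
         * kobject ("pool0".."pool<N-1>") under the vio device, with
         * active/num/size attributes handled by veth_pool_show/store().
         */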
1725        for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1726                struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
1727                int error;
1728
1729                ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
1730                                         pool_count[i], pool_size[i],
1731                                         pool_active[i]);
1732                error = kobject_init_and_add(kobj, &ktype_veth_pool,
1733                                             &dev->dev.kobj, "pool%d", i);
1734                if (!error)
1735                        kobject_uevent(kobj, KOBJ_ADD);
1736        }
1737
1738        netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);
1739        netdev_dbg(netdev, "registering netdev...\n");
1740
1741        ibmveth_set_features(netdev, netdev->features);
1742
1743        rc = register_netdev(netdev);
1744
1745        if (rc) {
1746                netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
1747                free_netdev(netdev);
1748                return rc;
1749        }
1750
1751        netdev_dbg(netdev, "registered\n");
1752
1753        return 0;
1754}
1755
1756static int ibmveth_remove(struct vio_dev *dev)
1757{
1758        struct net_device *netdev = dev_get_drvdata(&dev->dev);
1759        struct ibmveth_adapter *adapter = netdev_priv(netdev);
1760        int i;
1761
1762        for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
1763                kobject_put(&adapter->rx_buff_pool[i].kobj);
1764
1765        unregister_netdev(netdev);
1766
1767        free_netdev(netdev);
1768        dev_set_drvdata(&dev->dev, NULL);
1769
1770        return 0;
1771}
1772
1773static struct attribute veth_active_attr;
1774static struct attribute veth_num_attr;
1775static struct attribute veth_size_attr;
1776
1777static ssize_t veth_pool_show(struct kobject *kobj,
1778                              struct attribute *attr, char *buf)
1779{
1780        struct ibmveth_buff_pool *pool = container_of(kobj,
1781                                                      struct ibmveth_buff_pool,
1782                                                      kobj);
1783
1784        if (attr == &veth_active_attr)
1785                return sprintf(buf, "%d\n", pool->active);
1786        else if (attr == &veth_num_attr)
1787                return sprintf(buf, "%d\n", pool->size);
1788        else if (attr == &veth_size_attr)
1789                return sprintf(buf, "%d\n", pool->buff_size);
1790        return 0;
1791}
1792
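/* sysfs store for a pool's active/num/size attributes. Any change while
 * the interface is running closes and reopens the device so the new pool
 * layout can be registered with the hypervisor. A pool may only be
 * deactivated if another active pool can still hold an MTU-sized packet.
 */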
1793static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
1794                               const char *buf, size_t count)
1795{
1796        struct ibmveth_buff_pool *pool = container_of(kobj,
1797                                                      struct ibmveth_buff_pool,
1798                                                      kobj);
1799        struct net_device *netdev = dev_get_drvdata(
1800            container_of(kobj->parent, struct device, kobj));
1801        struct ibmveth_adapter *adapter = netdev_priv(netdev);
1802        long value = simple_strtol(buf, NULL, 10);
1803        long rc;
1804
1805        if (attr == &veth_active_attr) {
1806                if (value && !pool->active) {
1807                        if (netif_running(netdev)) {
1808                                if (ibmveth_alloc_buffer_pool(pool)) {
1809                                        netdev_err(netdev,
1810                                                   "unable to alloc pool\n");
1811                                        return -ENOMEM;
1812                                }
1813                                pool->active = 1;
1814                                adapter->pool_config = 1;
1815                                ibmveth_close(netdev);
1816                                adapter->pool_config = 0;
1817                                if ((rc = ibmveth_open(netdev)))
1818                                        return rc;
1819                        } else {
1820                                pool->active = 1;
1821                        }
1822                } else if (!value && pool->active) {
1823                        int mtu = netdev->mtu + IBMVETH_BUFF_OH;
1824                        int i;
1825                        /* Make sure there is a buffer pool with buffers that
1826                         * can hold a packet of the size of the MTU */
1827                        for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1828                                if (pool == &adapter->rx_buff_pool[i])
1829                                        continue;
1830                                if (!adapter->rx_buff_pool[i].active)
1831                                        continue;
1832                                if (mtu <= adapter->rx_buff_pool[i].buff_size)
1833                                        break;
1834                        }
1835
1836                        if (i == IBMVETH_NUM_BUFF_POOLS) {
1837                                netdev_err(netdev, "no active pool >= MTU\n");
1838                                return -EPERM;
1839                        }
1840
1841                        if (netif_running(netdev)) {
1842                                adapter->pool_config = 1;
1843                                ibmveth_close(netdev);
1844                                pool->active = 0;
1845                                adapter->pool_config = 0;
1846                                if ((rc = ibmveth_open(netdev)))
1847                                        return rc;
1848                        }
1849                        pool->active = 0;
1850                }
1851        } else if (attr == &veth_num_attr) {
1852                if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
1853                        return -EINVAL;
1854                } else {
1855                        if (netif_running(netdev)) {
1856                                adapter->pool_config = 1;
1857                                ibmveth_close(netdev);
1858                                adapter->pool_config = 0;
1859                                pool->size = value;
1860                                if ((rc = ibmveth_open(netdev)))
1861                                        return rc;
1862                        } else {
1863                                pool->size = value;
1864                        }
1865                }
1866        } else if (attr == &veth_size_attr) {
1867                if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
1868                        return -EINVAL;
1869                } else {
1870                        if (netif_running(netdev)) {
1871                                adapter->pool_config = 1;
1872                                ibmveth_close(netdev);
1873                                adapter->pool_config = 0;
1874                                pool->buff_size = value;
1875                                if ((rc = ibmveth_open(netdev)))
1876                                        return rc;
1877                        } else {
1878                                pool->buff_size = value;
1879                        }
1880                }
1881        }
1882
1883        /* kick the interrupt handler to allocate/deallocate pools */
1884        ibmveth_interrupt(netdev->irq, netdev);
1885        return count;
1886}
1887
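/* Example usage from userspace (sketch only; the unit address 30000002 and
 * exact sysfs path are illustrative and vary by partition):
 *
 *   echo 0    > /sys/devices/vio/30000002/pool3/active  # disable pool 3
 *   echo 512  > /sys/devices/vio/30000002/pool0/num     # 512 buffers
 *   echo 2048 > /sys/devices/vio/30000002/pool1/size    # 2048-byte buffers
 *
 * Each write may bounce the interface if it is running.
 */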
1888
1889#define ATTR(_name, _mode)                              \
1890        struct attribute veth_##_name##_attr = {        \
1891        .name = __stringify(_name), .mode = _mode,      \
1892        }
1893
1894static ATTR(active, 0644);
1895static ATTR(num, 0644);
1896static ATTR(size, 0644);
1897
1898static struct attribute *veth_pool_attrs[] = {
1899        &veth_active_attr,
1900        &veth_num_attr,
1901        &veth_size_attr,
1902        NULL,
1903};
1904
1905static const struct sysfs_ops veth_pool_ops = {
1906        .show   = veth_pool_show,
1907        .store  = veth_pool_store,
1908};
1909
1910static struct kobj_type ktype_veth_pool = {
1911        .release        = NULL, /* pools are embedded in the adapter; nothing to free */
1912        .sysfs_ops      = &veth_pool_ops,
1913        .default_attrs  = veth_pool_attrs,
1914};
1915
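/* Resume: fire the interrupt handler once so NAPI gets scheduled, which
 * replenishes the buffer pools and harvests anything that was queued
 * while the partition was suspended.
 */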
1916static int ibmveth_resume(struct device *dev)
1917{
1918        struct net_device *netdev = dev_get_drvdata(dev);
1919        ibmveth_interrupt(netdev->irq, netdev);
1920        return 0;
1921}
1922
1923static const struct vio_device_id ibmveth_device_table[] = {
1924        { "network", "IBM,l-lan"},
1925        { "", "" }
1926};
1927MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
1928
1929static const struct dev_pm_ops ibmveth_pm_ops = {
1930        .resume = ibmveth_resume
1931};
1932
1933static struct vio_driver ibmveth_driver = {
1934        .id_table       = ibmveth_device_table,
1935        .probe          = ibmveth_probe,
1936        .remove         = ibmveth_remove,
1937        .get_desired_dma = ibmveth_get_desired_dma,
1938        .name           = ibmveth_driver_name,
1939        .pm             = &ibmveth_pm_ops,
1940};
1941
1942static int __init ibmveth_module_init(void)
1943{
1944        printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
1945               ibmveth_driver_string, ibmveth_driver_version);
1946
1947        return vio_register_driver(&ibmveth_driver);
1948}
1949
1950static void __exit ibmveth_module_exit(void)
1951{
1952        vio_unregister_driver(&ibmveth_driver);
1953}
1954
1955module_init(ibmveth_module_init);
1956module_exit(ibmveth_module_exit);
1957