linux/drivers/s390/net/qeth_core_main.c
// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2007, 2009
 *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *               Frank Pavlic <fpavlic@de.ibm.com>,
 *               Thomas Spatzier <tspat@de.ibm.com>,
 *               Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/rcutree.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>

#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>
#include <net/sock.h>

#include <asm/ebcdic.h>
#include <asm/chpid.h>
#include <asm/sysinfo.h>
#include <asm/diag.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/cpcmd.h>

#include "qeth_core.h"

struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
        /* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
        /*                   N  P  A    M  L  V                      H  */
        [QETH_DBF_SETUP] = {"qeth_setup",
                                8, 1,   8, 5, &debug_hex_ascii_view, NULL},
        [QETH_DBF_MSG]   = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
                            &debug_sprintf_view, NULL},
        [QETH_DBF_CTRL]  = {"qeth_control",
                8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
EXPORT_SYMBOL_GPL(qeth_dbf);

static struct kmem_cache *qeth_core_header_cache;
static struct kmem_cache *qeth_qdio_outbuf_cache;

static struct device *qeth_core_root_dev;
static struct dentry *qeth_debugfs_root;
static struct lock_class_key qdio_out_skb_queue_key;

static void qeth_issue_next_read_cb(struct qeth_card *card,
                                    struct qeth_cmd_buffer *iob,
                                    unsigned int data_length);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);

static const char *qeth_get_cardname(struct qeth_card *card)
{
        if (IS_VM_NIC(card)) {
                switch (card->info.type) {
                case QETH_CARD_TYPE_OSD:
                        return " Virtual NIC QDIO";
                case QETH_CARD_TYPE_IQD:
                        return " Virtual NIC Hiper";
                case QETH_CARD_TYPE_OSM:
                        return " Virtual NIC QDIO - OSM";
                case QETH_CARD_TYPE_OSX:
                        return " Virtual NIC QDIO - OSX";
                default:
                        return " unknown";
                }
        } else {
                switch (card->info.type) {
                case QETH_CARD_TYPE_OSD:
                        return " OSD Express";
                case QETH_CARD_TYPE_IQD:
                        return " HiperSockets";
                case QETH_CARD_TYPE_OSM:
                        return " OSM QDIO";
                case QETH_CARD_TYPE_OSX:
                        return " OSX QDIO";
                default:
                        return " unknown";
                }
        }
        return " n/a";
}

/* max length to be returned: 14 */
const char *qeth_get_cardname_short(struct qeth_card *card)
{
        if (IS_VM_NIC(card)) {
                switch (card->info.type) {
                case QETH_CARD_TYPE_OSD:
                        return "Virt.NIC QDIO";
                case QETH_CARD_TYPE_IQD:
                        return "Virt.NIC Hiper";
                case QETH_CARD_TYPE_OSM:
                        return "Virt.NIC OSM";
                case QETH_CARD_TYPE_OSX:
                        return "Virt.NIC OSX";
                default:
                        return "unknown";
                }
        } else {
                switch (card->info.type) {
                case QETH_CARD_TYPE_OSD:
                        switch (card->info.link_type) {
                        case QETH_LINK_TYPE_FAST_ETH:
                                return "OSD_100";
                        case QETH_LINK_TYPE_HSTR:
                                return "HSTR";
                        case QETH_LINK_TYPE_GBIT_ETH:
                                return "OSD_1000";
                        case QETH_LINK_TYPE_10GBIT_ETH:
                                return "OSD_10GIG";
                        case QETH_LINK_TYPE_25GBIT_ETH:
                                return "OSD_25GIG";
                        case QETH_LINK_TYPE_LANE_ETH100:
                                return "OSD_FE_LANE";
                        case QETH_LINK_TYPE_LANE_TR:
                                return "OSD_TR_LANE";
                        case QETH_LINK_TYPE_LANE_ETH1000:
                                return "OSD_GbE_LANE";
                        case QETH_LINK_TYPE_LANE:
                                return "OSD_ATM_LANE";
                        default:
                                return "OSD_Express";
                        }
                case QETH_CARD_TYPE_IQD:
                        return "HiperSockets";
                case QETH_CARD_TYPE_OSM:
                        return "OSM_1000";
                case QETH_CARD_TYPE_OSX:
                        return "OSX_10GIG";
                default:
                        return "unknown";
                }
        }
        return "n/a";
}

void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
                         int clear_start_mask)
{
        unsigned long flags;

        spin_lock_irqsave(&card->thread_mask_lock, flags);
        card->thread_allowed_mask = threads;
        if (clear_start_mask)
                card->thread_start_mask &= threads;
        spin_unlock_irqrestore(&card->thread_mask_lock, flags);
        wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);

int qeth_threads_running(struct qeth_card *card, unsigned long threads)
{
        unsigned long flags;
        int rc = 0;

        spin_lock_irqsave(&card->thread_mask_lock, flags);
        rc = (card->thread_running_mask & threads);
        spin_unlock_irqrestore(&card->thread_mask_lock, flags);
        return rc;
}
EXPORT_SYMBOL_GPL(qeth_threads_running);

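/* Unlink all entries from the working RX buffer pool and drop the input
 * queue's references to them. The entries themselves stay on the init
 * pool list and are not freed here.
 */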
static void qeth_clear_working_pool_list(struct qeth_card *card)
{
        struct qeth_buffer_pool_entry *pool_entry, *tmp;
        struct qeth_qdio_q *queue = card->qdio.in_q;
        unsigned int i;

        QETH_CARD_TEXT(card, 5, "clwrklst");
        list_for_each_entry_safe(pool_entry, tmp,
                                 &card->qdio.in_buf_pool.entry_list, list)
                list_del(&pool_entry->list);

        if (!queue)
                return;

        for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
                queue->bufs[i].pool_entry = NULL;
}

static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(entry->elements); i++) {
                if (entry->elements[i])
                        __free_page(entry->elements[i]);
        }

        kfree(entry);
}

static void qeth_free_buffer_pool(struct qeth_card *card)
{
        struct qeth_buffer_pool_entry *entry, *tmp;

        list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list,
                                 init_list) {
                list_del(&entry->init_list);
                qeth_free_pool_entry(entry);
        }
}

static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
{
        struct qeth_buffer_pool_entry *entry;
        unsigned int i;

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return NULL;

        for (i = 0; i < pages; i++) {
                entry->elements[i] = __dev_alloc_page(GFP_KERNEL);

                if (!entry->elements[i]) {
                        qeth_free_pool_entry(entry);
                        return NULL;
                }
        }

        return entry;
}

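/* Pre-allocate the configured number of RX buffer pool entries. On
 * allocation failure the whole pool is torn down again.
 */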
static int qeth_alloc_buffer_pool(struct qeth_card *card)
{
        unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
        unsigned int i;

        QETH_CARD_TEXT(card, 5, "alocpool");
        for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
                struct qeth_buffer_pool_entry *entry;

                entry = qeth_alloc_pool_entry(buf_elements);
                if (!entry) {
                        qeth_free_buffer_pool(card);
                        return -ENOMEM;
                }

                list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
        }
        return 0;
}

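/* Grow or shrink the RX buffer pool to @count entries. As long as the
 * input queue has not been allocated yet, only the target count is
 * recorded and the actual allocation is deferred.
 */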
int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
{
        unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
        struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
        struct qeth_buffer_pool_entry *entry, *tmp;
        int delta = count - pool->buf_count;
        LIST_HEAD(entries);

        QETH_CARD_TEXT(card, 2, "realcbp");

        /* Defer until queue is allocated: */
        if (!card->qdio.in_q)
                goto out;

        /* Remove entries from the pool: */
        while (delta < 0) {
                entry = list_first_entry(&pool->entry_list,
                                         struct qeth_buffer_pool_entry,
                                         init_list);
                list_del(&entry->init_list);
                qeth_free_pool_entry(entry);

                delta++;
        }

        /* Allocate additional entries: */
        while (delta > 0) {
                entry = qeth_alloc_pool_entry(buf_elements);
                if (!entry) {
                        list_for_each_entry_safe(entry, tmp, &entries,
                                                 init_list) {
                                list_del(&entry->init_list);
                                qeth_free_pool_entry(entry);
                        }

                        return -ENOMEM;
                }

                list_add(&entry->init_list, &entries);

                delta--;
        }

        list_splice(&entries, &pool->entry_list);

out:
        card->qdio.in_buf_pool.buf_count = count;
        pool->buf_count = count;
        return 0;
}
EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);

static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
{
        if (!q)
                return;

        qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
        kfree(q);
}

static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
{
        struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
        int i;

        if (!q)
                return NULL;

        if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
                kfree(q);
                return NULL;
        }

        for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
                q->bufs[i].buffer = q->qdio_bufs[i];

        QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
        return q;
}

static int qeth_cq_init(struct qeth_card *card)
{
        int rc;

        if (card->options.cq == QETH_CQ_ENABLED) {
                QETH_CARD_TEXT(card, 2, "cqinit");
                qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
                                   QDIO_MAX_BUFFERS_PER_Q);
                card->qdio.c_q->next_buf_to_init = 127;
                rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
                             card->qdio.no_in_queues - 1, 0, 127, NULL);
                if (rc) {
                        QETH_CARD_TEXT_(card, 2, "1err%d", rc);
                        goto out;
                }
        }
        rc = 0;
out:
        return rc;
}

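/* Allocate the Completion Queue (a second input queue) when enabled; the
 * CQ carries the adapter's asynchronous completion events, which are
 * later turned into AF_IUCV TX notifications.
 */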
static int qeth_alloc_cq(struct qeth_card *card)
{
        if (card->options.cq == QETH_CQ_ENABLED) {
                QETH_CARD_TEXT(card, 2, "cqon");
                card->qdio.c_q = qeth_alloc_qdio_queue();
                if (!card->qdio.c_q) {
                        dev_err(&card->gdev->dev, "Failed to create completion queue\n");
                        return -ENOMEM;
                }

                card->qdio.no_in_queues = 2;
        } else {
                QETH_CARD_TEXT(card, 2, "nocq");
                card->qdio.c_q = NULL;
                card->qdio.no_in_queues = 1;
        }
        QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
        return 0;
}

static void qeth_free_cq(struct qeth_card *card)
{
        if (card->qdio.c_q) {
                --card->qdio.no_in_queues;
                qeth_free_qdio_queue(card->qdio.c_q);
                card->qdio.c_q = NULL;
        }
}

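/* Translate the SBALF 15 completion code of an asynchronously completed
 * buffer into the matching AF_IUCV TX notification.
 */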
static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
                                                        int delayed)
{
        enum iucv_tx_notify n;

        switch (sbalf15) {
        case 0:
                n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
                break;
        case 4:
        case 16:
        case 17:
        case 18:
                n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
                        TX_NOTIFY_UNREACHABLE;
                break;
        default:
                n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
                        TX_NOTIFY_GENERALERROR;
                break;
        }

        return n;
}

static void qeth_put_cmd(struct qeth_cmd_buffer *iob)
{
        if (refcount_dec_and_test(&iob->ref_count)) {
                kfree(iob->data);
                kfree(iob);
        }
}
static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
                           void *data)
{
        ccw->cmd_code = cmd_code;
        ccw->flags = flags | CCW_FLAG_SLI;
        ccw->count = len;
        ccw->cda = (__u32) __pa(data);
}

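/* Re-arm the long-running READ CCW on the read channel. Must be called
 * with the read channel's ccwdev lock held.
 */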
static int __qeth_issue_next_read(struct qeth_card *card)
{
        struct qeth_cmd_buffer *iob = card->read_cmd;
        struct qeth_channel *channel = iob->channel;
        struct ccw1 *ccw = __ccw_from_cmd(iob);
        int rc;

        QETH_CARD_TEXT(card, 5, "issnxrd");
        if (channel->state != CH_STATE_UP)
                return -EIO;

        memset(iob->data, 0, iob->length);
        qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
        iob->callback = qeth_issue_next_read_cb;
        /* keep the cmd alive after completion: */
        qeth_get_cmd(iob);

        QETH_CARD_TEXT(card, 6, "noirqpnd");
        rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
        if (!rc) {
                channel->active_cmd = iob;
        } else {
                QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
                                 rc, CARD_DEVID(card));
                qeth_unlock_channel(card, channel);
                qeth_put_cmd(iob);
                card->read_or_write_problem = 1;
                qeth_schedule_recovery(card);
        }
        return rc;
}

static int qeth_issue_next_read(struct qeth_card *card)
{
        int ret;

        spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
        ret = __qeth_issue_next_read(card);
        spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));

        return ret;
}

static void qeth_enqueue_cmd(struct qeth_card *card,
                             struct qeth_cmd_buffer *iob)
{
        spin_lock_irq(&card->lock);
        list_add_tail(&iob->list_entry, &card->cmd_waiter_list);
        spin_unlock_irq(&card->lock);
}

static void qeth_dequeue_cmd(struct qeth_card *card,
                             struct qeth_cmd_buffer *iob)
{
        spin_lock_irq(&card->lock);
        list_del(&iob->list_entry);
        spin_unlock_irq(&card->lock);
}

static void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
{
        iob->rc = reason;
        complete(&iob->done);
}

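/* The adapter reports addresses that are local to it via REGISTER/
 * UNREGISTER LOCAL ADDR events. They are cached per card in the
 * local_addrs4/local_addrs6 hashes and looked up on the TX path through
 * qeth_next_hop_is_local_v4()/_v6().
 */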
static void qeth_flush_local_addrs4(struct qeth_card *card)
{
        struct qeth_local_addr *addr;
        struct hlist_node *tmp;
        unsigned int i;

        spin_lock_irq(&card->local_addrs4_lock);
        hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) {
                hash_del_rcu(&addr->hnode);
                kfree_rcu(addr, rcu);
        }
        spin_unlock_irq(&card->local_addrs4_lock);
}

static void qeth_flush_local_addrs6(struct qeth_card *card)
{
        struct qeth_local_addr *addr;
        struct hlist_node *tmp;
        unsigned int i;

        spin_lock_irq(&card->local_addrs6_lock);
        hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) {
                hash_del_rcu(&addr->hnode);
                kfree_rcu(addr, rcu);
        }
        spin_unlock_irq(&card->local_addrs6_lock);
}

static void qeth_flush_local_addrs(struct qeth_card *card)
{
        qeth_flush_local_addrs4(card);
        qeth_flush_local_addrs6(card);
}

static void qeth_add_local_addrs4(struct qeth_card *card,
                                  struct qeth_ipacmd_local_addrs4 *cmd)
{
        unsigned int i;

        if (cmd->addr_length !=
            sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
                dev_err_ratelimited(&card->gdev->dev,
                                    "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n",
                                    cmd->addr_length);
                return;
        }

        spin_lock(&card->local_addrs4_lock);
        for (i = 0; i < cmd->count; i++) {
                unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr);
                struct qeth_local_addr *addr;
                bool duplicate = false;

                hash_for_each_possible(card->local_addrs4, addr, hnode, key) {
                        if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) {
                                duplicate = true;
                                break;
                        }
                }

                if (duplicate)
                        continue;

                addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
                if (!addr) {
                        dev_err(&card->gdev->dev,
                                "Failed to allocate local addr object. Traffic to %pI4 might suffer.\n",
                                &cmd->addrs[i].addr);
                        continue;
                }

                ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr);
                hash_add_rcu(card->local_addrs4, &addr->hnode, key);
        }
        spin_unlock(&card->local_addrs4_lock);
}

static void qeth_add_local_addrs6(struct qeth_card *card,
                                  struct qeth_ipacmd_local_addrs6 *cmd)
{
        unsigned int i;

        if (cmd->addr_length !=
            sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
                dev_err_ratelimited(&card->gdev->dev,
                                    "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n",
                                    cmd->addr_length);
                return;
        }

        spin_lock(&card->local_addrs6_lock);
        for (i = 0; i < cmd->count; i++) {
                u32 key = ipv6_addr_hash(&cmd->addrs[i].addr);
                struct qeth_local_addr *addr;
                bool duplicate = false;

                hash_for_each_possible(card->local_addrs6, addr, hnode, key) {
                        if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) {
                                duplicate = true;
                                break;
                        }
                }

                if (duplicate)
                        continue;

                addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
                if (!addr) {
                        dev_err(&card->gdev->dev,
                                "Failed to allocate local addr object. Traffic to %pI6c might suffer.\n",
                                &cmd->addrs[i].addr);
                        continue;
                }

                addr->addr = cmd->addrs[i].addr;
                hash_add_rcu(card->local_addrs6, &addr->hnode, key);
        }
        spin_unlock(&card->local_addrs6_lock);
}

static void qeth_del_local_addrs4(struct qeth_card *card,
                                  struct qeth_ipacmd_local_addrs4 *cmd)
{
        unsigned int i;

        if (cmd->addr_length !=
            sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
                dev_err_ratelimited(&card->gdev->dev,
                                    "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n",
                                    cmd->addr_length);
                return;
        }

        spin_lock(&card->local_addrs4_lock);
        for (i = 0; i < cmd->count; i++) {
                struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i];
                unsigned int key = ipv4_addr_hash(addr->addr);
                struct qeth_local_addr *tmp;

                hash_for_each_possible(card->local_addrs4, tmp, hnode, key) {
                        if (tmp->addr.s6_addr32[3] == addr->addr) {
                                hash_del_rcu(&tmp->hnode);
                                kfree_rcu(tmp, rcu);
                                break;
                        }
                }
        }
        spin_unlock(&card->local_addrs4_lock);
}

static void qeth_del_local_addrs6(struct qeth_card *card,
                                  struct qeth_ipacmd_local_addrs6 *cmd)
{
        unsigned int i;

        if (cmd->addr_length !=
            sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
                dev_err_ratelimited(&card->gdev->dev,
                                    "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n",
                                    cmd->addr_length);
                return;
        }

        spin_lock(&card->local_addrs6_lock);
        for (i = 0; i < cmd->count; i++) {
                struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i];
                u32 key = ipv6_addr_hash(&addr->addr);
                struct qeth_local_addr *tmp;

                hash_for_each_possible(card->local_addrs6, tmp, hnode, key) {
                        if (ipv6_addr_equal(&tmp->addr, &addr->addr)) {
                                hash_del_rcu(&tmp->hnode);
                                kfree_rcu(tmp, rcu);
                                break;
                        }
                }
        }
        spin_unlock(&card->local_addrs6_lock);
}

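/* Check whether the skb's IPv4 next hop is one of the addresses that the
 * adapter reported as local.
 */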
static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
                                      struct sk_buff *skb)
{
        struct qeth_local_addr *tmp;
        bool is_local = false;
        unsigned int key;
        __be32 next_hop;

        if (hash_empty(card->local_addrs4))
                return false;

        rcu_read_lock();
        next_hop = qeth_next_hop_v4_rcu(skb,
                                        qeth_dst_check_rcu(skb, htons(ETH_P_IP)));
        key = ipv4_addr_hash(next_hop);

        hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
                if (tmp->addr.s6_addr32[3] == next_hop) {
                        is_local = true;
                        break;
                }
        }
        rcu_read_unlock();

        return is_local;
}

static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
                                      struct sk_buff *skb)
{
        struct qeth_local_addr *tmp;
        struct in6_addr *next_hop;
        bool is_local = false;
        u32 key;

        if (hash_empty(card->local_addrs6))
                return false;

        rcu_read_lock();
        next_hop = qeth_next_hop_v6_rcu(skb,
                                        qeth_dst_check_rcu(skb, htons(ETH_P_IPV6)));
        key = ipv6_addr_hash(next_hop);

        hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
                if (ipv6_addr_equal(&tmp->addr, next_hop)) {
                        is_local = true;
                        break;
                }
        }
        rcu_read_unlock();

        return is_local;
}

static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v)
{
        struct qeth_card *card = m->private;
        struct qeth_local_addr *tmp;
        unsigned int i;

        rcu_read_lock();
        hash_for_each_rcu(card->local_addrs4, i, tmp, hnode)
                seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]);
        hash_for_each_rcu(card->local_addrs6, i, tmp, hnode)
                seq_printf(m, "%pI6c\n", &tmp->addr);
        rcu_read_unlock();

        return 0;
}

DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr);

static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
                struct qeth_card *card)
{
        const char *ipa_name;
        int com = cmd->hdr.command;

        ipa_name = qeth_get_ipa_cmd_name(com);

        if (rc)
                QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
                                 ipa_name, com, CARD_DEVID(card), rc,
                                 qeth_get_ipa_msg(rc));
        else
                QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
                                 ipa_name, com, CARD_DEVID(card));
}

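/* Inspect a received IPA command: pass solicited replies back to the
 * caller for reply matching, and handle unsolicited events (link
 * changes, bridgeport notifications, local address updates) right here.
 * Returns NULL when the command has been fully consumed.
 */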
static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
                                                struct qeth_ipa_cmd *cmd)
{
        QETH_CARD_TEXT(card, 5, "chkipad");

        if (IS_IPA_REPLY(cmd)) {
                if (cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
                        qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
                return cmd;
        }

        /* handle unsolicited event: */
        switch (cmd->hdr.command) {
        case IPA_CMD_STOPLAN:
                if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
                        dev_err(&card->gdev->dev,
                                "Adjacent port of interface %s is no longer in reflective relay mode, trigger recovery\n",
                                netdev_name(card->dev));
                        /* Set offline, then probably fail to set online: */
                        qeth_schedule_recovery(card);
                } else {
                        /* stay online for subsequent STARTLAN */
                        dev_warn(&card->gdev->dev,
                                 "The link for interface %s on CHPID 0x%X failed\n",
                                 netdev_name(card->dev), card->info.chpid);
                        qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
                        netif_carrier_off(card->dev);
                }
                return NULL;
        case IPA_CMD_STARTLAN:
                dev_info(&card->gdev->dev,
                         "The link for %s on CHPID 0x%X has been restored\n",
                         netdev_name(card->dev), card->info.chpid);
                if (card->info.hwtrap)
                        card->info.hwtrap = 2;
                qeth_schedule_recovery(card);
                return NULL;
        case IPA_CMD_SETBRIDGEPORT_IQD:
        case IPA_CMD_SETBRIDGEPORT_OSA:
        case IPA_CMD_ADDRESS_CHANGE_NOTIF:
                if (card->discipline->control_event_handler(card, cmd))
                        return cmd;
                return NULL;
        case IPA_CMD_REGISTER_LOCAL_ADDR:
                if (cmd->hdr.prot_version == QETH_PROT_IPV4)
                        qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
                else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
                        qeth_add_local_addrs6(card, &cmd->data.local_addrs6);

                QETH_CARD_TEXT(card, 3, "irla");
                return NULL;
        case IPA_CMD_UNREGISTER_LOCAL_ADDR:
                if (cmd->hdr.prot_version == QETH_PROT_IPV4)
                        qeth_del_local_addrs4(card, &cmd->data.local_addrs4);
                else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
                        qeth_del_local_addrs6(card, &cmd->data.local_addrs6);

                QETH_CARD_TEXT(card, 3, "urla");
                return NULL;
        default:
                QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
                return cmd;
        }
}

static void qeth_clear_ipacmd_list(struct qeth_card *card)
{
        struct qeth_cmd_buffer *iob;
        unsigned long flags;

        QETH_CARD_TEXT(card, 4, "clipalst");

        spin_lock_irqsave(&card->lock, flags);
        list_for_each_entry(iob, &card->cmd_waiter_list, list_entry)
                qeth_notify_cmd(iob, -ECANCELED);
        spin_unlock_irqrestore(&card->lock, flags);
}

static int qeth_check_idx_response(struct qeth_card *card,
        unsigned char *buffer)
{
        QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
        if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
                QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
                                 buffer[4]);
                QETH_CARD_TEXT(card, 2, "ckidxres");
                QETH_CARD_TEXT(card, 2, " idxterm");
                QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
                if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
                    buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
                        dev_err(&card->gdev->dev,
                                "The device does not support the configured transport mode\n");
                        return -EPROTONOSUPPORT;
                }
                return -EIO;
        }
        return 0;
}

static void qeth_release_buffer_cb(struct qeth_card *card,
                                   struct qeth_cmd_buffer *iob,
                                   unsigned int data_length)
{
        qeth_put_cmd(iob);
}

static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
{
        qeth_notify_cmd(iob, rc);
        qeth_put_cmd(iob);
}

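/* Allocate a command buffer with room for @ccws CCWs and a DMA-capable
 * data area of @length bytes.
 */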
static struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
                                              unsigned int length,
                                              unsigned int ccws, long timeout)
{
        struct qeth_cmd_buffer *iob;

        if (length > QETH_BUFSIZE)
                return NULL;

        iob = kzalloc(sizeof(*iob), GFP_KERNEL);
        if (!iob)
                return NULL;

        iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
                            GFP_KERNEL | GFP_DMA);
        if (!iob->data) {
                kfree(iob);
                return NULL;
        }

        init_completion(&iob->done);
        spin_lock_init(&iob->lock);
        refcount_set(&iob->ref_count, 1);
        iob->channel = channel;
        iob->timeout = timeout;
        iob->length = length;
        return iob;
}

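/* Completion callback for the READ channel: validate the IDX response,
 * let qeth_check_ipa_data() filter out unsolicited events, match the
 * reply against the pending commands on cmd_waiter_list, and then
 * re-arm the next read.
 */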
static void qeth_issue_next_read_cb(struct qeth_card *card,
                                    struct qeth_cmd_buffer *iob,
                                    unsigned int data_length)
{
        struct qeth_cmd_buffer *request = NULL;
        struct qeth_ipa_cmd *cmd = NULL;
        struct qeth_reply *reply = NULL;
        struct qeth_cmd_buffer *tmp;
        unsigned long flags;
        int rc = 0;

        QETH_CARD_TEXT(card, 4, "sndctlcb");
        rc = qeth_check_idx_response(card, iob->data);
        switch (rc) {
        case 0:
                break;
        case -EIO:
                qeth_schedule_recovery(card);
                fallthrough;
        default:
                qeth_clear_ipacmd_list(card);
                goto err_idx;
        }

        cmd = __ipa_reply(iob);
        if (cmd) {
                cmd = qeth_check_ipa_data(card, cmd);
                if (!cmd)
                        goto out;
        }

        /* match against pending cmd requests */
        spin_lock_irqsave(&card->lock, flags);
        list_for_each_entry(tmp, &card->cmd_waiter_list, list_entry) {
                if (tmp->match && tmp->match(tmp, iob)) {
                        request = tmp;
                        /* take the object outside the lock */
                        qeth_get_cmd(request);
                        break;
                }
        }
        spin_unlock_irqrestore(&card->lock, flags);

        if (!request)
                goto out;

        reply = &request->reply;
        if (!reply->callback) {
                rc = 0;
                goto no_callback;
        }

        spin_lock_irqsave(&request->lock, flags);
        if (request->rc)
                /* Bail out when the requestor has already left: */
                rc = request->rc;
        else
                rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
                                                        (unsigned long)iob);
        spin_unlock_irqrestore(&request->lock, flags);

no_callback:
        if (rc <= 0)
                qeth_notify_cmd(request, rc);
        qeth_put_cmd(request);
out:
        memcpy(&card->seqno.pdu_hdr_ack,
                QETH_PDU_HEADER_SEQ_NO(iob->data),
                QETH_SEQ_NO_LENGTH);
        __qeth_issue_next_read(card);
err_idx:
        qeth_put_cmd(iob);
}

static int qeth_set_thread_start_bit(struct qeth_card *card,
                unsigned long thread)
{
        unsigned long flags;
        int rc = 0;

        spin_lock_irqsave(&card->thread_mask_lock, flags);
        if (!(card->thread_allowed_mask & thread))
                rc = -EPERM;
        else if (card->thread_start_mask & thread)
                rc = -EBUSY;
        else
                card->thread_start_mask |= thread;
        spin_unlock_irqrestore(&card->thread_mask_lock, flags);

        return rc;
}

static void qeth_clear_thread_start_bit(struct qeth_card *card,
                                        unsigned long thread)
{
        unsigned long flags;

        spin_lock_irqsave(&card->thread_mask_lock, flags);
        card->thread_start_mask &= ~thread;
        spin_unlock_irqrestore(&card->thread_mask_lock, flags);
        wake_up(&card->wait_q);
}

static void qeth_clear_thread_running_bit(struct qeth_card *card,
                                          unsigned long thread)
{
        unsigned long flags;

        spin_lock_irqsave(&card->thread_mask_lock, flags);
        card->thread_running_mask &= ~thread;
        spin_unlock_irqrestore(&card->thread_mask_lock, flags);
        wake_up_all(&card->wait_q);
}

static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
        unsigned long flags;
        int rc = 0;

        spin_lock_irqsave(&card->thread_mask_lock, flags);
        if (card->thread_start_mask & thread) {
                if ((card->thread_allowed_mask & thread) &&
                    !(card->thread_running_mask & thread)) {
                        rc = 1;
                        card->thread_start_mask &= ~thread;
                        card->thread_running_mask |= thread;
                } else
                        rc = -EPERM;
        }
        spin_unlock_irqrestore(&card->thread_mask_lock, flags);
        return rc;
}

static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
        int rc = 0;

        wait_event(card->wait_q,
                   (rc = __qeth_do_run_thread(card, thread)) >= 0);
        return rc;
}

int qeth_schedule_recovery(struct qeth_card *card)
{
        int rc;

        QETH_CARD_TEXT(card, 2, "startrec");

        rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
        if (!rc)
                schedule_work(&card->kernel_thread_starter);

        return rc;
}

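/* Evaluate channel status and sense data of a completed I/O. Returns
 * -EIO for conditions that require recovery, 0 otherwise.
 */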
static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
                            struct irb *irb)
{
        int dstat, cstat;
        char *sense;

        sense = (char *) irb->ecw;
        cstat = irb->scsw.cmd.cstat;
        dstat = irb->scsw.cmd.dstat;

        if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
                     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
                     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
                QETH_CARD_TEXT(card, 2, "CGENCHK");
                dev_warn(&cdev->dev, "The qeth device driver "
                        "failed to recover an error on the device\n");
                QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
                                 CCW_DEVID(cdev), dstat, cstat);
                print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
                                16, 1, irb, 64, 1);
                return -EIO;
        }

        if (dstat & DEV_STAT_UNIT_CHECK) {
                if (sense[SENSE_RESETTING_EVENT_BYTE] &
                    SENSE_RESETTING_EVENT_FLAG) {
                        QETH_CARD_TEXT(card, 2, "REVIND");
                        return -EIO;
                }
                if (sense[SENSE_COMMAND_REJECT_BYTE] &
                    SENSE_COMMAND_REJECT_FLAG) {
                        QETH_CARD_TEXT(card, 2, "CMDREJi");
                        return -EIO;
                }
                if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
                        QETH_CARD_TEXT(card, 2, "AFFE");
                        return -EIO;
                }
                if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
                        QETH_CARD_TEXT(card, 2, "ZEROSEN");
                        return 0;
                }
                QETH_CARD_TEXT(card, 2, "DGENCHK");
                return -EIO;
        }
        return 0;
}

static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
                                struct irb *irb)
{
        if (!IS_ERR(irb))
                return 0;

        switch (PTR_ERR(irb)) {
        case -EIO:
                QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
                                 CCW_DEVID(cdev));
                QETH_CARD_TEXT(card, 2, "ckirberr");
                QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
                return -EIO;
        case -ETIMEDOUT:
                dev_warn(&cdev->dev, "A hardware operation timed out"
                        " on the device\n");
                QETH_CARD_TEXT(card, 2, "ckirberr");
                QETH_CARD_TEXT_(card, 2, "  rc%d", -ETIMEDOUT);
                return -ETIMEDOUT;
        default:
                QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
                                 PTR_ERR(irb), CCW_DEVID(cdev));
                QETH_CARD_TEXT(card, 2, "ckirberr");
                QETH_CARD_TEXT(card, 2, "  rc???");
                return PTR_ERR(irb);
        }
}

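/* Interrupt handler for the card's CCW devices, invoked by the CIO layer
 * with the ccwdev lock held.
 */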
static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
                struct irb *irb)
{
        int rc;
        int cstat, dstat;
        struct qeth_cmd_buffer *iob = NULL;
        struct ccwgroup_device *gdev;
        struct qeth_channel *channel;
        struct qeth_card *card;

        /* while we hold the ccwdev lock, this stays valid: */
        gdev = dev_get_drvdata(&cdev->dev);
        card = dev_get_drvdata(&gdev->dev);

        QETH_CARD_TEXT(card, 5, "irq");

        if (card->read.ccwdev == cdev) {
                channel = &card->read;
                QETH_CARD_TEXT(card, 5, "read");
        } else if (card->write.ccwdev == cdev) {
                channel = &card->write;
                QETH_CARD_TEXT(card, 5, "write");
        } else {
                channel = &card->data;
                QETH_CARD_TEXT(card, 5, "data");
        }

        if (intparm == 0) {
                QETH_CARD_TEXT(card, 5, "irqunsol");
        } else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
                QETH_CARD_TEXT(card, 5, "irqunexp");

                dev_err(&cdev->dev,
                        "Received IRQ with intparm %lx, expected %px\n",
                        intparm, channel->active_cmd);
                if (channel->active_cmd)
                        qeth_cancel_cmd(channel->active_cmd, -EIO);
        } else {
                iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
        }

        qeth_unlock_channel(card, channel);

        rc = qeth_check_irb_error(card, cdev, irb);
        if (rc) {
                /* IO was terminated, free its resources. */
                if (iob)
                        qeth_cancel_cmd(iob, rc);
                return;
        }

        if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
                channel->state = CH_STATE_STOPPED;
                wake_up(&card->wait_q);
        }

        if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
                channel->state = CH_STATE_HALTED;
                wake_up(&card->wait_q);
        }

        if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
                                          SCSW_FCTL_HALT_FUNC))) {
                qeth_cancel_cmd(iob, -ECANCELED);
                iob = NULL;
        }

        cstat = irb->scsw.cmd.cstat;
        dstat = irb->scsw.cmd.dstat;

        if ((dstat & DEV_STAT_UNIT_EXCEP) ||
            (dstat & DEV_STAT_UNIT_CHECK) ||
            (cstat)) {
                if (irb->esw.esw0.erw.cons) {
                        dev_warn(&channel->ccwdev->dev,
                                "The qeth device driver failed to recover "
                                "an error on the device\n");
                        QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
                                         CCW_DEVID(channel->ccwdev), cstat,
                                         dstat);
                        print_hex_dump(KERN_WARNING, "qeth: irb ",
                                DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
                        print_hex_dump(KERN_WARNING, "qeth: sense data ",
                                DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
                }

                rc = qeth_get_problem(card, cdev, irb);
                if (rc) {
                        card->read_or_write_problem = 1;
                        if (iob)
                                qeth_cancel_cmd(iob, rc);
                        qeth_clear_ipacmd_list(card);
                        qeth_schedule_recovery(card);
                        return;
                }
        }

        if (iob) {
                /* sanity check: */
                if (irb->scsw.cmd.count > iob->length) {
                        qeth_cancel_cmd(iob, -EIO);
                        return;
                }
                if (iob->callback)
                        iob->callback(card, iob,
                                      iob->length - irb->scsw.cmd.count);
        }
}

static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
                struct qeth_qdio_out_buffer *buf,
                enum iucv_tx_notify notification)
{
        struct sk_buff *skb;

        skb_queue_walk(&buf->skb_list, skb) {
                struct sock *sk = skb->sk;

                QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
                QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
                if (sk && sk->sk_family == PF_IUCV)
                        iucv_sk(sk)->sk_txnotify(sk, notification);
        }
}

static void qeth_tx_complete_buf(struct qeth_qdio_out_q *queue,
                                 struct qeth_qdio_out_buffer *buf, bool error,
                                 int budget)
{
        struct sk_buff *skb;

        /* Empty buffer? */
        if (buf->next_element_to_fill == 0)
                return;

        QETH_TXQ_STAT_INC(queue, bufs);
        QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
        if (error) {
                QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
        } else {
                QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
                QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes);
        }

        while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
                unsigned int bytes = qdisc_pkt_len(skb);
                bool is_tso = skb_is_gso(skb);
                unsigned int packets;

                packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
                if (!error) {
                        if (skb->ip_summed == CHECKSUM_PARTIAL)
                                QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
                        if (skb_is_nonlinear(skb))
                                QETH_TXQ_STAT_INC(queue, skbs_sg);
                        if (is_tso) {
                                QETH_TXQ_STAT_INC(queue, skbs_tso);
                                QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
                        }
                }

                napi_consume_skb(skb, budget);
        }
}

static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
                                     struct qeth_qdio_out_buffer *buf,
                                     bool error, int budget)
{
        int i;

        /* is PCI flag set on buffer? */
        if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) {
                atomic_dec(&queue->set_pci_flags_count);
                QETH_TXQ_STAT_INC(queue, completion_irq);
        }

        qeth_tx_complete_buf(queue, buf, error, budget);

        for (i = 0; i < queue->max_elements; ++i) {
                void *data = phys_to_virt(buf->buffer->element[i].addr);

                if (__test_and_clear_bit(i, buf->from_kmem_cache) && data)
                        kmem_cache_free(qeth_core_header_cache, data);
        }

        qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
        buf->next_element_to_fill = 0;
        buf->frames = 0;
        buf->bytes = 0;
        atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}

static void qeth_free_out_buf(struct qeth_qdio_out_buffer *buf)
{
        if (buf->aob)
                qdio_release_aob(buf->aob);
        kmem_cache_free(qeth_qdio_outbuf_cache, buf);
}

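/* Complete TX buffers whose asynchronous (QAOB) completion has arrived.
 * With @drain, all pending buffers are completed unconditionally and
 * their sockets are notified with TX_NOTIFY_GENERALERROR.
 */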
static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
                                          struct qeth_qdio_out_q *queue,
                                          bool drain, int budget)
{
        struct qeth_qdio_out_buffer *buf, *tmp;

        list_for_each_entry_safe(buf, tmp, &queue->pending_bufs, list_entry) {
                struct qeth_qaob_priv1 *priv;
                struct qaob *aob = buf->aob;
                enum iucv_tx_notify notify;
                unsigned int i;

                priv = (struct qeth_qaob_priv1 *)&aob->user1;
                if (drain || READ_ONCE(priv->state) == QETH_QAOB_DONE) {
                        QETH_CARD_TEXT(card, 5, "fp");
                        QETH_CARD_TEXT_(card, 5, "%lx", (long) buf);

                        notify = drain ? TX_NOTIFY_GENERALERROR :
                                         qeth_compute_cq_notification(aob->aorc, 1);
                        qeth_notify_skbs(queue, buf, notify);
                        qeth_tx_complete_buf(queue, buf, drain, budget);

                        for (i = 0;
                             i < aob->sb_count && i < queue->max_elements;
                             i++) {
                                void *data = phys_to_virt(aob->sba[i]);

                                if (test_bit(i, buf->from_kmem_cache) && data)
                                        kmem_cache_free(qeth_core_header_cache,
                                                        data);
                        }

                        list_del(&buf->list_entry);
                        qeth_free_out_buf(buf);
                }
        }
}

static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
{
        int j;

        qeth_tx_complete_pending_bufs(q->card, q, true, 0);

        for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
                if (!q->bufs[j])
                        continue;

                qeth_clear_output_buffer(q, q->bufs[j], true, 0);
                if (free) {
                        qeth_free_out_buf(q->bufs[j]);
                        q->bufs[j] = NULL;
                }
        }
}

static void qeth_drain_output_queues(struct qeth_card *card)
{
        int i;

        QETH_CARD_TEXT(card, 2, "clearqdbf");
        /* clear outbound buffers to free skbs */
        for (i = 0; i < card->qdio.no_out_queues; ++i) {
                if (card->qdio.out_qs[i])
                        qeth_drain_output_queue(card->qdio.out_qs[i], false);
        }
}

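/* Switch an OSA device between single-queue and multi-queue TX mode,
 * freeing any already-allocated QDIO queues so that they get rebuilt
 * with the new count.
 */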
static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
        unsigned int max = single ? 1 : card->dev->num_tx_queues;

        if (card->qdio.no_out_queues == max)
                return;

        if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
                qeth_free_qdio_queues(card);

        if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
                dev_info(&card->gdev->dev, "Priority Queueing not supported\n");

        card->qdio.no_out_queues = max;
}

static int qeth_update_from_chp_desc(struct qeth_card *card)
{
        struct ccw_device *ccwdev;
        struct channel_path_desc_fmt0 *chp_dsc;

        QETH_CARD_TEXT(card, 2, "chp_desc");

        ccwdev = card->data.ccwdev;
        chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
        if (!chp_dsc)
                return -ENOMEM;

        card->info.func_level = 0x4100 + chp_dsc->desc;

        if (IS_OSD(card) || IS_OSX(card))
                /* CHPP field bit 6 == 1 -> single queue */
                qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);

        kfree(chp_dsc);
        QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
        QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
        return 0;
}

static void qeth_init_qdio_info(struct qeth_card *card)
{
        QETH_CARD_TEXT(card, 4, "intqdinf");
        atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
        card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
        card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;

        /* inbound */
        card->qdio.no_in_queues = 1;
        card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
        if (IS_IQD(card))
                card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
        else
                card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
        card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
        INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
        INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}

static void qeth_set_initial_options(struct qeth_card *card)
{
        card->options.route4.type = NO_ROUTER;
        card->options.route6.type = NO_ROUTER;
        card->options.isolation = ISOLATION_MODE_NONE;
        card->options.cq = QETH_CQ_DISABLED;
        card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
}

static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
{
        unsigned long flags;
        int rc = 0;

        spin_lock_irqsave(&card->thread_mask_lock, flags);
        QETH_CARD_TEXT_(card, 4, "  %02x%02x%02x",
                        (u8) card->thread_start_mask,
                        (u8) card->thread_allowed_mask,
                        (u8) card->thread_running_mask);
        rc = (card->thread_start_mask & thread);
        spin_unlock_irqrestore(&card->thread_mask_lock, flags);
        return rc;
}

static int qeth_do_reset(void *data);
static void qeth_start_kernel_thread(struct work_struct *work)
{
        struct task_struct *ts;
        struct qeth_card *card = container_of(work, struct qeth_card,
                                        kernel_thread_starter);
        QETH_CARD_TEXT(card, 2, "strthrd");

        if (card->read.state != CH_STATE_UP &&
            card->write.state != CH_STATE_UP)
                return;
        if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
                ts = kthread_run(qeth_do_reset, card, "qeth_recover");
                if (IS_ERR(ts)) {
                        qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
                        qeth_clear_thread_running_bit(card,
                                QETH_RECOVER_THREAD);
                }
        }
}

static void qeth_buffer_reclaim_work(struct work_struct *);
static void qeth_setup_card(struct qeth_card *card)
{
        QETH_CARD_TEXT(card, 2, "setupcrd");

        card->info.type = CARD_RDEV(card)->id.driver_info;
        card->state = CARD_STATE_DOWN;
        spin_lock_init(&card->lock);
        spin_lock_init(&card->thread_mask_lock);
        mutex_init(&card->conf_mutex);
        mutex_init(&card->discipline_mutex);
        INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
        INIT_LIST_HEAD(&card->cmd_waiter_list);
        init_waitqueue_head(&card->wait_q);
        qeth_set_initial_options(card);
        /* IP address takeover */
        INIT_LIST_HEAD(&card->ipato.entries);
        qeth_init_qdio_info(card);
        INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
        hash_init(card->rx_mode_addrs);
        hash_init(card->local_addrs4);
        hash_init(card->local_addrs6);
        spin_lock_init(&card->local_addrs4_lock);
        spin_lock_init(&card->local_addrs6_lock);
}

static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
{
        struct qeth_card *card = container_of(slr, struct qeth_card,
                                        qeth_service_level);
        if (card->info.mcl_level[0])
                seq_printf(m, "qeth: %s firmware level %s\n",
                        CARD_BUS_ID(card), card->info.mcl_level);
}

static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
{
        struct qeth_card *card;

        QETH_DBF_TEXT(SETUP, 2, "alloccrd");
        card = kzalloc(sizeof(*card), GFP_KERNEL);
        if (!card)
                goto out;
        QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

        card->gdev = gdev;
        dev_set_drvdata(&gdev->dev, card);
        CARD_RDEV(card) = gdev->cdev[0];
        CARD_WDEV(card) = gdev->cdev[1];
        CARD_DDEV(card) = gdev->cdev[2];
1568        card->event_wq = alloc_ordered_workqueue("%s_event", 0,
1569                                                 dev_name(&gdev->dev));
1570        if (!card->event_wq)
1571                goto out_wq;
1572
1573        card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
1574        if (!card->read_cmd)
1575                goto out_read_cmd;
1576
1577        card->debugfs = debugfs_create_dir(dev_name(&gdev->dev),
1578                                           qeth_debugfs_root);
1579        debugfs_create_file("local_addrs", 0400, card->debugfs, card,
1580                            &qeth_debugfs_local_addr_fops);
1581
1582        card->qeth_service_level.seq_print = qeth_core_sl_print;
1583        register_service_level(&card->qeth_service_level);
1584        return card;
1585
1586out_read_cmd:
1587        destroy_workqueue(card->event_wq);
1588out_wq:
1589        dev_set_drvdata(&gdev->dev, NULL);
1590        kfree(card);
1591out:
1592        return NULL;
1593}
1594
1595static int qeth_clear_channel(struct qeth_card *card,
1596                              struct qeth_channel *channel)
1597{
1598        int rc;
1599
1600        QETH_CARD_TEXT(card, 3, "clearch");
1601        spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1602        rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
1603        spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
1604
1605        if (rc)
1606                return rc;
1607        rc = wait_event_interruptible_timeout(card->wait_q,
1608                        channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
1609        if (rc == -ERESTARTSYS)
1610                return rc;
1611        if (channel->state != CH_STATE_STOPPED)
1612                return -ETIME;
1613        channel->state = CH_STATE_DOWN;
1614        return 0;
1615}
1616
1617static int qeth_halt_channel(struct qeth_card *card,
1618                             struct qeth_channel *channel)
1619{
1620        int rc;
1621
1622        QETH_CARD_TEXT(card, 3, "haltch");
1623        spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1624        rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
1625        spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
1626
1627        if (rc)
1628                return rc;
1629        rc = wait_event_interruptible_timeout(card->wait_q,
1630                        channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
1631        if (rc == -ERESTARTSYS)
1632                return rc;
1633        if (channel->state != CH_STATE_HALTED)
1634                return -ETIME;
1635        return 0;
1636}
1637
1638static int qeth_stop_channel(struct qeth_channel *channel)
1639{
1640        struct ccw_device *cdev = channel->ccwdev;
1641        int rc;
1642
1643        rc = ccw_device_set_offline(cdev);
1644
1645        spin_lock_irq(get_ccwdev_lock(cdev));
1646        if (channel->active_cmd)
1647                dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
1648                        channel->active_cmd);
1649
1650        cdev->handler = NULL;
1651        spin_unlock_irq(get_ccwdev_lock(cdev));
1652
1653        return rc;
1654}
1655
1656static int qeth_start_channel(struct qeth_channel *channel)
1657{
1658        struct ccw_device *cdev = channel->ccwdev;
1659        int rc;
1660
1661        channel->state = CH_STATE_DOWN;
1662        xchg(&channel->active_cmd, NULL);
1663
1664        spin_lock_irq(get_ccwdev_lock(cdev));
1665        cdev->handler = qeth_irq;
1666        spin_unlock_irq(get_ccwdev_lock(cdev));
1667
1668        rc = ccw_device_set_online(cdev);
1669        if (rc)
1670                goto err;
1671
1672        return 0;
1673
1674err:
1675        spin_lock_irq(get_ccwdev_lock(cdev));
1676        cdev->handler = NULL;
1677        spin_unlock_irq(get_ccwdev_lock(cdev));
1678        return rc;
1679}
1680
1681static int qeth_halt_channels(struct qeth_card *card)
1682{
1683        int rc1 = 0, rc2 = 0, rc3 = 0;
1684
1685        QETH_CARD_TEXT(card, 3, "haltchs");
1686        rc1 = qeth_halt_channel(card, &card->read);
1687        rc2 = qeth_halt_channel(card, &card->write);
1688        rc3 = qeth_halt_channel(card, &card->data);
1689        if (rc1)
1690                return rc1;
1691        if (rc2)
1692                return rc2;
1693        return rc3;
1694}
1695
1696static int qeth_clear_channels(struct qeth_card *card)
1697{
1698        int rc1 = 0, rc2 = 0, rc3 = 0;
1699
1700        QETH_CARD_TEXT(card, 3, "clearchs");
1701        rc1 = qeth_clear_channel(card, &card->read);
1702        rc2 = qeth_clear_channel(card, &card->write);
1703        rc3 = qeth_clear_channel(card, &card->data);
1704        if (rc1)
1705                return rc1;
1706        if (rc2)
1707                return rc2;
1708        return rc3;
1709}
1710
1711static int qeth_clear_halt_card(struct qeth_card *card, int halt)
1712{
1713        int rc = 0;
1714
1715        QETH_CARD_TEXT(card, 3, "clhacrd");
1716
1717        if (halt)
1718                rc = qeth_halt_channels(card);
1719        if (rc)
1720                return rc;
1721        return qeth_clear_channels(card);
1722}
1723
1724static int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
1725{
1726        int rc = 0;
1727
1728        QETH_CARD_TEXT(card, 3, "qdioclr");
1729        switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
1730                QETH_QDIO_CLEANING)) {
1731        case QETH_QDIO_ESTABLISHED:
1732                if (IS_IQD(card))
1733                        rc = qdio_shutdown(CARD_DDEV(card),
1734                                QDIO_FLAG_CLEANUP_USING_HALT);
1735                else
1736                        rc = qdio_shutdown(CARD_DDEV(card),
1737                                QDIO_FLAG_CLEANUP_USING_CLEAR);
1738                if (rc)
1739                        QETH_CARD_TEXT_(card, 3, "1err%d", rc);
1740                atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
1741                break;
1742        case QETH_QDIO_CLEANING:
1743                return rc;
1744        default:
1745                break;
1746        }
1747        rc = qeth_clear_halt_card(card, use_halt);
1748        if (rc)
1749                QETH_CARD_TEXT_(card, 3, "2err%d", rc);
1750        return rc;
1751}
1752
1753static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
1754{
1755        enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
1756        struct diag26c_vnic_resp *response = NULL;
1757        struct diag26c_vnic_req *request = NULL;
1758        struct ccw_dev_id id;
1759        char userid[80];
1760        int rc = 0;
1761
1762        QETH_CARD_TEXT(card, 2, "vmlayer");
1763
1764        cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
1765        if (rc)
1766                goto out;
1767
1768        request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
1769        response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
1770        if (!request || !response) {
1771                rc = -ENOMEM;
1772                goto out;
1773        }
1774
1775        ccw_device_get_id(CARD_RDEV(card), &id);
1776        request->resp_buf_len = sizeof(*response);
1777        request->resp_version = DIAG26C_VERSION6_VM65918;
1778        request->req_format = DIAG26C_VNIC_INFO;
1779        ASCEBC(userid, 8);
1780        memcpy(&request->sys_name, userid, 8);
1781        request->devno = id.devno;
1782
1783        QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
1784        rc = diag26c(request, response, DIAG26C_PORT_VNIC);
1785        QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
1786        if (rc)
1787                goto out;
1788        QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
1789
1790        if (request->resp_buf_len < sizeof(*response) ||
1791            response->version != request->resp_version) {
1792                rc = -EIO;
1793                goto out;
1794        }
1795
1796        if (response->protocol == VNIC_INFO_PROT_L2)
1797                disc = QETH_DISCIPLINE_LAYER2;
1798        else if (response->protocol == VNIC_INFO_PROT_L3)
1799                disc = QETH_DISCIPLINE_LAYER3;
1800
1801out:
1802        kfree(response);
1803        kfree(request);
1804        if (rc)
1805                QETH_CARD_TEXT_(card, 2, "err%x", rc);
1806        return disc;
1807}
1808
1809/* Determine whether the device requires a specific layer discipline */
1810static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
1811{
1812        enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
1813
1814        if (IS_OSM(card))
1815                disc = QETH_DISCIPLINE_LAYER2;
1816        else if (IS_VM_NIC(card))
1817                disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
1818                                      qeth_vm_detect_layer(card);
1819
1820        switch (disc) {
1821        case QETH_DISCIPLINE_LAYER2:
1822                QETH_CARD_TEXT(card, 3, "force l2");
1823                break;
1824        case QETH_DISCIPLINE_LAYER3:
1825                QETH_CARD_TEXT(card, 3, "force l3");
1826                break;
1827        default:
1828                QETH_CARD_TEXT(card, 3, "force no");
1829        }
1830
1831        return disc;
1832}
1833
1834static void qeth_set_blkt_defaults(struct qeth_card *card)
1835{
1836        QETH_CARD_TEXT(card, 2, "cfgblkt");
1837
1838        if (card->info.use_v1_blkt) {
1839                card->info.blkt.time_total = 0;
1840                card->info.blkt.inter_packet = 0;
1841                card->info.blkt.inter_packet_jumbo = 0;
1842        } else {
1843                card->info.blkt.time_total = 250;
1844                card->info.blkt.inter_packet = 5;
1845                card->info.blkt.inter_packet_jumbo = 15;
1846        }
1847}
1848
1849static void qeth_idx_init(struct qeth_card *card)
1850{
1851        memset(&card->seqno, 0, sizeof(card->seqno));
1852
1853        card->token.issuer_rm_w = 0x00010103UL;
1854        card->token.cm_filter_w = 0x00010108UL;
1855        card->token.cm_connection_w = 0x0001010aUL;
1856        card->token.ulp_filter_w = 0x0001010bUL;
1857        card->token.ulp_connection_w = 0x0001010dUL;
1858
1859        switch (card->info.type) {
1860        case QETH_CARD_TYPE_IQD:
1861                card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
1862                break;
1863        case QETH_CARD_TYPE_OSD:
1864                card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
1865                break;
1866        default:
1867                break;
1868        }
1869}
1870
1871static void qeth_idx_finalize_cmd(struct qeth_card *card,
1872                                  struct qeth_cmd_buffer *iob)
1873{
1874        memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
1875               QETH_SEQ_NO_LENGTH);
1876        if (iob->channel == &card->write)
1877                card->seqno.trans_hdr++;
1878}
1879
1880static int qeth_peer_func_level(int level)
1881{
1882        if ((level & 0xff) == 8)
1883                return (level & 0xff) + 0x400;
1884        if (((level >> 8) & 3) == 1)
1885                return (level & 0xff) + 0x200;
1886        return level;
1887}
1888
1889static void qeth_mpc_finalize_cmd(struct qeth_card *card,
1890                                  struct qeth_cmd_buffer *iob)
1891{
1892        qeth_idx_finalize_cmd(card, iob);
1893
1894        memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
1895               &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
1896        card->seqno.pdu_hdr++;
1897        memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
1898               &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
1899
1900        iob->callback = qeth_release_buffer_cb;
1901}
1902
1903static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
1904                                 struct qeth_cmd_buffer *reply)
1905{
1906        /* MPC cmds are issued strictly in sequence. */
1907        return !IS_IPA(reply->data);
1908}
1909
1910static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
1911                                                  const void *data,
1912                                                  unsigned int data_length)
1913{
1914        struct qeth_cmd_buffer *iob;
1915
1916        iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
1917        if (!iob)
1918                return NULL;
1919
1920        memcpy(iob->data, data, data_length);
1921        qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
1922                       iob->data);
1923        iob->finalize = qeth_mpc_finalize_cmd;
1924        iob->match = qeth_mpc_match_reply;
1925        return iob;
1926}
1927
1928/**
1929 * qeth_send_control_data() -   send control command to the card
1930 * @card:                       qeth_card structure pointer
1931 * @iob:                        qeth_cmd_buffer pointer
1932 * @reply_cb:                   callback function pointer
1933 * @cb_card:                    pointer to the qeth_card structure
1934 * @cb_reply:                   pointer to the qeth_reply structure
1935 * @cb_cmd:                     pointer to the original iob for non-IPA
1936 *                              commands, or to the qeth_ipa_cmd structure
1937 *                              for the IPA commands.
1938 * @reply_param:                private pointer passed to the callback
1939 *
1940 * Callback function gets called one or more times, with cb_cmd
1941 * pointing to the response returned by the hardware. Callback
1942 * function must return
1943 *   > 0 if more reply blocks are expected,
1944 *     0 if the last or only reply block is received, and
1945 *   < 0 on error.
1946 * Callback function can get the value of the reply_param pointer from the
1947 * field 'param' of the structure qeth_reply.
1948 */
1949
1950static int qeth_send_control_data(struct qeth_card *card,
1951                                  struct qeth_cmd_buffer *iob,
1952                                  int (*reply_cb)(struct qeth_card *cb_card,
1953                                                  struct qeth_reply *cb_reply,
1954                                                  unsigned long cb_cmd),
1955                                  void *reply_param)
1956{
1957        struct qeth_channel *channel = iob->channel;
1958        struct qeth_reply *reply = &iob->reply;
1959        long timeout = iob->timeout;
1960        int rc;
1961
1962        QETH_CARD_TEXT(card, 2, "sendctl");
1963
1964        reply->callback = reply_cb;
1965        reply->param = reply_param;
1966
1967        timeout = wait_event_interruptible_timeout(card->wait_q,
1968                                                   qeth_trylock_channel(channel, iob),
1969                                                   timeout);
1970        if (timeout <= 0) {
1971                qeth_put_cmd(iob);
1972                return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
1973        }
1974
1975        if (iob->finalize)
1976                iob->finalize(card, iob);
1977        QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));
1978
1979        qeth_enqueue_cmd(card, iob);
1980
1981        /* This pairs with iob->callback, and keeps the iob alive after IO: */
1982        qeth_get_cmd(iob);
1983
1984        QETH_CARD_TEXT(card, 6, "noirqpnd");
1985        spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1986        rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
1987                                      (addr_t) iob, 0, 0, timeout);
1988        spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
1989        if (rc) {
1990                QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
1991                                 CARD_DEVID(card), rc);
1992                QETH_CARD_TEXT_(card, 2, " err%d", rc);
1993                qeth_dequeue_cmd(card, iob);
1994                qeth_put_cmd(iob);
1995                qeth_unlock_channel(card, channel);
1996                goto out;
1997        }
1998
1999        timeout = wait_for_completion_interruptible_timeout(&iob->done,
2000                                                            timeout);
2001        if (timeout <= 0)
2002                rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
2003
2004        qeth_dequeue_cmd(card, iob);
2005
2006        if (reply_cb) {
2007                /* Wait until the callback for a late reply has completed: */
2008                spin_lock_irq(&iob->lock);
2009                if (rc)
2010                        /* Zap any callback that's still pending: */
2011                        iob->rc = rc;
2012                spin_unlock_irq(&iob->lock);
2013        }
2014
2015        if (!rc)
2016                rc = iob->rc;
2017
2018out:
2019        qeth_put_cmd(iob);
2020        return rc;
2021}
2022
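/*
 * A minimal sketch of a reply_cb that follows the contract documented for
 * qeth_send_control_data() above: it consumes the single reply block and
 * returns 0. Illustrative only, not taken from the driver; the function name
 * and the assumption that reply_param points to a QETH_MPC_TOKEN_LENGTH
 * buffer are hypothetical, and the body is modelled on qeth_cm_enable_cb()
 * and qeth_ulp_enable_cb() further below in this file.
 */
static int sample_token_reply_cb(struct qeth_card *card,
                                 struct qeth_reply *reply,
                                 unsigned long data)
{
        struct qeth_cmd_buffer *iob = (struct qeth_cmd_buffer *)data;

        /* reply->param carries the reply_param pointer given by the caller: */
        memcpy(reply->param, QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
               QETH_MPC_TOKEN_LENGTH);

        /* 0: the last (and only) reply block has been received. */
        return 0;
}
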
2023struct qeth_node_desc {
2024        struct node_descriptor nd1;
2025        struct node_descriptor nd2;
2026        struct node_descriptor nd3;
2027};
2028
2029static void qeth_read_conf_data_cb(struct qeth_card *card,
2030                                   struct qeth_cmd_buffer *iob,
2031                                   unsigned int data_length)
2032{
2033        struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
2034        int rc = 0;
2035        u8 *tag;
2036
2037        QETH_CARD_TEXT(card, 2, "cfgunit");
2038
2039        if (data_length < sizeof(*nd)) {
2040                rc = -EINVAL;
2041                goto out;
2042        }
2043
2044        card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
2045                               nd->nd1.plant[1] == _ascebc['M'];
2046        tag = (u8 *)&nd->nd1.tag;
2047        card->info.chpid = tag[0];
2048        card->info.unit_addr2 = tag[1];
2049
2050        tag = (u8 *)&nd->nd2.tag;
2051        card->info.cula = tag[1];
2052
2053        card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
2054                                 nd->nd3.model[1] == 0xF0 &&
2055                                 nd->nd3.model[2] >= 0xF1 &&
2056                                 nd->nd3.model[2] <= 0xF4;
2057
2058out:
2059        qeth_notify_cmd(iob, rc);
2060        qeth_put_cmd(iob);
2061}
2062
2063static int qeth_read_conf_data(struct qeth_card *card)
2064{
2065        struct qeth_channel *channel = &card->data;
2066        struct qeth_cmd_buffer *iob;
2067        struct ciw *ciw;
2068
2069        /* scan for RCD command in extended SenseID data */
2070        ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
2071        if (!ciw || ciw->cmd == 0)
2072                return -EOPNOTSUPP;
2073        if (ciw->count < sizeof(struct qeth_node_desc))
2074                return -EINVAL;
2075
2076        iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
2077        if (!iob)
2078                return -ENOMEM;
2079
2080        iob->callback = qeth_read_conf_data_cb;
2081        qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
2082                       iob->data);
2083
2084        return qeth_send_control_data(card, iob, NULL, NULL);
2085}
2086
2087static int qeth_idx_check_activate_response(struct qeth_card *card,
2088                                            struct qeth_channel *channel,
2089                                            struct qeth_cmd_buffer *iob)
2090{
2091        int rc;
2092
2093        rc = qeth_check_idx_response(card, iob->data);
2094        if (rc)
2095                return rc;
2096
2097        if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
2098                return 0;
2099
2100        /* negative reply: */
2101        QETH_CARD_TEXT_(card, 2, "idxneg%c",
2102                        QETH_IDX_ACT_CAUSE_CODE(iob->data));
2103
2104        switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
2105        case QETH_IDX_ACT_ERR_EXCL:
2106                dev_err(&channel->ccwdev->dev,
2107                        "The adapter is used exclusively by another host\n");
2108                return -EBUSY;
2109        case QETH_IDX_ACT_ERR_AUTH:
2110        case QETH_IDX_ACT_ERR_AUTH_USER:
2111                dev_err(&channel->ccwdev->dev,
2112                        "Setting the device online failed because of insufficient authorization\n");
2113                return -EPERM;
2114        default:
2115                QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
2116                                 CCW_DEVID(channel->ccwdev));
2117                return -EIO;
2118        }
2119}
2120
2121static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
2122                                              struct qeth_cmd_buffer *iob,
2123                                              unsigned int data_length)
2124{
2125        struct qeth_channel *channel = iob->channel;
2126        u16 peer_level;
2127        int rc;
2128
2129        QETH_CARD_TEXT(card, 2, "idxrdcb");
2130
2131        rc = qeth_idx_check_activate_response(card, channel, iob);
2132        if (rc)
2133                goto out;
2134
2135        memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
2136        if (peer_level != qeth_peer_func_level(card->info.func_level)) {
2137                QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
2138                                 CCW_DEVID(channel->ccwdev),
2139                                 card->info.func_level, peer_level);
2140                rc = -EINVAL;
2141                goto out;
2142        }
2143
2144        memcpy(&card->token.issuer_rm_r,
2145               QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
2146               QETH_MPC_TOKEN_LENGTH);
2147        memcpy(&card->info.mcl_level[0],
2148               QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
2149
2150out:
2151        qeth_notify_cmd(iob, rc);
2152        qeth_put_cmd(iob);
2153}
2154
2155static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
2156                                               struct qeth_cmd_buffer *iob,
2157                                               unsigned int data_length)
2158{
2159        struct qeth_channel *channel = iob->channel;
2160        u16 peer_level;
2161        int rc;
2162
2163        QETH_CARD_TEXT(card, 2, "idxwrcb");
2164
2165        rc = qeth_idx_check_activate_response(card, channel, iob);
2166        if (rc)
2167                goto out;
2168
2169        memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
2170        if ((peer_level & ~0x0100) !=
2171            qeth_peer_func_level(card->info.func_level)) {
2172                QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
2173                                 CCW_DEVID(channel->ccwdev),
2174                                 card->info.func_level, peer_level);
2175                rc = -EINVAL;
2176        }
2177
2178out:
2179        qeth_notify_cmd(iob, rc);
2180        qeth_put_cmd(iob);
2181}
2182
2183static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
2184                                        struct qeth_cmd_buffer *iob)
2185{
2186        u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
2187        u8 port = ((u8)card->dev->dev_port) | 0x80;
2188        struct ccw1 *ccw = __ccw_from_cmd(iob);
2189
2190        qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
2191                       iob->data);
2192        qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
2193        iob->finalize = qeth_idx_finalize_cmd;
2194
2195        port |= QETH_IDX_ACT_INVAL_FRAME;
2196        memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
2197        memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
2198               &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
2199        memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
2200               &card->info.func_level, 2);
2201        memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2);
2202        memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
2203}
2204
2205static int qeth_idx_activate_read_channel(struct qeth_card *card)
2206{
2207        struct qeth_channel *channel = &card->read;
2208        struct qeth_cmd_buffer *iob;
2209        int rc;
2210
2211        QETH_CARD_TEXT(card, 2, "idxread");
2212
2213        iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
2214        if (!iob)
2215                return -ENOMEM;
2216
2217        memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
2218        qeth_idx_setup_activate_cmd(card, iob);
2219        iob->callback = qeth_idx_activate_read_channel_cb;
2220
2221        rc = qeth_send_control_data(card, iob, NULL, NULL);
2222        if (rc)
2223                return rc;
2224
2225        channel->state = CH_STATE_UP;
2226        return 0;
2227}
2228
2229static int qeth_idx_activate_write_channel(struct qeth_card *card)
2230{
2231        struct qeth_channel *channel = &card->write;
2232        struct qeth_cmd_buffer *iob;
2233        int rc;
2234
2235        QETH_CARD_TEXT(card, 2, "idxwrite");
2236
2237        iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
2238        if (!iob)
2239                return -ENOMEM;
2240
2241        memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
2242        qeth_idx_setup_activate_cmd(card, iob);
2243        iob->callback = qeth_idx_activate_write_channel_cb;
2244
2245        rc = qeth_send_control_data(card, iob, NULL, NULL);
2246        if (rc)
2247                return rc;
2248
2249        channel->state = CH_STATE_UP;
2250        return 0;
2251}
2252
2253static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2254                unsigned long data)
2255{
2256        struct qeth_cmd_buffer *iob;
2257
2258        QETH_CARD_TEXT(card, 2, "cmenblcb");
2259
2260        iob = (struct qeth_cmd_buffer *) data;
2261        memcpy(&card->token.cm_filter_r,
2262               QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
2263               QETH_MPC_TOKEN_LENGTH);
2264        return 0;
2265}
2266
2267static int qeth_cm_enable(struct qeth_card *card)
2268{
2269        struct qeth_cmd_buffer *iob;
2270
2271        QETH_CARD_TEXT(card, 2, "cmenable");
2272
2273        iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
2274        if (!iob)
2275                return -ENOMEM;
2276
2277        memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
2278               &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2279        memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
2280               &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
2281
2282        return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);
2283}
2284
2285static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2286                unsigned long data)
2287{
2288        struct qeth_cmd_buffer *iob;
2289
2290        QETH_CARD_TEXT(card, 2, "cmsetpcb");
2291
2292        iob = (struct qeth_cmd_buffer *) data;
2293        memcpy(&card->token.cm_connection_r,
2294               QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
2295               QETH_MPC_TOKEN_LENGTH);
2296        return 0;
2297}
2298
2299static int qeth_cm_setup(struct qeth_card *card)
2300{
2301        struct qeth_cmd_buffer *iob;
2302
2303        QETH_CARD_TEXT(card, 2, "cmsetup");
2304
2305        iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);
2306        if (!iob)
2307                return -ENOMEM;
2308
2309        memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
2310               &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2311        memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
2312               &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
2313        memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
2314               &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
2315        return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
2316}
2317
2318static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type)
2319{
2320        if (link_type == QETH_LINK_TYPE_LANE_TR ||
2321            link_type == QETH_LINK_TYPE_HSTR) {
2322                dev_err(&card->gdev->dev, "Unsupported Token Ring device\n");
2323                return false;
2324        }
2325
2326        return true;
2327}
2328
2329static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
2330{
2331        struct net_device *dev = card->dev;
2332        unsigned int new_mtu;
2333
2334        if (!max_mtu) {
2335                /* IQD needs accurate max MTU to set up its RX buffers: */
2336                if (IS_IQD(card))
2337                        return -EINVAL;
2338                /* tolerate quirky HW: */
2339                max_mtu = ETH_MAX_MTU;
2340        }
2341
2342        rtnl_lock();
2343        if (IS_IQD(card)) {
2344                /* move any device with default MTU to new max MTU: */
2345                new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;
2346
2347                /* adjust RX buffer size to new max MTU: */
2348                card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
2349                if (dev->max_mtu && dev->max_mtu != max_mtu)
2350                        qeth_free_qdio_queues(card);
2351        } else {
2352                if (dev->mtu)
2353                        new_mtu = dev->mtu;
2354                /* default MTUs for first setup: */
2355                else if (IS_LAYER2(card))
2356                        new_mtu = ETH_DATA_LEN;
2357                else
2358                        new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
2359        }
2360
2361        dev->max_mtu = max_mtu;
2362        dev->mtu = min(new_mtu, max_mtu);
2363        rtnl_unlock();
2364        return 0;
2365}
2366
2367static int qeth_get_mtu_outof_framesize(int framesize)
2368{
2369        switch (framesize) {
2370        case 0x4000:
2371                return 8192;
2372        case 0x6000:
2373                return 16384;
2374        case 0xa000:
2375                return 32768;
2376        case 0xffff:
2377                return 57344;
2378        default:
2379                return 0;
2380        }
2381}
2382
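/*
 * Worked example (illustrative, assuming the 4 KiB PAGE_SIZE used on s390):
 * an IQD ULP_ENABLE response with framesize 0x4000 maps to an MTU of 8192
 * above, and qeth_update_max_mtu() then sizes the RX buffers as
 * max_mtu + 2 * PAGE_SIZE = 8192 + 8192 = 16384 bytes.
 */
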
2383static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2384                unsigned long data)
2385{
2386        __u16 mtu, framesize;
2387        __u16 len;
2388        struct qeth_cmd_buffer *iob;
2389        u8 link_type = 0;
2390
2391        QETH_CARD_TEXT(card, 2, "ulpenacb");
2392
2393        iob = (struct qeth_cmd_buffer *) data;
2394        memcpy(&card->token.ulp_filter_r,
2395               QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
2396               QETH_MPC_TOKEN_LENGTH);
2397        if (IS_IQD(card)) {
2398                memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
2399                mtu = qeth_get_mtu_outof_framesize(framesize);
2400        } else {
2401                mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
2402        }
2403        *(u16 *)reply->param = mtu;
2404
2405        memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
2406        if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
2407                memcpy(&link_type,
2408                       QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
2409                if (!qeth_is_supported_link_type(card, link_type))
2410                        return -EPROTONOSUPPORT;
2411        }
2412
2413        card->info.link_type = link_type;
2414        QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
2415        return 0;
2416}
2417
2418static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
2419{
2420        return IS_LAYER2(card) ? QETH_MPC_PROT_L2 : QETH_MPC_PROT_L3;
2421}
2422
2423static int qeth_ulp_enable(struct qeth_card *card)
2424{
2425        u8 prot_type = qeth_mpc_select_prot_type(card);
2426        struct qeth_cmd_buffer *iob;
2427        u16 max_mtu;
2428        int rc;
2429
2430        QETH_CARD_TEXT(card, 2, "ulpenabl");
2431
2432        iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);
2433        if (!iob)
2434                return -ENOMEM;
2435
2436        *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
2437        memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
2438        memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
2439               &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2440        memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
2441               &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
2442        rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);
2443        if (rc)
2444                return rc;
2445        return qeth_update_max_mtu(card, max_mtu);
2446}
2447
2448static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2449                unsigned long data)
2450{
2451        struct qeth_cmd_buffer *iob;
2452
2453        QETH_CARD_TEXT(card, 2, "ulpstpcb");
2454
2455        iob = (struct qeth_cmd_buffer *) data;
2456        memcpy(&card->token.ulp_connection_r,
2457               QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2458               QETH_MPC_TOKEN_LENGTH);
2459        if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2460                     3)) {
2461                QETH_CARD_TEXT(card, 2, "olmlimit");
2462                dev_err(&card->gdev->dev,
2463                        "A connection could not be established because of an OLM limit\n");
2464                return -EMLINK;
2465        }
2466        return 0;
2467}
2468
2469static int qeth_ulp_setup(struct qeth_card *card)
2470{
2471        __u16 temp;
2472        struct qeth_cmd_buffer *iob;
2473
2474        QETH_CARD_TEXT(card, 2, "ulpsetup");
2475
2476        iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);
2477        if (!iob)
2478                return -ENOMEM;
2479
2480        memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
2481               &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2482        memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
2483               &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
2484        memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
2485               &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
2486
2487        memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2);
2488        temp = (card->info.cula << 8) + card->info.unit_addr2;
2489        memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
2490        return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
2491}
2492
2493static int qeth_alloc_out_buf(struct qeth_qdio_out_q *q, unsigned int bidx,
2494                              gfp_t gfp)
2495{
2496        struct qeth_qdio_out_buffer *newbuf;
2497
2498        newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, gfp);
2499        if (!newbuf)
2500                return -ENOMEM;
2501
2502        newbuf->buffer = q->qdio_bufs[bidx];
2503        skb_queue_head_init(&newbuf->skb_list);
2504        lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
2505        atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
2506        q->bufs[bidx] = newbuf;
2507        return 0;
2508}
2509
2510static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
2511{
2512        if (!q)
2513                return;
2514
2515        qeth_drain_output_queue(q, true);
2516        qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2517        kfree(q);
2518}
2519
2520static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
2521{
2522        struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
2523        unsigned int i;
2524
2525        if (!q)
2526                return NULL;
2527
2528        if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q))
2529                goto err_qdio_bufs;
2530
2531        for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
2532                if (qeth_alloc_out_buf(q, i, GFP_KERNEL))
2533                        goto err_out_bufs;
2534        }
2535
2536        return q;
2537
2538err_out_bufs:
2539        while (i > 0)
2540                qeth_free_out_buf(q->bufs[--i]);
2541        qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2542err_qdio_bufs:
2543        kfree(q);
2544        return NULL;
2545}
2546
2547static void qeth_tx_completion_timer(struct timer_list *timer)
2548{
2549        struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);
2550
2551        napi_schedule(&queue->napi);
2552        QETH_TXQ_STAT_INC(queue, completion_timer);
2553}
2554
2555static int qeth_alloc_qdio_queues(struct qeth_card *card)
2556{
2557        unsigned int i;
2558
2559        QETH_CARD_TEXT(card, 2, "allcqdbf");
2560
2561        if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
2562                QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
2563                return 0;
2564
2565        QETH_CARD_TEXT(card, 2, "inq");
2566        card->qdio.in_q = qeth_alloc_qdio_queue();
2567        if (!card->qdio.in_q)
2568                goto out_nomem;
2569
2570        /* inbound buffer pool */
2571        if (qeth_alloc_buffer_pool(card))
2572                goto out_freeinq;
2573
2574        /* outbound */
2575        for (i = 0; i < card->qdio.no_out_queues; ++i) {
2576                struct qeth_qdio_out_q *queue;
2577
2578                queue = qeth_alloc_output_queue();
2579                if (!queue)
2580                        goto out_freeoutq;
2581                QETH_CARD_TEXT_(card, 2, "outq %i", i);
2582                QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
2583                card->qdio.out_qs[i] = queue;
2584                queue->card = card;
2585                queue->queue_no = i;
2586                INIT_LIST_HEAD(&queue->pending_bufs);
2587                spin_lock_init(&queue->lock);
2588                timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
2589                if (IS_IQD(card)) {
2590                        queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
2591                        queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
2592                        queue->rescan_usecs = QETH_TX_TIMER_USECS;
2593                } else {
2594                        queue->coalesce_usecs = USEC_PER_SEC;
2595                        queue->max_coalesced_frames = 0;
2596                        queue->rescan_usecs = 10 * USEC_PER_SEC;
2597                }
2598                queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
2599        }
2600
2601        /* completion */
2602        if (qeth_alloc_cq(card))
2603                goto out_freeoutq;
2604
2605        return 0;
2606
2607out_freeoutq:
2608        while (i > 0) {
2609                qeth_free_output_queue(card->qdio.out_qs[--i]);
2610                card->qdio.out_qs[i] = NULL;
2611        }
2612        qeth_free_buffer_pool(card);
2613out_freeinq:
2614        qeth_free_qdio_queue(card->qdio.in_q);
2615        card->qdio.in_q = NULL;
2616out_nomem:
2617        atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
2618        return -ENOMEM;
2619}
2620
2621static void qeth_free_qdio_queues(struct qeth_card *card)
2622{
2623        int i, j;
2624
2625        if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
2626                QETH_QDIO_UNINITIALIZED)
2627                return;
2628
2629        qeth_free_cq(card);
2630        for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2631                if (card->qdio.in_q->bufs[j].rx_skb)
2632                        dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
2633        }
2634        qeth_free_qdio_queue(card->qdio.in_q);
2635        card->qdio.in_q = NULL;
2636        /* inbound buffer pool */
2637        qeth_free_buffer_pool(card);
2638        /* free outbound qdio_qs */
2639        for (i = 0; i < card->qdio.no_out_queues; i++) {
2640                qeth_free_output_queue(card->qdio.out_qs[i]);
2641                card->qdio.out_qs[i] = NULL;
2642        }
2643}
2644
2645static void qeth_fill_qib_parms(struct qeth_card *card,
2646                                struct qeth_qib_parms *parms)
2647{
2648        struct qeth_qdio_out_q *queue;
2649        unsigned int i;
2650
2651        parms->pcit_magic[0] = 'P';
2652        parms->pcit_magic[1] = 'C';
2653        parms->pcit_magic[2] = 'I';
2654        parms->pcit_magic[3] = 'T';
2655        ASCEBC(parms->pcit_magic, sizeof(parms->pcit_magic));
2656        parms->pcit_a = QETH_PCI_THRESHOLD_A(card);
2657        parms->pcit_b = QETH_PCI_THRESHOLD_B(card);
2658        parms->pcit_c = QETH_PCI_TIMER_VALUE(card);
2659
2660        parms->blkt_magic[0] = 'B';
2661        parms->blkt_magic[1] = 'L';
2662        parms->blkt_magic[2] = 'K';
2663        parms->blkt_magic[3] = 'T';
2664        ASCEBC(parms->blkt_magic, sizeof(parms->blkt_magic));
2665        parms->blkt_total = card->info.blkt.time_total;
2666        parms->blkt_inter_packet = card->info.blkt.inter_packet;
2667        parms->blkt_inter_packet_jumbo = card->info.blkt.inter_packet_jumbo;
2668
2669        /* Prio-queueing implicitly uses the default priorities: */
2670        if (qeth_uses_tx_prio_queueing(card) || card->qdio.no_out_queues == 1)
2671                return;
2672
2673        parms->pque_magic[0] = 'P';
2674        parms->pque_magic[1] = 'Q';
2675        parms->pque_magic[2] = 'U';
2676        parms->pque_magic[3] = 'E';
2677        ASCEBC(parms->pque_magic, sizeof(parms->pque_magic));
2678        parms->pque_order = QETH_QIB_PQUE_ORDER_RR;
2679        parms->pque_units = QETH_QIB_PQUE_UNITS_SBAL;
2680
2681        qeth_for_each_output_queue(card, queue, i)
2682                parms->pque_priority[i] = queue->priority;
2683}
2684
2685static int qeth_qdio_activate(struct qeth_card *card)
2686{
2687        QETH_CARD_TEXT(card, 3, "qdioact");
2688        return qdio_activate(CARD_DDEV(card));
2689}
2690
2691static int qeth_dm_act(struct qeth_card *card)
2692{
2693        struct qeth_cmd_buffer *iob;
2694
2695        QETH_CARD_TEXT(card, 2, "dmact");
2696
2697        iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
2698        if (!iob)
2699                return -ENOMEM;
2700
2701        memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
2702               &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2703        memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
2704               &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2705        return qeth_send_control_data(card, iob, NULL, NULL);
2706}
2707
2708static int qeth_mpc_initialize(struct qeth_card *card)
2709{
2710        int rc;
2711
2712        QETH_CARD_TEXT(card, 2, "mpcinit");
2713
2714        rc = qeth_issue_next_read(card);
2715        if (rc) {
2716                QETH_CARD_TEXT_(card, 2, "1err%d", rc);
2717                return rc;
2718        }
2719        rc = qeth_cm_enable(card);
2720        if (rc) {
2721                QETH_CARD_TEXT_(card, 2, "2err%d", rc);
2722                return rc;
2723        }
2724        rc = qeth_cm_setup(card);
2725        if (rc) {
2726                QETH_CARD_TEXT_(card, 2, "3err%d", rc);
2727                return rc;
2728        }
2729        rc = qeth_ulp_enable(card);
2730        if (rc) {
2731                QETH_CARD_TEXT_(card, 2, "4err%d", rc);
2732                return rc;
2733        }
2734        rc = qeth_ulp_setup(card);
2735        if (rc) {
2736                QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2737                return rc;
2738        }
2739        rc = qeth_alloc_qdio_queues(card);
2740        if (rc) {
2741                QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2742                return rc;
2743        }
2744        rc = qeth_qdio_establish(card);
2745        if (rc) {
2746                QETH_CARD_TEXT_(card, 2, "6err%d", rc);
2747                qeth_free_qdio_queues(card);
2748                return rc;
2749        }
2750        rc = qeth_qdio_activate(card);
2751        if (rc) {
2752                QETH_CARD_TEXT_(card, 2, "7err%d", rc);
2753                return rc;
2754        }
2755        rc = qeth_dm_act(card);
2756        if (rc) {
2757                QETH_CARD_TEXT_(card, 2, "8err%d", rc);
2758                return rc;
2759        }
2760
2761        return 0;
2762}
2763
2764static void qeth_print_status_message(struct qeth_card *card)
2765{
2766        switch (card->info.type) {
2767        case QETH_CARD_TYPE_OSD:
2768        case QETH_CARD_TYPE_OSM:
2769        case QETH_CARD_TYPE_OSX:
2770                /* VM uses a non-zero first character to indicate a
2771                 * HiperSockets-like reporting of the level; OSA sets
2772                 * the first character to zero.
2773                 */
2774                if (!card->info.mcl_level[0]) {
2775                        sprintf(card->info.mcl_level, "%02x%02x",
2776                                card->info.mcl_level[2],
2777                                card->info.mcl_level[3]);
2778                        break;
2779                }
2780                fallthrough;
2781        case QETH_CARD_TYPE_IQD:
2782                if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
2783                        card->info.mcl_level[0] = (char) _ebcasc[(__u8)
2784                                card->info.mcl_level[0]];
2785                        card->info.mcl_level[1] = (char) _ebcasc[(__u8)
2786                                card->info.mcl_level[1]];
2787                        card->info.mcl_level[2] = (char) _ebcasc[(__u8)
2788                                card->info.mcl_level[2]];
2789                        card->info.mcl_level[3] = (char) _ebcasc[(__u8)
2790                                card->info.mcl_level[3]];
2791                        card->info.mcl_level[QETH_MCL_LENGTH] = 0;
2792                }
2793                break;
2794        default:
2795                memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
2796        }
2797        dev_info(&card->gdev->dev,
2798                 "Device is a%s card%s%s%s\nwith link type %s.\n",
2799                 qeth_get_cardname(card),
2800                 (card->info.mcl_level[0]) ? " (level: " : "",
2801                 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2802                 (card->info.mcl_level[0]) ? ")" : "",
2803                 qeth_get_cardname_short(card));
2804}
2805
2806static void qeth_initialize_working_pool_list(struct qeth_card *card)
2807{
2808        struct qeth_buffer_pool_entry *entry;
2809
2810        QETH_CARD_TEXT(card, 5, "inwrklst");
2811
2812        list_for_each_entry(entry,
2813                            &card->qdio.init_pool.entry_list, init_list) {
2814                qeth_put_buffer_pool_entry(card, entry);
2815        }
2816}
2817
2818static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
2819                                        struct qeth_card *card)
2820{
2821        struct qeth_buffer_pool_entry *entry;
2822        int i, free;
2823
2824        if (list_empty(&card->qdio.in_buf_pool.entry_list))
2825                return NULL;
2826
2827        list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) {
2828                free = 1;
2829                for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2830                        if (page_count(entry->elements[i]) > 1) {
2831                                free = 0;
2832                                break;
2833                        }
2834                }
2835                if (free) {
2836                        list_del_init(&entry->list);
2837                        return entry;
2838                }
2839        }
2840
2841        /* no free buffer in pool so take first one and swap pages */
2842        entry = list_first_entry(&card->qdio.in_buf_pool.entry_list,
2843                                 struct qeth_buffer_pool_entry, list);
2844        for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2845                if (page_count(entry->elements[i]) > 1) {
2846                        struct page *page = dev_alloc_page();
2847
2848                        if (!page)
2849                                return NULL;
2850
2851                        __free_page(entry->elements[i]);
2852                        entry->elements[i] = page;
2853                        QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
2854                }
2855        }
2856        list_del_init(&entry->list);
2857        return entry;
2858}
2859
2860static int qeth_init_input_buffer(struct qeth_card *card,
2861                struct qeth_qdio_buffer *buf)
2862{
2863        struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry;
2864        int i;
2865
2866        if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
2867                buf->rx_skb = netdev_alloc_skb(card->dev,
2868                                               ETH_HLEN +
2869                                               sizeof(struct ipv6hdr));
2870                if (!buf->rx_skb)
2871                        return -ENOMEM;
2872        }
2873
2874        if (!pool_entry) {
2875                pool_entry = qeth_find_free_buffer_pool_entry(card);
2876                if (!pool_entry)
2877                        return -ENOBUFS;
2878
2879                buf->pool_entry = pool_entry;
2880        }
2881
2882        /*
2883         * Since the buffer is accessed only from the input_tasklet,
2884         * there shouldn't be a need to synchronize; also, since we use
2885         * the QETH_IN_BUF_REQUEUE_THRESHOLD, we should never run out of
2886         * buffers.
2887         */
2888        for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2889                buf->buffer->element[i].length = PAGE_SIZE;
2890                buf->buffer->element[i].addr =
2891                        page_to_phys(pool_entry->elements[i]);
2892                if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2893                        buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
2894                else
2895                        buf->buffer->element[i].eflags = 0;
2896                buf->buffer->element[i].sflags = 0;
2897        }
2898        return 0;
2899}
2900
2901static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
2902                                            struct qeth_qdio_out_q *queue)
2903{
2904        if (!IS_IQD(card) ||
2905            qeth_iqd_is_mcast_queue(card, queue) ||
2906            card->options.cq == QETH_CQ_ENABLED ||
2907            qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
2908                return 1;
2909
2910        return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
2911}
2912
2913static int qeth_init_qdio_queues(struct qeth_card *card)
2914{
2915        unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count;
2916        unsigned int i;
2917        int rc;
2918
2919        QETH_CARD_TEXT(card, 2, "initqdqs");
2920
2921        /* inbound queue */
2922        qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2923        memset(&card->rx, 0, sizeof(struct qeth_rx));
2924
2925        qeth_initialize_working_pool_list(card);
2926        /* give only as many buffers to hardware as we have buffer pool entries */
2927        for (i = 0; i < rx_bufs; i++) {
2928                rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
2929                if (rc)
2930                        return rc;
2931        }
2932
2933        card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs);
2934        rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs,
2935                     NULL);
2936        if (rc) {
2937                QETH_CARD_TEXT_(card, 2, "1err%d", rc);
2938                return rc;
2939        }
2940
2941        /* completion */
2942        rc = qeth_cq_init(card);
2943        if (rc) {
2944                return rc;
2945        }
2946
2947        /* outbound queue */
2948        for (i = 0; i < card->qdio.no_out_queues; ++i) {
2949                struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];
2950
2951                qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2952                queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
2953                queue->next_buf_to_fill = 0;
2954                queue->do_pack = 0;
2955                queue->prev_hdr = NULL;
2956                queue->coalesced_frames = 0;
2957                queue->bulk_start = 0;
2958                queue->bulk_count = 0;
2959                queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
2960                atomic_set(&queue->used_buffers, 0);
2961                atomic_set(&queue->set_pci_flags_count, 0);
2962                netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
2963        }
2964        return 0;
2965}
2966
2967static void qeth_ipa_finalize_cmd(struct qeth_card *card,
2968                                  struct qeth_cmd_buffer *iob)
2969{
2970        qeth_mpc_finalize_cmd(card, iob);
2971
2972        /* override with IPA-specific values: */
2973        __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
2974}
2975
2976static void qeth_prepare_ipa_cmd(struct qeth_card *card,
2977                                 struct qeth_cmd_buffer *iob, u16 cmd_length)
2978{
2979        u8 prot_type = qeth_mpc_select_prot_type(card);
2980        u16 total_length = iob->length;
2981
2982        qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
2983                       iob->data);
2984        iob->finalize = qeth_ipa_finalize_cmd;
2985
2986        memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
2987        memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
2988        memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
2989        memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
2990        memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
2991        memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
2992               &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2993        memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
2994}
2995
2996static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob,
2997                                 struct qeth_cmd_buffer *reply)
2998{
2999        struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply);
3000
3001        return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno);
3002}
3003
3004struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
3005                                           enum qeth_ipa_cmds cmd_code,
3006                                           enum qeth_prot_versions prot,
3007                                           unsigned int data_length)
3008{
3009        struct qeth_cmd_buffer *iob;
3010        struct qeth_ipacmd_hdr *hdr;
3011
3012        data_length += offsetof(struct qeth_ipa_cmd, data);
3013        iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
3014                             QETH_IPA_TIMEOUT);
3015        if (!iob)
3016                return NULL;
3017
3018        qeth_prepare_ipa_cmd(card, iob, data_length);
3019        iob->match = qeth_ipa_match_reply;
3020
3021        hdr = &__ipa_cmd(iob)->hdr;
3022        hdr->command = cmd_code;
3023        hdr->initiator = IPA_CMD_INITIATOR_HOST;
3024        /* hdr->seqno is set by qeth_send_control_data() */
3025        hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH;
3026        hdr->rel_adapter_no = (u8) card->dev->dev_port;
3027        hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
3028        hdr->param_count = 1;
3029        hdr->prot_version = prot;
3030        return iob;
3031}
3032EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);
3033
3034static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
3035                                struct qeth_reply *reply, unsigned long data)
3036{
3037        struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3038
3039        return (cmd->hdr.return_code) ? -EIO : 0;
3040}
3041
3042/**
3043 * qeth_send_ipa_cmd() - send an IPA command
3044 *
3045 * See qeth_send_control_data() for an explanation of the arguments.
3046 * Returns 0 on success, or a negative errno when sending the command failed.
3047 */
3048int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
3049                int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
3050                        unsigned long),
3051                void *reply_param)
3052{
3053        int rc;
3054
3055        QETH_CARD_TEXT(card, 4, "sendipa");
3056
3057        if (card->read_or_write_problem) {
3058                qeth_put_cmd(iob);
3059                return -EIO;
3060        }
3061
3062        if (reply_cb == NULL)
3063                reply_cb = qeth_send_ipa_cmd_cb;
3064        rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
3065        if (rc == -ETIME) {
3066                qeth_clear_ipacmd_list(card);
3067                qeth_schedule_recovery(card);
3068        }
3069        return rc;
3070}
3071EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
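/*
 * Illustrative only: the typical allocate-and-send pattern for IPA commands,
 * modelled on qeth_send_startlan() below. The callback and reply_param names
 * are placeholders for this sketch; passing a NULL callback falls back to the
 * default qeth_send_ipa_cmd_cb():
 *
 *	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
 *	if (!iob)
 *		return -ENOMEM;
 *	return qeth_send_ipa_cmd(card, iob, my_reply_cb, my_reply_param);
 */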
3072
3073static int qeth_send_startlan_cb(struct qeth_card *card,
3074                                 struct qeth_reply *reply, unsigned long data)
3075{
3076        struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3077
3078        if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
3079                return -ENETDOWN;
3080
3081        return (cmd->hdr.return_code) ? -EIO : 0;
3082}
3083
3084static int qeth_send_startlan(struct qeth_card *card)
3085{
3086        struct qeth_cmd_buffer *iob;
3087
3088        QETH_CARD_TEXT(card, 2, "strtlan");
3089
3090        iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
3091        if (!iob)
3092                return -ENOMEM;
3093        return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
3094}
3095
3096static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
3097{
3098        if (!cmd->hdr.return_code)
3099                cmd->hdr.return_code =
3100                        cmd->data.setadapterparms.hdr.return_code;
3101        return cmd->hdr.return_code;
3102}
3103
3104static int qeth_query_setadapterparms_cb(struct qeth_card *card,
3105                struct qeth_reply *reply, unsigned long data)
3106{
3107        struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3108        struct qeth_query_cmds_supp *query_cmd;
3109
3110        QETH_CARD_TEXT(card, 3, "quyadpcb");
3111        if (qeth_setadpparms_inspect_rc(cmd))
3112                return -EIO;
3113
3114        query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp;
3115        if (query_cmd->lan_type & 0x7f) {
3116                if (!qeth_is_supported_link_type(card, query_cmd->lan_type))
3117                        return -EPROTONOSUPPORT;
3118
3119                card->info.link_type = query_cmd->lan_type;
3120                QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
3121        }
3122
3123        card->options.adp.supported = query_cmd->supported_cmds;
3124        return 0;
3125}
3126
3127static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
3128                                                    enum qeth_ipa_setadp_cmd adp_cmd,
3129                                                    unsigned int data_length)
3130{
3131        struct qeth_ipacmd_setadpparms_hdr *hdr;
3132        struct qeth_cmd_buffer *iob;
3133
3134        iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
3135                                 data_length +
3136                                 offsetof(struct qeth_ipacmd_setadpparms,
3137                                          data));
3138        if (!iob)
3139                return NULL;
3140
3141        hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
3142        hdr->cmdlength = sizeof(*hdr) + data_length;
3143        hdr->command_code = adp_cmd;
3144        hdr->used_total = 1;
3145        hdr->seq_no = 1;
3146        return iob;
3147}
3148
3149static int qeth_query_setadapterparms(struct qeth_card *card)
3150{
3151        int rc;
3152        struct qeth_cmd_buffer *iob;
3153
3154        QETH_CARD_TEXT(card, 3, "queryadp");
3155        iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
3156                                   SETADP_DATA_SIZEOF(query_cmds_supp));
3157        if (!iob)
3158                return -ENOMEM;
3159        rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
3160        return rc;
3161}
3162
3163static int qeth_query_ipassists_cb(struct qeth_card *card,
3164                struct qeth_reply *reply, unsigned long data)
3165{
3166        struct qeth_ipa_cmd *cmd;
3167
3168        QETH_CARD_TEXT(card, 2, "qipasscb");
3169
3170        cmd = (struct qeth_ipa_cmd *) data;
3171
3172        switch (cmd->hdr.return_code) {
3173        case IPA_RC_SUCCESS:
3174                break;
3175        case IPA_RC_NOTSUPP:
3176        case IPA_RC_L2_UNSUPPORTED_CMD:
3177                QETH_CARD_TEXT(card, 2, "ipaunsup");
3178                card->options.ipa4.supported |= IPA_SETADAPTERPARMS;
3179                card->options.ipa6.supported |= IPA_SETADAPTERPARMS;
3180                return -EOPNOTSUPP;
3181        default:
3182                QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
3183                                 CARD_DEVID(card), cmd->hdr.return_code);
3184                return -EIO;
3185        }
3186
3187        if (cmd->hdr.prot_version == QETH_PROT_IPV4)
3188                card->options.ipa4 = cmd->hdr.assists;
3189        else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
3190                card->options.ipa6 = cmd->hdr.assists;
3191        else
3192                QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
3193                                 CARD_DEVID(card));
3194        return 0;
3195}
3196
3197static int qeth_query_ipassists(struct qeth_card *card,
3198                                enum qeth_prot_versions prot)
3199{
3200        int rc;
3201        struct qeth_cmd_buffer *iob;
3202
3203        QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
3204        iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
3205        if (!iob)
3206                return -ENOMEM;
3207        rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
3208        return rc;
3209}
3210
3211static int qeth_query_switch_attributes_cb(struct qeth_card *card,
3212                                struct qeth_reply *reply, unsigned long data)
3213{
3214        struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3215        struct qeth_query_switch_attributes *attrs;
3216        struct qeth_switch_info *sw_info;
3217
3218        QETH_CARD_TEXT(card, 2, "qswiatcb");
3219        if (qeth_setadpparms_inspect_rc(cmd))
3220                return -EIO;
3221
3222        sw_info = (struct qeth_switch_info *)reply->param;
3223        attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
3224        sw_info->capabilities = attrs->capabilities;
3225        sw_info->settings = attrs->settings;
3226        QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
3227                        sw_info->settings);
3228        return 0;
3229}
3230
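/*
 * A hedged caller sketch for qeth_query_switch_attributes() below: the IPA
 * command is processed synchronously, so the reply data can be written into
 * a caller-provided structure (names below are for illustration only):
 *
 *	struct qeth_switch_info sw_info;
 *
 *	if (!qeth_query_switch_attributes(card, &sw_info))
 *		report(sw_info.capabilities, sw_info.settings);
 */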
3231int qeth_query_switch_attributes(struct qeth_card *card,
3232                                 struct qeth_switch_info *sw_info)
3233{
3234        struct qeth_cmd_buffer *iob;
3235
3236        QETH_CARD_TEXT(card, 2, "qswiattr");
3237        if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
3238                return -EOPNOTSUPP;
3239        if (!netif_carrier_ok(card->dev))
3240                return -ENOMEDIUM;
3241        iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
3242        if (!iob)
3243                return -ENOMEM;
3244        return qeth_send_ipa_cmd(card, iob,
3245                                qeth_query_switch_attributes_cb, sw_info);
3246}
3247
3248struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
3249                                          enum qeth_diags_cmds sub_cmd,
3250                                          unsigned int data_length)
3251{
3252        struct qeth_ipacmd_diagass *cmd;
3253        struct qeth_cmd_buffer *iob;
3254
3255        iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
3256                                 DIAG_HDR_LEN + data_length);
3257        if (!iob)
3258                return NULL;
3259
3260        cmd = &__ipa_cmd(iob)->data.diagass;
3261        cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
3262        cmd->subcmd = sub_cmd;
3263        return iob;
3264}
3265EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);
3266
3267static int qeth_query_setdiagass_cb(struct qeth_card *card,
3268                struct qeth_reply *reply, unsigned long data)
3269{
3270        struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3271        u16 rc = cmd->hdr.return_code;
3272
3273        if (rc) {
3274                QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
3275                return -EIO;
3276        }
3277
3278        card->info.diagass_support = cmd->data.diagass.ext;
3279        return 0;
3280}
3281
3282static int qeth_query_setdiagass(struct qeth_card *card)
3283{
3284        struct qeth_cmd_buffer *iob;
3285
3286        QETH_CARD_TEXT(card, 2, "qdiagass");
3287        iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
3288        if (!iob)
3289                return -ENOMEM;
3290        return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
3291}
3292
3293static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
3294{
3295        unsigned long info = get_zeroed_page(GFP_KERNEL);
3296        struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
3297        struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
3298        struct ccw_dev_id ccwid;
3299        int level;
3300
3301        tid->chpid = card->info.chpid;
3302        ccw_device_get_id(CARD_RDEV(card), &ccwid);
3303        tid->ssid = ccwid.ssid;
3304        tid->devno = ccwid.devno;
3305        if (!info)
3306                return;
3307        level = stsi(NULL, 0, 0, 0);
3308        if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
3309                tid->lparnr = info222->lpar_number;
3310        if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
3311                EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
3312                memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
3313        }
3314        free_page(info);
3315}
3316
3317static int qeth_hw_trap_cb(struct qeth_card *card,
3318                struct qeth_reply *reply, unsigned long data)
3319{
3320        struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3321        u16 rc = cmd->hdr.return_code;
3322
3323        if (rc) {
3324                QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
3325                return -EIO;
3326        }
3327        return 0;
3328}
3329
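/*
 * Arm, disarm or trigger the HW trap facility. When arming, the trap id
 * assembled by qeth_get_trap_id() above (CHPID, subchannel set and device
 * number, LPAR number, z/VM guest name) is copied into the command's cdata.
 */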
3330int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
3331{
3332        struct qeth_cmd_buffer *iob;
3333        struct qeth_ipa_cmd *cmd;
3334
3335        QETH_CARD_TEXT(card, 2, "diagtrap");
3336        iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
3337        if (!iob)
3338                return -ENOMEM;
3339        cmd = __ipa_cmd(iob);
3340        cmd->data.diagass.type = 1;
3341        cmd->data.diagass.action = action;
3342        switch (action) {
3343        case QETH_DIAGS_TRAP_ARM:
3344                cmd->data.diagass.options = 0x0003;
3345                cmd->data.diagass.ext = 0x00010000 +
3346                        sizeof(struct qeth_trap_id);
3347                qeth_get_trap_id(card,
3348                        (struct qeth_trap_id *)cmd->data.diagass.cdata);
3349                break;
3350        case QETH_DIAGS_TRAP_DISARM:
3351                cmd->data.diagass.options = 0x0001;
3352                break;
3353        case QETH_DIAGS_TRAP_CAPTURE:
3354                break;
3355        }
3356        return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
3357}
3358
3359static int qeth_check_qdio_errors(struct qeth_card *card,
3360                                  struct qdio_buffer *buf,
3361                                  unsigned int qdio_error,
3362                                  const char *dbftext)
3363{
3364        if (qdio_error) {
3365                QETH_CARD_TEXT(card, 2, dbftext);
3366                QETH_CARD_TEXT_(card, 2, " F15=%02X",
3367                               buf->element[15].sflags);
3368                QETH_CARD_TEXT_(card, 2, " F14=%02X",
3369                               buf->element[14].sflags);
3370                QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
3371                if (buf->element[15].sflags == 0x12) {
3372                        QETH_CARD_STAT_INC(card, rx_fifo_errors);
3373                        return 0;
3374                }
3375                return 1;
3376        }
3377        return 0;
3378}
3379
3380static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
3381                                         unsigned int count)
3382{
3383        struct qeth_qdio_q *queue = card->qdio.in_q;
3384        struct list_head *lh;
3385        int i;
3386        int rc;
3387        int newcount = 0;
3388
3389        /* only requeue at a certain threshold to avoid SIGAs */
3390        if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
3391                for (i = queue->next_buf_to_init;
3392                     i < queue->next_buf_to_init + count; ++i) {
3393                        if (qeth_init_input_buffer(card,
3394                                &queue->bufs[QDIO_BUFNR(i)])) {
3395                                break;
3396                        } else {
3397                                newcount++;
3398                        }
3399                }
3400
3401                if (newcount < count) {
3402                        /* We are in a memory shortage, so switch back to
3403                         * traditional skb allocation and drop packets. */
3404                        atomic_set(&card->force_alloc_skb, 3);
3405                        count = newcount;
3406                } else {
3407                        atomic_add_unless(&card->force_alloc_skb, -1, 0);
3408                }
3409
3410                if (!count) {
3411                        i = 0;
3412                        list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
3413                                i++;
3414                        if (i == card->qdio.in_buf_pool.buf_count) {
3415                                QETH_CARD_TEXT(card, 2, "qsarbw");
3416                                schedule_delayed_work(
3417                                        &card->buffer_reclaim_work,
3418                                        QETH_RECLAIM_WORK_TIME);
3419                        }
3420                        return 0;
3421                }
3422
3423                rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
3424                             queue->next_buf_to_init, count, NULL);
3425                if (rc) {
3426                        QETH_CARD_TEXT(card, 2, "qinberr");
3427                }
3428                queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
3429                                                     count);
3430                return count;
3431        }
3432
3433        return 0;
3434}
3435
3436static void qeth_buffer_reclaim_work(struct work_struct *work)
3437{
3438        struct qeth_card *card = container_of(to_delayed_work(work),
3439                                              struct qeth_card,
3440                                              buffer_reclaim_work);
3441
3442        local_bh_disable();
3443        napi_schedule(&card->napi);
3444        /* kick-start the NAPI softirq: */
3445        local_bh_enable();
3446}
3447
3448static void qeth_handle_send_error(struct qeth_card *card,
3449                struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
3450{
3451        int sbalf15 = buffer->buffer->element[15].sflags;
3452
3453        QETH_CARD_TEXT(card, 6, "hdsnderr");
3454        qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
3455
3456        if (!qdio_err)
3457                return;
3458
3459        if ((sbalf15 >= 15) && (sbalf15 <= 31))
3460                return;
3461
3462        QETH_CARD_TEXT(card, 1, "lnkfail");
3463        QETH_CARD_TEXT_(card, 1, "%04x %02x",
3464                       (u16)qdio_err, (u8)sbalf15);
3465}
3466
3467/**
3468 * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
3469 * @queue: queue to check for packing buffer
3470 *
3471 * Returns number of buffers that were prepared for flush.
3472 */
3473static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
3474{
3475        struct qeth_qdio_out_buffer *buffer;
3476
3477        buffer = queue->bufs[queue->next_buf_to_fill];
3478        if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
3479            (buffer->next_element_to_fill > 0)) {
3480                /* it's a packing buffer */
3481                atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3482                queue->next_buf_to_fill =
3483                        QDIO_BUFNR(queue->next_buf_to_fill + 1);
3484                return 1;
3485        }
3486        return 0;
3487}
3488
3489/*
3490 * Switches to packing state if the number of used buffers on a queue
3491 * reaches a certain limit.
3492 */
3493static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
3494{
3495        if (!queue->do_pack) {
3496                if (atomic_read(&queue->used_buffers)
3497                    >= QETH_HIGH_WATERMARK_PACK) {
3498                        /* switch non-PACKING -> PACKING */
3499                        QETH_CARD_TEXT(queue->card, 6, "np->pack");
3500                        QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3501                        queue->do_pack = 1;
3502                }
3503        }
3504}
3505
3506/*
3507 * Switches from packing to non-packing mode. If there is a packing
3508 * buffer on the queue this buffer will be prepared to be flushed.
3509 * In that case 1 is returned to inform the caller. If no buffer
3510 * has to be flushed, zero is returned.
3511 */
3512static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
3513{
3514        if (queue->do_pack) {
3515                if (atomic_read(&queue->used_buffers)
3516                    <= QETH_LOW_WATERMARK_PACK) {
3517                        /* switch PACKING -> non-PACKING */
3518                        QETH_CARD_TEXT(queue->card, 6, "pack->np");
3519                        QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3520                        queue->do_pack = 0;
3521                        return qeth_prep_flush_pack_buffer(queue);
3522                }
3523        }
3524        return 0;
3525}
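/*
 * Together, qeth_switch_to_packing_if_needed() and
 * qeth_switch_to_nonpacking_if_needed() form a hysteresis: packing starts
 * once used_buffers reaches QETH_HIGH_WATERMARK_PACK and only stops again
 * after it has dropped to QETH_LOW_WATERMARK_PACK, so the queue does not
 * flip modes on every completion.
 */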
3526
3527static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
3528                               int count)
3529{
3530        struct qeth_qdio_out_buffer *buf = queue->bufs[index];
3531        struct qeth_card *card = queue->card;
3532        unsigned int frames, usecs;
3533        struct qaob *aob = NULL;
3534        int rc;
3535        int i;
3536
3537        for (i = index; i < index + count; ++i) {
3538                unsigned int bidx = QDIO_BUFNR(i);
3539                struct sk_buff *skb;
3540
3541                buf = queue->bufs[bidx];
3542                buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
3543                                SBAL_EFLAGS_LAST_ENTRY;
3544                queue->coalesced_frames += buf->frames;
3545
3546                if (IS_IQD(card)) {
3547                        skb_queue_walk(&buf->skb_list, skb)
3548                                skb_tx_timestamp(skb);
3549                }
3550        }
3551
3552        if (IS_IQD(card)) {
3553                if (card->options.cq == QETH_CQ_ENABLED &&
3554                    !qeth_iqd_is_mcast_queue(card, queue) &&
3555                    count == 1) {
3556                        if (!buf->aob)
3557                                buf->aob = qdio_allocate_aob();
3558                        if (buf->aob) {
3559                                struct qeth_qaob_priv1 *priv;
3560
3561                                aob = buf->aob;
3562                                priv = (struct qeth_qaob_priv1 *)&aob->user1;
3563                                priv->state = QETH_QAOB_ISSUED;
3564                                priv->queue_no = queue->queue_no;
3565                        }
3566                }
3567        } else {
3568                if (!queue->do_pack) {
3569                        if ((atomic_read(&queue->used_buffers) >=
3570                                (QETH_HIGH_WATERMARK_PACK -
3571                                 QETH_WATERMARK_PACK_FUZZ)) &&
3572                            !atomic_read(&queue->set_pci_flags_count)) {
3573                                /* it's likely that we'll go to packing
3574                                 * mode soon */
3575                                atomic_inc(&queue->set_pci_flags_count);
3576                                buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3577                        }
3578                } else {
3579                        if (!atomic_read(&queue->set_pci_flags_count)) {
3580                                /*
3581                                 * There is no outstanding PCI request any
3582                                 * more, so request one to make sure a PCI
3583                                 * interrupt fires at some point. That lets
3584                                 * us flush packed buffers that might still
3585                                 * be hanging around, which can happen if
3586                                 * the stack requests no further sends.
3587                                 */
3588                                atomic_inc(&queue->set_pci_flags_count);
3589                                buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3590                        }
3591                }
3592        }
3593
3594        QETH_TXQ_STAT_INC(queue, doorbell);
3595        rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_OUTPUT, queue->queue_no,
3596                     index, count, aob);
3597
3598        switch (rc) {
3599        case 0:
3600        case -ENOBUFS:
3601                /* ignore temporary SIGA errors without busy condition */
3602
3603                /* Fake the TX completion interrupt: */
3604                frames = READ_ONCE(queue->max_coalesced_frames);
3605                usecs = READ_ONCE(queue->coalesce_usecs);
3606
3607                if (frames && queue->coalesced_frames >= frames) {
3608                        napi_schedule(&queue->napi);
3609                        queue->coalesced_frames = 0;
3610                        QETH_TXQ_STAT_INC(queue, coal_frames);
3611                } else if (qeth_use_tx_irqs(card) &&
3612                           atomic_read(&queue->used_buffers) >= 32) {
3613                        /* Old behaviour carried over from the qdio layer: */
3614                        napi_schedule(&queue->napi);
3615                        QETH_TXQ_STAT_INC(queue, coal_frames);
3616                } else if (usecs) {
3617                        qeth_tx_arm_timer(queue, usecs);
3618                }
3619
3620                break;
3621        default:
3622                QETH_CARD_TEXT(queue->card, 2, "flushbuf");
3623                QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
3624                QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
3625                QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
3626                QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
3627
3628                /* This must not happen under normal circumstances. If it
3629                 * does, something is really wrong -> recover. */
3630                qeth_schedule_recovery(queue->card);
3631        }
3632}
3633
3634static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
3635{
3636        qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);
3637
3638        queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
3639        queue->prev_hdr = NULL;
3640        queue->bulk_count = 0;
3641}
3642
3643static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
3644{
3645        /*
3646         * Check if we have to switch to non-packing mode, or if
3647         * we have to get a PCI flag out on the queue.
3648         */
3649        if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
3650            !atomic_read(&queue->set_pci_flags_count)) {
3651                unsigned int index, flush_cnt;
3652                bool q_was_packing;
3653
3654                spin_lock(&queue->lock);
3655
3656                index = queue->next_buf_to_fill;
3657                q_was_packing = queue->do_pack;
3658
3659                flush_cnt = qeth_switch_to_nonpacking_if_needed(queue);
3660                if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count))
3661                        flush_cnt = qeth_prep_flush_pack_buffer(queue);
3662
3663                if (flush_cnt) {
3664                        qeth_flush_buffers(queue, index, flush_cnt);
3665                        if (q_was_packing)
3666                                QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
3667                }
3668
3669                spin_unlock(&queue->lock);
3670        }
3671}
3672
3673static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
3674{
3675        struct qeth_card *card = (struct qeth_card *)card_ptr;
3676
3677        napi_schedule_irqoff(&card->napi);
3678}
3679
3680int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
3681{
3682        int rc;
3683
3684        if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
3685                rc = -1;
3686                goto out;
3687        } else {
3688                if (card->options.cq == cq) {
3689                        rc = 0;
3690                        goto out;
3691                }
3692
3693                qeth_free_qdio_queues(card);
3694                card->options.cq = cq;
3695                rc = 0;
3696        }
3697out:
3698        return rc;
3699
3700}
3701EXPORT_SYMBOL_GPL(qeth_configure_cq);
3702
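/*
 * Called for each AOB found on the completion queue: mark the AOB as DONE
 * and, if TX completion is already waiting for it (state QETH_QAOB_PENDING),
 * kick the owning TX queue's NAPI instance so the buffer can be finalized.
 */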
3703static void qeth_qdio_handle_aob(struct qeth_card *card, struct qaob *aob)
3704{
3705        struct qeth_qaob_priv1 *priv = (struct qeth_qaob_priv1 *)&aob->user1;
3706        unsigned int queue_no = priv->queue_no;
3707
3708        BUILD_BUG_ON(sizeof(*priv) > ARRAY_SIZE(aob->user1));
3709
3710        if (xchg(&priv->state, QETH_QAOB_DONE) == QETH_QAOB_PENDING &&
3711            queue_no < card->qdio.no_out_queues)
3712                napi_schedule(&card->qdio.out_qs[queue_no]->napi);
3713}
3714
3715static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
3716                                 unsigned int queue, int first_element,
3717                                 int count)
3718{
3719        struct qeth_qdio_q *cq = card->qdio.c_q;
3720        int i;
3721        int rc;
3722
3723        QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
3724        QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
3725        QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
3726
3727        if (qdio_err) {
3728                netif_tx_stop_all_queues(card->dev);
3729                qeth_schedule_recovery(card);
3730                return;
3731        }
3732
3733        for (i = first_element; i < first_element + count; ++i) {
3734                struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
3735                int e = 0;
3736
3737                while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
3738                       buffer->element[e].addr) {
3739                        unsigned long phys_aob_addr = buffer->element[e].addr;
3740
3741                        qeth_qdio_handle_aob(card, phys_to_virt(phys_aob_addr));
3742                        ++e;
3743                }
3744                qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
3745        }
3746        rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
3747                     cq->next_buf_to_init, count, NULL);
3748        if (rc) {
3749                dev_warn(&card->gdev->dev,
3750                        "QDIO reported an error, rc=%i\n", rc);
3751                QETH_CARD_TEXT(card, 2, "qcqherr");
3752        }
3753
3754        cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
3755}
3756
3757static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
3758                                    unsigned int qdio_err, int queue,
3759                                    int first_elem, int count,
3760                                    unsigned long card_ptr)
3761{
3762        struct qeth_card *card = (struct qeth_card *)card_ptr;
3763
3764        QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
3765        QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);
3766
3767        if (qdio_err)
3768                qeth_schedule_recovery(card);
3769}
3770
3771static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
3772                                     unsigned int qdio_error, int __queue,
3773                                     int first_element, int count,
3774                                     unsigned long card_ptr)
3775{
3776        struct qeth_card *card = (struct qeth_card *)card_ptr;
3777
3778        QETH_CARD_TEXT(card, 2, "achkcond");
3779        netif_tx_stop_all_queues(card->dev);
3780        qeth_schedule_recovery(card);
3781}
3782
3783/*
3784 * Note: qeth_get_priority_queue() assumes that there are 4 outbound queues.
3785 */
3786int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
3787{
3788        struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
3789        u8 tos;
3790
3791        switch (card->qdio.do_prio_queueing) {
3792        case QETH_PRIO_Q_ING_TOS:
3793        case QETH_PRIO_Q_ING_PREC:
3794                switch (vlan_get_protocol(skb)) {
3795                case htons(ETH_P_IP):
3796                        tos = ipv4_get_dsfield(ip_hdr(skb));
3797                        break;
3798                case htons(ETH_P_IPV6):
3799                        tos = ipv6_get_dsfield(ipv6_hdr(skb));
3800                        break;
3801                default:
3802                        return card->qdio.default_out_queue;
3803                }
3804                if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
3805                        return ~tos >> 6 & 3;
3806                if (tos & IPTOS_MINCOST)
3807                        return 3;
3808                if (tos & IPTOS_RELIABILITY)
3809                        return 2;
3810                if (tos & IPTOS_THROUGHPUT)
3811                        return 1;
3812                if (tos & IPTOS_LOWDELAY)
3813                        return 0;
3814                break;
3815        case QETH_PRIO_Q_ING_SKB:
3816                if (skb->priority > 5)
3817                        return 0;
3818                return ~skb->priority >> 1 & 3;
3819        case QETH_PRIO_Q_ING_VLAN:
3820                if (veth->h_vlan_proto == htons(ETH_P_8021Q))
3821                        return ~ntohs(veth->h_vlan_TCI) >>
3822                               (VLAN_PRIO_SHIFT + 1) & 3;
3823                break;
3824        case QETH_PRIO_Q_ING_FIXED:
3825                return card->qdio.default_out_queue;
3826        default:
3827                break;
3828        }
3829        return card->qdio.default_out_queue;
3830}
3831EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
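/*
 * Worked example for qeth_get_priority_queue() above, assuming the usual
 * arithmetic right shift for negative values: with QETH_PRIO_Q_ING_PREC,
 * "~tos >> 6 & 3" maps IP precedence 6-7 to queue 0, 4-5 to queue 1, 2-3 to
 * queue 2 and 0-1 to queue 3, i.e. higher-precedence traffic lands on the
 * lower-numbered queues.
 */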
3832
3833/**
3834 * qeth_get_elements_for_frags() -      find number of SBALEs for skb frags.
3835 * @skb:                                SKB address
3836 *
3837 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
3838 * fragmented part of the SKB. Returns zero for linear SKB.
3839 */
3840static int qeth_get_elements_for_frags(struct sk_buff *skb)
3841{
3842        int cnt, elements = 0;
3843
3844        for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3845                skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
3846
3847                elements += qeth_get_elements_for_range(
3848                        (addr_t)skb_frag_address(frag),
3849                        (addr_t)skb_frag_address(frag) + skb_frag_size(frag));
3850        }
3851        return elements;
3852}
3853
3854/**
3855 * qeth_count_elements() -      Counts the number of QDIO buffer elements needed
3856 *                              to transmit an skb.
3857 * @skb:                        the skb to operate on.
3858 * @data_offset:                skip this part of the skb's linear data
3859 *
3860 * Returns the number of pages, and thus QDIO buffer elements, needed to map the
3861 * skb's data (both its linear part and paged fragments).
3862 */
3863static unsigned int qeth_count_elements(struct sk_buff *skb,
3864                                        unsigned int data_offset)
3865{
3866        unsigned int elements = qeth_get_elements_for_frags(skb);
3867        addr_t end = (addr_t)skb->data + skb_headlen(skb);
3868        addr_t start = (addr_t)skb->data + data_offset;
3869
3870        if (start != end)
3871                elements += qeth_get_elements_for_range(start, end);
3872        return elements;
3873}
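/*
 * Worked example with illustrative numbers (4 KiB pages): a linear skb whose
 * 6000 bytes of data start 100 bytes into a page covers the byte range
 * [100, 6100) and thus touches two pages, so qeth_count_elements() reports
 * two buffer elements; the same data starting 3000 bytes into a page would
 * span three pages and need three elements.
 */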
3874
3875#define QETH_HDR_CACHE_OBJ_SIZE         (sizeof(struct qeth_hdr_tso) + \
3876                                         MAX_TCP_HEADER)
3877
3878/**
3879 * qeth_add_hw_header() - add a HW header to an skb.
3880 * @queue: TX queue that the skb will be sent on.
3881 * @skb: skb that the HW header should be added to.
3882 * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
3883 *       it contains a valid pointer to a qeth_hdr.
3884 * @hdr_len: length of the HW header.
3885 * @proto_len: length of protocol headers that need to be in same page as the
3886 *             HW header.
3887 * @elements: returns the required number of buffer elements for this skb.
3888 *
3889 * Returns the pushed length. If the header can't be pushed on (e.g. because
3890 * it would cross a page boundary), it is allocated from the cache instead
3891 * and 0 is returned. A return value < 0 indicates an error.
3892 */
3893static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
3894                              struct sk_buff *skb, struct qeth_hdr **hdr,
3895                              unsigned int hdr_len, unsigned int proto_len,
3896                              unsigned int *elements)
3897{
3898        gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
3899        const unsigned int contiguous = proto_len ? proto_len : 1;
3900        const unsigned int max_elements = queue->max_elements;
3901        unsigned int __elements;
3902        addr_t start, end;
3903        bool push_ok;
3904        int rc;
3905
3906check_layout:
3907        start = (addr_t)skb->data - hdr_len;
3908        end = (addr_t)skb->data;
3909
3910        if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
3911                /* Push HW header into same page as first protocol header. */
3912                push_ok = true;
3913                /* ... but TSO always needs a separate element for headers: */
3914                if (skb_is_gso(skb))
3915                        __elements = 1 + qeth_count_elements(skb, proto_len);
3916                else
3917                        __elements = qeth_count_elements(skb, 0);
3918        } else if (!proto_len && PAGE_ALIGNED(skb->data)) {
3919                /* Push HW header into preceding page, flush with skb->data. */
3920                push_ok = true;
3921                __elements = 1 + qeth_count_elements(skb, 0);
3922        } else {
3923                /* Use header cache, copy protocol headers up. */
3924                push_ok = false;
3925                __elements = 1 + qeth_count_elements(skb, proto_len);
3926        }
3927
3928        /* Compress skb to fit into one IO buffer: */
3929        if (__elements > max_elements) {
3930                if (!skb_is_nonlinear(skb)) {
3931                        /* Drop it, no easy way of shrinking it further. */
3932                        QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
3933                                         max_elements, __elements, skb->len);
3934                        return -E2BIG;
3935                }
3936
3937                rc = skb_linearize(skb);
3938                if (rc) {
3939                        QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
3940                        return rc;
3941                }
3942
3943                QETH_TXQ_STAT_INC(queue, skbs_linearized);
3944                /* Linearization changed the layout, re-evaluate: */
3945                goto check_layout;
3946        }
3947
3948        *elements = __elements;
3949        /* Add the header: */
3950        if (push_ok) {
3951                *hdr = skb_push(skb, hdr_len);
3952                return hdr_len;
3953        }
3954
3955        /* Fall back to cache element with known-good alignment: */
3956        if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
3957                return -E2BIG;
3958        *hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
3959        if (!*hdr)
3960                return -ENOMEM;
3961        /* Copy protocol headers behind HW header: */
3962        skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
3963        return 0;
3964}
3965
3966static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
3967                              struct sk_buff *curr_skb,
3968                              struct qeth_hdr *curr_hdr)
3969{
3970        struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
3971        struct qeth_hdr *prev_hdr = queue->prev_hdr;
3972
3973        if (!prev_hdr)
3974                return true;
3975
3976        /* All packets must have the same target: */
3977        if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
3978                struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);
3979
3980                return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
3981                                        eth_hdr(curr_skb)->h_dest) &&
3982                       qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
3983        }
3984
3985        return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
3986               qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
3987}
3988
3989/**
3990 * qeth_fill_buffer() - map skb into an output buffer
3991 * @buf:        buffer to transport the skb
3992 * @skb:        skb to map into the buffer
3993 * @hdr:        qeth_hdr for this skb. Either at skb->data, or allocated
3994 *              from qeth_core_header_cache.
3995 * @offset:     when mapping the skb, start at skb->data + offset
3996 * @hd_len:     if > 0, build a dedicated header element of this size
3997 */
3998static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
3999                                     struct sk_buff *skb, struct qeth_hdr *hdr,
4000                                     unsigned int offset, unsigned int hd_len)
4001{
4002        struct qdio_buffer *buffer = buf->buffer;
4003        int element = buf->next_element_to_fill;
4004        int length = skb_headlen(skb) - offset;
4005        char *data = skb->data + offset;
4006        unsigned int elem_length, cnt;
4007        bool is_first_elem = true;
4008
4009        __skb_queue_tail(&buf->skb_list, skb);
4010
4011        /* build dedicated element for HW Header */
4012        if (hd_len) {
4013                is_first_elem = false;
4014
4015                buffer->element[element].addr = virt_to_phys(hdr);
4016                buffer->element[element].length = hd_len;
4017                buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
4018
4019                /* HW header is allocated from cache: */
4020                if ((void *)hdr != skb->data)
4021                        __set_bit(element, buf->from_kmem_cache);
4022                /* HW header was pushed and is contiguous with linear part: */
4023                else if (length > 0 && !PAGE_ALIGNED(data) &&
4024                         (data == (char *)hdr + hd_len))
4025                        buffer->element[element].eflags |=
4026                                SBAL_EFLAGS_CONTIGUOUS;
4027
4028                element++;
4029        }
4030
4031        /* map linear part into buffer element(s) */
4032        while (length > 0) {
4033                elem_length = min_t(unsigned int, length,
4034                                    PAGE_SIZE - offset_in_page(data));
4035
4036                buffer->element[element].addr = virt_to_phys(data);
4037                buffer->element[element].length = elem_length;
4038                length -= elem_length;
4039                if (is_first_elem) {
4040                        is_first_elem = false;
4041                        if (length || skb_is_nonlinear(skb))
4042                                /* skb needs additional elements */
4043                                buffer->element[element].eflags =
4044                                        SBAL_EFLAGS_FIRST_FRAG;
4045                        else
4046                                buffer->element[element].eflags = 0;
4047                } else {
4048                        buffer->element[element].eflags =
4049                                SBAL_EFLAGS_MIDDLE_FRAG;
4050                }
4051
4052                data += elem_length;
4053                element++;
4054        }
4055
4056        /* map page frags into buffer element(s) */
4057        for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
4058                skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
4059
4060                data = skb_frag_address(frag);
4061                length = skb_frag_size(frag);
4062                while (length > 0) {
4063                        elem_length = min_t(unsigned int, length,
4064                                            PAGE_SIZE - offset_in_page(data));
4065
4066                        buffer->element[element].addr = virt_to_phys(data);
4067                        buffer->element[element].length = elem_length;
4068                        buffer->element[element].eflags =
4069                                SBAL_EFLAGS_MIDDLE_FRAG;
4070
4071                        length -= elem_length;
4072                        data += elem_length;
4073                        element++;
4074                }
4075        }
4076
4077        if (buffer->element[element - 1].eflags)
4078                buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
4079        buf->next_element_to_fill = element;
4080        return element;
4081}
4082
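/*
 * IQD TX path: append the skb to the buffer at the current bulk position,
 * close the buffer once the bulk limit or its element count is reached, and
 * stop the txq when all QDIO buffers are in flight. qeth_do_send_packet()
 * below is the corresponding non-IQD path.
 */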
4083static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4084                       struct sk_buff *skb, unsigned int elements,
4085                       struct qeth_hdr *hdr, unsigned int offset,
4086                       unsigned int hd_len)
4087{
4088        unsigned int bytes = qdisc_pkt_len(skb);
4089        struct qeth_qdio_out_buffer *buffer;
4090        unsigned int next_element;
4091        struct netdev_queue *txq;
4092        bool stopped = false;
4093        bool flush;
4094
4095        buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
4096        txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
4097
4098        /* Just a sanity check, the wake/stop logic should ensure that we always
4099         * get a free buffer.
4100         */
4101        if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4102                return -EBUSY;
4103
4104        flush = !qeth_iqd_may_bulk(queue, skb, hdr);
4105
4106        if (flush ||
4107            (buffer->next_element_to_fill + elements > queue->max_elements)) {
4108                if (buffer->next_element_to_fill > 0) {
4109                        atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4110                        queue->bulk_count++;
4111                }
4112
4113                if (queue->bulk_count >= queue->bulk_max)
4114                        flush = true;
4115
4116                if (flush)
4117                        qeth_flush_queue(queue);
4118
4119                buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
4120                                                queue->bulk_count)];
4121
4122                /* Sanity-check again: */
4123                if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4124                        return -EBUSY;
4125        }
4126
4127        if (buffer->next_element_to_fill == 0 &&
4128            atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
4129                /* If a TX completion happens right _here_ and fails to wake
4130                 * the txq, then our re-check below will catch the race.
4131                 */
4132                QETH_TXQ_STAT_INC(queue, stopped);
4133                netif_tx_stop_queue(txq);
4134                stopped = true;
4135        }
4136
4137        next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
4138        buffer->bytes += bytes;
4139        buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
4140        queue->prev_hdr = hdr;
4141
4142        flush = __netdev_tx_sent_queue(txq, bytes,
4143                                       !stopped && netdev_xmit_more());
4144
4145        if (flush || next_element >= queue->max_elements) {
4146                atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4147                queue->bulk_count++;
4148
4149                if (queue->bulk_count >= queue->bulk_max)
4150                        flush = true;
4151
4152                if (flush)
4153                        qeth_flush_queue(queue);
4154        }
4155
4156        if (stopped && !qeth_out_queue_is_full(queue))
4157                netif_tx_start_queue(txq);
4158        return 0;
4159}
4160
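/*
 * Non-IQD TX path, called with queue->lock held: fill the buffer at
 * next_buf_to_fill according to the queue's packing state and flush it
 * either immediately (non-packing mode) or once it is full or the txq had
 * to be stopped.
 */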
4161static int qeth_do_send_packet(struct qeth_card *card,
4162                               struct qeth_qdio_out_q *queue,
4163                               struct sk_buff *skb, struct qeth_hdr *hdr,
4164                               unsigned int offset, unsigned int hd_len,
4165                               unsigned int elements_needed)
4166{
4167        unsigned int start_index = queue->next_buf_to_fill;
4168        struct qeth_qdio_out_buffer *buffer;
4169        unsigned int next_element;
4170        struct netdev_queue *txq;
4171        bool stopped = false;
4172        int flush_count = 0;
4173        int do_pack = 0;
4174        int rc = 0;
4175
4176        buffer = queue->bufs[queue->next_buf_to_fill];
4177
4178        /* Just a sanity check, the wake/stop logic should ensure that we always
4179         * get a free buffer.
4180         */
4181        if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4182                return -EBUSY;
4183
4184        txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
4185
4186        /* check if we need to switch packing state of this queue */
4187        qeth_switch_to_packing_if_needed(queue);
4188        if (queue->do_pack) {
4189                do_pack = 1;
4190                /* does packet fit in current buffer? */
4191                if (buffer->next_element_to_fill + elements_needed >
4192                    queue->max_elements) {
4193                        /* ... no -> set state PRIMED */
4194                        atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4195                        flush_count++;
4196                        queue->next_buf_to_fill =
4197                                QDIO_BUFNR(queue->next_buf_to_fill + 1);
4198                        buffer = queue->bufs[queue->next_buf_to_fill];
4199
4200                        /* We stepped forward, so sanity-check again: */
4201                        if (atomic_read(&buffer->state) !=
4202                            QETH_QDIO_BUF_EMPTY) {
4203                                qeth_flush_buffers(queue, start_index,
4204                                                           flush_count);
4205                                rc = -EBUSY;
4206                                goto out;
4207                        }
4208                }
4209        }
4210
4211        if (buffer->next_element_to_fill == 0 &&
4212            atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
4213                /* If a TX completion happens right _here_ and fails to wake
4214                 * the txq, then our re-check below will catch the race.
4215                 */
4216                QETH_TXQ_STAT_INC(queue, stopped);
4217                netif_tx_stop_queue(txq);
4218                stopped = true;
4219        }
4220
4221        next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
4222        buffer->bytes += qdisc_pkt_len(skb);
4223        buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
4224
4225        if (queue->do_pack)
4226                QETH_TXQ_STAT_INC(queue, skbs_pack);
4227        if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
4228                flush_count++;
4229                atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4230                queue->next_buf_to_fill =
4231                                QDIO_BUFNR(queue->next_buf_to_fill + 1);
4232        }
4233
4234        if (flush_count)
4235                qeth_flush_buffers(queue, start_index, flush_count);
4236
4237out:
4238        if (do_pack)
4239                QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
4240
4241        if (stopped && !qeth_out_queue_is_full(queue))
4242                netif_tx_start_queue(txq);
4243        return rc;
4244}
4245
4246static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
4247                              unsigned int payload_len, struct sk_buff *skb,
4248                              unsigned int proto_len)
4249{
4250        struct qeth_hdr_ext_tso *ext = &hdr->ext;
4251
4252        ext->hdr_tot_len = sizeof(*ext);
4253        ext->imb_hdr_no = 1;
4254        ext->hdr_type = 1;
4255        ext->hdr_version = 1;
4256        ext->hdr_len = 28;
4257        ext->payload_len = payload_len;
4258        ext->mss = skb_shinfo(skb)->gso_size;
4259        ext->dg_hdr_len = proto_len;
4260}
4261
4262int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
4263              struct qeth_qdio_out_q *queue, __be16 proto,
4264              void (*fill_header)(struct qeth_qdio_out_q *queue,
4265                                  struct qeth_hdr *hdr, struct sk_buff *skb,
4266                                  __be16 proto, unsigned int data_len))
4267{
4268        unsigned int proto_len, hw_hdr_len;
4269        unsigned int frame_len = skb->len;
4270        bool is_tso = skb_is_gso(skb);
4271        unsigned int data_offset = 0;
4272        struct qeth_hdr *hdr = NULL;
4273        unsigned int hd_len = 0;
4274        unsigned int elements;
4275        int push_len, rc;
4276
4277        if (is_tso) {
4278                hw_hdr_len = sizeof(struct qeth_hdr_tso);
4279                proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4280        } else {
4281                hw_hdr_len = sizeof(struct qeth_hdr);
4282                proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
4283        }
4284
4285        rc = skb_cow_head(skb, hw_hdr_len);
4286        if (rc)
4287                return rc;
4288
4289        push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
4290                                      &elements);
4291        if (push_len < 0)
4292                return push_len;
4293        if (is_tso || !push_len) {
4294                /* HW header needs its own buffer element. */
4295                hd_len = hw_hdr_len + proto_len;
4296                data_offset = push_len + proto_len;
4297        }
4298        memset(hdr, 0, hw_hdr_len);
4299        fill_header(queue, hdr, skb, proto, frame_len);
4300        if (is_tso)
4301                qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
4302                                  frame_len - proto_len, skb, proto_len);
4303
4304        if (IS_IQD(card)) {
4305                rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
4306                                 hd_len);
4307        } else {
4308                /* TODO: drop skb_orphan() once TX completion is fast enough */
4309                skb_orphan(skb);
4310                spin_lock(&queue->lock);
4311                rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
4312                                         hd_len, elements);
4313                spin_unlock(&queue->lock);
4314        }
4315
4316        if (rc && !push_len)
4317                kmem_cache_free(qeth_core_header_cache, hdr);
4318
4319        return rc;
4320}
4321EXPORT_SYMBOL_GPL(qeth_xmit);
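/*
 * A minimal, illustrative .ndo_start_xmit-style use of qeth_xmit(); queue
 * selection is omitted and my_fill_header stands in for a discipline's
 * header-building callback:
 *
 *	rc = qeth_xmit(card, skb, queue, vlan_get_protocol(skb),
 *		       my_fill_header);
 *	if (rc)
 *		dev_kfree_skb_any(skb);
 *	return NETDEV_TX_OK;
 */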
4322
4323static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
4324                struct qeth_reply *reply, unsigned long data)
4325{
4326        struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4327        struct qeth_ipacmd_setadpparms *setparms;
4328
4329        QETH_CARD_TEXT(card, 4, "prmadpcb");
4330
4331        setparms = &(cmd->data.setadapterparms);
4332        if (qeth_setadpparms_inspect_rc(cmd)) {
4333                QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
4334                setparms->data.mode = SET_PROMISC_MODE_OFF;
4335        }
4336        card->info.promisc_mode = setparms->data.mode;
4337        return (cmd->hdr.return_code) ? -EIO : 0;
4338}
4339
4340void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable)
4341{
4342        enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON :
4343                                                    SET_PROMISC_MODE_OFF;
4344        struct qeth_cmd_buffer *iob;
4345        struct qeth_ipa_cmd *cmd;
4346
4347        QETH_CARD_TEXT(card, 4, "setprom");
4348        QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
4349
4350        iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
4351                                   SETADP_DATA_SIZEOF(mode));
4352        if (!iob)
4353                return;
4354        cmd = __ipa_cmd(iob);
4355        cmd->data.setadapterparms.data.mode = mode;
4356        qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
4357}
4358EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
4359
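/*
 * READ_MAC handling: qeth_setadpparms_change_macaddr() issues an
 * ALTER_MAC_ADDRESS adapter command with CHANGE_ADDR_READ_MAC, and the
 * callback below copies the address reported by the adapter into the
 * netdevice. Invalid addresses, and non-virtual MACs reported by a
 * layer-2 OSD card, are rejected with -EADDRNOTAVAIL.
 */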
4360static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
4361                struct qeth_reply *reply, unsigned long data)
4362{
4363        struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4364        struct qeth_ipacmd_setadpparms *adp_cmd;
4365
4366        QETH_CARD_TEXT(card, 4, "chgmaccb");
4367        if (qeth_setadpparms_inspect_rc(cmd))
4368                return -EIO;
4369
4370        adp_cmd = &cmd->data.setadapterparms;
4371        if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr))
4372                return -EADDRNOTAVAIL;
4373
4374        if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
4375            !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
4376                return -EADDRNOTAVAIL;
4377
4378        ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr);
4379        return 0;
4380}
4381
4382int qeth_setadpparms_change_macaddr(struct qeth_card *card)
4383{
4384        int rc;
4385        struct qeth_cmd_buffer *iob;
4386        struct qeth_ipa_cmd *cmd;
4387
4388        QETH_CARD_TEXT(card, 4, "chgmac");
4389
4390        iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
4391                                   SETADP_DATA_SIZEOF(change_addr));
4392        if (!iob)
4393                return -ENOMEM;
4394        cmd = __ipa_cmd(iob);
4395        cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
4396        cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
4397        ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
4398                        card->dev->dev_addr);
4399        rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
4400                               NULL);
4401        return rc;
4402}
4403EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
4404
4405static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
4406                struct qeth_reply *reply, unsigned long data)
4407{
4408        struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4409        struct qeth_set_access_ctrl *access_ctrl_req;
4410
4411        QETH_CARD_TEXT(card, 4, "setaccb");
4412
4413        access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4414        QETH_CARD_TEXT_(card, 2, "rc=%d",
4415                        cmd->data.setadapterparms.hdr.return_code);
4416        if (cmd->data.setadapterparms.hdr.return_code !=
4417                                                SET_ACCESS_CTRL_RC_SUCCESS)
4418                QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
4419                                 access_ctrl_req->subcmd_code, CARD_DEVID(card),
4420                                 cmd->data.setadapterparms.hdr.return_code);
4421        switch (qeth_setadpparms_inspect_rc(cmd)) {
4422        case SET_ACCESS_CTRL_RC_SUCCESS:
4423                if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE)
4424                        dev_info(&card->gdev->dev,
4425                            "QDIO data connection isolation is deactivated\n");
4426                else
4427                        dev_info(&card->gdev->dev,
4428                            "QDIO data connection isolation is activated\n");
4429                return 0;
4430        case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
4431                QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
4432                                 CARD_DEVID(card));
4433                return 0;
4434        case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
4435                QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
4436                                 CARD_DEVID(card));
4437                return 0;
4438        case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
4439                dev_err(&card->gdev->dev, "Adapter does not "
4440                        "support QDIO data connection isolation\n");
4441                return -EOPNOTSUPP;
4442        case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
4443                dev_err(&card->gdev->dev,
4444                        "Adapter is dedicated. "
4445                        "QDIO data connection isolation not supported\n");
4446                return -EOPNOTSUPP;
4447        case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
4448                dev_err(&card->gdev->dev,
4449                        "TSO does not permit QDIO data connection isolation\n");
4450                return -EPERM;
4451        case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
4452                dev_err(&card->gdev->dev, "The adjacent switch port does not "
4453                        "support reflective relay mode\n");
4454                return -EOPNOTSUPP;
4455        case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
4456                dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
4457                                        "enabled at the adjacent switch port\n");
4458                return -EREMOTEIO;
4459        case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
4460                dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
4461                                        "at the adjacent switch failed\n");
4462                /* benign error while disabling ISOLATION_MODE_FWD */
4463                return 0;
4464        default:
4465                return -EIO;
4466        }
4467}
4468
4469int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
4470                                     enum qeth_ipa_isolation_modes mode)
4471{
4472        int rc;
4473        struct qeth_cmd_buffer *iob;
4474        struct qeth_ipa_cmd *cmd;
4475        struct qeth_set_access_ctrl *access_ctrl_req;
4476
4477        QETH_CARD_TEXT(card, 4, "setacctl");
4478
4479        if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
4480                dev_err(&card->gdev->dev,
4481                        "Adapter does not support QDIO data connection isolation\n");
4482                return -EOPNOTSUPP;
4483        }
4484
4485        iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
4486                                   SETADP_DATA_SIZEOF(set_access_ctrl));
4487        if (!iob)
4488                return -ENOMEM;
4489        cmd = __ipa_cmd(iob);
4490        access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4491        access_ctrl_req->subcmd_code = mode;
4492
4493        rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
4494                               NULL);
4495        if (rc) {
4496                QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
4497                QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d)) on device %x: sending failed\n",
4498                                 rc, CARD_DEVID(card));
4499        }
4500
4501        return rc;
4502}
4503
4504void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue)
4505{
4506        struct qeth_card *card;
4507
4508        card = dev->ml_priv;
4509        QETH_CARD_TEXT(card, 4, "txtimeo");
4510        qeth_schedule_recovery(card);
4511}
4512EXPORT_SYMBOL_GPL(qeth_tx_timeout);
4513
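/*
 * qeth devices have no real MII PHY. qeth_mdio_read() emulates the common
 * MII registers for ioctl users; the values are synthesized from the
 * card's link_type, its MAC address and the RX error counters rather than
 * read from hardware, so they are only an approximation of the link state.
 */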
4514static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4515{
4516        struct qeth_card *card = dev->ml_priv;
4517        int rc = 0;
4518
4519        switch (regnum) {
4520        case MII_BMCR: /* Basic mode control register */
4521                rc = BMCR_FULLDPLX;
4522                if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
4523                    (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
4524                    (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
4525                        rc |= BMCR_SPEED100;
4526                break;
4527        case MII_BMSR: /* Basic mode status register */
4528                rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
4529                     BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
4530                     BMSR_100BASE4;
4531                break;
4532        case MII_PHYSID1: /* PHYS ID 1 */
4533                rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
4534                     dev->dev_addr[2];
4535                rc = (rc >> 5) & 0xFFFF;
4536                break;
4537        case MII_PHYSID2: /* PHYS ID 2 */
4538                rc = (dev->dev_addr[2] << 10) & 0xFFFF;
4539                break;
4540        case MII_ADVERTISE: /* Advertisement control reg */
4541                rc = ADVERTISE_ALL;
4542                break;
4543        case MII_LPA: /* Link partner ability reg */
4544                rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
4545                     LPA_100BASE4 | LPA_LPACK;
4546                break;
4547        case MII_EXPANSION: /* Expansion register */
4548                break;
4549        case MII_DCOUNTER: /* disconnect counter */
4550                break;
4551        case MII_FCSCOUNTER: /* false carrier counter */
4552                break;
4553        case MII_NWAYTEST: /* N-way auto-neg test register */
4554                break;
4555        case MII_RERRCOUNTER: /* rx error counter */
4556                rc = card->stats.rx_length_errors +
4557                     card->stats.rx_frame_errors +
4558                     card->stats.rx_fifo_errors;
4559                break;
4560        case MII_SREVISION: /* silicon revision */
4561                break;
4562        case MII_RESV1: /* reserved 1 */
4563                break;
4564        case MII_LBRERROR: /* loopback, rx, bypass error */
4565                break;
4566        case MII_PHYADDR: /* physical address */
4567                break;
4568        case MII_RESV2: /* reserved 2 */
4569                break;
4570        case MII_TPISTATUS: /* TPI status for 10mbps */
4571                break;
4572        case MII_NCONFIG: /* network interface config */
4573                break;
4574        default:
4575                break;
4576        }
4577        return rc;
4578}
4579
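/*
 * SNMP replies may arrive as a multi-part adapter-parameters response.
 * The callback below appends each part to the buffer prepared by
 * qeth_snmp_command() (tracked via qinfo->udata_offset): the first part
 * is copied including its qeth_snmp_cmd header, later parts starting at
 * the request data. Returning 1 while hdr.seq_no < hdr.used_total signals
 * that more reply parts are expected.
 */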
4580static int qeth_snmp_command_cb(struct qeth_card *card,
4581                                struct qeth_reply *reply, unsigned long data)
4582{
4583        struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4584        struct qeth_arp_query_info *qinfo = reply->param;
4585        struct qeth_ipacmd_setadpparms *adp_cmd;
4586        unsigned int data_len;
4587        void *snmp_data;
4588
4589        QETH_CARD_TEXT(card, 3, "snpcmdcb");
4590
4591        if (cmd->hdr.return_code) {
4592                QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
4593                return -EIO;
4594        }
4595        if (cmd->data.setadapterparms.hdr.return_code) {
4596                cmd->hdr.return_code =
4597                        cmd->data.setadapterparms.hdr.return_code;
4598                QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
4599                return -EIO;
4600        }
4601
4602        adp_cmd = &cmd->data.setadapterparms;
4603        data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr);
4604        if (adp_cmd->hdr.seq_no == 1) {
4605                snmp_data = &adp_cmd->data.snmp;
4606        } else {
4607                snmp_data = &adp_cmd->data.snmp.request;
4608                data_len -= offsetof(struct qeth_snmp_cmd, request);
4609        }
4610
4611        /* check if there is enough room in userspace */
4612        if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
4613                QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
4614                return -ENOSPC;
4615        }
4616        QETH_CARD_TEXT_(card, 4, "snore%i",
4617                        cmd->data.setadapterparms.hdr.used_total);
4618        QETH_CARD_TEXT_(card, 4, "sseqn%i",
4619                        cmd->data.setadapterparms.hdr.seq_no);
4620        /* copy entries to user buffer */
4621        memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
4622        qinfo->udata_offset += data_len;
4623
4624        if (cmd->data.setadapterparms.hdr.seq_no <
4625            cmd->data.setadapterparms.hdr.used_total)
4626                return 1;
4627        return 0;
4628}
4629
4630static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
4631{
4632        struct qeth_snmp_ureq __user *ureq;
4633        struct qeth_cmd_buffer *iob;
4634        unsigned int req_len;
4635        struct qeth_arp_query_info qinfo = {0, };
4636        int rc = 0;
4637
4638        QETH_CARD_TEXT(card, 3, "snmpcmd");
4639
4640        if (IS_VM_NIC(card))
4641                return -EOPNOTSUPP;
4642
4643        if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
4644            IS_LAYER3(card))
4645                return -EOPNOTSUPP;
4646
4647        ureq = (struct qeth_snmp_ureq __user *) udata;
4648        if (get_user(qinfo.udata_len, &ureq->hdr.data_len) ||
4649            get_user(req_len, &ureq->hdr.req_len))
4650                return -EFAULT;
4651
4652        /* Sanitize user input, to avoid overflows in iob size calculation: */
4653        if (req_len > QETH_BUFSIZE)
4654                return -EINVAL;
4655
4656        iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
4657        if (!iob)
4658                return -ENOMEM;
4659
4660        if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp,
4661                           &ureq->cmd, req_len)) {
4662                qeth_put_cmd(iob);
4663                return -EFAULT;
4664        }
4665
4666        qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
4667        if (!qinfo.udata) {
4668                qeth_put_cmd(iob);
4669                return -ENOMEM;
4670        }
4671        qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
4672
4673        rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
4674        if (rc)
4675                QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
4676                                 CARD_DEVID(card), rc);
4677        else {
4678                if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
4679                        rc = -EFAULT;
4680        }
4681
4682        kfree(qinfo.udata);
4683        return rc;
4684}
4685
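/*
 * QUERY OAT replies can likewise span several parts. The callback below
 * copies hdr.cmdlength bytes of each part, starting at the
 * setadapterparms header, into the buffer from qeth_qoat_priv and returns
 * 1 while hdr.seq_no < hdr.used_total to indicate that further parts are
 * expected.
 */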
4686static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
4687                                         struct qeth_reply *reply,
4688                                         unsigned long data)
4689{
4690        struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4691        struct qeth_qoat_priv *priv = reply->param;
4692        int resdatalen;
4693
4694        QETH_CARD_TEXT(card, 3, "qoatcb");
4695        if (qeth_setadpparms_inspect_rc(cmd))
4696                return -EIO;
4697
4698        resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
4699
4700        if (resdatalen > (priv->buffer_len - priv->response_len))
4701                return -ENOSPC;
4702
4703        memcpy(priv->buffer + priv->response_len,
4704               &cmd->data.setadapterparms.hdr, resdatalen);
4705        priv->response_len += resdatalen;
4706
4707        if (cmd->data.setadapterparms.hdr.seq_no <
4708            cmd->data.setadapterparms.hdr.used_total)
4709                return 1;
4710        return 0;
4711}
4712
4713static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
4714{
4715        int rc = 0;
4716        struct qeth_cmd_buffer *iob;
4717        struct qeth_ipa_cmd *cmd;
4718        struct qeth_query_oat *oat_req;
4719        struct qeth_query_oat_data oat_data;
4720        struct qeth_qoat_priv priv;
4721        void __user *tmp;
4722
4723        QETH_CARD_TEXT(card, 3, "qoatcmd");
4724
4725        if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT))
4726                return -EOPNOTSUPP;
4727
4728        if (copy_from_user(&oat_data, udata, sizeof(oat_data)))
4729                return -EFAULT;
4730
4731        priv.buffer_len = oat_data.buffer_len;
4732        priv.response_len = 0;
4733        priv.buffer = vzalloc(oat_data.buffer_len);
4734        if (!priv.buffer)
4735                return -ENOMEM;
4736
4737        iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
4738                                   SETADP_DATA_SIZEOF(query_oat));
4739        if (!iob) {
4740                rc = -ENOMEM;
4741                goto out_free;
4742        }
4743        cmd = __ipa_cmd(iob);
4744        oat_req = &cmd->data.setadapterparms.data.query_oat;
4745        oat_req->subcmd_code = oat_data.command;
4746
4747        rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv);
4748        if (!rc) {
4749                tmp = is_compat_task() ? compat_ptr(oat_data.ptr) :
4750                                         u64_to_user_ptr(oat_data.ptr);
4751                oat_data.response_len = priv.response_len;
4752
4753                if (copy_to_user(tmp, priv.buffer, priv.response_len) ||
4754                    copy_to_user(udata, &oat_data, sizeof(oat_data)))
4755                        rc = -EFAULT;
4756        }
4757
4758out_free:
4759        vfree(priv.buffer);
4760        return rc;
4761}
4762
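/*
 * qeth_query_card_info_cb() translates the adapter's CARD_INFO reply
 * (card_type, port_mode, port_speed) into the ethtool-style duplex, speed
 * and port fields of a struct qeth_link_info; unknown card types fall
 * back to the reported port_speed and PORT_OTHER. A short usage sketch
 * for qeth_query_card_info() below (assumed caller context):
 *
 *	struct qeth_link_info link_info;
 *
 *	if (!qeth_query_card_info(card, &link_info))
 *		...use link_info.speed / .duplex / .port...
 */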
4763static int qeth_query_card_info_cb(struct qeth_card *card,
4764                                   struct qeth_reply *reply, unsigned long data)
4765{
4766        struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4767        struct qeth_link_info *link_info = reply->param;
4768        struct qeth_query_card_info *card_info;
4769
4770        QETH_CARD_TEXT(card, 2, "qcrdincb");
4771        if (qeth_setadpparms_inspect_rc(cmd))
4772                return -EIO;
4773
4774        card_info = &cmd->data.setadapterparms.data.card_info;
4775        netdev_dbg(card->dev,
4776                   "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
4777                   card_info->card_type, card_info->port_mode,
4778                   card_info->port_speed);
4779
4780        switch (card_info->port_mode) {
4781        case CARD_INFO_PORTM_FULLDUPLEX:
4782                link_info->duplex = DUPLEX_FULL;
4783                break;
4784        case CARD_INFO_PORTM_HALFDUPLEX:
4785                link_info->duplex = DUPLEX_HALF;
4786                break;
4787        default:
4788                link_info->duplex = DUPLEX_UNKNOWN;
4789        }
4790
4791        switch (card_info->card_type) {
4792        case CARD_INFO_TYPE_1G_COPPER_A:
4793        case CARD_INFO_TYPE_1G_COPPER_B:
4794                link_info->speed = SPEED_1000;
4795                link_info->port = PORT_TP;
4796                break;
4797        case CARD_INFO_TYPE_1G_FIBRE_A:
4798        case CARD_INFO_TYPE_1G_FIBRE_B:
4799                link_info->speed = SPEED_1000;
4800                link_info->port = PORT_FIBRE;
4801                break;
4802        case CARD_INFO_TYPE_10G_FIBRE_A:
4803        case CARD_INFO_TYPE_10G_FIBRE_B:
4804                link_info->speed = SPEED_10000;
4805                link_info->port = PORT_FIBRE;
4806                break;
4807        default:
4808                switch (card_info->port_speed) {
4809                case CARD_INFO_PORTS_10M:
4810                        link_info->speed = SPEED_10;
4811                        break;
4812                case CARD_INFO_PORTS_100M:
4813                        link_info->speed = SPEED_100;
4814                        break;
4815                case CARD_INFO_PORTS_1G:
4816                        link_info->speed = SPEED_1000;
4817                        break;
4818                case CARD_INFO_PORTS_10G:
4819                        link_info->speed = SPEED_10000;
4820                        break;
4821                case CARD_INFO_PORTS_25G:
4822                        link_info->speed = SPEED_25000;
4823                        break;
4824                default:
4825                        link_info->speed = SPEED_UNKNOWN;
4826                }
4827
4828                link_info->port = PORT_OTHER;
4829        }
4830
4831        return 0;
4832}
4833
4834int qeth_query_card_info(struct qeth_card *card,
4835                         struct qeth_link_info *link_info)
4836{
4837        struct qeth_cmd_buffer *iob;
4838
4839        QETH_CARD_TEXT(card, 2, "qcrdinfo");
4840        if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
4841                return -EOPNOTSUPP;
4842        iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
4843        if (!iob)
4844                return -ENOMEM;
4845
4846        return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, link_info);
4847}
4848
4849static int qeth_init_link_info_oat_cb(struct qeth_card *card,
4850                                      struct qeth_reply *reply_priv,
4851                                      unsigned long data)
4852{
4853        struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4854        struct qeth_link_info *link_info = reply_priv->param;
4855        struct qeth_query_oat_physical_if *phys_if;
4856        struct qeth_query_oat_reply *reply;
4857
4858        if (qeth_setadpparms_inspect_rc(cmd))
4859                return -EIO;
4860
4861        /* Multi-part reply is unexpected, don't bother: */
4862        if (cmd->data.setadapterparms.hdr.used_total > 1)
4863                return -EINVAL;
4864
4865        /* Expect the reply to start with phys_if data: */
4866        reply = &cmd->data.setadapterparms.data.query_oat.reply[0];
4867        if (reply->type != QETH_QOAT_REPLY_TYPE_PHYS_IF ||
4868            reply->length < sizeof(*reply))
4869                return -EINVAL;
4870
4871        phys_if = &reply->phys_if;
4872
4873        switch (phys_if->speed_duplex) {
4874        case QETH_QOAT_PHYS_SPEED_10M_HALF:
4875                link_info->speed = SPEED_10;
4876                link_info->duplex = DUPLEX_HALF;
4877                break;
4878        case QETH_QOAT_PHYS_SPEED_10M_FULL:
4879                link_info->speed = SPEED_10;
4880                link_info->duplex = DUPLEX_FULL;
4881                break;
4882        case QETH_QOAT_PHYS_SPEED_100M_HALF:
4883                link_info->speed = SPEED_100;
4884                link_info->duplex = DUPLEX_HALF;
4885                break;
4886        case QETH_QOAT_PHYS_SPEED_100M_FULL:
4887                link_info->speed = SPEED_100;
4888                link_info->duplex = DUPLEX_FULL;
4889                break;
4890        case QETH_QOAT_PHYS_SPEED_1000M_HALF:
4891                link_info->speed = SPEED_1000;
4892                link_info->duplex = DUPLEX_HALF;
4893                break;
4894        case QETH_QOAT_PHYS_SPEED_1000M_FULL:
4895                link_info->speed = SPEED_1000;
4896                link_info->duplex = DUPLEX_FULL;
4897                break;
4898        case QETH_QOAT_PHYS_SPEED_10G_FULL:
4899                link_info->speed = SPEED_10000;
4900                link_info->duplex = DUPLEX_FULL;
4901                break;
4902        case QETH_QOAT_PHYS_SPEED_25G_FULL:
4903                link_info->speed = SPEED_25000;
4904                link_info->duplex = DUPLEX_FULL;
4905                break;
4906        case QETH_QOAT_PHYS_SPEED_UNKNOWN:
4907        default:
4908                link_info->speed = SPEED_UNKNOWN;
4909                link_info->duplex = DUPLEX_UNKNOWN;
4910                break;
4911        }
4912
4913        switch (phys_if->media_type) {
4914        case QETH_QOAT_PHYS_MEDIA_COPPER:
4915                link_info->port = PORT_TP;
4916                link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
4917                break;
4918        case QETH_QOAT_PHYS_MEDIA_FIBRE_SHORT:
4919                link_info->port = PORT_FIBRE;
4920                link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT;
4921                break;
4922        case QETH_QOAT_PHYS_MEDIA_FIBRE_LONG:
4923                link_info->port = PORT_FIBRE;
4924                link_info->link_mode = QETH_LINK_MODE_FIBRE_LONG;
4925                break;
4926        default:
4927                link_info->port = PORT_OTHER;
4928                link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
4929                break;
4930        }
4931
4932        return 0;
4933}
4934
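/*
 * qeth_init_link_info() seeds the link parameters from static knowledge
 * (IQD and VM NICs are reported as 10G fibre, everything else is derived
 * from the negotiated link_type) and then, if the adapter supports it,
 * refines speed/duplex/port/link_mode with the more precise data from a
 * QUERY OAT physical-interface reply.
 */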
4935static void qeth_init_link_info(struct qeth_card *card)
4936{
4937        card->info.link_info.duplex = DUPLEX_FULL;
4938
4939        if (IS_IQD(card) || IS_VM_NIC(card)) {
4940                card->info.link_info.speed = SPEED_10000;
4941                card->info.link_info.port = PORT_FIBRE;
4942                card->info.link_info.link_mode = QETH_LINK_MODE_FIBRE_SHORT;
4943        } else {
4944                switch (card->info.link_type) {
4945                case QETH_LINK_TYPE_FAST_ETH:
4946                case QETH_LINK_TYPE_LANE_ETH100:
4947                        card->info.link_info.speed = SPEED_100;
4948                        card->info.link_info.port = PORT_TP;
4949                        break;
4950                case QETH_LINK_TYPE_GBIT_ETH:
4951                case QETH_LINK_TYPE_LANE_ETH1000:
4952                        card->info.link_info.speed = SPEED_1000;
4953                        card->info.link_info.port = PORT_FIBRE;
4954                        break;
4955                case QETH_LINK_TYPE_10GBIT_ETH:
4956                        card->info.link_info.speed = SPEED_10000;
4957                        card->info.link_info.port = PORT_FIBRE;
4958                        break;
4959                case QETH_LINK_TYPE_25GBIT_ETH:
4960                        card->info.link_info.speed = SPEED_25000;
4961                        card->info.link_info.port = PORT_FIBRE;
4962                        break;
4963                default:
4964                        dev_info(&card->gdev->dev, "Unknown link type %x\n",
4965                                 card->info.link_type);
4966                        card->info.link_info.speed = SPEED_UNKNOWN;
4967                        card->info.link_info.port = PORT_OTHER;
4968                }
4969
4970                card->info.link_info.link_mode = QETH_LINK_MODE_UNKNOWN;
4971        }
4972
4973        /* Get more accurate data via QUERY OAT: */
4974        if (qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
4975                struct qeth_link_info link_info;
4976                struct qeth_cmd_buffer *iob;
4977
4978                iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
4979                                           SETADP_DATA_SIZEOF(query_oat));
4980                if (iob) {
4981                        struct qeth_ipa_cmd *cmd = __ipa_cmd(iob);
4982                        struct qeth_query_oat *oat_req;
4983
4984                        oat_req = &cmd->data.setadapterparms.data.query_oat;
4985                        oat_req->subcmd_code = QETH_QOAT_SCOPE_INTERFACE;
4986
4987                        if (!qeth_send_ipa_cmd(card, iob,
4988                                               qeth_init_link_info_oat_cb,
4989                                               &link_info)) {
4990                                if (link_info.speed != SPEED_UNKNOWN)
4991                                        card->info.link_info.speed = link_info.speed;
4992                                if (link_info.duplex != DUPLEX_UNKNOWN)
4993                                        card->info.link_info.duplex = link_info.duplex;
4994                                if (link_info.port != PORT_OTHER)
4995                                        card->info.link_info.port = link_info.port;
4996                                if (link_info.link_mode != QETH_LINK_MODE_UNKNOWN)
4997                                        card->info.link_info.link_mode = link_info.link_mode;
4998                        }
4999                }
5000        }
5001}
5002
5003/**
5004 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
5005 * @card: pointer to a qeth_card
5006 *
5007 * Return:
5008 *      0, if a MAC address has been set for the card's netdevice
5009 *      a negative errno, for various error conditions
5010 */
5011int qeth_vm_request_mac(struct qeth_card *card)
5012{
5013        struct diag26c_mac_resp *response;
5014        struct diag26c_mac_req *request;
5015        int rc;
5016
5017        QETH_CARD_TEXT(card, 2, "vmreqmac");
5018
5019        request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
5020        response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
5021        if (!request || !response) {
5022                rc = -ENOMEM;
5023                goto out;
5024        }
5025
5026        request->resp_buf_len = sizeof(*response);
5027        request->resp_version = DIAG26C_VERSION2;
5028        request->op_code = DIAG26C_GET_MAC;
5029        request->devno = card->info.ddev_devno;
5030
5031        QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
5032        rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
5033        QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
5034        if (rc)
5035                goto out;
5036        QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
5037
5038        if (request->resp_buf_len < sizeof(*response) ||
5039            response->version != request->resp_version) {
5040                rc = -EIO;
5041                QETH_CARD_TEXT(card, 2, "badresp");
5042                QETH_CARD_HEX(card, 2, &request->resp_buf_len,
5043                              sizeof(request->resp_buf_len));
5044        } else if (!is_valid_ether_addr(response->mac)) {
5045                rc = -EINVAL;
5046                QETH_CARD_TEXT(card, 2, "badmac");
5047                QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
5048        } else {
5049                ether_addr_copy(card->dev->dev_addr, response->mac);
5050        }
5051
5052out:
5053        kfree(response);
5054        kfree(request);
5055        return rc;
5056}
5057EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
5058
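/*
 * qeth_determine_capabilities() briefly starts the data channel if it is
 * not online yet, reads the device's configuration data and its QDIO SSQD
 * descriptor, and uses the qdioac bits to decide whether Completion
 * Queueing is available; if not, the CQ option is forced to
 * QETH_CQ_NOTAVAILABLE.
 */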
5059static void qeth_determine_capabilities(struct qeth_card *card)
5060{
5061        struct qeth_channel *channel = &card->data;
5062        struct ccw_device *ddev = channel->ccwdev;
5063        int rc;
5064        int ddev_offline = 0;
5065
5066        QETH_CARD_TEXT(card, 2, "detcapab");
5067        if (!ddev->online) {
5068                ddev_offline = 1;
5069                rc = qeth_start_channel(channel);
5070                if (rc) {
5071                        QETH_CARD_TEXT_(card, 2, "3err%d", rc);
5072                        goto out;
5073                }
5074        }
5075
5076        rc = qeth_read_conf_data(card);
5077        if (rc) {
5078                QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
5079                                 CARD_DEVID(card), rc);
5080                QETH_CARD_TEXT_(card, 2, "5err%d", rc);
5081                goto out_offline;
5082        }
5083
5084        rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
5085        if (rc)
5086                QETH_CARD_TEXT_(card, 2, "6err%d", rc);
5087
5088        QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt);
5089        QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1);
5090        QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2);
5091        QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3);
5092        QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt);
5093        if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) ||
5094            ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) ||
5095            ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) {
5096                dev_info(&card->gdev->dev,
5097                        "Completion Queueing supported\n");
5098        } else {
5099                card->options.cq = QETH_CQ_NOTAVAILABLE;
5100        }
5101
5102out_offline:
5103        if (ddev_offline == 1)
5104                qeth_stop_channel(channel);
5105out:
5106        return;
5107}
5108
5109static void qeth_read_ccw_conf_data(struct qeth_card *card)
5110{
5111        struct qeth_card_info *info = &card->info;
5112        struct ccw_device *cdev = CARD_DDEV(card);
5113        struct ccw_dev_id dev_id;
5114
5115        QETH_CARD_TEXT(card, 2, "ccwconfd");
5116        ccw_device_get_id(cdev, &dev_id);
5117
5118        info->ddev_devno = dev_id.devno;
5119        info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) &&
5120                          !ccw_device_get_iid(cdev, &info->iid) &&
5121                          !ccw_device_get_chid(cdev, 0, &info->chid);
5122        info->ssid = dev_id.ssid;
5123
5124        dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n",
5125                 info->chid, info->chpid);
5126
5127        QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno);
5128        QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid);
5129        QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid);
5130        QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid);
5131        QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid);
5132        QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid);
5133        QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid);
5134}
5135
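/*
 * qeth_qdio_establish() wires the previously allocated input/output
 * queues into a struct qdio_initialize (including the QIB parameters for
 * OSA devices and the CQ input queue when enabled) and performs the
 * qdio_allocate()/qdio_establish() handshake exactly once, guarded by the
 * ALLOCATED -> ESTABLISHED state transition.
 */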
5136static int qeth_qdio_establish(struct qeth_card *card)
5137{
5138        struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES];
5139        struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES];
5140        struct qeth_qib_parms *qib_parms = NULL;
5141        struct qdio_initialize init_data;
5142        unsigned int i;
5143        int rc = 0;
5144
5145        QETH_CARD_TEXT(card, 2, "qdioest");
5146
5147        if (!IS_IQD(card) && !IS_VM_NIC(card)) {
5148                qib_parms = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL);
5149                if (!qib_parms)
5150                        return -ENOMEM;
5151
5152                qeth_fill_qib_parms(card, qib_parms);
5153        }
5154
5155        in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs;
5156        if (card->options.cq == QETH_CQ_ENABLED)
5157                in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs;
5158
5159        for (i = 0; i < card->qdio.no_out_queues; i++)
5160                out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs;
5161
5162        memset(&init_data, 0, sizeof(struct qdio_initialize));
5163        init_data.q_format               = IS_IQD(card) ? QDIO_IQDIO_QFMT :
5164                                                          QDIO_QETH_QFMT;
5165        init_data.qib_param_field_format = 0;
5166        init_data.qib_param_field        = (void *)qib_parms;
5167        init_data.no_input_qs            = card->qdio.no_in_queues;
5168        init_data.no_output_qs           = card->qdio.no_out_queues;
5169        init_data.input_handler          = qeth_qdio_input_handler;
5170        init_data.output_handler         = qeth_qdio_output_handler;
5171        init_data.irq_poll               = qeth_qdio_poll;
5172        init_data.int_parm               = (unsigned long) card;
5173        init_data.input_sbal_addr_array  = in_sbal_ptrs;
5174        init_data.output_sbal_addr_array = out_sbal_ptrs;
5175
5176        if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
5177                QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
5178                rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs,
5179                                   init_data.no_output_qs);
5180                if (rc) {
5181                        atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
5182                        goto out;
5183                }
5184                rc = qdio_establish(CARD_DDEV(card), &init_data);
5185                if (rc) {
5186                        atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
5187                        qdio_free(CARD_DDEV(card));
5188                }
5189        }
5190
5191        switch (card->options.cq) {
5192        case QETH_CQ_ENABLED:
5193                dev_info(&card->gdev->dev, "Completion Queue support enabled\n");
5194                break;
5195        case QETH_CQ_DISABLED:
5196                dev_info(&card->gdev->dev, "Completion Queue support disabled\n");
5197                break;
5198        default:
5199                break;
5200        }
5201
5202out:
5203        kfree(qib_parms);
5204        return rc;
5205}
5206
5207static void qeth_core_free_card(struct qeth_card *card)
5208{
5209        QETH_CARD_TEXT(card, 2, "freecrd");
5210
5211        unregister_service_level(&card->qeth_service_level);
5212        debugfs_remove_recursive(card->debugfs);
5213        qeth_put_cmd(card->read_cmd);
5214        destroy_workqueue(card->event_wq);
5215        dev_set_drvdata(&card->gdev->dev, NULL);
5216        kfree(card);
5217}
5218
5219static void qeth_trace_features(struct qeth_card *card)
5220{
5221        QETH_CARD_TEXT(card, 2, "features");
5222        QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
5223        QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
5224        QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
5225        QETH_CARD_HEX(card, 2, &card->info.diagass_support,
5226                      sizeof(card->info.diagass_support));
5227}
5228
5229static struct ccw_device_id qeth_ids[] = {
5230        {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
5231                                        .driver_info = QETH_CARD_TYPE_OSD},
5232        {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
5233                                        .driver_info = QETH_CARD_TYPE_IQD},
5234        {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
5235                                        .driver_info = QETH_CARD_TYPE_OSM},
5236#ifdef CONFIG_QETH_OSX
5237        {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
5238                                        .driver_info = QETH_CARD_TYPE_OSX},
5239#endif
5240        {},
5241};
5242MODULE_DEVICE_TABLE(ccw, qeth_ids);
5243
5244static struct ccw_driver qeth_ccw_driver = {
5245        .driver = {
5246                .owner = THIS_MODULE,
5247                .name = "qeth",
5248        },
5249        .ids = qeth_ids,
5250        .probe = ccwgroup_probe_ccwdev,
5251        .remove = ccwgroup_remove_ccwdev,
5252};
5253
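/*
 * qeth_hardsetup_card() performs the low-level bring-up shared by both
 * disciplines: clear and restart the CCW channels (retrying the IDX
 * activation up to three times), read the device capabilities, initialize
 * the MPC connection, issue STARTLAN to learn the carrier state, query
 * the supported IPA assists and adapter parameters, re-arm the HW trap
 * and isolation mode where configured, initialize the link info and
 * finally set up the QDIO queues.
 */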
5254static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
5255{
5256        int retries = 3;
5257        int rc;
5258
5259        QETH_CARD_TEXT(card, 2, "hrdsetup");
5260        atomic_set(&card->force_alloc_skb, 0);
5261        rc = qeth_update_from_chp_desc(card);
5262        if (rc)
5263                return rc;
5264retry:
5265        if (retries < 3)
5266                QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
5267                                 CARD_DEVID(card));
5268        rc = qeth_qdio_clear_card(card, !IS_IQD(card));
5269        qeth_stop_channel(&card->data);
5270        qeth_stop_channel(&card->write);
5271        qeth_stop_channel(&card->read);
5272        qdio_free(CARD_DDEV(card));
5273
5274        rc = qeth_start_channel(&card->read);
5275        if (rc)
5276                goto retriable;
5277        rc = qeth_start_channel(&card->write);
5278        if (rc)
5279                goto retriable;
5280        rc = qeth_start_channel(&card->data);
5281        if (rc)
5282                goto retriable;
5283retriable:
5284        if (rc == -ERESTARTSYS) {
5285                QETH_CARD_TEXT(card, 2, "break1");
5286                return rc;
5287        } else if (rc) {
5288                QETH_CARD_TEXT_(card, 2, "1err%d", rc);
5289                if (--retries < 0)
5290                        goto out;
5291                else
5292                        goto retry;
5293        }
5294
5295        qeth_determine_capabilities(card);
5296        qeth_read_ccw_conf_data(card);
5297        qeth_idx_init(card);
5298
5299        rc = qeth_idx_activate_read_channel(card);
5300        if (rc == -EINTR) {
5301                QETH_CARD_TEXT(card, 2, "break2");
5302                return rc;
5303        } else if (rc) {
5304                QETH_CARD_TEXT_(card, 2, "3err%d", rc);
5305                if (--retries < 0)
5306                        goto out;
5307                else
5308                        goto retry;
5309        }
5310
5311        rc = qeth_idx_activate_write_channel(card);
5312        if (rc == -EINTR) {
5313                QETH_CARD_TEXT(card, 2, "break3");
5314                return rc;
5315        } else if (rc) {
5316                QETH_CARD_TEXT_(card, 2, "4err%d", rc);
5317                if (--retries < 0)
5318                        goto out;
5319                else
5320                        goto retry;
5321        }
5322        card->read_or_write_problem = 0;
5323        rc = qeth_mpc_initialize(card);
5324        if (rc) {
5325                QETH_CARD_TEXT_(card, 2, "5err%d", rc);
5326                goto out;
5327        }
5328
5329        rc = qeth_send_startlan(card);
5330        if (rc) {
5331                QETH_CARD_TEXT_(card, 2, "6err%d", rc);
5332                if (rc == -ENETDOWN) {
5333                        dev_warn(&card->gdev->dev, "The LAN is offline\n");
5334                        *carrier_ok = false;
5335                } else {
5336                        goto out;
5337                }
5338        } else {
5339                *carrier_ok = true;
5340        }
5341
5342        card->options.ipa4.supported = 0;
5343        card->options.ipa6.supported = 0;
5344        card->options.adp.supported = 0;
5345        card->options.sbp.supported_funcs = 0;
5346        card->info.diagass_support = 0;
5347        rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
5348        if (rc == -ENOMEM)
5349                goto out;
5350        if (qeth_is_supported(card, IPA_IPV6)) {
5351                rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
5352                if (rc == -ENOMEM)
5353                        goto out;
5354        }
5355        if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
5356                rc = qeth_query_setadapterparms(card);
5357                if (rc < 0) {
5358                        QETH_CARD_TEXT_(card, 2, "7err%d", rc);
5359                        goto out;
5360                }
5361        }
5362        if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
5363                rc = qeth_query_setdiagass(card);
5364                if (rc)
5365                        QETH_CARD_TEXT_(card, 2, "8err%d", rc);
5366        }
5367
5368        qeth_trace_features(card);
5369
5370        if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
5371            (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
5372                card->info.hwtrap = 0;
5373
5374        if (card->options.isolation != ISOLATION_MODE_NONE) {
5375                rc = qeth_setadpparms_set_access_ctrl(card,
5376                                                      card->options.isolation);
5377                if (rc)
5378                        goto out;
5379        }
5380
5381        qeth_init_link_info(card);
5382
5383        rc = qeth_init_qdio_queues(card);
5384        if (rc) {
5385                QETH_CARD_TEXT_(card, 2, "9err%d", rc);
5386                goto out;
5387        }
5388
5389        return 0;
5390out:
5391        dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
5392                "from an error on the device\n");
5393        QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
5394                         CARD_DEVID(card), rc);
5395        return rc;
5396}
5397
5398static int qeth_set_online(struct qeth_card *card,
5399                           const struct qeth_discipline *disc)
5400{
5401        bool carrier_ok;
5402        int rc;
5403
5404        mutex_lock(&card->conf_mutex);
5405        QETH_CARD_TEXT(card, 2, "setonlin");
5406
5407        rc = qeth_hardsetup_card(card, &carrier_ok);
5408        if (rc) {
5409                QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
5410                rc = -ENODEV;
5411                goto err_hardsetup;
5412        }
5413
5414        qeth_print_status_message(card);
5415
5416        if (card->dev->reg_state != NETREG_REGISTERED)
5417                /* no need for locking / error handling at this early stage: */
5418                qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));
5419
5420        rc = disc->set_online(card, carrier_ok);
5421        if (rc)
5422                goto err_online;
5423
5424        /* let user_space know that device is online */
5425        kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
5426
5427        mutex_unlock(&card->conf_mutex);
5428        return 0;
5429
5430err_online:
5431err_hardsetup:
5432        qeth_qdio_clear_card(card, 0);
5433        qeth_clear_working_pool_list(card);
5434        qeth_flush_local_addrs(card);
5435
5436        qeth_stop_channel(&card->data);
5437        qeth_stop_channel(&card->write);
5438        qeth_stop_channel(&card->read);
5439        qdio_free(CARD_DDEV(card));
5440
5441        mutex_unlock(&card->conf_mutex);
5442        return rc;
5443}
5444
5445int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
5446                     bool resetting)
5447{
5448        int rc, rc2, rc3;
5449
5450        mutex_lock(&card->conf_mutex);
5451        QETH_CARD_TEXT(card, 3, "setoffl");
5452
5453        if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) {
5454                qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
5455                card->info.hwtrap = 1;
5456        }
5457
5458        /* cancel any stalled cmd that might block the rtnl: */
5459        qeth_clear_ipacmd_list(card);
5460
5461        rtnl_lock();
5462        card->info.open_when_online = card->dev->flags & IFF_UP;
5463        dev_close(card->dev);
5464        netif_device_detach(card->dev);
5465        netif_carrier_off(card->dev);
5466        rtnl_unlock();
5467
5468        cancel_work_sync(&card->rx_mode_work);
5469
5470        disc->set_offline(card);
5471
5472        qeth_qdio_clear_card(card, 0);
5473        qeth_drain_output_queues(card);
5474        qeth_clear_working_pool_list(card);
5475        qeth_flush_local_addrs(card);
5476        card->info.promisc_mode = 0;
5477
5478        rc  = qeth_stop_channel(&card->data);
5479        rc2 = qeth_stop_channel(&card->write);
5480        rc3 = qeth_stop_channel(&card->read);
5481        if (!rc)
5482                rc = (rc2) ? rc2 : rc3;
5483        if (rc)
5484                QETH_CARD_TEXT_(card, 2, "1err%d", rc);
5485        qdio_free(CARD_DDEV(card));
5486
5487        /* let user_space know that device is offline */
5488        kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
5489
5490        mutex_unlock(&card->conf_mutex);
5491        return 0;
5492}
5493EXPORT_SYMBOL_GPL(qeth_set_offline);
5494
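/*
 * qeth_do_reset() runs as the QETH_RECOVER_THREAD worker: it takes the
 * device offline and back online with its current discipline, and on
 * failure leaves the ccwgroup device offline and warns the user.
 */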
5495static int qeth_do_reset(void *data)
5496{
5497        const struct qeth_discipline *disc;
5498        struct qeth_card *card = data;
5499        int rc;
5500
5501        /* Lock-free, other users will block until we are done. */
5502        disc = card->discipline;
5503
5504        QETH_CARD_TEXT(card, 2, "recover1");
5505        if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
5506                return 0;
5507        QETH_CARD_TEXT(card, 2, "recover2");
5508        dev_warn(&card->gdev->dev,
5509                 "A recovery process has been started for the device\n");
5510
5511        qeth_set_offline(card, disc, true);
5512        rc = qeth_set_online(card, disc);
5513        if (!rc) {
5514                dev_info(&card->gdev->dev,
5515                         "Device successfully recovered!\n");
5516        } else {
5517                qeth_set_offline(card, disc, true);
5518                ccwgroup_set_offline(card->gdev, false);
5519                dev_warn(&card->gdev->dev,
5520                         "The qeth device driver failed to recover from an error on the device\n");
5521        }
5522        qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
5523        qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
5524        return 0;
5525}
5526
5527#if IS_ENABLED(CONFIG_QETH_L3)
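/*
 * Layer-3 RX frames arrive without an Ethernet header, so
 * qeth_l3_rebuild_skb() reconstructs one: AF_IUCV traffic on IQD gets a
 * fake LL header right away; otherwise the destination MAC is derived
 * from the cast type (multicast/broadcast mapping, own address for
 * unicast), the source MAC is taken from the qeth_hdr when provided or
 * faked, and any VLAN tag carried in the header extension flags is
 * transferred into the skb.
 */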
5528static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
5529                                struct qeth_hdr *hdr)
5530{
5531        struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
5532        struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
5533        struct net_device *dev = skb->dev;
5534
5535        if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
5536                dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
5537                                "FAKELL", skb->len);
5538                return;
5539        }
5540
5541        if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) {
5542                u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
5543                                                             ETH_P_IP;
5544                unsigned char tg_addr[ETH_ALEN];
5545
5546                skb_reset_network_header(skb);
5547                switch (l3_hdr->flags & QETH_HDR_CAST_MASK) {
5548                case QETH_CAST_MULTICAST:
5549                        if (prot == ETH_P_IP)
5550                                ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
5551                        else
5552                                ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
5553                        QETH_CARD_STAT_INC(card, rx_multicast);
5554                        break;
5555                case QETH_CAST_BROADCAST:
5556                        ether_addr_copy(tg_addr, dev->broadcast);
5557                        QETH_CARD_STAT_INC(card, rx_multicast);
5558                        break;
5559                default:
5560                        if (card->options.sniffer)
5561                                skb->pkt_type = PACKET_OTHERHOST;
5562                        ether_addr_copy(tg_addr, dev->dev_addr);
5563                }
5564
5565                if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
5566                        dev_hard_header(skb, dev, prot, tg_addr,
5567                                        &l3_hdr->next_hop.rx.src_mac, skb->len);
5568                else
5569                        dev_hard_header(skb, dev, prot, tg_addr, "FAKELL",
5570                                        skb->len);
5571        }
5572
5573        /* copy VLAN tag from hdr into skb */
5574        if (!card->options.sniffer &&
5575            (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
5576                                  QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
5577                u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
5578                                l3_hdr->vlan_id :
5579                                l3_hdr->next_hop.rx.vlan_id;
5580
5581                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
5582        }
5583}
5584#endif
5585
5586static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
5587                             struct qeth_hdr *hdr, bool uses_frags)
5588{
5589        struct napi_struct *napi = &card->napi;
5590        bool is_cso;
5591
5592        switch (hdr->hdr.l2.id) {
5593#if IS_ENABLED(CONFIG_QETH_L3)
5594        case QETH_HEADER_TYPE_LAYER3:
5595                qeth_l3_rebuild_skb(card, skb, hdr);
5596                is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ;
5597                break;
5598#endif
5599        case QETH_HEADER_TYPE_LAYER2:
5600                is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ;
5601                break;
5602        default:
5603                /* never happens: qeth_extract_skb() drops unknown header types */
5604                if (uses_frags)
5605                        napi_free_frags(napi);
5606                else
5607                        dev_kfree_skb_any(skb);
5608                return;
5609        }
5610
5611        if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) {
5612                skb->ip_summed = CHECKSUM_UNNECESSARY;
5613                QETH_CARD_STAT_INC(card, rx_skb_csum);
5614        } else {
5615                skb->ip_summed = CHECKSUM_NONE;
5616        }
5617
5618        QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
5619        QETH_CARD_STAT_INC(card, rx_packets);
5620        if (skb_is_nonlinear(skb)) {
5621                QETH_CARD_STAT_INC(card, rx_sg_skbs);
5622                QETH_CARD_STAT_ADD(card, rx_sg_frags,
5623                                   skb_shinfo(skb)->nr_frags);
5624        }
5625
5626        if (uses_frags) {
5627                napi_gro_frags(napi);
5628        } else {
5629                skb->protocol = eth_type_trans(skb, skb->dev);
5630                napi_gro_receive(napi, skb);
5631        }
5632}
5633
5634static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len)
5635{
5636        struct page *page = virt_to_page(data);
5637        unsigned int next_frag;
5638
5639        next_frag = skb_shinfo(skb)->nr_frags;
5640        get_page(page);
5641        skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len,
5642                        data_len);
5643}
5644
5645static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
5646{
5647        return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
5648}
5649
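/*
 * qeth_extract_skb() parses one packet from an inbound QDIO buffer,
 * starting at the element/offset cursor passed in by the caller. It
 * validates the qeth_hdr, picks between a linear copy (small packets, up
 * to rx_copybreak) and attaching the buffer pages as frags to a NAPI skb,
 * and updates *element_no / *__offset so the caller can iterate over the
 * remaining packets. A negative return value means the rest of the buffer
 * cannot be parsed (no more data, an unknown header type, or a truncated
 * packet).
 */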
5650static int qeth_extract_skb(struct qeth_card *card,
5651                            struct qeth_qdio_buffer *qethbuffer, u8 *element_no,
5652                            int *__offset)
5653{
5654        struct qeth_priv *priv = netdev_priv(card->dev);
5655        struct qdio_buffer *buffer = qethbuffer->buffer;
5656        struct napi_struct *napi = &card->napi;
5657        struct qdio_buffer_element *element;
5658        unsigned int linear_len = 0;
5659        bool uses_frags = false;
5660        int offset = *__offset;
5661        bool use_rx_sg = false;
5662        unsigned int headroom;
5663        struct qeth_hdr *hdr;
5664        struct sk_buff *skb;
5665        int skb_len = 0;
5666
5667        element = &buffer->element[*element_no];
5668
5669next_packet:
5670        /* qeth_hdr must not cross element boundaries */
5671        while (element->length < offset + sizeof(struct qeth_hdr)) {
5672                if (qeth_is_last_sbale(element))
5673                        return -ENODATA;
5674                element++;
5675                offset = 0;
5676        }
5677
5678        hdr = phys_to_virt(element->addr) + offset;
5679        offset += sizeof(*hdr);
5680        skb = NULL;
5681
5682        switch (hdr->hdr.l2.id) {
5683        case QETH_HEADER_TYPE_LAYER2:
5684                skb_len = hdr->hdr.l2.pkt_length;
5685                linear_len = ETH_HLEN;
5686                headroom = 0;
5687                break;
5688        case QETH_HEADER_TYPE_LAYER3:
5689                skb_len = hdr->hdr.l3.length;
5690                if (!IS_LAYER3(card)) {
5691                        QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
5692                        goto walk_packet;
5693                }
5694
5695                if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
5696                        linear_len = ETH_HLEN;
5697                        headroom = 0;
5698                        break;
5699                }
5700
5701                if (hdr->hdr.l3.flags & QETH_HDR_IPV6)
5702                        linear_len = sizeof(struct ipv6hdr);
5703                else
5704                        linear_len = sizeof(struct iphdr);
5705                headroom = ETH_HLEN;
5706                break;
5707        default:
5708                if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL)
5709                        QETH_CARD_STAT_INC(card, rx_frame_errors);
5710                else
5711                        QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
5712
5713                /* Can't determine packet length, drop the whole buffer. */
5714                return -EPROTONOSUPPORT;
5715        }
5716
5717        if (skb_len < linear_len) {
5718                QETH_CARD_STAT_INC(card, rx_dropped_runt);
5719                goto walk_packet;
5720        }
5721
5722        use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
5723                    (skb_len > READ_ONCE(priv->rx_copybreak) &&
5724                     !atomic_read(&card->force_alloc_skb));
5725
5726        if (use_rx_sg) {
5727                /* QETH_CQ_ENABLED only: */
5728                if (qethbuffer->rx_skb &&
5729                    skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) {
5730                        skb = qethbuffer->rx_skb;
5731                        qethbuffer->rx_skb = NULL;
5732                        goto use_skb;
5733                }
5734
5735                skb = napi_get_frags(napi);
5736                if (!skb) {
5737                        /* -ENOMEM, no point in falling back further. */
5738                        QETH_CARD_STAT_INC(card, rx_dropped_nomem);
5739                        goto walk_packet;
5740                }
5741
5742                if (skb_tailroom(skb) >= linear_len + headroom) {
5743                        uses_frags = true;
5744                        goto use_skb;
5745                }
5746
5747                netdev_info_once(card->dev,
5748                                 "Insufficient linear space in NAPI frags skb, need %u but have %u\n",
5749                                 linear_len + headroom, skb_tailroom(skb));
5750                /* Shouldn't happen. Don't optimize, fall back to linear skb. */
5751        }
5752
5753        linear_len = skb_len;
5754        skb = napi_alloc_skb(napi, linear_len + headroom);
5755        if (!skb) {
5756                QETH_CARD_STAT_INC(card, rx_dropped_nomem);
5757                goto walk_packet;
5758        }
5759
5760use_skb:
5761        if (headroom)
5762                skb_reserve(skb, headroom);
5763walk_packet:
5764        while (skb_len) {
5765                int data_len = min(skb_len, (int)(element->length - offset));
5766                char *data = phys_to_virt(element->addr) + offset;
5767
5768                skb_len -= data_len;
5769                offset += data_len;
5770
5771                /* Extract data from current element: */
5772                if (skb && data_len) {
5773                        if (linear_len) {
5774                                unsigned int copy_len;
5775
5776                                copy_len = min_t(unsigned int, linear_len,
5777                                                 data_len);
5778
5779                                skb_put_data(skb, data, copy_len);
5780                                linear_len -= copy_len;
5781                                data_len -= copy_len;
5782                                data += copy_len;
5783                        }
5784
5785                        if (data_len)
5786                                qeth_create_skb_frag(skb, data, data_len);
5787                }
5788
5789                /* Step forward to next element: */
5790                if (skb_len) {
5791                        if (qeth_is_last_sbale(element)) {
5792                                QETH_CARD_TEXT(card, 4, "unexeob");
5793                                QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
5794                                if (skb) {
5795                                        if (uses_frags)
5796                                                napi_free_frags(napi);
5797                                        else
5798                                                dev_kfree_skb_any(skb);
5799                                        QETH_CARD_STAT_INC(card,
5800                                                           rx_length_errors);
5801                                }
5802                                return -EMSGSIZE;
5803                        }
5804                        element++;
5805                        offset = 0;
5806                }
5807        }
5808
5809        /* This packet was skipped, go get another one: */
5810        if (!skb)
5811                goto next_packet;
5812
5813        *element_no = element - &buffer->element[0];
5814        *__offset = offset;
5815
5816        qeth_receive_skb(card, skb, hdr, uses_frags);
5817        return 0;
5818}
5819
5820static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget,
5821                                      struct qeth_qdio_buffer *buf, bool *done)
5822{
5823        unsigned int work_done = 0;
5824
5825        while (budget) {
5826                if (qeth_extract_skb(card, buf, &card->rx.buf_element,
5827                                     &card->rx.e_offset)) {
5828                        *done = true;
5829                        break;
5830                }
5831
5832                work_done++;
5833                budget--;
5834        }
5835
5836        return work_done;
5837}
5838
5839static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
5840{
5841        struct qeth_rx *ctx = &card->rx;
5842        unsigned int work_done = 0;
5843
5844        while (budget > 0) {
5845                struct qeth_qdio_buffer *buffer;
5846                unsigned int skbs_done = 0;
5847                bool done = false;
5848
5849                /* Fetch completed RX buffers: */
5850                if (!card->rx.b_count) {
5851                        card->rx.qdio_err = 0;
5852                        card->rx.b_count = qdio_inspect_queue(CARD_DDEV(card),
5853                                                              0, true,
5854                                                              &card->rx.b_index,
5855                                                              &card->rx.qdio_err);
5856                        if (card->rx.b_count <= 0) {
5857                                card->rx.b_count = 0;
5858                                break;
5859                        }
5860                }
5861
5862                /* Process one completed RX buffer: */
5863                buffer = &card->qdio.in_q->bufs[card->rx.b_index];
5864                if (!(card->rx.qdio_err &&
5865                      qeth_check_qdio_errors(card, buffer->buffer,
5866                                             card->rx.qdio_err, "qinerr")))
5867                        skbs_done = qeth_extract_skbs(card, budget, buffer,
5868                                                      &done);
5869                else
5870                        done = true;
5871
5872                work_done += skbs_done;
5873                budget -= skbs_done;
5874
5875                if (done) {
5876                        QETH_CARD_STAT_INC(card, rx_bufs);
5877                        qeth_put_buffer_pool_entry(card, buffer->pool_entry);
5878                        buffer->pool_entry = NULL;
5879                        card->rx.b_count--;
5880                        ctx->bufs_refill++;
5881                        ctx->bufs_refill -= qeth_rx_refill_queue(card,
5882                                                                 ctx->bufs_refill);
5883
5884                        /* Step forward to next buffer: */
5885                        card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1);
5886                        card->rx.buf_element = 0;
5887                        card->rx.e_offset = 0;
5888                }
5889        }
5890
5891        return work_done;
5892}
5893
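/* Drain the Completion Queue (input queue 1): each completed CQ buffer
 * carries QAOB notifications for TX buffers that were previously reported
 * as pending, and is handed to qeth_qdio_cq_handler().
 */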
5894static void qeth_cq_poll(struct qeth_card *card)
5895{
5896        unsigned int work_done = 0;
5897
5898        while (work_done < QDIO_MAX_BUFFERS_PER_Q) {
5899                unsigned int start, error;
5900                int completed;
5901
5902                completed = qdio_inspect_queue(CARD_DDEV(card), 1, true, &start,
5903                                               &error);
5904                if (completed <= 0)
5905                        return;
5906
5907                qeth_qdio_cq_handler(card, error, 1, start, completed);
5908                work_done += completed;
5909        }
5910}
5911
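/* Main RX NAPI handler: process completed RX buffers, kick the per-queue TX
 * NAPI instances when TX completion IRQs are in use, drain the CQ if enabled
 * and refill the RX queue. The QDIO interrupt is re-enabled only after
 * napi_complete_done(), and NAPI is rescheduled if qdio_start_irq() reports
 * more pending work.
 */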
5912int qeth_poll(struct napi_struct *napi, int budget)
5913{
5914        struct qeth_card *card = container_of(napi, struct qeth_card, napi);
5915        unsigned int work_done;
5916
5917        work_done = qeth_rx_poll(card, budget);
5918
5919        if (qeth_use_tx_irqs(card)) {
5920                struct qeth_qdio_out_q *queue;
5921                unsigned int i;
5922
5923                qeth_for_each_output_queue(card, queue, i) {
5924                        if (!qeth_out_queue_is_empty(queue))
5925                                napi_schedule(&queue->napi);
5926                }
5927        }
5928
5929        if (card->options.cq == QETH_CQ_ENABLED)
5930                qeth_cq_poll(card);
5931
5932        if (budget) {
5933                struct qeth_rx *ctx = &card->rx;
5934
5935                /* Process any substantial refill backlog: */
5936                ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill);
5937
5938                /* Exhausted the RX budget. Keep IRQ disabled, we get called again. */
5939                if (work_done >= budget)
5940                        return work_done;
5941        }
5942
5943        if (napi_complete_done(napi, work_done) &&
5944            qdio_start_irq(CARD_DDEV(card)))
5945                napi_schedule(napi);
5946
5947        return work_done;
5948}
5949EXPORT_SYMBOL_GPL(qeth_poll);
5950
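/* Complete one IQD TX buffer. A QDIO_ERROR_SLSB_PENDING completion means the
 * device hasn't finalized the transfer yet and will report the final status
 * later through the buffer's QAOB on the CQ. In that case the buffer is
 * parked on the queue's pending_bufs list and a freshly allocated buffer
 * takes over its slot, so the slot can be re-used immediately.
 */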
5951static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
5952                                 unsigned int bidx, unsigned int qdio_error,
5953                                 int budget)
5954{
5955        struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
5956        u8 sflags = buffer->buffer->element[15].sflags;
5957        struct qeth_card *card = queue->card;
5958        bool error = !!qdio_error;
5959
5960        if (qdio_error == QDIO_ERROR_SLSB_PENDING) {
5961                struct qaob *aob = buffer->aob;
5962                struct qeth_qaob_priv1 *priv;
5963                enum iucv_tx_notify notify;
5964
5965                if (!aob) {
5966                        netdev_WARN_ONCE(card->dev,
5967                                         "Pending TX buffer %#x without QAOB on TX queue %u\n",
5968                                         bidx, queue->queue_no);
5969                        qeth_schedule_recovery(card);
5970                        return;
5971                }
5972
5973                QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
5974
5975                priv = (struct qeth_qaob_priv1 *)&aob->user1;
5976                /* QAOB hasn't completed yet: */
5977                if (xchg(&priv->state, QETH_QAOB_PENDING) != QETH_QAOB_DONE) {
5978                        qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);
5979
5980                        /* Prepare the queue slot for immediate re-use: */
5981                        qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
5982                        if (qeth_alloc_out_buf(queue, bidx, GFP_ATOMIC)) {
5983                                QETH_CARD_TEXT(card, 2, "outofbuf");
5984                                qeth_schedule_recovery(card);
5985                        }
5986
5987                        list_add(&buffer->list_entry, &queue->pending_bufs);
5988                        /* Skip clearing the buffer: */
5989                        return;
5990                }
5991
5992                /* QAOB already completed: */
5993                notify = qeth_compute_cq_notification(aob->aorc, 0);
5994                qeth_notify_skbs(queue, buffer, notify);
5995                error = !!aob->aorc;
5996                memset(aob, 0, sizeof(*aob));
5997        } else if (card->options.cq == QETH_CQ_ENABLED) {
5998                qeth_notify_skbs(queue, buffer,
5999                                 qeth_compute_cq_notification(sflags, 0));
6000        }
6001
6002        qeth_clear_output_buffer(queue, buffer, error, budget);
6003}
6004
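/* Per-queue TX NAPI handler: reap completed TX buffers, run their completion
 * handling and wake the txq if xmit had stopped it on a full queue. Gives
 * the CPU a break after QDIO_MAX_BUFFERS_PER_Q completions, and arms the
 * rescan timer before completing when no TX interrupt is expected.
 */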
6005static int qeth_tx_poll(struct napi_struct *napi, int budget)
6006{
6007        struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
6008        unsigned int queue_no = queue->queue_no;
6009        struct qeth_card *card = queue->card;
6010        struct net_device *dev = card->dev;
6011        unsigned int work_done = 0;
6012        struct netdev_queue *txq;
6013
6014        if (IS_IQD(card))
6015                txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
6016        else
6017                txq = netdev_get_tx_queue(dev, queue_no);
6018
6019        while (1) {
6020                unsigned int start, error, i;
6021                unsigned int packets = 0;
6022                unsigned int bytes = 0;
6023                int completed;
6024
6025                qeth_tx_complete_pending_bufs(card, queue, false, budget);
6026
6027                if (qeth_out_queue_is_empty(queue)) {
6028                        napi_complete(napi);
6029                        return 0;
6030                }
6031
6032                /* Give the CPU a breather: */
6033                if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
6034                        QETH_TXQ_STAT_INC(queue, completion_yield);
6035                        if (napi_complete_done(napi, 0))
6036                                napi_schedule(napi);
6037                        return 0;
6038                }
6039
6040                completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false,
6041                                               &start, &error);
6042                if (completed <= 0) {
6043                        /* Ensure we see TX completion for pending work: */
6044                        if (napi_complete_done(napi, 0) &&
6045                            !atomic_read(&queue->set_pci_flags_count))
6046                                qeth_tx_arm_timer(queue, queue->rescan_usecs);
6047                        return 0;
6048                }
6049
6050                for (i = start; i < start + completed; i++) {
6051                        struct qeth_qdio_out_buffer *buffer;
6052                        unsigned int bidx = QDIO_BUFNR(i);
6053
6054                        buffer = queue->bufs[bidx];
6055                        packets += buffer->frames;
6056                        bytes += buffer->bytes;
6057
6058                        qeth_handle_send_error(card, buffer, error);
6059                        if (IS_IQD(card))
6060                                qeth_iqd_tx_complete(queue, bidx, error, budget);
6061                        else
6062                                qeth_clear_output_buffer(queue, buffer, error,
6063                                                         budget);
6064                }
6065
6066                atomic_sub(completed, &queue->used_buffers);
6067                work_done += completed;
6068                if (IS_IQD(card))
6069                        netdev_tx_completed_queue(txq, packets, bytes);
6070                else
6071                        qeth_check_outbound_queue(queue);
6072
6073                /* xmit may have observed the full condition, but not yet
6074                 * stopped the txq. In that case the wake-up below won't
6075                 * trigger. So before returning, xmit re-checks the txq's
6076                 * fill level and wakes the txq itself if needed.
6077                 */
6078                if (netif_tx_queue_stopped(txq) &&
6079                    !qeth_out_queue_is_full(queue))
6080                        netif_tx_wake_queue(txq);
6081        }
6082}
6083
6084static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
6085{
6086        if (!cmd->hdr.return_code)
6087                cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
6088        return cmd->hdr.return_code;
6089}
6090
6091static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
6092                                        struct qeth_reply *reply,
6093                                        unsigned long data)
6094{
6095        struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6096        struct qeth_ipa_caps *caps = reply->param;
6097
6098        if (qeth_setassparms_inspect_rc(cmd))
6099                return -EIO;
6100
6101        caps->supported = cmd->data.setassparms.data.caps.supported;
6102        caps->enabled = cmd->data.setassparms.data.caps.enabled;
6103        return 0;
6104}
6105
6106int qeth_setassparms_cb(struct qeth_card *card,
6107                        struct qeth_reply *reply, unsigned long data)
6108{
6109        struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6110
6111        QETH_CARD_TEXT(card, 4, "defadpcb");
6112
6113        if (cmd->hdr.return_code)
6114                return -EIO;
6115
6116        cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
6117        if (cmd->hdr.prot_version == QETH_PROT_IPV4)
6118                card->options.ipa4.enabled = cmd->hdr.assists.enabled;
6119        if (cmd->hdr.prot_version == QETH_PROT_IPV6)
6120                card->options.ipa6.enabled = cmd->hdr.assists.enabled;
6121        return 0;
6122}
6123EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
6124
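/* Allocate a SETASSPARMS IPA command for the given assist and command code,
 * with room for data_length bytes of command-specific payload. The caller
 * fills in the payload via __ipa_cmd(iob) and submits the command, e.g.:
 *
 *	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
 *				       prot);
 *	if (!iob)
 *		return -ENOMEM;
 *	rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
 */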
6125struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
6126                                                 enum qeth_ipa_funcs ipa_func,
6127                                                 u16 cmd_code,
6128                                                 unsigned int data_length,
6129                                                 enum qeth_prot_versions prot)
6130{
6131        struct qeth_ipacmd_setassparms *setassparms;
6132        struct qeth_ipacmd_setassparms_hdr *hdr;
6133        struct qeth_cmd_buffer *iob;
6134
6135        QETH_CARD_TEXT(card, 4, "getasscm");
6136        iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
6137                                 data_length +
6138                                 offsetof(struct qeth_ipacmd_setassparms,
6139                                          data));
6140        if (!iob)
6141                return NULL;
6142
6143        setassparms = &__ipa_cmd(iob)->data.setassparms;
6144        setassparms->assist_no = ipa_func;
6145
6146        hdr = &setassparms->hdr;
6147        hdr->length = sizeof(*hdr) + data_length;
6148        hdr->command_code = cmd_code;
6149        return iob;
6150}
6151EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
6152
6153int qeth_send_simple_setassparms_prot(struct qeth_card *card,
6154                                      enum qeth_ipa_funcs ipa_func,
6155                                      u16 cmd_code, u32 *data,
6156                                      enum qeth_prot_versions prot)
6157{
6158        unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
6159        struct qeth_cmd_buffer *iob;
6160
6161        QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
6162        iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
6163        if (!iob)
6164                return -ENOMEM;
6165
6166        if (data)
6167                __ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
6168        return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
6169}
6170EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
6171
6172static void qeth_unregister_dbf_views(void)
6173{
6174        int x;
6175
6176        for (x = 0; x < QETH_DBF_INFOS; x++) {
6177                debug_unregister(qeth_dbf[x].id);
6178                qeth_dbf[x].id = NULL;
6179        }
6180}
6181
6182void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
6183{
6184        char dbf_txt_buf[32];
6185        va_list args;
6186
6187        if (!debug_level_enabled(id, level))
6188                return;
6189        va_start(args, fmt);
6190        vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
6191        va_end(args);
6192        debug_text_event(id, level, dbf_txt_buf);
6193}
6194EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
6195
6196static int qeth_register_dbf_views(void)
6197{
6198        int ret;
6199        int x;
6200
6201        for (x = 0; x < QETH_DBF_INFOS; x++) {
6202                /* register the areas */
6203                qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
6204                                                qeth_dbf[x].pages,
6205                                                qeth_dbf[x].areas,
6206                                                qeth_dbf[x].len);
6207                if (qeth_dbf[x].id == NULL) {
6208                        qeth_unregister_dbf_views();
6209                        return -ENOMEM;
6210                }
6211
6212                /* register a view */
6213                ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
6214                if (ret) {
6215                        qeth_unregister_dbf_views();
6216                        return ret;
6217                }
6218
6219                /* set a passing level */
6220                debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
6221        }
6222
6223        return 0;
6224}
6225
6226static DEFINE_MUTEX(qeth_mod_mutex);    /* for synchronized module loading */
6227
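/* Bind the card to a layer discipline: load the qeth_l2 or qeth_l3 module if
 * necessary, take a reference on its discipline ops and run its setup hook.
 * The module reference is dropped again if setup fails.
 */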
6228int qeth_setup_discipline(struct qeth_card *card,
6229                          enum qeth_discipline_id discipline)
6230{
6231        int rc;
6232
6233        mutex_lock(&qeth_mod_mutex);
6234        switch (discipline) {
6235        case QETH_DISCIPLINE_LAYER3:
6236                card->discipline = try_then_request_module(
6237                        symbol_get(qeth_l3_discipline), "qeth_l3");
6238                break;
6239        case QETH_DISCIPLINE_LAYER2:
6240                card->discipline = try_then_request_module(
6241                        symbol_get(qeth_l2_discipline), "qeth_l2");
6242                break;
6243        default:
6244                break;
6245        }
6246        mutex_unlock(&qeth_mod_mutex);
6247
6248        if (!card->discipline) {
6249                dev_err(&card->gdev->dev,
6250                        "There is no kernel module to support discipline %d\n", discipline);
6251                return -EINVAL;
6252        }
6253
6254        rc = card->discipline->setup(card->gdev);
6255        if (rc) {
6256                if (discipline == QETH_DISCIPLINE_LAYER2)
6257                        symbol_put(qeth_l2_discipline);
6258                else
6259                        symbol_put(qeth_l3_discipline);
6260                card->discipline = NULL;
6261
6262                return rc;
6263        }
6264
6265        card->options.layer = discipline;
6266        return 0;
6267}
6268
6269void qeth_remove_discipline(struct qeth_card *card)
6270{
6271        card->discipline->remove(card->gdev);
6272
6273        if (IS_LAYER2(card))
6274                symbol_put(qeth_l2_discipline);
6275        else
6276                symbol_put(qeth_l3_discipline);
6277        card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
6278        card->discipline = NULL;
6279}
6280
6281static const struct device_type qeth_generic_devtype = {
6282        .name = "qeth_generic",
6283};
6284
6285#define DBF_NAME_LEN    20
6286
6287struct qeth_dbf_entry {
6288        char dbf_name[DBF_NAME_LEN];
6289        debug_info_t *dbf_info;
6290        struct list_head dbf_list;
6291};
6292
6293static LIST_HEAD(qeth_dbf_list);
6294static DEFINE_MUTEX(qeth_dbf_list_mutex);
6295
6296static debug_info_t *qeth_get_dbf_entry(char *name)
6297{
6298        struct qeth_dbf_entry *entry;
6299        debug_info_t *rc = NULL;
6300
6301        mutex_lock(&qeth_dbf_list_mutex);
6302        list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
6303                if (strcmp(entry->dbf_name, name) == 0) {
6304                        rc = entry->dbf_info;
6305                        break;
6306                }
6307        }
6308        mutex_unlock(&qeth_dbf_list_mutex);
6309        return rc;
6310}
6311
6312static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
6313{
6314        struct qeth_dbf_entry *new_entry;
6315
6316        card->debug = debug_register(name, 2, 1, 8);
6317        if (!card->debug) {
6318                QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
6319                goto err;
6320        }
6321        if (debug_register_view(card->debug, &debug_hex_ascii_view))
6322                goto err_dbg;
6323        new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
6324        if (!new_entry)
6325                goto err_dbg;
6326        strncpy(new_entry->dbf_name, name, DBF_NAME_LEN);
6327        new_entry->dbf_info = card->debug;
6328        mutex_lock(&qeth_dbf_list_mutex);
6329        list_add(&new_entry->dbf_list, &qeth_dbf_list);
6330        mutex_unlock(&qeth_dbf_list_mutex);
6331
6332        return 0;
6333
6334err_dbg:
6335        debug_unregister(card->debug);
6336err:
6337        return -ENOMEM;
6338}
6339
6340static void qeth_clear_dbf_list(void)
6341{
6342        struct qeth_dbf_entry *entry, *tmp;
6343
6344        mutex_lock(&qeth_dbf_list_mutex);
6345        list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
6346                list_del(&entry->dbf_list);
6347                debug_unregister(entry->dbf_info);
6348                kfree(entry);
6349        }
6350        mutex_unlock(&qeth_dbf_list_mutex);
6351}
6352
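/* Allocate the net_device that matches the card type: IQD devices get a
 * "hsi%d" name and up to QETH_MAX_OUT_QUEUES TX queues (one of which is
 * reserved for multicast, see qeth_iqd_select_queue()), OSM devices are
 * single-queue Ethernet, everything else is plain multiqueue Ethernet.
 * The MTU limits stay 0 until the device first goes online.
 */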
6353static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
6354{
6355        struct net_device *dev;
6356        struct qeth_priv *priv;
6357
6358        switch (card->info.type) {
6359        case QETH_CARD_TYPE_IQD:
6360                dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN,
6361                                       ether_setup, QETH_MAX_OUT_QUEUES, 1);
6362                break;
6363        case QETH_CARD_TYPE_OSM:
6364                dev = alloc_etherdev(sizeof(*priv));
6365                break;
6366        default:
6367                dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1);
6368        }
6369
6370        if (!dev)
6371                return NULL;
6372
6373        priv = netdev_priv(dev);
6374        priv->rx_copybreak = QETH_RX_COPYBREAK;
6375        priv->tx_wanted_queues = IS_IQD(card) ? QETH_IQD_MIN_TXQ : 1;
6376
6377        dev->ml_priv = card;
6378        dev->watchdog_timeo = QETH_TX_TIMEOUT;
6379        dev->min_mtu = 576;
6380        /* initialized when device first goes online: */
6381        dev->max_mtu = 0;
6382        dev->mtu = 0;
6383        SET_NETDEV_DEV(dev, &card->gdev->dev);
6384        netif_carrier_off(dev);
6385
6386        dev->ethtool_ops = &qeth_ethtool_ops;
6387        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
6388        dev->hw_features |= NETIF_F_SG;
6389        dev->vlan_features |= NETIF_F_SG;
6390        if (IS_IQD(card))
6391                dev->features |= NETIF_F_SG;
6392
6393        return dev;
6394}
6395
6396struct net_device *qeth_clone_netdev(struct net_device *orig)
6397{
6398        struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);
6399
6400        if (!clone)
6401                return NULL;
6402
6403        clone->dev_port = orig->dev_port;
6404        return clone;
6405}
6406
6407static int qeth_core_probe_device(struct ccwgroup_device *gdev)
6408{
6409        struct qeth_card *card;
6410        struct device *dev;
6411        int rc;
6412        enum qeth_discipline_id enforced_disc;
6413        char dbf_name[DBF_NAME_LEN];
6414
6415        QETH_DBF_TEXT(SETUP, 2, "probedev");
6416
6417        dev = &gdev->dev;
6418        if (!get_device(dev))
6419                return -ENODEV;
6420
6421        QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));
6422
6423        card = qeth_alloc_card(gdev);
6424        if (!card) {
6425                QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
6426                rc = -ENOMEM;
6427                goto err_dev;
6428        }
6429
6430        snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
6431                dev_name(&gdev->dev));
6432        card->debug = qeth_get_dbf_entry(dbf_name);
6433        if (!card->debug) {
6434                rc = qeth_add_dbf_entry(card, dbf_name);
6435                if (rc)
6436                        goto err_card;
6437        }
6438
6439        qeth_setup_card(card);
6440        card->dev = qeth_alloc_netdev(card);
6441        if (!card->dev) {
6442                rc = -ENOMEM;
6443                goto err_card;
6444        }
6445
6446        qeth_determine_capabilities(card);
6447        qeth_set_blkt_defaults(card);
6448
6449        card->qdio.no_out_queues = card->dev->num_tx_queues;
6450        rc = qeth_update_from_chp_desc(card);
6451        if (rc)
6452                goto err_chp_desc;
6453
6454        gdev->dev.groups = qeth_dev_groups;
6455
6456        enforced_disc = qeth_enforce_discipline(card);
6457        switch (enforced_disc) {
6458        case QETH_DISCIPLINE_UNDETERMINED:
6459                gdev->dev.type = &qeth_generic_devtype;
6460                break;
6461        default:
6462                card->info.layer_enforced = true;
6463                /* It's so early that we don't need the discipline_mutex yet. */
6464                rc = qeth_setup_discipline(card, enforced_disc);
6465                if (rc)
6466                        goto err_setup_disc;
6467
6468                break;
6469        }
6470
6471        return 0;
6472
6473err_setup_disc:
6474err_chp_desc:
6475        free_netdev(card->dev);
6476err_card:
6477        qeth_core_free_card(card);
6478err_dev:
6479        put_device(dev);
6480        return rc;
6481}
6482
6483static void qeth_core_remove_device(struct ccwgroup_device *gdev)
6484{
6485        struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6486
6487        QETH_CARD_TEXT(card, 2, "removedv");
6488
6489        mutex_lock(&card->discipline_mutex);
6490        if (card->discipline)
6491                qeth_remove_discipline(card);
6492        mutex_unlock(&card->discipline_mutex);
6493
6494        qeth_free_qdio_queues(card);
6495
6496        free_netdev(card->dev);
6497        qeth_core_free_card(card);
6498        put_device(&gdev->dev);
6499}
6500
6501static int qeth_core_set_online(struct ccwgroup_device *gdev)
6502{
6503        struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6504        int rc = 0;
6505        enum qeth_discipline_id def_discipline;
6506
6507        mutex_lock(&card->discipline_mutex);
6508        if (!card->discipline) {
6509                def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
6510                                                QETH_DISCIPLINE_LAYER2;
6511                rc = qeth_setup_discipline(card, def_discipline);
6512                if (rc)
6513                        goto err;
6514        }
6515
6516        rc = qeth_set_online(card, card->discipline);
6517
6518err:
6519        mutex_unlock(&card->discipline_mutex);
6520        return rc;
6521}
6522
6523static int qeth_core_set_offline(struct ccwgroup_device *gdev)
6524{
6525        struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6526        int rc;
6527
6528        mutex_lock(&card->discipline_mutex);
6529        rc = qeth_set_offline(card, card->discipline, false);
6530        mutex_unlock(&card->discipline_mutex);
6531
6532        return rc;
6533}
6534
6535static void qeth_core_shutdown(struct ccwgroup_device *gdev)
6536{
6537        struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6538
6539        qeth_set_allowed_threads(card, 0, 1);
6540        if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
6541                qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
6542        qeth_qdio_clear_card(card, 0);
6543        qeth_drain_output_queues(card);
6544        qdio_free(CARD_DDEV(card));
6545}
6546
6547static ssize_t group_store(struct device_driver *ddrv, const char *buf,
6548                           size_t count)
6549{
6550        int err;
6551
6552        err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
6553                                  buf);
6554
6555        return err ? err : count;
6556}
6557static DRIVER_ATTR_WO(group);
6558
6559static struct attribute *qeth_drv_attrs[] = {
6560        &driver_attr_group.attr,
6561        NULL,
6562};
6563static struct attribute_group qeth_drv_attr_group = {
6564        .attrs = qeth_drv_attrs,
6565};
6566static const struct attribute_group *qeth_drv_attr_groups[] = {
6567        &qeth_drv_attr_group,
6568        NULL,
6569};
6570
6571static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
6572        .driver = {
6573                .groups = qeth_drv_attr_groups,
6574                .owner = THIS_MODULE,
6575                .name = "qeth",
6576        },
6577        .ccw_driver = &qeth_ccw_driver,
6578        .setup = qeth_core_probe_device,
6579        .remove = qeth_core_remove_device,
6580        .set_online = qeth_core_set_online,
6581        .set_offline = qeth_core_set_offline,
6582        .shutdown = qeth_core_shutdown,
6583};
6584
6585int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
6586{
6587        struct qeth_card *card = dev->ml_priv;
6588        int rc = 0;
6589
6590        switch (cmd) {
6591        case SIOC_QETH_ADP_SET_SNMP_CONTROL:
6592                rc = qeth_snmp_command(card, data);
6593                break;
6594        case SIOC_QETH_GET_CARD_TYPE:
6595                if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
6596                    !IS_VM_NIC(card))
6597                        return 1;
6598                return 0;
6599        case SIOC_QETH_QUERY_OAT:
6600                rc = qeth_query_oat_command(card, data);
6601                break;
6602        default:
6603                if (card->discipline->do_ioctl)
6604                        rc = card->discipline->do_ioctl(dev, rq, data, cmd);
6605                else
6606                        rc = -EOPNOTSUPP;
6607        }
6608        if (rc)
6609                QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
6610        return rc;
6611}
6612EXPORT_SYMBOL_GPL(qeth_siocdevprivate);
6613
6614int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6615{
6616        struct qeth_card *card = dev->ml_priv;
6617        struct mii_ioctl_data *mii_data;
6618        int rc = 0;
6619
6620        switch (cmd) {
6621        case SIOCGMIIPHY:
6622                mii_data = if_mii(rq);
6623                mii_data->phy_id = 0;
6624                break;
6625        case SIOCGMIIREG:
6626                mii_data = if_mii(rq);
6627                if (mii_data->phy_id != 0)
6628                        rc = -EINVAL;
6629                else
6630                        mii_data->val_out = qeth_mdio_read(dev,
6631                                mii_data->phy_id, mii_data->reg_num);
6632                break;
6633        default:
6634                return -EOPNOTSUPP;
6635        }
6636        if (rc)
6637                QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
6638        return rc;
6639}
6640EXPORT_SYMBOL_GPL(qeth_do_ioctl);
6641
6642static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
6643                              unsigned long data)
6644{
6645        struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6646        u32 *features = reply->param;
6647
6648        if (qeth_setassparms_inspect_rc(cmd))
6649                return -EIO;
6650
6651        *features = cmd->data.setassparms.data.flags_32bit;
6652        return 0;
6653}
6654
6655static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
6656                             enum qeth_prot_versions prot)
6657{
6658        return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
6659                                                 NULL, prot);
6660}
6661
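/* Enable HW checksum offload via the usual two-step assist handshake:
 * IPA_CMD_ASS_START reports which checksum features the device offers,
 * IPA_CMD_ASS_ENABLE then switches on the required subset. If any step
 * fails, or the device doesn't confirm the required features as supported
 * and enabled, the assist is stopped again and an error is returned.
 */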
6662static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
6663                            enum qeth_prot_versions prot, u8 *lp2lp)
6664{
6665        u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
6666        struct qeth_cmd_buffer *iob;
6667        struct qeth_ipa_caps caps;
6668        u32 features;
6669        int rc;
6670
6671        /* some L3 HW requires combined L3+L4 csum offload: */
6672        if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
6673            cstype == IPA_OUTBOUND_CHECKSUM)
6674                required_features |= QETH_IPA_CHECKSUM_IP_HDR;
6675
6676        iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
6677                                       prot);
6678        if (!iob)
6679                return -ENOMEM;
6680
6681        rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
6682        if (rc)
6683                return rc;
6684
6685        if ((required_features & features) != required_features) {
6686                qeth_set_csum_off(card, cstype, prot);
6687                return -EOPNOTSUPP;
6688        }
6689
6690        iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
6691                                       SETASS_DATA_SIZEOF(flags_32bit),
6692                                       prot);
6693        if (!iob) {
6694                qeth_set_csum_off(card, cstype, prot);
6695                return -ENOMEM;
6696        }
6697
6698        if (features & QETH_IPA_CHECKSUM_LP2LP)
6699                required_features |= QETH_IPA_CHECKSUM_LP2LP;
6700        __ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
6701        rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
6702        if (rc) {
6703                qeth_set_csum_off(card, cstype, prot);
6704                return rc;
6705        }
6706
6707        if (!qeth_ipa_caps_supported(&caps, required_features) ||
6708            !qeth_ipa_caps_enabled(&caps, required_features)) {
6709                qeth_set_csum_off(card, cstype, prot);
6710                return -EOPNOTSUPP;
6711        }
6712
6713        dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
6714                 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
6715
6716        if (lp2lp)
6717                *lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP);
6718
6719        return 0;
6720}
6721
6722static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
6723                             enum qeth_prot_versions prot, u8 *lp2lp)
6724{
6725        return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) :
6726                    qeth_set_csum_off(card, cstype, prot);
6727}
6728
6729static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
6730                             unsigned long data)
6731{
6732        struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6733        struct qeth_tso_start_data *tso_data = reply->param;
6734
6735        if (qeth_setassparms_inspect_rc(cmd))
6736                return -EIO;
6737
6738        tso_data->mss = cmd->data.setassparms.data.tso.mss;
6739        tso_data->supported = cmd->data.setassparms.data.tso.supported;
6740        return 0;
6741}
6742
6743static int qeth_set_tso_off(struct qeth_card *card,
6744                            enum qeth_prot_versions prot)
6745{
6746        return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
6747                                                 IPA_CMD_ASS_STOP, NULL, prot);
6748}
6749
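/* Enable TSO with the same START -> check -> ENABLE -> verify pattern as the
 * checksum assists. The START reply also reports the MSS that the device
 * supports, which is logged once TSO is active.
 */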
6750static int qeth_set_tso_on(struct qeth_card *card,
6751                           enum qeth_prot_versions prot)
6752{
6753        struct qeth_tso_start_data tso_data;
6754        struct qeth_cmd_buffer *iob;
6755        struct qeth_ipa_caps caps;
6756        int rc;
6757
6758        iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
6759                                       IPA_CMD_ASS_START, 0, prot);
6760        if (!iob)
6761                return -ENOMEM;
6762
6763        rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
6764        if (rc)
6765                return rc;
6766
6767        if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
6768                qeth_set_tso_off(card, prot);
6769                return -EOPNOTSUPP;
6770        }
6771
6772        iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
6773                                       IPA_CMD_ASS_ENABLE,
6774                                       SETASS_DATA_SIZEOF(caps), prot);
6775        if (!iob) {
6776                qeth_set_tso_off(card, prot);
6777                return -ENOMEM;
6778        }
6779
6780        /* enable TSO capability */
6781        __ipa_cmd(iob)->data.setassparms.data.caps.enabled =
6782                QETH_IPA_LARGE_SEND_TCP;
6783        rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
6784        if (rc) {
6785                qeth_set_tso_off(card, prot);
6786                return rc;
6787        }
6788
6789        if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
6790            !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
6791                qeth_set_tso_off(card, prot);
6792                return -EOPNOTSUPP;
6793        }
6794
6795        dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
6796                 tso_data.mss);
6797        return 0;
6798}
6799
6800static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
6801                            enum qeth_prot_versions prot)
6802{
6803        return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
6804}
6805
6806static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
6807{
6808        int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
6809        int rc_ipv6;
6810
6811        if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
6812                rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6813                                            QETH_PROT_IPV4, NULL);
6814        if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
6815                /* at most the IPv4 Offload Assist is available, so rc_ipv4 decides */
6816                return rc_ipv4;
6817
6818        rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6819                                    QETH_PROT_IPV6, NULL);
6820
6821        if (on)
6822                /* enable: success if any Assist is active */
6823                return (rc_ipv6) ? rc_ipv4 : 0;
6824
6825        /* disable: failure if any Assist is still active */
6826        return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
6827}
6828
6829/**
6830 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
6831 * @dev:        a net_device
6832 */
6833void qeth_enable_hw_features(struct net_device *dev)
6834{
6835        struct qeth_card *card = dev->ml_priv;
6836        netdev_features_t features;
6837
6838        features = dev->features;
6839        /* force-off any feature that might need an IPA sequence.
6840         * netdev_update_features() will turn them back on as needed.
6841         */
6842        dev->features &= ~dev->hw_features;
6843        /* toggle VLAN filter, so that VIDs are re-programmed: */
6844        if (IS_LAYER2(card) && IS_VM_NIC(card)) {
6845                dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
6846                dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6847        }
6848        netdev_update_features(dev);
6849        if (features != dev->features)
6850                dev_warn(&card->gdev->dev,
6851                         "Device recovery failed to restore all offload features\n");
6852}
6853EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
6854
6855static void qeth_check_restricted_features(struct qeth_card *card,
6856                                           netdev_features_t changed,
6857                                           netdev_features_t actual)
6858{
6859        netdev_features_t ipv6_features = NETIF_F_TSO6;
6860        netdev_features_t ipv4_features = NETIF_F_TSO;
6861
6862        if (!card->info.has_lp2lp_cso_v6)
6863                ipv6_features |= NETIF_F_IPV6_CSUM;
6864        if (!card->info.has_lp2lp_cso_v4)
6865                ipv4_features |= NETIF_F_IP_CSUM;
6866
6867        if ((changed & ipv6_features) && !(actual & ipv6_features))
6868                qeth_flush_local_addrs6(card);
6869        if ((changed & ipv4_features) && !(actual & ipv4_features))
6870                qeth_flush_local_addrs4(card);
6871}
6872
6873int qeth_set_features(struct net_device *dev, netdev_features_t features)
6874{
6875        struct qeth_card *card = dev->ml_priv;
6876        netdev_features_t changed = dev->features ^ features;
6877        int rc = 0;
6878
6879        QETH_CARD_TEXT(card, 2, "setfeat");
6880        QETH_CARD_HEX(card, 2, &features, sizeof(features));
6881
6882        if ((changed & NETIF_F_IP_CSUM)) {
6883                rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
6884                                       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4,
6885                                       &card->info.has_lp2lp_cso_v4);
6886                if (rc)
6887                        changed ^= NETIF_F_IP_CSUM;
6888        }
6889        if (changed & NETIF_F_IPV6_CSUM) {
6890                rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
6891                                       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6,
6892                                       &card->info.has_lp2lp_cso_v6);
6893                if (rc)
6894                        changed ^= NETIF_F_IPV6_CSUM;
6895        }
6896        if (changed & NETIF_F_RXCSUM) {
6897                rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
6898                if (rc)
6899                        changed ^= NETIF_F_RXCSUM;
6900        }
6901        if (changed & NETIF_F_TSO) {
6902                rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
6903                                      QETH_PROT_IPV4);
6904                if (rc)
6905                        changed ^= NETIF_F_TSO;
6906        }
6907        if (changed & NETIF_F_TSO6) {
6908                rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
6909                                      QETH_PROT_IPV6);
6910                if (rc)
6911                        changed ^= NETIF_F_TSO6;
6912        }
6913
6914        qeth_check_restricted_features(card, dev->features ^ features,
6915                                       dev->features ^ changed);
6916
6917        /* everything changed successfully? */
6918        if ((dev->features ^ features) == changed)
6919                return 0;
6920        /* something went wrong. save changed features and return error */
6921        dev->features ^= changed;
6922        return -EIO;
6923}
6924EXPORT_SYMBOL_GPL(qeth_set_features);
6925
6926netdev_features_t qeth_fix_features(struct net_device *dev,
6927                                    netdev_features_t features)
6928{
6929        struct qeth_card *card = dev->ml_priv;
6930
6931        QETH_CARD_TEXT(card, 2, "fixfeat");
6932        if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
6933                features &= ~NETIF_F_IP_CSUM;
6934        if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
6935                features &= ~NETIF_F_IPV6_CSUM;
6936        if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
6937            !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
6938                features &= ~NETIF_F_RXCSUM;
6939        if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
6940                features &= ~NETIF_F_TSO;
6941        if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
6942                features &= ~NETIF_F_TSO6;
6943
6944        QETH_CARD_HEX(card, 2, &features, sizeof(features));
6945        return features;
6946}
6947EXPORT_SYMBOL_GPL(qeth_fix_features);
6948
6949netdev_features_t qeth_features_check(struct sk_buff *skb,
6950                                      struct net_device *dev,
6951                                      netdev_features_t features)
6952{
6953        struct qeth_card *card = dev->ml_priv;
6954
6955        /* Traffic with local next-hop is not eligible for some offloads: */
6956        if (skb->ip_summed == CHECKSUM_PARTIAL &&
6957            READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) {
6958                netdev_features_t restricted = 0;
6959
6960                if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
6961                        restricted |= NETIF_F_ALL_TSO;
6962
6963                switch (vlan_get_protocol(skb)) {
6964                case htons(ETH_P_IP):
6965                        if (!card->info.has_lp2lp_cso_v4)
6966                                restricted |= NETIF_F_IP_CSUM;
6967
6968                        if (restricted && qeth_next_hop_is_local_v4(card, skb))
6969                                features &= ~restricted;
6970                        break;
6971                case htons(ETH_P_IPV6):
6972                        if (!card->info.has_lp2lp_cso_v6)
6973                                restricted |= NETIF_F_IPV6_CSUM;
6974
6975                        if (restricted && qeth_next_hop_is_local_v6(card, skb))
6976                                features &= ~restricted;
6977                        break;
6978                default:
6979                        break;
6980                }
6981        }
6982
6983        /* GSO segmentation builds skbs with
6984         *      a (small) linear part for the headers, and
6985         *      page frags for the data.
6986         * Compared to a linear skb, the header-only part consumes an
6987         * additional buffer element. This reduces buffer utilization, and
6988         * hurts throughput. So compress small segments into one element.
6989         */
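        /* For example, on 4 KiB pages a standard 1500-byte MTU flow
         * (gso_size of roughly 1460 plus protocol headers and headroom)
         * stays within an order-0 allocation, so NETIF_F_SG is cleared and
         * each segment is built linear; a jumbo-MSS flow exceeds
         * SKB_MAX_HEAD(0) and keeps using page frags.
         */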
6990        if (netif_needs_gso(skb, features)) {
6991                /* match skb_segment(): */
6992                unsigned int doffset = skb->data - skb_mac_header(skb);
6993                unsigned int hsize = skb_shinfo(skb)->gso_size;
6994                unsigned int hroom = skb_headroom(skb);
6995
6996                /* linearize only if resulting skb allocations are order-0: */
6997                if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
6998                        features &= ~NETIF_F_SG;
6999        }
7000
7001        return vlan_features_check(skb, features);
7002}
7003EXPORT_SYMBOL_GPL(qeth_features_check);
7004
7005void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7006{
7007        struct qeth_card *card = dev->ml_priv;
7008        struct qeth_qdio_out_q *queue;
7009        unsigned int i;
7010
7011        QETH_CARD_TEXT(card, 5, "getstat");
7012
7013        stats->rx_packets = card->stats.rx_packets;
7014        stats->rx_bytes = card->stats.rx_bytes;
7015        stats->rx_errors = card->stats.rx_length_errors +
7016                           card->stats.rx_frame_errors +
7017                           card->stats.rx_fifo_errors;
7018        stats->rx_dropped = card->stats.rx_dropped_nomem +
7019                            card->stats.rx_dropped_notsupp +
7020                            card->stats.rx_dropped_runt;
7021        stats->multicast = card->stats.rx_multicast;
7022        stats->rx_length_errors = card->stats.rx_length_errors;
7023        stats->rx_frame_errors = card->stats.rx_frame_errors;
7024        stats->rx_fifo_errors = card->stats.rx_fifo_errors;
7025
7026        for (i = 0; i < card->qdio.no_out_queues; i++) {
7027                queue = card->qdio.out_qs[i];
7028
7029                stats->tx_packets += queue->stats.tx_packets;
7030                stats->tx_bytes += queue->stats.tx_bytes;
7031                stats->tx_errors += queue->stats.tx_errors;
7032                stats->tx_dropped += queue->stats.tx_dropped;
7033        }
7034}
7035EXPORT_SYMBOL_GPL(qeth_get_stats64);
7036
7037#define TC_IQD_UCAST   0
7038static void qeth_iqd_set_prio_tc_map(struct net_device *dev,
7039                                     unsigned int ucast_txqs)
7040{
7041        unsigned int prio;
7042
7043        /* IQD requires mcast traffic to be placed on a dedicated queue, and
7044         * qeth_iqd_select_queue() deals with this.
7045         * For unicast traffic, we defer the queue selection to the stack.
7046         * By installing a trivial prio map that spans over only the unicast
7047         * queues, we can encourage the stack to spread the ucast traffic evenly
7048         * without selecting the mcast queue.
7049         */
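        /* E.g. with four real TX queues this maps every priority to one TC
         * that spans the three ucast queues (starting at
         * QETH_IQD_MIN_UCAST_TXQ), while the QETH_IQD_MCAST_TXQ stays
         * outside the map and is only reached via qeth_iqd_select_queue().
         */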
7050
7051        /* One traffic class, spanning over all active ucast queues: */
7052        netdev_set_num_tc(dev, 1);
7053        netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs,
7054                            QETH_IQD_MIN_UCAST_TXQ);
7055
7056        /* Map all priorities to this traffic class: */
7057        for (prio = 0; prio <= TC_BITMASK; prio++)
7058                netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST);
7059}
7060
7061int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
7062{
7063        struct net_device *dev = card->dev;
7064        int rc;
7065
7066        /* Per netif_setup_tc(), adjust the mapping first: */
7067        if (IS_IQD(card))
7068                qeth_iqd_set_prio_tc_map(dev, count - 1);
7069
7070        rc = netif_set_real_num_tx_queues(dev, count);
7071
7072        if (rc && IS_IQD(card))
7073                qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1);
7074
7075        return rc;
7076}
7077EXPORT_SYMBOL_GPL(qeth_set_real_num_tx_queues);
7078
7079u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
7080                          u8 cast_type, struct net_device *sb_dev)
7081{
7082        u16 txq;
7083
7084        if (cast_type != RTN_UNICAST)
7085                return QETH_IQD_MCAST_TXQ;
7086        if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ)
7087                return QETH_IQD_MIN_UCAST_TXQ;
7088
7089        txq = netdev_pick_tx(dev, skb, sb_dev);
7090        return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq;
7091}
7092EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
7093
7094int qeth_open(struct net_device *dev)
7095{
7096        struct qeth_card *card = dev->ml_priv;
7097        struct qeth_qdio_out_q *queue;
7098        unsigned int i;
7099
7100        QETH_CARD_TEXT(card, 4, "qethopen");
7101
7102        card->data.state = CH_STATE_UP;
7103        netif_tx_start_all_queues(dev);
7104
7105        local_bh_disable();
7106        qeth_for_each_output_queue(card, queue, i) {
7107                netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
7108                                  QETH_NAPI_WEIGHT);
7109                napi_enable(&queue->napi);
7110                napi_schedule(&queue->napi);
7111        }
7112
7113        napi_enable(&card->napi);
7114        napi_schedule(&card->napi);
7115        /* kick-start the NAPI softirq: */
7116        local_bh_enable();
7117
7118        return 0;
7119}
7120EXPORT_SYMBOL_GPL(qeth_open);
7121
7122int qeth_stop(struct net_device *dev)
7123{
7124        struct qeth_card *card = dev->ml_priv;
7125        struct qeth_qdio_out_q *queue;
7126        unsigned int i;
7127
7128        QETH_CARD_TEXT(card, 4, "qethstop");
7129
7130        napi_disable(&card->napi);
7131        cancel_delayed_work_sync(&card->buffer_reclaim_work);
7132        qdio_stop_irq(CARD_DDEV(card));
7133
7134        /* Quiesce the NAPI instances: */
7135        qeth_for_each_output_queue(card, queue, i)
7136                napi_disable(&queue->napi);
7137
7138        /* Stop .ndo_start_xmit, might still access queue->napi. */
7139        netif_tx_disable(dev);
7140
7141        qeth_for_each_output_queue(card, queue, i) {
7142                del_timer_sync(&queue->timer);
7143                /* Queues may get re-allocated, so remove the NAPIs. */
7144                netif_napi_del(&queue->napi);
7145        }
7146
7147        return 0;
7148}
7149EXPORT_SYMBOL_GPL(qeth_stop);
7150
7151static int __init qeth_core_init(void)
7152{
7153        int rc;
7154
7155        pr_info("loading core functions\n");
7156
7157        qeth_debugfs_root = debugfs_create_dir("qeth", NULL);
7158
7159        rc = qeth_register_dbf_views();
7160        if (rc)
7161                goto dbf_err;
7162        qeth_core_root_dev = root_device_register("qeth");
7163        rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
7164        if (rc)
7165                goto register_err;
7166        qeth_core_header_cache =
7167                kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
7168                                  roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
7169                                  0, NULL);
7170        if (!qeth_core_header_cache) {
7171                rc = -ENOMEM;
7172                goto slab_err;
7173        }
7174        qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
7175                        sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
7176        if (!qeth_qdio_outbuf_cache) {
7177                rc = -ENOMEM;
7178                goto cqslab_err;
7179        }
7180        rc = ccw_driver_register(&qeth_ccw_driver);
7181        if (rc)
7182                goto ccw_err;
7183        rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
7184        if (rc)
7185                goto ccwgroup_err;
7186
7187        return 0;
7188
7189ccwgroup_err:
7190        ccw_driver_unregister(&qeth_ccw_driver);
7191ccw_err:
7192        kmem_cache_destroy(qeth_qdio_outbuf_cache);
7193cqslab_err:
7194        kmem_cache_destroy(qeth_core_header_cache);
7195slab_err:
7196        root_device_unregister(qeth_core_root_dev);
7197register_err:
7198        qeth_unregister_dbf_views();
7199dbf_err:
7200        debugfs_remove_recursive(qeth_debugfs_root);
7201        pr_err("Initializing the qeth device driver failed\n");
7202        return rc;
7203}
7204
7205static void __exit qeth_core_exit(void)
7206{
7207        qeth_clear_dbf_list();
7208        ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
7209        ccw_driver_unregister(&qeth_ccw_driver);
7210        kmem_cache_destroy(qeth_qdio_outbuf_cache);
7211        kmem_cache_destroy(qeth_core_header_cache);
7212        root_device_unregister(qeth_core_root_dev);
7213        qeth_unregister_dbf_views();
7214        debugfs_remove_recursive(qeth_debugfs_root);
7215        pr_info("core functions removed\n");
7216}
7217
7218module_init(qeth_core_init);
7219module_exit(qeth_core_exit);
7220MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
7221MODULE_DESCRIPTION("qeth core functions");
7222MODULE_LICENSE("GPL");
7223