/*
 *  linux/drivers/net/ethernet/ibm/ehea/ehea_main.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/memory.h>
#include <asm/kexec.h>
#include <linux/mutex.h>
#include <linux/prefetch.h>

#include <net/ip.h>

#include "ehea.h"
#include "ehea_qmr.h"
#include "ehea_phyp.h"


MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);


static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs = 1;
static int prop_carrier_state;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);

MODULE_PARM_DESC(msg_level, "Message level. Default = -1");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
                 "port to stack. 1:yes, 0:no. Default = 0");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
                 "[2^x - 1], x = [7..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3));
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
                 "[2^x - 1], x = [7..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2));
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
                 "[2^x - 1], x = [7..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1));
MODULE_PARM_DESC(sq_entries, "Number of entries for the Send Queue "
                 "[2^x - 1], x = [7..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ));
MODULE_PARM_DESC(use_mcs, "Multiple receive queues, 1: enable, 0: disable, "
                 "Default = 1");

static int port_name_cnt;
static LIST_HEAD(adapter_list);
static unsigned long ehea_driver_flags;
static DEFINE_MUTEX(dlpar_mem_lock);
static struct ehea_fw_handle_array ehea_fw_handles;
static struct ehea_bcmc_reg_array ehea_bcmc_regs;


static int ehea_probe_adapter(struct platform_device *dev);

static int ehea_remove(struct platform_device *dev);

static const struct of_device_id ehea_module_device_table[] = {
        {
                .name = "lhea",
                .compatible = "IBM,lhea",
        },
        {
                .type = "network",
                .compatible = "IBM,lhea-ethernet",
        },
        {},
};
MODULE_DEVICE_TABLE(of, ehea_module_device_table);

static const struct of_device_id ehea_device_table[] = {
        {
                .name = "lhea",
                .compatible = "IBM,lhea",
        },
        {},
};

static struct platform_driver ehea_driver = {
        .driver = {
                .name = "ehea",
                .owner = THIS_MODULE,
                .of_match_table = ehea_device_table,
        },
        .probe = ehea_probe_adapter,
        .remove = ehea_remove,
};

void ehea_dump(void *adr, int len, char *msg)
{
        int x;
        unsigned char *deb = adr;
        for (x = 0; x < len; x += 16) {
                pr_info("%s adr=%p ofs=%04x %016llx %016llx\n",
                        msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
                deb += 16;
        }
}

static void ehea_schedule_port_reset(struct ehea_port *port)
{
        if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
                schedule_work(&port->reset_task);
}

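/*
 * Keep a flat snapshot of all firmware handles (QPs, CQs, EQs, MRs) the
 * driver currently owns. Judging from the reboot/kexec includes above,
 * this snapshot is what lets shutdown paths release firmware resources
 * without walking the live data structures. Counting and filling are two
 * passes under ehea_fw_handles.lock; if the new array cannot be
 * allocated, the previous snapshot is kept.
 */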
static void ehea_update_firmware_handles(void)
{
        struct ehea_fw_handle_entry *arr = NULL;
        struct ehea_adapter *adapter;
        int num_adapters = 0;
        int num_ports = 0;
        int num_portres = 0;
        int i = 0;
        int num_fw_handles, k, l;

        /* Determine number of handles */
        mutex_lock(&ehea_fw_handles.lock);

        list_for_each_entry(adapter, &adapter_list, list) {
                num_adapters++;

                for (k = 0; k < EHEA_MAX_PORTS; k++) {
                        struct ehea_port *port = adapter->port[k];

                        if (!port || (port->state != EHEA_PORT_UP))
                                continue;

                        num_ports++;
                        num_portres += port->num_def_qps;
                }
        }

        num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
                         num_ports * EHEA_NUM_PORT_FW_HANDLES +
                         num_portres * EHEA_NUM_PORTRES_FW_HANDLES;

        if (num_fw_handles) {
                arr = kcalloc(num_fw_handles, sizeof(*arr), GFP_KERNEL);
                if (!arr)
                        goto out;  /* Keep the existing array */
        } else
                goto out_update;

        list_for_each_entry(adapter, &adapter_list, list) {
                if (num_adapters == 0)
                        break;

                for (k = 0; k < EHEA_MAX_PORTS; k++) {
                        struct ehea_port *port = adapter->port[k];

                        if (!port || (port->state != EHEA_PORT_UP) ||
                            (num_ports == 0))
                                continue;

                        for (l = 0; l < port->num_def_qps; l++) {
                                struct ehea_port_res *pr = &port->port_res[l];

                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->qp->fw_handle;
                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->send_cq->fw_handle;
                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->recv_cq->fw_handle;
                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->eq->fw_handle;
                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->send_mr.handle;
                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->recv_mr.handle;
                        }
                        arr[i].adh = adapter->handle;
                        arr[i++].fwh = port->qp_eq->fw_handle;
                        num_ports--;
                }

                arr[i].adh = adapter->handle;
                arr[i++].fwh = adapter->neq->fw_handle;

                if (adapter->mr.handle) {
                        arr[i].adh = adapter->handle;
                        arr[i++].fwh = adapter->mr.handle;
                }
                num_adapters--;
        }

out_update:
        kfree(ehea_fw_handles.arr);
        ehea_fw_handles.arr = arr;
        ehea_fw_handles.num_entries = i;
out:
        mutex_unlock(&ehea_fw_handles.lock);
}

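/*
 * Mirror the broadcast/multicast registrations of all active ports into
 * ehea_bcmc_regs: two broadcast entries per port (untagged and
 * VLANID_ALL) plus two entries per multicast address. This runs under a
 * spinlock with GFP_ATOMIC, so it is safe from non-sleeping contexts;
 * as with the firmware handles, a failed allocation keeps the old array.
 */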
static void ehea_update_bcmc_registrations(void)
{
        unsigned long flags;
        struct ehea_bcmc_reg_entry *arr = NULL;
        struct ehea_adapter *adapter;
        struct ehea_mc_list *mc_entry;
        int num_registrations = 0;
        int i = 0;
        int k;

        spin_lock_irqsave(&ehea_bcmc_regs.lock, flags);

        /* Determine number of registrations */
        list_for_each_entry(adapter, &adapter_list, list)
                for (k = 0; k < EHEA_MAX_PORTS; k++) {
                        struct ehea_port *port = adapter->port[k];

                        if (!port || (port->state != EHEA_PORT_UP))
                                continue;

                        num_registrations += 2; /* Broadcast registrations */

                        list_for_each_entry(mc_entry, &port->mc_list->list, list)
                                num_registrations += 2;
                }

        if (num_registrations) {
                arr = kcalloc(num_registrations, sizeof(*arr), GFP_ATOMIC);
                if (!arr)
                        goto out;  /* Keep the existing array */
        } else
                goto out_update;

        list_for_each_entry(adapter, &adapter_list, list) {
                for (k = 0; k < EHEA_MAX_PORTS; k++) {
                        struct ehea_port *port = adapter->port[k];

                        if (!port || (port->state != EHEA_PORT_UP))
                                continue;

                        if (num_registrations == 0)
                                goto out_update;

                        arr[i].adh = adapter->handle;
                        arr[i].port_id = port->logical_port_id;
                        arr[i].reg_type = EHEA_BCMC_BROADCAST |
                                          EHEA_BCMC_UNTAGGED;
                        arr[i++].macaddr = port->mac_addr;

                        arr[i].adh = adapter->handle;
                        arr[i].port_id = port->logical_port_id;
                        arr[i].reg_type = EHEA_BCMC_BROADCAST |
                                          EHEA_BCMC_VLANID_ALL;
                        arr[i++].macaddr = port->mac_addr;
                        num_registrations -= 2;

                        list_for_each_entry(mc_entry,
                                            &port->mc_list->list, list) {
                                if (num_registrations == 0)
                                        goto out_update;

                                arr[i].adh = adapter->handle;
                                arr[i].port_id = port->logical_port_id;
                                arr[i].reg_type = EHEA_BCMC_MULTICAST |
                                                  EHEA_BCMC_UNTAGGED;
                                if (mc_entry->macaddr == 0)
                                        arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
                                arr[i++].macaddr = mc_entry->macaddr;

                                arr[i].adh = adapter->handle;
                                arr[i].port_id = port->logical_port_id;
                                arr[i].reg_type = EHEA_BCMC_MULTICAST |
                                                  EHEA_BCMC_VLANID_ALL;
                                if (mc_entry->macaddr == 0)
                                        arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
                                arr[i++].macaddr = mc_entry->macaddr;
                                num_registrations -= 2;
                        }
                }
        }

out_update:
        kfree(ehea_bcmc_regs.arr);
        ehea_bcmc_regs.arr = arr;
        ehea_bcmc_regs.num_entries = i;
out:
        spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
}

static struct rtnl_link_stats64 *ehea_get_stats64(struct net_device *dev,
                                        struct rtnl_link_stats64 *stats)
{
        struct ehea_port *port = netdev_priv(dev);
        u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0;
        int i;

        for (i = 0; i < port->num_def_qps; i++) {
                rx_packets += port->port_res[i].rx_packets;
                rx_bytes   += port->port_res[i].rx_bytes;
                tx_packets += port->port_res[i].tx_packets;
                tx_bytes   += port->port_res[i].tx_bytes;
        }

        stats->rx_packets = rx_packets;
        stats->rx_bytes = rx_bytes;
        stats->tx_packets = tx_packets;
        stats->tx_bytes = tx_bytes;

        stats->multicast = port->stats.multicast;
        stats->rx_errors = port->stats.rx_errors;
        return stats;
}

static void ehea_update_stats(struct work_struct *work)
{
        struct ehea_port *port =
                container_of(work, struct ehea_port, stats_work.work);
        struct net_device *dev = port->netdev;
        struct rtnl_link_stats64 *stats = &port->stats;
        struct hcp_ehea_port_cb2 *cb2;
        u64 hret;

        cb2 = (void *)get_zeroed_page(GFP_KERNEL);
        if (!cb2) {
                netdev_err(dev, "No mem for cb2. Some interface statistics were not updated\n");
                goto resched;
        }

        hret = ehea_h_query_ehea_port(port->adapter->handle,
                                      port->logical_port_id,
                                      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
        if (hret != H_SUCCESS) {
                netdev_err(dev, "query_ehea_port failed\n");
                goto out_herr;
        }

        if (netif_msg_hw(port))
                ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

        stats->multicast = cb2->rxmcp;
        stats->rx_errors = cb2->rxuerr;

out_herr:
        free_page((unsigned long)cb2);
resched:
        schedule_delayed_work(&port->stats_work,
                              round_jiffies_relative(msecs_to_jiffies(1000)));
}

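/*
 * Top up RQ1 with freshly allocated skbs. The ring size is a power of
 * two, so "index &= len - 1" wraps the (backwards-walking) index
 * cheaply. Allocations that fail are carried over in os_skbs
 * (presumably "outstanding skbs") and retried on the next refill; the
 * doorbell is rung only for slots actually (re)filled.
 */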
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
        struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
        struct net_device *dev = pr->port->netdev;
        int max_index_mask = pr->rq1_skba.len - 1;
        int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
        int adder = 0;
        int i;

        pr->rq1_skba.os_skbs = 0;

        if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
                if (nr_of_wqes > 0)
                        pr->rq1_skba.index = index;
                pr->rq1_skba.os_skbs = fill_wqes;
                return;
        }

        for (i = 0; i < fill_wqes; i++) {
                if (!skb_arr_rq1[index]) {
                        skb_arr_rq1[index] = netdev_alloc_skb(dev,
                                                              EHEA_L_PKT_SIZE);
                        if (!skb_arr_rq1[index]) {
                                pr->rq1_skba.os_skbs = fill_wqes - i;
                                break;
                        }
                }
                index--;
                index &= max_index_mask;
                adder++;
        }

        if (adder == 0)
                return;

        /* Ring doorbell */
        ehea_update_rq1a(pr->qp, adder);
}

static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
        struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
        struct net_device *dev = pr->port->netdev;
        int i;

        if (nr_rq1a > pr->rq1_skba.len) {
                netdev_err(dev, "NR_RQ1A bigger than skb array len\n");
                return;
        }

        for (i = 0; i < nr_rq1a; i++) {
                skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
                if (!skb_arr_rq1[i])
                        break;
        }
        /* Ring doorbell */
        ehea_update_rq1a(pr->qp, i - 1);
}

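/*
 * Common refill path for RQ2 and RQ3. Unlike RQ1, these queues post
 * real receive WQEs: each one carries a single scatter/gather entry
 * that maps the skb data via ehea_map_vaddr() and references the
 * receive MR's lkey. iosync() makes sure the WQE stores are visible
 * before the doorbell update.
 */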
static int ehea_refill_rq_def(struct ehea_port_res *pr,
                              struct ehea_q_skb_arr *q_skba, int rq_nr,
                              int num_wqes, int wqe_type, int packet_size)
{
        struct net_device *dev = pr->port->netdev;
        struct ehea_qp *qp = pr->qp;
        struct sk_buff **skb_arr = q_skba->arr;
        struct ehea_rwqe *rwqe;
        int i, index, max_index_mask, fill_wqes;
        int adder = 0;
        int ret = 0;

        fill_wqes = q_skba->os_skbs + num_wqes;
        q_skba->os_skbs = 0;

        if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
                q_skba->os_skbs = fill_wqes;
                return ret;
        }

        index = q_skba->index;
        max_index_mask = q_skba->len - 1;
        for (i = 0; i < fill_wqes; i++) {
                u64 tmp_addr;
                struct sk_buff *skb;

                skb = netdev_alloc_skb_ip_align(dev, packet_size);
                if (!skb) {
                        q_skba->os_skbs = fill_wqes - i;
                        if (q_skba->os_skbs == q_skba->len - 2) {
                                netdev_info(pr->port->netdev,
                                            "rq%i ran dry - no mem for skb\n",
                                            rq_nr);
                                ret = -ENOMEM;
                        }
                        break;
                }

                skb_arr[index] = skb;
                tmp_addr = ehea_map_vaddr(skb->data);
                if (tmp_addr == -1) {
                        dev_consume_skb_any(skb);
                        q_skba->os_skbs = fill_wqes - i;
                        ret = 0;
                        break;
                }

                rwqe = ehea_get_next_rwqe(qp, rq_nr);
                rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
                            | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
                rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
                rwqe->sg_list[0].vaddr = tmp_addr;
                rwqe->sg_list[0].len = packet_size;
                rwqe->data_segments = 1;

                index++;
                index &= max_index_mask;
                adder++;
        }

        q_skba->index = index;
        if (adder == 0)
                goto out;

        /* Ring doorbell */
        iosync();
        if (rq_nr == 2)
                ehea_update_rq2a(pr->qp, adder);
        else
                ehea_update_rq3a(pr->qp, adder);
out:
        return ret;
}


static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
        return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
                                  nr_of_wqes, EHEA_RWQE2_TYPE,
                                  EHEA_RQ2_PKT_SIZE);
}


static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
        return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
                                  nr_of_wqes, EHEA_RWQE3_TYPE,
                                  EHEA_MAX_PACKET_SIZE);
}

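/*
 * Classify a receive CQE: returns 0 for a usable packet and -EINVAL on
 * an error status. A TCP checksum error is tolerated when
 * header_length == 0, i.e. when, judging by the check, no TCP header
 * was parsed and the checksum result is meaningless.
 */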
static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
        *rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
        if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
                return 0;
        if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
            (cqe->header_length == 0))
                return 0;
        return -EINVAL;
}

static inline void ehea_fill_skb(struct net_device *dev,
                                 struct sk_buff *skb, struct ehea_cqe *cqe,
                                 struct ehea_port_res *pr)
{
        int length = cqe->num_bytes_transfered - 4;     /* remove CRC */

        skb_put(skb, length);
        skb->protocol = eth_type_trans(skb, dev);

        /* The packet was not an IPv4 packet, so a complemented checksum was
           calculated. The value is found in the Internet Checksum field. */
        if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
                skb->ip_summed = CHECKSUM_COMPLETE;
                skb->csum = csum_unfold(~cqe->inet_checksum_value);
        } else
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        skb_record_rx_queue(skb, pr - &pr->port->port_res[0]);
}

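/*
 * Look up the skb a completion refers to, prefetching the next array
 * slot and its packet data on the assumption that completions arrive in
 * ring order and the following entry is about to be processed.
 */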
static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
                                               int arr_len,
                                               struct ehea_cqe *cqe)
{
        int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
        struct sk_buff *skb;
        void *pref;
        int x;

        x = skb_index + 1;
        x &= (arr_len - 1);

        pref = skb_array[x];
        if (pref) {
                prefetchw(pref);
                prefetchw(pref + EHEA_CACHE_LINE);

                pref = (skb_array[x]->data);
                prefetch(pref);
                prefetch(pref + EHEA_CACHE_LINE);
                prefetch(pref + EHEA_CACHE_LINE * 2);
                prefetch(pref + EHEA_CACHE_LINE * 3);
        }

        skb = skb_array[skb_index];
        skb_array[skb_index] = NULL;
        return skb;
}

static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
                                                  int arr_len, int wqe_index)
{
        struct sk_buff *skb;
        void *pref;
        int x;

        x = wqe_index + 1;
        x &= (arr_len - 1);

        pref = skb_array[x];
        if (pref) {
                prefetchw(pref);
                prefetchw(pref + EHEA_CACHE_LINE);

                pref = (skb_array[x]->data);
                prefetchw(pref);
                prefetchw(pref + EHEA_CACHE_LINE);
        }

        skb = skb_array[wqe_index];
        skb_array[wqe_index] = NULL;
        return skb;
}

static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
                                 struct ehea_cqe *cqe, int *processed_rq2,
                                 int *processed_rq3)
{
        struct sk_buff *skb;

        if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
                pr->p_stats.err_tcp_cksum++;
        if (cqe->status & EHEA_CQE_STAT_ERR_IP)
                pr->p_stats.err_ip_cksum++;
        if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
                pr->p_stats.err_frame_crc++;

        if (rq == 2) {
                *processed_rq2 += 1;
                skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
                dev_kfree_skb(skb);
        } else if (rq == 3) {
                *processed_rq3 += 1;
                skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
                dev_kfree_skb(skb);
        }

        if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
                if (netif_msg_rx_err(pr->port)) {
                        pr_err("Critical receive error for QP %d. Resetting port.\n",
                               pr->qp->init_attr.qp_nr);
                        ehea_dump(cqe, sizeof(*cqe), "CQE");
                }
                ehea_schedule_port_reset(pr->port);
                return 1;
        }

        return 0;
}

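/*
 * Receive path for one port resource. eHEA spreads incoming frames over
 * three receive queues: RQ1 is the low-latency queue whose packet data
 * is delivered as immediate data inside the CQE (hence the copy from
 * cqe + 64), while RQ2 and RQ3 complete into pre-posted skbs for medium
 * and large frames. WQE consumption is counted per queue so all three
 * rings can be refilled in one batch at the end.
 */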
static int ehea_proc_rwqes(struct net_device *dev,
                           struct ehea_port_res *pr,
                           int budget)
{
        struct ehea_port *port = pr->port;
        struct ehea_qp *qp = pr->qp;
        struct ehea_cqe *cqe;
        struct sk_buff *skb;
        struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
        struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
        struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
        int skb_arr_rq1_len = pr->rq1_skba.len;
        int skb_arr_rq2_len = pr->rq2_skba.len;
        int skb_arr_rq3_len = pr->rq3_skba.len;
        int processed, processed_rq1, processed_rq2, processed_rq3;
        u64 processed_bytes = 0;
        int wqe_index, last_wqe_index, rq, port_reset;

        processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
        last_wqe_index = 0;

        cqe = ehea_poll_rq1(qp, &wqe_index);
        while ((processed < budget) && cqe) {
                ehea_inc_rq1(qp);
                processed_rq1++;
                processed++;
                if (netif_msg_rx_status(port))
                        ehea_dump(cqe, sizeof(*cqe), "CQE");

                last_wqe_index = wqe_index;
                rmb();
                if (!ehea_check_cqe(cqe, &rq)) {
                        if (rq == 1) {
                                /* LL RQ1 */
                                skb = get_skb_by_index_ll(skb_arr_rq1,
                                                          skb_arr_rq1_len,
                                                          wqe_index);
                                if (unlikely(!skb)) {
                                        netif_info(port, rx_err, dev,
                                                  "LL rq1: skb=NULL\n");

                                        skb = netdev_alloc_skb(dev,
                                                               EHEA_L_PKT_SIZE);
                                        if (!skb)
                                                break;
                                }
                                skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
                                                 cqe->num_bytes_transfered - 4);
                                ehea_fill_skb(dev, skb, cqe, pr);
                        } else if (rq == 2) {
                                /* RQ2 */
                                skb = get_skb_by_index(skb_arr_rq2,
                                                       skb_arr_rq2_len, cqe);
                                if (unlikely(!skb)) {
                                        netif_err(port, rx_err, dev,
                                                  "rq2: skb=NULL\n");
                                        break;
                                }
                                ehea_fill_skb(dev, skb, cqe, pr);
                                processed_rq2++;
                        } else {
                                /* RQ3 */
                                skb = get_skb_by_index(skb_arr_rq3,
                                                       skb_arr_rq3_len, cqe);
                                if (unlikely(!skb)) {
                                        netif_err(port, rx_err, dev,
                                                  "rq3: skb=NULL\n");
                                        break;
                                }
                                ehea_fill_skb(dev, skb, cqe, pr);
                                processed_rq3++;
                        }

                        processed_bytes += skb->len;

                        if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
                                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                                       cqe->vlan_tag);

                        napi_gro_receive(&pr->napi, skb);
                } else {
                        pr->p_stats.poll_receive_errors++;
                        port_reset = ehea_treat_poll_error(pr, rq, cqe,
                                                           &processed_rq2,
                                                           &processed_rq3);
                        if (port_reset)
                                break;
                }
                cqe = ehea_poll_rq1(qp, &wqe_index);
        }

        pr->rx_packets += processed;
        pr->rx_bytes += processed_bytes;

        ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
        ehea_refill_rq2(pr, processed_rq2);
        ehea_refill_rq3(pr, processed_rq3);

        return processed;
}

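/*
 * Send queue sanity check: check_sqs() posts a marker WQE (wr_id
 * SWQE_RESTART_CHECK, tagged EHEA_SWQE_PURGE with signalled completion)
 * on each send queue. ehea_proc_cqes() raises sq_restart_flag when the
 * marker completes, and reset_sq_restart_flag() clears the flags and
 * wakes the waiter; if nothing comes back within 100ms, the HW and SW
 * queues are assumed to be out of sync and the port is reset.
 */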
#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull

static void reset_sq_restart_flag(struct ehea_port *port)
{
        int i;

        for (i = 0; i < port->num_def_qps; i++) {
                struct ehea_port_res *pr = &port->port_res[i];
                pr->sq_restart_flag = 0;
        }
        wake_up(&port->restart_wq);
}

static void check_sqs(struct ehea_port *port)
{
        struct ehea_swqe *swqe;
        int swqe_index;
        int i, k;

        for (i = 0; i < port->num_def_qps; i++) {
                struct ehea_port_res *pr = &port->port_res[i];
                int ret;
                k = 0;
                swqe = ehea_get_swqe(pr->qp, &swqe_index);
                memset(swqe, 0, SWQE_HEADER_SIZE);
                atomic_dec(&pr->swqe_avail);

                swqe->tx_control |= EHEA_SWQE_PURGE;
                swqe->wr_id = SWQE_RESTART_CHECK;
                swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
                swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
                swqe->immediate_data_length = 80;

                ehea_post_swqe(pr->qp, swqe);

                ret = wait_event_timeout(port->restart_wq,
                                         pr->sq_restart_flag == 0,
                                         msecs_to_jiffies(100));

                if (!ret) {
                        pr_err("HW/SW queues out of sync\n");
                        ehea_schedule_port_reset(pr->port);
                        return;
                }
        }
}

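/*
 * Reap send completions: SWQE2-type completions carry the index of the
 * transmitted skb in sq_skba, which is released here. swqe_avail is
 * replenished from the REFILL field of each wr_id and the TX queue is
 * woken once enough send WQEs are free. Returns the first unprocessed
 * CQE (if any) so the caller can tell whether work remains.
 */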
static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
        struct sk_buff *skb;
        struct ehea_cq *send_cq = pr->send_cq;
        struct ehea_cqe *cqe;
        int quota = my_quota;
        int cqe_counter = 0;
        int swqe_av = 0;
        int index;
        struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev,
                                                pr - &pr->port->port_res[0]);

        cqe = ehea_poll_cq(send_cq);
        while (cqe && (quota > 0)) {
                ehea_inc_cq(send_cq);

                cqe_counter++;
                rmb();

                if (cqe->wr_id == SWQE_RESTART_CHECK) {
                        pr->sq_restart_flag = 1;
                        swqe_av++;
                        break;
                }

                if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
                        pr_err("Bad send completion status=0x%04X\n",
                               cqe->status);

                        if (netif_msg_tx_err(pr->port))
                                ehea_dump(cqe, sizeof(*cqe), "Send CQE");

                        if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
                                pr_err("Resetting port\n");
                                ehea_schedule_port_reset(pr->port);
                                break;
                        }
                }

                if (netif_msg_tx_done(pr->port))
                        ehea_dump(cqe, sizeof(*cqe), "CQE");

                if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
                           == EHEA_SWQE2_TYPE)) {

                        index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
                        skb = pr->sq_skba.arr[index];
                        dev_consume_skb_any(skb);
                        pr->sq_skba.arr[index] = NULL;
                }

                swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
                quota--;

                cqe = ehea_poll_cq(send_cq);
        }

        ehea_update_feca(send_cq, cqe_counter);
        atomic_add(swqe_av, &pr->swqe_avail);

        if (unlikely(netif_tx_queue_stopped(txq) &&
                     (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) {
                __netif_tx_lock(txq, smp_processor_id());
                if (netif_tx_queue_stopped(txq) &&
                    (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))
                        netif_tx_wake_queue(txq);
                __netif_tx_unlock(txq);
        }

        wake_up(&pr->port->swqe_avail_wq);

        return cqe;
}

#define EHEA_POLL_MAX_CQES 65535

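/*
 * NAPI poll: drain send completions first, then receive WQEs up to the
 * budget. If the budget was not exhausted, completion events are
 * re-armed (ehea_reset_cq_ep/_n1) and both CQs are checked once more;
 * should a completion have raced in, napi_reschedule() undoes
 * napi_complete() and polling continues, closing the usual
 * interrupt-vs-poll race.
 */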
static int ehea_poll(struct napi_struct *napi, int budget)
{
        struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
                                                napi);
        struct net_device *dev = pr->port->netdev;
        struct ehea_cqe *cqe;
        struct ehea_cqe *cqe_skb = NULL;
        int wqe_index;
        int rx = 0;

        cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
        rx += ehea_proc_rwqes(dev, pr, budget - rx);

        while (rx != budget) {
                napi_complete(napi);
                ehea_reset_cq_ep(pr->recv_cq);
                ehea_reset_cq_ep(pr->send_cq);
                ehea_reset_cq_n1(pr->recv_cq);
                ehea_reset_cq_n1(pr->send_cq);
                rmb();
                cqe = ehea_poll_rq1(pr->qp, &wqe_index);
                cqe_skb = ehea_poll_cq(pr->send_cq);

                if (!cqe && !cqe_skb)
                        return rx;

                if (!napi_reschedule(napi))
                        return rx;

                cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
                rx += ehea_proc_rwqes(dev, pr, budget - rx);
        }

        return rx;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ehea_netpoll(struct net_device *dev)
{
        struct ehea_port *port = netdev_priv(dev);
        int i;

        for (i = 0; i < port->num_def_qps; i++)
                napi_schedule(&port->port_res[i].napi);
}
#endif

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
        struct ehea_port_res *pr = param;

        napi_schedule(&pr->napi);

        return IRQ_HANDLED;
}

static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
        struct ehea_port *port = param;
        struct ehea_eqe *eqe;
        struct ehea_qp *qp;
        u32 qp_token;
        u64 resource_type, aer, aerr;
        int reset_port = 0;

        eqe = ehea_poll_eq(port->qp_eq);

        while (eqe) {
                qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
                pr_err("QP aff_err: entry=0x%llx, token=0x%x\n",
                       eqe->entry, qp_token);

                qp = port->port_res[qp_token].qp;

                resource_type = ehea_error_data(port->adapter, qp->fw_handle,
                                                &aer, &aerr);

                if (resource_type == EHEA_AER_RESTYPE_QP) {
                        if ((aer & EHEA_AER_RESET_MASK) ||
                            (aerr & EHEA_AERR_RESET_MASK))
                                reset_port = 1;
                } else
                        reset_port = 1;   /* Reset in case of CQ or EQ error */

                eqe = ehea_poll_eq(port->qp_eq);
        }

        if (reset_port) {
                pr_err("Resetting port\n");
                ehea_schedule_port_reset(port);
        }

        return IRQ_HANDLED;
}

static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
                                       int logical_port)
{
        int i;

        for (i = 0; i < EHEA_MAX_PORTS; i++)
                if (adapter->port[i])
                        if (adapter->port[i]->logical_port_id == logical_port)
                                return adapter->port[i];
        return NULL;
}

int ehea_sense_port_attr(struct ehea_port *port)
{
        int ret;
        u64 hret;
        struct hcp_ehea_port_cb0 *cb0;

        /* may be called via ehea_neq_tasklet() */
        cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
        if (!cb0) {
                pr_err("no mem for cb0\n");
                ret = -ENOMEM;
                goto out;
        }

        hret = ehea_h_query_ehea_port(port->adapter->handle,
                                      port->logical_port_id, H_PORT_CB0,
                                      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
                                      cb0);
        if (hret != H_SUCCESS) {
                ret = -EIO;
                goto out_free;
        }

        /* MAC address */
        port->mac_addr = cb0->port_mac_addr << 16;

        if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
                ret = -EADDRNOTAVAIL;
                goto out_free;
        }

        /* Port speed */
        switch (cb0->port_speed) {
        case H_SPEED_10M_H:
                port->port_speed = EHEA_SPEED_10M;
                port->full_duplex = 0;
                break;
        case H_SPEED_10M_F:
                port->port_speed = EHEA_SPEED_10M;
                port->full_duplex = 1;
                break;
        case H_SPEED_100M_H:
                port->port_speed = EHEA_SPEED_100M;
                port->full_duplex = 0;
                break;
        case H_SPEED_100M_F:
                port->port_speed = EHEA_SPEED_100M;
                port->full_duplex = 1;
                break;
        case H_SPEED_1G_F:
                port->port_speed = EHEA_SPEED_1G;
                port->full_duplex = 1;
                break;
        case H_SPEED_10G_F:
                port->port_speed = EHEA_SPEED_10G;
                port->full_duplex = 1;
                break;
        default:
                port->port_speed = 0;
                port->full_duplex = 0;
                break;
        }

        port->autoneg = 1;
        port->num_mcs = cb0->num_default_qps;

        /* Number of default QPs */
        if (use_mcs)
                port->num_def_qps = cb0->num_default_qps;
        else
                port->num_def_qps = 1;

        if (!port->num_def_qps) {
                ret = -EINVAL;
                goto out_free;
        }

        ret = 0;
out_free:
        if (ret || netif_msg_probe(port))
                ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
        free_page((unsigned long)cb0);
out:
        return ret;
}

int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
        struct hcp_ehea_port_cb4 *cb4;
        u64 hret;
        int ret = 0;

        cb4 = (void *)get_zeroed_page(GFP_KERNEL);
        if (!cb4) {
                pr_err("no mem for cb4\n");
                ret = -ENOMEM;
                goto out;
        }

        cb4->port_speed = port_speed;

        netif_carrier_off(port->netdev);

        hret = ehea_h_modify_ehea_port(port->adapter->handle,
                                       port->logical_port_id,
                                       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
        if (hret == H_SUCCESS) {
                port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

                hret = ehea_h_query_ehea_port(port->adapter->handle,
                                              port->logical_port_id,
                                              H_PORT_CB4, H_PORT_CB4_SPEED,
                                              cb4);
                if (hret == H_SUCCESS) {
                        switch (cb4->port_speed) {
                        case H_SPEED_10M_H:
                                port->port_speed = EHEA_SPEED_10M;
                                port->full_duplex = 0;
                                break;
                        case H_SPEED_10M_F:
                                port->port_speed = EHEA_SPEED_10M;
                                port->full_duplex = 1;
                                break;
                        case H_SPEED_100M_H:
                                port->port_speed = EHEA_SPEED_100M;
                                port->full_duplex = 0;
                                break;
                        case H_SPEED_100M_F:
                                port->port_speed = EHEA_SPEED_100M;
                                port->full_duplex = 1;
                                break;
                        case H_SPEED_1G_F:
                                port->port_speed = EHEA_SPEED_1G;
                                port->full_duplex = 1;
                                break;
                        case H_SPEED_10G_F:
                                port->port_speed = EHEA_SPEED_10G;
                                port->full_duplex = 1;
                                break;
                        default:
                                port->port_speed = 0;
                                port->full_duplex = 0;
                                break;
                        }
                } else {
                        pr_err("Failed sensing port speed\n");
                        ret = -EIO;
                }
        } else {
                if (hret == H_AUTHORITY) {
                        pr_info("Hypervisor denied setting port speed\n");
                        ret = -EPERM;
                } else {
                        ret = -EIO;
                        pr_err("Failed setting port speed\n");
                }
        }
        if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
                netif_carrier_on(port->netdev);

        free_page((unsigned long)cb4);
out:
        return ret;
}

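/*
 * Decode one entry from the adapter's notification event queue: logical
 * and physical link changes (optionally propagated to the carrier state
 * via prop_carrier_state), primary/backup switch port changes, and
 * adapter/port malfunction events.
 */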
static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
        int ret;
        u8 ec;
        u8 portnum;
        struct ehea_port *port;
        struct net_device *dev;

        ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
        portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
        port = ehea_get_port(adapter, portnum);
        if (!port) {
                /* Check before dereferencing: port may be unknown */
                pr_err("unknown portnum %x\n", portnum);
                return;
        }
        dev = port->netdev;

        switch (ec) {
        case EHEA_EC_PORTSTATE_CHG:     /* port state change */

                if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
                        if (!netif_carrier_ok(dev)) {
                                ret = ehea_sense_port_attr(port);
                                if (ret) {
                                        netdev_err(dev, "failed resensing port attributes\n");
                                        break;
                                }

                                netif_info(port, link, dev,
                                           "Logical port up: %dMbps %s Duplex\n",
                                           port->port_speed,
                                           port->full_duplex == 1 ?
                                           "Full" : "Half");

                                netif_carrier_on(dev);
                                netif_wake_queue(dev);
                        }
                } else
                        if (netif_carrier_ok(dev)) {
                                netif_info(port, link, dev,
                                           "Logical port down\n");
                                netif_carrier_off(dev);
                                netif_tx_disable(dev);
                        }

                if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
                        port->phy_link = EHEA_PHY_LINK_UP;
                        netif_info(port, link, dev,
                                   "Physical port up\n");
                        if (prop_carrier_state)
                                netif_carrier_on(dev);
                } else {
                        port->phy_link = EHEA_PHY_LINK_DOWN;
                        netif_info(port, link, dev,
                                   "Physical port down\n");
                        if (prop_carrier_state)
                                netif_carrier_off(dev);
                }

                if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
                        netdev_info(dev,
                                    "External switch port is primary port\n");
                else
                        netdev_info(dev,
                                    "External switch port is backup port\n");

                break;
        case EHEA_EC_ADAPTER_MALFUNC:
                netdev_err(dev, "Adapter malfunction\n");
                break;
        case EHEA_EC_PORT_MALFUNC:
                netdev_info(dev, "Port malfunction\n");
                netif_carrier_off(dev);
                netif_tx_disable(dev);
                break;
        default:
                netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
                break;
        }
}

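/*
 * Bottom half for the notification EQ interrupt: drain all pending EQEs
 * into ehea_parse_eqe(), then re-arm the event sources with
 * ehea_h_reset_events().
 */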
static void ehea_neq_tasklet(unsigned long data)
{
        struct ehea_adapter *adapter = (struct ehea_adapter *)data;
        struct ehea_eqe *eqe;
        u64 event_mask;

        eqe = ehea_poll_eq(adapter->neq);
        pr_debug("eqe=%p\n", eqe);

        while (eqe) {
                pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry);
                ehea_parse_eqe(adapter, eqe->entry);
                eqe = ehea_poll_eq(adapter->neq);
                pr_debug("next eqe=%p\n", eqe);
        }

        event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
                   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
                   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

        ehea_h_reset_events(adapter->handle,
                            adapter->neq->fw_handle, event_mask);
}

static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
        struct ehea_adapter *adapter = param;
        tasklet_hi_schedule(&adapter->neq_tasklet);
        return IRQ_HANDLED;
}


static int ehea_fill_port_res(struct ehea_port_res *pr)
{
        int ret;
        struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

        ehea_init_fill_rq1(pr, pr->rq1_skba.len);

        ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

        ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

        return ret;
}

static int ehea_reg_interrupts(struct net_device *dev)
{
        struct ehea_port *port = netdev_priv(dev);
        struct ehea_port_res *pr;
        int i, ret;


        snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
                 dev->name);

        ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
                                  ehea_qp_aff_irq_handler,
                                  0, port->int_aff_name, port);
        if (ret) {
                netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
                           port->qp_eq->attr.ist1);
                goto out_free_qpeq;
        }

        netif_info(port, ifup, dev,
                   "irq_handle 0x%X for function qp_aff_irq_handler registered\n",
                   port->qp_eq->attr.ist1);


        for (i = 0; i < port->num_def_qps; i++) {
                pr = &port->port_res[i];
                snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
                         "%s-queue%d", dev->name, i);
                ret = ibmebus_request_irq(pr->eq->attr.ist1,
                                          ehea_recv_irq_handler,
                                          0, pr->int_send_name, pr);
                if (ret) {
                        netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
                                   i, pr->eq->attr.ist1);
                        goto out_free_req;
                }
                netif_info(port, ifup, dev,
                           "irq_handle 0x%X for function ehea_queue_int %d registered\n",
                           pr->eq->attr.ist1, i);
        }
out:
        return ret;


out_free_req:
        while (--i >= 0) {
                u32 ist = port->port_res[i].eq->attr.ist1;
                ibmebus_free_irq(ist, &port->port_res[i]);
        }

out_free_qpeq:
        ibmebus_free_irq(port->qp_eq->attr.ist1, port);
        i = port->num_def_qps;

        goto out;

}

static void ehea_free_interrupts(struct net_device *dev)
{
        struct ehea_port *port = netdev_priv(dev);
        struct ehea_port_res *pr;
        int i;

        /* send */

        for (i = 0; i < port->num_def_qps; i++) {
                pr = &port->port_res[i];
                ibmebus_free_irq(pr->eq->attr.ist1, pr);
                netif_info(port, intr, dev,
                           "free send irq for res %d with handle 0x%X\n",
                           i, pr->eq->attr.ist1);
        }

        /* associated events */
        ibmebus_free_irq(port->qp_eq->attr.ist1, port);
        netif_info(port, intr, dev,
                   "associated event interrupt for handle 0x%X freed\n",
                   port->qp_eq->attr.ist1);
}

static int ehea_configure_port(struct ehea_port *port)
{
        int ret, i;
        u64 hret, mask;
        struct hcp_ehea_port_cb0 *cb0;

        ret = -ENOMEM;
        cb0 = (void *)get_zeroed_page(GFP_KERNEL);
        if (!cb0)
                goto out;

        cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
                     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
                     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
                     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
                     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
                                      PXLY_RC_VLAN_FILTER)
                     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

        for (i = 0; i < port->num_mcs; i++)
                if (use_mcs)
                        cb0->default_qpn_arr[i] =
                                port->port_res[i].qp->init_attr.qp_nr;
                else
                        cb0->default_qpn_arr[i] =
                                port->port_res[0].qp->init_attr.qp_nr;

        if (netif_msg_ifup(port))
                ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

        mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
             | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

        hret = ehea_h_modify_ehea_port(port->adapter->handle,
                                       port->logical_port_id,
                                       H_PORT_CB0, mask, cb0);
        ret = -EIO;
        if (hret != H_SUCCESS)
                goto out_free;

        ret = 0;

out_free:
        free_page((unsigned long)cb0);
out:
        return ret;
}

static int ehea_gen_smrs(struct ehea_port_res *pr)
{
        int ret;
        struct ehea_adapter *adapter = pr->port->adapter;

        ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
        if (ret)
                goto out;

        ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
        if (ret)
                goto out_free;

        return 0;

out_free:
        ehea_rem_mr(&pr->send_mr);
out:
        pr_err("Generating SMRS failed\n");
        return -EIO;
}

static int ehea_rem_smrs(struct ehea_port_res *pr)
{
        if ((ehea_rem_mr(&pr->send_mr)) ||
            (ehea_rem_mr(&pr->recv_mr)))
                return -EIO;
        else
                return 0;
}

static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
        int arr_size = sizeof(void *) * max_q_entries;

        q_skba->arr = vzalloc(arr_size);
        if (!q_skba->arr)
                return -ENOMEM;

        q_skba->len = max_q_entries;
        q_skba->index = 0;
        q_skba->os_skbs = 0;

        return 0;
}

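/*
 * Set up one port resource: an EQ, send and receive CQs, the QP with
 * its three receive queues, and the skb bookkeeping arrays. The traffic
 * counters are carried across the memset below so that resetting a port
 * does not zero the interface statistics.
 */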
1464static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
1465                              struct port_res_cfg *pr_cfg, int queue_token)
1466{
1467        struct ehea_adapter *adapter = port->adapter;
1468        enum ehea_eq_type eq_type = EHEA_EQ;
1469        struct ehea_qp_init_attr *init_attr = NULL;
1470        int ret = -EIO;
1471        u64 tx_bytes, rx_bytes, tx_packets, rx_packets;
1472
1473        tx_bytes = pr->tx_bytes;
1474        tx_packets = pr->tx_packets;
1475        rx_bytes = pr->rx_bytes;
1476        rx_packets = pr->rx_packets;
1477
1478        memset(pr, 0, sizeof(struct ehea_port_res));
1479
1480        pr->tx_bytes = tx_bytes;
1481        pr->tx_packets = tx_packets;
1482        pr->rx_bytes = rx_bytes;
1483        pr->rx_packets = rx_packets;
1484
1485        pr->port = port;
1486
1487        pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
1488        if (!pr->eq) {
1489                pr_err("create_eq failed (eq)\n");
1490                goto out_free;
1491        }
1492
1493        pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
1494                                     pr->eq->fw_handle,
1495                                     port->logical_port_id);
1496        if (!pr->recv_cq) {
1497                pr_err("create_cq failed (cq_recv)\n");
1498                goto out_free;
1499        }
1500
1501        pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
1502                                     pr->eq->fw_handle,
1503                                     port->logical_port_id);
1504        if (!pr->send_cq) {
1505                pr_err("create_cq failed (cq_send)\n");
1506                goto out_free;
1507        }
1508
1509        if (netif_msg_ifup(port))
1510                pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n",
1511                        pr->send_cq->attr.act_nr_of_cqes,
1512                        pr->recv_cq->attr.act_nr_of_cqes);
1513
1514        init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
1515        if (!init_attr) {
1516                ret = -ENOMEM;
1517                pr_err("no mem for ehea_qp_init_attr\n");
1518                goto out_free;
1519        }
1520
1521        init_attr->low_lat_rq1 = 1;
1522        init_attr->signalingtype = 1;   /* generate CQE if specified in WQE */
1523        init_attr->rq_count = 3;
1524        init_attr->qp_token = queue_token;
1525        init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
1526        init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
1527        init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
1528        init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
1529        init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
1530        init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
1531        init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
1532        init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
1533        init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
1534        init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
1535        init_attr->port_nr = port->logical_port_id;
1536        init_attr->send_cq_handle = pr->send_cq->fw_handle;
1537        init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
1538        init_attr->aff_eq_handle = port->qp_eq->fw_handle;
1539
1540        pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
1541        if (!pr->qp) {
1542                pr_err("create_qp failed\n");
1543                ret = -EIO;
1544                goto out_free;
1545        }
1546
1547        if (netif_msg_ifup(port))
1548                pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n",
1549                        init_attr->qp_nr,
1550                        init_attr->act_nr_send_wqes,
1551                        init_attr->act_nr_rwqes_rq1,
1552                        init_attr->act_nr_rwqes_rq2,
1553                        init_attr->act_nr_rwqes_rq3);
1554
1555        pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;
1556
1557        ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
1558        ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
1559        ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
1560        ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
1561        if (ret)
1562                goto out_free;
1563
1564        pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
1565        if (ehea_gen_smrs(pr) != 0) {
1566                ret = -EIO;
1567                goto out_free;
1568        }
1569
1570        atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);
1571
1572        kfree(init_attr);
1573
1574        netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);
1575
1576        ret = 0;
1577        goto out;
1578
1579out_free:
1580        kfree(init_attr);
1581        vfree(pr->sq_skba.arr);
1582        vfree(pr->rq1_skba.arr);
1583        vfree(pr->rq2_skba.arr);
1584        vfree(pr->rq3_skba.arr);
1585        ehea_destroy_qp(pr->qp);
1586        ehea_destroy_cq(pr->send_cq);
1587        ehea_destroy_cq(pr->recv_cq);
1588        ehea_destroy_eq(pr->eq);
1589out:
1590        return ret;
1591}
1592
1593static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
1594{
1595        int ret, i;
1596
1597        if (pr->qp)
1598                netif_napi_del(&pr->napi);
1599
1600        ret = ehea_destroy_qp(pr->qp);
1601
1602        if (!ret) {
1603                ehea_destroy_cq(pr->send_cq);
1604                ehea_destroy_cq(pr->recv_cq);
1605                ehea_destroy_eq(pr->eq);
1606
1607                for (i = 0; i < pr->rq1_skba.len; i++)
1608                        if (pr->rq1_skba.arr[i])
1609                                dev_kfree_skb(pr->rq1_skba.arr[i]);
1610
1611                for (i = 0; i < pr->rq2_skba.len; i++)
1612                        if (pr->rq2_skba.arr[i])
1613                                dev_kfree_skb(pr->rq2_skba.arr[i]);
1614
1615                for (i = 0; i < pr->rq3_skba.len; i++)
1616                        if (pr->rq3_skba.arr[i])
1617                                dev_kfree_skb(pr->rq3_skba.arr[i]);
1618
1619                for (i = 0; i < pr->sq_skba.len; i++)
1620                        if (pr->sq_skba.arr[i])
1621                                dev_kfree_skb(pr->sq_skba.arr[i]);
1622
1623                vfree(pr->rq1_skba.arr);
1624                vfree(pr->rq2_skba.arr);
1625                vfree(pr->rq3_skba.arr);
1626                vfree(pr->sq_skba.arr);
1627                ret = ehea_rem_smrs(pr);
1628        }
1629        return ret;
1630}
1631
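    /*
     * Fill the immediate-data area of a type-2 SWQE. For TSO only the
     * protocol headers are copied; any linear data beyond the immediate
     * area is referenced through the first scatter-gather entry.
     */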
1632static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe,
1633                                  u32 lkey)
1634{
1635        int skb_data_size = skb_headlen(skb);
1636        u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
1637        struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
1638        unsigned int immediate_len = SWQE2_MAX_IMM;
1639
1640        swqe->descriptors = 0;
1641
1642        if (skb_is_gso(skb)) {
1643                swqe->tx_control |= EHEA_SWQE_TSO;
1644                swqe->mss = skb_shinfo(skb)->gso_size;
1645                /*
1646                 * For TSO packets we only copy the headers into the
1647                 * immediate area.
1648                 */
1649                immediate_len = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
1650        }
1651
1652        if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) {
1653                skb_copy_from_linear_data(skb, imm_data, immediate_len);
1654                swqe->immediate_data_length = immediate_len;
1655
1656                if (skb_data_size > immediate_len) {
1657                        sg1entry->l_key = lkey;
1658                        sg1entry->len = skb_data_size - immediate_len;
1659                        sg1entry->vaddr =
1660                                ehea_map_vaddr(skb->data + immediate_len);
1661                        swqe->descriptors++;
1662                }
1663        } else {
1664                skb_copy_from_linear_data(skb, imm_data, skb_data_size);
1665                swqe->immediate_data_length = skb_data_size;
1666        }
1667}
1668
1669static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
1670                                    struct ehea_swqe *swqe, u32 lkey)
1671{
1672        struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
1673        skb_frag_t *frag;
1674        int nfrags, sg1entry_contains_frag_data, i;
1675
1676        nfrags = skb_shinfo(skb)->nr_frags;
1677        sg1entry = &swqe->u.immdata_desc.sg_entry;
1678        sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
1679        sg1entry_contains_frag_data = 0;
1680
1681        write_swqe2_immediate(skb, swqe, lkey);
1682
1683        /* write descriptors */
1684        if (nfrags > 0) {
1685                if (swqe->descriptors == 0) {
1686                        /* sg1entry not yet used */
1687                        frag = &skb_shinfo(skb)->frags[0];
1688
1689                        /* copy sg1entry data */
1690                        sg1entry->l_key = lkey;
1691                        sg1entry->len = skb_frag_size(frag);
1692                        sg1entry->vaddr =
1693                                ehea_map_vaddr(skb_frag_address(frag));
1694                        swqe->descriptors++;
1695                        sg1entry_contains_frag_data = 1;
1696                }
1697
1698                for (i = sg1entry_contains_frag_data; i < nfrags; i++) {
1700                        frag = &skb_shinfo(skb)->frags[i];
1701                        sgentry = &sg_list[i - sg1entry_contains_frag_data];
1702
1703                        sgentry->l_key = lkey;
1704                        sgentry->len = skb_frag_size(frag);
1705                        sgentry->vaddr = ehea_map_vaddr(skb_frag_address(frag));
1706                        swqe->descriptors++;
1707                }
1708        }
1709}
1710
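    /*
     * (De)register the port's broadcast address, once for untagged and
     * once for all VLAN-tagged traffic; hcallid selects H_REG_BCMC or
     * H_DEREG_BCMC.
     */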
1711static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
1712{
1713        int ret = 0;
1714        u64 hret;
1715        u8 reg_type;
1716
1717        /* De/Register untagged packets */
1718        reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
1719        hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1720                                     port->logical_port_id,
1721                                     reg_type, port->mac_addr, 0, hcallid);
1722        if (hret != H_SUCCESS) {
1723                pr_err("%sregistering bc address failed (untagged)\n",
1724                       hcallid == H_REG_BCMC ? "" : "de");
1725                ret = -EIO;
1726                goto out_herr;
1727        }
1728
1729        /* De/Register VLAN packets */
1730        reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
1731        hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1732                                     port->logical_port_id,
1733                                     reg_type, port->mac_addr, 0, hcallid);
1734        if (hret != H_SUCCESS) {
1735                pr_err("%sregistering bc address failed (vlan)\n",
1736                       hcallid == H_REG_BCMC ? "" : "de");
1737                ret = -EIO;
1738        }
1739out_herr:
1740        return ret;
1741}
1742
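    /*
     * Change the port MAC address: write it into CB0 (the six address
     * bytes are shifted down into the low 48 bits of a u64), then move
     * the broadcast registrations to the new address if the port is up.
     */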
1743static int ehea_set_mac_addr(struct net_device *dev, void *sa)
1744{
1745        struct ehea_port *port = netdev_priv(dev);
1746        struct sockaddr *mac_addr = sa;
1747        struct hcp_ehea_port_cb0 *cb0;
1748        int ret;
1749        u64 hret;
1750
1751        if (!is_valid_ether_addr(mac_addr->sa_data)) {
1752                ret = -EADDRNOTAVAIL;
1753                goto out;
1754        }
1755
1756        cb0 = (void *)get_zeroed_page(GFP_KERNEL);
1757        if (!cb0) {
1758                pr_err("no mem for cb0\n");
1759                ret = -ENOMEM;
1760                goto out;
1761        }
1762
1763        memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);
1764
1765        cb0->port_mac_addr = cb0->port_mac_addr >> 16;
1766
1767        hret = ehea_h_modify_ehea_port(port->adapter->handle,
1768                                       port->logical_port_id, H_PORT_CB0,
1769                                       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
1770        if (hret != H_SUCCESS) {
1771                ret = -EIO;
1772                goto out_free;
1773        }
1774
1775        memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
1776
1777        /* Deregister old MAC in pHYP */
1778        if (port->state == EHEA_PORT_UP) {
1779                ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
1780                if (ret)
1781                        goto out_upregs;
1782        }
1783
1784        port->mac_addr = cb0->port_mac_addr << 16;
1785
1786        /* Register new MAC in pHYP */
1787        if (port->state == EHEA_PORT_UP) {
1788                ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
1789                if (ret)
1790                        goto out_upregs;
1791        }
1792
1793        ret = 0;
1794
1795out_upregs:
1796        ehea_update_bcmc_registrations();
1797out_free:
1798        free_page((unsigned long)cb0);
1799out:
1800        return ret;
1801}
1802
1803static void ehea_promiscuous_error(u64 hret, int enable)
1804{
1805        if (hret == H_AUTHORITY)
1806                pr_info("Hypervisor denied %sabling promiscuous mode\n",
1807                        enable == 1 ? "en" : "dis");
1808        else
1809                pr_err("failed %sabling promiscuous mode\n",
1810                       enable == 1 ? "en" : "dis");
1811}
1812
1813static void ehea_promiscuous(struct net_device *dev, int enable)
1814{
1815        struct ehea_port *port = netdev_priv(dev);
1816        struct hcp_ehea_port_cb7 *cb7;
1817        u64 hret;
1818
1819        if (enable == port->promisc)
1820                return;
1821
1822        cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
1823        if (!cb7) {
1824                pr_err("no mem for cb7\n");
1825                goto out;
1826        }
1827
1828        /* Modify Pxs_DUCQPN in CB7 */
1829        cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;
1830
1831        hret = ehea_h_modify_ehea_port(port->adapter->handle,
1832                                       port->logical_port_id,
1833                                       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
1834        if (hret) {
1835                ehea_promiscuous_error(hret, enable);
1836                goto out;
1837        }
1838
1839        port->promisc = enable;
1840out:
1841        free_page((unsigned long)cb7);
1842}
1843
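    /*
     * (De)register one multicast MAC for untagged and for all VLAN-tagged
     * traffic; an address of 0 selects the "scope all" entry used to
     * implement ALLMULTI.
     */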
1844static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
1845                                     u32 hcallid)
1846{
1847        u64 hret;
1848        u8 reg_type;
1849
1850        reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_UNTAGGED;
1851        if (mc_mac_addr == 0)
1852                reg_type |= EHEA_BCMC_SCOPE_ALL;
1853
1854        hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1855                                     port->logical_port_id,
1856                                     reg_type, mc_mac_addr, 0, hcallid);
1857        if (hret)
1858                goto out;
1859
1860        reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_VLANID_ALL;
1861        if (mc_mac_addr == 0)
1862                reg_type |= EHEA_BCMC_SCOPE_ALL;
1863
1864        hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1865                                     port->logical_port_id,
1866                                     reg_type, mc_mac_addr, 0, hcallid);
1867out:
1868        return hret;
1869}
1870
1871static int ehea_drop_multicast_list(struct net_device *dev)
1872{
1873        struct ehea_port *port = netdev_priv(dev);
1874        struct ehea_mc_list *mc_entry = port->mc_list;
1875        struct list_head *pos;
1876        struct list_head *temp;
1877        int ret = 0;
1878        u64 hret;
1879
1880        list_for_each_safe(pos, temp, &(port->mc_list->list)) {
1881                mc_entry = list_entry(pos, struct ehea_mc_list, list);
1882
1883                hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
1884                                                 H_DEREG_BCMC);
1885                if (hret) {
1886                        pr_err("failed deregistering mcast MAC\n");
1887                        ret = -EIO;
1888                }
1889
1890                list_del(pos);
1891                kfree(mc_entry);
1892        }
1893        return ret;
1894}
1895
1896static void ehea_allmulti(struct net_device *dev, int enable)
1897{
1898        struct ehea_port *port = netdev_priv(dev);
1899        u64 hret;
1900
1901        if (!port->allmulti) {
1902                if (enable) {
1903                        /* Enable ALLMULTI */
1904                        ehea_drop_multicast_list(dev);
1905                        hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
1906                        if (!hret)
1907                                port->allmulti = 1;
1908                        else
1909                                netdev_err(dev,
1910                                           "failed enabling IFF_ALLMULTI\n");
1911                }
1912        } else {
1913                if (!enable) {
1914                        /* Disable ALLMULTI */
1915                        hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
1916                        if (!hret)
1917                                port->allmulti = 0;
1918                        else
1919                                netdev_err(dev,
1920                                           "failed disabling IFF_ALLMULTI\n");
1921                }
1922        }
1923}
1924
1925static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
1926{
1927        struct ehea_mc_list *ehea_mcl_entry;
1928        u64 hret;
1929
1930        ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
1931        if (!ehea_mcl_entry)
1932                return;
1933
1934        INIT_LIST_HEAD(&ehea_mcl_entry->list);
1935
1936        memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);
1937
1938        hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
1939                                         H_REG_BCMC);
1940        if (!hret) {
1941                list_add(&ehea_mcl_entry->list, &port->mc_list->list);
1942        } else {
1943                pr_err("failed registering mcast MAC\n");
1944                kfree(ehea_mcl_entry);
1945        }
1946}
1947
1948static void ehea_set_multicast_list(struct net_device *dev)
1949{
1950        struct ehea_port *port = netdev_priv(dev);
1951        struct netdev_hw_addr *ha;
1952        int ret;
1953
1954        ehea_promiscuous(dev, !!(dev->flags & IFF_PROMISC));
1955
1956        if (dev->flags & IFF_ALLMULTI) {
1957                ehea_allmulti(dev, 1);
1958                goto out;
1959        }
1960        ehea_allmulti(dev, 0);
1961
1962        if (!netdev_mc_empty(dev)) {
1963                ret = ehea_drop_multicast_list(dev);
1964                if (ret) {
1965                        /* Dropping the current multicast list failed.
1966                         * Enabling ALL_MULTI is the best we can do.
1967                         */
1968                        ehea_allmulti(dev, 1);
1969                }
1970
1971                if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
1972                        pr_info("Mcast registration limit reached (0x%llx). Use ALLMULTI!\n",
1973                                port->adapter->max_mc_mac);
1974                        goto out;
1975                }
1976
1977                netdev_for_each_mc_addr(ha, dev)
1978                        ehea_add_multicast_entry(port, ha->addr);
1979
1980        }
1981out:
1982        ehea_update_bcmc_registrations();
1983}
1984
1985static int ehea_change_mtu(struct net_device *dev, int new_mtu)
1986{
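            /* 68 is the minimum MTU an IPv4 host must support (RFC 791) */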
1987        if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
1988                return -EINVAL;
1989        dev->mtu = new_mtu;
1990        return 0;
1991}
1992
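    /*
     * SWQE setup shared by both transmit paths: always request CRC
     * generation and, for IPv4, program the IP header bounds plus the
     * TCP/UDP checksum field offset so the hardware can offload
     * CHECKSUM_PARTIAL packets.
     */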
1993static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
1994{
1995        swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;
1996
1997        if (vlan_get_protocol(skb) != htons(ETH_P_IP))
1998                return;
1999
2000        if (skb->ip_summed == CHECKSUM_PARTIAL)
2001                swqe->tx_control |= EHEA_SWQE_IP_CHECKSUM;
2002
2003        swqe->ip_start = skb_network_offset(skb);
2004        swqe->ip_end = swqe->ip_start + ip_hdrlen(skb) - 1;
2005
2006        switch (ip_hdr(skb)->protocol) {
2007        case IPPROTO_UDP:
2008                if (skb->ip_summed == CHECKSUM_PARTIAL)
2009                        swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
2010
2011                swqe->tcp_offset = swqe->ip_end + 1 +
2012                                   offsetof(struct udphdr, check);
2013                break;
2014
2015        case IPPROTO_TCP:
2016                if (skb->ip_summed == CHECKSUM_PARTIAL)
2017                        swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
2018
2019                swqe->tcp_offset = swqe->ip_end + 1 +
2020                                   offsetof(struct tcphdr, check);
2021                break;
2022        }
2023}
2024
2025static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
2026                       struct ehea_swqe *swqe, u32 lkey)
2027{
2028        swqe->tx_control |= EHEA_SWQE_DESCRIPTORS_PRESENT;
2029
2030        xmit_common(skb, swqe);
2031
2032        write_swqe2_data(skb, dev, swqe, lkey);
2033}
2034
2035static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
2036                       struct ehea_swqe *swqe)
2037{
2038        u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
2039
2040        xmit_common(skb, swqe);
2041
2042        if (!skb->data_len)
2043                skb_copy_from_linear_data(skb, imm_data, skb->len);
2044        else
2045                skb_copy_bits(skb, 0, imm_data, skb->len);
2046
2047        swqe->immediate_data_length = skb->len;
2048        dev_consume_skb_any(skb);
2049}
2050
2051static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
2052{
2053        struct ehea_port *port = netdev_priv(dev);
2054        struct ehea_swqe *swqe;
2055        u32 lkey;
2056        int swqe_index;
2057        struct ehea_port_res *pr;
2058        struct netdev_queue *txq;
2059
2060        pr = &port->port_res[skb_get_queue_mapping(skb)];
2061        txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2062
2063        swqe = ehea_get_swqe(pr->qp, &swqe_index);
2064        memset(swqe, 0, SWQE_HEADER_SIZE);
2065        atomic_dec(&pr->swqe_avail);
2066
2067        if (skb_vlan_tag_present(skb)) {
2068                swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
2069                swqe->vlan_tag = skb_vlan_tag_get(skb);
2070        }
2071
2072        pr->tx_packets++;
2073        pr->tx_bytes += skb->len;
2074
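            /*
             * Small frames go out as type-3 SWQEs carrying the whole packet
             * as immediate data, so the skb can be consumed right away.
             * Larger frames use type-2 SWQEs with descriptors; their skbs
             * are parked in sq_skba until the completion arrives.
             */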
2075        if (skb->len <= SWQE3_MAX_IMM) {
2076                u32 sig_iv = port->sig_comp_iv;
2077                u32 swqe_num = pr->swqe_id_counter;
2078                ehea_xmit3(skb, dev, swqe);
2079                swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
2080                        | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
2081                if (pr->swqe_ll_count >= (sig_iv - 1)) {
2082                        swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
2083                                                      sig_iv);
2084                        swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
2085                        pr->swqe_ll_count = 0;
2086                } else
2087                        pr->swqe_ll_count += 1;
2088        } else {
2089                swqe->wr_id =
2090                        EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
2091                      | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
2092                      | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
2093                      | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
2094                pr->sq_skba.arr[pr->sq_skba.index] = skb;
2095
2096                pr->sq_skba.index++;
2097                pr->sq_skba.index &= (pr->sq_skba.len - 1);
2098
2099                lkey = pr->send_mr.lkey;
2100                ehea_xmit2(skb, dev, swqe, lkey);
2101                swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
2102        }
2103        pr->swqe_id_counter += 1;
2104
2105        netif_info(port, tx_queued, dev,
2106                   "post swqe on QP %d\n", pr->qp->init_attr.qp_nr);
2107        if (netif_msg_tx_queued(port))
2108                ehea_dump(swqe, 512, "swqe");
2109
2110        if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
2111                netif_tx_stop_queue(txq);
2112                swqe->tx_control |= EHEA_SWQE_PURGE;
2113        }
2114
2115        ehea_post_swqe(pr->qp, swqe);
2116
2117        if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
2118                pr->p_stats.queue_stopped++;
2119                netif_tx_stop_queue(txq);
2120        }
2121
2122        return NETDEV_TX_OK;
2123}
2124
2125static int ehea_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
2126{
2127        struct ehea_port *port = netdev_priv(dev);
2128        struct ehea_adapter *adapter = port->adapter;
2129        struct hcp_ehea_port_cb1 *cb1;
2130        int index;
2131        u64 hret;
2132        int err = 0;
2133
2134        cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2135        if (!cb1) {
2136                pr_err("no mem for cb1\n");
2137                err = -ENOMEM;
2138                goto out;
2139        }
2140
2141        hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2142                                      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2143        if (hret != H_SUCCESS) {
2144                pr_err("query_ehea_port failed\n");
2145                err = -EINVAL;
2146                goto out;
2147        }
2148
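            /*
             * vlan_filter[] holds one bit per VLAN ID in MSB-first order:
             * VID n maps to bit 63 - (n % 64) of word n / 64.
             */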
2149        index = (vid / 64);
2150        cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));
2151
2152        hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2153                                       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2154        if (hret != H_SUCCESS) {
2155                pr_err("modify_ehea_port failed\n");
2156                err = -EINVAL;
2157        }
2158out:
2159        free_page((unsigned long)cb1);
2160        return err;
2161}
2162
2163static int ehea_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
2164{
2165        struct ehea_port *port = netdev_priv(dev);
2166        struct ehea_adapter *adapter = port->adapter;
2167        struct hcp_ehea_port_cb1 *cb1;
2168        int index;
2169        u64 hret;
2170        int err = 0;
2171
2172        cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2173        if (!cb1) {
2174                pr_err("no mem for cb1\n");
2175                err = -ENOMEM;
2176                goto out;
2177        }
2178
2179        hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2180                                      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2181        if (hret != H_SUCCESS) {
2182                pr_err("query_ehea_port failed\n");
2183                err = -EINVAL;
2184                goto out;
2185        }
2186
2187        index = (vid / 64);
2188        cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));
2189
2190        hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2191                                       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2192        if (hret != H_SUCCESS) {
2193                pr_err("modify_ehea_port failed\n");
2194                err = -EINVAL;
2195        }
2196out:
2197        free_page((unsigned long)cb1);
2198        return err;
2199}
2200
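    /*
     * Walk a queue pair through its activation sequence: INITIALIZED,
     * then ENABLED, then RDY2SND, re-reading the QP context before each
     * H_MODIFY_HEA_QP step.
     */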
2201static int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
2202{
2203        int ret = -EIO;
2204        u64 hret;
2205        u16 dummy16 = 0;
2206        u64 dummy64 = 0;
2207        struct hcp_modify_qp_cb0 *cb0;
2208
2209        cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2210        if (!cb0) {
2211                ret = -ENOMEM;
2212                goto out;
2213        }
2214
2215        hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2216                                    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2217        if (hret != H_SUCCESS) {
2218                pr_err("query_ehea_qp failed (1)\n");
2219                goto out;
2220        }
2221
2222        cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
2223        hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2224                                     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2225                                     &dummy64, &dummy64, &dummy16, &dummy16);
2226        if (hret != H_SUCCESS) {
2227                pr_err("modify_ehea_qp failed (1)\n");
2228                goto out;
2229        }
2230
2231        hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2232                                    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2233        if (hret != H_SUCCESS) {
2234                pr_err("query_ehea_qp failed (2)\n");
2235                goto out;
2236        }
2237
2238        cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
2239        hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2240                                     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2241                                     &dummy64, &dummy64, &dummy16, &dummy16);
2242        if (hret != H_SUCCESS) {
2243                pr_err("modify_ehea_qp failed (2)\n");
2244                goto out;
2245        }
2246
2247        hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2248                                    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2249        if (hret != H_SUCCESS) {
2250                pr_err("query_ehea_qp failed (3)\n");
2251                goto out;
2252        }
2253
2254        cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
2255        hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2256                                     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2257                                     &dummy64, &dummy64, &dummy16, &dummy16);
2258        if (hret != H_SUCCESS) {
2259                pr_err("modify_ehea_qp failed (3)\n");
2260                goto out;
2261        }
2262
2263        hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2264                                    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2265        if (hret != H_SUCCESS) {
2266                pr_err("query_ehea_qp failed (4)\n");
2267                goto out;
2268        }
2269
2270        ret = 0;
2271out:
2272        free_page((unsigned long)cb0);
2273        return ret;
2274}
2275
2276static int ehea_port_res_setup(struct ehea_port *port, int def_qps)
2277{
2278        int ret, i;
2279        struct port_res_cfg pr_cfg;
2280        enum ehea_eq_type eq_type = EHEA_EQ;
2281
2282        port->qp_eq = ehea_create_eq(port->adapter, eq_type,
2283                                   EHEA_MAX_ENTRIES_EQ, 1);
2284        if (!port->qp_eq) {
2285                ret = -EINVAL;
2286                pr_err("ehea_create_eq failed (qp_eq)\n");
2287                goto out_kill_eq;
2288        }
2289
2290        pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
2291        pr_cfg.max_entries_scq = sq_entries * 2;
2292        pr_cfg.max_entries_sq = sq_entries;
2293        pr_cfg.max_entries_rq1 = rq1_entries;
2294        pr_cfg.max_entries_rq2 = rq2_entries;
2295        pr_cfg.max_entries_rq3 = rq3_entries;
2296
2304        for (i = 0; i < def_qps; i++) {
2305                ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
2306                if (ret)
2307                        goto out_clean_pr;
2308        }
2315
2316        return 0;
2317
2318out_clean_pr:
2319        while (--i >= 0)
2320                ehea_clean_portres(port, &port->port_res[i]);
2321
2322out_kill_eq:
2323        ehea_destroy_eq(port->qp_eq);
2324        return ret;
2325}
2326
2327static int ehea_clean_all_portres(struct ehea_port *port)
2328{
2329        int ret = 0;
2330        int i;
2331
2332        for (i = 0; i < port->num_def_qps; i++)
2333                ret |= ehea_clean_portres(port, &port->port_res[i]);
2334
2335        ret |= ehea_destroy_eq(port->qp_eq);
2336
2337        return ret;
2338}
2339
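    /*
     * The adapter-wide memory region is shared by all ports of an
     * adapter: it is registered before the first port becomes active
     * and removed again after the last one goes away.
     */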
2340static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
2341{
2342        if (adapter->active_ports)
2343                return;
2344
2345        ehea_rem_mr(&adapter->mr);
2346}
2347
2348static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
2349{
2350        if (adapter->active_ports)
2351                return 0;
2352
2353        return ehea_reg_kernel_mr(adapter, &adapter->mr);
2354}
2355
2356static int ehea_up(struct net_device *dev)
2357{
2358        int ret, i;
2359        struct ehea_port *port = netdev_priv(dev);
2360
2361        if (port->state == EHEA_PORT_UP)
2362                return 0;
2363
2364        ret = ehea_port_res_setup(port, port->num_def_qps);
2365        if (ret) {
2366                netdev_err(dev, "port_res_failed\n");
2367                goto out;
2368        }
2369
2370        /* Set default QP for this port */
2371        ret = ehea_configure_port(port);
2372        if (ret) {
2373                netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret);
2374                goto out_clean_pr;
2375        }
2376
2377        ret = ehea_reg_interrupts(dev);
2378        if (ret) {
2379                netdev_err(dev, "reg_interrupts failed. ret:%d\n", ret);
2380                goto out_clean_pr;
2381        }
2382
2383        for (i = 0; i < port->num_def_qps; i++) {
2384                ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
2385                if (ret) {
2386                        netdev_err(dev, "activate_qp failed\n");
2387                        goto out_free_irqs;
2388                }
2389        }
2390
2391        for (i = 0; i < port->num_def_qps; i++) {
2392                ret = ehea_fill_port_res(&port->port_res[i]);
2393                if (ret) {
2394                        netdev_err(dev, "ehea_fill_port_res failed\n");
2395                        goto out_free_irqs;
2396                }
2397        }
2398
2399        ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
2400        if (ret) {
2401                ret = -EIO;
2402                goto out_free_irqs;
2403        }
2404
2405        port->state = EHEA_PORT_UP;
2406
2407        ret = 0;
2408        goto out;
2409
2410out_free_irqs:
2411        ehea_free_interrupts(dev);
2412
2413out_clean_pr:
2414        ehea_clean_all_portres(port);
2415out:
2416        if (ret)
2417                netdev_info(dev, "Failed starting. ret=%i\n", ret);
2418
2419        ehea_update_bcmc_registrations();
2420        ehea_update_firmware_handles();
2421
2422        return ret;
2423}
2424
2425static void port_napi_disable(struct ehea_port *port)
2426{
2427        int i;
2428
2429        for (i = 0; i < port->num_def_qps; i++)
2430                napi_disable(&port->port_res[i].napi);
2431}
2432
2433static void port_napi_enable(struct ehea_port *port)
2434{
2435        int i;
2436
2437        for (i = 0; i < port->num_def_qps; i++)
2438                napi_enable(&port->port_res[i].napi);
2439}
2440
2441static int ehea_open(struct net_device *dev)
2442{
2443        int ret;
2444        struct ehea_port *port = netdev_priv(dev);
2445
2446        mutex_lock(&port->port_lock);
2447
2448        netif_info(port, ifup, dev, "enabling port\n");
2449
2450        ret = ehea_up(dev);
2451        if (!ret) {
2452                port_napi_enable(port);
2453                netif_tx_start_all_queues(dev);
2454        }
2455
2456        mutex_unlock(&port->port_lock);
2457        schedule_delayed_work(&port->stats_work,
2458                              round_jiffies_relative(msecs_to_jiffies(1000)));
2459
2460        return ret;
2461}
2462
2463static int ehea_down(struct net_device *dev)
2464{
2465        int ret;
2466        struct ehea_port *port = netdev_priv(dev);
2467
2468        if (port->state == EHEA_PORT_DOWN)
2469                return 0;
2470
2471        ehea_drop_multicast_list(dev);
2472        ehea_allmulti(dev, 0);
2473        ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
2474
2475        ehea_free_interrupts(dev);
2476
2477        port->state = EHEA_PORT_DOWN;
2478
2479        ehea_update_bcmc_registrations();
2480
2481        ret = ehea_clean_all_portres(port);
2482        if (ret)
2483                netdev_info(dev, "Failed freeing resources. ret=%i\n", ret);
2484
2485        ehea_update_firmware_handles();
2486
2487        return ret;
2488}
2489
2490static int ehea_stop(struct net_device *dev)
2491{
2492        int ret;
2493        struct ehea_port *port = netdev_priv(dev);
2494
2495        netif_info(port, ifdown, dev, "disabling port\n");
2496
2497        set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
2498        cancel_work_sync(&port->reset_task);
2499        cancel_delayed_work_sync(&port->stats_work);
2500        mutex_lock(&port->port_lock);
2501        netif_tx_stop_all_queues(dev);
2502        port_napi_disable(port);
2503        ret = ehea_down(dev);
2504        mutex_unlock(&port->port_lock);
2505        clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
2506        return ret;
2507}
2508
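    /*
     * Tag every send WQE with the purge flag so pending work requests
     * are discarded rather than sent while the port is quiesced (e.g.
     * for memory re-registration).
     */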
2509static void ehea_purge_sq(struct ehea_qp *orig_qp)
2510{
2511        struct ehea_qp qp = *orig_qp;
2512        struct ehea_qp_init_attr *init_attr = &qp.init_attr;
2513        struct ehea_swqe *swqe;
2514        int wqe_index;
2515        int i;
2516
2517        for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
2518                swqe = ehea_get_swqe(&qp, &wqe_index);
2519                swqe->tx_control |= EHEA_SWQE_PURGE;
2520        }
2521}
2522
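    /*
     * Wait up to 100 ms per queue for in-flight send WQEs to complete
     * before the queue pairs are stopped.
     */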
2523static void ehea_flush_sq(struct ehea_port *port)
2524{
2525        int i;
2526
2527        for (i = 0; i < port->num_def_qps; i++) {
2528                struct ehea_port_res *pr = &port->port_res[i];
2529                int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
2530                int ret;
2531
2532                ret = wait_event_timeout(port->swqe_avail_wq,
2533                         atomic_read(&pr->swqe_avail) >= swqe_max,
2534                         msecs_to_jiffies(100));
2535
2536                if (!ret) {
2537                        pr_err("WARNING: sq not flushed completely\n");
2538                        break;
2539                }
2540        }
2541}
2542
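    /*
     * Quiesce every queue pair of the port: purge its send queue, clear
     * the enable bit in the QP control register and deregister the
     * shared memory regions so the adapter MR can be replaced.
     */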
2543static int ehea_stop_qps(struct net_device *dev)
2544{
2545        struct ehea_port *port = netdev_priv(dev);
2546        struct ehea_adapter *adapter = port->adapter;
2547        struct hcp_modify_qp_cb0 *cb0;
2548        int ret = -EIO;
2549        int dret;
2550        int i;
2551        u64 hret;
2552        u64 dummy64 = 0;
2553        u16 dummy16 = 0;
2554
2555        cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2556        if (!cb0) {
2557                ret = -ENOMEM;
2558                goto out;
2559        }
2560
2561        for (i = 0; i < (port->num_def_qps); i++) {
2562                struct ehea_port_res *pr =  &port->port_res[i];
2563                struct ehea_qp *qp = pr->qp;
2564
2565                /* Purge send queue */
2566                ehea_purge_sq(qp);
2567
2568                /* Disable queue pair */
2569                hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2570                                            EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2571                                            cb0);
2572                if (hret != H_SUCCESS) {
2573                        pr_err("query_ehea_qp failed (1)\n");
2574                        goto out;
2575                }
2576
2577                cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
2578                cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;
2579
2580                hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2581                                             EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
2582                                                            1), cb0, &dummy64,
2583                                             &dummy64, &dummy16, &dummy16);
2584                if (hret != H_SUCCESS) {
2585                        pr_err("modify_ehea_qp failed (1)\n");
2586                        goto out;
2587                }
2588
2589                hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2590                                            EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2591                                            cb0);
2592                if (hret != H_SUCCESS) {
2593                        pr_err("query_ehea_qp failed (2)\n");
2594                        goto out;
2595                }
2596
2597                /* deregister shared memory regions */
2598                dret = ehea_rem_smrs(pr);
2599                if (dret) {
2600                        pr_err("unreg shared memory region failed\n");
2601                        goto out;
2602                }
2603        }
2604
2605        ret = 0;
2606out:
2607        free_page((unsigned long)cb0);
2608
2609        return ret;
2610}
2611
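    /*
     * After the adapter memory region has been re-registered, rewrite the
     * pending RQ2/RQ3 work requests with the new l_key and the remapped
     * addresses of their receive buffers.
     */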
2612static void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
2613{
2614        struct ehea_qp qp = *orig_qp;
2615        struct ehea_qp_init_attr *init_attr = &qp.init_attr;
2616        struct ehea_rwqe *rwqe;
2617        struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
2618        struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
2619        struct sk_buff *skb;
2620        u32 lkey = pr->recv_mr.lkey;
2621        int i;
2622        int index;
2625
2626        for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
2627                rwqe = ehea_get_next_rwqe(&qp, 2);
2628                rwqe->sg_list[0].l_key = lkey;
2629                index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2630                skb = skba_rq2[index];
2631                if (skb)
2632                        rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
2633        }
2634
2635        for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
2636                rwqe = ehea_get_next_rwqe(&qp, 3);
2637                rwqe->sg_list[0].l_key = lkey;
2638                index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2639                skb = skba_rq3[index];
2640                if (skb)
2641                        rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
2642        }
2643}
2644
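    /*
     * Counterpart of ehea_stop_qps(): recreate the shared memory regions,
     * rewrite the pending receive WQEs, re-enable each queue pair and
     * refill all three receive queues.
     */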
2645static int ehea_restart_qps(struct net_device *dev)
2646{
2647        struct ehea_port *port = netdev_priv(dev);
2648        struct ehea_adapter *adapter = port->adapter;
2649        int ret = 0;
2650        int i;
2651
2652        struct hcp_modify_qp_cb0 *cb0;
2653        u64 hret;
2654        u64 dummy64 = 0;
2655        u16 dummy16 = 0;
2656
2657        cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2658        if (!cb0) {
2659                ret = -ENOMEM;
2660                goto out;
2661        }
2662
2663        for (i = 0; i < (port->num_def_qps); i++) {
2664                struct ehea_port_res *pr =  &port->port_res[i];
2665                struct ehea_qp *qp = pr->qp;
2666
2667                ret = ehea_gen_smrs(pr);
2668                if (ret) {
2669                        netdev_err(dev, "creation of shared memory regions failed\n");
2670                        goto out;
2671                }
2672
2673                ehea_update_rqs(qp, pr);
2674
2675                /* Enable queue pair */
2676                hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2677                                            EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2678                                            cb0);
2679                if (hret != H_SUCCESS) {
2680                        netdev_err(dev, "query_ehea_qp failed (1)\n");
2681                        goto out;
2682                }
2683
2684                cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
2685                cb0->qp_ctl_reg |= H_QP_CR_ENABLED;
2686
2687                hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2688                                             EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
2689                                                            1), cb0, &dummy64,
2690                                             &dummy64, &dummy16, &dummy16);
2691                if (hret != H_SUCCESS) {
2692                        netdev_err(dev, "modify_ehea_qp failed (1)\n");
2693                        goto out;
2694                }
2695
2696                hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2697                                            EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2698                                            cb0);
2699                if (hret != H_SUCCESS) {
2700                        netdev_err(dev, "query_ehea_qp failed (2)\n");
2701                        goto out;
2702                }
2703
2704                /* refill entire queue */
2705                ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
2706                ehea_refill_rq2(pr, 0);
2707                ehea_refill_rq3(pr, 0);
2708        }
2709out:
2710        free_page((unsigned long)cb0);
2711
2712        return ret;
2713}
2714
2715static void ehea_reset_port(struct work_struct *work)
2716{
2717        int ret;
2718        struct ehea_port *port =
2719                container_of(work, struct ehea_port, reset_task);
2720        struct net_device *dev = port->netdev;
2721
2722        mutex_lock(&dlpar_mem_lock);
2723        port->resets++;
2724        mutex_lock(&port->port_lock);
2725        netif_tx_disable(dev);
2726
2727        port_napi_disable(port);
2728
2729        ehea_down(dev);
2730
2731        ret = ehea_up(dev);
2732        if (ret)
2733                goto out;
2734
2735        ehea_set_multicast_list(dev);
2736
2737        netif_info(port, timer, dev, "reset successful\n");
2738
2739        port_napi_enable(port);
2740
2741        netif_tx_wake_all_queues(dev);
2742out:
2743        mutex_unlock(&port->port_lock);
2744        mutex_unlock(&dlpar_mem_lock);
2745}
2746
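    /*
     * React to an LPAR memory change (DLPAR add/remove): quiesce all
     * active ports, replace the adapter-wide memory region and restart
     * the queue pairs.
     */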
2747static void ehea_rereg_mrs(void)
2748{
2749        int ret, i;
2750        struct ehea_adapter *adapter;
2751
2752        pr_info("LPAR memory changed - re-initializing driver\n");
2753
2754        list_for_each_entry(adapter, &adapter_list, list)
2755                if (adapter->active_ports) {
2756                        /* Shutdown all ports */
2757                        for (i = 0; i < EHEA_MAX_PORTS; i++) {
2758                                struct ehea_port *port = adapter->port[i];
2759                                struct net_device *dev;
2760
2761                                if (!port)
2762                                        continue;
2763
2764                                dev = port->netdev;
2765
2766                                if (dev->flags & IFF_UP) {
2767                                        mutex_lock(&port->port_lock);
2768                                        netif_tx_disable(dev);
2769                                        ehea_flush_sq(port);
2770                                        ret = ehea_stop_qps(dev);
2771                                        if (ret) {
2772                                                mutex_unlock(&port->port_lock);
2773                                                goto out;
2774                                        }
2775                                        port_napi_disable(port);
2776                                        mutex_unlock(&port->port_lock);
2777                                }
2778                                reset_sq_restart_flag(port);
2779                        }
2780
2781                        /* Unregister old memory region */
2782                        ret = ehea_rem_mr(&adapter->mr);
2783                        if (ret) {
2784                                pr_err("unregister MR failed - driver inoperable!\n");
2785                                goto out;
2786                        }
2787                }
2788
2789        clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
2790
2791        list_for_each_entry(adapter, &adapter_list, list)
2792                if (adapter->active_ports) {
2793                        /* Register new memory region */
2794                        ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
2795                        if (ret) {
2796                                pr_err("register MR failed - driver inoperable!\n");
2797                                goto out;
2798                        }
2799
2800                        /* Restart all ports */
2801                        for (i = 0; i < EHEA_MAX_PORTS; i++) {
2802                                struct ehea_port *port = adapter->port[i];
2803
2804                                if (port) {
2805                                        struct net_device *dev = port->netdev;
2806
2807                                        if (dev->flags & IFF_UP) {
2808                                                mutex_lock(&port->port_lock);
2809                                                ret = ehea_restart_qps(dev);
2810                                                if (!ret) {
2811                                                        check_sqs(port);
2812                                                        port_napi_enable(port);
2813                                                        netif_tx_wake_all_queues(dev);
2814                                                } else {
2815                                                        netdev_err(dev, "Unable to restart QPS\n");
2816                                                }
2817                                                mutex_unlock(&port->port_lock);
2818                                        }
2819                                }
2820                        }
2821                }
2822        pr_info("re-initializing driver complete\n");
2823out:
2824        return;
2825}
2826
2827static void ehea_tx_watchdog(struct net_device *dev)
2828{
2829        struct ehea_port *port = netdev_priv(dev);
2830
2831        if (netif_carrier_ok(dev) &&
2832            !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
2833                ehea_schedule_port_reset(port);
2834}
2835
2836static int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
2837{
2838        struct hcp_query_ehea *cb;
2839        u64 hret;
2840        int ret;
2841
2842        cb = (void *)get_zeroed_page(GFP_KERNEL);
2843        if (!cb) {
2844                ret = -ENOMEM;
2845                goto out;
2846        }
2847
2848        hret = ehea_h_query_ehea(adapter->handle, cb);
2849
2850        if (hret != H_SUCCESS) {
2851                ret = -EIO;
2852                goto out_herr;
2853        }
2854
2855        adapter->max_mc_mac = cb->max_mc_mac - 1;
2856        ret = 0;
2857
2858out_herr:
2859        free_page((unsigned long)cb);
2860out:
2861        return ret;
2862}
2863
2864static int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
2865{
2866        struct hcp_ehea_port_cb4 *cb4;
2867        u64 hret;
2868        int ret = 0;
2869
2870        *jumbo = 0;
2871
2872        /* (Try to) enable jumbo frames */
2873        cb4 = (void *)get_zeroed_page(GFP_KERNEL);
2874        if (!cb4) {
2875                pr_err("no mem for cb4\n");
2876                ret = -ENOMEM;
2877                goto out;
2878        }
2879
2880        hret = ehea_h_query_ehea_port(port->adapter->handle,
2881                                      port->logical_port_id,
2882                                      H_PORT_CB4, H_PORT_CB4_JUMBO, cb4);
2883        if (hret == H_SUCCESS) {
2884                if (cb4->jumbo_frame) {
2885                        *jumbo = 1;
2886                } else {
2887                        cb4->jumbo_frame = 1;
2888                        hret = ehea_h_modify_ehea_port(port->adapter->handle,
2889                                                       port->logical_port_id,
2890                                                       H_PORT_CB4,
2891                                                       H_PORT_CB4_JUMBO,
2892                                                       cb4);
2893                        if (hret == H_SUCCESS)
2894                                *jumbo = 1;
2895                }
2896        } else {
2897                ret = -EINVAL;
2898        }
2899
2900        free_page((unsigned long)cb4);
2903out:
2904        return ret;
2905}
2906
2907static ssize_t ehea_show_port_id(struct device *dev,
2908                                 struct device_attribute *attr, char *buf)
2909{
2910        struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
2911        return sprintf(buf, "%u", port->logical_port_id);
2912}
2913
2914static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
2915                   NULL);
2916
2917static void logical_port_release(struct device *dev)
2918{
2919        struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
2920        of_node_put(port->ofdev.dev.of_node);
2921}
2922
2923static struct device *ehea_register_port(struct ehea_port *port,
2924                                         struct device_node *dn)
2925{
2926        int ret;
2927
2928        port->ofdev.dev.of_node = of_node_get(dn);
2929        port->ofdev.dev.parent = &port->adapter->ofdev->dev;
2930        port->ofdev.dev.bus = &ibmebus_bus_type;
2931
2932        dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
2933        port->ofdev.dev.release = logical_port_release;
2934
2935        ret = of_device_register(&port->ofdev);
2936        if (ret) {
2937                pr_err("failed to register device. ret=%d\n", ret);
2938                goto out;
2939        }
2940
2941        ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
2942        if (ret) {
2943                pr_err("failed to register attributes, ret=%d\n", ret);
2944                goto out_unreg_of_dev;
2945        }
2946
2947        return &port->ofdev.dev;
2948
2949out_unreg_of_dev:
2950        of_device_unregister(&port->ofdev);
2951out:
2952        return NULL;
2953}
2954
2955static void ehea_unregister_port(struct ehea_port *port)
2956{
2957        device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
2958        of_device_unregister(&port->ofdev);
2959}
2960
2961static const struct net_device_ops ehea_netdev_ops = {
2962        .ndo_open               = ehea_open,
2963        .ndo_stop               = ehea_stop,
2964        .ndo_start_xmit         = ehea_start_xmit,
2965#ifdef CONFIG_NET_POLL_CONTROLLER
2966        .ndo_poll_controller    = ehea_netpoll,
2967#endif
2968        .ndo_get_stats64        = ehea_get_stats64,
2969        .ndo_set_mac_address    = ehea_set_mac_addr,
2970        .ndo_validate_addr      = eth_validate_addr,
2971        .ndo_set_rx_mode        = ehea_set_multicast_list,
2972        .ndo_change_mtu         = ehea_change_mtu,
2973        .ndo_vlan_rx_add_vid    = ehea_vlan_rx_add_vid,
2974        .ndo_vlan_rx_kill_vid   = ehea_vlan_rx_kill_vid,
2975        .ndo_tx_timeout         = ehea_tx_watchdog,
2976};
2977
2978static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
2979                                         u32 logical_port_id,
2980                                         struct device_node *dn)
2981{
2982        int ret;
2983        struct net_device *dev;
2984        struct ehea_port *port;
2985        struct device *port_dev;
2986        int jumbo;
2987
2988        /* allocate memory for the port structures */
2989        dev = alloc_etherdev_mq(sizeof(struct ehea_port), EHEA_MAX_PORT_RES);
2990
2991        if (!dev) {
2992                ret = -ENOMEM;
2993                goto out_err;
2994        }
2995
2996        port = netdev_priv(dev);
2997
2998        mutex_init(&port->port_lock);
2999        port->state = EHEA_PORT_DOWN;
3000        port->sig_comp_iv = sq_entries / 10;  /* signalled TX completion every ~10% of the SQ */
3001
3002        port->adapter = adapter;
3003        port->netdev = dev;
3004        port->logical_port_id = logical_port_id;
3005
3006        port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);
3007
3008        port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
3009        if (!port->mc_list) {
3010                ret = -ENOMEM;
3011                goto out_free_ethdev;
3012        }
3013
3014        INIT_LIST_HEAD(&port->mc_list->list);
3015
3016        ret = ehea_sense_port_attr(port);
3017        if (ret)
3018                goto out_free_mc_list;
3019
3020        netif_set_real_num_rx_queues(dev, port->num_def_qps);
3021        netif_set_real_num_tx_queues(dev, port->num_def_qps);
3022
3023        port_dev = ehea_register_port(port, dn);
3024        if (!port_dev)
3025                goto out_free_mc_list;
3026
3027        SET_NETDEV_DEV(dev, port_dev);
3028
3029        /* initialize net_device structure */
3030        memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
3031
3032        dev->netdev_ops = &ehea_netdev_ops;
3033        ehea_set_ethtool_ops(dev);
3034
3035        dev->hw_features = NETIF_F_SG | NETIF_F_TSO |
3036                      NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX;
3037        dev->features = NETIF_F_SG | NETIF_F_TSO |
3038                      NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
3039                      NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
3040                      NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM;
3041        dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA |
3042                        NETIF_F_IP_CSUM;
3043        dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
3044
3045        INIT_WORK(&port->reset_task, ehea_reset_port);
3046        INIT_DELAYED_WORK(&port->stats_work, ehea_update_stats);
3047
3048        init_waitqueue_head(&port->swqe_avail_wq);
3049        init_waitqueue_head(&port->restart_wq);
3050
3051        memset(&port->stats, 0, sizeof(struct net_device_stats));
3052        ret = register_netdev(dev);
3053        if (ret) {
3054                pr_err("register_netdev failed. ret=%d\n", ret);
3055                goto out_unreg_port;
3056        }
3057
3058        ret = ehea_get_jumboframe_status(port, &jumbo);
3059        if (ret)
3060                netdev_err(dev, "failed determining jumbo frame status\n");
3061
3062        netdev_info(dev, "Jumbo frames are %sabled\n",
3063                    jumbo == 1 ? "en" : "dis");
3064
3065        adapter->active_ports++;
3066
3067        return port;
3068
3069out_unreg_port:
3070        ehea_unregister_port(port);
3071
3072out_free_mc_list:
3073        kfree(port->mc_list);
3074
3075out_free_ethdev:
3076        free_netdev(dev);
3077
3078out_err:
3079        pr_err("setting up logical port with id=%d failed, ret=%d\n",
3080               logical_port_id, ret);
3081        return NULL;
3082}
3083
3084static void ehea_shutdown_single_port(struct ehea_port *port)
3085{
3086        struct ehea_adapter *adapter = port->adapter;
3087
3088        cancel_work_sync(&port->reset_task);
3089        cancel_delayed_work_sync(&port->stats_work);
3090        unregister_netdev(port->netdev);
3091        ehea_unregister_port(port);
3092        kfree(port->mc_list);
3093        free_netdev(port->netdev);
3094        adapter->active_ports--;
3095}
3096
3097static int ehea_setup_ports(struct ehea_adapter *adapter)
3098{
3099        struct device_node *lhea_dn;
3100        struct device_node *eth_dn = NULL;
3101
3102        const u32 *dn_log_port_id;
3103        int i = 0;
3104
3105        lhea_dn = adapter->ofdev->dev.of_node;
3106        while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
3107
3108                dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
3109                                                 NULL);
3110                if (!dn_log_port_id) {
3111                        pr_err("bad device node: eth_dn name=%s\n",
3112                               eth_dn->full_name);
3113                        continue;
3114                }
3115
3116                if (ehea_add_adapter_mr(adapter)) {
3117                        pr_err("creating MR failed\n");
3118                        of_node_put(eth_dn);
3119                        return -EIO;
3120                }
3121
3122                adapter->port[i] = ehea_setup_single_port(adapter,
3123                                                          *dn_log_port_id,
3124                                                          eth_dn);
3125                if (adapter->port[i])
3126                        netdev_info(adapter->port[i]->netdev,
3127                                    "logical port id #%d\n", *dn_log_port_id);
3128                else
3129                        ehea_remove_adapter_mr(adapter);
3130
3131                i++;
3132        }
3133        return 0;
3134}
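/*
 * Editor's sketch (not part of the driver): the same child walk expressed
 * with the for_each_child_of_node() helper from <linux/of.h>, with an
 * explicit bound so that adapter->port[] (sized EHEA_MAX_PORTS) cannot be
 * overrun should the device tree expose more ports than expected.
 * ehea_count_eth_ports() is a hypothetical helper.
 */
static int __maybe_unused ehea_count_eth_ports(struct ehea_adapter *adapter)
{
	struct device_node *eth_dn;
	int i = 0;

	for_each_child_of_node(adapter->ofdev->dev.of_node, eth_dn) {
		if (i >= EHEA_MAX_PORTS) {
			/* the iterator still holds a reference on early exit */
			of_node_put(eth_dn);
			break;
		}
		i++;
	}
	return i;
}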
3135
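/*
 * Note: on a match the loop below exits without dropping the reference
 * that of_get_next_child() took on eth_dn, so the caller owns the node
 * and must release it with of_node_put() (as ehea_probe_port() does).
 */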
3136static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
3137                                           u32 logical_port_id)
3138{
3139        struct device_node *lhea_dn;
3140        struct device_node *eth_dn = NULL;
3141        const u32 *dn_log_port_id;
3142
3143        lhea_dn = adapter->ofdev->dev.of_node;
3144        while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
3145
3146                dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
3147                                                 NULL);
3148                if (dn_log_port_id)
3149                        if (*dn_log_port_id == logical_port_id)
3150                                return eth_dn;
3151        }
3152
3153        return NULL;
3154}
3155
3156static ssize_t ehea_probe_port(struct device *dev,
3157                               struct device_attribute *attr,
3158                               const char *buf, size_t count)
3159{
3160        struct ehea_adapter *adapter = dev_get_drvdata(dev);
3161        struct ehea_port *port;
3162        struct device_node *eth_dn = NULL;
3163        int i;
3164
3165        u32 logical_port_id;
3166
3167        sscanf(buf, "%u", &logical_port_id);
3168
3169        port = ehea_get_port(adapter, logical_port_id);
3170
3171        if (port) {
3172                netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n",
3173                            logical_port_id);
3174                return -EINVAL;
3175        }
3176
3177        eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
3178
3179        if (!eth_dn) {
3180                pr_info("no logical port with id %d found\n", logical_port_id);
3181                return -EINVAL;
3182        }
3183
3184        if (ehea_add_adapter_mr(adapter)) {
3185                pr_err("creating MR failed\n");
3186                return -EIO;
3187        }
3188
3189        port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);
3190
3191        of_node_put(eth_dn);
3192
3193        if (port) {
3194                for (i = 0; i < EHEA_MAX_PORTS; i++)
3195                        if (!adapter->port[i]) {
3196                                adapter->port[i] = port;
3197                                break;
3198                        }
3199
3200                netdev_info(port->netdev, "added: (logical port id=%d)\n",
3201                            logical_port_id);
3202        } else {
3203                ehea_remove_adapter_mr(adapter);
3204                return -EIO;
3205        }
3206
3207        return (ssize_t) count;
3208}
3209
3210static ssize_t ehea_remove_port(struct device *dev,
3211                                struct device_attribute *attr,
3212                                const char *buf, size_t count)
3213{
3214        struct ehea_adapter *adapter = dev_get_drvdata(dev);
3215        struct ehea_port *port;
3216        int i;
3217        u32 logical_port_id;
3218
3219        sscanf(buf, "%u", &logical_port_id);
3220
3221        port = ehea_get_port(adapter, logical_port_id);
3222
3223        if (port) {
3224                netdev_info(port->netdev, "removed: (logical port id=%d)\n",
3225                            logical_port_id);
3226
3227                ehea_shutdown_single_port(port);
3228
3229                for (i = 0; i < EHEA_MAX_PORTS; i++)
3230                        if (adapter->port[i] == port) {
3231                                adapter->port[i] = NULL;
3232                                break;
3233                        }
3234        } else {
3235                pr_err("removing port with logical port id=%d failed. port not configured.\n",
3236                       logical_port_id);
3237                return -EINVAL;
3238        }
3239
3240        ehea_remove_adapter_mr(adapter);
3241
3242        return (ssize_t) count;
3243}
3244
3245static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
3246static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);
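/*
 * Editor's sketch (hypothetical userspace code, not part of the driver):
 * triggering a port probe by writing a logical port id to the probe_port
 * attribute. The sysfs path below is an assumption; the adapter directory
 * name varies by machine. Writing to remove_port works the same way.
 */
#if 0	/* userspace example, kept out of the kernel build */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *attr =
		"/sys/bus/ibmebus/devices/23c00100.lhea/probe_port";
	int fd = open(attr, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* the id string is parsed by sscanf("%u") in ehea_probe_port() */
	if (write(fd, "1", 1) < 0)
		perror("write");
	close(fd);
	return 0;
}
#endif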
3247
3248static int ehea_create_device_sysfs(struct platform_device *dev)
3249{
3250        int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
3251        if (ret)
3252                goto out;
3253
3254        ret = device_create_file(&dev->dev, &dev_attr_remove_port);
3255out:
3256        return ret;
3257}
3258
3259static void ehea_remove_device_sysfs(struct platform_device *dev)
3260{
3261        device_remove_file(&dev->dev, &dev_attr_probe_port);
3262        device_remove_file(&dev->dev, &dev_attr_remove_port);
3263}
3264
3265static int ehea_reboot_notifier(struct notifier_block *nb,
3266                                unsigned long action, void *unused)
3267{
3268        if (action == SYS_RESTART) {
3269                pr_info("Reboot: freeing all eHEA resources\n");
3270                ibmebus_unregister_driver(&ehea_driver);
3271        }
3272        return NOTIFY_DONE;
3273}
3274
3275static struct notifier_block ehea_reboot_nb = {
3276        .notifier_call = ehea_reboot_notifier,
3277};
3278
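/*
 * Memory-hotplug notifier: for MEM_GOING_OFFLINE a NOTIFY_BAD result
 * vetoes the offline operation, so the early-exit paths below that leave
 * ret at NOTIFY_BAD double as a veto whenever the busmap cannot be
 * updated.
 */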
3279static int ehea_mem_notifier(struct notifier_block *nb,
3280                             unsigned long action, void *data)
3281{
3282        int ret = NOTIFY_BAD;
3283        struct memory_notify *arg = data;
3284
3285        mutex_lock(&dlpar_mem_lock);
3286
3287        switch (action) {
3288        case MEM_CANCEL_OFFLINE:
3289                pr_info("memory offlining canceled\n");
3290                /* Fall through: re-add canceled memory block */
3291
3292        case MEM_ONLINE:
3293                pr_info("memory is going online\n");
3294                set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3295                if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
3296                        goto out_unlock;
3297                ehea_rereg_mrs();
3298                break;
3299
3300        case MEM_GOING_OFFLINE:
3301                pr_info("memory is going offline\n");
3302                set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3303                if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
3304                        goto out_unlock;
3305                ehea_rereg_mrs();
3306                break;
3307
3308        default:
3309                break;
3310        }
3311
3312        ehea_update_firmware_handles();
3313        ret = NOTIFY_OK;
3314
3315out_unlock:
3316        mutex_unlock(&dlpar_mem_lock);
3317        return ret;
3318}
3319
3320static struct notifier_block ehea_mem_nb = {
3321        .notifier_call = ehea_mem_notifier,
3322};
3323
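/*
 * Crash (kexec/kdump) shutdown hook, registered via
 * crash_shutdown_register() below. It runs on the crashing CPU, so it
 * deliberately walks the cached firmware-handle and BCMC-registration
 * arrays without taking any locks and force-frees everything.
 */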
3324static void ehea_crash_handler(void)
3325{
3326        int i;
3327
3328        if (ehea_fw_handles.arr)
3329                for (i = 0; i < ehea_fw_handles.num_entries; i++)
3330                        ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
3331                                             ehea_fw_handles.arr[i].fwh,
3332                                             FORCE_FREE);
3333
3334        if (ehea_bcmc_regs.arr)
3335                for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
3336                        ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
3337                                              ehea_bcmc_regs.arr[i].port_id,
3338                                              ehea_bcmc_regs.arr[i].reg_type,
3339                                              ehea_bcmc_regs.arr[i].macaddr,
3340                                              0, H_DEREG_BCMC);
3341}
3342
3343static atomic_t ehea_memory_hooks_registered;
3344
3345/* Register memory hooks on probe of first adapter */
3346static int ehea_register_memory_hooks(void)
3347{
3348        int ret = 0;
3349
3350        if (atomic_inc_return(&ehea_memory_hooks_registered) > 1)
3351                return 0;
3352
3353        ret = ehea_create_busmap();
3354        if (ret) {
3355                pr_info("ehea_create_busmap failed\n");
3356                goto out;
3357        }
3358
3359        ret = register_reboot_notifier(&ehea_reboot_nb);
3360        if (ret) {
3361                pr_info("register_reboot_notifier failed\n");
3362                goto out;
3363        }
3364
3365        ret = register_memory_notifier(&ehea_mem_nb);
3366        if (ret) {
3367                pr_info("register_memory_notifier failed\n");
3368                goto out2;
3369        }
3370
3371        ret = crash_shutdown_register(ehea_crash_handler);
3372        if (ret) {
3373                pr_info("crash_shutdown_register failed\n");
3374                goto out3;
3375        }
3376
3377        return 0;
3378
3379out3:
3380        unregister_memory_notifier(&ehea_mem_nb);
3381out2:
3382        unregister_reboot_notifier(&ehea_reboot_nb);
3383out:
3384        atomic_dec(&ehea_memory_hooks_registered);
3385        return ret;
3386}
3387
3388static void ehea_unregister_memory_hooks(void)
3389{
3390        /* Only remove the hooks if we've registered them */
3391        if (atomic_read(&ehea_memory_hooks_registered) == 0)
3392                return;
3393
3394        unregister_reboot_notifier(&ehea_reboot_nb);
3395        if (crash_shutdown_unregister(ehea_crash_handler))
3396                pr_info("failed unregistering crash handler\n");
3397        unregister_memory_notifier(&ehea_mem_nb);
3398}
3399
3400static int ehea_probe_adapter(struct platform_device *dev)
3401{
3402        struct ehea_adapter *adapter;
3403        const u64 *adapter_handle;
3404        int ret;
3405        int i;
3406
3407        ret = ehea_register_memory_hooks();
3408        if (ret)
3409                return ret;
3410
3411        if (!dev || !dev->dev.of_node) {
3412                pr_err("Invalid ibmebus device probed\n");
3413                return -EINVAL;
3414        }
3415
3416        adapter = devm_kzalloc(&dev->dev, sizeof(*adapter), GFP_KERNEL);
3417        if (!adapter) {
3418                ret = -ENOMEM;
3419                dev_err(&dev->dev, "no mem for ehea_adapter\n");
3420                goto out;
3421        }
3422
3423        list_add(&adapter->list, &adapter_list);
3424
3425        adapter->ofdev = dev;
3426
3427        adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle",
3428                                         NULL);
3429        if (adapter_handle)
3430                adapter->handle = *adapter_handle;
3431
3432        if (!adapter->handle) {
3433                dev_err(&dev->dev, "failed getting handle for adapter"
3434                        " '%s'\n", dev->dev.of_node->full_name);
3435                ret = -ENODEV;
3436                goto out_free_ad;
3437        }
3438
3439        adapter->pd = EHEA_PD_ID;
3440
3441        platform_set_drvdata(dev, adapter);
3442
3444        /* initialize adapter and ports */
3445        /* get adapter properties */
3446        ret = ehea_sense_adapter_attr(adapter);
3447        if (ret) {
3448                dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
3449                goto out_free_ad;
3450        }
3451
3452        adapter->neq = ehea_create_eq(adapter,
3453                                      EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
3454        if (!adapter->neq) {
3455                ret = -EIO;
3456                dev_err(&dev->dev, "NEQ creation failed\n");
3457                goto out_free_ad;
3458        }
3459
3460        tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
3461                     (unsigned long)adapter);
3462
3463        ret = ehea_create_device_sysfs(dev);
3464        if (ret)
3465                goto out_kill_eq;
3466
3467        ret = ehea_setup_ports(adapter);
3468        if (ret) {
3469                dev_err(&dev->dev, "setup_ports failed\n");
3470                goto out_rem_dev_sysfs;
3471        }
3472
3473        ret = ibmebus_request_irq(adapter->neq->attr.ist1,
3474                                  ehea_interrupt_neq, 0,
3475                                  "ehea_neq", adapter);
3476        if (ret) {
3477                dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
3478                goto out_shutdown_ports;
3479        }
3480
3481        /* Handle any events that might be pending. */
3482        tasklet_hi_schedule(&adapter->neq_tasklet);
3483
3484        ret = 0;
3485        goto out;
3486
3487out_shutdown_ports:
3488        for (i = 0; i < EHEA_MAX_PORTS; i++)
3489                if (adapter->port[i]) {
3490                        ehea_shutdown_single_port(adapter->port[i]);
3491                        adapter->port[i] = NULL;
3492                }
3493
3494out_rem_dev_sysfs:
3495        ehea_remove_device_sysfs(dev);
3496
3497out_kill_eq:
3498        ehea_destroy_eq(adapter->neq);
3499
3500out_free_ad:
3501        list_del(&adapter->list);
3502
3503out:
3504        ehea_update_firmware_handles();
3505
3506        return ret;
3507}
3508
3509static int ehea_remove(struct platform_device *dev)
3510{
3511        struct ehea_adapter *adapter = platform_get_drvdata(dev);
3512        int i;
3513
3514        for (i = 0; i < EHEA_MAX_PORTS; i++)
3515                if (adapter->port[i]) {
3516                        ehea_shutdown_single_port(adapter->port[i]);
3517                        adapter->port[i] = NULL;
3518                }
3519
3520        ehea_remove_device_sysfs(dev);
3521
3522        ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
3523        tasklet_kill(&adapter->neq_tasklet);
3524
3525        ehea_destroy_eq(adapter->neq);
3526        ehea_remove_adapter_mr(adapter);
3527        list_del(&adapter->list);
3528
3529        ehea_update_firmware_handles();
3530
3531        return 0;
3532}
3533
3534static int check_module_parm(void)
3535{
3536        int ret = 0;
3537
3538        if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
3539            (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
3540                pr_info("Bad parameter: rq1_entries\n");
3541                ret = -EINVAL;
3542        }
3543        if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
3544            (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
3545                pr_info("Bad parameter: rq2_entries\n");
3546                ret = -EINVAL;
3547        }
3548        if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
3549            (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
3550                pr_info("Bad parameter: rq3_entries\n");
3551                ret = -EINVAL;
3552        }
3553        if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
3554            (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
3555                pr_info("Bad parameter: sq_entries\n");
3556                ret = -EINVAL;
3557        }
3558
3559        return ret;
3560}
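/*
 * Editor's note: the MODULE_PARM_DESC strings advertise the queue sizes
 * as "[2^x - 1]" values, yet check_module_parm() above only enforces the
 * min/max bounds. A stricter check could also verify the form; a minimal
 * sketch using is_power_of_2() from <linux/log2.h> (ehea_valid_qentries
 * is a hypothetical helper, not part of this driver):
 */
static inline bool ehea_valid_qentries(int entries, int min, int max)
{
	/* accept only values of the form 2^x - 1 within [min, max] */
	return entries >= min && entries <= max &&
	       is_power_of_2((unsigned long)entries + 1);
}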
3561
3562static ssize_t ehea_show_capabilities(struct device_driver *drv,
3563                                      char *buf)
3564{
3565        return sprintf(buf, "%d\n", EHEA_CAPABILITIES);
3566}
3567
3568static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
3569                   ehea_show_capabilities, NULL);
3570
3571static int __init ehea_module_init(void)
3572{
3573        int ret;
3574
3575        pr_info("IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION);
3576
3577        memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
3578        memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));
3579
3580        mutex_init(&ehea_fw_handles.lock);
3581        spin_lock_init(&ehea_bcmc_regs.lock);
3582
3583        ret = check_module_parm();
3584        if (ret)
3585                goto out;
3586
3587        ret = ibmebus_register_driver(&ehea_driver);
3588        if (ret) {
3589                pr_err("failed registering eHEA device driver on ebus\n");
3590                goto out;
3591        }
3592
3593        ret = driver_create_file(&ehea_driver.driver,
3594                                 &driver_attr_capabilities);
3595        if (ret) {
3596                pr_err("failed to register capabilities attribute, ret=%d\n",
3597                       ret);
3598                goto out2;
3599        }
3600
3601        return ret;
3602
3603out2:
3604        ibmebus_unregister_driver(&ehea_driver);
3605out:
3606        return ret;
3607}
3608
3609static void __exit ehea_module_exit(void)
3610{
3611        driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
3612        ibmebus_unregister_driver(&ehea_driver);
3613        ehea_unregister_memory_hooks();
3614        kfree(ehea_fw_handles.arr);
3615        kfree(ehea_bcmc_regs.arr);
3616        ehea_destroy_busmap();
3617}
3618
3619module_init(ehea_module_init);
3620module_exit(ehea_module_exit);
3621