// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  linux/drivers/net/ethernet/ibm/ehea/ehea_main.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/memory.h>
#include <asm/kexec.h>
#include <linux/mutex.h>
#include <linux/prefetch.h>

#include <net/ip.h>

#include "ehea.h"
#include "ehea_qmr.h"
#include "ehea_phyp.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);

static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs = 1;
static int prop_carrier_state;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);

MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
                 "port to stack. 1:yes, 0:no. Default = 0");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
                 "[2^x - 1], x = [7..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3));
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
                 "[2^x - 1], x = [7..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2));
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
                 "[2^x - 1], x = [7..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1));
MODULE_PARM_DESC(sq_entries, "Number of entries for the Send Queue "
                 "[2^x - 1], x = [7..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ));
MODULE_PARM_DESC(use_mcs, "Multiple receive queues, 1: enable, 0: disable, "
                 "Default = 1");

static int port_name_cnt;
static LIST_HEAD(adapter_list);
static unsigned long ehea_driver_flags;
static DEFINE_MUTEX(dlpar_mem_lock);
static struct ehea_fw_handle_array ehea_fw_handles;
static struct ehea_bcmc_reg_array ehea_bcmc_regs;

static int ehea_probe_adapter(struct platform_device *dev);

static int ehea_remove(struct platform_device *dev);

static const struct of_device_id ehea_module_device_table[] = {
        {
                .name = "lhea",
                .compatible = "IBM,lhea",
        },
        {
                .type = "network",
                .compatible = "IBM,lhea-ethernet",
        },
        {},
};
MODULE_DEVICE_TABLE(of, ehea_module_device_table);

static const struct of_device_id ehea_device_table[] = {
        {
                .name = "lhea",
                .compatible = "IBM,lhea",
        },
        {},
};

static struct platform_driver ehea_driver = {
        .driver = {
                .name = "ehea",
                .owner = THIS_MODULE,
                .of_match_table = ehea_device_table,
        },
        .probe = ehea_probe_adapter,
        .remove = ehea_remove,
};

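/*
 * Hex-dump a buffer in rows of 16 bytes (two u64 words per line). Note
 * that a trailing partial row still prints a full 16 bytes.
 */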
void ehea_dump(void *adr, int len, char *msg)
{
        int x;
        unsigned char *deb = adr;
        for (x = 0; x < len; x += 16) {
                pr_info("%s adr=%p ofs=%04x %016llx %016llx\n",
                        msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
                deb += 16;
        }
}

static void ehea_schedule_port_reset(struct ehea_port *port)
{
        if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
                schedule_work(&port->reset_task);
}

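/*
 * Rebuild the flat snapshot of every firmware handle (QPs, CQs, EQs and
 * memory regions) owned by all registered adapters and their active
 * ports. The snapshot in ehea_fw_handles is presumably consumed by the
 * crash/kexec path elsewhere in this file, which must release handles
 * without walking live driver structures. If kcalloc() fails, the
 * previous snapshot is kept.
 */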
static void ehea_update_firmware_handles(void)
{
        struct ehea_fw_handle_entry *arr = NULL;
        struct ehea_adapter *adapter;
        int num_adapters = 0;
        int num_ports = 0;
        int num_portres = 0;
        int i = 0;
        int num_fw_handles, k, l;

        /* Determine number of handles */
        mutex_lock(&ehea_fw_handles.lock);

        list_for_each_entry(adapter, &adapter_list, list) {
                num_adapters++;

                for (k = 0; k < EHEA_MAX_PORTS; k++) {
                        struct ehea_port *port = adapter->port[k];

                        if (!port || (port->state != EHEA_PORT_UP))
                                continue;

                        num_ports++;
                        num_portres += port->num_def_qps;
                }
        }

        num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
                         num_ports * EHEA_NUM_PORT_FW_HANDLES +
                         num_portres * EHEA_NUM_PORTRES_FW_HANDLES;

        if (num_fw_handles) {
                arr = kcalloc(num_fw_handles, sizeof(*arr), GFP_KERNEL);
                if (!arr)
                        goto out;  /* Keep the existing array */
        } else
                goto out_update;

        list_for_each_entry(adapter, &adapter_list, list) {
                if (num_adapters == 0)
                        break;

                for (k = 0; k < EHEA_MAX_PORTS; k++) {
                        struct ehea_port *port = adapter->port[k];

                        if (!port || (port->state != EHEA_PORT_UP) ||
                            (num_ports == 0))
                                continue;

                        for (l = 0; l < port->num_def_qps; l++) {
                                struct ehea_port_res *pr = &port->port_res[l];

                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->qp->fw_handle;
                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->send_cq->fw_handle;
                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->recv_cq->fw_handle;
                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->eq->fw_handle;
                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->send_mr.handle;
                                arr[i].adh = adapter->handle;
                                arr[i++].fwh = pr->recv_mr.handle;
                        }
                        arr[i].adh = adapter->handle;
                        arr[i++].fwh = port->qp_eq->fw_handle;
                        num_ports--;
                }

                arr[i].adh = adapter->handle;
                arr[i++].fwh = adapter->neq->fw_handle;

                if (adapter->mr.handle) {
                        arr[i].adh = adapter->handle;
                        arr[i++].fwh = adapter->mr.handle;
                }
                num_adapters--;
        }

out_update:
        kfree(ehea_fw_handles.arr);
        ehea_fw_handles.arr = arr;
        ehea_fw_handles.num_entries = i;
out:
        mutex_unlock(&ehea_fw_handles.lock);
}

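/*
 * Rebuild the flat snapshot of broadcast/multicast registrations for all
 * active ports: two broadcast entries per port (untagged and all-VLAN)
 * plus two entries per multicast address. Runs under the bcmc spinlock,
 * hence GFP_ATOMIC.
 */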
static void ehea_update_bcmc_registrations(void)
{
        unsigned long flags;
        struct ehea_bcmc_reg_entry *arr = NULL;
        struct ehea_adapter *adapter;
        struct ehea_mc_list *mc_entry;
        int num_registrations = 0;
        int i = 0;
        int k;

        spin_lock_irqsave(&ehea_bcmc_regs.lock, flags);

        /* Determine number of registrations */
        list_for_each_entry(adapter, &adapter_list, list)
                for (k = 0; k < EHEA_MAX_PORTS; k++) {
                        struct ehea_port *port = adapter->port[k];

                        if (!port || (port->state != EHEA_PORT_UP))
                                continue;

                        num_registrations += 2; /* Broadcast registrations */

                        list_for_each_entry(mc_entry, &port->mc_list->list,
                                            list)
                                num_registrations += 2;
                }

        if (num_registrations) {
                arr = kcalloc(num_registrations, sizeof(*arr), GFP_ATOMIC);
                if (!arr)
                        goto out;  /* Keep the existing array */
        } else
                goto out_update;

        list_for_each_entry(adapter, &adapter_list, list) {
                for (k = 0; k < EHEA_MAX_PORTS; k++) {
                        struct ehea_port *port = adapter->port[k];

                        if (!port || (port->state != EHEA_PORT_UP))
                                continue;

                        if (num_registrations == 0)
                                goto out_update;

                        arr[i].adh = adapter->handle;
                        arr[i].port_id = port->logical_port_id;
                        arr[i].reg_type = EHEA_BCMC_BROADCAST |
                                          EHEA_BCMC_UNTAGGED;
                        arr[i++].macaddr = port->mac_addr;

                        arr[i].adh = adapter->handle;
                        arr[i].port_id = port->logical_port_id;
                        arr[i].reg_type = EHEA_BCMC_BROADCAST |
                                          EHEA_BCMC_VLANID_ALL;
                        arr[i++].macaddr = port->mac_addr;
                        num_registrations -= 2;

                        list_for_each_entry(mc_entry,
                                            &port->mc_list->list, list) {
                                if (num_registrations == 0)
                                        goto out_update;

                                arr[i].adh = adapter->handle;
                                arr[i].port_id = port->logical_port_id;
                                arr[i].reg_type = EHEA_BCMC_MULTICAST |
                                                  EHEA_BCMC_UNTAGGED;
                                if (mc_entry->macaddr == 0)
                                        arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
                                arr[i++].macaddr = mc_entry->macaddr;

                                arr[i].adh = adapter->handle;
                                arr[i].port_id = port->logical_port_id;
                                arr[i].reg_type = EHEA_BCMC_MULTICAST |
                                                  EHEA_BCMC_VLANID_ALL;
                                if (mc_entry->macaddr == 0)
                                        arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
                                arr[i++].macaddr = mc_entry->macaddr;
                                num_registrations -= 2;
                        }
                }
        }

out_update:
        kfree(ehea_bcmc_regs.arr);
        ehea_bcmc_regs.arr = arr;
        ehea_bcmc_regs.num_entries = i;
out:
        spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
}

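/*
 * Sum the software RX/TX counters kept per port resource; multicast and
 * rx_errors come from the firmware counters cached by ehea_update_stats().
 */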
static void ehea_get_stats64(struct net_device *dev,
                             struct rtnl_link_stats64 *stats)
{
        struct ehea_port *port = netdev_priv(dev);
        u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0;
        int i;

        for (i = 0; i < port->num_def_qps; i++) {
                rx_packets += port->port_res[i].rx_packets;
                rx_bytes   += port->port_res[i].rx_bytes;
        }

        for (i = 0; i < port->num_def_qps; i++) {
                tx_packets += port->port_res[i].tx_packets;
                tx_bytes   += port->port_res[i].tx_bytes;
        }

        stats->tx_packets = tx_packets;
        stats->rx_bytes = rx_bytes;
        stats->tx_bytes = tx_bytes;
        stats->rx_packets = rx_packets;

        stats->multicast = port->stats.multicast;
        stats->rx_errors = port->stats.rx_errors;
}

static void ehea_update_stats(struct work_struct *work)
{
        struct ehea_port *port =
                container_of(work, struct ehea_port, stats_work.work);
        struct net_device *dev = port->netdev;
        struct rtnl_link_stats64 *stats = &port->stats;
        struct hcp_ehea_port_cb2 *cb2;
        u64 hret;

        cb2 = (void *)get_zeroed_page(GFP_KERNEL);
        if (!cb2) {
                netdev_err(dev, "No mem for cb2. Some interface statistics were not updated\n");
                goto resched;
        }

        hret = ehea_h_query_ehea_port(port->adapter->handle,
                                      port->logical_port_id,
                                      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
        if (hret != H_SUCCESS) {
                netdev_err(dev, "query_ehea_port failed\n");
                goto out_herr;
        }

        if (netif_msg_hw(port))
                ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

        stats->multicast = cb2->rxmcp;
        stats->rx_errors = cb2->rxuerr;

out_herr:
        free_page((unsigned long)cb2);
resched:
        schedule_delayed_work(&port->stats_work,
                              round_jiffies_relative(msecs_to_jiffies(1000)));
}

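/*
 * Replenish RQ1 with EHEA_L_PKT_SIZE skbs, walking the ring backwards
 * from the last completed index. Allocation failures are remembered in
 * os_skbs and retried on the next refill; the doorbell is only rung for
 * WQEs actually made available.
 */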
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
        struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
        struct net_device *dev = pr->port->netdev;
        int max_index_mask = pr->rq1_skba.len - 1;
        int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
        int adder = 0;
        int i;

        pr->rq1_skba.os_skbs = 0;

        if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
                if (nr_of_wqes > 0)
                        pr->rq1_skba.index = index;
                pr->rq1_skba.os_skbs = fill_wqes;
                return;
        }

        for (i = 0; i < fill_wqes; i++) {
                if (!skb_arr_rq1[index]) {
                        skb_arr_rq1[index] = netdev_alloc_skb(dev,
                                                              EHEA_L_PKT_SIZE);
                        if (!skb_arr_rq1[index]) {
                                pr->rq1_skba.os_skbs = fill_wqes - i;
                                break;
                        }
                }
                index--;
                index &= max_index_mask;
                adder++;
        }

        if (adder == 0)
                return;

        /* Ring doorbell */
        ehea_update_rq1a(pr->qp, adder);
}

static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
        struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
        struct net_device *dev = pr->port->netdev;
        int i;

        if (nr_rq1a > pr->rq1_skba.len) {
                netdev_err(dev, "NR_RQ1A bigger than skb array len\n");
                return;
        }

        for (i = 0; i < nr_rq1a; i++) {
                skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
                if (!skb_arr_rq1[i])
                        break;
        }
        /* Ring doorbell */
        ehea_update_rq1a(pr->qp, i - 1);
}

static int ehea_refill_rq_def(struct ehea_port_res *pr,
                              struct ehea_q_skb_arr *q_skba, int rq_nr,
                              int num_wqes, int wqe_type, int packet_size)
{
        struct net_device *dev = pr->port->netdev;
        struct ehea_qp *qp = pr->qp;
        struct sk_buff **skb_arr = q_skba->arr;
        struct ehea_rwqe *rwqe;
        int i, index, max_index_mask, fill_wqes;
        int adder = 0;
        int ret = 0;

        fill_wqes = q_skba->os_skbs + num_wqes;
        q_skba->os_skbs = 0;

        if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
                q_skba->os_skbs = fill_wqes;
                return ret;
        }

        index = q_skba->index;
        max_index_mask = q_skba->len - 1;
        for (i = 0; i < fill_wqes; i++) {
                u64 tmp_addr;
                struct sk_buff *skb;

                skb = netdev_alloc_skb_ip_align(dev, packet_size);
                if (!skb) {
                        q_skba->os_skbs = fill_wqes - i;
                        if (q_skba->os_skbs == q_skba->len - 2) {
                                netdev_info(pr->port->netdev,
                                            "rq%i ran dry - no mem for skb\n",
                                            rq_nr);
                                ret = -ENOMEM;
                        }
                        break;
                }

                skb_arr[index] = skb;
                tmp_addr = ehea_map_vaddr(skb->data);
                if (tmp_addr == -1) {
                        dev_consume_skb_any(skb);
                        q_skba->os_skbs = fill_wqes - i;
                        ret = 0;
                        break;
                }

                rwqe = ehea_get_next_rwqe(qp, rq_nr);
                rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
                            | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
                rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
                rwqe->sg_list[0].vaddr = tmp_addr;
                rwqe->sg_list[0].len = packet_size;
                rwqe->data_segments = 1;

                index++;
                index &= max_index_mask;
                adder++;
        }

        q_skba->index = index;
        if (adder == 0)
                goto out;

        /* Ring doorbell */
        iosync();
        if (rq_nr == 2)
                ehea_update_rq2a(pr->qp, adder);
        else
                ehea_update_rq3a(pr->qp, adder);
out:
        return ret;
}

static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
        return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
                                  nr_of_wqes, EHEA_RWQE2_TYPE,
                                  EHEA_RQ2_PKT_SIZE);
}

static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
        return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
                                  nr_of_wqes, EHEA_RWQE3_TYPE,
                                  EHEA_MAX_PACKET_SIZE);
}

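/*
 * Extract the RQ number from the CQE type field and classify the
 * completion: 0 if usable (no error bits, or only a TCP checksum error
 * with a zero header_length), -EINVAL if the error path applies.
 */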
static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
        *rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
        if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
                return 0;
        if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
            (cqe->header_length == 0))
                return 0;
        return -EINVAL;
}

static inline void ehea_fill_skb(struct net_device *dev,
                                 struct sk_buff *skb, struct ehea_cqe *cqe,
                                 struct ehea_port_res *pr)
{
        int length = cqe->num_bytes_transfered - 4;     /* remove CRC */

        skb_put(skb, length);
        skb->protocol = eth_type_trans(skb, dev);

        /* The packet was not an IPv4 packet so a complemented checksum was
         * calculated. The value is found in the Internet Checksum field.
         */
        if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
                skb->ip_summed = CHECKSUM_COMPLETE;
                skb->csum = csum_unfold(~cqe->inet_checksum_value);
        } else
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        skb_record_rx_queue(skb, pr - &pr->port->port_res[0]);
}

static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
                                               int arr_len,
                                               struct ehea_cqe *cqe)
{
        int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
        struct sk_buff *skb;
        void *pref;
        int x;

        x = skb_index + 1;
        x &= (arr_len - 1);

        pref = skb_array[x];
        if (pref) {
                prefetchw(pref);
                prefetchw(pref + EHEA_CACHE_LINE);

                pref = (skb_array[x]->data);
                prefetch(pref);
                prefetch(pref + EHEA_CACHE_LINE);
                prefetch(pref + EHEA_CACHE_LINE * 2);
                prefetch(pref + EHEA_CACHE_LINE * 3);
        }

        skb = skb_array[skb_index];
        skb_array[skb_index] = NULL;
        return skb;
}

static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
                                                  int arr_len, int wqe_index)
{
        struct sk_buff *skb;
        void *pref;
        int x;

        x = wqe_index + 1;
        x &= (arr_len - 1);

        pref = skb_array[x];
        if (pref) {
                prefetchw(pref);
                prefetchw(pref + EHEA_CACHE_LINE);

                pref = (skb_array[x]->data);
                prefetchw(pref);
                prefetchw(pref + EHEA_CACHE_LINE);
        }

        skb = skb_array[wqe_index];
        skb_array[wqe_index] = NULL;
        return skb;
}

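/*
 * Account a receive error CQE and free the skb it refers to (RQ2/RQ3
 * only; RQ1 data is carried inside the CQE). For fatal errors the port
 * is reset; the nonzero return tells the poll loop to bail out.
 */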
static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
                                 struct ehea_cqe *cqe, int *processed_rq2,
                                 int *processed_rq3)
{
        struct sk_buff *skb;

        if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
                pr->p_stats.err_tcp_cksum++;
        if (cqe->status & EHEA_CQE_STAT_ERR_IP)
                pr->p_stats.err_ip_cksum++;
        if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
                pr->p_stats.err_frame_crc++;

        if (rq == 2) {
                *processed_rq2 += 1;
                skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
                dev_kfree_skb(skb);
        } else if (rq == 3) {
                *processed_rq3 += 1;
                skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
                dev_kfree_skb(skb);
        }

        if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
                if (netif_msg_rx_err(pr->port)) {
                        pr_err("Critical receive error for QP %d. Resetting port.\n",
                               pr->qp->init_attr.qp_nr);
                        ehea_dump(cqe, sizeof(*cqe), "CQE");
                }
                ehea_schedule_port_reset(pr->port);
                return 1;
        }

        return 0;
}

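/*
 * NAPI receive path: drain up to @budget completions from the receive
 * CQ. Low-latency RQ1 frames are copied out of the CQE itself (data
 * starts at offset 64); RQ2/RQ3 frames arrive in pre-posted skbs looked
 * up by WQE index. Packets go to the stack via GRO and all consumed
 * WQEs are reposted before returning.
 */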
static int ehea_proc_rwqes(struct net_device *dev,
                           struct ehea_port_res *pr,
                           int budget)
{
        struct ehea_port *port = pr->port;
        struct ehea_qp *qp = pr->qp;
        struct ehea_cqe *cqe;
        struct sk_buff *skb;
        struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
        struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
        struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
        int skb_arr_rq1_len = pr->rq1_skba.len;
        int skb_arr_rq2_len = pr->rq2_skba.len;
        int skb_arr_rq3_len = pr->rq3_skba.len;
        int processed, processed_rq1, processed_rq2, processed_rq3;
        u64 processed_bytes = 0;
        int wqe_index, last_wqe_index, rq, port_reset;

        processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
        last_wqe_index = 0;

        cqe = ehea_poll_rq1(qp, &wqe_index);
        while ((processed < budget) && cqe) {
                ehea_inc_rq1(qp);
                processed_rq1++;
                processed++;
                if (netif_msg_rx_status(port))
                        ehea_dump(cqe, sizeof(*cqe), "CQE");

                last_wqe_index = wqe_index;
                rmb();
                if (!ehea_check_cqe(cqe, &rq)) {
                        if (rq == 1) {
                                /* LL RQ1 */
                                skb = get_skb_by_index_ll(skb_arr_rq1,
                                                          skb_arr_rq1_len,
                                                          wqe_index);
                                if (unlikely(!skb)) {
                                        netif_info(port, rx_err, dev,
                                                   "LL rq1: skb=NULL\n");

                                        skb = netdev_alloc_skb(dev,
                                                               EHEA_L_PKT_SIZE);
                                        if (!skb)
                                                break;
                                }
                                skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
                                                 cqe->num_bytes_transfered - 4);
                                ehea_fill_skb(dev, skb, cqe, pr);
                        } else if (rq == 2) {
                                /* RQ2 */
                                skb = get_skb_by_index(skb_arr_rq2,
                                                       skb_arr_rq2_len, cqe);
                                if (unlikely(!skb)) {
                                        netif_err(port, rx_err, dev,
                                                  "rq2: skb=NULL\n");
                                        break;
                                }
                                ehea_fill_skb(dev, skb, cqe, pr);
                                processed_rq2++;
                        } else {
                                /* RQ3 */
                                skb = get_skb_by_index(skb_arr_rq3,
                                                       skb_arr_rq3_len, cqe);
                                if (unlikely(!skb)) {
                                        netif_err(port, rx_err, dev,
                                                  "rq3: skb=NULL\n");
                                        break;
                                }
                                ehea_fill_skb(dev, skb, cqe, pr);
                                processed_rq3++;
                        }

                        processed_bytes += skb->len;

                        if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
                                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                                       cqe->vlan_tag);

                        napi_gro_receive(&pr->napi, skb);
                } else {
                        pr->p_stats.poll_receive_errors++;
                        port_reset = ehea_treat_poll_error(pr, rq, cqe,
                                                           &processed_rq2,
                                                           &processed_rq3);
                        if (port_reset)
                                break;
                }
                cqe = ehea_poll_rq1(qp, &wqe_index);
        }

        pr->rx_packets += processed;
        pr->rx_bytes += processed_bytes;

        ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
        ehea_refill_rq2(pr, processed_rq2);
        ehea_refill_rq3(pr, processed_rq3);

        return processed;
}

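/*
 * Send queue sanity check: check_sqs() posts a marker swqe (wr_id ==
 * SWQE_RESTART_CHECK, PURGE + SIGNALLED set) on every send queue;
 * ehea_proc_cqes() flags its completion via sq_restart_flag and
 * reset_sq_restart_flag() wakes the waiter. If nothing comes back
 * within 100 ms the HW and SW queues are assumed out of sync and the
 * port is reset.
 */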
#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull

static void reset_sq_restart_flag(struct ehea_port *port)
{
        int i;

        for (i = 0; i < port->num_def_qps; i++) {
                struct ehea_port_res *pr = &port->port_res[i];

                pr->sq_restart_flag = 0;
        }
        wake_up(&port->restart_wq);
}

static void check_sqs(struct ehea_port *port)
{
        struct ehea_swqe *swqe;
        int swqe_index;
        int i;

        for (i = 0; i < port->num_def_qps; i++) {
                struct ehea_port_res *pr = &port->port_res[i];
                int ret;

                swqe = ehea_get_swqe(pr->qp, &swqe_index);
                memset(swqe, 0, SWQE_HEADER_SIZE);
                atomic_dec(&pr->swqe_avail);

                swqe->tx_control |= EHEA_SWQE_PURGE;
                swqe->wr_id = SWQE_RESTART_CHECK;
                swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
                swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
                swqe->immediate_data_length = 80;

                ehea_post_swqe(pr->qp, swqe);

                ret = wait_event_timeout(port->restart_wq,
                                         pr->sq_restart_flag == 0,
                                         msecs_to_jiffies(100));

                if (!ret) {
                        pr_err("HW/SW queues out of sync\n");
                        ehea_schedule_port_reset(pr->port);
                        return;
                }
        }
}

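/*
 * Reap up to @my_quota send completions: free skbs referenced by SWQE2
 * completions, replenish swqe_avail and restart the TX queue once
 * enough send WQEs are free again. Returns the last polled CQE; a
 * non-NULL result means more completions may be pending.
 */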
static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
        struct sk_buff *skb;
        struct ehea_cq *send_cq = pr->send_cq;
        struct ehea_cqe *cqe;
        int quota = my_quota;
        int cqe_counter = 0;
        int swqe_av = 0;
        int index;
        struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev,
                                                pr - &pr->port->port_res[0]);

        cqe = ehea_poll_cq(send_cq);
        while (cqe && (quota > 0)) {
                ehea_inc_cq(send_cq);

                cqe_counter++;
                rmb();

                if (cqe->wr_id == SWQE_RESTART_CHECK) {
                        pr->sq_restart_flag = 1;
                        swqe_av++;
                        break;
                }

                if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
                        pr_err("Bad send completion status=0x%04X\n",
                               cqe->status);

                        if (netif_msg_tx_err(pr->port))
                                ehea_dump(cqe, sizeof(*cqe), "Send CQE");

                        if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
                                pr_err("Resetting port\n");
                                ehea_schedule_port_reset(pr->port);
                                break;
                        }
                }

                if (netif_msg_tx_done(pr->port))
                        ehea_dump(cqe, sizeof(*cqe), "CQE");

                if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
                           == EHEA_SWQE2_TYPE)) {
                        index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
                        skb = pr->sq_skba.arr[index];
                        dev_consume_skb_any(skb);
                        pr->sq_skba.arr[index] = NULL;
                }

                swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
                quota--;

                cqe = ehea_poll_cq(send_cq);
        }

        ehea_update_feca(send_cq, cqe_counter);
        atomic_add(swqe_av, &pr->swqe_avail);

        if (unlikely(netif_tx_queue_stopped(txq) &&
                     (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) {
                __netif_tx_lock(txq, smp_processor_id());
                if (netif_tx_queue_stopped(txq) &&
                    (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))
                        netif_tx_wake_queue(txq);
                __netif_tx_unlock(txq);
        }

        wake_up(&pr->port->swqe_avail_wq);

        return cqe;
}

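/*
 * NAPI poll: reap send completions, then receive WQEs. When the budget
 * is not exhausted, interrupts are re-armed after napi_complete(), both
 * CQs are re-checked, and napi_reschedule() reclaims the context if new
 * work slipped in between the check and the completion.
 */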
#define EHEA_POLL_MAX_CQES 65535

static int ehea_poll(struct napi_struct *napi, int budget)
{
        struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
                                                napi);
        struct net_device *dev = pr->port->netdev;
        struct ehea_cqe *cqe;
        struct ehea_cqe *cqe_skb = NULL;
        int wqe_index;
        int rx = 0;

        cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
        rx += ehea_proc_rwqes(dev, pr, budget - rx);

        while (rx != budget) {
                napi_complete(napi);
                ehea_reset_cq_ep(pr->recv_cq);
                ehea_reset_cq_ep(pr->send_cq);
                ehea_reset_cq_n1(pr->recv_cq);
                ehea_reset_cq_n1(pr->send_cq);
                rmb();
                cqe = ehea_poll_rq1(pr->qp, &wqe_index);
                cqe_skb = ehea_poll_cq(pr->send_cq);

                if (!cqe && !cqe_skb)
                        return rx;

                if (!napi_reschedule(napi))
                        return rx;

                cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
                rx += ehea_proc_rwqes(dev, pr, budget - rx);
        }

        return rx;
}

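/*
 * Interrupt handlers: the per-queue receive interrupt just kicks NAPI;
 * the qp_eq handler below drains QP error events and schedules a port
 * reset when the reported error data calls for one.
 */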
static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
        struct ehea_port_res *pr = param;

        napi_schedule(&pr->napi);

        return IRQ_HANDLED;
}

static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
        struct ehea_port *port = param;
        struct ehea_eqe *eqe;
        struct ehea_qp *qp;
        u32 qp_token;
        u64 resource_type, aer, aerr;
        int reset_port = 0;

        eqe = ehea_poll_eq(port->qp_eq);

        while (eqe) {
                qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
                pr_err("QP aff_err: entry=0x%llx, token=0x%x\n",
                       eqe->entry, qp_token);

                qp = port->port_res[qp_token].qp;

                resource_type = ehea_error_data(port->adapter, qp->fw_handle,
                                                &aer, &aerr);

                if (resource_type == EHEA_AER_RESTYPE_QP) {
                        if ((aer & EHEA_AER_RESET_MASK) ||
                            (aerr & EHEA_AERR_RESET_MASK))
                                reset_port = 1;
                } else
                        reset_port = 1;   /* Reset in case of CQ or EQ error */

                eqe = ehea_poll_eq(port->qp_eq);
        }

        if (reset_port) {
                pr_err("Resetting port\n");
                ehea_schedule_port_reset(port);
        }

        return IRQ_HANDLED;
}

static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
                                       int logical_port)
{
        int i;

        for (i = 0; i < EHEA_MAX_PORTS; i++)
                if (adapter->port[i])
                        if (adapter->port[i]->logical_port_id == logical_port)
                                return adapter->port[i];
        return NULL;
}

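/*
 * Query H_PORT_CB0 and cache the MAC address, link speed/duplex and the
 * number of default QPs in the ehea_port. Uses GFP_ATOMIC because it
 * can run from the NEQ tasklet.
 */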
int ehea_sense_port_attr(struct ehea_port *port)
{
        int ret;
        u64 hret;
        struct hcp_ehea_port_cb0 *cb0;

        /* may be called via ehea_neq_tasklet() */
        cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
        if (!cb0) {
                pr_err("no mem for cb0\n");
                ret = -ENOMEM;
                goto out;
        }

        hret = ehea_h_query_ehea_port(port->adapter->handle,
                                      port->logical_port_id, H_PORT_CB0,
                                      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
                                      cb0);
        if (hret != H_SUCCESS) {
                ret = -EIO;
                goto out_free;
        }

        /* MAC address */
        port->mac_addr = cb0->port_mac_addr << 16;

        if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
                ret = -EADDRNOTAVAIL;
                goto out_free;
        }

        /* Port speed */
        switch (cb0->port_speed) {
        case H_SPEED_10M_H:
                port->port_speed = EHEA_SPEED_10M;
                port->full_duplex = 0;
                break;
        case H_SPEED_10M_F:
                port->port_speed = EHEA_SPEED_10M;
                port->full_duplex = 1;
                break;
        case H_SPEED_100M_H:
                port->port_speed = EHEA_SPEED_100M;
                port->full_duplex = 0;
                break;
        case H_SPEED_100M_F:
                port->port_speed = EHEA_SPEED_100M;
                port->full_duplex = 1;
                break;
        case H_SPEED_1G_F:
                port->port_speed = EHEA_SPEED_1G;
                port->full_duplex = 1;
                break;
        case H_SPEED_10G_F:
                port->port_speed = EHEA_SPEED_10G;
                port->full_duplex = 1;
                break;
        default:
                port->port_speed = 0;
                port->full_duplex = 0;
                break;
        }

        port->autoneg = 1;
        port->num_mcs = cb0->num_default_qps;

        /* Number of default QPs */
        if (use_mcs)
                port->num_def_qps = cb0->num_default_qps;
        else
                port->num_def_qps = 1;

        if (!port->num_def_qps) {
                ret = -EINVAL;
                goto out_free;
        }

        ret = 0;
out_free:
        if (ret || netif_msg_probe(port))
                ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
        free_page((unsigned long)cb0);
out:
        return ret;
}

int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
        struct hcp_ehea_port_cb4 *cb4;
        u64 hret;
        int ret = 0;

        cb4 = (void *)get_zeroed_page(GFP_KERNEL);
        if (!cb4) {
                pr_err("no mem for cb4\n");
                ret = -ENOMEM;
                goto out;
        }

        cb4->port_speed = port_speed;

        netif_carrier_off(port->netdev);

        hret = ehea_h_modify_ehea_port(port->adapter->handle,
                                       port->logical_port_id,
                                       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
        if (hret == H_SUCCESS) {
                port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

                hret = ehea_h_query_ehea_port(port->adapter->handle,
                                              port->logical_port_id,
                                              H_PORT_CB4, H_PORT_CB4_SPEED,
                                              cb4);
                if (hret == H_SUCCESS) {
                        switch (cb4->port_speed) {
                        case H_SPEED_10M_H:
                                port->port_speed = EHEA_SPEED_10M;
                                port->full_duplex = 0;
                                break;
                        case H_SPEED_10M_F:
                                port->port_speed = EHEA_SPEED_10M;
                                port->full_duplex = 1;
                                break;
                        case H_SPEED_100M_H:
                                port->port_speed = EHEA_SPEED_100M;
                                port->full_duplex = 0;
                                break;
                        case H_SPEED_100M_F:
                                port->port_speed = EHEA_SPEED_100M;
                                port->full_duplex = 1;
                                break;
                        case H_SPEED_1G_F:
                                port->port_speed = EHEA_SPEED_1G;
                                port->full_duplex = 1;
                                break;
                        case H_SPEED_10G_F:
                                port->port_speed = EHEA_SPEED_10G;
                                port->full_duplex = 1;
                                break;
                        default:
                                port->port_speed = 0;
                                port->full_duplex = 0;
                                break;
                        }
                } else {
                        pr_err("Failed sensing port speed\n");
                        ret = -EIO;
                }
        } else {
                if (hret == H_AUTHORITY) {
                        pr_info("Hypervisor denied setting port speed\n");
                        ret = -EPERM;
                } else {
                        ret = -EIO;
                        pr_err("Failed setting port speed\n");
                }
        }
        if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
                netif_carrier_on(port->netdev);

        free_page((unsigned long)cb4);
out:
        return ret;
}

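/*
 * Decode one entry from the notification event queue: propagate logical
 * and (if prop_carrier_state is set) physical link transitions to the
 * net_device, and log adapter/port malfunction events.
 */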
static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
        int ret;
        u8 ec;
        u8 portnum;
        struct ehea_port *port;
        struct net_device *dev;

        ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
        portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
        port = ehea_get_port(adapter, portnum);
        if (!port) {
                netdev_err(NULL, "unknown portnum %x\n", portnum);
                return;
        }
        dev = port->netdev;

        switch (ec) {
        case EHEA_EC_PORTSTATE_CHG:     /* port state change */
                if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
                        if (!netif_carrier_ok(dev)) {
                                ret = ehea_sense_port_attr(port);
                                if (ret) {
                                        netdev_err(dev, "failed resensing port attributes\n");
                                        break;
                                }

                                netif_info(port, link, dev,
                                           "Logical port up: %dMbps %s Duplex\n",
                                           port->port_speed,
                                           port->full_duplex == 1 ?
                                           "Full" : "Half");

                                netif_carrier_on(dev);
                                netif_wake_queue(dev);
                        }
                } else
                        if (netif_carrier_ok(dev)) {
                                netif_info(port, link, dev,
                                           "Logical port down\n");
                                netif_carrier_off(dev);
                                netif_tx_disable(dev);
                        }

                if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
                        port->phy_link = EHEA_PHY_LINK_UP;
                        netif_info(port, link, dev,
                                   "Physical port up\n");
                        if (prop_carrier_state)
                                netif_carrier_on(dev);
                } else {
                        port->phy_link = EHEA_PHY_LINK_DOWN;
                        netif_info(port, link, dev,
                                   "Physical port down\n");
                        if (prop_carrier_state)
                                netif_carrier_off(dev);
                }

                if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
                        netdev_info(dev,
                                    "External switch port is primary port\n");
                else
                        netdev_info(dev,
                                    "External switch port is backup port\n");

                break;
        case EHEA_EC_ADAPTER_MALFUNC:
                netdev_err(dev, "Adapter malfunction\n");
                break;
        case EHEA_EC_PORT_MALFUNC:
                netdev_info(dev, "Port malfunction\n");
                netif_carrier_off(dev);
                netif_tx_disable(dev);
                break;
        default:
                netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
                break;
        }
}

static void ehea_neq_tasklet(unsigned long data)
{
        struct ehea_adapter *adapter = (struct ehea_adapter *)data;
        struct ehea_eqe *eqe;
        u64 event_mask;

        eqe = ehea_poll_eq(adapter->neq);
        pr_debug("eqe=%p\n", eqe);

        while (eqe) {
                pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry);
                ehea_parse_eqe(adapter, eqe->entry);
                eqe = ehea_poll_eq(adapter->neq);
                pr_debug("next eqe=%p\n", eqe);
        }

        event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
                   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
                   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

        ehea_h_reset_events(adapter->handle,
                            adapter->neq->fw_handle, event_mask);
}

static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
        struct ehea_adapter *adapter = param;

        tasklet_hi_schedule(&adapter->neq_tasklet);
        return IRQ_HANDLED;
}

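/*
 * Prime a freshly initialized port resource: fill RQ1 completely and
 * post all but one WQE on RQ2 and RQ3.
 */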
static int ehea_fill_port_res(struct ehea_port_res *pr)
{
        int ret;
        struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

        ehea_init_fill_rq1(pr, pr->rq1_skba.len);

        ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

        ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

        return ret;
}

static int ehea_reg_interrupts(struct net_device *dev)
{
        struct ehea_port *port = netdev_priv(dev);
        struct ehea_port_res *pr;
        int i, ret;

        snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
                 dev->name);

        ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
                                  ehea_qp_aff_irq_handler,
                                  0, port->int_aff_name, port);
        if (ret) {
                netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
                           port->qp_eq->attr.ist1);
                goto out_free_qpeq;
        }

        netif_info(port, ifup, dev,
                   "irq_handle 0x%X for function qp_aff_irq_handler registered\n",
                   port->qp_eq->attr.ist1);

        for (i = 0; i < port->num_def_qps; i++) {
                pr = &port->port_res[i];
                snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
                         "%s-queue%d", dev->name, i);
                ret = ibmebus_request_irq(pr->eq->attr.ist1,
                                          ehea_recv_irq_handler,
                                          0, pr->int_send_name, pr);
                if (ret) {
                        netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
                                   i, pr->eq->attr.ist1);
                        goto out_free_req;
                }
                netif_info(port, ifup, dev,
                           "irq_handle 0x%X for function ehea_queue_int %d registered\n",
                           pr->eq->attr.ist1, i);
        }
out:
        return ret;

out_free_req:
        while (--i >= 0) {
                u32 ist = port->port_res[i].eq->attr.ist1;

                ibmebus_free_irq(ist, &port->port_res[i]);
        }

out_free_qpeq:
        ibmebus_free_irq(port->qp_eq->attr.ist1, port);
        i = port->num_def_qps;

        goto out;
}

static void ehea_free_interrupts(struct net_device *dev)
{
        struct ehea_port *port = netdev_priv(dev);
        struct ehea_port_res *pr;
        int i;

        /* send */
        for (i = 0; i < port->num_def_qps; i++) {
                pr = &port->port_res[i];
                ibmebus_free_irq(pr->eq->attr.ist1, pr);
                netif_info(port, intr, dev,
                           "free send irq for res %d with handle 0x%X\n",
                           i, pr->eq->attr.ist1);
        }

        /* associated events */
        ibmebus_free_irq(port->qp_eq->attr.ist1, port);
        netif_info(port, intr, dev,
                   "associated event interrupt for handle 0x%X freed\n",
                   port->qp_eq->attr.ist1);
}

static int ehea_configure_port(struct ehea_port *port)
{
        int ret, i;
        u64 hret, mask;
        struct hcp_ehea_port_cb0 *cb0;

        ret = -ENOMEM;
        cb0 = (void *)get_zeroed_page(GFP_KERNEL);
        if (!cb0)
                goto out;

        cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
                     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
                     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
                     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
                     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
                                      PXLY_RC_VLAN_FILTER)
                     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

        for (i = 0; i < port->num_mcs; i++)
                if (use_mcs)
                        cb0->default_qpn_arr[i] =
                                port->port_res[i].qp->init_attr.qp_nr;
                else
                        cb0->default_qpn_arr[i] =
                                port->port_res[0].qp->init_attr.qp_nr;

        if (netif_msg_ifup(port))
                ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

        mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
             | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

        hret = ehea_h_modify_ehea_port(port->adapter->handle,
                                       port->logical_port_id,
                                       H_PORT_CB0, mask, cb0);
        ret = -EIO;
        if (hret != H_SUCCESS)
                goto out_free;

        ret = 0;

out_free:
        free_page((unsigned long)cb0);
out:
        return ret;
}

static int ehea_gen_smrs(struct ehea_port_res *pr)
{
        int ret;
        struct ehea_adapter *adapter = pr->port->adapter;

        ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
        if (ret)
                goto out;

        ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
        if (ret)
                goto out_free;

        return 0;

out_free:
        ehea_rem_mr(&pr->send_mr);
out:
        pr_err("Generating SMRS failed\n");
        return -EIO;
}

static int ehea_rem_smrs(struct ehea_port_res *pr)
{
        if ((ehea_rem_mr(&pr->send_mr)) ||
            (ehea_rem_mr(&pr->recv_mr)))
                return -EIO;
        else
                return 0;
}

static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
        int arr_size = sizeof(void *) * max_q_entries;

        q_skba->arr = vzalloc(arr_size);
        if (!q_skba->arr)
                return -ENOMEM;

        q_skba->len = max_q_entries;
        q_skba->index = 0;
        q_skba->os_skbs = 0;

        return 0;
}

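/*
 * Build one port resource: its event queue, send/receive CQs, the QP
 * with three receive queues, the skb tracking arrays and the shared
 * memory regions. The software packet/byte counters are saved around
 * the memset() so they survive a re-init; on failure the out_free path
 * releases init_attr and the skb arrays.
 */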
static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
                              struct port_res_cfg *pr_cfg, int queue_token)
{
        struct ehea_adapter *adapter = port->adapter;
        enum ehea_eq_type eq_type = EHEA_EQ;
        struct ehea_qp_init_attr *init_attr = NULL;
        int ret = -EIO;
        u64 tx_bytes, rx_bytes, tx_packets, rx_packets;

        tx_bytes = pr->tx_bytes;
        tx_packets = pr->tx_packets;
        rx_bytes = pr->rx_bytes;
        rx_packets = pr->rx_packets;

        memset(pr, 0, sizeof(struct ehea_port_res));

        pr->tx_bytes = tx_bytes;
        pr->tx_packets = tx_packets;
        pr->rx_bytes = rx_bytes;
        pr->rx_packets = rx_packets;

        pr->port = port;

        pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
        if (!pr->eq) {
                pr_err("create_eq failed (eq)\n");
                goto out_free;
        }

        pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
                                     pr->eq->fw_handle,
                                     port->logical_port_id);
        if (!pr->recv_cq) {
                pr_err("create_cq failed (cq_recv)\n");
                goto out_free;
        }

        pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
                                     pr->eq->fw_handle,
                                     port->logical_port_id);
        if (!pr->send_cq) {
                pr_err("create_cq failed (cq_send)\n");
                goto out_free;
        }

        if (netif_msg_ifup(port))
                pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n",
                        pr->send_cq->attr.act_nr_of_cqes,
                        pr->recv_cq->attr.act_nr_of_cqes);

        init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
        if (!init_attr) {
                ret = -ENOMEM;
                pr_err("no mem for ehea_qp_init_attr\n");
                goto out_free;
        }

        init_attr->low_lat_rq1 = 1;
        init_attr->signalingtype = 1;   /* generate CQE if specified in WQE */
        init_attr->rq_count = 3;
        init_attr->qp_token = queue_token;
        init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
        init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
        init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
        init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
        init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
        init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
        init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
        init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
        init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
        init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
        init_attr->port_nr = port->logical_port_id;
        init_attr->send_cq_handle = pr->send_cq->fw_handle;
        init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
        init_attr->aff_eq_handle = port->qp_eq->fw_handle;

        pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
        if (!pr->qp) {
                pr_err("create_qp failed\n");
                ret = -EIO;
                goto out_free;
        }

        if (netif_msg_ifup(port))
                pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n",
                        init_attr->qp_nr,
                        init_attr->act_nr_send_wqes,
                        init_attr->act_nr_rwqes_rq1,
                        init_attr->act_nr_rwqes_rq2,
                        init_attr->act_nr_rwqes_rq3);

        pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;

        ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
        ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
        ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
        ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
        if (ret)
                goto out_free;

        pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
        if (ehea_gen_smrs(pr) != 0) {
                ret = -EIO;
                goto out_free;
        }

        atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);

        kfree(init_attr);

        netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);

        ret = 0;
        goto out;

out_free:
        kfree(init_attr);
        vfree(pr->sq_skba.arr);
        vfree(pr->rq1_skba.arr);
        vfree(pr->rq2_skba.arr);
        vfree(pr->rq3_skba.arr);
1557        ehea_destroy_qp(pr->qp);
1558        ehea_destroy_cq(pr->send_cq);
1559        ehea_destroy_cq(pr->recv_cq);
1560        ehea_destroy_eq(pr->eq);
1561out:
1562        return ret;
1563}
1564
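    /*
     * Tear down a port resource: destroy the QP, CQs and EQ, free any
     * skbs still held in the queue arrays and deregister the SMRs.
     */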
1565static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
1566{
1567        int ret, i;
1568
1569        if (pr->qp)
1570                netif_napi_del(&pr->napi);
1571
1572        ret = ehea_destroy_qp(pr->qp);
1573
1574        if (!ret) {
1575                ehea_destroy_cq(pr->send_cq);
1576                ehea_destroy_cq(pr->recv_cq);
1577                ehea_destroy_eq(pr->eq);
1578
1579                for (i = 0; i < pr->rq1_skba.len; i++)
1580                        if (pr->rq1_skba.arr[i])
1581                                dev_kfree_skb(pr->rq1_skba.arr[i]);
1582
1583                for (i = 0; i < pr->rq2_skba.len; i++)
1584                        if (pr->rq2_skba.arr[i])
1585                                dev_kfree_skb(pr->rq2_skba.arr[i]);
1586
1587                for (i = 0; i < pr->rq3_skba.len; i++)
1588                        if (pr->rq3_skba.arr[i])
1589                                dev_kfree_skb(pr->rq3_skba.arr[i]);
1590
1591                for (i = 0; i < pr->sq_skba.len; i++)
1592                        if (pr->sq_skba.arr[i])
1593                                dev_kfree_skb(pr->sq_skba.arr[i]);
1594
1595                vfree(pr->rq1_skba.arr);
1596                vfree(pr->rq2_skba.arr);
1597                vfree(pr->rq3_skba.arr);
1598                vfree(pr->sq_skba.arr);
1599                ret = ehea_rem_smrs(pr);
1600        }
1601        return ret;
1602}
1603
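    /*
     * Fill the SWQE2 immediate data area: for TSO frames only the
     * headers, for large frames the first SWQE2_MAX_IMM bytes, and for
     * small frames the whole linear data. Whatever remains of the
     * linear part is referenced through sg1entry.
     */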
1604static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe,
1605                                  u32 lkey)
1606{
1607        int skb_data_size = skb_headlen(skb);
1608        u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
1609        struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
1610        unsigned int immediate_len = SWQE2_MAX_IMM;
1611
1612        swqe->descriptors = 0;
1613
1614        if (skb_is_gso(skb)) {
1615                swqe->tx_control |= EHEA_SWQE_TSO;
1616                swqe->mss = skb_shinfo(skb)->gso_size;
1617                /*
1618                 * For TSO packets we only copy the headers into the
1619                 * immediate area.
1620                 */
1621                immediate_len = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
1622        }
1623
1624        if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) {
1625                skb_copy_from_linear_data(skb, imm_data, immediate_len);
1626                swqe->immediate_data_length = immediate_len;
1627
1628                if (skb_data_size > immediate_len) {
1629                        sg1entry->l_key = lkey;
1630                        sg1entry->len = skb_data_size - immediate_len;
1631                        sg1entry->vaddr =
1632                                ehea_map_vaddr(skb->data + immediate_len);
1633                        swqe->descriptors++;
1634                }
1635        } else {
1636                skb_copy_from_linear_data(skb, imm_data, skb_data_size);
1637                swqe->immediate_data_length = skb_data_size;
1638        }
1639}
1640
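    /*
     * Build the SWQE2 scatter-gather list: immediate data first, then
     * sg1entry for the first fragment (if still free) and one list
     * entry per remaining page fragment.
     */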
1641static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
1642                                    struct ehea_swqe *swqe, u32 lkey)
1643{
1644        struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
1645        skb_frag_t *frag;
1646        int nfrags, sg1entry_contains_frag_data, i;
1647
1648        nfrags = skb_shinfo(skb)->nr_frags;
1649        sg1entry = &swqe->u.immdata_desc.sg_entry;
1650        sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
1651        sg1entry_contains_frag_data = 0;
1652
1653        write_swqe2_immediate(skb, swqe, lkey);
1654
1655        /* write descriptors */
1656        if (nfrags > 0) {
1657                if (swqe->descriptors == 0) {
1658                        /* sg1entry not yet used */
1659                        frag = &skb_shinfo(skb)->frags[0];
1660
1661                        /* copy sg1entry data */
1662                        sg1entry->l_key = lkey;
1663                        sg1entry->len = skb_frag_size(frag);
1664                        sg1entry->vaddr =
1665                                ehea_map_vaddr(skb_frag_address(frag));
1666                        swqe->descriptors++;
1667                        sg1entry_contains_frag_data = 1;
1668                }
1669
1670                for (i = sg1entry_contains_frag_data; i < nfrags; i++) {
1672                        frag = &skb_shinfo(skb)->frags[i];
1673                        sgentry = &sg_list[i - sg1entry_contains_frag_data];
1674
1675                        sgentry->l_key = lkey;
1676                        sgentry->len = skb_frag_size(frag);
1677                        sgentry->vaddr = ehea_map_vaddr(skb_frag_address(frag));
1678                        swqe->descriptors++;
1679                }
1680        }
1681}
1682
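    /*
     * Register or deregister (depending on hcallid) the port's broadcast
     * address for both untagged and VLAN traffic.
     */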
1683static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
1684{
1685        int ret = 0;
1686        u64 hret;
1687        u8 reg_type;
1688
1689        /* De/Register untagged packets */
1690        reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
1691        hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1692                                     port->logical_port_id,
1693                                     reg_type, port->mac_addr, 0, hcallid);
1694        if (hret != H_SUCCESS) {
1695                pr_err("%sregistering bc address failed (untagged)\n",
1696                       hcallid == H_REG_BCMC ? "" : "de");
1697                ret = -EIO;
1698                goto out_herr;
1699        }
1700
1701        /* De/Register VLAN packets */
1702        reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
1703        hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1704                                     port->logical_port_id,
1705                                     reg_type, port->mac_addr, 0, hcallid);
1706        if (hret != H_SUCCESS) {
1707                pr_err("%sregistering bc address failed (vlan)\n",
1708                       hcallid == H_REG_BCMC ? "" : "de");
1709                ret = -EIO;
1710        }
1711out_herr:
1712        return ret;
1713}
1714
1715static int ehea_set_mac_addr(struct net_device *dev, void *sa)
1716{
1717        struct ehea_port *port = netdev_priv(dev);
1718        struct sockaddr *mac_addr = sa;
1719        struct hcp_ehea_port_cb0 *cb0;
1720        int ret;
1721        u64 hret;
1722
1723        if (!is_valid_ether_addr(mac_addr->sa_data)) {
1724                ret = -EADDRNOTAVAIL;
1725                goto out;
1726        }
1727
1728        cb0 = (void *)get_zeroed_page(GFP_KERNEL);
1729        if (!cb0) {
1730                pr_err("no mem for cb0\n");
1731                ret = -ENOMEM;
1732                goto out;
1733        }
1734
1735        memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);
1736
1737        cb0->port_mac_addr = cb0->port_mac_addr >> 16;
1738
1739        hret = ehea_h_modify_ehea_port(port->adapter->handle,
1740                                       port->logical_port_id, H_PORT_CB0,
1741                                       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
1742        if (hret != H_SUCCESS) {
1743                ret = -EIO;
1744                goto out_free;
1745        }
1746
1747        memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
1748
1749        /* Deregister old MAC in pHYP */
1750        if (port->state == EHEA_PORT_UP) {
1751                ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
1752                if (ret)
1753                        goto out_upregs;
1754        }
1755
1756        port->mac_addr = cb0->port_mac_addr << 16;
1757
1758        /* Register new MAC in pHYP */
1759        if (port->state == EHEA_PORT_UP) {
1760                ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
1761                if (ret)
1762                        goto out_upregs;
1763        }
1764
1765        ret = 0;
1766
1767out_upregs:
1768        ehea_update_bcmc_registrations();
1769out_free:
1770        free_page((unsigned long)cb0);
1771out:
1772        return ret;
1773}
1774
1775static void ehea_promiscuous_error(u64 hret, int enable)
1776{
1777        if (hret == H_AUTHORITY)
1778                pr_info("Hypervisor denied %sabling promiscuous mode\n",
1779                        enable == 1 ? "en" : "dis");
1780        else
1781                pr_err("failed %sabling promiscuous mode\n",
1782                       enable == 1 ? "en" : "dis");
1783}
1784
1785static void ehea_promiscuous(struct net_device *dev, int enable)
1786{
1787        struct ehea_port *port = netdev_priv(dev);
1788        struct hcp_ehea_port_cb7 *cb7;
1789        u64 hret;
1790
1791        if (enable == port->promisc)
1792                return;
1793
1794        cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
1795        if (!cb7) {
1796                pr_err("no mem for cb7\n");
1797                goto out;
1798        }
1799
1800        /* Modify Pxs_DUCQPN in CB7 */
1801        cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;
1802
1803        hret = ehea_h_modify_ehea_port(port->adapter->handle,
1804                                       port->logical_port_id,
1805                                       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
1806        if (hret) {
1807                ehea_promiscuous_error(hret, enable);
1808                goto out;
1809        }
1810
1811        port->promisc = enable;
1812out:
1813        free_page((unsigned long)cb7);
1814}
1815
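    /*
     * Register or deregister one multicast address, or all multicast
     * traffic when mc_mac_addr is 0, for untagged and VLAN packets.
     */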
1816static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
1817                                     u32 hcallid)
1818{
1819        u64 hret;
1820        u8 reg_type;
1821
1822        reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_UNTAGGED;
1823        if (mc_mac_addr == 0)
1824                reg_type |= EHEA_BCMC_SCOPE_ALL;
1825
1826        hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1827                                     port->logical_port_id,
1828                                     reg_type, mc_mac_addr, 0, hcallid);
1829        if (hret)
1830                goto out;
1831
1832        reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_VLANID_ALL;
1833        if (mc_mac_addr == 0)
1834                reg_type |= EHEA_BCMC_SCOPE_ALL;
1835
1836        hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1837                                     port->logical_port_id,
1838                                     reg_type, mc_mac_addr, 0, hcallid);
1839out:
1840        return hret;
1841}
1842
1843static int ehea_drop_multicast_list(struct net_device *dev)
1844{
1845        struct ehea_port *port = netdev_priv(dev);
1846        struct ehea_mc_list *mc_entry = port->mc_list;
1847        struct list_head *pos;
1848        struct list_head *temp;
1849        int ret = 0;
1850        u64 hret;
1851
1852        list_for_each_safe(pos, temp, &(port->mc_list->list)) {
1853                mc_entry = list_entry(pos, struct ehea_mc_list, list);
1854
1855                hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
1856                                                 H_DEREG_BCMC);
1857                if (hret) {
1858                        pr_err("failed deregistering mcast MAC\n");
1859                        ret = -EIO;
1860                }
1861
1862                list_del(pos);
1863                kfree(mc_entry);
1864        }
1865        return ret;
1866}
1867
1868static void ehea_allmulti(struct net_device *dev, int enable)
1869{
1870        struct ehea_port *port = netdev_priv(dev);
1871        u64 hret;
1872
1873        if (!port->allmulti) {
1874                if (enable) {
1875                        /* Enable ALLMULTI */
1876                        ehea_drop_multicast_list(dev);
1877                        hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
1878                        if (!hret)
1879                                port->allmulti = 1;
1880                        else
1881                                netdev_err(dev,
1882                                           "failed enabling IFF_ALLMULTI\n");
1883                }
1884        } else {
1885                if (!enable) {
1886                        /* Disable ALLMULTI */
1887                        hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
1888                        if (!hret)
1889                                port->allmulti = 0;
1890                        else
1891                                netdev_err(dev,
1892                                           "failed disabling IFF_ALLMULTI\n");
1893                }
1894        }
1895}
1896
1897static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
1898{
1899        struct ehea_mc_list *ehea_mcl_entry;
1900        u64 hret;
1901
1902        ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
1903        if (!ehea_mcl_entry)
1904                return;
1905
1906        INIT_LIST_HEAD(&ehea_mcl_entry->list);
1907
1908        memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);
1909
1910        hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
1911                                         H_REG_BCMC);
1912        if (!hret) {
1913                list_add(&ehea_mcl_entry->list, &port->mc_list->list);
1914        } else {
1915                pr_err("failed registering mcast MAC\n");
1916                kfree(ehea_mcl_entry);
1917        }
1918}
1919
1920static void ehea_set_multicast_list(struct net_device *dev)
1921{
1922        struct ehea_port *port = netdev_priv(dev);
1923        struct netdev_hw_addr *ha;
1924        int ret;
1925
1926        ehea_promiscuous(dev, !!(dev->flags & IFF_PROMISC));
1927
1928        if (dev->flags & IFF_ALLMULTI) {
1929                ehea_allmulti(dev, 1);
1930                goto out;
1931        }
1932        ehea_allmulti(dev, 0);
1933
1934        if (!netdev_mc_empty(dev)) {
1935                ret = ehea_drop_multicast_list(dev);
1936                if (ret) {
1937                        /* Dropping the current multicast list failed.
1938                         * Enabling ALL_MULTI is the best we can do.
1939                         */
1940                        ehea_allmulti(dev, 1);
1941                }
1942
1943                if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
1944                        pr_info("Mcast registration limit reached (0x%llx). Use ALLMULTI!\n",
1945                                port->adapter->max_mc_mac);
1946                        goto out;
1947                }
1948
1949                netdev_for_each_mc_addr(ha, dev)
1950                        ehea_add_multicast_entry(port, ha->addr);
1952        }
1953out:
1954        ehea_update_bcmc_registrations();
1955}
1956
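    /*
     * SWQE setup shared by both transmit paths: flag immediate data and
     * CRC generation and, for IPv4, fill in the header offsets needed
     * for TCP/UDP checksum offload.
     */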
1957static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
1958{
1959        swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;
1960
1961        if (vlan_get_protocol(skb) != htons(ETH_P_IP))
1962                return;
1963
1964        if (skb->ip_summed == CHECKSUM_PARTIAL)
1965                swqe->tx_control |= EHEA_SWQE_IP_CHECKSUM;
1966
1967        swqe->ip_start = skb_network_offset(skb);
1968        swqe->ip_end = swqe->ip_start + ip_hdrlen(skb) - 1;
1969
1970        switch (ip_hdr(skb)->protocol) {
1971        case IPPROTO_UDP:
1972                if (skb->ip_summed == CHECKSUM_PARTIAL)
1973                        swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
1974
1975                swqe->tcp_offset = swqe->ip_end + 1 +
1976                                   offsetof(struct udphdr, check);
1977                break;
1978
1979        case IPPROTO_TCP:
1980                if (skb->ip_summed == CHECKSUM_PARTIAL)
1981                        swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
1982
1983                swqe->tcp_offset = swqe->ip_end + 1 +
1984                                   offsetof(struct tcphdr, check);
1985                break;
1986        }
1987}
1988
1989static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
1990                       struct ehea_swqe *swqe, u32 lkey)
1991{
1992        swqe->tx_control |= EHEA_SWQE_DESCRIPTORS_PRESENT;
1993
1994        xmit_common(skb, swqe);
1995
1996        write_swqe2_data(skb, dev, swqe, lkey);
1997}
1998
1999static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
2000                       struct ehea_swqe *swqe)
2001{
2002        u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
2003
2004        xmit_common(skb, swqe);
2005
2006        if (!skb->data_len)
2007                skb_copy_from_linear_data(skb, imm_data, skb->len);
2008        else
2009                skb_copy_bits(skb, 0, imm_data, skb->len);
2010
2011        swqe->immediate_data_length = skb->len;
2012        dev_consume_skb_any(skb);
2013}
2014
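    /*
     * Transmit handler. Frames that fit into the immediate area are sent
     * as SWQE3 without descriptors; larger frames go out as SWQE2 with a
     * scatter-gather list and are tracked in sq_skba until completion.
     */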
2015static netdev_tx_t ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
2016{
2017        struct ehea_port *port = netdev_priv(dev);
2018        struct ehea_swqe *swqe;
2019        u32 lkey;
2020        int swqe_index;
2021        struct ehea_port_res *pr;
2022        struct netdev_queue *txq;
2023
2024        pr = &port->port_res[skb_get_queue_mapping(skb)];
2025        txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2026
2027        swqe = ehea_get_swqe(pr->qp, &swqe_index);
2028        memset(swqe, 0, SWQE_HEADER_SIZE);
2029        atomic_dec(&pr->swqe_avail);
2030
2031        if (skb_vlan_tag_present(skb)) {
2032                swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
2033                swqe->vlan_tag = skb_vlan_tag_get(skb);
2034        }
2035
2036        pr->tx_packets++;
2037        pr->tx_bytes += skb->len;
2038
2039        if (skb->len <= SWQE3_MAX_IMM) {
2040                u32 sig_iv = port->sig_comp_iv;
2041                u32 swqe_num = pr->swqe_id_counter;
2042                ehea_xmit3(skb, dev, swqe);
2043                swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
2044                        | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
2045                if (pr->swqe_ll_count >= (sig_iv - 1)) {
2046                        swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
2047                                                      sig_iv);
2048                        swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
2049                        pr->swqe_ll_count = 0;
2050                } else
2051                        pr->swqe_ll_count += 1;
2052        } else {
2053                swqe->wr_id =
2054                        EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
2055                      | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
2056                      | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
2057                      | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
2058                pr->sq_skba.arr[pr->sq_skba.index] = skb;
2059
2060                pr->sq_skba.index++;
2061                pr->sq_skba.index &= (pr->sq_skba.len - 1);
2062
2063                lkey = pr->send_mr.lkey;
2064                ehea_xmit2(skb, dev, swqe, lkey);
2065                swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
2066        }
2067        pr->swqe_id_counter += 1;
2068
2069        netif_info(port, tx_queued, dev,
2070                   "post swqe on QP %d\n", pr->qp->init_attr.qp_nr);
2071        if (netif_msg_tx_queued(port))
2072                ehea_dump(swqe, 512, "swqe");
2073
2074        if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
2075                netif_tx_stop_queue(txq);
2076                swqe->tx_control |= EHEA_SWQE_PURGE;
2077        }
2078
2079        ehea_post_swqe(pr->qp, swqe);
2080
2081        if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
2082                pr->p_stats.queue_stopped++;
2083                netif_tx_stop_queue(txq);
2084        }
2085
2086        return NETDEV_TX_OK;
2087}
2088
2089static int ehea_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
2090{
2091        struct ehea_port *port = netdev_priv(dev);
2092        struct ehea_adapter *adapter = port->adapter;
2093        struct hcp_ehea_port_cb1 *cb1;
2094        int index;
2095        u64 hret;
2096        int err = 0;
2097
2098        cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2099        if (!cb1) {
2100                pr_err("no mem for cb1\n");
2101                err = -ENOMEM;
2102                goto out;
2103        }
2104
2105        hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2106                                      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2107        if (hret != H_SUCCESS) {
2108                pr_err("query_ehea_port failed\n");
2109                err = -EINVAL;
2110                goto out;
2111        }
2112
2113        index = (vid / 64);
2114        cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));
2115
2116        hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2117                                       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2118        if (hret != H_SUCCESS) {
2119                pr_err("modify_ehea_port failed\n");
2120                err = -EINVAL;
2121        }
2122out:
2123        free_page((unsigned long)cb1);
2124        return err;
2125}
2126
2127static int ehea_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
2128{
2129        struct ehea_port *port = netdev_priv(dev);
2130        struct ehea_adapter *adapter = port->adapter;
2131        struct hcp_ehea_port_cb1 *cb1;
2132        int index;
2133        u64 hret;
2134        int err = 0;
2135
2136        cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2137        if (!cb1) {
2138                pr_err("no mem for cb1\n");
2139                err = -ENOMEM;
2140                goto out;
2141        }
2142
2143        hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2144                                      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2145        if (hret != H_SUCCESS) {
2146                pr_err("query_ehea_port failed\n");
2147                err = -EINVAL;
2148                goto out;
2149        }
2150
2151        index = (vid / 64);
2152        cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));
2153
2154        hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2155                                       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2156        if (hret != H_SUCCESS) {
2157                pr_err("modify_ehea_port failed\n");
2158                err = -EINVAL;
2159        }
2160out:
2161        free_page((unsigned long)cb1);
2162        return err;
2163}
2164
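    /*
     * Bring a queue pair up by stepping its control register through
     * INITIALIZED, ENABLED and RDY2SND, re-querying the control block
     * before each transition.
     */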
2165static int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
2166{
2167        int ret = -EIO;
2168        u64 hret;
2169        u16 dummy16 = 0;
2170        u64 dummy64 = 0;
2171        struct hcp_modify_qp_cb0 *cb0;
2172
2173        cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2174        if (!cb0) {
2175                ret = -ENOMEM;
2176                goto out;
2177        }
2178
2179        hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2180                                    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2181        if (hret != H_SUCCESS) {
2182                pr_err("query_ehea_qp failed (1)\n");
2183                goto out;
2184        }
2185
2186        cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
2187        hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2188                                     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2189                                     &dummy64, &dummy64, &dummy16, &dummy16);
2190        if (hret != H_SUCCESS) {
2191                pr_err("modify_ehea_qp failed (1)\n");
2192                goto out;
2193        }
2194
2195        hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2196                                    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2197        if (hret != H_SUCCESS) {
2198                pr_err("query_ehea_qp failed (2)\n");
2199                goto out;
2200        }
2201
2202        cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
2203        hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2204                                     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2205                                     &dummy64, &dummy64, &dummy16, &dummy16);
2206        if (hret != H_SUCCESS) {
2207                pr_err("modify_ehea_qp failed (2)\n");
2208                goto out;
2209        }
2210
2211        hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2212                                    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2213        if (hret != H_SUCCESS) {
2214                pr_err("query_ehea_qp failed (3)\n");
2215                goto out;
2216        }
2217
2218        cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
2219        hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2220                                     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2221                                     &dummy64, &dummy64, &dummy16, &dummy16);
2222        if (hret != H_SUCCESS) {
2223                pr_err("modify_ehea_qp failed (3)\n");
2224                goto out;
2225        }
2226
2227        hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2228                                    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2229        if (hret != H_SUCCESS) {
2230                pr_err("query_ehea_qp failed (4)\n");
2231                goto out;
2232        }
2233
2234        ret = 0;
2235out:
2236        free_page((unsigned long)cb0);
2237        return ret;
2238}
2239
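    /*
     * Create the per-port QP event queue and initialize one port
     * resource per default queue pair.
     */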
2240static int ehea_port_res_setup(struct ehea_port *port, int def_qps)
2241{
2242        int ret, i;
2243        struct port_res_cfg pr_cfg;
2244        enum ehea_eq_type eq_type = EHEA_EQ;
2245
2246        port->qp_eq = ehea_create_eq(port->adapter, eq_type,
2247                                   EHEA_MAX_ENTRIES_EQ, 1);
2248        if (!port->qp_eq) {
2249                ret = -EINVAL;
2250                pr_err("ehea_create_eq failed (qp_eq)\n");
2251                goto out_kill_eq;
2252        }
2253
2254        pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
2255        pr_cfg.max_entries_scq = sq_entries * 2;
2256        pr_cfg.max_entries_sq = sq_entries;
2257        pr_cfg.max_entries_rq1 = rq1_entries;
2258        pr_cfg.max_entries_rq2 = rq2_entries;
2259        pr_cfg.max_entries_rq3 = rq3_entries;
2260
2268        for (i = 0; i < def_qps; i++) {
2269                ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
2270                if (ret)
2271                        goto out_clean_pr;
2272        }
2279
2280        return 0;
2281
2282out_clean_pr:
2283        while (--i >= 0)
2284                ehea_clean_portres(port, &port->port_res[i]);
2285
2286out_kill_eq:
2287        ehea_destroy_eq(port->qp_eq);
2288        return ret;
2289}
2290
2291static int ehea_clean_all_portres(struct ehea_port *port)
2292{
2293        int ret = 0;
2294        int i;
2295
2296        for (i = 0; i < port->num_def_qps; i++)
2297                ret |= ehea_clean_portres(port, &port->port_res[i]);
2298
2299        ret |= ehea_destroy_eq(port->qp_eq);
2300
2301        return ret;
2302}
2303
2304static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
2305{
2306        if (adapter->active_ports)
2307                return;
2308
2309        ehea_rem_mr(&adapter->mr);
2310}
2311
2312static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
2313{
2314        if (adapter->active_ports)
2315                return 0;
2316
2317        return ehea_reg_kernel_mr(adapter, &adapter->mr);
2318}
2319
2320static int ehea_up(struct net_device *dev)
2321{
2322        int ret, i;
2323        struct ehea_port *port = netdev_priv(dev);
2324
2325        if (port->state == EHEA_PORT_UP)
2326                return 0;
2327
2328        ret = ehea_port_res_setup(port, port->num_def_qps);
2329        if (ret) {
2330                netdev_err(dev, "ehea_port_res_setup failed. ret:%d\n", ret);
2331                goto out;
2332        }
2333
2334        /* Set default QP for this port */
2335        ret = ehea_configure_port(port);
2336        if (ret) {
2337                netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret);
2338                goto out_clean_pr;
2339        }
2340
2341        ret = ehea_reg_interrupts(dev);
2342        if (ret) {
2343                netdev_err(dev, "reg_interrupts failed. ret:%d\n", ret);
2344                goto out_clean_pr;
2345        }
2346
2347        for (i = 0; i < port->num_def_qps; i++) {
2348                ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
2349                if (ret) {
2350                        netdev_err(dev, "activate_qp failed\n");
2351                        goto out_free_irqs;
2352                }
2353        }
2354
2355        for (i = 0; i < port->num_def_qps; i++) {
2356                ret = ehea_fill_port_res(&port->port_res[i]);
2357                if (ret) {
2358                        netdev_err(dev, "ehea_fill_port_res failed. ret:%d\n", ret);
2359                        goto out_free_irqs;
2360                }
2361        }
2362
2363        ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
2364        if (ret) {
2365                ret = -EIO;
2366                goto out_free_irqs;
2367        }
2368
2369        port->state = EHEA_PORT_UP;
2370
2371        ret = 0;
2372        goto out;
2373
2374out_free_irqs:
2375        ehea_free_interrupts(dev);
2376
2377out_clean_pr:
2378        ehea_clean_all_portres(port);
2379out:
2380        if (ret)
2381                netdev_info(dev, "Failed starting. ret=%i\n", ret);
2382
2383        ehea_update_bcmc_registrations();
2384        ehea_update_firmware_handles();
2385
2386        return ret;
2387}
2388
2389static void port_napi_disable(struct ehea_port *port)
2390{
2391        int i;
2392
2393        for (i = 0; i < port->num_def_qps; i++)
2394                napi_disable(&port->port_res[i].napi);
2395}
2396
2397static void port_napi_enable(struct ehea_port *port)
2398{
2399        int i;
2400
2401        for (i = 0; i < port->num_def_qps; i++)
2402                napi_enable(&port->port_res[i].napi);
2403}
2404
2405static int ehea_open(struct net_device *dev)
2406{
2407        int ret;
2408        struct ehea_port *port = netdev_priv(dev);
2409
2410        mutex_lock(&port->port_lock);
2411
2412        netif_info(port, ifup, dev, "enabling port\n");
2413
2414        netif_carrier_off(dev);
2415
2416        ret = ehea_up(dev);
2417        if (!ret) {
2418                port_napi_enable(port);
2419                netif_tx_start_all_queues(dev);
2420        }
2421
2422        mutex_unlock(&port->port_lock);
2423        schedule_delayed_work(&port->stats_work,
2424                              round_jiffies_relative(msecs_to_jiffies(1000)));
2425
2426        return ret;
2427}
2428
2429static int ehea_down(struct net_device *dev)
2430{
2431        int ret;
2432        struct ehea_port *port = netdev_priv(dev);
2433
2434        if (port->state == EHEA_PORT_DOWN)
2435                return 0;
2436
2437        ehea_drop_multicast_list(dev);
2438        ehea_allmulti(dev, 0);
2439        ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
2440
2441        ehea_free_interrupts(dev);
2442
2443        port->state = EHEA_PORT_DOWN;
2444
2445        ehea_update_bcmc_registrations();
2446
2447        ret = ehea_clean_all_portres(port);
2448        if (ret)
2449                netdev_info(dev, "Failed freeing resources. ret=%i\n", ret);
2450
2451        ehea_update_firmware_handles();
2452
2453        return ret;
2454}
2455
2456static int ehea_stop(struct net_device *dev)
2457{
2458        int ret;
2459        struct ehea_port *port = netdev_priv(dev);
2460
2461        netif_info(port, ifdown, dev, "disabling port\n");
2462
2463        set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
2464        cancel_work_sync(&port->reset_task);
2465        cancel_delayed_work_sync(&port->stats_work);
2466        mutex_lock(&port->port_lock);
2467        netif_tx_stop_all_queues(dev);
2468        port_napi_disable(port);
2469        ret = ehea_down(dev);
2470        mutex_unlock(&port->port_lock);
2471        clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
2472        return ret;
2473}
2474
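    /*
     * Mark all send WQEs of a queue pair with the PURGE flag so that
     * pending work requests are discarded rather than sent.
     */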
2475static void ehea_purge_sq(struct ehea_qp *orig_qp)
2476{
2477        struct ehea_qp qp = *orig_qp;
2478        struct ehea_qp_init_attr *init_attr = &qp.init_attr;
2479        struct ehea_swqe *swqe;
2480        int wqe_index;
2481        int i;
2482
2483        for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
2484                swqe = ehea_get_swqe(&qp, &wqe_index);
2485                swqe->tx_control |= EHEA_SWQE_PURGE;
2486        }
2487}
2488
2489static void ehea_flush_sq(struct ehea_port *port)
2490{
2491        int i;
2492
2493        for (i = 0; i < port->num_def_qps; i++) {
2494                struct ehea_port_res *pr = &port->port_res[i];
2495                int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
2496                int ret;
2497
2498                ret = wait_event_timeout(port->swqe_avail_wq,
2499                         atomic_read(&pr->swqe_avail) >= swqe_max,
2500                         msecs_to_jiffies(100));
2501
2502                if (!ret) {
2503                        pr_err("WARNING: sq not flushed completely\n");
2504                        break;
2505                }
2506        }
2507}
2508
2509static int ehea_stop_qps(struct net_device *dev)
2510{
2511        struct ehea_port *port = netdev_priv(dev);
2512        struct ehea_adapter *adapter = port->adapter;
2513        struct hcp_modify_qp_cb0 *cb0;
2514        int ret = -EIO;
2515        int dret;
2516        int i;
2517        u64 hret;
2518        u64 dummy64 = 0;
2519        u16 dummy16 = 0;
2520
2521        cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2522        if (!cb0) {
2523                ret = -ENOMEM;
2524                goto out;
2525        }
2526
2527        for (i = 0; i < (port->num_def_qps); i++) {
2528                struct ehea_port_res *pr =  &port->port_res[i];
2529                struct ehea_qp *qp = pr->qp;
2530
2531                /* Purge send queue */
2532                ehea_purge_sq(qp);
2533
2534                /* Disable queue pair */
2535                hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2536                                            EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2537                                            cb0);
2538                if (hret != H_SUCCESS) {
2539                        pr_err("query_ehea_qp failed (1)\n");
2540                        goto out;
2541                }
2542
2543                cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
2544                cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;
2545
2546                hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2547                                             EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
2548                                                            1), cb0, &dummy64,
2549                                             &dummy64, &dummy16, &dummy16);
2550                if (hret != H_SUCCESS) {
2551                        pr_err("modify_ehea_qp failed (1)\n");
2552                        goto out;
2553                }
2554
2555                hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2556                                            EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2557                                            cb0);
2558                if (hret != H_SUCCESS) {
2559                        pr_err("query_ehea_qp failed (2)\n");
2560                        goto out;
2561                }
2562
2563                /* deregister shared memory regions */
2564                dret = ehea_rem_smrs(pr);
2565                if (dret) {
2566                        pr_err("unreg shared memory region failed\n");
2567                        goto out;
2568                }
2569        }
2570
2571        ret = 0;
2572out:
2573        free_page((unsigned long)cb0);
2574
2575        return ret;
2576}
2577
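    /*
     * After the memory region has been re-registered, rewrite the l_key
     * and buffer addresses of all RQ2/RQ3 WQEs so that they reference
     * the new region.
     */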
2578static void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
2579{
2580        struct ehea_qp qp = *orig_qp;
2581        struct ehea_qp_init_attr *init_attr = &qp.init_attr;
2582        struct ehea_rwqe *rwqe;
2583        struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
2584        struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
2585        struct sk_buff *skb;
2586        u32 lkey = pr->recv_mr.lkey;
2589        int i;
2590        int index;
2591
2592        for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
2593                rwqe = ehea_get_next_rwqe(&qp, 2);
2594                rwqe->sg_list[0].l_key = lkey;
2595                index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2596                skb = skba_rq2[index];
2597                if (skb)
2598                        rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
2599        }
2600
2601        for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
2602                rwqe = ehea_get_next_rwqe(&qp, 3);
2603                rwqe->sg_list[0].l_key = lkey;
2604                index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2605                skb = skba_rq3[index];
2606                if (skb)
2607                        rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
2608        }
2609}
2610
2611static int ehea_restart_qps(struct net_device *dev)
2612{
2613        struct ehea_port *port = netdev_priv(dev);
2614        struct ehea_adapter *adapter = port->adapter;
2615        int ret = 0;
2616        int i;
2617
2618        struct hcp_modify_qp_cb0 *cb0;
2619        u64 hret;
2620        u64 dummy64 = 0;
2621        u16 dummy16 = 0;
2622
2623        cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2624        if (!cb0) {
2625                ret = -ENOMEM;
2626                goto out;
2627        }
2628
2629        for (i = 0; i < (port->num_def_qps); i++) {
2630                struct ehea_port_res *pr =  &port->port_res[i];
2631                struct ehea_qp *qp = pr->qp;
2632
2633                ret = ehea_gen_smrs(pr);
2634                if (ret) {
2635                        netdev_err(dev, "creation of shared memory regions failed\n");
2636                        goto out;
2637                }
2638
2639                ehea_update_rqs(qp, pr);
2640
2641                /* Enable queue pair */
2642                hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2643                                            EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2644                                            cb0);
2645                if (hret != H_SUCCESS) {
2646                        netdev_err(dev, "query_ehea_qp failed (1)\n");
2647                        goto out;
2648                }
2649
2650                cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
2651                cb0->qp_ctl_reg |= H_QP_CR_ENABLED;
2652
2653                hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2654                                             EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
2655                                                            1), cb0, &dummy64,
2656                                             &dummy64, &dummy16, &dummy16);
2657                if (hret != H_SUCCESS) {
2658                        netdev_err(dev, "modify_ehea_qp failed (1)\n");
2659                        goto out;
2660                }
2661
2662                hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2663                                            EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2664                                            cb0);
2665                if (hret != H_SUCCESS) {
2666                        netdev_err(dev, "query_ehea_qp failed (2)\n");
2667                        goto out;
2668                }
2669
2670                /* refill entire queue */
2671                ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
2672                ehea_refill_rq2(pr, 0);
2673                ehea_refill_rq3(pr, 0);
2674        }
2675out:
2676        free_page((unsigned long)cb0);
2677
2678        return ret;
2679}
2680
2681static void ehea_reset_port(struct work_struct *work)
2682{
2683        int ret;
2684        struct ehea_port *port =
2685                container_of(work, struct ehea_port, reset_task);
2686        struct net_device *dev = port->netdev;
2687
2688        mutex_lock(&dlpar_mem_lock);
2689        port->resets++;
2690        mutex_lock(&port->port_lock);
2691        netif_tx_disable(dev);
2692
2693        port_napi_disable(port);
2694
2695        ehea_down(dev);
2696
2697        ret = ehea_up(dev);
2698        if (ret)
2699                goto out;
2700
2701        ehea_set_multicast_list(dev);
2702
2703        netif_info(port, timer, dev, "reset successful\n");
2704
2705        port_napi_enable(port);
2706
2707        netif_tx_wake_all_queues(dev);
2708out:
2709        mutex_unlock(&port->port_lock);
2710        mutex_unlock(&dlpar_mem_lock);
2711}
2712
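    /*
     * Handle a DLPAR memory change: quiesce and flush all active ports,
     * replace the adapter-wide memory region, then restart the queue
     * pairs and wake the ports up again.
     */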
2713static void ehea_rereg_mrs(void)
2714{
2715        int ret, i;
2716        struct ehea_adapter *adapter;
2717
2718        pr_info("LPAR memory changed - re-initializing driver\n");
2719
2720        list_for_each_entry(adapter, &adapter_list, list)
2721                if (adapter->active_ports) {
2722                        /* Shutdown all ports */
2723                        for (i = 0; i < EHEA_MAX_PORTS; i++) {
2724                                struct ehea_port *port = adapter->port[i];
2725                                struct net_device *dev;
2726
2727                                if (!port)
2728                                        continue;
2729
2730                                dev = port->netdev;
2731
2732                                if (dev->flags & IFF_UP) {
2733                                        mutex_lock(&port->port_lock);
2734                                        netif_tx_disable(dev);
2735                                        ehea_flush_sq(port);
2736                                        ret = ehea_stop_qps(dev);
2737                                        if (ret) {
2738                                                mutex_unlock(&port->port_lock);
2739                                                goto out;
2740                                        }
2741                                        port_napi_disable(port);
2742                                        mutex_unlock(&port->port_lock);
2743                                }
2744                                reset_sq_restart_flag(port);
2745                        }
2746
2747                        /* Unregister old memory region */
2748                        ret = ehea_rem_mr(&adapter->mr);
2749                        if (ret) {
2750                                pr_err("unregister MR failed - driver inoperable!\n");
2751                                goto out;
2752                        }
2753                }
2754
2755        clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
2756
2757        list_for_each_entry(adapter, &adapter_list, list)
2758                if (adapter->active_ports) {
2759                        /* Register new memory region */
2760                        ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
2761                        if (ret) {
2762                                pr_err("register MR failed - driver inoperable!\n");
2763                                goto out;
2764                        }
2765
2766                        /* Restart all ports */
2767                        for (i = 0; i < EHEA_MAX_PORTS; i++) {
2768                                struct ehea_port *port = adapter->port[i];
2769
2770                                if (port) {
2771                                        struct net_device *dev = port->netdev;
2772
2773                                        if (dev->flags & IFF_UP) {
2774                                                mutex_lock(&port->port_lock);
2775                                                ret = ehea_restart_qps(dev);
2776                                                if (!ret) {
2777                                                        check_sqs(port);
2778                                                        port_napi_enable(port);
2779                                                        netif_tx_wake_all_queues(dev);
2780                                                } else {
2781                                                        netdev_err(dev, "Unable to restart QPs\n");
2782                                                }
2783                                                mutex_unlock(&port->port_lock);
2784                                        }
2785                                }
2786                        }
2787                }
2788        pr_info("re-initializing driver complete\n");
2789out:
2790        return;
2791}
2792
2793static void ehea_tx_watchdog(struct net_device *dev)
2794{
2795        struct ehea_port *port = netdev_priv(dev);
2796
2797        if (netif_carrier_ok(dev) &&
2798            !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
2799                ehea_schedule_port_reset(port);
2800}
2801
2802static int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
2803{
2804        struct hcp_query_ehea *cb;
2805        u64 hret;
2806        int ret;
2807
2808        cb = (void *)get_zeroed_page(GFP_KERNEL);
2809        if (!cb) {
2810                ret = -ENOMEM;
2811                goto out;
2812        }
2813
2814        hret = ehea_h_query_ehea(adapter->handle, cb);
2815
2816        if (hret != H_SUCCESS) {
2817                ret = -EIO;
2818                goto out_herr;
2819        }
2820
2821        adapter->max_mc_mac = cb->max_mc_mac - 1;
2822        ret = 0;
2823
2824out_herr:
2825        free_page((unsigned long)cb);
2826out:
2827        return ret;
2828}
2829
2830static int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
2831{
2832        struct hcp_ehea_port_cb4 *cb4;
2833        u64 hret;
2834        int ret = 0;
2835
2836        *jumbo = 0;
2837
2838        /* (Try to) enable jumbo frames */
2839        cb4 = (void *)get_zeroed_page(GFP_KERNEL);
2840        if (!cb4) {
2841                pr_err("no mem for cb4\n");
2842                ret = -ENOMEM;
2843                goto out;
2844        } else {
2845                hret = ehea_h_query_ehea_port(port->adapter->handle,
2846                                              port->logical_port_id,
2847                                              H_PORT_CB4,
2848                                              H_PORT_CB4_JUMBO, cb4);
2849                if (hret == H_SUCCESS) {
2850                        if (cb4->jumbo_frame)
2851                                *jumbo = 1;
2852                        else {
2853                                cb4->jumbo_frame = 1;
2854                                hret = ehea_h_modify_ehea_port(
2855                                                port->adapter->handle,
2856                                                port->logical_port_id,
2857                                                H_PORT_CB4,
2858                                                H_PORT_CB4_JUMBO,
2859                                                cb4);
2861                                if (hret == H_SUCCESS)
2862                                        *jumbo = 1;
2863                        }
2864                } else
2865                        ret = -EINVAL;
2866
2867                free_page((unsigned long)cb4);
2868        }
2869out:
2870        return ret;
2871}
2872
2873static ssize_t ehea_show_port_id(struct device *dev,
2874                                 struct device_attribute *attr, char *buf)
2875{
2876        struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
2877        return sprintf(buf, "%d", port->logical_port_id);
2878}
2879
2880static DEVICE_ATTR(log_port_id, 0444, ehea_show_port_id, NULL);
2881
2882static void logical_port_release(struct device *dev)
2883{
2884        struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
2885        of_node_put(port->ofdev.dev.of_node);
2886}
2887
2888static struct device *ehea_register_port(struct ehea_port *port,
2889                                         struct device_node *dn)
2890{
2891        int ret;
2892
2893        port->ofdev.dev.of_node = of_node_get(dn);
2894        port->ofdev.dev.parent = &port->adapter->ofdev->dev;
2895        port->ofdev.dev.bus = &ibmebus_bus_type;
2896
2897        dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
2898        port->ofdev.dev.release = logical_port_release;
2899
2900        ret = of_device_register(&port->ofdev);
2901        if (ret) {
2902                pr_err("failed to register device. ret=%d\n", ret);
2903                goto out;
2904        }
2905
2906        ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
2907        if (ret) {
2908                pr_err("failed to register attributes, ret=%d\n", ret);
2909                goto out_unreg_of_dev;
2910        }
2911
2912        return &port->ofdev.dev;
2913
2914out_unreg_of_dev:
2915        of_device_unregister(&port->ofdev);
2916out:
2917        return NULL;
2918}
2919
2920static void ehea_unregister_port(struct ehea_port *port)
2921{
2922        device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
2923        of_device_unregister(&port->ofdev);
2924}
2925
2926static const struct net_device_ops ehea_netdev_ops = {
2927        .ndo_open               = ehea_open,
2928        .ndo_stop               = ehea_stop,
2929        .ndo_start_xmit         = ehea_start_xmit,
2930        .ndo_get_stats64        = ehea_get_stats64,
2931        .ndo_set_mac_address    = ehea_set_mac_addr,
2932        .ndo_validate_addr      = eth_validate_addr,
2933        .ndo_set_rx_mode        = ehea_set_multicast_list,
2934        .ndo_vlan_rx_add_vid    = ehea_vlan_rx_add_vid,
2935        .ndo_vlan_rx_kill_vid   = ehea_vlan_rx_kill_vid,
2936        .ndo_tx_timeout         = ehea_tx_watchdog,
2937};
2938
2939static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
2940                                         u32 logical_port_id,
2941                                         struct device_node *dn)
2942{
2943        int ret;
2944        struct net_device *dev;
2945        struct ehea_port *port;
2946        struct device *port_dev;
2947        int jumbo;
2948
2949        /* allocate memory for the port structures */
2950        dev = alloc_etherdev_mq(sizeof(struct ehea_port), EHEA_MAX_PORT_RES);
2951
2952        if (!dev) {
2953                ret = -ENOMEM;
2954                goto out_err;
2955        }
2956
2957        port = netdev_priv(dev);
2958
2959        mutex_init(&port->port_lock);
2960        port->state = EHEA_PORT_DOWN;
2961        port->sig_comp_iv = sq_entries / 10;
2962
2963        port->adapter = adapter;
2964        port->netdev = dev;
2965        port->logical_port_id = logical_port_id;
2966
2967        port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);
2968
2969        port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
2970        if (!port->mc_list) {
2971                ret = -ENOMEM;
2972                goto out_free_ethdev;
2973        }
2974
2975        INIT_LIST_HEAD(&port->mc_list->list);
2976
2977        ret = ehea_sense_port_attr(port);
2978        if (ret)
2979                goto out_free_mc_list;
2980
2981        netif_set_real_num_rx_queues(dev, port->num_def_qps);
2982        netif_set_real_num_tx_queues(dev, port->num_def_qps);
2983
2984        port_dev = ehea_register_port(port, dn);
2985        if (!port_dev)
2986                goto out_free_mc_list;
2987
2988        SET_NETDEV_DEV(dev, port_dev);
2989
2990        /* initialize net_device structure */
2991        memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
2992
2993        dev->netdev_ops = &ehea_netdev_ops;
2994        ehea_set_ethtool_ops(dev);
2995
2996        dev->hw_features = NETIF_F_SG | NETIF_F_TSO |
2997                      NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX;
2998        dev->features = NETIF_F_SG | NETIF_F_TSO |
2999                      NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
3000                      NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
3001                      NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM;
3002        dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA |
3003                        NETIF_F_IP_CSUM;
3004        dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
3005
3006        /* MTU range: 68 - 9022 */
3007        dev->min_mtu = ETH_MIN_MTU;
3008        dev->max_mtu = EHEA_MAX_PACKET_SIZE;
3009
3010        INIT_WORK(&port->reset_task, ehea_reset_port);
3011        INIT_DELAYED_WORK(&port->stats_work, ehea_update_stats);
3012
3013        init_waitqueue_head(&port->swqe_avail_wq);
3014        init_waitqueue_head(&port->restart_wq);
3015
3016        ret = register_netdev(dev);
3017        if (ret) {
3018                pr_err("register_netdev failed. ret=%d\n", ret);
3019                goto out_unreg_port;
3020        }
3021
3022        ret = ehea_get_jumboframe_status(port, &jumbo);
3023        if (ret)
3024                netdev_err(dev, "failed determining jumbo frame status\n");
3025
3026        netdev_info(dev, "Jumbo frames are %sabled\n",
3027                    jumbo == 1 ? "en" : "dis");
3028
3029        adapter->active_ports++;
3030
3031        return port;
3032
3033out_unreg_port:
3034        ehea_unregister_port(port);
3035
3036out_free_mc_list:
3037        kfree(port->mc_list);
3038
3039out_free_ethdev:
3040        free_netdev(dev);
3041
3042out_err:
3043        pr_err("setting up logical port with id=%d failed, ret=%d\n",
3044               logical_port_id, ret);
3045        return NULL;
3046}
3047
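    /*
     * Reverse of ehea_setup_single_port(): stop the deferred work first
     * so nothing touches the netdev while it is being torn down, then
     * unregister and free everything in the opposite order of setup.
     */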
3048static void ehea_shutdown_single_port(struct ehea_port *port)
3049{
3050        struct ehea_adapter *adapter = port->adapter;
3051
3052        cancel_work_sync(&port->reset_task);
3053        cancel_delayed_work_sync(&port->stats_work);
3054        unregister_netdev(port->netdev);
3055        ehea_unregister_port(port);
3056        kfree(port->mc_list);
3057        free_netdev(port->netdev);
3058        adapter->active_ports--;
3059}
3060
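    /*
     * Walk the children of the lhea device-tree node and set up one
     * logical port per child that carries an "ibm,hea-port-no" property.
     * Sketch of the expected layout (illustrative, not copied from a
     * real machine):
     *
     *   lhea@23c00300 {
     *           ethernet@23e00100 {
     *                   device_type = "network";
     *                   ibm,hea-port-no = <0x1>;
     *           };
     *   };
     */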
3061static int ehea_setup_ports(struct ehea_adapter *adapter)
3062{
3063        struct device_node *lhea_dn;
3064        struct device_node *eth_dn = NULL;
3065
3066        const u32 *dn_log_port_id;
3067        int i = 0;
3068
3069        lhea_dn = adapter->ofdev->dev.of_node;
3070        while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
                    /* don't run past the adapter's port array */
                    if (i >= EHEA_MAX_PORTS) {
                            of_node_put(eth_dn);
                            break;
                    }
3071
3072                dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
3073                                                 NULL);
3074                if (!dn_log_port_id) {
3075                        pr_err("bad device node: eth_dn name=%pOF\n", eth_dn);
3076                        continue;
3077                }
3078
3079                if (ehea_add_adapter_mr(adapter)) {
3080                        pr_err("creating MR failed\n");
3081                        of_node_put(eth_dn);
3082                        return -EIO;
3083                }
3084
3085                adapter->port[i] = ehea_setup_single_port(adapter,
3086                                                          *dn_log_port_id,
3087                                                          eth_dn);
3088                if (adapter->port[i])
3089                        netdev_info(adapter->port[i]->netdev,
3090                                    "logical port id #%d\n", *dn_log_port_id);
3091                else
3092                        ehea_remove_adapter_mr(adapter);
3093
3094                i++;
3095        }
3096        return 0;
3097}
3098
3099static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
3100                                           u32 logical_port_id)
3101{
3102        struct device_node *lhea_dn;
3103        struct device_node *eth_dn = NULL;
3104        const u32 *dn_log_port_id;
3105
3106        lhea_dn = adapter->ofdev->dev.of_node;
3107        while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
3108
3109                dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
3110                                                 NULL);
3111                if (dn_log_port_id)
3112                        if (*dn_log_port_id == logical_port_id)
3113                                return eth_dn;
3114        }
3115
3116        return NULL;
3117}
3118
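    /*
     * Store handler for the "probe_port" device attribute: writing a
     * logical port id hot-adds that port, assuming a matching child node
     * exists in the device tree. Example (the sysfs path is illustrative
     * and depends on the bus/device name):
     *
     *   echo 2 > /sys/bus/ibmebus/devices/23c00300.lhea/probe_port
     */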
3119static ssize_t ehea_probe_port(struct device *dev,
3120                               struct device_attribute *attr,
3121                               const char *buf, size_t count)
3122{
3123        struct ehea_adapter *adapter = dev_get_drvdata(dev);
3124        struct ehea_port *port;
3125        struct device_node *eth_dn = NULL;
3126        int i;
3127
3128        u32 logical_port_id;
3129
3130        if (sscanf(buf, "%u", &logical_port_id) != 1)
                    return -EINVAL;
3131
3132        port = ehea_get_port(adapter, logical_port_id);
3133
3134        if (port) {
3135                netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n",
3136                            logical_port_id);
3137                return -EINVAL;
3138        }
3139
3140        eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
3141
3142        if (!eth_dn) {
3143                pr_info("no logical port with id %d found\n", logical_port_id);
3144                return -EINVAL;
3145        }
3146
3147        if (ehea_add_adapter_mr(adapter)) {
3148                pr_err("creating MR failed\n");
3149                of_node_put(eth_dn);
3150                return -EIO;
3151        }
3152
3153        port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);
3154
3155        of_node_put(eth_dn);
3156
3157        if (port) {
3158                for (i = 0; i < EHEA_MAX_PORTS; i++)
3159                        if (!adapter->port[i]) {
3160                                adapter->port[i] = port;
3161                                break;
3162                        }
3163
3164                netdev_info(port->netdev, "added: (logical port id=%d)\n",
3165                            logical_port_id);
3166        } else {
3167                ehea_remove_adapter_mr(adapter);
3168                return -EIO;
3169        }
3170
3171        return (ssize_t) count;
3172}
3173
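    /*
     * Counterpart to ehea_probe_port(): writing a logical port id to the
     * "remove_port" attribute shuts that port down and drops the
     * adapter's memory-region reference taken when it was probed.
     */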
3174static ssize_t ehea_remove_port(struct device *dev,
3175                                struct device_attribute *attr,
3176                                const char *buf, size_t count)
3177{
3178        struct ehea_adapter *adapter = dev_get_drvdata(dev);
3179        struct ehea_port *port;
3180        int i;
3181        u32 logical_port_id;
3182
3183        if (sscanf(buf, "%u", &logical_port_id) != 1)
                    return -EINVAL;
3184
3185        port = ehea_get_port(adapter, logical_port_id);
3186
3187        if (port) {
3188                netdev_info(port->netdev, "removed: (logical port id=%d)\n",
3189                            logical_port_id);
3190
3191                ehea_shutdown_single_port(port);
3192
3193                for (i = 0; i < EHEA_MAX_PORTS; i++)
3194                        if (adapter->port[i] == port) {
3195                                adapter->port[i] = NULL;
3196                                break;
3197                        }
3198        } else {
3199                pr_err("removing port with logical port id=%d failed. port not configured.\n",
3200                       logical_port_id);
3201                return -EINVAL;
3202        }
3203
3204        ehea_remove_adapter_mr(adapter);
3205
3206        return (ssize_t) count;
3207}
3208
3209static DEVICE_ATTR(probe_port, 0200, NULL, ehea_probe_port);
3210static DEVICE_ATTR(remove_port, 0200, NULL, ehea_remove_port);
3211
3212static int ehea_create_device_sysfs(struct platform_device *dev)
3213{
3214        int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
3215        if (ret)
3216                goto out;
3217
3218        ret = device_create_file(&dev->dev, &dev_attr_remove_port);
3219out:
3220        return ret;
3221}
3222
3223static void ehea_remove_device_sysfs(struct platform_device *dev)
3224{
3225        device_remove_file(&dev->dev, &dev_attr_probe_port);
3226        device_remove_file(&dev->dev, &dev_attr_remove_port);
3227}
3228
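    /*
     * On SYS_RESTART, unregister the whole ebus driver so that every
     * adapter releases its firmware resources before the machine
     * reboots.
     */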
3229static int ehea_reboot_notifier(struct notifier_block *nb,
3230                                unsigned long action, void *unused)
3231{
3232        if (action == SYS_RESTART) {
3233                pr_info("Reboot: freeing all eHEA resources\n");
3234                ibmebus_unregister_driver(&ehea_driver);
3235        }
3236        return NOTIFY_DONE;
3237}
3238
3239static struct notifier_block ehea_reboot_nb = {
3240        .notifier_call = ehea_reboot_notifier,
3241};
3242
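    /*
     * Memory hotplug notifier: eHEA registers kernel memory with the
     * firmware, so when sections go on- or offline the driver must halt
     * transfers (__EHEA_STOP_XFER), update its section bitmap and
     * re-register its memory regions before traffic may resume.
     */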
3243static int ehea_mem_notifier(struct notifier_block *nb,
3244                             unsigned long action, void *data)
3245{
3246        int ret = NOTIFY_BAD;
3247        struct memory_notify *arg = data;
3248
3249        mutex_lock(&dlpar_mem_lock);
3250
3251        switch (action) {
3252        case MEM_CANCEL_OFFLINE:
3253                pr_info("memory offlining canceled\n");
3254                fallthrough;    /* re-add canceled memory block */
3255
3256        case MEM_ONLINE:
3257                pr_info("memory is going online\n");
3258                set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3259                if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
3260                        goto out_unlock;
3261                ehea_rereg_mrs();
3262                break;
3263
3264        case MEM_GOING_OFFLINE:
3265                pr_info("memory is going offline\n");
3266                set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3267                if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
3268                        goto out_unlock;
3269                ehea_rereg_mrs();
3270                break;
3271
3272        default:
3273                break;
3274        }
3275
3276        ehea_update_firmware_handles();
3277        ret = NOTIFY_OK;
3278
3279out_unlock:
3280        mutex_unlock(&dlpar_mem_lock);
3281        return ret;
3282}
3283
3284static struct notifier_block ehea_mem_nb = {
3285        .notifier_call = ehea_mem_notifier,
3286};
3287
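    /*
     * kexec crash handler: force-free every firmware handle and
     * deregister all broadcast/multicast registrations we track, so the
     * kdump kernel can reinitialize the HEA from a clean state.
     */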
3288static void ehea_crash_handler(void)
3289{
3290        int i;
3291
3292        if (ehea_fw_handles.arr)
3293                for (i = 0; i < ehea_fw_handles.num_entries; i++)
3294                        ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
3295                                             ehea_fw_handles.arr[i].fwh,
3296                                             FORCE_FREE);
3297
3298        if (ehea_bcmc_regs.arr)
3299                for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
3300                        ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
3301                                              ehea_bcmc_regs.arr[i].port_id,
3302                                              ehea_bcmc_regs.arr[i].reg_type,
3303                                              ehea_bcmc_regs.arr[i].macaddr,
3304                                              0, H_DEREG_BCMC);
3305}
3306
3307static atomic_t ehea_memory_hooks_registered;
3308
3309/* Register memory hooks on probe of first adapter */
3310static int ehea_register_memory_hooks(void)
3311{
3312        int ret = 0;
3313
3314        if (atomic_inc_return(&ehea_memory_hooks_registered) > 1)
3315                return 0;
3316
3317        ret = ehea_create_busmap();
3318        if (ret) {
3319                pr_info("ehea_create_busmap failed\n");
3320                goto out;
3321        }
3322
3323        ret = register_reboot_notifier(&ehea_reboot_nb);
3324        if (ret) {
3325                pr_info("register_reboot_notifier failed\n");
3326                goto out;
3327        }
3328
3329        ret = register_memory_notifier(&ehea_mem_nb);
3330        if (ret) {
3331                pr_info("register_memory_notifier failed\n");
3332                goto out2;
3333        }
3334
3335        ret = crash_shutdown_register(ehea_crash_handler);
3336        if (ret) {
3337                pr_info("crash_shutdown_register failed\n");
3338                goto out3;
3339        }
3340
3341        return 0;
3342
3343out3:
3344        unregister_memory_notifier(&ehea_mem_nb);
3345out2:
3346        unregister_reboot_notifier(&ehea_reboot_nb);
3347out:
3348        atomic_dec(&ehea_memory_hooks_registered);
3349        return ret;
3350}
3351
3352static void ehea_unregister_memory_hooks(void)
3353{
3354        /* Only remove the hooks if we've registered them */
3355        if (atomic_read(&ehea_memory_hooks_registered) == 0)
3356                return;
3357
3358        unregister_reboot_notifier(&ehea_reboot_nb);
3359        if (crash_shutdown_unregister(ehea_crash_handler))
3360                pr_info("failed unregistering crash handler\n");
3361        unregister_memory_notifier(&ehea_mem_nb);
3362}
3363
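    /*
     * Probe one lhea adapter: register the global memory hooks (first
     * adapter only), read the firmware handle from the "ibm,hea-handle"
     * property, sense the adapter attributes, create the notification
     * EQ, expose the probe_port/remove_port attributes and bring up all
     * ports found in the device tree.
     */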
3364static int ehea_probe_adapter(struct platform_device *dev)
3365{
3366        struct ehea_adapter *adapter;
3367        const u64 *adapter_handle;
3368        int ret;
3369        int i;
3370
3371        ret = ehea_register_memory_hooks();
3372        if (ret)
3373                return ret;
3374
3375        if (!dev || !dev->dev.of_node) {
3376                pr_err("Invalid ibmebus device probed\n");
3377                return -EINVAL;
3378        }
3379
3380        adapter = devm_kzalloc(&dev->dev, sizeof(*adapter), GFP_KERNEL);
3381        if (!adapter) {
3382                ret = -ENOMEM;
3383                dev_err(&dev->dev, "no mem for ehea_adapter\n");
3384                goto out;
3385        }
3386
3387        list_add(&adapter->list, &adapter_list);
3388
3389        adapter->ofdev = dev;
3390
3391        adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle",
3392                                         NULL);
3393        if (adapter_handle)
3394                adapter->handle = *adapter_handle;
3395
3396        if (!adapter->handle) {
3397                dev_err(&dev->dev, "failed getting handle for adapter"
3398                        " '%pOF'\n", dev->dev.of_node);
3399                ret = -ENODEV;
3400                goto out_free_ad;
3401        }
3402
3403        adapter->pd = EHEA_PD_ID;
3404
3405        platform_set_drvdata(dev, adapter);
3406
3407
3408        /* initialize adapter and ports */
3409        /* get adapter properties */
3410        ret = ehea_sense_adapter_attr(adapter);
3411        if (ret) {
3412                dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
3413                goto out_free_ad;
3414        }
3415
3416        adapter->neq = ehea_create_eq(adapter,
3417                                      EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
3418        if (!adapter->neq) {
3419                ret = -EIO;
3420                dev_err(&dev->dev, "NEQ creation failed\n");
3421                goto out_free_ad;
3422        }
3423
3424        tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
3425                     (unsigned long)adapter);
3426
3427        ret = ehea_create_device_sysfs(dev);
3428        if (ret)
3429                goto out_kill_eq;
3430
3431        ret = ehea_setup_ports(adapter);
3432        if (ret) {
3433                dev_err(&dev->dev, "setup_ports failed\n");
3434                goto out_rem_dev_sysfs;
3435        }
3436
3437        ret = ibmebus_request_irq(adapter->neq->attr.ist1,
3438                                  ehea_interrupt_neq, 0,
3439                                  "ehea_neq", adapter);
3440        if (ret) {
3441                dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
3442                goto out_shutdown_ports;
3443        }
3444
3445        /* Handle any events that might be pending. */
3446        tasklet_hi_schedule(&adapter->neq_tasklet);
3447
3448        ret = 0;
3449        goto out;
3450
3451out_shutdown_ports:
3452        for (i = 0; i < EHEA_MAX_PORTS; i++)
3453                if (adapter->port[i]) {
3454                        ehea_shutdown_single_port(adapter->port[i]);
3455                        adapter->port[i] = NULL;
3456                }
3457
3458out_rem_dev_sysfs:
3459        ehea_remove_device_sysfs(dev);
3460
3461out_kill_eq:
3462        ehea_destroy_eq(adapter->neq);
3463
3464out_free_ad:
3465        list_del(&adapter->list);
3466
3467out:
3468        ehea_update_firmware_handles();
3469
3470        return ret;
3471}
3472
3473static int ehea_remove(struct platform_device *dev)
3474{
3475        struct ehea_adapter *adapter = platform_get_drvdata(dev);
3476        int i;
3477
3478        for (i = 0; i < EHEA_MAX_PORTS; i++)
3479                if (adapter->port[i]) {
3480                        ehea_shutdown_single_port(adapter->port[i]);
3481                        adapter->port[i] = NULL;
3482                }
3483
3484        ehea_remove_device_sysfs(dev);
3485
3486        ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
3487        tasklet_kill(&adapter->neq_tasklet);
3488
3489        ehea_destroy_eq(adapter->neq);
3490        ehea_remove_adapter_mr(adapter);
3491        list_del(&adapter->list);
3492
3493        ehea_update_firmware_handles();
3494
3495        return 0;
3496}
3497
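    /*
     * Sanity-check the queue-size module parameters. Only the min/max
     * bounds are enforced here; the "2^x - 1" shape quoted in the
     * parameter descriptions is not verified by this function.
     */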
3498static int check_module_parm(void)
3499{
3500        int ret = 0;
3501
3502        if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
3503            (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
3504                pr_info("Bad parameter: rq1_entries\n");
3505                ret = -EINVAL;
3506        }
3507        if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
3508            (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
3509                pr_info("Bad parameter: rq2_entries\n");
3510                ret = -EINVAL;
3511        }
3512        if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
3513            (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
3514                pr_info("Bad parameter: rq3_entries\n");
3515                ret = -EINVAL;
3516        }
3517        if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
3518            (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
3519                pr_info("Bad parameter: sq_entries\n");
3520                ret = -EINVAL;
3521        }
3522
3523        return ret;
3524}
3525
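    /*
     * Read-only driver attribute exporting the compiled-in capability
     * mask, typically visible as
     * /sys/bus/ibmebus/drivers/ehea/capabilities (the exact path depends
     * on the bus the driver is registered on).
     */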
3526static ssize_t capabilities_show(struct device_driver *drv, char *buf)
3527{
3528        return sysfs_emit(buf, "%d\n", EHEA_CAPABILITIES);
3529}
3530
3531static DRIVER_ATTR_RO(capabilities);
3532
3533static int __init ehea_module_init(void)
3534{
3535        int ret;
3536
3537        pr_info("IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION);
3538
3539        memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
3540        memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));
3541
3542        mutex_init(&ehea_fw_handles.lock);
3543        spin_lock_init(&ehea_bcmc_regs.lock);
3544
3545        ret = check_module_parm();
3546        if (ret)
3547                goto out;
3548
3549        ret = ibmebus_register_driver(&ehea_driver);
3550        if (ret) {
3551                pr_err("failed registering eHEA device driver on ebus\n");
3552                goto out;
3553        }
3554
3555        ret = driver_create_file(&ehea_driver.driver,
3556                                 &driver_attr_capabilities);
3557        if (ret) {
3558                pr_err("failed to register capabilities attribute, ret=%d\n",
3559                       ret);
3560                goto out2;
3561        }
3562
3563        return ret;
3564
3565out2:
3566        ibmebus_unregister_driver(&ehea_driver);
3567out:
3568        return ret;
3569}
3570
3571static void __exit ehea_module_exit(void)
3572{
3573        driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
3574        ibmebus_unregister_driver(&ehea_driver);
3575        ehea_unregister_memory_hooks();
3576        kfree(ehea_fw_handles.arr);
3577        kfree(ehea_bcmc_regs.arr);
3578        ehea_destroy_busmap();
3579}
3580
3581module_init(ehea_module_init);
3582module_exit(ehea_module_exit);
3583