/*
 *  linux/drivers/net/ehea/ehea_main.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if.h>
#include <linux/list.h>
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

#include <net/ip.h>

#include "ehea.h"
#include "ehea_qmr.h"
#include "ehea_phyp.h"


MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);


static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs = 0;
static int use_lro = 0;
static int lro_max_aggr = EHEA_LRO_MAX_AGGR;
static int num_tx_qps = EHEA_NUM_TX_QP;
static int prop_carrier_state = 0;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);
module_param(use_lro, int, 0);
module_param(lro_max_aggr, int, 0);
module_param(num_tx_qps, int, 0);

MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS");
MODULE_PARM_DESC(msg_level, "Message level bitmap. Default = -1");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
                 "port to stack. 1:yes, 0:no. Default = 0");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
                 "[2^x - 1], x = [6..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3));
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
                 "[2^x - 1], x = [6..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2));
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
                 "[2^x - 1], x = [6..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1));
MODULE_PARM_DESC(sq_entries, "Number of entries for the Send Queue "
                 "[2^x - 1], x = [6..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ));
MODULE_PARM_DESC(use_mcs, "0:NAPI, 1:Multiple receive queues. Default = 0");

MODULE_PARM_DESC(lro_max_aggr, "LRO: Max packets to be aggregated. Default = "
                 __MODULE_STRING(EHEA_LRO_MAX_AGGR));
MODULE_PARM_DESC(use_lro, "Large Receive Offload, 1: enable, 0: disable. "
                 "Default = 0");

static int port_name_cnt = 0;
static LIST_HEAD(adapter_list);
u64 ehea_driver_flags = 0;
struct work_struct ehea_rereg_mr_task;

struct semaphore dlpar_mem_lock;

static int __devinit ehea_probe_adapter(struct of_device *dev,
                                        const struct of_device_id *id);

static int __devexit ehea_remove(struct of_device *dev);

static struct of_device_id ehea_device_table[] = {
        {
                .name = "lhea",
                .compatible = "IBM,lhea",
        },
        {},
};

static struct of_platform_driver ehea_driver = {
        .name = "ehea",
        .match_table = ehea_device_table,
        .probe = ehea_probe_adapter,
        .remove = ehea_remove,
};

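/*
 * Hex-dump a buffer to the kernel log for debugging: 16 bytes
 * (two 64-bit words) per line, prefixed with the given message.
 */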
void ehea_dump(void *adr, int len, char *msg)
{
        int x;
        unsigned char *deb = adr;
        for (x = 0; x < len; x += 16) {
                printk(DRV_NAME " %s adr=%p ofs=%04x %016lx %016lx\n", msg,
                          deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
                deb += 16;
        }
}

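/*
 * Collect interface statistics. Byte, multicast and error counters are
 * read from the hypervisor via an H_PORT_CB2 query; packet counters
 * are summed from the software counters kept per port resource.
 */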
static struct net_device_stats *ehea_get_stats(struct net_device *dev)
{
        struct ehea_port *port = netdev_priv(dev);
        struct net_device_stats *stats = &port->stats;
        struct hcp_ehea_port_cb2 *cb2;
        u64 hret, rx_packets, tx_packets;
        int i;

        memset(stats, 0, sizeof(*stats));

        cb2 = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!cb2) {
                ehea_error("no mem for cb2");
                goto out;
        }

        hret = ehea_h_query_ehea_port(port->adapter->handle,
                                      port->logical_port_id,
                                      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
        if (hret != H_SUCCESS) {
                ehea_error("query_ehea_port failed");
                goto out_herr;
        }

        if (netif_msg_hw(port))
                ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

        rx_packets = 0;
        for (i = 0; i < port->num_def_qps; i++)
                rx_packets += port->port_res[i].rx_packets;

        tx_packets = 0;
        for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
                tx_packets += port->port_res[i].tx_packets;

        stats->tx_packets = tx_packets;
        stats->multicast = cb2->rxmcp;
        stats->rx_errors = cb2->rxuerr;
        stats->rx_bytes = cb2->rxo;
        stats->tx_bytes = cb2->txo;
        stats->rx_packets = rx_packets;

out_herr:
        kfree(cb2);
out:
        return stats;
}

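/*
 * Replenish receive queue 1 with freshly allocated skbs, walking
 * backwards from the last completed slot. WQEs that cannot be filled
 * (allocation failure, or a memory-region reregistration in flight,
 * __EHEA_STOP_XFER) are remembered in os_skbs and retried on the next
 * refill.
 */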
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
        struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
        struct net_device *dev = pr->port->netdev;
        int max_index_mask = pr->rq1_skba.len - 1;
        int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
        int adder = 0;
        int i;

        pr->rq1_skba.os_skbs = 0;

        if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
                pr->rq1_skba.index = index;
                pr->rq1_skba.os_skbs = fill_wqes;
                return;
        }

        for (i = 0; i < fill_wqes; i++) {
                if (!skb_arr_rq1[index]) {
                        skb_arr_rq1[index] = netdev_alloc_skb(dev,
                                                              EHEA_L_PKT_SIZE);
                        if (!skb_arr_rq1[index]) {
                                pr->rq1_skba.os_skbs = fill_wqes - i;
                                ehea_error("%s: no mem for skb/%d wqes filled",
                                           dev->name, i);
                                break;
                        }
                }
                index--;
                index &= max_index_mask;
                adder++;
        }

        if (adder == 0)
                return;

        /* Ring doorbell */
        ehea_update_rq1a(pr->qp, adder);
}

static int ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
        int ret = 0;
        struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
        struct net_device *dev = pr->port->netdev;
        int i;

        for (i = 0; i < pr->rq1_skba.len; i++) {
                skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
                if (!skb_arr_rq1[i]) {
                        ehea_error("%s: no mem for skb/%d wqes filled",
                                   dev->name, i);
                        ret = -ENOMEM;
                        goto out;
                }
        }
        /* Ring doorbell */
        ehea_update_rq1a(pr->qp, nr_rq1a);
out:
        return ret;
}

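/*
 * Common refill path for receive queues 2 and 3: allocate an skb per
 * missing WQE, map it, build the receive WQE, and ring the doorbell
 * once for the whole batch.
 */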
static int ehea_refill_rq_def(struct ehea_port_res *pr,
                              struct ehea_q_skb_arr *q_skba, int rq_nr,
                              int num_wqes, int wqe_type, int packet_size)
{
        struct net_device *dev = pr->port->netdev;
        struct ehea_qp *qp = pr->qp;
        struct sk_buff **skb_arr = q_skba->arr;
        struct ehea_rwqe *rwqe;
        int i, index, max_index_mask, fill_wqes;
        int adder = 0;
        int ret = 0;

        fill_wqes = q_skba->os_skbs + num_wqes;
        q_skba->os_skbs = 0;

        if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
                q_skba->os_skbs = fill_wqes;
                return ret;
        }

        index = q_skba->index;
        max_index_mask = q_skba->len - 1;
        for (i = 0; i < fill_wqes; i++) {
                u64 tmp_addr;
                struct sk_buff *skb = netdev_alloc_skb(dev, packet_size);
                if (!skb) {
                        ehea_error("%s: no mem for skb/%d wqes filled",
                                   pr->port->netdev->name, i);
                        q_skba->os_skbs = fill_wqes - i;
                        ret = -ENOMEM;
                        break;
                }
                skb_reserve(skb, NET_IP_ALIGN);

                skb_arr[index] = skb;
                tmp_addr = ehea_map_vaddr(skb->data);
                if (tmp_addr == -1) {
                        dev_kfree_skb(skb);
                        q_skba->os_skbs = fill_wqes - i;
                        ret = 0;
                        break;
                }

                rwqe = ehea_get_next_rwqe(qp, rq_nr);
                rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
                            | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
                rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
                rwqe->sg_list[0].vaddr = tmp_addr;
                rwqe->sg_list[0].len = packet_size;
                rwqe->data_segments = 1;

                index++;
                index &= max_index_mask;
                adder++;
        }

        q_skba->index = index;
        if (adder == 0)
                goto out;

        /* Ring doorbell */
        iosync();
        if (rq_nr == 2)
                ehea_update_rq2a(pr->qp, adder);
        else
                ehea_update_rq3a(pr->qp, adder);
out:
        return ret;
}


static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
        return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
                                  nr_of_wqes, EHEA_RWQE2_TYPE,
                                  EHEA_RQ2_PKT_SIZE + NET_IP_ALIGN);
}


static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
        return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
                                  nr_of_wqes, EHEA_RWQE3_TYPE,
                                  EHEA_MAX_PACKET_SIZE + NET_IP_ALIGN);
}

static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
        *rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
        if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
                return 0;
        if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
            (cqe->header_length == 0))
                return 0;
        return -EINVAL;
}

static inline void ehea_fill_skb(struct net_device *dev,
                                 struct sk_buff *skb, struct ehea_cqe *cqe)
{
        int length = cqe->num_bytes_transfered - 4;     /* remove CRC */

        skb_put(skb, length);
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb->protocol = eth_type_trans(skb, dev);
}

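/*
 * Fetch (and clear) the skb belonging to a completed receive WQE.
 * The following array slot and its skb data are prefetched, as they
 * are likely to be needed next. The _ll variant below indexes by WQE
 * number instead of by the index encoded in the CQE (used for the
 * low-latency RQ1).
 */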
static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
                                               int arr_len,
                                               struct ehea_cqe *cqe)
{
        int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
        struct sk_buff *skb;
        void *pref;
        int x;

        x = skb_index + 1;
        x &= (arr_len - 1);

        pref = skb_array[x];
        prefetchw(pref);
        prefetchw(pref + EHEA_CACHE_LINE);

        pref = (skb_array[x]->data);
        prefetch(pref);
        prefetch(pref + EHEA_CACHE_LINE);
        prefetch(pref + EHEA_CACHE_LINE * 2);
        prefetch(pref + EHEA_CACHE_LINE * 3);
        skb = skb_array[skb_index];
        skb_array[skb_index] = NULL;
        return skb;
}

static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
                                                  int arr_len, int wqe_index)
{
        struct sk_buff *skb;
        void *pref;
        int x;

        x = wqe_index + 1;
        x &= (arr_len - 1);

        pref = skb_array[x];
        prefetchw(pref);
        prefetchw(pref + EHEA_CACHE_LINE);

        pref = (skb_array[x]->data);
        prefetchw(pref);
        prefetchw(pref + EHEA_CACHE_LINE);

        skb = skb_array[wqe_index];
        skb_array[wqe_index] = NULL;
        return skb;
}

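/*
 * Account a receive completion error and free the affected skb.
 * Returns nonzero for a fatal error, in which case a port reset has
 * been scheduled and the caller must stop polling this queue.
 */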
static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
                                 struct ehea_cqe *cqe, int *processed_rq2,
                                 int *processed_rq3)
{
        struct sk_buff *skb;

        if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
                pr->p_stats.err_tcp_cksum++;
        if (cqe->status & EHEA_CQE_STAT_ERR_IP)
                pr->p_stats.err_ip_cksum++;
        if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
                pr->p_stats.err_frame_crc++;

        if (rq == 2) {
                *processed_rq2 += 1;
                skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
                dev_kfree_skb(skb);
        } else if (rq == 3) {
                *processed_rq3 += 1;
                skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
                dev_kfree_skb(skb);
        }

        if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
                if (netif_msg_rx_err(pr->port)) {
                        ehea_error("Critical receive error for QP %d. "
                                   "Resetting port.", pr->qp->init_attr.qp_nr);
                        ehea_dump(cqe, sizeof(*cqe), "CQE");
                }
                schedule_work(&pr->port->reset_task);
                return 1;
        }

        return 0;
}

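/*
 * LRO callback: locate the IP and TCP headers of a received frame so
 * that inet_lro can decide whether it may be aggregated. Returns -1
 * for anything that is not a complete TCP/IPv4 packet. The unswapped
 * compare of iph->tot_len is safe here since eHEA exists only in
 * big-endian POWER systems.
 */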
static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
                       void **tcph, u64 *hdr_flags, void *priv)
{
        struct ehea_cqe *cqe = priv;
        unsigned int ip_len;
        struct iphdr *iph;

        /* non tcp/udp packets */
        if (!cqe->header_length)
                return -1;

        /* non tcp packet */
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);
        if (iph->protocol != IPPROTO_TCP)
                return -1;

        ip_len = ip_hdrlen(skb);
        skb_set_transport_header(skb, ip_len);
        *tcph = tcp_hdr(skb);

        /* check if ip header and tcp header are complete */
        if (iph->tot_len < ip_len + tcp_hdrlen(skb))
                return -1;

        *hdr_flags = LRO_IPV4 | LRO_TCP;
        *iphdr = iph;

        return 0;
}

static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
                          struct sk_buff *skb)
{
        int vlan_extracted = (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
                && pr->port->vgrp;

        if (use_lro) {
                if (vlan_extracted)
                        lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb,
                                                     pr->port->vgrp,
                                                     cqe->vlan_tag,
                                                     cqe);
                else
                        lro_receive_skb(&pr->lro_mgr, skb, cqe);
        } else {
                if (vlan_extracted)
                        vlan_hwaccel_receive_skb(skb, pr->port->vgrp,
                                                 cqe->vlan_tag);
                else
                        netif_receive_skb(skb);
        }
}

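/*
 * NAPI receive path: process up to budget receive completions, hand
 * the packets to the stack (through LRO and/or VLAN acceleration when
 * enabled) and refill the receive queues by the amounts consumed.
 * Low-latency RQ1 frames arrive inline behind the CQE header and are
 * copied into the matching pre-allocated skb.
 */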
static int ehea_proc_rwqes(struct net_device *dev,
                           struct ehea_port_res *pr,
                           int budget)
{
        struct ehea_port *port = pr->port;
        struct ehea_qp *qp = pr->qp;
        struct ehea_cqe *cqe;
        struct sk_buff *skb;
        struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
        struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
        struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
        int skb_arr_rq1_len = pr->rq1_skba.len;
        int skb_arr_rq2_len = pr->rq2_skba.len;
        int skb_arr_rq3_len = pr->rq3_skba.len;
        int processed, processed_rq1, processed_rq2, processed_rq3;
        int wqe_index, last_wqe_index, rq, port_reset;

        processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
        last_wqe_index = 0;

        cqe = ehea_poll_rq1(qp, &wqe_index);
        while ((processed < budget) && cqe) {
                ehea_inc_rq1(qp);
                processed_rq1++;
                processed++;
                if (netif_msg_rx_status(port))
                        ehea_dump(cqe, sizeof(*cqe), "CQE");

                last_wqe_index = wqe_index;
                rmb();
                if (!ehea_check_cqe(cqe, &rq)) {
                        if (rq == 1) {  /* LL RQ1 */
                                skb = get_skb_by_index_ll(skb_arr_rq1,
                                                          skb_arr_rq1_len,
                                                          wqe_index);
                                if (unlikely(!skb)) {
                                        if (netif_msg_rx_err(port))
                                                ehea_error("LL rq1: skb=NULL");

                                        skb = netdev_alloc_skb(dev,
                                                               EHEA_L_PKT_SIZE);
                                        if (!skb)
                                                break;
                                }
                                skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
                                                 cqe->num_bytes_transfered - 4);
                                ehea_fill_skb(dev, skb, cqe);
                        } else if (rq == 2) {  /* RQ2 */
                                skb = get_skb_by_index(skb_arr_rq2,
                                                       skb_arr_rq2_len, cqe);
                                if (unlikely(!skb)) {
                                        if (netif_msg_rx_err(port))
                                                ehea_error("rq2: skb=NULL");
                                        break;
                                }
                                ehea_fill_skb(dev, skb, cqe);
                                processed_rq2++;
                        } else {  /* RQ3 */
                                skb = get_skb_by_index(skb_arr_rq3,
                                                       skb_arr_rq3_len, cqe);
                                if (unlikely(!skb)) {
                                        if (netif_msg_rx_err(port))
                                                ehea_error("rq3: skb=NULL");
                                        break;
                                }
                                ehea_fill_skb(dev, skb, cqe);
                                processed_rq3++;
                        }

                        ehea_proc_skb(pr, cqe, skb);
                        dev->last_rx = jiffies;
                } else {
                        pr->p_stats.poll_receive_errors++;
                        port_reset = ehea_treat_poll_error(pr, rq, cqe,
                                                           &processed_rq2,
                                                           &processed_rq3);
                        if (port_reset)
                                break;
                }
                cqe = ehea_poll_rq1(qp, &wqe_index);
        }
        if (use_lro)
                lro_flush_all(&pr->lro_mgr);

        pr->rx_packets += processed;

        ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
        ehea_refill_rq2(pr, processed_rq2);
        ehea_refill_rq3(pr, processed_rq3);

        return processed;
}

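/*
 * Process send completions: free transmitted skbs, return the
 * completed WQEs to swqe_avail and wake the transmit queue if it was
 * stopped and enough WQEs are available again. A completion carrying
 * an error status triggers a port reset.
 */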
static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
        struct sk_buff *skb;
        struct ehea_cq *send_cq = pr->send_cq;
        struct ehea_cqe *cqe;
        int quota = my_quota;
        int cqe_counter = 0;
        int swqe_av = 0;
        int index;
        unsigned long flags;

        cqe = ehea_poll_cq(send_cq);
        while (cqe && (quota > 0)) {
                ehea_inc_cq(send_cq);

                cqe_counter++;
                rmb();
                if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
                        ehea_error("Send Completion Error: Resetting port");
                        if (netif_msg_tx_err(pr->port))
                                ehea_dump(cqe, sizeof(*cqe), "Send CQE");
                        schedule_work(&pr->port->reset_task);
                        break;
                }

                if (netif_msg_tx_done(pr->port))
                        ehea_dump(cqe, sizeof(*cqe), "CQE");

                if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
                           == EHEA_SWQE2_TYPE)) {

                        index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
                        skb = pr->sq_skba.arr[index];
                        dev_kfree_skb(skb);
                        pr->sq_skba.arr[index] = NULL;
                }

                swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
                quota--;

                cqe = ehea_poll_cq(send_cq);
        }

        ehea_update_feca(send_cq, cqe_counter);
        atomic_add(swqe_av, &pr->swqe_avail);

        spin_lock_irqsave(&pr->netif_queue, flags);

        if (pr->queue_stopped && (atomic_read(&pr->swqe_avail)
                                  >= pr->swqe_refill_th)) {
                netif_wake_queue(pr->port->netdev);
                pr->queue_stopped = 0;
        }
        spin_unlock_irqrestore(&pr->netif_queue, flags);

        return cqe;
}

#define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16
#define EHEA_POLL_MAX_CQES 65535

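/*
 * NAPI poll: reap send completions first, then receive up to budget
 * packets. If the budget is not exhausted, completion events are
 * re-enabled and both CQs are re-checked to close the race between
 * "no more work" and "enable interrupts". After
 * EHEA_NAPI_POLL_NUM_BEFORE_IRQ consecutive full-budget polls, one
 * such interrupt-enable cycle is forced to bound pure-polling latency.
 */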
static int ehea_poll(struct napi_struct *napi, int budget)
{
        struct ehea_port_res *pr = container_of(napi, struct ehea_port_res, napi);
        struct net_device *dev = pr->port->netdev;
        struct ehea_cqe *cqe;
        struct ehea_cqe *cqe_skb = NULL;
        int force_irq, wqe_index;
        int rx = 0;

        force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ);
        cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);

        if (!force_irq)
                rx += ehea_proc_rwqes(dev, pr, budget - rx);

        while ((rx != budget) || force_irq) {
                pr->poll_counter = 0;
                force_irq = 0;
                netif_rx_complete(dev, napi);
                ehea_reset_cq_ep(pr->recv_cq);
                ehea_reset_cq_ep(pr->send_cq);
                ehea_reset_cq_n1(pr->recv_cq);
                ehea_reset_cq_n1(pr->send_cq);
                cqe = ehea_poll_rq1(pr->qp, &wqe_index);
                cqe_skb = ehea_poll_cq(pr->send_cq);

                if (!cqe && !cqe_skb)
                        return rx;

                if (!netif_rx_reschedule(dev, napi))
                        return rx;

                cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
                rx += ehea_proc_rwqes(dev, pr, budget - rx);
        }

        pr->poll_counter++;
        return rx;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ehea_netpoll(struct net_device *dev)
{
        struct ehea_port *port = netdev_priv(dev);
        int i;

        for (i = 0; i < port->num_def_qps; i++)
                netif_rx_schedule(dev, &port->port_res[i].napi);
}
#endif

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
        struct ehea_port_res *pr = param;

        netif_rx_schedule(pr->port->netdev, &pr->napi);

        return IRQ_HANDLED;
}

static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
        struct ehea_port *port = param;
        struct ehea_eqe *eqe;
        struct ehea_qp *qp;
        u32 qp_token;

        eqe = ehea_poll_eq(port->qp_eq);

        while (eqe) {
                qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
                ehea_error("QP aff_err: entry=0x%lx, token=0x%x",
                           eqe->entry, qp_token);

                qp = port->port_res[qp_token].qp;
                ehea_error_data(port->adapter, qp->fw_handle);
                eqe = ehea_poll_eq(port->qp_eq);
        }

        schedule_work(&port->reset_task);

        return IRQ_HANDLED;
}

static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
                                       int logical_port)
{
        int i;

        for (i = 0; i < EHEA_MAX_PORTS; i++)
                if (adapter->port[i])
                        if (adapter->port[i]->logical_port_id == logical_port)
                                return adapter->port[i];
        return NULL;
}

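/*
 * Query the port attributes (H_PORT_CB0) from the hypervisor and cache
 * MAC address, link speed/duplex and the number of default and
 * additional TX QPs in the port structure.
 */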
int ehea_sense_port_attr(struct ehea_port *port)
{
        int ret;
        u64 hret;
        struct hcp_ehea_port_cb0 *cb0;

        cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC);   /* May be called via */
        if (!cb0) {                             /* ehea_neq_tasklet() */
                ehea_error("no mem for cb0");
                ret = -ENOMEM;
                goto out;
        }

        hret = ehea_h_query_ehea_port(port->adapter->handle,
                                      port->logical_port_id, H_PORT_CB0,
                                      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
                                      cb0);
        if (hret != H_SUCCESS) {
                ret = -EIO;
                goto out_free;
        }

        /* MAC address */
        port->mac_addr = cb0->port_mac_addr << 16;

        if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
                ret = -EADDRNOTAVAIL;
                goto out_free;
        }

        /* Port speed */
        switch (cb0->port_speed) {
        case H_SPEED_10M_H:
                port->port_speed = EHEA_SPEED_10M;
                port->full_duplex = 0;
                break;
        case H_SPEED_10M_F:
                port->port_speed = EHEA_SPEED_10M;
                port->full_duplex = 1;
                break;
        case H_SPEED_100M_H:
                port->port_speed = EHEA_SPEED_100M;
                port->full_duplex = 0;
                break;
        case H_SPEED_100M_F:
                port->port_speed = EHEA_SPEED_100M;
                port->full_duplex = 1;
                break;
        case H_SPEED_1G_F:
                port->port_speed = EHEA_SPEED_1G;
                port->full_duplex = 1;
                break;
        case H_SPEED_10G_F:
                port->port_speed = EHEA_SPEED_10G;
                port->full_duplex = 1;
                break;
        default:
                port->port_speed = 0;
                port->full_duplex = 0;
                break;
        }

        port->autoneg = 1;
        port->num_mcs = cb0->num_default_qps;

        /* Number of default QPs */
        if (use_mcs)
                port->num_def_qps = cb0->num_default_qps;
        else
                port->num_def_qps = 1;

        if (!port->num_def_qps) {
                ret = -EINVAL;
                goto out_free;
        }

        port->num_tx_qps = num_tx_qps;

        if (port->num_def_qps >= port->num_tx_qps)
                port->num_add_tx_qps = 0;
        else
                port->num_add_tx_qps = port->num_tx_qps - port->num_def_qps;

        ret = 0;
out_free:
        if (ret || netif_msg_probe(port))
                ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
        kfree(cb0);
out:
        return ret;
}

int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
        struct hcp_ehea_port_cb4 *cb4;
        u64 hret;
        int ret = 0;

        cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!cb4) {
                ehea_error("no mem for cb4");
                ret = -ENOMEM;
                goto out;
        }

        cb4->port_speed = port_speed;

        netif_carrier_off(port->netdev);

        hret = ehea_h_modify_ehea_port(port->adapter->handle,
                                       port->logical_port_id,
                                       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
        if (hret == H_SUCCESS) {
                port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

                hret = ehea_h_query_ehea_port(port->adapter->handle,
                                              port->logical_port_id,
                                              H_PORT_CB4, H_PORT_CB4_SPEED,
                                              cb4);
                if (hret == H_SUCCESS) {
                        switch (cb4->port_speed) {
                        case H_SPEED_10M_H:
                                port->port_speed = EHEA_SPEED_10M;
                                port->full_duplex = 0;
                                break;
                        case H_SPEED_10M_F:
                                port->port_speed = EHEA_SPEED_10M;
                                port->full_duplex = 1;
                                break;
                        case H_SPEED_100M_H:
                                port->port_speed = EHEA_SPEED_100M;
                                port->full_duplex = 0;
                                break;
                        case H_SPEED_100M_F:
                                port->port_speed = EHEA_SPEED_100M;
                                port->full_duplex = 1;
                                break;
                        case H_SPEED_1G_F:
                                port->port_speed = EHEA_SPEED_1G;
                                port->full_duplex = 1;
                                break;
                        case H_SPEED_10G_F:
                                port->port_speed = EHEA_SPEED_10G;
                                port->full_duplex = 1;
                                break;
                        default:
                                port->port_speed = 0;
                                port->full_duplex = 0;
                                break;
                        }
                } else {
                        ehea_error("Failed sensing port speed");
                        ret = -EIO;
                }
        } else {
                if (hret == H_AUTHORITY) {
                        ehea_info("Hypervisor denied setting port speed");
                        ret = -EPERM;
                } else {
                        ret = -EIO;
                        ehea_error("Failed setting port speed");
                }
        }
        if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
                netif_carrier_on(port->netdev);

        kfree(cb4);
out:
        return ret;
}

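/*
 * Decode one asynchronous event queue entry: propagate logical and
 * physical link state changes to the net_device, report external
 * switch failover state and handle adapter/port malfunction events.
 */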
static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
        int ret;
        u8 ec;
        u8 portnum;
        struct ehea_port *port;

        ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
        portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
        port = ehea_get_port(adapter, portnum);

        switch (ec) {
        case EHEA_EC_PORTSTATE_CHG:     /* port state change */

                if (!port) {
                        ehea_error("unknown portnum %x", portnum);
                        break;
                }

                if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
                        if (!netif_carrier_ok(port->netdev)) {
                                ret = ehea_sense_port_attr(port);
                                if (ret) {
                                        ehea_error("failed resensing port "
                                                   "attributes");
                                        break;
                                }

                                if (netif_msg_link(port))
                                        ehea_info("%s: Logical port up: %dMbps "
                                                  "%s Duplex",
                                                  port->netdev->name,
                                                  port->port_speed,
                                                  port->full_duplex ==
                                                  1 ? "Full" : "Half");

                                netif_carrier_on(port->netdev);
                                netif_wake_queue(port->netdev);
                        }
                } else
                        if (netif_carrier_ok(port->netdev)) {
                                if (netif_msg_link(port))
                                        ehea_info("%s: Logical port down",
                                                  port->netdev->name);
                                netif_carrier_off(port->netdev);
                                netif_stop_queue(port->netdev);
                        }

                if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
                        port->phy_link = EHEA_PHY_LINK_UP;
                        if (netif_msg_link(port))
                                ehea_info("%s: Physical port up",
                                          port->netdev->name);
                        if (prop_carrier_state)
                                netif_carrier_on(port->netdev);
                } else {
                        port->phy_link = EHEA_PHY_LINK_DOWN;
                        if (netif_msg_link(port))
                                ehea_info("%s: Physical port down",
                                          port->netdev->name);
                        if (prop_carrier_state)
                                netif_carrier_off(port->netdev);
                }

                if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
                        ehea_info("External switch port is primary port");
                else
                        ehea_info("External switch port is backup port");

                break;
        case EHEA_EC_ADAPTER_MALFUNC:
                ehea_error("Adapter malfunction");
                break;
        case EHEA_EC_PORT_MALFUNC:
                ehea_info("Port malfunction: Device: %s", port->netdev->name);
                netif_carrier_off(port->netdev);
                netif_stop_queue(port->netdev);
                break;
        default:
                ehea_error("unknown event code %x, eqe=0x%lX", ec, eqe);
                break;
        }
}

static void ehea_neq_tasklet(unsigned long data)
{
        struct ehea_adapter *adapter = (struct ehea_adapter *)data;
        struct ehea_eqe *eqe;
        u64 event_mask;

        eqe = ehea_poll_eq(adapter->neq);
        ehea_debug("eqe=%p", eqe);

        while (eqe) {
                ehea_debug("*eqe=%lx", eqe->entry);
                ehea_parse_eqe(adapter, eqe->entry);
                eqe = ehea_poll_eq(adapter->neq);
                ehea_debug("next eqe=%p", eqe);
        }

        event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
                   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
                   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

        ehea_h_reset_events(adapter->handle,
                            adapter->neq->fw_handle, event_mask);
}

static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
        struct ehea_adapter *adapter = param;
        tasklet_hi_schedule(&adapter->neq_tasklet);
        return IRQ_HANDLED;
}


static int ehea_fill_port_res(struct ehea_port_res *pr)
{
        int ret;
        struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

        ret = ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
                                     - init_attr->act_nr_rwqes_rq2
                                     - init_attr->act_nr_rwqes_rq3 - 1);

        ret |= ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

        ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

        return ret;
}

static int ehea_reg_interrupts(struct net_device *dev)
{
        struct ehea_port *port = netdev_priv(dev);
        struct ehea_port_res *pr;
        int i, ret;

        snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
                 dev->name);

        ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
                                  ehea_qp_aff_irq_handler,
                                  IRQF_DISABLED, port->int_aff_name, port);
        if (ret) {
                ehea_error("failed registering irq for qp_aff_irq_handler:"
                           "ist=%X", port->qp_eq->attr.ist1);
                goto out_free_qpeq;
        }

        if (netif_msg_ifup(port))
                ehea_info("irq_handle 0x%X for function qp_aff_irq_handler "
                          "registered", port->qp_eq->attr.ist1);

        for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
                pr = &port->port_res[i];
                snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
                         "%s-queue%d", dev->name, i);
                ret = ibmebus_request_irq(pr->eq->attr.ist1,
                                          ehea_recv_irq_handler,
                                          IRQF_DISABLED, pr->int_send_name,
                                          pr);
                if (ret) {
                        ehea_error("failed registering irq for ehea_queue "
                                   "port_res_nr:%d, ist=%X", i,
                                   pr->eq->attr.ist1);
                        goto out_free_req;
                }
                if (netif_msg_ifup(port))
                        ehea_info("irq_handle 0x%X for function ehea_queue_int "
                                  "%d registered", pr->eq->attr.ist1, i);
        }
out:
        return ret;

out_free_req:
        while (--i >= 0) {
                u32 ist = port->port_res[i].eq->attr.ist1;
                ibmebus_free_irq(ist, &port->port_res[i]);
        }

out_free_qpeq:
        ibmebus_free_irq(port->qp_eq->attr.ist1, port);
        i = port->num_def_qps;

        goto out;
}

static void ehea_free_interrupts(struct net_device *dev)
{
        struct ehea_port *port = netdev_priv(dev);
        struct ehea_port_res *pr;
        int i;

        /* send */
        for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
                pr = &port->port_res[i];
                ibmebus_free_irq(pr->eq->attr.ist1, pr);
                if (netif_msg_intr(port))
                        ehea_info("free send irq for res %d with handle 0x%X",
                                  i, pr->eq->attr.ist1);
        }

        /* associated events */
        ibmebus_free_irq(port->qp_eq->attr.ist1, port);
        if (netif_msg_intr(port))
                ehea_info("associated event interrupt for handle 0x%X freed",
                          port->qp_eq->attr.ist1);
}

static int ehea_configure_port(struct ehea_port *port)
{
        int ret, i;
        u64 hret, mask;
        struct hcp_ehea_port_cb0 *cb0;

        ret = -ENOMEM;
        cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!cb0)
                goto out;

        cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
                     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
                     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
                     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
                     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
                                      PXLY_RC_VLAN_FILTER)
                     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

        for (i = 0; i < port->num_mcs; i++)
                if (use_mcs)
                        cb0->default_qpn_arr[i] =
                                port->port_res[i].qp->init_attr.qp_nr;
                else
                        cb0->default_qpn_arr[i] =
                                port->port_res[0].qp->init_attr.qp_nr;

        if (netif_msg_ifup(port))
                ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

        mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
             | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

        hret = ehea_h_modify_ehea_port(port->adapter->handle,
                                       port->logical_port_id,
                                       H_PORT_CB0, mask, cb0);
        ret = -EIO;
        if (hret != H_SUCCESS)
                goto out_free;

        ret = 0;

out_free:
        kfree(cb0);
out:
        return ret;
}

int ehea_gen_smrs(struct ehea_port_res *pr)
{
        int ret;
        struct ehea_adapter *adapter = pr->port->adapter;

        ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
        if (ret)
                goto out;

        ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
        if (ret)
                goto out_free;

        return 0;

out_free:
        ehea_rem_mr(&pr->send_mr);
out:
        ehea_error("Generating SMRS failed\n");
        return -EIO;
}

int ehea_rem_smrs(struct ehea_port_res *pr)
{
        if ((ehea_rem_mr(&pr->send_mr))
            || (ehea_rem_mr(&pr->recv_mr)))
                return -EIO;
        else
                return 0;
}

static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
        int arr_size = sizeof(void *) * max_q_entries;

        q_skba->arr = vmalloc(arr_size);
        if (!q_skba->arr)
                return -ENOMEM;

        memset(q_skba->arr, 0, arr_size);

        q_skba->len = max_q_entries;
        q_skba->index = 0;
        q_skba->os_skbs = 0;

        return 0;
}

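/*
 * Set up one port resource (queue set): its event queue, send and
 * receive completion queues, the QP with its three receive queues,
 * the skb bookkeeping arrays, shared memory regions, and the
 * per-queue NAPI and LRO state.
 */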
static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
                              struct port_res_cfg *pr_cfg, int queue_token)
{
        struct ehea_adapter *adapter = port->adapter;
        enum ehea_eq_type eq_type = EHEA_EQ;
        struct ehea_qp_init_attr *init_attr = NULL;
        int ret = -EIO;

        memset(pr, 0, sizeof(struct ehea_port_res));

        pr->port = port;
        spin_lock_init(&pr->xmit_lock);
        spin_lock_init(&pr->netif_queue);

        pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
        if (!pr->eq) {
                ehea_error("create_eq failed (eq)");
                goto out_free;
        }

        pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
                                     pr->eq->fw_handle,
                                     port->logical_port_id);
        if (!pr->recv_cq) {
                ehea_error("create_cq failed (cq_recv)");
                goto out_free;
        }

        pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
                                     pr->eq->fw_handle,
                                     port->logical_port_id);
        if (!pr->send_cq) {
                ehea_error("create_cq failed (cq_send)");
                goto out_free;
        }

        if (netif_msg_ifup(port))
                ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d",
                          pr->send_cq->attr.act_nr_of_cqes,
                          pr->recv_cq->attr.act_nr_of_cqes);

        init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
        if (!init_attr) {
                ret = -ENOMEM;
                ehea_error("no mem for ehea_qp_init_attr");
                goto out_free;
        }

        init_attr->low_lat_rq1 = 1;
        init_attr->signalingtype = 1;   /* generate CQE if specified in WQE */
        init_attr->rq_count = 3;
        init_attr->qp_token = queue_token;
        init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
        init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
        init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
        init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
        init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
        init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
        init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
        init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
        init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
        init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
        init_attr->port_nr = port->logical_port_id;
        init_attr->send_cq_handle = pr->send_cq->fw_handle;
        init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
        init_attr->aff_eq_handle = port->qp_eq->fw_handle;

        pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
        if (!pr->qp) {
                ehea_error("create_qp failed");
                ret = -EIO;
                goto out_free;
        }

        if (netif_msg_ifup(port))
                ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n "
                          "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr,
                          init_attr->act_nr_send_wqes,
                          init_attr->act_nr_rwqes_rq1,
                          init_attr->act_nr_rwqes_rq2,
                          init_attr->act_nr_rwqes_rq3);

        ret = ehea_init_q_skba(&pr->sq_skba, init_attr->act_nr_send_wqes + 1);
        ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
        ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
        ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
        if (ret)
                goto out_free;

        pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
        if (ehea_gen_smrs(pr) != 0) {
                ret = -EIO;
                goto out_free;
        }

        atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);

        kfree(init_attr);

        netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);

        pr->lro_mgr.max_aggr = pr->port->lro_max_aggr;
        pr->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
        pr->lro_mgr.lro_arr = pr->lro_desc;
        pr->lro_mgr.get_skb_header = get_skb_hdr;
        pr->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
        pr->lro_mgr.dev = port->netdev;
        pr->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
        pr->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;

        ret = 0;
        goto out;

out_free:
        kfree(init_attr);
        vfree(pr->sq_skba.arr);
        vfree(pr->rq1_skba.arr);
        vfree(pr->rq2_skba.arr);
        vfree(pr->rq3_skba.arr);
        ehea_destroy_qp(pr->qp);
        ehea_destroy_cq(pr->send_cq);
        ehea_destroy_cq(pr->recv_cq);
        ehea_destroy_eq(pr->eq);
out:
        return ret;
}

static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
{
        int ret, i;

        ret = ehea_destroy_qp(pr->qp);

        if (!ret) {
                ehea_destroy_cq(pr->send_cq);
                ehea_destroy_cq(pr->recv_cq);
                ehea_destroy_eq(pr->eq);

                for (i = 0; i < pr->rq1_skba.len; i++)
                        if (pr->rq1_skba.arr[i])
                                dev_kfree_skb(pr->rq1_skba.arr[i]);

                for (i = 0; i < pr->rq2_skba.len; i++)
                        if (pr->rq2_skba.arr[i])
                                dev_kfree_skb(pr->rq2_skba.arr[i]);

                for (i = 0; i < pr->rq3_skba.len; i++)
                        if (pr->rq3_skba.arr[i])
                                dev_kfree_skb(pr->rq3_skba.arr[i]);

                for (i = 0; i < pr->sq_skba.len; i++)
                        if (pr->sq_skba.arr[i])
                                dev_kfree_skb(pr->sq_skba.arr[i]);

                vfree(pr->rq1_skba.arr);
                vfree(pr->rq2_skba.arr);
                vfree(pr->rq3_skba.arr);
                vfree(pr->sq_skba.arr);
                ret = ehea_rem_smrs(pr);
        }
        return ret;
}

/*
 * The write_* functions store information in swqe which is used by
 * the hardware to calculate the ip/tcp/udp checksum
 */

static inline void write_ip_start_end(struct ehea_swqe *swqe,
                                      const struct sk_buff *skb)
{
        swqe->ip_start = skb_network_offset(skb);
        swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1);
}

static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
                                        const struct sk_buff *skb)
{
        swqe->tcp_offset =
                (u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));

        swqe->tcp_end = (u16)skb->len - 1;
}

static inline void write_udp_offset_end(struct ehea_swqe *swqe,
                                        const struct sk_buff *skb)
{
        swqe->tcp_offset =
                (u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));

        swqe->tcp_end = (u16)skb->len - 1;
}

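/*
 * For a TSO frame only the ethernet/IP/TCP headers are placed in the
 * WQE's immediate data area; the rest of the linear skb data is
 * referenced through sg1entry, and swqe->mss tells the hardware the
 * segment size.
 */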
1416static void write_swqe2_TSO(struct sk_buff *skb,
1417                            struct ehea_swqe *swqe, u32 lkey)
1418{
1419        struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
1420        u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
1421        int skb_data_size = skb->len - skb->data_len;
1422        int headersize;
1423
1424        /* Packet is TCP with TSO enabled */
1425        swqe->tx_control |= EHEA_SWQE_TSO;
1426        swqe->mss = skb_shinfo(skb)->gso_size;
1427        /* copy only eth/ip/tcp headers to immediate data and
1428         * the rest of skb->data to sg1entry
1429         */
1430        headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
1431
1432        skb_data_size = skb->len - skb->data_len;
1433
1434        if (skb_data_size >= headersize) {
1435                /* copy immediate data */
1436                skb_copy_from_linear_data(skb, imm_data, headersize);
1437                swqe->immediate_data_length = headersize;
1438
1439                if (skb_data_size > headersize) {
1440                        /* set sg1entry data */
1441                        sg1entry->l_key = lkey;
1442                        sg1entry->len = skb_data_size - headersize;
1443                        sg1entry->vaddr =
1444                                ehea_map_vaddr(skb->data + headersize);
1445                        swqe->descriptors++;
1446                }
1447        } else
1448                ehea_error("cannot handle fragmented headers");
1449}
1450
1451static void write_swqe2_nonTSO(struct sk_buff *skb,
1452                               struct ehea_swqe *swqe, u32 lkey)
1453{
1454        int skb_data_size = skb->len - skb->data_len;
1455        u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
1456        struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
1457
1458        /* Packet is any nonTSO type
1459         *
1460         * Copy as much as possible skb->data to immediate data and
1461         * the rest to sg1entry
1462         */
1463        if (skb_data_size >= SWQE2_MAX_IMM) {
1464                /* copy immediate data */
1465                skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM);
1466
1467                swqe->immediate_data_length = SWQE2_MAX_IMM;
1468
1469                if (skb_data_size > SWQE2_MAX_IMM) {
1470                        /* copy sg1entry data */
1471                        sg1entry->l_key = lkey;
1472                        sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
1473                        sg1entry->vaddr =
1474                                ehea_map_vaddr(skb->data + SWQE2_MAX_IMM);
1475                        swqe->descriptors++;
1476                }
1477        } else {
1478                skb_copy_from_linear_data(skb, imm_data, skb_data_size);
1479                swqe->immediate_data_length = skb_data_size;
1480        }
1481}
1482
1483static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
1484                                    struct ehea_swqe *swqe, u32 lkey)
1485{
1486        struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
1487        skb_frag_t *frag;
1488        int nfrags, sg1entry_contains_frag_data, i;
1489
1490        nfrags = skb_shinfo(skb)->nr_frags;
1491        sg1entry = &swqe->u.immdata_desc.sg_entry;
1492        sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
1493        swqe->descriptors = 0;
1494        sg1entry_contains_frag_data = 0;
1495
1496        if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size)
1497                write_swqe2_TSO(skb, swqe, lkey);
1498        else
1499                write_swqe2_nonTSO(skb, swqe, lkey);
1500
1501        /* write descriptors */
1502        if (nfrags > 0) {
1503                if (swqe->descriptors == 0) {
1504                        /* sg1entry not yet used */
1505                        frag = &skb_shinfo(skb)->frags[0];
1506
1507                        /* copy sg1entry data */
1508                        sg1entry->l_key = lkey;
1509                        sg1entry->len = frag->size;
1510                        sg1entry->vaddr =
1511                                ehea_map_vaddr(page_address(frag->page)
1512                                               + frag->page_offset);
1513                        swqe->descriptors++;
1514                        sg1entry_contains_frag_data = 1;
1515                }
1516
1517                for (i = sg1entry_contains_frag_data; i < nfrags; i++) {
1518
1519                        frag = &skb_shinfo(skb)->frags[i];
1520                        sgentry = &sg_list[i - sg1entry_contains_frag_data];
1521
1522                        sgentry->l_key = lkey;
1523                        sgentry->len = frag->size;
1524                        sgentry->vaddr =
1525                                ehea_map_vaddr(page_address(frag->page)
1526                                               + frag->page_offset);
1527                        swqe->descriptors++;
1528                }
1529        }
1530}
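
    /*
     * Resulting SWQE2 layout (illustrative, for a non-TSO skb whose
     * linear part exceeds SWQE2_MAX_IMM and that carries N fragments):
     *
     *   immediate data  : first SWQE2_MAX_IMM bytes of skb->data
     *   sg1entry        : remainder of the linear part
     *   sg_list[0..N-1] : one descriptor per page fragment
     *
     * so descriptors == 1 + N.  If the linear part fits completely into
     * the immediate area, sg1entry takes the first fragment instead and
     * sg_list starts with the second one.
     */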
1531
1532static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
1533{
1534        int ret = 0;
1535        u64 hret;
1536        u8 reg_type;
1537
1538        /* De/Register untagged packets */
1539        reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
1540        hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1541                                     port->logical_port_id,
1542                                     reg_type, port->mac_addr, 0, hcallid);
1543        if (hret != H_SUCCESS) {
1544                ehea_error("%sregistering bc address failed (untagged)",
1545                           hcallid == H_REG_BCMC ? "" : "de");
1546                ret = -EIO;
1547                goto out_herr;
1548        }
1549
1550        /* De/Register VLAN packets */
1551        reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
1552        hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1553                                     port->logical_port_id,
1554                                     reg_type, port->mac_addr, 0, hcallid);
1555        if (hret != H_SUCCESS) {
1556                ehea_error("%sregistering bc address failed (vlan)",
1557                           hcallid == H_REG_BCMC ? "" : "de");
1558                ret = -EIO;
1559        }
1560out_herr:
1561        return ret;
1562}
1563
1564static int ehea_set_mac_addr(struct net_device *dev, void *sa)
1565{
1566        struct ehea_port *port = netdev_priv(dev);
1567        struct sockaddr *mac_addr = sa;
1568        struct hcp_ehea_port_cb0 *cb0;
1569        int ret;
1570        u64 hret;
1571
1572        if (!is_valid_ether_addr(mac_addr->sa_data)) {
1573                ret = -EADDRNOTAVAIL;
1574                goto out;
1575        }
1576
1577        cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
1578        if (!cb0) {
1579                ehea_error("no mem for cb0");
1580                ret = -ENOMEM;
1581                goto out;
1582        }
1583
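            /*
             * The six MAC bytes land in the upper bytes of the u64 on
             * this big-endian platform; shift them down into the low
             * 48 bits of port_mac_addr.
             */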
1584        memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);
1585
1586        cb0->port_mac_addr = cb0->port_mac_addr >> 16;
1587
1588        hret = ehea_h_modify_ehea_port(port->adapter->handle,
1589                                       port->logical_port_id, H_PORT_CB0,
1590                                       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
1591        if (hret != H_SUCCESS) {
1592                ret = -EIO;
1593                goto out_free;
1594        }
1595
1596        memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
1597
1598        /* Deregister old MAC in pHYP */
1599        ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
1600        if (ret)
1601                goto out_free;
1602
1603        port->mac_addr = cb0->port_mac_addr << 16;
1604
1605        /* Register new MAC in pHYP */
1606        ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
1607        if (ret)
1608                goto out_free;
1609
1610        ret = 0;
1611out_free:
1612        kfree(cb0);
1613out:
1614        return ret;
1615}
1616
1617static void ehea_promiscuous_error(u64 hret, int enable)
1618{
1619        if (hret == H_AUTHORITY)
1620                ehea_info("Hypervisor denied %sabling promiscuous mode",
1621                          enable == 1 ? "en" : "dis");
1622        else
1623                ehea_error("failed %sabling promiscuous mode",
1624                           enable == 1 ? "en" : "dis");
1625}
1626
1627static void ehea_promiscuous(struct net_device *dev, int enable)
1628{
1629        struct ehea_port *port = netdev_priv(dev);
1630        struct hcp_ehea_port_cb7 *cb7;
1631        u64 hret;
1632
1633        if ((enable && port->promisc) || (!enable && !port->promisc))
1634                return;
1635
1636        cb7 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
1637        if (!cb7) {
1638                ehea_error("no mem for cb7");
1639                goto out;
1640        }
1641
1642        /* Modify Pxs_DUCQPN in CB7 */
1643        cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;
1644
1645        hret = ehea_h_modify_ehea_port(port->adapter->handle,
1646                                       port->logical_port_id,
1647                                       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
1648        if (hret) {
1649                ehea_promiscuous_error(hret, enable);
1650                goto out;
1651        }
1652
1653        port->promisc = enable;
1654out:
1655        kfree(cb7);
1656        return;
1657}
1658
1659static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
1660                                     u32 hcallid)
1661{
1662        u64 hret;
1663        u8 reg_type;
1664
1665        reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
1666                 | EHEA_BCMC_UNTAGGED;
1667
1668        hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1669                                     port->logical_port_id,
1670                                     reg_type, mc_mac_addr, 0, hcallid);
1671        if (hret)
1672                goto out;
1673
1674        reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
1675                 | EHEA_BCMC_VLANID_ALL;
1676
1677        hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1678                                     port->logical_port_id,
1679                                     reg_type, mc_mac_addr, 0, hcallid);
1680out:
1681        return hret;
1682}
1683
1684static int ehea_drop_multicast_list(struct net_device *dev)
1685{
1686        struct ehea_port *port = netdev_priv(dev);
1687        struct ehea_mc_list *mc_entry = port->mc_list;
1688        struct list_head *pos;
1689        struct list_head *temp;
1690        int ret = 0;
1691        u64 hret;
1692
1693        list_for_each_safe(pos, temp, &(port->mc_list->list)) {
1694                mc_entry = list_entry(pos, struct ehea_mc_list, list);
1695
1696                hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
1697                                                 H_DEREG_BCMC);
1698                if (hret) {
1699                        ehea_error("failed deregistering mcast MAC");
1700                        ret = -EIO;
1701                }
1702
1703                list_del(pos);
1704                kfree(mc_entry);
1705        }
1706        return ret;
1707}
1708
1709static void ehea_allmulti(struct net_device *dev, int enable)
1710{
1711        struct ehea_port *port = netdev_priv(dev);
1712        u64 hret;
1713
1714        if (!port->allmulti) {
1715                if (enable) {
1716                        /* Enable ALLMULTI */
1717                        ehea_drop_multicast_list(dev);
1718                        hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
1719                        if (!hret)
1720                                port->allmulti = 1;
1721                        else
1722                                ehea_error("failed enabling IFF_ALLMULTI");
1723                }
1724        } else if (!enable) {
1725                /* Disable ALLMULTI */
1726                hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
1727                if (!hret)
1728                        port->allmulti = 0;
1729                else
1730                        ehea_error("failed disabling IFF_ALLMULTI");
1731        }
1732}
1734
1735static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
1736{
1737        struct ehea_mc_list *ehea_mcl_entry;
1738        u64 hret;
1739
1740        ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
1741        if (!ehea_mcl_entry) {
1742                ehea_error("no mem for mcl_entry");
1743                return;
1744        }
1745
1746        INIT_LIST_HEAD(&ehea_mcl_entry->list);
1747
1748        memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);
1749
1750        hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
1751                                         H_REG_BCMC);
1752        if (!hret) {
1753                list_add(&ehea_mcl_entry->list, &port->mc_list->list);
1754        } else {
1755                ehea_error("failed registering mcast MAC");
1756                kfree(ehea_mcl_entry);
1757        }
1758}
1759
1760static void ehea_set_multicast_list(struct net_device *dev)
1761{
1762        struct ehea_port *port = netdev_priv(dev);
1763        struct dev_mc_list *k_mcl_entry;
1764        int ret, i;
1765
1766        if (dev->flags & IFF_PROMISC) {
1767                ehea_promiscuous(dev, 1);
1768                return;
1769        }
1770        ehea_promiscuous(dev, 0);
1771
1772        if (dev->flags & IFF_ALLMULTI) {
1773                ehea_allmulti(dev, 1);
1774                return;
1775        }
1776        ehea_allmulti(dev, 0);
1777
1778        if (dev->mc_count) {
1779                ret = ehea_drop_multicast_list(dev);
1780                if (ret) {
1781                        /* Dropping the current multicast list failed.
1782                         * Enabling ALL_MULTI is the best we can do.
1783                         */
1784                        ehea_allmulti(dev, 1);
1785                }
1786
1787                if (dev->mc_count > port->adapter->max_mc_mac) {
1788                        ehea_info("Mcast registration limit reached (0x%lx). "
1789                                  "Use ALLMULTI!",
1790                                  port->adapter->max_mc_mac);
1791                        goto out;
1792                }
1793
1794                for (i = 0, k_mcl_entry = dev->mc_list;
1795                     i < dev->mc_count;
1796                     i++, k_mcl_entry = k_mcl_entry->next) {
1797                        ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);
1798                }
1799        }
1800out:
1801        return;
1802}
1803
1804static int ehea_change_mtu(struct net_device *dev, int new_mtu)
1805{
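            /* 68 bytes is the minimum MTU an IPv4 host must accept (RFC 791) */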
1806        if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
1807                return -EINVAL;
1808        dev->mtu = new_mtu;
1809        return 0;
1810}
1811
1812static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
1813                       struct ehea_swqe *swqe, u32 lkey)
1814{
1815        if (skb->protocol == htons(ETH_P_IP)) {
1816                const struct iphdr *iph = ip_hdr(skb);
1817
1818                /* IPv4 */
1819                swqe->tx_control |= EHEA_SWQE_CRC
1820                                 | EHEA_SWQE_IP_CHECKSUM
1821                                 | EHEA_SWQE_TCP_CHECKSUM
1822                                 | EHEA_SWQE_IMM_DATA_PRESENT
1823                                 | EHEA_SWQE_DESCRIPTORS_PRESENT;
1824
1825                write_ip_start_end(swqe, skb);
1826
1827                if (iph->protocol == IPPROTO_UDP) {
1828                        if (iph->frag_off & htons(IP_MF | IP_OFFSET))
1830                                /* IP fragment, so don't change cs */
1831                                swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
1832                        else
1833                                write_udp_offset_end(swqe, skb);
1834                } else if (iph->protocol == IPPROTO_TCP) {
1835                        write_tcp_offset_end(swqe, skb);
1836                }
1837
1838                /* icmp (big data) and ip segmentation packets (all other ip
1839                   packets) do not require any special handling */
1840
1841        } else {
1842                /* Other Ethernet Protocol */
1843                swqe->tx_control |= EHEA_SWQE_CRC
1844                                 | EHEA_SWQE_IMM_DATA_PRESENT
1845                                 | EHEA_SWQE_DESCRIPTORS_PRESENT;
1846        }
1847
1848        write_swqe2_data(skb, dev, swqe, lkey);
1849}
1850
1851static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
1852                       struct ehea_swqe *swqe)
1853{
1854        int nfrags = skb_shinfo(skb)->nr_frags;
1855        u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
1856        skb_frag_t *frag;
1857        int i;
1858
1859        if (skb->protocol == htons(ETH_P_IP)) {
1860                const struct iphdr *iph = ip_hdr(skb);
1861
1862                /* IPv4 */
1863                write_ip_start_end(swqe, skb);
1864
1865                if (iph->protocol == IPPROTO_TCP) {
1866                        swqe->tx_control |= EHEA_SWQE_CRC
1867                                         | EHEA_SWQE_IP_CHECKSUM
1868                                         | EHEA_SWQE_TCP_CHECKSUM
1869                                         | EHEA_SWQE_IMM_DATA_PRESENT;
1870
1871                        write_tcp_offset_end(swqe, skb);
1872
1873                } else if (iph->protocol == IPPROTO_UDP) {
1874                        if (iph->frag_off & htons(IP_MF | IP_OFFSET))
1876                                /* IP fragment, so don't change cs */
1877                                swqe->tx_control |= EHEA_SWQE_CRC
1878                                                 | EHEA_SWQE_IMM_DATA_PRESENT;
1879                        else {
1880                                swqe->tx_control |= EHEA_SWQE_CRC
1881                                                 | EHEA_SWQE_IP_CHECKSUM
1882                                                 | EHEA_SWQE_TCP_CHECKSUM
1883                                                 | EHEA_SWQE_IMM_DATA_PRESENT;
1884
1885                                write_udp_offset_end(swqe, skb);
1886                        }
1887                } else {
1888                        /* icmp (big data) and
1889                           ip segmentation packets (all other ip packets) */
1890                        swqe->tx_control |= EHEA_SWQE_CRC
1891                                         | EHEA_SWQE_IP_CHECKSUM
1892                                         | EHEA_SWQE_IMM_DATA_PRESENT;
1893                }
1894        } else {
1895                /* Other Ethernet Protocol */
1896                swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;
1897        }
1898        /* copy (immediate) data */
1899        if (nfrags == 0) {
1900                /* data is in a single piece */
1901                skb_copy_from_linear_data(skb, imm_data, skb->len);
1902        } else {
1903                /* first copy data from the skb->data buffer ... */
1904                skb_copy_from_linear_data(skb, imm_data,
1905                                          skb->len - skb->data_len);
1906                imm_data += skb->len - skb->data_len;
1907
1908                /* ... then copy data from the fragments */
1909                for (i = 0; i < nfrags; i++) {
1910                        frag = &skb_shinfo(skb)->frags[i];
1911                        memcpy(imm_data,
1912                               page_address(frag->page) + frag->page_offset,
1913                               frag->size);
1914                        imm_data += frag->size;
1915                }
1916        }
1917        swqe->immediate_data_length = skb->len;
1918        dev_kfree_skb(skb);
1919}
1920
1921static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps)
1922{
1923        struct tcphdr *tcp;
1924        u32 tmp;
1925
1926        if ((skb->protocol == htons(ETH_P_IP)) &&
1927            (ip_hdr(skb)->protocol == IPPROTO_TCP)) {
1928                tcp = (struct tcphdr *)(skb_network_header(skb) + (ip_hdr(skb)->ihl * 4));
1929                tmp = (tcp->source + (tcp->dest << 16)) % 31;
1930                tmp += ip_hdr(skb)->daddr % 31;
1931                return tmp % num_qps;
1932        }
1933
1934        return 0;
1935}
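
    /*
     * Sketch of the spread above (illustrative values only): the two
     * 16-bit TCP ports are folded into one word, reduced modulo the
     * small prime 31 together with the destination address, and the
     * result is spread over the available send queues.  Non-TCP
     * traffic always maps to queue 0.
     */
    #if 0
            u16 sport = 1025, dport = 80;      /* hypothetical flow */
            u32 daddr = 0x0a000001;            /* 10.0.0.1          */
            int num_qps = 4;
            u32 tmp;

            tmp = (sport + (dport << 16)) % 31;
            tmp += daddr % 31;
            /* tmp % num_qps selects the TX queue for this flow */
    #endif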
1936
1937static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
1938{
1939        struct ehea_port *port = netdev_priv(dev);
1940        struct ehea_swqe *swqe;
1941        unsigned long flags;
1942        u32 lkey;
1943        int swqe_index;
1944        struct ehea_port_res *pr;
1945
1946        pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)];
1947
1948        if (!spin_trylock(&pr->xmit_lock))
1949                return NETDEV_TX_BUSY;
1950
1951        if (pr->queue_stopped) {
1952                spin_unlock(&pr->xmit_lock);
1953                return NETDEV_TX_BUSY;
1954        }
1955
1956        swqe = ehea_get_swqe(pr->qp, &swqe_index);
1957        memset(swqe, 0, SWQE_HEADER_SIZE);
1958        atomic_dec(&pr->swqe_avail);
1959
1960        if (skb->len <= SWQE3_MAX_IMM) {
1961                u32 sig_iv = port->sig_comp_iv;
1962                u32 swqe_num = pr->swqe_id_counter;
1963                ehea_xmit3(skb, dev, swqe);
1964                swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
1965                        | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
1966                if (pr->swqe_ll_count >= (sig_iv - 1)) {
1967                        swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
1968                                                      sig_iv);
1969                        swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
1970                        pr->swqe_ll_count = 0;
1971                } else
1972                        pr->swqe_ll_count += 1;
1973        } else {
1974                swqe->wr_id =
1975                        EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
1976                      | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
1977                      | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
1978                      | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
1979                pr->sq_skba.arr[pr->sq_skba.index] = skb;
1980
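                    /* sq_skba.len is a power of two here, so the AND below wraps the ring index */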
1981                pr->sq_skba.index++;
1982                pr->sq_skba.index &= (pr->sq_skba.len - 1);
1983
1984                lkey = pr->send_mr.lkey;
1985                ehea_xmit2(skb, dev, swqe, lkey);
1986                swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
1987        }
1988        pr->swqe_id_counter += 1;
1989
1990        if (port->vgrp && vlan_tx_tag_present(skb)) {
1991                swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
1992                swqe->vlan_tag = vlan_tx_tag_get(skb);
1993        }
1994
1995        if (netif_msg_tx_queued(port)) {
1996                ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
1997                ehea_dump(swqe, 512, "swqe");
1998        }
1999
2000        if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
2001                netif_stop_queue(dev);
2002                swqe->tx_control |= EHEA_SWQE_PURGE;
2003        }
2004
2005        ehea_post_swqe(pr->qp, swqe);
2006        pr->tx_packets++;
2007
2008        if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
2009                spin_lock_irqsave(&pr->netif_queue, flags);
2010                if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
2011                        pr->p_stats.queue_stopped++;
2012                        netif_stop_queue(dev);
2013                        pr->queue_stopped = 1;
2014                }
2015                spin_unlock_irqrestore(&pr->netif_queue, flags);
2016        }
2017        dev->trans_start = jiffies;
2018        spin_unlock(&pr->xmit_lock);
2019
2020        return NETDEV_TX_OK;
2021}
2022
2023static void ehea_vlan_rx_register(struct net_device *dev,
2024                                  struct vlan_group *grp)
2025{
2026        struct ehea_port *port = netdev_priv(dev);
2027        struct ehea_adapter *adapter = port->adapter;
2028        struct hcp_ehea_port_cb1 *cb1;
2029        u64 hret;
2030
2031        port->vgrp = grp;
2032
2033        cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
2034        if (!cb1) {
2035                ehea_error("no mem for cb1");
2036                goto out;
2037        }
2038
2039        memset(cb1->vlan_filter, 0, sizeof(cb1->vlan_filter));
2040
2041        hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2042                                       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2043        if (hret != H_SUCCESS)
2044                ehea_error("modify_ehea_port failed");
2045
2046        kfree(cb1);
2047out:
2048        return;
2049}
2050
2051static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
2052{
2053        struct ehea_port *port = netdev_priv(dev);
2054        struct ehea_adapter *adapter = port->adapter;
2055        struct hcp_ehea_port_cb1 *cb1;
2056        int index;
2057        u64 hret;
2058
2059        cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
2060        if (!cb1) {
2061                ehea_error("no mem for cb1");
2062                goto out;
2063        }
2064
2065        hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2066                                      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2067        if (hret != H_SUCCESS) {
2068                ehea_error("query_ehea_port failed");
2069                goto out;
2070        }
2071
2072        index = (vid / 64);
2073        cb1->vlan_filter[index] |= (0x8000000000000000ULL >> (vid & 0x3F));
2074
2075        hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2076                                       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2077        if (hret != H_SUCCESS)
2078                ehea_error("modify_ehea_port failed");
2079out:
2080        kfree(cb1);
2081        return;
2082}
2083
2084static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2085{
2086        struct ehea_port *port = netdev_priv(dev);
2087        struct ehea_adapter *adapter = port->adapter;
2088        struct hcp_ehea_port_cb1 *cb1;
2089        int index;
2090        u64 hret;
2091
2092        vlan_group_set_device(port->vgrp, vid, NULL);
2093
2094        cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
2095        if (!cb1) {
2096                ehea_error("no mem for cb1");
2097                goto out;
2098        }
2099
2100        hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2101                                      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2102        if (hret != H_SUCCESS) {
2103                ehea_error("query_ehea_port failed");
2104                goto out;
2105        }
2106
2107        index = (vid / 64);
2108        cb1->vlan_filter[index] &= ~(0x8000000000000000ULL >> (vid & 0x3F));
2109
2110        hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2111                                       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2112        if (hret != H_SUCCESS)
2113                ehea_error("modify_ehea_port failed");
2114out:
2115        kfree(cb1);
2116        return;
2117}
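
    /*
     * The CB1 VLAN filter used above is a 4096-bit map stored in u64
     * words, with VLAN id 0 at the most significant bit of word 0;
     * hence the MSB-first shift.  Sketch of the bit arithmetic:
     */
    #if 0
            u64 vlan_filter[64];                             /* 4096 ids   */
            int index = vid / 64;                            /* word index */
            u64 bit = 0x8000000000000000ULL >> (vid & 0x3F); /* MSB first  */

            vlan_filter[index] |= bit;                       /* add vid    */
            vlan_filter[index] &= ~bit;                      /* kill vid   */
    #endif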
2118
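    /*
     * Bring a freshly created QP up by walking it through the firmware
     * state machine: INITIALIZED, then ENABLED | INITIALIZED, then
     * ENABLED | RDY2SND.  Every modify_ehea_qp is preceded by a query
     * so the control register is rewritten on top of the hypervisor's
     * current view of the QP.
     */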
2119int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
2120{
2121        int ret = -EIO;
2122        u64 hret;
2123        u16 dummy16 = 0;
2124        u64 dummy64 = 0;
2125        struct hcp_modify_qp_cb0 *cb0;
2126
2127        cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
2128        if (!cb0) {
2129                ret = -ENOMEM;
2130                goto out;
2131        }
2132
2133        hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2134                                    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2135        if (hret != H_SUCCESS) {
2136                ehea_error("query_ehea_qp failed (1)");
2137                goto out;
2138        }
2139
2140        cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
2141        hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2142                                     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2143                                     &dummy64, &dummy64, &dummy16, &dummy16);
2144        if (hret != H_SUCCESS) {
2145                ehea_error("modify_ehea_qp failed (1)");
2146                goto out;
2147        }
2148
2149        hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2150                                    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2151        if (hret != H_SUCCESS) {
2152                ehea_error("query_ehea_qp failed (2)");
2153                goto out;
2154        }
2155
2156        cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
2157        hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2158                                     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2159                                     &dummy64, &dummy64, &dummy16, &dummy16);
2160        if (hret != H_SUCCESS) {
2161                ehea_error("modify_ehea_qp failed (2)");
2162                goto out;
2163        }
2164
2165        hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2166                                    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2167        if (hret != H_SUCCESS) {
2168                ehea_error("query_ehea_qp failed (3)");
2169                goto out;
2170        }
2171
2172        cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
2173        hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2174                                     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2175                                     &dummy64, &dummy64, &dummy16, &dummy16);
2176        if (hret != H_SUCCESS) {
2177                ehea_error("modify_ehea_qp failed (3)");
2178                goto out;
2179        }
2180
2181        hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2182                                    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2183        if (hret != H_SUCCESS) {
2184                ehea_error("query_ehea_qp failed (4)");
2185                goto out;
2186        }
2187
2188        ret = 0;
2189out:
2190        kfree(cb0);
2191        return ret;
2192}
2193
2194static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
2195                               int add_tx_qps)
2196{
2197        int ret, i;
2198        struct port_res_cfg pr_cfg, pr_cfg_small_rx;
2199        enum ehea_eq_type eq_type = EHEA_EQ;
2200
2201        port->qp_eq = ehea_create_eq(port->adapter, eq_type,
2202                                   EHEA_MAX_ENTRIES_EQ, 1);
2203        if (!port->qp_eq) {
2204                ret = -EINVAL;
2205                ehea_error("ehea_create_eq failed (qp_eq)");
2206                goto out_kill_eq;
2207        }
2208
2209        pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
2210        pr_cfg.max_entries_scq = sq_entries * 2;
2211        pr_cfg.max_entries_sq = sq_entries;
2212        pr_cfg.max_entries_rq1 = rq1_entries;
2213        pr_cfg.max_entries_rq2 = rq2_entries;
2214        pr_cfg.max_entries_rq3 = rq3_entries;
2215
2216        pr_cfg_small_rx.max_entries_rcq = 1;
2217        pr_cfg_small_rx.max_entries_scq = sq_entries;
2218        pr_cfg_small_rx.max_entries_sq = sq_entries;
2219        pr_cfg_small_rx.max_entries_rq1 = 1;
2220        pr_cfg_small_rx.max_entries_rq2 = 1;
2221        pr_cfg_small_rx.max_entries_rq3 = 1;
2222
2223        for (i = 0; i < def_qps; i++) {
2224                ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
2225                if (ret)
2226                        goto out_clean_pr;
2227        }
2228        for (i = def_qps; i < def_qps + add_tx_qps; i++) {
2229                ret = ehea_init_port_res(port, &port->port_res[i],
2230                                         &pr_cfg_small_rx, i);
2231                if (ret)
2232                        goto out_clean_pr;
2233        }
2234
2235        return 0;
2236
2237out_clean_pr:
2238        while (--i >= 0)
2239                ehea_clean_portres(port, &port->port_res[i]);
2240
2241out_kill_eq:
2242        ehea_destroy_eq(port->qp_eq);
2243        return ret;
2244}
2245
2246static int ehea_clean_all_portres(struct ehea_port *port)
2247{
2248        int ret = 0;
2249        int i;
2250
2251        for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
2252                ret |= ehea_clean_portres(port, &port->port_res[i]);
2253
2254        ret |= ehea_destroy_eq(port->qp_eq);
2255
2256        return ret;
2257}
2258
2259static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
2260{
2261        if (adapter->active_ports)
2262                return;
2263
2264        ehea_rem_mr(&adapter->mr);
2265}
2266
2267static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
2268{
2269        if (adapter->active_ports)
2270                return 0;
2271
2272        return ehea_reg_kernel_mr(adapter, &adapter->mr);
2273}
2274
2275static int ehea_up(struct net_device *dev)
2276{
2277        int ret, i;
2278        struct ehea_port *port = netdev_priv(dev);
2279
2280        if (port->state == EHEA_PORT_UP)
2281                return 0;
2282
2283        ret = ehea_port_res_setup(port, port->num_def_qps,
2284                                  port->num_add_tx_qps);
2285        if (ret) {
2286                ehea_error("port_res_failed");
2287                goto out;
2288        }
2289
2290        /* Set default QP for this port */
2291        ret = ehea_configure_port(port);
2292        if (ret) {
2293                ehea_error("ehea_configure_port failed. ret:%d", ret);
2294                goto out_clean_pr;
2295        }
2296
2297        ret = ehea_reg_interrupts(dev);
2298        if (ret) {
2299                ehea_error("reg_interrupts failed. ret:%d", ret);
2300                goto out_clean_pr;
2301        }
2302
2303        for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
2304                ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
2305                if (ret) {
2306                        ehea_error("activate_qp failed");
2307                        goto out_free_irqs;
2308                }
2309        }
2310
2311        for (i = 0; i < port->num_def_qps; i++) {
2312                ret = ehea_fill_port_res(&port->port_res[i]);
2313                if (ret) {
2314                        ehea_error("out_free_irqs");
2315                        goto out_free_irqs;
2316                }
2317        }
2318
2319        ret = 0;
2320        port->state = EHEA_PORT_UP;
2321        goto out;
2322
2323out_free_irqs:
2324        ehea_free_interrupts(dev);
2325
2326out_clean_pr:
2327        ehea_clean_all_portres(port);
2328out:
2329        if (ret)
2330                ehea_info("Failed starting %s. ret=%i", dev->name, ret);
2331
2332        return ret;
2333}
2334
2335static void port_napi_disable(struct ehea_port *port)
2336{
2337        int i;
2338
2339        for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
2340                napi_disable(&port->port_res[i].napi);
2341}
2342
2343static void port_napi_enable(struct ehea_port *port)
2344{
2345        int i;
2346
2347        for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
2348                napi_enable(&port->port_res[i].napi);
2349}
2350
2351static int ehea_open(struct net_device *dev)
2352{
2353        int ret;
2354        struct ehea_port *port = netdev_priv(dev);
2355
2356        down(&port->port_lock);
2357
2358        if (netif_msg_ifup(port))
2359                ehea_info("enabling port %s", dev->name);
2360
2361        ret = ehea_up(dev);
2362        if (!ret) {
2363                port_napi_enable(port);
2364                netif_start_queue(dev);
2365        }
2366
2367        up(&port->port_lock);
2368
2369        return ret;
2370}
2371
2372static int ehea_down(struct net_device *dev)
2373{
2374        int ret;
2375        struct ehea_port *port = netdev_priv(dev);
2376
2377        if (port->state == EHEA_PORT_DOWN)
2378                return 0;
2379
2380        ehea_drop_multicast_list(dev);
2381        ehea_free_interrupts(dev);
2382
2383        port->state = EHEA_PORT_DOWN;
2384
2385        ret = ehea_clean_all_portres(port);
2386        if (ret)
2387                ehea_info("Failed freeing resources for %s. ret=%i",
2388                          dev->name, ret);
2389
2390        return ret;
2391}
2392
2393static int ehea_stop(struct net_device *dev)
2394{
2395        int ret;
2396        struct ehea_port *port = netdev_priv(dev);
2397
2398        if (netif_msg_ifdown(port))
2399                ehea_info("disabling port %s", dev->name);
2400
2401        flush_scheduled_work();
2402        down(&port->port_lock);
2403        netif_stop_queue(dev);
2404        port_napi_disable(port);
2405        ret = ehea_down(dev);
2406        up(&port->port_lock);
2407        return ret;
2408}
2409
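    /*
     * Flag every SWQE in the send queue for purging so the hardware
     * discards the pending work requests instead of transmitting them.
     * The QP is copied to the stack so that stepping through the ring
     * with ehea_get_swqe() leaves the live queue cursors untouched.
     */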
2410void ehea_purge_sq(struct ehea_qp *orig_qp)
2411{
2412        struct ehea_qp qp = *orig_qp;
2413        struct ehea_qp_init_attr *init_attr = &qp.init_attr;
2414        struct ehea_swqe *swqe;
2415        int wqe_index;
2416        int i;
2417
2418        for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
2419                swqe = ehea_get_swqe(&qp, &wqe_index);
2420                swqe->tx_control |= EHEA_SWQE_PURGE;
2421        }
2422}
2423
2424int ehea_stop_qps(struct net_device *dev)
2425{
2426        struct ehea_port *port = netdev_priv(dev);
2427        struct ehea_adapter *adapter = port->adapter;
2428        struct hcp_modify_qp_cb0 *cb0;
2429        int ret = -EIO;
2430        int dret;
2431        int i;
2432        u64 hret;
2433        u64 dummy64 = 0;
2434        u16 dummy16 = 0;
2435
2436        cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
2437        if (!cb0) {
2438                ret = -ENOMEM;
2439                goto out;
2440        }
2441
2442        for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
2443                struct ehea_port_res *pr =  &port->port_res[i];
2444                struct ehea_qp *qp = pr->qp;
2445
2446                /* Purge send queue */
2447                ehea_purge_sq(qp);
2448
2449                /* Disable queue pair */
2450                hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2451                                            EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2452                                            cb0);
2453                if (hret != H_SUCCESS) {
2454                        ehea_error("query_ehea_qp failed (1)");
2455                        goto out;
2456                }
2457
2458                cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
2459                cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;
2460
2461                hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2462                                             EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
2463                                                            1), cb0, &dummy64,
2464                                             &dummy64, &dummy16, &dummy16);
2465                if (hret != H_SUCCESS) {
2466                        ehea_error("modify_ehea_qp failed (1)");
2467                        goto out;
2468                }
2469
2470                hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2471                                            EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2472                                            cb0);
2473                if (hret != H_SUCCESS) {
2474                        ehea_error("query_ehea_qp failed (2)");
2475                        goto out;
2476                }
2477
2478                /* deregister shared memory regions */
2479                dret = ehea_rem_smrs(pr);
2480                if (dret) {
2481                        ehea_error("unreg shared memory region failed");
2482                        goto out;
2483                }
2484        }
2485
2486        ret = 0;
2487out:
2488        kfree(cb0);
2489
2490        return ret;
2491}
2492
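    /*
     * After the kernel memory region has been re-registered (e.g. on a
     * DLPAR memory add), pending RWQEs still carry the old lkey and
     * possibly stale hardware virtual addresses.  Rewrite both in
     * place; as in ehea_purge_sq(), the QP is copied to the stack so
     * the live ring cursors are preserved.
     */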
2493void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
2494{
2495        struct ehea_qp qp = *orig_qp;
2496        struct ehea_qp_init_attr *init_attr = &qp.init_attr;
2497        struct ehea_rwqe *rwqe;
2498        struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
2499        struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
2500        struct sk_buff *skb;
2501        u32 lkey = pr->recv_mr.lkey;
2502        int i;
2503        int index;
2506
2507        for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
2508                rwqe = ehea_get_next_rwqe(&qp, 2);
2509                rwqe->sg_list[0].l_key = lkey;
2510                index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2511                skb = skba_rq2[index];
2512                if (skb)
2513                        rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
2514        }
2515
2516        for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
2517                rwqe = ehea_get_next_rwqe(&qp, 3);
2518                rwqe->sg_list[0].l_key = lkey;
2519                index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2520                skb = skba_rq3[index];
2521                if (skb)
2522                        rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
2523        }
2524}
2525
2526int ehea_restart_qps(struct net_device *dev)
2527{
2528        struct ehea_port *port = netdev_priv(dev);
2529        struct ehea_adapter *adapter = port->adapter;
2530        int ret = 0;
2531        int i;
2532
2533        struct hcp_modify_qp_cb0 *cb0;
2534        u64 hret;
2535        u64 dummy64 = 0;
2536        u16 dummy16 = 0;
2537
2538        cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
2539        if (!cb0) {
2540                ret = -ENOMEM;
2541                goto out;
2542        }
2543
2544        for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
2545                struct ehea_port_res *pr =  &port->port_res[i];
2546                struct ehea_qp *qp = pr->qp;
2547
2548                ret = ehea_gen_smrs(pr);
2549                if (ret) {
2550                        ehea_error("creation of shared memory regions failed");
2551                        goto out;
2552                }
2553
2554                ehea_update_rqs(qp, pr);
2555
2556                /* Enable queue pair */
2557                hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2558                                            EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2559                                            cb0);
2560                if (hret != H_SUCCESS) {
2561                        ehea_error("query_ehea_qp failed (1)");
2562                        goto out;
2563                }
2564
2565                cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
2566                cb0->qp_ctl_reg |= H_QP_CR_ENABLED;
2567
2568                hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2569                                             EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
2570                                                            1), cb0, &dummy64,
2571                                             &dummy64, &dummy16, &dummy16);
2572                if (hret != H_SUCCESS) {
2573                        ehea_error("modify_ehea_qp failed (1)");
2574                        goto out;
2575                }
2576
2577                hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2578                                            EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2579                                            cb0);
2580                if (hret != H_SUCCESS) {
2581                        ehea_error("query_ehea_qp failed (2)");
2582                        goto out;
2583                }
2584
2585                /* refill entire queue */
2586                ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
2587                ehea_refill_rq2(pr, 0);
2588                ehea_refill_rq3(pr, 0);
2589        }
2590out:
2591        kfree(cb0);
2592
2593        return ret;
2594}
2595
2596static void ehea_reset_port(struct work_struct *work)
2597{
2598        int ret;
2599        struct ehea_port *port =
2600                container_of(work, struct ehea_port, reset_task);
2601        struct net_device *dev = port->netdev;
2602
2603        port->resets++;
2604        down(&port->port_lock);
2605        netif_stop_queue(dev);
2606
2607        port_napi_disable(port);
2608
2609        ehea_down(dev);
2610
2611        ret = ehea_up(dev);
2612        if (ret)
2613                goto out;
2614
2615        ehea_set_multicast_list(dev);
2616
2617        if (netif_msg_timer(port))
2618                ehea_info("Device %s reset successfully", dev->name);
2619
2620        port_napi_enable(port);
2621
2622        netif_wake_queue(dev);
2623out:
2624        up(&port->port_lock);
2625        return;
2626}
2627
2628static void ehea_rereg_mrs(struct work_struct *work)
2629{
2630        int ret, i;
2631        struct ehea_adapter *adapter;
2632
2633        down(&dlpar_mem_lock);
2634        ehea_info("LPAR memory enlarged - re-initializing driver");
2635
2636        list_for_each_entry(adapter, &adapter_list, list)
2637                if (adapter->active_ports) {
2638                        /* Shutdown all ports */
2639                        for (i = 0; i < EHEA_MAX_PORTS; i++) {
2640                                struct ehea_port *port = adapter->port[i];
2641
2642                                if (port) {
2643                                        struct net_device *dev = port->netdev;
2644
2645                                        if (dev->flags & IFF_UP) {
2646                                                down(&port->port_lock);
2647                                                netif_stop_queue(dev);
2648                                                ret = ehea_stop_qps(dev);
2649                                                if (ret) {
2650                                                        up(&port->port_lock);
2651                                                        goto out;
2652                                                }
2653                                                port_napi_disable(port);
2654                                                up(&port->port_lock);
2655                                        }
2656                                }
2657                        }
2658
2659                        /* Unregister old memory region */
2660                        ret = ehea_rem_mr(&adapter->mr);
2661                        if (ret) {
2662                                ehea_error("unregister MR failed - driver"
2663                                           " inoperable!");
2664                                goto out;
2665                        }
2666                }
2667
2668        ehea_destroy_busmap();
2669        ret = ehea_create_busmap();
2670        if (ret) {
2671                ehea_error("creating ehea busmap failed");
2672                goto out;
2673        }
2674
2675        clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
2676
2677        list_for_each_entry(adapter, &adapter_list, list)
2678                if (adapter->active_ports) {
2679                        /* Register new memory region */
2680                        ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
2681                        if (ret) {
2682                                ehea_error("register MR failed - driver"
2683                                           " inoperable!");
2684                                goto out;
2685                        }
2686
2687                        /* Restart all ports */
2688                        for (i = 0; i < EHEA_MAX_PORTS; i++) {
2689                                struct ehea_port *port = adapter->port[i];
2690
2691                                if (port) {
2692                                        struct net_device *dev = port->netdev;
2693
2694                                        if (dev->flags & IFF_UP) {
2695                                                down(&port->port_lock);
2696                                                port_napi_enable(port);
2697                                                ret = ehea_restart_qps(dev);
2698                                                if (!ret)
2699                                                        netif_wake_queue(dev);
2700                                                up(&port->port_lock);
2701                                        }
2702                                }
2703                        }
2704                }
2705       up(&dlpar_mem_lock);
2706       ehea_info("re-initializing driver complete");
2707out:
2708        return;
2709}
2710
2711static void ehea_tx_watchdog(struct net_device *dev)
2712{
2713        struct ehea_port *port = netdev_priv(dev);
2714
2715        if (netif_carrier_ok(dev) &&
2716            !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
2717                schedule_work(&port->reset_task);
2718}
2719
2720int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
2721{
2722        struct hcp_query_ehea *cb;
2723        u64 hret;
2724        int ret;
2725
2726        cb = kzalloc(PAGE_SIZE, GFP_KERNEL);
2727        if (!cb) {
2728                ret = -ENOMEM;
2729                goto out;
2730        }
2731
2732        hret = ehea_h_query_ehea(adapter->handle, cb);
2733
2734        if (hret != H_SUCCESS) {
2735                ret = -EIO;
2736                goto out_herr;
2737        }
2738
2739        adapter->max_mc_mac = cb->max_mc_mac - 1;
2740        ret = 0;
2741
2742out_herr:
2743        kfree(cb);
2744out:
2745        return ret;
2746}
2747
2748int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
2749{
2750        struct hcp_ehea_port_cb4 *cb4;
2751        u64 hret;
2752        int ret = 0;
2753
2754        *jumbo = 0;
2755
2756        /* Try to enable jumbo frames; report the result via *jumbo */
2757        cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
2758        if (!cb4) {
2759                ehea_error("no mem for cb4");
2760                ret = -ENOMEM;
2761                goto out;
2762        }
2763
2764        hret = ehea_h_query_ehea_port(port->adapter->handle,
2765                                      port->logical_port_id,
2766                                      H_PORT_CB4, H_PORT_CB4_JUMBO, cb4);
2767        if (hret == H_SUCCESS) {
2768                if (cb4->jumbo_frame) {
2769                        *jumbo = 1;
2770                } else {
2771                        cb4->jumbo_frame = 1;
2772                        hret = ehea_h_modify_ehea_port(port->adapter->handle,
2773                                                       port->logical_port_id,
2774                                                       H_PORT_CB4,
2775                                                       H_PORT_CB4_JUMBO,
2776                                                       cb4);
2777                        if (hret == H_SUCCESS)
2778                                *jumbo = 1;
2779                }
2780        } else {
2781                ret = -EINVAL;
2782        }
2783
2784        kfree(cb4);
2785out:
2786        return ret;
2789}
2790
2791static ssize_t ehea_show_port_id(struct device *dev,
2792                                 struct device_attribute *attr, char *buf)
2793{
2794        struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
2795        return sprintf(buf, "%d", port->logical_port_id);
2796}
2797
2798static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
2799                   NULL);
2800
2801static void __devinit logical_port_release(struct device *dev)
2802{
2803        struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
2804        of_node_put(port->ofdev.node);
2805}
2806
2807static int ehea_driver_sysfs_add(struct device *dev,
2808                                 struct device_driver *driver)
2809{
2810        int ret;
2811
2812        ret = sysfs_create_link(&driver->kobj, &dev->kobj,
2813                                kobject_name(&dev->kobj));
2814        if (ret == 0) {
2815                ret = sysfs_create_link(&dev->kobj, &driver->kobj,
2816                                        "driver");
2817                if (ret)
2818                        sysfs_remove_link(&driver->kobj,
2819                                          kobject_name(&dev->kobj));
2820        }
2821        return ret;
2822}
2823
2824static void ehea_driver_sysfs_remove(struct device *dev,
2825                                     struct device_driver *driver)
2826{
2827        struct device_driver *drv = driver;
2828
2829        if (drv) {
2830                sysfs_remove_link(&drv->kobj, kobject_name(&dev->kobj));
2831                sysfs_remove_link(&dev->kobj, "driver");
2832        }
2833}
2834
2835static struct device *ehea_register_port(struct ehea_port *port,
2836                                         struct device_node *dn)
2837{
2838        int ret;
2839
2840        port->ofdev.node = of_node_get(dn);
2841        port->ofdev.dev.parent = &port->adapter->ofdev->dev;
2842        port->ofdev.dev.bus = &ibmebus_bus_type;
2843
2844        sprintf(port->ofdev.dev.bus_id, "port%d", port_name_cnt++);
2845        port->ofdev.dev.release = logical_port_release;
2846
2847        ret = of_device_register(&port->ofdev);
2848        if (ret) {
2849                ehea_error("failed to register device. ret=%d", ret);
2850                goto out;
2851        }
2852
2853        ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
2854        if (ret) {
2855                ehea_error("failed to register attributes, ret=%d", ret);
2856                goto out_unreg_of_dev;
2857        }
2858
2859        ret = ehea_driver_sysfs_add(&port->ofdev.dev, &ehea_driver.driver);
2860        if (ret) {
2861                ehea_error("failed to register sysfs driver link");
2862                goto out_rem_dev_file;
2863        }
2864
2865        return &port->ofdev.dev;
2866
2867out_rem_dev_file:
2868        device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
2869out_unreg_of_dev:
2870        of_device_unregister(&port->ofdev);
2871out:
2872        return NULL;
2873}
2874
2875static void ehea_unregister_port(struct ehea_port *port)
2876{
2877        ehea_driver_sysfs_remove(&port->ofdev.dev, &ehea_driver.driver);
2878        device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
2879        of_device_unregister(&port->ofdev);
2880}
2881
2882struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
2883                                         u32 logical_port_id,
2884                                         struct device_node *dn)
2885{
2886        int ret;
2887        struct net_device *dev;
2888        struct ehea_port *port;
2889        struct device *port_dev;
2890        int jumbo;
2891
2892        /* allocate memory for the port structures */
2893        dev = alloc_etherdev(sizeof(struct ehea_port));
2894
2895        if (!dev) {
2896                ehea_error("no mem for net_device");
2897                ret = -ENOMEM;
2898                goto out_err;
2899        }
2900
2901        port = netdev_priv(dev);
2902
2903        sema_init(&port->port_lock, 1);
2904        port->state = EHEA_PORT_DOWN;
2905        port->sig_comp_iv = sq_entries / 10;
2906
2907        port->adapter = adapter;
2908        port->netdev = dev;
2909        port->logical_port_id = logical_port_id;
2910
2911        port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);
2912
2913        port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
2914        if (!port->mc_list) {
2915                ret = -ENOMEM;
2916                goto out_free_ethdev;
2917        }
2918
2919        INIT_LIST_HEAD(&port->mc_list->list);
2920
2921        ret = ehea_sense_port_attr(port);
2922        if (ret)
2923                goto out_free_mc_list;
2924
2925        port_dev = ehea_register_port(port, dn);
2926        if (!port_dev)
2927                goto out_free_mc_list;
2928
2929        SET_NETDEV_DEV(dev, port_dev);
2930
2931        /* initialize net_device structure */
2932        memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
2933
2934        dev->open = ehea_open;
2935#ifdef CONFIG_NET_POLL_CONTROLLER
2936        dev->poll_controller = ehea_netpoll;
2937#endif
2938        dev->stop = ehea_stop;
2939        dev->hard_start_xmit = ehea_start_xmit;
2940        dev->get_stats = ehea_get_stats;
2941        dev->set_multicast_list = ehea_set_multicast_list;
2942        dev->set_mac_address = ehea_set_mac_addr;
2943        dev->change_mtu = ehea_change_mtu;
2944        dev->vlan_rx_register = ehea_vlan_rx_register;
2945        dev->vlan_rx_add_vid = ehea_vlan_rx_add_vid;
2946        dev->vlan_rx_kill_vid = ehea_vlan_rx_kill_vid;
2947        dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
2948                      | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX
2949                      | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
2950                      | NETIF_F_LLTX;
2951        dev->tx_timeout = ehea_tx_watchdog;
2952        dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
2953
2954        INIT_WORK(&port->reset_task, ehea_reset_port);
2955
2956        ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
2957        if (ret) {
2958                ret = -EIO;
2959                goto out_unreg_port;
2960        }
2961
2962        ehea_set_ethtool_ops(dev);
2963
2964        ret = register_netdev(dev);
2965        if (ret) {
2966                ehea_error("register_netdev failed. ret=%d", ret);
2967                goto out_dereg_bc;
2968        }
2969
2970        port->lro_max_aggr = lro_max_aggr;
2971
2972        ret = ehea_get_jumboframe_status(port, &jumbo);
2973        if (ret)
2974                ehea_error("failed determining jumbo frame status for %s",
2975                           port->netdev->name);
2976
2977        ehea_info("%s: Jumbo frames are %sabled", dev->name,
2978                  jumbo == 1 ? "en" : "dis");
2979
2980        adapter->active_ports++;
2981
2982        return port;
2983
2984out_dereg_bc:
2985        ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
2986
2987out_unreg_port:
2988        ehea_unregister_port(port);
2989
2990out_free_mc_list:
2991        kfree(port->mc_list);
2992
2993out_free_ethdev:
2994        free_netdev(dev);
2995
2996out_err:
2997        ehea_error("setting up logical port with id=%d failed, ret=%d",
2998                   logical_port_id, ret);
2999        return NULL;
3000}
3001
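/*
 * Tear down a single logical port: the inverse of ehea_setup_single_port(),
 * releasing resources in reverse order of acquisition.
 */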
static void ehea_shutdown_single_port(struct ehea_port *port)
{
        unregister_netdev(port->netdev);
        ehea_unregister_port(port);
        ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
        kfree(port->mc_list);
        free_netdev(port->netdev);
        port->adapter->active_ports--;
}

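/*
 * Walk the children of the adapter's lhea device-tree node and bring up
 * one logical port per child that carries an "ibm,hea-port-no" property.
 * An illustrative (not firmware-accurate) node layout:
 *
 *	lhea@23c00300 {
 *		ibm,hea-handle = <...>;
 *		ethernet@23e00000 {
 *			ibm,hea-port-no = <1>;
 *		};
 *	};
 */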
static int ehea_setup_ports(struct ehea_adapter *adapter)
{
        struct device_node *lhea_dn;
        struct device_node *eth_dn = NULL;

        const u32 *dn_log_port_id;
        int i = 0;

        lhea_dn = adapter->ofdev->node;
        while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

                dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
                                                 NULL);
                if (!dn_log_port_id) {
                        ehea_error("bad device node: eth_dn name=%s",
                                   eth_dn->full_name);
                        continue;
                }

                if (i >= EHEA_MAX_PORTS) {
                        ehea_error("too many ports, skipping logical port %u",
                                   *dn_log_port_id);
                        continue;
                }

                if (ehea_add_adapter_mr(adapter)) {
                        ehea_error("creating MR failed");
                        of_node_put(eth_dn);
                        return -EIO;
                }

                adapter->port[i] = ehea_setup_single_port(adapter,
                                                          *dn_log_port_id,
                                                          eth_dn);
                if (adapter->port[i])
                        ehea_info("%s -> logical port id #%u",
                                  adapter->port[i]->netdev->name,
                                  *dn_log_port_id);
                else
                        ehea_remove_adapter_mr(adapter);

                i++;
        }

        return 0;
}

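/*
 * Find the device-tree child node whose "ibm,hea-port-no" property matches
 * the given logical port id; returns NULL if no such node exists.
 */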
static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
                                           u32 logical_port_id)
{
        struct device_node *lhea_dn;
        struct device_node *eth_dn = NULL;
        const u32 *dn_log_port_id;

        lhea_dn = adapter->ofdev->node;
        while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

                dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
                                                 NULL);
                if (dn_log_port_id && *dn_log_port_id == logical_port_id)
                        return eth_dn;
        }

        return NULL;
}

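/*
 * sysfs store handler for the adapter's "probe_port" attribute: writing a
 * logical port id brings that port up at runtime.  Illustrative use (the
 * exact device path depends on the system):
 *
 *	echo 2 > /sys/bus/ibmebus/devices/<lhea-device>/probe_port
 */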
static ssize_t ehea_probe_port(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf, size_t count)
{
        struct ehea_adapter *adapter = dev->driver_data;
        struct ehea_port *port;
        struct device_node *eth_dn = NULL;
        int i;

        u32 logical_port_id;

        sscanf(buf, "%u", &logical_port_id);

        port = ehea_get_port(adapter, logical_port_id);

        if (port) {
                ehea_info("adding port with logical port id=%u failed. port "
                          "already configured as %s.", logical_port_id,
                          port->netdev->name);
                return -EINVAL;
        }

        eth_dn = ehea_get_eth_dn(adapter, logical_port_id);

        if (!eth_dn) {
                ehea_info("no logical port with id %u found", logical_port_id);
                return -EINVAL;
        }

        if (ehea_add_adapter_mr(adapter)) {
                ehea_error("creating MR failed");
                return -EIO;
        }

        port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);

        of_node_put(eth_dn);

        if (port) {
                for (i = 0; i < EHEA_MAX_PORTS; i++)
                        if (!adapter->port[i]) {
                                adapter->port[i] = port;
                                break;
                        }

                ehea_info("added %s (logical port id=%u)", port->netdev->name,
                          logical_port_id);
        } else {
                ehea_remove_adapter_mr(adapter);
                return -EIO;
        }

        return (ssize_t) count;
}

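/*
 * Counterpart of ehea_probe_port(): writing a logical port id to the
 * "remove_port" attribute shuts that port down and frees its slot, e.g.
 * (path again illustrative):
 *
 *	echo 2 > /sys/bus/ibmebus/devices/<lhea-device>/remove_port
 */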
static ssize_t ehea_remove_port(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
{
        struct ehea_adapter *adapter = dev->driver_data;
        struct ehea_port *port;
        int i;
        u32 logical_port_id;

        sscanf(buf, "%u", &logical_port_id);

        port = ehea_get_port(adapter, logical_port_id);

        if (port) {
                ehea_info("removed %s (logical port id=%u)",
                          port->netdev->name, logical_port_id);

                ehea_shutdown_single_port(port);

                for (i = 0; i < EHEA_MAX_PORTS; i++)
                        if (adapter->port[i] == port) {
                                adapter->port[i] = NULL;
                                break;
                        }
        } else {
                ehea_error("removing port with logical port id=%u failed. "
                           "port not configured.", logical_port_id);
                return -EINVAL;
        }

        ehea_remove_adapter_mr(adapter);

        return (ssize_t) count;
}

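/* Write-only, root-only attributes; no show handlers are provided. */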
static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);

int ehea_create_device_sysfs(struct of_device *dev)
{
        int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
        if (ret)
                goto out;

        ret = device_create_file(&dev->dev, &dev_attr_remove_port);
out:
        return ret;
}

void ehea_remove_device_sysfs(struct of_device *dev)
{
        device_remove_file(&dev->dev, &dev_attr_probe_port);
        device_remove_file(&dev->dev, &dev_attr_remove_port);
}

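/*
 * Bus probe routine: set up the adapter structure, create the notification
 * event queue (NEQ) with its interrupt, expose the sysfs attributes and
 * bring up all ports found in the device tree.
 */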
static int __devinit ehea_probe_adapter(struct of_device *dev,
                                        const struct of_device_id *id)
{
        struct ehea_adapter *adapter;
        const u64 *adapter_handle;
        int ret;

        if (!dev || !dev->node) {
                ehea_error("Invalid ibmebus device probed");
                return -EINVAL;
        }

        adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
        if (!adapter) {
                ret = -ENOMEM;
                dev_err(&dev->dev, "no mem for ehea_adapter\n");
                goto out;
        }

        list_add(&adapter->list, &adapter_list);

        adapter->ofdev = dev;

        adapter_handle = of_get_property(dev->node, "ibm,hea-handle",
                                         NULL);
        if (adapter_handle)
                adapter->handle = *adapter_handle;

        if (!adapter->handle) {
                dev_err(&dev->dev, "failed getting handle for adapter"
                        " '%s'\n", dev->node->full_name);
                ret = -ENODEV;
                goto out_free_ad;
        }

        adapter->pd = EHEA_PD_ID;

        dev->dev.driver_data = adapter;

        /* initialize adapter and ports */
        /* get adapter properties */
        ret = ehea_sense_adapter_attr(adapter);
        if (ret) {
                dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
                goto out_free_ad;
        }

        adapter->neq = ehea_create_eq(adapter,
                                      EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
        if (!adapter->neq) {
                ret = -EIO;
                dev_err(&dev->dev, "NEQ creation failed\n");
                goto out_free_ad;
        }

        tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
                     (unsigned long)adapter);

        ret = ibmebus_request_irq(adapter->neq->attr.ist1,
                                  ehea_interrupt_neq, IRQF_DISABLED,
                                  "ehea_neq", adapter);
        if (ret) {
                dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
                goto out_kill_eq;
        }

        ret = ehea_create_device_sysfs(dev);
        if (ret)
                goto out_free_irq;

        ret = ehea_setup_ports(adapter);
        if (ret) {
                dev_err(&dev->dev, "setup_ports failed\n");
                goto out_rem_dev_sysfs;
        }

        ret = 0;
        goto out;

out_rem_dev_sysfs:
        ehea_remove_device_sysfs(dev);

out_free_irq:
        ibmebus_free_irq(adapter->neq->attr.ist1, adapter);

out_kill_eq:
        ehea_destroy_eq(adapter->neq);

out_free_ad:
        list_del(&adapter->list);
        kfree(adapter);
out:
        return ret;
}

static int __devexit ehea_remove(struct of_device *dev)
{
        struct ehea_adapter *adapter = dev->dev.driver_data;
        int i;

        for (i = 0; i < EHEA_MAX_PORTS; i++)
                if (adapter->port[i]) {
                        ehea_shutdown_single_port(adapter->port[i]);
                        adapter->port[i] = NULL;
                }

        ehea_remove_device_sysfs(dev);

        flush_scheduled_work();

        ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
        tasklet_kill(&adapter->neq_tasklet);

        ehea_destroy_eq(adapter->neq);
        ehea_remove_adapter_mr(adapter);
        list_del(&adapter->list);

        kfree(adapter);

        return 0;
}

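/*
 * Reboot notifier: unbinding the driver on SYS_RESTART frees all eHEA
 * resources before the system restarts.
 */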
static int ehea_reboot_notifier(struct notifier_block *nb,
                                unsigned long action, void *unused)
{
        if (action == SYS_RESTART) {
                ehea_info("Reboot: freeing all eHEA resources");
                ibmebus_unregister_driver(&ehea_driver);
        }
        return NOTIFY_DONE;
}

static struct notifier_block ehea_reboot_nb = {
        .notifier_call = ehea_reboot_notifier,
};

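/*
 * Range-check the queue size module parameters against the driver's
 * supported minimum and maximum entry counts.  This validates bounds
 * only, not that a value has the 2^x - 1 shape the hardware queues
 * presumably expect.
 */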
static int check_module_parm(void)
{
        int ret = 0;

        if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
            (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
                ehea_info("Bad parameter: rq1_entries");
                ret = -EINVAL;
        }
        if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
            (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
                ehea_info("Bad parameter: rq2_entries");
                ret = -EINVAL;
        }
        if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
            (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
                ehea_info("Bad parameter: rq3_entries");
                ret = -EINVAL;
        }
        if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
            (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
                ehea_info("Bad parameter: sq_entries");
                ret = -EINVAL;
        }

        return ret;
}

static ssize_t ehea_show_capabilities(struct device_driver *drv,
                                      char *buf)
{
        return sprintf(buf, "%d", EHEA_CAPABILITIES);
}

static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
                   ehea_show_capabilities, NULL);

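/*
 * Module init: validate parameters, build the busmap, register the reboot
 * notifier and finally the ebus driver itself.  Error paths unwind in
 * reverse order.
 */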
int __init ehea_module_init(void)
{
        int ret;

        printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
               DRV_VERSION);

        INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs);
        sema_init(&dlpar_mem_lock, 1);

        ret = check_module_parm();
        if (ret)
                goto out;

        ret = ehea_create_busmap();
        if (ret)
                goto out;

        register_reboot_notifier(&ehea_reboot_nb);

        ret = ibmebus_register_driver(&ehea_driver);
        if (ret) {
                ehea_error("failed registering eHEA device driver on ebus");
                goto out_unreg_nb;
        }

        ret = driver_create_file(&ehea_driver.driver,
                                 &driver_attr_capabilities);
        if (ret) {
                ehea_error("failed to register capabilities attribute, ret=%d",
                           ret);
                goto out_unreg_driver;
        }

        return 0;

out_unreg_driver:
        ibmebus_unregister_driver(&ehea_driver);
out_unreg_nb:
        unregister_reboot_notifier(&ehea_reboot_nb);
out:
        return ret;
}

static void __exit ehea_module_exit(void)
{
        flush_scheduled_work();
        driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
        ibmebus_unregister_driver(&ehea_driver);
        unregister_reboot_notifier(&ehea_reboot_nb);
        ehea_destroy_busmap();
}

module_init(ehea_module_init);
module_exit(ehea_module_exit);