linux/drivers/infiniband/hw/i40iw/i40iw_cm.c
   1/*******************************************************************************
   2*
   3* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
   4*
   5* This software is available to you under a choice of one of two
   6* licenses.  You may choose to be licensed under the terms of the GNU
   7* General Public License (GPL) Version 2, available from the file
   8* COPYING in the main directory of this source tree, or the
   9* OpenFabrics.org BSD license below:
  10*
  11*   Redistribution and use in source and binary forms, with or
  12*   without modification, are permitted provided that the following
  13*   conditions are met:
  14*
  15*    - Redistributions of source code must retain the above
  16*       copyright notice, this list of conditions and the following
  17*       disclaimer.
  18*
  19*    - Redistributions in binary form must reproduce the above
  20*       copyright notice, this list of conditions and the following
  21*       disclaimer in the documentation and/or other materials
  22*       provided with the distribution.
  23*
  24* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  25* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  26* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  27* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  28* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  29* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  30* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  31* SOFTWARE.
  32*
  33*******************************************************************************/
  34
  35#include <linux/atomic.h>
  36#include <linux/ip.h>
  37#include <linux/tcp.h>
  38#include <linux/init.h>
  39#include <linux/if_arp.h>
  40#include <linux/if_vlan.h>
  41#include <linux/notifier.h>
  42#include <linux/net.h>
  43#include <linux/types.h>
  44#include <linux/timer.h>
  45#include <linux/time.h>
  46#include <linux/delay.h>
  47#include <linux/etherdevice.h>
  48#include <linux/netdevice.h>
  49#include <linux/random.h>
  50#include <linux/list.h>
  51#include <linux/threads.h>
  52#include <linux/highmem.h>
  53#include <net/arp.h>
  54#include <net/ndisc.h>
  55#include <net/neighbour.h>
  56#include <net/route.h>
  57#include <net/addrconf.h>
  58#include <net/ip6_route.h>
  59#include <net/ip_fib.h>
  60#include <net/tcp.h>
  61#include <asm/checksum.h>
  62
  63#include "i40iw.h"
  64
  65static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *);
  66static void i40iw_cm_post_event(struct i40iw_cm_event *event);
  67static void i40iw_disconnect_worker(struct work_struct *work);
  68
  69/**
  70 * i40iw_free_sqbuf - put back puda buffer if refcount = 0
  71 * @vsi: pointer to vsi structure
   72 * @bufp: puda buffer to free
  73 */
  74void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp)
  75{
  76        struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)bufp;
  77        struct i40iw_puda_rsrc *ilq = vsi->ilq;
  78
  79        if (!atomic_dec_return(&buf->refcount))
  80                i40iw_puda_ret_bufpool(ilq, buf);
  81}
  82
  83/**
  84 * i40iw_derive_hw_ird_setting - Calculate IRD
  85 *
  86 * @cm_ird: IRD of connection's node
  87 *
  88 * The ird from the connection is rounded to a supported HW
  89 * setting (2,8,32,64) and then encoded for ird_size field of
  90 * qp_ctx
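      * For example, a requested IRD of 20 is rounded up to 32 and encoded as 2.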
  91 */
  92static u8 i40iw_derive_hw_ird_setting(u16 cm_ird)
  93{
  94        u8 encoded_ird_size;
  95        u8 pof2_cm_ird = 1;
  96
   97        /* round off to the next power of 2 */
  98        while (pof2_cm_ird < cm_ird)
  99                pof2_cm_ird *= 2;
 100
 101        /* ird_size field is encoded in qp_ctx */
 102        switch (pof2_cm_ird) {
 103        case I40IW_HW_IRD_SETTING_64:
 104                encoded_ird_size = 3;
 105                break;
 106        case I40IW_HW_IRD_SETTING_32:
 107        case I40IW_HW_IRD_SETTING_16:
 108                encoded_ird_size = 2;
 109                break;
 110        case I40IW_HW_IRD_SETTING_8:
 111        case I40IW_HW_IRD_SETTING_4:
 112                encoded_ird_size = 1;
 113                break;
 114        case I40IW_HW_IRD_SETTING_2:
 115        default:
 116                encoded_ird_size = 0;
 117                break;
 118        }
 119        return encoded_ird_size;
 120}
 121
 122/**
 123 * i40iw_record_ird_ord - Record IRD/ORD passed in
 124 * @cm_node: connection's node
 125 * @conn_ird: connection IRD
 126 * @conn_ord: connection ORD
 127 */
 128static void i40iw_record_ird_ord(struct i40iw_cm_node *cm_node, u16 conn_ird, u16 conn_ord)
 129{
 130        if (conn_ird > I40IW_MAX_IRD_SIZE)
 131                conn_ird = I40IW_MAX_IRD_SIZE;
 132
 133        if (conn_ord > I40IW_MAX_ORD_SIZE)
 134                conn_ord = I40IW_MAX_ORD_SIZE;
 135
 136        cm_node->ird_size = conn_ird;
 137        cm_node->ord_size = conn_ord;
 138}
 139
 140/**
 141 * i40iw_copy_ip_ntohl - change network to host ip
 142 * @dst: host ip
 143 * @src: big endian
 144 */
 145void i40iw_copy_ip_ntohl(u32 *dst, __be32 *src)
 146{
 147        *dst++ = ntohl(*src++);
 148        *dst++ = ntohl(*src++);
 149        *dst++ = ntohl(*src++);
 150        *dst = ntohl(*src);
 151}
 152
 153/**
 154 * i40iw_copy_ip_htonl - change host addr to network ip
  155 * @dst: big endian (network order) ip
  156 * @src: host order ip
 157 */
 158static inline void i40iw_copy_ip_htonl(__be32 *dst, u32 *src)
 159{
 160        *dst++ = htonl(*src++);
 161        *dst++ = htonl(*src++);
 162        *dst++ = htonl(*src++);
 163        *dst = htonl(*src);
 164}
 165
 166/**
 167 * i40iw_fill_sockaddr4 - get addr info for passive connection
 168 * @cm_node: connection's node
 169 * @event: upper layer's cm event
 170 */
 171static inline void i40iw_fill_sockaddr4(struct i40iw_cm_node *cm_node,
 172                                        struct iw_cm_event *event)
 173{
 174        struct sockaddr_in *laddr = (struct sockaddr_in *)&event->local_addr;
 175        struct sockaddr_in *raddr = (struct sockaddr_in *)&event->remote_addr;
 176
 177        laddr->sin_family = AF_INET;
 178        raddr->sin_family = AF_INET;
 179
 180        laddr->sin_port = htons(cm_node->loc_port);
 181        raddr->sin_port = htons(cm_node->rem_port);
 182
 183        laddr->sin_addr.s_addr = htonl(cm_node->loc_addr[0]);
 184        raddr->sin_addr.s_addr = htonl(cm_node->rem_addr[0]);
 185}
 186
 187/**
 188 * i40iw_fill_sockaddr6 - get ipv6 addr info for passive side
 189 * @cm_node: connection's node
 190 * @event: upper layer's cm event
 191 */
 192static inline void i40iw_fill_sockaddr6(struct i40iw_cm_node *cm_node,
 193                                        struct iw_cm_event *event)
 194{
 195        struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&event->local_addr;
 196        struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)&event->remote_addr;
 197
 198        laddr6->sin6_family = AF_INET6;
 199        raddr6->sin6_family = AF_INET6;
 200
 201        laddr6->sin6_port = htons(cm_node->loc_port);
 202        raddr6->sin6_port = htons(cm_node->rem_port);
 203
 204        i40iw_copy_ip_htonl(laddr6->sin6_addr.in6_u.u6_addr32,
 205                            cm_node->loc_addr);
 206        i40iw_copy_ip_htonl(raddr6->sin6_addr.in6_u.u6_addr32,
 207                            cm_node->rem_addr);
 208}
 209
 210/**
  211 * i40iw_get_addr_info - copy ip/tcp info from cm_node
  212 * @cm_node: contains ip/tcp info
  213 * @cm_info: to get a copy of the cm_node ip/tcp info
  214 */
 215static void i40iw_get_addr_info(struct i40iw_cm_node *cm_node,
 216                                struct i40iw_cm_info *cm_info)
 217{
 218        cm_info->ipv4 = cm_node->ipv4;
 219        cm_info->vlan_id = cm_node->vlan_id;
 220        memcpy(cm_info->loc_addr, cm_node->loc_addr, sizeof(cm_info->loc_addr));
 221        memcpy(cm_info->rem_addr, cm_node->rem_addr, sizeof(cm_info->rem_addr));
 222        cm_info->loc_port = cm_node->loc_port;
 223        cm_info->rem_port = cm_node->rem_port;
 224        cm_info->user_pri = cm_node->user_pri;
 225}
 226
 227/**
 228 * i40iw_get_cmevent_info - for cm event upcall
 229 * @cm_node: connection's node
 230 * @cm_id: upper layers cm struct for the event
 231 * @event: upper layer's cm event
 232 */
 233static inline void i40iw_get_cmevent_info(struct i40iw_cm_node *cm_node,
 234                                          struct iw_cm_id *cm_id,
 235                                          struct iw_cm_event *event)
 236{
 237        memcpy(&event->local_addr, &cm_id->m_local_addr,
 238               sizeof(event->local_addr));
 239        memcpy(&event->remote_addr, &cm_id->m_remote_addr,
 240               sizeof(event->remote_addr));
 241        if (cm_node) {
 242                event->private_data = (void *)cm_node->pdata_buf;
 243                event->private_data_len = (u8)cm_node->pdata.size;
 244                event->ird = cm_node->ird_size;
 245                event->ord = cm_node->ord_size;
 246        }
 247}
 248
 249/**
 250 * i40iw_send_cm_event - upcall cm's event handler
 251 * @cm_node: connection's node
 252 * @cm_id: upper layer's cm info struct
 253 * @type: Event type to indicate
 254 * @status: status for the event type
 255 */
 256static int i40iw_send_cm_event(struct i40iw_cm_node *cm_node,
 257                               struct iw_cm_id *cm_id,
 258                               enum iw_cm_event_type type,
 259                               int status)
 260{
 261        struct iw_cm_event event;
 262
 263        memset(&event, 0, sizeof(event));
 264        event.event = type;
 265        event.status = status;
 266        switch (type) {
 267        case IW_CM_EVENT_CONNECT_REQUEST:
 268                if (cm_node->ipv4)
 269                        i40iw_fill_sockaddr4(cm_node, &event);
 270                else
 271                        i40iw_fill_sockaddr6(cm_node, &event);
 272                event.provider_data = (void *)cm_node;
 273                event.private_data = (void *)cm_node->pdata_buf;
 274                event.private_data_len = (u8)cm_node->pdata.size;
 275                event.ird = cm_node->ird_size;
 276                break;
 277        case IW_CM_EVENT_CONNECT_REPLY:
 278                i40iw_get_cmevent_info(cm_node, cm_id, &event);
 279                break;
 280        case IW_CM_EVENT_ESTABLISHED:
 281                event.ird = cm_node->ird_size;
 282                event.ord = cm_node->ord_size;
 283                break;
 284        case IW_CM_EVENT_DISCONNECT:
 285                break;
 286        case IW_CM_EVENT_CLOSE:
 287                break;
 288        default:
 289                i40iw_pr_err("event type received type = %d\n", type);
 290                return -1;
 291        }
 292        return cm_id->event_handler(cm_id, &event);
 293}
 294
 295/**
 296 * i40iw_create_event - create cm event
 297 * @cm_node: connection's node
 298 * @type: Event type to generate
 299 */
 300static struct i40iw_cm_event *i40iw_create_event(struct i40iw_cm_node *cm_node,
 301                                                 enum i40iw_cm_event_type type)
 302{
 303        struct i40iw_cm_event *event;
 304
 305        if (!cm_node->cm_id)
 306                return NULL;
 307
 308        event = kzalloc(sizeof(*event), GFP_ATOMIC);
 309
 310        if (!event)
 311                return NULL;
 312
 313        event->type = type;
 314        event->cm_node = cm_node;
 315        memcpy(event->cm_info.rem_addr, cm_node->rem_addr, sizeof(event->cm_info.rem_addr));
 316        memcpy(event->cm_info.loc_addr, cm_node->loc_addr, sizeof(event->cm_info.loc_addr));
 317        event->cm_info.rem_port = cm_node->rem_port;
 318        event->cm_info.loc_port = cm_node->loc_port;
 319        event->cm_info.cm_id = cm_node->cm_id;
 320
 321        i40iw_debug(cm_node->dev,
 322                    I40IW_DEBUG_CM,
 323                    "node=%p event=%p type=%u dst=%pI4 src=%pI4\n",
 324                    cm_node,
 325                    event,
 326                    type,
 327                    event->cm_info.loc_addr,
 328                    event->cm_info.rem_addr);
 329
 330        i40iw_cm_post_event(event);
 331        return event;
 332}
 333
 334/**
 335 * i40iw_free_retrans_entry - free send entry
 336 * @cm_node: connection's node
 337 */
 338static void i40iw_free_retrans_entry(struct i40iw_cm_node *cm_node)
 339{
 340        struct i40iw_device *iwdev = cm_node->iwdev;
 341        struct i40iw_timer_entry *send_entry;
 342
 343        send_entry = cm_node->send_entry;
 344        if (send_entry) {
 345                cm_node->send_entry = NULL;
 346                i40iw_free_sqbuf(&iwdev->vsi, (void *)send_entry->sqbuf);
 347                kfree(send_entry);
 348                atomic_dec(&cm_node->ref_count);
 349        }
 350}
 351
 352/**
 353 * i40iw_cleanup_retrans_entry - free send entry with lock
 354 * @cm_node: connection's node
 355 */
 356static void i40iw_cleanup_retrans_entry(struct i40iw_cm_node *cm_node)
 357{
 358        unsigned long flags;
 359
 360        spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
 361        i40iw_free_retrans_entry(cm_node);
 362        spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
 363}
 364
 365/**
 366 * i40iw_form_cm_frame - get a free packet and build frame
  367 * @cm_node: connection's node info to use in frame
  368 * @options: pointer to options info
  369 * @hdr: pointer to mpa header
 370 * @pdata: pointer to private data
 371 * @flags:  indicates FIN or ACK
 372 */
 373static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
 374                                                  struct i40iw_kmem_info *options,
 375                                                  struct i40iw_kmem_info *hdr,
 376                                                  struct i40iw_kmem_info *pdata,
 377                                                  u8 flags)
 378{
 379        struct i40iw_puda_buf *sqbuf;
 380        struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;
 381        u8 *buf;
 382
 383        struct tcphdr *tcph;
 384        struct iphdr *iph;
 385        struct ipv6hdr *ip6h;
 386        struct ethhdr *ethh;
 387        u16 packetsize;
 388        u16 eth_hlen = ETH_HLEN;
 389        u32 opts_len = 0;
 390        u32 pd_len = 0;
 391        u32 hdr_len = 0;
 392        u16 vtag;
 393
 394        sqbuf = i40iw_puda_get_bufpool(vsi->ilq);
 395        if (!sqbuf)
 396                return NULL;
 397        buf = sqbuf->mem.va;
 398
 399        if (options)
 400                opts_len = (u32)options->size;
 401
 402        if (hdr)
 403                hdr_len = hdr->size;
 404
 405        if (pdata)
 406                pd_len = pdata->size;
 407
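             /* a valid VLAN id (< VLAN_TAG_PRESENT) adds a 4-byte 802.1Q tag */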
 408        if (cm_node->vlan_id < VLAN_TAG_PRESENT)
 409                eth_hlen += 4;
 410
 411        if (cm_node->ipv4)
 412                packetsize = sizeof(*iph) + sizeof(*tcph);
 413        else
 414                packetsize = sizeof(*ip6h) + sizeof(*tcph);
 415        packetsize += opts_len + hdr_len + pd_len;
 416
 417        memset(buf, 0x00, eth_hlen + packetsize);
 418
 419        sqbuf->totallen = packetsize + eth_hlen;
 420        sqbuf->maclen = eth_hlen;
 421        sqbuf->tcphlen = sizeof(*tcph) + opts_len;
 422        sqbuf->scratch = (void *)cm_node;
 423
 424        ethh = (struct ethhdr *)buf;
 425        buf += eth_hlen;
 426
 427        if (cm_node->ipv4) {
 428                sqbuf->ipv4 = true;
 429
 430                iph = (struct iphdr *)buf;
 431                buf += sizeof(*iph);
 432                tcph = (struct tcphdr *)buf;
 433                buf += sizeof(*tcph);
 434
 435                ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
 436                ether_addr_copy(ethh->h_source, cm_node->loc_mac);
 437                if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
 438                        ((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
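                             /* TCI: priority in top 3 bits, VLAN id in low 12 */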
 439                        vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id;
 440                        ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
 441
 442                        ((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IP);
 443                } else {
 444                        ethh->h_proto = htons(ETH_P_IP);
 445                }
 446
 447                iph->version = IPVERSION;
  448                iph->ihl = 5;   /* 5 * 4Byte words, IP header len */
 449                iph->tos = cm_node->tos;
 450                iph->tot_len = htons(packetsize);
 451                iph->id = htons(++cm_node->tcp_cntxt.loc_id);
 452
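                     /* don't-fragment bit set, fragment offset zero */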
 453                iph->frag_off = htons(0x4000);
 454                iph->ttl = 0x40;
 455                iph->protocol = IPPROTO_TCP;
 456                iph->saddr = htonl(cm_node->loc_addr[0]);
 457                iph->daddr = htonl(cm_node->rem_addr[0]);
 458        } else {
 459                sqbuf->ipv4 = false;
 460                ip6h = (struct ipv6hdr *)buf;
 461                buf += sizeof(*ip6h);
 462                tcph = (struct tcphdr *)buf;
 463                buf += sizeof(*tcph);
 464
 465                ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
 466                ether_addr_copy(ethh->h_source, cm_node->loc_mac);
 467                if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
 468                        ((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
 469                        vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id;
 470                        ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
 471                        ((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IPV6);
 472                } else {
 473                        ethh->h_proto = htons(ETH_P_IPV6);
 474                }
 475                ip6h->version = 6;
 476                ip6h->priority = cm_node->tos >> 4;
 477                ip6h->flow_lbl[0] = cm_node->tos << 4;
 478                ip6h->flow_lbl[1] = 0;
 479                ip6h->flow_lbl[2] = 0;
 480                ip6h->payload_len = htons(packetsize - sizeof(*ip6h));
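                     /* next header 6 == IPPROTO_TCP */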
 481                ip6h->nexthdr = 6;
 482                ip6h->hop_limit = 128;
 483                i40iw_copy_ip_htonl(ip6h->saddr.in6_u.u6_addr32,
 484                                    cm_node->loc_addr);
 485                i40iw_copy_ip_htonl(ip6h->daddr.in6_u.u6_addr32,
 486                                    cm_node->rem_addr);
 487        }
 488
 489        tcph->source = htons(cm_node->loc_port);
 490        tcph->dest = htons(cm_node->rem_port);
 491
 492        tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num);
 493
 494        if (flags & SET_ACK) {
 495                cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt;
 496                tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num);
 497                tcph->ack = 1;
 498        } else {
 499                tcph->ack_seq = 0;
 500        }
 501
 502        if (flags & SET_SYN) {
 503                cm_node->tcp_cntxt.loc_seq_num++;
 504                tcph->syn = 1;
 505        } else {
 506                cm_node->tcp_cntxt.loc_seq_num += hdr_len + pd_len;
 507        }
 508
 509        if (flags & SET_FIN) {
 510                cm_node->tcp_cntxt.loc_seq_num++;
 511                tcph->fin = 1;
 512        }
 513
 514        if (flags & SET_RST)
 515                tcph->rst = 1;
 516
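             /* data offset in 32-bit words; options rounded up to a 4-byte multiple */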
 517        tcph->doff = (u16)((sizeof(*tcph) + opts_len + 3) >> 2);
 518        sqbuf->tcphlen = tcph->doff << 2;
 519        tcph->window = htons(cm_node->tcp_cntxt.rcv_wnd);
 520        tcph->urg_ptr = 0;
 521
 522        if (opts_len) {
 523                memcpy(buf, options->addr, opts_len);
 524                buf += opts_len;
 525        }
 526
 527        if (hdr_len) {
 528                memcpy(buf, hdr->addr, hdr_len);
 529                buf += hdr_len;
 530        }
 531
 532        if (pdata && pdata->addr)
 533                memcpy(buf, pdata->addr, pdata->size);
 534
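             /* buffer starts with one reference; i40iw_free_sqbuf returns it to the pool at zero */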
 535        atomic_set(&sqbuf->refcount, 1);
 536
 537        return sqbuf;
 538}
 539
 540/**
 541 * i40iw_send_reset - Send RST packet
 542 * @cm_node: connection's node
 543 */
 544static int i40iw_send_reset(struct i40iw_cm_node *cm_node)
 545{
 546        struct i40iw_puda_buf *sqbuf;
 547        int flags = SET_RST | SET_ACK;
 548
 549        sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, flags);
 550        if (!sqbuf) {
 551                i40iw_pr_err("no sqbuf\n");
 552                return -1;
 553        }
 554
 555        return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 0, 1);
 556}
 557
 558/**
 559 * i40iw_active_open_err - send event for active side cm error
 560 * @cm_node: connection's node
 561 * @reset: Flag to send reset or not
 562 */
 563static void i40iw_active_open_err(struct i40iw_cm_node *cm_node, bool reset)
 564{
 565        i40iw_cleanup_retrans_entry(cm_node);
 566        cm_node->cm_core->stats_connect_errs++;
 567        if (reset) {
 568                i40iw_debug(cm_node->dev,
 569                            I40IW_DEBUG_CM,
 570                            "%s cm_node=%p state=%d\n",
 571                            __func__,
 572                            cm_node,
 573                            cm_node->state);
 574                atomic_inc(&cm_node->ref_count);
 575                i40iw_send_reset(cm_node);
 576        }
 577
 578        cm_node->state = I40IW_CM_STATE_CLOSED;
 579        i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
 580}
 581
 582/**
 583 * i40iw_passive_open_err - handle passive side cm error
 584 * @cm_node: connection's node
 585 * @reset: send reset or just free cm_node
 586 */
 587static void i40iw_passive_open_err(struct i40iw_cm_node *cm_node, bool reset)
 588{
 589        i40iw_cleanup_retrans_entry(cm_node);
 590        cm_node->cm_core->stats_passive_errs++;
 591        cm_node->state = I40IW_CM_STATE_CLOSED;
 592        i40iw_debug(cm_node->dev,
 593                    I40IW_DEBUG_CM,
 594                    "%s cm_node=%p state =%d\n",
 595                    __func__,
 596                    cm_node,
 597                    cm_node->state);
 598        if (reset)
 599                i40iw_send_reset(cm_node);
 600        else
 601                i40iw_rem_ref_cm_node(cm_node);
 602}
 603
 604/**
 605 * i40iw_event_connect_error - to create connect error event
 606 * @event: cm information for connect event
 607 */
 608static void i40iw_event_connect_error(struct i40iw_cm_event *event)
 609{
 610        struct i40iw_qp *iwqp;
 611        struct iw_cm_id *cm_id;
 612
 613        cm_id = event->cm_node->cm_id;
 614        if (!cm_id)
 615                return;
 616
 617        iwqp = cm_id->provider_data;
 618
 619        if (!iwqp || !iwqp->iwdev)
 620                return;
 621
 622        iwqp->cm_id = NULL;
 623        cm_id->provider_data = NULL;
 624        i40iw_send_cm_event(event->cm_node, cm_id,
 625                            IW_CM_EVENT_CONNECT_REPLY,
 626                            -ECONNRESET);
 627        cm_id->rem_ref(cm_id);
 628        i40iw_rem_ref_cm_node(event->cm_node);
 629}
 630
 631/**
  632 * i40iw_process_options - process options from a received TCP segment
 633 * @cm_node: connection's node
 634 * @optionsloc: point to start of options
 635 * @optionsize: size of all options
 636 * @syn_packet: flag if syn packet
 637 */
 638static int i40iw_process_options(struct i40iw_cm_node *cm_node,
 639                                 u8 *optionsloc,
 640                                 u32 optionsize,
 641                                 u32 syn_packet)
 642{
 643        u32 tmp;
 644        u32 offset = 0;
 645        union all_known_options *all_options;
 646        char got_mss_option = 0;
 647
 648        while (offset < optionsize) {
 649                all_options = (union all_known_options *)(optionsloc + offset);
 650                switch (all_options->as_base.optionnum) {
 651                case OPTION_NUMBER_END:
 652                        offset = optionsize;
 653                        break;
 654                case OPTION_NUMBER_NONE:
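                             /* single-byte NOP option used for padding */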
 655                        offset += 1;
 656                        continue;
 657                case OPTION_NUMBER_MSS:
 658                        i40iw_debug(cm_node->dev,
 659                                    I40IW_DEBUG_CM,
 660                                    "%s: MSS Length: %d Offset: %d Size: %d\n",
 661                                    __func__,
 662                                    all_options->as_mss.length,
 663                                    offset,
 664                                    optionsize);
 665                        got_mss_option = 1;
 666                        if (all_options->as_mss.length != 4)
 667                                return -1;
 668                        tmp = ntohs(all_options->as_mss.mss);
 669                        if (tmp > 0 && tmp < cm_node->tcp_cntxt.mss)
 670                                cm_node->tcp_cntxt.mss = tmp;
 671                        break;
 672                case OPTION_NUMBER_WINDOW_SCALE:
 673                        cm_node->tcp_cntxt.snd_wscale =
 674                            all_options->as_windowscale.shiftcount;
 675                        break;
 676                default:
 677                        i40iw_debug(cm_node->dev,
 678                                    I40IW_DEBUG_CM,
 679                                    "TCP Option not understood: %x\n",
 680                                    all_options->as_base.optionnum);
 681                        break;
 682                }
 683                offset += all_options->as_base.length;
 684        }
 685        if (!got_mss_option && syn_packet)
 686                cm_node->tcp_cntxt.mss = I40IW_CM_DEFAULT_MSS;
 687        return 0;
 688}
 689
 690/**
  691 * i40iw_handle_tcp_options - process TCP options and update the send window
 692 * @cm_node: connection's node
 693 * @tcph: pointer tcp header
 694 * @optionsize: size of options rcvd
 695 * @passive: active or passive flag
 696 */
 697static int i40iw_handle_tcp_options(struct i40iw_cm_node *cm_node,
 698                                    struct tcphdr *tcph,
 699                                    int optionsize,
 700                                    int passive)
 701{
 702        u8 *optionsloc = (u8 *)&tcph[1];
 703
 704        if (optionsize) {
 705                if (i40iw_process_options(cm_node,
 706                                          optionsloc,
 707                                          optionsize,
 708                                          (u32)tcph->syn)) {
 709                        i40iw_debug(cm_node->dev,
 710                                    I40IW_DEBUG_CM,
 711                                    "%s: Node %p, Sending RESET\n",
 712                                    __func__,
 713                                    cm_node);
 714                        if (passive)
 715                                i40iw_passive_open_err(cm_node, true);
 716                        else
 717                                i40iw_active_open_err(cm_node, true);
 718                        return -1;
 719                }
 720        }
 721
 722        cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window) <<
 723            cm_node->tcp_cntxt.snd_wscale;
 724
 725        if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd)
 726                cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd;
 727        return 0;
 728}
 729
 730/**
 731 * i40iw_build_mpa_v1 - build a MPA V1 frame
 732 * @cm_node: connection's node
  733 * @start_addr: buffer start address
      * @mpa_key: to do read0 or write0
 734 */
 735static void i40iw_build_mpa_v1(struct i40iw_cm_node *cm_node,
 736                               void *start_addr,
 737                               u8 mpa_key)
 738{
 739        struct ietf_mpa_v1 *mpa_frame = (struct ietf_mpa_v1 *)start_addr;
 740
 741        switch (mpa_key) {
 742        case MPA_KEY_REQUEST:
 743                memcpy(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE);
 744                break;
 745        case MPA_KEY_REPLY:
 746                memcpy(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
 747                break;
 748        default:
 749                break;
 750        }
 751        mpa_frame->flags = IETF_MPA_FLAGS_CRC;
 752        mpa_frame->rev = cm_node->mpa_frame_rev;
 753        mpa_frame->priv_data_len = htons(cm_node->pdata.size);
 754}
 755
 756/**
 757 * i40iw_build_mpa_v2 - build a MPA V2 frame
 758 * @cm_node: connection's node
 759 * @start_addr: buffer start address
 760 * @mpa_key: to do read0 or write0
 761 */
 762static void i40iw_build_mpa_v2(struct i40iw_cm_node *cm_node,
 763                               void *start_addr,
 764                               u8 mpa_key)
 765{
 766        struct ietf_mpa_v2 *mpa_frame = (struct ietf_mpa_v2 *)start_addr;
 767        struct ietf_rtr_msg *rtr_msg = &mpa_frame->rtr_msg;
 768        u16 ctrl_ird, ctrl_ord;
 769
  770        /* fill in the MPA v1 portion of the frame */
 771        i40iw_build_mpa_v1(cm_node, start_addr, mpa_key);
 772        mpa_frame->flags |= IETF_MPA_V2_FLAG;
 773        mpa_frame->priv_data_len += htons(IETF_RTR_MSG_SIZE);
 774
 775        /* initialize RTR msg */
 776        if (cm_node->mpav2_ird_ord == IETF_NO_IRD_ORD) {
 777                ctrl_ird = IETF_NO_IRD_ORD;
 778                ctrl_ord = IETF_NO_IRD_ORD;
 779        } else {
 780                ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?
 781                        IETF_NO_IRD_ORD : cm_node->ird_size;
 782                ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
 783                        IETF_NO_IRD_ORD : cm_node->ord_size;
 784        }
 785
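             /* always advertise peer-to-peer RTR mode in the IRD control field */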
 786        ctrl_ird |= IETF_PEER_TO_PEER;
 787
 788        switch (mpa_key) {
 789        case MPA_KEY_REQUEST:
 790                ctrl_ord |= IETF_RDMA0_WRITE;
 791                ctrl_ord |= IETF_RDMA0_READ;
 792                break;
 793        case MPA_KEY_REPLY:
 794                switch (cm_node->send_rdma0_op) {
 795                case SEND_RDMA_WRITE_ZERO:
 796                        ctrl_ord |= IETF_RDMA0_WRITE;
 797                        break;
 798                case SEND_RDMA_READ_ZERO:
 799                        ctrl_ord |= IETF_RDMA0_READ;
 800                        break;
 801                }
 802                break;
 803        default:
 804                break;
 805        }
 806        rtr_msg->ctrl_ird = htons(ctrl_ird);
 807        rtr_msg->ctrl_ord = htons(ctrl_ord);
 808}
 809
 810/**
 811 * i40iw_cm_build_mpa_frame - build mpa frame for mpa version 1 or version 2
 812 * @cm_node: connection's node
  813 * @mpa: mpa data buffer
 814 * @mpa_key: to do read0 or write0
 815 */
 816static int i40iw_cm_build_mpa_frame(struct i40iw_cm_node *cm_node,
 817                                    struct i40iw_kmem_info *mpa,
 818                                    u8 mpa_key)
 819{
 820        int hdr_len = 0;
 821
 822        switch (cm_node->mpa_frame_rev) {
 823        case IETF_MPA_V1:
 824                hdr_len = sizeof(struct ietf_mpa_v1);
 825                i40iw_build_mpa_v1(cm_node, mpa->addr, mpa_key);
 826                break;
 827        case IETF_MPA_V2:
 828                hdr_len = sizeof(struct ietf_mpa_v2);
 829                i40iw_build_mpa_v2(cm_node, mpa->addr, mpa_key);
 830                break;
 831        default:
 832                break;
 833        }
 834
 835        return hdr_len;
 836}
 837
 838/**
 839 * i40iw_send_mpa_request - active node send mpa request to passive node
 840 * @cm_node: connection's node
 841 */
 842static int i40iw_send_mpa_request(struct i40iw_cm_node *cm_node)
 843{
 844        struct i40iw_puda_buf *sqbuf;
 845
 846        if (!cm_node) {
 847                i40iw_pr_err("cm_node == NULL\n");
 848                return -1;
 849        }
 850
 851        cm_node->mpa_hdr.addr = &cm_node->mpa_frame;
 852        cm_node->mpa_hdr.size = i40iw_cm_build_mpa_frame(cm_node,
 853                                                         &cm_node->mpa_hdr,
 854                                                         MPA_KEY_REQUEST);
 855        if (!cm_node->mpa_hdr.size) {
 856                i40iw_pr_err("mpa size = %d\n", cm_node->mpa_hdr.size);
 857                return -1;
 858        }
 859
 860        sqbuf = i40iw_form_cm_frame(cm_node,
 861                                    NULL,
 862                                    &cm_node->mpa_hdr,
 863                                    &cm_node->pdata,
 864                                    SET_ACK);
 865        if (!sqbuf) {
 866                i40iw_pr_err("sq_buf == NULL\n");
 867                return -1;
 868        }
 869        return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
 870}
 871
 872/**
  873 * i40iw_send_mpa_reject - send an mpa reject frame
 874 * @cm_node: connection's node
 875 * @pdata: reject data for connection
 876 * @plen: length of reject data
 877 */
 878static int i40iw_send_mpa_reject(struct i40iw_cm_node *cm_node,
 879                                 const void *pdata,
 880                                 u8 plen)
 881{
 882        struct i40iw_puda_buf *sqbuf;
 883        struct i40iw_kmem_info priv_info;
 884
 885        cm_node->mpa_hdr.addr = &cm_node->mpa_frame;
 886        cm_node->mpa_hdr.size = i40iw_cm_build_mpa_frame(cm_node,
 887                                                         &cm_node->mpa_hdr,
 888                                                         MPA_KEY_REPLY);
 889
 890        cm_node->mpa_frame.flags |= IETF_MPA_FLAGS_REJECT;
 891        priv_info.addr = (void *)pdata;
 892        priv_info.size = plen;
 893
 894        sqbuf = i40iw_form_cm_frame(cm_node,
 895                                    NULL,
 896                                    &cm_node->mpa_hdr,
 897                                    &priv_info,
 898                                    SET_ACK | SET_FIN);
 899        if (!sqbuf) {
 900                i40iw_pr_err("no sqbuf\n");
 901                return -ENOMEM;
 902        }
 903        cm_node->state = I40IW_CM_STATE_FIN_WAIT1;
 904        return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
 905}
 906
 907/**
  908 * i40iw_parse_mpa - process an IETF MPA frame
 909 * @cm_node: connection's node
 910 * @buffer: Data pointer
 911 * @type: to return accept or reject
 912 * @len: Len of mpa buffer
 913 */
 914static int i40iw_parse_mpa(struct i40iw_cm_node *cm_node, u8 *buffer, u32 *type, u32 len)
 915{
 916        struct ietf_mpa_v1 *mpa_frame;
 917        struct ietf_mpa_v2 *mpa_v2_frame;
 918        struct ietf_rtr_msg *rtr_msg;
 919        int mpa_hdr_len;
 920        int priv_data_len;
 921
 922        *type = I40IW_MPA_REQUEST_ACCEPT;
 923
 924        if (len < sizeof(struct ietf_mpa_v1)) {
 925                i40iw_pr_err("ietf buffer small (%x)\n", len);
 926                return -1;
 927        }
 928
 929        mpa_frame = (struct ietf_mpa_v1 *)buffer;
 930        mpa_hdr_len = sizeof(struct ietf_mpa_v1);
 931        priv_data_len = ntohs(mpa_frame->priv_data_len);
 932
 933        if (priv_data_len > IETF_MAX_PRIV_DATA_LEN) {
 934                i40iw_pr_err("large pri_data %d\n", priv_data_len);
 935                return -1;
 936        }
 937        if (mpa_frame->rev != IETF_MPA_V1 && mpa_frame->rev != IETF_MPA_V2) {
 938                i40iw_pr_err("unsupported mpa rev = %d\n", mpa_frame->rev);
 939                return -1;
 940        }
 941        if (mpa_frame->rev > cm_node->mpa_frame_rev) {
 942                i40iw_pr_err("rev %d\n", mpa_frame->rev);
 943                return -1;
 944        }
 945        cm_node->mpa_frame_rev = mpa_frame->rev;
 946
 947        if (cm_node->state != I40IW_CM_STATE_MPAREQ_SENT) {
 948                if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE)) {
 949                        i40iw_pr_err("Unexpected MPA Key received\n");
 950                        return -1;
 951                }
 952        } else {
 953                if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE)) {
 954                        i40iw_pr_err("Unexpected MPA Key received\n");
 955                        return -1;
 956                }
 957        }
 958
 959        if (priv_data_len + mpa_hdr_len > len) {
 960                i40iw_pr_err("ietf buffer len(%x + %x != %x)\n",
 961                             priv_data_len, mpa_hdr_len, len);
 962                return -1;
 963        }
 964        if (len > MAX_CM_BUFFER) {
 965                i40iw_pr_err("ietf buffer large len = %d\n", len);
 966                return -1;
 967        }
 968
 969        switch (mpa_frame->rev) {
 970        case IETF_MPA_V2:{
 971                        u16 ird_size;
 972                        u16 ord_size;
 973                        u16 ctrl_ord;
 974                        u16 ctrl_ird;
 975
 976                        mpa_v2_frame = (struct ietf_mpa_v2 *)buffer;
 977                        mpa_hdr_len += IETF_RTR_MSG_SIZE;
 978                        rtr_msg = &mpa_v2_frame->rtr_msg;
 979
 980                        /* parse rtr message */
 981                        ctrl_ord = ntohs(rtr_msg->ctrl_ord);
 982                        ctrl_ird = ntohs(rtr_msg->ctrl_ird);
 983                        ird_size = ctrl_ird & IETF_NO_IRD_ORD;
 984                        ord_size = ctrl_ord & IETF_NO_IRD_ORD;
 985
 986                        if (!(ctrl_ird & IETF_PEER_TO_PEER))
 987                                return -1;
 988
 989                        if (ird_size == IETF_NO_IRD_ORD || ord_size == IETF_NO_IRD_ORD) {
 990                                cm_node->mpav2_ird_ord = IETF_NO_IRD_ORD;
 991                                goto negotiate_done;
 992                        }
 993
 994                        if (cm_node->state != I40IW_CM_STATE_MPAREQ_SENT) {
 995                                /* responder */
 996                                if (!ord_size && (ctrl_ord & IETF_RDMA0_READ))
 997                                        cm_node->ird_size = 1;
 998                                if (cm_node->ord_size > ird_size)
 999                                        cm_node->ord_size = ird_size;
1000                        } else {
1001                                /* initiator */
1002                                if (!ird_size && (ctrl_ord & IETF_RDMA0_READ))
1003                                        return -1;
1004                                if (cm_node->ord_size > ird_size)
1005                                        cm_node->ord_size = ird_size;
1006
1007                                if (cm_node->ird_size < ord_size)
1008                                        /* no resources available */
1009                                        return -1;
1010                        }
1011
1012negotiate_done:
1013                        if (ctrl_ord & IETF_RDMA0_READ)
1014                                cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
1015                        else if (ctrl_ord & IETF_RDMA0_WRITE)
1016                                cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO;
1017                        else    /* Not supported RDMA0 operation */
1018                                return -1;
1019                        i40iw_debug(cm_node->dev, I40IW_DEBUG_CM,
1020                                    "MPAV2: Negotiated ORD: %d, IRD: %d\n",
1021                                    cm_node->ord_size, cm_node->ird_size);
1022                        break;
1023                }
1024                break;
1025        case IETF_MPA_V1:
1026        default:
1027                break;
1028        }
1029
1030        memcpy(cm_node->pdata_buf, buffer + mpa_hdr_len, priv_data_len);
1031        cm_node->pdata.size = priv_data_len;
1032
1033        if (mpa_frame->flags & IETF_MPA_FLAGS_REJECT)
1034                *type = I40IW_MPA_REQUEST_REJECT;
1035
1036        if (mpa_frame->flags & IETF_MPA_FLAGS_MARKERS)
1037                cm_node->snd_mark_en = true;
1038
1039        return 0;
1040}
1041
1042/**
 1043 * i40iw_schedule_cm_timer - schedule send or close timer
 1044 * @cm_node: connection's node
 1045 * @sqbuf: buffer to send
 1046 * @type: if it is send or close
1047 * @send_retrans: if rexmits to be done
1048 * @close_when_complete: is cm_node to be removed
1049 *
1050 * note - cm_node needs to be protected before calling this. Encase in:
 1051 *              atomic_inc(&cm_node->ref_count);
 1052 *              i40iw_schedule_cm_timer(...)
 1053 *              i40iw_rem_ref_cm_node(cm_node);
1054 */
1055int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
1056                            struct i40iw_puda_buf *sqbuf,
1057                            enum i40iw_timer_type type,
1058                            int send_retrans,
1059                            int close_when_complete)
1060{
1061        struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;
1062        struct i40iw_cm_core *cm_core = cm_node->cm_core;
1063        struct i40iw_timer_entry *new_send;
1064        int ret = 0;
1065        u32 was_timer_set;
1066        unsigned long flags;
1067
1068        new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
1069        if (!new_send) {
1070                i40iw_free_sqbuf(vsi, (void *)sqbuf);
1071                return -ENOMEM;
1072        }
1073        new_send->retrycount = I40IW_DEFAULT_RETRYS;
1074        new_send->retranscount = I40IW_DEFAULT_RETRANS;
1075        new_send->sqbuf = sqbuf;
1076        new_send->timetosend = jiffies;
1077        new_send->type = type;
1078        new_send->send_retrans = send_retrans;
1079        new_send->close_when_complete = close_when_complete;
1080
1081        if (type == I40IW_TIMER_TYPE_CLOSE) {
1082                new_send->timetosend += (HZ / 10);
1083                if (cm_node->close_entry) {
1084                        kfree(new_send);
1085                        i40iw_free_sqbuf(vsi, (void *)sqbuf);
1086                        i40iw_pr_err("already close entry\n");
1087                        return -EINVAL;
1088                }
1089                cm_node->close_entry = new_send;
1090        }
1091
1092        if (type == I40IW_TIMER_TYPE_SEND) {
1093                spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
1094                cm_node->send_entry = new_send;
1095                atomic_inc(&cm_node->ref_count);
1096                spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
1097                new_send->timetosend = jiffies + I40IW_RETRY_TIMEOUT;
1098
1099                atomic_inc(&sqbuf->refcount);
1100                i40iw_puda_send_buf(vsi->ilq, sqbuf);
1101                if (!send_retrans) {
1102                        i40iw_cleanup_retrans_entry(cm_node);
1103                        if (close_when_complete)
1104                                i40iw_rem_ref_cm_node(cm_node);
1105                        return ret;
1106                }
1107        }
1108
1109        spin_lock_irqsave(&cm_core->ht_lock, flags);
1110        was_timer_set = timer_pending(&cm_core->tcp_timer);
1111
1112        if (!was_timer_set) {
1113                cm_core->tcp_timer.expires = new_send->timetosend;
1114                add_timer(&cm_core->tcp_timer);
1115        }
1116        spin_unlock_irqrestore(&cm_core->ht_lock, flags);
1117
1118        return ret;
1119}
1120
1121/**
1122 * i40iw_retrans_expired - Could not rexmit the packet
1123 * @cm_node: connection's node
1124 */
1125static void i40iw_retrans_expired(struct i40iw_cm_node *cm_node)
1126{
1127        struct iw_cm_id *cm_id = cm_node->cm_id;
1128        enum i40iw_cm_node_state state = cm_node->state;
1129
1130        cm_node->state = I40IW_CM_STATE_CLOSED;
1131        switch (state) {
1132        case I40IW_CM_STATE_SYN_RCVD:
1133        case I40IW_CM_STATE_CLOSING:
1134                i40iw_rem_ref_cm_node(cm_node);
1135                break;
1136        case I40IW_CM_STATE_FIN_WAIT1:
1137        case I40IW_CM_STATE_LAST_ACK:
1138                if (cm_node->cm_id)
1139                        cm_id->rem_ref(cm_id);
1140                i40iw_send_reset(cm_node);
1141                break;
1142        default:
1143                atomic_inc(&cm_node->ref_count);
1144                i40iw_send_reset(cm_node);
1145                i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
1146                break;
1147        }
1148}
1149
1150/**
1151 * i40iw_handle_close_entry - for handling retry/timeouts
1152 * @cm_node: connection's node
 1153 * @rem_node: flag to remove cm_node
1154 */
1155static void i40iw_handle_close_entry(struct i40iw_cm_node *cm_node, u32 rem_node)
1156{
1157        struct i40iw_timer_entry *close_entry = cm_node->close_entry;
1158        struct iw_cm_id *cm_id = cm_node->cm_id;
1159        struct i40iw_qp *iwqp;
1160        unsigned long flags;
1161
1162        if (!close_entry)
1163                return;
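             /* for close entries the sqbuf field carries the QP pointer, not a send buffer */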
1164        iwqp = (struct i40iw_qp *)close_entry->sqbuf;
1165        if (iwqp) {
1166                spin_lock_irqsave(&iwqp->lock, flags);
1167                if (iwqp->cm_id) {
1168                        iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
1169                        iwqp->hw_iwarp_state = I40IW_QP_STATE_ERROR;
1170                        iwqp->last_aeq = I40IW_AE_RESET_SENT;
1171                        iwqp->ibqp_state = IB_QPS_ERR;
1172                        spin_unlock_irqrestore(&iwqp->lock, flags);
1173                        i40iw_cm_disconn(iwqp);
1174                } else {
1175                        spin_unlock_irqrestore(&iwqp->lock, flags);
1176                }
1177        } else if (rem_node) {
1178                /* TIME_WAIT state */
1179                i40iw_rem_ref_cm_node(cm_node);
1180        }
1181        if (cm_id)
1182                cm_id->rem_ref(cm_id);
1183        kfree(close_entry);
1184        cm_node->close_entry = NULL;
1185}
1186
1187/**
1188 * i40iw_cm_timer_tick - system's timer expired callback
 1189 * @pass: points to cm_core
1190 */
1191static void i40iw_cm_timer_tick(unsigned long pass)
1192{
1193        unsigned long nexttimeout = jiffies + I40IW_LONG_TIME;
1194        struct i40iw_cm_node *cm_node;
1195        struct i40iw_timer_entry *send_entry, *close_entry;
1196        struct list_head *list_core_temp;
1197        struct i40iw_sc_vsi *vsi;
1198        struct list_head *list_node;
1199        struct i40iw_cm_core *cm_core = (struct i40iw_cm_core *)pass;
1200        u32 settimer = 0;
1201        unsigned long timetosend;
1202        struct i40iw_sc_dev *dev;
1203        unsigned long flags;
1204
1205        struct list_head timer_list;
1206
1207        INIT_LIST_HEAD(&timer_list);
1208        spin_lock_irqsave(&cm_core->ht_lock, flags);
1209
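             /*
              * take a reference on each node with a pending close or send entry
              * and collect it on a local list so the table lock can be dropped
              * while the entries are processed
              */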
1210        list_for_each_safe(list_node, list_core_temp, &cm_core->connected_nodes) {
1211                cm_node = container_of(list_node, struct i40iw_cm_node, list);
1212                if (cm_node->close_entry || cm_node->send_entry) {
1213                        atomic_inc(&cm_node->ref_count);
1214                        list_add(&cm_node->timer_entry, &timer_list);
1215                }
1216        }
1217        spin_unlock_irqrestore(&cm_core->ht_lock, flags);
1218
1219        list_for_each_safe(list_node, list_core_temp, &timer_list) {
1220                cm_node = container_of(list_node,
1221                                       struct i40iw_cm_node,
1222                                       timer_entry);
1223                close_entry = cm_node->close_entry;
1224
1225                if (close_entry) {
1226                        if (time_after(close_entry->timetosend, jiffies)) {
1227                                if (nexttimeout > close_entry->timetosend ||
1228                                    !settimer) {
1229                                        nexttimeout = close_entry->timetosend;
1230                                        settimer = 1;
1231                                }
1232                        } else {
1233                                i40iw_handle_close_entry(cm_node, 1);
1234                        }
1235                }
1236
1237                spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
1238
1239                send_entry = cm_node->send_entry;
1240                if (!send_entry)
1241                        goto done;
1242                if (time_after(send_entry->timetosend, jiffies)) {
1243                        if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
1244                                if ((nexttimeout > send_entry->timetosend) ||
1245                                    !settimer) {
1246                                        nexttimeout = send_entry->timetosend;
1247                                        settimer = 1;
1248                                }
1249                        } else {
1250                                i40iw_free_retrans_entry(cm_node);
1251                        }
1252                        goto done;
1253                }
1254
1255                if ((cm_node->state == I40IW_CM_STATE_OFFLOADED) ||
1256                    (cm_node->state == I40IW_CM_STATE_CLOSED)) {
1257                        i40iw_free_retrans_entry(cm_node);
1258                        goto done;
1259                }
1260
1261                if (!send_entry->retranscount || !send_entry->retrycount) {
1262                        i40iw_free_retrans_entry(cm_node);
1263
1264                        spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
1265                        i40iw_retrans_expired(cm_node);
1266                        cm_node->state = I40IW_CM_STATE_CLOSED;
1267                        spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
1268                        goto done;
1269                }
1270                cm_node->cm_core->stats_pkt_retrans++;
1271                spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
1272
1273                vsi = &cm_node->iwdev->vsi;
1274                dev = cm_node->dev;
1275                atomic_inc(&send_entry->sqbuf->refcount);
1276                i40iw_puda_send_buf(vsi->ilq, send_entry->sqbuf);
1277                spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
1278                if (send_entry->send_retrans) {
1279                        send_entry->retranscount--;
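                             /* back off: timeout doubles per retransmission (capped below) */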
1280                        timetosend = (I40IW_RETRY_TIMEOUT <<
1281                                      (I40IW_DEFAULT_RETRANS -
1282                                       send_entry->retranscount));
1283
1284                        send_entry->timetosend = jiffies +
1285                            min(timetosend, I40IW_MAX_TIMEOUT);
1286                        if (nexttimeout > send_entry->timetosend || !settimer) {
1287                                nexttimeout = send_entry->timetosend;
1288                                settimer = 1;
1289                        }
1290                } else {
1291                        int close_when_complete;
1292
1293                        close_when_complete = send_entry->close_when_complete;
1294                        i40iw_debug(cm_node->dev,
1295                                    I40IW_DEBUG_CM,
1296                                    "cm_node=%p state=%d\n",
1297                                    cm_node,
1298                                    cm_node->state);
1299                        i40iw_free_retrans_entry(cm_node);
1300                        if (close_when_complete)
1301                                i40iw_rem_ref_cm_node(cm_node);
1302                }
1303done:
1304                spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
1305                i40iw_rem_ref_cm_node(cm_node);
1306        }
1307
1308        if (settimer) {
1309                spin_lock_irqsave(&cm_core->ht_lock, flags);
1310                if (!timer_pending(&cm_core->tcp_timer)) {
1311                        cm_core->tcp_timer.expires = nexttimeout;
1312                        add_timer(&cm_core->tcp_timer);
1313                }
1314                spin_unlock_irqrestore(&cm_core->ht_lock, flags);
1315        }
1316}
1317
1318/**
1319 * i40iw_send_syn - send SYN packet
1320 * @cm_node: connection's node
1321 * @sendack: flag to set ACK bit or not
1322 */
1323int i40iw_send_syn(struct i40iw_cm_node *cm_node, u32 sendack)
1324{
1325        struct i40iw_puda_buf *sqbuf;
1326        int flags = SET_SYN;
1327        char optionsbuffer[sizeof(struct option_mss) +
1328                           sizeof(struct option_windowscale) +
1329                           sizeof(struct option_base) + TCP_OPTIONS_PADDING];
1330        struct i40iw_kmem_info opts;
1331
1332        int optionssize = 0;
1333        /* Sending MSS option */
1334        union all_known_options *options;
1335
1336        opts.addr = optionsbuffer;
1337        if (!cm_node) {
1338                i40iw_pr_err("no cm_node\n");
1339                return -EINVAL;
1340        }
1341
1342        options = (union all_known_options *)&optionsbuffer[optionssize];
1343        options->as_mss.optionnum = OPTION_NUMBER_MSS;
1344        options->as_mss.length = sizeof(struct option_mss);
1345        options->as_mss.mss = htons(cm_node->tcp_cntxt.mss);
1346        optionssize += sizeof(struct option_mss);
1347
1348        options = (union all_known_options *)&optionsbuffer[optionssize];
1349        options->as_windowscale.optionnum = OPTION_NUMBER_WINDOW_SCALE;
1350        options->as_windowscale.length = sizeof(struct option_windowscale);
1351        options->as_windowscale.shiftcount = cm_node->tcp_cntxt.rcv_wscale;
1352        optionssize += sizeof(struct option_windowscale);
1353        options = (union all_known_options *)&optionsbuffer[optionssize];
1354        options->as_end = OPTION_NUMBER_END;
1355        optionssize += 1;
1356
1357        if (sendack)
1358                flags |= SET_ACK;
1359
1360        opts.size = optionssize;
1361
1362        sqbuf = i40iw_form_cm_frame(cm_node, &opts, NULL, NULL, flags);
1363        if (!sqbuf) {
1364                i40iw_pr_err("no sqbuf\n");
1365                return -1;
1366        }
1367        return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
1368}
1369
1370/**
1371 * i40iw_send_ack - Send ACK packet
1372 * @cm_node: connection's node
1373 */
1374static void i40iw_send_ack(struct i40iw_cm_node *cm_node)
1375{
1376        struct i40iw_puda_buf *sqbuf;
1377        struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;
1378
1379        sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK);
1380        if (sqbuf)
1381                i40iw_puda_send_buf(vsi->ilq, sqbuf);
1382        else
1383                i40iw_pr_err("no sqbuf\n");
1384}
1385
1386/**
1387 * i40iw_send_fin - Send FIN pkt
1388 * @cm_node: connection's node
1389 */
1390static int i40iw_send_fin(struct i40iw_cm_node *cm_node)
1391{
1392        struct i40iw_puda_buf *sqbuf;
1393
1394        sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK | SET_FIN);
1395        if (!sqbuf) {
1396                i40iw_pr_err("no sqbuf\n");
1397                return -1;
1398        }
1399        return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
1400}
1401
1402/**
1403 * i40iw_find_node - find a cm node that matches the reference cm node
1404 * @cm_core: cm's core
1405 * @rem_port: remote tcp port num
1406 * @rem_addr: remote ip addr
1407 * @loc_port: local tcp port num
1408 * @loc_addr: loc ip addr
1409 * @add_refcnt: flag to increment refcount of cm_node
1410 */
1411struct i40iw_cm_node *i40iw_find_node(struct i40iw_cm_core *cm_core,
1412                                      u16 rem_port,
1413                                      u32 *rem_addr,
1414                                      u16 loc_port,
1415                                      u32 *loc_addr,
1416                                      bool add_refcnt)
1417{
1418        struct list_head *hte;
1419        struct i40iw_cm_node *cm_node;
1420        unsigned long flags;
1421
1422        hte = &cm_core->connected_nodes;
1423
1424        /* walk list and find cm_node associated with this session ID */
1425        spin_lock_irqsave(&cm_core->ht_lock, flags);
1426        list_for_each_entry(cm_node, hte, list) {
1427                if (!memcmp(cm_node->loc_addr, loc_addr, sizeof(cm_node->loc_addr)) &&
1428                    (cm_node->loc_port == loc_port) &&
1429                    !memcmp(cm_node->rem_addr, rem_addr, sizeof(cm_node->rem_addr)) &&
1430                    (cm_node->rem_port == rem_port)) {
1431                        if (add_refcnt)
1432                                atomic_inc(&cm_node->ref_count);
1433                        spin_unlock_irqrestore(&cm_core->ht_lock, flags);
1434                        return cm_node;
1435                }
1436        }
1437        spin_unlock_irqrestore(&cm_core->ht_lock, flags);
1438
1439        /* no owner node */
1440        return NULL;
1441}
1442
1443/**
1444 * i40iw_find_listener - find a cm node listening on this addr-port pair
1445 * @cm_core: cm's core
1446 * @dst_addr: listener ip addr
1447 * @dst_port: listener tcp port num
 * @vlan_id: vlan id of the listener
1448 * @listener_state: state to match with listen node's
1449 */
1450static struct i40iw_cm_listener *i40iw_find_listener(
1451                                                     struct i40iw_cm_core *cm_core,
1452                                                     u32 *dst_addr,
1453                                                     u16 dst_port,
1454                                                     u16 vlan_id,
1455                                                     enum i40iw_cm_listener_state
1456                                                     listener_state)
1457{
1458        struct i40iw_cm_listener *listen_node;
1459        static const u32 ip_zero[4] = { 0, 0, 0, 0 };
1460        u32 listen_addr[4];
1461        u16 listen_port;
1462        unsigned long flags;
1463
1464        /* walk the listen list and find a listener matching this addr/port pair */
1465        spin_lock_irqsave(&cm_core->listen_list_lock, flags);
1466        list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
1467                memcpy(listen_addr, listen_node->loc_addr, sizeof(listen_addr));
1468                listen_port = listen_node->loc_port;
1469                /* compare addr/port pair (an all-zero listen addr matches any); return node if matched */
1470                if ((!memcmp(listen_addr, dst_addr, sizeof(listen_addr)) ||
1471                     !memcmp(listen_addr, ip_zero, sizeof(listen_addr))) &&
1472                     (listen_port == dst_port) &&
1473                     (listener_state & listen_node->listener_state)) {
1474                        atomic_inc(&listen_node->ref_count);
1475                        spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
1476                        return listen_node;
1477                }
1478        }
1479        spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
1480        return NULL;
1481}
1482
1483/**
1484 * i40iw_add_hte_node - add a cm node to the hash table
1485 * @cm_core: cm's core
1486 * @cm_node: connection's node
1487 */
1488static void i40iw_add_hte_node(struct i40iw_cm_core *cm_core,
1489                               struct i40iw_cm_node *cm_node)
1490{
1491        struct list_head *hte;
1492        unsigned long flags;
1493
1494        if (!cm_node || !cm_core) {
1495                i40iw_pr_err("cm_node or cm_core == NULL\n");
1496                return;
1497        }
1498        spin_lock_irqsave(&cm_core->ht_lock, flags);
1499
1500        /* get a handle on the hash table element (list head for this slot) */
1501        hte = &cm_core->connected_nodes;
1502        list_add_tail(&cm_node->list, hte);
1503        spin_unlock_irqrestore(&cm_core->ht_lock, flags);
1504}
1505
1506/**
1507 * i40iw_listen_port_in_use - determine if port is in use
1508 * @cm_core: cm's core
 * @port: listen port number
1509 */
1510static bool i40iw_listen_port_in_use(struct i40iw_cm_core *cm_core, u16 port)
1511{
1512        struct i40iw_cm_listener *listen_node;
1513        unsigned long flags;
1514        bool ret = false;
1515
1516        spin_lock_irqsave(&cm_core->listen_list_lock, flags);
1517        list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
1518                if (listen_node->loc_port == port) {
1519                        ret = true;
1520                        break;
1521                }
1522        }
1523        spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
1524        return ret;
1525}
1526
1527/**
1528 * i40iw_del_multiple_qhash - Remove qhash and child listens
1529 * @iwdev: iWarp device
1530 * @cm_info: CM info for parent listen node
1531 * @cm_parent_listen_node: The parent listen node
1532 */
1533static enum i40iw_status_code i40iw_del_multiple_qhash(
1534                                                       struct i40iw_device *iwdev,
1535                                                       struct i40iw_cm_info *cm_info,
1536                                                       struct i40iw_cm_listener *cm_parent_listen_node)
1537{
1538        struct i40iw_cm_listener *child_listen_node;
1539        enum i40iw_status_code ret = I40IW_ERR_CONFIG;
1540        struct list_head *pos, *tpos;
1541        unsigned long flags;
1542
1543        spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
1544        list_for_each_safe(pos, tpos, &cm_parent_listen_node->child_listen_list) {
1545                child_listen_node = list_entry(pos, struct i40iw_cm_listener, child_listen_list);
1546                if (child_listen_node->ipv4)
1547                        i40iw_debug(&iwdev->sc_dev,
1548                                    I40IW_DEBUG_CM,
1549                                    "removing child listen for IP=%pI4, port=%d, vlan=%d\n",
1550                                    child_listen_node->loc_addr,
1551                                    child_listen_node->loc_port,
1552                                    child_listen_node->vlan_id);
1553                else
1554                        i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
1555                                    "removing child listen for IP=%pI6, port=%d, vlan=%d\n",
1556                                    child_listen_node->loc_addr,
1557                                    child_listen_node->loc_port,
1558                                    child_listen_node->vlan_id);
1559                list_del(pos);
1560                memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
1561                       sizeof(cm_info->loc_addr));
1562                cm_info->vlan_id = child_listen_node->vlan_id;
1563                if (child_listen_node->qhash_set) {
1564                        ret = i40iw_manage_qhash(iwdev, cm_info,
1565                                                 I40IW_QHASH_TYPE_TCP_SYN,
1566                                                 I40IW_QHASH_MANAGE_TYPE_DELETE,
1567                                                 NULL, false);
1568                        child_listen_node->qhash_set = false;
1569                } else {
1570                        ret = I40IW_SUCCESS;
1571                }
1572                i40iw_debug(&iwdev->sc_dev,
1573                            I40IW_DEBUG_CM,
1574                            "freed pointer = %p\n",
1575                            child_listen_node);
1576                kfree(child_listen_node);
1577                cm_parent_listen_node->cm_core->stats_listen_nodes_destroyed++;
1578        }
1579        spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
1580
1581        return ret;
1582}
1583
1584/**
1585 * i40iw_netdev_vlan_ipv6 - Gets the netdev and mac
1586 * @addr: local IPv6 address
1587 * @vlan_id: vlan id for the given IPv6 address
1588 * @mac: mac address for the given IPv6 address
1589 *
1590 * Returns the net_device of the IPv6 address and also sets the
1591 * vlan id and mac for that address.
1592 */
1593static struct net_device *i40iw_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac)
1594{
1595        struct net_device *ip_dev = NULL;
1596        struct in6_addr laddr6;
1597
1598        if (!IS_ENABLED(CONFIG_IPV6))
1599                return NULL;
1600        i40iw_copy_ip_htonl(laddr6.in6_u.u6_addr32, addr);
1601        if (vlan_id)
1602                *vlan_id = I40IW_NO_VLAN;
1603        if (mac)
1604                eth_zero_addr(mac);
1605        rcu_read_lock();
1606        for_each_netdev_rcu(&init_net, ip_dev) {
1607                if (ipv6_chk_addr(&init_net, &laddr6, ip_dev, 1)) {
1608                        if (vlan_id)
1609                                *vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
1610                        if (ip_dev->dev_addr && mac)
1611                                ether_addr_copy(mac, ip_dev->dev_addr);
1612                        break;
1613                }
1614        }
1615        rcu_read_unlock();
1616        return ip_dev;
1617}
1618
1619/**
1620 * i40iw_get_vlan_ipv4 - Returns the vlan_id for IPv4 address
1621 * @addr: local IPv4 address
1622 */
1623static u16 i40iw_get_vlan_ipv4(u32 *addr)
1624{
1625        struct net_device *netdev;
1626        u16 vlan_id = I40IW_NO_VLAN;
1627
1628        netdev = ip_dev_find(&init_net, htonl(addr[0]));
1629        if (netdev) {
1630                vlan_id = rdma_vlan_dev_vlan_id(netdev);
1631                dev_put(netdev);
1632        }
1633        return vlan_id;
1634}
1635
1636/**
1637 * i40iw_add_mqh_6 - Adds multiple qhashes for IPv6
1638 * @iwdev: iWarp device
1639 * @cm_info: CM info for parent listen node
1640 * @cm_parent_listen_node: The parent listen node
1641 *
1642 * Adds a qhash and a child listen node for every IPv6 address
1643 * on the adapter and adds the associated qhash filter
1644 */
1645static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
1646                                              struct i40iw_cm_info *cm_info,
1647                                              struct i40iw_cm_listener *cm_parent_listen_node)
1648{
1649        struct net_device *ip_dev;
1650        struct inet6_dev *idev;
1651        struct inet6_ifaddr *ifp, *tmp;
1652        enum i40iw_status_code ret = 0;
1653        struct i40iw_cm_listener *child_listen_node;
1654        unsigned long flags;
1655
1656        rtnl_lock();
1657        for_each_netdev_rcu(&init_net, ip_dev) {
1658                if ((((rdma_vlan_dev_vlan_id(ip_dev) < I40IW_NO_VLAN) &&
1659                      (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) ||
1660                     (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) {
1661                        idev = __in6_dev_get(ip_dev);
1662                        if (!idev) {
1663                                i40iw_pr_err("idev == NULL\n");
1664                                break;
1665                        }
1666                        list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
1667                                i40iw_debug(&iwdev->sc_dev,
1668                                            I40IW_DEBUG_CM,
1669                                            "IP=%pI6, vlan_id=%d, MAC=%pM\n",
1670                                            &ifp->addr,
1671                                            rdma_vlan_dev_vlan_id(ip_dev),
1672                                            ip_dev->dev_addr);
1673                                child_listen_node =
1674                                        kzalloc(sizeof(*child_listen_node), GFP_ATOMIC);
1675                                i40iw_debug(&iwdev->sc_dev,
1676                                            I40IW_DEBUG_CM,
1677                                            "Allocating child listener %p\n",
1678                                            child_listen_node);
1679                                if (!child_listen_node) {
1680                                        ret = I40IW_ERR_NO_MEMORY;
1681                                        goto exit;
1682                                }
1683                                cm_info->vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
1684                                cm_parent_listen_node->vlan_id = cm_info->vlan_id;
1685
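                                    /* child listener starts as a copy of the parent, then gets this interface's address */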
1686                                memcpy(child_listen_node, cm_parent_listen_node,
1687                                       sizeof(*child_listen_node));
1688
1689                                i40iw_copy_ip_ntohl(child_listen_node->loc_addr,
1690                                                    ifp->addr.in6_u.u6_addr32);
1691                                memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
1692                                       sizeof(cm_info->loc_addr));
1693
1694                                ret = i40iw_manage_qhash(iwdev, cm_info,
1695                                                         I40IW_QHASH_TYPE_TCP_SYN,
1696                                                         I40IW_QHASH_MANAGE_TYPE_ADD,
1697                                                         NULL, true);
1698                                if (!ret) {
1699                                        child_listen_node->qhash_set = true;
1700                                        spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
1701                                        list_add(&child_listen_node->child_listen_list,
1702                                                 &cm_parent_listen_node->child_listen_list);
1703                                        spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
1704                                        cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
1705                                } else {
1706                                        kfree(child_listen_node);
1707                                }
1708                        }
1709                }
1710        }
1711exit:
1712        rtnl_unlock();
1713        return ret;
1714}
1715
1716/**
1717 * i40iw_add_mqh_4 - Adds multiple qhashes for IPv4
1718 * @iwdev: iWarp device
1719 * @cm_info: CM info for parent listen node
1720 * @cm_parent_listen_node: The parent listen node
1721 *
1722 * Adds a qhash and a child listen node for every IPv4 address
1723 * on the adapter and adds the associated qhash filter
1724 */
1725static enum i40iw_status_code i40iw_add_mqh_4(
1726                                struct i40iw_device *iwdev,
1727                                struct i40iw_cm_info *cm_info,
1728                                struct i40iw_cm_listener *cm_parent_listen_node)
1729{
1730        struct net_device *dev;
1731        struct in_device *idev;
1732        struct i40iw_cm_listener *child_listen_node;
1733        enum i40iw_status_code ret = 0;
1734        unsigned long flags;
1735
1736        rtnl_lock();
1737        for_each_netdev(&init_net, dev) {
1738                if ((((rdma_vlan_dev_vlan_id(dev) < I40IW_NO_VLAN) &&
1739                      (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||
1740                    (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) {
1741                        idev = in_dev_get(dev);
1742                        for_ifa(idev) {
1743                                i40iw_debug(&iwdev->sc_dev,
1744                                            I40IW_DEBUG_CM,
1745                                            "Allocating child CM Listener for IP=%pI4, vlan_id=%d, MAC=%pM\n",
1746                                            &ifa->ifa_address,
1747                                            rdma_vlan_dev_vlan_id(dev),
1748                                            dev->dev_addr);
1749                                child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_ATOMIC);
1750                                cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
1751                                i40iw_debug(&iwdev->sc_dev,
1752                                            I40IW_DEBUG_CM,
1753                                            "Allocating child listener %p\n",
1754                                            child_listen_node);
1755                                if (!child_listen_node) {
1756                                        in_dev_put(idev);
1757                                        ret = I40IW_ERR_NO_MEMORY;
1758                                        goto exit;
1759                                }
1760                                cm_info->vlan_id = rdma_vlan_dev_vlan_id(dev);
1761                                cm_parent_listen_node->vlan_id = cm_info->vlan_id;
1762                                memcpy(child_listen_node,
1763                                       cm_parent_listen_node,
1764                                       sizeof(*child_listen_node));
1765
1766                                child_listen_node->loc_addr[0] = ntohl(ifa->ifa_address);
1767                                memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
1768                                       sizeof(cm_info->loc_addr));
1769
1770                                ret = i40iw_manage_qhash(iwdev,
1771                                                         cm_info,
1772                                                         I40IW_QHASH_TYPE_TCP_SYN,
1773                                                         I40IW_QHASH_MANAGE_TYPE_ADD,
1774                                                         NULL,
1775                                                         true);
1776                                if (!ret) {
1777                                        child_listen_node->qhash_set = true;
1778                                        spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
1779                                        list_add(&child_listen_node->child_listen_list,
1780                                                 &cm_parent_listen_node->child_listen_list);
1781                                        spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
1782                                } else {
1783                                        kfree(child_listen_node);
1784                                        cm_parent_listen_node->cm_core->stats_listen_nodes_created--;
1785                                }
1786                        }
1787                        endfor_ifa(idev);
1788                        in_dev_put(idev);
1789                }
1790        }
1791exit:
1792        rtnl_unlock();
1793        return ret;
1794}
1795
1796/**
1797 * i40iw_dec_refcnt_listen - delete listener and associated cm nodes
1798 * @cm_core: cm's core
 * @listener: passive connection's listener
1799 * @free_hanging_nodes: flag to free associated cm_nodes
1800 * @apbvt_del: flag to delete the apbvt
1801 */
1802static int i40iw_dec_refcnt_listen(struct i40iw_cm_core *cm_core,
1803                                   struct i40iw_cm_listener *listener,
1804                                   int free_hanging_nodes, bool apbvt_del)
1805{
1806        int ret = -EINVAL;
1807        int err = 0;
1808        struct list_head *list_pos;
1809        struct list_head *list_temp;
1810        struct i40iw_cm_node *cm_node;
1811        struct list_head reset_list;
1812        struct i40iw_cm_info nfo;
1813        struct i40iw_cm_node *loopback;
1814        enum i40iw_cm_node_state old_state;
1815        unsigned long flags;
1816
1817        /* free non-accelerated child nodes for this listener */
1818        INIT_LIST_HEAD(&reset_list);
1819        if (free_hanging_nodes) {
1820                spin_lock_irqsave(&cm_core->ht_lock, flags);
1821                list_for_each_safe(list_pos, list_temp, &cm_core->connected_nodes) {
1822                        cm_node = container_of(list_pos, struct i40iw_cm_node, list);
1823                        if ((cm_node->listener == listener) && !cm_node->accelerated) {
1824                                atomic_inc(&cm_node->ref_count);
1825                                list_add(&cm_node->reset_entry, &reset_list);
1826                        }
1827                }
1828                spin_unlock_irqrestore(&cm_core->ht_lock, flags);
1829        }
1830
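            /* tear down the collected non-accelerated nodes outside the ht_lock */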
1831        list_for_each_safe(list_pos, list_temp, &reset_list) {
1832                cm_node = container_of(list_pos, struct i40iw_cm_node, reset_entry);
1833                loopback = cm_node->loopbackpartner;
1834                if (cm_node->state >= I40IW_CM_STATE_FIN_WAIT1) {
1835                        i40iw_rem_ref_cm_node(cm_node);
1836                } else {
1837                        if (!loopback) {
1838                                i40iw_cleanup_retrans_entry(cm_node);
1839                                err = i40iw_send_reset(cm_node);
1840                                if (err) {
1841                                        cm_node->state = I40IW_CM_STATE_CLOSED;
1842                                        i40iw_pr_err("send reset\n");
1843                                } else {
1844                                        old_state = cm_node->state;
1845                                        cm_node->state = I40IW_CM_STATE_LISTENER_DESTROYED;
1846                                        if (old_state != I40IW_CM_STATE_MPAREQ_RCVD)
1847                                                i40iw_rem_ref_cm_node(cm_node);
1848                                }
1849                        } else {
1850                                struct i40iw_cm_event event;
1851
1852                                event.cm_node = loopback;
1853                                memcpy(event.cm_info.rem_addr,
1854                                       loopback->rem_addr, sizeof(event.cm_info.rem_addr));
1855                                memcpy(event.cm_info.loc_addr,
1856                                       loopback->loc_addr, sizeof(event.cm_info.loc_addr));
1857                                event.cm_info.rem_port = loopback->rem_port;
1858                                event.cm_info.loc_port = loopback->loc_port;
1859                                event.cm_info.cm_id = loopback->cm_id;
1860                                event.cm_info.ipv4 = loopback->ipv4;
1861                                atomic_inc(&loopback->ref_count);
1862                                loopback->state = I40IW_CM_STATE_CLOSED;
1863                                i40iw_event_connect_error(&event);
1864                                cm_node->state = I40IW_CM_STATE_LISTENER_DESTROYED;
1865                                i40iw_rem_ref_cm_node(cm_node);
1866                        }
1867                }
1868        }
1869
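            /* drop this caller's reference; the last one removes the listener and its qhash/apbvt resources */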
1870        if (!atomic_dec_return(&listener->ref_count)) {
1871                spin_lock_irqsave(&cm_core->listen_list_lock, flags);
1872                list_del(&listener->list);
1873                spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
1874
1875                if (listener->iwdev) {
1876                        if (apbvt_del && !i40iw_listen_port_in_use(cm_core, listener->loc_port))
1877                                i40iw_manage_apbvt(listener->iwdev,
1878                                                   listener->loc_port,
1879                                                   I40IW_MANAGE_APBVT_DEL);
1880
1881                        memcpy(nfo.loc_addr, listener->loc_addr, sizeof(nfo.loc_addr));
1882                        nfo.loc_port = listener->loc_port;
1883                        nfo.ipv4 = listener->ipv4;
1884                        nfo.vlan_id = listener->vlan_id;
1885                        nfo.user_pri = listener->user_pri;
1886
1887                        if (!list_empty(&listener->child_listen_list)) {
1888                                i40iw_del_multiple_qhash(listener->iwdev, &nfo, listener);
1889                        } else {
1890                                if (listener->qhash_set)
1891                                        i40iw_manage_qhash(listener->iwdev,
1892                                                           &nfo,
1893                                                           I40IW_QHASH_TYPE_TCP_SYN,
1894                                                           I40IW_QHASH_MANAGE_TYPE_DELETE,
1895                                                           NULL,
1896                                                           false);
1897                        }
1898                }
1899
1900                cm_core->stats_listen_destroyed++;
1901                kfree(listener);
1902                cm_core->stats_listen_nodes_destroyed++;
1903                listener = NULL;
1904                ret = 0;
1905        }
1906
1907        if (listener) {
1908                if (atomic_read(&listener->pend_accepts_cnt) > 0)
1909                        i40iw_debug(cm_core->dev,
1910                                    I40IW_DEBUG_CM,
1911                                    "%s: listener (%p) pending accepts=%u\n",
1912                                    __func__,
1913                                    listener,
1914                                    atomic_read(&listener->pend_accepts_cnt));
1915        }
1916
1917        return ret;
1918}
1919
1920/**
1921 * i40iw_cm_del_listen - delete a listener
1922 * @cm_core: cm's core
1923 * @listener: passive connection's listener
1924 * @apbvt_del: flag to delete apbvt
1925 */
1926static int i40iw_cm_del_listen(struct i40iw_cm_core *cm_core,
1927                               struct i40iw_cm_listener *listener,
1928                               bool apbvt_del)
1929{
1930        listener->listener_state = I40IW_CM_LISTENER_PASSIVE_STATE;
1931        listener->cm_id = NULL; /* going to be destroyed pretty soon */
1932        return i40iw_dec_refcnt_listen(cm_core, listener, 1, apbvt_del);
1933}
1934
1935/**
1936 * i40iw_addr_resolve_neigh - resolve neighbor address
1937 * @iwdev: iwarp device structure
1938 * @src_ip: local ip address
1939 * @dst_ip: remote ip address
1940 * @arpindex: index of existing arp entry, if any
1941 */
1942static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
1943                                    u32 src_ip,
1944                                    u32 dst_ip,
1945                                    int arpindex)
1946{
1947        struct rtable *rt;
1948        struct neighbour *neigh;
1949        int rc = arpindex;
1950        struct net_device *netdev = iwdev->netdev;
1951        __be32 dst_ipaddr = htonl(dst_ip);
1952        __be32 src_ipaddr = htonl(src_ip);
1953
1954        rt = ip_route_output(&init_net, dst_ipaddr, src_ipaddr, 0, 0);
1955        if (IS_ERR(rt)) {
1956                i40iw_pr_err("ip_route_output\n");
1957                return rc;
1958        }
1959
1960        if (netif_is_bond_slave(netdev))
1961                netdev = netdev_master_upper_dev_get(netdev);
1962
1963        neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
1964
1965        rcu_read_lock();
1966        if (neigh) {
1967                if (neigh->nud_state & NUD_VALID) {
1968                        if (arpindex >= 0) {
1969                                if (ether_addr_equal(iwdev->arp_table[arpindex].mac_addr,
1970                                                     neigh->ha))
1971                                        /* Mac address same as arp table */
1972                                        goto resolve_neigh_exit;
1973                                i40iw_manage_arp_cache(iwdev,
1974                                                       iwdev->arp_table[arpindex].mac_addr,
1975                                                       &dst_ip,
1976                                                       true,
1977                                                       I40IW_ARP_DELETE);
1978                        }
1979
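                                    /* record the resolved MAC in the ARP cache and look up its index */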
1980                        i40iw_manage_arp_cache(iwdev, neigh->ha, &dst_ip, true, I40IW_ARP_ADD);
1981                        rc = i40iw_arp_table(iwdev, &dst_ip, true, NULL, I40IW_ARP_RESOLVE);
1982                } else {
1983                        neigh_event_send(neigh, NULL);
1984                }
1985        }
1986 resolve_neigh_exit:
1987
1988        rcu_read_unlock();
1989        if (neigh)
1990                neigh_release(neigh);
1991
1992        ip_rt_put(rt);
1993        return rc;
1994}
1995
1996/**
1997 * i40iw_get_dst_ipv6 - find the ipv6 route for a source/destination pair
 * @src_addr: local ipv6 sockaddr
 * @dst_addr: destination ipv6 sockaddr
1998 */
1999static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,
2000                                            struct sockaddr_in6 *dst_addr)
2001{
2002        struct dst_entry *dst;
2003        struct flowi6 fl6;
2004
2005        memset(&fl6, 0, sizeof(fl6));
2006        fl6.daddr = dst_addr->sin6_addr;
2007        fl6.saddr = src_addr->sin6_addr;
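            /* link-local destinations need the output interface taken from the scope id */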
2008        if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
2009                fl6.flowi6_oif = dst_addr->sin6_scope_id;
2010
2011        dst = ip6_route_output(&init_net, NULL, &fl6);
2012        return dst;
2013}
2014
2015/**
2016 * i40iw_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
2017 * @iwdev: iwarp device structure
2018 * @src: local ipv6 address
 * @dest: remote ipv6 address
2019 * @arpindex: index of existing arp entry, if any
2020 */
2021static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
2022                                         u32 *src,
2023                                         u32 *dest,
2024                                         int arpindex)
2025{
2026        struct neighbour *neigh;
2027        int rc = arpindex;
2028        struct net_device *netdev = iwdev->netdev;
2029        struct dst_entry *dst;
2030        struct sockaddr_in6 dst_addr;
2031        struct sockaddr_in6 src_addr;
2032
2033        memset(&dst_addr, 0, sizeof(dst_addr));
2034        dst_addr.sin6_family = AF_INET6;
2035        i40iw_copy_ip_htonl(dst_addr.sin6_addr.in6_u.u6_addr32, dest);
2036        memset(&src_addr, 0, sizeof(src_addr));
2037        src_addr.sin6_family = AF_INET6;
2038        i40iw_copy_ip_htonl(src_addr.sin6_addr.in6_u.u6_addr32, src);
2039        dst = i40iw_get_dst_ipv6(&src_addr, &dst_addr);
2040        if (!dst || dst->error) {
2041                if (dst) {
2042                        dst_release(dst);
2043                        i40iw_pr_err("ip6_route_output returned dst->error = %d\n",
2044                                     dst->error);
2045                }
2046                return rc;
2047        }
2048
2049        if (netif_is_bond_slave(netdev))
2050                netdev = netdev_master_upper_dev_get(netdev);
2051
2052        neigh = dst_neigh_lookup(dst, &dst_addr);
2053
2054        rcu_read_lock();
2055        if (neigh) {
2056                i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, "dst_neigh_lookup MAC=%pM\n", neigh->ha);
2057                if (neigh->nud_state & NUD_VALID) {
2058                        if (arpindex >= 0) {
2059                                if (ether_addr_equal
2060                                    (iwdev->arp_table[arpindex].mac_addr,
2061                                     neigh->ha)) {
2062                                        /* Mac address same as in arp table */
2063                                        goto resolve_neigh_exit6;
2064                                }
2065                                i40iw_manage_arp_cache(iwdev,
2066                                                       iwdev->arp_table[arpindex].mac_addr,
2067                                                       dest,
2068                                                       false,
2069                                                       I40IW_ARP_DELETE);
2070                        }
2071                        i40iw_manage_arp_cache(iwdev,
2072                                               neigh->ha,
2073                                               dest,
2074                                               false,
2075                                               I40IW_ARP_ADD);
2076                        rc = i40iw_arp_table(iwdev,
2077                                             dest,
2078                                             false,
2079                                             NULL,
2080                                             I40IW_ARP_RESOLVE);
2081                } else {
2082                        neigh_event_send(neigh, NULL);
2083                }
2084        }
2085
2086 resolve_neigh_exit6:
2087        rcu_read_unlock();
2088        if (neigh)
2089                neigh_release(neigh);
2090        dst_release(dst);
2091        return rc;
2092}
2093
2094/**
2095 * i40iw_ipv4_is_loopback - check if loopback
2096 * @loc_addr: local addr to compare
2097 * @rem_addr: remote address
2098 */
2099static bool i40iw_ipv4_is_loopback(u32 loc_addr, u32 rem_addr)
2100{
2101        return ipv4_is_loopback(htonl(rem_addr)) || (loc_addr == rem_addr);
2102}
2103
2104/**
2105 * i40iw_ipv6_is_loopback - check if loopback
2106 * @loc_addr: local addr to compare
2107 * @rem_addr: remote address
2108 */
2109static bool i40iw_ipv6_is_loopback(u32 *loc_addr, u32 *rem_addr)
2110{
2111        struct in6_addr raddr6;
2112
2113        i40iw_copy_ip_htonl(raddr6.in6_u.u6_addr32, rem_addr);
2114        return !memcmp(loc_addr, rem_addr, 16) || ipv6_addr_loopback(&raddr6);
2115}
2116
2117/**
2118 * i40iw_make_cm_node - create a new instance of a cm node
2119 * @cm_core: cm's core
2120 * @iwdev: iwarp device structure
2121 * @cm_info: quad info for connection
2122 * @listener: passive connection's listener
2123 */
2124static struct i40iw_cm_node *i40iw_make_cm_node(
2125                                   struct i40iw_cm_core *cm_core,
2126                                   struct i40iw_device *iwdev,
2127                                   struct i40iw_cm_info *cm_info,
2128                                   struct i40iw_cm_listener *listener)
2129{
2130        struct i40iw_cm_node *cm_node;
2131        struct timespec ts;
2132        int oldarpindex;
2133        int arpindex;
2134        struct net_device *netdev = iwdev->netdev;
2135
2136        /* create an hte and cm_node for this instance */
2137        cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
2138        if (!cm_node)
2139                return NULL;
2140
2141        /* set our node specific transport info */
2142        cm_node->ipv4 = cm_info->ipv4;
2143        cm_node->vlan_id = cm_info->vlan_id;
2144        if ((cm_node->vlan_id == I40IW_NO_VLAN) && iwdev->dcb)
2145                cm_node->vlan_id = 0;
2146        cm_node->tos = cm_info->tos;
2147        cm_node->user_pri = cm_info->user_pri;
2148        if (listener) {
2149                if (listener->tos != cm_info->tos)
2150                        i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB,
2151                                    "application TOS[%d] and remote client TOS[%d] mismatch\n",
2152                                     listener->tos, cm_info->tos);
2153                cm_node->tos = max(listener->tos, cm_info->tos);
2154                cm_node->user_pri = rt_tos2priority(cm_node->tos);
2155                i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "listener: TOS:[%d] UP:[%d]\n",
2156                            cm_node->tos, cm_node->user_pri);
2157        }
2158        memcpy(cm_node->loc_addr, cm_info->loc_addr, sizeof(cm_node->loc_addr));
2159        memcpy(cm_node->rem_addr, cm_info->rem_addr, sizeof(cm_node->rem_addr));
2160        cm_node->loc_port = cm_info->loc_port;
2161        cm_node->rem_port = cm_info->rem_port;
2162
2163        cm_node->mpa_frame_rev = iwdev->mpa_version;
2164        cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
2165        cm_node->ird_size = I40IW_MAX_IRD_SIZE;
2166        cm_node->ord_size = I40IW_MAX_ORD_SIZE;
2167
2168        cm_node->listener = listener;
2169        cm_node->cm_id = cm_info->cm_id;
2170        ether_addr_copy(cm_node->loc_mac, netdev->dev_addr);
2171        spin_lock_init(&cm_node->retrans_list_lock);
2172
2173        atomic_set(&cm_node->ref_count, 1);
2174        /* associate our parent CM core */
2175        cm_node->cm_core = cm_core;
2176        cm_node->tcp_cntxt.loc_id = I40IW_CM_DEF_LOCAL_ID;
2177        cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;
2178        cm_node->tcp_cntxt.rcv_wnd =
2179                        I40IW_CM_DEFAULT_RCV_WND_SCALED >> I40IW_CM_DEFAULT_RCV_WND_SCALE;
2180        ts = current_kernel_time();
2181        cm_node->tcp_cntxt.loc_seq_num = ts.tv_nsec;
2182        cm_node->tcp_cntxt.mss = iwdev->vsi.mss;
2183
2184        cm_node->iwdev = iwdev;
2185        cm_node->dev = &iwdev->sc_dev;
2186
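            /* loopback peers skip neighbor discovery and use the ARP table directly */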
2187        if ((cm_node->ipv4 &&
2188             i40iw_ipv4_is_loopback(cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
2189             (!cm_node->ipv4 && i40iw_ipv6_is_loopback(cm_node->loc_addr,
2190                                                       cm_node->rem_addr))) {
2191                arpindex = i40iw_arp_table(iwdev,
2192                                           cm_node->rem_addr,
2193                                           false,
2194                                           NULL,
2195                                           I40IW_ARP_RESOLVE);
2196        } else {
2197                oldarpindex = i40iw_arp_table(iwdev,
2198                                              cm_node->rem_addr,
2199                                              false,
2200                                              NULL,
2201                                              I40IW_ARP_RESOLVE);
2202                if (cm_node->ipv4)
2203                        arpindex = i40iw_addr_resolve_neigh(iwdev,
2204                                                            cm_info->loc_addr[0],
2205                                                            cm_info->rem_addr[0],
2206                                                            oldarpindex);
2207                else if (IS_ENABLED(CONFIG_IPV6))
2208                        arpindex = i40iw_addr_resolve_neigh_ipv6(iwdev,
2209                                                                 cm_info->loc_addr,
2210                                                                 cm_info->rem_addr,
2211                                                                 oldarpindex);
2212                else
2213                        arpindex = -EINVAL;
2214        }
2215        if (arpindex < 0) {
2216                i40iw_pr_err("cm_node arpindex\n");
2217                kfree(cm_node);
2218                return NULL;
2219        }
2220        ether_addr_copy(cm_node->rem_mac, iwdev->arp_table[arpindex].mac_addr);
2221        i40iw_add_hte_node(cm_core, cm_node);
2222        cm_core->stats_nodes_created++;
2223        return cm_node;
2224}
2225
2226/**
2227 * i40iw_rem_ref_cm_node - destroy an instance of a cm node
2228 * @cm_node: connection's node
2229 */
2230static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)
2231{
2232        struct i40iw_cm_core *cm_core = cm_node->cm_core;
2233        struct i40iw_qp *iwqp;
2234        struct i40iw_cm_info nfo;
2235        unsigned long flags;
2236
2237        spin_lock_irqsave(&cm_node->cm_core->ht_lock, flags);
2238        if (atomic_dec_return(&cm_node->ref_count)) {
2239                spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);
2240                return;
2241        }
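            /* last reference dropped; unlink from the connected list and tear down */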
2242        list_del(&cm_node->list);
2243        spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);
2244
2245        /* if the node is destroyed before connection was accelerated */
2246        if (!cm_node->accelerated && cm_node->accept_pend) {
2247                pr_err("node destroyed before established\n");
2248                atomic_dec(&cm_node->listener->pend_accepts_cnt);
2249        }
2250        if (cm_node->close_entry)
2251                i40iw_handle_close_entry(cm_node, 0);
2252        if (cm_node->listener) {
2253                i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true);
2254        } else {
2255                if (!i40iw_listen_port_in_use(cm_core, cm_node->loc_port) &&
2256                    cm_node->apbvt_set) {
2257                        i40iw_manage_apbvt(cm_node->iwdev,
2258                                           cm_node->loc_port,
2259                                           I40IW_MANAGE_APBVT_DEL);
2260                        i40iw_get_addr_info(cm_node, &nfo);
2261                        if (cm_node->qhash_set) {
2262                                i40iw_manage_qhash(cm_node->iwdev,
2263                                                   &nfo,
2264                                                   I40IW_QHASH_TYPE_TCP_ESTABLISHED,
2265                                                   I40IW_QHASH_MANAGE_TYPE_DELETE,
2266                                                   NULL,
2267                                                   false);
2268                                cm_node->qhash_set = 0;
2269                        }
2270                }
2271        }
2272
2273        iwqp = cm_node->iwqp;
2274        if (iwqp) {
2275                iwqp->cm_node = NULL;
2276                i40iw_rem_ref(&iwqp->ibqp);
2277                cm_node->iwqp = NULL;
2278        } else if (cm_node->qhash_set) {
2279                i40iw_get_addr_info(cm_node, &nfo);
2280                i40iw_manage_qhash(cm_node->iwdev,
2281                                   &nfo,
2282                                   I40IW_QHASH_TYPE_TCP_ESTABLISHED,
2283                                   I40IW_QHASH_MANAGE_TYPE_DELETE,
2284                                   NULL,
2285                                   false);
2286                cm_node->qhash_set = 0;
2287        }
2288
2289        cm_node->cm_core->stats_nodes_destroyed++;
2290        kfree(cm_node);
2291}
2292
2293/**
2294 * i40iw_handle_fin_pkt - FIN packet received
2295 * @cm_node: connection's node
2296 */
2297static void i40iw_handle_fin_pkt(struct i40iw_cm_node *cm_node)
2298{
2299        u32 ret;
2300
2301        switch (cm_node->state) {
2302        case I40IW_CM_STATE_SYN_RCVD:
2303        case I40IW_CM_STATE_SYN_SENT:
2304        case I40IW_CM_STATE_ESTABLISHED:
2305        case I40IW_CM_STATE_MPAREJ_RCVD:
2306                cm_node->tcp_cntxt.rcv_nxt++;
2307                i40iw_cleanup_retrans_entry(cm_node);
2308                cm_node->state = I40IW_CM_STATE_LAST_ACK;
2309                i40iw_send_fin(cm_node);
2310                break;
2311        case I40IW_CM_STATE_MPAREQ_SENT:
2312                i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
2313                cm_node->tcp_cntxt.rcv_nxt++;
2314                i40iw_cleanup_retrans_entry(cm_node);
2315                cm_node->state = I40IW_CM_STATE_CLOSED;
2316                atomic_inc(&cm_node->ref_count);
2317                i40iw_send_reset(cm_node);
2318                break;
2319        case I40IW_CM_STATE_FIN_WAIT1:
2320                cm_node->tcp_cntxt.rcv_nxt++;
2321                i40iw_cleanup_retrans_entry(cm_node);
2322                cm_node->state = I40IW_CM_STATE_CLOSING;
2323                i40iw_send_ack(cm_node);
2324                /*
2325                 * Wait for ACK as this is simultaneous close.
2326                 * After we receive ACK, do not send anything.
2327                 * Just rm the node.
2328                 */
2329                break;
2330        case I40IW_CM_STATE_FIN_WAIT2:
2331                cm_node->tcp_cntxt.rcv_nxt++;
2332                i40iw_cleanup_retrans_entry(cm_node);
2333                cm_node->state = I40IW_CM_STATE_TIME_WAIT;
2334                i40iw_send_ack(cm_node);
2335                ret = i40iw_schedule_cm_timer(cm_node, NULL,
2336                                              I40IW_TIMER_TYPE_CLOSE, 1, 0);
2337                if (ret)
2338                        i40iw_pr_err("node %p state = %d\n", cm_node, cm_node->state);
2339                break;
2340        case I40IW_CM_STATE_TIME_WAIT:
2341                cm_node->tcp_cntxt.rcv_nxt++;
2342                i40iw_cleanup_retrans_entry(cm_node);
2343                cm_node->state = I40IW_CM_STATE_CLOSED;
2344                i40iw_rem_ref_cm_node(cm_node);
2345                break;
2346        case I40IW_CM_STATE_OFFLOADED:
2347        default:
2348                i40iw_pr_err("bad state node %p state = %d\n", cm_node, cm_node->state);
2349                break;
2350        }
2351}
2352
2353/**
2354 * i40iw_handle_rst_pkt - process received RST packet
2355 * @cm_node: connection's node
2356 * @rbuf: receive buffer
2357 */
2358static void i40iw_handle_rst_pkt(struct i40iw_cm_node *cm_node,
2359                                 struct i40iw_puda_buf *rbuf)
2360{
2361        i40iw_cleanup_retrans_entry(cm_node);
2362        switch (cm_node->state) {
2363        case I40IW_CM_STATE_SYN_SENT:
2364        case I40IW_CM_STATE_MPAREQ_SENT:
2365                switch (cm_node->mpa_frame_rev) {
2366                case IETF_MPA_V2:
2367                        cm_node->mpa_frame_rev = IETF_MPA_V1;
2368                        /* peer may not support MPA v2; fall back to MPA v1, send a syn and go to syn sent state */
2369                        cm_node->state = I40IW_CM_STATE_SYN_SENT;
2370                        if (i40iw_send_syn(cm_node, 0))
2371                                i40iw_active_open_err(cm_node, false);
2372                        break;
2373                case IETF_MPA_V1:
2374                default:
2375                        i40iw_active_open_err(cm_node, false);
2376                        break;
2377                }
2378                break;
2379        case I40IW_CM_STATE_MPAREQ_RCVD:
2380                atomic_add_return(1, &cm_node->passive_state);
2381                break;
2382        case I40IW_CM_STATE_ESTABLISHED:
2383        case I40IW_CM_STATE_SYN_RCVD:
2384        case I40IW_CM_STATE_LISTENING:
2385                i40iw_pr_err("Bad state, state = %d\n", cm_node->state);
2386                i40iw_passive_open_err(cm_node, false);
2387                break;
2388        case I40IW_CM_STATE_OFFLOADED:
2389                i40iw_active_open_err(cm_node, false);
2390                break;
2391        case I40IW_CM_STATE_CLOSED:
2392                break;
2393        case I40IW_CM_STATE_FIN_WAIT2:
2394        case I40IW_CM_STATE_FIN_WAIT1:
2395        case I40IW_CM_STATE_LAST_ACK:
2396                cm_node->cm_id->rem_ref(cm_node->cm_id);
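                    /* fall through */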
2397        case I40IW_CM_STATE_TIME_WAIT:
2398                cm_node->state = I40IW_CM_STATE_CLOSED;
2399                i40iw_rem_ref_cm_node(cm_node);
2400                break;
2401        default:
2402                break;
2403        }
2404}
2405
2406/**
2407 * i40iw_handle_rcv_mpa - Process a recv'd mpa buffer
2408 * @cm_node: connection's node
2409 * @rbuf: receive buffer
2410 */
2411static void i40iw_handle_rcv_mpa(struct i40iw_cm_node *cm_node,
2412                                 struct i40iw_puda_buf *rbuf)
2413{
2414        int ret;
2415        int datasize = rbuf->datalen;
2416        u8 *dataloc = rbuf->data;
2417
2418        enum i40iw_cm_event_type type = I40IW_CM_EVENT_UNKNOWN;
2419        u32 res_type;
2420
2421        ret = i40iw_parse_mpa(cm_node, dataloc, &res_type, datasize);
2422        if (ret) {
2423                if (cm_node->state == I40IW_CM_STATE_MPAREQ_SENT)
2424                        i40iw_active_open_err(cm_node, true);
2425                else
2426                        i40iw_passive_open_err(cm_node, true);
2427                return;
2428        }
2429
2430        switch (cm_node->state) {
2431        case I40IW_CM_STATE_ESTABLISHED:
2432                if (res_type == I40IW_MPA_REQUEST_REJECT)
2433                        i40iw_pr_err("state for reject\n");
2434                cm_node->state = I40IW_CM_STATE_MPAREQ_RCVD;
2435                type = I40IW_CM_EVENT_MPA_REQ;
2436                i40iw_send_ack(cm_node);        /* ACK received MPA request */
2437                atomic_set(&cm_node->passive_state,
2438                           I40IW_PASSIVE_STATE_INDICATED);
2439                break;
2440        case I40IW_CM_STATE_MPAREQ_SENT:
2441                i40iw_cleanup_retrans_entry(cm_node);
2442                if (res_type == I40IW_MPA_REQUEST_REJECT) {
2443                        type = I40IW_CM_EVENT_MPA_REJECT;
2444                        cm_node->state = I40IW_CM_STATE_MPAREJ_RCVD;
2445                } else {
2446                        type = I40IW_CM_EVENT_CONNECTED;
2447                        cm_node->state = I40IW_CM_STATE_OFFLOADED;
2448                }
2449                i40iw_send_ack(cm_node);
2450                break;
2451        default:
2452                pr_err("%s wrong cm_node state = %d\n", __func__, cm_node->state);
2453                break;
2454        }
2455        i40iw_create_event(cm_node, type);
2456}
2457
2458/**
2459 * i40iw_indicate_pkt_err - Send up err event to cm
2460 * @cm_node: connection's node
2461 */
2462static void i40iw_indicate_pkt_err(struct i40iw_cm_node *cm_node)
2463{
2464        switch (cm_node->state) {
2465        case I40IW_CM_STATE_SYN_SENT:
2466        case I40IW_CM_STATE_MPAREQ_SENT:
2467                i40iw_active_open_err(cm_node, true);
2468                break;
2469        case I40IW_CM_STATE_ESTABLISHED:
2470        case I40IW_CM_STATE_SYN_RCVD:
2471                i40iw_passive_open_err(cm_node, true);
2472                break;
2473        case I40IW_CM_STATE_OFFLOADED:
2474        default:
2475                break;
2476        }
2477}
2478
2479/**
2480 * i40iw_check_syn - Check for error on received syn ack
2481 * @cm_node: connection's node
2482 * @tcph: pointer tcp header
2483 */
2484static int i40iw_check_syn(struct i40iw_cm_node *cm_node, struct tcphdr *tcph)
2485{
2486        int err = 0;
2487
2488        if (ntohl(tcph->ack_seq) != cm_node->tcp_cntxt.loc_seq_num) {
2489                err = 1;
2490                i40iw_active_open_err(cm_node, true);
2491        }
2492        return err;
2493}
2494
2495/**
2496 * i40iw_check_seq - validate tcp sequence and ack numbers
2497 * @cm_node: connection's node
2498 * @tcph: pointer to tcp header
2499 */
2500static int i40iw_check_seq(struct i40iw_cm_node *cm_node, struct tcphdr *tcph)
2501{
2502        int err = 0;
2503        u32 seq;
2504        u32 ack_seq;
2505        u32 loc_seq_num = cm_node->tcp_cntxt.loc_seq_num;
2506        u32 rcv_nxt = cm_node->tcp_cntxt.rcv_nxt;
2507        u32 rcv_wnd;
2508
2509        seq = ntohl(tcph->seq);
2510        ack_seq = ntohl(tcph->ack_seq);
2511        rcv_wnd = cm_node->tcp_cntxt.rcv_wnd;
2512        if (ack_seq != loc_seq_num)
2513                err = -1;
2514        else if (!between(seq, rcv_nxt, (rcv_nxt + rcv_wnd)))
2515                err = -1;
2516        if (err) {
2517                i40iw_pr_err("seq number\n");
2518                i40iw_indicate_pkt_err(cm_node);
2519        }
2520        return err;
2521}
2522
2523/**
2524 * i40iw_handle_syn_pkt - handle a SYN packet (passive side)
2525 * @cm_node: connection's node
2526 * @rbuf: receive buffer
2527 */
2528static void i40iw_handle_syn_pkt(struct i40iw_cm_node *cm_node,
2529                                 struct i40iw_puda_buf *rbuf)
2530{
2531        struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
2532        int ret;
2533        u32 inc_sequence;
2534        int optionsize;
2535        struct i40iw_cm_info nfo;
2536
2537        optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
2538        inc_sequence = ntohl(tcph->seq);
2539
2540        switch (cm_node->state) {
2541        case I40IW_CM_STATE_SYN_SENT:
2542        case I40IW_CM_STATE_MPAREQ_SENT:
2543                /* Rcvd syn on active open connection */
2544                i40iw_active_open_err(cm_node, 1);
2545                break;
2546        case I40IW_CM_STATE_LISTENING:
2547                /* Passive OPEN */
2548                if (atomic_read(&cm_node->listener->pend_accepts_cnt) >
2549                    cm_node->listener->backlog) {
2550                        cm_node->cm_core->stats_backlog_drops++;
2551                        i40iw_passive_open_err(cm_node, false);
2552                        break;
2553                }
2554                ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 1);
2555                if (ret) {
2556                        i40iw_passive_open_err(cm_node, false);
2557                        /* drop pkt */
2558                        break;
2559                }
2560                cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
2561                cm_node->accept_pend = 1;
2562                atomic_inc(&cm_node->listener->pend_accepts_cnt);
2563
2564                cm_node->state = I40IW_CM_STATE_SYN_RCVD;
2565                i40iw_get_addr_info(cm_node, &nfo);
2566                ret = i40iw_manage_qhash(cm_node->iwdev,
2567                                         &nfo,
2568                                         I40IW_QHASH_TYPE_TCP_ESTABLISHED,
2569                                         I40IW_QHASH_MANAGE_TYPE_ADD,
2570                                         (void *)cm_node,
2571                                         false);
2572                cm_node->qhash_set = true;
2573                break;
2574        case I40IW_CM_STATE_CLOSED:
2575                i40iw_cleanup_retrans_entry(cm_node);
2576                atomic_inc(&cm_node->ref_count);
2577                i40iw_send_reset(cm_node);
2578                break;
2579        case I40IW_CM_STATE_OFFLOADED:
2580        case I40IW_CM_STATE_ESTABLISHED:
2581        case I40IW_CM_STATE_FIN_WAIT1:
2582        case I40IW_CM_STATE_FIN_WAIT2:
2583        case I40IW_CM_STATE_MPAREQ_RCVD:
2584        case I40IW_CM_STATE_LAST_ACK:
2585        case I40IW_CM_STATE_CLOSING:
2586        case I40IW_CM_STATE_UNKNOWN:
2587        default:
2588                break;
2589        }
2590}
2591
2592/**
2593 * i40iw_handle_synack_pkt - Process SYN+ACK packet (active side)
2594 * @cm_node: connection's node
2595 * @rbuf: receive buffer
2596 */
2597static void i40iw_handle_synack_pkt(struct i40iw_cm_node *cm_node,
2598                                    struct i40iw_puda_buf *rbuf)
2599{
2600        struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
2601        int ret;
2602        u32 inc_sequence;
2603        int optionsize;
2604
2605        optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
2606        inc_sequence = ntohl(tcph->seq);
2607        switch (cm_node->state) {
2608        case I40IW_CM_STATE_SYN_SENT:
2609                i40iw_cleanup_retrans_entry(cm_node);
2610                /* active open */
2611                if (i40iw_check_syn(cm_node, tcph)) {
2612                        i40iw_pr_err("check syn fail\n");
2613                        return;
2614                }
2615                cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
2616                /* setup options */
2617                ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 0);
2618                if (ret) {
2619                        i40iw_debug(cm_node->dev,
2620                                    I40IW_DEBUG_CM,
2621                                    "cm_node=%p tcp_options failed\n",
2622                                    cm_node);
2623                        break;
2624                }
2625                i40iw_cleanup_retrans_entry(cm_node);
2626                cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
2627                i40iw_send_ack(cm_node);        /* ACK  for the syn_ack */
2628                ret = i40iw_send_mpa_request(cm_node);
2629                if (ret) {
2630                        i40iw_debug(cm_node->dev,
2631                                    I40IW_DEBUG_CM,
2632                                    "cm_node=%p i40iw_send_mpa_request failed\n",
2633                                    cm_node);
2634                        break;
2635                }
2636                cm_node->state = I40IW_CM_STATE_MPAREQ_SENT;
2637                break;
2638        case I40IW_CM_STATE_MPAREQ_RCVD:
2639                i40iw_passive_open_err(cm_node, true);
2640                break;
2641        case I40IW_CM_STATE_LISTENING:
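                    /* a SYN+ACK on a listening node is unexpected; reset the connection */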
2642                cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
2643                i40iw_cleanup_retrans_entry(cm_node);
2644                cm_node->state = I40IW_CM_STATE_CLOSED;
2645                i40iw_send_reset(cm_node);
2646                break;
2647        case I40IW_CM_STATE_CLOSED:
2648                cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
2649                i40iw_cleanup_retrans_entry(cm_node);
2650                atomic_inc(&cm_node->ref_count);
2651                i40iw_send_reset(cm_node);
2652                break;
2653        case I40IW_CM_STATE_ESTABLISHED:
2654        case I40IW_CM_STATE_FIN_WAIT1:
2655        case I40IW_CM_STATE_FIN_WAIT2:
2656        case I40IW_CM_STATE_LAST_ACK:
2657        case I40IW_CM_STATE_OFFLOADED:
2658        case I40IW_CM_STATE_CLOSING:
2659        case I40IW_CM_STATE_UNKNOWN:
2660        case I40IW_CM_STATE_MPAREQ_SENT:
2661        default:
2662                break;
2663        }
2664}
2665
2666/**
2667 * i40iw_handle_ack_pkt - process packet with ACK
2668 * @cm_node: connection's node
2669 * @rbuf: receive buffer
2670 */
2671static int i40iw_handle_ack_pkt(struct i40iw_cm_node *cm_node,
2672                                struct i40iw_puda_buf *rbuf)
2673{
2674        struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
2675        u32 inc_sequence;
2676        int ret = 0;
2677        int optionsize;
2678        u32 datasize = rbuf->datalen;
2679
2680        optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
2681
2682        if (i40iw_check_seq(cm_node, tcph))
2683                return -EINVAL;
2684
2685        inc_sequence = ntohl(tcph->seq);
2686        switch (cm_node->state) {
2687        case I40IW_CM_STATE_SYN_RCVD:
2688                i40iw_cleanup_retrans_entry(cm_node);
2689                ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 1);
2690                if (ret)
2691                        break;
2692                cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
2693                cm_node->state = I40IW_CM_STATE_ESTABLISHED;
2694                if (datasize) {
2695                        cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
2696                        i40iw_handle_rcv_mpa(cm_node, rbuf);
2697                }
2698                break;
2699        case I40IW_CM_STATE_ESTABLISHED:
2700                i40iw_cleanup_retrans_entry(cm_node);
2701                if (datasize) {
2702                        cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
2703                        i40iw_handle_rcv_mpa(cm_node, rbuf);
2704                }
2705                break;
2706        case I40IW_CM_STATE_MPAREQ_SENT:
2707                cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
2708                if (datasize) {
2709                        cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
2710                        i40iw_handle_rcv_mpa(cm_node, rbuf);
2711                }
2712                break;
2713        case I40IW_CM_STATE_LISTENING:
2714                i40iw_cleanup_retrans_entry(cm_node);
2715                cm_node->state = I40IW_CM_STATE_CLOSED;
2716                i40iw_send_reset(cm_node);
2717                break;
2718        case I40IW_CM_STATE_CLOSED:
2719                i40iw_cleanup_retrans_entry(cm_node);
2720                atomic_inc(&cm_node->ref_count);
2721                i40iw_send_reset(cm_node);
2722                break;
2723        case I40IW_CM_STATE_LAST_ACK:
2724        case I40IW_CM_STATE_CLOSING:
2725                i40iw_cleanup_retrans_entry(cm_node);
2726                cm_node->state = I40IW_CM_STATE_CLOSED;
2727                if (!cm_node->accept_pend)
2728                        cm_node->cm_id->rem_ref(cm_node->cm_id);
2729                i40iw_rem_ref_cm_node(cm_node);
2730                break;
2731        case I40IW_CM_STATE_FIN_WAIT1:
2732                i40iw_cleanup_retrans_entry(cm_node);
2733                cm_node->state = I40IW_CM_STATE_FIN_WAIT2;
2734                break;
2735        case I40IW_CM_STATE_SYN_SENT:
2736        case I40IW_CM_STATE_FIN_WAIT2:
2737        case I40IW_CM_STATE_OFFLOADED:
2738        case I40IW_CM_STATE_MPAREQ_RCVD:
2739        case I40IW_CM_STATE_UNKNOWN:
2740        default:
2741                i40iw_cleanup_retrans_entry(cm_node);
2742                break;
2743        }
2744        return ret;
2745}
2746
2747/**
2748 * i40iw_process_packet - process cm packet
2749 * @cm_node: connection's node
2750 * @rbuf: receive buffer
2751 */
2752static void i40iw_process_packet(struct i40iw_cm_node *cm_node,
2753                                 struct i40iw_puda_buf *rbuf)
2754{
2755        enum i40iw_tcpip_pkt_type pkt_type = I40IW_PKT_TYPE_UNKNOWN;
2756        struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
2757        u32 fin_set = 0;
2758        int ret;
2759
2760        if (tcph->rst) {
2761                pkt_type = I40IW_PKT_TYPE_RST;
2762        } else if (tcph->syn) {
2763                pkt_type = I40IW_PKT_TYPE_SYN;
2764                if (tcph->ack)
2765                        pkt_type = I40IW_PKT_TYPE_SYNACK;
2766        } else if (tcph->ack) {
2767                pkt_type = I40IW_PKT_TYPE_ACK;
2768        }
2769        if (tcph->fin)
2770                fin_set = 1;
2771
2772        switch (pkt_type) {
2773        case I40IW_PKT_TYPE_SYN:
2774                i40iw_handle_syn_pkt(cm_node, rbuf);
2775                break;
2776        case I40IW_PKT_TYPE_SYNACK:
2777                i40iw_handle_synack_pkt(cm_node, rbuf);
2778                break;
2779        case I40IW_PKT_TYPE_ACK:
2780                ret = i40iw_handle_ack_pkt(cm_node, rbuf);
2781                if (fin_set && !ret)
2782                        i40iw_handle_fin_pkt(cm_node);
2783                break;
2784        case I40IW_PKT_TYPE_RST:
2785                i40iw_handle_rst_pkt(cm_node, rbuf);
2786                break;
2787        default:
2788                if (fin_set &&
2789                    (!i40iw_check_seq(cm_node, (struct tcphdr *)rbuf->tcph)))
2790                        i40iw_handle_fin_pkt(cm_node);
2791                break;
2792        }
2793}
2794
2795/**
2796 * i40iw_make_listen_node - create a listen node with params
2797 * @cm_core: cm's core
2798 * @iwdev: iwarp device structure
2799 * @cm_info: quad info for connection
2800 */
2801static struct i40iw_cm_listener *i40iw_make_listen_node(
2802                                        struct i40iw_cm_core *cm_core,
2803                                        struct i40iw_device *iwdev,
2804                                        struct i40iw_cm_info *cm_info)
2805{
2806        struct i40iw_cm_listener *listener;
2807        unsigned long flags;
2808
2809        /* cannot have multiple matching listeners */
2810        listener = i40iw_find_listener(cm_core, cm_info->loc_addr,
2811                                       cm_info->loc_port,
2812                                       cm_info->vlan_id,
2813                                       I40IW_CM_LISTENER_EITHER_STATE);
2814        if (listener &&
2815            (listener->listener_state == I40IW_CM_LISTENER_ACTIVE_STATE)) {
2816                atomic_dec(&listener->ref_count);
2817                i40iw_debug(cm_core->dev,
2818                            I40IW_DEBUG_CM,
2819                            "Not creating listener since it already exists\n");
2820                return NULL;
2821        }
2822
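            /* no matching listener was found, so allocate a new one;
             * otherwise the existing non-active listener is reused below
             */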
2823        if (!listener) {
2824                /* create a CM listen node (1/2 node to compare incoming traffic to) */
2825                listener = kzalloc(sizeof(*listener), GFP_ATOMIC);
2826                if (!listener)
2827                        return NULL;
2828                cm_core->stats_listen_nodes_created++;
2829                memcpy(listener->loc_addr, cm_info->loc_addr, sizeof(listener->loc_addr));
2830                listener->loc_port = cm_info->loc_port;
2831
2832                INIT_LIST_HEAD(&listener->child_listen_list);
2833
2834                atomic_set(&listener->ref_count, 1);
2835        } else {
2836                listener->reused_node = 1;
2837        }
2838
2839        listener->cm_id = cm_info->cm_id;
2840        listener->ipv4 = cm_info->ipv4;
2841        listener->vlan_id = cm_info->vlan_id;
2842        atomic_set(&listener->pend_accepts_cnt, 0);
2843        listener->cm_core = cm_core;
2844        listener->iwdev = iwdev;
2845
2846        listener->backlog = cm_info->backlog;
2847        listener->listener_state = I40IW_CM_LISTENER_ACTIVE_STATE;
2848
2849        if (!listener->reused_node) {
2850                spin_lock_irqsave(&cm_core->listen_list_lock, flags);
2851                list_add(&listener->list, &cm_core->listen_nodes);
2852                spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
2853        }
2854
2855        return listener;
2856}
2857
2858/**
2859 * i40iw_create_cm_node - make a connection node with params
2860 * @cm_core: cm's core
2861 * @iwdev: iwarp device structure
2862 * @private_data_len: length of private data for mpa request
2863 * @private_data: pointer to private data for connection
2864 * @cm_info: quad info for connection
2865 */
2866static struct i40iw_cm_node *i40iw_create_cm_node(
2867                                        struct i40iw_cm_core *cm_core,
2868                                        struct i40iw_device *iwdev,
2869                                        u16 private_data_len,
2870                                        void *private_data,
2871                                        struct i40iw_cm_info *cm_info)
2872{
2873        struct i40iw_cm_node *cm_node;
2874        struct i40iw_cm_listener *loopback_remotelistener;
2875        struct i40iw_cm_node *loopback_remotenode;
2876        struct i40iw_cm_info loopback_cm_info;
2877
2878        /* create a CM connection node */
2879        cm_node = i40iw_make_cm_node(cm_core, iwdev, cm_info, NULL);
2880        if (!cm_node)
2881                return ERR_PTR(-ENOMEM);
2882        /* set our node to the client (active) side */
2883        cm_node->tcp_cntxt.client = 1;
2884        cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;
2885
2886        if (!memcmp(cm_info->loc_addr, cm_info->rem_addr, sizeof(cm_info->loc_addr))) {
2887                loopback_remotelistener = i40iw_find_listener(
2888                                                cm_core,
2889                                                cm_info->rem_addr,
2890                                                cm_node->rem_port,
2891                                                cm_node->vlan_id,
2892                                                I40IW_CM_LISTENER_ACTIVE_STATE);
2893                if (!loopback_remotelistener) {
2894                        i40iw_rem_ref_cm_node(cm_node);
2895                        return ERR_PTR(-ECONNREFUSED);
2896                } else {
2897                        loopback_cm_info = *cm_info;
2898                        loopback_cm_info.loc_port = cm_info->rem_port;
2899                        loopback_cm_info.rem_port = cm_info->loc_port;
2900                        loopback_cm_info.cm_id = loopback_remotelistener->cm_id;
2901                        loopback_cm_info.ipv4 = cm_info->ipv4;
2902                        loopback_remotenode = i40iw_make_cm_node(cm_core,
2903                                                                 iwdev,
2904                                                                 &loopback_cm_info,
2905                                                                 loopback_remotelistener);
2906                        if (!loopback_remotenode) {
2907                                i40iw_rem_ref_cm_node(cm_node);
2908                                return ERR_PTR(-ENOMEM);
2909                        }
2910                        cm_core->stats_loopbacks++;
2911                        loopback_remotenode->loopbackpartner = cm_node;
2912                        loopback_remotenode->tcp_cntxt.rcv_wscale =
2913                                I40IW_CM_DEFAULT_RCV_WND_SCALE;
2914                        cm_node->loopbackpartner = loopback_remotenode;
2915                        memcpy(loopback_remotenode->pdata_buf, private_data,
2916                               private_data_len);
2917                        loopback_remotenode->pdata.size = private_data_len;
2918
2919                        cm_node->state = I40IW_CM_STATE_OFFLOADED;
2920                        cm_node->tcp_cntxt.rcv_nxt =
2921                                loopback_remotenode->tcp_cntxt.loc_seq_num;
2922                        loopback_remotenode->tcp_cntxt.rcv_nxt =
2923                                cm_node->tcp_cntxt.loc_seq_num;
2924                        cm_node->tcp_cntxt.max_snd_wnd =
2925                                loopback_remotenode->tcp_cntxt.rcv_wnd;
2926                        loopback_remotenode->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
2927                        cm_node->tcp_cntxt.snd_wnd = loopback_remotenode->tcp_cntxt.rcv_wnd;
2928                        loopback_remotenode->tcp_cntxt.snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
2929                        cm_node->tcp_cntxt.snd_wscale = loopback_remotenode->tcp_cntxt.rcv_wscale;
2930                        loopback_remotenode->tcp_cntxt.snd_wscale = cm_node->tcp_cntxt.rcv_wscale;
2931                        loopback_remotenode->state = I40IW_CM_STATE_MPAREQ_RCVD;
2932                        i40iw_create_event(loopback_remotenode, I40IW_CM_EVENT_MPA_REQ);
2933                }
2934                return cm_node;
2935        }
2936
2937        cm_node->pdata.size = private_data_len;
2938        cm_node->pdata.addr = cm_node->pdata_buf;
2939
2940        memcpy(cm_node->pdata_buf, private_data, private_data_len);
2941
2942        cm_node->state = I40IW_CM_STATE_SYN_SENT;
2943        return cm_node;
2944}
2945
2946/**
2947 * i40iw_cm_reject - reject and teardown a connection
2948 * @cm_node: connection's node
2949 * @pdata: ptr to private data for reject
2950 * @plen: size of private data
2951 */
2952static int i40iw_cm_reject(struct i40iw_cm_node *cm_node, const void *pdata, u8 plen)
2953{
2954        int ret = 0;
2955        int err;
2956        int passive_state;
2957        struct iw_cm_id *cm_id = cm_node->cm_id;
2958        struct i40iw_cm_node *loopback = cm_node->loopbackpartner;
2959
2960        if (cm_node->tcp_cntxt.client)
2961                return ret;
2962        i40iw_cleanup_retrans_entry(cm_node);
2963
2964        if (!loopback) {
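                    /* if a reset has already been signalled for this node
                     * (passive_state reaches I40IW_SEND_RESET_EVENT), just
                     * close it; otherwise send the MPA reject
                     */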
2965                passive_state = atomic_add_return(1, &cm_node->passive_state);
2966                if (passive_state == I40IW_SEND_RESET_EVENT) {
2967                        cm_node->state = I40IW_CM_STATE_CLOSED;
2968                        i40iw_rem_ref_cm_node(cm_node);
2969                } else {
2970                        if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {
2971                                i40iw_rem_ref_cm_node(cm_node);
2972                        } else {
2973                                ret = i40iw_send_mpa_reject(cm_node, pdata, plen);
2974                                if (ret) {
2975                                        cm_node->state = I40IW_CM_STATE_CLOSED;
2976                                        err = i40iw_send_reset(cm_node);
2977                                        if (err)
2978                                                i40iw_pr_err("send reset failed\n");
2979                                } else {
2980                                        cm_id->add_ref(cm_id);
2981                                }
2982                        }
2983                }
2984        } else {
2985                cm_node->cm_id = NULL;
2986                if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {
2987                        i40iw_rem_ref_cm_node(cm_node);
2988                        i40iw_rem_ref_cm_node(loopback);
2989                } else {
2990                        ret = i40iw_send_cm_event(loopback,
2991                                                  loopback->cm_id,
2992                                                  IW_CM_EVENT_CONNECT_REPLY,
2993                                                  -ECONNREFUSED);
2994                        i40iw_rem_ref_cm_node(cm_node);
2995                        loopback->state = I40IW_CM_STATE_CLOSING;
2996
2997                        cm_id = loopback->cm_id;
2998                        i40iw_rem_ref_cm_node(loopback);
2999                        cm_id->rem_ref(cm_id);
3000                }
3001        }
3002
3003        return ret;
3004}
3005
3006/**
3007 * i40iw_cm_close - close a cm connection
3008 * @cm_node: connection's node
3009 */
3010static int i40iw_cm_close(struct i40iw_cm_node *cm_node)
3011{
3012        int ret = 0;
3013
3014        if (!cm_node)
3015                return -EINVAL;
3016
3017        switch (cm_node->state) {
3018        case I40IW_CM_STATE_SYN_RCVD:
3019        case I40IW_CM_STATE_SYN_SENT:
3020        case I40IW_CM_STATE_ONE_SIDE_ESTABLISHED:
3021        case I40IW_CM_STATE_ESTABLISHED:
3022        case I40IW_CM_STATE_ACCEPTING:
3023        case I40IW_CM_STATE_MPAREQ_SENT:
3024        case I40IW_CM_STATE_MPAREQ_RCVD:
3025                i40iw_cleanup_retrans_entry(cm_node);
3026                i40iw_send_reset(cm_node);
3027                break;
3028        case I40IW_CM_STATE_CLOSE_WAIT:
3029                cm_node->state = I40IW_CM_STATE_LAST_ACK;
3030                i40iw_send_fin(cm_node);
3031                break;
3032        case I40IW_CM_STATE_FIN_WAIT1:
3033        case I40IW_CM_STATE_FIN_WAIT2:
3034        case I40IW_CM_STATE_LAST_ACK:
3035        case I40IW_CM_STATE_TIME_WAIT:
3036        case I40IW_CM_STATE_CLOSING:
3037                ret = -1;
3038                break;
3039        case I40IW_CM_STATE_LISTENING:
3040                i40iw_cleanup_retrans_entry(cm_node);
3041                i40iw_send_reset(cm_node);
3042                break;
3043        case I40IW_CM_STATE_MPAREJ_RCVD:
3044        case I40IW_CM_STATE_UNKNOWN:
3045        case I40IW_CM_STATE_INITED:
3046        case I40IW_CM_STATE_CLOSED:
3047        case I40IW_CM_STATE_LISTENER_DESTROYED:
3048                i40iw_rem_ref_cm_node(cm_node);
3049                break;
3050        case I40IW_CM_STATE_OFFLOADED:
3051                if (cm_node->send_entry)
3052                        i40iw_pr_err("send_entry\n");
3053                i40iw_rem_ref_cm_node(cm_node);
3054                break;
3055        }
3056        return ret;
3057}
3058
3059/**
3060 * i40iw_receive_ilq - receive an ethernet packet and process it
3061 * through the CM core
3062 * @vsi: pointer to the vsi structure
3063 * @rbuf: receive buffer
3064 */
3065void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf)
3066{
3067        struct i40iw_cm_node *cm_node;
3068        struct i40iw_cm_listener *listener;
3069        struct iphdr *iph;
3070        struct ipv6hdr *ip6h;
3071        struct tcphdr *tcph;
3072        struct i40iw_cm_info cm_info;
3073        struct i40iw_sc_dev *dev = vsi->dev;
3074        struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
3075        struct i40iw_cm_core *cm_core = &iwdev->cm_core;
3076        struct vlan_ethhdr *ethh;
3077        u16 vtag;
3078
3079        /* if vlan, then maclen = 18 else 14 */
3080        iph = (struct iphdr *)rbuf->iph;
3081        memset(&cm_info, 0, sizeof(cm_info));
3082
3083        i40iw_debug_buf(dev,
3084                        I40IW_DEBUG_ILQ,
3085                        "RECEIVE ILQ BUFFER",
3086                        rbuf->mem.va,
3087                        rbuf->totallen);
3088        ethh = (struct vlan_ethhdr *)rbuf->mem.va;
3089
3090        if (ethh->h_vlan_proto == htons(ETH_P_8021Q)) {
3091                vtag = ntohs(ethh->h_vlan_TCI);
3092                cm_info.user_pri = (vtag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
3093                cm_info.vlan_id = vtag & VLAN_VID_MASK;
3094                i40iw_debug(cm_core->dev,
3095                            I40IW_DEBUG_CM,
3096                            "%s vlan_id=%d\n",
3097                            __func__,
3098                            cm_info.vlan_id);
3099        } else {
3100                cm_info.vlan_id = I40IW_NO_VLAN;
3101        }
3102        tcph = (struct tcphdr *)rbuf->tcph;
3103
3104        if (rbuf->ipv4) {
3105                cm_info.loc_addr[0] = ntohl(iph->daddr);
3106                cm_info.rem_addr[0] = ntohl(iph->saddr);
3107                cm_info.ipv4 = true;
3108                cm_info.tos = iph->tos;
3109        } else {
3110                ip6h = (struct ipv6hdr *)rbuf->iph;
3111                i40iw_copy_ip_ntohl(cm_info.loc_addr,
3112                                    ip6h->daddr.in6_u.u6_addr32);
3113                i40iw_copy_ip_ntohl(cm_info.rem_addr,
3114                                    ip6h->saddr.in6_u.u6_addr32);
3115                cm_info.ipv4 = false;
3116                cm_info.tos = (ip6h->priority << 4) | (ip6h->flow_lbl[0] >> 4);
3117        }
3118        cm_info.loc_port = ntohs(tcph->dest);
3119        cm_info.rem_port = ntohs(tcph->source);
3120        cm_node = i40iw_find_node(cm_core,
3121                                  cm_info.rem_port,
3122                                  cm_info.rem_addr,
3123                                  cm_info.loc_port,
3124                                  cm_info.loc_addr,
3125                                  true);
3126
3127        if (!cm_node) {
3128                /* the only packet type accepted with no existing */
3129                /* node is one for a passive open (SYN only) */
3130                if (!tcph->syn || tcph->ack)
3131                        return;
3132                listener =
3133                    i40iw_find_listener(cm_core,
3134                                        cm_info.loc_addr,
3135                                        cm_info.loc_port,
3136                                        cm_info.vlan_id,
3137                                        I40IW_CM_LISTENER_ACTIVE_STATE);
3138                if (!listener) {
3139                        cm_info.cm_id = NULL;
3140                        i40iw_debug(cm_core->dev,
3141                                    I40IW_DEBUG_CM,
3142                                    "%s no listener found\n",
3143                                    __func__);
3144                        return;
3145                }
3146                cm_info.cm_id = listener->cm_id;
3147                cm_node = i40iw_make_cm_node(cm_core, iwdev, &cm_info, listener);
3148                if (!cm_node) {
3149                        i40iw_debug(cm_core->dev,
3150                                    I40IW_DEBUG_CM,
3151                                    "%s allocate node failed\n",
3152                                    __func__);
3153                        atomic_dec(&listener->ref_count);
3154                        return;
3155                }
3156                if (!tcph->rst && !tcph->fin) {
3157                        cm_node->state = I40IW_CM_STATE_LISTENING;
3158                } else {
3159                        i40iw_rem_ref_cm_node(cm_node);
3160                        return;
3161                }
3162                atomic_inc(&cm_node->ref_count);
3163        } else if (cm_node->state == I40IW_CM_STATE_OFFLOADED) {
3164                i40iw_rem_ref_cm_node(cm_node);
3165                return;
3166        }
3167        i40iw_process_packet(cm_node, rbuf);
3168        i40iw_rem_ref_cm_node(cm_node);
3169}
3170
3171/**
3172 * i40iw_setup_cm_core - set up the top level instance of a cm
3173 * core
3174 * @iwdev: iwarp device structure
3175 */
3176void i40iw_setup_cm_core(struct i40iw_device *iwdev)
3177{
3178        struct i40iw_cm_core *cm_core = &iwdev->cm_core;
3179
3180        cm_core->iwdev = iwdev;
3181        cm_core->dev = &iwdev->sc_dev;
3182
3183        INIT_LIST_HEAD(&cm_core->connected_nodes);
3184        INIT_LIST_HEAD(&cm_core->listen_nodes);
3185
3186        setup_timer(&cm_core->tcp_timer, i40iw_cm_timer_tick,
3187                    (unsigned long)cm_core);
3188
3189        spin_lock_init(&cm_core->ht_lock);
3190        spin_lock_init(&cm_core->listen_list_lock);
3191
3192        cm_core->event_wq = alloc_ordered_workqueue("iwewq",
3193                                                    WQ_MEM_RECLAIM);
3194
3195        cm_core->disconn_wq = alloc_ordered_workqueue("iwdwq",
3196                                                      WQ_MEM_RECLAIM);
3197}
3198
3199/**
3200 * i40iw_cleanup_cm_core - tear down the top level instance of a
3201 * cm core
3202 * @cm_core: cm's core
3203 */
3204void i40iw_cleanup_cm_core(struct i40iw_cm_core *cm_core)
3205{
3206        unsigned long flags;
3207
3208        if (!cm_core)
3209                return;
3210
3211        spin_lock_irqsave(&cm_core->ht_lock, flags);
3212        if (timer_pending(&cm_core->tcp_timer))
3213                del_timer_sync(&cm_core->tcp_timer);
3214        spin_unlock_irqrestore(&cm_core->ht_lock, flags);
3215
3216        destroy_workqueue(cm_core->event_wq);
3217        destroy_workqueue(cm_core->disconn_wq);
3218}
3219
3220/**
3221 * i40iw_init_tcp_ctx - setup qp context
3222 * @cm_node: connection's node
3223 * @tcp_info: offload info for tcp
3224 * @iwqp: associate qp for the connection
3225 */
3226static void i40iw_init_tcp_ctx(struct i40iw_cm_node *cm_node,
3227                               struct i40iw_tcp_offload_info *tcp_info,
3228                               struct i40iw_qp *iwqp)
3229{
3230        tcp_info->ipv4 = cm_node->ipv4;
3231        tcp_info->drop_ooo_seg = true;
3232        tcp_info->wscale = true;
3233        tcp_info->ignore_tcp_opt = true;
3234        tcp_info->ignore_tcp_uns_opt = true;
3235        tcp_info->no_nagle = false;
3236
3237        tcp_info->ttl = I40IW_DEFAULT_TTL;
3238        tcp_info->rtt_var = cpu_to_le32(I40IW_DEFAULT_RTT_VAR);
3239        tcp_info->ss_thresh = cpu_to_le32(I40IW_DEFAULT_SS_THRESH);
3240        tcp_info->rexmit_thresh = I40IW_DEFAULT_REXMIT_THRESH;
3241
3242        tcp_info->tcp_state = I40IW_TCP_STATE_ESTABLISHED;
3243        tcp_info->snd_wscale = cm_node->tcp_cntxt.snd_wscale;
3244        tcp_info->rcv_wscale = cm_node->tcp_cntxt.rcv_wscale;
3245
3246        tcp_info->snd_nxt = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
3247        tcp_info->snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.snd_wnd);
3248        tcp_info->rcv_nxt = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
3249        tcp_info->snd_max = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
3250
3251        tcp_info->snd_una = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
3252        tcp_info->cwnd = cpu_to_le32(2 * cm_node->tcp_cntxt.mss);
3253        tcp_info->snd_wl1 = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
3254        tcp_info->snd_wl2 = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
3255        tcp_info->max_snd_window = cpu_to_le32(cm_node->tcp_cntxt.max_snd_wnd);
3256        tcp_info->rcv_wnd = cpu_to_le32(cm_node->tcp_cntxt.rcv_wnd <<
3257                                        cm_node->tcp_cntxt.rcv_wscale);
3258
3259        tcp_info->flow_label = 0;
3260        tcp_info->snd_mss = cpu_to_le32(((u32)cm_node->tcp_cntxt.mss));
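            /* a vlan_id below VLAN_TAG_PRESENT is a valid VLAN, so have
             * the HW insert the tag on transmit
             */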
3261        if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
3262                tcp_info->insert_vlan_tag = true;
3263                tcp_info->vlan_tag = cpu_to_le16(cm_node->vlan_id);
3264        }
3265        if (cm_node->ipv4) {
3266                tcp_info->src_port = cpu_to_le16(cm_node->loc_port);
3267                tcp_info->dst_port = cpu_to_le16(cm_node->rem_port);
3268
3269                tcp_info->dest_ip_addr3 = cpu_to_le32(cm_node->rem_addr[0]);
3270                tcp_info->local_ipaddr3 = cpu_to_le32(cm_node->loc_addr[0]);
3271                tcp_info->arp_idx =
3272                        cpu_to_le16((u16)i40iw_arp_table(
3273                                                         iwqp->iwdev,
3274                                                         &tcp_info->dest_ip_addr3,
3275                                                         true,
3276                                                         NULL,
3277                                                         I40IW_ARP_RESOLVE));
3278        } else {
3279                tcp_info->src_port = cpu_to_le16(cm_node->loc_port);
3280                tcp_info->dst_port = cpu_to_le16(cm_node->rem_port);
3281                tcp_info->dest_ip_addr0 = cpu_to_le32(cm_node->rem_addr[0]);
3282                tcp_info->dest_ip_addr1 = cpu_to_le32(cm_node->rem_addr[1]);
3283                tcp_info->dest_ip_addr2 = cpu_to_le32(cm_node->rem_addr[2]);
3284                tcp_info->dest_ip_addr3 = cpu_to_le32(cm_node->rem_addr[3]);
3285                tcp_info->local_ipaddr0 = cpu_to_le32(cm_node->loc_addr[0]);
3286                tcp_info->local_ipaddr1 = cpu_to_le32(cm_node->loc_addr[1]);
3287                tcp_info->local_ipaddr2 = cpu_to_le32(cm_node->loc_addr[2]);
3288                tcp_info->local_ipaddr3 = cpu_to_le32(cm_node->loc_addr[3]);
3289                tcp_info->arp_idx =
3290                        cpu_to_le16((u16)i40iw_arp_table(
3291                                                         iwqp->iwdev,
3292                                                         &tcp_info->dest_ip_addr0,
3293                                                         false,
3294                                                         NULL,
3295                                                         I40IW_ARP_RESOLVE));
3296        }
3297}
3298
3299/**
3300 * i40iw_cm_init_tsa_conn - setup qp for RTS
3301 * @iwqp: associate qp for the connection
3302 * @cm_node: connection's node
3303 */
3304static void i40iw_cm_init_tsa_conn(struct i40iw_qp *iwqp,
3305                                   struct i40iw_cm_node *cm_node)
3306{
3307        struct i40iw_tcp_offload_info tcp_info;
3308        struct i40iwarp_offload_info *iwarp_info;
3309        struct i40iw_qp_host_ctx_info *ctx_info;
3310        struct i40iw_device *iwdev = iwqp->iwdev;
3311        struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
3312
3313        memset(&tcp_info, 0x00, sizeof(struct i40iw_tcp_offload_info));
3314        iwarp_info = &iwqp->iwarp_info;
3315        ctx_info = &iwqp->ctx_info;
3316
3317        ctx_info->tcp_info = &tcp_info;
3318        ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
3319        ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
3320
3321        iwarp_info->ord_size = cm_node->ord_size;
3322        iwarp_info->ird_size = i40iw_derive_hw_ird_setting(cm_node->ird_size);
3323
3324        if (iwarp_info->ord_size == 1)
3325                iwarp_info->ord_size = 2;
3326
3327        iwarp_info->rd_enable = true;
3328        iwarp_info->rdmap_ver = 1;
3329        iwarp_info->ddp_ver = 1;
3330
3331        iwarp_info->pd_id = iwqp->iwpd->sc_pd.pd_id;
3332
3333        ctx_info->tcp_info_valid = true;
3334        ctx_info->iwarp_info_valid = true;
3335        ctx_info->add_to_qoslist = true;
3336        ctx_info->user_pri = cm_node->user_pri;
3337
3338        i40iw_init_tcp_ctx(cm_node, &tcp_info, iwqp);
3339        if (cm_node->snd_mark_en) {
3340                iwarp_info->snd_mark_en = true;
3341                iwarp_info->snd_mark_offset = (tcp_info.snd_nxt &
3342                                SNDMARKER_SEQNMASK) + cm_node->lsmm_size;
3343        }
3344
3345        cm_node->state = I40IW_CM_STATE_OFFLOADED;
3346        tcp_info.tcp_state = I40IW_TCP_STATE_ESTABLISHED;
3347        tcp_info.src_mac_addr_idx = iwdev->mac_ip_table_idx;
3348        tcp_info.tos = cm_node->tos;
3349
3350        dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp, (u64 *)(iwqp->host_ctx.va), ctx_info);
3351
3352        /* once tcp_info is set, no need to do it again */
3353        ctx_info->tcp_info_valid = false;
3354        ctx_info->iwarp_info_valid = false;
3355        ctx_info->add_to_qoslist = false;
3356}
3357
3358/**
3359 * i40iw_cm_disconn - schedule work when a connection is being closed
3360 * @iwqp: associate qp for the connection
3361 */
3362void i40iw_cm_disconn(struct i40iw_qp *iwqp)
3363{
3364        struct disconn_work *work;
3365        struct i40iw_device *iwdev = iwqp->iwdev;
3366        struct i40iw_cm_core *cm_core = &iwdev->cm_core;
3367        unsigned long flags;
3368
3369        work = kzalloc(sizeof(*work), GFP_ATOMIC);
3370        if (!work)
3371                return; /* Timer will clean up */
3372
3373        spin_lock_irqsave(&iwdev->qptable_lock, flags);
3374        if (!iwdev->qp_table[iwqp->ibqp.qp_num]) {
3375                spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
3376                i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
3377                            "%s qp_id %d is already freed\n",
3378                             __func__, iwqp->ibqp.qp_num);
3379                kfree(work);
3380                return;
3381        }
3382        i40iw_add_ref(&iwqp->ibqp);
3383        spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
3384
3385        work->iwqp = iwqp;
3386        INIT_WORK(&work->work, i40iw_disconnect_worker);
3387        queue_work(cm_core->disconn_wq, &work->work);
3388        return;
3389}
3390
3391/**
3392 * i40iw_qp_disconnect - free lsmm resources and close the cm node
3393 * @iwqp: associate qp for the connection
3394 */
3395static void i40iw_qp_disconnect(struct i40iw_qp *iwqp)
3396{
3397        struct i40iw_device *iwdev;
3398        struct i40iw_ib_device *iwibdev;
3399
3400        iwdev = to_iwdev(iwqp->ibqp.device);
3401        if (!iwdev) {
3402                i40iw_pr_err("iwdev == NULL\n");
3403                return;
3404        }
3405
3406        iwibdev = iwdev->iwibdev;
3407
3408        if (iwqp->active_conn) {
3409                /* indicate this connection is NOT active */
3410                iwqp->active_conn = 0;
3411        } else {
3412                /* Need to free the Last Streaming Mode Message */
3413                if (iwqp->ietf_mem.va) {
3414                        if (iwqp->lsmm_mr)
3415                                iwibdev->ibdev.dereg_mr(iwqp->lsmm_mr);
3416                        i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->ietf_mem);
3417                }
3418        }
3419
3420        /* close the CM node down if it is still active */
3421        if (iwqp->cm_node) {
3422                i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, "%s Call close API\n", __func__);
3423                i40iw_cm_close(iwqp->cm_node);
3424        }
3425}
3426
3427/**
3428 * i40iw_cm_disconn_true - called by worker thread to disconnect qp
3429 * @iwqp: associate qp for the connection
3430 */
3431static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp)
3432{
3433        struct iw_cm_id *cm_id;
3434        struct i40iw_device *iwdev;
3435        struct i40iw_sc_qp *qp = &iwqp->sc_qp;
3436        u16 last_ae;
3437        u8 original_hw_tcp_state;
3438        u8 original_ibqp_state;
3439        int disconn_status = 0;
3440        int issue_disconn = 0;
3441        int issue_close = 0;
3442        int issue_flush = 0;
3443        struct ib_event ibevent;
3444        unsigned long flags;
3445        int ret;
3446
3447        if (!iwqp) {
3448                i40iw_pr_err("iwqp == NULL\n");
3449                return;
3450        }
3451
3452        spin_lock_irqsave(&iwqp->lock, flags);
3453        cm_id = iwqp->cm_id;
3454        /* make sure we haven't already closed this connection */
3455        if (!cm_id) {
3456                spin_unlock_irqrestore(&iwqp->lock, flags);
3457                return;
3458        }
3459
3460        iwdev = to_iwdev(iwqp->ibqp.device);
3461
3462        original_hw_tcp_state = iwqp->hw_tcp_state;
3463        original_ibqp_state = iwqp->ibqp_state;
3464        last_ae = iwqp->last_aeq;
3465
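            /* a terminate in progress forces both disconnect and close
             * events and a flush; otherwise the decision is based on the
             * original hw tcp/ibqp state and the last asynchronous event
             */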
3466        if (qp->term_flags) {
3467                issue_disconn = 1;
3468                issue_close = 1;
3469                iwqp->cm_id = NULL;
3470                /* When the term timer expires after cm_timer, we don't
3471                 * want the terminate handler to issue cm_disconn, which
3472                 * can re-free a QP even after its refcnt has dropped to 0.
3473                 */
3474                i40iw_terminate_del_timer(qp);
3475                if (!iwqp->flush_issued) {
3476                        iwqp->flush_issued = 1;
3477                        issue_flush = 1;
3478                }
3479        } else if ((original_hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) ||
3480                   ((original_ibqp_state == IB_QPS_RTS) &&
3481                    (last_ae == I40IW_AE_LLP_CONNECTION_RESET))) {
3482                issue_disconn = 1;
3483                if (last_ae == I40IW_AE_LLP_CONNECTION_RESET)
3484                        disconn_status = -ECONNRESET;
3485        }
3486
3487        if (((original_hw_tcp_state == I40IW_TCP_STATE_CLOSED) ||
3488             (original_hw_tcp_state == I40IW_TCP_STATE_TIME_WAIT) ||
3489             (last_ae == I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE) ||
3490             (last_ae == I40IW_AE_LLP_CONNECTION_RESET))) {
3491                issue_close = 1;
3492                iwqp->cm_id = NULL;
3493                if (!iwqp->flush_issued) {
3494                        iwqp->flush_issued = 1;
3495                        issue_flush = 1;
3496                }
3497        }
3498
3499        spin_unlock_irqrestore(&iwqp->lock, flags);
3500        if (issue_flush && !iwqp->destroyed) {
3501                /* Flush the queues */
3502                i40iw_flush_wqes(iwdev, iwqp);
3503
3504                if (qp->term_flags && iwqp->ibqp.event_handler) {
3505                        ibevent.device = iwqp->ibqp.device;
3506                        ibevent.event = (qp->eventtype == TERM_EVENT_QP_FATAL) ?
3507                                        IB_EVENT_QP_FATAL : IB_EVENT_QP_ACCESS_ERR;
3508                        ibevent.element.qp = &iwqp->ibqp;
3509                        iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
3510                }
3511        }
3512
3513        if (cm_id && cm_id->event_handler) {
3514                if (issue_disconn) {
3515                        ret = i40iw_send_cm_event(NULL,
3516                                                  cm_id,
3517                                                  IW_CM_EVENT_DISCONNECT,
3518                                                  disconn_status);
3519
3520                        if (ret)
3521                                i40iw_debug(&iwdev->sc_dev,
3522                                            I40IW_DEBUG_CM,
3523                                            "disconnect event failed %s: - cm_id = %p\n",
3524                                            __func__, cm_id);
3525                }
3526                if (issue_close) {
3527                        i40iw_qp_disconnect(iwqp);
3528                        cm_id->provider_data = iwqp;
3529                        ret = i40iw_send_cm_event(NULL, cm_id, IW_CM_EVENT_CLOSE, 0);
3530                        if (ret)
3531                                i40iw_debug(&iwdev->sc_dev,
3532                                            I40IW_DEBUG_CM,
3533                                            "close event failed %s: - cm_id = %p\n",
3534                                            __func__, cm_id);
3535                        cm_id->rem_ref(cm_id);
3536                }
3537        }
3538}
3539
3540/**
3541 * i40iw_disconnect_worker - worker for connection close
3542 * @work: points to disconn structure
3543 */
3544static void i40iw_disconnect_worker(struct work_struct *work)
3545{
3546        struct disconn_work *dwork = container_of(work, struct disconn_work, work);
3547        struct i40iw_qp *iwqp = dwork->iwqp;
3548
3549        kfree(dwork);
3550        i40iw_cm_disconn_true(iwqp);
3551        i40iw_rem_ref(&iwqp->ibqp);
3552}
3553
3554/**
3555 * i40iw_accept - registered call for connection to be accepted
3556 * @cm_id: cm information for passive connection
3557 * @conn_param: accept parameters
3558 */
3559int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
3560{
3561        struct ib_qp *ibqp;
3562        struct i40iw_qp *iwqp;
3563        struct i40iw_device *iwdev;
3564        struct i40iw_sc_dev *dev;
3565        struct i40iw_cm_node *cm_node;
3566        struct ib_qp_attr attr;
3567        int passive_state;
3568        struct ib_mr *ibmr;
3569        struct i40iw_pd *iwpd;
3570        u16 buf_len = 0;
3571        struct i40iw_kmem_info accept;
3572        enum i40iw_status_code status;
3573        u64 tagged_offset;
3574
3575        memset(&attr, 0, sizeof(attr));
3576        ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);
3577        if (!ibqp)
3578                return -EINVAL;
3579
3580        iwqp = to_iwqp(ibqp);
3581        iwdev = iwqp->iwdev;
3582        dev = &iwdev->sc_dev;
3583        cm_node = (struct i40iw_cm_node *)cm_id->provider_data;
3584
3585        if (((struct sockaddr_in *)&cm_id->local_addr)->sin_family == AF_INET) {
3586                cm_node->ipv4 = true;
3587                cm_node->vlan_id = i40iw_get_vlan_ipv4(cm_node->loc_addr);
3588        } else {
3589                cm_node->ipv4 = false;
3590                i40iw_netdev_vlan_ipv6(cm_node->loc_addr, &cm_node->vlan_id, NULL);
3591        }
3592        i40iw_debug(cm_node->dev,
3593                    I40IW_DEBUG_CM,
3594                    "Accept vlan_id=%d\n",
3595                    cm_node->vlan_id);
3596        if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {
3597                if (cm_node->loopbackpartner)
3598                        i40iw_rem_ref_cm_node(cm_node->loopbackpartner);
3599                i40iw_rem_ref_cm_node(cm_node);
3600                return -EINVAL;
3601        }
3602
3603        passive_state = atomic_add_return(1, &cm_node->passive_state);
3604        if (passive_state == I40IW_SEND_RESET_EVENT) {
3605                i40iw_rem_ref_cm_node(cm_node);
3606                return -ECONNRESET;
3607        }
3608
3609        cm_node->cm_core->stats_accepts++;
3610        iwqp->cm_node = (void *)cm_node;
3611        cm_node->iwqp = iwqp;
3612
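            /* the accept buffer carries the MPA reply frame followed by
             * the caller's private data
             */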
3613        buf_len = conn_param->private_data_len + I40IW_MAX_IETF_SIZE;
3614
3615        status = i40iw_allocate_dma_mem(dev->hw, &iwqp->ietf_mem, buf_len, 1);
3616
3617        if (status)
3618                return -ENOMEM;
3619        cm_node->pdata.size = conn_param->private_data_len;
3620        accept.addr = iwqp->ietf_mem.va;
3621        accept.size = i40iw_cm_build_mpa_frame(cm_node, &accept, MPA_KEY_REPLY);
3622        memcpy(accept.addr + accept.size, conn_param->private_data,
3623               conn_param->private_data_len);
3624
3625        /* setup our first outgoing iWarp send WQE (the IETF frame response) */
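            /* loopback connections skip the wire LSMM below, so no DMA
             * buffer or memory region registration is needed for them
             */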
3626        if ((cm_node->ipv4 &&
3627             !i40iw_ipv4_is_loopback(cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
3628            (!cm_node->ipv4 &&
3629             !i40iw_ipv6_is_loopback(cm_node->loc_addr, cm_node->rem_addr))) {
3630                iwpd = iwqp->iwpd;
3631                tagged_offset = (uintptr_t)iwqp->ietf_mem.va;
3632                ibmr = i40iw_reg_phys_mr(&iwpd->ibpd,
3633                                         iwqp->ietf_mem.pa,
3634                                         buf_len,
3635                                         IB_ACCESS_LOCAL_WRITE,
3636                                         &tagged_offset);
3637                if (IS_ERR(ibmr)) {
3638                        i40iw_free_dma_mem(dev->hw, &iwqp->ietf_mem);
3639                        return -ENOMEM;
3640                }
3641
3642                ibmr->pd = &iwpd->ibpd;
3643                ibmr->device = iwpd->ibpd.device;
3644                iwqp->lsmm_mr = ibmr;
3645                if (iwqp->page)
3646                        iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
3647                dev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp,
3648                                                        iwqp->ietf_mem.va,
3649                                                        (accept.size + conn_param->private_data_len),
3650                                                        ibmr->lkey);
3651
3652        } else {
3653                if (iwqp->page)
3654                        iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
3655                dev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp, NULL, 0, 0);
3656        }
3657
3658        if (iwqp->page)
3659                kunmap(iwqp->page);
3660
3661        iwqp->cm_id = cm_id;
3662        cm_node->cm_id = cm_id;
3663
3664        cm_id->provider_data = (void *)iwqp;
3665        iwqp->active_conn = 0;
3666
3667        cm_node->lsmm_size = accept.size + conn_param->private_data_len;
3668        i40iw_cm_init_tsa_conn(iwqp, cm_node);
3669        cm_id->add_ref(cm_id);
3670        i40iw_add_ref(&iwqp->ibqp);
3671
3672        i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);
3673
3674        attr.qp_state = IB_QPS_RTS;
3675        cm_node->qhash_set = false;
3676        i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
3677        if (cm_node->loopbackpartner) {
3678                cm_node->loopbackpartner->pdata.size = conn_param->private_data_len;
3679
3680                /* copy entire MPA frame to our cm_node's frame */
3681                memcpy(cm_node->loopbackpartner->pdata_buf,
3682                       conn_param->private_data,
3683                       conn_param->private_data_len);
3684                i40iw_create_event(cm_node->loopbackpartner, I40IW_CM_EVENT_CONNECTED);
3685        }
3686
3687        cm_node->accelerated = 1;
3688        if (cm_node->accept_pend) {
3689                if (!cm_node->listener)
3690                        i40iw_pr_err("cm_node->listener NULL for passive node\n");
3691                atomic_dec(&cm_node->listener->pend_accepts_cnt);
3692                cm_node->accept_pend = 0;
3693        }
3694        return 0;
3695}
3696
3697/**
3698 * i40iw_reject - registered call for connection to be rejected
3699 * @cm_id: cm information for passive connection
3700 * @pdata: private data to be sent
3701 * @pdata_len: private data length
3702 */
3703int i40iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
3704{
3705        struct i40iw_device *iwdev;
3706        struct i40iw_cm_node *cm_node;
3707        struct i40iw_cm_node *loopback;
3708
3709        cm_node = (struct i40iw_cm_node *)cm_id->provider_data;
3710        loopback = cm_node->loopbackpartner;
3711        cm_node->cm_id = cm_id;
3712        cm_node->pdata.size = pdata_len;
3713
3714        iwdev = to_iwdev(cm_id->device);
3715        if (!iwdev)
3716                return -EINVAL;
3717        cm_node->cm_core->stats_rejects++;
3718
3719        if (pdata_len + sizeof(struct ietf_mpa_v2) > MAX_CM_BUFFER)
3720                return -EINVAL;
3721
3722        if (loopback) {
3723                memcpy(&loopback->pdata_buf, pdata, pdata_len);
3724                loopback->pdata.size = pdata_len;
3725        }
3726
3727        return i40iw_cm_reject(cm_node, pdata, pdata_len);
3728}
3729
3730/**
3731 * i40iw_connect - registered call for connection to be established
3732 * @cm_id: cm information for active connection
3733 * @conn_param: Information about the connection
3734 */
3735int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
3736{
3737        struct ib_qp *ibqp;
3738        struct i40iw_qp *iwqp;
3739        struct i40iw_device *iwdev;
3740        struct i40iw_cm_node *cm_node;
3741        struct i40iw_cm_info cm_info;
3742        struct sockaddr_in *laddr;
3743        struct sockaddr_in *raddr;
3744        struct sockaddr_in6 *laddr6;
3745        struct sockaddr_in6 *raddr6;
3746        bool qhash_set = false;
3747        int apbvt_set = 0;
3748        int err = 0;
3749        enum i40iw_status_code status;
3750
3751        ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);
3752        if (!ibqp)
3753                return -EINVAL;
3754        iwqp = to_iwqp(ibqp);
3755        if (!iwqp)
3756                return -EINVAL;
3757        iwdev = to_iwdev(iwqp->ibqp.device);
3758        if (!iwdev)
3759                return -EINVAL;
3760
3761        laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
3762        raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
3763        laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
3764        raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
3765
3766        if (!(laddr->sin_port) || !(raddr->sin_port))
3767                return -EINVAL;
3768
3769        iwqp->active_conn = 1;
3770        iwqp->cm_id = NULL;
3771        cm_id->provider_data = iwqp;
3772
3773        /* set up the connection params for the node */
3774        if (cm_id->remote_addr.ss_family == AF_INET) {
3775                cm_info.ipv4 = true;
3776                memset(cm_info.loc_addr, 0, sizeof(cm_info.loc_addr));
3777                memset(cm_info.rem_addr, 0, sizeof(cm_info.rem_addr));
3778                cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr);
3779                cm_info.rem_addr[0] = ntohl(raddr->sin_addr.s_addr);
3780                cm_info.loc_port = ntohs(laddr->sin_port);
3781                cm_info.rem_port = ntohs(raddr->sin_port);
3782                cm_info.vlan_id = i40iw_get_vlan_ipv4(cm_info.loc_addr);
3783        } else {
3784                cm_info.ipv4 = false;
3785                i40iw_copy_ip_ntohl(cm_info.loc_addr,
3786                                    laddr6->sin6_addr.in6_u.u6_addr32);
3787                i40iw_copy_ip_ntohl(cm_info.rem_addr,
3788                                    raddr6->sin6_addr.in6_u.u6_addr32);
3789                cm_info.loc_port = ntohs(laddr6->sin6_port);
3790                cm_info.rem_port = ntohs(raddr6->sin6_port);
3791                i40iw_netdev_vlan_ipv6(cm_info.loc_addr, &cm_info.vlan_id, NULL);
3792        }
3793        cm_info.cm_id = cm_id;
3794        cm_info.tos = cm_id->tos;
3795        cm_info.user_pri = rt_tos2priority(cm_id->tos);
3796        i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "%s TOS:[%d] UP:[%d]\n",
3797                    __func__, cm_id->tos, cm_info.user_pri);
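            /* for non-loopback destinations, add a quad hash entry for
             * this 4-tuple and reserve the local port via APBVT before
             * creating the cm_node
             */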
3798        if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) ||
3799            (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32,
3800                                     raddr6->sin6_addr.in6_u.u6_addr32,
3801                                     sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) {
3802                status = i40iw_manage_qhash(iwdev,
3803                                            &cm_info,
3804                                            I40IW_QHASH_TYPE_TCP_ESTABLISHED,
3805                                            I40IW_QHASH_MANAGE_TYPE_ADD,
3806                                            NULL,
3807                                            true);
3808                if (status)
3809                        return -EINVAL;
3810                qhash_set = true;
3811        }
3812        status = i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD);
3813        if (status) {
3814                i40iw_manage_qhash(iwdev,
3815                                   &cm_info,
3816                                   I40IW_QHASH_TYPE_TCP_ESTABLISHED,
3817                                   I40IW_QHASH_MANAGE_TYPE_DELETE,
3818                                   NULL,
3819                                   false);
3820                return -EINVAL;
3821        }
3822
3823        apbvt_set = 1;
3824        cm_id->add_ref(cm_id);
3825        cm_node = i40iw_create_cm_node(&iwdev->cm_core, iwdev,
3826                                       conn_param->private_data_len,
3827                                       (void *)conn_param->private_data,
3828                                       &cm_info);
3829
3830        if (IS_ERR(cm_node)) {
3831                err = PTR_ERR(cm_node);
3832                goto err_out;
3833        }
3834
3835        i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord);
3836        if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO &&
3837            !cm_node->ord_size)
3838                cm_node->ord_size = 1;
3839
3840        cm_node->apbvt_set = apbvt_set;
3841        cm_node->qhash_set = qhash_set;
3842        iwqp->cm_node = cm_node;
3843        cm_node->iwqp = iwqp;
3844        iwqp->cm_id = cm_id;
3845        i40iw_add_ref(&iwqp->ibqp);
3846
3847        if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
3848                cm_node->state = I40IW_CM_STATE_SYN_SENT;
3849                err = i40iw_send_syn(cm_node, 0);
3850                if (err) {
3851                        i40iw_rem_ref_cm_node(cm_node);
3852                        goto err_out;
3853                }
3854        }
3855
3856        i40iw_debug(cm_node->dev,
3857                    I40IW_DEBUG_CM,
3858                    "Api - connect(): port=0x%04x, cm_node=%p, cm_id = %p.\n",
3859                    cm_node->rem_port,
3860                    cm_node,
3861                    cm_node->cm_id);
3862        return 0;
3863
3864err_out:
3865        if (cm_info.ipv4)
3866                i40iw_debug(&iwdev->sc_dev,
3867                            I40IW_DEBUG_CM,
3868                            "Api - connect() FAILED: dest addr=%pI4",
3869                            cm_info.rem_addr);
3870        else
3871                i40iw_debug(&iwdev->sc_dev,
3872                            I40IW_DEBUG_CM,
3873                            "Api - connect() FAILED: dest addr=%pI6",
3874                            cm_info.rem_addr);
3875
3876        if (qhash_set)
3877                i40iw_manage_qhash(iwdev,
3878                                   &cm_info,
3879                                   I40IW_QHASH_TYPE_TCP_ESTABLISHED,
3880                                   I40IW_QHASH_MANAGE_TYPE_DELETE,
3881                                   NULL,
3882                                   false);
3883
3884        if (apbvt_set && !i40iw_listen_port_in_use(&iwdev->cm_core,
3885                                                   cm_info.loc_port))
3886                i40iw_manage_apbvt(iwdev,
3887                                   cm_info.loc_port,
3888                                   I40IW_MANAGE_APBVT_DEL);
3889        cm_id->rem_ref(cm_id);
3890        iwdev->cm_core.stats_connect_errs++;
3891        return err;
3892}
3893
3894/**
3895 * i40iw_create_listen - registered call creating listener
3896 * @cm_id: cm information for passive connection
3897 * @backlog: maximum number of pending accepts
3898 */
3899int i40iw_create_listen(struct iw_cm_id *cm_id, int backlog)
3900{
3901        struct i40iw_device *iwdev;
3902        struct i40iw_cm_listener *cm_listen_node;
3903        struct i40iw_cm_info cm_info;
3904        enum i40iw_status_code ret;
3905        struct sockaddr_in *laddr;
3906        struct sockaddr_in6 *laddr6;
3907        bool wildcard = false;
3908
3909        iwdev = to_iwdev(cm_id->device);
3910        if (!iwdev)
3911                return -EINVAL;
3912
3913        laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
3914        laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
3915        memset(&cm_info, 0, sizeof(cm_info));
3916        if (laddr->sin_family == AF_INET) {
3917                cm_info.ipv4 = true;
3918                cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr);
3919                cm_info.loc_port = ntohs(laddr->sin_port);
3920
3921                if (laddr->sin_addr.s_addr != INADDR_ANY)
3922                        cm_info.vlan_id = i40iw_get_vlan_ipv4(cm_info.loc_addr);
3923                else
3924                        wildcard = true;
3925
3926        } else {
3927                cm_info.ipv4 = false;
3928                i40iw_copy_ip_ntohl(cm_info.loc_addr,
3929                                    laddr6->sin6_addr.in6_u.u6_addr32);
3930                cm_info.loc_port = ntohs(laddr6->sin6_port);
3931                if (ipv6_addr_type(&laddr6->sin6_addr) != IPV6_ADDR_ANY)
3932                        i40iw_netdev_vlan_ipv6(cm_info.loc_addr,
3933                                               &cm_info.vlan_id,
3934                                               NULL);
3935                else
3936                        wildcard = true;
3937        }
3938        cm_info.backlog = backlog;
3939        cm_info.cm_id = cm_id;
3940
3941        cm_listen_node = i40iw_make_listen_node(&iwdev->cm_core, iwdev, &cm_info);
3942        if (!cm_listen_node) {
3943                i40iw_pr_err("cm_listen_node == NULL\n");
3944                return -ENOMEM;
3945        }
3946
3947        cm_id->provider_data = cm_listen_node;
3948
3949        cm_listen_node->tos = cm_id->tos;
3950        cm_listen_node->user_pri = rt_tos2priority(cm_id->tos);
3951        cm_info.user_pri = cm_listen_node->user_pri;
3952
3953        if (!cm_listen_node->reused_node) {
3954                if (wildcard) {
3955                        if (cm_info.ipv4)
3956                                ret = i40iw_add_mqh_4(iwdev,
3957                                                      &cm_info,
3958                                                      cm_listen_node);
3959                        else
3960                                ret = i40iw_add_mqh_6(iwdev,
3961                                                      &cm_info,
3962                                                      cm_listen_node);
3963                        if (ret)
3964                                goto error;
3965
3966                        ret = i40iw_manage_apbvt(iwdev,
3967                                                 cm_info.loc_port,
3968                                                 I40IW_MANAGE_APBVT_ADD);
3969
3970                        if (ret)
3971                                goto error;
3972                } else {
3973                        ret = i40iw_manage_qhash(iwdev,
3974                                                 &cm_info,
3975                                                 I40IW_QHASH_TYPE_TCP_SYN,
3976                                                 I40IW_QHASH_MANAGE_TYPE_ADD,
3977                                                 NULL,
3978                                                 true);
3979                        if (ret)
3980                                goto error;
3981                        cm_listen_node->qhash_set = true;
3982                        ret = i40iw_manage_apbvt(iwdev,
3983                                                 cm_info.loc_port,
3984                                                 I40IW_MANAGE_APBVT_ADD);
3985                        if (ret)
3986                                goto error;
3987                }
3988        }
3989        cm_id->add_ref(cm_id);
3990        cm_listen_node->cm_core->stats_listen_created++;
3991        return 0;
3992 error:
3993        i40iw_cm_del_listen(&iwdev->cm_core, (void *)cm_listen_node, false);
3994        return -EINVAL;
3995}
3996
3997/**
3998 * i40iw_destroy_listen - registered call to destroy listener
3999 * @cm_id: cm information for passive connection
4000 */
4001int i40iw_destroy_listen(struct iw_cm_id *cm_id)
4002{
4003        struct i40iw_device *iwdev;
4004
4005        iwdev = to_iwdev(cm_id->device);
4006        if (cm_id->provider_data)
4007                i40iw_cm_del_listen(&iwdev->cm_core, cm_id->provider_data, true);
4008        else
4009                i40iw_pr_err("cm_id->provider_data was NULL\n");
4010
4011        cm_id->rem_ref(cm_id);
4012
4013        return 0;
4014}
4015
4016/**
4017 * i40iw_cm_event_connected - handle connected active node
4018 * @event: the info for cm_node of connection
4019 */
4020static void i40iw_cm_event_connected(struct i40iw_cm_event *event)
4021{
4022        struct i40iw_qp *iwqp;
4023        struct i40iw_device *iwdev;
4024        struct i40iw_cm_node *cm_node;
4025        struct i40iw_sc_dev *dev;
4026        struct ib_qp_attr attr;
4027        struct iw_cm_id *cm_id;
4028        int status;
4029        bool read0;
4030
4031        cm_node = event->cm_node;
4032        cm_id = cm_node->cm_id;
4033        iwqp = (struct i40iw_qp *)cm_id->provider_data;
4034        iwdev = to_iwdev(iwqp->ibqp.device);
4035        dev = &iwdev->sc_dev;
4036
4037        if (iwqp->destroyed) {
4038                status = -ETIMEDOUT;
4039                goto error;
4040        }
4041        i40iw_cm_init_tsa_conn(iwqp, cm_node);
4042        read0 = (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO);
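            /*
             * Post the initial zero-length RDMA operation (an RDMA read when
             * send_rdma0_op is SEND_RDMA_READ_ZERO); the SQ page is mapped
             * only for the duration of the post.
             */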
4043        if (iwqp->page)
4044                iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
4045        dev->iw_priv_qp_ops->qp_send_rtt(&iwqp->sc_qp, read0);
4046        if (iwqp->page)
4047                kunmap(iwqp->page);
4048        status = i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY, 0);
4049        if (status)
4050                i40iw_pr_err("send cm event\n");
4051
4052        memset(&attr, 0, sizeof(attr));
4053        attr.qp_state = IB_QPS_RTS;
4054        cm_node->qhash_set = false;
4055        i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
4056
4057        cm_node->accelerated = 1;
4058        if (cm_node->accept_pend) {
4059                if (!cm_node->listener)
4060                        i40iw_pr_err("listener is null for passive node\n");
4061                atomic_dec(&cm_node->listener->pend_accepts_cnt);
4062                cm_node->accept_pend = 0;
4063        }
4064        return;
4065
4066error:
4067        iwqp->cm_id = NULL;
4068        cm_id->provider_data = NULL;
4069        i40iw_send_cm_event(event->cm_node,
4070                            cm_id,
4071                            IW_CM_EVENT_CONNECT_REPLY,
4072                            status);
4073        cm_id->rem_ref(cm_id);
4074        i40iw_rem_ref_cm_node(event->cm_node);
4075}
4076
4077/**
4078 * i40iw_cm_event_reset - handle reset
4079 * @event: the info for cm_node of connection
4080 */
4081static void i40iw_cm_event_reset(struct i40iw_cm_event *event)
4082{
4083        struct i40iw_cm_node *cm_node = event->cm_node;
4084        struct iw_cm_id   *cm_id = cm_node->cm_id;
4085        struct i40iw_qp *iwqp;
4086
4087        if (!cm_id)
4088                return;
4089
4090        iwqp = cm_id->provider_data;
4091        if (!iwqp)
4092                return;
4093
4094        i40iw_debug(cm_node->dev,
4095                    I40IW_DEBUG_CM,
4096                    "reset event %p - cm_id = %p\n",
4097                    event->cm_node, cm_id);
4098        iwqp->cm_id = NULL;
4099
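            /* Report the reset upward as a disconnect followed by a close. */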
4100        i40iw_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_DISCONNECT, -ECONNRESET);
4101        i40iw_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_CLOSE, 0);
4102}
4103
4104/**
4105 * i40iw_cm_event_handler - worker thread callback to send event to cm upper layer
4106 * @work: pointer to the work_struct of the cm event info
4107 */
4108static void i40iw_cm_event_handler(struct work_struct *work)
4109{
4110        struct i40iw_cm_event *event = container_of(work,
4111                                                    struct i40iw_cm_event,
4112                                                    event_work);
4113        struct i40iw_cm_node *cm_node;
4114
4115        if (!event || !event->cm_node || !event->cm_node->cm_core)
4116                return;
4117
4118        cm_node = event->cm_node;
4119
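            /*
             * Events for a connection whose cm_id is gone, or whose state no
             * longer expects them, are dropped by the per-type checks below.
             */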
4120        switch (event->type) {
4121        case I40IW_CM_EVENT_MPA_REQ:
4122                i40iw_send_cm_event(cm_node,
4123                                    cm_node->cm_id,
4124                                    IW_CM_EVENT_CONNECT_REQUEST,
4125                                    0);
4126                break;
4127        case I40IW_CM_EVENT_RESET:
4128                i40iw_cm_event_reset(event);
4129                break;
4130        case I40IW_CM_EVENT_CONNECTED:
4131                if (!event->cm_node->cm_id ||
4132                    (event->cm_node->state != I40IW_CM_STATE_OFFLOADED))
4133                        break;
4134                i40iw_cm_event_connected(event);
4135                break;
4136        case I40IW_CM_EVENT_MPA_REJECT:
4137                if (!event->cm_node->cm_id ||
4138                    (cm_node->state == I40IW_CM_STATE_OFFLOADED))
4139                        break;
4140                i40iw_send_cm_event(cm_node,
4141                                    cm_node->cm_id,
4142                                    IW_CM_EVENT_CONNECT_REPLY,
4143                                    -ECONNREFUSED);
4144                break;
4145        case I40IW_CM_EVENT_ABORTED:
4146                if (!event->cm_node->cm_id ||
4147                    (event->cm_node->state == I40IW_CM_STATE_OFFLOADED))
4148                        break;
4149                i40iw_event_connect_error(event);
4150                break;
4151        default:
4152                i40iw_pr_err("event type = %d\n", event->type);
4153                break;
4154        }
4155
4156        event->cm_info.cm_id->rem_ref(event->cm_info.cm_id);
4157        i40iw_rem_ref_cm_node(event->cm_node);
4158        kfree(event);
4159}
4160
4161/**
4162 * i40iw_cm_post_event - queue event request for worker thread
4163 * @event: cm node's info for up event call
4164 */
4165static void i40iw_cm_post_event(struct i40iw_cm_event *event)
4166{
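            /*
             * Hold the cm_node and cm_id until the worker has delivered the
             * event; both references are dropped at the end of
             * i40iw_cm_event_handler().
             */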
4167        atomic_inc(&event->cm_node->ref_count);
4168        event->cm_info.cm_id->add_ref(event->cm_info.cm_id);
4169        INIT_WORK(&event->event_work, i40iw_cm_event_handler);
4170
4171        queue_work(event->cm_node->cm_core->event_wq, &event->event_work);
4172}
4173
4174/**
4175 * i40iw_qhash_ctrl - enable/disable qhash for list
4176 * @iwdev: device pointer
4177 * @parent_listen_node: parent listen node
4178 * @nfo: cm info node
4179 * @ipaddr: Pointer to IPv4 or IPv6 address
4180 * @ipv4: flag indicating IPv4 when true
4181 * @ifup: flag indicating interface up when true
4182 *
4183 * Enables or disables the qhash for the node in the child
4184 * listen list that matches ipaddr. If no matching IP was found
4185 * it will allocate and add a new child listen node to the
4186 * parent listen node. The listen_list_lock is assumed to be
4187 * held when called.
4188 */
4189static void i40iw_qhash_ctrl(struct i40iw_device *iwdev,
4190                             struct i40iw_cm_listener *parent_listen_node,
4191                             struct i40iw_cm_info *nfo,
4192                             u32 *ipaddr, bool ipv4, bool ifup)
4193{
4194        struct list_head *child_listen_list = &parent_listen_node->child_listen_list;
4195        struct i40iw_cm_listener *child_listen_node;
4196        struct list_head *pos, *tpos;
4197        enum i40iw_status_code ret;
4198        bool node_allocated = false;
4199        enum i40iw_quad_hash_manage_type op =
4200                ifup ? I40IW_QHASH_MANAGE_TYPE_ADD : I40IW_QHASH_MANAGE_TYPE_DELETE;
4201
4202        list_for_each_safe(pos, tpos, child_listen_list) {
4203                child_listen_node =
4204                        list_entry(pos,
4205                                   struct i40iw_cm_listener,
4206                                   child_listen_list);
4207                if (!memcmp(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16))
4208                        goto set_qhash;
4209        }
4210
4211        /* if not found then add a child listener if interface is going up */
4212        if (!ifup)
4213                return;
4214        child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_ATOMIC);
4215        if (!child_listen_node)
4216                return;
4217        node_allocated = true;
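            /*
             * Start the child as a copy of the parent with only the local
             * address overridden; it is linked into the parent's child list
             * only if the qhash add below succeeds.
             */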
4218        memcpy(child_listen_node, parent_listen_node, sizeof(*child_listen_node));
4219
4220        memcpy(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16);
4221
4222set_qhash:
4223        memcpy(nfo->loc_addr,
4224               child_listen_node->loc_addr,
4225               sizeof(nfo->loc_addr));
4226        nfo->vlan_id = child_listen_node->vlan_id;
4227        ret = i40iw_manage_qhash(iwdev, nfo,
4228                                 I40IW_QHASH_TYPE_TCP_SYN,
4229                                 op,
4230                                 NULL, false);
4231        if (!ret) {
4232                child_listen_node->qhash_set = ifup;
4233                if (node_allocated)
4234                        list_add(&child_listen_node->child_listen_list,
4235                                 &parent_listen_node->child_listen_list);
4236        } else if (node_allocated) {
4237                kfree(child_listen_node);
4238        }
4239}
4240
4241/**
4242 * i40iw_cm_disconnect_all - disconnect all connected QPs
4243 * @iwdev: device pointer
4244 */
4245void i40iw_cm_disconnect_all(struct i40iw_device *iwdev)
4246{
4247        struct i40iw_cm_core *cm_core = &iwdev->cm_core;
4248        struct list_head *list_core_temp;
4249        struct list_head *list_node;
4250        struct i40iw_cm_node *cm_node;
4251        unsigned long flags;
4252        struct list_head connected_list;
4253        struct ib_qp_attr attr;
4254
4255        INIT_LIST_HEAD(&connected_list);
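            /*
             * Take a reference on each connected node under ht_lock, then
             * move its QP to the error state after the lock is dropped.
             */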
4256        spin_lock_irqsave(&cm_core->ht_lock, flags);
4257        list_for_each_safe(list_node, list_core_temp, &cm_core->connected_nodes) {
4258                cm_node = container_of(list_node, struct i40iw_cm_node, list);
4259                atomic_inc(&cm_node->ref_count);
4260                list_add(&cm_node->connected_entry, &connected_list);
4261        }
4262        spin_unlock_irqrestore(&cm_core->ht_lock, flags);
4263
4264        list_for_each_safe(list_node, list_core_temp, &connected_list) {
4265                cm_node = container_of(list_node, struct i40iw_cm_node, connected_entry);
4266                attr.qp_state = IB_QPS_ERR;
4267                i40iw_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
4268                i40iw_rem_ref_cm_node(cm_node);
4269        }
4270}
4271
4272/**
4273 * i40iw_if_notify - process an ifdown/ifup notification on an interface
4274 * @iwdev: device pointer
 * @netdev: network device for the interface
4275 * @ipaddr: Pointer to IPv4 or IPv6 address
4276 * @ipv4: flag indicating IPv4 when true
4277 * @ifup: flag indicating interface up when true
4278 */
4279void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
4280                     u32 *ipaddr, bool ipv4, bool ifup)
4281{
4282        struct i40iw_cm_core *cm_core = &iwdev->cm_core;
4283        unsigned long flags;
4284        struct i40iw_cm_listener *listen_node;
4285        static const u32 ip_zero[4] = { 0, 0, 0, 0 };
4286        struct i40iw_cm_info nfo;
4287        u16 vlan_id = rdma_vlan_dev_vlan_id(netdev);
4288        enum i40iw_status_code ret;
4289        enum i40iw_quad_hash_manage_type op =
4290                ifup ? I40IW_QHASH_MANAGE_TYPE_ADD : I40IW_QHASH_MANAGE_TYPE_DELETE;
4291
4292        /* Disable or enable qhash for listeners */
4293        spin_lock_irqsave(&cm_core->listen_list_lock, flags);
4294        list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
4295                if (vlan_id == listen_node->vlan_id &&
4296                    (!memcmp(listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16) ||
4297                    !memcmp(listen_node->loc_addr, ip_zero, ipv4 ? 4 : 16))) {
4298                        memcpy(nfo.loc_addr, listen_node->loc_addr,
4299                               sizeof(nfo.loc_addr));
4300                        nfo.loc_port = listen_node->loc_port;
4301                        nfo.ipv4 = listen_node->ipv4;
4302                        nfo.vlan_id = listen_node->vlan_id;
4303                        nfo.user_pri = listen_node->user_pri;
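                            /*
                             * Wildcard listeners keep per-address child
                             * listeners; update the matching child's qhash.
                             * A non-wildcard listener toggles its own qhash
                             * entry.
                             */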
4304                        if (!list_empty(&listen_node->child_listen_list)) {
4305                                i40iw_qhash_ctrl(iwdev,
4306                                                 listen_node,
4307                                                 &nfo,
4308                                                 ipaddr, ipv4, ifup);
4309                        } else if (memcmp(listen_node->loc_addr, ip_zero,
4310                                          ipv4 ? 4 : 16)) {
4311                                ret = i40iw_manage_qhash(iwdev,
4312                                                         &nfo,
4313                                                         I40IW_QHASH_TYPE_TCP_SYN,
4314                                                         op,
4315                                                         NULL,
4316                                                         false);
4317                                if (!ret)
4318                                        listen_node->qhash_set = ifup;
4319                        }
4320                }
4321        }
4322        spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
4323
4324        /* disconnect any connected QPs on ifdown */
4325        if (!ifup)
4326                i40iw_cm_disconnect_all(iwdev);
4327}
4328