linux/net/smc/smc_core.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  Shared Memory Communications over RDMA (SMC-R) and RoCE
   4 *
   5 *  Basic Transport Functions exploiting Infiniband API
   6 *
   7 *  Copyright IBM Corp. 2016
   8 *
   9 *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
  10 */
  11
  12#include <linux/socket.h>
  13#include <linux/if_vlan.h>
  14#include <linux/random.h>
  15#include <linux/workqueue.h>
  16#include <linux/wait.h>
  17#include <linux/reboot.h>
  18#include <linux/mutex.h>
  19#include <linux/list.h>
  20#include <linux/smc.h>
  21#include <net/tcp.h>
  22#include <net/sock.h>
  23#include <rdma/ib_verbs.h>
  24#include <rdma/ib_cache.h>
  25
  26#include "smc.h"
  27#include "smc_clc.h"
  28#include "smc_core.h"
  29#include "smc_ib.h"
  30#include "smc_wr.h"
  31#include "smc_llc.h"
  32#include "smc_cdc.h"
  33#include "smc_close.h"
  34#include "smc_ism.h"
  35#include "smc_netlink.h"
  36#include "smc_stats.h"
  37
  38#define SMC_LGR_NUM_INCR                256
  39#define SMC_LGR_FREE_DELAY_SERV         (600 * HZ)
  40#define SMC_LGR_FREE_DELAY_CLNT         (SMC_LGR_FREE_DELAY_SERV + 10 * HZ)
  41
  42struct smc_lgr_list smc_lgr_list = {    /* established link groups */
  43        .lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock),
  44        .list = LIST_HEAD_INIT(smc_lgr_list.list),
  45        .num = 0,
  46};
  47
  48static atomic_t lgr_cnt = ATOMIC_INIT(0); /* number of existing link groups */
  49static DECLARE_WAIT_QUEUE_HEAD(lgrs_deleted);
  50
  51static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
  52                         struct smc_buf_desc *buf_desc);
  53static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft);
  54
  55static void smc_link_down_work(struct work_struct *work);
  56
  57/* return head of link group list and its lock for a given link group */
  58static inline struct list_head *smc_lgr_list_head(struct smc_link_group *lgr,
  59                                                  spinlock_t **lgr_lock)
  60{
  61        if (lgr->is_smcd) {
  62                *lgr_lock = &lgr->smcd->lgr_lock;
  63                return &lgr->smcd->lgr_list;
  64        }
  65
  66        *lgr_lock = &smc_lgr_list.lock;
  67        return &smc_lgr_list.list;
  68}
  69
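     /* count the links that use each port of an ib device */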
  70static void smc_ibdev_cnt_inc(struct smc_link *lnk)
  71{
  72        atomic_inc(&lnk->smcibdev->lnk_cnt_by_port[lnk->ibport - 1]);
  73}
  74
  75static void smc_ibdev_cnt_dec(struct smc_link *lnk)
  76{
  77        atomic_dec(&lnk->smcibdev->lnk_cnt_by_port[lnk->ibport - 1]);
  78}
  79
  80static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
  81{
  82        /* client link group creation always follows the server link group
  83         * creation. For client use a somewhat higher removal delay time,
  84         * otherwise there is a risk of out-of-sync link groups.
  85         */
  86        if (!lgr->freeing) {
  87                mod_delayed_work(system_wq, &lgr->free_work,
  88                                 (!lgr->is_smcd && lgr->role == SMC_CLNT) ?
  89                                                SMC_LGR_FREE_DELAY_CLNT :
  90                                                SMC_LGR_FREE_DELAY_SERV);
  91        }
  92}
  93
  94/* Register connection's alert token in our lookup structure.
  95 * To use rbtrees we have to implement our own insert core.
  96 * Requires @conns_lock
   97 * @conn        connection to register
   98 * Note: insertion into the rbtree cannot fail, so nothing is returned.
  99 */
 100static void smc_lgr_add_alert_token(struct smc_connection *conn)
 101{
 102        struct rb_node **link, *parent = NULL;
 103        u32 token = conn->alert_token_local;
 104
 105        link = &conn->lgr->conns_all.rb_node;
 106        while (*link) {
 107                struct smc_connection *cur = rb_entry(*link,
 108                                        struct smc_connection, alert_node);
 109
 110                parent = *link;
 111                if (cur->alert_token_local > token)
 112                        link = &parent->rb_left;
 113                else
 114                        link = &parent->rb_right;
 115        }
 116        /* Put the new node there */
 117        rb_link_node(&conn->alert_node, parent, link);
 118        rb_insert_color(&conn->alert_node, &conn->lgr->conns_all);
 119}
 120
 121/* assign an SMC-R link to the connection */
 122static int smcr_lgr_conn_assign_link(struct smc_connection *conn, bool first)
 123{
 124        enum smc_link_state expected = first ? SMC_LNK_ACTIVATING :
 125                                       SMC_LNK_ACTIVE;
 126        int i, j;
 127
 128        /* do link balancing */
 129        for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
 130                struct smc_link *lnk = &conn->lgr->lnk[i];
 131
 132                if (lnk->state != expected || lnk->link_is_asym)
 133                        continue;
 134                if (conn->lgr->role == SMC_CLNT) {
 135                        conn->lnk = lnk; /* temporary, SMC server assigns link*/
 136                        break;
 137                }
 138                if (conn->lgr->conns_num % 2) {
 139                        for (j = i + 1; j < SMC_LINKS_PER_LGR_MAX; j++) {
 140                                struct smc_link *lnk2;
 141
 142                                lnk2 = &conn->lgr->lnk[j];
 143                                if (lnk2->state == expected &&
 144                                    !lnk2->link_is_asym) {
 145                                        conn->lnk = lnk2;
 146                                        break;
 147                                }
 148                        }
 149                }
 150                if (!conn->lnk)
 151                        conn->lnk = lnk;
 152                break;
 153        }
 154        if (!conn->lnk)
 155                return SMC_CLC_DECL_NOACTLINK;
 156        atomic_inc(&conn->lnk->conn_cnt);
 157        return 0;
 158}
 159
 160/* Register connection in link group by assigning an alert token
 161 * registered in a search tree.
 162 * Requires @conns_lock
 163 * Note that '0' is a reserved value and not assigned.
 164 */
 165static int smc_lgr_register_conn(struct smc_connection *conn, bool first)
 166{
 167        struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
 168        static atomic_t nexttoken = ATOMIC_INIT(0);
 169        int rc;
 170
 171        if (!conn->lgr->is_smcd) {
 172                rc = smcr_lgr_conn_assign_link(conn, first);
 173                if (rc)
 174                        return rc;
 175        }
 176        /* find a new alert_token_local value not yet used by some connection
 177         * in this link group
 178         */
 179        sock_hold(&smc->sk); /* sock_put in smc_lgr_unregister_conn() */
 180        while (!conn->alert_token_local) {
 181                conn->alert_token_local = atomic_inc_return(&nexttoken);
 182                if (smc_lgr_find_conn(conn->alert_token_local, conn->lgr))
 183                        conn->alert_token_local = 0;
 184        }
 185        smc_lgr_add_alert_token(conn);
 186        conn->lgr->conns_num++;
 187        return 0;
 188}
 189
  190/* Unregister connection and reset the alert token of the given connection
 191 */
 192static void __smc_lgr_unregister_conn(struct smc_connection *conn)
 193{
 194        struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
 195        struct smc_link_group *lgr = conn->lgr;
 196
 197        rb_erase(&conn->alert_node, &lgr->conns_all);
 198        if (conn->lnk)
 199                atomic_dec(&conn->lnk->conn_cnt);
 200        lgr->conns_num--;
 201        conn->alert_token_local = 0;
 202        sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */
 203}
 204
 205/* Unregister connection from lgr
 206 */
 207static void smc_lgr_unregister_conn(struct smc_connection *conn)
 208{
 209        struct smc_link_group *lgr = conn->lgr;
 210
 211        if (!lgr)
 212                return;
 213        write_lock_bh(&lgr->conns_lock);
 214        if (conn->alert_token_local) {
 215                __smc_lgr_unregister_conn(conn);
 216        }
 217        write_unlock_bh(&lgr->conns_lock);
 218        conn->lgr = NULL;
 219}
 220
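     /* netlink dump of SMC system information: protocol version, release,
      * ISM v2 capability, local hostname and system EID
      */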
 221int smc_nl_get_sys_info(struct sk_buff *skb, struct netlink_callback *cb)
 222{
 223        struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
 224        char hostname[SMC_MAX_HOSTNAME_LEN + 1];
 225        char smc_seid[SMC_MAX_EID_LEN + 1];
 226        struct smcd_dev *smcd_dev;
 227        struct nlattr *attrs;
 228        u8 *seid = NULL;
 229        u8 *host = NULL;
 230        void *nlh;
 231
 232        nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
 233                          &smc_gen_nl_family, NLM_F_MULTI,
 234                          SMC_NETLINK_GET_SYS_INFO);
 235        if (!nlh)
 236                goto errmsg;
 237        if (cb_ctx->pos[0])
 238                goto errout;
 239        attrs = nla_nest_start(skb, SMC_GEN_SYS_INFO);
 240        if (!attrs)
 241                goto errout;
 242        if (nla_put_u8(skb, SMC_NLA_SYS_VER, SMC_V2))
 243                goto errattr;
 244        if (nla_put_u8(skb, SMC_NLA_SYS_REL, SMC_RELEASE))
 245                goto errattr;
 246        if (nla_put_u8(skb, SMC_NLA_SYS_IS_ISM_V2, smc_ism_is_v2_capable()))
 247                goto errattr;
 248        smc_clc_get_hostname(&host);
 249        if (host) {
 250                memcpy(hostname, host, SMC_MAX_HOSTNAME_LEN);
 251                hostname[SMC_MAX_HOSTNAME_LEN] = 0;
 252                if (nla_put_string(skb, SMC_NLA_SYS_LOCAL_HOST, hostname))
 253                        goto errattr;
 254        }
 255        mutex_lock(&smcd_dev_list.mutex);
 256        smcd_dev = list_first_entry_or_null(&smcd_dev_list.list,
 257                                            struct smcd_dev, list);
 258        if (smcd_dev)
 259                smc_ism_get_system_eid(smcd_dev, &seid);
 260        mutex_unlock(&smcd_dev_list.mutex);
 261        if (seid && smc_ism_is_v2_capable()) {
 262                memcpy(smc_seid, seid, SMC_MAX_EID_LEN);
 263                smc_seid[SMC_MAX_EID_LEN] = 0;
 264                if (nla_put_string(skb, SMC_NLA_SYS_SEID, smc_seid))
 265                        goto errattr;
 266        }
 267        nla_nest_end(skb, attrs);
 268        genlmsg_end(skb, nlh);
 269        cb_ctx->pos[0] = 1;
 270        return skb->len;
 271
 272errattr:
 273        nla_nest_cancel(skb, attrs);
 274errout:
 275        genlmsg_cancel(skb, nlh);
 276errmsg:
 277        return skb->len;
 278}
 279
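     /* add the attributes of one SMC-R link group to a netlink message */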
 280static int smc_nl_fill_lgr(struct smc_link_group *lgr,
 281                           struct sk_buff *skb,
 282                           struct netlink_callback *cb)
 283{
 284        char smc_target[SMC_MAX_PNETID_LEN + 1];
 285        struct nlattr *attrs;
 286
 287        attrs = nla_nest_start(skb, SMC_GEN_LGR_SMCR);
 288        if (!attrs)
 289                goto errout;
 290
 291        if (nla_put_u32(skb, SMC_NLA_LGR_R_ID, *((u32 *)&lgr->id)))
 292                goto errattr;
 293        if (nla_put_u32(skb, SMC_NLA_LGR_R_CONNS_NUM, lgr->conns_num))
 294                goto errattr;
 295        if (nla_put_u8(skb, SMC_NLA_LGR_R_ROLE, lgr->role))
 296                goto errattr;
 297        if (nla_put_u8(skb, SMC_NLA_LGR_R_TYPE, lgr->type))
 298                goto errattr;
 299        if (nla_put_u8(skb, SMC_NLA_LGR_R_VLAN_ID, lgr->vlan_id))
 300                goto errattr;
 301        memcpy(smc_target, lgr->pnet_id, SMC_MAX_PNETID_LEN);
 302        smc_target[SMC_MAX_PNETID_LEN] = 0;
 303        if (nla_put_string(skb, SMC_NLA_LGR_R_PNETID, smc_target))
 304                goto errattr;
 305
 306        nla_nest_end(skb, attrs);
 307        return 0;
 308errattr:
 309        nla_nest_cancel(skb, attrs);
 310errout:
 311        return -EMSGSIZE;
 312}
 313
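     /* emit one netlink message describing a single SMC-R link */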
 314static int smc_nl_fill_lgr_link(struct smc_link_group *lgr,
 315                                struct smc_link *link,
 316                                struct sk_buff *skb,
 317                                struct netlink_callback *cb)
 318{
 319        char smc_ibname[IB_DEVICE_NAME_MAX];
 320        u8 smc_gid_target[41];
 321        struct nlattr *attrs;
 322        u32 link_uid = 0;
 323        void *nlh;
 324
 325        nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
 326                          &smc_gen_nl_family, NLM_F_MULTI,
 327                          SMC_NETLINK_GET_LINK_SMCR);
 328        if (!nlh)
 329                goto errmsg;
 330
 331        attrs = nla_nest_start(skb, SMC_GEN_LINK_SMCR);
 332        if (!attrs)
 333                goto errout;
 334
 335        if (nla_put_u8(skb, SMC_NLA_LINK_ID, link->link_id))
 336                goto errattr;
 337        if (nla_put_u32(skb, SMC_NLA_LINK_STATE, link->state))
 338                goto errattr;
 339        if (nla_put_u32(skb, SMC_NLA_LINK_CONN_CNT,
 340                        atomic_read(&link->conn_cnt)))
 341                goto errattr;
 342        if (nla_put_u8(skb, SMC_NLA_LINK_IB_PORT, link->ibport))
 343                goto errattr;
 344        if (nla_put_u32(skb, SMC_NLA_LINK_NET_DEV, link->ndev_ifidx))
 345                goto errattr;
 346        snprintf(smc_ibname, sizeof(smc_ibname), "%s", link->ibname);
 347        if (nla_put_string(skb, SMC_NLA_LINK_IB_DEV, smc_ibname))
 348                goto errattr;
 349        memcpy(&link_uid, link->link_uid, sizeof(link_uid));
 350        if (nla_put_u32(skb, SMC_NLA_LINK_UID, link_uid))
 351                goto errattr;
 352        memcpy(&link_uid, link->peer_link_uid, sizeof(link_uid));
 353        if (nla_put_u32(skb, SMC_NLA_LINK_PEER_UID, link_uid))
 354                goto errattr;
 355        memset(smc_gid_target, 0, sizeof(smc_gid_target));
 356        smc_gid_be16_convert(smc_gid_target, link->gid);
 357        if (nla_put_string(skb, SMC_NLA_LINK_GID, smc_gid_target))
 358                goto errattr;
 359        memset(smc_gid_target, 0, sizeof(smc_gid_target));
 360        smc_gid_be16_convert(smc_gid_target, link->peer_gid);
 361        if (nla_put_string(skb, SMC_NLA_LINK_PEER_GID, smc_gid_target))
 362                goto errattr;
 363
 364        nla_nest_end(skb, attrs);
 365        genlmsg_end(skb, nlh);
 366        return 0;
 367errattr:
 368        nla_nest_cancel(skb, attrs);
 369errout:
 370        genlmsg_cancel(skb, nlh);
 371errmsg:
 372        return -EMSGSIZE;
 373}
 374
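     /* emit one SMC-R link group and, if requested, one message per usable link */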
 375static int smc_nl_handle_lgr(struct smc_link_group *lgr,
 376                             struct sk_buff *skb,
 377                             struct netlink_callback *cb,
 378                             bool list_links)
 379{
 380        void *nlh;
 381        int i;
 382
 383        nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
 384                          &smc_gen_nl_family, NLM_F_MULTI,
 385                          SMC_NETLINK_GET_LGR_SMCR);
 386        if (!nlh)
 387                goto errmsg;
 388        if (smc_nl_fill_lgr(lgr, skb, cb))
 389                goto errout;
 390
 391        genlmsg_end(skb, nlh);
 392        if (!list_links)
 393                goto out;
 394        for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
 395                if (!smc_link_usable(&lgr->lnk[i]))
 396                        continue;
 397                if (smc_nl_fill_lgr_link(lgr, &lgr->lnk[i], skb, cb))
 398                        goto errout;
 399        }
 400out:
 401        return 0;
 402
 403errout:
 404        genlmsg_cancel(skb, nlh);
 405errmsg:
 406        return -EMSGSIZE;
 407}
 408
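     /* walk the SMC-R link group list and dump each group, resuming at the
      * position stored in the netlink callback context
      */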
 409static void smc_nl_fill_lgr_list(struct smc_lgr_list *smc_lgr,
 410                                 struct sk_buff *skb,
 411                                 struct netlink_callback *cb,
 412                                 bool list_links)
 413{
 414        struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
 415        struct smc_link_group *lgr;
 416        int snum = cb_ctx->pos[0];
 417        int num = 0;
 418
 419        spin_lock_bh(&smc_lgr->lock);
 420        list_for_each_entry(lgr, &smc_lgr->list, list) {
 421                if (num < snum)
 422                        goto next;
 423                if (smc_nl_handle_lgr(lgr, skb, cb, list_links))
 424                        goto errout;
 425next:
 426                num++;
 427        }
 428errout:
 429        spin_unlock_bh(&smc_lgr->lock);
 430        cb_ctx->pos[0] = num;
 431}
 432
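     /* emit one netlink message describing an SMC-D link group */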
 433static int smc_nl_fill_smcd_lgr(struct smc_link_group *lgr,
 434                                struct sk_buff *skb,
 435                                struct netlink_callback *cb)
 436{
 437        char smc_host[SMC_MAX_HOSTNAME_LEN + 1];
 438        char smc_pnet[SMC_MAX_PNETID_LEN + 1];
 439        char smc_eid[SMC_MAX_EID_LEN + 1];
 440        struct nlattr *v2_attrs;
 441        struct nlattr *attrs;
 442        void *nlh;
 443
 444        nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
 445                          &smc_gen_nl_family, NLM_F_MULTI,
 446                          SMC_NETLINK_GET_LGR_SMCD);
 447        if (!nlh)
 448                goto errmsg;
 449
 450        attrs = nla_nest_start(skb, SMC_GEN_LGR_SMCD);
 451        if (!attrs)
 452                goto errout;
 453
 454        if (nla_put_u32(skb, SMC_NLA_LGR_D_ID, *((u32 *)&lgr->id)))
 455                goto errattr;
 456        if (nla_put_u64_64bit(skb, SMC_NLA_LGR_D_GID, lgr->smcd->local_gid,
 457                              SMC_NLA_LGR_D_PAD))
 458                goto errattr;
 459        if (nla_put_u64_64bit(skb, SMC_NLA_LGR_D_PEER_GID, lgr->peer_gid,
 460                              SMC_NLA_LGR_D_PAD))
 461                goto errattr;
 462        if (nla_put_u8(skb, SMC_NLA_LGR_D_VLAN_ID, lgr->vlan_id))
 463                goto errattr;
 464        if (nla_put_u32(skb, SMC_NLA_LGR_D_CONNS_NUM, lgr->conns_num))
 465                goto errattr;
 466        if (nla_put_u32(skb, SMC_NLA_LGR_D_CHID, smc_ism_get_chid(lgr->smcd)))
 467                goto errattr;
 468        memcpy(smc_pnet, lgr->smcd->pnetid, SMC_MAX_PNETID_LEN);
 469        smc_pnet[SMC_MAX_PNETID_LEN] = 0;
 470        if (nla_put_string(skb, SMC_NLA_LGR_D_PNETID, smc_pnet))
 471                goto errattr;
 472
 473        v2_attrs = nla_nest_start(skb, SMC_NLA_LGR_V2);
 474        if (!v2_attrs)
 475                goto errattr;
 476        if (nla_put_u8(skb, SMC_NLA_LGR_V2_VER, lgr->smc_version))
 477                goto errv2attr;
 478        if (nla_put_u8(skb, SMC_NLA_LGR_V2_REL, lgr->peer_smc_release))
 479                goto errv2attr;
 480        if (nla_put_u8(skb, SMC_NLA_LGR_V2_OS, lgr->peer_os))
 481                goto errv2attr;
 482        memcpy(smc_host, lgr->peer_hostname, SMC_MAX_HOSTNAME_LEN);
 483        smc_host[SMC_MAX_HOSTNAME_LEN] = 0;
 484        if (nla_put_string(skb, SMC_NLA_LGR_V2_PEER_HOST, smc_host))
 485                goto errv2attr;
 486        memcpy(smc_eid, lgr->negotiated_eid, SMC_MAX_EID_LEN);
 487        smc_eid[SMC_MAX_EID_LEN] = 0;
 488        if (nla_put_string(skb, SMC_NLA_LGR_V2_NEG_EID, smc_eid))
 489                goto errv2attr;
 490
 491        nla_nest_end(skb, v2_attrs);
 492        nla_nest_end(skb, attrs);
 493        genlmsg_end(skb, nlh);
 494        return 0;
 495
 496errv2attr:
 497        nla_nest_cancel(skb, v2_attrs);
 498errattr:
 499        nla_nest_cancel(skb, attrs);
 500errout:
 501        genlmsg_cancel(skb, nlh);
 502errmsg:
 503        return -EMSGSIZE;
 504}
 505
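     /* dump all SMC-D link groups of one ISM device, resuming at the
      * position stored in the netlink callback context
      */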
 506static int smc_nl_handle_smcd_lgr(struct smcd_dev *dev,
 507                                  struct sk_buff *skb,
 508                                  struct netlink_callback *cb)
 509{
 510        struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
 511        struct smc_link_group *lgr;
 512        int snum = cb_ctx->pos[1];
 513        int rc = 0, num = 0;
 514
 515        spin_lock_bh(&dev->lgr_lock);
 516        list_for_each_entry(lgr, &dev->lgr_list, list) {
 517                if (!lgr->is_smcd)
 518                        continue;
 519                if (num < snum)
 520                        goto next;
 521                rc = smc_nl_fill_smcd_lgr(lgr, skb, cb);
 522                if (rc)
 523                        goto errout;
 524next:
 525                num++;
 526        }
 527errout:
 528        spin_unlock_bh(&dev->lgr_lock);
 529        cb_ctx->pos[1] = num;
 530        return rc;
 531}
 532
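     /* walk the ISM device list and dump the link groups of each device */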
 533static int smc_nl_fill_smcd_dev(struct smcd_dev_list *dev_list,
 534                                struct sk_buff *skb,
 535                                struct netlink_callback *cb)
 536{
 537        struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
 538        struct smcd_dev *smcd_dev;
 539        int snum = cb_ctx->pos[0];
 540        int rc = 0, num = 0;
 541
 542        mutex_lock(&dev_list->mutex);
 543        list_for_each_entry(smcd_dev, &dev_list->list, list) {
 544                if (list_empty(&smcd_dev->lgr_list))
 545                        continue;
 546                if (num < snum)
 547                        goto next;
 548                rc = smc_nl_handle_smcd_lgr(smcd_dev, skb, cb);
 549                if (rc)
 550                        goto errout;
 551next:
 552                num++;
 553        }
 554errout:
 555        mutex_unlock(&dev_list->mutex);
 556        cb_ctx->pos[0] = num;
 557        return rc;
 558}
 559
 560int smcr_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb)
 561{
 562        bool list_links = false;
 563
 564        smc_nl_fill_lgr_list(&smc_lgr_list, skb, cb, list_links);
 565        return skb->len;
 566}
 567
 568int smcr_nl_get_link(struct sk_buff *skb, struct netlink_callback *cb)
 569{
 570        bool list_links = true;
 571
 572        smc_nl_fill_lgr_list(&smc_lgr_list, skb, cb, list_links);
 573        return skb->len;
 574}
 575
 576int smcd_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb)
 577{
 578        smc_nl_fill_smcd_dev(&smcd_dev_list, skb, cb);
 579        return skb->len;
 580}
 581
 582void smc_lgr_cleanup_early(struct smc_connection *conn)
 583{
 584        struct smc_link_group *lgr = conn->lgr;
 585        struct list_head *lgr_list;
 586        spinlock_t *lgr_lock;
 587
 588        if (!lgr)
 589                return;
 590
 591        smc_conn_free(conn);
 592        lgr_list = smc_lgr_list_head(lgr, &lgr_lock);
 593        spin_lock_bh(lgr_lock);
 594        /* do not use this link group for new connections */
 595        if (!list_empty(lgr_list))
 596                list_del_init(lgr_list);
 597        spin_unlock_bh(lgr_lock);
 598        __smc_lgr_terminate(lgr, true);
 599}
 600
 601static void smcr_lgr_link_deactivate_all(struct smc_link_group *lgr)
 602{
 603        int i;
 604
 605        for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
 606                struct smc_link *lnk = &lgr->lnk[i];
 607
 608                if (smc_link_usable(lnk))
 609                        lnk->state = SMC_LNK_INACTIVE;
 610        }
 611        wake_up_all(&lgr->llc_msg_waiter);
 612        wake_up_all(&lgr->llc_flow_waiter);
 613}
 614
 615static void smc_lgr_free(struct smc_link_group *lgr);
 616
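     /* delayed work to free a link group that has no more connections; the
      * peer is notified (LLC delete-all or ISM shutdown) unless the group is
      * already terminating
      */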
 617static void smc_lgr_free_work(struct work_struct *work)
 618{
 619        struct smc_link_group *lgr = container_of(to_delayed_work(work),
 620                                                  struct smc_link_group,
 621                                                  free_work);
 622        spinlock_t *lgr_lock;
 623        bool conns;
 624
 625        smc_lgr_list_head(lgr, &lgr_lock);
 626        spin_lock_bh(lgr_lock);
 627        if (lgr->freeing) {
 628                spin_unlock_bh(lgr_lock);
 629                return;
 630        }
 631        read_lock_bh(&lgr->conns_lock);
 632        conns = RB_EMPTY_ROOT(&lgr->conns_all);
 633        read_unlock_bh(&lgr->conns_lock);
 634        if (!conns) { /* number of lgr connections is no longer zero */
 635                spin_unlock_bh(lgr_lock);
 636                return;
 637        }
 638        list_del_init(&lgr->list); /* remove from smc_lgr_list */
 639        lgr->freeing = 1; /* this instance does the freeing, no new schedule */
 640        spin_unlock_bh(lgr_lock);
 641        cancel_delayed_work(&lgr->free_work);
 642
 643        if (!lgr->is_smcd && !lgr->terminating)
 644                smc_llc_send_link_delete_all(lgr, true,
 645                                             SMC_LLC_DEL_PROG_INIT_TERM);
 646        if (lgr->is_smcd && !lgr->terminating)
 647                smc_ism_signal_shutdown(lgr);
 648        if (!lgr->is_smcd)
 649                smcr_lgr_link_deactivate_all(lgr);
 650        smc_lgr_free(lgr);
 651}
 652
 653static void smc_lgr_terminate_work(struct work_struct *work)
 654{
 655        struct smc_link_group *lgr = container_of(work, struct smc_link_group,
 656                                                  terminate_work);
 657
 658        __smc_lgr_terminate(lgr, true);
 659}
 660
 661/* return next unique link id for the lgr */
 662static u8 smcr_next_link_id(struct smc_link_group *lgr)
 663{
 664        u8 link_id;
 665        int i;
 666
 667        while (1) {
 668                link_id = ++lgr->next_link_id;
 669                if (!link_id)   /* skip zero as link_id */
 670                        link_id = ++lgr->next_link_id;
 671                for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
 672                        if (smc_link_usable(&lgr->lnk[i]) &&
 673                            lgr->lnk[i].link_id == link_id)
 674                                continue;
 675                }
 676                break;
 677        }
 678        return link_id;
 679}
 680
 681static void smcr_copy_dev_info_to_link(struct smc_link *link)
 682{
 683        struct smc_ib_device *smcibdev = link->smcibdev;
 684
 685        snprintf(link->ibname, sizeof(link->ibname), "%s",
 686                 smcibdev->ibdev->name);
 687        link->ndev_ifidx = smcibdev->ndev_ifidx[link->ibport - 1];
 688}
 689
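     /* initialize a new SMC-R link: pick a link id, determine the GID, and
      * set up LLC state, work request memory, protection domain and queue pair
      */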
 690int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
 691                   u8 link_idx, struct smc_init_info *ini)
 692{
 693        u8 rndvec[3];
 694        int rc;
 695
 696        get_device(&ini->ib_dev->ibdev->dev);
 697        atomic_inc(&ini->ib_dev->lnk_cnt);
 698        lnk->link_id = smcr_next_link_id(lgr);
 699        lnk->lgr = lgr;
 700        lnk->link_idx = link_idx;
 701        lnk->smcibdev = ini->ib_dev;
 702        lnk->ibport = ini->ib_port;
 703        smc_ibdev_cnt_inc(lnk);
 704        smcr_copy_dev_info_to_link(lnk);
 705        lnk->path_mtu = ini->ib_dev->pattr[ini->ib_port - 1].active_mtu;
 706        atomic_set(&lnk->conn_cnt, 0);
 707        smc_llc_link_set_uid(lnk);
 708        INIT_WORK(&lnk->link_down_wrk, smc_link_down_work);
 709        if (!ini->ib_dev->initialized) {
 710                rc = (int)smc_ib_setup_per_ibdev(ini->ib_dev);
 711                if (rc)
 712                        goto out;
 713        }
 714        get_random_bytes(rndvec, sizeof(rndvec));
 715        lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) +
 716                (rndvec[2] << 16);
 717        rc = smc_ib_determine_gid(lnk->smcibdev, lnk->ibport,
 718                                  ini->vlan_id, lnk->gid, &lnk->sgid_index);
 719        if (rc)
 720                goto out;
 721        rc = smc_llc_link_init(lnk);
 722        if (rc)
 723                goto out;
 724        rc = smc_wr_alloc_link_mem(lnk);
 725        if (rc)
 726                goto clear_llc_lnk;
 727        rc = smc_ib_create_protection_domain(lnk);
 728        if (rc)
 729                goto free_link_mem;
 730        rc = smc_ib_create_queue_pair(lnk);
 731        if (rc)
 732                goto dealloc_pd;
 733        rc = smc_wr_create_link(lnk);
 734        if (rc)
 735                goto destroy_qp;
 736        lnk->state = SMC_LNK_ACTIVATING;
 737        return 0;
 738
 739destroy_qp:
 740        smc_ib_destroy_queue_pair(lnk);
 741dealloc_pd:
 742        smc_ib_dealloc_protection_domain(lnk);
 743free_link_mem:
 744        smc_wr_free_link_mem(lnk);
 745clear_llc_lnk:
 746        smc_llc_link_clear(lnk, false);
 747out:
 748        smc_ibdev_cnt_dec(lnk);
 749        put_device(&ini->ib_dev->ibdev->dev);
 750        memset(lnk, 0, sizeof(struct smc_link));
 751        lnk->state = SMC_LNK_UNUSED;
 752        if (!atomic_dec_return(&ini->ib_dev->lnk_cnt))
 753                wake_up(&ini->ib_dev->lnks_deleted);
 754        return rc;
 755}
 756
 757/* create a new SMC link group */
 758static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
 759{
 760        struct smc_link_group *lgr;
 761        struct list_head *lgr_list;
 762        struct smc_link *lnk;
 763        spinlock_t *lgr_lock;
 764        u8 link_idx;
 765        int rc = 0;
 766        int i;
 767
 768        if (ini->is_smcd && ini->vlan_id) {
 769                if (smc_ism_get_vlan(ini->ism_dev[ini->ism_selected],
 770                                     ini->vlan_id)) {
 771                        rc = SMC_CLC_DECL_ISMVLANERR;
 772                        goto out;
 773                }
 774        }
 775
 776        lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
 777        if (!lgr) {
 778                rc = SMC_CLC_DECL_MEM;
 779                goto ism_put_vlan;
 780        }
 781        lgr->tx_wq = alloc_workqueue("smc_tx_wq-%*phN", 0, 0,
 782                                     SMC_LGR_ID_SIZE, &lgr->id);
 783        if (!lgr->tx_wq) {
 784                rc = -ENOMEM;
 785                goto free_lgr;
 786        }
 787        lgr->is_smcd = ini->is_smcd;
 788        lgr->sync_err = 0;
 789        lgr->terminating = 0;
 790        lgr->freeing = 0;
 791        lgr->vlan_id = ini->vlan_id;
 792        mutex_init(&lgr->sndbufs_lock);
 793        mutex_init(&lgr->rmbs_lock);
 794        rwlock_init(&lgr->conns_lock);
 795        for (i = 0; i < SMC_RMBE_SIZES; i++) {
 796                INIT_LIST_HEAD(&lgr->sndbufs[i]);
 797                INIT_LIST_HEAD(&lgr->rmbs[i]);
 798        }
 799        lgr->next_link_id = 0;
 800        smc_lgr_list.num += SMC_LGR_NUM_INCR;
 801        memcpy(&lgr->id, (u8 *)&smc_lgr_list.num, SMC_LGR_ID_SIZE);
 802        INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
 803        INIT_WORK(&lgr->terminate_work, smc_lgr_terminate_work);
 804        lgr->conns_all = RB_ROOT;
 805        if (ini->is_smcd) {
 806                /* SMC-D specific settings */
 807                get_device(&ini->ism_dev[ini->ism_selected]->dev);
 808                lgr->peer_gid = ini->ism_peer_gid[ini->ism_selected];
 809                lgr->smcd = ini->ism_dev[ini->ism_selected];
 810                lgr_list = &ini->ism_dev[ini->ism_selected]->lgr_list;
 811                lgr_lock = &lgr->smcd->lgr_lock;
 812                lgr->smc_version = ini->smcd_version;
 813                lgr->peer_shutdown = 0;
 814                atomic_inc(&ini->ism_dev[ini->ism_selected]->lgr_cnt);
 815        } else {
 816                /* SMC-R specific settings */
 817                lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
 818                memcpy(lgr->peer_systemid, ini->ib_lcl->id_for_peer,
 819                       SMC_SYSTEMID_LEN);
 820                memcpy(lgr->pnet_id, ini->ib_dev->pnetid[ini->ib_port - 1],
 821                       SMC_MAX_PNETID_LEN);
 822                smc_llc_lgr_init(lgr, smc);
 823
 824                link_idx = SMC_SINGLE_LINK;
 825                lnk = &lgr->lnk[link_idx];
 826                rc = smcr_link_init(lgr, lnk, link_idx, ini);
 827                if (rc)
 828                        goto free_wq;
 829                lgr_list = &smc_lgr_list.list;
 830                lgr_lock = &smc_lgr_list.lock;
 831                atomic_inc(&lgr_cnt);
 832        }
 833        smc->conn.lgr = lgr;
 834        spin_lock_bh(lgr_lock);
 835        list_add_tail(&lgr->list, lgr_list);
 836        spin_unlock_bh(lgr_lock);
 837        return 0;
 838
 839free_wq:
 840        destroy_workqueue(lgr->tx_wq);
 841free_lgr:
 842        kfree(lgr);
 843ism_put_vlan:
 844        if (ini->is_smcd && ini->vlan_id)
 845                smc_ism_put_vlan(ini->ism_dev[ini->ism_selected], ini->vlan_id);
 846out:
 847        if (rc < 0) {
 848                if (rc == -ENOMEM)
 849                        rc = SMC_CLC_DECL_MEM;
 850                else
 851                        rc = SMC_CLC_DECL_INTERR;
 852        }
 853        return rc;
 854}
 855
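     /* free space in the peer's RMB: its size minus the data produced but
      * not yet confirmed as consumed by the peer
      */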
 856static int smc_write_space(struct smc_connection *conn)
 857{
 858        int buffer_len = conn->peer_rmbe_size;
 859        union smc_host_cursor prod;
 860        union smc_host_cursor cons;
 861        int space;
 862
 863        smc_curs_copy(&prod, &conn->local_tx_ctrl.prod, conn);
 864        smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);
 865        /* determine rx_buf space */
 866        space = buffer_len - smc_curs_diff(buffer_len, &cons, &prod);
 867        return space;
 868}
 869
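     /* after switching a connection to another link, rewind the tx cursors to
      * the last state confirmed by the peer so that unconfirmed data is
      * transmitted again, and send a CDC validation message
      */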
 870static int smc_switch_cursor(struct smc_sock *smc, struct smc_cdc_tx_pend *pend,
 871                             struct smc_wr_buf *wr_buf)
 872{
 873        struct smc_connection *conn = &smc->conn;
 874        union smc_host_cursor cons, fin;
 875        int rc = 0;
 876        int diff;
 877
 878        smc_curs_copy(&conn->tx_curs_sent, &conn->tx_curs_fin, conn);
 879        smc_curs_copy(&fin, &conn->local_tx_ctrl_fin, conn);
 880        /* set prod cursor to old state, enforce tx_rdma_writes() */
 881        smc_curs_copy(&conn->local_tx_ctrl.prod, &fin, conn);
 882        smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);
 883
 884        if (smc_curs_comp(conn->peer_rmbe_size, &cons, &fin) < 0) {
 885                /* cons cursor advanced more than fin, and prod was set
 886                 * fin above, so now prod is smaller than cons. Fix that.
 887                 */
 888                diff = smc_curs_diff(conn->peer_rmbe_size, &fin, &cons);
 889                smc_curs_add(conn->sndbuf_desc->len,
 890                             &conn->tx_curs_sent, diff);
 891                smc_curs_add(conn->sndbuf_desc->len,
 892                             &conn->tx_curs_fin, diff);
 893
 894                smp_mb__before_atomic();
 895                atomic_add(diff, &conn->sndbuf_space);
 896                smp_mb__after_atomic();
 897
 898                smc_curs_add(conn->peer_rmbe_size,
 899                             &conn->local_tx_ctrl.prod, diff);
 900                smc_curs_add(conn->peer_rmbe_size,
 901                             &conn->local_tx_ctrl_fin, diff);
 902        }
 903        /* recalculate, value is used by tx_rdma_writes() */
 904        atomic_set(&smc->conn.peer_rmbe_space, smc_write_space(conn));
 905
 906        if (smc->sk.sk_state != SMC_INIT &&
 907            smc->sk.sk_state != SMC_CLOSED) {
 908                rc = smcr_cdc_msg_send_validation(conn, pend, wr_buf);
 909                if (!rc) {
 910                        queue_delayed_work(conn->lgr->tx_wq, &conn->tx_work, 0);
 911                        smc->sk.sk_data_ready(&smc->sk);
 912                }
 913        } else {
 914                smc_wr_tx_put_slot(conn->lnk,
 915                                   (struct smc_wr_tx_pend_priv *)pend);
 916        }
 917        return rc;
 918}
 919
 920void smc_switch_link_and_count(struct smc_connection *conn,
 921                               struct smc_link *to_lnk)
 922{
 923        atomic_dec(&conn->lnk->conn_cnt);
 924        conn->lnk = to_lnk;
 925        atomic_inc(&conn->lnk->conn_cnt);
 926}
 927
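     /* move all connections of a link group away from from_lnk to another
      * active link; terminate the link group if no alternative link is usable
      */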
 928struct smc_link *smc_switch_conns(struct smc_link_group *lgr,
 929                                  struct smc_link *from_lnk, bool is_dev_err)
 930{
 931        struct smc_link *to_lnk = NULL;
 932        struct smc_cdc_tx_pend *pend;
 933        struct smc_connection *conn;
 934        struct smc_wr_buf *wr_buf;
 935        struct smc_sock *smc;
 936        struct rb_node *node;
 937        int i, rc = 0;
 938
 939        /* link is inactive, wake up tx waiters */
 940        smc_wr_wakeup_tx_wait(from_lnk);
 941
 942        for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
 943                if (!smc_link_active(&lgr->lnk[i]) || i == from_lnk->link_idx)
 944                        continue;
 945                if (is_dev_err && from_lnk->smcibdev == lgr->lnk[i].smcibdev &&
 946                    from_lnk->ibport == lgr->lnk[i].ibport) {
 947                        continue;
 948                }
 949                to_lnk = &lgr->lnk[i];
 950                break;
 951        }
 952        if (!to_lnk || !smc_wr_tx_link_hold(to_lnk)) {
 953                smc_lgr_terminate_sched(lgr);
 954                return NULL;
 955        }
 956again:
 957        read_lock_bh(&lgr->conns_lock);
 958        for (node = rb_first(&lgr->conns_all); node; node = rb_next(node)) {
 959                conn = rb_entry(node, struct smc_connection, alert_node);
 960                if (conn->lnk != from_lnk)
 961                        continue;
 962                smc = container_of(conn, struct smc_sock, conn);
 963                /* conn->lnk not yet set in SMC_INIT state */
 964                if (smc->sk.sk_state == SMC_INIT)
 965                        continue;
 966                if (smc->sk.sk_state == SMC_CLOSED ||
 967                    smc->sk.sk_state == SMC_PEERCLOSEWAIT1 ||
 968                    smc->sk.sk_state == SMC_PEERCLOSEWAIT2 ||
 969                    smc->sk.sk_state == SMC_APPFINCLOSEWAIT ||
 970                    smc->sk.sk_state == SMC_APPCLOSEWAIT1 ||
 971                    smc->sk.sk_state == SMC_APPCLOSEWAIT2 ||
 972                    smc->sk.sk_state == SMC_PEERFINCLOSEWAIT ||
 973                    smc->sk.sk_state == SMC_PEERABORTWAIT ||
 974                    smc->sk.sk_state == SMC_PROCESSABORT) {
 975                        spin_lock_bh(&conn->send_lock);
 976                        smc_switch_link_and_count(conn, to_lnk);
 977                        spin_unlock_bh(&conn->send_lock);
 978                        continue;
 979                }
 980                sock_hold(&smc->sk);
 981                read_unlock_bh(&lgr->conns_lock);
 982                /* pre-fetch buffer outside of send_lock, might sleep */
 983                rc = smc_cdc_get_free_slot(conn, to_lnk, &wr_buf, NULL, &pend);
 984                if (rc)
 985                        goto err_out;
 986                /* avoid race with smcr_tx_sndbuf_nonempty() */
 987                spin_lock_bh(&conn->send_lock);
 988                smc_switch_link_and_count(conn, to_lnk);
 989                rc = smc_switch_cursor(smc, pend, wr_buf);
 990                spin_unlock_bh(&conn->send_lock);
 991                sock_put(&smc->sk);
 992                if (rc)
 993                        goto err_out;
 994                goto again;
 995        }
 996        read_unlock_bh(&lgr->conns_lock);
 997        smc_wr_tx_link_put(to_lnk);
 998        return to_lnk;
 999
1000err_out:
1001        smcr_link_down_cond_sched(to_lnk);
1002        smc_wr_tx_link_put(to_lnk);
1003        return NULL;
1004}
1005
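     /* give up an SMC-R RMB: delete its rkey at the peer and either free the
      * buffer (after a registration error) or mark it reusable
      */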
1006static void smcr_buf_unuse(struct smc_buf_desc *rmb_desc,
1007                           struct smc_link_group *lgr)
1008{
1009        int rc;
1010
1011        if (rmb_desc->is_conf_rkey && !list_empty(&lgr->list)) {
1012                /* unregister rmb with peer */
1013                rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
1014                if (!rc) {
1015                        /* protect against smc_llc_cli_rkey_exchange() */
1016                        mutex_lock(&lgr->llc_conf_mutex);
1017                        smc_llc_do_delete_rkey(lgr, rmb_desc);
1018                        rmb_desc->is_conf_rkey = false;
1019                        mutex_unlock(&lgr->llc_conf_mutex);
1020                        smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
1021                }
1022        }
1023
1024        if (rmb_desc->is_reg_err) {
1025                /* buf registration failed, reuse not possible */
1026                mutex_lock(&lgr->rmbs_lock);
1027                list_del(&rmb_desc->list);
1028                mutex_unlock(&lgr->rmbs_lock);
1029
1030                smc_buf_free(lgr, true, rmb_desc);
1031        } else {
1032                rmb_desc->used = 0;
1033        }
1034}
1035
1036static void smc_buf_unuse(struct smc_connection *conn,
1037                          struct smc_link_group *lgr)
1038{
1039        if (conn->sndbuf_desc)
1040                conn->sndbuf_desc->used = 0;
1041        if (conn->rmb_desc && lgr->is_smcd)
1042                conn->rmb_desc->used = 0;
1043        else if (conn->rmb_desc)
1044                smcr_buf_unuse(conn->rmb_desc, lgr);
1045}
1046
1047/* remove a finished connection from its link group */
1048void smc_conn_free(struct smc_connection *conn)
1049{
1050        struct smc_link_group *lgr = conn->lgr;
1051
1052        if (!lgr)
1053                return;
1054        if (lgr->is_smcd) {
1055                if (!list_empty(&lgr->list))
1056                        smc_ism_unset_conn(conn);
1057                tasklet_kill(&conn->rx_tsklet);
1058        } else {
1059                smc_cdc_tx_dismiss_slots(conn);
1060                if (current_work() != &conn->abort_work)
1061                        cancel_work_sync(&conn->abort_work);
1062        }
1063        if (!list_empty(&lgr->list)) {
1064                smc_lgr_unregister_conn(conn);
1065                smc_buf_unuse(conn, lgr); /* allow buffer reuse */
1066        }
1067
1068        if (!lgr->conns_num)
1069                smc_lgr_schedule_free_work(lgr);
1070}
1071
1072/* unregister a link from a buf_desc */
1073static void smcr_buf_unmap_link(struct smc_buf_desc *buf_desc, bool is_rmb,
1074                                struct smc_link *lnk)
1075{
1076        if (is_rmb)
1077                buf_desc->is_reg_mr[lnk->link_idx] = false;
1078        if (!buf_desc->is_map_ib[lnk->link_idx])
1079                return;
1080        if (is_rmb) {
1081                if (buf_desc->mr_rx[lnk->link_idx]) {
1082                        smc_ib_put_memory_region(
1083                                        buf_desc->mr_rx[lnk->link_idx]);
1084                        buf_desc->mr_rx[lnk->link_idx] = NULL;
1085                }
1086                smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_FROM_DEVICE);
1087        } else {
1088                smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_TO_DEVICE);
1089        }
1090        sg_free_table(&buf_desc->sgt[lnk->link_idx]);
1091        buf_desc->is_map_ib[lnk->link_idx] = false;
1092}
1093
1094/* unmap all buffers of lgr for a deleted link */
1095static void smcr_buf_unmap_lgr(struct smc_link *lnk)
1096{
1097        struct smc_link_group *lgr = lnk->lgr;
1098        struct smc_buf_desc *buf_desc, *bf;
1099        int i;
1100
1101        for (i = 0; i < SMC_RMBE_SIZES; i++) {
1102                mutex_lock(&lgr->rmbs_lock);
1103                list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list)
1104                        smcr_buf_unmap_link(buf_desc, true, lnk);
1105                mutex_unlock(&lgr->rmbs_lock);
1106                mutex_lock(&lgr->sndbufs_lock);
1107                list_for_each_entry_safe(buf_desc, bf, &lgr->sndbufs[i],
1108                                         list)
1109                        smcr_buf_unmap_link(buf_desc, false, lnk);
1110                mutex_unlock(&lgr->sndbufs_lock);
1111        }
1112}
1113
1114static void smcr_rtoken_clear_link(struct smc_link *lnk)
1115{
1116        struct smc_link_group *lgr = lnk->lgr;
1117        int i;
1118
1119        for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
1120                lgr->rtokens[i][lnk->link_idx].rkey = 0;
1121                lgr->rtokens[i][lnk->link_idx].dma_addr = 0;
1122        }
1123}
1124
1125/* must be called under lgr->llc_conf_mutex lock */
1126void smcr_link_clear(struct smc_link *lnk, bool log)
1127{
1128        struct smc_ib_device *smcibdev;
1129
1130        if (!lnk->lgr || lnk->state == SMC_LNK_UNUSED)
1131                return;
1132        lnk->peer_qpn = 0;
1133        smc_llc_link_clear(lnk, log);
1134        smcr_buf_unmap_lgr(lnk);
1135        smcr_rtoken_clear_link(lnk);
1136        smc_ib_modify_qp_reset(lnk);
1137        smc_wr_free_link(lnk);
1138        smc_ib_destroy_queue_pair(lnk);
1139        smc_ib_dealloc_protection_domain(lnk);
1140        smc_wr_free_link_mem(lnk);
1141        smc_ibdev_cnt_dec(lnk);
1142        put_device(&lnk->smcibdev->ibdev->dev);
1143        smcibdev = lnk->smcibdev;
1144        memset(lnk, 0, sizeof(struct smc_link));
1145        lnk->state = SMC_LNK_UNUSED;
1146        if (!atomic_dec_return(&smcibdev->lnk_cnt))
1147                wake_up(&smcibdev->lnks_deleted);
1148}
1149
1150static void smcr_buf_free(struct smc_link_group *lgr, bool is_rmb,
1151                          struct smc_buf_desc *buf_desc)
1152{
1153        int i;
1154
1155        for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
1156                smcr_buf_unmap_link(buf_desc, is_rmb, &lgr->lnk[i]);
1157
1158        if (buf_desc->pages)
1159                __free_pages(buf_desc->pages, buf_desc->order);
1160        kfree(buf_desc);
1161}
1162
1163static void smcd_buf_free(struct smc_link_group *lgr, bool is_dmb,
1164                          struct smc_buf_desc *buf_desc)
1165{
1166        if (is_dmb) {
1167                /* restore original buf len */
1168                buf_desc->len += sizeof(struct smcd_cdc_msg);
1169                smc_ism_unregister_dmb(lgr->smcd, buf_desc);
1170        } else {
1171                kfree(buf_desc->cpu_addr);
1172        }
1173        kfree(buf_desc);
1174}
1175
1176static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
1177                         struct smc_buf_desc *buf_desc)
1178{
1179        if (lgr->is_smcd)
1180                smcd_buf_free(lgr, is_rmb, buf_desc);
1181        else
1182                smcr_buf_free(lgr, is_rmb, buf_desc);
1183}
1184
1185static void __smc_lgr_free_bufs(struct smc_link_group *lgr, bool is_rmb)
1186{
1187        struct smc_buf_desc *buf_desc, *bf_desc;
1188        struct list_head *buf_list;
1189        int i;
1190
1191        for (i = 0; i < SMC_RMBE_SIZES; i++) {
1192                if (is_rmb)
1193                        buf_list = &lgr->rmbs[i];
1194                else
1195                        buf_list = &lgr->sndbufs[i];
1196                list_for_each_entry_safe(buf_desc, bf_desc, buf_list,
1197                                         list) {
1198                        list_del(&buf_desc->list);
1199                        smc_buf_free(lgr, is_rmb, buf_desc);
1200                }
1201        }
1202}
1203
1204static void smc_lgr_free_bufs(struct smc_link_group *lgr)
1205{
1206        /* free send buffers */
1207        __smc_lgr_free_bufs(lgr, false);
1208        /* free rmbs */
1209        __smc_lgr_free_bufs(lgr, true);
1210}
1211
1212/* remove a link group */
1213static void smc_lgr_free(struct smc_link_group *lgr)
1214{
1215        int i;
1216
1217        if (!lgr->is_smcd) {
1218                mutex_lock(&lgr->llc_conf_mutex);
1219                for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1220                        if (lgr->lnk[i].state != SMC_LNK_UNUSED)
1221                                smcr_link_clear(&lgr->lnk[i], false);
1222                }
1223                mutex_unlock(&lgr->llc_conf_mutex);
1224                smc_llc_lgr_clear(lgr);
1225        }
1226
1227        smc_lgr_free_bufs(lgr);
1228        destroy_workqueue(lgr->tx_wq);
1229        if (lgr->is_smcd) {
1230                smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
1231                put_device(&lgr->smcd->dev);
1232                if (!atomic_dec_return(&lgr->smcd->lgr_cnt))
1233                        wake_up(&lgr->smcd->lgrs_deleted);
1234        } else {
1235                if (!atomic_dec_return(&lgr_cnt))
1236                        wake_up(&lgrs_deleted);
1237        }
1238        kfree(lgr);
1239}
1240
1241static void smc_sk_wake_ups(struct smc_sock *smc)
1242{
1243        smc->sk.sk_write_space(&smc->sk);
1244        smc->sk.sk_data_ready(&smc->sk);
1245        smc->sk.sk_state_change(&smc->sk);
1246}
1247
1248/* kill a connection */
1249static void smc_conn_kill(struct smc_connection *conn, bool soft)
1250{
1251        struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
1252
1253        if (conn->lgr->is_smcd && conn->lgr->peer_shutdown)
1254                conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
1255        else
1256                smc_close_abort(conn);
1257        conn->killed = 1;
1258        smc->sk.sk_err = ECONNABORTED;
1259        smc_sk_wake_ups(smc);
1260        if (conn->lgr->is_smcd) {
1261                smc_ism_unset_conn(conn);
1262                if (soft)
1263                        tasklet_kill(&conn->rx_tsklet);
1264                else
1265                        tasklet_unlock_wait(&conn->rx_tsklet);
1266        } else {
1267                smc_cdc_tx_dismiss_slots(conn);
1268        }
1269        smc_lgr_unregister_conn(conn);
1270        smc_close_active_abort(smc);
1271}
1272
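     /* last peer notification before the link group is freed: ISM shutdown
      * signal for SMC-D, LLC delete-all and link deactivation for SMC-R
      */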
1273static void smc_lgr_cleanup(struct smc_link_group *lgr)
1274{
1275        if (lgr->is_smcd) {
1276                smc_ism_signal_shutdown(lgr);
1277        } else {
1278                u32 rsn = lgr->llc_termination_rsn;
1279
1280                if (!rsn)
1281                        rsn = SMC_LLC_DEL_PROG_INIT_TERM;
1282                smc_llc_send_link_delete_all(lgr, false, rsn);
1283                smcr_lgr_link_deactivate_all(lgr);
1284        }
1285}
1286
1287/* terminate link group
1288 * @soft: true if link group shutdown can take its time
1289 *        false if immediate link group shutdown is required
1290 */
1291static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
1292{
1293        struct smc_connection *conn;
1294        struct smc_sock *smc;
1295        struct rb_node *node;
1296
1297        if (lgr->terminating)
1298                return; /* lgr already terminating */
1299        /* cancel free_work sync, will terminate when lgr->freeing is set */
1300        cancel_delayed_work_sync(&lgr->free_work);
1301        lgr->terminating = 1;
1302
1303        /* kill remaining link group connections */
1304        read_lock_bh(&lgr->conns_lock);
1305        node = rb_first(&lgr->conns_all);
1306        while (node) {
1307                read_unlock_bh(&lgr->conns_lock);
1308                conn = rb_entry(node, struct smc_connection, alert_node);
1309                smc = container_of(conn, struct smc_sock, conn);
1310                sock_hold(&smc->sk); /* sock_put below */
1311                lock_sock(&smc->sk);
1312                smc_conn_kill(conn, soft);
1313                release_sock(&smc->sk);
1314                sock_put(&smc->sk); /* sock_hold above */
1315                read_lock_bh(&lgr->conns_lock);
1316                node = rb_first(&lgr->conns_all);
1317        }
1318        read_unlock_bh(&lgr->conns_lock);
1319        smc_lgr_cleanup(lgr);
1320        smc_lgr_free(lgr);
1321}
1322
1323/* unlink link group and schedule termination */
1324void smc_lgr_terminate_sched(struct smc_link_group *lgr)
1325{
1326        spinlock_t *lgr_lock;
1327
1328        smc_lgr_list_head(lgr, &lgr_lock);
1329        spin_lock_bh(lgr_lock);
1330        if (list_empty(&lgr->list) || lgr->terminating || lgr->freeing) {
1331                spin_unlock_bh(lgr_lock);
1332                return; /* lgr already terminating */
1333        }
1334        list_del_init(&lgr->list);
1335        lgr->freeing = 1;
1336        spin_unlock_bh(lgr_lock);
1337        schedule_work(&lgr->terminate_work);
1338}
1339
1340/* Called when peer lgr shutdown (regularly or abnormally) is received */
1341void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
1342{
1343        struct smc_link_group *lgr, *l;
1344        LIST_HEAD(lgr_free_list);
1345
1346        /* run common cleanup function and build free list */
1347        spin_lock_bh(&dev->lgr_lock);
1348        list_for_each_entry_safe(lgr, l, &dev->lgr_list, list) {
1349                if ((!peer_gid || lgr->peer_gid == peer_gid) &&
1350                    (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
1351                        if (peer_gid) /* peer triggered termination */
1352                                lgr->peer_shutdown = 1;
1353                        list_move(&lgr->list, &lgr_free_list);
1354                        lgr->freeing = 1;
1355                }
1356        }
1357        spin_unlock_bh(&dev->lgr_lock);
1358
1359        /* cancel the regular free workers and actually free lgrs */
1360        list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
1361                list_del_init(&lgr->list);
1362                schedule_work(&lgr->terminate_work);
1363        }
1364}
1365
1366/* Called when an SMCD device is removed or the smc module is unloaded */
1367void smc_smcd_terminate_all(struct smcd_dev *smcd)
1368{
1369        struct smc_link_group *lgr, *lg;
1370        LIST_HEAD(lgr_free_list);
1371
1372        spin_lock_bh(&smcd->lgr_lock);
1373        list_splice_init(&smcd->lgr_list, &lgr_free_list);
1374        list_for_each_entry(lgr, &lgr_free_list, list)
1375                lgr->freeing = 1;
1376        spin_unlock_bh(&smcd->lgr_lock);
1377
1378        list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
1379                list_del_init(&lgr->list);
1380                __smc_lgr_terminate(lgr, false);
1381        }
1382
1383        if (atomic_read(&smcd->lgr_cnt))
1384                wait_event(smcd->lgrs_deleted, !atomic_read(&smcd->lgr_cnt));
1385}
1386
1387/* Called when an SMCR device is removed or the smc module is unloaded.
1388 * If smcibdev is given, all SMCR link groups using this device are terminated.
1389 * If smcibdev is NULL, all SMCR link groups are terminated.
1390 */
1391void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
1392{
1393        struct smc_link_group *lgr, *lg;
1394        LIST_HEAD(lgr_free_list);
1395        int i;
1396
1397        spin_lock_bh(&smc_lgr_list.lock);
1398        if (!smcibdev) {
1399                list_splice_init(&smc_lgr_list.list, &lgr_free_list);
1400                list_for_each_entry(lgr, &lgr_free_list, list)
1401                        lgr->freeing = 1;
1402        } else {
1403                list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) {
1404                        for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1405                                if (lgr->lnk[i].smcibdev == smcibdev)
1406                                        smcr_link_down_cond_sched(&lgr->lnk[i]);
1407                        }
1408                }
1409        }
1410        spin_unlock_bh(&smc_lgr_list.lock);
1411
1412        list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
1413                list_del_init(&lgr->list);
1414                smc_llc_set_termination_rsn(lgr, SMC_LLC_DEL_OP_INIT_TERM);
1415                __smc_lgr_terminate(lgr, false);
1416        }
1417
1418        if (smcibdev) {
1419                if (atomic_read(&smcibdev->lnk_cnt))
1420                        wait_event(smcibdev->lnks_deleted,
1421                                   !atomic_read(&smcibdev->lnk_cnt));
1422        } else {
1423                if (atomic_read(&lgr_cnt))
1424                        wait_event(lgrs_deleted, !atomic_read(&lgr_cnt));
1425        }
1426}
1427
1428/* set new lgr type and clear all asymmetric link tagging */
1429void smcr_lgr_set_type(struct smc_link_group *lgr, enum smc_lgr_type new_type)
1430{
1431        char *lgr_type = "";
1432        int i;
1433
1434        for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
1435                if (smc_link_usable(&lgr->lnk[i]))
1436                        lgr->lnk[i].link_is_asym = false;
1437        if (lgr->type == new_type)
1438                return;
1439        lgr->type = new_type;
1440
1441        switch (lgr->type) {
1442        case SMC_LGR_NONE:
1443                lgr_type = "NONE";
1444                break;
1445        case SMC_LGR_SINGLE:
1446                lgr_type = "SINGLE";
1447                break;
1448        case SMC_LGR_SYMMETRIC:
1449                lgr_type = "SYMMETRIC";
1450                break;
1451        case SMC_LGR_ASYMMETRIC_PEER:
1452                lgr_type = "ASYMMETRIC_PEER";
1453                break;
1454        case SMC_LGR_ASYMMETRIC_LOCAL:
1455                lgr_type = "ASYMMETRIC_LOCAL";
1456                break;
1457        }
1458        pr_warn_ratelimited("smc: SMC-R lg %*phN state changed: "
1459                            "%s, pnetid %.16s\n", SMC_LGR_ID_SIZE, &lgr->id,
1460                            lgr_type, lgr->pnet_id);
1461}
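
/* For illustration only (values are hypothetical, assuming SMC_LGR_ID_SIZE
 * is 4 bytes): the ratelimited state-change message above prints the link
 * group id in hex followed by the new type and the pnetid, e.g.
 *
 *	smc: SMC-R lg 00000101 state changed: SYMMETRIC, pnetid NET25
 */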
1462
1463/* set new lgr type and tag a link as asymmetric */
1464void smcr_lgr_set_type_asym(struct smc_link_group *lgr,
1465                            enum smc_lgr_type new_type, int asym_lnk_idx)
1466{
1467        smcr_lgr_set_type(lgr, new_type);
1468        lgr->lnk[asym_lnk_idx].link_is_asym = true;
1469}
1470
1471/* abort connection, abort_work scheduled from tasklet context */
1472static void smc_conn_abort_work(struct work_struct *work)
1473{
1474        struct smc_connection *conn = container_of(work,
1475                                                   struct smc_connection,
1476                                                   abort_work);
1477        struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
1478
1479        lock_sock(&smc->sk);
1480        smc_conn_kill(conn, true);
1481        release_sock(&smc->sk);
1482        sock_put(&smc->sk); /* sock_hold done by schedulers of abort_work */
1483}
1484
1485void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport)
1486{
1487        struct smc_link_group *lgr, *n;
1488
1489        list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
1490                struct smc_link *link;
1491
1492                if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id,
1493                            SMC_MAX_PNETID_LEN) ||
1494                    lgr->type == SMC_LGR_SYMMETRIC ||
1495                    lgr->type == SMC_LGR_ASYMMETRIC_PEER)
1496                        continue;
1497
1498                /* trigger local add link processing */
1499                link = smc_llc_usable_link(lgr);
1500                if (link)
1501                        smc_llc_add_link_local(link);
1502        }
1503}
1504
1505/* link is down - switch connections to alternate link,
1506 * must be called under lgr->llc_conf_mutex lock
1507 */
1508static void smcr_link_down(struct smc_link *lnk)
1509{
1510        struct smc_link_group *lgr = lnk->lgr;
1511        struct smc_link *to_lnk;
1512        int del_link_id;
1513
1514        if (!lgr || lnk->state == SMC_LNK_UNUSED || list_empty(&lgr->list))
1515                return;
1516
1517        smc_ib_modify_qp_reset(lnk);
1518        to_lnk = smc_switch_conns(lgr, lnk, true);
1519        if (!to_lnk) { /* no backup link available */
1520                smcr_link_clear(lnk, true);
1521                return;
1522        }
1523        smcr_lgr_set_type(lgr, SMC_LGR_SINGLE);
1524        del_link_id = lnk->link_id;
1525
1526        if (lgr->role == SMC_SERV) {
1527                /* trigger local delete link processing */
1528                smc_llc_srv_delete_link_local(to_lnk, del_link_id);
1529        } else {
1530                if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
1531                        /* another llc task is ongoing */
1532                        mutex_unlock(&lgr->llc_conf_mutex);
1533                        wait_event_timeout(lgr->llc_flow_waiter,
1534                                (list_empty(&lgr->list) ||
1535                                 lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
1536                                SMC_LLC_WAIT_TIME);
1537                        mutex_lock(&lgr->llc_conf_mutex);
1538                }
1539                if (!list_empty(&lgr->list)) {
1540                        smc_llc_send_delete_link(to_lnk, del_link_id,
1541                                                 SMC_LLC_REQ, true,
1542                                                 SMC_LLC_DEL_LOST_PATH);
1543                        smcr_link_clear(lnk, true);
1544                }
1545                wake_up(&lgr->llc_flow_waiter); /* wake up next waiter */
1546        }
1547}
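
/* Summary of the failover sequence above (caller holds lgr->llc_conf_mutex
 * as documented):
 *   1. reset the QP so no further traffic uses the failing link
 *   2. smc_switch_conns() moves all connections to an alternate link;
 *      if no backup link exists, the link is simply cleared
 *   3. the link group degrades to SMC_LGR_SINGLE
 *   4. the DELETE LINK exchange removes the old link: the server triggers
 *      local delete-link processing, the client waits for any ongoing LLC
 *      flow and then sends a DELETE_LINK request with reason
 *      SMC_LLC_DEL_LOST_PATH before clearing the link
 */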
1548
1549/* must be called under lgr->llc_conf_mutex lock */
1550void smcr_link_down_cond(struct smc_link *lnk)
1551{
1552        if (smc_link_downing(&lnk->state))
1553                smcr_link_down(lnk);
1554}
1555
1556/* will get the lgr->llc_conf_mutex lock */
1557void smcr_link_down_cond_sched(struct smc_link *lnk)
1558{
1559        if (smc_link_downing(&lnk->state))
1560                schedule_work(&lnk->link_down_wrk);
1561}
1562
1563void smcr_port_err(struct smc_ib_device *smcibdev, u8 ibport)
1564{
1565        struct smc_link_group *lgr, *n;
1566        int i;
1567
1568        list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
1569                if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id,
1570                            SMC_MAX_PNETID_LEN))
1571                        continue; /* lgr is not affected */
1572                if (list_empty(&lgr->list))
1573                        continue;
1574                for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1575                        struct smc_link *lnk = &lgr->lnk[i];
1576
1577                        if (smc_link_usable(lnk) &&
1578                            lnk->smcibdev == smcibdev && lnk->ibport == ibport)
1579                                smcr_link_down_cond_sched(lnk);
1580                }
1581        }
1582}
1583
1584static void smc_link_down_work(struct work_struct *work)
1585{
1586        struct smc_link *link = container_of(work, struct smc_link,
1587                                             link_down_wrk);
1588        struct smc_link_group *lgr = link->lgr;
1589
1590        if (list_empty(&lgr->list))
1591                return;
1592        wake_up_all(&lgr->llc_msg_waiter);
1593        mutex_lock(&lgr->llc_conf_mutex);
1594        smcr_link_down(link);
1595        mutex_unlock(&lgr->llc_conf_mutex);
1596}
1597
1598/* Determine the vlan id of the internal TCP socket.
1599 * @ini: init info; the determined vlan id is stored in ini->vlan_id
1600 */
1601int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
1602{
1603        struct dst_entry *dst = sk_dst_get(clcsock->sk);
1604        struct net_device *ndev;
1605        int i, nest_lvl, rc = 0;
1606
1607        ini->vlan_id = 0;
1608        if (!dst) {
1609                rc = -ENOTCONN;
1610                goto out;
1611        }
1612        if (!dst->dev) {
1613                rc = -ENODEV;
1614                goto out_rel;
1615        }
1616
1617        ndev = dst->dev;
1618        if (is_vlan_dev(ndev)) {
1619                ini->vlan_id = vlan_dev_vlan_id(ndev);
1620                goto out_rel;
1621        }
1622
1623        rtnl_lock();
1624        nest_lvl = ndev->lower_level;
1625        for (i = 0; i < nest_lvl; i++) {
1626                struct list_head *lower = &ndev->adj_list.lower;
1627
1628                if (list_empty(lower))
1629                        break;
1630                lower = lower->next;
1631                ndev = (struct net_device *)netdev_lower_get_next(ndev, &lower);
1632                if (is_vlan_dev(ndev)) {
1633                        ini->vlan_id = vlan_dev_vlan_id(ndev);
1634                        break;
1635                }
1636        }
1637        rtnl_unlock();
1638
1639out_rel:
1640        dst_release(dst);
1641out:
1642        return rc;
1643}
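
/* Usage sketch (hypothetical caller, not taken from this file):
 *
 *	struct smc_init_info ini = { 0 };
 *	int rc = smc_vlan_by_tcpsk(smc->clcsock, &ini);
 *
 *	if (!rc && ini.vlan_id)
 *		pr_debug("clcsock is reached via vlan %u\n", ini.vlan_id);
 *
 * ini.vlan_id stays 0 when no vlan device is found in the lower-device
 * chain of the route's egress device.
 */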
1644
1645static bool smcr_lgr_match(struct smc_link_group *lgr,
1646                           struct smc_clc_msg_local *lcl,
1647                           enum smc_lgr_role role, u32 clcqpn)
1648{
1649        int i;
1650
1651        if (memcmp(lgr->peer_systemid, lcl->id_for_peer, SMC_SYSTEMID_LEN) ||
1652            lgr->role != role)
1653                return false;
1654
1655        for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1656                if (!smc_link_active(&lgr->lnk[i]))
1657                        continue;
1658                if ((lgr->role == SMC_SERV || lgr->lnk[i].peer_qpn == clcqpn) &&
1659                    !memcmp(lgr->lnk[i].peer_gid, &lcl->gid, SMC_GID_SIZE) &&
1660                    !memcmp(lgr->lnk[i].peer_mac, lcl->mac, sizeof(lcl->mac)))
1661                        return true;
1662        }
1663        return false;
1664}
1665
1666static bool smcd_lgr_match(struct smc_link_group *lgr,
1667                           struct smcd_dev *smcismdev, u64 peer_gid)
1668{
1669        return lgr->peer_gid == peer_gid && lgr->smcd == smcismdev;
1670}
1671
1672/* create a new SMC connection (and a new link group if necessary) */
1673int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
1674{
1675        struct smc_connection *conn = &smc->conn;
1676        struct list_head *lgr_list;
1677        struct smc_link_group *lgr;
1678        enum smc_lgr_role role;
1679        spinlock_t *lgr_lock;
1680        int rc = 0;
1681
1682        lgr_list = ini->is_smcd ? &ini->ism_dev[ini->ism_selected]->lgr_list :
1683                                  &smc_lgr_list.list;
1684        lgr_lock = ini->is_smcd ? &ini->ism_dev[ini->ism_selected]->lgr_lock :
1685                                  &smc_lgr_list.lock;
1686        ini->first_contact_local = 1;
1687        role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
1688        if (role == SMC_CLNT && ini->first_contact_peer)
1689                /* create new link group as well */
1690                goto create;
1691
1692        /* determine if an existing link group can be reused */
1693        spin_lock_bh(lgr_lock);
1694        list_for_each_entry(lgr, lgr_list, list) {
1695                write_lock_bh(&lgr->conns_lock);
1696                if ((ini->is_smcd ?
1697                     smcd_lgr_match(lgr, ini->ism_dev[ini->ism_selected],
1698                                    ini->ism_peer_gid[ini->ism_selected]) :
1699                     smcr_lgr_match(lgr, ini->ib_lcl, role, ini->ib_clcqpn)) &&
1700                    !lgr->sync_err &&
1701                    (ini->smcd_version == SMC_V2 ||
1702                     lgr->vlan_id == ini->vlan_id) &&
1703                    (role == SMC_CLNT || ini->is_smcd ||
1704                     lgr->conns_num < SMC_RMBS_PER_LGR_MAX)) {
1705                        /* link group found */
1706                        ini->first_contact_local = 0;
1707                        conn->lgr = lgr;
1708                        rc = smc_lgr_register_conn(conn, false);
1709                        write_unlock_bh(&lgr->conns_lock);
1710                        if (!rc && delayed_work_pending(&lgr->free_work))
1711                                cancel_delayed_work(&lgr->free_work);
1712                        break;
1713                }
1714                write_unlock_bh(&lgr->conns_lock);
1715        }
1716        spin_unlock_bh(lgr_lock);
1717        if (rc)
1718                return rc;
1719
1720        if (role == SMC_CLNT && !ini->first_contact_peer &&
1721            ini->first_contact_local) {
1722                /* Server reuses a link group, but client wants to start
1723                 * a new one; send an out_of_sync decline,
1724                 * reason: synchronization error
1725                 */
1726                return SMC_CLC_DECL_SYNCERR;
1727        }
1728
1729create:
1730        if (ini->first_contact_local) {
1731                rc = smc_lgr_create(smc, ini);
1732                if (rc)
1733                        goto out;
1734                lgr = conn->lgr;
1735                write_lock_bh(&lgr->conns_lock);
1736                rc = smc_lgr_register_conn(conn, true);
1737                write_unlock_bh(&lgr->conns_lock);
1738                if (rc)
1739                        goto out;
1740        }
1741        conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
1742        conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
1743        conn->urg_state = SMC_URG_READ;
1744        INIT_WORK(&smc->conn.abort_work, smc_conn_abort_work);
1745        if (ini->is_smcd) {
1746                conn->rx_off = sizeof(struct smcd_cdc_msg);
1747                smcd_cdc_rx_init(conn); /* init tasklet for this conn */
1748        } else {
1749                conn->rx_off = 0;
1750        }
1751#ifndef KERNEL_HAS_ATOMIC64
1752        spin_lock_init(&conn->acurs_lock);
1753#endif
1754
1755out:
1756        return rc;
1757}
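
/* Rough decision flow of smc_conn_create(), summarizing the code above:
 *   - a client that received "first contact" from the server always creates
 *     a new link group
 *   - otherwise the lgr list is scanned for a matching, non-errored link
 *     group (same peer and role, matching vlan except for SMC-Dv2; an SMC-R
 *     server additionally needs room below SMC_RMBS_PER_LGR_MAX) and the
 *     connection is registered there
 *   - if the server reuses a link group while the client expected a new one,
 *     SMC_CLC_DECL_SYNCERR is returned and the connection is declined
 *   - ini->first_contact_local tells the caller whether a new link group was
 *     created locally
 */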
1758
1759#define SMCD_DMBE_SIZES         6 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
1760#define SMCR_RMBE_SIZES         5 /* 0 -> 16KB, 1 -> 32KB, .. 5 -> 512KB */
1761
1762/* convert the RMB size into the compressed notation (minimum 16K, see
1763 * SMCD_DMBE_SIZES / SMCR_RMBE_SIZES).
1764 * In contrast to plain ilog2, this rounds up to the next power of 2,
1765 * so the socket application gets at least its desired sndbuf / rcvbuf size.
1766 */
1767static u8 smc_compress_bufsize(int size, bool is_smcd, bool is_rmb)
1768{
1769        const unsigned int max_scat = SG_MAX_SINGLE_ALLOC * PAGE_SIZE;
1770        u8 compressed;
1771
1772        if (size <= SMC_BUF_MIN_SIZE)
1773                return 0;
1774
1775        size = (size - 1) >> 14;  /* convert to 16K multiple */
1776        compressed = min_t(u8, ilog2(size) + 1,
1777                           is_smcd ? SMCD_DMBE_SIZES : SMCR_RMBE_SIZES);
1778
1779        if (!is_smcd && is_rmb)
1780                /* RMBs are backed by & limited to max size of scatterlists */
1781                compressed = min_t(u8, compressed, ilog2(max_scat >> 14));
1782
1783        return compressed;
1784}
1785
1786/* convert the RMB size from compressed notation into integer */
1787int smc_uncompress_bufsize(u8 compressed)
1788{
1789        u32 size;
1790
1791        size = 0x00000001 << (((int)compressed) + 14);
1792        return (int)size;
1793}
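
/* Worked example for the compressed buffer-size notation (illustrative,
 * assuming SMC_BUF_MIN_SIZE is 16KB and the defines above):
 *
 *	smc_compress_bufsize(65536, false, false)
 *		-> (65536 - 1) >> 14 = 3, ilog2(3) + 1 = 2	// 64KB -> 2
 *	smc_compress_bufsize(100000, false, false)
 *		-> (100000 - 1) >> 14 = 6, ilog2(6) + 1 = 3	// rounded up
 *	smc_uncompress_bufsize(3) = 1 << (3 + 14) = 131072	// 128KB
 *
 * so an application asking for 100000 bytes is given at least that much.
 */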
1794
1795/* try to reuse a sndbuf or rmb description slot for a certain
1796 * buffer size; if not available, return NULL
1797 */
1798static struct smc_buf_desc *smc_buf_get_slot(int compressed_bufsize,
1799                                             struct mutex *lock,
1800                                             struct list_head *buf_list)
1801{
1802        struct smc_buf_desc *buf_slot;
1803
1804        mutex_lock(lock);
1805        list_for_each_entry(buf_slot, buf_list, list) {
1806                if (cmpxchg(&buf_slot->used, 0, 1) == 0) {
1807                        mutex_unlock(lock);
1808                        return buf_slot;
1809                }
1810        }
1811        mutex_unlock(lock);
1812        return NULL;
1813}
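
/* Note on the slot reuse above: buf_slot->used is claimed with cmpxchg()
 * (0 -> 1), so a slot grabbed concurrently by another connection is simply
 * skipped; the mutex only serializes the list walk itself.
 */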
1814
1815/* one of the conditions for announcing a receiver's current window size is
1816 * that it "results in a minimum increase in the window size of 10% of the
1817 * receive buffer space" [RFC7609]
1818 */
1819static inline int smc_rmb_wnd_update_limit(int rmbe_size)
1820{
1821        return min_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
1822}
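
/* Example (illustrative): the update limit is the smaller of rmbe_size / 10
 * and SOCK_MIN_SNDBUF / 2; e.g. a 64KB RMB yields 65536 / 10 = 6553 bytes
 * before the SOCK_MIN_SNDBUF / 2 cap is applied.
 */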
1823
1824/* map an rmb buf to a link */
1825static int smcr_buf_map_link(struct smc_buf_desc *buf_desc, bool is_rmb,
1826                             struct smc_link *lnk)
1827{
1828        int rc;
1829
1830        if (buf_desc->is_map_ib[lnk->link_idx])
1831                return 0;
1832
1833        rc = sg_alloc_table(&buf_desc->sgt[lnk->link_idx], 1, GFP_KERNEL);
1834        if (rc)
1835                return rc;
1836        sg_set_buf(buf_desc->sgt[lnk->link_idx].sgl,
1837                   buf_desc->cpu_addr, buf_desc->len);
1838
1839        /* map sg table to DMA address */
1840        rc = smc_ib_buf_map_sg(lnk, buf_desc,
1841                               is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
1842        /* SMC protocol depends on mapping to one DMA address only */
1843        if (rc != 1) {
1844                rc = -EAGAIN;
1845                goto free_table;
1846        }
1847
1848        /* create a new memory region for the RMB */
1849        if (is_rmb) {
1850                rc = smc_ib_get_memory_region(lnk->roce_pd,
1851                                              IB_ACCESS_REMOTE_WRITE |
1852                                              IB_ACCESS_LOCAL_WRITE,
1853                                              buf_desc, lnk->link_idx);
1854                if (rc)
1855                        goto buf_unmap;
1856                smc_ib_sync_sg_for_device(lnk, buf_desc, DMA_FROM_DEVICE);
1857        }
1858        buf_desc->is_map_ib[lnk->link_idx] = true;
1859        return 0;
1860
1861buf_unmap:
1862        smc_ib_buf_unmap_sg(lnk, buf_desc,
1863                            is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
1864free_table:
1865        sg_free_table(&buf_desc->sgt[lnk->link_idx]);
1866        return rc;
1867}
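
/* Mapping steps performed above for one link (summary of the code):
 *   1. build a one-entry sg table for the physically contiguous buffer
 *   2. DMA-map it; the protocol requires exactly one mapped address,
 *      anything else is treated as -EAGAIN
 *   3. for RMBs, additionally create an IB memory region with
 *      REMOTE_WRITE | LOCAL_WRITE access and sync the buffer for the device
 * is_map_ib[] makes the operation idempotent per link.
 */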
1868
1869/* register a new rmb on IB device,
1870 * must be called under lgr->llc_conf_mutex lock
1871 */
1872int smcr_link_reg_rmb(struct smc_link *link, struct smc_buf_desc *rmb_desc)
1873{
1874        if (list_empty(&link->lgr->list))
1875                return -ENOLINK;
1876        if (!rmb_desc->is_reg_mr[link->link_idx]) {
1877                /* register memory region for new rmb */
1878                if (smc_wr_reg_send(link, rmb_desc->mr_rx[link->link_idx])) {
1879                        rmb_desc->is_reg_err = true;
1880                        return -EFAULT;
1881                }
1882                rmb_desc->is_reg_mr[link->link_idx] = true;
1883        }
1884        return 0;
1885}
1886
1887static int _smcr_buf_map_lgr(struct smc_link *lnk, struct mutex *lock,
1888                             struct list_head *lst, bool is_rmb)
1889{
1890        struct smc_buf_desc *buf_desc, *bf;
1891        int rc = 0;
1892
1893        mutex_lock(lock);
1894        list_for_each_entry_safe(buf_desc, bf, lst, list) {
1895                if (!buf_desc->used)
1896                        continue;
1897                rc = smcr_buf_map_link(buf_desc, is_rmb, lnk);
1898                if (rc)
1899                        goto out;
1900        }
1901out:
1902        mutex_unlock(lock);
1903        return rc;
1904}
1905
1906/* map all used buffers of lgr for a new link */
1907int smcr_buf_map_lgr(struct smc_link *lnk)
1908{
1909        struct smc_link_group *lgr = lnk->lgr;
1910        int i, rc = 0;
1911
1912        for (i = 0; i < SMC_RMBE_SIZES; i++) {
1913                rc = _smcr_buf_map_lgr(lnk, &lgr->rmbs_lock,
1914                                       &lgr->rmbs[i], true);
1915                if (rc)
1916                        return rc;
1917                rc = _smcr_buf_map_lgr(lnk, &lgr->sndbufs_lock,
1918                                       &lgr->sndbufs[i], false);
1919                if (rc)
1920                        return rc;
1921        }
1922        return 0;
1923}
1924
1925/* register all used buffers of lgr for a new link,
1926 * must be called under lgr->llc_conf_mutex lock
1927 */
1928int smcr_buf_reg_lgr(struct smc_link *lnk)
1929{
1930        struct smc_link_group *lgr = lnk->lgr;
1931        struct smc_buf_desc *buf_desc, *bf;
1932        int i, rc = 0;
1933
1934        mutex_lock(&lgr->rmbs_lock);
1935        for (i = 0; i < SMC_RMBE_SIZES; i++) {
1936                list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list) {
1937                        if (!buf_desc->used)
1938                                continue;
1939                        rc = smcr_link_reg_rmb(lnk, buf_desc);
1940                        if (rc)
1941                                goto out;
1942                }
1943        }
1944out:
1945        mutex_unlock(&lgr->rmbs_lock);
1946        return rc;
1947}
1948
1949static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
1950                                                bool is_rmb, int bufsize)
1951{
1952        struct smc_buf_desc *buf_desc;
1953
1954        /* try to alloc a new buffer */
1955        buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
1956        if (!buf_desc)
1957                return ERR_PTR(-ENOMEM);
1958
1959        buf_desc->order = get_order(bufsize);
1960        buf_desc->pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN |
1961                                      __GFP_NOMEMALLOC | __GFP_COMP |
1962                                      __GFP_NORETRY | __GFP_ZERO,
1963                                      buf_desc->order);
1964        if (!buf_desc->pages) {
1965                kfree(buf_desc);
1966                return ERR_PTR(-EAGAIN);
1967        }
1968        buf_desc->cpu_addr = (void *)page_address(buf_desc->pages);
1969        buf_desc->len = bufsize;
1970        return buf_desc;
1971}
1972
1973/* map buf_desc on all usable links,
1974 * unused buffers stay mapped as long as the link is up
1975 */
1976static int smcr_buf_map_usable_links(struct smc_link_group *lgr,
1977                                     struct smc_buf_desc *buf_desc, bool is_rmb)
1978{
1979        int i, rc = 0;
1980
1981        /* protect against parallel link reconfiguration */
1982        mutex_lock(&lgr->llc_conf_mutex);
1983        for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1984                struct smc_link *lnk = &lgr->lnk[i];
1985
1986                if (!smc_link_usable(lnk))
1987                        continue;
1988                if (smcr_buf_map_link(buf_desc, is_rmb, lnk)) {
1989                        rc = -ENOMEM;
1990                        goto out;
1991                }
1992        }
1993out:
1994        mutex_unlock(&lgr->llc_conf_mutex);
1995        return rc;
1996}
1997
1998static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
1999                                                bool is_dmb, int bufsize)
2000{
2001        struct smc_buf_desc *buf_desc;
2002        int rc;
2003
2004        /* try to alloc a new DMB */
2005        buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
2006        if (!buf_desc)
2007                return ERR_PTR(-ENOMEM);
2008        if (is_dmb) {
2009                rc = smc_ism_register_dmb(lgr, bufsize, buf_desc);
2010                if (rc) {
2011                        kfree(buf_desc);
2012                        if (rc == -ENOMEM)
2013                                return ERR_PTR(-EAGAIN);
2014                        if (rc == -ENOSPC)
2015                                return ERR_PTR(-ENOSPC);
2016                        return ERR_PTR(-EIO);
2017                }
2018                buf_desc->pages = virt_to_page(buf_desc->cpu_addr);
2019                /* CDC header is stored in the buffer, so pretend it is smaller */
2020                buf_desc->len = bufsize - sizeof(struct smcd_cdc_msg);
2021        } else {
2022                buf_desc->cpu_addr = kzalloc(bufsize, GFP_KERNEL |
2023                                             __GFP_NOWARN | __GFP_NORETRY |
2024                                             __GFP_NOMEMALLOC);
2025                if (!buf_desc->cpu_addr) {
2026                        kfree(buf_desc);
2027                        return ERR_PTR(-EAGAIN);
2028                }
2029                buf_desc->len = bufsize;
2030        }
2031        return buf_desc;
2032}
2033
2034static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
2035{
2036        struct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM);
2037        struct smc_connection *conn = &smc->conn;
2038        struct smc_link_group *lgr = conn->lgr;
2039        struct list_head *buf_list;
2040        int bufsize, bufsize_short;
2041        bool is_dgraded = false;
2042        struct mutex *lock;     /* lock buffer list */
2043        int sk_buf_size;
2044
2045        if (is_rmb)
2046                /* use socket recv buffer size (w/o overhead) as start value */
2047                sk_buf_size = smc->sk.sk_rcvbuf / 2;
2048        else
2049                /* use socket send buffer size (w/o overhead) as start value */
2050                sk_buf_size = smc->sk.sk_sndbuf / 2;
2051
2052        for (bufsize_short = smc_compress_bufsize(sk_buf_size, is_smcd, is_rmb);
2053             bufsize_short >= 0; bufsize_short--) {
2054                if (is_rmb) {
2055                        lock = &lgr->rmbs_lock;
2056                        buf_list = &lgr->rmbs[bufsize_short];
2057                } else {
2058                        lock = &lgr->sndbufs_lock;
2059                        buf_list = &lgr->sndbufs[bufsize_short];
2060                }
2061                bufsize = smc_uncompress_bufsize(bufsize_short);
2062
2063                /* check for reusable slot in the link group */
2064                buf_desc = smc_buf_get_slot(bufsize_short, lock, buf_list);
2065                if (buf_desc) {
2066                        SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize);
2067                        SMC_STAT_BUF_REUSE(smc, is_smcd, is_rmb);
2068                        memset(buf_desc->cpu_addr, 0, bufsize);
2069                        break; /* found reusable slot */
2070                }
2071
2072                if (is_smcd)
2073                        buf_desc = smcd_new_buf_create(lgr, is_rmb, bufsize);
2074                else
2075                        buf_desc = smcr_new_buf_create(lgr, is_rmb, bufsize);
2076
2077                if (PTR_ERR(buf_desc) == -ENOMEM)
2078                        break;
2079                if (IS_ERR(buf_desc)) {
2080                        if (!is_dgraded) {
2081                                is_dgraded = true;
2082                                SMC_STAT_RMB_DOWNGRADED(smc, is_smcd, is_rmb);
2083                        }
2084                        continue;
2085                }
2086
2087                SMC_STAT_RMB_ALLOC(smc, is_smcd, is_rmb);
2088                SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize);
2089                buf_desc->used = 1;
2090                mutex_lock(lock);
2091                list_add(&buf_desc->list, buf_list);
2092                mutex_unlock(lock);
2093                break; /* found */
2094        }
2095
2096        if (IS_ERR(buf_desc))
2097                return PTR_ERR(buf_desc);
2098
2099        if (!is_smcd) {
2100                if (smcr_buf_map_usable_links(lgr, buf_desc, is_rmb)) {
2101                        smcr_buf_unuse(buf_desc, lgr);
2102                        return -ENOMEM;
2103                }
2104        }
2105
2106        if (is_rmb) {
2107                conn->rmb_desc = buf_desc;
2108                conn->rmbe_size_short = bufsize_short;
2109                smc->sk.sk_rcvbuf = bufsize * 2;
2110                atomic_set(&conn->bytes_to_rcv, 0);
2111                conn->rmbe_update_limit =
2112                        smc_rmb_wnd_update_limit(buf_desc->len);
2113                if (is_smcd)
2114                        smc_ism_set_conn(conn); /* map RMB/smcd_dev to conn */
2115        } else {
2116                conn->sndbuf_desc = buf_desc;
2117                smc->sk.sk_sndbuf = bufsize * 2;
2118                atomic_set(&conn->sndbuf_space, bufsize);
2119        }
2120        return 0;
2121}
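
/* Downgrade example (illustrative, assuming an sk_rcvbuf of 256KB): the
 * start value is 256KB / 2 = 128KB, i.e. compressed size 3.  If neither a
 * reusable 128KB slot nor a fresh 128KB allocation is available (allocation
 * failures other than -ENOMEM), the loop retries with 64KB, 32KB and finally
 * 16KB, counting the connection as downgraded in the statistics once.
 */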
2122
2123void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn)
2124{
2125        if (!conn->lgr || conn->lgr->is_smcd || !smc_link_active(conn->lnk))
2126                return;
2127        smc_ib_sync_sg_for_cpu(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE);
2128}
2129
2130void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn)
2131{
2132        if (!conn->lgr || conn->lgr->is_smcd || !smc_link_active(conn->lnk))
2133                return;
2134        smc_ib_sync_sg_for_device(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE);
2135}
2136
2137void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn)
2138{
2139        int i;
2140
2141        if (!conn->lgr || conn->lgr->is_smcd)
2142                return;
2143        for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
2144                if (!smc_link_active(&conn->lgr->lnk[i]))
2145                        continue;
2146                smc_ib_sync_sg_for_cpu(&conn->lgr->lnk[i], conn->rmb_desc,
2147                                       DMA_FROM_DEVICE);
2148        }
2149}
2150
2151void smc_rmb_sync_sg_for_device(struct smc_connection *conn)
2152{
2153        int i;
2154
2155        if (!conn->lgr || conn->lgr->is_smcd)
2156                return;
2157        for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
2158                if (!smc_link_active(&conn->lgr->lnk[i]))
2159                        continue;
2160                smc_ib_sync_sg_for_device(&conn->lgr->lnk[i], conn->rmb_desc,
2161                                          DMA_FROM_DEVICE);
2162        }
2163}
2164
2165/* create the send and receive buffers for an SMC socket;
2166 * receive buffers are called RMBs.
2167 * (Even though the SMC protocol allows more than one RMB-element per RMB,
2168 * the Linux implementation uses just one RMB-element per RMB, i.e. it uses
2169 * an extra RMB for every connection in a link group.)
2170 */
2171int smc_buf_create(struct smc_sock *smc, bool is_smcd)
2172{
2173        int rc;
2174
2175        /* create send buffer */
2176        rc = __smc_buf_create(smc, is_smcd, false);
2177        if (rc)
2178                return rc;
2179        /* create rmb */
2180        rc = __smc_buf_create(smc, is_smcd, true);
2181        if (rc) {
2182                mutex_lock(&smc->conn.lgr->sndbufs_lock);
2183                list_del(&smc->conn.sndbuf_desc->list);
2184                mutex_unlock(&smc->conn.lgr->sndbufs_lock);
2185                smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
2186                smc->conn.sndbuf_desc = NULL;
2187        }
2188        return rc;
2189}
2190
2191static inline int smc_rmb_reserve_rtoken_idx(struct smc_link_group *lgr)
2192{
2193        int i;
2194
2195        for_each_clear_bit(i, lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX) {
2196                if (!test_and_set_bit(i, lgr->rtokens_used_mask))
2197                        return i;
2198        }
2199        return -ENOSPC;
2200}
2201
2202static int smc_rtoken_find_by_link(struct smc_link_group *lgr, int lnk_idx,
2203                                   u32 rkey)
2204{
2205        int i;
2206
2207        for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
2208                if (test_bit(i, lgr->rtokens_used_mask) &&
2209                    lgr->rtokens[i][lnk_idx].rkey == rkey)
2210                        return i;
2211        }
2212        return -ENOENT;
2213}
2214
2215/* set rtoken for a new link to an existing rmb */
2216void smc_rtoken_set(struct smc_link_group *lgr, int link_idx, int link_idx_new,
2217                    __be32 nw_rkey_known, __be64 nw_vaddr, __be32 nw_rkey)
2218{
2219        int rtok_idx;
2220
2221        rtok_idx = smc_rtoken_find_by_link(lgr, link_idx, ntohl(nw_rkey_known));
2222        if (rtok_idx == -ENOENT)
2223                return;
2224        lgr->rtokens[rtok_idx][link_idx_new].rkey = ntohl(nw_rkey);
2225        lgr->rtokens[rtok_idx][link_idx_new].dma_addr = be64_to_cpu(nw_vaddr);
2226}
2227
2228/* set rtoken for a new link whose link_id is given */
2229void smc_rtoken_set2(struct smc_link_group *lgr, int rtok_idx, int link_id,
2230                     __be64 nw_vaddr, __be32 nw_rkey)
2231{
2232        u64 dma_addr = be64_to_cpu(nw_vaddr);
2233        u32 rkey = ntohl(nw_rkey);
2234        bool found = false;
2235        int link_idx;
2236
2237        for (link_idx = 0; link_idx < SMC_LINKS_PER_LGR_MAX; link_idx++) {
2238                if (lgr->lnk[link_idx].link_id == link_id) {
2239                        found = true;
2240                        break;
2241                }
2242        }
2243        if (!found)
2244                return;
2245        lgr->rtokens[rtok_idx][link_idx].rkey = rkey;
2246        lgr->rtokens[rtok_idx][link_idx].dma_addr = dma_addr;
2247}
2248
2249/* add a new rtoken from peer */
2250int smc_rtoken_add(struct smc_link *lnk, __be64 nw_vaddr, __be32 nw_rkey)
2251{
2252        struct smc_link_group *lgr = smc_get_lgr(lnk);
2253        u64 dma_addr = be64_to_cpu(nw_vaddr);
2254        u32 rkey = ntohl(nw_rkey);
2255        int i;
2256
2257        for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
2258                if (lgr->rtokens[i][lnk->link_idx].rkey == rkey &&
2259                    lgr->rtokens[i][lnk->link_idx].dma_addr == dma_addr &&
2260                    test_bit(i, lgr->rtokens_used_mask)) {
2261                        /* already in list */
2262                        return i;
2263                }
2264        }
2265        i = smc_rmb_reserve_rtoken_idx(lgr);
2266        if (i < 0)
2267                return i;
2268        lgr->rtokens[i][lnk->link_idx].rkey = rkey;
2269        lgr->rtokens[i][lnk->link_idx].dma_addr = dma_addr;
2270        return i;
2271}
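
/* The rtoken table used above is indexed as rtokens[rmb index][link index]:
 * one row per remote RMB (guarded by rtokens_used_mask), one column per
 * link, so the same remote buffer can be addressed via every link of the
 * link group.
 */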
2272
2273/* delete an rtoken from all links */
2274int smc_rtoken_delete(struct smc_link *lnk, __be32 nw_rkey)
2275{
2276        struct smc_link_group *lgr = smc_get_lgr(lnk);
2277        u32 rkey = ntohl(nw_rkey);
2278        int i, j;
2279
2280        for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
2281                if (lgr->rtokens[i][lnk->link_idx].rkey == rkey &&
2282                    test_bit(i, lgr->rtokens_used_mask)) {
2283                        for (j = 0; j < SMC_LINKS_PER_LGR_MAX; j++) {
2284                                lgr->rtokens[i][j].rkey = 0;
2285                                lgr->rtokens[i][j].dma_addr = 0;
2286                        }
2287                        clear_bit(i, lgr->rtokens_used_mask);
2288                        return 0;
2289                }
2290        }
2291        return -ENOENT;
2292}
2293
2294/* save rkey and dma_addr received from peer during clc handshake */
2295int smc_rmb_rtoken_handling(struct smc_connection *conn,
2296                            struct smc_link *lnk,
2297                            struct smc_clc_msg_accept_confirm *clc)
2298{
2299        conn->rtoken_idx = smc_rtoken_add(lnk, clc->r0.rmb_dma_addr,
2300                                          clc->r0.rmb_rkey);
2301        if (conn->rtoken_idx < 0)
2302                return conn->rtoken_idx;
2303        return 0;
2304}
2305
2306static void smc_core_going_away(void)
2307{
2308        struct smc_ib_device *smcibdev;
2309        struct smcd_dev *smcd;
2310
2311        mutex_lock(&smc_ib_devices.mutex);
2312        list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
2313                int i;
2314
2315                for (i = 0; i < SMC_MAX_PORTS; i++)
2316                        set_bit(i, smcibdev->ports_going_away);
2317        }
2318        mutex_unlock(&smc_ib_devices.mutex);
2319
2320        mutex_lock(&smcd_dev_list.mutex);
2321        list_for_each_entry(smcd, &smcd_dev_list.list, list) {
2322                smcd->going_away = 1;
2323        }
2324        mutex_unlock(&smcd_dev_list.mutex);
2325}
2326
2327/* Clean up all SMC link groups */
2328static void smc_lgrs_shutdown(void)
2329{
2330        struct smcd_dev *smcd;
2331
2332        smc_core_going_away();
2333
2334        smc_smcr_terminate_all(NULL);
2335
2336        mutex_lock(&smcd_dev_list.mutex);
2337        list_for_each_entry(smcd, &smcd_dev_list.list, list)
2338                smc_smcd_terminate_all(smcd);
2339        mutex_unlock(&smcd_dev_list.mutex);
2340}
2341
2342static int smc_core_reboot_event(struct notifier_block *this,
2343                                 unsigned long event, void *ptr)
2344{
2345        smc_lgrs_shutdown();
2346        smc_ib_unregister_client();
2347        return 0;
2348}
2349
2350static struct notifier_block smc_reboot_notifier = {
2351        .notifier_call = smc_core_reboot_event,
2352};
2353
2354int __init smc_core_init(void)
2355{
2356        return register_reboot_notifier(&smc_reboot_notifier);
2357}
2358
2359/* Called (from smc_exit) when module is removed */
2360void smc_core_exit(void)
2361{
2362        unregister_reboot_notifier(&smc_reboot_notifier);
2363        smc_lgrs_shutdown();
2364}
2365