linux/drivers/infiniband/sw/rxe/rxe_recv.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"

/* check that QP matches packet opcode type and is in a valid state */
static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
                            struct rxe_qp *qp)
{
        unsigned int pkt_type;

        if (unlikely(!qp->valid))
                goto err1;

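        /* the top three bits of the BTH opcode encode the transport
         * (RC, UC, UD, ...); they must match the QP's transport type
         */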
        pkt_type = pkt->opcode & 0xe0;

        switch (qp_type(qp)) {
        case IB_QPT_RC:
                if (unlikely(pkt_type != IB_OPCODE_RC)) {
                        pr_warn_ratelimited("bad qp type\n");
                        goto err1;
                }
                break;
        case IB_QPT_UC:
                if (unlikely(pkt_type != IB_OPCODE_UC)) {
                        pr_warn_ratelimited("bad qp type\n");
                        goto err1;
                }
                break;
        case IB_QPT_UD:
        case IB_QPT_SMI:
        case IB_QPT_GSI:
                if (unlikely(pkt_type != IB_OPCODE_UD)) {
                        pr_warn_ratelimited("bad qp type\n");
                        goto err1;
                }
                break;
        default:
                pr_warn_ratelimited("unsupported qp type\n");
                goto err1;
        }

        if (pkt->mask & RXE_REQ_MASK) {
                if (unlikely(qp->resp.state != QP_STATE_READY))
                        goto err1;
        } else if (unlikely(qp->req.state < QP_STATE_READY ||
                                qp->req.state > QP_STATE_DRAINED)) {
                goto err1;
        }

        return 0;

err1:
        return -EINVAL;
}

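/* the bad pkey and qkey violation counters are 16-bit port attributes;
 * saturate them at 0xffff rather than letting them wrap around
 */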
static void set_bad_pkey_cntr(struct rxe_port *port)
{
        spin_lock_bh(&port->port_lock);
        port->attr.bad_pkey_cntr = min((u32)0xffff,
                                       port->attr.bad_pkey_cntr + 1);
        spin_unlock_bh(&port->port_lock);
}

static void set_qkey_viol_cntr(struct rxe_port *port)
{
        spin_lock_bh(&port->port_lock);
        port->attr.qkey_viol_cntr = min((u32)0xffff,
                                        port->attr.qkey_viol_cntr + 1);
        spin_unlock_bh(&port->port_lock);
}

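/* check the pkey and, for UD/GSI packets, the qkey carried in the packet
 * against what the QP expects; bump the violation counters on a mismatch
 */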
static int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
                      u32 qpn, struct rxe_qp *qp)
{
        struct rxe_port *port = &rxe->port;
        u16 pkey = bth_pkey(pkt);

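        /* rxe implements a single pkey table entry: the default
         * full-membership pkey at index 0
         */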
        pkt->pkey_index = 0;

        if (!pkey_match(pkey, IB_DEFAULT_PKEY_FULL)) {
                pr_warn_ratelimited("bad pkey = 0x%x\n", pkey);
                set_bad_pkey_cntr(port);
                goto err1;
        }

        if (qp_type(qp) == IB_QPT_UD || qp_type(qp) == IB_QPT_GSI) {
                u32 qkey = (qpn == 1) ? GSI_QKEY : qp->attr.qkey;

                if (unlikely(deth_qkey(pkt) != qkey)) {
                        pr_warn_ratelimited("bad qkey, got 0x%x expected 0x%x for qpn 0x%x\n",
                                            deth_qkey(pkt), qkey, qpn);
                        set_qkey_viol_cntr(port);
                        goto err1;
                }
        }

        return 0;

err1:
        return -EINVAL;
}

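/* for connected (RC/UC) QPs, check that the packet arrived on the QP's
 * port and that its source and destination IP addresses match the QP's
 * primary address vector, with the source/destination roles swapped
 */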
static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
                      struct rxe_qp *qp)
{
        struct sk_buff *skb = PKT_TO_SKB(pkt);

        if (qp_type(qp) != IB_QPT_RC && qp_type(qp) != IB_QPT_UC)
                goto done;

        if (unlikely(pkt->port_num != qp->attr.port_num)) {
                pr_warn_ratelimited("port %d != qp port %d\n",
                                    pkt->port_num, qp->attr.port_num);
                goto err1;
        }

        if (skb->protocol == htons(ETH_P_IP)) {
                struct in_addr *saddr =
                        &qp->pri_av.sgid_addr._sockaddr_in.sin_addr;
                struct in_addr *daddr =
                        &qp->pri_av.dgid_addr._sockaddr_in.sin_addr;

                if (ip_hdr(skb)->daddr != saddr->s_addr) {
                        pr_warn_ratelimited("dst addr %pI4 != qp source addr %pI4\n",
                                            &ip_hdr(skb)->daddr,
                                            &saddr->s_addr);
                        goto err1;
                }

                if (ip_hdr(skb)->saddr != daddr->s_addr) {
                        pr_warn_ratelimited("source addr %pI4 != qp dst addr %pI4\n",
                                            &ip_hdr(skb)->saddr,
                                            &daddr->s_addr);
                        goto err1;
                }

        } else if (skb->protocol == htons(ETH_P_IPV6)) {
                struct in6_addr *saddr =
                        &qp->pri_av.sgid_addr._sockaddr_in6.sin6_addr;
                struct in6_addr *daddr =
                        &qp->pri_av.dgid_addr._sockaddr_in6.sin6_addr;

                if (memcmp(&ipv6_hdr(skb)->daddr, saddr, sizeof(*saddr))) {
                        pr_warn_ratelimited("dst addr %pI6 != qp source addr %pI6\n",
                                            &ipv6_hdr(skb)->daddr, saddr);
                        goto err1;
                }

                if (memcmp(&ipv6_hdr(skb)->saddr, daddr, sizeof(*daddr))) {
                        pr_warn_ratelimited("source addr %pI6 != qp dst addr %pI6\n",
                                            &ipv6_hdr(skb)->saddr, daddr);
                        goto err1;
                }
        }

done:
        return 0;

err1:
        return -EINVAL;
}

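/* validate the BTH fields and look up the QP addressed by the packet;
 * on success a reference is held on pkt->qp (left NULL for multicast)
 */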
static int hdr_check(struct rxe_pkt_info *pkt)
{
        struct rxe_dev *rxe = pkt->rxe;
        struct rxe_port *port = &rxe->port;
        struct rxe_qp *qp = NULL;
        u32 qpn = bth_qpn(pkt);
        int index;
        int err;

        if (unlikely(bth_tver(pkt) != BTH_TVER)) {
                pr_warn_ratelimited("bad tver\n");
                goto err1;
        }

        if (unlikely(qpn == 0)) {
                pr_warn_once("QP 0 not supported\n");
                goto err1;
        }

        if (qpn != IB_MULTICAST_QPN) {
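                /* the GSI QP (qpn 1) is allocated from the pool like
                 * any other QP, so map qpn 1 to its actual pool index
                 */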
                index = (qpn == 1) ? port->qp_gsi_index : qpn;

                qp = rxe_pool_get_index(&rxe->qp_pool, index);
                if (unlikely(!qp)) {
                        pr_warn_ratelimited("no qp matches qpn 0x%x\n", qpn);
                        goto err1;
                }

                err = check_type_state(rxe, pkt, qp);
                if (unlikely(err))
                        goto err2;

                err = check_addr(rxe, pkt, qp);
                if (unlikely(err))
                        goto err2;

                err = check_keys(rxe, pkt, qpn, qp);
                if (unlikely(err))
                        goto err2;
        } else {
                if (unlikely((pkt->mask & RXE_GRH_MASK) == 0)) {
                        pr_warn_ratelimited("no grh for mcast qpn\n");
                        goto err1;
                }
        }

        pkt->qp = qp;
        return 0;

err2:
        rxe_drop_ref(qp);
err1:
        return -EINVAL;
}

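/* hand the packet to the responder (request packets) or the completer
 * (response packets); either way the skb is consumed
 */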
static inline void rxe_rcv_pkt(struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
        if (pkt->mask & RXE_REQ_MASK)
                rxe_resp_queue_pkt(pkt->qp, skb);
        else
                rxe_comp_queue_pkt(pkt->qp, skb);
}

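/* deliver a multicast packet to each QP attached to the group: every QP
 * but the last receives a clone of the skb, the last one gets the original
 */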
static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
{
        struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
        struct rxe_mc_grp *mcg;
        struct rxe_mc_elem *mce;
        struct rxe_qp *qp;
        union ib_gid dgid;
        int err;

        if (skb->protocol == htons(ETH_P_IP))
                ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
                                       (struct in6_addr *)&dgid);
        else if (skb->protocol == htons(ETH_P_IPV6))
                memcpy(&dgid, &ipv6_hdr(skb)->daddr, sizeof(dgid));

        /* lookup mcast group corresponding to mgid, takes a ref */
        mcg = rxe_pool_get_key(&rxe->mc_grp_pool, &dgid);
        if (!mcg)
                goto drop;      /* mcast group not registered */

        spin_lock_bh(&mcg->mcg_lock);

        /* this is an unreliable datagram service, so let a failure to
         * deliver the packet to any single QP happen, move on and try
         * the rest of the QPs on the list
         */
        list_for_each_entry(mce, &mcg->qp_list, qp_list) {
                qp = mce->qp;

                /* validate qp for incoming packet */
                err = check_type_state(rxe, pkt, qp);
                if (err)
                        continue;

                err = check_keys(rxe, pkt, bth_qpn(pkt), qp);
                if (err)
                        continue;

                /* for all but the last QP create a new clone of the
                 * skb and pass it to that QP. Pass the original skb
                 * to the last QP in the list.
                 */
                if (mce->qp_list.next != &mcg->qp_list) {
                        struct sk_buff *cskb;
                        struct rxe_pkt_info *cpkt;

                        cskb = skb_clone(skb, GFP_ATOMIC);
                        if (unlikely(!cskb))
                                continue;

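                        /* each delivered skb drops an ib_device
                         * reference when it is freed, so take an
                         * extra reference for every clone
                         */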
                        if (WARN_ON(!ib_device_try_get(&rxe->ib_dev))) {
                                kfree_skb(cskb);
                                break;
                        }

                        cpkt = SKB_TO_PKT(cskb);
                        cpkt->qp = qp;
                        rxe_add_ref(qp);
                        rxe_rcv_pkt(cpkt, cskb);
                } else {
                        pkt->qp = qp;
                        rxe_add_ref(qp);
                        rxe_rcv_pkt(pkt, skb);
                        skb = NULL;     /* mark consumed */
                }
        }

        spin_unlock_bh(&mcg->mcg_lock);

        rxe_drop_ref(mcg);      /* drop ref from rxe_pool_get_key */

        if (likely(!skb))
                return;

        /* this is reached only if the original skb was not handed to a
         * QP above, e.g. because a check failed on the last QP in the
         * list or the group has no QPs attached
         */

drop:
        kfree_skb(skb);
        ib_device_put(&rxe->ib_dev);
}

/**
 * rxe_chk_dgid - validate destination IP address
 * @rxe: rxe device that received the packet
 * @skb: the received packet buffer
 *
 * Accept the packet if it is a loopback packet, if its destination is a
 * multicast address, or if the destination IP address extracted from the
 * packet matches an entry in the port's SGID table.
 *
 * Returns 0 on success or a negative errno if the GID lookup fails.
 */
static int rxe_chk_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
{
        struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
        const struct ib_gid_attr *gid_attr;
        union ib_gid dgid;
        union ib_gid *pdgid;

        if (pkt->mask & RXE_LOOPBACK_MASK)
                return 0;

        if (skb->protocol == htons(ETH_P_IP)) {
                ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
                                       (struct in6_addr *)&dgid);
                pdgid = &dgid;
        } else {
                pdgid = (union ib_gid *)&ipv6_hdr(skb)->daddr;
        }

        if (rdma_is_multicast_addr((struct in6_addr *)pdgid))
                return 0;

        gid_attr = rdma_find_gid_by_port(&rxe->ib_dev, pdgid,
                                         IB_GID_TYPE_ROCE_UDP_ENCAP,
                                         1, skb->dev);
        if (IS_ERR(gid_attr))
                return PTR_ERR(gid_attr);

        rdma_put_gid_attr(gid_attr);
        return 0;
}

/* rxe_rcv is called from the interface driver with a reference held on
 * the ib_device; it consumes both the skb and that reference
 */
void rxe_rcv(struct sk_buff *skb)
{
        int err;
        struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
        struct rxe_dev *rxe = pkt->rxe;

        if (unlikely(skb->len < RXE_BTH_BYTES))
                goto drop;

        if (rxe_chk_dgid(rxe, skb) < 0) {
                pr_warn_ratelimited("failed checking dgid\n");
                goto drop;
        }

        pkt->opcode = bth_opcode(pkt);
        pkt->psn = bth_psn(pkt);
        pkt->qp = NULL;
        pkt->mask |= rxe_opcode[pkt->opcode].mask;

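        /* now that the opcode is known, re-check the length against the
         * full header size for this opcode
         */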
        if (unlikely(skb->len < header_size(pkt)))
                goto drop;

        err = hdr_check(pkt);
        if (unlikely(err))
                goto drop;

        err = rxe_icrc_check(skb, pkt);
        if (unlikely(err))
                goto drop;

        rxe_counter_inc(rxe, RXE_CNT_RCVD_PKTS);

        if (unlikely(bth_qpn(pkt) == IB_MULTICAST_QPN))
                rxe_rcv_mcast_pkt(rxe, skb);
        else
                rxe_rcv_pkt(pkt, skb);

        return;

drop:
        if (pkt->qp)
                rxe_drop_ref(pkt->qp);

        kfree_skb(skb);
        ib_device_put(&rxe->ib_dev);
}
 405