linux/drivers/infiniband/core/cache.c
/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"

struct ib_pkey_cache {
        int             table_len;
        u16             table[];
};

struct ib_update_work {
        struct work_struct work;
        struct ib_device  *device;
        u8                 port_num;
        bool               enforce_security;
};

union ib_gid zgid;
EXPORT_SYMBOL(zgid);

enum gid_attr_find_mask {
        GID_ATTR_FIND_MASK_GID          = 1UL << 0,
        GID_ATTR_FIND_MASK_NETDEV       = 1UL << 1,
        GID_ATTR_FIND_MASK_DEFAULT      = 1UL << 2,
        GID_ATTR_FIND_MASK_GID_TYPE     = 1UL << 3,
};

enum gid_table_entry_state {
        GID_TABLE_ENTRY_INVALID         = 1,
        GID_TABLE_ENTRY_VALID           = 2,
        /*
         * Indicates that the entry is pending removal; there may still
         * be active users of this GID entry. When the last user drops
         * its reference, the GID entry is detached from the table.
         */
        GID_TABLE_ENTRY_PENDING_DEL     = 3,
};

struct roce_gid_ndev_storage {
        struct rcu_head rcu_head;
        struct net_device *ndev;
};

struct ib_gid_table_entry {
        struct kref                     kref;
        struct work_struct              del_work;
        struct ib_gid_attr              attr;
        void                            *context;
        /* Store the ndev pointer so that the reference can be released
         * later, from call_rcu context, by which time the
         * gid_table_entry and attr may already have been freed; hence
         * keep a copy. ndev_storage itself is freed by the RCU
         * callback.
         */
        struct roce_gid_ndev_storage    *ndev_storage;
        enum gid_table_entry_state      state;
};

struct ib_gid_table {
        int                             sz;
        /* In RoCE, adding a GID to the table requires:
         * (a) Find out if this GID already exists.
         * (b) Find a free slot.
         * (c) Write the new GID.
         *
         * Delete requires a different set of operations:
         * (a) Find the GID.
         * (b) Delete it.
         */
        /* Any writer to data_vec must hold this lock and the write side of
         * rwlock. Readers must hold only rwlock. All writers must be in a
         * sleepable context.
         */
        struct mutex                    lock;
        /* rwlock protects data_vec[ix]->state and the entry pointer.
         */
        rwlock_t                        rwlock;
        struct ib_gid_table_entry       **data_vec;
        /* bit field, each bit indicates the index of a default GID */
        u32                             default_gid_indices;
};

static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
{
        struct ib_event event;

        event.device            = ib_dev;
        event.element.port_num  = port;
        event.event             = IB_EVENT_GID_CHANGE;

        ib_dispatch_event(&event);
}

static const char * const gid_type_str[] = {
        [IB_GID_TYPE_IB]        = "IB/RoCE v1",
        [IB_GID_TYPE_ROCE_UDP_ENCAP]    = "RoCE v2",
};

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
{
        if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
                return gid_type_str[gid_type];

        return "Invalid GID type";
}
EXPORT_SYMBOL(ib_cache_gid_type_str);

/**
 * rdma_is_zero_gid - Check if given GID is zero or not.
 * @gid:        GID to check
 *
 * Returns true if given GID is zero, returns false otherwise.
 */
bool rdma_is_zero_gid(const union ib_gid *gid)
{
        return !memcmp(gid, &zgid, sizeof(*gid));
}
EXPORT_SYMBOL(rdma_is_zero_gid);

/**
 * is_gid_index_default - Check if a given index belongs to the
 * reserved default GIDs or not.
 * @table:      GID table pointer
 * @index:      Index to check in GID table
 *
 * Returns true if the index is one of the reserved default GID indices,
 * otherwise returns false.
 */
static bool is_gid_index_default(const struct ib_gid_table *table,
                                 unsigned int index)
{
        return index < 32 && (BIT(index) & table->default_gid_indices);
}

int ib_cache_gid_parse_type_str(const char *buf)
{
        unsigned int i;
        size_t len;
        int err = -EINVAL;

        len = strlen(buf);
        if (len == 0)
                return -EINVAL;

        if (buf[len - 1] == '\n')
                len--;

        for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
                if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
                    len == strlen(gid_type_str[i])) {
                        err = i;
                        break;
                }

        return err;
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
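
/*
 * Usage sketch (illustrative): parsing a sysfs-style input buffer.
 * A trailing newline, as written by "echo", is tolerated:
 *
 *      int gid_type = ib_cache_gid_parse_type_str("RoCE v2\n");
 *
 * On success the matching enum ib_gid_type value is returned
 * (IB_GID_TYPE_ROCE_UDP_ENCAP here), otherwise -EINVAL.
 */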

static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u8 port)
{
        return device->port_data[port].cache.gid;
}

static bool is_gid_entry_free(const struct ib_gid_table_entry *entry)
{
        return !entry;
}

static bool is_gid_entry_valid(const struct ib_gid_table_entry *entry)
{
        return entry && entry->state == GID_TABLE_ENTRY_VALID;
}

static void schedule_free_gid(struct kref *kref)
{
        struct ib_gid_table_entry *entry =
                        container_of(kref, struct ib_gid_table_entry, kref);

        queue_work(ib_wq, &entry->del_work);
}

static void put_gid_ndev(struct rcu_head *head)
{
        struct roce_gid_ndev_storage *storage =
                container_of(head, struct roce_gid_ndev_storage, rcu_head);

        WARN_ON(!storage->ndev);
        /* At this point it's safe to release the netdev reference, as
         * all callers working on gid_attr->ndev are done using this
         * netdev.
         */
        dev_put(storage->ndev);
        kfree(storage);
}

static void free_gid_entry_locked(struct ib_gid_table_entry *entry)
{
        struct ib_device *device = entry->attr.device;
        u8 port_num = entry->attr.port_num;
        struct ib_gid_table *table = rdma_gid_table(device, port_num);

        dev_dbg(&device->dev, "%s port=%d index=%d gid %pI6\n", __func__,
                port_num, entry->attr.index, entry->attr.gid.raw);

        write_lock_irq(&table->rwlock);

        /*
         * Only clear the table slot if it still points to this entry;
         * if a new entry was stored at this index by the time we free
         * here, don't overwrite it with NULL.
         */
        if (entry == table->data_vec[entry->attr.index])
                table->data_vec[entry->attr.index] = NULL;
        /* Now this index is ready to be allocated */
        write_unlock_irq(&table->rwlock);

        if (entry->ndev_storage)
                call_rcu(&entry->ndev_storage->rcu_head, put_gid_ndev);
        kfree(entry);
}

static void free_gid_entry(struct kref *kref)
{
        struct ib_gid_table_entry *entry =
                        container_of(kref, struct ib_gid_table_entry, kref);

        free_gid_entry_locked(entry);
}

/**
 * free_gid_work - Release reference to the GID entry
 * @work: Work structure referring to the GID entry that needs to be
 * deleted.
 *
 * free_gid_work() frees the entry from the HCA's hardware table
 * if the provider supports it, and releases the reference to the
 * netdevice.
 */
static void free_gid_work(struct work_struct *work)
{
        struct ib_gid_table_entry *entry =
                container_of(work, struct ib_gid_table_entry, del_work);
        struct ib_device *device = entry->attr.device;
        u8 port_num = entry->attr.port_num;
        struct ib_gid_table *table = rdma_gid_table(device, port_num);

        mutex_lock(&table->lock);
        free_gid_entry_locked(entry);
        mutex_unlock(&table->lock);
}

static struct ib_gid_table_entry *
alloc_gid_entry(const struct ib_gid_attr *attr)
{
        struct ib_gid_table_entry *entry;
        struct net_device *ndev;

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return NULL;

        ndev = rcu_dereference_protected(attr->ndev, 1);
        if (ndev) {
                entry->ndev_storage = kzalloc(sizeof(*entry->ndev_storage),
                                              GFP_KERNEL);
                if (!entry->ndev_storage) {
                        kfree(entry);
                        return NULL;
                }
                dev_hold(ndev);
                entry->ndev_storage->ndev = ndev;
        }
        kref_init(&entry->kref);
        memcpy(&entry->attr, attr, sizeof(*attr));
        INIT_WORK(&entry->del_work, free_gid_work);
        entry->state = GID_TABLE_ENTRY_INVALID;
        return entry;
}

static void store_gid_entry(struct ib_gid_table *table,
                            struct ib_gid_table_entry *entry)
{
        entry->state = GID_TABLE_ENTRY_VALID;

        dev_dbg(&entry->attr.device->dev, "%s port=%d index=%d gid %pI6\n",
                __func__, entry->attr.port_num, entry->attr.index,
                entry->attr.gid.raw);

        lockdep_assert_held(&table->lock);
        write_lock_irq(&table->rwlock);
        table->data_vec[entry->attr.index] = entry;
        write_unlock_irq(&table->rwlock);
}

static void get_gid_entry(struct ib_gid_table_entry *entry)
{
        kref_get(&entry->kref);
}

static void put_gid_entry(struct ib_gid_table_entry *entry)
{
        kref_put(&entry->kref, schedule_free_gid);
}

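/*
 * Variant of put_gid_entry() for callers that already hold table->lock:
 * the final kref_put() then frees the entry synchronously via
 * free_gid_entry(), rather than through the ib_wq work item that
 * schedule_free_gid() queues (which would retake table->lock).
 */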
static void put_gid_entry_locked(struct ib_gid_table_entry *entry)
{
        kref_put(&entry->kref, free_gid_entry);
}

static int add_roce_gid(struct ib_gid_table_entry *entry)
{
        const struct ib_gid_attr *attr = &entry->attr;
        int ret;

        if (!attr->ndev) {
                dev_err(&attr->device->dev, "%s NULL netdev port=%d index=%d\n",
                        __func__, attr->port_num, attr->index);
                return -EINVAL;
        }
        if (rdma_cap_roce_gid_table(attr->device, attr->port_num)) {
                ret = attr->device->ops.add_gid(attr, &entry->context);
                if (ret) {
                        dev_err(&attr->device->dev,
                                "%s GID add failed port=%d index=%d\n",
                                __func__, attr->port_num, attr->index);
                        return ret;
                }
        }
        return 0;
}

/**
 * del_gid - Delete GID table entry
 *
 * @ib_dev:     IB device whose GID entry is to be deleted
 * @port:       Port number of the IB device
 * @table:      GID table of the IB device for a port
 * @ix:         GID entry index to delete
 *
 */
static void del_gid(struct ib_device *ib_dev, u8 port,
                    struct ib_gid_table *table, int ix)
{
        struct roce_gid_ndev_storage *ndev_storage;
        struct ib_gid_table_entry *entry;

        lockdep_assert_held(&table->lock);

        dev_dbg(&ib_dev->dev, "%s port=%d index=%d gid %pI6\n", __func__, port,
                ix, table->data_vec[ix]->attr.gid.raw);

        write_lock_irq(&table->rwlock);
        entry = table->data_vec[ix];
        entry->state = GID_TABLE_ENTRY_PENDING_DEL;
        /*
         * For non-RoCE protocols, the GID entry slot is immediately
         * ready for reuse.
         */
        if (!rdma_protocol_roce(ib_dev, port))
                table->data_vec[ix] = NULL;
        write_unlock_irq(&table->rwlock);

        ndev_storage = entry->ndev_storage;
        if (ndev_storage) {
                entry->ndev_storage = NULL;
                rcu_assign_pointer(entry->attr.ndev, NULL);
                call_rcu(&ndev_storage->rcu_head, put_gid_ndev);
        }

        if (rdma_cap_roce_gid_table(ib_dev, port))
                ib_dev->ops.del_gid(&entry->attr, &entry->context);

        put_gid_entry_locked(entry);
}

/**
 * add_modify_gid - Add or modify GID table entry
 *
 * @table:      GID table in which the GID is to be added or modified
 * @attr:       Attributes of the GID
 *
 * Returns 0 on success or an appropriate error code. It accepts zero
 * GID addition for non-RoCE ports on HCAs that report zero GIDs as
 * valid. Such zero GIDs are not added to the cache, however.
 */
static int add_modify_gid(struct ib_gid_table *table,
                          const struct ib_gid_attr *attr)
{
        struct ib_gid_table_entry *entry;
        int ret = 0;

        /*
         * Invalidate any old entry in the table to make it safe to write to
         * this index.
         */
        if (is_gid_entry_valid(table->data_vec[attr->index]))
                del_gid(attr->device, attr->port_num, table, attr->index);

        /*
         * Some HCAs report multiple GID entries with only one valid GID,
         * and leave other unused entries as the zero GID. Convert zero
         * GIDs to empty table entries instead of storing them.
         */
        if (rdma_is_zero_gid(&attr->gid))
                return 0;

        entry = alloc_gid_entry(attr);
        if (!entry)
                return -ENOMEM;

        if (rdma_protocol_roce(attr->device, attr->port_num)) {
                ret = add_roce_gid(entry);
                if (ret)
                        goto done;
        }

        store_gid_entry(table, entry);
        return 0;

done:
        put_gid_entry(entry);
        return ret;
}

/* Caller must hold table->rwlock for read, or table->lock. */
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
                    const struct ib_gid_attr *val, bool default_gid,
                    unsigned long mask, int *pempty)
{
        int i = 0;
        int found = -1;
        int empty = pempty ? -1 : 0;

        while (i < table->sz && (found < 0 || empty < 0)) {
                struct ib_gid_table_entry *data = table->data_vec[i];
                struct ib_gid_attr *attr;
                int curr_index = i;

                i++;

                /* find_gid() is used during GID addition, where it is
                 * expected to return a free (non-duplicate) entry slot.
                 * A free entry slot is requested and returned only if
                 * pempty is set, so look up a free slot only when
                 * requested.
                 */
                if (pempty && empty < 0) {
                        if (is_gid_entry_free(data) &&
                            default_gid ==
                                is_gid_index_default(table, curr_index)) {
                                /*
                                 * Found an invalid (free) entry; allocate it.
                                 * If a default GID is requested, then our
                                 * found slot must be one of the DEFAULT
                                 * reserved slots or we fail.
                                 * This ensures that only DEFAULT reserved
                                 * slots are used for default property GIDs.
                                 */
                                empty = curr_index;
                        }
                }

                /*
                 * find_gid() is also used to find a valid entry during
                 * lookup; ignore entries that are marked as pending
                 * removal and entries that are marked as invalid.
                 */
                if (!is_gid_entry_valid(data))
                        continue;

                if (found >= 0)
                        continue;

                attr = &data->attr;
                if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
                    attr->gid_type != val->gid_type)
                        continue;

                if (mask & GID_ATTR_FIND_MASK_GID &&
                    memcmp(gid, &data->attr.gid, sizeof(*gid)))
                        continue;

                if (mask & GID_ATTR_FIND_MASK_NETDEV &&
                    attr->ndev != val->ndev)
                        continue;

                if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
                    is_gid_index_default(table, curr_index) != default_gid)
                        continue;

                found = curr_index;
        }

        if (pempty)
                *pempty = empty;

        return found;
}

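/*
 * The default GID is formed like an IPv6 link-local address: the
 * fe80::/64 prefix, with an EUI-64 interface ID derived from the
 * netdev's MAC address by addrconf_ifid_eui48().
 */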
static void make_default_gid(struct net_device *dev, union ib_gid *gid)
{
        gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
        addrconf_ifid_eui48(&gid->raw[8], dev);
}

static int __ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
                              union ib_gid *gid, struct ib_gid_attr *attr,
                              unsigned long mask, bool default_gid)
{
        struct ib_gid_table *table;
        int ret = 0;
        int empty;
        int ix;

        /* Do not allow adding a zero GID, per IB spec version 1.3,
         * section 4.1.1 point (6) and sections 12.7.10 and 12.7.20.
         */
        if (rdma_is_zero_gid(gid))
                return -EINVAL;

        table = rdma_gid_table(ib_dev, port);

        mutex_lock(&table->lock);

        ix = find_gid(table, gid, attr, default_gid, mask, &empty);
        if (ix >= 0)
                goto out_unlock;

        if (empty < 0) {
                ret = -ENOSPC;
                goto out_unlock;
        }
        attr->device = ib_dev;
        attr->index = empty;
        attr->port_num = port;
        attr->gid = *gid;
        ret = add_modify_gid(table, attr);
        if (!ret)
                dispatch_gid_change_event(ib_dev, port);

out_unlock:
        mutex_unlock(&table->lock);
        if (ret)
                pr_warn("%s: unable to add gid %pI6 error=%d\n",
                        __func__, gid->raw, ret);
        return ret;
}

int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
                     union ib_gid *gid, struct ib_gid_attr *attr)
{
        unsigned long mask = GID_ATTR_FIND_MASK_GID |
                             GID_ATTR_FIND_MASK_GID_TYPE |
                             GID_ATTR_FIND_MASK_NETDEV;

        return __ib_cache_gid_add(ib_dev, port, gid, attr, mask, false);
}

static int
_ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
                  union ib_gid *gid, struct ib_gid_attr *attr,
                  unsigned long mask, bool default_gid)
{
        struct ib_gid_table *table;
        int ret = 0;
        int ix;

        table = rdma_gid_table(ib_dev, port);

        mutex_lock(&table->lock);

        ix = find_gid(table, gid, attr, default_gid, mask, NULL);
        if (ix < 0) {
                ret = -EINVAL;
                goto out_unlock;
        }

        del_gid(ib_dev, port, table, ix);
        dispatch_gid_change_event(ib_dev, port);

out_unlock:
        mutex_unlock(&table->lock);
        if (ret)
                pr_debug("%s: can't delete gid %pI6 error=%d\n",
                         __func__, gid->raw, ret);
        return ret;
}

int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
                     union ib_gid *gid, struct ib_gid_attr *attr)
{
        unsigned long mask = GID_ATTR_FIND_MASK_GID       |
                             GID_ATTR_FIND_MASK_GID_TYPE |
                             GID_ATTR_FIND_MASK_DEFAULT  |
                             GID_ATTR_FIND_MASK_NETDEV;

        return _ib_cache_gid_del(ib_dev, port, gid, attr, mask, false);
}

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
                                     struct net_device *ndev)
{
        struct ib_gid_table *table;
        int ix;
        bool deleted = false;

        table = rdma_gid_table(ib_dev, port);

        mutex_lock(&table->lock);

        for (ix = 0; ix < table->sz; ix++) {
                if (is_gid_entry_valid(table->data_vec[ix]) &&
                    table->data_vec[ix]->attr.ndev == ndev) {
                        del_gid(ib_dev, port, table, ix);
                        deleted = true;
                }
        }

        mutex_unlock(&table->lock);

        if (deleted)
                dispatch_gid_change_event(ib_dev, port);

        return 0;
}

/**
 * rdma_find_gid_by_port - Returns the GID entry attributes when it finds
 * a valid GID entry for given search parameters. It searches for the specified
 * GID value in the local software cache.
 * @ib_dev: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: The GID type to search for.
 * @port: The port number of the device where the GID value should be
 *   searched.
 * @ndev: In RoCE, the net device of the device. NULL means ignore.
 *
 * Returns sgid attributes if the GID is found with a valid reference, or
 * returns an ERR_PTR for the error.
 * The caller must invoke rdma_put_gid_attr() to release the reference.
 */
const struct ib_gid_attr *
rdma_find_gid_by_port(struct ib_device *ib_dev,
                      const union ib_gid *gid,
                      enum ib_gid_type gid_type,
                      u8 port, struct net_device *ndev)
{
        int local_index;
        struct ib_gid_table *table;
        unsigned long mask = GID_ATTR_FIND_MASK_GID |
                             GID_ATTR_FIND_MASK_GID_TYPE;
        struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
        const struct ib_gid_attr *attr;
        unsigned long flags;

        if (!rdma_is_port_valid(ib_dev, port))
                return ERR_PTR(-ENOENT);

        table = rdma_gid_table(ib_dev, port);

        if (ndev)
                mask |= GID_ATTR_FIND_MASK_NETDEV;

        read_lock_irqsave(&table->rwlock, flags);
        local_index = find_gid(table, gid, &val, false, mask, NULL);
        if (local_index >= 0) {
                get_gid_entry(table->data_vec[local_index]);
                attr = &table->data_vec[local_index]->attr;
                read_unlock_irqrestore(&table->rwlock, flags);
                return attr;
        }

        read_unlock_irqrestore(&table->rwlock, flags);
        return ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL(rdma_find_gid_by_port);
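
/*
 * Usage sketch (illustrative; "dev" is some ib_device and "gid" a GID
 * expected to be programmed on port 1):
 *
 *      const struct ib_gid_attr *attr;
 *
 *      attr = rdma_find_gid_by_port(dev, &gid, IB_GID_TYPE_IB, 1, NULL);
 *      if (IS_ERR(attr))
 *              return PTR_ERR(attr);
 *      ... use attr->index, attr->gid_type ...
 *      rdma_put_gid_attr(attr);
 */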

/**
 * rdma_find_gid_by_filter - Returns the GID table attribute where a
 * specified GID value occurs
 * @ib_dev: The device to query.
 * @gid: The GID value to search for.
 * @port: The port number of the device where the GID value could be
 *   searched.
 * @filter: The filter function is executed on any matching GID in the table.
 *   If the filter function returns true, the corresponding index is returned,
 *   otherwise, we continue searching the GID table. It is guaranteed that
 *   while the filter is executed, the ndev field is valid and the structure
 *   won't change. The filter is executed in an atomic context and must not
 *   be NULL.
 * @context: Opaque pointer passed through to the filter function.
 *
 * rdma_find_gid_by_filter() searches the port's GID table for the specified
 * GID value for which the filter function returns true.
 */
const struct ib_gid_attr *rdma_find_gid_by_filter(
        struct ib_device *ib_dev, const union ib_gid *gid, u8 port,
        bool (*filter)(const union ib_gid *gid, const struct ib_gid_attr *,
                       void *),
        void *context)
{
        const struct ib_gid_attr *res = ERR_PTR(-ENOENT);
        struct ib_gid_table *table;
        unsigned long flags;
        unsigned int i;

        if (!rdma_is_port_valid(ib_dev, port))
                return ERR_PTR(-EINVAL);

        table = rdma_gid_table(ib_dev, port);

        read_lock_irqsave(&table->rwlock, flags);
        for (i = 0; i < table->sz; i++) {
                struct ib_gid_table_entry *entry = table->data_vec[i];

                if (!is_gid_entry_valid(entry))
                        continue;

                if (memcmp(gid, &entry->attr.gid, sizeof(*gid)))
                        continue;

                if (filter(gid, &entry->attr, context)) {
                        get_gid_entry(entry);
                        res = &entry->attr;
                        break;
                }
        }
        read_unlock_irqrestore(&table->rwlock, flags);
        return res;
}

static struct ib_gid_table *alloc_gid_table(int sz)
{
        struct ib_gid_table *table = kzalloc(sizeof(*table), GFP_KERNEL);

        if (!table)
                return NULL;

        table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
        if (!table->data_vec)
                goto err_free_table;

        mutex_init(&table->lock);

        table->sz = sz;
        rwlock_init(&table->rwlock);
        return table;

err_free_table:
        kfree(table);
        return NULL;
}

static void release_gid_table(struct ib_device *device,
                              struct ib_gid_table *table)
{
        bool leak = false;
        int i;

        if (!table)
                return;

        for (i = 0; i < table->sz; i++) {
                if (is_gid_entry_free(table->data_vec[i]))
                        continue;
                if (kref_read(&table->data_vec[i]->kref) > 1) {
                        dev_err(&device->dev,
                                "GID entry ref leak for index %d ref=%d\n", i,
                                kref_read(&table->data_vec[i]->kref));
                        leak = true;
                }
        }
        if (leak)
                return;

        kfree(table->data_vec);
        kfree(table);
}

static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
                                   struct ib_gid_table *table)
{
        int i;
        bool deleted = false;

        if (!table)
                return;

        mutex_lock(&table->lock);
        for (i = 0; i < table->sz; ++i) {
                if (is_gid_entry_valid(table->data_vec[i])) {
                        del_gid(ib_dev, port, table, i);
                        deleted = true;
                }
        }
        mutex_unlock(&table->lock);

        if (deleted)
                dispatch_gid_change_event(ib_dev, port);
}

void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
                                  struct net_device *ndev,
                                  unsigned long gid_type_mask,
                                  enum ib_cache_gid_default_mode mode)
{
        union ib_gid gid = { };
        struct ib_gid_attr gid_attr;
        unsigned int gid_type;
        unsigned long mask;

        mask = GID_ATTR_FIND_MASK_GID_TYPE |
               GID_ATTR_FIND_MASK_DEFAULT |
               GID_ATTR_FIND_MASK_NETDEV;
        memset(&gid_attr, 0, sizeof(gid_attr));
        gid_attr.ndev = ndev;

        for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
                if (1UL << gid_type & ~gid_type_mask)
                        continue;

                gid_attr.gid_type = gid_type;

                if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
                        make_default_gid(ndev, &gid);
                        __ib_cache_gid_add(ib_dev, port, &gid,
                                           &gid_attr, mask, true);
                } else if (mode == IB_CACHE_GID_DEFAULT_MODE_DELETE) {
                        _ib_cache_gid_del(ib_dev, port, &gid,
                                          &gid_attr, mask, true);
                }
        }
}

static void gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
                                      struct ib_gid_table *table)
{
        unsigned int i;
        unsigned long roce_gid_type_mask;
        unsigned int num_default_gids;

        roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
        num_default_gids = hweight_long(roce_gid_type_mask);
        /* Reserve starting indices for default GIDs */
        for (i = 0; i < num_default_gids && i < table->sz; i++)
                table->default_gid_indices |= BIT(i);
}

static void gid_table_release_one(struct ib_device *ib_dev)
{
        unsigned int p;

        rdma_for_each_port (ib_dev, p) {
                release_gid_table(ib_dev, ib_dev->port_data[p].cache.gid);
                ib_dev->port_data[p].cache.gid = NULL;
        }
}

static int _gid_table_setup_one(struct ib_device *ib_dev)
{
        struct ib_gid_table *table;
        unsigned int rdma_port;

        rdma_for_each_port (ib_dev, rdma_port) {
                table = alloc_gid_table(
                        ib_dev->port_data[rdma_port].immutable.gid_tbl_len);
                if (!table)
                        goto rollback_table_setup;

                gid_table_reserve_default(ib_dev, rdma_port, table);
                ib_dev->port_data[rdma_port].cache.gid = table;
        }
        return 0;

rollback_table_setup:
        gid_table_release_one(ib_dev);
        return -ENOMEM;
}

static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
        unsigned int p;

        rdma_for_each_port (ib_dev, p)
                cleanup_gid_table_port(ib_dev, p,
                                       ib_dev->port_data[p].cache.gid);
}

static int gid_table_setup_one(struct ib_device *ib_dev)
{
        int err;

        err = _gid_table_setup_one(ib_dev);

        if (err)
                return err;

        rdma_roce_rescan_device(ib_dev);

        return err;
}

/**
 * rdma_query_gid - Read the GID content from the GID software cache
 * @device:             Device to query the GID
 * @port_num:           Port number of the device
 * @index:              Index of the GID table entry to read
 * @gid:                Pointer to GID where to store the entry's GID
 *
 * rdma_query_gid() only reads the GID entry content for the requested
 * device, port and index. It works for the IB, RoCE and iWARP link
 * layers, and doesn't hold any reference to the GID table entry in the
 * HCA or software cache.
 *
 * Returns 0 on success or an appropriate error code.
 */
int rdma_query_gid(struct ib_device *device, u8 port_num,
                   int index, union ib_gid *gid)
{
        struct ib_gid_table *table;
        unsigned long flags;
        int res = -EINVAL;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        table = rdma_gid_table(device, port_num);
        read_lock_irqsave(&table->rwlock, flags);

        if (index < 0 || index >= table->sz ||
            !is_gid_entry_valid(table->data_vec[index]))
                goto done;

        memcpy(gid, &table->data_vec[index]->attr.gid, sizeof(*gid));
        res = 0;

done:
        read_unlock_irqrestore(&table->rwlock, flags);
        return res;
}
EXPORT_SYMBOL(rdma_query_gid);
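
/*
 * Usage sketch (illustrative): read the GID at index 0 of port 1; no
 * reference to the entry is held afterwards:
 *
 *      union ib_gid gid;
 *
 *      if (!rdma_query_gid(device, 1, 0, &gid))
 *              pr_debug("port 1 GID[0]: %pI6\n", gid.raw);
 */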

/**
 * rdma_find_gid - Returns SGID attributes if the matching GID is found.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: The GID type to search for.
 * @ndev: In RoCE, the net device of the device. NULL means ignore.
 *
 * rdma_find_gid() searches for the specified GID value in the software cache.
 *
 * Returns GID attributes if a valid GID is found or returns ERR_PTR for the
 * error. The caller must invoke rdma_put_gid_attr() to release the reference.
 */
const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
                                        const union ib_gid *gid,
                                        enum ib_gid_type gid_type,
                                        struct net_device *ndev)
{
        unsigned long mask = GID_ATTR_FIND_MASK_GID |
                             GID_ATTR_FIND_MASK_GID_TYPE;
        struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};
        unsigned int p;

        if (ndev)
                mask |= GID_ATTR_FIND_MASK_NETDEV;

        rdma_for_each_port(device, p) {
                struct ib_gid_table *table;
                unsigned long flags;
                int index;

                table = device->port_data[p].cache.gid;
                read_lock_irqsave(&table->rwlock, flags);
                index = find_gid(table, gid, &gid_attr_val, false, mask, NULL);
                if (index >= 0) {
                        const struct ib_gid_attr *attr;

                        get_gid_entry(table->data_vec[index]);
                        attr = &table->data_vec[index]->attr;
                        read_unlock_irqrestore(&table->rwlock, flags);
                        return attr;
                }
                read_unlock_irqrestore(&table->rwlock, flags);
        }

        return ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL(rdma_find_gid);
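
/*
 * Usage sketch (illustrative; "ndev" is the netdev bound to the RoCE
 * GID being looked up, NULL for IB):
 *
 *      const struct ib_gid_attr *attr;
 *
 *      attr = rdma_find_gid(device, &gid, IB_GID_TYPE_ROCE_UDP_ENCAP, ndev);
 *      if (!IS_ERR(attr)) {
 *              ... attr->port_num and attr->index identify the entry ...
 *              rdma_put_gid_attr(attr);
 *      }
 */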

int ib_get_cached_pkey(struct ib_device *device,
                       u8                port_num,
                       int               index,
                       u16              *pkey)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int ret = 0;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->port_data[port_num].cache.pkey;

        if (index < 0 || index >= cache->table_len)
                ret = -EINVAL;
        else
                *pkey = cache->table[index];

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);
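
/*
 * Usage sketch (illustrative; index 0 commonly, though not necessarily,
 * holds the default P_Key 0xffff):
 *
 *      u16 pkey;
 *
 *      if (!ib_get_cached_pkey(device, port_num, 0, &pkey))
 *              pr_debug("P_Key[0] = 0x%04x\n", pkey);
 */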

int ib_get_cached_subnet_prefix(struct ib_device *device,
                                u8                port_num,
                                u64              *sn_pfx)
{
        unsigned long flags;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);
        *sn_pfx = device->port_data[port_num].cache.subnet_prefix;
        read_unlock_irqrestore(&device->cache.lock, flags);

        return 0;
}
EXPORT_SYMBOL(ib_get_cached_subnet_prefix);

int ib_find_cached_pkey(struct ib_device *device,
                        u8                port_num,
                        u16               pkey,
                        u16              *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;
        int partial_ix = -1;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->port_data[port_num].cache.pkey;

        *index = -1;

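        /*
         * Bit 15 of a P_Key is the membership bit (1 = full member);
         * match on the low 15 key bits only and prefer a full-member
         * entry, falling back to a partial-member one.
         */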
        for (i = 0; i < cache->table_len; ++i)
                if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
                        if (cache->table[i] & 0x8000) {
                                *index = i;
                                ret = 0;
                                break;
                        } else {
                                partial_ix = i;
                        }
                }

        if (ret && partial_ix >= 0) {
                *index = partial_ix;
                ret = 0;
        }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);

int ib_find_exact_cached_pkey(struct ib_device *device,
                              u8                port_num,
                              u16               pkey,
                              u16              *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->port_data[port_num].cache.pkey;

        *index = -1;

        for (i = 0; i < cache->table_len; ++i)
                if (cache->table[i] == pkey) {
                        *index = i;
                        ret = 0;
                        break;
                }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);

int ib_get_cached_lmc(struct ib_device *device,
                      u8                port_num,
                      u8                *lmc)
{
        unsigned long flags;
        int ret = 0;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);
        *lmc = device->port_data[port_num].cache.lmc;
        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

int ib_get_cached_port_state(struct ib_device   *device,
                             u8                  port_num,
                             enum ib_port_state *port_state)
{
        unsigned long flags;
        int ret = 0;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);
        *port_state = device->port_data[port_num].cache.port_state;
        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_port_state);

/**
 * rdma_get_gid_attr - Returns GID attributes for a port of a device
 * at a requested gid_index, if a valid GID entry exists.
 * @device:             The device to query.
 * @port_num:           The port number on the device where the GID value
 *                      is to be queried.
 * @index:              Index of the GID table entry whose attributes are to
 *                      be queried.
 *
 * rdma_get_gid_attr() takes a reference on the gid attribute from the
 * cached GID table. The caller must invoke rdma_put_gid_attr() to release
 * that reference, regardless of link layer.
 *
 * Returns a pointer to a valid gid attribute or an ERR_PTR for the
 * appropriate error code.
 */
const struct ib_gid_attr *
rdma_get_gid_attr(struct ib_device *device, u8 port_num, int index)
{
        const struct ib_gid_attr *attr = ERR_PTR(-EINVAL);
        struct ib_gid_table *table;
        unsigned long flags;

        if (!rdma_is_port_valid(device, port_num))
                return ERR_PTR(-EINVAL);

        table = rdma_gid_table(device, port_num);
        if (index < 0 || index >= table->sz)
                return ERR_PTR(-EINVAL);

        read_lock_irqsave(&table->rwlock, flags);
        if (!is_gid_entry_valid(table->data_vec[index]))
                goto done;

        get_gid_entry(table->data_vec[index]);
        attr = &table->data_vec[index]->attr;
done:
        read_unlock_irqrestore(&table->rwlock, flags);
        return attr;
}
EXPORT_SYMBOL(rdma_get_gid_attr);
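
/*
 * Usage sketch (illustrative), pairing the get with the mandatory put:
 *
 *      const struct ib_gid_attr *attr;
 *
 *      attr = rdma_get_gid_attr(device, port_num, index);
 *      if (IS_ERR(attr))
 *              return PTR_ERR(attr);
 *      ... inspect attr->gid, attr->gid_type, attr->ndev ...
 *      rdma_put_gid_attr(attr);
 */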

/**
 * rdma_put_gid_attr - Release reference to the GID attribute
 * @attr:               Pointer to the GID attribute whose reference
 *                      needs to be released.
 *
 * rdma_put_gid_attr() must be used to release a reference acquired via
 * rdma_get_gid_attr() or any other API that returns a pointer to an
 * ib_gid_attr, regardless of an IB or RoCE link layer.
 */
void rdma_put_gid_attr(const struct ib_gid_attr *attr)
{
        struct ib_gid_table_entry *entry =
                container_of(attr, struct ib_gid_table_entry, attr);

        put_gid_entry(entry);
}
EXPORT_SYMBOL(rdma_put_gid_attr);

/**
 * rdma_hold_gid_attr - Get reference to existing GID attribute
 *
 * @attr:               Pointer to the GID attribute whose reference
 *                      needs to be taken.
 *
 * Increase the reference count of a GID attribute to keep it from being
 * freed. Callers are required to already hold a reference to the attribute.
 */
void rdma_hold_gid_attr(const struct ib_gid_attr *attr)
{
        struct ib_gid_table_entry *entry =
                container_of(attr, struct ib_gid_table_entry, attr);

        get_gid_entry(entry);
}
EXPORT_SYMBOL(rdma_hold_gid_attr);

/**
 * rdma_read_gid_attr_ndev_rcu - Read the netdevice of a GID attribute,
 * which must be in the UP state.
 *
 * @attr:       Pointer to the GID attribute
 *
 * Returns a pointer to the netdevice if a netdevice is attached to the
 * GID and the netdevice is in the UP state. The caller must hold the RCU
 * read lock, as this API reads netdev flags that can change while the
 * netdevice migrates to a different net namespace. Returns an ERR_PTR
 * with an error code otherwise.
 */
struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr)
{
        struct ib_gid_table_entry *entry =
                        container_of(attr, struct ib_gid_table_entry, attr);
        struct ib_device *device = entry->attr.device;
        struct net_device *ndev = ERR_PTR(-ENODEV);
        u8 port_num = entry->attr.port_num;
        struct ib_gid_table *table;
        unsigned long flags;
        bool valid;

        table = rdma_gid_table(device, port_num);

        read_lock_irqsave(&table->rwlock, flags);
        valid = is_gid_entry_valid(table->data_vec[attr->index]);
        if (valid) {
                ndev = rcu_dereference(attr->ndev);
                if (!ndev || !(READ_ONCE(ndev->flags) & IFF_UP))
                        ndev = ERR_PTR(-ENODEV);
        }
        read_unlock_irqrestore(&table->rwlock, flags);
        return ndev;
}
EXPORT_SYMBOL(rdma_read_gid_attr_ndev_rcu);
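
/*
 * Usage sketch (illustrative; the RCU read-side lock is the caller's
 * responsibility and the netdev must not be used after the unlock):
 *
 *      struct net_device *ndev;
 *
 *      rcu_read_lock();
 *      ndev = rdma_read_gid_attr_ndev_rcu(attr);
 *      if (!IS_ERR(ndev))
 *              ... inspect ndev while still under rcu_read_lock() ...
 *      rcu_read_unlock();
 */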

static int get_lower_dev_vlan(struct net_device *lower_dev, void *data)
{
        u16 *vlan_id = data;

        if (is_vlan_dev(lower_dev))
                *vlan_id = vlan_dev_vlan_id(lower_dev);

        /* We are interested only in the first-level vlan device, so
         * always return 1 to stop iterating over next-level devices.
         */
        return 1;
}

/**
 * rdma_read_gid_l2_fields - Read the vlan ID and source MAC address
 *                           of a GID entry.
 *
 * @attr:       GID attribute pointer whose L2 fields are to be read
 * @vlan_id:    Pointer to the vlan id to fill in if the GID entry has a
 *              vlan id. It is optional.
 * @smac:       Pointer to the smac to fill in for a GID entry. It is optional.
 *
 * rdma_read_gid_l2_fields() returns 0 on success, filling in the vlan id
 * (if the gid entry has a vlan) and the source MAC, or returns an error.
 */
int rdma_read_gid_l2_fields(const struct ib_gid_attr *attr,
                            u16 *vlan_id, u8 *smac)
{
        struct net_device *ndev;

        rcu_read_lock();
        ndev = rcu_dereference(attr->ndev);
        if (!ndev) {
                rcu_read_unlock();
                return -ENODEV;
        }
        if (smac)
                ether_addr_copy(smac, ndev->dev_addr);
        if (vlan_id) {
                *vlan_id = 0xffff;
                if (is_vlan_dev(ndev)) {
                        *vlan_id = vlan_dev_vlan_id(ndev);
                } else {
                        /* If the netdev is an upper device and its lower
                         * device is a vlan device, consider the vlan id of
                         * the lower vlan device for this gid entry.
                         */
                        netdev_walk_all_lower_dev_rcu(attr->ndev,
                                        get_lower_dev_vlan, vlan_id);
                }
        }
        rcu_read_unlock();
        return 0;
}
EXPORT_SYMBOL(rdma_read_gid_l2_fields);
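
/*
 * Usage sketch (illustrative; both out-parameters are optional, and a
 * vlan_id of 0xffff means the GID entry has no vlan):
 *
 *      u16 vlan_id;
 *      u8 smac[ETH_ALEN];
 *
 *      if (!rdma_read_gid_l2_fields(attr, &vlan_id, smac) &&
 *          vlan_id != 0xffff)
 *              pr_debug("gid vlan %u smac %pM\n", vlan_id, smac);
 */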

static int config_non_roce_gid_cache(struct ib_device *device,
                                     u8 port, int gid_tbl_len)
{
        struct ib_gid_attr gid_attr = {};
        struct ib_gid_table *table;
        int ret = 0;
        int i;

        gid_attr.device = device;
        gid_attr.port_num = port;
        table = rdma_gid_table(device, port);

        mutex_lock(&table->lock);
        for (i = 0; i < gid_tbl_len; ++i) {
                if (!device->ops.query_gid)
                        continue;
                ret = device->ops.query_gid(device, port, i, &gid_attr.gid);
                if (ret) {
                        dev_warn(&device->dev,
                                 "query_gid failed (%d) for index %d\n", ret,
                                 i);
                        goto err;
                }
                gid_attr.index = i;
                add_modify_gid(table, &gid_attr);
        }
err:
        mutex_unlock(&table->lock);
        return ret;
}

static void ib_cache_update(struct ib_device *device,
                            u8                port,
                            bool              enforce_security)
{
        struct ib_port_attr       *tprops = NULL;
        struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
        int                        i;
        int                        ret;

        if (!rdma_is_port_valid(device, port))
                return;

        tprops = kmalloc(sizeof(*tprops), GFP_KERNEL);
        if (!tprops)
                return;

        ret = ib_query_port(device, port, tprops);
        if (ret) {
                dev_warn(&device->dev, "ib_query_port failed (%d)\n", ret);
                goto err;
        }

        if (!rdma_protocol_roce(device, port)) {
                ret = config_non_roce_gid_cache(device, port,
                                                tprops->gid_tbl_len);
                if (ret)
                        goto err;
        }

        pkey_cache = kmalloc(struct_size(pkey_cache, table,
                                         tprops->pkey_tbl_len),
                             GFP_KERNEL);
        if (!pkey_cache)
                goto err;

        pkey_cache->table_len = tprops->pkey_tbl_len;

        for (i = 0; i < pkey_cache->table_len; ++i) {
                ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
                if (ret) {
                        dev_warn(&device->dev,
                                 "ib_query_pkey failed (%d) for index %d\n",
                                 ret, i);
                        goto err;
                }
        }

        write_lock_irq(&device->cache.lock);

        old_pkey_cache = device->port_data[port].cache.pkey;

        device->port_data[port].cache.pkey = pkey_cache;
        device->port_data[port].cache.lmc = tprops->lmc;
        device->port_data[port].cache.port_state = tprops->state;

        device->port_data[port].cache.subnet_prefix = tprops->subnet_prefix;
        write_unlock_irq(&device->cache.lock);

        if (enforce_security)
                ib_security_cache_change(device,
                                         port,
                                         tprops->subnet_prefix);

        kfree(old_pkey_cache);
        kfree(tprops);
        return;

err:
        kfree(pkey_cache);
        kfree(tprops);
}

static void ib_cache_task(struct work_struct *_work)
{
        struct ib_update_work *work =
                container_of(_work, struct ib_update_work, work);

        ib_cache_update(work->device,
                        work->port_num,
                        work->enforce_security);
        kfree(work);
}

static void ib_cache_event(struct ib_event_handler *handler,
                           struct ib_event *event)
{
        struct ib_update_work *work;

        if (event->event == IB_EVENT_PORT_ERR    ||
            event->event == IB_EVENT_PORT_ACTIVE ||
            event->event == IB_EVENT_LID_CHANGE  ||
            event->event == IB_EVENT_PKEY_CHANGE ||
            event->event == IB_EVENT_CLIENT_REREGISTER ||
            event->event == IB_EVENT_GID_CHANGE) {
                work = kmalloc(sizeof(*work), GFP_ATOMIC);
                if (work) {
                        INIT_WORK(&work->work, ib_cache_task);
                        work->device   = event->device;
                        work->port_num = event->element.port_num;
                        if (event->event == IB_EVENT_PKEY_CHANGE ||
                            event->event == IB_EVENT_GID_CHANGE)
                                work->enforce_security = true;
                        else
                                work->enforce_security = false;

                        queue_work(ib_wq, &work->work);
                }
        }
}

int ib_cache_setup_one(struct ib_device *device)
{
        unsigned int p;
        int err;

        rwlock_init(&device->cache.lock);

        err = gid_table_setup_one(device);
        if (err)
                return err;

        rdma_for_each_port (device, p)
                ib_cache_update(device, p, true);

        INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
                              device, ib_cache_event);
        ib_register_event_handler(&device->cache.event_handler);
        return 0;
}

void ib_cache_release_one(struct ib_device *device)
{
        unsigned int p;

        /*
         * The release function frees all the cache elements.
         * This function should be called as part of freeing
         * all the device's resources when the cache can no
         * longer be accessed.
         */
        rdma_for_each_port (device, p)
                kfree(device->port_data[p].cache.pkey);

        gid_table_release_one(device);
}

void ib_cache_cleanup_one(struct ib_device *device)
{
        /* The cleanup function unregisters the event handler,
         * waits for all in-progress workqueue elements and cleans
         * up the GID cache. This function should be called after
         * the device was removed from the devices list and all
         * clients were removed, so the cache exists but is
         * non-functional and shouldn't be updated anymore.
         */
        ib_unregister_event_handler(&device->cache.event_handler);
        flush_workqueue(ib_wq);
        gid_table_cleanup_one(device);

        /*
         * Flush the wq a second time for any pending GID delete work.
         */
        flush_workqueue(ib_wq);
}