linux/drivers/infiniband/core/sa_query.c
/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include "sa.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand subnet administration query support");
MODULE_LICENSE("Dual BSD/GPL");

struct ib_sa_sm_ah {
        struct ib_ah        *ah;
        struct kref          ref;
        u16                  pkey_index;
        u8                   src_path_mask;
};

struct ib_sa_port {
        struct ib_mad_agent *agent;
        struct ib_sa_sm_ah  *sm_ah;
        struct work_struct   update_task;
        spinlock_t           ah_lock;
        u8                   port_num;
};

struct ib_sa_device {
        int                     start_port, end_port;
        struct ib_event_handler event_handler;
        struct ib_sa_port port[0];
};

struct ib_sa_query {
        void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
        void (*release)(struct ib_sa_query *);
        struct ib_sa_client    *client;
        struct ib_sa_port      *port;
        struct ib_mad_send_buf *mad_buf;
        struct ib_sa_sm_ah     *sm_ah;
        int                     id;
};

struct ib_sa_service_query {
        void (*callback)(int, struct ib_sa_service_rec *, void *);
        void *context;
        struct ib_sa_query sa_query;
};

struct ib_sa_path_query {
        void (*callback)(int, struct ib_sa_path_rec *, void *);
        void *context;
        struct ib_sa_query sa_query;
};

struct ib_sa_guidinfo_query {
        void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
        void *context;
        struct ib_sa_query sa_query;
};

struct ib_sa_mcmember_query {
        void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
        void *context;
        struct ib_sa_query sa_query;
};

static void ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device);

static struct ib_client sa_client = {
        .name   = "sa",
        .add    = ib_sa_add_one,
        .remove = ib_sa_remove_one
};

static DEFINE_SPINLOCK(idr_lock);
static DEFINE_IDR(query_idr);

static DEFINE_SPINLOCK(tid_lock);
static u32 tid;

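/*
 * The ib_field tables below describe the big-endian wire layout of
 * each SA attribute, field by field.  ib_pack() and ib_unpack() walk
 * them to convert between the host structures and the MAD data
 * payload.
 */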
#define PATH_REC_FIELD(field) \
        .struct_offset_bytes = offsetof(struct ib_sa_path_rec, field),          \
        .struct_size_bytes   = sizeof ((struct ib_sa_path_rec *) 0)->field,     \
        .field_name          = "sa_path_rec:" #field

static const struct ib_field path_rec_table[] = {
        { PATH_REC_FIELD(service_id),
          .offset_words = 0,
          .offset_bits  = 0,
          .size_bits    = 64 },
        { PATH_REC_FIELD(dgid),
          .offset_words = 2,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { PATH_REC_FIELD(sgid),
          .offset_words = 6,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { PATH_REC_FIELD(dlid),
          .offset_words = 10,
          .offset_bits  = 0,
          .size_bits    = 16 },
        { PATH_REC_FIELD(slid),
          .offset_words = 10,
          .offset_bits  = 16,
          .size_bits    = 16 },
        { PATH_REC_FIELD(raw_traffic),
          .offset_words = 11,
          .offset_bits  = 0,
          .size_bits    = 1 },
        { RESERVED,
          .offset_words = 11,
          .offset_bits  = 1,
          .size_bits    = 3 },
        { PATH_REC_FIELD(flow_label),
          .offset_words = 11,
          .offset_bits  = 4,
          .size_bits    = 20 },
        { PATH_REC_FIELD(hop_limit),
          .offset_words = 11,
          .offset_bits  = 24,
          .size_bits    = 8 },
        { PATH_REC_FIELD(traffic_class),
          .offset_words = 12,
          .offset_bits  = 0,
          .size_bits    = 8 },
        { PATH_REC_FIELD(reversible),
          .offset_words = 12,
          .offset_bits  = 8,
          .size_bits    = 1 },
        { PATH_REC_FIELD(numb_path),
          .offset_words = 12,
          .offset_bits  = 9,
          .size_bits    = 7 },
        { PATH_REC_FIELD(pkey),
          .offset_words = 12,
          .offset_bits  = 16,
          .size_bits    = 16 },
        { PATH_REC_FIELD(qos_class),
          .offset_words = 13,
          .offset_bits  = 0,
          .size_bits    = 12 },
        { PATH_REC_FIELD(sl),
          .offset_words = 13,
          .offset_bits  = 12,
          .size_bits    = 4 },
        { PATH_REC_FIELD(mtu_selector),
          .offset_words = 13,
          .offset_bits  = 16,
          .size_bits    = 2 },
        { PATH_REC_FIELD(mtu),
          .offset_words = 13,
          .offset_bits  = 18,
          .size_bits    = 6 },
        { PATH_REC_FIELD(rate_selector),
          .offset_words = 13,
          .offset_bits  = 24,
          .size_bits    = 2 },
        { PATH_REC_FIELD(rate),
          .offset_words = 13,
          .offset_bits  = 26,
          .size_bits    = 6 },
        { PATH_REC_FIELD(packet_life_time_selector),
          .offset_words = 14,
          .offset_bits  = 0,
          .size_bits    = 2 },
        { PATH_REC_FIELD(packet_life_time),
          .offset_words = 14,
          .offset_bits  = 2,
          .size_bits    = 6 },
        { PATH_REC_FIELD(preference),
          .offset_words = 14,
          .offset_bits  = 8,
          .size_bits    = 8 },
        { RESERVED,
          .offset_words = 14,
          .offset_bits  = 16,
          .size_bits    = 48 },
};

#define MCMEMBER_REC_FIELD(field) \
        .struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field),      \
        .struct_size_bytes   = sizeof ((struct ib_sa_mcmember_rec *) 0)->field, \
        .field_name          = "sa_mcmember_rec:" #field

static const struct ib_field mcmember_rec_table[] = {
        { MCMEMBER_REC_FIELD(mgid),
          .offset_words = 0,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { MCMEMBER_REC_FIELD(port_gid),
          .offset_words = 4,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { MCMEMBER_REC_FIELD(qkey),
          .offset_words = 8,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { MCMEMBER_REC_FIELD(mlid),
          .offset_words = 9,
          .offset_bits  = 0,
          .size_bits    = 16 },
        { MCMEMBER_REC_FIELD(mtu_selector),
          .offset_words = 9,
          .offset_bits  = 16,
          .size_bits    = 2 },
        { MCMEMBER_REC_FIELD(mtu),
          .offset_words = 9,
          .offset_bits  = 18,
          .size_bits    = 6 },
        { MCMEMBER_REC_FIELD(traffic_class),
          .offset_words = 9,
          .offset_bits  = 24,
          .size_bits    = 8 },
        { MCMEMBER_REC_FIELD(pkey),
          .offset_words = 10,
          .offset_bits  = 0,
          .size_bits    = 16 },
        { MCMEMBER_REC_FIELD(rate_selector),
          .offset_words = 10,
          .offset_bits  = 16,
          .size_bits    = 2 },
        { MCMEMBER_REC_FIELD(rate),
          .offset_words = 10,
          .offset_bits  = 18,
          .size_bits    = 6 },
        { MCMEMBER_REC_FIELD(packet_life_time_selector),
          .offset_words = 10,
          .offset_bits  = 24,
          .size_bits    = 2 },
        { MCMEMBER_REC_FIELD(packet_life_time),
          .offset_words = 10,
          .offset_bits  = 26,
          .size_bits    = 6 },
        { MCMEMBER_REC_FIELD(sl),
          .offset_words = 11,
          .offset_bits  = 0,
          .size_bits    = 4 },
        { MCMEMBER_REC_FIELD(flow_label),
          .offset_words = 11,
          .offset_bits  = 4,
          .size_bits    = 20 },
        { MCMEMBER_REC_FIELD(hop_limit),
          .offset_words = 11,
          .offset_bits  = 24,
          .size_bits    = 8 },
        { MCMEMBER_REC_FIELD(scope),
          .offset_words = 12,
          .offset_bits  = 0,
          .size_bits    = 4 },
        { MCMEMBER_REC_FIELD(join_state),
          .offset_words = 12,
          .offset_bits  = 4,
          .size_bits    = 4 },
        { MCMEMBER_REC_FIELD(proxy_join),
          .offset_words = 12,
          .offset_bits  = 8,
          .size_bits    = 1 },
        { RESERVED,
          .offset_words = 12,
          .offset_bits  = 9,
          .size_bits    = 23 },
};

#define SERVICE_REC_FIELD(field) \
        .struct_offset_bytes = offsetof(struct ib_sa_service_rec, field),       \
        .struct_size_bytes   = sizeof ((struct ib_sa_service_rec *) 0)->field,  \
        .field_name          = "sa_service_rec:" #field

static const struct ib_field service_rec_table[] = {
        { SERVICE_REC_FIELD(id),
          .offset_words = 0,
          .offset_bits  = 0,
          .size_bits    = 64 },
        { SERVICE_REC_FIELD(gid),
          .offset_words = 2,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { SERVICE_REC_FIELD(pkey),
          .offset_words = 6,
          .offset_bits  = 0,
          .size_bits    = 16 },
        { SERVICE_REC_FIELD(lease),
          .offset_words = 7,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { SERVICE_REC_FIELD(key),
          .offset_words = 8,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { SERVICE_REC_FIELD(name),
          .offset_words = 12,
          .offset_bits  = 0,
          .size_bits    = 64*8 },
        { SERVICE_REC_FIELD(data8),
          .offset_words = 28,
          .offset_bits  = 0,
          .size_bits    = 16*8 },
        { SERVICE_REC_FIELD(data16),
          .offset_words = 32,
          .offset_bits  = 0,
          .size_bits    = 8*16 },
        { SERVICE_REC_FIELD(data32),
          .offset_words = 36,
          .offset_bits  = 0,
          .size_bits    = 4*32 },
        { SERVICE_REC_FIELD(data64),
          .offset_words = 40,
          .offset_bits  = 0,
          .size_bits    = 2*64 },
};

#define GUIDINFO_REC_FIELD(field) \
        .struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field),      \
        .struct_size_bytes   = sizeof((struct ib_sa_guidinfo_rec *) 0)->field,  \
        .field_name          = "sa_guidinfo_rec:" #field

static const struct ib_field guidinfo_rec_table[] = {
        { GUIDINFO_REC_FIELD(lid),
          .offset_words = 0,
          .offset_bits  = 0,
          .size_bits    = 16 },
        { GUIDINFO_REC_FIELD(block_num),
          .offset_words = 0,
          .offset_bits  = 16,
          .size_bits    = 8 },
        { GUIDINFO_REC_FIELD(res1),
          .offset_words = 0,
          .offset_bits  = 24,
          .size_bits    = 8 },
        { GUIDINFO_REC_FIELD(res2),
          .offset_words = 1,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { GUIDINFO_REC_FIELD(guid_info_list),
          .offset_words = 2,
          .offset_bits  = 0,
          .size_bits    = 512 },
};

static void free_sm_ah(struct kref *kref)
{
        struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

        ib_destroy_ah(sm_ah->ah);
        kfree(sm_ah);
}

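/*
 * Rebuild the cached address handle to the subnet manager.  This runs
 * from ib_wq whenever a port event has invalidated the old AH (see
 * ib_sa_event() below) and directly during setup in ib_sa_add_one().
 */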
static void update_sm_ah(struct work_struct *work)
{
        struct ib_sa_port *port =
                container_of(work, struct ib_sa_port, update_task);
        struct ib_sa_sm_ah *new_ah;
        struct ib_port_attr port_attr;
        struct ib_ah_attr   ah_attr;

        if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
                printk(KERN_WARNING "Couldn't query port\n");
                return;
        }

        new_ah = kmalloc(sizeof *new_ah, GFP_KERNEL);
        if (!new_ah) {
                printk(KERN_WARNING "Couldn't allocate new SM AH\n");
                return;
        }

        kref_init(&new_ah->ref);
        new_ah->src_path_mask = (1 << port_attr.lmc) - 1;

        new_ah->pkey_index = 0;
        if (ib_find_pkey(port->agent->device, port->port_num,
                         IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
                printk(KERN_ERR "Couldn't find index for default PKey\n");

        memset(&ah_attr, 0, sizeof ah_attr);
        ah_attr.dlid     = port_attr.sm_lid;
        ah_attr.sl       = port_attr.sm_sl;
        ah_attr.port_num = port->port_num;

        new_ah->ah = ib_create_ah(port->agent->qp->pd, &ah_attr);
        if (IS_ERR(new_ah->ah)) {
                printk(KERN_WARNING "Couldn't create new SM AH\n");
                kfree(new_ah);
                return;
        }

        spin_lock_irq(&port->ah_lock);
        if (port->sm_ah)
                kref_put(&port->sm_ah->ref, free_sm_ah);
        port->sm_ah = new_ah;
        spin_unlock_irq(&port->ah_lock);
}

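/*
 * On any event that can change the SM LID, SL, or P_Key table, drop
 * the cached SM AH and queue update_sm_ah() to build a fresh one.
 */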
static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event)
{
        if (event->event == IB_EVENT_PORT_ERR    ||
            event->event == IB_EVENT_PORT_ACTIVE ||
            event->event == IB_EVENT_LID_CHANGE  ||
            event->event == IB_EVENT_PKEY_CHANGE ||
            event->event == IB_EVENT_SM_CHANGE   ||
            event->event == IB_EVENT_CLIENT_REREGISTER) {
                unsigned long flags;
                struct ib_sa_device *sa_dev =
                        container_of(handler, typeof(*sa_dev), event_handler);
                struct ib_sa_port *port =
                        &sa_dev->port[event->element.port_num - sa_dev->start_port];

                if (rdma_port_get_link_layer(handler->device, port->port_num) != IB_LINK_LAYER_INFINIBAND)
                        return;

                spin_lock_irqsave(&port->ah_lock, flags);
                if (port->sm_ah)
                        kref_put(&port->sm_ah->ref, free_sm_ah);
                port->sm_ah = NULL;
                spin_unlock_irqrestore(&port->ah_lock, flags);

                queue_work(ib_wq, &sa_dev->port[event->element.port_num -
                                            sa_dev->start_port].update_task);
        }
}

void ib_sa_register_client(struct ib_sa_client *client)
{
        atomic_set(&client->users, 1);
        init_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_register_client);

void ib_sa_unregister_client(struct ib_sa_client *client)
{
        ib_sa_client_put(client);
        wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_unregister_client);

/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id:ID of query to cancel
 * @query:query pointer to cancel
 *
 * Try to cancel an SA query.  If the id and query don't match up or
 * the query has already completed, nothing is done.  Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
        unsigned long flags;
        struct ib_mad_agent *agent;
        struct ib_mad_send_buf *mad_buf;

        spin_lock_irqsave(&idr_lock, flags);
        if (idr_find(&query_idr, id) != query) {
                spin_unlock_irqrestore(&idr_lock, flags);
                return;
        }
        agent = query->port->agent;
        mad_buf = query->mad_buf;
        spin_unlock_irqrestore(&idr_lock, flags);

        ib_cancel_mad(agent, mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);

static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
{
        struct ib_sa_device *sa_dev;
        struct ib_sa_port   *port;
        unsigned long flags;
        u8 src_path_mask;

        sa_dev = ib_get_client_data(device, &sa_client);
        if (!sa_dev)
                return 0x7f;

        port  = &sa_dev->port[port_num - sa_dev->start_port];
        spin_lock_irqsave(&port->ah_lock, flags);
        src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
        spin_unlock_irqrestore(&port->ah_lock, flags);

        return src_path_mask;
}

int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
                         struct ib_sa_path_rec *rec, struct ib_ah_attr *ah_attr)
{
        int ret;
        u16 gid_index;
        int force_grh;

        memset(ah_attr, 0, sizeof *ah_attr);
        ah_attr->dlid = be16_to_cpu(rec->dlid);
        ah_attr->sl = rec->sl;
        ah_attr->src_path_bits = be16_to_cpu(rec->slid) &
                                 get_src_path_mask(device, port_num);
        ah_attr->port_num = port_num;
        ah_attr->static_rate = rec->rate;

        force_grh = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_ETHERNET;

        if (rec->hop_limit > 1 || force_grh) {
                ah_attr->ah_flags = IB_AH_GRH;
                ah_attr->grh.dgid = rec->dgid;

                ret = ib_find_cached_gid(device, &rec->sgid, &port_num,
                                         &gid_index);
                if (ret)
                        return ret;

                ah_attr->grh.sgid_index    = gid_index;
                ah_attr->grh.flow_label    = be32_to_cpu(rec->flow_label);
                ah_attr->grh.hop_limit     = rec->hop_limit;
                ah_attr->grh.traffic_class = rec->traffic_class;
        }
        return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_path);

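/*
 * Typical consumer flow (a hypothetical sketch, not code from this
 * file): once a path record query completes, the caller converts the
 * record into an address handle for its QP:
 *
 *      struct ib_ah_attr ah_attr;
 *      struct ib_ah *ah;
 *
 *      if (!ib_init_ah_from_path(device, port_num, rec, &ah_attr))
 *              ah = ib_create_ah(pd, &ah_attr);
 *
 * Here "device", "port_num", "rec" and "pd" are assumed to come from
 * the caller's context; the IB CM does essentially this when it sets
 * up a connection.
 */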
static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
        unsigned long flags;

        spin_lock_irqsave(&query->port->ah_lock, flags);
        if (!query->port->sm_ah) {
                spin_unlock_irqrestore(&query->port->ah_lock, flags);
                return -EAGAIN;
        }
        kref_get(&query->port->sm_ah->ref);
        query->sm_ah = query->port->sm_ah;
        spin_unlock_irqrestore(&query->port->ah_lock, flags);

        query->mad_buf = ib_create_send_mad(query->port->agent, 1,
                                            query->sm_ah->pkey_index,
                                            0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
                                            gfp_mask);
        if (IS_ERR(query->mad_buf)) {
                kref_put(&query->sm_ah->ref, free_sm_ah);
                return -ENOMEM;
        }

        query->mad_buf->ah = query->sm_ah->ah;

        return 0;
}

static void free_mad(struct ib_sa_query *query)
{
        ib_free_send_mad(query->mad_buf);
        kref_put(&query->sm_ah->ref, free_sm_ah);
}

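/*
 * Fill in the common SA MAD header.  The 64-bit transaction ID puts
 * the MAD agent's hi_tid in the upper 32 bits and a locally
 * incremented counter (seeded with random bytes in ib_sa_init()) in
 * the lower 32, so TIDs are unique per agent and unlikely to repeat
 * across module loads.
 */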
static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
{
        unsigned long flags;

        memset(mad, 0, sizeof *mad);

        mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
        mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_SUBN_ADM;
        mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;

        spin_lock_irqsave(&tid_lock, flags);
        mad->mad_hdr.tid           =
                cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
        spin_unlock_irqrestore(&tid_lock, flags);
}

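/*
 * Assign the query an ID and post the MAD.  idr_alloc() runs under
 * idr_lock and therefore must use GFP_NOWAIT; when the caller's
 * gfp_mask allows sleeping, idr_preload() charges up the IDR
 * allocator beforehand so the atomic allocation is unlikely to fail.
 */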
static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
{
        bool preload = gfp_mask & __GFP_WAIT;
        unsigned long flags;
        int ret, id;

        if (preload)
                idr_preload(gfp_mask);
        spin_lock_irqsave(&idr_lock, flags);

        id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);

        spin_unlock_irqrestore(&idr_lock, flags);
        if (preload)
                idr_preload_end();
        if (id < 0)
                return id;

        query->mad_buf->timeout_ms  = timeout_ms;
        query->mad_buf->context[0] = query;
        query->id = id;

        ret = ib_post_send_mad(query->mad_buf, NULL);
        if (ret) {
                spin_lock_irqsave(&idr_lock, flags);
                idr_remove(&query_idr, id);
                spin_unlock_irqrestore(&idr_lock, flags);
        }

        /*
         * It's not safe to dereference query any more, because the
         * send may already have completed and freed the query in
         * another context.
         */
        return ret ? ret : id;
}

void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec)
{
        ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
}
EXPORT_SYMBOL(ib_sa_unpack_path);

static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
                                    int status,
                                    struct ib_sa_mad *mad)
{
        struct ib_sa_path_query *query =
                container_of(sa_query, struct ib_sa_path_query, sa_query);

        if (mad) {
                struct ib_sa_path_rec rec;

                ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
                          mad->data, &rec);
                query->callback(status, &rec, query->context);
        } else
                query->callback(status, NULL, query->context);
}

static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
        kfree(container_of(sa_query, struct ib_sa_path_query, sa_query));
}

/**
 * ib_sa_path_rec_get - Start a Path get query
 * @client:SA client
 * @device:device to send query on
 * @port_num: port number to send query on
 * @rec:Path Record to send in query
 * @comp_mask:component mask to send in query
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when query completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path.  The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code.  Otherwise it is a query ID that can be used to cancel
 * the query.
 */
int ib_sa_path_rec_get(struct ib_sa_client *client,
                       struct ib_device *device, u8 port_num,
                       struct ib_sa_path_rec *rec,
                       ib_sa_comp_mask comp_mask,
                       int timeout_ms, gfp_t gfp_mask,
                       void (*callback)(int status,
                                        struct ib_sa_path_rec *resp,
                                        void *context),
                       void *context,
                       struct ib_sa_query **sa_query)
{
        struct ib_sa_path_query *query;
        struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
        struct ib_sa_port   *port;
        struct ib_mad_agent *agent;
        struct ib_sa_mad *mad;
        int ret;

        if (!sa_dev)
                return -ENODEV;

        port  = &sa_dev->port[port_num - sa_dev->start_port];
        agent = port->agent;

        query = kmalloc(sizeof *query, gfp_mask);
        if (!query)
                return -ENOMEM;

        query->sa_query.port     = port;
        ret = alloc_mad(&query->sa_query, gfp_mask);
        if (ret)
                goto err1;

        ib_sa_client_get(client);
        query->sa_query.client = client;
        query->callback        = callback;
        query->context         = context;

        mad = query->sa_query.mad_buf->mad;
        init_mad(mad, agent);

        query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
        query->sa_query.release  = ib_sa_path_rec_release;
        mad->mad_hdr.method      = IB_MGMT_METHOD_GET;
        mad->mad_hdr.attr_id     = cpu_to_be16(IB_SA_ATTR_PATH_REC);
        mad->sa_hdr.comp_mask    = comp_mask;

        ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data);

        *sa_query = &query->sa_query;

        ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
        if (ret < 0)
                goto err2;

        return ret;

err2:
        *sa_query = NULL;
        ib_sa_client_put(query->sa_query.client);
        free_mad(&query->sa_query);

err1:
        kfree(query);
        return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
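
/*
 * Example usage (a minimal, hypothetical sketch -- the "my_*" names
 * are illustrative and not part of this file): a consumer registers
 * an SA client, issues a path query, and may cancel it with the
 * returned ID.
 *
 *      static struct ib_sa_client my_sa_client;
 *      static struct ib_sa_query *my_query;
 *
 *      static void my_path_handler(int status,
 *                                  struct ib_sa_path_rec *resp,
 *                                  void *context)
 *      {
 *              if (!status)
 *                      pr_info("path found: dlid 0x%x\n",
 *                              be16_to_cpu(resp->dlid));
 *      }
 *
 *      ib_sa_register_client(&my_sa_client);
 *      id = ib_sa_path_rec_get(&my_sa_client, device, port_num, &rec,
 *                              IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID,
 *                              1000, GFP_KERNEL,
 *                              my_path_handler, NULL, &my_query);
 *      if (id >= 0 && must_abort)
 *              ib_sa_cancel_query(id, my_query);
 *
 *      ib_sa_unregister_client(&my_sa_client);
 *
 * ib_sa_unregister_client() blocks until all queries issued through
 * the client have completed, so it is safe to free the client
 * afterwards.
 */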

static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
                                       int status,
                                       struct ib_sa_mad *mad)
{
        struct ib_sa_service_query *query =
                container_of(sa_query, struct ib_sa_service_query, sa_query);

        if (mad) {
                struct ib_sa_service_rec rec;

                ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
                          mad->data, &rec);
                query->callback(status, &rec, query->context);
        } else
                query->callback(status, NULL, query->context);
}

static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
{
        kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
}

/**
 * ib_sa_service_rec_query - Start Service Record operation
 * @client:SA client
 * @device:device to send request on
 * @port_num: port number to send request on
 * @method:SA method - should be get, set, or delete
 * @rec:Service Record to send in request
 * @comp_mask:component mask to send in request
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when request completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:request context, used to cancel request
 *
 * Send a Service Record set/get/delete to the SA to register,
 * unregister or query a service record.
 * The callback function will be called when the request completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_service_rec_query() is negative, it is an
 * error code.  Otherwise it is a request ID that can be used to cancel
 * the query.
 */
int ib_sa_service_rec_query(struct ib_sa_client *client,
                            struct ib_device *device, u8 port_num, u8 method,
                            struct ib_sa_service_rec *rec,
                            ib_sa_comp_mask comp_mask,
                            int timeout_ms, gfp_t gfp_mask,
                            void (*callback)(int status,
                                             struct ib_sa_service_rec *resp,
                                             void *context),
                            void *context,
                            struct ib_sa_query **sa_query)
{
        struct ib_sa_service_query *query;
        struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
        struct ib_sa_port   *port;
        struct ib_mad_agent *agent;
        struct ib_sa_mad *mad;
        int ret;

        if (!sa_dev)
                return -ENODEV;

        port  = &sa_dev->port[port_num - sa_dev->start_port];
        agent = port->agent;

        if (method != IB_MGMT_METHOD_GET &&
            method != IB_MGMT_METHOD_SET &&
            method != IB_SA_METHOD_DELETE)
                return -EINVAL;

        query = kmalloc(sizeof *query, gfp_mask);
        if (!query)
                return -ENOMEM;

        query->sa_query.port     = port;
        ret = alloc_mad(&query->sa_query, gfp_mask);
        if (ret)
                goto err1;

        ib_sa_client_get(client);
        query->sa_query.client = client;
        query->callback        = callback;
        query->context         = context;

        mad = query->sa_query.mad_buf->mad;
        init_mad(mad, agent);

        query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
        query->sa_query.release  = ib_sa_service_rec_release;
        mad->mad_hdr.method      = method;
        mad->mad_hdr.attr_id     = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
        mad->sa_hdr.comp_mask    = comp_mask;

        ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
                rec, mad->data);

        *sa_query = &query->sa_query;

        ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
        if (ret < 0)
                goto err2;

        return ret;

err2:
        *sa_query = NULL;
        ib_sa_client_put(query->sa_query.client);
        free_mad(&query->sa_query);

err1:
        kfree(query);
        return ret;
}
EXPORT_SYMBOL(ib_sa_service_rec_query);

static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
                                        int status,
                                        struct ib_sa_mad *mad)
{
        struct ib_sa_mcmember_query *query =
                container_of(sa_query, struct ib_sa_mcmember_query, sa_query);

        if (mad) {
                struct ib_sa_mcmember_rec rec;

                ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
                          mad->data, &rec);
                query->callback(status, &rec, query->context);
        } else
                query->callback(status, NULL, query->context);
}

static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
        kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}

int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
                             struct ib_device *device, u8 port_num,
                             u8 method,
                             struct ib_sa_mcmember_rec *rec,
                             ib_sa_comp_mask comp_mask,
                             int timeout_ms, gfp_t gfp_mask,
                             void (*callback)(int status,
                                              struct ib_sa_mcmember_rec *resp,
                                              void *context),
                             void *context,
                             struct ib_sa_query **sa_query)
{
        struct ib_sa_mcmember_query *query;
        struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
        struct ib_sa_port   *port;
        struct ib_mad_agent *agent;
        struct ib_sa_mad *mad;
        int ret;

        if (!sa_dev)
                return -ENODEV;

        port  = &sa_dev->port[port_num - sa_dev->start_port];
        agent = port->agent;

        query = kmalloc(sizeof *query, gfp_mask);
        if (!query)
                return -ENOMEM;

        query->sa_query.port     = port;
        ret = alloc_mad(&query->sa_query, gfp_mask);
        if (ret)
                goto err1;

        ib_sa_client_get(client);
        query->sa_query.client = client;
        query->callback        = callback;
        query->context         = context;

        mad = query->sa_query.mad_buf->mad;
        init_mad(mad, agent);

        query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
        query->sa_query.release  = ib_sa_mcmember_rec_release;
        mad->mad_hdr.method      = method;
        mad->mad_hdr.attr_id     = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
        mad->sa_hdr.comp_mask    = comp_mask;

        ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
                rec, mad->data);

        *sa_query = &query->sa_query;

        ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
        if (ret < 0)
                goto err2;

        return ret;

err2:
        *sa_query = NULL;
        ib_sa_client_put(query->sa_query.client);
        free_mad(&query->sa_query);

err1:
        kfree(query);
        return ret;
}

/* Support GuidInfoRecord */
static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
                                        int status,
                                        struct ib_sa_mad *mad)
{
        struct ib_sa_guidinfo_query *query =
                container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);

        if (mad) {
                struct ib_sa_guidinfo_rec rec;

                ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
                          mad->data, &rec);
                query->callback(status, &rec, query->context);
        } else
                query->callback(status, NULL, query->context);
}

static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
{
        kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
}

int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
                              struct ib_device *device, u8 port_num,
                              struct ib_sa_guidinfo_rec *rec,
                              ib_sa_comp_mask comp_mask, u8 method,
                              int timeout_ms, gfp_t gfp_mask,
                              void (*callback)(int status,
                                               struct ib_sa_guidinfo_rec *resp,
                                               void *context),
                              void *context,
                              struct ib_sa_query **sa_query)
{
        struct ib_sa_guidinfo_query *query;
        struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
        struct ib_sa_port *port;
        struct ib_mad_agent *agent;
        struct ib_sa_mad *mad;
        int ret;

        if (!sa_dev)
                return -ENODEV;

        if (method != IB_MGMT_METHOD_GET &&
            method != IB_MGMT_METHOD_SET &&
            method != IB_SA_METHOD_DELETE) {
                return -EINVAL;
        }

        port  = &sa_dev->port[port_num - sa_dev->start_port];
        agent = port->agent;

        query = kmalloc(sizeof *query, gfp_mask);
        if (!query)
                return -ENOMEM;

        query->sa_query.port = port;
        ret = alloc_mad(&query->sa_query, gfp_mask);
        if (ret)
                goto err1;

        ib_sa_client_get(client);
        query->sa_query.client = client;
        query->callback        = callback;
        query->context         = context;

        mad = query->sa_query.mad_buf->mad;
        init_mad(mad, agent);

        query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
        query->sa_query.release  = ib_sa_guidinfo_rec_release;

        mad->mad_hdr.method      = method;
        mad->mad_hdr.attr_id     = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
        mad->sa_hdr.comp_mask    = comp_mask;

        ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
                mad->data);

        *sa_query = &query->sa_query;

        ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
        if (ret < 0)
                goto err2;

        return ret;

err2:
        *sa_query = NULL;
        ib_sa_client_put(query->sa_query.client);
        free_mad(&query->sa_query);

err1:
        kfree(query);
        return ret;
}
EXPORT_SYMBOL(ib_sa_guid_info_rec_query);

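/*
 * MAD-layer completion handlers.  A successful response is delivered
 * first through recv_handler(), which invokes the query callback with
 * the decoded status; send_handler() then runs for every send
 * (success, timeout, cancel, or error) and does the common teardown:
 * remove the ID, free the MAD, drop the client reference, and release
 * the query.
 */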
static void send_handler(struct ib_mad_agent *agent,
                         struct ib_mad_send_wc *mad_send_wc)
{
        struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
        unsigned long flags;

        if (query->callback)
                switch (mad_send_wc->status) {
                case IB_WC_SUCCESS:
                        /* No callback -- already got recv */
                        break;
                case IB_WC_RESP_TIMEOUT_ERR:
                        query->callback(query, -ETIMEDOUT, NULL);
                        break;
                case IB_WC_WR_FLUSH_ERR:
                        query->callback(query, -EINTR, NULL);
                        break;
                default:
                        query->callback(query, -EIO, NULL);
                        break;
                }

        spin_lock_irqsave(&idr_lock, flags);
        idr_remove(&query_idr, query->id);
        spin_unlock_irqrestore(&idr_lock, flags);

        free_mad(query);
        ib_sa_client_put(query->client);
        query->release(query);
}

static void recv_handler(struct ib_mad_agent *mad_agent,
                         struct ib_mad_recv_wc *mad_recv_wc)
{
        struct ib_sa_query *query;
        struct ib_mad_send_buf *mad_buf;

        mad_buf = (void *) (unsigned long) mad_recv_wc->wc->wr_id;
        query = mad_buf->context[0];

        if (query->callback) {
                if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
                        query->callback(query,
                                        mad_recv_wc->recv_buf.mad->mad_hdr.status ?
                                        -EINVAL : 0,
                                        (struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
                else
                        query->callback(query, -EIO, NULL);
        }

        ib_free_recv_mad(mad_recv_wc);
}

static void ib_sa_add_one(struct ib_device *device)
{
        struct ib_sa_device *sa_dev;
        int s, e, i;

        if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
                return;

        if (device->node_type == RDMA_NODE_IB_SWITCH)
                s = e = 0;
        else {
                s = 1;
                e = device->phys_port_cnt;
        }

        sa_dev = kzalloc(sizeof *sa_dev +
                         (e - s + 1) * sizeof (struct ib_sa_port),
                         GFP_KERNEL);
        if (!sa_dev)
                return;

        sa_dev->start_port = s;
        sa_dev->end_port   = e;

        for (i = 0; i <= e - s; ++i) {
                spin_lock_init(&sa_dev->port[i].ah_lock);
                if (rdma_port_get_link_layer(device, i + 1) != IB_LINK_LAYER_INFINIBAND)
                        continue;

                sa_dev->port[i].sm_ah    = NULL;
                sa_dev->port[i].port_num = i + s;

                sa_dev->port[i].agent =
                        ib_register_mad_agent(device, i + s, IB_QPT_GSI,
                                              NULL, 0, send_handler,
                                              recv_handler, sa_dev);
                if (IS_ERR(sa_dev->port[i].agent))
                        goto err;

                INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
        }

        ib_set_client_data(device, &sa_client, sa_dev);

        /*
         * We register our event handler after everything is set up,
         * and then update our cached info after the event handler is
         * registered to avoid any problems if a port changes state
         * during our initialization.
         */

        INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
        if (ib_register_event_handler(&sa_dev->event_handler))
                goto err;

        for (i = 0; i <= e - s; ++i)
                if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND)
                        update_sm_ah(&sa_dev->port[i].update_task);

        return;

err:
        while (--i >= 0)
                if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND)
                        ib_unregister_mad_agent(sa_dev->port[i].agent);

        kfree(sa_dev);

        return;
}

static void ib_sa_remove_one(struct ib_device *device)
{
        struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
        int i;

        if (!sa_dev)
                return;

        ib_unregister_event_handler(&sa_dev->event_handler);

        flush_workqueue(ib_wq);

        for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
                if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND) {
                        ib_unregister_mad_agent(sa_dev->port[i].agent);
                        if (sa_dev->port[i].sm_ah)
                                kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
                }
        }

        kfree(sa_dev);
}

static int __init ib_sa_init(void)
{
        int ret;

        get_random_bytes(&tid, sizeof tid);

        ret = ib_register_client(&sa_client);
        if (ret) {
                printk(KERN_ERR "Couldn't register ib_sa client\n");
                goto err1;
        }

        ret = mcast_init();
        if (ret) {
                printk(KERN_ERR "Couldn't initialize multicast handling\n");
                goto err2;
        }

        return 0;
err2:
        ib_unregister_client(&sa_client);
err1:
        return ret;
}

static void __exit ib_sa_cleanup(void)
{
        mcast_cleanup();
        ib_unregister_client(&sa_client);
        idr_destroy(&query_idr);
}

module_init(ib_sa_init);
module_exit(ib_sa_cleanup);